remove un-needed stuff
.github/workflows/deploy.yml (vendored, 233 deletions)
@@ -1,233 +0,0 @@
name: Deploy to Windows Server

on:
  push:
    branches: [ main, master ]
  pull_request:
    branches: [ main, master ]
  workflow_dispatch:

env:
  GO_VERSION: '1.23'
  SERVICE_NAME: 'ACC Server Manager'
  BINARY_NAME: 'acc-server-manager'
  MIGRATE_BINARY_NAME: 'acc-migrate'

jobs:
  build:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}

      - name: Cache Go modules
        uses: actions/cache@v3
        with:
          path: |
            ~/.cache/go-build
            ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-

      - name: Run tests
        run: go test -v ./...

      - name: Build API for Windows
        env:
          GOOS: windows
          GOARCH: amd64
        run: |
          go build -o ${{ env.BINARY_NAME }}.exe ./cmd/api

      - name: Build Migration tool for Windows
        env:
          GOOS: windows
          GOARCH: amd64
        run: |
          go build -o ${{ env.MIGRATE_BINARY_NAME }}.exe ./cmd/migrate

      - name: Upload build artifacts
        uses: actions/upload-artifact@v3
        with:
          name: windows-binaries
          path: |
            ${{ env.BINARY_NAME }}.exe
            ${{ env.MIGRATE_BINARY_NAME }}.exe
          retention-days: 5

  deploy:
    needs: build
    runs-on: ubuntu-latest
    if: github.ref == 'refs/heads/main' || github.ref == 'refs/heads/master'

    steps:
      - uses: actions/checkout@v4

      - name: Download build artifacts
        uses: actions/download-artifact@v3
        with:
          name: windows-binaries
          path: ./build

      - name: Setup SSH key
        uses: webfactory/ssh-agent@v0.7.0
        with:
          ssh-private-key: ${{ secrets.WINDOWS_SERVER_SSH_KEY }}

      - name: Deploy to Windows Server
        env:
          SERVER_HOST: ${{ secrets.WINDOWS_SERVER_HOST }}
          SERVER_USER: ${{ secrets.WINDOWS_SERVER_USER }}
          SERVER_PASSWORD: ${{ secrets.WINDOWS_SERVER_PASSWORD }}
          DEPLOY_PATH: ${{ secrets.DEPLOY_PATH }}
        run: |
          # Install PowerShell Core if not available
          if ! command -v pwsh &> /dev/null; then
            echo "Installing PowerShell Core..."
            wget -q https://github.com/PowerShell/PowerShell/releases/download/v7.4.0/powershell_7.4.0-1.deb_amd64.deb
            sudo dpkg -i powershell_7.4.0-1.deb_amd64.deb
            sudo apt-get install -f
          fi

          # Create deployment script
          cat > deploy_script.ps1 << 'EOF'
          param(
              [string]$ServiceName = "${{ env.SERVICE_NAME }}",
              [string]$BinaryName = "${{ env.BINARY_NAME }}",
              [string]$MigrateBinaryName = "${{ env.MIGRATE_BINARY_NAME }}",
              [string]$DeployPath = "$env:DEPLOY_PATH"
          )

          Write-Host "Starting deployment process..." -ForegroundColor Green

          # Check if service exists and stop it
          $service = Get-Service -Name $ServiceName -ErrorAction SilentlyContinue
          if ($service) {
              Write-Host "Stopping service: $ServiceName" -ForegroundColor Yellow
              Stop-Service -Name $ServiceName -Force

              # Wait for service to stop
              $timeout = 30
              $elapsed = 0
              while ($service.Status -ne 'Stopped' -and $elapsed -lt $timeout) {
                  Start-Sleep -Seconds 1
                  $elapsed++
                  $service.Refresh()
              }

              if ($service.Status -ne 'Stopped') {
                  Write-Error "Failed to stop service within timeout"
                  exit 1
              }
              Write-Host "Service stopped successfully" -ForegroundColor Green
          } else {
              Write-Host "Service not found: $ServiceName" -ForegroundColor Yellow
          }

          # Create backup of current deployment
          $backupPath = "$DeployPath\backup_$(Get-Date -Format 'yyyyMMdd_HHmmss')"
          if (Test-Path "$DeployPath\$BinaryName.exe") {
              Write-Host "Creating backup at: $backupPath" -ForegroundColor Yellow
              New-Item -ItemType Directory -Path $backupPath -Force | Out-Null
              Copy-Item "$DeployPath\*" -Destination $backupPath -Recurse -Force
          }

          # Copy new binaries
          Write-Host "Copying new binaries to: $DeployPath" -ForegroundColor Yellow
          Copy-Item "C:\temp\$BinaryName.exe" -Destination "$DeployPath\$BinaryName.exe" -Force
          Copy-Item "C:\temp\$MigrateBinaryName.exe" -Destination "$DeployPath\$MigrateBinaryName.exe" -Force

          # Run migrations
          Write-Host "Running database migrations..." -ForegroundColor Yellow
          $migrateResult = & "$DeployPath\$MigrateBinaryName.exe" 2>&1
          Write-Host "Migration output: $migrateResult" -ForegroundColor Cyan

          # Start service
          if ($service) {
              Write-Host "Starting service: $ServiceName" -ForegroundColor Yellow
              Start-Service -Name $ServiceName

              # Wait for service to start
              $timeout = 30
              $elapsed = 0
              while ($service.Status -ne 'Running' -and $elapsed -lt $timeout) {
                  Start-Sleep -Seconds 1
                  $elapsed++
                  $service.Refresh()
              }

              if ($service.Status -ne 'Running') {
                  Write-Error "Failed to start service within timeout"
                  # Rollback
                  Write-Host "Rolling back deployment..." -ForegroundColor Red
                  if (Test-Path $backupPath) {
                      Copy-Item "$backupPath\*" -Destination $DeployPath -Recurse -Force
                      Start-Service -Name $ServiceName
                  }
                  exit 1
              }
              Write-Host "Service started successfully" -ForegroundColor Green
          } else {
              Write-Host "Service not configured. Manual start required." -ForegroundColor Yellow
          }

          # Cleanup old backups (keep last 5)
          $backupDir = Split-Path $DeployPath -Parent
          Get-ChildItem -Path $backupDir -Directory -Name "backup_*" |
              Sort-Object -Descending |
              Select-Object -Skip 5 |
              ForEach-Object { Remove-Item -Path "$backupDir\$_" -Recurse -Force }

          Write-Host "Deployment completed successfully!" -ForegroundColor Green
          EOF

          # Copy files to Windows server using SCP
          echo "Copying files to Windows server..."
          scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \
            ./build/${{ env.BINARY_NAME }}.exe \
            ./build/${{ env.MIGRATE_BINARY_NAME }}.exe \
            deploy_script.ps1 \
            ${{ env.SERVER_USER }}@${{ env.SERVER_HOST }}:C:/temp/

          # Execute deployment script on Windows server
          echo "Executing deployment script on Windows server..."
          ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \
            ${{ env.SERVER_USER }}@${{ env.SERVER_HOST }} \
            "powershell.exe -ExecutionPolicy Bypass -File C:/temp/deploy_script.ps1"

          # Cleanup temp files
          ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \
            ${{ env.SERVER_USER }}@${{ env.SERVER_HOST }} \
            "del C:/temp/${{ env.BINARY_NAME }}.exe C:/temp/${{ env.MIGRATE_BINARY_NAME }}.exe C:/temp/deploy_script.ps1"

          echo "Deployment completed successfully!"

  notify:
    needs: [build, deploy]
    runs-on: ubuntu-latest
    if: always()

    steps:
      - name: Notify deployment status
        env:
          WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
        run: |
          if [ "${{ needs.deploy.result }}" == "success" ]; then
            STATUS="✅ SUCCESS"
            COLOR="good"
          else
            STATUS="❌ FAILED"
            COLOR="danger"
          fi

          if [ -n "$WEBHOOK_URL" ]; then
            curl -X POST -H 'Content-type: application/json' \
              --data "{\"text\":\"ACC Server Manager Deployment $STATUS\",\"attachments\":[{\"color\":\"$COLOR\",\"fields\":[{\"title\":\"Branch\",\"value\":\"${{ github.ref_name }}\",\"short\":true},{\"title\":\"Commit\",\"value\":\"${{ github.sha }}\",\"short\":true}]}]}" \
              $WEBHOOK_URL
          fi
@@ -1,168 +0,0 @@
-- Migration 002: Migrate servers and related tables from integer IDs to UUIDs
-- This migration handles: servers, configs, state_histories, steam_credentials, system_configs

PRAGMA foreign_keys=OFF;
BEGIN TRANSACTION;

-- Step 1: Create new servers table with UUID primary key
CREATE TABLE servers_new (
    id TEXT PRIMARY KEY,                     -- UUID stored as TEXT in SQLite
    name TEXT NOT NULL,
    ip TEXT NOT NULL,
    port INTEGER NOT NULL,
    path TEXT NOT NULL,                      -- Updated from config_path to path to match Go model
    service_name TEXT NOT NULL,
    date_created DATETIME,
    from_steam_cmd BOOLEAN NOT NULL DEFAULT 1 -- Added to match Go model
);

-- Step 2: Generate UUIDs for existing servers and migrate data
INSERT INTO servers_new (id, name, ip, port, path, service_name, from_steam_cmd)
SELECT
    LOWER(HEX(RANDOMBLOB(4)) || '-' || HEX(RANDOMBLOB(2)) || '-' || '4' || SUBSTR(HEX(RANDOMBLOB(2)), 2) || '-' ||
          SUBSTR('89AB', ABS(RANDOM()) % 4 + 1, 1) || SUBSTR(HEX(RANDOMBLOB(2)), 2) || '-' || HEX(RANDOMBLOB(6))) as id,
    name,
    COALESCE(ip, '') as ip,
    COALESCE(port, 0) as port,
    COALESCE(path, '') as path,
    service_name,
    1 as from_steam_cmd
FROM servers;

-- Step 3: Create mapping table to track old ID to new UUID mapping
CREATE TEMP TABLE server_id_mapping AS
SELECT
    s_old.id as old_id,
    s_new.id as new_id
FROM servers s_old
JOIN servers_new s_new ON s_old.name = s_new.name AND s_old.service_name = s_new.service_name;

-- Step 4: Drop old servers table and rename new one
DROP TABLE servers;
ALTER TABLE servers_new RENAME TO servers;

-- Step 5: Create new configs table with UUID references
CREATE TABLE configs_new (
    id TEXT PRIMARY KEY,      -- UUID for configs
    server_id TEXT NOT NULL,  -- UUID reference to servers (GORM expects snake_case)
    config_file TEXT NOT NULL,
    old_config TEXT,
    new_config TEXT,
    changed_at DATETIME DEFAULT CURRENT_TIMESTAMP
);

-- Step 6: Migrate configs data with UUID references
INSERT INTO configs_new (id, server_id, config_file, old_config, new_config, changed_at)
SELECT
    LOWER(HEX(RANDOMBLOB(4)) || '-' || HEX(RANDOMBLOB(2)) || '-' || '4' || SUBSTR(HEX(RANDOMBLOB(2)), 2) || '-' ||
          SUBSTR('89AB', ABS(RANDOM()) % 4 + 1, 1) || SUBSTR(HEX(RANDOMBLOB(2)), 2) || '-' || HEX(RANDOMBLOB(6))) as id,
    sim.new_id as server_id,
    c.config_file,
    c.old_config,
    c.new_config,
    c.changed_at
FROM configs c
JOIN server_id_mapping sim ON c.server_id = sim.old_id;

-- Step 7: Drop old configs table and rename new one
DROP TABLE configs;
ALTER TABLE configs_new RENAME TO configs;

-- Step 8: Create new state_histories table with UUID references
CREATE TABLE state_histories_new (
    id TEXT PRIMARY KEY,      -- UUID for state_histories records
    server_id TEXT NOT NULL,  -- UUID reference to servers (GORM expects snake_case)
    session TEXT,
    track TEXT,
    player_count INTEGER,
    date_created DATETIME,
    session_start DATETIME,
    session_duration_minutes INTEGER,
    session_id TEXT NOT NULL  -- Changed to TEXT to store UUID
);

-- Step 9: Migrate state_histories data with UUID references
INSERT INTO state_histories_new (id, server_id, session, track, player_count, date_created, session_start, session_duration_minutes, session_id)
SELECT
    LOWER(HEX(RANDOMBLOB(4)) || '-' || HEX(RANDOMBLOB(2)) || '-' || '4' || SUBSTR(HEX(RANDOMBLOB(2)), 2) || '-' ||
          SUBSTR('89AB', ABS(RANDOM()) % 4 + 1, 1) || SUBSTR(HEX(RANDOMBLOB(2)), 2) || '-' || HEX(RANDOMBLOB(6))) as id,
    sim.new_id as server_id,
    sh.session,
    sh.track,
    sh.player_count,
    sh.date_created,
    sh.session_start,
    sh.session_duration_minutes,
    LOWER(HEX(RANDOMBLOB(4)) || '-' || HEX(RANDOMBLOB(2)) || '-' || '4' || SUBSTR(HEX(RANDOMBLOB(2)), 2) || '-' ||
          SUBSTR('89AB', ABS(RANDOM()) % 4 + 1, 1) || SUBSTR(HEX(RANDOMBLOB(2)), 2) || '-' || HEX(RANDOMBLOB(6))) as session_id
FROM state_histories sh
JOIN server_id_mapping sim ON sh.server_id = sim.old_id;

-- Step 10: Drop old state_histories table and rename new one
DROP TABLE state_histories;
ALTER TABLE state_histories_new RENAME TO state_histories;

-- Step 11: Create new steam_credentials table with UUID primary key
CREATE TABLE steam_credentials_new (
    id TEXT PRIMARY KEY,      -- UUID for steam_credentials
    username TEXT NOT NULL,
    password TEXT NOT NULL,
    date_created DATETIME,
    last_updated DATETIME
);

-- Step 12: Migrate steam_credentials data
INSERT INTO steam_credentials_new (id, username, password, date_created, last_updated)
SELECT
    LOWER(HEX(RANDOMBLOB(4)) || '-' || HEX(RANDOMBLOB(2)) || '-' || '4' || SUBSTR(HEX(RANDOMBLOB(2)), 2) || '-' ||
          SUBSTR('89AB', ABS(RANDOM()) % 4 + 1, 1) || SUBSTR(HEX(RANDOMBLOB(2)), 2) || '-' || HEX(RANDOMBLOB(6))) as id,
    username,
    password,
    date_created,
    last_updated
FROM steam_credentials;

-- Step 13: Drop old steam_credentials table and rename new one
DROP TABLE steam_credentials;
ALTER TABLE steam_credentials_new RENAME TO steam_credentials;

-- Step 14: Create new system_configs table with UUID primary key
CREATE TABLE system_configs_new (
    id TEXT PRIMARY KEY,      -- UUID for system_configs
    key TEXT,
    value TEXT,
    default_value TEXT,
    description TEXT,
    date_modified TEXT
);

-- Step 15: Migrate system_configs data
INSERT INTO system_configs_new (id, key, value, default_value, description, date_modified)
SELECT
    LOWER(HEX(RANDOMBLOB(4)) || '-' || HEX(RANDOMBLOB(2)) || '-' || '4' || SUBSTR(HEX(RANDOMBLOB(2)), 2) || '-' ||
          SUBSTR('89AB', ABS(RANDOM()) % 4 + 1, 1) || SUBSTR(HEX(RANDOMBLOB(2)), 2) || '-' || HEX(RANDOMBLOB(6))) as id,
    key,
    value,
    default_value,
    description,
    date_modified
FROM system_configs;

-- Step 16: Drop old system_configs table and rename new one
DROP TABLE system_configs;
ALTER TABLE system_configs_new RENAME TO system_configs;

-- Step 17: Create migration record
CREATE TABLE IF NOT EXISTS migration_records (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    migration_name TEXT UNIQUE NOT NULL,
    applied_at TEXT NOT NULL,
    success BOOLEAN NOT NULL,
    notes TEXT
);

INSERT INTO migration_records (migration_name, applied_at, success, notes)
VALUES ('002_migrate_servers_to_uuid', datetime('now'), 1, 'Migrated servers, configs, state_histories, steam_credentials, and system_configs to UUID primary keys');

COMMIT;
PRAGMA foreign_keys=ON;
@@ -1,148 +0,0 @@
package main

import (
	"acc-server-manager/local/migrations"
	"acc-server-manager/local/utl/logging"
	"fmt"
	"log"
	"os"
	"path/filepath"

	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
	"gorm.io/gorm/logger"
)

func main() {
	// Initialize logging
	logging.Init(true) // Enable debug logging

	// Get database path from command line args or use default
	dbPath := "acc.db"
	if len(os.Args) > 1 {
		dbPath = os.Args[1]
	}

	// Make sure we're running from the correct directory
	if !fileExists(dbPath) {
		// Try to find the database in common locations
		possiblePaths := []string{
			"acc.db",
			"../acc.db",
			"../../acc.db",
			"cmd/api/acc.db",
			"../cmd/api/acc.db",
		}

		found := false
		for _, path := range possiblePaths {
			if fileExists(path) {
				dbPath = path
				found = true
				break
			}
		}

		if !found {
			log.Fatalf("Database file not found. Please run from the project root or specify the correct path.")
		}
	}

	// Get absolute path for database
	absDbPath, err := filepath.Abs(dbPath)
	if err != nil {
		log.Fatalf("Failed to get absolute path for database: %v", err)
	}

	logging.Info("Using database: %s", absDbPath)

	// Open database connection
	db, err := gorm.Open(sqlite.Open(absDbPath), &gorm.Config{
		Logger: logger.Default.LogMode(logger.Info),
	})
	if err != nil {
		log.Fatalf("Failed to connect to database: %v", err)
	}

	sqlDB, err := db.DB()
	if err != nil {
		log.Fatalf("Failed to get underlying sql.DB: %v", err)
	}
	defer sqlDB.Close()

	// Run migrations in order
	logging.Info("Starting database migrations...")

	// Migration 001: Password security upgrade (if it exists and hasn't run)
	logging.Info("Checking Migration 001: Password Security Upgrade...")
	if err := migrations.RunPasswordSecurityMigration(db); err != nil {
		log.Fatalf("Migration 001 failed: %v", err)
	}

	// Migration 002: UUID migration
	logging.Info("Checking Migration 002: UUID Migration...")
	if err := migrations.RunUUIDMigration(db); err != nil {
		log.Fatalf("Migration 002 failed: %v", err)
	}

	logging.Info("All migrations completed successfully!")

	// Print summary of migration status
	printMigrationStatus(db)
}

// fileExists checks if a file exists and is not a directory
func fileExists(filename string) bool {
	info, err := os.Stat(filename)
	if os.IsNotExist(err) {
		return false
	}
	return !info.IsDir()
}

// printMigrationStatus prints a summary of applied migrations
func printMigrationStatus(db *gorm.DB) {
	logging.Info("Migration Status Summary:")
	logging.Info("========================")

	// Check if migration_records table exists
	var tableExists int
	err := db.Raw("SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name='migration_records'").Scan(&tableExists).Error
	if err != nil || tableExists == 0 {
		logging.Info("No migration tracking table found - this may be a fresh database")
		return
	}

	// Get all migration records
	var records []struct {
		MigrationName string `gorm:"column:migration_name"`
		AppliedAt     string `gorm:"column:applied_at"`
		Success       bool   `gorm:"column:success"`
		Notes         string `gorm:"column:notes"`
	}

	err = db.Table("migration_records").Find(&records).Error
	if err != nil {
		logging.Error("Failed to fetch migration records: %v", err)
		return
	}

	if len(records) == 0 {
		logging.Info("No migrations have been applied yet")
		return
	}

	for _, record := range records {
		status := "✓ SUCCESS"
		if !record.Success {
			status = "✗ FAILED"
		}

		logging.Info("  %s - %s (%s)", record.MigrationName, status, record.AppliedAt)
		if record.Notes != "" {
			logging.Info("    Notes: %s", record.Notes)
		}
	}

	fmt.Printf("\nTotal migrations applied: %d\n", len(records))
}
@@ -1,392 +0,0 @@
package main

import (
	"acc-server-manager/local/migrations"
	"acc-server-manager/local/model"
	"acc-server-manager/local/repository"
	"acc-server-manager/local/service"
	"acc-server-manager/local/utl/logging"
	"context"
	"fmt"
	"log"
	"os"

	"github.com/google/uuid"
	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
	"gorm.io/gorm/logger"
)

func main() {
	// Initialize logging
	logging.Init(true) // Enable debug logging

	// Create a test database
	testDbPath := "test_migrations.db"

	// Remove existing test database if it exists
	if fileExists(testDbPath) {
		os.Remove(testDbPath)
	}

	logging.Info("Creating test database: %s", testDbPath)

	// Open database connection
	db, err := gorm.Open(sqlite.Open(testDbPath), &gorm.Config{
		Logger: logger.Default.LogMode(logger.Info),
	})
	if err != nil {
		log.Fatalf("Failed to connect to test database: %v", err)
	}

	sqlDB, err := db.DB()
	if err != nil {
		log.Fatalf("Failed to get underlying sql.DB: %v", err)
	}
	defer sqlDB.Close()

	// Create initial schema with integer IDs to simulate old database
	logging.Info("Creating initial schema with integer IDs...")
	createOldSchema(db)

	// Insert test data with integer IDs
	logging.Info("Inserting test data...")
	insertTestData(db)

	// Run UUID migration
	logging.Info("Running UUID migration...")
	if err := migrations.RunUUIDMigration(db); err != nil {
		log.Fatalf("UUID migration failed: %v", err)
	}

	// Verify migration worked
	logging.Info("Verifying migration results...")
	if err := verifyMigration(db); err != nil {
		log.Fatalf("Migration verification failed: %v", err)
	}

	// Test role system
	logging.Info("Testing role system...")
	if err := testRoleSystem(db); err != nil {
		log.Fatalf("Role system test failed: %v", err)
	}

	// Test Super Admin deletion prevention
	logging.Info("Testing Super Admin deletion prevention...")
	if err := testSuperAdminDeletionPrevention(db); err != nil {
		log.Fatalf("Super Admin deletion prevention test failed: %v", err)
	}

	logging.Info("All tests passed successfully!")

	// Clean up
	os.Remove(testDbPath)
	logging.Info("Test database cleaned up")
}

func createOldSchema(db *gorm.DB) {
	// Create tables with integer primary keys to simulate old schema
	db.Exec(`
		CREATE TABLE IF NOT EXISTS servers (
			id INTEGER PRIMARY KEY AUTOINCREMENT,
			name TEXT NOT NULL,
			ip TEXT NOT NULL,
			port INTEGER NOT NULL,
			config_path TEXT NOT NULL,
			service_name TEXT NOT NULL,
			date_created DATETIME,
			from_steam_cmd BOOLEAN NOT NULL DEFAULT 1
		)
	`)

	db.Exec(`
		CREATE TABLE IF NOT EXISTS configs (
			id INTEGER PRIMARY KEY AUTOINCREMENT,
			server_id INTEGER NOT NULL,
			config_file TEXT NOT NULL,
			old_config TEXT,
			new_config TEXT,
			changed_at DATETIME DEFAULT CURRENT_TIMESTAMP
		)
	`)

	db.Exec(`
		CREATE TABLE IF NOT EXISTS state_histories (
			id INTEGER PRIMARY KEY AUTOINCREMENT,
			server_id INTEGER NOT NULL,
			session TEXT,
			track TEXT,
			player_count INTEGER,
			date_created DATETIME,
			session_start DATETIME,
			session_duration_minutes INTEGER,
			session_id INTEGER NOT NULL DEFAULT 0
		)
	`)

	db.Exec(`
		CREATE TABLE IF NOT EXISTS steam_credentials (
			id INTEGER PRIMARY KEY AUTOINCREMENT,
			username TEXT NOT NULL,
			password TEXT NOT NULL,
			date_created DATETIME,
			last_updated DATETIME
		)
	`)

	db.Exec(`
		CREATE TABLE IF NOT EXISTS system_configs (
			id INTEGER PRIMARY KEY AUTOINCREMENT,
			key TEXT,
			value TEXT,
			default_value TEXT,
			description TEXT,
			date_modified TEXT
		)
	`)
}

func insertTestData(db *gorm.DB) {
	// Insert test server
	db.Exec(`
		INSERT INTO servers (name, ip, port, config_path, service_name, date_created, from_steam_cmd)
		VALUES ('Test Server', '127.0.0.1', 9600, '/test/path', 'TestService', datetime('now'), 1)
	`)

	// Insert test config
	db.Exec(`
		INSERT INTO configs (server_id, config_file, old_config, new_config)
		VALUES (1, 'test.json', '{"old": true}', '{"new": true}')
	`)

	// Insert test state history
	db.Exec(`
		INSERT INTO state_histories (server_id, session, track, player_count, date_created, session_duration_minutes, session_id)
		VALUES (1, 'Practice', 'monza', 5, datetime('now'), 60, 1)
	`)

	// Insert test steam credentials
	db.Exec(`
		INSERT INTO steam_credentials (username, password, date_created, last_updated)
		VALUES ('testuser', 'testpass', datetime('now'), datetime('now'))
	`)

	// Insert test system config
	db.Exec(`
		INSERT INTO system_configs (key, value, default_value, description, date_modified)
		VALUES ('test_key', 'test_value', 'default_value', 'Test config', datetime('now'))
	`)
}

func verifyMigration(db *gorm.DB) error {
	// Check that all tables now have UUID primary keys

	// Check servers table
	var serverID string
	err := db.Raw("SELECT id FROM servers LIMIT 1").Scan(&serverID).Error
	if err != nil {
		return fmt.Errorf("failed to query servers table: %v", err)
	}
	if _, err := uuid.Parse(serverID); err != nil {
		return fmt.Errorf("servers table ID is not a valid UUID: %s", serverID)
	}

	// Check configs table
	var configID, configServerID string
	err = db.Raw("SELECT id, server_id FROM configs LIMIT 1").Row().Scan(&configID, &configServerID)
	if err != nil {
		return fmt.Errorf("failed to query configs table: %v", err)
	}
	if _, err := uuid.Parse(configID); err != nil {
		return fmt.Errorf("configs table ID is not a valid UUID: %s", configID)
	}
	if _, err := uuid.Parse(configServerID); err != nil {
		return fmt.Errorf("configs table server_id is not a valid UUID: %s", configServerID)
	}

	// Check state_histories table
	var stateID, stateServerID string
	err = db.Raw("SELECT id, server_id FROM state_histories LIMIT 1").Row().Scan(&stateID, &stateServerID)
	if err != nil {
		return fmt.Errorf("failed to query state_histories table: %v", err)
	}
	if _, err := uuid.Parse(stateID); err != nil {
		return fmt.Errorf("state_histories table ID is not a valid UUID: %s", stateID)
	}
	if _, err := uuid.Parse(stateServerID); err != nil {
		return fmt.Errorf("state_histories table server_id is not a valid UUID: %s", stateServerID)
	}

	// Check steam_credentials table
	var steamID string
	err = db.Raw("SELECT id FROM steam_credentials LIMIT 1").Scan(&steamID).Error
	if err != nil {
		return fmt.Errorf("failed to query steam_credentials table: %v", err)
	}
	if _, err := uuid.Parse(steamID); err != nil {
		return fmt.Errorf("steam_credentials table ID is not a valid UUID: %s", steamID)
	}

	// Check system_configs table
	var systemID string
	err = db.Raw("SELECT id FROM system_configs LIMIT 1").Scan(&systemID).Error
	if err != nil {
		return fmt.Errorf("failed to query system_configs table: %v", err)
	}
	if _, err := uuid.Parse(systemID); err != nil {
		return fmt.Errorf("system_configs table ID is not a valid UUID: %s", systemID)
	}

	logging.Info("✓ All tables successfully migrated to UUID primary keys")
	return nil
}

func testRoleSystem(db *gorm.DB) error {
	// Auto-migrate the models for role system
	db.AutoMigrate(&model.Role{}, &model.Permission{}, &model.User{})

	// Create repository and service
	repo := repository.NewMembershipRepository(db)
	service := service.NewMembershipService(repo)

	ctx := context.Background()

	// Setup initial data (this should create Super Admin, Admin, and Manager roles)
	if err := service.SetupInitialData(ctx); err != nil {
		return fmt.Errorf("failed to setup initial data: %v", err)
	}

	// Test that all three roles were created
	roles, err := service.GetAllRoles(ctx)
	if err != nil {
		return fmt.Errorf("failed to get roles: %v", err)
	}

	expectedRoles := map[string]bool{
		"Super Admin": false,
		"Admin":       false,
		"Manager":     false,
	}

	for _, role := range roles {
		if _, exists := expectedRoles[role.Name]; exists {
			expectedRoles[role.Name] = true
		}
	}

	for roleName, found := range expectedRoles {
		if !found {
			return fmt.Errorf("role '%s' was not created", roleName)
		}
	}

	// Test permissions for each role
	superAdminRole, err := repo.FindRoleByName(ctx, "Super Admin")
	if err != nil {
		return fmt.Errorf("failed to find Super Admin role: %v", err)
	}

	adminRole, err := repo.FindRoleByName(ctx, "Admin")
	if err != nil {
		return fmt.Errorf("failed to find Admin role: %v", err)
	}

	managerRole, err := repo.FindRoleByName(ctx, "Manager")
	if err != nil {
		return fmt.Errorf("failed to find Manager role: %v", err)
	}

	// Load permissions for roles
	db.Preload("Permissions").Find(superAdminRole)
	db.Preload("Permissions").Find(adminRole)
	db.Preload("Permissions").Find(managerRole)

	// Super Admin and Admin should have all permissions
	allPermissions := model.AllPermissions()
	if len(superAdminRole.Permissions) != len(allPermissions) {
		return fmt.Errorf("Super Admin should have all %d permissions, but has %d", len(allPermissions), len(superAdminRole.Permissions))
	}

	if len(adminRole.Permissions) != len(allPermissions) {
		return fmt.Errorf("Admin should have all %d permissions, but has %d", len(allPermissions), len(adminRole.Permissions))
	}

	// Manager should have limited permissions (no create/delete for membership, role, user, server)
	expectedManagerPermissions := []string{
		model.ServerView,
		model.ServerUpdate,
		model.ServerStart,
		model.ServerStop,
		model.ConfigView,
		model.ConfigUpdate,
		model.UserView,
		model.RoleView,
		model.MembershipView,
	}

	if len(managerRole.Permissions) != len(expectedManagerPermissions) {
		return fmt.Errorf("Manager should have %d permissions, but has %d", len(expectedManagerPermissions), len(managerRole.Permissions))
	}

	// Verify Manager doesn't have restricted permissions
	restrictedPermissions := []string{
		model.ServerCreate,
		model.ServerDelete,
		model.UserCreate,
		model.UserDelete,
		model.RoleCreate,
		model.RoleDelete,
		model.MembershipCreate,
	}

	for _, restrictedPerm := range restrictedPermissions {
		for _, managerPerm := range managerRole.Permissions {
			if managerPerm.Name == restrictedPerm {
				return fmt.Errorf("Manager should not have permission '%s'", restrictedPerm)
			}
		}
	}

	logging.Info("✓ Role system working correctly")
	logging.Info("  - Super Admin role: %d permissions", len(superAdminRole.Permissions))
	logging.Info("  - Admin role: %d permissions", len(adminRole.Permissions))
	logging.Info("  - Manager role: %d permissions", len(managerRole.Permissions))

	return nil
}

func testSuperAdminDeletionPrevention(db *gorm.DB) error {
	// Create repository and service
	repo := repository.NewMembershipRepository(db)
	service := service.NewMembershipService(repo)

	ctx := context.Background()

	// Find the default admin user (should be Super Admin)
	adminUser, err := repo.FindUserByUsername(ctx, "admin")
	if err != nil {
		return fmt.Errorf("failed to find admin user: %v", err)
	}

	// Try to delete the Super Admin user (should fail)
	err = service.DeleteUser(ctx, adminUser.ID)
	if err == nil {
		return fmt.Errorf("deleting Super Admin user should have failed, but it succeeded")
	}

	if err.Error() != "cannot delete Super Admin user" {
		return fmt.Errorf("expected 'cannot delete Super Admin user' error, got: %v", err)
	}

	logging.Info("✓ Super Admin deletion prevention working correctly")
	return nil
}

// fileExists checks if a file exists and is not a directory
func fileExists(filename string) bool {
	info, err := os.Stat(filename)
	if os.IsNotExist(err) {
		return false
	}
	return !info.IsDir()
}