Migrate database from SQLite to PostgreSQL

- Switch Prisma provider from sqlite to postgresql
- Remove SQLite dependencies (better-sqlite3, adapter)
- Update Better Auth to the postgresql provider
- Simplify prisma.ts by removing the SQLite adapter (see the sketch below)
- Update stack.yml to use the existing PostgreSQL with 2 replicas
- Remove better-sqlite3 rebuild logic from start-web.sh
- Add a SQLite -> PostgreSQL data migration script
- Update the healthcheck to test PostgreSQL via Prisma
- Enable start-first deploy for zero downtime

Improvement: allows multiple replicas and zero-downtime deploys.
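
For reference (the simplified prisma.ts itself is not part of the diff excerpt below), a minimal sketch of what it might look like after dropping the SQLite driver adapter, assuming the common Prisma singleton pattern and a schema datasource that reads DATABASE_URL:

```ts
// Hypothetical sketch only; the actual prisma.ts in this commit is not shown here.
import { PrismaClient } from "@prisma/client"

// Reuse a single client across hot reloads in development to avoid
// exhausting PostgreSQL connections.
const globalForPrisma = globalThis as unknown as { prisma?: PrismaClient }

export const prisma = globalForPrisma.prisma ?? new PrismaClient()

if (process.env.NODE_ENV !== "production") globalForPrisma.prisma = prisma
```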

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
esdrasrenan 2025-12-11 00:35:27 -03:00
parent fb97d9bec8
commit 33a59634e7
10 changed files with 362 additions and 223 deletions

scripts/migrate-sqlite-to-postgres.mjs

@@ -0,0 +1,264 @@
#!/usr/bin/env node
/**
* SQLite -> PostgreSQL migration script
* Runs with access to both SQLite and PostgreSQL to migrate all the data
*
* Usage:
* SQLITE_PATH=/path/to/db.sqlite POSTGRES_URL=postgresql://... node scripts/migrate-sqlite-to-postgres.mjs
*/
import Database from "better-sqlite3"
import pg from "pg"
const SQLITE_PATH = process.env.SQLITE_PATH || "/app/data/db.sqlite"
const POSTGRES_URL = process.env.POSTGRES_URL || process.env.DATABASE_URL
if (!POSTGRES_URL) {
console.error("ERRO: POSTGRES_URL ou DATABASE_URL e obrigatorio")
process.exit(1)
}
console.log("=".repeat(60))
console.log("MIGRACAO SQLite -> PostgreSQL")
console.log("=".repeat(60))
console.log(`SQLite: ${SQLITE_PATH}`)
console.log(`PostgreSQL: ${POSTGRES_URL.replace(/:[^:@]+@/, ":***@")}`)
console.log("")
// Table order, respecting foreign keys
const TABLES_ORDER = [
// Tables with no dependencies
"Team",
"SlaPolicy",
"Company",
// Tables with simple dependencies
"User",
"Queue",
"AuthUser",
// Tables with a composite key
"TeamMember",
// Main tables
"Ticket",
// Tables that depend on Ticket
"TicketEvent",
"TicketComment",
"TicketRating",
"TicketAccessToken",
// Per-user tables
"NotificationPreferences",
// Reporting tables
"ReportExportSchedule",
"ReportExportRun",
// Authentication tables
"AuthSession",
"AuthAccount",
"AuthInvite",
"AuthInviteEvent",
"AuthVerification",
]
let sqlite
let pgClient
async function connect() {
console.log("[1/4] Conectando aos bancos de dados...")
try {
sqlite = new Database(SQLITE_PATH, { readonly: true })
console.log(" SQLite: conectado")
} catch (error) {
console.error(" SQLite: ERRO -", error.message)
process.exit(1)
}
try {
pgClient = new pg.Client({ connectionString: POSTGRES_URL })
await pgClient.connect()
console.log(" PostgreSQL: conectado")
} catch (error) {
console.error(" PostgreSQL: ERRO -", error.message)
process.exit(1)
}
}
async function migrateTable(tableName) {
let rows
try {
rows = sqlite.prepare(`SELECT * FROM "${tableName}"`).all()
} catch (error) {
console.log(` ${tableName}: tabela nao existe no SQLite, pulando`)
return { table: tableName, migrated: 0, skipped: true }
}
if (rows.length === 0) {
console.log(` ${tableName}: 0 registros`)
return { table: tableName, migrated: 0 }
}
const columns = Object.keys(rows[0])
const quotedColumns = columns.map(c => `"${c}"`).join(", ")
const placeholders = columns.map((_, i) => `$${i + 1}`).join(", ")
const insertSql = `INSERT INTO "${tableName}" (${quotedColumns}) VALUES (${placeholders}) ON CONFLICT DO NOTHING`
let migrated = 0
let errors = 0
for (const row of rows) {
const values = columns.map(col => {
let val = row[col]
// Convert JSON strings to objects where needed (PostgreSQL uses JSONB)
if (typeof val === "string") {
// Detect JSON fields by their content
if ((val.startsWith("{") && val.endsWith("}")) || (val.startsWith("[") && val.endsWith("]"))) {
try {
val = JSON.parse(val)
} catch {
// Keep as a string if it is not valid JSON
}
}
}
// SQLite stores booleans as 0/1, PostgreSQL uses true/false
// Prisma already handles this, but make sure
if (val === 0 || val === 1) {
// Detect boolean fields by name
const booleanFields = ["emailVerified", "isLead", "isAvulso", "emailEnabled", "hasBranches", "privacyPolicyAccepted"]
if (booleanFields.includes(col)) {
val = val === 1
}
}
return val
})
try {
await pgClient.query(insertSql, values)
migrated++
} catch (error) {
errors++
if (errors <= 3) {
console.error(` Erro ao inserir em ${tableName}:`, error.message)
}
}
}
console.log(` ${tableName}: ${migrated}/${rows.length} registros migrados${errors > 0 ? ` (${errors} erros)` : ""}`)
return { table: tableName, migrated, total: rows.length, errors }
}
async function updateSequences() {
console.log("\n[3/4] Atualizando sequences do PostgreSQL...")
// PostgreSQL sequences must be updated for auto-increment columns to work correctly
// Since this schema uses CUIDs, that is usually unnecessary
// Just check whether any sequences exist and update them if they do
const sequenceQuery = `
SELECT
t.relname as table_name,
a.attname as column_name,
pg_get_serial_sequence(t.relname::text, a.attname::text) as sequence_name
FROM pg_class t
JOIN pg_attribute a ON a.attrelid = t.oid
WHERE t.relkind = 'r'
AND pg_get_serial_sequence(t.relname::text, a.attname::text) IS NOT NULL
`
try {
const result = await pgClient.query(sequenceQuery)
for (const row of result.rows) {
const updateSeq = `
SELECT setval('${row.sequence_name}',
COALESCE((SELECT MAX("${row.column_name}") FROM "${row.table_name}"), 0) + 1, false)
`
try {
await pgClient.query(updateSeq)
console.log(` Sequence ${row.sequence_name} atualizada`)
} catch {
// Ignore sequence errors
}
}
} catch {
console.log(" Nenhuma sequence encontrada (usando CUIDs)")
}
}
async function validateMigration() {
console.log("\n[4/4] Validando migracao...")
const validation = []
for (const tableName of TABLES_ORDER) {
let sqliteCount = 0
let pgCount = 0
try {
const sqliteResult = sqlite.prepare(`SELECT COUNT(*) as count FROM "${tableName}"`).get()
sqliteCount = sqliteResult?.count || 0
} catch {
// Table does not exist in SQLite
continue
}
try {
const pgResult = await pgClient.query(`SELECT COUNT(*) as count FROM "${tableName}"`)
pgCount = parseInt(pgResult.rows[0]?.count || 0)
} catch {
pgCount = 0
}
const match = sqliteCount === pgCount
validation.push({ table: tableName, sqlite: sqliteCount, postgres: pgCount, match })
const status = match ? "OK" : "DIFF"
console.log(` ${tableName}: SQLite=${sqliteCount}, PostgreSQL=${pgCount} [${status}]`)
}
const allMatch = validation.every(v => v.match)
return allMatch
}
async function main() {
await connect()
console.log("\n[2/4] Migrando tabelas...")
try {
await pgClient.query("BEGIN")
for (const tableName of TABLES_ORDER) {
await migrateTable(tableName)
}
await pgClient.query("COMMIT")
console.log("\n Transacao commitada com sucesso!")
} catch (error) {
await pgClient.query("ROLLBACK")
console.error("\nERRO: Rollback executado -", error.message)
process.exit(1)
}
await updateSequences()
const valid = await validateMigration()
console.log("\n" + "=".repeat(60))
if (valid) {
console.log("MIGRACAO CONCLUIDA COM SUCESSO!")
} else {
console.log("MIGRACAO CONCLUIDA COM DIFERENCAS (verifique os logs)")
}
console.log("=".repeat(60))
sqlite.close()
await pgClient.end()
process.exit(valid ? 0 : 1)
}
main().catch(error => {
console.error("Erro fatal:", error)
process.exit(1)
})

start-web.sh

@@ -9,28 +9,11 @@ cd /app
export BUN_INSTALL_CACHE_DIR="${BUN_INSTALL_CACHE_DIR:-/tmp/bun-cache}"
mkdir -p "$BUN_INSTALL_CACHE_DIR"
-DB_PATH="/app/data/db.sqlite"
echo "[start-web] Using bun cache dir: $BUN_INSTALL_CACHE_DIR"
echo "[start-web] Using APP_DIR=$(pwd)"
echo "[start-web] NEXT_PUBLIC_APP_URL=${NEXT_PUBLIC_APP_URL:-}"
echo "[start-web] NEXT_PUBLIC_CONVEX_URL=${NEXT_PUBLIC_CONVEX_URL:-}"
-ensure_db_writable() {
-mkdir -p "$(dirname "$DB_PATH")"
-if [ ! -e "$DB_PATH" ]; then
-touch "$DB_PATH" || true
-chmod 660 "$DB_PATH" 2>/dev/null || true
-fi
-if ! touch "$DB_PATH" >/dev/null 2>&1; then
-echo "[start-web] ERRO: não foi possível escrever em $DB_PATH (verifique permissões do volume /app/data)" >&2
-ls -ld /app/data "$DB_PATH" >/dev/null 2>&1 || true
-exit 1
-fi
-}
-ensure_db_writable
+echo "[start-web] DATABASE_URL=${DATABASE_URL:+set}"
# Ensure system deps for native modules (best-effort, idempotent)
if command -v apt-get >/dev/null 2>&1; then
@@ -76,113 +59,59 @@ else
echo "[start-web] apt-get unavailable; skipping system deps install" >&2
fi
-# Rebuild native better-sqlite3 bindings for the current Node version
-if command -v npm >/dev/null 2>&1; then
-check_better_sqlite3() {
-node - <<'EOF'
-const path = require("node:path")
-try {
-const pkgPath = require.resolve("better-sqlite3/package.json")
-const pkg = require(pkgPath)
-const binding = path.join(path.dirname(pkgPath), "build", "Release", "better_sqlite3.node")
-require("better-sqlite3")
-console.log(`[start-web] better-sqlite3 ok (v${pkg.version}) binding=${binding}`)
-process.exit(0)
-} catch (error) {
-console.error("[start-web] better-sqlite3 load failed:", error?.message || error)
-process.exit(1)
-}
-EOF
-}
-copy_fallback_binding() {
-local temp_dir="/tmp/bsql-fallback"
-rm -rf "$temp_dir"
-mkdir -p "$temp_dir"
-(
-cd "$temp_dir" &&
-npm install better-sqlite3@11.10.0 --build-from-source --no-save >/dev/null 2>&1
-) || {
-echo "[start-web] fallback npm install falhou"
-return 1
-}
-local src_pkg="$temp_dir/node_modules/better-sqlite3"
-local dest_pkg="/app/node_modules/.bun/better-sqlite3@11.10.0/node_modules/better-sqlite3"
-mkdir -p "$dest_pkg"
-cp -R "$src_pkg/"* "$dest_pkg/" || return 1
-echo "[start-web] fallback: pacote better-sqlite3 copiado para .bun store"
-return 0
-}
-rebuild_and_repin_sqlite() {
-echo "[start-web] rebuilding better-sqlite3 para a runtime atual"
-npm rebuild better-sqlite3 --build-from-source >/dev/null 2>&1 || {
-echo "[start-web] rebuild falhou; tentando fallback" >&2
-copy_fallback_binding || echo "[start-web] fallback também falhou" >&2
-}
-node - <<'EOF'
-const fs = require("node:fs")
-const path = require("node:path")
-try {
-const pkgPath = require.resolve("better-sqlite3/package.json")
-const pkgDir = path.dirname(pkgPath)
-const pkg = require(pkgPath)
-const built = path.join(pkgDir, "build", "Release", "better_sqlite3.node")
-// Copy to bun store
-const store = path.join(process.cwd(), "node_modules", ".bun", `better-sqlite3@${pkg.version}`, "node_modules", "better-sqlite3", "build", "Release", "better_sqlite3.node")
-fs.mkdirSync(path.dirname(store), { recursive: true })
-fs.copyFileSync(built, store)
-console.log(`[start-web] better-sqlite3 (v${pkg.version}) copiado para store bun: ${store}`)
-// Copy to @prisma/adapter-better-sqlite3 and @prisma/client locations
-const nodeVersion = process.version.slice(1).split('.')[0] + '.' + process.version.slice(1).split('.')[1] + '.' + process.version.slice(1).split('.')[2]
-const adapterPaths = [
-// Adapter locations
-path.join(process.cwd(), "node_modules", "@prisma", "adapter-better-sqlite3", "build", "Release", "better_sqlite3.node"),
-path.join(process.cwd(), "node_modules", "@prisma", "adapter-better-sqlite3", "node_modules", "better-sqlite3", "build", "Release", "better_sqlite3.node"),
-// @prisma/client locations that the adapter might be looking at
-path.join(process.cwd(), "node_modules", "@prisma", "client", "build", "Release", "better_sqlite3.node"),
-path.join(process.cwd(), "node_modules", "@prisma", "client", "Release", "better_sqlite3.node"),
-path.join(process.cwd(), "node_modules", "@prisma", "client", "compiled", nodeVersion, "linux", "x64", "better_sqlite3.node"),
-// Bun store locations
-path.join(process.cwd(), "node_modules", ".bun", "@prisma-adapter-better-sqlite3@7.0.0", "node_modules", "@prisma", "adapter-better-sqlite3", "node_modules", "better-sqlite3", "build", "Release", "better_sqlite3.node"),
-]
-for (const dest of adapterPaths) {
-try {
-fs.mkdirSync(path.dirname(dest), { recursive: true })
-fs.copyFileSync(built, dest)
-console.log(`[start-web] binding copiado para: ${dest}`)
-} catch (e) {
-// Ignore path copy errors
-}
-}
-} catch (error) {
-console.error("[start-web] não foi possível copiar binding para .bun store:", error?.message || error)
-}
-EOF
-}
-if [ "${SKIP_SQLITE_REBUILD:-false}" = "true" ]; then
-echo "[start-web] SKIP_SQLITE_REBUILD=true; tentando usar bindings existentes"
-if ! check_better_sqlite3; then
-echo "[start-web] bindings inválidos; forçando rebuild mesmo com SKIP_SQLITE_REBUILD=true"
-rebuild_and_repin_sqlite
-check_better_sqlite3 || {
-echo "[start-web] ERRO: better-sqlite3 continua inválido após rebuild" >&2
-exit 1
-}
-else
-rebuild_and_repin_sqlite
-check_better_sqlite3 || {
-echo "[start-web] ERRO: better-sqlite3 inválido após rebuild; tentando fallback"
-copy_fallback_binding && check_better_sqlite3 || {
-echo "[start-web] ERRO: better-sqlite3 continua inválido após fallback" >&2
-exit 1
-}
-}
-fi
+# Wait for PostgreSQL to become ready
+wait_for_postgres() {
+local max_attempts=30
+local attempt=1
+echo "[start-web] Aguardando PostgreSQL..."
+while [ $attempt -le $max_attempts ]; do
+if node -e "
+const url = process.env.DATABASE_URL;
+if (!url) { console.error('DATABASE_URL not set'); process.exit(1); }
+fetch(url.replace(/^postgresql:/, 'http:').replace(/\/[^/]+$/, '/'), { method: 'HEAD', signal: AbortSignal.timeout(2000) })
+.then(() => process.exit(0))
+.catch(() => process.exit(1));
+" 2>/dev/null; then
+echo "[start-web] PostgreSQL pronto!"
+return 0
+fi
+# Fallback: try psql if it is available
+if command -v psql >/dev/null 2>&1; then
+if psql "$DATABASE_URL" -c "SELECT 1" >/dev/null 2>&1; then
+echo "[start-web] PostgreSQL pronto (via psql)!"
+return 0
+fi
+fi
+# Simple fallback: check whether the host answers on the port
+local pg_host=$(echo "$DATABASE_URL" | sed -E 's/.*@([^:\/]+).*/\1/')
+local pg_port=$(echo "$DATABASE_URL" | sed -E 's/.*:([0-9]+)\/.*/\1/')
+if timeout 2 bash -c "echo >/dev/tcp/$pg_host/$pg_port" 2>/dev/null; then
+echo "[start-web] PostgreSQL acessivel na porta $pg_port!"
+return 0
+fi
+echo "[start-web] Tentativa $attempt/$max_attempts - PostgreSQL nao disponivel"
+sleep 2
+attempt=$((attempt + 1))
+done
+echo "[start-web] AVISO: PostgreSQL nao confirmado apos $max_attempts tentativas, continuando mesmo assim..."
+return 0
+}
+# Check that DATABASE_URL is set
+if [ -z "${DATABASE_URL:-}" ]; then
+echo "[start-web] ERRO: DATABASE_URL nao definida"
+exit 1
+fi
+# Wait for PostgreSQL in production
+if [ "${NODE_ENV:-}" = "production" ]; then
+wait_for_postgres
+fi
# Bun keeps its store in node_modules/.bun by default; ensure it exists and is writable