Migra banco de dados de SQLite para PostgreSQL

- Muda provider Prisma de sqlite para postgresql
- Remove dependencias SQLite (better-sqlite3, adapter)
- Atualiza Better Auth para provider postgresql
- Simplifica prisma.ts removendo adapter SQLite
- Atualiza stack.yml para usar PostgreSQL existente com 2 replicas
- Remove logica de rebuild better-sqlite3 do start-web.sh
- Adiciona script de migracao de dados SQLite -> PostgreSQL
- Atualiza healthcheck para testar PostgreSQL via Prisma
- Habilita start-first deploy para zero-downtime

Melhoria: permite multiplas replicas e deploys sem downtime.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
esdrasrenan 2025-12-11 00:35:27 -03:00
parent fb97d9bec8
commit 33a59634e7
10 changed files with 362 additions and 223 deletions

View file

@ -293,16 +293,24 @@ jobs:
- name: Swarm deploy (stack.yml) - name: Swarm deploy (stack.yml)
run: | run: |
cd "$EFFECTIVE_APP_DIR"
# Exporta variáveis do .env para substituição no stack (ex.: MACHINE_PROVISIONING_SECRET)
set -o allexport
if [ -f .env ]; then . ./.env; fi
set +o allexport
APP_DIR_STABLE="$HOME/apps/sistema" APP_DIR_STABLE="$HOME/apps/sistema"
if [ ! -d "$APP_DIR_STABLE" ]; then if [ ! -d "$APP_DIR_STABLE" ]; then
echo "ERROR: Stable APP_DIR does not exist: $APP_DIR_STABLE" >&2; exit 1 echo "ERROR: Stable APP_DIR does not exist: $APP_DIR_STABLE" >&2; exit 1
fi fi
cd "$APP_DIR_STABLE"
# Exporta variáveis do .env (do diretório de produção) para substituição no stack
# IMPORTANTE: Usar o .env do APP_DIR_STABLE, não do EFFECTIVE_APP_DIR (build temporário)
set -o allexport
if [ -f .env ]; then
echo "Loading .env from $APP_DIR_STABLE"
. ./.env
else
echo "WARNING: No .env found at $APP_DIR_STABLE - stack vars may be empty!"
fi
set +o allexport
echo "Using APP_DIR (stable)=$APP_DIR_STABLE" echo "Using APP_DIR (stable)=$APP_DIR_STABLE"
echo "NEXT_PUBLIC_CONVEX_URL=${NEXT_PUBLIC_CONVEX_URL:-<not set>}"
echo "NEXT_PUBLIC_APP_URL=${NEXT_PUBLIC_APP_URL:-<not set>}"
APP_DIR="$APP_DIR_STABLE" RELEASE_SHA=${{ github.sha }} docker stack deploy --with-registry-auth -c stack.yml sistema APP_DIR="$APP_DIR_STABLE" RELEASE_SHA=${{ github.sha }} docker stack deploy --with-registry-auth -c stack.yml sistema
# Removido: "Ensure Convex service envs" - as env vars já são passadas pelo stack.yml # Removido: "Ensure Convex service envs" - as env vars já são passadas pelo stack.yml

View file

@ -33,7 +33,6 @@
"@hookform/resolvers": "5.2.2", "@hookform/resolvers": "5.2.2",
"@noble/hashes": "2.0.1", "@noble/hashes": "2.0.1",
"@paper-design/shaders-react": "0.0.68", "@paper-design/shaders-react": "0.0.68",
"@prisma/adapter-better-sqlite3": "^7.0.0",
"@prisma/client": "^7.0.0", "@prisma/client": "^7.0.0",
"@radix-ui/react-accordion": "^1.2.12", "@radix-ui/react-accordion": "^1.2.12",
"@radix-ui/react-avatar": "^1.1.10", "@radix-ui/react-avatar": "^1.1.10",
@ -62,7 +61,6 @@
"@tiptap/starter-kit": "3.13.0", "@tiptap/starter-kit": "3.13.0",
"@tiptap/suggestion": "3.13.0", "@tiptap/suggestion": "3.13.0",
"better-auth": "^1.3.26", "better-auth": "^1.3.26",
"better-sqlite3": "12.5.0",
"class-variance-authority": "^0.7.1", "class-variance-authority": "^0.7.1",
"clsx": "^2.1.1", "clsx": "^2.1.1",
"convex": "^1.29.2", "convex": "^1.29.2",

View file

@ -1,3 +1,3 @@
# Please do not edit this file manually # Please do not edit this file manually
# It should be added in your version-control system (e.g., Git) # It should be added in your version-control system (e.g., Git)
provider = "sqlite" provider = "postgresql"

View file

@ -6,7 +6,8 @@ generator client {
} }
datasource db { datasource db {
provider = "sqlite" provider = "postgresql"
url = env("DATABASE_URL")
} }
enum UserRole { enum UserRole {

View file

@ -0,0 +1,264 @@
#!/usr/bin/env node
/**
* Script de migracao SQLite -> PostgreSQL
* Executa com acesso ao SQLite e PostgreSQL para migrar todos os dados
*
* Uso:
* SQLITE_PATH=/path/to/db.sqlite POSTGRES_URL=postgresql://... node scripts/migrate-sqlite-to-postgres.mjs
*/
import Database from "better-sqlite3"
import pg from "pg"
// Source SQLite file and target PostgreSQL connection string.
// POSTGRES_URL takes precedence; DATABASE_URL is the conventional fallback.
const SQLITE_PATH = process.env.SQLITE_PATH || "/app/data/db.sqlite"
const POSTGRES_URL = process.env.POSTGRES_URL || process.env.DATABASE_URL

// Fail fast: a target connection string is mandatory.
if (!POSTGRES_URL) {
  console.error("ERRO: POSTGRES_URL ou DATABASE_URL e obrigatorio")
  process.exit(1)
}

// Startup banner; the password segment of the URL is masked before printing.
console.log("=".repeat(60))
console.log("MIGRACAO SQLite -> PostgreSQL")
console.log("=".repeat(60))
console.log(`SQLite: ${SQLITE_PATH}`)
console.log(`PostgreSQL: ${POSTGRES_URL.replace(/:[^:@]+@/, ":***@")}`)
console.log("")

// Table insertion order honoring foreign-key dependencies:
// parent tables are copied before children so FK constraints on the
// PostgreSQL side are satisfied during the single-transaction copy.
const TABLES_ORDER = [
  // Tables with no dependencies
  "Team",
  "SlaPolicy",
  "Company",
  // Tables with simple dependencies
  "User",
  "Queue",
  "AuthUser",
  // Tables with a composite key
  "TeamMember",
  // Main tables
  "Ticket",
  // Tables that depend on Ticket
  "TicketEvent",
  "TicketComment",
  "TicketRating",
  "TicketAccessToken",
  // Per-user tables
  "NotificationPreferences",
  // Reporting tables
  "ReportExportSchedule",
  "ReportExportRun",
  // Authentication tables
  "AuthSession",
  "AuthAccount",
  "AuthInvite",
  "AuthInviteEvent",
  "AuthVerification",
]

// Shared connection handles, initialized by connect().
let sqlite
let pgClient
/**
 * Opens both database handles: the read-only SQLite source and the
 * PostgreSQL target. Exits the process with code 1 if either fails.
 */
async function connect() {
  console.log("[1/4] Conectando aos bancos de dados...")

  // Shared failure path: report which side failed and abort the run.
  const fail = (label, error) => {
    console.error(` ${label}: ERRO -`, error.message)
    process.exit(1)
  }

  // Source database: opened read-only so the migration can never mutate it.
  try {
    sqlite = new Database(SQLITE_PATH, { readonly: true })
    console.log(" SQLite: conectado")
  } catch (error) {
    fail("SQLite", error)
  }

  // Target database.
  try {
    pgClient = new pg.Client({ connectionString: POSTGRES_URL })
    await pgClient.connect()
    console.log(" PostgreSQL: conectado")
  } catch (error) {
    fail("PostgreSQL", error)
  }
}
/**
 * Copies every row of `tableName` from SQLite into PostgreSQL.
 *
 * Rows are inserted with ON CONFLICT DO NOTHING so reruns are idempotent.
 * Each row is wrapped in a SAVEPOINT because this function executes inside
 * the caller's BEGIN/COMMIT: without it, a single failed INSERT would abort
 * the whole transaction ("current transaction is aborted") and silently make
 * every subsequent insert fail.
 *
 * @param {string} tableName - Exact (case-sensitive) table name.
 * @returns {Promise<{table: string, migrated: number, total?: number, errors?: number, skipped?: boolean}>}
 */
async function migrateTable(tableName) {
  let rows
  try {
    rows = sqlite.prepare(`SELECT * FROM "${tableName}"`).all()
  } catch (error) {
    console.log(` ${tableName}: tabela nao existe no SQLite, pulando`)
    return { table: tableName, migrated: 0, skipped: true }
  }
  if (rows.length === 0) {
    console.log(` ${tableName}: 0 registros`)
    return { table: tableName, migrated: 0 }
  }

  // Build the parameterized INSERT once from the first row's column set.
  const columns = Object.keys(rows[0])
  const quotedColumns = columns.map(c => `"${c}"`).join(", ")
  const placeholders = columns.map((_, i) => `$${i + 1}`).join(", ")
  const insertSql = `INSERT INTO "${tableName}" (${quotedColumns}) VALUES (${placeholders}) ON CONFLICT DO NOTHING`

  // Columns stored as 0/1 in SQLite that are boolean in PostgreSQL.
  // Hoisted out of the row loop (loop-invariant) and kept as a Set for O(1) lookup.
  const booleanFields = new Set([
    "emailVerified",
    "isLead",
    "isAvulso",
    "emailEnabled",
    "hasBranches",
    "privacyPolicyAccepted",
  ])

  // Per-value conversion: SQLite TEXT holding JSON -> parsed object (the pg
  // driver serializes objects for JSON/JSONB params), SQLite 0/1 -> boolean
  // for known boolean columns only (numeric 0/1 columns are left untouched).
  const convertValue = (col, raw) => {
    let val = raw
    if (typeof val === "string") {
      const looksLikeJson =
        (val.startsWith("{") && val.endsWith("}")) ||
        (val.startsWith("[") && val.endsWith("]"))
      if (looksLikeJson) {
        try {
          val = JSON.parse(val)
        } catch {
          // Not valid JSON after all — keep the original string.
        }
      }
    }
    if ((val === 0 || val === 1) && booleanFields.has(col)) {
      val = val === 1
    }
    return val
  }

  let migrated = 0
  let errors = 0
  for (const row of rows) {
    const values = columns.map(col => convertValue(col, row[col]))
    try {
      // SAVEPOINT scopes the failure to this single row; the enclosing
      // transaction stays usable after a ROLLBACK TO SAVEPOINT.
      await pgClient.query("SAVEPOINT migrate_row")
      await pgClient.query(insertSql, values)
      await pgClient.query("RELEASE SAVEPOINT migrate_row")
      migrated++
    } catch (error) {
      // If the connection itself died, the rollback may also fail — ignore.
      await pgClient.query("ROLLBACK TO SAVEPOINT migrate_row").catch(() => {})
      errors++
      // Only the first few errors per table are printed to keep logs readable.
      if (errors <= 3) {
        console.error(` Erro ao inserir em ${tableName}:`, error.message)
      }
    }
  }
  console.log(` ${tableName}: ${migrated}/${rows.length} registros migrados${errors > 0 ? ` (${errors} erros)` : ""}`)
  return { table: tableName, migrated, total: rows.length, errors }
}
/**
 * Re-synchronizes PostgreSQL serial sequences after the bulk copy.
 * The schema primarily uses CUID string keys, so this is usually a no-op,
 * but any serial column that does exist gets its sequence advanced past
 * MAX(column) so future auto-increment inserts do not collide.
 */
async function updateSequences() {
  console.log("\n[3/4] Atualizando sequences do PostgreSQL...")
  // Find every ordinary table column backed by a serial sequence.
  const sequenceQuery = `
    SELECT
      t.relname as table_name,
      a.attname as column_name,
      pg_get_serial_sequence(t.relname::text, a.attname::text) as sequence_name
    FROM pg_class t
    JOIN pg_attribute a ON a.attrelid = t.oid
    WHERE t.relkind = 'r'
    AND pg_get_serial_sequence(t.relname::text, a.attname::text) IS NOT NULL
  `
  try {
    const result = await pgClient.query(sequenceQuery)
    for (const row of result.rows) {
      // setval(..., max+1, false): next nextval() returns exactly max+1.
      const updateSeq = `
        SELECT setval('${row.sequence_name}',
        COALESCE((SELECT MAX("${row.column_name}") FROM "${row.table_name}"), 0) + 1, false)
      `
      try {
        await pgClient.query(updateSeq)
        console.log(` Sequence ${row.sequence_name} atualizada`)
      } catch {
        // Best-effort: ignore per-sequence failures.
      }
    }
  } catch {
    // Catalog query failed or returned nothing useful (CUID-only schema).
    console.log(" Nenhuma sequence encontrada (usando CUIDs)")
  }
}
/**
 * Compares row counts between SQLite and PostgreSQL for every table in
 * TABLES_ORDER. Tables absent from the SQLite source are skipped.
 *
 * @returns {Promise<boolean>} true when every table's counts match.
 */
async function validateMigration() {
  console.log("\n[4/4] Validando migracao...")
  const validation = []
  for (const tableName of TABLES_ORDER) {
    let sqliteCount = 0
    let pgCount = 0
    try {
      const sqliteResult = sqlite.prepare(`SELECT COUNT(*) as count FROM "${tableName}"`).get()
      sqliteCount = sqliteResult?.count || 0
    } catch {
      // Table does not exist in the SQLite source — nothing to validate.
      continue
    }
    try {
      const pgResult = await pgClient.query(`SELECT COUNT(*) as count FROM "${tableName}"`)
      // node-postgres returns COUNT(*) as a string (bigint may exceed 2^53);
      // Number() replaces the original radix-less parseInt and is safe for
      // realistic row counts.
      pgCount = Number(pgResult.rows[0]?.count ?? 0)
    } catch {
      // Table missing on the PostgreSQL side counts as zero (flagged as DIFF).
      pgCount = 0
    }
    const match = sqliteCount === pgCount
    validation.push({ table: tableName, sqlite: sqliteCount, postgres: pgCount, match })
    const status = match ? "OK" : "DIFF"
    console.log(` ${tableName}: SQLite=${sqliteCount}, PostgreSQL=${pgCount} [${status}]`)
  }
  return validation.every(v => v.match)
}
/**
 * Orchestrates the migration: connect -> copy all tables inside a single
 * transaction -> resync sequences -> validate row counts.
 * Exits 0 only when every table's counts match; 1 otherwise.
 */
async function main() {
  await connect()
  console.log("\n[2/4] Migrando tabelas...")
  try {
    // One transaction for the whole copy: either every table lands or none.
    await pgClient.query("BEGIN")
    for (const tableName of TABLES_ORDER) {
      await migrateTable(tableName)
    }
    await pgClient.query("COMMIT")
    console.log("\n Transacao commitada com sucesso!")
  } catch (error) {
    // Guard the rollback itself: if the connection died, an unguarded
    // ROLLBACK would throw and mask the original failure.
    await pgClient.query("ROLLBACK").catch(() => {})
    console.error("\nERRO: Rollback executado -", error.message)
    process.exit(1)
  }
  await updateSequences()
  const valid = await validateMigration()
  console.log("\n" + "=".repeat(60))
  if (valid) {
    console.log("MIGRACAO CONCLUIDA COM SUCESSO!")
  } else {
    console.log("MIGRACAO CONCLUIDA COM DIFERENCAS (verifique os logs)")
  }
  console.log("=".repeat(60))
  sqlite.close()
  await pgClient.end()
  process.exit(valid ? 0 : 1)
}

// Entry point: any unhandled failure is fatal and reported before exiting.
main().catch(error => {
  console.error("Erro fatal:", error)
  process.exit(1)
})

View file

@ -9,28 +9,11 @@ cd /app
export BUN_INSTALL_CACHE_DIR="${BUN_INSTALL_CACHE_DIR:-/tmp/bun-cache}" export BUN_INSTALL_CACHE_DIR="${BUN_INSTALL_CACHE_DIR:-/tmp/bun-cache}"
mkdir -p "$BUN_INSTALL_CACHE_DIR" mkdir -p "$BUN_INSTALL_CACHE_DIR"
DB_PATH="/app/data/db.sqlite"
echo "[start-web] Using bun cache dir: $BUN_INSTALL_CACHE_DIR" echo "[start-web] Using bun cache dir: $BUN_INSTALL_CACHE_DIR"
echo "[start-web] Using APP_DIR=$(pwd)" echo "[start-web] Using APP_DIR=$(pwd)"
echo "[start-web] NEXT_PUBLIC_APP_URL=${NEXT_PUBLIC_APP_URL:-}" echo "[start-web] NEXT_PUBLIC_APP_URL=${NEXT_PUBLIC_APP_URL:-}"
echo "[start-web] NEXT_PUBLIC_CONVEX_URL=${NEXT_PUBLIC_CONVEX_URL:-}" echo "[start-web] NEXT_PUBLIC_CONVEX_URL=${NEXT_PUBLIC_CONVEX_URL:-}"
echo "[start-web] DATABASE_URL=${DATABASE_URL:+set}"
ensure_db_writable() {
mkdir -p "$(dirname "$DB_PATH")"
if [ ! -e "$DB_PATH" ]; then
touch "$DB_PATH" || true
chmod 660 "$DB_PATH" 2>/dev/null || true
fi
if ! touch "$DB_PATH" >/dev/null 2>&1; then
echo "[start-web] ERRO: não foi possível escrever em $DB_PATH (verifique permissões do volume /app/data)" >&2
ls -ld /app/data "$DB_PATH" >/dev/null 2>&1 || true
exit 1
fi
}
ensure_db_writable
# Ensure system deps for native modules (best-effort, idempotent) # Ensure system deps for native modules (best-effort, idempotent)
if command -v apt-get >/dev/null 2>&1; then if command -v apt-get >/dev/null 2>&1; then
@ -76,113 +59,59 @@ else
echo "[start-web] apt-get unavailable; skipping system deps install" >&2 echo "[start-web] apt-get unavailable; skipping system deps install" >&2
fi fi
# Rebuild native better-sqlite3 bindings for the current Node version # Aguardar PostgreSQL estar pronto
if command -v npm >/dev/null 2>&1; then wait_for_postgres() {
check_better_sqlite3() { local max_attempts=30
node - <<'EOF' local attempt=1
const path = require("node:path")
try {
const pkgPath = require.resolve("better-sqlite3/package.json")
const pkg = require(pkgPath)
const binding = path.join(path.dirname(pkgPath), "build", "Release", "better_sqlite3.node")
require("better-sqlite3")
console.log(`[start-web] better-sqlite3 ok (v${pkg.version}) binding=${binding}`)
process.exit(0)
} catch (error) {
console.error("[start-web] better-sqlite3 load failed:", error?.message || error)
process.exit(1)
}
EOF
}
copy_fallback_binding() { echo "[start-web] Aguardando PostgreSQL..."
local temp_dir="/tmp/bsql-fallback"
rm -rf "$temp_dir" while [ $attempt -le $max_attempts ]; do
mkdir -p "$temp_dir" if node -e "
( const url = process.env.DATABASE_URL;
cd "$temp_dir" && if (!url) { console.error('DATABASE_URL not set'); process.exit(1); }
npm install better-sqlite3@11.10.0 --build-from-source --no-save >/dev/null 2>&1 fetch(url.replace(/^postgresql:/, 'http:').replace(/\/[^/]+$/, '/'), { method: 'HEAD', signal: AbortSignal.timeout(2000) })
) || { .then(() => process.exit(0))
echo "[start-web] fallback npm install falhou" .catch(() => process.exit(1));
return 1 " 2>/dev/null; then
} echo "[start-web] PostgreSQL pronto!"
local src_pkg="$temp_dir/node_modules/better-sqlite3"
local dest_pkg="/app/node_modules/.bun/better-sqlite3@11.10.0/node_modules/better-sqlite3"
mkdir -p "$dest_pkg"
cp -R "$src_pkg/"* "$dest_pkg/" || return 1
echo "[start-web] fallback: pacote better-sqlite3 copiado para .bun store"
return 0 return 0
} fi
rebuild_and_repin_sqlite() { # Fallback: tenta via psql se disponivel
echo "[start-web] rebuilding better-sqlite3 para a runtime atual" if command -v psql >/dev/null 2>&1; then
npm rebuild better-sqlite3 --build-from-source >/dev/null 2>&1 || { if psql "$DATABASE_URL" -c "SELECT 1" >/dev/null 2>&1; then
echo "[start-web] rebuild falhou; tentando fallback" >&2 echo "[start-web] PostgreSQL pronto (via psql)!"
copy_fallback_binding || echo "[start-web] fallback também falhou" >&2 return 0
} fi
node - <<'EOF' fi
const fs = require("node:fs")
const path = require("node:path")
try {
const pkgPath = require.resolve("better-sqlite3/package.json")
const pkgDir = path.dirname(pkgPath)
const pkg = require(pkgPath)
const built = path.join(pkgDir, "build", "Release", "better_sqlite3.node")
// Copy to bun store # Fallback simples: verifica se o host responde
const store = path.join(process.cwd(), "node_modules", ".bun", `better-sqlite3@${pkg.version}`, "node_modules", "better-sqlite3", "build", "Release", "better_sqlite3.node") local pg_host=$(echo "$DATABASE_URL" | sed -E 's/.*@([^:\/]+).*/\1/')
fs.mkdirSync(path.dirname(store), { recursive: true }) local pg_port=$(echo "$DATABASE_URL" | sed -E 's/.*:([0-9]+)\/.*/\1/')
fs.copyFileSync(built, store) if timeout 2 bash -c "echo >/dev/tcp/$pg_host/$pg_port" 2>/dev/null; then
console.log(`[start-web] better-sqlite3 (v${pkg.version}) copiado para store bun: ${store}`) echo "[start-web] PostgreSQL acessivel na porta $pg_port!"
return 0
fi
// Copy to @prisma/adapter-better-sqlite3 and @prisma/client locations echo "[start-web] Tentativa $attempt/$max_attempts - PostgreSQL nao disponivel"
const nodeVersion = process.version.slice(1).split('.')[0] + '.' + process.version.slice(1).split('.')[1] + '.' + process.version.slice(1).split('.')[2] sleep 2
const adapterPaths = [ attempt=$((attempt + 1))
// Adapter locations done
path.join(process.cwd(), "node_modules", "@prisma", "adapter-better-sqlite3", "build", "Release", "better_sqlite3.node"),
path.join(process.cwd(), "node_modules", "@prisma", "adapter-better-sqlite3", "node_modules", "better-sqlite3", "build", "Release", "better_sqlite3.node"), echo "[start-web] AVISO: PostgreSQL nao confirmado apos $max_attempts tentativas, continuando mesmo assim..."
// @prisma/client locations that the adapter might be looking at return 0
path.join(process.cwd(), "node_modules", "@prisma", "client", "build", "Release", "better_sqlite3.node"),
path.join(process.cwd(), "node_modules", "@prisma", "client", "Release", "better_sqlite3.node"),
path.join(process.cwd(), "node_modules", "@prisma", "client", "compiled", nodeVersion, "linux", "x64", "better_sqlite3.node"),
// Bun store locations
path.join(process.cwd(), "node_modules", ".bun", "@prisma-adapter-better-sqlite3@7.0.0", "node_modules", "@prisma", "adapter-better-sqlite3", "node_modules", "better-sqlite3", "build", "Release", "better_sqlite3.node"),
]
for (const dest of adapterPaths) {
try {
fs.mkdirSync(path.dirname(dest), { recursive: true })
fs.copyFileSync(built, dest)
console.log(`[start-web] binding copiado para: ${dest}`)
} catch (e) {
// Ignore path copy errors
}
}
} catch (error) {
console.error("[start-web] não foi possível copiar binding para .bun store:", error?.message || error)
} }
EOF
}
if [ "${SKIP_SQLITE_REBUILD:-false}" = "true" ]; then # Verificar se DATABASE_URL esta definida
echo "[start-web] SKIP_SQLITE_REBUILD=true; tentando usar bindings existentes" if [ -z "${DATABASE_URL:-}" ]; then
if ! check_better_sqlite3; then echo "[start-web] ERRO: DATABASE_URL nao definida"
echo "[start-web] bindings inválidos; forçando rebuild mesmo com SKIP_SQLITE_REBUILD=true"
rebuild_and_repin_sqlite
check_better_sqlite3 || {
echo "[start-web] ERRO: better-sqlite3 continua inválido após rebuild" >&2
exit 1 exit 1
} fi
fi
else # Aguardar PostgreSQL em producao
rebuild_and_repin_sqlite if [ "${NODE_ENV:-}" = "production" ]; then
check_better_sqlite3 || { wait_for_postgres
echo "[start-web] ERRO: better-sqlite3 inválido após rebuild; tentando fallback"
copy_fallback_binding && check_better_sqlite3 || {
echo "[start-web] ERRO: better-sqlite3 continua inválido após fallback" >&2
exit 1
}
}
fi
fi fi
# Bun keeps its store in node_modules/.bun by default; ensure it exists and is writable # Bun keeps its store in node_modules/.bun by default; ensure it exists and is writable

View file

@ -18,27 +18,13 @@ export async function GET() {
}, },
} }
// Test SQLite binding // Test PostgreSQL connection via Prisma
try {
const Database = (await import("better-sqlite3")).default
const testDb = new Database(":memory:")
testDb.exec("SELECT 1")
testDb.close()
diagnostics.sqlite = { status: "ok", message: "better-sqlite3 binding works" }
} catch (error) {
diagnostics.sqlite = {
status: "error",
message: error instanceof Error ? error.message : String(error),
}
}
// Test Prisma connection
try { try {
const { prisma } = await import("@/lib/prisma") const { prisma } = await import("@/lib/prisma")
await prisma.$queryRaw`SELECT 1` await prisma.$queryRaw`SELECT 1`
diagnostics.prisma = { status: "ok", message: "Prisma connection works" } diagnostics.postgres = { status: "ok", message: "PostgreSQL connection works" }
} catch (error) { } catch (error) {
diagnostics.prisma = { diagnostics.postgres = {
status: "error", status: "error",
message: error instanceof Error ? error.message : String(error), message: error instanceof Error ? error.message : String(error),
} }

View file

@ -26,7 +26,7 @@ export const auth = betterAuth({
) )
), ),
database: prismaAdapter(prisma, { database: prismaAdapter(prisma, {
provider: "sqlite", provider: "postgresql",
}), }),
user: { user: {
// Use the exact Prisma client property names (lower camel case) // Use the exact Prisma client property names (lower camel case)

View file

@ -1,6 +1,3 @@
import path from "node:path"
import { PrismaBetterSqlite3 } from "@prisma/adapter-better-sqlite3"
import { PrismaClient } from "@/generated/prisma/client" import { PrismaClient } from "@/generated/prisma/client"
type PrismaClientInstance = InstanceType<typeof PrismaClient> type PrismaClientInstance = InstanceType<typeof PrismaClient>
@ -17,69 +14,18 @@ declare global {
var prisma: PrismaClientInstance | undefined var prisma: PrismaClientInstance | undefined
} }
// Resolve a robust DATABASE_URL for all runtimes (prod/dev) // PostgreSQL connection - requires DATABASE_URL environment variable
const PROJECT_ROOT = process.cwd() const databaseUrl = process.env.DATABASE_URL
const PRISMA_DIR = path.join(PROJECT_ROOT, "prisma")
function resolveFileUrl(url: string) { if (!databaseUrl) {
if (!url.startsWith("file:")) { throw new Error("DATABASE_URL environment variable is required for PostgreSQL connection")
return url
}
const filePath = url.slice("file:".length)
if (filePath.startsWith("//")) {
return url
}
if (path.isAbsolute(filePath)) {
return `file:${path.normalize(filePath)}`
}
const normalized = path.normalize(filePath)
const prismaPrefix = `prisma${path.sep}`
const relativeToPrisma = normalized.startsWith(prismaPrefix)
? normalized.slice(prismaPrefix.length)
: normalized
const absolutePath = path.resolve(PRISMA_DIR, relativeToPrisma)
if (!absolutePath.startsWith(PROJECT_ROOT)) {
throw new Error(`DATABASE_URL path escapes project directory: ${filePath}`)
}
return `file:${absolutePath}`
} }
function normalizeDatasourceUrl(envUrl?: string | null) { export const prisma = global.prisma ?? new PrismaClient()
const trimmed = envUrl?.trim()
if (trimmed) {
return resolveFileUrl(trimmed)
}
if (process.env.NODE_ENV === "production") {
return "file:/app/data/db.sqlite"
}
return resolveFileUrl("file:./db.dev.sqlite")
}
const resolvedDatabaseUrl = normalizeDatasourceUrl(process.env.DATABASE_URL)
process.env.DATABASE_URL = resolvedDatabaseUrl
const sqliteAdapter = new PrismaBetterSqlite3({
url: resolvedDatabaseUrl,
})
export const prisma = global.prisma ?? new PrismaClient({ adapter: sqliteAdapter })
if (process.env.NODE_ENV !== "production") { if (process.env.NODE_ENV !== "production") {
global.prisma = prisma global.prisma = prisma
} console.log("[prisma] Using PostgreSQL database")
if (process.env.NODE_ENV !== "production") {
// Helps detect mismatched DB path during dev server bootstrap
console.log("[prisma] Using database:", resolvedDatabaseUrl)
} }
export * from "@/generated/prisma/client" export * from "@/generated/prisma/client"

View file

@ -11,7 +11,7 @@ services:
bash -lc "bash /app/scripts/start-web.sh" bash -lc "bash /app/scripts/start-web.sh"
volumes: volumes:
- ${APP_DIR:-/srv/apps/sistema}:/app - ${APP_DIR:-/srv/apps/sistema}:/app
- sistema_db:/app/data # Removido: sistema_db (SQLite) - agora usa PostgreSQL
environment: environment:
NODE_ENV: "production" NODE_ENV: "production"
BUN_INSTALL_CACHE_DIR: "/tmp/bun-cache" BUN_INSTALL_CACHE_DIR: "/tmp/bun-cache"
@ -29,35 +29,39 @@ services:
BETTER_AUTH_SECRET: "${BETTER_AUTH_SECRET}" BETTER_AUTH_SECRET: "${BETTER_AUTH_SECRET}"
REPORTS_CRON_SECRET: "${REPORTS_CRON_SECRET}" REPORTS_CRON_SECRET: "${REPORTS_CRON_SECRET}"
REPORTS_CRON_BASE_URL: "${REPORTS_CRON_BASE_URL}" REPORTS_CRON_BASE_URL: "${REPORTS_CRON_BASE_URL}"
# Mantém o SQLite fora do repositório # PostgreSQL connection string (usa o servico 'postgres' existente na rede traefik_public)
DATABASE_URL: "file:/app/data/db.sqlite" DATABASE_URL: "postgresql://${POSTGRES_USER:-sistema}:${POSTGRES_PASSWORD}@postgres:5432/${POSTGRES_DB:-sistema_chamados}"
# Evita apt-get na inicialização porque a imagem já vem com toolchain pronta # Evita apt-get na inicialização porque a imagem já vem com toolchain pronta
SKIP_APT_BOOTSTRAP: "true" SKIP_APT_BOOTSTRAP: "true"
# Sempre revalida/rebuild better-sqlite3 para a runtime atual
SKIP_SQLITE_REBUILD: "false"
# Usado para forçar novo rollout a cada deploy (setado pelo CI) # Usado para forçar novo rollout a cada deploy (setado pelo CI)
RELEASE_SHA: "${RELEASE_SHA:-dev}" RELEASE_SHA: "${RELEASE_SHA:-dev}"
# Tempo para graceful shutdown antes do SIGKILL
stop_grace_period: 10s
deploy: deploy:
mode: replicated mode: replicated
# IMPORTANTE: SQLite nao suporta multiplas conexoes de escrita simultaneas. # PostgreSQL suporta múltiplas conexões - agora podemos ter 2 réplicas!
# Manter sempre 1 replica para evitar "attempt to write a readonly database". replicas: 2
replicas: 1
update_config: update_config:
parallelism: 1 parallelism: 1
# start-first evita downtime: sobe o novo task antes de parar o anterior # PostgreSQL permite start-first para zero-downtime deploys
order: start-first order: start-first
failure_action: rollback failure_action: rollback
# Delay entre updates para garantir que o healthcheck passa # Delay entre updates para dar tempo ao container iniciar
delay: 10s delay: 5s
# Monitor: tempo que o Swarm espera após o deploy para verificar estabilidade # Monitor: tempo que o Swarm espera apos o deploy para verificar estabilidade
monitor: 30s monitor: 30s
rollback_config: rollback_config:
order: start-first order: stop-first
resources: resources:
limits: limits:
memory: "2G" memory: "2G"
restart_policy: restart_policy:
condition: any condition: any
# Delay antes de tentar restart em caso de falha
delay: 5s
# Maximo de restarts em uma janela de tempo
max_attempts: 3
window: 120s
placement: placement:
constraints: constraints:
- node.role == manager - node.role == manager
@ -81,6 +85,9 @@ services:
# O novo container só entra em serviço APÓS passar no healthcheck # O novo container só entra em serviço APÓS passar no healthcheck
start_period: 180s start_period: 180s
# PostgreSQL: usando o servico 'postgres' existente na rede traefik_public
# Nao e necessario definir aqui pois ja existe um servico global
convex_backend: convex_backend:
# Versao estavel - crons movidos para /api/cron/* chamados via crontab do Linux # Versao estavel - crons movidos para /api/cron/* chamados via crontab do Linux
image: ghcr.io/get-convex/convex-backend:precompiled-2025-12-04-cc6af4c image: ghcr.io/get-convex/convex-backend:precompiled-2025-12-04-cc6af4c
@ -159,7 +166,7 @@ services:
- traefik_public - traefik_public
volumes: volumes:
sistema_db: sistema_db: # Mantido para rollback caso necessário (SQLite)
convex_data: convex_data:
networks: networks: