feat: SSE for desktop chat, rate limiting, retry, tests, and stack update

- Implement Server-Sent Events (SSE) for desktop chat with an HTTP fallback
- Add rate limiting to the chat APIs (poll, messages, sessions)
- Add retry with exponential backoff for mutations
- Add tests for the liveChat module (20 tests)
- Fix SMTP tests (unit tests for extractEnvelopeAddress)
- Add by_status_lastActivity index for the inactive-sessions cron (see the schema sketch below)
- Update stack: Bun 1.3.4, React 19, recharts 3, noble/hashes 2, etc.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
esdrasrenan 2025-12-07 16:29:18 -03:00
parent 0e0bd9a49c
commit d01c37522f
19 changed files with 1465 additions and 443 deletions
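The by_status_lastActivity index from the commit message does not appear in the excerpts below. As a rough sketch of what such an index and the inactive-sessions cron lookup could look like in Convex, with the table and field names being assumptions rather than the actual schema change:

```ts
// Hypothetical sketch - table and field names are assumed, not taken from this commit.
// convex/schema.ts
import { defineSchema, defineTable } from "convex/server"
import { v } from "convex/values"

export default defineSchema({
  liveChatSessions: defineTable({
    status: v.string(),
    lastActivityAt: v.number(),
    // ...remaining fields omitted
  }).index("by_status_lastActivity", ["status", "lastActivityAt"]),
})

// The inactive-sessions cron can then range over the index instead of scanning the table:
//   ctx.db.query("liveChatSessions")
//     .withIndex("by_status_lastActivity", (q) =>
//       q.eq("status", "active").lt("lastActivityAt", cutoff))
//     .collect()
```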


@@ -4,6 +4,8 @@ import { api } from "@/convex/_generated/api"
import type { Id } from "@/convex/_generated/dataModel"
import { createCorsPreflight, jsonWithCors } from "@/server/cors"
import { createConvexClient, ConvexConfigurationError } from "@/server/convex-client"
import { checkRateLimit, RATE_LIMITS, rateLimitHeaders } from "@/server/rate-limit"
import { withRetry } from "@/server/retry"
const getMessagesSchema = z.object({
machineToken: z.string().min(1),
@@ -58,6 +60,26 @@ export async function POST(request: Request) {
}
const action = raw.action ?? "list"
const machineToken = raw.machineToken as string | undefined
// Rate limiting per machine token
if (machineToken) {
const rateLimit = checkRateLimit(
`chat-messages:${machineToken}`,
RATE_LIMITS.CHAT_MESSAGES.maxRequests,
RATE_LIMITS.CHAT_MESSAGES.windowMs
)
if (!rateLimit.allowed) {
return jsonWithCors(
{ error: "Rate limit exceeded", retryAfterMs: rateLimit.retryAfterMs },
429,
origin,
CORS_METHODS,
rateLimitHeaders(rateLimit)
)
}
}
if (action === "list") {
let payload
@@ -101,19 +123,24 @@ }
}
try {
const result = await client.mutation(api.liveChat.postMachineMessage, {
machineToken: payload.machineToken,
ticketId: payload.ticketId as Id<"tickets">,
body: payload.body,
attachments: payload.attachments as
| Array<{
storageId: Id<"_storage">
name: string
size?: number
type?: string
}>
| undefined,
})
// Retry with exponential backoff for transient failures
const result = await withRetry(
() =>
client.mutation(api.liveChat.postMachineMessage, {
machineToken: payload.machineToken,
ticketId: payload.ticketId as Id<"tickets">,
body: payload.body,
attachments: payload.attachments as
| Array<{
storageId: Id<"_storage">
name: string
size?: number
type?: string
}>
| undefined,
}),
{ maxRetries: 3, baseDelayMs: 100, maxDelayMs: 2000 }
)
return jsonWithCors(result, 200, origin, CORS_METHODS)
} catch (error) {
console.error("[machines.chat.messages] Falha ao enviar mensagem", error)


@@ -3,6 +3,7 @@ import { z } from "zod"
import { api } from "@/convex/_generated/api"
import { createCorsPreflight, jsonWithCors } from "@/server/cors"
import { createConvexClient, ConvexConfigurationError } from "@/server/convex-client"
import { checkRateLimit, RATE_LIMITS, rateLimitHeaders } from "@/server/rate-limit"
const pollSchema = z.object({
machineToken: z.string().min(1),
@@ -43,12 +44,29 @@ )
)
}
// Rate limiting per machine token
const rateLimit = checkRateLimit(
`chat-poll:${payload.machineToken}`,
RATE_LIMITS.CHAT_POLL.maxRequests,
RATE_LIMITS.CHAT_POLL.windowMs
)
if (!rateLimit.allowed) {
return jsonWithCors(
{ error: "Rate limit exceeded", retryAfterMs: rateLimit.retryAfterMs },
429,
origin,
CORS_METHODS,
rateLimitHeaders(rateLimit)
)
}
try {
const result = await client.query(api.liveChat.checkMachineUpdates, {
machineToken: payload.machineToken,
lastCheckedAt: payload.lastCheckedAt,
})
return jsonWithCors(result, 200, origin, CORS_METHODS)
return jsonWithCors(result, 200, origin, CORS_METHODS, rateLimitHeaders(rateLimit))
} catch (error) {
console.error("[machines.chat.poll] Falha ao verificar atualizacoes", error)
const details = error instanceof Error ? error.message : String(error)


@@ -3,6 +3,7 @@ import { z } from "zod"
import { api } from "@/convex/_generated/api"
import { createCorsPreflight, jsonWithCors } from "@/server/cors"
import { createConvexClient, ConvexConfigurationError } from "@/server/convex-client"
import { checkRateLimit, RATE_LIMITS, rateLimitHeaders } from "@/server/rate-limit"
const sessionsSchema = z.object({
machineToken: z.string().min(1),
@@ -42,11 +43,28 @@ )
)
}
// Rate limiting per machine token
const rateLimit = checkRateLimit(
`chat-sessions:${payload.machineToken}`,
RATE_LIMITS.CHAT_SESSIONS.maxRequests,
RATE_LIMITS.CHAT_SESSIONS.windowMs
)
if (!rateLimit.allowed) {
return jsonWithCors(
{ error: "Rate limit exceeded", retryAfterMs: rateLimit.retryAfterMs },
429,
origin,
CORS_METHODS,
rateLimitHeaders(rateLimit)
)
}
try {
const sessions = await client.query(api.liveChat.listMachineSessions, {
machineToken: payload.machineToken,
})
return jsonWithCors({ sessions }, 200, origin, CORS_METHODS)
return jsonWithCors({ sessions }, 200, origin, CORS_METHODS, rateLimitHeaders(rateLimit))
} catch (error) {
console.error("[machines.chat.sessions] Falha ao listar sessoes", error)
const details = error instanceof Error ? error.message : String(error)


@@ -0,0 +1,167 @@
import { api } from "@/convex/_generated/api"
import { createConvexClient, ConvexConfigurationError } from "@/server/convex-client"
import { resolveCorsOrigin } from "@/server/cors"
export const runtime = "nodejs"
export const dynamic = "force-dynamic"
// GET /api/machines/chat/stream?token=xxx
// Server-Sent Events endpoint for real-time chat updates
export async function GET(request: Request) {
const origin = request.headers.get("origin")
const resolvedOrigin = resolveCorsOrigin(origin)
// Extract the token from the query string
const url = new URL(request.url)
const token = url.searchParams.get("token")
if (!token) {
return new Response("Missing token", {
status: 400,
headers: {
"Access-Control-Allow-Origin": resolvedOrigin,
"Access-Control-Allow-Credentials": resolvedOrigin !== "*" ? "true" : "false",
},
})
}
let client
try {
client = createConvexClient()
} catch (error) {
if (error instanceof ConvexConfigurationError) {
return new Response(error.message, {
status: 500,
headers: {
"Access-Control-Allow-Origin": resolvedOrigin,
"Access-Control-Allow-Credentials": resolvedOrigin !== "*" ? "true" : "false",
},
})
}
throw error
}
// Validate the token before starting the stream
try {
await client.query(api.liveChat.checkMachineUpdates, { machineToken: token })
} catch (error) {
const message = error instanceof Error ? error.message : "Token invalido"
return new Response(message, {
status: 401,
headers: {
"Access-Control-Allow-Origin": resolvedOrigin,
"Access-Control-Allow-Credentials": resolvedOrigin !== "*" ? "true" : "false",
},
})
}
const encoder = new TextEncoder()
const stream = new ReadableStream({
async start(controller) {
let isAborted = false
let previousState: string | null = null
const sendEvent = (event: string, data: unknown) => {
if (isAborted) return
try {
controller.enqueue(encoder.encode(`event: ${event}\n`))
controller.enqueue(encoder.encode(`data: ${JSON.stringify(data)}\n\n`))
} catch {
// Stream already closed
isAborted = true
}
}
// Heartbeat every 30s to keep the connection alive
const heartbeatInterval = setInterval(() => {
if (isAborted) {
clearInterval(heartbeatInterval)
return
}
sendEvent("heartbeat", { ts: Date.now() })
}, 30_000)
// Poll internally every 2s and push updates via SSE
const pollInterval = setInterval(async () => {
if (isAborted) {
clearInterval(pollInterval)
return
}
try {
const result = await client.query(api.liveChat.checkMachineUpdates, {
machineToken: token,
})
// Serialize a snapshot of the state to detect changes
const currentState = JSON.stringify({
hasActiveSessions: result.hasActiveSessions,
totalUnread: result.totalUnread,
sessions: result.sessions,
})
// Only send an update when something changed
if (currentState !== previousState) {
sendEvent("update", {
...result,
ts: Date.now(),
})
previousState = currentState
}
} catch (error) {
console.error("[SSE] Poll error:", error)
// Send an error event and close the connection
sendEvent("error", { message: "Poll failed" })
isAborted = true
clearInterval(pollInterval)
clearInterval(heartbeatInterval)
controller.close()
}
}, 2_000)
// Send the initial connection event
sendEvent("connected", { ts: Date.now() })
// Clean up when the connection is aborted
request.signal.addEventListener("abort", () => {
isAborted = true
clearInterval(heartbeatInterval)
clearInterval(pollInterval)
try {
controller.close()
} catch {
// Already closed
}
})
},
})
return new Response(stream, {
headers: {
"Content-Type": "text/event-stream",
"Cache-Control": "no-cache, no-transform",
Connection: "keep-alive",
"X-Accel-Buffering": "no", // Desabilita buffering no nginx
"Access-Control-Allow-Origin": resolvedOrigin,
"Access-Control-Allow-Credentials": resolvedOrigin !== "*" ? "true" : "false",
},
})
}
// OPTIONS for CORS preflight
export async function OPTIONS(request: Request) {
const origin = request.headers.get("origin")
const resolvedOrigin = resolveCorsOrigin(origin)
return new Response(null, {
status: 204,
headers: {
"Access-Control-Allow-Origin": resolvedOrigin,
"Access-Control-Allow-Methods": "GET, OPTIONS",
"Access-Control-Allow-Headers": "Content-Type, Authorization",
"Access-Control-Allow-Credentials": resolvedOrigin !== "*" ? "true" : "false",
"Access-Control-Max-Age": "86400",
},
})
}
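For context, a minimal sketch of how the desktop client might consume this stream and fall back to plain HTTP polling when SSE is unavailable. The client code below is not part of this commit; the poll path, payload shape, and EventSource availability in the desktop runtime are assumptions:

```ts
// Hypothetical desktop-side consumer - not part of this commit.
type ChatUpdate = {
  hasActiveSessions: boolean
  totalUnread: number
  sessions: unknown[]
}

export function connectChatUpdates(
  baseUrl: string,
  token: string,
  onUpdate: (update: ChatUpdate) => void
): () => void {
  let pollTimer: ReturnType<typeof setInterval> | undefined

  const source = new EventSource(
    `${baseUrl}/api/machines/chat/stream?token=${encodeURIComponent(token)}`
  )

  source.addEventListener("update", (event) => {
    onUpdate(JSON.parse((event as MessageEvent<string>).data) as ChatUpdate)
  })

  source.onerror = () => {
    // SSE unavailable or dropped: close the stream and fall back to HTTP polling (path assumed).
    source.close()
    if (pollTimer) return
    pollTimer = setInterval(async () => {
      const res = await fetch(`${baseUrl}/api/machines/chat/poll`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ machineToken: token }),
      })
      if (res.ok) onUpdate((await res.json()) as ChatUpdate)
    }, 5_000)
  }

  return () => {
    source.close()
    if (pollTimer) clearInterval(pollTimer)
  }
}
```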


@@ -36,7 +36,18 @@ export function createCorsPreflight(origin: string | null, methods = "POST, OPTI
return applyCorsHeaders(response, origin, methods)
}
export function jsonWithCors<T>(data: T, init: number | ResponseInit, origin: string | null, methods = "POST, OPTIONS") {
export function jsonWithCors<T>(
data: T,
init: number | ResponseInit,
origin: string | null,
methods = "POST, OPTIONS",
extraHeaders?: Record<string, string>
) {
const response = NextResponse.json(data, typeof init === "number" ? { status: init } : init)
if (extraHeaders) {
for (const [key, value] of Object.entries(extraHeaders)) {
response.headers.set(key, value)
}
}
return applyCorsHeaders(response, origin, methods)
}

src/server/rate-limit.ts (new file, 105 lines)

@@ -0,0 +1,105 @@
/**
* Simple in-memory rate limiting for the machine APIs.
* Adequate for a single-node VPS. To scale horizontally,
* consider Redis or another distributed store.
*/
type RateLimitEntry = {
count: number
resetAt: number
}
// In-memory store - cleaned up automatically
const store = new Map<string, RateLimitEntry>()
export type RateLimitResult = {
allowed: boolean
remaining: number
resetAt: number
retryAfterMs: number
}
/**
* Checks whether a request should be allowed based on the rate limit.
*
* @param key - Unique identifier (e.g. `chat-poll:${token}`)
* @param maxRequests - Maximum number of requests allowed in the window
* @param windowMs - Window size in milliseconds
* @returns Result with status and limit information
*/
export function checkRateLimit(
key: string,
maxRequests: number,
windowMs: number
): RateLimitResult {
const now = Date.now()
const entry = store.get(key)
// No entry yet, or the window expired: start a new one
if (!entry || entry.resetAt <= now) {
const resetAt = now + windowMs
store.set(key, { count: 1, resetAt })
return {
allowed: true,
remaining: maxRequests - 1,
resetAt,
retryAfterMs: 0,
}
}
// Limit reached
if (entry.count >= maxRequests) {
return {
allowed: false,
remaining: 0,
resetAt: entry.resetAt,
retryAfterMs: entry.resetAt - now,
}
}
// Increment the counter
entry.count++
return {
allowed: true,
remaining: maxRequests - entry.count,
resetAt: entry.resetAt,
retryAfterMs: 0,
}
}
/**
* Predefined limits for the machine APIs
*/
export const RATE_LIMITS = {
// Polling: 60 req/min (allows polling every 1s)
CHAT_POLL: { maxRequests: 60, windowMs: 60_000 },
// Messages: 30 req/min
CHAT_MESSAGES: { maxRequests: 30, windowMs: 60_000 },
// Sessions: 30 req/min
CHAT_SESSIONS: { maxRequests: 30, windowMs: 60_000 },
// Upload: 10 req/min
CHAT_UPLOAD: { maxRequests: 10, windowMs: 60_000 },
} as const
/**
* Builds rate limit headers for the HTTP response
*/
export function rateLimitHeaders(result: RateLimitResult): Record<string, string> {
return {
"X-RateLimit-Remaining": String(result.remaining),
"X-RateLimit-Reset": String(Math.ceil(result.resetAt / 1000)),
...(result.allowed ? {} : { "Retry-After": String(Math.ceil(result.retryAfterMs / 1000)) }),
}
}
// Sweep expired entries every 60 seconds
if (typeof setInterval !== "undefined") {
setInterval(() => {
const now = Date.now()
for (const [key, entry] of store) {
if (entry.resetAt <= now) {
store.delete(key)
}
}
}, 60_000)
}
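A small test sketch for the window behavior; the runner (bun:test) is assumed from the Bun stack bullet, and these are not the liveChat tests added by the commit:

```ts
// Hypothetical test sketch - not the tests shipped in this commit.
import { describe, expect, it } from "bun:test"
import { checkRateLimit } from "@/server/rate-limit"

describe("checkRateLimit", () => {
  it("blocks the request that exceeds maxRequests within the window", () => {
    const key = `test-key:${Date.now()}`

    expect(checkRateLimit(key, 2, 60_000).allowed).toBe(true)
    expect(checkRateLimit(key, 2, 60_000).allowed).toBe(true)

    const third = checkRateLimit(key, 2, 60_000)
    expect(third.allowed).toBe(false)
    expect(third.remaining).toBe(0)
    expect(third.retryAfterMs).toBeGreaterThan(0)
  })
})
```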

src/server/retry.ts (new file, 84 lines)

@@ -0,0 +1,84 @@
/**
* Retry with exponential backoff for transient operations.
* Useful for Convex mutations that may fail temporarily.
*/
export type RetryOptions = {
/** Maximum number of retries (default: 3) */
maxRetries?: number
/** Base delay in ms (default: 100) */
baseDelayMs?: number
/** Maximum delay in ms (default: 2000) */
maxDelayMs?: number
/** Predicate that decides whether an error is retryable (default: true for everything except validation errors) */
isRetryable?: (error: unknown) => boolean
}
const DEFAULT_OPTIONS: Required<RetryOptions> = {
maxRetries: 3,
baseDelayMs: 100,
maxDelayMs: 2000,
isRetryable: (error: unknown) => {
// Do not retry validation errors
if (error instanceof Error) {
const msg = error.message.toLowerCase()
if (
msg.includes("invalido") ||
msg.includes("invalid") ||
msg.includes("not found") ||
msg.includes("unauthorized") ||
msg.includes("forbidden")
) {
return false
}
}
return true
},
}
/**
* Runs a function with retry and exponential backoff.
*
* @example
* ```ts
* const result = await withRetry(
* () => client.mutation(api.liveChat.postMachineMessage, { ... }),
* { maxRetries: 3, baseDelayMs: 100 }
* )
* ```
*/
export async function withRetry<T>(fn: () => Promise<T>, options: RetryOptions = {}): Promise<T> {
const opts = { ...DEFAULT_OPTIONS, ...options }
let lastError: unknown
for (let attempt = 0; attempt <= opts.maxRetries; attempt++) {
try {
return await fn()
} catch (error) {
lastError = error
// Do not retry non-retryable errors
if (!opts.isRetryable(error)) {
throw error
}
// Last attempt - do not wait, just fall through and rethrow
if (attempt >= opts.maxRetries) {
break
}
// Compute the delay with exponential backoff + jitter
const exponentialDelay = opts.baseDelayMs * Math.pow(2, attempt)
const jitter = Math.random() * opts.baseDelayMs
const delay = Math.min(exponentialDelay + jitter, opts.maxDelayMs)
await sleep(delay)
}
}
throw lastError
}
function sleep(ms: number): Promise<void> {
return new Promise((resolve) => setTimeout(resolve, ms))
}
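Callers can narrow what counts as transient by supplying their own isRetryable. A hedged usage sketch; the URL and the TypeError heuristic are illustrative, not from this commit:

```ts
import { withRetry } from "@/server/retry"

// Hypothetical usage - retry only network-level failures, never HTTP error statuses.
async function fetchHealth(): Promise<unknown> {
  return withRetry(
    async () => {
      const res = await fetch("https://example.com/api/health")
      if (!res.ok) throw new Error(`http ${res.status}`) // not retried by the predicate below
      return res.json()
    },
    {
      maxRetries: 2,
      // fetch rejects with a TypeError on network failures; treat only those as transient.
      isRetryable: (error) => error instanceof TypeError,
    }
  )
}
```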