fix(convex): corrigir memory leak com .collect() sem limite e adicionar otimizações
Problema: Convex backend consumindo 16GB+ de RAM causando OOM kills Correções aplicadas: - Substituídos todos os .collect() por .take(LIMIT) em 27+ arquivos - Adicionado índice by_usbPolicyStatus para otimizar query de máquinas - Corrigido N+1 problem em alerts.ts usando Map lookup - Corrigido full table scan em usbPolicy.ts - Corrigidos subscription leaks no frontend (tickets-view, use-ticket-categories) - Atualizada versão do Convex backend para precompiled-2025-12-04-cc6af4c Arquivos principais modificados: - convex/*.ts - limites em todas as queries .collect() - convex/schema.ts - novo índice by_usbPolicyStatus - convex/alerts.ts - N+1 fix com Map - convex/usbPolicy.ts - uso do novo índice - src/components/tickets/tickets-view.tsx - skip condicional - src/hooks/use-ticket-categories.ts - skip condicional 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
parent
a4b46b08ba
commit
638faeb287
33 changed files with 139 additions and 128 deletions
|
|
@ -509,9 +509,9 @@ async function forEachScopedTicketByResolvedRangeChunked(
|
|||
.order("desc");
|
||||
|
||||
// Coleta tickets do chunk (o chunk ja e limitado por periodo)
|
||||
const snapshot = await query.collect();
|
||||
const snapshot = await query.take(1000);
|
||||
// Limita processamento a 1000 tickets por chunk para evitar timeout
|
||||
const limitedSnapshot = snapshot.slice(0, 1000);
|
||||
const limitedSnapshot = snapshot;
|
||||
for (const ticket of limitedSnapshot) {
|
||||
const resolvedAt = typeof ticket.resolvedAt === "number" ? ticket.resolvedAt : null;
|
||||
if (resolvedAt === null) continue;
|
||||
|
|
@ -535,11 +535,10 @@ export async function fetchOpenScopedTickets(
|
|||
// Limita a 500 tickets por status para evitar OOM
|
||||
const MAX_PER_STATUS = 500;
|
||||
for (const status of statuses) {
|
||||
const allTickets = await ctx.db
|
||||
const snapshot = await ctx.db
|
||||
.query("tickets")
|
||||
.withIndex("by_tenant_status", (q) => q.eq("tenantId", tenantId).eq("status", status))
|
||||
.collect();
|
||||
const snapshot = allTickets.slice(0, MAX_PER_STATUS);
|
||||
.take(500);
|
||||
for (const ticket of snapshot) {
|
||||
if (!OPEN_STATUSES.has(normalizeStatus(ticket.status))) continue;
|
||||
if (scopedCompanyId && ticket.companyId !== scopedCompanyId) continue;
|
||||
|
|
@ -620,7 +619,7 @@ async function fetchCategoryMap(ctx: QueryCtx, tenantId: string) {
|
|||
const categories = await ctx.db
|
||||
.query("ticketCategories")
|
||||
.withIndex("by_tenant", (q) => q.eq("tenantId", tenantId))
|
||||
.collect();
|
||||
.take(500);
|
||||
const map = new Map<string, Doc<"ticketCategories">>();
|
||||
for (const category of categories) {
|
||||
map.set(String(category._id), category);
|
||||
|
|
@ -702,7 +701,7 @@ async function fetchQueues(ctx: QueryCtx, tenantId: string) {
|
|||
return ctx.db
|
||||
.query("queues")
|
||||
.withIndex("by_tenant", (q) => q.eq("tenantId", tenantId))
|
||||
.collect();
|
||||
.take(1000);
|
||||
}
|
||||
|
||||
type CompanySummary = {
|
||||
|
|
@ -1023,7 +1022,7 @@ export async function csatOverviewHandler(
|
|||
const events = await ctx.db
|
||||
.query("ticketEvents")
|
||||
.withIndex("by_ticket", (q) => q.eq("ticketId", ticket._id))
|
||||
.collect();
|
||||
.take(1000);
|
||||
|
||||
for (const event of events) {
|
||||
if (event.type !== "CSAT_RECEIVED" && event.type !== "CSAT_RATED") continue;
|
||||
|
|
@ -1420,11 +1419,10 @@ export async function agentProductivityHandler(
|
|||
|
||||
for (const [agentId, acc] of map) {
|
||||
// Limita a 1000 sessoes por agente para evitar OOM
|
||||
const allSessions = await ctx.db
|
||||
const sessions = await ctx.db
|
||||
.query("ticketWorkSessions")
|
||||
.withIndex("by_agent", (q) => q.eq("agentId", agentId as Id<"users">))
|
||||
.collect()
|
||||
const sessions = allSessions.slice(0, 1000)
|
||||
.take(1000)
|
||||
let total = 0
|
||||
for (const s of sessions) {
|
||||
const started = s.startedAt
|
||||
|
|
@ -1481,7 +1479,7 @@ export async function ticketCategoryInsightsHandler(
|
|||
const categories = await ctx.db
|
||||
.query("ticketCategories")
|
||||
.withIndex("by_tenant", (q) => q.eq("tenantId", tenantId))
|
||||
.collect()
|
||||
.take(500)
|
||||
|
||||
const categoriesById = new Map<Id<"ticketCategories">, Doc<"ticketCategories">>()
|
||||
for (const category of categories) {
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue