chore: sync staging

Esdras Renan 2025-11-10 01:57:45 -03:00
parent c5ddd54a3e
commit 561b19cf66
610 changed files with 105285 additions and 1206 deletions

View file

@@ -23,6 +23,7 @@ import type * as deviceFields from "../deviceFields.js";
import type * as devices from "../devices.js";
import type * as fields from "../fields.js";
import type * as files from "../files.js";
import type * as incidents from "../incidents.js";
import type * as invites from "../invites.js";
import type * as machines from "../machines.js";
import type * as metrics from "../metrics.js";
@@ -70,6 +71,7 @@ declare const fullApi: ApiFromModules<{
devices: typeof devices;
fields: typeof fields;
files: typeof files;
incidents: typeof incidents;
invites: typeof invites;
machines: typeof machines;
metrics: typeof metrics;

View file

@@ -225,6 +225,8 @@ export const list = query({
slug: category.slug,
description: category.description,
order: category.order,
createdAt: category.createdAt,
updatedAt: category.updatedAt,
secondary: subcategories
.filter((item) => item.categoryId === category._id)
.sort((a, b) => a.order - b.order)
@@ -233,6 +235,9 @@
name: item.name,
slug: item.slug,
order: item.order,
categoryId: String(item.categoryId),
createdAt: item.createdAt,
updatedAt: item.updatedAt,
})),
}))
},
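This hunk adds `createdAt`/`updatedAt` to the category payload, and `categoryId` (stringified), `createdAt`, and `updatedAt` to each subcategory. A hedged sketch of the enriched shape the client now receives (type names are illustrative, not from the repo):

```ts
// Illustrative client-side view of the enriched list() payload.
type SubcategoryDto = {
  name: string;
  slug: string;
  order: number;
  categoryId: string; // serialized via String(item.categoryId)
  createdAt: number;  // newly exposed timestamp
  updatedAt: number;  // newly exposed timestamp
};

type CategoryDto = {
  slug: string;
  description: string; // optionality not visible in this hunk
  order: number;
  createdAt: number;
  updatedAt: number;
  secondary: SubcategoryDto[]; // subcategories filtered by categoryId, sorted by order
};
```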

View file

@@ -1,5 +1,13 @@
import { cronJobs } from "convex/server"
import { api } from "./_generated/api"
const crons = cronJobs()
crons.interval(
"report-export-runner",
{ minutes: 15 },
api.reports.triggerScheduledExports,
{}
)
export default crons
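The cron invokes `api.reports.triggerScheduledExports` every 15 minutes with empty args, so the action's optional `tenantId` stays undefined and all tenants are processed. A hedged sketch of what a tenant-scoped registration would look like (hypothetical, not in this commit):

```ts
// Hypothetical tenant-scoped variant of the cron registration above.
crons.interval(
  "report-export-runner-atlas",
  { minutes: 15 },
  api.reports.triggerScheduledExports,
  { tenantId: "tenant-atlas" } // placeholder tenant id
)
```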

convex/incidents.ts (new file, 178 lines)
View file

@@ -0,0 +1,178 @@
import { ConvexError, v } from "convex/values"
import { mutation, query } from "./_generated/server"
import type { Id } from "./_generated/dataModel"
import { requireStaff } from "./rbac"
const DEFAULT_STATUS = "investigating"
function timelineId() {
if (typeof crypto !== "undefined" && typeof crypto.randomUUID === "function") {
return crypto.randomUUID()
}
return `${Date.now()}-${Math.random().toString(36).slice(2, 10)}`
}
export const list = query({
args: { tenantId: v.string(), viewerId: v.id("users") },
handler: async (ctx, { tenantId, viewerId }) => {
await requireStaff(ctx, viewerId, tenantId)
const incidents = await ctx.db
.query("incidents")
.withIndex("by_tenant_updated", (q) => q.eq("tenantId", tenantId))
.order("desc")
.collect()
return incidents
},
})
export const createIncident = mutation({
args: {
tenantId: v.string(),
actorId: v.id("users"),
title: v.string(),
severity: v.string(),
impactSummary: v.optional(v.string()),
affectedQueues: v.optional(v.array(v.string())),
initialUpdate: v.optional(v.string()),
},
handler: async (ctx, { tenantId, actorId, title, severity, impactSummary, affectedQueues, initialUpdate }) => {
const viewer = await requireStaff(ctx, actorId, tenantId)
const normalizedTitle = title.trim()
if (normalizedTitle.length < 3) {
throw new ConvexError("Informe um título válido para o incidente")
}
const now = Date.now()
const timelineEntry = {
id: timelineId(),
authorId: actorId,
authorName: viewer.user.name ?? viewer.user.email ?? "Equipe",
message: initialUpdate?.trim().length ? initialUpdate.trim() : "Incidente registrado.",
type: "created",
createdAt: now,
}
const id = await ctx.db.insert("incidents", {
tenantId,
title: normalizedTitle,
status: DEFAULT_STATUS,
severity,
impactSummary: impactSummary?.trim() || undefined,
affectedQueues: affectedQueues ?? [],
ownerId: actorId,
ownerName: viewer.user.name ?? undefined,
ownerEmail: viewer.user.email ?? undefined,
startedAt: now,
updatedAt: now,
resolvedAt: undefined,
timeline: [timelineEntry],
})
return id
},
})
export const updateIncidentStatus = mutation({
args: {
tenantId: v.string(),
actorId: v.id("users"),
incidentId: v.id("incidents"),
status: v.string(),
},
handler: async (ctx, { tenantId, actorId, incidentId, status }) => {
const viewer = await requireStaff(ctx, actorId, tenantId)
const incident = await ctx.db.get(incidentId)
if (!incident || incident.tenantId !== tenantId) {
throw new ConvexError("Incidente não encontrado")
}
const now = Date.now()
const timeline = [
...(incident.timeline ?? []),
{
id: timelineId(),
authorId: actorId,
authorName: viewer.user.name ?? viewer.user.email ?? "Equipe",
message: `Status atualizado para ${status}`,
type: "status",
createdAt: now,
},
]
await ctx.db.patch(incidentId, {
status,
updatedAt: now,
resolvedAt: status === "resolved" ? now : incident.resolvedAt ?? undefined,
timeline,
})
},
})
export const bulkUpdateIncidentStatus = mutation({
args: {
tenantId: v.string(),
actorId: v.id("users"),
incidentIds: v.array(v.id("incidents")),
status: v.string(),
},
handler: async (ctx, { tenantId, actorId, incidentIds, status }) => {
const viewer = await requireStaff(ctx, actorId, tenantId)
const now = Date.now()
for (const incidentId of incidentIds) {
const incident = await ctx.db.get(incidentId)
if (!incident || incident.tenantId !== tenantId) continue
const timeline = [
...(incident.timeline ?? []),
{
id: timelineId(),
authorId: actorId,
authorName: viewer.user.name ?? viewer.user.email ?? "Equipe",
message: `Status atualizado em massa para ${status}`,
type: "status",
createdAt: now,
},
]
await ctx.db.patch(incidentId, {
status,
updatedAt: now,
resolvedAt: status === "resolved" ? now : incident.resolvedAt ?? undefined,
timeline,
})
}
},
})
export const addIncidentUpdate = mutation({
args: {
tenantId: v.string(),
actorId: v.id("users"),
incidentId: v.id("incidents"),
message: v.string(),
status: v.optional(v.string()),
},
handler: async (ctx, { tenantId, actorId, incidentId, message, status }) => {
const viewer = await requireStaff(ctx, actorId, tenantId)
const incident = await ctx.db.get(incidentId)
if (!incident || incident.tenantId !== tenantId) {
throw new ConvexError("Incidente não encontrado")
}
const trimmed = message.trim()
if (trimmed.length < 3) {
throw new ConvexError("Descreva a atualização do incidente")
}
const now = Date.now()
const timeline = [
...(incident.timeline ?? []),
{
id: timelineId(),
authorId: actorId,
authorName: viewer.user.name ?? viewer.user.email ?? "Equipe",
message: trimmed,
type: "update",
createdAt: now,
},
]
await ctx.db.patch(incidentId, {
timeline,
status: status ?? incident.status,
updatedAt: now,
resolvedAt: status === "resolved" ? now : incident.resolvedAt ?? undefined,
})
},
})
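These mutations and the `list` query are consumed from React through Convex's generated `api` object. A minimal wiring sketch (hook names are Convex's standard `useQuery`/`useMutation`; the file name and argument values are illustrative):

```ts
// useIncidents.ts — illustrative client wiring for the new incidents module.
import { useMutation, useQuery } from "convex/react";
import { api } from "../convex/_generated/api";
import type { Id } from "../convex/_generated/dataModel";

export function useIncidents(tenantId: string, viewerId: Id<"users">) {
  // Reactive list, newest first (by_tenant_updated index, descending).
  const incidents = useQuery(api.incidents.list, { tenantId, viewerId });

  const createIncident = useMutation(api.incidents.createIncident);
  const addUpdate = useMutation(api.incidents.addIncidentUpdate);

  // Example call; title/severity values are placeholders.
  const reportOutage = () =>
    createIncident({
      tenantId,
      actorId: viewerId,
      title: "Fila de e-mail indisponível",
      severity: "high",
      initialUpdate: "Investigando falha no worker de e-mail.",
    });

  return { incidents, reportOutage, addUpdate };
}
```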

View file

@@ -1,4 +1,4 @@
import { query } from "./_generated/server";
import { action, query } from "./_generated/server";
import type { QueryCtx } from "./_generated/server";
import { ConvexError, v } from "convex/values";
import type { Doc, Id } from "./_generated/dataModel";
@@ -503,6 +503,41 @@ export const slaOverview = query({
handler: slaOverviewHandler,
});
export const triggerScheduledExports = action({
args: {
tenantId: v.optional(v.string()),
},
handler: async (_ctx, args) => {
const secret = process.env.REPORTS_CRON_SECRET
const baseUrl =
process.env.REPORTS_CRON_BASE_URL ??
process.env.NEXT_PUBLIC_APP_URL ??
process.env.BETTER_AUTH_URL
if (!secret || !baseUrl) {
console.warn("[reports] cron skip: missing REPORTS_CRON_SECRET or base URL")
return { skipped: true }
}
const endpoint = `${baseUrl.replace(/\/$/, "")}/api/reports/schedules/run`
const response = await fetch(endpoint, {
method: "POST",
headers: {
Authorization: `Bearer ${secret}`,
"Content-Type": "application/json",
},
body: JSON.stringify({ tenantId: args.tenantId }),
})
if (!response.ok) {
const detail = await response.text().catch(() => response.statusText)
throw new ConvexError(`Falha ao disparar agendamentos: ${response.status} ${detail}`)
}
return response.json()
},
})
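The action POSTs to `/api/reports/schedules/run` with a bearer secret; the route itself is not part of this commit. A hypothetical sketch of what the receiving Next.js handler presumably does (only the bearer check is implied by the action above; the rest is illustrative):

```ts
// src/app/api/reports/schedules/run/route.ts — hypothetical counterpart sketch.
import { NextResponse } from "next/server";

export async function POST(request: Request) {
  const secret = process.env.REPORTS_CRON_SECRET;
  const auth = request.headers.get("authorization");
  if (!secret || auth !== `Bearer ${secret}`) {
    return NextResponse.json({ error: "unauthorized" }, { status: 401 });
  }
  const { tenantId } = (await request.json().catch(() => ({}))) as { tenantId?: string };
  // ...look up due ReportExportSchedule rows (optionally scoped to tenantId)
  // and record ReportExportRun entries here...
  return NextResponse.json({ triggered: true, tenantId: tenantId ?? null });
}
```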
export async function csatOverviewHandler(
ctx: QueryCtx,
{ tenantId, viewerId, range, companyId }: { tenantId: string; viewerId: Id<"users">; range?: string; companyId?: Id<"companies"> }
@@ -716,6 +751,101 @@ export const backlogOverview = query({
handler: backlogOverviewHandler,
});
type QueueTrendPoint = { date: string; opened: number; resolved: number }
type QueueTrendEntry = {
id: string
name: string
openedTotal: number
resolvedTotal: number
series: Map<string, QueueTrendPoint>
}
export async function queueLoadTrendHandler(
ctx: QueryCtx,
{
tenantId,
viewerId,
range,
limit,
}: { tenantId: string; viewerId: Id<"users">; range?: string; limit?: number }
) {
const viewer = await requireStaff(ctx, viewerId, tenantId)
const days = range === "90d" ? 90 : range === "30d" ? 30 : 14
const end = new Date()
end.setUTCHours(0, 0, 0, 0)
const endMs = end.getTime() + ONE_DAY_MS
const startMs = endMs - days * ONE_DAY_MS
const tickets = await fetchScopedTickets(ctx, tenantId, viewer)
const queues = await fetchQueues(ctx, tenantId)
const queueNames = new Map<string, string>()
queues.forEach((queue) => queueNames.set(String(queue._id), queue.name))
queueNames.set("unassigned", "Sem fila")
const dayKeys: string[] = []
for (let i = days - 1; i >= 0; i--) {
const key = formatDateKey(endMs - (i + 1) * ONE_DAY_MS)
dayKeys.push(key)
}
const stats = new Map<string, QueueTrendEntry>()
const ensureEntry = (queueId: string) => {
if (!stats.has(queueId)) {
const series = new Map<string, QueueTrendPoint>()
dayKeys.forEach((key) => {
series.set(key, { date: key, opened: 0, resolved: 0 })
})
stats.set(queueId, {
id: queueId,
name: queueNames.get(queueId) ?? "Sem fila",
openedTotal: 0,
resolvedTotal: 0,
series,
})
}
return stats.get(queueId)!
}
for (const ticket of tickets) {
const queueId = ticket.queueId ? String(ticket.queueId) : "unassigned"
if (ticket.createdAt >= startMs && ticket.createdAt < endMs) {
const entry = ensureEntry(queueId)
const bucket = entry.series.get(formatDateKey(ticket.createdAt))
if (bucket) {
bucket.opened += 1
}
entry.openedTotal += 1
}
if (typeof ticket.resolvedAt === "number" && ticket.resolvedAt >= startMs && ticket.resolvedAt < endMs) {
const entry = ensureEntry(queueId)
const bucket = entry.series.get(formatDateKey(ticket.resolvedAt))
if (bucket) {
bucket.resolved += 1
}
entry.resolvedTotal += 1
}
}
const maxEntries = Math.max(1, Math.min(limit ?? 3, 6))
const queuesTrend = Array.from(stats.values())
.sort((a, b) => b.openedTotal - a.openedTotal)
.slice(0, maxEntries)
.map((entry) => ({
id: entry.id,
name: entry.name,
openedTotal: entry.openedTotal,
resolvedTotal: entry.resolvedTotal,
series: dayKeys.map((key) => entry.series.get(key)!),
}))
return { rangeDays: days, queues: queuesTrend }
}
export const queueLoadTrend = query({
args: { tenantId: v.string(), viewerId: v.id("users"), range: v.optional(v.string()), limit: v.optional(v.number()) },
handler: queueLoadTrendHandler,
})
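`queueLoadTrend` returns up to `limit` queues (clamped server-side to 1..6) ranked by tickets opened in the window, each with a per-day `opened`/`resolved` series. A hedged consumption sketch (formatting only, chart wiring omitted; the file name is illustrative):

```ts
// useQueueTrendSummary.ts — illustrative read of the queueLoadTrend payload.
import { useQuery } from "convex/react";
import { api } from "../convex/_generated/api";
import type { Id } from "../convex/_generated/dataModel";

export function useQueueTrendSummary(tenantId: string, viewerId: Id<"users">) {
  const trend = useQuery(api.reports.queueLoadTrend, {
    tenantId,
    viewerId,
    range: "30d", // "90d" → 90 days, "30d" → 30, anything else → 14
    limit: 3,
  });
  if (!trend) return []; // still loading
  return trend.queues.map(
    (q) => `${q.name}: ${q.openedTotal} opened / ${q.resolvedTotal} resolved in ${trend.rangeDays}d`
  );
}
```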
// Touch to ensure CI convex_deploy runs and that agentProductivity is deployed
export async function agentProductivityHandler(
ctx: QueryCtx,

View file

@@ -436,6 +436,34 @@ export default defineSchema({
.index("by_ticket_agent", ["ticketId", "agentId"])
.index("by_agent", ["agentId"]),
incidents: defineTable({
tenantId: v.string(),
title: v.string(),
status: v.string(),
severity: v.string(),
impactSummary: v.optional(v.string()),
affectedQueues: v.array(v.string()),
ownerId: v.optional(v.id("users")),
ownerName: v.optional(v.string()),
ownerEmail: v.optional(v.string()),
startedAt: v.number(),
updatedAt: v.number(),
resolvedAt: v.optional(v.number()),
timeline: v.array(
v.object({
id: v.string(),
authorId: v.id("users"),
authorName: v.optional(v.string()),
message: v.string(),
type: v.optional(v.string()),
createdAt: v.number(),
})
),
})
.index("by_tenant_status", ["tenantId", "status"])
.index("by_tenant_updated", ["tenantId", "updatedAt"])
.index("by_tenant", ["tenantId"]),
ticketCategories: defineTable({
tenantId: v.string(),
name: v.string(),

View file

@@ -12,6 +12,7 @@ const eslintConfig = [
"apps/desktop/dist/**",
"apps/desktop/src-tauri/target/**",
"nova-calendar-main/**",
"referência/**",
"next-env.d.ts",
"convex/_generated/**",
],

View file

@@ -209,6 +209,52 @@ model Ticket {
@@index([tenantId, companyId])
}
model ReportExportSchedule {
id String @id @default(cuid())
tenantId String
name String
reportKeys Json
range String @default("30d")
companyId String?
companyName String?
format String @default("xlsx")
frequency String
dayOfWeek Int?
dayOfMonth Int?
hour Int @default(8)
minute Int @default(0)
timezone String @default("America/Sao_Paulo")
recipients Json
status String @default("ACTIVE")
lastRunAt DateTime?
nextRunAt DateTime?
createdBy String
updatedBy String?
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
runs ReportExportRun[]
@@index([tenantId, status])
@@index([tenantId, nextRunAt])
}
model ReportExportRun {
id String @id @default(cuid())
tenantId String
scheduleId String
status String @default("PENDING")
startedAt DateTime @default(now())
completedAt DateTime?
error String?
artifacts Json?
schedule ReportExportSchedule @relation(fields: [scheduleId], references: [id], onDelete: Cascade)
@@index([tenantId, status])
@@index([tenantId, scheduleId])
}
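`ReportExportSchedule` stores `frequency`, `dayOfWeek`/`dayOfMonth`, `hour`, `minute`, and `timezone`, from which the runner presumably derives `nextRunAt`. A simplified UTC-only sketch of that derivation (real code would honor the `timezone` column, e.g. `America/Sao_Paulo`; the `frequency` values are assumptions, since the column is a plain `String`):

```ts
// Hypothetical nextRunAt helper; naive about short months (day 31 skips them).
type ScheduleFields = {
  frequency: "daily" | "weekly" | "monthly"; // assumed values
  dayOfWeek?: number | null;  // 0-6, for weekly
  dayOfMonth?: number | null; // 1-31, for monthly
  hour: number;
  minute: number;
};

function nextRunAtUtc(s: ScheduleFields, from: Date = new Date()): Date {
  const next = new Date(from);
  next.setUTCHours(s.hour, s.minute, 0, 0);
  if (next <= from) next.setUTCDate(next.getUTCDate() + 1); // today's slot already passed
  if (s.frequency === "weekly" && s.dayOfWeek != null) {
    while (next.getUTCDay() !== s.dayOfWeek) next.setUTCDate(next.getUTCDate() + 1);
  } else if (s.frequency === "monthly" && s.dayOfMonth != null) {
    while (next.getUTCDate() !== s.dayOfMonth) next.setUTCDate(next.getUTCDate() + 1);
  }
  return next;
}
```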
model TicketEvent {
id String @id @default(cuid())
ticketId String

View file

@@ -0,0 +1,28 @@
NODE_ENV=development
# Public app URL
NEXT_PUBLIC_APP_URL=http://localhost:3000
# Better Auth
BETTER_AUTH_URL=http://localhost:3000
BETTER_AUTH_SECRET=change-me-in-prod
# Convex (dev server URL)
NEXT_PUBLIC_CONVEX_URL=http://127.0.0.1:3210
# SQLite database (local dev)
DATABASE_URL=file:./prisma/db.dev.sqlite
# Optional SMTP (dev)
# SMTP_ADDRESS=localhost
# SMTP_PORT=1025
# SMTP_TLS=false
# SMTP_USERNAME=
# SMTP_PASSWORD=
# SMTP_AUTHENTICATION=login
# SMTP_ENABLE_STARTTLS_AUTO=false
# MAILER_SENDER_EMAIL=no-reply@example.com
# Dev-only bypass to simplify local testing (do NOT enable in prod)
# DEV_BYPASS_AUTH=0
# NEXT_PUBLIC_DEV_BYPASS_AUTH=0

View file

@@ -0,0 +1,533 @@
name: CI/CD Web + Desktop
on:
push:
branches: [ main ]
tags:
- 'v*.*.*'
workflow_dispatch:
inputs:
force_web_deploy:
description: 'Force Web deploy (ignore the filter)?'
required: false
default: 'false'
force_convex_deploy:
description: 'Force Convex deploy (ignore the filter)?'
required: false
default: 'false'
env:
APP_DIR: /srv/apps/sistema
VPS_UPDATES_DIR: /var/www/updates
RUN_MACHINE_SMOKE: ${{ vars.RUN_MACHINE_SMOKE || secrets.RUN_MACHINE_SMOKE || 'false' }}
jobs:
changes:
name: Detect changes
runs-on: ubuntu-latest
outputs:
convex: ${{ steps.filter.outputs.convex }}
web: ${{ steps.filter.outputs.web }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Paths filter
id: filter
uses: dorny/paths-filter@v3
with:
filters: |
convex:
- 'convex/**'
web:
- 'src/**'
- 'public/**'
- 'prisma/**'
- 'next.config.ts'
- 'package.json'
- 'pnpm-lock.yaml'
- 'tsconfig.json'
- 'middleware.ts'
- 'stack.yml'
deploy:
name: Deploy (VPS Linux)
needs: changes
# Runs on any push to main (regardless of the filter) or when triggered manually
if: ${{ github.event_name == 'workflow_dispatch' || github.ref == 'refs/heads/main' }}
runs-on: [ self-hosted, linux, vps ]
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Determine APP_DIR (fallback safe path)
id: appdir
run: |
TS=$(date +%s)
# Use a web-specific build dir to avoid clashes with convex job
FALLBACK_DIR="$HOME/apps/web.build.$TS"
mkdir -p "$FALLBACK_DIR"
echo "Using APP_DIR (fallback)=$FALLBACK_DIR"
echo "EFFECTIVE_APP_DIR=$FALLBACK_DIR" >> "$GITHUB_ENV"
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
version: 10.20.0
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: 20
- name: Setup Bun
uses: oven-sh/setup-bun@v2
with:
bun-version: 1.3.1
- name: Verify Bun runtime
run: bun --version
- name: Permissions diagnostic (server paths)
run: |
set +e
echo "== Basic context =="
whoami || true
id || true
groups || true
umask || true
echo "HOME=$HOME"
echo "APP_DIR(default)=${APP_DIR:-/srv/apps/sistema}"
echo "EFFECTIVE_APP_DIR=$EFFECTIVE_APP_DIR"
echo "\n== Permissions check =="
check_path() {
P="$1"
echo "-- $P"
if [ -e "$P" ]; then
stat -c '%A %U:%G %n' "$P" 2>/dev/null || ls -ld "$P" || true
echo -n "WRITABLE? "; [ -w "$P" ] && echo yes || echo no
if command -v namei >/dev/null 2>&1; then
namei -l "$P" || true
fi
TMP="$P/.permtest.$$"
(echo test > "$TMP" 2>/dev/null && echo "CREATE_FILE: ok" && rm -f "$TMP") || echo "CREATE_FILE: failed"
else
echo "(missing)"
fi
}
check_path "/srv/apps/sistema"
check_path "/srv/apps/sistema/src/app/machines/handshake"
check_path "/srv/apps/sistema/apps/desktop/node_modules"
check_path "/srv/apps/sistema/node_modules"
check_path "$EFFECTIVE_APP_DIR"
check_path "$EFFECTIVE_APP_DIR/node_modules"
- name: Sync workspace to APP_DIR (preserving local env)
run: |
mkdir -p "$EFFECTIVE_APP_DIR"
RSYNC_FLAGS="-az --inplace --no-times --no-perms --no-owner --no-group --delete"
# Exclude .env only when copying into the default directory (/srv), to preserve local secrets
EXCLUDE_ENV=(--exclude='.env*' --exclude='apps/desktop/.env*' --exclude='convex/.env*')
if [ "$EFFECTIVE_APP_DIR" != "${APP_DIR:-/srv/apps/sistema}" ]; then
EXCLUDE_ENV=()
fi
rsync $RSYNC_FLAGS \
--filter='protect .next.old*' \
--exclude '.next.old*' \
--filter='protect node_modules' \
--filter='protect node_modules/**' \
--filter='protect .pnpm-store' \
--filter='protect .pnpm-store/**' \
--filter='protect .env' \
--filter='protect .env*' \
--filter='protect apps/desktop/.env*' \
--filter='protect convex/.env*' \
--exclude '.git' \
--exclude '.next' \
--exclude 'node_modules' \
--exclude 'node_modules/**' \
--exclude '.pnpm-store' \
--exclude '.pnpm-store/**' \
"${EXCLUDE_ENV[@]}" \
./ "$EFFECTIVE_APP_DIR"/
- name: Acquire Convex admin key
id: key
run: |
CID=$(docker ps --format '{{.ID}} {{.Names}}' | awk '/sistema_convex_backend/{print $1; exit}')
if [ -z "$CID" ]; then echo "No convex container"; exit 1; fi
KEY=$(docker exec -i "$CID" /bin/sh -lc './generate_admin_key.sh' | tr -d '\r' | grep -o 'convex-self-hosted|[^ ]*' | tail -n1)
echo "ADMIN_KEY=$KEY" >> $GITHUB_OUTPUT
echo "Admin key acquired? $([ -n "$KEY" ] && echo yes || echo no)"
- name: Copy production .env if present
run: |
DEFAULT_DIR="${APP_DIR:-/srv/apps/sistema}"
if [ "$EFFECTIVE_APP_DIR" != "$DEFAULT_DIR" ] && [ -f "$DEFAULT_DIR/.env" ]; then
echo "Copying production .env from $DEFAULT_DIR to $EFFECTIVE_APP_DIR"
cp -f "$DEFAULT_DIR/.env" "$EFFECTIVE_APP_DIR/.env"
fi
- name: Prune workspace for server-only build
run: |
cd "$EFFECTIVE_APP_DIR"
# Keep only root (web) as a package in this effective workspace
printf "packages:\n - .\n\nignoredBuiltDependencies:\n - '@prisma/client'\n - '@prisma/engines'\n - '@tailwindcss/oxide'\n - esbuild\n - prisma\n - sharp\n - unrs-resolver\n" > pnpm-workspace.yaml
- name: Ensure Next.js cache directory exists and is writable
run: |
cd "$EFFECTIVE_APP_DIR"
mkdir -p .next/cache
chmod -R u+rwX .next || true
- name: Cache Next.js build cache (.next/cache)
uses: actions/cache@v4
with:
path: ${{ env.EFFECTIVE_APP_DIR }}/.next/cache
key: ${{ runner.os }}-nextjs-${{ hashFiles('pnpm-lock.yaml', 'bun.lock') }}-${{ hashFiles('src/**/*.ts', 'src/**/*.tsx', 'src/**/*.js', 'src/**/*.jsx', 'next.config.ts') }}
restore-keys: |
${{ runner.os }}-nextjs-${{ hashFiles('pnpm-lock.yaml', 'bun.lock') }}-
- name: Install and build (Next.js)
run: |
cd "$EFFECTIVE_APP_DIR"
bun install --frozen-lockfile --filter '!appsdesktop'
bun run prisma:generate
bun run build:bun
- name: Publish build to stable APP_DIR directory
run: |
set -e
DEST="$HOME/apps/sistema"
mkdir -p "$DEST"
mkdir -p "$DEST/.next/static"
# One-time fix for old root-owned files (esp. .pnpm-store) left by previous containers
docker run --rm -v "$DEST":/target alpine:3 sh -lc 'chown -R 1000:1000 /target 2>/dev/null || true; chmod -R u+rwX /target 2>/dev/null || true' || true
# Preserve previously published static assets to keep stale chunks available for clients mid-navigation
if [ -d "$EFFECTIVE_APP_DIR/.next/static" ]; then
rsync -a \
"$EFFECTIVE_APP_DIR/.next/static/" "$DEST/.next/static/"
fi
# Publish new build; exclude .pnpm-store to avoid Permission denied on old entries
rsync -a --delete \
--chown=1000:1000 \
--exclude '.pnpm-store' --exclude '.pnpm-store/**' \
--exclude '.next/static' \
"$EFFECTIVE_APP_DIR"/ "$DEST"/
echo "Published build to: $DEST"
- name: Swarm deploy (stack.yml)
run: |
cd "$EFFECTIVE_APP_DIR"
# Export .env variables for substitution in the stack (e.g. MACHINE_PROVISIONING_SECRET)
set -o allexport
if [ -f .env ]; then . ./.env; fi
set +o allexport
APP_DIR_STABLE="$HOME/apps/sistema"
if [ ! -d "$APP_DIR_STABLE" ]; then
echo "ERROR: Stable APP_DIR does not exist: $APP_DIR_STABLE" >&2; exit 1
fi
echo "Using APP_DIR (stable)=$APP_DIR_STABLE"
APP_DIR="$APP_DIR_STABLE" RELEASE_SHA=${{ github.sha }} docker stack deploy --with-registry-auth -c stack.yml sistema
- name: Ensure Convex service envs and restart
run: |
cd "$EFFECTIVE_APP_DIR"
set -o allexport
if [ -f .env ]; then . ./.env; fi
set +o allexport
echo "Ensuring Convex envs on service: sistema_convex_backend"
if [ -n "${MACHINE_PROVISIONING_SECRET:-}" ]; then
docker service update --env-add MACHINE_PROVISIONING_SECRET="${MACHINE_PROVISIONING_SECRET}" sistema_convex_backend || true
fi
if [ -n "${MACHINE_TOKEN_TTL_MS:-}" ]; then
docker service update --env-add MACHINE_TOKEN_TTL_MS="${MACHINE_TOKEN_TTL_MS}" sistema_convex_backend || true
fi
if [ -n "${FLEET_SYNC_SECRET:-}" ]; then
docker service update --env-add FLEET_SYNC_SECRET="${FLEET_SYNC_SECRET}" sistema_convex_backend || true
fi
echo "Current envs:"
docker service inspect sistema_convex_backend --format '{{range .Spec.TaskTemplate.ContainerSpec.Env}}{{println .}}{{end}}' || true
echo "Forcing service restart..."
docker service update --force sistema_convex_backend || true
- name: Smoke test — register + heartbeat
run: |
set -e
if [ "${RUN_MACHINE_SMOKE:-false}" != "true" ]; then
echo "RUN_MACHINE_SMOKE != true — pulando smoke test"; exit 0
fi
# Load MACHINE_PROVISIONING_SECRET from production .env on the host
if [ -f /srv/apps/sistema/.env ]; then
set -o allexport
. /srv/apps/sistema/.env
set +o allexport
fi
if [ -z "${MACHINE_PROVISIONING_SECRET:-}" ]; then
echo "MACHINE_PROVISIONING_SECRET ausente — pulando smoke test"; exit 0
fi
HOSTNAME_TEST="ci-smoke-$(date +%s)"
BODY='{"provisioningSecret":"'"$MACHINE_PROVISIONING_SECRET"'","tenantId":"tenant-atlas","hostname":"'"$HOSTNAME_TEST"'","os":{"name":"Linux","version":"6.1.0","architecture":"x86_64"},"macAddresses":["AA:BB:CC:DD:EE:FF"],"serialNumbers":[],"metadata":{"inventory":{"cpu":"i7","ramGb":16}},"registeredBy":"ci-smoke"}'
HTTP=$(curl -sS -o resp.json -w "%{http_code}" -H 'Content-Type: application/json' -d "$BODY" https://tickets.esdrasrenan.com.br/api/machines/register || true)
echo "Register HTTP=$HTTP"
if [ "$HTTP" != "201" ]; then
echo "Register failed:"; tail -c 600 resp.json || true; exit 1; fi
TOKEN=$(node -e 'try{const j=require("fs").readFileSync("resp.json","utf8");process.stdout.write(JSON.parse(j).machineToken||"");}catch(e){process.stdout.write("")}' )
if [ -z "$TOKEN" ]; then echo "Missing token in register response"; exit 1; fi
HB=$(curl -sS -o /dev/null -w "%{http_code}" -H 'Content-Type: application/json' -d '{"machineToken":"'"$TOKEN"'","status":"online","metrics":{"cpuPct":5,"memFreePct":70}}' https://tickets.esdrasrenan.com.br/api/machines/heartbeat || true)
echo "Heartbeat HTTP=$HB"
if [ "$HB" != "200" ]; then echo "Heartbeat failed"; exit 1; fi
- name: Cleanup old build workdirs (keep last 2)
run: |
set -e
ROOT="$HOME/apps"
KEEP=2
PATTERN='web.build.*'
ACTIVE="$HOME/apps/sistema"
echo "Scanning $ROOT for old $PATTERN dirs"
LIST=$(find "$ROOT" -maxdepth 1 -type d -name "$PATTERN" | sort -r || true)
echo "$LIST" | sed -n "1,${KEEP}p" | sed 's/^/Keeping: /' || true
echo "$LIST" | sed "1,${KEEP}d" | while read dir; do
[ -z "$dir" ] && continue
if [ -n "$ACTIVE" ] && [ "$(readlink -f "$dir")" = "$ACTIVE" ]; then
echo "Skipping active dir (in use by APP_DIR): $dir"; continue
fi
echo "Removing $dir"
chmod -R u+rwX "$dir" 2>/dev/null || true
rm -rf "$dir" || {
echo "Local rm failed, falling back to docker (root) cleanup for $dir..."
docker run --rm -v "$dir":/target alpine:3 sh -lc 'chown -R 1000:1000 /target 2>/dev/null || true; chmod -R u+rwX /target 2>/dev/null || true; rm -rf /target/* /target/.[!.]* /target/..?* 2>/dev/null || true' || true
rm -rf "$dir" 2>/dev/null || rmdir "$dir" 2>/dev/null || true
}
done
echo "Disk usage (top 10 under $ROOT):"
du -sh "$ROOT"/* 2>/dev/null | sort -rh | head -n 10 || true
- name: Restart web service with new code (skip — stack deploy already updated)
if: ${{ always() && false }}
run: |
docker service update --force sistema_web
- name: Restart Convex backend service (optional)
run: |
# Fail the job if the convex backend cannot restart
docker service update --force sistema_convex_backend
convex_deploy:
name: Deploy Convex functions
needs: changes
# Runs when convex/** changes or via workflow_dispatch
if: ${{ github.event_name == 'workflow_dispatch' || needs.changes.outputs.convex == 'true' }}
runs-on: [ self-hosted, linux, vps ]
env:
APP_DIR: /srv/apps/sistema
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Determine APP_DIR (fallback safe path)
id: appdir
run: |
TS=$(date +%s)
# Use a convex-specific build dir to avoid clashes with web job
FALLBACK_DIR="$HOME/apps/convex.build.$TS"
mkdir -p "$FALLBACK_DIR"
echo "Using APP_DIR (fallback)=$FALLBACK_DIR"
echo "EFFECTIVE_APP_DIR=$FALLBACK_DIR" >> "$GITHUB_ENV"
- name: Sync workspace to APP_DIR (preserving local env)
run: |
mkdir -p "$EFFECTIVE_APP_DIR"
RSYNC_FLAGS="-az --inplace --no-times --no-perms --no-owner --no-group --delete"
rsync $RSYNC_FLAGS \
--filter='protect .next.old*' \
--exclude '.next.old*' \
--exclude '.env*' \
--exclude 'apps/desktop/.env*' \
--exclude 'convex/.env*' \
--filter='protect node_modules' \
--filter='protect node_modules/**' \
--filter='protect .pnpm-store' \
--filter='protect .pnpm-store/**' \
--exclude '.git' \
--exclude '.next' \
--exclude 'node_modules' \
--exclude 'node_modules/**' \
--exclude '.pnpm-store' \
--exclude '.pnpm-store/**' \
./ "$EFFECTIVE_APP_DIR"/
- name: Acquire Convex admin key
id: key
run: |
CID=$(docker ps --format '{{.ID}} {{.Names}}' | awk '/sistema_convex_backend/{print $1; exit}')
if [ -z "$CID" ]; then echo "No convex container"; exit 1; fi
KEY=$(docker exec -i "$CID" /bin/sh -lc './generate_admin_key.sh' | tr -d '\r' | grep -o 'convex-self-hosted|[^ ]*' | tail -n1)
echo "ADMIN_KEY=$KEY" >> $GITHUB_OUTPUT
echo "Admin key acquired? $([ -n "$KEY" ] && echo yes || echo no)"
- name: Bring convex.json from live app if present
run: |
if [ -f "$APP_DIR/convex.json" ]; then
echo "Copying $APP_DIR/convex.json -> $EFFECTIVE_APP_DIR/convex.json"
cp -f "$APP_DIR/convex.json" "$EFFECTIVE_APP_DIR/convex.json"
else
echo "No existing convex.json found at $APP_DIR; convex CLI will need self-hosted vars"
fi
- name: Set Convex env vars (self-hosted)
env:
CONVEX_SELF_HOSTED_URL: https://convex.esdrasrenan.com.br
CONVEX_SELF_HOSTED_ADMIN_KEY: ${{ steps.key.outputs.ADMIN_KEY }}
MACHINE_PROVISIONING_SECRET: ${{ secrets.MACHINE_PROVISIONING_SECRET }}
MACHINE_TOKEN_TTL_MS: ${{ secrets.MACHINE_TOKEN_TTL_MS }}
FLEET_SYNC_SECRET: ${{ secrets.FLEET_SYNC_SECRET }}
run: |
set -e
docker run --rm -i \
-v "$EFFECTIVE_APP_DIR":/app \
-w /app \
-e CONVEX_SELF_HOSTED_URL \
-e CONVEX_SELF_HOSTED_ADMIN_KEY \
-e MACHINE_PROVISIONING_SECRET \
-e MACHINE_TOKEN_TTL_MS \
-e FLEET_SYNC_SECRET \
node:20-bullseye bash -lc "set -euo pipefail; curl -fsSL https://bun.sh/install | bash >/tmp/bun-install.log; export BUN_INSTALL=\"\${BUN_INSTALL:-/root/.bun}\"; export PATH=\"\$BUN_INSTALL/bin:\$PATH\"; bun install --frozen-lockfile; \
if [ -n \"$MACHINE_PROVISIONING_SECRET\" ]; then bunx convex env set MACHINE_PROVISIONING_SECRET \"$MACHINE_PROVISIONING_SECRET\"; fi; \
if [ -n \"$MACHINE_TOKEN_TTL_MS\" ]; then bunx convex env set MACHINE_TOKEN_TTL_MS \"$MACHINE_TOKEN_TTL_MS\"; fi; \
if [ -n \"$FLEET_SYNC_SECRET\" ]; then bunx convex env set FLEET_SYNC_SECRET \"$FLEET_SYNC_SECRET\"; fi; \
bunx convex env list"
- name: Ensure .env is not present for Convex deploy
run: |
cd "$EFFECTIVE_APP_DIR"
if [ -f .env ]; then
echo "Renaming .env -> .env.bak (Convex self-hosted deploy)"
mv -f .env .env.bak
fi
- name: Deploy functions to Convex self-hosted
env:
CONVEX_SELF_HOSTED_URL: https://convex.esdrasrenan.com.br
CONVEX_SELF_HOSTED_ADMIN_KEY: ${{ steps.key.outputs.ADMIN_KEY }}
run: |
docker run --rm -i \
-v "$EFFECTIVE_APP_DIR":/app \
-w /app \
-e CI=true \
-e CONVEX_SELF_HOSTED_URL \
-e CONVEX_SELF_HOSTED_ADMIN_KEY \
node:20-bullseye bash -lc "set -euo pipefail; curl -fsSL https://bun.sh/install | bash >/tmp/bun-install.log; export BUN_INSTALL=\"\${BUN_INSTALL:-/root/.bun}\"; export PATH=\"\$BUN_INSTALL/bin:\$PATH\"; bun install --frozen-lockfile; bunx convex deploy"
- name: Cleanup old convex build workdirs (keep last 2)
run: |
set -e
ROOT="$HOME/apps"
KEEP=2
PATTERN='convex.build.*'
LIST=$(find "$ROOT" -maxdepth 1 -type d -name "$PATTERN" | sort -r || true)
echo "$LIST" | sed -n "1,${KEEP}p" | sed 's/^/Keeping: /' || true
echo "$LIST" | sed "1,${KEEP}d" | while read dir; do
[ -z "$dir" ] && continue
echo "Removing $dir"
chmod -R u+rwX "$dir" 2>/dev/null || true
rm -rf "$dir" || {
echo "Local rm failed, falling back to docker (root) cleanup for $dir..."
docker run --rm -v "$dir":/target alpine:3 sh -lc 'chown -R 1000:1000 /target 2>/dev/null || true; chmod -R u+rwX /target 2>/dev/null || true; rm -rf /target/* /target/.[!.]* /target/..?* 2>/dev/null || true' || true
rm -rf "$dir" 2>/dev/null || rmdir "$dir" 2>/dev/null || true
}
done
desktop_release:
name: Desktop Release (Windows)
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
runs-on: [ self-hosted, windows, desktop ]
defaults:
run:
working-directory: apps/desktop
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
version: 10.20.0
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: 20
- name: Install deps (desktop)
run: pnpm install --frozen-lockfile
- name: Build with Tauri
uses: tauri-apps/tauri-action@v0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
TAURI_PRIVATE_KEY: ${{ secrets.TAURI_PRIVATE_KEY }}
TAURI_KEY_PASSWORD: ${{ secrets.TAURI_KEY_PASSWORD }}
with:
projectPath: apps/desktop
- name: Upload latest.json + bundles to VPS
uses: appleboy/scp-action@v0.1.7
with:
host: ${{ secrets.VPS_HOST }}
username: ${{ secrets.VPS_USER }}
key: ${{ secrets.VPS_SSH_KEY }}
source: |
**/bundle/**/latest.json
**/bundle/**/*
target: ${{ env.VPS_UPDATES_DIR }}
overwrite: true
diagnose_convex:
name: Diagnose Convex (env + register test)
if: ${{ github.event_name == 'workflow_dispatch' }}
runs-on: [ self-hosted, linux, vps ]
steps:
- name: Print service env and .env subset
run: |
echo "=== Convex service env ==="
docker service inspect sistema_convex_backend --format '{{range .Spec.TaskTemplate.ContainerSpec.Env}}{{println .}}{{end}}' || true
echo
echo "=== /srv/apps/sistema/.env subset ==="
[ -f /srv/apps/sistema/.env ] && grep -E '^(MACHINE_PROVISIONING_SECRET|MACHINE_TOKEN_TTL_MS|FLEET_SYNC_SECRET|NEXT_PUBLIC_CONVEX_URL)=' -n /srv/apps/sistema/.env || echo '(no .env)'
- name: Acquire Convex admin key
id: key
run: |
CID=$(docker ps --format '{{.ID}} {{.Names}}' | awk '/sistema_convex_backend/{print $1; exit}')
if [ -z "$CID" ]; then echo "No convex container"; exit 1; fi
KEY=$(docker exec -i "$CID" /bin/sh -lc './generate_admin_key.sh' | tr -d '\r' | grep -o 'convex-self-hosted|[^ ]*' | tail -n1)
echo "ADMIN_KEY=$KEY" >> $GITHUB_OUTPUT
echo "Admin key acquired? $([ -n "$KEY" ] && echo yes || echo no)"
- name: List Convex env and set missing
env:
CONVEX_SELF_HOSTED_URL: https://convex.esdrasrenan.com.br
ADMIN_KEY: ${{ steps.key.outputs.ADMIN_KEY }}
run: |
set -e
if [ -f /srv/apps/sistema/.env ]; then
set -o allexport
. /srv/apps/sistema/.env
set +o allexport
fi
docker run --rm -i \
-v /srv/apps/sistema:/app -w /app \
-e CONVEX_SELF_HOSTED_URL -e CONVEX_SELF_HOSTED_ADMIN_KEY="$ADMIN_KEY" \
-e MACHINE_PROVISIONING_SECRET -e MACHINE_TOKEN_TTL_MS -e FLEET_SYNC_SECRET \
node:20-bullseye bash -lc "set -euo pipefail; curl -fsSL https://bun.sh/install | bash >/tmp/bun-install.log; export BUN_INSTALL=\"\${BUN_INSTALL:-/root/.bun}\"; export PATH=\"\$BUN_INSTALL/bin:\$PATH\"; bun install --frozen-lockfile; \
unset CONVEX_DEPLOYMENT; bunx convex env list; \
if [ -n \"$MACHINE_PROVISIONING_SECRET\" ]; then bunx convex env set MACHINE_PROVISIONING_SECRET \"$MACHINE_PROVISIONING_SECRET\"; fi; \
if [ -n \"$MACHINE_TOKEN_TTL_MS\" ]; then bunx convex env set MACHINE_TOKEN_TTL_MS \"$MACHINE_TOKEN_TTL_MS\"; fi; \
if [ -n \"$FLEET_SYNC_SECRET\" ]; then bunx convex env set FLEET_SYNC_SECRET \"$FLEET_SYNC_SECRET\"; fi; \
bunx convex env list"
- name: Test register from runner
run: |
HOST="vm-teste-$(date +%s)"
DATA='{"provisioningSecret":"'"${MACHINE_PROVISIONING_SECRET:-"71daa9ef54cb224547e378f8121ca898b614446c142a132f73c2221b4d53d7d6"}"'","tenantId":"tenant-atlas","hostname":"'"$HOST"'","os":{"name":"Linux","version":"6.1.0","architecture":"x86_64"},"macAddresses":["AA:BB:CC:DD:EE:FF"],"serialNumbers":[],"metadata":{"inventario":{"cpu":"i7","ramGb":16}},"registeredBy":"diag-test"}'
HTTP=$(curl -sS -o resp.json -w "%{http_code}" -H 'Content-Type: application/json' -d "$DATA" https://tickets.esdrasrenan.com.br/api/machines/register || true)
echo "Register HTTP=$HTTP" && tail -c 400 resp.json || true

View file

@@ -0,0 +1,67 @@
name: Desktop Release (Tauri)
on:
workflow_dispatch:
push:
tags:
- 'desktop-v*'
permissions:
contents: write
jobs:
build:
name: Build ${{ matrix.platform }}
runs-on: ${{ matrix.runner }}
strategy:
fail-fast: false
matrix:
include:
- platform: linux
runner: ubuntu-latest
- platform: windows
runner: windows-latest
- platform: macos
runner: macos-latest
defaults:
run:
shell: bash
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version: 20
- name: Enable Corepack
run: corepack enable && corepack prepare pnpm@10.20.0 --activate
- name: Install Rust (stable)
uses: dtolnay/rust-toolchain@stable
- name: Install Linux deps
if: matrix.platform == 'linux'
run: |
sudo apt-get update
sudo apt-get install -y libwebkit2gtk-4.1-dev libayatana-appindicator3-dev librsvg2-dev libxdo-dev libssl-dev build-essential curl wget file
- name: Install pnpm deps
run: pnpm -C apps/desktop install --frozen-lockfile
- name: Build desktop
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
TAURI_PRIVATE_KEY: ${{ secrets.TAURI_PRIVATE_KEY }}
TAURI_KEY_PASSWORD: ${{ secrets.TAURI_KEY_PASSWORD }}
VITE_APP_URL: https://tickets.esdrasrenan.com.br
VITE_API_BASE_URL: https://tickets.esdrasrenan.com.br
run: pnpm -C apps/desktop tauri build
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: desktop-${{ matrix.platform }}
path: apps/desktop/src-tauri/target/release/bundle

View file

@@ -0,0 +1,60 @@
name: Quality Checks
on:
push:
branches:
- main
pull_request:
branches:
- main
jobs:
lint-test-build:
name: Lint, Test and Build
runs-on: ubuntu-latest
env:
BETTER_AUTH_SECRET: test-secret
NEXT_PUBLIC_APP_URL: http://localhost:3000
BETTER_AUTH_URL: http://localhost:3000
NEXT_PUBLIC_CONVEX_URL: http://localhost:3210
DATABASE_URL: file:./prisma/db.dev.sqlite
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: 20
- name: Setup Bun
uses: oven-sh/setup-bun@v2
with:
bun-version: 1.3.1
- name: Verify Bun
run: bun --version
- name: Install dependencies
run: bun install --frozen-lockfile
- name: Cache Next.js build cache
uses: actions/cache@v4
with:
path: |
${{ github.workspace }}/.next/cache
key: ${{ runner.os }}-nextjs-${{ hashFiles('pnpm-lock.yaml', 'bun.lock') }}-${{ hashFiles('**/*.{js,jsx,ts,tsx}') }}
restore-keys: |
${{ runner.os }}-nextjs-${{ hashFiles('pnpm-lock.yaml', 'bun.lock') }}-
- name: Generate Prisma client
run: bun run prisma:generate
- name: Lint
run: bun run lint
- name: Test
run: bun test
- name: Build
run: bun run build:bun

View file

@@ -0,0 +1,60 @@
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
# dependencies
/node_modules
/.pnp
.pnp.*
.yarn/*
!.yarn/patches
!.yarn/plugins
!.yarn/releases
!.yarn/versions
# testing
/coverage
# next.js
/.next/
/out/
# production
/build
# misc
.DS_Store
*.pem
*.sqlite
# external experiments
nova-calendar-main/
# debug
npm-debug.log*
yarn-debug.log*
yarn-error.log*
.pnpm-debug.log*
# env files (can opt-in for committing if needed)
.env*
!.env.example
!apps/desktop/.env.example
# Accidental Windows duplicate downloads (e.g., "env (1)")
env (*)
env (1)
# vercel
.vercel
# typescript
*.tsbuildinfo
next-env.d.ts
# backups locais
.archive/
# arquivos locais temporários
Captura de tela *.png
Screenshot*.png
# Ignore NTFS ADS streams accidentally committed from Windows downloads
*:*Zone.Identifier
*:\:Zone.Identifier

View file

@@ -0,0 +1,93 @@
Copyright 2020 The Inter Project Authors (https://github.com/rsms/inter)
This Font Software is licensed under the SIL Open Font License, Version 1.1.
This license is copied below, and is also available with a FAQ at:
https://openfontlicense.org
-----------------------------------------------------------
SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
-----------------------------------------------------------
PREAMBLE
The goals of the Open Font License (OFL) are to stimulate worldwide
development of collaborative font projects, to support the font creation
efforts of academic and linguistic communities, and to provide a free and
open framework in which fonts may be shared and improved in partnership
with others.
The OFL allows the licensed fonts to be used, studied, modified and
redistributed freely as long as they are not sold by themselves. The
fonts, including any derivative works, can be bundled, embedded,
redistributed and/or sold with any software provided that any reserved
names are not used by derivative works. The fonts and derivatives,
however, cannot be released under any other type of license. The
requirement for fonts to remain under this license does not apply
to any document created using the fonts or their derivatives.
DEFINITIONS
"Font Software" refers to the set of files released by the Copyright
Holder(s) under this license and clearly marked as such. This may
include source files, build scripts and documentation.
"Reserved Font Name" refers to any names specified as such after the
copyright statement(s).
"Original Version" refers to the collection of Font Software components as
distributed by the Copyright Holder(s).
"Modified Version" refers to any derivative made by adding to, deleting,
or substituting -- in part or in whole -- any of the components of the
Original Version, by changing formats or by porting the Font Software to a
new environment.
"Author" refers to any designer, engineer, programmer, technical
writer or other person who contributed to the Font Software.
PERMISSION & CONDITIONS
Permission is hereby granted, free of charge, to any person obtaining
a copy of the Font Software, to use, study, copy, merge, embed, modify,
redistribute, and sell modified and unmodified copies of the Font
Software, subject to the following conditions:
1) Neither the Font Software nor any of its individual components,
in Original or Modified Versions, may be sold by itself.
2) Original or Modified Versions of the Font Software may be bundled,
redistributed and/or sold with any software, provided that each copy
contains the above copyright notice and this license. These can be
included either as stand-alone text files, human-readable headers or
in the appropriate machine-readable metadata fields within text or
binary files as long as those fields can be easily viewed by the user.
3) No Modified Version of the Font Software may use the Reserved Font
Name(s) unless explicit written permission is granted by the corresponding
Copyright Holder. This restriction only applies to the primary font name as
presented to the users.
4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
Software shall not be used to promote, endorse or advertise any
Modified Version, except to acknowledge the contribution(s) of the
Copyright Holder(s) and the Author(s) or with their explicit written
permission.
5) The Font Software, modified or unmodified, in part or in whole,
must be distributed entirely under this license, and must not be
distributed under any other license. The requirement for fonts to
remain under this license does not apply to any document created
using the Font Software.
TERMINATION
This license becomes null and void if any of the above conditions are
not met.
DISCLAIMER
THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
OTHER DEALINGS IN THE FONT SOFTWARE.

View file

@@ -0,0 +1,118 @@
Inter Variable Font
===================
This download contains Inter as both variable fonts and static fonts.
Inter is a variable font with these axes:
opsz
wght
This means all the styles are contained in these files:
Inter/Inter-VariableFont_opsz,wght.ttf
Inter/Inter-Italic-VariableFont_opsz,wght.ttf
If your app fully supports variable fonts, you can now pick intermediate styles
that aren't available as static fonts. Not all apps support variable fonts, and
in those cases you can use the static font files for Inter:
Inter/static/Inter_18pt-Thin.ttf
Inter/static/Inter_18pt-ExtraLight.ttf
Inter/static/Inter_18pt-Light.ttf
Inter/static/Inter_18pt-Regular.ttf
Inter/static/Inter_18pt-Medium.ttf
Inter/static/Inter_18pt-SemiBold.ttf
Inter/static/Inter_18pt-Bold.ttf
Inter/static/Inter_18pt-ExtraBold.ttf
Inter/static/Inter_18pt-Black.ttf
Inter/static/Inter_24pt-Thin.ttf
Inter/static/Inter_24pt-ExtraLight.ttf
Inter/static/Inter_24pt-Light.ttf
Inter/static/Inter_24pt-Regular.ttf
Inter/static/Inter_24pt-Medium.ttf
Inter/static/Inter_24pt-SemiBold.ttf
Inter/static/Inter_24pt-Bold.ttf
Inter/static/Inter_24pt-ExtraBold.ttf
Inter/static/Inter_24pt-Black.ttf
Inter/static/Inter_28pt-Thin.ttf
Inter/static/Inter_28pt-ExtraLight.ttf
Inter/static/Inter_28pt-Light.ttf
Inter/static/Inter_28pt-Regular.ttf
Inter/static/Inter_28pt-Medium.ttf
Inter/static/Inter_28pt-SemiBold.ttf
Inter/static/Inter_28pt-Bold.ttf
Inter/static/Inter_28pt-ExtraBold.ttf
Inter/static/Inter_28pt-Black.ttf
Inter/static/Inter_18pt-ThinItalic.ttf
Inter/static/Inter_18pt-ExtraLightItalic.ttf
Inter/static/Inter_18pt-LightItalic.ttf
Inter/static/Inter_18pt-Italic.ttf
Inter/static/Inter_18pt-MediumItalic.ttf
Inter/static/Inter_18pt-SemiBoldItalic.ttf
Inter/static/Inter_18pt-BoldItalic.ttf
Inter/static/Inter_18pt-ExtraBoldItalic.ttf
Inter/static/Inter_18pt-BlackItalic.ttf
Inter/static/Inter_24pt-ThinItalic.ttf
Inter/static/Inter_24pt-ExtraLightItalic.ttf
Inter/static/Inter_24pt-LightItalic.ttf
Inter/static/Inter_24pt-Italic.ttf
Inter/static/Inter_24pt-MediumItalic.ttf
Inter/static/Inter_24pt-SemiBoldItalic.ttf
Inter/static/Inter_24pt-BoldItalic.ttf
Inter/static/Inter_24pt-ExtraBoldItalic.ttf
Inter/static/Inter_24pt-BlackItalic.ttf
Inter/static/Inter_28pt-ThinItalic.ttf
Inter/static/Inter_28pt-ExtraLightItalic.ttf
Inter/static/Inter_28pt-LightItalic.ttf
Inter/static/Inter_28pt-Italic.ttf
Inter/static/Inter_28pt-MediumItalic.ttf
Inter/static/Inter_28pt-SemiBoldItalic.ttf
Inter/static/Inter_28pt-BoldItalic.ttf
Inter/static/Inter_28pt-ExtraBoldItalic.ttf
Inter/static/Inter_28pt-BlackItalic.ttf
Get started
-----------
1. Install the font files you want to use
2. Use your app's font picker to view the font family and all the
available styles
Learn more about variable fonts
-------------------------------
https://developers.google.com/web/fundamentals/design-and-ux/typography/variable-fonts
https://variablefonts.typenetwork.com
https://medium.com/variable-fonts
In desktop apps
https://theblog.adobe.com/can-variable-fonts-illustrator-cc
https://helpx.adobe.com/nz/photoshop/using/fonts.html#variable_fonts
Online
https://developers.google.com/fonts/docs/getting_started
https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Fonts/Variable_Fonts_Guide
https://developer.microsoft.com/en-us/microsoft-edge/testdrive/demos/variable-fonts
Installing fonts
MacOS: https://support.apple.com/en-us/HT201749
Linux: https://www.google.com/search?q=how+to+install+a+font+on+gnu%2Blinux
Windows: https://support.microsoft.com/en-us/help/314960/how-to-install-or-remove-a-font-in-windows
Android Apps
https://developers.google.com/fonts/docs/android
https://developer.android.com/guide/topics/ui/look-and-feel/downloadable-fonts
License
-------
Please read the full license text (OFL.txt) to understand the permissions,
restrictions and requirements for usage, redistribution, and modification.
You can use them in your products & projects, print or digital,
commercial or otherwise.
This isn't legal advice, please consider consulting a lawyer and see the full
license for all details.

View file

@@ -0,0 +1,127 @@
## Sistema de Chamados
A **Next.js 16 (App Router)** application with **React 19**, **Convex**, and **Better Auth** for managing Rever's tickets. The stack also includes **Prisma 6** (SQLite by default for DEV), **Tailwind**, and **Turbopack** in development (production builds run on Next's default webpack). All source code lives at the monorepo root, following App Router conventions.
## Requirements
- Bun >= 1.3 (1.3.1 recommended). After installing via the official script, add `export PATH="$HOME/.bun/bin:$PATH"` to your shell profile (e.g. `.bashrc`) so `bun` is available globally.
- Node.js >= 20 (required for auxiliary tooling such as the Prisma CLI and Next.js in fallback mode).
- Convex CLI (`bunx convex dev` installs it automatically on first use if it is not already present).
## Quick setup
1. Install dependencies:
```bash
bun install
```
2. Adjust the `.env` file (or create it from `.env.example`) and confirm the values of:
- `NEXT_PUBLIC_CONVEX_URL` (generated by Convex Dev)
- `BETTER_AUTH_SECRET`, `BETTER_AUTH_URL`, `DATABASE_URL` (defaults to `file:./db.dev.sqlite`, which maps to `prisma/db.dev.sqlite`)
3. Apply migrations and generate the Prisma client:
```bash
bunx prisma migrate deploy
bun run prisma:generate
```
4. Seed the default Better Auth users:
```bash
bun run auth:seed
```
> Whenever you switch machines or want to reset the local environment, just repeat steps 3 and 4 with the same `DATABASE_URL`.
### Quickly resetting the local environment
1. Make sure `DATABASE_URL` points to the desired file (e.g. `file:./db.dev.sqlite` for development, `file:./db.sqlite` for local production).
2. Apply the migrations to that file:
```bash
DATABASE_URL=file:./db.dev.sqlite bunx prisma migrate deploy
```
3. Recreate/ensure the default login accounts:
```bash
DATABASE_URL=file:./db.dev.sqlite bun run auth:seed
```
4. Start the server normally with `bun run dev`. These three commands are enough to rebuild the environment whenever you switch computers.
### Starting local services
- (Optional) To manually re-sync the default queues, run `bun run queues:ensure`.
- In one terminal, run the Convex realtime backend with `bun run convex:dev:bun` (or `bun run convex:dev` for the Node runtime).
- In another terminal, start the Next.js frontend (Turbopack) with `bun run dev:bun` (`bun run dev:webpack` serves as a fallback).
- With Convex running, visit `http://localhost:3000/dev/seed` once to seed demo data (tickets, users, comments).
> If the CLI asks about Convex project configuration, choose to create a new local deployment (the default option) and confirm. Credentials are stored in `.convex/` automatically.
### Documentation
- Docs index: `docs/README.md`
- Operations (production): `docs/OPERATIONS.md` (EN version) and `docs/OPERACAO-PRODUCAO.md` (PT-BR)
- DEV guide: `docs/DEV.md`
- Automated tests (Vitest/Playwright): `docs/testes-vitest.md`
- Swarm stack: `stack.yml` (routed by Traefik, `traefik_public` network).
### Environment variables
- Example at the root: `.env.example`; copy it to `.env` and fill in the secrets.
- Desktop app: `apps/desktop/.env.example`; copy it to `apps/desktop/.env` and adjust `VITE_APP_URL`.
- Never commit `.env` files with real values (already ignored in `.gitignore`).
### DEV guide (Prisma, Auth, and Desktop/Tauri)
For detailed development flows (local database with SQLite/Prisma, Better Auth seeding, Prisma CLI tweaks in DEV, and the Desktop build with Tauri), see `docs/DEV.md`.
## Useful scripts
- `bun run dev:bun`: current default for Next.js on the Bun runtime (`bun run dev:webpack` remains the fallback).
- `bun run convex:dev:bun`: Bun runtime for Convex (`bun run convex:dev` keeps the old Node-based flow).
- `bun run build:bun` / `bun run start:bun`: build and serve with Bun; `bun run build` keeps the Node fallback.
- `bun run dev:webpack`: Next.js development-mode fallback (webpack).
- `bun run lint`: ESLint with the project rules.
- `bun test`: unit test suite using Bun's runner (the screenshot test is skipped automatically if the matcher is missing).
- `bun run build`: runs `next build --webpack` (Next's default webpack).
- `bun run build:turbopack`: runs `next build --turbopack` to reproduce/debug issues.
- `bun run auth:seed`: creates/updates the default Better Auth accounts (credentials in `agents.md`).
- `bunx prisma migrate deploy`: applies migrations to the local SQLite database.
- `bun run convex:dev`: runs Convex in development mode with Node, generating types in `convex/_generated`.
## Transferring a device between collaborators
When a device changes owner:
1. Open `Admin > Dispositivos`, select the device, and click **Resetar agente** (reset agent).
2. On the device, run the local agent reset (`rever-agent reset` or reinstall the service) and re-provision with the company code.
3. Once the agent generates a new token, assign the device to the new collaborator in the panel.
Without the agent reset, Convex reuses the previous token and the inventory stays linked to the old user.
## Main structure
- `app/` inside `src/`: Next.js routes and layouts (App Router).
- `components/`: reusable components (UI, forms, layouts).
- `convex/`: Convex queries, mutations, and seeds.
- `prisma/`: schema, migrations, and the SQLite database (`prisma/db.sqlite`).
- `scripts/`: Node utilities for syncing and extra seeds.
- `agents.md`: operational guide and functional context (in PT-BR).
- `PROXIMOS_PASSOS.md`: backlog of future improvements.
## Demo credentials
After running `bun run auth:seed`, the default credentials are available as described in `agents.md` (section "Credenciais padrão"). Adjust the `SEED_USER_*` variables if you need to override users or passwords during seeding.
## Next steps
See `PROXIMOS_PASSOS.md` to track the functional backlog and the progress of planned initiatives.
### Running with Bun
- `bun install` is the default flow (`bun.lock` must be versioned; use `bun install --frozen-lockfile` in CI).
- `bun run dev:bun`, `bun run convex:dev:bun`, `bun run build:bun`, and `bun run start:bun` are already configured; internally they run `bun run --bun <script>` to use the Bun runtime without giving up the existing scripts. `cross-env` guarantees the expected `NODE_ENV` values (`development`/`production`).
- If you need to validate the experimental bundler, use `bun run build:turbopack`; for the stable flow keep `bun run build` (webpack).
- `bun test` uses Bun's test runner. The screenshot snapshot test is skipped automatically when the matcher is unavailable; full browser tests still run via `bun run test:browser` (Vitest + Playwright).
<!-- ci: smoke test 3 -->
## Device session diagnostics (Desktop)
- When the portal is opened via the desktop app, use the `https://seu-app/portal/debug` page to validate cookies and context:
- `/api/auth/get-session` should ideally show `user.role = "machine"` (in some WebView environments it may return `null`, which is not blocking).
- `/api/machines/session` should return `200` with `assignedUserId/assignedUserEmail`.
- The frontend now populates `machineContext` even when `get-session` returns `null`, and derives the effective role from that context.
- If `machines/session` returns `401/403`, review CORS/credentials and the handshake flow documented in `docs/OPERACAO-PRODUCAO.md`.
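For scripted checks, something like the sketch below reproduces what `/portal/debug` shows (hypothetical helper, not part of the repo; it assumes the two endpoints above and cookie-based credentials):

```ts
// diagnose-machine-session.ts — hypothetical diagnostic helper.
async function diagnoseMachineSession(baseUrl: string): Promise<void> {
  // Better Auth session; may legitimately be null inside some WebViews.
  const session = await fetch(`${baseUrl}/api/auth/get-session`, {
    credentials: "include",
  }).then((r) => (r.ok ? r.json() : null));
  console.log("get-session role:", session?.user?.role ?? null);

  // Machine context; 401/403 here points at CORS/credentials or the handshake flow.
  const res = await fetch(`${baseUrl}/api/machines/session`, { credentials: "include" });
  console.log("machines/session status:", res.status);
  if (res.ok) {
    const ctx = await res.json();
    console.log("assigned:", ctx.assignedUserId, ctx.assignedUserEmail);
  }
}

diagnoseMachineSession("http://localhost:3000").catch(console.error);
```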

View file

@@ -0,0 +1,171 @@
# Development Plan: Sistema de Chamados
> **Top directive**: documentation, communication, and replies always in Brazilian Portuguese.
## Contacts
- **Esdras Renan**: monkeyesdras@gmail.com
## Default credentials (Better Auth)
| Role | User | Password |
| --- | --- | --- |
| Administrator | `admin@sistema.dev` | `admin123` |
| Wallboard panel | `suporte@rever.com.br` | `agent123` |
The remaining real collaborators are provisioned via **Convites & acessos** (invites & access). If traces of demo data remain, run `node scripts/remove-legacy-demo-users.mjs` to clean them up.
> Run `bun run auth:seed` after configuring `.env` to (re)create the users above (`SEED_USER_*` fields can override credentials).
## Convex backend
- Demo user/ticket seeds: `convex/seed.ts`.
- For DEV: run `bun run convex:dev:bun` and visit `/dev/seed` once to populate realistic data.
## Current stack (as of 2025-11-06)
- **Next.js**: `16.0.1` (Turbopack in development; production builds use webpack).
- The domain whitelist in `src/config/allowed-hosts.ts` is enforced by `middleware.ts`.
- **React / React DOM**: `19.2.0`.
- **Test track**: Vitest (`bun test`) without watch mode by default (`--run --passWithNoTests`).
- **CI**: the `Quality Checks` workflow (`.github/workflows/quality-checks.yml`) runs `bun install`, `bun run prisma:generate`, `bun run lint`, `bun test`, and `bun run build:bun`. Critical variables (`BETTER_AUTH_SECRET`, `NEXT_PUBLIC_APP_URL`, etc.) are defined only on the runner; they do not affect the VPS.
- **Post-change discipline**: whenever you make local changes, you **must** run `bun run lint`, `bun run build:bun`, and `bun test` before delivering or opening a PR. These commands are mandatory for agents/automations as well, ensuring the project stays healthy.
- **Deploy**: the `ci-cd-web-desktop.yml` pipeline (self-hosted runner). Builds run on Bun 1.3 + Node 20. The web app is published to `/home/renan/apps/sistema` and Swarm points `sistema_web` at that folder.
## Local setup (updated)
1. `bun install`
2. Copy `.env.example` to `.env.local`.
- Main variables for DEV:
```
NODE_ENV=development
NEXT_PUBLIC_APP_URL=http://localhost:3000
BETTER_AUTH_URL=http://localhost:3000
BETTER_AUTH_SECRET=dev-only-long-random-string
NEXT_PUBLIC_CONVEX_URL=http://127.0.0.1:3210
DATABASE_URL=file:./prisma/db.dev.sqlite
```
3. `bun run auth:seed`
4. (Optional) `bun run queues:ensure`
5. `bun run convex:dev:bun`
6. In another terminal: `bun run dev:bun`
7. Visit `http://localhost:3000` and validate login with the default users.
### Database
- Local (DEV): `DATABASE_URL=file:./prisma/db.dev.sqlite` (stored under `prisma/prisma/`).
- Production: SQLite persisted in the `sistema_sistema_db` Swarm volume. PROD migrations must point at that volume (see `docs/DEPLOY-RUNBOOK.md`).
- Legacy cleanup: `node scripts/remove-legacy-demo-users.mjs` removes old demo accounts (Cliente Demo, fictitious managers, etc.).
### Checks before PR/deploy
```bash
bun run lint
bun test
bun run build:bun
```
## Desktop app (Tauri)
- Source code: `apps/desktop` (Tauri v2 + Vite + React 19).
- URLs:
- Production: `https://tickets.esdrasrenan.com.br`
- DEV: configure `apps/desktop/.env` (example provided).
- Commands:
- `bun run --cwd apps/desktop tauri dev`: development (port 1420).
- `bun run --cwd apps/desktop tauri build`: generates installers.
- **Agent flow** (see the register sketch right after these bullets):
1. Collects the device profile (hostname, OS, MAC, serials, metrics).
2. Provisions via `POST /api/machines/register` using `MACHINE_PROVISIONING_SECRET`, providing the access profile (Collaborator/Manager) plus collaborator data.
3. Sends periodic heartbeats (`/api/machines/heartbeat`) with basic + extended inventory (SMART disks, GPUs, services, software, CPU window).
4. Performs the handshake at `APP_URL/machines/handshake?token=...&redirect=...` to receive Better Auth cookies + session (collaborator → `/portal`, manager → `/dashboard`).
5. The token is persisted in the OS vault (Keyring); the store keeps only metadata.
6. Manual inventory submission via button (POST `/api/machines/inventory`).
7. Automatic updates: the `@tauri-apps/plugin-updater` plugin checks the `latest.json` published on GitHub releases.
- **Admin ▸ Dispositivos**: permite ajustar perfil/email associado, visualizar inventário completo e remover dispositivo.
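A minimal TypeScript sketch of steps 2 and 3 of the agent flow, assuming a JSON body with `secret`/`profile` fields and a bearer token on the heartbeat; the real payload contract lives in the Rust agent and the `/api/machines/*` routes.
```ts
// Hedged sketch of the agent's register + heartbeat calls.
// API_BASE, payload shapes, and the bearer scheme are assumptions.
const API_BASE = process.env.API_BASE_URL ?? "http://localhost:3000"

async function registerMachine(profile: Record<string, unknown>, secret: string) {
  const res = await fetch(`${API_BASE}/api/machines/register`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ secret, profile }),
  })
  if (!res.ok) throw new Error(`register failed: ${res.status}`)
  // The server is expected to return the device token used afterwards.
  return (await res.json()) as { machineToken: string }
}

async function sendHeartbeat(machineToken: string, inventory: Record<string, unknown>) {
  // Periodic heartbeat carrying the basic + extended inventory.
  await fetch(`${API_BASE}/api/machines/heartbeat`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${machineToken}`,
    },
    body: JSON.stringify(inventory),
  })
}
```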
### The "machine" session on the frontend
- When authenticated as a device, the frontend calls `/api/machines/session`, populates `machineContext` (assignedUser*, persona), and derives the role/`viewerId`.
- Even when `get-session` is `null` in the WebView, the portal uses `machineContext` to know which collaborator/manager is logged in (see the sketch below).
- The UI removes the "Sign out" option from the user menu when it detects a device session.
- `/portal/debug` shows the JSON from `get-session` and `machines/session` (useful for diagnosing cookies/bearer tokens).
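A sketch of that fallback; route paths and response shapes are illustrative, not the actual component code.
```ts
// Derive the viewer: prefer the Better Auth session, fall back to the
// machine session when get-session returns null inside the WebView.
type MachineContext = {
  assignedUserId: string
  persona: "collaborator" | "manager"
}

async function resolveViewer() {
  const session = await fetch("/api/auth/get-session").then((r) => r.json())
  if (session?.user) return { viewerId: session.user.id, role: session.user.role }

  const machine: MachineContext | null = await fetch("/api/machines/session").then(
    (r) => (r.ok ? r.json() : null),
  )
  if (machine) return { viewerId: machine.assignedUserId, role: machine.persona }
  return null
}
```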
### Additional notes
- We plan to use a `desktop_shell` cookie in the future to distinguish desktop vs browser access (not implemented).
## Quality and tests
- **Lint**: `bun run lint` (ESLint flat config).
- **Unit/integration tests (Vitest)**:
  - Current coverage includes utilities (`tests/*.test.ts`), the `/api/machines/*` routes, and `sendSmtpMail`.
  - Run `bun test -- --watch` only when you need interactive mode.
- **Build**: `bun run build:bun` (`next build --webpack`). To reproduce issues with the experimental bundler, use `bun run build:turbopack`.
- **CI**: most common failures
  - `ERR_BUN_LOCKFILE_OUTDATED`: confirm that `bun.lock` was regenerated (`bun install`) after changing dependencies, especially in the desktop app.
  - Missing Better Auth variables (`BETTER_AUTH_SECRET`): defined in the `Quality Checks` workflow.
  - Host failure: check `src/config/allowed-hosts.ts`; the middleware returns 403 when the Traefik domain is not listed.
## Production / Deploy
- Self-hosted runner (VPS). The build runs outside `/srv/apps/sistema` and rsync publishes to `/home/renan/apps/sistema`.
- Swarm: `stack.yml` mounts `/home/renan/apps/sistema.current``/app` (via symlink).
- To release a new build manually:
```bash
ln -sfn /home/renan/apps/sistema.build.<new> /home/renan/apps/sistema.current
docker service update --force sistema_web
```
- Resolve `P3009` (failed migration) always against the `sistema_sistema_db` volume:
```bash
docker service scale sistema_web=0
docker run --rm -it -e DATABASE_URL=file:/app/data/db.sqlite \
-v /home/renan/apps/sistema.current:/app \
-v sistema_sistema_db:/app/data -w /app \
oven/bun:1 bash -lc "bun install --frozen-lockfile && bun x prisma migrate resolve --rolled-back <migration> && bun x prisma migrate deploy"
docker service scale sistema_web=1
```
## Portal / web app state
- Better Auth authentication with `AuthGuard`.
- The bottom sidebar aggregates the avatar, a link to `/settings`, and logout (hidden in device sessions).
- Ticket forms (new/edit/comments) use a rich editor + attachments; placeholders and validation in pt-BR.
- Reports and dashboards use `AppShell` + `SiteHeader`.
- `usePersistentCompanyFilter` keeps the global company filter across reports/admin (see the sketch after this list).
- CSV exports: backlog, channels, CSAT, SLA, hours (`/api/reports/*.csv` routes).
- Ticket PDF (`/api/tickets/[id]/export/pdf`).
- Internal/external Play with per-type metrics.
- Admin > Companies: registration + the "Cliente avulso?" (standalone client) flag, contracted hours, user links.
- Admin > Users/Team:
  - Separate tabs: "Team" (administrators and agents) and "Users" (managers and collaborators).
  - Multi-select + bulk actions: delete users, remove device agents, and revoke pending invites.
  - Filters by role, company, and space (tenant) when applicable; unified search.
  - Invites: the "Space (internal ID)" field was removed from the generation UI.
- Admin > Users: link a collaborator to a company.
- Sent alerts: now reachable under Settings → Workspace administration (direct link to /admin/alerts); removed from the sidebar.
- Dashboard: per-queue cards and key indicators.
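A hypothetical sketch of the persistence behind `usePersistentCompanyFilter`, assuming a plain localStorage key; the real hook may differ.
```tsx
import { useEffect, useState } from "react"

const STORAGE_KEY = "company-filter" // assumed storage key

export function usePersistentCompanyFilter() {
  const [companyId, setCompanyId] = useState<string | null>(null)

  // Read the persisted value after mount (keeps SSR happy).
  useEffect(() => {
    setCompanyId(window.localStorage.getItem(STORAGE_KEY))
  }, [])

  const update = (id: string | null) => {
    setCompanyId(id)
    if (id) window.localStorage.setItem(STORAGE_KEY, id)
    else window.localStorage.removeItem(STORAGE_KEY)
  }

  return [companyId, update] as const
}
```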
## Supported flows
- **Internal team** (`admin`, `agent`, `collaborator`): creates/tracks tickets, comments, changes status/queue, generates reports.
- **Managers** (`manager`): view the company's tickets, comment publicly, access dashboards.
- **Collaborators** (`collaborator`): portal (`/portal`), their own tickets, public comments, rich editor, attachments.
- **Device session**: the desktop app registers heartbeat/inventory and redirects the collaborator/manager to the appropriate portal with valid cookies.
### Recent fixes
- Ticket timer (service in progress): the UI now applies an optimistic update when starting/pausing a session so the running time no longer "jumps" to incorrect minutes (sketch below). The backend remains the source of truth (the accumulated total is reconciled on pause).
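A sketch of that optimistic-update logic; the state shape and function names are illustrative.
```ts
type TimerState = { accumulatedMs: number; startedAt: number | null }

// Start counting locally right away, without waiting for the mutation.
function startOptimistic(state: TimerState): TimerState {
  return state.startedAt ? state : { ...state, startedAt: Date.now() }
}

// Pause locally: fold the running span into the accumulated total.
function pauseOptimistic(state: TimerState): TimerState {
  if (!state.startedAt) return state
  return {
    accumulatedMs: state.accumulatedMs + (Date.now() - state.startedAt),
    startedAt: null,
  }
}

// When the pause mutation settles, the server total wins.
function reconcile(state: TimerState, serverTotalMs: number): TimerState {
  return { ...state, accumulatedMs: serverTotalMs }
}
```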
## Recommended backlog
1. Automatic e-mails when hours usage ≥ 90% of the contracted amount.
2. Quick actions (status/queue) directly in the ticket list.
3. Per-tenant attachment limits + monitoring.
4. Ticket PDF layout aligned with the application's visual identity.
5. Experiments with React Compiler (Next 16).
## Quick references
- **Desktop agent endpoints**:
  - `POST /api/machines/register`
  - `POST /api/machines/heartbeat`
  - `POST /api/machines/inventory`
- **XLSX reports** (download sketch below):
  - Backlog: `/api/reports/backlog.xlsx?range=7d|30d|90d[&companyId=...]`
  - Channels: `/api/reports/tickets-by-channel.xlsx?...`
  - CSAT: `/api/reports/csat.xlsx?...`
  - SLA: `/api/reports/sla.xlsx?...`
  - Hours: `/api/reports/hours-by-client.xlsx?...`
  - Device inventory: `/api/reports/machines-inventory.xlsx?[companyId=...]`
- **Complementary docs**:
  - `docs/DEV.md`: the up-to-date daily guide.
  - `docs/STATUS-2025-10-16.md`: snapshot of the current state and backlog.
  - `docs/DEPLOY-RUNBOOK.md`: Swarm runbook.
  - `docs/admin-inventory-ui.md`, `docs/plano-app-desktop-maquinas.md`: inventory/agent details.
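For illustration, a hedged sketch of downloading one of the XLSX reports above from a script, assuming an authenticated session cookie is available.
```ts
import { writeFile } from "node:fs/promises"

// Downloads the backlog report for the last 30 days (query params as documented).
async function downloadBacklogReport(baseUrl: string, cookie: string) {
  const res = await fetch(`${baseUrl}/api/reports/backlog.xlsx?range=30d`, {
    headers: { cookie }, // session cookie from a logged-in user
  })
  if (!res.ok) throw new Error(`export failed: ${res.status}`)
  await writeFile("backlog.xlsx", Buffer.from(await res.arrayBuffer()))
}
```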
---
_Last updated: 2025-11-06 (Next.js 16, production build with webpack, desktop + portal flows documented)._

View file

@@ -0,0 +1,14 @@
# Local environment for the Desktop App (Vite/Tauri)
# Copy to `apps/desktop/.env` and adjust.
# URL of the web application (Next.js) loaded inside the desktop app.
# In production the app already defaults to: https://tickets.esdrasrenan.com.br
VITE_APP_URL=http://localhost:3000
# API base (for the /api/machines/* routes)
# If unset, falls back to the same value as VITE_APP_URL
VITE_API_BASE_URL=
# Optional: host IP for development with HMR outside localhost
# E.g.: 192.168.0.10
TAURI_DEV_HOST=

View file

@@ -0,0 +1,24 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
lerna-debug.log*
node_modules
dist
dist-ssr
*.local
# Editor directories and files
.vscode/*
!.vscode/extensions.json
.idea
.DS_Store
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw?

View file

@@ -0,0 +1,3 @@
{
"recommendations": ["tauri-apps.tauri-vscode", "rust-lang.rust-analyzer"]
}

View file

@@ -0,0 +1,80 @@
# Sistema de Chamados: Desktop App (Tauri)
Desktop client (Tauri v2 + Vite) that:
- Collects the device profile/metrics via Rust commands.
- Registers the device with a provisioning code.
- Sends a periodic heartbeat to the backend (`/api/machines/heartbeat`).
- Redirects to the system's web UI after provisioning.
- Stores the device token securely in the OS vault (Keyring).
- Shows Summary, Inventory, Diagnostics, and Settings tabs; allows "Send inventory now".
## URLs and environment
- In production the app defaults to `https://tickets.esdrasrenan.com.br`.
- In development, use `apps/desktop/.env` (copied from `.env.example`):
```
VITE_APP_URL=http://localhost:3000
# Optional: if empty, uses the same value as APP_URL
VITE_API_BASE_URL=
```
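A small sketch of how the frontend side can apply that fallback (consuming variable names are assumed):
```ts
// Resolve the API base: explicit VITE_API_BASE_URL, else the app URL itself.
const appUrl = import.meta.env.VITE_APP_URL as string
const apiBase = (import.meta.env.VITE_API_BASE_URL as string | undefined) || appUrl

export { appUrl, apiBase }
```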
## Commands
- Dev (opens the Tauri window and Vite on 1420):
  - `bun run --cwd apps/desktop tauri dev`
- Frontend build (Vite only):
  - `bun run --cwd apps/desktop build`
- Executable build (bundle):
  - `bun run --cwd apps/desktop tauri build`
Bundle output: `apps/desktop/src-tauri/target/release/bundle/`.
### Windows (NSIS): installation and data
- NSIS installer with "perMachine" support (Program Files) and a customizable directory (e.g. `C:\Raven`).
- A shortcut is created on the Desktop pointing to the installed executable.
- App data (token/config) lives in the user's local AppData (via `@tauri-apps/plugin-store` with `appLocalDataDir`).
#### NSIS: languages and install mode
- Language: the installer includes Brazilian Portuguese and shows a language selector.
  - File: `apps/desktop/src-tauri/tauri.conf.json:54``"displayLanguageSelector": true`
  - File: `apps/desktop/src-tauri/tauri.conf.json:57``"languages": ["PortugueseBR"]`
  - Behavior: uses the OS language; if there is no match, falls back to the first in the list.
  - NSIS language reference: NSIS "Language files/PortugueseBR".
- Install mode: Program Files (requires elevation/UAC).
  - File: `apps/desktop/src-tauri/tauri.conf.json:56``"installMode": "perMachine"`
  - Alternatives: `"currentUser"` (default) or `"both"` (user chooses; requires UAC).
Quick, lightweight build in dev:
```bash
bun run --cwd apps/desktop tauri build --bundles nsis
```
Updater signing (optional in dev):
```powershell
$privB64 = '<PASTE_YOUR_BASE64_PRIVATE_KEY>'
$env:TAURI_SIGNING_PRIVATE_KEY = [Text.Encoding]::UTF8.GetString([Convert]::FromBase64String($privB64))
$env:TAURI_SIGNING_PRIVATE_KEY_PASSWORD = 'PASSWORD_HERE'
bun run --cwd apps/desktop tauri build --bundles nsis
```
## Tauri prerequisites
- Rust toolchain installed.
- Native dependencies per OS (webkit2gtk on Linux, WebView2/VS Build Tools on Windows, Xcode CLT on macOS).
See https://tauri.app/start/prerequisites/
## Flow (summary)
1) On launch, the app collects the device profile and shows a summary.
2) Enter the "provisioning code" (key defined on the server) and confirm.
3) The server returns a `machineToken`; the app saves it and starts the heartbeat.
4) The app opens `APP_URL/machines/handshake?token=...` in the WebView to authenticate the session in the UI.
5) Through the tabs you can review the local inventory and trigger a manual sync.
## Token security
- The `machineToken` is saved in the OS's native vault via the Keyring plugin (Linux Secret Service, Windows Credential Manager, macOS Keychain).
- The preferences file (`Store`) keeps only non-sensitive metadata (IDs, URLs, dates).
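A sketch of the metadata side, assuming the plugin-store v2 `load` helper and illustrative keys; the token itself never touches this file.
```ts
import { load } from "@tauri-apps/plugin-store"

// Persist only non-sensitive metadata; the machineToken stays in the keyring.
export async function saveMachineMetadata(machineId: string, appUrl: string) {
  const store = await load("settings.json")
  await store.set("machineId", machineId)
  await store.set("appUrl", appUrl)
  await store.set("lastHeartbeatAt", new Date().toISOString())
  await store.save()
}
```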
## Support
- Rust logs show up in the Tauri console (dev) and on stderr (release). On network failures the app shows alerts in its own UI.
- To change endpoints/domains, use the environment variables above.

View file

@@ -0,0 +1,461 @@
# Complete CI/CD guide: Web + Desktop
> This material details, step by step, how to set up the pipeline that delivers the front/backend (Next.js + Convex) and the Tauri app installers, using only a Linux (Ubuntu) VPS and a Windows computer/VM. Follow the suggested order and check off each step as you finish it.
---
## 1. Quick overview
- Goal: on a push to the `main` branch, the VPS updates the site/backend. On creating a `vX.Y.Z` tag, the desktop app is rebuilt, signed, and published under `/updates` for auto-update.
- Main tools:
  - GitHub Actions with two self-hosted runners (Linux and Windows).
  - Docker Compose (or equivalent scripts) to run Next.js/Convex on the VPS.
  - Tauri to build the desktop installers.
  - Nginx serving the static update files.
- Flow:
  1. The developer pushes code to GitHub.
  2. The **deploy** job runs on the VPS itself (Linux runner) and updates containers/processes.
  3. On a `v*.*.*` tag, the **desktop_release** job runs on the Windows runner, builds the installers, signs them, and ships `latest.json` + binaries to the VPS.
---
## 2. Mandatory prerequisites
1. A GitHub repository with the project code (`sistema-de-chamados`).
2. An Ubuntu VPS with:
   - SSH access with a sudo user (e.g. `renan`).
   - Docker + Docker Compose (or whatever environment you want to use in production).
   - Nginx (or another web server able to serve `/updates` over HTTPS).
3. A Windows 10 or 11 (64-bit) computer/VM that stays on during builds:
   - Administrator access.
   - Free space for builds (at least 15 GB).
4. A GitHub account with Admin permission on the repository (to register runners and secrets).
5. A dedicated SSH key for the pipeline to access the VPS (do not reuse your personal one).
---
## 3. Repository preparation
1. At the project root, confirm the paths used by the workflow:
   - `APP_DIR`: directory on the VPS where the code (or docker-compose) will live. Example: `/srv/apps/sistema`.
   - `VPS_UPDATES_DIR`: public directory served by Nginx. Example: `/var/www/updates`.
2. Make sure `apps/desktop/src-tauri/tauri.conf.json` gets updated with:
   - The updater public key (`updater.pubkey`).
   - The `latest.json` URL (for example `https://seu-dominio.com/updates/latest.json`).
   - Example block to add later:
```json5
"updater": {
"active": true,
"endpoints": ["https://seu-dominio.com/updates/latest.json"],
"pubkey": "FINGERPRINT_PUBLIC_KEY"
}
```
3. Create (or keep) the `.github/workflows/ci-cd-web-desktop.yml` file for the workflow. Its content is included in step 10, after all the prep work. Since you use Docker Swarm with Portainer, also set aside the `stack.yml` (or compatible compose file) the workflow will trigger.
---
## 4. Initial adjustments on the VPS (Ubuntu)
1. Update packages:
```bash
sudo apt update && sudo apt upgrade -y
```
2. Install Docker (Swarm uses the engine itself; keep the compose plugin if you want to test locally):
```bash
sudo apt install -y docker.io docker-compose-plugin
sudo systemctl enable docker
sudo usermod -aG docker $USER
```
> Log out and back into the SSH session to apply the Docker group.
3. If Swarm is not active yet, initialize it on the manager node:
```bash
docker info | grep Swarm
# if it returns "inactive", run:
sudo docker swarm init
```
4. Check Portainer:
   - Open the panel on the configured port (default `https://seu-dominio:9443`).
   - Confirm the Swarm cluster is healthy and the node shows up as a manager.
5. Create the directories used by the deploy:
```bash
sudo mkdir -p /srv/apps/sistema
sudo mkdir -p /var/www/updates
sudo chown -R $USER:$USER /srv/apps/sistema
sudo chown -R $USER:$USER /var/www/updates
```
6. (Optional) Clone the current repository into `/srv/apps/sistema` if you keep files such as `stack.yml` there:
```bash
git clone git@github.com:SEU_USUARIO/sistema-de-chamados.git /srv/apps/sistema
```
7. Test your deploy process manually (Docker Swarm/Portainer or equivalent scripts) before automating it. Example via CLI:
```bash
docker stack deploy --with-registry-auth -c stack.yml sistema
docker stack services sistema
```
If you prefer Portainer, deploy manually through the panel to validate. Confirm the site comes up correctly and that `/var/www/updates` is served by Nginx (see step 6).
8. About Convex:
   - **Convex Cloud (recommended):** just make sure the `NEXT_PUBLIC_CONVEX_URL` and `CONVEX_DEPLOYMENT` variables point to the managed deployment. No container is needed.
   - **Convex self-hosted:** add an extra service to `stack.yml` (e.g. `convex`) with the official image (`ghcr.io/get-convex/convex:latest`). Configure a volume for the data directory and expose port 3210 internally. Point Next.js at `http://convex:3210` inside the Swarm network.
9. Example of an integrated `stack.yml` (based on the template you already use in Portainer):
```yaml
version: "3.8"
services:
web:
    image: ghcr.io/SEU_USUARIO/sistema-web:latest # adjust to the real image
deploy:
mode: replicated
replicas: 2
placement:
constraints:
- node.role == manager
resources:
limits:
cpus: "1.0"
memory: 1.5G
labels:
- traefik.enable=true
- traefik.http.routers.sistema.rule=Host(`app.seu-dominio.com.br`)
- traefik.http.routers.sistema.entrypoints=websecure
- traefik.http.routers.sistema.tls=true
- traefik.http.routers.sistema.tls.certresolver=le
- traefik.http.services.sistema.loadbalancer.server.port=3000
env_file:
      - ./envs/web.env # Next.js variables
networks:
- traefik_public
- sistema_network
convex:
image: ghcr.io/get-convex/convex:latest
deploy:
mode: replicated
replicas: 1
placement:
constraints:
- node.role == manager
command: ["start", "--port", "3210"]
volumes:
- convex_data:/convex/data
networks:
- sistema_network
networks:
traefik_public:
external: true
sistema_network:
external: false
volumes:
convex_data:
external: false
```
- Adapt the image names (`web`, `convex`) and the Traefik labels to your environment.
- If you use Portainer, upload this file in the UI and deploy the stack there.
---
## 5. Generate the Tauri updater keys
1. On any device with Bun installed (your local computer works):
```bash
bun install
bun install --cwd apps/desktop
bun run --cwd apps/desktop tauri signer generate
```
2. The command generates:
   - A private key (`tauri.private.key`).
   - A public key (`tauri.public.key`).
3. Store the files somewhere safe. The private key's contents go into the `TAURI_PRIVATE_KEY` secret and its password into `TAURI_KEY_PASSWORD`. The public key goes into `tauri.conf.json`.
4. Copy the public key into `apps/desktop/src-tauri/tauri.conf.json` in the `"updater"` block (as indicated in step 3).
---
## 6. Configure Nginx to serve the updates
1. Make sure a domain points at the VPS and you have a valid TLS certificate (Let's Encrypt is enough).
2. Create (or edit) `/etc/nginx/sites-available/sistema-updates.conf` with something like:
```nginx
server {
listen 80;
listen 443 ssl;
server_name seu-dominio.com;
    # SSL configuration (adjust to your certificate)
ssl_certificate /etc/letsencrypt/live/seu-dominio.com/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/seu-dominio.com/privkey.pem;
location /updates/ {
alias /var/www/updates/;
autoindex off;
add_header Cache-Control "no-cache";
}
}
```
3. Create the symlink and test:
```bash
sudo ln -s /etc/nginx/sites-available/sistema-updates.conf /etc/nginx/sites-enabled/
sudo nginx -t
sudo systemctl reload nginx
```
4. Check in a browser: `https://seu-dominio.com/updates/` should list nothing (or return a 403 if autoindex is off, which is acceptable). Just confirm it does not return 404.
---
## 7. Register the self-hosted runner on the VPS (Linux)
1. On GitHub, open the repository → *Settings**Actions**Runners**New self-hosted runner*.
2. Choose Linux x64 and note the URL and token shown.
3. On the VPS, prepare a dedicated user (optional but recommended):
```bash
sudo adduser --disabled-password --gecos "" actions
sudo usermod -aG docker actions
sudo su - actions
```
4. Download and install the runner (replace `<URL>` and `<TOKEN>`):
```bash
mkdir actions-runner && cd actions-runner
curl -o actions-runner.tar.gz -L <URL>
tar xzf actions-runner.tar.gz
./config.sh --url https://github.com/SEU_USUARIO/sistema-de-chamados \
--token <TOKEN> \
--labels "self-hosted,linux,vps"
```
5. Install it as a service:
```bash
sudo ./svc.sh install
sudo ./svc.sh start
```
6. Back on GitHub, confirm the runner shows as `online`.
7. Test it by running a simple workflow (the deploy pipeline works once every step is done). Remember: the runner needs write permission on `/srv/apps/sistema` and `/var/www/updates`.
---
## 8. Register the self-hosted runner on Windows
1. Download and install the prerequisites:
   - Git for Windows.
   - Bun 1.3+: install via the official installer (`iwr https://bun.sh/install.ps1 | invoke-expression`) and make sure `bun` is on the `PATH`.
   - Node.js 20 (optional, in case you need to run Node scripts during the build).
   - Rust toolchain: https://rustup.rs (default install).
   - Visual Studio Build Tools (C++ build tools) or `Desktop development with C++`.
   - WebView2 Runtime (https://developer.microsoft.com/microsoft-edge/webview2/).
2. Optional: install the Tauri dependencies by running once:
```powershell
bun install
bun install --cwd apps/desktop
bun run --cwd apps/desktop tauri info
```
3. On GitHub → *Settings**Actions**Runners**New self-hosted runner* → choose Windows x64 and copy the URL/token.
4. In `C:\actions-runner` (recommended):
```powershell
mkdir C:\actions-runner
cd C:\actions-runner
Invoke-WebRequest -Uri <URL> -OutFile actions-runner.zip
Expand-Archive -Path actions-runner.zip -DestinationPath .
.\config.cmd --url https://github.com/SEU_USUARIO/sistema-de-chamados `
--token <TOKEN> `
--labels "self-hosted,windows,desktop"
```
5. Install it as a service (administrator PowerShell):
```powershell
.\svc install
.\svc start
```
6. Confirm on GitHub that the runner shows as `online`.
7. Keep the machine on and connected while the workflow needs to run:
   - For desktop releases, the runner only needs to be on while the `desktop_release` job is executing (create the tag and wait for the workflow to finish).
   - After it completes, you can shut the computer down until the next release.
8. Important note: the Windows runner can be your personal machine. Just make sure that:
   - You trust the code being executed (the runner processes the repository's jobs).
   - The runner service stays active while the workflow runs (if you turn the PC off, releases queue up).
   - There is enough disk space and no corporate policy blocking the prerequisite installs.
---
## 9. Configure secrets and variables on GitHub
1. Open the repository → *Settings**Secrets and variables**Actions*.
2. Add the secrets:
   - `VPS_HOST` → VPS domain or IP.
   - `VPS_USER` → user with SSH access (e.g. `renan`).
   - `VPS_SSH_KEY` → the **full** contents of the private key generated exclusively for the pipeline (see below).
   - `TAURI_PRIVATE_KEY` → contents of the `tauri.private.key` file.
   - `TAURI_KEY_PASSWORD` → the password entered when generating the key (if you left it blank, leave it blank here too).
3. Generate an SSH key exclusively for the pipeline (if you have not already):
```bash
ssh-keygen -t ed25519 -C "github-actions@seu-dominio" -f ~/.ssh/github-actions
```
   - Put the contents of `~/.ssh/github-actions` (private key) into the `VPS_SSH_KEY` secret.
   - Add the public key `~/.ssh/github-actions.pub` to the user's `~/.ssh/authorized_keys` on the VPS.
4. Add **Environment variables** (optional) to avoid editing the YAML:
   - `APP_DIR``/srv/apps/sistema`
   - `VPS_UPDATES_DIR``/var/www/updates`
   (If you prefer, keep them defined directly in the workflow.)
---
## 10. Create the GitHub Actions workflow
1. In the repository, create `.github/workflows/ci-cd-web-desktop.yml` with this content:
```yaml
name: CI/CD - Web + Desktop
on:
push:
branches: [ main ]
tags:
- "v*.*.*"
permissions:
contents: write
env:
VPS_UPDATES_DIR: /var/www/updates
APP_DIR: /srv/apps/sistema
jobs:
deploy:
name: Deploy Web/Backend (VPS)
runs-on: [self-hosted, linux, vps]
if: startsWith(github.ref, 'refs/heads/main')
steps:
- uses: actions/checkout@v4
- name: Setup Bun
uses: oven-sh/setup-bun@v1
with:
bun-version: 1.3.1
- name: Deploy stack (Docker Swarm)
working-directory: ${{ env.APP_DIR }}
run: |
# git pull origin main || true
          # Update the stack.yml (or compatible compose file) before deploying.
docker stack deploy --with-registry-auth -c stack.yml sistema
docker stack services sistema
desktop_release:
name: Release Desktop (Tauri)
runs-on: [self-hosted, windows, desktop]
if: startsWith(github.ref, 'refs/tags/v')
steps:
- uses: actions/checkout@v4
- name: Setup Bun
uses: oven-sh/setup-bun@v1
with:
bun-version: 1.3.1
- name: Setup Rust toolchain
uses: dtolnay/rust-toolchain@stable
- name: Install deps
run: bun install --frozen-lockfile
- name: Build + Sign + Release (tauri-action)
uses: tauri-apps/tauri-action@v0
env:
TAURI_PRIVATE_KEY: ${{ secrets.TAURI_PRIVATE_KEY }}
TAURI_KEY_PASSWORD: ${{ secrets.TAURI_KEY_PASSWORD }}
with:
tagName: ${{ github.ref_name }}
releaseName: "Desktop ${{ github.ref_name }}"
releaseDraft: false
prerelease: false
updaterJsonKeepName: true
- name: Upload latest.json + bundles para VPS
uses: appleboy/scp-action@v0.1.7
with:
host: ${{ secrets.VPS_HOST }}
username: ${{ secrets.VPS_USER }}
key: ${{ secrets.VPS_SSH_KEY }}
source: |
**/bundle/**/latest.json
**/bundle/**/*
target: ${{ env.VPS_UPDATES_DIR }}
overwrite: true
```
2. Adjust the deploy block to your process (for example, use `bun run build && pm2 restart` if you do not use Docker, or replace it with a Portainer API call if you deploy through it).
3. Commit the file and push it to GitHub (`git add .github/workflows/ci-cd-web-desktop.yml`, `git commit`, `git push`).
---
## 11. Test the pipeline
1. **Linux runner test:**
   - Make a simple change on the `main` branch.
   - `git push origin main`.
   - On GitHub, check the workflow → `deploy` job.
   - Confirm via SSH on the VPS (e.g. `docker stack services sistema`) or through Portainer that the services were updated.
2. **Windows runner test:**
   - Update `apps/desktop/src-tauri/tauri.conf.json` with the updater public key and URL.
   - Commit and `git push`.
   - Create the tag: `git tag v1.0.0``git push origin v1.0.0`.
   - Check the `desktop_release` job on GitHub.
   - Once it finishes, confirm that `latest.json` and the generated installers exist under `/var/www/updates`.
3. Install the desktop app (Windows, macOS, or Linux depending on the artifacts) and open it (update-check sketch below):
   - The application should load the web interface pointing at your URL.
   - When a new tag is published (e.g. `v1.0.1`), the app should offer the update automatically.
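A sketch of the update check on the WebView side, using the v2 updater and process plugins (the endpoint/pubkey come from `tauri.conf.json` as configured above):
```ts
import { check } from "@tauri-apps/plugin-updater"
import { relaunch } from "@tauri-apps/plugin-process"

// Checks the configured endpoint; `check()` resolves to null when up to date.
export async function checkForUpdates() {
  const update = await check()
  if (update) {
    await update.downloadAndInstall()
    await relaunch() // restart into the new version
  }
}
```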
---
## 12. Day-to-day routine
1. Regular development:
   - Work on your own branch.
   - Open a PR to `main`.
   - On merge into `main`, the `deploy` job runs and publishes the new stack version to Swarm (visible in Portainer).
2. New desktop version:
   - Adjust the app and bump the `version` field in `tauri.conf.json`.
   - `git commit` and `git push`.
   - Create a `vX.Y.Z` tag and push it (`git tag v1.2.0`, `git push origin v1.2.0`).
   - Wait for the `desktop_release` job to finish.
   - Users receive the automatic update the next time they open the app.
3. Certificate renewal:
   - Make sure the TLS certificate used by Nginx gets renewed (e.g. `certbot renew`).
4. Runner upkeep:
   - VPS: monitor the `actions.runner.*` service. Restart it if needed (`sudo ./svc.sh restart`).
   - Windows: keep the machine on and up to date. If the service stops, open `services.msc``GitHub Actions Runner` → Start.
---
## 13. Best practices and security
- Protect the updater's private key; treat it as a production secret.
- Use a dedicated user on the VPS for the runner and restrict permissions to only the necessary directories.
- Back up `/var/www/updates` periodically (so you can still serve old installers if needed).
- Never commit the `.env` file or the private keys.
- Update Docker, Node, and Rust periodically.
---
## 14. Troubleshooting common issues
| Symptom | Likely cause | How to fix |
| --- | --- | --- |
| `deploy` job fails with "permission denied" | Runner lacks access to the app directory | Fix permissions (`sudo chown -R actions:actions /srv/apps/sistema`). |
| `desktop_release` job fails at the `tauri-action` step | Incomplete toolchain on Windows | Reinstall Rust, WebView2, and the Visual Studio C++ components. |
| Artifacts do not reach the VPS | Wrong path or invalid SSH key | Check `VPS_HOST`, `VPS_USER`, `VPS_SSH_KEY`, and that `/var/www/updates` exists. |
| App does not find updates | URL or public key mismatch in `tauri.conf.json` | Confirm `endpoints` matches the HTTPS domain and `pubkey` is exactly the generated public key. |
| Runner shows offline on GitHub | Service stopped or machine off | VPS: `sudo ./svc.sh status`; Windows: open `Services` and restart the `GitHub Actions Runner`. |
---
## 15. Final deployment checklist
1. [ ] VPS updated, Docker/Nginx working.
2. [ ] `/srv/apps/sistema` and `/var/www/updates` directories created with correct permissions.
3. [ ] Nginx serving `https://seu-dominio.com/updates/`.
4. [ ] Linux runner registered with labels `self-hosted,linux,vps`.
5. [ ] Windows runner registered with labels `self-hosted,windows,desktop`.
6. [ ] Tauri updater keys generated and the public key in `tauri.conf.json`.
7. [ ] Secrets and variables configured on GitHub (`VPS_*`, `TAURI_*`).
8. [ ] Workflow `.github/workflows/ci-cd-web-desktop.yml` created and committed.
9. [ ] Automatic deploy tested with a push to `main`.
10. [ ] Desktop release tested with the `v1.0.0` tag.
With every item checked, the pipeline is ready to use whenever you ship new releases.

View file

@@ -0,0 +1,14 @@
<!doctype html>
<html lang="pt-BR">
<head>
<meta charset="UTF-8" />
<meta name="color-scheme" content="light" />
<link rel="stylesheet" href="/src/index.css" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Raven — Agente Desktop</title>
<script type="module" src="/src/main.tsx" defer></script>
</head>
<body>
<div id="root"></div>
</body>
</html>

View file

@@ -0,0 +1,31 @@
{
"name": "appsdesktop",
"private": true,
"version": "0.1.0",
"type": "module",
"scripts": {
"dev": "vite",
"build": "tsc && vite build",
"preview": "vite preview",
"tauri": "node ./scripts/tauri-with-stub.mjs",
"gen:icon": "node ./scripts/build-icon.mjs"
},
"dependencies": {
"@radix-ui/react-tabs": "^1.1.13",
"@tauri-apps/api": "^2",
"@tauri-apps/plugin-opener": "^2",
"@tauri-apps/plugin-process": "^2",
"@tauri-apps/plugin-store": "^2",
"@tauri-apps/plugin-updater": "^2",
"lucide-react": "^0.544.0",
"react": "^19.0.0",
"react-dom": "^19.0.0"
},
"devDependencies": {
"png-to-ico": "^3.0.1",
"@tauri-apps/cli": "^2",
"@vitejs/plugin-react": "^4.3.4",
"typescript": "~5.6.2",
"vite": "^6.0.3"
}
}

View file

@@ -0,0 +1,11 @@
{
"version": "0.1.6",
"notes": "Correções e melhorias do desktop",
"pub_date": "2025-10-14T12:00:00Z",
"platforms": {
"windows-x86_64": {
"signature": "ZFc1MGNuVnpkR1ZrSUdOdmJXMWxiblE2SUhOcFoyNWhkSFZ5WlNCbWNtOXRJSFJoZFhKcElITmxZM0psZENCclpYa0tVbFZVZDNFeFUwRlJRalJVUjJOU1NqUnpTVmhXU1ZoeVUwZElNSGxETW5KSE1FTnBWa3BWU1dzelVYVlRNV1JTV0Vrdk1XMUZVa0Z3YTBWc2QySnZhVnBxUWs5bVoyODNNbEZaYUZsMFVHTlRLMUFyT0hJMVdGZ3lWRkZYT1V3ekwzZG5QUXAwY25WemRHVmtJR052YlcxbGJuUTZJSFJwYldWemRHRnRjRG94TnpZd016azVOVEkzQ1dacGJHVTZVbUYyWlc1Zk1DNHhMalZmZURZMExYTmxkSFZ3TG1WNFpRcHdkME15THpOVlZtUXpiSG9yZGpRd1pFZHFhV1JvVkZCb0wzVnNabWh1ZURJdmFtUlZOalEwTkRSVVdVY3JUVGhLTUdrNU5scFNUSFZVWkRsc1lYVTJUR2dyWTNWeWJuWTVhRGh3ZVVnM1dFWjVhSFZDUVQwOUNnPT0=",
"url": "https://github.com/esdrasrenan/sistema-de-chamados/raw/main/apps/desktop/public/releases/Raven_0.1.6_x64-setup.exe"
}
}
}

View file

@@ -0,0 +1 @@
dW50cnVzdGVkIGNvbW1lbnQ6IHNpZ25hdHVyZSBmcm9tIHRhdXJpIHNlY3JldCBrZXkKUlVUd3ExU0FRQjRUR2NSSjRzSVhWSVhyU0dIMHlDMnJHMENpVkpVSWszUXVTMWRSWEkvMW1FUkFwa0Vsd2JvaVpqQk9mZ283MlFZaFl0UGNTK1ArOHI1WFgyVFFXOUwzL3dnPQp0cnVzdGVkIGNvbW1lbnQ6IHRpbWVzdGFtcDoxNzYwMzk5NTI3CWZpbGU6UmF2ZW5fMC4xLjVfeDY0LXNldHVwLmV4ZQpwd0MyLzNVVmQzbHordjQwZEdqaWRoVFBoL3VsZmhueDIvamRVNjQ0NDRUWUcrTThKMGk5NlpSTHVUZDlsYXU2TGgrY3VybnY5aDhweUg3WEZ5aHVCQT09Cg==

View file

@@ -0,0 +1,38 @@
#!/usr/bin/env node
import { promises as fs } from 'node:fs'
import path from 'node:path'
import pngToIco from 'png-to-ico'
async function fileExists(p) {
try { await fs.access(p); return true } catch { return false }
}
async function main() {
const root = path.resolve(process.cwd(), 'src-tauri', 'icons')
  // Include only sizes supported by NSIS (up to 256px).
  // Avoid 512px so the resulting ICO stays valid for the installer.
  const candidates = [
    'icon-256.png', // preferred
    '128x128@2x.png', // alias for 256
    'icon-128.png',
    'icon-64.png',
    'icon-32.png',
  ]
const sources = []
for (const name of candidates) {
const p = path.join(root, name)
if (await fileExists(p)) sources.push(p)
}
if (sources.length === 0) {
    console.error('[gen:icon] No base image found in src-tauri/icons')
process.exit(1)
}
  console.log('[gen:icon] Generating icon.ico from:', sources.map((s) => path.basename(s)).join(', '))
const buffer = await pngToIco(sources)
const outPath = path.join(root, 'icon.ico')
await fs.writeFile(outPath, buffer)
  console.log('[gen:icon] Wrote:', outPath)
}
main().catch((err) => { console.error(err); process.exit(1) })

View file

@@ -0,0 +1,237 @@
#!/usr/bin/env python3
"""
Generate icon PNGs/ICO for the desktop installer using the high-resolution Raven artwork.
The script reads the square logo (`logo-raven-fund-azul.png`) and resizes it to the
target sizes with a simple bilinear filter implemented with the Python standard library,
avoiding additional dependencies.
"""
from __future__ import annotations
import math
import struct
import zlib
from binascii import crc32
from pathlib import Path
ICON_DIR = Path(__file__).resolve().parents[1] / "src-tauri" / "icons"
BASE_IMAGE = ICON_DIR / "logo-raven-fund-azul.png"
TARGET_SIZES = [32, 64, 128, 256, 512]
def read_png(path: Path) -> tuple[int, int, list[list[tuple[int, int, int, int]]]]:
data = path.read_bytes()
if not data.startswith(b"\x89PNG\r\n\x1a\n"):
raise ValueError(f"{path} is not a PNG")
pos = 8
width = height = bit_depth = color_type = None
compressed_parts = []
while pos < len(data):
length = struct.unpack(">I", data[pos : pos + 4])[0]
pos += 4
ctype = data[pos : pos + 4]
pos += 4
chunk = data[pos : pos + length]
pos += length
pos += 4 # CRC
if ctype == b"IHDR":
width, height, bit_depth, color_type, _, _, _ = struct.unpack(">IIBBBBB", chunk)
if bit_depth != 8 or color_type not in (2, 6):
raise ValueError("Only 8-bit RGB/RGBA PNGs are supported")
elif ctype == b"IDAT":
compressed_parts.append(chunk)
elif ctype == b"IEND":
break
if width is None or height is None or bit_depth is None or color_type is None:
raise ValueError("PNG missing IHDR chunk")
raw = zlib.decompress(b"".join(compressed_parts))
bpp = 4 if color_type == 6 else 3
stride = width * bpp
rows = []
idx = 0
prev = bytearray(stride)
for _ in range(height):
filter_type = raw[idx]
idx += 1
row = bytearray(raw[idx : idx + stride])
idx += stride
if filter_type == 1:
for i in range(stride):
left = row[i - bpp] if i >= bpp else 0
row[i] = (row[i] + left) & 0xFF
elif filter_type == 2:
for i in range(stride):
row[i] = (row[i] + prev[i]) & 0xFF
elif filter_type == 3:
for i in range(stride):
left = row[i - bpp] if i >= bpp else 0
up = prev[i]
row[i] = (row[i] + ((left + up) // 2)) & 0xFF
elif filter_type == 4:
for i in range(stride):
left = row[i - bpp] if i >= bpp else 0
up = prev[i]
up_left = prev[i - bpp] if i >= bpp else 0
p = left + up - up_left
pa = abs(p - left)
pb = abs(p - up)
pc = abs(p - up_left)
if pa <= pb and pa <= pc:
pr = left
elif pb <= pc:
pr = up
else:
pr = up_left
row[i] = (row[i] + pr) & 0xFF
elif filter_type not in (0,):
raise ValueError(f"Unsupported PNG filter type {filter_type}")
rows.append(bytes(row))
prev[:] = row
pixels: list[list[tuple[int, int, int, int]]] = []
for row in rows:
if color_type == 6:
pixels.append([tuple(row[i : i + 4]) for i in range(0, len(row), 4)])
else:
pixels.append([tuple(row[i : i + 3] + b"\xff") for i in range(0, len(row), 3)])
return width, height, pixels
def write_png(path: Path, width: int, height: int, pixels: list[list[tuple[int, int, int, int]]]) -> None:
raw = bytearray()
for row in pixels:
raw.append(0) # filter type 0
for r, g, b, a in row:
raw.extend((r & 0xFF, g & 0xFF, b & 0xFF, a & 0xFF))
compressed = zlib.compress(raw, level=9)
def chunk(name: bytes, payload: bytes) -> bytes:
return (
struct.pack(">I", len(payload))
+ name
+ payload
+ struct.pack(">I", crc32(name + payload) & 0xFFFFFFFF)
)
ihdr = struct.pack(">IIBBBBB", width, height, 8, 6, 0, 0, 0)
out = bytearray(b"\x89PNG\r\n\x1a\n")
out += chunk(b"IHDR", ihdr)
out += chunk(b"IDAT", compressed)
out += chunk(b"IEND", b"")
path.write_bytes(out)
def bilinear_sample(pixels: list[list[tuple[int, int, int, int]]], x: float, y: float) -> tuple[int, int, int, int]:
height = len(pixels)
width = len(pixels[0])
x = min(max(x, 0.0), width - 1.0)
y = min(max(y, 0.0), height - 1.0)
x0 = int(math.floor(x))
y0 = int(math.floor(y))
x1 = min(x0 + 1, width - 1)
y1 = min(y0 + 1, height - 1)
dx = x - x0
dy = y - y0
def lerp(a: float, b: float, t: float) -> float:
return a + (b - a) * t
result = []
for channel in range(4):
c00 = pixels[y0][x0][channel]
c10 = pixels[y0][x1][channel]
c01 = pixels[y1][x0][channel]
c11 = pixels[y1][x1][channel]
top = lerp(c00, c10, dx)
bottom = lerp(c01, c11, dx)
result.append(int(round(lerp(top, bottom, dy))))
return tuple(result)
def resize_image(pixels: list[list[tuple[int, int, int, int]]], target: int) -> list[list[tuple[int, int, int, int]]]:
src_height = len(pixels)
src_width = len(pixels[0])
scale = min(target / src_width, target / src_height)
dest_width = max(1, int(round(src_width * scale)))
dest_height = max(1, int(round(src_height * scale)))
offset_x = (target - dest_width) // 2
offset_y = (target - dest_height) // 2
background = (0, 0, 0, 0)
canvas = [[background for _ in range(target)] for _ in range(target)]
for dy in range(dest_height):
src_y = (dy + 0.5) / scale - 0.5
for dx in range(dest_width):
src_x = (dx + 0.5) / scale - 0.5
canvas[offset_y + dy][offset_x + dx] = bilinear_sample(pixels, src_x, src_y)
return canvas
def build_ico(output: Path, png_paths: list[Path]) -> None:
entries = []
offset = 6 + 16 * len(png_paths)
for path in png_paths:
data = path.read_bytes()
width, height, _ = read_png(path)
entries.append(
{
"width": width if width < 256 else 0,
"height": height if height < 256 else 0,
"size": len(data),
"offset": offset,
"payload": data,
}
)
offset += len(data)
header = struct.pack("<HHH", 0, 1, len(entries))
body = bytearray(header)
for entry in entries:
body.extend(
struct.pack(
"<BBBBHHII",
entry["width"],
entry["height"],
0,
0,
1,
32,
entry["size"],
entry["offset"],
)
)
for entry in entries:
body.extend(entry["payload"])
output.write_bytes(body)
def main() -> None:
width, height, pixels = read_png(BASE_IMAGE)
if width != height:
raise ValueError("Base icon must be square")
generated: list[Path] = []
for size in TARGET_SIZES:
resized = resize_image(pixels, size)
out_path = ICON_DIR / f"icon-{size}.png"
write_png(out_path, size, size, resized)
generated.append(out_path)
print(f"Generated {out_path} ({size}x{size})")
largest = max(generated, key=lambda p: int(p.stem.split("-")[-1]))
(ICON_DIR / "icon.png").write_bytes(largest.read_bytes())
ico_sources = sorted(
[p for p in generated if int(p.stem.split("-")[-1]) <= 256],
key=lambda p: int(p.stem.split("-")[-1]),
)
build_ico(ICON_DIR / "icon.ico", ico_sources)
print("icon.ico rebuilt.")
if __name__ == "__main__":
main()

View file

@@ -0,0 +1,239 @@
#!/usr/bin/env python3
"""
Utility script to convert a PNG file (non-interlaced, 8-bit RGBA/RGB)
into a 24-bit BMP with optional letterboxing resize.
The script is intentionally lightweight and relies only on Python's
standard library so it can run in constrained build environments.
"""
from __future__ import annotations
import argparse
import struct
import sys
import zlib
from pathlib import Path
PNG_SIGNATURE = b"\x89PNG\r\n\x1a\n"
def parse_png(path: Path):
data = path.read_bytes()
if not data.startswith(PNG_SIGNATURE):
raise ValueError("Input is not a PNG file")
idx = len(PNG_SIGNATURE)
width = height = bit_depth = color_type = None
compressed = bytearray()
interlaced = False
while idx < len(data):
if idx + 8 > len(data):
raise ValueError("Corrupted PNG (unexpected EOF)")
length = struct.unpack(">I", data[idx : idx + 4])[0]
idx += 4
chunk_type = data[idx : idx + 4]
idx += 4
chunk_data = data[idx : idx + length]
idx += length
crc = data[idx : idx + 4] # noqa: F841 - crc skipped (validated by reader)
idx += 4
if chunk_type == b"IHDR":
width, height, bit_depth, color_type, compression, filter_method, interlace = struct.unpack(
">IIBBBBB", chunk_data
)
if compression != 0 or filter_method != 0:
raise ValueError("Unsupported PNG compression/filter method")
interlaced = interlace != 0
elif chunk_type == b"IDAT":
compressed.extend(chunk_data)
elif chunk_type == b"IEND":
break
if interlaced:
raise ValueError("Interlaced PNGs are not supported by this script")
if bit_depth != 8:
raise ValueError(f"Unsupported bit depth: {bit_depth}")
if color_type not in (2, 6):
raise ValueError(f"Unsupported color type: {color_type}")
raw = zlib.decompress(bytes(compressed))
bytes_per_pixel = 3 if color_type == 2 else 4
stride = width * bytes_per_pixel
expected = (stride + 1) * height
if len(raw) != expected:
raise ValueError("Corrupted PNG data")
# Apply PNG scanline filters
image = bytearray(width * height * 4) # Force RGBA output
prev_row = [0] * (stride)
def paeth(a, b, c):
p = a + b - c
pa = abs(p - a)
pb = abs(p - b)
pc = abs(p - c)
if pa <= pb and pa <= pc:
return a
if pb <= pc:
return b
return c
out_idx = 0
for y in range(height):
offset = y * (stride + 1)
filter_type = raw[offset]
row = bytearray(raw[offset + 1 : offset + 1 + stride])
if filter_type == 1: # Sub
for i in range(stride):
left = row[i - bytes_per_pixel] if i >= bytes_per_pixel else 0
row[i] = (row[i] + left) & 0xFF
elif filter_type == 2: # Up
for i in range(stride):
row[i] = (row[i] + prev_row[i]) & 0xFF
elif filter_type == 3: # Average
for i in range(stride):
left = row[i - bytes_per_pixel] if i >= bytes_per_pixel else 0
up = prev_row[i]
row[i] = (row[i] + ((left + up) >> 1)) & 0xFF
elif filter_type == 4: # Paeth
for i in range(stride):
left = row[i - bytes_per_pixel] if i >= bytes_per_pixel else 0
up = prev_row[i]
up_left = prev_row[i - bytes_per_pixel] if i >= bytes_per_pixel else 0
row[i] = (row[i] + paeth(left, up, up_left)) & 0xFF
elif filter_type != 0:
raise ValueError(f"Unsupported PNG filter type: {filter_type}")
# Convert to RGBA
for x in range(width):
if color_type == 2:
r, g, b = row[x * 3 : x * 3 + 3]
a = 255
else:
r, g, b, a = row[x * 4 : x * 4 + 4]
image[out_idx : out_idx + 4] = bytes((r, g, b, a))
out_idx += 4
prev_row = list(row)
return width, height, image
def resize_with_letterbox(image, width, height, target_w, target_h, background, scale_factor=1.0):
if width == target_w and height == target_h and abs(scale_factor - 1.0) < 1e-6:
return image, width, height
bg_r, bg_g, bg_b = background
base_scale = min(target_w / width, target_h / height)
base_scale *= scale_factor
base_scale = max(base_scale, 1 / max(width, height)) # avoid zero / collapse
scaled_w = max(1, int(round(width * base_scale)))
scaled_h = max(1, int(round(height * base_scale)))
output = bytearray(target_w * target_h * 4)
# Fill background
for i in range(0, len(output), 4):
output[i : i + 4] = bytes((bg_r, bg_g, bg_b, 255))
offset_x = (target_w - scaled_w) // 2
offset_y = (target_h - scaled_h) // 2
for y in range(scaled_h):
src_y = min(height - 1, int(round(y / base_scale)))
for x in range(scaled_w):
src_x = min(width - 1, int(round(x / base_scale)))
src_idx = (src_y * width + src_x) * 4
dst_idx = ((y + offset_y) * target_w + (x + offset_x)) * 4
output[dst_idx : dst_idx + 4] = image[src_idx : src_idx + 4]
return output, target_w, target_h
def blend_to_rgb(image):
rgb = bytearray(len(image) // 4 * 3)
for i in range(0, len(image), 4):
r, g, b, a = image[i : i + 4]
if a == 255:
rgb[(i // 4) * 3 : (i // 4) * 3 + 3] = bytes((b, g, r)) # BMP stores BGR
else:
alpha = a / 255.0
bg = (255, 255, 255)
rr = int(round(r * alpha + bg[0] * (1 - alpha)))
gg = int(round(g * alpha + bg[1] * (1 - alpha)))
bb = int(round(b * alpha + bg[2] * (1 - alpha)))
rgb[(i // 4) * 3 : (i // 4) * 3 + 3] = bytes((bb, gg, rr))
return rgb
def write_bmp(path: Path, width: int, height: int, rgb: bytearray):
row_stride = (width * 3 + 3) & ~3 # align to 4 bytes
padding = row_stride - width * 3
pixel_data = bytearray()
for y in range(height - 1, -1, -1):
start = y * width * 3
end = start + width * 3
pixel_data.extend(rgb[start:end])
if padding:
pixel_data.extend(b"\0" * padding)
file_size = 14 + 40 + len(pixel_data)
header = struct.pack("<2sIHHI", b"BM", file_size, 0, 0, 14 + 40)
dib_header = struct.pack(
"<IIIHHIIIIII",
40, # header size
width,
height,
1, # planes
24, # bits per pixel
0, # compression
len(pixel_data),
2835, # horizontal resolution (px/m ~72dpi)
2835, # vertical resolution
0,
0,
)
path.write_bytes(header + dib_header + pixel_data)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("input", type=Path)
parser.add_argument("output", type=Path)
parser.add_argument("--width", type=int, help="Target width (px)")
parser.add_argument("--height", type=int, help="Target height (px)")
parser.add_argument(
"--scale",
type=float,
default=1.0,
help="Optional multiplier applied to the fitted image size (e.g. 0.7 adds padding).",
)
parser.add_argument(
"--background",
type=str,
default="FFFFFF",
help="Background hex color used for transparent pixels (default: FFFFFF)",
)
args = parser.parse_args()
try:
width, height, image = parse_png(args.input)
if args.width and args.height:
bg = tuple(int(args.background[i : i + 2], 16) for i in (0, 2, 4))
image, width, height = resize_with_letterbox(
image, width, height, args.width, args.height, bg, max(args.scale, 0.05)
)
rgb = blend_to_rgb(image)
write_bmp(args.output, width, height, rgb)
except Exception as exc: # noqa: BLE001
print(f"Error: {exc}", file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()

View file

@@ -0,0 +1,80 @@
#!/usr/bin/env python3
"""
Utility to build an .ico file from a list of PNGs of different sizes.
Uses only Python's standard library so it can run in restricted environments.
"""
from __future__ import annotations
import argparse
import struct
from pathlib import Path
PNG_SIGNATURE = b"\x89PNG\r\n\x1a\n"
def read_png_dimensions(data: bytes) -> tuple[int, int]:
if not data.startswith(PNG_SIGNATURE):
raise ValueError("All inputs must be PNG files.")
width, height = struct.unpack(">II", data[16:24])
return width, height
def build_icon(png_paths: list[Path], output: Path) -> None:
png_data = [p.read_bytes() for p in png_paths]
entries = []
offset = 6 + 16 * len(png_data) # icon header + entries
for data in png_data:
width, height = read_png_dimensions(data)
entry = {
"width": width if width < 256 else 0,
"height": height if height < 256 else 0,
"colors": 0,
"reserved": 0,
"planes": 1,
"bit_count": 32,
"size": len(data),
"offset": offset,
"data": data,
}
entries.append(entry)
offset += entry["size"]
header = struct.pack("<HHH", 0, 1, len(entries))
table = bytearray()
for entry in entries:
table.extend(
struct.pack(
"<BBBBHHII",
entry["width"],
entry["height"],
entry["colors"],
entry["reserved"],
entry["planes"],
entry["bit_count"],
entry["size"],
entry["offset"],
)
)
payload = header + table + b"".join(entry["data"] for entry in entries)
output.write_bytes(payload)
def main() -> None:
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("output", type=Path)
parser.add_argument("inputs", nargs="+", type=Path)
args = parser.parse_args()
if not args.inputs:
raise SystemExit("Provide at least one PNG input.")
build_icon(args.inputs, args.output)
if __name__ == "__main__":
main()

View file

@@ -0,0 +1,38 @@
import { spawn } from "node:child_process"
import { fileURLToPath } from "node:url"
import { dirname, resolve } from "node:path"
const __filename = fileURLToPath(import.meta.url)
const __dirname = dirname(__filename)
const pathKey = process.platform === "win32" ? "Path" : "PATH"
const currentPath = process.env[pathKey] ?? process.env[pathKey.toUpperCase()] ?? ""
const separator = process.platform === "win32" ? ";" : ":"
const stubDir = resolve(__dirname)
process.env[pathKey] = [stubDir, currentPath].filter(Boolean).join(separator)
if (pathKey !== "PATH") {
process.env.PATH = process.env[pathKey]
}
if (!process.env.TAURI_BUNDLE_TARGETS) {
if (process.platform === "linux") {
process.env.TAURI_BUNDLE_TARGETS = "deb rpm"
} else if (process.platform === "win32") {
process.env.TAURI_BUNDLE_TARGETS = "nsis"
}
}
const executable = process.platform === "win32" ? "tauri.cmd" : "tauri"
const child = spawn(executable, process.argv.slice(2), {
stdio: "inherit",
shell: process.platform === "win32",
})
child.on("exit", (code, signal) => {
if (signal) {
process.kill(process.pid, signal)
} else {
process.exit(code ?? 0)
}
})

View file

@@ -0,0 +1,9 @@
#!/usr/bin/env bash
# Minimal stub to satisfy tools that expect xdg-open during bundling.
# Fails silently when the real binary is unavailable.
if command -v xdg-open >/dev/null 2>&1; then
exec xdg-open "$@"
else
exit 0
fi

View file

@@ -0,0 +1,7 @@
# Generated by Cargo
# will have compiled files and executables
/target/
# Generated by Tauri
# will have schema files for capabilities auto-completion
/gen/schemas

File diff suppressed because it is too large.

View file

@@ -0,0 +1,37 @@
[package]
name = "appsdesktop"
version = "0.1.0"
description = "A Tauri App"
authors = ["you"]
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
# The `_lib` suffix may seem redundant, but it is necessary
# to make the lib name unique so it does not conflict with the bin name.
# This seems to be only an issue on Windows; see https://github.com/rust-lang/cargo/issues/8519
name = "appsdesktop_lib"
crate-type = ["staticlib", "cdylib", "rlib"]
[build-dependencies]
tauri-build = { version = "2.4.1", features = [] }
[dependencies]
tauri = { version = "2.8.5", features = ["wry", "devtools"] }
tauri-plugin-opener = "2.5.0"
tauri-plugin-store = "2.4.0"
tauri-plugin-updater = "2.9.0"
tauri-plugin-process = "2.3.0"
serde = { version = "1", features = ["derive"] }
serde_json = "1"
sysinfo = { version = "0.31", default-features = false, features = ["multithread", "network", "system", "disk"] }
get_if_addrs = "0.5"
reqwest = { version = "0.12", features = ["json", "rustls-tls"], default-features = false }
tokio = { version = "1", features = ["rt-multi-thread", "macros", "time"] }
once_cell = "1.19"
thiserror = "1.0"
chrono = { version = "0.4", features = ["serde"] }
parking_lot = "0.12"
hostname = "0.4"
base64 = "0.22"

View file

@@ -0,0 +1,3 @@
fn main() {
tauri_build::build()
}

View file

@@ -0,0 +1,18 @@
{
"$schema": "../gen/schemas/desktop-schema.json",
"identifier": "default",
"description": "Capability for the main window",
"windows": ["main"],
"permissions": [
"core:default",
"opener:default",
"store:default",
"store:allow-load",
"store:allow-set",
"store:allow-get",
"store:allow-save",
"store:allow-delete",
"updater:default",
"process:default"
]
}

Binary files not shown (5 image files added: 16 KiB, 46 KiB, 2 KiB, 5.5 KiB, and 567 KiB).

Some files were not shown because too many files have changed in this diff.