Compare commits

main...desktop-v.0.1.7

No commits in common. "main" and "desktop-v.0.1.7" have entirely different histories.

644 changed files with 26249 additions and 128157 deletions

View file

@@ -1,91 +0,0 @@
{
"permissions": {
"allow": [
"Bash(ssh:*)",
"Bash(bun run lint)",
"Bash(bun run prisma:generate:*)",
"Bash(bun run build:bun:*)",
"WebSearch",
"Bash(bun add:*)",
"Bash(bun run tauri:*)",
"Bash(curl:*)",
"Bash(dir \"D:\\Projetos IA\\sistema-de-chamados\")",
"Bash(findstr:*)",
"Bash(cat:*)",
"Bash(chmod:*)",
"Bash(find:*)",
"Bash(grep:*)",
"WebFetch(domain:medium.com)",
"WebFetch(domain:henrywithu.com)",
"WebFetch(domain:hub.docker.com)",
"Bash(python3:*)",
"WebFetch(domain:www.npmjs.com)",
"WebFetch(domain:docs.strapi.io)",
"Bash(tablename)",
"Bash(\"\"\" OWNER TO renan; FROM pg_tables WHERE schemaname = public;\"\" | docker exec -i c95ebc27eb82 psql -U sistema -d strapi_blog\")",
"Bash(sequence_name)",
"Bash(\"\"\" OWNER TO renan; FROM information_schema.sequences WHERE sequence_schema = public;\"\" | docker exec -i c95ebc27eb82 psql -U sistema -d strapi_blog\")",
"Bash(git add:*)",
"Bash(git commit:*)",
"Bash(git push:*)",
"Bash(cargo check:*)",
"Bash(bun run:*)",
"Bash(icacls \"D:\\Projetos IA\\sistema-de-chamados\\codex_ed25519\")",
"Bash(copy \"D:\\Projetos IA\\sistema-de-chamados\\codex_ed25519\" \"%TEMP%\\codex_key\")",
"Bash(icacls \"%TEMP%\\codex_key\" /inheritance:r /grant:r \"%USERNAME%:R\")",
"Bash(cmd /c \"echo %TEMP%\")",
"Bash(cmd /c \"dir \"\"%TEMP%\\codex_key\"\"\")",
"Bash(where:*)",
"Bash(ssh-keygen:*)",
"Bash(/c/Program\\ Files/Git/usr/bin/ssh:*)",
"Bash(npx convex deploy:*)",
"Bash(dir \"%LOCALAPPDATA%\\Raven\")",
"Bash(dir \"%APPDATA%\\Raven\")",
"Bash(dir \"%LOCALAPPDATA%\\com.raven.app\")",
"Bash(dir \"%APPDATA%\\com.raven.app\")",
"Bash(tasklist:*)",
"Bash(dir /s /b %LOCALAPPDATA%*raven*)",
"Bash(cmd /c \"tasklist | findstr /i raven\")",
"Bash(cmd /c \"dir /s /b %LOCALAPPDATA%\\*raven* 2>nul\")",
"Bash(powershell -Command \"Get-Process | Where-Object {$_ProcessName -like ''*raven*'' -or $_ProcessName -like ''*appsdesktop*''} | Select-Object ProcessName, Id\")",
"Bash(node:*)",
"Bash(bun scripts/test-all-emails.tsx:*)",
"Bash(bun scripts/send-test-react-email.tsx:*)",
"Bash(dir:*)",
"Bash(git reset:*)",
"Bash(npx convex:*)",
"Bash(bun tsc:*)",
"Bash(scp:*)",
"Bash(docker run:*)",
"Bash(cmd /c \"docker run -d --name postgres-dev -p 5432:5432 -e POSTGRES_PASSWORD=dev -e POSTGRES_DB=sistema_chamados postgres:18\")",
"Bash(cmd /c \"docker ps -a --filter name=postgres-dev\")",
"Bash(cmd /c \"docker --version && docker ps -a\")",
"Bash(powershell -Command \"docker --version\")",
"Bash(powershell -Command \"docker run -d --name postgres-dev -p 5432:5432 -e POSTGRES_PASSWORD=dev -e POSTGRES_DB=sistema_chamados postgres:18\")",
"Bash(dir \"D:\\Projetos IA\\sistema-de-chamados\" /b)",
"Bash(bunx prisma migrate:*)",
"Bash(bunx prisma db push:*)",
"Bash(bun run auth:seed:*)",
"Bash(set DATABASE_URL=postgresql://postgres:dev@localhost:5432/sistema_chamados:*)",
"Bash(bun tsx:*)",
"Bash(DATABASE_URL=\"postgresql://postgres:dev@localhost:5432/sistema_chamados\" bun tsx:*)",
"Bash(docker stop:*)",
"Bash(docker rm:*)",
"Bash(git commit -m \"$(cat <<''EOF''\nfeat(checklist): exibe descricao do template e do item no ticket\n\n- Adiciona campo templateDescription ao schema do checklist\n- Copia descricao do template ao aplicar checklist no ticket\n- Exibe ambas descricoes na visualizacao do ticket (template em italico)\n- Adiciona documentacao de desenvolvimento local (docs/LOCAL-DEV.md)\n- Corrige prisma-client.mjs para usar PostgreSQL em vez de SQLite\n\n🤖 Generated with [Claude Code](https://claude.com/claude-code)\n\nCo-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>\nEOF\n)\")",
"Bash(timeout 90 git push:*)",
"Bash(docker ps:*)",
"Bash(docker start:*)",
"Bash(docker inspect:*)",
"Bash(docker exec:*)",
"Bash(timeout 90 git push)",
"Bash(bun test:*)",
"Bash(git restore:*)",
"Bash(cd:*)",
"Bash(dir \"D:\\Projetos IA\\sistema-de-chamados\\src\\components\\ui\" /b)",
"Bash(timeout 120 bun:*)",
"Bash(bun run tauri:build:*)",
"Bash(git remote:*)",
"Bash(powershell.exe -NoProfile -ExecutionPolicy Bypass -File \"D:/Projetos IA/sistema-de-chamados/scripts/test-windows-collection.ps1\")"
]
}
}

View file

@@ -1,38 +1,62 @@
-NODE_ENV=development
-# Public app URL
-NEXT_PUBLIC_APP_URL=http://localhost:3000
-# Better Auth
-BETTER_AUTH_URL=http://localhost:3000
-BETTER_AUTH_SECRET=your-secret-key-at-least-32-chars-long
-# Convex (dev server URL)
-NEXT_PUBLIC_CONVEX_URL=http://127.0.0.1:3210
-CONVEX_INTERNAL_URL=http://127.0.0.1:3210
-# Intervalo (ms) para aceitar token revogado ao sincronizar acessos remotos (opcional)
-REMOTE_ACCESS_TOKEN_GRACE_MS=900000
-# Token interno opcional para o dashboard de saude (/admin/health) e queries internas
-INTERNAL_HEALTH_TOKEN=dev-health-token
-# Segredo para crons HTTP (reutilize em prod se preferir um unico token)
-REPORTS_CRON_SECRET=reports-cron-secret
-# Diretório para arquivamento local de tickets (JSONL/backup)
-ARCHIVE_DIR=./archives
-# PostgreSQL database (versao 18)
-# Para desenvolvimento local, use Docker:
-# docker run -d --name postgres-chamados -p 5432:5432 -e POSTGRES_PASSWORD=dev -e POSTGRES_DB=sistema_chamados postgres:18
-DATABASE_URL=postgresql://postgres:dev@localhost:5432/sistema_chamados
-# SMTP Configuration (production values in docs/SMTP.md)
-SMTP_HOST=smtp.c.inova.com.br
-SMTP_PORT=587
-SMTP_SECURE=false
-SMTP_USER=envio@rever.com.br
-SMTP_PASS=CAAJQm6ZT6AUdhXRTDYu
-SMTP_FROM_NAME=Sistema de Chamados
-SMTP_FROM_EMAIL=envio@rever.com.br
-# Dev-only bypass to simplify local testing (do NOT enable in prod)
-# DEV_BYPASS_AUTH=0
-# NEXT_PUBLIC_DEV_BYPASS_AUTH=0
+# Ambiente local — Sistema de Chamados
+# Copie este arquivo para `.env` e preencha os valores sensíveis.
+# Nunca faça commit de `.env` com segredos reais.
+# Convex
+CONVEX_DEPLOYMENT=anonymous:anonymous-sistema-de-chamados
+NEXT_PUBLIC_CONVEX_URL=http://127.0.0.1:3210
+CONVEX_SYNC_SECRET=dev-sync-secret
+# Next.js / App URL
+NEXT_PUBLIC_APP_URL=http://localhost:3000
+# Better Auth
+# Gere um segredo forte (ex.: `openssl rand -hex 32`)
+BETTER_AUTH_SECRET=change-me
+BETTER_AUTH_URL=http://localhost:3000
+# Banco de dados (Prisma)
+DATABASE_URL=file:./prisma/db.sqlite
+# Seeds automáticos (Better Auth)
+# Por padrão (true), garantindo apenas existência dos usuários padrão sem resetar senhas
+SEED_ENSURE_ONLY=true
+# Provisionamento e inventário de máquinas
+# Segredo obrigatório para registrar/atualizar máquinas (Convex)
+MACHINE_PROVISIONING_SECRET=change-me-provisioning
+# Tempo de vida do token de máquina (ms) — padrão 30 dias
+MACHINE_TOKEN_TTL_MS=2592000000
+# Opcional: segredo dedicado para webhook do FleetDM (senão usa o de provisionamento)
+FLEET_SYNC_SECRET=
+# SMTP (envio de e-mails)
+SMTP_ADDRESS=
+SMTP_PORT=465
+SMTP_DOMAIN=
+SMTP_USERNAME=
+SMTP_PASSWORD=
+SMTP_AUTHENTICATION=login
+SMTP_ENABLE_STARTTLS_AUTO=false
+SMTP_TLS=true
+MAILER_SENDER_EMAIL="Suporte <no-reply@seu-dominio.com>"
+# Alertas (actions do Convex)
+# Hora local (America/Sao_Paulo) para rodar alertas automáticos
+ALERTS_LOCAL_HOUR=8
+# Seeds e sincronizações auxiliares
+SYNC_TENANT_ID=tenant-atlas
+SYNC_DEFAULT_ASSIGNEE=agent@example.com
+SEED_TENANT_ID=tenant-atlas
+SEED_ADMIN_PASSWORD=admin123
+SEED_AGENT_PASSWORD=agent123
+SEED_USER_TENANT=tenant-atlas
+SEED_USER_EMAIL=
+SEED_USER_PASSWORD=
+SEED_USER_NAME=
+SEED_USER_ROLE=admin
+# Desenvolvimento Desktop (Tauri/Vite)
+# Em redes locais, defina o IP do host para HMR.
+TAURI_DEV_HOST=

View file

@@ -1,492 +0,0 @@
name: CI/CD Web + Desktop
on:
push:
branches: [ main ]
tags:
- 'v*.*.*'
workflow_dispatch:
inputs:
force_web_deploy:
description: 'Forcar deploy do Web (ignorar filtro)?'
type: boolean
required: false
default: false
force_convex_deploy:
description: 'Forcar deploy do Convex (ignorar filtro)?'
type: boolean
required: false
default: false
env:
APP_DIR: /srv/apps/sistema
VPS_UPDATES_DIR: /var/www/updates
jobs:
changes:
name: Detect changes
runs-on: [ self-hosted, linux, vps ]
timeout-minutes: 5
outputs:
convex: ${{ steps.filter.outputs.convex }}
web: ${{ steps.filter.outputs.web }}
steps:
- name: Checkout
uses: https://github.com/actions/checkout@v4
- name: Paths filter
id: filter
uses: https://github.com/dorny/paths-filter@v3
with:
filters: |
convex:
- 'convex/**'
web:
- 'src/**'
- 'public/**'
- 'prisma/**'
- 'next.config.ts'
- 'package.json'
- 'bun.lock'
- 'tsconfig.json'
- 'middleware.ts'
- 'stack.yml'
deploy:
name: Deploy (VPS Linux)
needs: changes
timeout-minutes: 30
if: ${{ github.event_name == 'workflow_dispatch' || github.ref == 'refs/heads/main' }}
runs-on: [ self-hosted, linux, vps ]
steps:
- name: Checkout
uses: https://github.com/actions/checkout@v4
- name: Determine APP_DIR (fallback safe path)
id: appdir
run: |
TS=$(date +%s)
FALLBACK_DIR="$HOME/apps/web.build.$TS"
mkdir -p "$FALLBACK_DIR"
echo "Using APP_DIR (fallback)=$FALLBACK_DIR"
echo "EFFECTIVE_APP_DIR=$FALLBACK_DIR" >> "$GITHUB_ENV"
- name: Setup Bun
uses: https://github.com/oven-sh/setup-bun@v2
with:
bun-version: 1.3.4
- name: Sync workspace to APP_DIR (preserving local env)
run: |
mkdir -p "$EFFECTIVE_APP_DIR"
RSYNC_FLAGS="-az --inplace --no-times --no-perms --no-owner --no-group --delete"
EXCLUDE_ENV="--exclude '.env*' --exclude 'apps/desktop/.env*' --exclude 'convex/.env*'"
if [ "$EFFECTIVE_APP_DIR" != "${APP_DIR:-/srv/apps/sistema}" ]; then
EXCLUDE_ENV=""
fi
rsync $RSYNC_FLAGS \
--filter='protect .next.old*' \
--exclude '.next.old*' \
--filter='protect node_modules' \
--filter='protect node_modules/**' \
--filter='protect .pnpm-store' \
--filter='protect .pnpm-store/**' \
--filter='protect .env' \
--filter='protect .env*' \
--filter='protect apps/desktop/.env*' \
--filter='protect convex/.env*' \
--exclude '.git' \
--exclude '.next' \
--exclude 'node_modules' \
--exclude 'node_modules/**' \
--exclude '.pnpm-store' \
--exclude '.pnpm-store/**' \
$EXCLUDE_ENV \
./ "$EFFECTIVE_APP_DIR"/
- name: Acquire Convex admin key
id: key
run: |
echo "Waiting for Convex container..."
CID=""
for attempt in $(seq 1 12); do
CID=$(docker ps --format '{{.ID}} {{.Names}}' | awk '/sistema_convex_backend/{print $1; exit}')
if [ -n "$CID" ]; then
echo "Convex container ready (CID=$CID)"
break
fi
echo "Attempt $attempt/12: container not ready yet; waiting 5s..."
sleep 5
done
CONVEX_IMAGE="ghcr.io/get-convex/convex-backend:latest"
if [ -n "$CID" ]; then
KEY=$(docker exec -i "$CID" /bin/sh -lc './generate_admin_key.sh' | tr -d '\r' | grep -o 'convex-self-hosted|[^ ]*' | tail -n1)
else
echo "No running convex container detected; attempting offline admin key extraction..."
VOLUME="sistema_convex_data"
if docker volume inspect "$VOLUME" >/dev/null 2>&1; then
KEY=$(docker run --rm --entrypoint /bin/sh -v "$VOLUME":/convex/data "$CONVEX_IMAGE" -lc './generate_admin_key.sh' | tr -d '\r' | grep -o 'convex-self-hosted|[^ ]*' | tail -n1)
else
echo "Volume $VOLUME nao encontrado; nao foi possivel extrair a chave admin"
fi
fi
echo "ADMIN_KEY=$KEY" >> $GITHUB_OUTPUT
echo "Admin key acquired? $([ -n "$KEY" ] && echo yes || echo no)"
if [ -z "$KEY" ]; then
echo "ERRO: Nao foi possivel obter a chave admin do Convex"
docker service ps sistema_convex_backend || true
exit 1
fi
- name: Copy production .env if present
run: |
DEFAULT_DIR="${APP_DIR:-/srv/apps/sistema}"
if [ "$EFFECTIVE_APP_DIR" != "$DEFAULT_DIR" ] && [ -f "$DEFAULT_DIR/.env" ]; then
echo "Copying production .env from $DEFAULT_DIR to $EFFECTIVE_APP_DIR"
cp -f "$DEFAULT_DIR/.env" "$EFFECTIVE_APP_DIR/.env"
fi
- name: Ensure Next.js cache directory exists and is writable
run: |
cd "$EFFECTIVE_APP_DIR"
mkdir -p .next/cache
chmod -R u+rwX .next || true
- name: Cache Next.js build cache (.next/cache)
uses: https://github.com/actions/cache@v4
with:
path: ${{ env.EFFECTIVE_APP_DIR }}/.next/cache
key: ${{ runner.os }}-nextjs-${{ hashFiles('bun.lock') }}-${{ hashFiles('next.config.ts') }}
restore-keys: |
${{ runner.os }}-nextjs-${{ hashFiles('bun.lock') }}-
${{ runner.os }}-nextjs-
- name: Lint check (fail fast before build)
run: |
cd "$EFFECTIVE_APP_DIR"
docker run --rm \
-v "$EFFECTIVE_APP_DIR":/app \
-w /app \
sistema_web:node22-bun \
bash -lc "set -euo pipefail; bun install --frozen-lockfile --filter '!appsdesktop'; bun run lint"
- name: Install and build (Next.js)
env:
PRISMA_ENGINES_CHECKSUM_IGNORE_MISSING: "1"
run: |
cd "$EFFECTIVE_APP_DIR"
docker run --rm \
-e PRISMA_ENGINES_CHECKSUM_IGNORE_MISSING="$PRISMA_ENGINES_CHECKSUM_IGNORE_MISSING" \
-e NODE_OPTIONS="--max-old-space-size=4096" \
-v "$EFFECTIVE_APP_DIR":/app \
-w /app \
sistema_web:node22-bun \
bash -lc "set -euo pipefail; bun install --frozen-lockfile --filter '!appsdesktop'; bun run prisma:generate; bun run build:bun"
- name: Fix Docker-created file permissions
run: |
# Docker cria arquivos como root - corrigir para o usuario runner (UID 1000)
docker run --rm -v "$EFFECTIVE_APP_DIR":/target alpine:3 \
chown -R 1000:1000 /target
echo "Permissoes do build corrigidas"
- name: Atualizar symlink do APP_DIR estavel (deploy atomico)
run: |
set -euo pipefail
ROOT="$HOME/apps"
STABLE_LINK="$ROOT/sistema.current"
mkdir -p "$ROOT"
# Sanidade: se esses arquivos nao existirem, o container vai falhar no boot.
test -f "$EFFECTIVE_APP_DIR/scripts/start-web.sh" || { echo "ERROR: scripts/start-web.sh nao encontrado em $EFFECTIVE_APP_DIR" >&2; exit 1; }
test -f "$EFFECTIVE_APP_DIR/stack.yml" || { echo "ERROR: stack.yml nao encontrado em $EFFECTIVE_APP_DIR" >&2; exit 1; }
test -d "$EFFECTIVE_APP_DIR/node_modules" || { echo "ERROR: node_modules nao encontrado em $EFFECTIVE_APP_DIR (necessario para next start)" >&2; exit 1; }
test -d "$EFFECTIVE_APP_DIR/.next" || { echo "ERROR: .next nao encontrado em $EFFECTIVE_APP_DIR (build nao gerado)" >&2; exit 1; }
PREV=""
if [ -L "$STABLE_LINK" ]; then
PREV="$(readlink -f "$STABLE_LINK" || true)"
fi
echo "PREV_APP_DIR=$PREV" >> "$GITHUB_ENV"
ln -sfn "$EFFECTIVE_APP_DIR" "$STABLE_LINK"
# Compat: mantem $HOME/apps/sistema como symlink quando possivel (nao mexe se for pasta).
if [ -L "$ROOT/sistema" ] || [ ! -e "$ROOT/sistema" ]; then
ln -sfn "$STABLE_LINK" "$ROOT/sistema"
fi
echo "APP_DIR estavel -> $(readlink -f "$STABLE_LINK")"
- name: Swarm deploy (stack.yml)
run: |
APP_DIR_STABLE="$HOME/apps/sistema.current"
if [ ! -d "$APP_DIR_STABLE" ]; then
echo "ERROR: Stable APP_DIR does not exist: $APP_DIR_STABLE" >&2; exit 1
fi
cd "$APP_DIR_STABLE"
set -o allexport
if [ -f .env ]; then
echo "Loading .env from $APP_DIR_STABLE"
. ./.env
else
echo "WARNING: No .env found at $APP_DIR_STABLE - stack vars may be empty!"
fi
set +o allexport
echo "Using APP_DIR (stable)=$APP_DIR_STABLE"
echo "NEXT_PUBLIC_CONVEX_URL=${NEXT_PUBLIC_CONVEX_URL:-<not set>}"
echo "NEXT_PUBLIC_APP_URL=${NEXT_PUBLIC_APP_URL:-<not set>}"
APP_DIR="$APP_DIR_STABLE" RELEASE_SHA=${{ github.sha }} docker stack deploy --with-registry-auth -c stack.yml sistema
- name: Wait for services to be healthy
run: |
echo "Aguardando servicos ficarem saudaveis..."
for i in $(seq 1 18); do
WEB_STATUS=$(docker service ls --filter "name=sistema_web" --format "{{.Replicas}}" 2>/dev/null || echo "0/0")
CONVEX_STATUS=$(docker service ls --filter "name=sistema_convex_backend" --format "{{.Replicas}}" 2>/dev/null || echo "0/0")
echo "Tentativa $i/18: web=$WEB_STATUS convex=$CONVEX_STATUS"
if echo "$WEB_STATUS" | grep -q "2/2" && echo "$CONVEX_STATUS" | grep -q "1/1"; then
echo "Todos os servicos estao saudaveis!"
exit 0
fi
sleep 10
done
echo "ERRO: Timeout aguardando servicos. Status atual:"
docker service ls --filter "label=com.docker.stack.namespace=sistema" || true
docker service ps sistema_web --no-trunc || true
docker service logs sistema_web --since 5m --raw 2>/dev/null | tail -n 200 || true
if [ -n "${PREV_APP_DIR:-}" ]; then
echo "Rollback: revertendo APP_DIR estavel para: $PREV_APP_DIR"
ln -sfn "$PREV_APP_DIR" "$HOME/apps/sistema.current"
cd "$HOME/apps/sistema.current"
set -o allexport
if [ -f .env ]; then
. ./.env
fi
set +o allexport
APP_DIR="$HOME/apps/sistema.current" RELEASE_SHA=${{ github.sha }} docker stack deploy --with-registry-auth -c stack.yml sistema || true
fi
exit 1
- name: Cleanup old build workdirs (keep last 2)
run: |
set -e
ROOT="$HOME/apps"
KEEP=2
PATTERN='web.build.*'
ACTIVE="$(readlink -f "$HOME/apps/sistema.current" 2>/dev/null || true)"
echo "Scanning $ROOT for old $PATTERN dirs"
LIST=$(find "$ROOT" -maxdepth 1 -type d -name "$PATTERN" | sort -r || true)
echo "$LIST" | sed -n "1,${KEEP}p" | sed 's/^/Keeping: /' || true
echo "$LIST" | sed "1,${KEEP}d" | while read dir; do
[ -z "$dir" ] && continue
if [ -n "$ACTIVE" ] && [ "$(readlink -f "$dir")" = "$ACTIVE" ]; then
echo "Skipping active dir (in use by APP_DIR): $dir"; continue
fi
echo "Removing $dir"
chmod -R u+rwX "$dir" 2>/dev/null || true
rm -rf "$dir" || {
echo "Local rm failed, falling back to docker (root) cleanup for $dir..."
docker run --rm -v "$dir":/target alpine:3 sh -lc 'chown -R 1000:1000 /target 2>/dev/null || true; chmod -R u+rwX /target 2>/dev/null || true; rm -rf /target/* /target/.[!.]* /target/..?* 2>/dev/null || true' || true
rm -rf "$dir" 2>/dev/null || rmdir "$dir" 2>/dev/null || true
}
done
echo "Disk usage (top 10 under $ROOT):"
du -sh "$ROOT"/* 2>/dev/null | sort -rh | head -n 10 || true
convex_deploy:
name: Deploy Convex functions
needs: changes
timeout-minutes: 20
if: ${{ github.event_name == 'workflow_dispatch' || needs.changes.outputs.convex == 'true' }}
runs-on: [ self-hosted, linux, vps ]
env:
APP_DIR: /srv/apps/sistema
steps:
- name: Checkout
uses: https://github.com/actions/checkout@v4
- name: Determine APP_DIR (fallback safe path)
id: appdir
run: |
TS=$(date +%s)
FALLBACK_DIR="$HOME/apps/convex.build.$TS"
mkdir -p "$FALLBACK_DIR"
echo "Using APP_DIR (fallback)=$FALLBACK_DIR"
echo "EFFECTIVE_APP_DIR=$FALLBACK_DIR" >> "$GITHUB_ENV"
- name: Sync workspace to APP_DIR (preserving local env)
run: |
mkdir -p "$EFFECTIVE_APP_DIR"
RSYNC_FLAGS="-az --inplace --no-times --no-perms --no-owner --no-group --delete"
rsync $RSYNC_FLAGS \
--filter='protect .next.old*' \
--exclude '.next.old*' \
--exclude '.env*' \
--exclude 'apps/desktop/.env*' \
--exclude 'convex/.env*' \
--filter='protect node_modules' \
--filter='protect node_modules/**' \
--filter='protect .pnpm-store' \
--filter='protect .pnpm-store/**' \
--exclude '.git' \
--exclude '.next' \
--exclude 'node_modules' \
--exclude 'node_modules/**' \
--exclude '.pnpm-store' \
--exclude '.pnpm-store/**' \
./ "$EFFECTIVE_APP_DIR"/
- name: Acquire Convex admin key
id: key
run: |
echo "Waiting for Convex container..."
CID=""
for attempt in $(seq 1 12); do
CID=$(docker ps --format '{{.ID}} {{.Names}}' | awk '/sistema_convex_backend/{print $1; exit}')
if [ -n "$CID" ]; then
echo "Convex container ready (CID=$CID)"
break
fi
echo "Attempt $attempt/12: container not ready yet; waiting 5s..."
sleep 5
done
CONVEX_IMAGE="ghcr.io/get-convex/convex-backend:latest"
if [ -n "$CID" ]; then
KEY=$(docker exec -i "$CID" /bin/sh -lc './generate_admin_key.sh' | tr -d '\r' | grep -o 'convex-self-hosted|[^ ]*' | tail -n1)
else
echo "No running convex container detected; attempting offline admin key extraction..."
VOLUME="sistema_convex_data"
if docker volume inspect "$VOLUME" >/dev/null 2>&1; then
KEY=$(docker run --rm --entrypoint /bin/sh -v "$VOLUME":/convex/data "$CONVEX_IMAGE" -lc './generate_admin_key.sh' | tr -d '\r' | grep -o 'convex-self-hosted|[^ ]*' | tail -n1)
else
echo "Volume $VOLUME nao encontrado; nao foi possivel extrair a chave admin"
fi
fi
echo "ADMIN_KEY=$KEY" >> $GITHUB_OUTPUT
echo "Admin key acquired? $([ -n "$KEY" ] && echo yes || echo no)"
if [ -z "$KEY" ]; then
echo "ERRO: Nao foi possivel obter a chave admin do Convex"
docker service ps sistema_convex_backend || true
exit 1
fi
- name: Bring convex.json from live app if present
run: |
if [ -f "$APP_DIR/convex.json" ]; then
echo "Copying $APP_DIR/convex.json -> $EFFECTIVE_APP_DIR/convex.json"
cp -f "$APP_DIR/convex.json" "$EFFECTIVE_APP_DIR/convex.json"
else
echo "No existing convex.json found at $APP_DIR; convex CLI will need self-hosted vars"
fi
- name: Set Convex env vars (self-hosted)
env:
CONVEX_SELF_HOSTED_URL: https://convex.esdrasrenan.com.br
CONVEX_SELF_HOSTED_ADMIN_KEY: ${{ steps.key.outputs.ADMIN_KEY }}
MACHINE_PROVISIONING_SECRET: ${{ secrets.MACHINE_PROVISIONING_SECRET }}
MACHINE_TOKEN_TTL_MS: ${{ secrets.MACHINE_TOKEN_TTL_MS }}
FLEET_SYNC_SECRET: ${{ secrets.FLEET_SYNC_SECRET }}
run: |
set -e
docker run --rm -i \
-v "$EFFECTIVE_APP_DIR":/app \
-w /app \
-e CONVEX_SELF_HOSTED_URL \
-e CONVEX_SELF_HOSTED_ADMIN_KEY \
-e MACHINE_PROVISIONING_SECRET \
-e MACHINE_TOKEN_TTL_MS \
-e FLEET_SYNC_SECRET \
-e CONVEX_TMPDIR=/app/.convex-tmp \
node:20-bullseye bash -lc "set -euo pipefail; curl -fsSL https://bun.sh/install | bash >/tmp/bun-install.log; export BUN_INSTALL=\"\${BUN_INSTALL:-/root/.bun}\"; export PATH=\"\$BUN_INSTALL/bin:\$PATH\"; export CONVEX_TMPDIR=/app/.convex-tmp; bun install --frozen-lockfile; \
if [ -n \"$MACHINE_PROVISIONING_SECRET\" ]; then bunx convex env set MACHINE_PROVISIONING_SECRET \"$MACHINE_PROVISIONING_SECRET\"; fi; \
if [ -n \"$MACHINE_TOKEN_TTL_MS\" ]; then bunx convex env set MACHINE_TOKEN_TTL_MS \"$MACHINE_TOKEN_TTL_MS\"; fi; \
if [ -n \"$FLEET_SYNC_SECRET\" ]; then bunx convex env set FLEET_SYNC_SECRET \"$FLEET_SYNC_SECRET\"; fi; \
bunx convex env list"
- name: Prepare Convex deploy workspace
run: |
cd "$EFFECTIVE_APP_DIR"
if [ -f .env ]; then
echo "Renaming .env -> .env.bak (Convex self-hosted deploy)"
mv -f .env .env.bak
fi
mkdir -p .convex-tmp
- name: Deploy functions to Convex self-hosted
env:
CONVEX_SELF_HOSTED_URL: https://convex.esdrasrenan.com.br
CONVEX_SELF_HOSTED_ADMIN_KEY: ${{ steps.key.outputs.ADMIN_KEY }}
run: |
docker run --rm -i \
-v "$EFFECTIVE_APP_DIR":/app \
-w /app \
-e CI=true \
-e CONVEX_SELF_HOSTED_URL \
-e CONVEX_SELF_HOSTED_ADMIN_KEY \
-e CONVEX_TMPDIR=/app/.convex-tmp \
node:20-bullseye bash -lc "set -euo pipefail; curl -fsSL https://bun.sh/install | bash >/tmp/bun-install.log; export BUN_INSTALL=\"\${BUN_INSTALL:-/root/.bun}\"; export PATH=\"\$BUN_INSTALL/bin:\$PATH\"; export CONVEX_TMPDIR=/app/.convex-tmp; bun install --frozen-lockfile; bunx convex deploy"
- name: Cleanup old convex build workdirs (keep last 2)
run: |
set -e
ROOT="$HOME/apps"
KEEP=2
PATTERN='convex.build.*'
LIST=$(find "$ROOT" -maxdepth 1 -type d -name "$PATTERN" | sort -r || true)
echo "$LIST" | sed -n "1,${KEEP}p" | sed 's/^/Keeping: /' || true
echo "$LIST" | sed "1,${KEEP}d" | while read dir; do
[ -z "$dir" ] && continue
echo "Removing $dir"
chmod -R u+rwX "$dir" 2>/dev/null || true
rm -rf "$dir" || {
echo "Local rm failed, falling back to docker (root) cleanup for $dir..."
docker run --rm -v "$dir":/target alpine:3 sh -lc 'chown -R 1000:1000 /target 2>/dev/null || true; chmod -R u+rwX /target 2>/dev/null || true; rm -rf /target/* /target/.[!.]* /target/..?* 2>/dev/null || true' || true
rm -rf "$dir" 2>/dev/null || rmdir "$dir" 2>/dev/null || true
}
done
# NOTA: Job comentado porque nao ha runner Windows configurado.
# Descomentar quando configurar um runner com labels: [self-hosted, windows, desktop]
#
# desktop_release:
# name: Desktop Release (Windows)
# timeout-minutes: 30
# if: ${{ startsWith(github.ref, 'refs/tags/v') }}
# runs-on: [ self-hosted, windows, desktop ]
# defaults:
# run:
# working-directory: apps/desktop
# steps:
# - name: Checkout
# uses: https://github.com/actions/checkout@v4
#
# - name: Setup pnpm
# uses: https://github.com/pnpm/action-setup@v4
# with:
# version: 10.20.0
#
# - name: Setup Node.js
# uses: https://github.com/actions/setup-node@v4
# with:
# node-version: 20
#
# - name: Install deps (desktop)
# run: pnpm install --frozen-lockfile
#
# - name: Build with Tauri
# uses: https://github.com/tauri-apps/tauri-action@v0
# env:
# GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# TAURI_PRIVATE_KEY: ${{ secrets.TAURI_PRIVATE_KEY }}
# TAURI_KEY_PASSWORD: ${{ secrets.TAURI_KEY_PASSWORD }}
# with:
# projectPath: apps/desktop
#
# - name: Upload bundles to VPS
# run: |
# # Upload via SCP (configurar chave SSH no runner Windows)
# # scp -r src-tauri/target/release/bundle/* user@vps:/var/www/updates/
# echo "TODO: Configurar upload para VPS"

View file

@@ -1,54 +0,0 @@
name: Quality Checks
on:
push:
branches:
- main
pull_request:
branches:
- main
jobs:
lint-test-build:
name: Lint, Test and Build
runs-on: [ self-hosted, linux, vps ]
env:
BETTER_AUTH_SECRET: test-secret
NEXT_PUBLIC_APP_URL: http://localhost:3000
BETTER_AUTH_URL: http://localhost:3000
NEXT_PUBLIC_CONVEX_URL: http://localhost:3210
DATABASE_URL: file:./prisma/db.dev.sqlite
steps:
- name: Checkout
uses: https://github.com/actions/checkout@v4
- name: Setup Bun
uses: https://github.com/oven-sh/setup-bun@v2
with:
bun-version: 1.3.4
- name: Install dependencies
run: bun install --frozen-lockfile
- name: Cache Next.js build cache
uses: https://github.com/actions/cache@v4
with:
path: |
${{ github.workspace }}/.next/cache
key: ${{ runner.os }}-nextjs-${{ hashFiles('bun.lock') }}-${{ hashFiles('**/*.{js,jsx,ts,tsx}') }}
restore-keys: |
${{ runner.os }}-nextjs-${{ hashFiles('bun.lock') }}-
- name: Generate Prisma client
env:
PRISMA_ENGINES_CHECKSUM_IGNORE_MISSING: "1"
run: bun run prisma:generate
- name: Lint
run: bun run lint
- name: Test
run: bun test
- name: Build
run: bun run build:bun

View file

@@ -1,639 +0,0 @@
name: CI/CD Web + Desktop
on:
push:
branches: [ main ]
tags:
- 'v*.*.*'
workflow_dispatch:
inputs:
force_web_deploy:
description: 'Forçar deploy do Web (ignorar filtro)?'
required: false
default: 'false'
force_convex_deploy:
description: 'Forçar deploy do Convex (ignorar filtro)?'
required: false
default: 'false'
env:
APP_DIR: /srv/apps/sistema
VPS_UPDATES_DIR: /var/www/updates
RUN_MACHINE_SMOKE: ${{ vars.RUN_MACHINE_SMOKE || secrets.RUN_MACHINE_SMOKE || 'false' }}
jobs:
changes:
name: Detect changes
runs-on: ubuntu-latest
timeout-minutes: 5
outputs:
convex: ${{ steps.filter.outputs.convex }}
web: ${{ steps.filter.outputs.web }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Paths filter
id: filter
uses: dorny/paths-filter@v3
with:
filters: |
convex:
- 'convex/**'
web:
- 'src/**'
- 'public/**'
- 'prisma/**'
- 'next.config.ts'
- 'package.json'
- 'pnpm-lock.yaml'
- 'tsconfig.json'
- 'middleware.ts'
- 'stack.yml'
deploy:
name: Deploy (VPS Linux)
needs: changes
timeout-minutes: 30
# Executa em qualquer push na main (independente do filtro) ou quando disparado manualmente
if: ${{ github.event_name == 'workflow_dispatch' || github.ref == 'refs/heads/main' }}
runs-on: [ self-hosted, linux, vps ]
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Determine APP_DIR (fallback safe path)
id: appdir
run: |
TS=$(date +%s)
# Use a web-specific build dir to avoid clashes with convex job
FALLBACK_DIR="$HOME/apps/web.build.$TS"
mkdir -p "$FALLBACK_DIR"
echo "Using APP_DIR (fallback)=$FALLBACK_DIR"
echo "EFFECTIVE_APP_DIR=$FALLBACK_DIR" >> "$GITHUB_ENV"
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
version: 10.20.0
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: 20
- name: Setup Bun
uses: oven-sh/setup-bun@v2
with:
bun-version: 1.3.1
- name: Verify Bun runtime
run: bun --version
- name: Permissions diagnostic (server paths)
run: |
set +e
echo "== Basic context =="
whoami || true
id || true
groups || true
umask || true
echo "HOME=$HOME"
echo "APP_DIR(default)=${APP_DIR:-/srv/apps/sistema}"
echo "EFFECTIVE_APP_DIR=$EFFECTIVE_APP_DIR"
echo "\n== Permissions check =="
check_path() {
P="$1"
echo "-- $P"
if [ -e "$P" ]; then
stat -c '%A %U:%G %n' "$P" 2>/dev/null || ls -ld "$P" || true
echo -n "WRITABLE? "; [ -w "$P" ] && echo yes || echo no
if command -v namei >/dev/null 2>&1; then
namei -l "$P" || true
fi
TMP="$P/.permtest.$$"
(echo test > "$TMP" 2>/dev/null && echo "CREATE_FILE: ok" && rm -f "$TMP") || echo "CREATE_FILE: failed"
else
echo "(missing)"
fi
}
check_path "/srv/apps/sistema"
check_path "/srv/apps/sistema/src/app/machines/handshake"
check_path "/srv/apps/sistema/apps/desktop/node_modules"
check_path "/srv/apps/sistema/node_modules"
check_path "$EFFECTIVE_APP_DIR"
check_path "$EFFECTIVE_APP_DIR/node_modules"
- name: Sync workspace to APP_DIR (preserving local env)
run: |
mkdir -p "$EFFECTIVE_APP_DIR"
RSYNC_FLAGS="-az --inplace --no-times --no-perms --no-owner --no-group --delete"
# Excluir .env apenas quando copiando para o diretório padrão (/srv) para preservar segredos locais
EXCLUDE_ENV="--exclude '.env*' --exclude 'apps/desktop/.env*' --exclude 'convex/.env*'"
if [ "$EFFECTIVE_APP_DIR" != "${APP_DIR:-/srv/apps/sistema}" ]; then
EXCLUDE_ENV=""
fi
rsync $RSYNC_FLAGS \
--filter='protect .next.old*' \
--exclude '.next.old*' \
--filter='protect node_modules' \
--filter='protect node_modules/**' \
--filter='protect .pnpm-store' \
--filter='protect .pnpm-store/**' \
--filter='protect .env' \
--filter='protect .env*' \
--filter='protect apps/desktop/.env*' \
--filter='protect convex/.env*' \
--exclude '.git' \
--exclude '.next' \
--exclude 'node_modules' \
--exclude 'node_modules/**' \
--exclude '.pnpm-store' \
--exclude '.pnpm-store/**' \
$EXCLUDE_ENV \
./ "$EFFECTIVE_APP_DIR"/
- name: Acquire Convex admin key
id: key
run: |
echo "Waiting for Convex container..."
CID=""
# Aguarda ate 60s (12 tentativas x 5s) pelo container ficar pronto
# Nao forca restart - deixa o Swarm gerenciar via health checks
for attempt in $(seq 1 12); do
CID=$(docker ps --format '{{.ID}} {{.Names}}' | awk '/sistema_convex_backend/{print $1; exit}')
if [ -n "$CID" ]; then
echo "Convex container ready (CID=$CID)"
break
fi
echo "Attempt $attempt/12: container not ready yet; waiting 5s..."
sleep 5
done
CONVEX_IMAGE="ghcr.io/get-convex/convex-backend:latest"
if [ -n "$CID" ]; then
KEY=$(docker exec -i "$CID" /bin/sh -lc './generate_admin_key.sh' | tr -d '\r' | grep -o 'convex-self-hosted|[^ ]*' | tail -n1)
else
echo "No running convex container detected; attempting offline admin key extraction..."
VOLUME="sistema_convex_data"
if docker volume inspect "$VOLUME" >/dev/null 2>&1; then
KEY=$(docker run --rm --entrypoint /bin/sh -v "$VOLUME":/convex/data "$CONVEX_IMAGE" -lc './generate_admin_key.sh' | tr -d '\r' | grep -o 'convex-self-hosted|[^ ]*' | tail -n1)
else
echo "Volume $VOLUME nao encontrado; nao foi possivel extrair a chave admin"
fi
fi
echo "ADMIN_KEY=$KEY" >> $GITHUB_OUTPUT
echo "Admin key acquired? $([ -n "$KEY" ] && echo yes || echo no)"
if [ -z "$KEY" ]; then
echo "ERRO: Nao foi possivel obter a chave admin do Convex"
docker service ps sistema_convex_backend || true
exit 1
fi
- name: Copy production .env if present
run: |
DEFAULT_DIR="${APP_DIR:-/srv/apps/sistema}"
if [ "$EFFECTIVE_APP_DIR" != "$DEFAULT_DIR" ] && [ -f "$DEFAULT_DIR/.env" ]; then
echo "Copying production .env from $DEFAULT_DIR to $EFFECTIVE_APP_DIR"
cp -f "$DEFAULT_DIR/.env" "$EFFECTIVE_APP_DIR/.env"
fi
- name: Prune workspace for server-only build
run: |
cd "$EFFECTIVE_APP_DIR"
# Keep only root (web) as a package in this effective workspace
printf "packages:\n - .\n\nignoredBuiltDependencies:\n - '@prisma/client'\n - '@prisma/engines'\n - '@tailwindcss/oxide'\n - esbuild\n - prisma\n - sharp\n - unrs-resolver\n" > pnpm-workspace.yaml
- name: Ensure Next.js cache directory exists and is writable
run: |
cd "$EFFECTIVE_APP_DIR"
mkdir -p .next/cache
chmod -R u+rwX .next || true
- name: Cache Next.js build cache (.next/cache)
uses: actions/cache@v4
with:
path: ${{ env.EFFECTIVE_APP_DIR }}/.next/cache
key: ${{ runner.os }}-nextjs-${{ hashFiles('pnpm-lock.yaml', 'bun.lock') }}-${{ hashFiles('src/**/*.ts', 'src/**/*.tsx', 'src/**/*.js', 'src/**/*.jsx', 'next.config.ts') }}
restore-keys: |
${{ runner.os }}-nextjs-${{ hashFiles('pnpm-lock.yaml', 'bun.lock') }}-
- name: Lint check (fail fast before build)
run: |
cd "$EFFECTIVE_APP_DIR"
docker run --rm \
-v "$EFFECTIVE_APP_DIR":/app \
-w /app \
sistema_web:node22-bun \
bash -lc "set -euo pipefail; bun install --frozen-lockfile --filter '!appsdesktop'; bun run lint"
- name: Install and build (Next.js)
env:
PRISMA_ENGINES_CHECKSUM_IGNORE_MISSING: "1"
run: |
cd "$EFFECTIVE_APP_DIR"
docker run --rm \
-e PRISMA_ENGINES_CHECKSUM_IGNORE_MISSING="$PRISMA_ENGINES_CHECKSUM_IGNORE_MISSING" \
-e NODE_OPTIONS="--max-old-space-size=4096" \
-v "$EFFECTIVE_APP_DIR":/app \
-w /app \
sistema_web:node22-bun \
bash -lc "set -euo pipefail; bun install --frozen-lockfile --filter '!appsdesktop'; bun run prisma:generate; bun run build:bun"
- name: Publish build to stable APP_DIR directory
run: |
set -e
DEST="$HOME/apps/sistema"
mkdir -p "$DEST"
mkdir -p "$DEST/.next/static"
# One-time fix for old root-owned files (esp. .pnpm-store) left by previous containers
docker run --rm -v "$DEST":/target alpine:3 sh -lc 'chown -R 1000:1000 /target 2>/dev/null || true; chmod -R u+rwX /target 2>/dev/null || true' || true
# Preserve previously published static assets to keep stale chunks available for clients mid-navigation
if [ -d "$EFFECTIVE_APP_DIR/.next/static" ]; then
rsync -a \
"$EFFECTIVE_APP_DIR/.next/static/" "$DEST/.next/static/"
fi
# Publish new build; exclude .pnpm-store to avoid Permission denied on old entries
rsync -a --delete \
--chown=1000:1000 \
--exclude '.pnpm-store' --exclude '.pnpm-store/**' \
--exclude '.next/static' \
"$EFFECTIVE_APP_DIR"/ "$DEST"/
echo "Published build to: $DEST"
- name: Swarm deploy (stack.yml)
run: |
APP_DIR_STABLE="$HOME/apps/sistema"
if [ ! -d "$APP_DIR_STABLE" ]; then
echo "ERROR: Stable APP_DIR does not exist: $APP_DIR_STABLE" >&2; exit 1
fi
cd "$APP_DIR_STABLE"
# Exporta variáveis do .env (do diretório de produção) para substituição no stack
# IMPORTANTE: Usar o .env do APP_DIR_STABLE, não do EFFECTIVE_APP_DIR (build temporário)
set -o allexport
if [ -f .env ]; then
echo "Loading .env from $APP_DIR_STABLE"
. ./.env
else
echo "WARNING: No .env found at $APP_DIR_STABLE - stack vars may be empty!"
fi
set +o allexport
echo "Using APP_DIR (stable)=$APP_DIR_STABLE"
echo "NEXT_PUBLIC_CONVEX_URL=${NEXT_PUBLIC_CONVEX_URL:-<not set>}"
echo "NEXT_PUBLIC_APP_URL=${NEXT_PUBLIC_APP_URL:-<not set>}"
APP_DIR="$APP_DIR_STABLE" RELEASE_SHA=${{ github.sha }} docker stack deploy --with-registry-auth -c stack.yml sistema
- name: Wait for services to be healthy
run: |
echo "Aguardando servicos ficarem saudaveis..."
# Aguarda ate 3 minutos (18 tentativas x 10s) pelos servicos
for i in $(seq 1 18); do
WEB_STATUS=$(docker service ls --filter "name=sistema_web" --format "{{.Replicas}}" 2>/dev/null || echo "0/0")
CONVEX_STATUS=$(docker service ls --filter "name=sistema_convex_backend" --format "{{.Replicas}}" 2>/dev/null || echo "0/0")
echo "Tentativa $i/18: web=$WEB_STATUS convex=$CONVEX_STATUS"
# Verifica se web tem 2/2 replicas e convex tem 1/1
if echo "$WEB_STATUS" | grep -q "2/2" && echo "$CONVEX_STATUS" | grep -q "1/1"; then
echo "Todos os servicos estao saudaveis!"
exit 0
fi
sleep 10
done
echo "AVISO: Timeout aguardando servicos. Status atual:"
docker service ls --filter "label=com.docker.stack.namespace=sistema"
# Nao falha o deploy, apenas avisa (o Swarm continua o rolling update em background)
- name: Smoke test — register + heartbeat
run: |
set -e
if [ "${RUN_MACHINE_SMOKE:-false}" != "true" ]; then
echo "RUN_MACHINE_SMOKE != true — pulando smoke test"; exit 0
fi
# Load MACHINE_PROVISIONING_SECRET from production .env on the host
if [ -f /srv/apps/sistema/.env ]; then
set -o allexport
. /srv/apps/sistema/.env
set +o allexport
fi
if [ -z "${MACHINE_PROVISIONING_SECRET:-}" ]; then
echo "MACHINE_PROVISIONING_SECRET ausente — pulando smoke test"; exit 0
fi
HOSTNAME_TEST="ci-smoke-$(date +%s)"
BODY='{"provisioningSecret":"'"$MACHINE_PROVISIONING_SECRET"'","tenantId":"tenant-atlas","hostname":"'"$HOSTNAME_TEST"'","os":{"name":"Linux","version":"6.1.0","architecture":"x86_64"},"macAddresses":["AA:BB:CC:DD:EE:FF"],"serialNumbers":[],"metadata":{"inventory":{"cpu":"i7","ramGb":16}},"registeredBy":"ci-smoke"}'
HTTP=$(curl -sS -o resp.json -w "%{http_code}" -H 'Content-Type: application/json' -d "$BODY" https://tickets.esdrasrenan.com.br/api/machines/register || true)
echo "Register HTTP=$HTTP"
if [ "$HTTP" != "201" ]; then
echo "Register failed:"; tail -c 600 resp.json || true; exit 1; fi
TOKEN=$(node -e 'try{const j=require("fs").readFileSync("resp.json","utf8");process.stdout.write(JSON.parse(j).machineToken||"");}catch(e){process.stdout.write("")}' )
if [ -z "$TOKEN" ]; then echo "Missing token in register response"; exit 1; fi
HB=$(curl -sS -o /dev/null -w "%{http_code}" -H 'Content-Type: application/json' -d '{"machineToken":"'"$TOKEN"'","status":"online","metrics":{"cpuPct":5,"memFreePct":70}}' https://tickets.esdrasrenan.com.br/api/machines/heartbeat || true)
echo "Heartbeat HTTP=$HB"
if [ "$HB" != "200" ]; then echo "Heartbeat failed"; exit 1; fi
- name: Cleanup old build workdirs (keep last 2)
run: |
set -e
ROOT="$HOME/apps"
KEEP=2
PATTERN='web.build.*'
ACTIVE="$HOME/apps/sistema"
echo "Scanning $ROOT for old $PATTERN dirs"
LIST=$(find "$ROOT" -maxdepth 1 -type d -name "$PATTERN" | sort -r || true)
echo "$LIST" | sed -n "1,${KEEP}p" | sed 's/^/Keeping: /' || true
echo "$LIST" | sed "1,${KEEP}d" | while read dir; do
[ -z "$dir" ] && continue
if [ -n "$ACTIVE" ] && [ "$(readlink -f "$dir")" = "$ACTIVE" ]; then
echo "Skipping active dir (in use by APP_DIR): $dir"; continue
fi
echo "Removing $dir"
chmod -R u+rwX "$dir" 2>/dev/null || true
rm -rf "$dir" || {
echo "Local rm failed, falling back to docker (root) cleanup for $dir..."
docker run --rm -v "$dir":/target alpine:3 sh -lc 'chown -R 1000:1000 /target 2>/dev/null || true; chmod -R u+rwX /target 2>/dev/null || true; rm -rf /target/* /target/.[!.]* /target/..?* 2>/dev/null || true' || true
rm -rf "$dir" 2>/dev/null || rmdir "$dir" 2>/dev/null || true
}
done
echo "Disk usage (top 10 under $ROOT):"
du -sh "$ROOT"/* 2>/dev/null | sort -rh | head -n 10 || true
- name: Restart web service with new code (skip — stack deploy already updated)
if: ${{ always() && false }}
run: |
docker service update --force sistema_web
# Comentado: o stack deploy já atualiza os serviços com update_config.order: start-first
# Forçar update aqui causa downtime porque ignora a estratégia de rolling update
# - name: Restart Convex backend service (optional)
# run: |
# docker service update --force sistema_convex_backend
convex_deploy:
name: Deploy Convex functions
needs: changes
timeout-minutes: 20
# Executa quando convex/** mudar ou via workflow_dispatch
if: ${{ github.event_name == 'workflow_dispatch' || needs.changes.outputs.convex == 'true' }}
runs-on: [ self-hosted, linux, vps ]
env:
APP_DIR: /srv/apps/sistema
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Determine APP_DIR (fallback safe path)
id: appdir
run: |
TS=$(date +%s)
# Use a convex-specific build dir to avoid clashes with web job
FALLBACK_DIR="$HOME/apps/convex.build.$TS"
mkdir -p "$FALLBACK_DIR"
echo "Using APP_DIR (fallback)=$FALLBACK_DIR"
echo "EFFECTIVE_APP_DIR=$FALLBACK_DIR" >> "$GITHUB_ENV"
- name: Sync workspace to APP_DIR (preserving local env)
run: |
mkdir -p "$EFFECTIVE_APP_DIR"
RSYNC_FLAGS="-az --inplace --no-times --no-perms --no-owner --no-group --delete"
rsync $RSYNC_FLAGS \
--filter='protect .next.old*' \
--exclude '.next.old*' \
--exclude '.env*' \
--exclude 'apps/desktop/.env*' \
--exclude 'convex/.env*' \
--filter='protect node_modules' \
--filter='protect node_modules/**' \
--filter='protect .pnpm-store' \
--filter='protect .pnpm-store/**' \
--exclude '.git' \
--exclude '.next' \
--exclude 'node_modules' \
--exclude 'node_modules/**' \
--exclude '.pnpm-store' \
--exclude '.pnpm-store/**' \
./ "$EFFECTIVE_APP_DIR"/
- name: Acquire Convex admin key
id: key
run: |
echo "Waiting for Convex container..."
CID=""
# Aguarda ate 60s (12 tentativas x 5s) pelo container ficar pronto
# Nao forca restart - deixa o Swarm gerenciar via health checks
for attempt in $(seq 1 12); do
CID=$(docker ps --format '{{.ID}} {{.Names}}' | awk '/sistema_convex_backend/{print $1; exit}')
if [ -n "$CID" ]; then
echo "Convex container ready (CID=$CID)"
break
fi
echo "Attempt $attempt/12: container not ready yet; waiting 5s..."
sleep 5
done
CONVEX_IMAGE="ghcr.io/get-convex/convex-backend:latest"
if [ -n "$CID" ]; then
KEY=$(docker exec -i "$CID" /bin/sh -lc './generate_admin_key.sh' | tr -d '\r' | grep -o 'convex-self-hosted|[^ ]*' | tail -n1)
else
echo "No running convex container detected; attempting offline admin key extraction..."
VOLUME="sistema_convex_data"
if docker volume inspect "$VOLUME" >/dev/null 2>&1; then
KEY=$(docker run --rm --entrypoint /bin/sh -v "$VOLUME":/convex/data "$CONVEX_IMAGE" -lc './generate_admin_key.sh' | tr -d '\r' | grep -o 'convex-self-hosted|[^ ]*' | tail -n1)
else
echo "Volume $VOLUME nao encontrado; nao foi possivel extrair a chave admin"
fi
fi
echo "ADMIN_KEY=$KEY" >> $GITHUB_OUTPUT
echo "Admin key acquired? $([ -n "$KEY" ] && echo yes || echo no)"
if [ -z "$KEY" ]; then
echo "ERRO: Nao foi possivel obter a chave admin do Convex"
docker service ps sistema_convex_backend || true
exit 1
fi
- name: Bring convex.json from live app if present
run: |
if [ -f "$APP_DIR/convex.json" ]; then
echo "Copying $APP_DIR/convex.json -> $EFFECTIVE_APP_DIR/convex.json"
cp -f "$APP_DIR/convex.json" "$EFFECTIVE_APP_DIR/convex.json"
else
echo "No existing convex.json found at $APP_DIR; convex CLI will need self-hosted vars"
fi
- name: Set Convex env vars (self-hosted)
env:
CONVEX_SELF_HOSTED_URL: https://convex.esdrasrenan.com.br
CONVEX_SELF_HOSTED_ADMIN_KEY: ${{ steps.key.outputs.ADMIN_KEY }}
MACHINE_PROVISIONING_SECRET: ${{ secrets.MACHINE_PROVISIONING_SECRET }}
MACHINE_TOKEN_TTL_MS: ${{ secrets.MACHINE_TOKEN_TTL_MS }}
FLEET_SYNC_SECRET: ${{ secrets.FLEET_SYNC_SECRET }}
run: |
set -e
docker run --rm -i \
-v "$EFFECTIVE_APP_DIR":/app \
-w /app \
-e CONVEX_SELF_HOSTED_URL \
-e CONVEX_SELF_HOSTED_ADMIN_KEY \
-e MACHINE_PROVISIONING_SECRET \
-e MACHINE_TOKEN_TTL_MS \
-e FLEET_SYNC_SECRET \
-e CONVEX_TMPDIR=/app/.convex-tmp \
node:20-bullseye bash -lc "set -euo pipefail; curl -fsSL https://bun.sh/install | bash >/tmp/bun-install.log; export BUN_INSTALL=\"\${BUN_INSTALL:-/root/.bun}\"; export PATH=\"\$BUN_INSTALL/bin:\$PATH\"; export CONVEX_TMPDIR=/app/.convex-tmp; bun install --frozen-lockfile; \
if [ -n \"$MACHINE_PROVISIONING_SECRET\" ]; then bunx convex env set MACHINE_PROVISIONING_SECRET \"$MACHINE_PROVISIONING_SECRET\"; fi; \
if [ -n \"$MACHINE_TOKEN_TTL_MS\" ]; then bunx convex env set MACHINE_TOKEN_TTL_MS \"$MACHINE_TOKEN_TTL_MS\"; fi; \
if [ -n \"$FLEET_SYNC_SECRET\" ]; then bunx convex env set FLEET_SYNC_SECRET \"$FLEET_SYNC_SECRET\"; fi; \
bunx convex env list"
- name: Prepare Convex deploy workspace
run: |
cd "$EFFECTIVE_APP_DIR"
if [ -f .env ]; then
echo "Renaming .env -> .env.bak (Convex self-hosted deploy)"
mv -f .env .env.bak
fi
# Dedicated tmp dir outside convex/_generated so CLI cleanups don't remove it
mkdir -p .convex-tmp
- name: Deploy functions to Convex self-hosted
env:
CONVEX_SELF_HOSTED_URL: https://convex.esdrasrenan.com.br
CONVEX_SELF_HOSTED_ADMIN_KEY: ${{ steps.key.outputs.ADMIN_KEY }}
run: |
docker run --rm -i \
-v "$EFFECTIVE_APP_DIR":/app \
-w /app \
-e CI=true \
-e CONVEX_SELF_HOSTED_URL \
-e CONVEX_SELF_HOSTED_ADMIN_KEY \
-e CONVEX_TMPDIR=/app/.convex-tmp \
node:20-bullseye bash -lc "set -euo pipefail; curl -fsSL https://bun.sh/install | bash >/tmp/bun-install.log; export BUN_INSTALL=\"\${BUN_INSTALL:-/root/.bun}\"; export PATH=\"\$BUN_INSTALL/bin:\$PATH\"; export CONVEX_TMPDIR=/app/.convex-tmp; bun install --frozen-lockfile; bunx convex deploy"
- name: Cleanup old convex build workdirs (keep last 2)
run: |
set -e
ROOT="$HOME/apps"
KEEP=2
PATTERN='convex.build.*'
LIST=$(find "$ROOT" -maxdepth 1 -type d -name "$PATTERN" | sort -r || true)
echo "$LIST" | sed -n "1,${KEEP}p" | sed 's/^/Keeping: /' || true
echo "$LIST" | sed "1,${KEEP}d" | while read dir; do
[ -z "$dir" ] && continue
echo "Removing $dir"
chmod -R u+rwX "$dir" 2>/dev/null || true
rm -rf "$dir" || {
echo "Local rm failed, falling back to docker (root) cleanup for $dir..."
docker run --rm -v "$dir":/target alpine:3 sh -lc 'chown -R 1000:1000 /target 2>/dev/null || true; chmod -R u+rwX /target 2>/dev/null || true; rm -rf /target/* /target/.[!.]* /target/..?* 2>/dev/null || true' || true
rm -rf "$dir" 2>/dev/null || rmdir "$dir" 2>/dev/null || true
}
done
desktop_release:
name: Desktop Release (Windows)
timeout-minutes: 30
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
runs-on: [ self-hosted, windows, desktop ]
defaults:
run:
working-directory: apps/desktop
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
version: 10.20.0
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: 20
- name: Install deps (desktop)
run: pnpm install --frozen-lockfile
- name: Build with Tauri
uses: tauri-apps/tauri-action@v0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
TAURI_PRIVATE_KEY: ${{ secrets.TAURI_PRIVATE_KEY }}
TAURI_KEY_PASSWORD: ${{ secrets.TAURI_KEY_PASSWORD }}
with:
projectPath: apps/desktop
- name: Upload latest.json + bundles to VPS
uses: appleboy/scp-action@v0.1.7
with:
host: ${{ secrets.VPS_HOST }}
username: ${{ secrets.VPS_USER }}
key: ${{ secrets.VPS_SSH_KEY }}
source: |
**/bundle/**/latest.json
**/bundle/**/*
target: ${{ env.VPS_UPDATES_DIR }}
overwrite: true
diagnose_convex:
name: Diagnose Convex (env + register test)
timeout-minutes: 10
if: ${{ github.event_name == 'workflow_dispatch' }}
runs-on: [ self-hosted, linux, vps ]
steps:
- name: Print service env and .env subset
run: |
echo "=== Convex service env ==="
docker service inspect sistema_convex_backend --format '{{range .Spec.TaskTemplate.ContainerSpec.Env}}{{println .}}{{end}}' || true
echo
echo "=== /srv/apps/sistema/.env subset ==="
[ -f /srv/apps/sistema/.env ] && grep -E '^(MACHINE_PROVISIONING_SECRET|MACHINE_TOKEN_TTL_MS|FLEET_SYNC_SECRET|NEXT_PUBLIC_CONVEX_URL)=' -n /srv/apps/sistema/.env || echo '(no .env)'
- name: Acquire Convex admin key
id: key
run: |
echo "Waiting for Convex container..."
CID=""
# Aguarda ate 60s (12 tentativas x 5s) pelo container ficar pronto
for attempt in $(seq 1 12); do
CID=$(docker ps --format '{{.ID}} {{.Names}}' | awk '/sistema_convex_backend/{print $1; exit}')
if [ -n "$CID" ]; then
echo "Convex container ready (CID=$CID)"
break
fi
echo "Attempt $attempt/12: container not ready yet; waiting 5s..."
sleep 5
done
CONVEX_IMAGE="ghcr.io/get-convex/convex-backend:latest"
if [ -n "$CID" ]; then
KEY=$(docker exec -i "$CID" /bin/sh -lc './generate_admin_key.sh' | tr -d '\r' | grep -o 'convex-self-hosted|[^ ]*' | tail -n1)
else
echo "No running convex container detected; attempting offline admin key extraction..."
VOLUME="sistema_convex_data"
if docker volume inspect "$VOLUME" >/dev/null 2>&1; then
KEY=$(docker run --rm --entrypoint /bin/sh -v "$VOLUME":/convex/data "$CONVEX_IMAGE" -lc './generate_admin_key.sh' | tr -d '\r' | grep -o 'convex-self-hosted|[^ ]*' | tail -n1)
else
echo "Volume $VOLUME nao encontrado; nao foi possivel extrair a chave admin"
fi
fi
echo "ADMIN_KEY=$KEY" >> $GITHUB_OUTPUT
echo "Admin key acquired? $([ -n "$KEY" ] && echo yes || echo no)"
- name: List Convex env and set missing
env:
CONVEX_SELF_HOSTED_URL: https://convex.esdrasrenan.com.br
ADMIN_KEY: ${{ steps.key.outputs.ADMIN_KEY }}
run: |
set -e
if [ -f /srv/apps/sistema/.env ]; then
set -o allexport
. /srv/apps/sistema/.env
set +o allexport
fi
docker run --rm -i \
-v /srv/apps/sistema:/app -w /app \
-e CONVEX_SELF_HOSTED_URL -e CONVEX_SELF_HOSTED_ADMIN_KEY="$ADMIN_KEY" \
-e MACHINE_PROVISIONING_SECRET -e MACHINE_TOKEN_TTL_MS -e FLEET_SYNC_SECRET \
node:20-bullseye bash -lc "set -euo pipefail; curl -fsSL https://bun.sh/install | bash >/tmp/bun-install.log; export BUN_INSTALL=\"\${BUN_INSTALL:-/root/.bun}\"; export PATH=\"\$BUN_INSTALL/bin:\$PATH\"; bun install --frozen-lockfile; \
unset CONVEX_DEPLOYMENT; bunx convex env list; \
if [ -n \"$MACHINE_PROVISIONING_SECRET\" ]; then bunx convex env set MACHINE_PROVISIONING_SECRET \"$MACHINE_PROVISIONING_SECRET\"; fi; \
if [ -n \"$MACHINE_TOKEN_TTL_MS\" ]; then bunx convex env set MACHINE_TOKEN_TTL_MS \"$MACHINE_TOKEN_TTL_MS\"; fi; \
if [ -n \"$FLEET_SYNC_SECRET\" ]; then bunx convex env set FLEET_SYNC_SECRET \"$FLEET_SYNC_SECRET\"; fi; \
bunx convex env list"
- name: Test register from runner
run: |
HOST="vm-teste-$(date +%s)"
DATA='{"provisioningSecret":"'"${MACHINE_PROVISIONING_SECRET:-"71daa9ef54cb224547e378f8121ca898b614446c142a132f73c2221b4d53d7d6"}"'","tenantId":"tenant-atlas","hostname":"'"$HOST"'","os":{"name":"Linux","version":"6.1.0","architecture":"x86_64"},"macAddresses":["AA:BB:CC:DD:EE:FF"],"serialNumbers":[],"metadata":{"inventario":{"cpu":"i7","ramGb":16}},"registeredBy":"diag-test"}'
HTTP=$(curl -sS -o resp.json -w "%{http_code}" -H 'Content-Type: application/json' -d "$DATA" https://tickets.esdrasrenan.com.br/api/machines/register || true)
echo "Register HTTP=$HTTP" && tail -c 400 resp.json || true

View file

@ -1,62 +0,0 @@
name: Quality Checks
on:
push:
branches:
- main
pull_request:
branches:
- main
jobs:
lint-test-build:
name: Lint, Test and Build
runs-on: ubuntu-latest
env:
BETTER_AUTH_SECRET: test-secret
NEXT_PUBLIC_APP_URL: http://localhost:3000
BETTER_AUTH_URL: http://localhost:3000
NEXT_PUBLIC_CONVEX_URL: http://localhost:3210
DATABASE_URL: file:./prisma/db.dev.sqlite
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: 20
- name: Setup Bun
uses: oven-sh/setup-bun@v2
with:
bun-version: 1.3.1
- name: Verify Bun
run: bun --version
- name: Install dependencies
run: bun install --frozen-lockfile
- name: Cache Next.js build cache
uses: actions/cache@v4
with:
path: |
${{ github.workspace }}/.next/cache
key: ${{ runner.os }}-nextjs-${{ hashFiles('pnpm-lock.yaml', 'bun.lock') }}-${{ hashFiles('**/*.{js,jsx,ts,tsx}') }}
restore-keys: |
${{ runner.os }}-nextjs-${{ hashFiles('pnpm-lock.yaml', 'bun.lock') }}-
- name: Generate Prisma client
env:
PRISMA_ENGINES_CHECKSUM_IGNORE_MISSING: "1"
run: bun run prisma:generate
- name: Lint
run: bun run lint
- name: Test
run: bun test
- name: Build
run: bun run build:bun

432
.github/workflows/ci-cd-web-desktop.yml vendored Normal file
View file

@ -0,0 +1,432 @@
name: CI/CD Web + Desktop
on:
push:
branches: [ main ]
tags:
- 'v*.*.*'
workflow_dispatch:
inputs:
force_web_deploy:
description: 'Forçar deploy do Web (ignorar filtro)?'
required: false
default: 'false'
force_convex_deploy:
description: 'Forçar deploy do Convex (ignorar filtro)?'
required: false
default: 'false'
env:
APP_DIR: /srv/apps/sistema
VPS_UPDATES_DIR: /var/www/updates
RUN_MACHINE_SMOKE: ${{ vars.RUN_MACHINE_SMOKE || secrets.RUN_MACHINE_SMOKE || 'false' }}
jobs:
changes:
name: Detect changes
runs-on: ubuntu-latest
outputs:
convex: ${{ steps.filter.outputs.convex }}
web: ${{ steps.filter.outputs.web }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Paths filter
id: filter
uses: dorny/paths-filter@v3
with:
filters: |
convex:
- 'convex/**'
web:
- 'src/**'
- 'public/**'
- 'prisma/**'
- 'next.config.ts'
- 'package.json'
- 'pnpm-lock.yaml'
- 'tsconfig.json'
- 'middleware.ts'
- 'stack.yml'
deploy:
name: Deploy (VPS Linux)
needs: changes
# Executa em qualquer push na main (independente do filtro) ou quando disparado manualmente
if: ${{ github.event_name == 'workflow_dispatch' || github.ref == 'refs/heads/main' }}
runs-on: [ self-hosted, linux, vps ]
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Determine APP_DIR (fallback safe path)
id: appdir
run: |
TS=$(date +%s)
FALLBACK_DIR="$HOME/apps/sistema.build.$TS"
mkdir -p "$FALLBACK_DIR"
echo "Using APP_DIR (fallback)=$FALLBACK_DIR"
echo "EFFECTIVE_APP_DIR=$FALLBACK_DIR" >> "$GITHUB_ENV"
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
version: 9
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: 20
cache: 'pnpm'
- name: Permissions diagnostic (server paths)
run: |
set +e
echo "== Basic context =="
whoami || true
id || true
groups || true
umask || true
echo "HOME=$HOME"
echo "APP_DIR(default)=${APP_DIR:-/srv/apps/sistema}"
echo "EFFECTIVE_APP_DIR=$EFFECTIVE_APP_DIR"
echo "\n== Permissions check =="
check_path() {
P="$1"
echo "-- $P"
if [ -e "$P" ]; then
stat -c '%A %U:%G %n' "$P" 2>/dev/null || ls -ld "$P" || true
echo -n "WRITABLE? "; [ -w "$P" ] && echo yes || echo no
if command -v namei >/dev/null 2>&1; then
namei -l "$P" || true
fi
TMP="$P/.permtest.$$"
(echo test > "$TMP" 2>/dev/null && echo "CREATE_FILE: ok" && rm -f "$TMP") || echo "CREATE_FILE: failed"
else
echo "(missing)"
fi
}
check_path "/srv/apps/sistema"
check_path "/srv/apps/sistema/src/app/machines/handshake"
check_path "/srv/apps/sistema/apps/desktop/node_modules"
check_path "/srv/apps/sistema/node_modules"
check_path "$EFFECTIVE_APP_DIR"
check_path "$EFFECTIVE_APP_DIR/node_modules"
- name: Sync workspace to APP_DIR (preserving local env)
run: |
mkdir -p "$EFFECTIVE_APP_DIR"
RSYNC_FLAGS="-az --inplace --no-times --no-perms --no-owner --no-group --delete"
# Excluir .env apenas quando copiando para o diretório padrão (/srv) para preservar segredos locais
EXCLUDE_ENV="--exclude '.env*' --exclude 'apps/desktop/.env*' --exclude 'convex/.env*'"
if [ "$EFFECTIVE_APP_DIR" != "${APP_DIR:-/srv/apps/sistema}" ]; then
EXCLUDE_ENV=""
fi
rsync $RSYNC_FLAGS \
--filter='protect .next.old*' \
--exclude '.next.old*' \
--filter='protect node_modules' \
--filter='protect node_modules/**' \
--filter='protect .pnpm-store' \
--filter='protect .pnpm-store/**' \
--filter='protect .env' \
--filter='protect .env*' \
--filter='protect apps/desktop/.env*' \
--filter='protect convex/.env*' \
--exclude '.git' \
--exclude '.next' \
--exclude 'node_modules' \
--exclude 'node_modules/**' \
--exclude '.pnpm-store' \
--exclude '.pnpm-store/**' \
$EXCLUDE_ENV \
./ "$EFFECTIVE_APP_DIR"/
- name: Copy production .env if present
run: |
DEFAULT_DIR="${APP_DIR:-/srv/apps/sistema}"
if [ "$EFFECTIVE_APP_DIR" != "$DEFAULT_DIR" ] && [ -f "$DEFAULT_DIR/.env" ]; then
echo "Copying production .env from $DEFAULT_DIR to $EFFECTIVE_APP_DIR"
cp -f "$DEFAULT_DIR/.env" "$EFFECTIVE_APP_DIR/.env"
fi
- name: Prune workspace for server-only build
run: |
cd "$EFFECTIVE_APP_DIR"
# Keep only root (web) as a package in this effective workspace
printf "packages:\n - .\n\nignoredBuiltDependencies:\n - '@prisma/client'\n - '@prisma/engines'\n - '@tailwindcss/oxide'\n - esbuild\n - prisma\n - sharp\n - unrs-resolver\n" > pnpm-workspace.yaml
# Remove desktop app to avoid pnpm touching its node_modules on this runner
rm -rf apps/desktop || true
- name: Clean Next.js cache (.next) to avoid EACCES
run: |
cd "$EFFECTIVE_APP_DIR"
if [ -e .next ]; then
echo "Removing existing .next (may be root-owned from previous container)"
rm -rf .next || (mv .next ".next.old.$(date +%s)" || true)
fi
mkdir -p .next
chmod -R u+rwX .next || true
- name: Install and build (Next.js)
run: |
cd "$EFFECTIVE_APP_DIR"
corepack enable || true
pnpm --filter web install --no-frozen-lockfile
pnpm prisma:generate
pnpm build
- name: Swarm deploy (stack.yml)
run: |
cd "$EFFECTIVE_APP_DIR"
# Exporta variáveis do .env para substituição no stack (ex.: MACHINE_PROVISIONING_SECRET)
set -o allexport
if [ -f .env ]; then . ./.env; fi
set +o allexport
APP_DIR="$EFFECTIVE_APP_DIR" RELEASE_SHA=${{ github.sha }} docker stack deploy --with-registry-auth -c stack.yml sistema
- name: Ensure Convex service envs and restart
run: |
cd "$EFFECTIVE_APP_DIR"
set -o allexport
if [ -f .env ]; then . ./.env; fi
set +o allexport
echo "Ensuring Convex envs on service: sistema_convex_backend"
if [ -n "${MACHINE_PROVISIONING_SECRET:-}" ]; then
docker service update --env-add MACHINE_PROVISIONING_SECRET="${MACHINE_PROVISIONING_SECRET}" sistema_convex_backend || true
fi
if [ -n "${MACHINE_TOKEN_TTL_MS:-}" ]; then
docker service update --env-add MACHINE_TOKEN_TTL_MS="${MACHINE_TOKEN_TTL_MS}" sistema_convex_backend || true
fi
if [ -n "${FLEET_SYNC_SECRET:-}" ]; then
docker service update --env-add FLEET_SYNC_SECRET="${FLEET_SYNC_SECRET}" sistema_convex_backend || true
fi
echo "Current envs:"
docker service inspect sistema_convex_backend --format '{{range .Spec.TaskTemplate.ContainerSpec.Env}}{{println .}}{{end}}' || true
echo "Forcing service restart..."
docker service update --force sistema_convex_backend || true
- name: Smoke test — register + heartbeat
run: |
set -e
if [ "${RUN_MACHINE_SMOKE:-false}" != "true" ]; then
echo "RUN_MACHINE_SMOKE != true — pulando smoke test"; exit 0
fi
# Load MACHINE_PROVISIONING_SECRET from production .env on the host
if [ -f /srv/apps/sistema/.env ]; then
set -o allexport
. /srv/apps/sistema/.env
set +o allexport
fi
if [ -z "${MACHINE_PROVISIONING_SECRET:-}" ]; then
echo "MACHINE_PROVISIONING_SECRET ausente — pulando smoke test"; exit 0
fi
HOSTNAME_TEST="ci-smoke-$(date +%s)"
BODY='{"provisioningSecret":"'"$MACHINE_PROVISIONING_SECRET"'","tenantId":"tenant-atlas","hostname":"'"$HOSTNAME_TEST"'","os":{"name":"Linux","version":"6.1.0","architecture":"x86_64"},"macAddresses":["AA:BB:CC:DD:EE:FF"],"serialNumbers":[],"metadata":{"inventory":{"cpu":"i7","ramGb":16}},"registeredBy":"ci-smoke"}'
HTTP=$(curl -sS -o resp.json -w "%{http_code}" -H 'Content-Type: application/json' -d "$BODY" https://tickets.esdrasrenan.com.br/api/machines/register || true)
echo "Register HTTP=$HTTP"
if [ "$HTTP" != "201" ]; then
echo "Register failed:"; tail -c 600 resp.json || true; exit 1; fi
TOKEN=$(node -e 'try{const j=require("fs").readFileSync("resp.json","utf8");process.stdout.write(JSON.parse(j).machineToken||"");}catch(e){process.stdout.write("")}' )
if [ -z "$TOKEN" ]; then echo "Missing token in register response"; exit 1; fi
HB=$(curl -sS -o /dev/null -w "%{http_code}" -H 'Content-Type: application/json' -d '{"machineToken":"'"$TOKEN"'","status":"online","metrics":{"cpuPct":5,"memFreePct":70}}' https://tickets.esdrasrenan.com.br/api/machines/heartbeat || true)
echo "Heartbeat HTTP=$HB"
if [ "$HB" != "200" ]; then echo "Heartbeat failed"; exit 1; fi
- name: Cleanup old build workdirs (keep last 3)
run: |
set -e
find "$HOME/apps" -maxdepth 1 -type d -name 'sistema.build.*' | sort -r | tail -n +4 | while read dir; do
echo "Removing $dir"
sudo rm -rf "$dir"
done || true
- name: Restart web service with new code
run: |
docker service update --force sistema_web || true
- name: Restart Convex backend service (optional)
run: |
docker service update --force sistema_convex_backend || true
convex_deploy:
name: Deploy Convex functions
needs: changes
# Executa em workflow_dispatch, push na main, ou quando convex/** mudar
if: ${{ github.event_name == 'workflow_dispatch' || github.ref == 'refs/heads/main' || needs.changes.outputs.convex == 'true' }}
runs-on: [ self-hosted, linux, vps ]
env:
APP_DIR: /srv/apps/sistema
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Determine APP_DIR (fallback safe path)
id: appdir
run: |
TS=$(date +%s)
FALLBACK_DIR="$HOME/apps/sistema.build.$TS"
mkdir -p "$FALLBACK_DIR"
echo "Using APP_DIR (fallback)=$FALLBACK_DIR"
echo "EFFECTIVE_APP_DIR=$FALLBACK_DIR" >> "$GITHUB_ENV"
- name: Sync workspace to APP_DIR (preserving local env)
run: |
mkdir -p "$EFFECTIVE_APP_DIR"
RSYNC_FLAGS="-az --inplace --no-times --no-perms --no-owner --no-group --delete"
rsync $RSYNC_FLAGS \
--filter='protect .next.old*' \
--exclude '.next.old*' \
--exclude '.env*' \
--exclude 'apps/desktop/.env*' \
--exclude 'convex/.env*' \
--filter='protect node_modules' \
--filter='protect node_modules/**' \
--filter='protect .pnpm-store' \
--filter='protect .pnpm-store/**' \
--exclude '.git' \
--exclude '.next' \
--exclude 'node_modules' \
--exclude 'node_modules/**' \
--exclude '.pnpm-store' \
--exclude '.pnpm-store/**' \
./ "$EFFECTIVE_APP_DIR"/
- name: Set Convex env vars (self-hosted)
env:
CONVEX_SELF_HOSTED_URL: ${{ secrets.CONVEX_SELF_HOSTED_URL }}
CONVEX_SELF_HOSTED_ADMIN_KEY: ${{ secrets.CONVEX_SELF_HOSTED_ADMIN_KEY }}
MACHINE_PROVISIONING_SECRET: ${{ secrets.MACHINE_PROVISIONING_SECRET }}
MACHINE_TOKEN_TTL_MS: ${{ secrets.MACHINE_TOKEN_TTL_MS }}
FLEET_SYNC_SECRET: ${{ secrets.FLEET_SYNC_SECRET }}
run: |
set -e
docker run --rm -i \
-v "$EFFECTIVE_APP_DIR":/app \
-w /app \
-e CONVEX_SELF_HOSTED_URL \
-e CONVEX_SELF_HOSTED_ADMIN_KEY \
-e MACHINE_PROVISIONING_SECRET \
-e MACHINE_TOKEN_TTL_MS \
-e FLEET_SYNC_SECRET \
node:20-bullseye bash -lc "set -euo pipefail; unset CONVEX_DEPLOYMENT; corepack enable; corepack prepare pnpm@9 --activate; pnpm install --frozen-lockfile --prod=false; \
if [ -n \"$MACHINE_PROVISIONING_SECRET\" ]; then pnpm exec convex env set MACHINE_PROVISIONING_SECRET \"$MACHINE_PROVISIONING_SECRET\" -y; fi; \
if [ -n \"$MACHINE_TOKEN_TTL_MS\" ]; then pnpm exec convex env set MACHINE_TOKEN_TTL_MS \"$MACHINE_TOKEN_TTL_MS\" -y; fi; \
if [ -n \"$FLEET_SYNC_SECRET\" ]; then pnpm exec convex env set FLEET_SYNC_SECRET \"$FLEET_SYNC_SECRET\" -y; fi; \
pnpm exec convex env list"
- name: Ensure .env is not present for Convex deploy
run: |
cd "$EFFECTIVE_APP_DIR"
if [ -f .env ]; then
echo "Renaming .env -> .env.bak (Convex self-hosted deploy)"
mv -f .env .env.bak
fi
- name: Deploy functions to Convex self-hosted
env:
CONVEX_SELF_HOSTED_URL: ${{ secrets.CONVEX_SELF_HOSTED_URL }}
CONVEX_SELF_HOSTED_ADMIN_KEY: ${{ secrets.CONVEX_SELF_HOSTED_ADMIN_KEY }}
run: |
docker run --rm -i \
-v "$EFFECTIVE_APP_DIR":/app \
-w /app \
-e CI=true \
-e CONVEX_SELF_HOSTED_URL \
-e CONVEX_SELF_HOSTED_ADMIN_KEY \
node:20-bullseye bash -lc "set -euo pipefail; unset CONVEX_DEPLOYMENT; corepack enable; corepack prepare pnpm@9 --activate; pnpm install --frozen-lockfile --prod=false; pnpm exec convex deploy"
desktop_release:
name: Desktop Release (Windows)
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
runs-on: [ self-hosted, windows, desktop ]
defaults:
run:
working-directory: apps/desktop
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
version: 9
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: 20
cache: 'pnpm'
- name: Install deps (desktop)
run: pnpm install --frozen-lockfile
- name: Build with Tauri
uses: tauri-apps/tauri-action@v0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
TAURI_PRIVATE_KEY: ${{ secrets.TAURI_PRIVATE_KEY }}
TAURI_KEY_PASSWORD: ${{ secrets.TAURI_KEY_PASSWORD }}
with:
projectPath: apps/desktop
- name: Upload latest.json + bundles to VPS
uses: appleboy/scp-action@v0.1.7
with:
host: ${{ secrets.VPS_HOST }}
username: ${{ secrets.VPS_USER }}
key: ${{ secrets.VPS_SSH_KEY }}
source: |
**/bundle/**/latest.json
**/bundle/**/*
target: ${{ env.VPS_UPDATES_DIR }}
overwrite: true
diagnose_convex:
name: Diagnose Convex (env + register test)
if: ${{ github.event_name == 'workflow_dispatch' }}
runs-on: [ self-hosted, linux, vps ]
steps:
- name: Print service env and .env subset
run: |
echo "=== Convex service env ==="
docker service inspect sistema_convex_backend --format '{{range .Spec.TaskTemplate.ContainerSpec.Env}}{{println .}}{{end}}' || true
echo
echo "=== /srv/apps/sistema/.env subset ==="
[ -f /srv/apps/sistema/.env ] && grep -E '^(MACHINE_PROVISIONING_SECRET|MACHINE_TOKEN_TTL_MS|FLEET_SYNC_SECRET|NEXT_PUBLIC_CONVEX_URL)=' -n /srv/apps/sistema/.env || echo '(no .env)'
- name: Acquire Convex admin key
id: key
run: |
CID=$(docker ps --format '{{.ID}} {{.Names}}' | awk '/sistema_convex_backend/{print $1; exit}')
if [ -z "$CID" ]; then echo "No convex container"; exit 1; fi
KEY=$(docker exec -i "$CID" /bin/sh -lc './generate_admin_key.sh' | tr -d '\r' | grep -o 'convex-self-hosted|[^ ]*' | tail -n1)
echo "ADMIN_KEY=$KEY" >> $GITHUB_OUTPUT
echo "Admin key acquired? $([ -n "$KEY" ] && echo yes || echo no)"
- name: List Convex env and set missing
env:
CONVEX_SELF_HOSTED_URL: https://convex.esdrasrenan.com.br
ADMIN_KEY: ${{ steps.key.outputs.ADMIN_KEY }}
run: |
set -e
if [ -f /srv/apps/sistema/.env ]; then
set -o allexport
. /srv/apps/sistema/.env
set +o allexport
fi
docker run --rm -i \
-v /srv/apps/sistema:/app -w /app \
-e CONVEX_SELF_HOSTED_URL -e CONVEX_SELF_HOSTED_ADMIN_KEY="$ADMIN_KEY" \
-e MACHINE_PROVISIONING_SECRET -e MACHINE_TOKEN_TTL_MS -e FLEET_SYNC_SECRET \
node:20-bullseye bash -lc "set -euo pipefail; corepack enable; corepack prepare pnpm@9 --activate; pnpm i --frozen-lockfile --prod=false; \
unset CONVEX_DEPLOYMENT; pnpm exec convex env list; \
if [ -n \"$MACHINE_PROVISIONING_SECRET\" ]; then pnpm exec convex env set MACHINE_PROVISIONING_SECRET \"$MACHINE_PROVISIONING_SECRET\" -y; fi; \
if [ -n \"$MACHINE_TOKEN_TTL_MS\" ]; then pnpm exec convex env set MACHINE_TOKEN_TTL_MS \"$MACHINE_TOKEN_TTL_MS\" -y; fi; \
if [ -n \"$FLEET_SYNC_SECRET\" ]; then pnpm exec convex env set FLEET_SYNC_SECRET \"$FLEET_SYNC_SECRET\" -y; fi; \
pnpm exec convex env list"
- name: Test register from runner
run: |
HOST="vm-teste-$(date +%s)"
DATA='{"provisioningSecret":"'"${MACHINE_PROVISIONING_SECRET:-"71daa9ef54cb224547e378f8121ca898b614446c142a132f73c2221b4d53d7d6"}"'","tenantId":"tenant-atlas","hostname":"'"$HOST"'","os":{"name":"Linux","version":"6.1.0","architecture":"x86_64"},"macAddresses":["AA:BB:CC:DD:EE:FF"],"serialNumbers":[],"metadata":{"inventario":{"cpu":"i7","ramGb":16}},"registeredBy":"diag-test"}'
HTTP=$(curl -sS -o resp.json -w "%{http_code}" -H 'Content-Type: application/json' -d "$DATA" https://tickets.esdrasrenan.com.br/api/machines/register || true)
echo "Register HTTP=$HTTP" && tail -c 400 resp.json || true

View file

@ -36,7 +36,7 @@ jobs:
node-version: 20 node-version: 20
- name: Enable Corepack - name: Enable Corepack
run: corepack enable && corepack prepare pnpm@10.20.0 --activate run: corepack enable && corepack prepare pnpm@9 --activate
- name: Install Rust (stable) - name: Install Rust (stable)
uses: dtolnay/rust-toolchain@stable uses: dtolnay/rust-toolchain@stable

79
.gitignore vendored
View file

@ -1,40 +1,34 @@
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files. # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
# dependencies # dependencies
/node_modules /node_modules
/.pnp /.pnp
.pnp.* .pnp.*
.yarn/* .yarn/*
!.yarn/patches !.yarn/patches
!.yarn/plugins !.yarn/plugins
!.yarn/releases !.yarn/releases
!.yarn/versions !.yarn/versions
# testing # testing
/coverage /coverage
# next.js # next.js
/.next/ /.next/
/out/ /out/
# React Email # production
/.react-email/ /build
/emails/out/
# misc
# production .DS_Store
/build *.pem
# misc
.DS_Store
*.pem
*.sqlite *.sqlite
# external experiments
nova-calendar-main/ # debug
npm-debug.log*
# debug yarn-debug.log*
npm-debug.log* yarn-error.log*
yarn-debug.log*
yarn-error.log*
.pnpm-debug.log* .pnpm-debug.log*
# env files (can opt-in for committing if needed) # env files (can opt-in for committing if needed)
@ -42,10 +36,6 @@ yarn-error.log*
!.env.example !.env.example
!apps/desktop/.env.example !apps/desktop/.env.example
# Accidental Windows duplicate downloads (e.g., "env (1)")
env (*)
env (1)
# vercel # vercel
.vercel .vercel
@ -62,12 +52,3 @@ Screenshot*.png
# Ignore NTFS ADS streams accidentally committed from Windows downloads # Ignore NTFS ADS streams accidentally committed from Windows downloads
*:*Zone.Identifier *:*Zone.Identifier
*:\:Zone.Identifier *:\:Zone.Identifier
# Infrastructure secrets
.ci.env
# ferramentas externas
rustdesk/
# Prisma generated files
src/generated/
apps/desktop/service/target/

View file

@ -1,29 +0,0 @@
# Runtime image with Node 22 + Bun 1.3.4 and build toolchain preinstalled
FROM node:22-bullseye-slim
# Bun's installer targets $BUN_INSTALL; expose its bin dir on PATH for later layers.
ENV BUN_INSTALL=/root/.bun
ENV PATH="$BUN_INSTALL/bin:$PATH"
# Toolchain needed to compile native Node/Bun addons (node-gyp: python3, make,
# build-essential, pkg-config); apt lists removed in the same layer to keep the
# image small.
RUN apt-get update -y \
&& apt-get install -y --no-install-recommends \
ca-certificates \
curl \
gnupg \
unzip \
build-essential \
python3 \
make \
pkg-config \
git \
&& rm -rf /var/lib/apt/lists/*
# Install Bun 1.3.4
# Version is pinned ("bun-v1.3.4") for reproducible builds; symlinks make
# bun/bunx resolvable even for shells that do not inherit the ENV PATH above.
RUN curl -fsSL https://bun.sh/install \
| bash -s -- bun-v1.3.4 \
&& ln -sf /root/.bun/bin/bun /usr/local/bin/bun \
&& ln -sf /root/.bun/bin/bun /usr/local/bin/bunx
WORKDIR /app
# We'll mount the app code at runtime; image just provides runtimes/toolchains.
CMD ["bash"]

101
README.md
View file

@ -1,65 +1,51 @@
## Sistema de Chamados ## Sistema de Chamados
Aplicacao **Next.js 16 (App Router)** com **React 19**, **Convex** e **Better Auth** para gestao de tickets da Rever. A stack ainda inclui **Prisma 7** (PostgreSQL), **Tailwind** e **Turbopack** como bundler padrao (webpack permanece disponivel como fallback). Todo o codigo-fonte fica na raiz do monorepo seguindo as convencoes do App Router. Aplicação Next.js 15 com Convex e Better Auth para gestão de tickets da Rever. Todo o código-fonte está organizado diretamente na raiz do repositório, conforme convenções do Next.js.
## Requisitos ## Requisitos
- Bun >= 1.3 (recomendado 1.3.1). Após instalar via script oficial, adicione `export PATH="$HOME/.bun/bin:$PATH"` ao seu shell (ex.: `.bashrc`) para ter `bun` disponível globalmente. - Node.js >= 20
- Node.js >= 20 (necessário para ferramentas auxiliares como Prisma CLI e Next.js em modo fallback). - pnpm >= 8
- CLI do Convex (`bunx convex dev` instalará automaticamente no primeiro uso, se ainda não estiver presente). - CLI do Convex (`pnpm dlx convex dev` instalará automaticamente no primeiro uso)
- GitHub Actions/autodeploy dependem dessas versões e do CLI do Convex disponível; use `npx convex --help` para confirmar.
## Configuração rápida ## Configuração rápida
1. Instale as dependências: 1. Instale as dependências:
```bash ```bash
bun install pnpm install
``` ```
2. Ajuste o arquivo `.env` (ou crie a partir de `.env.example`) e confirme os valores de: 2. Ajuste o arquivo `.env` (ou crie a partir de `.env.example`) e confirme os valores de:
- `NEXT_PUBLIC_CONVEX_URL` (gerado pelo Convex Dev) - `NEXT_PUBLIC_CONVEX_URL` (gerado pelo Convex Dev)
- `BETTER_AUTH_SECRET`, `BETTER_AUTH_URL`, `DATABASE_URL` (PostgreSQL, ex: `postgresql://postgres:dev@localhost:5432/sistema_chamados`) - `BETTER_AUTH_SECRET`, `BETTER_AUTH_URL`, `DATABASE_URL`
3. Aplique as migrações e gere o client Prisma: 3. Aplique as migrações e gere o client Prisma:
```bash ```bash
bunx prisma migrate deploy pnpm prisma migrate deploy
bun run prisma:generate pnpm prisma:generate
``` ```
4. Popule usuários padrão do Better Auth: 4. Popule usuários padrão do Better Auth:
```bash ```bash
bun run auth:seed pnpm auth:seed
``` ```
> Sempre que trocar de máquina ou quiser “zerar” o ambiente local, basta repetir os passos 3 e 4 com a mesma `DATABASE_URL`. 5. (Opcional) Para re-sincronizar manualmente as filas padrão, execute:
### Resetar rapidamente o ambiente local
1. Suba um PostgreSQL local (Docker recomendado):
```bash ```bash
docker run -d --name postgres-dev -p 5432:5432 -e POSTGRES_PASSWORD=dev -e POSTGRES_DB=sistema_chamados postgres:18 pnpm queues:ensure
``` ```
2. Aplique as migracoes: 6. Em um terminal, execute o backend em tempo real do Convex:
```bash ```bash
bunx prisma migrate deploy pnpm convex:dev
``` ```
3. Recrie/garanta as contas padrao de login: 7. Em outro terminal, suba o frontend Next.js:
```bash ```bash
bun run auth:seed pnpm dev
``` ```
4. Suba o servidor normalmente com `bun run dev`. 8. Com o Convex ativo, acesse `http://localhost:3000/dev/seed` uma vez para popular dados de demonstração (tickets, usuários, comentários) diretamente no banco do Convex.
### Subir serviços locais
- (Opcional) Para re-sincronizar manualmente as filas padrão, execute `bun run queues:ensure`.
- Em um terminal, rode o backend em tempo real do Convex com `bun run convex:dev:bun` (ou `bun run convex:dev` para o runtime Node).
- Em outro terminal, suba o frontend Next.js (Turbopack) com `bun run dev:bun` (`bun run dev:webpack` serve como fallback).
- Com o Convex rodando, acesse `http://localhost:3000/dev/seed` uma vez para popular dados de demonstração (tickets, usuários, comentários).
> Se o CLI perguntar sobre configuração do projeto Convex, escolha criar um novo deployment local (opção padrão) e confirme. As credenciais são armazenadas em `.convex/` automaticamente. > Se o CLI perguntar sobre configuração do projeto Convex, escolha criar um novo deployment local (opção padrão) e confirme. As credenciais são armazenadas em `.convex/` automaticamente.
### Documentação ### Deploy em produção (Traefik + Convex selfhosted)
- Índice de docs: `docs/README.md` - Guia completo: `docs/OPERACAO-PRODUCAO.md:1`.
- Operações (produção): `docs/OPERATIONS.md` (versão EN) e `docs/OPERACAO-PRODUCAO.md` (PT-BR) - Histórico de setup/decisões: `docs/SETUP-HISTORICO.md:1`.
- Guia de DEV: `docs/DEV.md` - Stack Swarm: `stack.yml:1` (roteado por Traefik, rede `traefik_public`).
- Testes automatizados (Vitest/Playwright): `docs/testes-vitest.md`
- Stack Swarm: `stack.yml` (roteado por Traefik, rede `traefik_public`).
### Variáveis de ambiente ### Variáveis de ambiente
@ -69,63 +55,32 @@ Aplicacao **Next.js 16 (App Router)** com **React 19**, **Convex** e **Better Au
### Guia de DEV (Prisma, Auth e Desktop/Tauri) ### Guia de DEV (Prisma, Auth e Desktop/Tauri)
Para fluxos detalhados de desenvolvimento — banco de dados local (PostgreSQL/Prisma), seed do Better Auth, ajustes do Prisma CLI no DEV e build do Desktop (Tauri) — consulte `docs/DEV.md`. Para fluxos detalhados de desenvolvimento — banco de dados local (SQLite/Prisma), seed do Better Auth, ajustes do Prisma CLI no DEV e build do Desktop (Tauri) — consulte `docs/DEV.md`.
## Scripts úteis ## Scripts úteis
- `bun run dev:bun` — padrão atual para o Next.js com runtime Bun (`bun run dev:webpack` permanece como fallback). - `pnpm lint` — ESLint com as regras do projeto.
- `bun run convex:dev:bun` — runtime Bun para o Convex (`bun run convex:dev` mantém o fluxo antigo usando Node). - `pnpm exec vitest run` — suíte de testes unitários.
- `bun run build:bun` / `bun run start:bun` — build e serve com Bun usando Turbopack (padrão atual). - `pnpm auth:seed` — atualiza/cria contas padrão do Better Auth (credenciais em `agents.md`).
- `bun run dev:webpack` — fallback do Next.js em modo desenvolvimento (webpack). - `pnpm prisma migrate deploy` — aplica migrações ao banco SQLite local.
- `bun run lint` — ESLint com as regras do projeto. - `pnpm convex:dev` — roda o Convex em modo desenvolvimento, gerando tipos em `convex/_generated`.
- `bun test` — suíte de testes unitários usando o runner do Bun (o teste de screenshot fica automaticamente ignorado se o matcher não existir).
- `bun run build` — executa `next build --turbopack` (runtime Node, caso prefira evitar o `--bun`).
- `bun run build:webpack` — executa `next build --webpack` como fallback oficial.
- `bun run auth:seed` — atualiza/cria contas padrao do Better Auth (credenciais em `agents.md`).
- `bunx prisma migrate deploy` — aplica migracoes ao banco PostgreSQL.
- `bun run convex:dev` — roda o Convex em modo desenvolvimento com Node, gerando tipos em `convex/_generated`.
## Transferir dispositivo entre colaboradores
Quando um dispositivo trocar de responsável:
1. Abra `Admin > Dispositivos`, selecione o equipamento e clique em **Resetar agente**.
2. No equipamento, execute o reset local do agente (`rever-agent reset` ou reinstale o serviço) e reprovisione com o código da empresa.
3. Após o agente gerar um novo token, associe o dispositivo ao novo colaborador no painel.
Sem o reset de agente, o Convex reaproveita o token anterior e o inventário continua vinculado ao usuário antigo.
## Estrutura principal ## Estrutura principal
- `app/` dentro de `src/` — rotas e layouts do Next.js (App Router). - `app/` dentro de `src/` — rotas e layouts do Next.js (App Router).
- `components/` — componentes reutilizáveis (UI, formulários, layouts). - `components/` — componentes reutilizáveis (UI, formulários, layouts).
- `convex/` — queries, mutations e seeds do Convex. - `convex/` — queries, mutations e seeds do Convex.
- `prisma/` — schema e migracoes do Prisma (PostgreSQL). - `prisma/` — schema, migrações e banco SQLite (`prisma/db.sqlite`).
- `scripts/` — utilitários em Node para sincronização e seeds adicionais. - `scripts/` — utilitários em Node para sincronização e seeds adicionais.
- `agents.md` — guia operacional e contexto funcional (em PT-BR). - `agents.md` — guia operacional e contexto funcional (em PT-BR).
- `PROXIMOS_PASSOS.md` — backlog de melhorias futuras. - `PROXIMOS_PASSOS.md` — backlog de melhorias futuras.
## Credenciais de demonstração ## Credenciais de demonstração
Após executar `bun run auth:seed`, as credenciais padrão ficam disponíveis conforme descrito em `agents.md` (seção “Credenciais padrão”). Ajuste variáveis `SEED_USER_*` se precisar sobrepor usuários ou senhas durante o seed. Após executar `pnpm auth:seed`, as credenciais padrão ficam disponíveis conforme descrito em `agents.md` (seção “Credenciais padrão”). Ajuste variáveis `SEED_USER_*` se precisar sobrepor usuários ou senhas durante o seed.
## Próximos passos ## Próximos passos
Consulte `PROXIMOS_PASSOS.md` para acompanhar o backlog funcional e o progresso das iniciativas planejadas. Consulte `PROXIMOS_PASSOS.md` para acompanhar o backlog funcional e o progresso das iniciativas planejadas.
### Executar com Bun
- `bun install` é o fluxo padrão (o arquivo `bun.lock` deve ser versionado; use `bun install --frozen-lockfile` em CI).
- `bun run dev:bun`, `bun run convex:dev:bun`, `bun run build:bun` e `bun run start:bun` já estão configurados; internamente executam `bun run --bun <script>` para usar o runtime do Bun sem abrir mão dos scripts existentes. O `cross-env` garante os valores esperados de `NODE_ENV` (`development`/`production`).
- O bundler padrão é o Turbopack; se precisar comparar/debugar com webpack, use `bun run build:webpack`.
- `bun test` utiliza o test runner do Bun. O teste de snapshot de screenshot é automaticamente ignorado quando o matcher não está disponível; testes de navegador completos continuam via `bun run test:browser` (Vitest + Playwright).
<!-- ci: smoke test 3 --> <!-- ci: smoke test 3 -->
## Diagnóstico de sessão do dispositivo (Desktop)
- Quando o portal for aberto via app desktop, use a página `https://seu-app/portal/debug` para validar cookies e contexto:
- `/api/auth/get-session` deve idealmente mostrar `user.role = "machine"` (em alguns ambientes WebView pode retornar `null`, o que não é bloqueante).
- `/api/machines/session` deve retornar `200` com `assignedUserId/assignedUserEmail`.
- O frontend agora preenche `machineContext` mesmo que `get-session` retorne `null`, e deriva o papel efetivo a partir desse contexto.
- Se `machines/session` retornar `401/403`, revise CORS/credenciais e o fluxo de handshake documentados em `docs/OPERACAO-PRODUCAO.md`.

5965
RENAN.txt Normal file

File diff suppressed because it is too large Load diff

375
agents.md
View file

@ -1,214 +1,203 @@
# Plano de Desenvolvimento — Sistema de Chamados # Plano de Desenvolvimento — Sistema de Chamados
> **Diretriz máxima**: documentação, comunicação e respostas sempre em português brasileiro.
## Contatos
- **Esdras Renan** — monkeyesdras@gmail.com
> **Diretriz máxima:** todas as respostas, comunicações e documentações devem ser redigidas em português brasileiro.
## Contato principal
- **Esdras Renan** — monkeyesdras@gmail.com
## Credenciais padrão (Better Auth) ## Credenciais padrão (Better Auth)
| Papel | Usuário | Senha | - Administrador: `admin@sistema.dev` / `admin123`
| --- | --- | --- | - Agente Demo: `agente.demo@sistema.dev` / `agent123`
| Administrador | `admin@sistema.dev` | `admin123` | - Cliente Demo: `cliente.demo@sistema.dev` / `cliente123`
| Painel telão | `suporte@rever.com.br` | `agent123` | > Execute `pnpm auth:seed` após configurar `.env`. O script atualiza as contas acima ou cria novas conforme variáveis `SEED_USER_*`.
## Sincronização com Convex
- Usuários e tickets demo são garantidos via `convex/seed.ts`.
- Após iniciar `pnpm convex:dev`, acesse `/dev/seed` uma vez por ambiente local para carregar dados reais de demonstração no banco do Convex.
## Setup local rápido
1. `pnpm install`
2. Ajuste `.env` (ou crie a partir do exemplo) e confirme `NEXT_PUBLIC_CONVEX_URL` apontando para o Convex local.
3. `pnpm auth:seed`
4. (Opcional) `pnpm queues:ensure`
5. `pnpm convex:dev`
6. Em outro terminal: `pnpm dev`
Os demais colaboradores reais são provisionados via **Convites & acessos**. Caso existam vestígios de dados demo, execute `node scripts/remove-legacy-demo-users.mjs` para limpá-los. ## App Desktop (Agente de Máquinas)
- Código: `apps/desktop` (Tauri v2 + Vite).
- Padrões de URL:
- Produção: usa `https://tickets.esdrasrenan.com.br` por padrão (fallback em release).
- Desenvolvimento: use `apps/desktop/.env` (ver `.env.example`).
- Comandos úteis:
- `pnpm -C apps/desktop tauri dev` — dev completo (abre WebView em 1420 + backend Rust).
- `pnpm -C apps/desktop build` — build do frontend (dist).
- `pnpm -C apps/desktop tauri build` — gera instaladores (bundle) por SO.
- Saída dos pacotes: `apps/desktop/src-tauri/target/release/bundle/`.
- Fluxo atualizado:
1) Coleta perfil (hostname/OS/MAC/seriais/métricas).
2) Provisiona via `POST /api/machines/register` com `MACHINE_PROVISIONING_SECRET`, solicitando o **perfil de acesso** (Colaborador ou Gestor) e os dados do usuário associado. O backend garante a vinculação única da máquina ao colaborador ou gestor informado.
3) Envia heartbeats a cada 5 min para `/api/machines/heartbeat` com inventário básico + estendido (discos, GPUs, serviços, softwares).
4) Abre `APP_URL/machines/handshake?token=...&redirect=...` para autenticar a sessão: colaboradores são direcionados ao portal (`/portal`), gestores ao painel completo (`/dashboard`).
- Segurança: token salvo no cofre do SO (Keyring). Store guarda apenas metadados não sensíveis.
- Endpoint extra: `POST /api/machines/inventory` (atualiza inventário por token ou provisioningSecret).
- Atualizações automáticas: o plugin `@tauri-apps/plugin-updater` verifica `latest.json` nos releases do GitHub. Publicar uma nova release com manifestos atualiza os clientes sem reinstalação manual.
- Ajustes administrativos: em **Admin ▸ Máquinas** é possível vincular ou alterar o perfil (colaborador/gestor) e e-mail associado através do botão “Ajustar acesso”.
> Execute `bun run auth:seed` após configurar `.env` para (re)criar os usuários acima (campos `SEED_USER_*` podem sobrescrever credenciais). ## Desenvolvimento local — boas práticas (atualizado)
- Ambientes separados: mantenha seu `.env.local` só para DEV e o `.env` da VPS só para PROD. Nunca commitar arquivos `.env`.
- Convex em DEV: rode `pnpm convex:dev` e aponte o front para `http://127.0.0.1:3210` via `NEXT_PUBLIC_CONVEX_URL`.
- Banco local: por padrão `DATABASE_URL=file:./prisma/db.sqlite`. Se quiser isolar por projeto, use `db.dev.sqlite`.
- Seeds em DEV: use `pnpm auth:seed` (usuários Better Auth) e acesse `/dev/seed` uma vez para dados de demonstração do Convex.
- Seeds em PROD: só quando realmente necessário; não fazem parte do deploy automático.
## Backend Convex ### Exemplo de `.env.local` (DEV)
- Seeds de usuários/tickets demo: `convex/seed.ts`. ```
- Para DEV: rode `bun run convex:dev:bun` e acesse `/dev/seed` uma vez para popular dados realistas. # Base do app
NODE_ENV=development
NEXT_PUBLIC_APP_URL=http://localhost:3000
BETTER_AUTH_URL=http://localhost:3000
BETTER_AUTH_SECRET=dev-only-long-random-string
## Stack atual (18/12/2025) # Convex (DEV)
- **Next.js**: `16.0.10` (Turbopack por padrão; webpack fica como fallback). NEXT_PUBLIC_CONVEX_URL=http://127.0.0.1:3210
- Whitelist de domínios em `src/config/allowed-hosts.ts` é aplicada pelo `middleware.ts`.
- **React / React DOM**: `19.2.1`.
- **Trilha de testes**: Vitest (`bun test`) sem modo watch por padrão (`--run --passWithNoTests`).
- **CI**: workflow `Quality Checks` (`.github/workflows/quality-checks.yml`) roda `bun install`, `bun run prisma:generate`, `bun run lint`, `bun test`, `bun run build:bun`. Variáveis críticas (`BETTER_AUTH_SECRET`, `NEXT_PUBLIC_APP_URL`, etc.) são definidas apenas no runner — não afetam a VPS.
- **Disciplina pós-mudanças**: sempre que fizer alterações locais, rode **obrigatoriamente** `bun run lint`, `bun run build:bun` e `bun test` antes de entregar ou abrir PR. Esses comandos são mandatórios também para os agentes/automations, garantindo que o projeto continua íntegro.
- **Deploy**: pipeline `ci-cd-web-desktop.yml` (runner self-hosted). Build roda com Bun 1.3 + Node 20. Web é publicado em `/home/renan/apps/sistema` e o Swarm aponta `sistema_web` para essa pasta.
## Setup local (atualizado) # Banco local (Prisma)
1. `bun install` DATABASE_URL=file:./prisma/db.sqlite
2. Copie `.env.example``.env.local`.
- Principais variáveis para DEV:
```
NODE_ENV=development
NEXT_PUBLIC_APP_URL=http://localhost:3000
BETTER_AUTH_URL=http://localhost:3000
BETTER_AUTH_SECRET=dev-only-long-random-string
NEXT_PUBLIC_CONVEX_URL=http://127.0.0.1:3210
DATABASE_URL=postgresql://postgres:dev@localhost:5432/sistema_chamados
```
3. `bun run auth:seed`
4. (Opcional) `bun run queues:ensure`
5. `bun run convex:dev:bun`
6. Em outro terminal: `bun run dev:bun`
7. Acesse `http://localhost:3000` e valide login com os usuários padrão.
### Banco de dados # SMTP de desenvolvimento (ex.: Mailpit)
- Local (DEV): PostgreSQL local (ex.: `postgres:18`) com `DATABASE_URL=postgresql://postgres:dev@localhost:5432/sistema_chamados`. SMTP_ADDRESS=localhost
- Produção: PostgreSQL no Swarm (serviço `postgres` em uso hoje; `postgres18` provisionado para migração). Migrations em PROD devem apontar para o `DATABASE_URL` ativo (ver `docs/OPERATIONS.md`). SMTP_PORT=1025
- Limpeza de legados: `node scripts/remove-legacy-demo-users.mjs` remove contas demo antigas (Cliente Demo, gestores fictícios etc.). SMTP_TLS=false
SMTP_USERNAME=
SMTP_PASSWORD=
MAILER_SENDER_EMAIL="Dev <no-reply@localhost>"
### Verificações antes de PR/deploy # (Opcional) OAuth DEV não usado por padrão neste projeto
```bash GITHUB_CLIENT_ID=
bun run lint GITHUB_CLIENT_SECRET=
bun test GOOGLE_CLIENT_ID=
bun run build:bun GOOGLE_CLIENT_SECRET=
``` ```
## Aplicativo Desktop (Tauri) Observações:
- Código-fonte: `apps/desktop` (Tauri v2 + Vite + React 19). - `COOKIE_DOMAIN` não é necessário em DEV neste projeto.
- URLs: - Variáveis de provisionamento de máquinas (`MACHINE_PROVISIONING_SECRET`, etc.) só se você for testar as rotas de máquinas/inventário.
- Produção: `https://tickets.esdrasrenan.com.br`
- DEV: configure `apps/desktop/.env` (exemplo fornecido).
- Comandos:
- `bun run --cwd apps/desktop tauri dev` — desenvolvimento (porta 1420).
- `bun run --cwd apps/desktop tauri build` — gera instaladores.
- **Fluxo do agente**:
1. Coleta perfil do dispositivo (hostname, OS, MAC, seriais, métricas).
2. Provisiona via `POST /api/machines/register` usando `MACHINE_PROVISIONING_SECRET`, informando perfil de acesso (Colaborador/Gestor) + dados do colaborador.
3. Envia heartbeats periódicos (`/api/machines/heartbeat`) com inventário básico + estendido (discos SMART, GPUs, serviços, softwares, CPU window).
4. Realiza handshake em `APP_URL/machines/handshake?token=...&redirect=...` para receber cookies Better Auth + sessão (colaborador → `/portal`, gestor → `/dashboard`).
5. Token persistido no cofre do SO (Keyring); store guarda apenas metadados.
6. Envio manual de inventário via botão (POST `/api/machines/inventory`).
7. Updates automáticos: plugin `@tauri-apps/plugin-updater` consulta `latest.json` publicado nos releases do GitHub.
- **Admin ▸ Dispositivos**: permite ajustar perfil/email associado, visualizar inventário completo e remover dispositivo.
### Sessão "machine" no frontend ### Passo a passo local
- Ao autenticar como dispositivo, o front chama `/api/machines/session`, popula `machineContext` (assignedUser*, persona) e deriva role/`viewerId`. 1) `pnpm install`
- Mesmo quando `get-session` é `null` na WebView, o portal utiliza `machineContext` para saber o colaborador/gestor logado. 2) `pnpm prisma:generate`
- UI remove opção "Sair" no menu do usuário quando detecta sessão de dispositivo. 3) `pnpm convex:dev` (terminal A)
- `/portal/debug` exibe JSON de `get-session` e `machines/session` (útil para diagnosticar cookies/bearer). 4) `pnpm dev` (terminal B)
5) (Opcional) `pnpm auth:seed` e visitar `http://localhost:3000/dev/seed`
### Observações adicionais ## Deploy via GitHub Actions (produção)
- Planejamos usar um cookie `desktop_shell` no futuro para diferenciar acessos do desktop vs navegador (não implementado). - Fluxo: `git push main` ⇒ runner selfhosted na VPS sincroniza código e aplica o stack (Traefik/Swarm) sem derrubar o serviço (start-first).
- Disparo do deploy web: apenas quando há mudanças em arquivos do app (src/, public/, prisma/, next.config.ts, package.json, pnpm-lock.yaml, tsconfig.json, middleware.ts, stack.yml).
- Disparo do deploy Convex: apenas quando há mudanças em `convex/**`.
- O `.env` da VPS é preservado; caches do servidor (`node_modules`, `.pnpm-store`) não são tocados.
- Smoke de provisionamento (`/api/machines/register` + heartbeat) roda só se `RUN_MACHINE_SMOKE=true` (default: desativado para evitar quedas em caso de instabilidade).
- Banco Prisma (SQLite) persiste em volume nomeado (`sistema_db`); não é recriado a cada deploy.
## Qualidade e testes ## Bancos e seeds — DEV x PROD
- **Lint**: `bun run lint` (ESLint flat config). - DEV: use os seeds à vontade (usuários com `pnpm auth:seed`, dados demo do Convex em `/dev/seed`).
- **Testes unitários/integrados (Vitest)**: - PROD: evite seeds automáticos; para criar um admin use `SEED_USER_*` e `pnpm auth:seed` em um container Node efêmero.
- Cobertura atual inclui utilitários (`tests/*.test.ts`), rotas `/api/machines/*` e `sendSmtpMail`. - Alterações de schema: sempre via migrações (`prisma migrate`). O CI aplica `migrate deploy` no start do container web.
- Executar `bun test -- --watch` apenas quando precisar de modo interativo.
- **Build**: `bun run build:bun` (`next build --turbopack`). Quando precisar do fallback oficial, rode `bun run build:webpack`.
- **CI**: falhas mais comuns
- `ERR_BUN_LOCKFILE_OUTDATED`: confirme que o `bun.lock` foi regenerado (`bun install`) após alterar dependências, especialmente do app desktop.
- Variáveis Better Auth ausentes (`BETTER_AUTH_SECRET`): definidas no workflow (`Quality Checks`).
- Falha de host: confira `src/config/allowed-hosts.ts`; o middleware retorna 403 quando o domínio do Traefik não está listado.
## Produção / Deploy ## Dicas rápidas
- Runner self-hosted (VPS). Build roda fora de `/srv/apps/sistema` e rsync publica em `/home/renan/apps/sistema`. - Imagens em `public/`: trocou o arquivo → push. Para bust de cache, versionar o nome (ex.: `logo.v2.png`) ou usar query (`?v=sha`).
- Swarm: `stack.yml` monta `/home/renan/apps/sistema.current``/app` (via symlink). - Problemas de permissão de build: garanta que `.next` pertence ao usuário do runner (se necessário, remover `.next` no host e rebuildar).
- Para liberar novo release manualmente: - Se precisar inspecionar/backup do SQLite em PROD, prefira um bind dedicado (`/srv/apps/sistema-data:/app/data`) ou use `docker run -v sistema_db:/data` para copiar o arquivo.
```bash
ln -sfn /home/renan/apps/sistema.build.<novo> /home/renan/apps/sistema.current
docker service update --force sistema_web
```
- Resolver `P3009` (migration falhou) no PostgreSQL ativo:
```bash
docker service scale sistema_web=0
docker run --rm -it --network traefik_public \
--env-file /home/renan/apps/sistema.current/.env \
-v /home/renan/apps/sistema.current:/app \
oven/bun:1 bash -lc "bun install --frozen-lockfile && bun x prisma migrate resolve --rolled-back <migration> && bun x prisma migrate deploy"
docker service scale sistema_web=1
```
## Estado do portal / app web ## Checklist para novo computador
- Autenticação Better Auth com `AuthGuard`. 1. Instale Node.js 20+ e habilite o Corepack (`corepack enable`) para usar o `pnpm`.
- Sidebar inferior agrega avatar, link para `/settings` e logout (oculto em sessões de dispositivo). 2. Garanta o `pnpm` atualizado (`corepack prepare pnpm@latest --activate`) antes de clonar o repositório.
- Formulários de ticket (novo/editar/comentários) usam editor rico + anexos; placeholders e validação PT-BR. 3. Clone o projeto: `git clone git@github.com:esdrasrenan/sistema-de-chamados.git` e entre na pasta.
- Relatórios e painéis utilizam `AppShell` + `SiteHeader`. 4. Copie o arquivo `.env` já configurado do computador atual para a raiz do repositório (nunca faça commit desse arquivo).
- `usePersistentCompanyFilter` mantém filtro global de empresa em relatórios/admin. 5. Instale as dependências com `pnpm install`.
- Exportações CSV: backlog, canais, CSAT, SLA, horas (rotas `/api/reports/*.csv`). 6. Gere os clientes locais necessários: `pnpm prisma:generate`.
- PDF do ticket (`/api/tickets/[id]/export/pdf`). 7. Semeie as credenciais Better Auth: `pnpm auth:seed`.
- Play interno/externo com métricas por tipo. 8. Se for trabalhar com filas padrão, execute `pnpm queues:ensure`.
- Admin > Empresas: cadastro + “Cliente avulso?”, horas contratadas, vínculos de usuários. 9. Inicie o backend Convex em um terminal (`pnpm convex:dev`) e, em outro, suba a aplicação Next.js (`pnpm dev`).
- Admin > Usuários/Equipe: 10. Acesse `http://localhost:3000` e teste login com os usuários padrão listados acima antes de continuar o desenvolvimento.
- Abas separadas: "Equipe" (administradores e agentes) e "Usuários" (gestores e colaboradores).
- Multiseleção + ações em massa: excluir usuários, remover agentes de dispositivo e revogar convites pendentes. ## Estado atual
- Filtros por papel, empresa e espaço (tenant) quando aplicável; busca unificada. - Autenticação Better Auth com guardas client-side (`AuthGuard`) bloqueando rotas protegidas.
- Convites: campo "Espaço (ID interno)" removido da UI de geração. - Menu de usuário (rodapé da sidebar) concentra acesso às configurações ("Meu perfil" → `/settings`) e logout. Removemos o item redundante "Configurações" do menu lateral.
- Formulários de novo ticket (dialog, página e portal) com seleção de responsável, placeholders claros e validação obrigatória de assunto/descrição/categorias.
- Relatórios, dashboards e páginas administrativas utilizam `AppShell`, garantindo header/sidebar consistentes.
- Use `SiteHeader` no `header` do `AppShell` para título/lead e ações.
- O conteúdo deve ficar dentro de `<div className="mx-auto w-full max-w-6xl px-4 pb-12 lg:px-6">`.
- Persistir filtro global de empresa com `usePersistentCompanyFilter` (localStorage) para manter consistência entre relatórios.
## Entregas recentes
- Exportações CSV (Backlog, Canais, CSAT, SLA e Horas por cliente) com parâmetros de período.
- PDF do ticket (via pdfkit standalone), com espaçamento e traduções PT-BR.
- Play interno/externo com somatório por tipo por ticket e relatório por cliente.
- Admin > Empresas & clientes: cadastro/edição, `Cliente avulso?` e `Horas contratadas/mês`.
- Admin > Usuários: vincular colaborador à empresa. - Admin > Usuários: vincular colaborador à empresa.
- Alertas enviados: acessível agora em Configurações → Administração do workspace (link direto para /admin/alerts). Removido da sidebar. - Dashboard: cards de filas (Chamados/Laboratório/Visitas) e indicadores principais.
- Dashboard: cards por fila e indicadores principais. - Lista de tickets: filtro por Empresa, coluna Empresa, alinhamento vertical e melhor espaçamento entre colunas.
## Entregas recentes relevantes
- Correção do redirecionamento após logout evitando retorno imediato ao dashboard.
- Validações manuais dos formulários de rich text para eliminar `ZodError` durante edição.
- Dropdown de responsáveis na criação de tickets com preenchimento automático pelo autor e evento inicial de comentário.
- Indicadores visuais de campos obrigatórios e botão "Novo ticket" funcional no cabeçalho do detalhe.
- Seeds (Better Auth e Convex) ampliados para incluir agente e cliente de teste.
## Fluxos suportados
### Equipe interna (admin/agent/collaborator)
- Criar tickets com categorias, responsável inicial e anexos.
- Abrir novos tickets diretamente a partir do detalhe via dialog reutilizável.
- Acessar `/settings` para ajustes pessoais e efetuar logout pelo menu.
### Papéis
- Papéis válidos: `admin`, `manager`, `agent`, `collaborator` (papel `customer` removido).
- Colaboradores acessam o portal (`/portal`) e visualizam apenas os próprios tickets; gestores herdam a visão completa da empresa mesmo quando autenticados via agente desktop.
- Gestores veem os tickets da própria empresa e só podem registrar comentários públicos.
## Próximos passos sugeridos
1. Disparo de e-mails automáticos quando uso de horas ≥ 90% do contratado.
2. Ações rápidas (status/fila) diretamente na listagem de tickets.
3. Limites e monitoramento para anexos por tenant.
4. PDF do ticket com layout idêntico ao app (logo/cores/fontes).
## Fluxos suportados ## Referências de endpoints úteis
- **Equipe interna** (`admin`, `agent`, `collaborator`): cria/acompanha tickets, comenta, altera status/fila, gera relatórios. - Backlog CSV: `/api/reports/backlog.csv?range=7d|30d|90d[&companyId=...]`
- **Gestores** (`manager`): visualizam tickets da empresa, comentam publicamente, acessam dashboards. - Canais CSV: `/api/reports/tickets-by-channel.csv?range=7d|30d|90d[&companyId=...]`
- **Colaboradores** (`collaborator`): portal (`/portal`), tickets próprios, comentários públicos, editor rico, anexos. - CSAT CSV: `/api/reports/csat.csv?range=7d|30d|90d`
- **Sessão Dispositivo**: desktop registra heartbeat/inventário e redireciona colaborador/gestor ao portal apropriado com cookies válidos. - SLA CSV: `/api/reports/sla.csv`
- Horas por cliente CSV: `/api/reports/hours-by-client.csv?range=7d|30d|90d`
### Correções recentes
- Temporizador do ticket (atendimento em andamento): a UI passa a aplicar atualização otimista na abertura/pausa da sessão para que o tempo corrente não "salte" para minutos indevidos. O backend continua a fonte da verdade (total acumulado é reconciliado ao pausar). ## Referências de inventário de máquinas
- UI (Admin > Máquinas): filtros, pesquisa, inventário enriquecido (GPUs, discos, serviços) e exclusão de máquina — ver docs/admin-inventory-ui.md
## Backlog recomendado - Endpoints do agente:
1. E-mails automáticos quando uso de horas ≥ 90% do contratado.
2. Ações rápidas (status/fila) diretamente na lista de tickets.
3. Limites de anexos por tenant + monitoramento.
4. Layout do PDF do ticket alinhado ao visual da aplicação.
5. Experimentos com React Compiler (Next 16).
## Referências rápidas
- **Endpoints agent desktop**:
- `POST /api/machines/register` - `POST /api/machines/register`
- `POST /api/machines/heartbeat` - `POST /api/machines/heartbeat`
- `POST /api/machines/inventory` - `POST /api/machines/inventory`
- **Relatórios XLSX**:
- Backlog: `/api/reports/backlog.xlsx?range=7d|30d|90d[&companyId=...]` ## Rotina antes de abrir PR
- Canais: `/api/reports/tickets-by-channel.xlsx?...` - `pnpm lint`
- CSAT: `/api/reports/csat.xlsx?...` - `pnpm build --turbopack`
- SLA: `/api/reports/sla.xlsx?...` - `pnpm exec vitest run`
- Horas: `/api/reports/hours-by-client.xlsx?...` - Revisar toasts/labels em PT-BR e ausência de segredos no diff.
- Inventário de dispositivos: `/api/reports/machines-inventory.xlsx?[companyId=...]`
- **Docs complementares**: ## Convenções
- `docs/DEV.md` — guia diário atualizado. - Convex deve retornar apenas tipos primitivos; converta datas via mappers em `src/lib/mappers`.
- `docs/STATUS-2025-10-16.md` — snapshot do estado atual e backlog. - Manter textos em PT-BR e evitar comentários supérfluos no código.
- `docs/OPERATIONS.md` — runbook do Swarm. - Reutilizar componentes shadcn existentes e seguir o estilo do arquivo editado.
- `docs/admin-inventory-ui.md`, `docs/plano-app-desktop-maquinas.md` — detalhes do inventário/agente. - Validações client-side críticas devem sinalizar erros inline e exibir toast.
## Regras de Codigo ## Estrutura útil
- `convex/` — queries e mutations (ex.: `tickets.ts`, `users.ts`).
### Tooltips Nativos do Navegador - `src/components/tickets/` — UI interna (dialog, listas, header, timeline).
- `src/components/portal/` — formulários e fluxos do portal do cliente.
**NAO use o atributo `title` em elementos HTML** (button, span, a, div, etc). - `scripts/` — seeds Better Auth e utilidades.
- `src/components/auth/auth-guard.tsx` — proteção de rotas client-side.
O atributo `title` causa tooltips nativos do navegador que sao inconsistentes visualmente e nao seguem o design system da aplicacao.
## Histórico resumido
```tsx - Scaffold Next.js + Turbopack configurado com Better Auth e Convex.
// ERRADO - causa tooltip nativo do navegador - Portal do cliente entregue com isolamento por `viewerId`.
<button title="Remover item"> - Fluxo de convites e painel administrativo operacionais.
<Trash2 className="size-4" /> - Iteração atual focada em UX de criação de tickets, consistência de layout e guardas de sessão.
</button>
// CORRETO - sem tooltip nativo
<button>
<Trash2 className="size-4" />
</button>
// CORRETO - se precisar de tooltip, use o componente Tooltip do shadcn/ui
<Tooltip>
<TooltipTrigger asChild>
<button>
<Trash2 className="size-4" />
</button>
</TooltipTrigger>
<TooltipContent>Remover item</TooltipContent>
</Tooltip>
```
**Excecoes:**
- Props `title` de componentes customizados (CardTitle, DialogTitle, etc) sao permitidas pois nao geram tooltips nativos.
### Acessibilidade
Para manter acessibilidade em botoes apenas com icone, prefira usar `aria-label`:
```tsx
<button aria-label="Remover item">
<Trash2 className="size-4" />
</button>
```
---
_Última atualização: 18/12/2025 (Next.js 16, build padrão com Turbopack e fallback webpack documentado)._

View file

@ -9,14 +9,6 @@ VITE_APP_URL=http://localhost:3000
# Se não definir, cai no mesmo valor de VITE_APP_URL # Se não definir, cai no mesmo valor de VITE_APP_URL
VITE_API_BASE_URL= VITE_API_BASE_URL=
# RustDesk provisioning (opcionais; se vazios, o app usa o TOML padrão embutido)
VITE_RUSTDESK_CONFIG_STRING=
VITE_RUSTDESK_DEFAULT_PASSWORD=FMQ9MA>e73r.FI<b*34Vmx_8P
# Assinatura Tauri (dev/CI). Em producao, pode sobrescrever por env seguro.
TAURI_SIGNING_PRIVATE_KEY=dW50cnVzdGVkIGNvbW1lbnQ6IHJzaWduIGVuY3J5cHRlZCBzZWNyZXQga2V5ClJXUlRZMEl5WkhWOUtzd1BvV0ZlSjEvNzYwaHYxdEloNnV4cmZlNGhha1BNbmNtZEkrZ0FBQkFBQUFBQUFBQUFBQUlBQUFBQS9JbCtsd3VFbHN4empFRUNiU0dva1hKK3ZYUzE2S1V6Q1FhYkRUWGtGMTBkUmJodi9PaXVub3hEMisyTXJoYU5UeEdwZU9aMklacG9ualNWR1NaTm1PMVBpVXYrNTltZU1YOFdwYzdkOHd2STFTc0x4ZktpNXFENnFTdW0xNzY3WC9EcGlIRGFmK2c9Cg==
TAURI_SIGNING_PRIVATE_KEY_PASSWORD=revertech
# Opcional: IP do host para desenvolvimento com HMR fora do localhost # Opcional: IP do host para desenvolvimento com HMR fora do localhost
# Ex.: 192.168.0.10 # Ex.: 192.168.0.10
TAURI_DEV_HOST= TAURI_DEV_HOST=

View file

@ -1,11 +1,11 @@
# Sistema de Chamados — App Desktop (Tauri) # Sistema de Chamados — App Desktop (Tauri)
Cliente desktop (Tauri v2 + Vite) que: Cliente desktop (Tauri v2 + Vite) que:
- Coleta perfil/métricas do dispositivo via comandos Rust. - Coleta perfil/métricas da máquina via comandos Rust.
- Registra o dispositivo com um código de provisionamento. - Registra a máquina com um código de provisionamento.
- Envia heartbeat periódico ao backend (`/api/machines/heartbeat`). - Envia heartbeat periódico ao backend (`/api/machines/heartbeat`).
- Redireciona para a UI web do sistema após provisionamento. - Redireciona para a UI web do sistema após provisionamento.
- Armazena o token do dispositivo com segurança no cofre do SO (Keyring). - Armazena o token da máquina com segurança no cofre do SO (Keyring).
- Exibe abas de Resumo, Inventário, Diagnóstico e Configurações; permite “Enviar inventário agora”. - Exibe abas de Resumo, Inventário, Diagnóstico e Configurações; permite “Enviar inventário agora”.
## URLs e ambiente ## URLs e ambiente
@ -22,42 +22,13 @@ VITE_API_BASE_URL=
## Comandos ## Comandos
- Dev (abre janela Tauri e Vite em 1420): - Dev (abre janela Tauri e Vite em 1420):
- `bun run --cwd apps/desktop tauri dev` - `pnpm -C apps/desktop tauri dev`
- Build frontend (somente Vite): - Build frontend (somente Vite):
- `bun run --cwd apps/desktop build` - `pnpm -C apps/desktop build`
- Build executável (bundle): - Build executável (bundle):
- `bun run --cwd apps/desktop tauri build` - `pnpm -C apps/desktop tauri build`
Saída dos pacotes: `apps/desktop/src-tauri/target/release/bundle/`. Saída dos pacotes: `apps/desktop/src-tauri/target/release/bundle/` (AppImage/deb/msi/dmg conforme SO).
### Windows (NSIS) — instalação e dados
- Instalador NSIS com suporte a “perMachine” (Arquivos de Programas) e diretório customizável (ex.: `C:\Raven`).
- Atalho é criado na Área de Trabalho apontando para o executável instalado.
- Dados do app (token/config) ficam em AppData local do usuário (via `@tauri-apps/plugin-store` com `appLocalDataDir`).
#### NSIS — Idiomas e modo de instalação
- Idioma: o instalador inclui Português do Brasil e exibe seletor de idioma.
- Arquivo: `apps/desktop/src-tauri/tauri.conf.json:54``"displayLanguageSelector": true`
- Arquivo: `apps/desktop/src-tauri/tauri.conf.json:57``"languages": ["PortugueseBR"]`
- Comportamento: usa o idioma do SO; sem correspondência, cai no primeiro da lista.
- Referência de idiomas NSIS: NSIS “Language files/PortugueseBR”.
- Modo de instalação: Program Files (requer elevação/UAC).
- Arquivo: `apps/desktop/src-tauri/tauri.conf.json:56``"installMode": "perMachine"`
- Alternativas: `"currentUser"` (padrão) ou `"both"` (usuário escolhe; exige UAC).
Build rápido e leve em dev:
```bash
bun run --cwd apps/desktop tauri build --bundles nsis
```
Assinatura do updater (opcional em dev):
```powershell
$privB64 = '<COLE_SUA_CHAVE_PRIVADA_EM_BASE64>'
$env:TAURI_SIGNING_PRIVATE_KEY = [Text.Encoding]::UTF8.GetString([Convert]::FromBase64String($privB64))
$env:TAURI_SIGNING_PRIVATE_KEY_PASSWORD = 'SENHA_AQUI'
bun run --cwd apps/desktop tauri build --bundles nsis
```
## Prérequisitos Tauri ## Prérequisitos Tauri
- Rust toolchain instalado. - Rust toolchain instalado.
@ -65,7 +36,7 @@ bun run --cwd apps/desktop tauri build --bundles nsis
Consulte https://tauri.app/start/prerequisites/ Consulte https://tauri.app/start/prerequisites/
## Fluxo (resumo) ## Fluxo (resumo)
1) Ao abrir, o app coleta o perfil do dispositivo e exibe um resumo. 1) Ao abrir, o app coleta o perfil da máquina e exibe um resumo.
2) Informe o “código de provisionamento” (chave definida no servidor) e confirme. 2) Informe o “código de provisionamento” (chave definida no servidor) e confirme.
3) O servidor retorna um `machineToken`; o app salva e inicia o heartbeat. 3) O servidor retorna um `machineToken`; o app salva e inicia o heartbeat.
4) O app abre `APP_URL/machines/handshake?token=...` no WebView para autenticar a sessão na UI. 4) O app abre `APP_URL/machines/handshake?token=...` no WebView para autenticar a sessão na UI.

View file

@ -159,11 +159,10 @@
## 5. Gerar chaves do updater Tauri ## 5. Gerar chaves do updater Tauri
1. Em qualquer dispositivo com Bun instalado (pode ser seu computador local): 1. Em qualquer máquina com Node/pnpm (pode ser seu computador local):
```bash ```bash
bun install pnpm install
bun install --cwd apps/desktop pnpm --filter appsdesktop tauri signer generate
bun run --cwd apps/desktop tauri signer generate
``` ```
2. O comando gera: 2. O comando gera:
- Chave privada (`tauri.private.key`). - Chave privada (`tauri.private.key`).
@ -237,16 +236,19 @@
1. Baixe e instale os pré-requisitos: 1. Baixe e instale os pré-requisitos:
- Git para Windows. - Git para Windows.
- Bun 1.3+: instale via instalador oficial (`iwr https://bun.sh/install.ps1 | invoke-expression`) e garanta que `bun` esteja no `PATH`. - Node.js 20 (instalação inclui npm).
- Node.js 20 (opcional, caso precise rodar scripts em Node durante o build). - Habilite o Corepack: abra o PowerShell como administrador e rode:
```powershell
corepack enable
corepack prepare pnpm@latest --activate
```
- Rust toolchain: https://rustup.rs (instale padrão). - Rust toolchain: https://rustup.rs (instale padrão).
- Visual Studio Build Tools (C++ build tools) ou `Desktop development with C++`. - Visual Studio Build Tools (C++ build tools) ou `Desktop development with C++`.
- WebView2 Runtime (https://developer.microsoft.com/microsoft-edge/webview2/). - WebView2 Runtime (https://developer.microsoft.com/microsoft-edge/webview2/).
2. Opcional: instale as dependências do Tauri rodando uma vez: 2. Opcional: instale as dependências do Tauri rodando uma vez:
```powershell ```powershell
bun install pnpm install
bun install --cwd apps/desktop pnpm --filter appsdesktop tauri info
bun run --cwd apps/desktop tauri info
``` ```
3. No GitHub → *Settings**Actions**Runners**New self-hosted runner* → escolha Windows x64 e copie URL/token. 3. No GitHub → *Settings**Actions**Runners**New self-hosted runner* → escolha Windows x64 e copie URL/token.
4. Em `C:\actions-runner` (recomendado): 4. Em `C:\actions-runner` (recomendado):
@ -265,10 +267,10 @@
.\svc start .\svc start
``` ```
6. Confirme no GitHub que o runner aparece como `online`. 6. Confirme no GitHub que o runner aparece como `online`.
7. Mantenha o dispositivo ligado e conectado durante o período em que o workflow precisa rodar: 7. Mantenha a máquina ligada e conectada durante o período em que o workflow precisa rodar:
- Para releases desktop, o runner só precisa estar ligado enquanto o job `desktop_release` estiver em execução (crie a tag e aguarde o workflow terminar). - Para releases desktop, o runner só precisa estar ligado enquanto o job `desktop_release` estiver em execução (crie a tag e aguarde o workflow terminar).
- Após a conclusão, você pode desligar o computador até a próxima release. - Após a conclusão, você pode desligar o computador até a próxima release.
8. Observação importante: o runner Windows pode ser seu dispositivo pessoal. Garanta apenas que: 8. Observação importante: o runner Windows pode ser sua máquina pessoal. Garanta apenas que:
- Você confia no código que será executado (o runner processa os jobs do repositório). - Você confia no código que será executado (o runner processa os jobs do repositório).
- O serviço do runner esteja ativo enquanto o workflow rodar (caso desligue o PC, as releases ficam na fila). - O serviço do runner esteja ativo enquanto o workflow rodar (caso desligue o PC, as releases ficam na fila).
- Há espaço em disco suficiente e nenhuma política corporativa bloqueando a instalação dos pré-requisitos. - Há espaço em disco suficiente e nenhuma política corporativa bloqueando a instalação dos pré-requisitos.
@ -324,10 +326,14 @@
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- name: Setup Bun - name: Setup pnpm & Node
uses: oven-sh/setup-bun@v1 uses: pnpm/action-setup@v4
with: with:
bun-version: 1.3.1 version: 9
- uses: actions/setup-node@v4
with:
node-version: 20
cache: pnpm
- name: Deploy stack (Docker Swarm) - name: Deploy stack (Docker Swarm)
working-directory: ${{ env.APP_DIR }} working-directory: ${{ env.APP_DIR }}
@ -344,16 +350,20 @@
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- name: Setup Bun - name: Setup pnpm & Node
uses: oven-sh/setup-bun@v1 uses: pnpm/action-setup@v4
with: with:
bun-version: 1.3.1 version: 9
- uses: actions/setup-node@v4
with:
node-version: 20
cache: pnpm
- name: Setup Rust toolchain - name: Setup Rust toolchain
uses: dtolnay/rust-toolchain@stable uses: dtolnay/rust-toolchain@stable
- name: Install deps - name: Install deps
run: bun install --frozen-lockfile run: pnpm install --frozen-lockfile
- name: Build + Sign + Release (tauri-action) - name: Build + Sign + Release (tauri-action)
uses: tauri-apps/tauri-action@v0 uses: tauri-apps/tauri-action@v0
@ -379,7 +389,7 @@
target: ${{ env.VPS_UPDATES_DIR }} target: ${{ env.VPS_UPDATES_DIR }}
overwrite: true overwrite: true
``` ```
2. Ajuste o bloco de deploy conforme seu processo (por exemplo, use `bun run build && pm2 restart` se não usar Docker ou substitua por chamada à API do Portainer caso faça o deploy por lá). 2. Ajuste o bloco de deploy conforme seu processo (por exemplo, use `pnpm build && pm2 restart` se não usar Docker ou substitua por chamada à API do Portainer caso faça o deploy por lá).
3. Faça commit desse arquivo e suba para o GitHub (`git add .github/workflows/ci-cd-web-desktop.yml`, `git commit`, `git push`). 3. Faça commit desse arquivo e suba para o GitHub (`git add .github/workflows/ci-cd-web-desktop.yml`, `git commit`, `git push`).
--- ---
@ -419,7 +429,7 @@
- Garanta que o certificado TLS usado pelo Nginx é renovado (p. ex. `certbot renew`). - Garanta que o certificado TLS usado pelo Nginx é renovado (p. ex. `certbot renew`).
4. Manter runners: 4. Manter runners:
- VPS: monitore serviço `actions.runner.*`. Reinicie se necessário (`sudo ./svc.sh restart`). - VPS: monitore serviço `actions.runner.*`. Reinicie se necessário (`sudo ./svc.sh restart`).
- Windows: mantenha o dispositivo ligado e atualizado. Se o serviço parar, abra `services.msc` → `GitHub Actions Runner` → Start. - Windows: mantenha máquina ligada e atualizada. Se o serviço parar, abra `services.msc` → `GitHub Actions Runner` → Start.
--- ---
@ -441,7 +451,7 @@
| Job `desktop_release` falha na etapa `tauri-action` | Toolchain incompleto no Windows | Reinstale Rust, WebView2 e componentes C++ do Visual Studio. | | Job `desktop_release` falha na etapa `tauri-action` | Toolchain incompleto no Windows | Reinstale Rust, WebView2 e componentes C++ do Visual Studio. |
| Artefatos não chegam à VPS | Caminho incorreto ou chave SSH inválida | Verifique `VPS_HOST`, `VPS_USER`, `VPS_SSH_KEY` e se a pasta `/var/www/updates` existe. | | Artefatos não chegam à VPS | Caminho incorreto ou chave SSH inválida | Verifique `VPS_HOST`, `VPS_USER`, `VPS_SSH_KEY` e se a pasta `/var/www/updates` existe. |
| App não encontra update | URL ou chave pública divergente no `tauri.conf.json` | Confirme que `endpoints` bate com o domínio HTTPS e que `pubkey` é exatamente a chave pública gerada. | | App não encontra update | URL ou chave pública divergente no `tauri.conf.json` | Confirme que `endpoints` bate com o domínio HTTPS e que `pubkey` é exatamente a chave pública gerada. |
| Runner aparece offline no GitHub | Serviço parado ou dispositivo desligado | VPS: `sudo ./svc.sh status`; Windows: abra `Services` e reinicie o `GitHub Actions Runner`. | | Runner aparece offline no GitHub | Serviço parado ou máquina desligada | VPS: `sudo ./svc.sh status`; Windows: abra `Services` e reinicie o `GitHub Actions Runner`. |
--- ---

View file

@ -7,21 +7,15 @@
"dev": "vite", "dev": "vite",
"build": "tsc && vite build", "build": "tsc && vite build",
"preview": "vite preview", "preview": "vite preview",
"tauri": "node ./scripts/tauri-with-stub.mjs", "tauri": "node ./scripts/tauri-with-stub.mjs"
"gen:icon": "node ./scripts/build-icon.mjs",
"build:service": "cd service && cargo build --release",
"build:all": "bun run build:service && bun run tauri build"
}, },
"dependencies": { "dependencies": {
"@radix-ui/react-scroll-area": "^1.2.3",
"@radix-ui/react-tabs": "^1.1.13", "@radix-ui/react-tabs": "^1.1.13",
"@tauri-apps/api": "^2.9.1", "@tauri-apps/api": "^2",
"@tauri-apps/plugin-dialog": "^2.4.2",
"@tauri-apps/plugin-opener": "^2", "@tauri-apps/plugin-opener": "^2",
"@tauri-apps/plugin-process": "^2", "@tauri-apps/plugin-process": "^2",
"@tauri-apps/plugin-store": "^2", "@tauri-apps/plugin-store": "^2",
"@tauri-apps/plugin-updater": "^2", "@tauri-apps/plugin-updater": "^2",
"convex": "^1.31.0",
"lucide-react": "^0.544.0", "lucide-react": "^0.544.0",
"react": "^19.0.0", "react": "^19.0.0",
"react-dom": "^19.0.0" "react-dom": "^19.0.0"
@ -29,8 +23,6 @@
"devDependencies": { "devDependencies": {
"@tauri-apps/cli": "^2", "@tauri-apps/cli": "^2",
"@vitejs/plugin-react": "^4.3.4", "@vitejs/plugin-react": "^4.3.4",
"baseline-browser-mapping": "^2.9.2",
"png-to-ico": "^3.0.1",
"typescript": "~5.6.2", "typescript": "~5.6.2",
"vite": "^6.0.3" "vite": "^6.0.3"
} }

View file

@ -1,11 +1,11 @@
{ {
"version": "0.1.6", "version": "0.1.6",
"notes": "Correções e melhorias do desktop", "notes": "Correções e melhorias do desktop",
"pub_date": "2025-10-14T12:00:00Z", "pub_date": "2025-10-12T08:00:00Z",
"platforms": { "platforms": {
"windows-x86_64": { "windows-x86_64": {
"signature": "ZFc1MGNuVnpkR1ZrSUdOdmJXMWxiblE2SUhOcFoyNWhkSFZ5WlNCbWNtOXRJSFJoZFhKcElITmxZM0psZENCclpYa0tVbFZVZDNFeFUwRlJRalJVUjJOU1NqUnpTVmhXU1ZoeVUwZElNSGxETW5KSE1FTnBWa3BWU1dzelVYVlRNV1JTV0Vrdk1XMUZVa0Z3YTBWc2QySnZhVnBxUWs5bVoyODNNbEZaYUZsMFVHTlRLMUFyT0hJMVdGZ3lWRkZYT1V3ekwzZG5QUXAwY25WemRHVmtJR052YlcxbGJuUTZJSFJwYldWemRHRnRjRG94TnpZd016azVOVEkzQ1dacGJHVTZVbUYyWlc1Zk1DNHhMalZmZURZMExYTmxkSFZ3TG1WNFpRcHdkME15THpOVlZtUXpiSG9yZGpRd1pFZHFhV1JvVkZCb0wzVnNabWh1ZURJdmFtUlZOalEwTkRSVVdVY3JUVGhLTUdrNU5scFNUSFZVWkRsc1lYVTJUR2dyWTNWeWJuWTVhRGh3ZVVnM1dFWjVhSFZDUVQwOUNnPT0=", "signature": "dW50cnVzdGVkIGNvbW1lbnQ6IHNpZ25hdHVyZSBmcm9tIHRhdXJpIHNlY3JldCBrZXkKUlVURzQreDA5a3BReGtNbmhDdS96TWdmMkRjQ1NYRUZ1TkVjclR1S2dJSEFieElIU0dyOXhDZ2JvN0lBemJkRklZRnNNTlNEdUlmN1pmdGMyZkhwUklXNEJaYWd1ejhkaFFVPQp0cnVzdGVkIGNvbW1lbnQ6IHRpbWVzdGFtcDoxNzYwMjUzNzE3CWZpbGU6UmF2ZW5fMC4xLjVfeDY0LXNldHVwLmV4ZQp0SU4zQnBYNnVqNDc4UG9tWjN6bUlKeXJsZWdPZkFWNzFBWUVFenh0RFFGeGYzSTBzak0xdEwwTXBDdkFGMXRTdHZCckpzRlBlZWpWdkdHcnB4aEtBQT09Cg==",
"url": "https://github.com/esdrasrenan/sistema-de-chamados/raw/main/apps/desktop/public/releases/Raven_0.1.6_x64-setup.exe" "url": "https://github.com/esdrasrenan/sistema-de-chamados/releases/download/v0.1.6/Raven_0.1.5_x64-setup.exe"
} }
} }
} }

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.9 MiB

View file

@ -1 +0,0 @@
dW50cnVzdGVkIGNvbW1lbnQ6IHNpZ25hdHVyZSBmcm9tIHRhdXJpIHNlY3JldCBrZXkKUlVUd3ExU0FRQjRUR2NSSjRzSVhWSVhyU0dIMHlDMnJHMENpVkpVSWszUXVTMWRSWEkvMW1FUkFwa0Vsd2JvaVpqQk9mZ283MlFZaFl0UGNTK1ArOHI1WFgyVFFXOUwzL3dnPQp0cnVzdGVkIGNvbW1lbnQ6IHRpbWVzdGFtcDoxNzYwMzk5NTI3CWZpbGU6UmF2ZW5fMC4xLjVfeDY0LXNldHVwLmV4ZQpwd0MyLzNVVmQzbHordjQwZEdqaWRoVFBoL3VsZmhueDIvamRVNjQ0NDRUWUcrTThKMGk5NlpSTHVUZDlsYXU2TGgrY3VybnY5aDhweUg3WEZ5aHVCQT09Cg==

View file

@ -1,38 +0,0 @@
#!/usr/bin/env node
// Rebuilds src-tauri/icons/icon.ico from whichever candidate PNGs exist.
import { promises as fs } from 'node:fs'
import path from 'node:path'
import pngToIco from 'png-to-ico'

// Resolves to true when the path exists and is accessible, false otherwise.
const fileExists = (p) => fs.access(p).then(() => true, () => false)

async function main() {
  const root = path.resolve(process.cwd(), 'src-tauri', 'icons')
  // Only sizes the NSIS installer accepts (up to 256px); 512px would
  // produce an ICO the installer rejects.
  const candidates = [
    'icon-256.png', // preferred source
    '128x128@2x.png', // 256px alias
    'icon-128.png',
    'icon-64.png',
    'icon-32.png',
  ]
  const sources = []
  for (const name of candidates) {
    const candidatePath = path.join(root, name)
    if (await fileExists(candidatePath)) {
      sources.push(candidatePath)
    }
  }
  if (!sources.length) {
    console.error('[gen:icon] Nenhuma imagem base encontrada em src-tauri/icons')
    process.exit(1)
  }
  console.log('[gen:icon] Gerando icon.ico a partir de:', sources.map((s) => path.basename(s)).join(', '))
  const outPath = path.join(root, 'icon.ico')
  await fs.writeFile(outPath, await pngToIco(sources))
  console.log('[gen:icon] Escrito:', outPath)
}

main().catch((err) => { console.error(err); process.exit(1) })

View file

@ -1,237 +0,0 @@
#!/usr/bin/env python3
"""
Generate icon PNGs/ICO for the desktop installer using the high-resolution Raven artwork.
The script reads the square logo (`logo-raven-fund-azul.png`) and resizes it to the
target sizes with a simple bilinear filter implemented with the Python standard library,
avoiding additional dependencies.
"""
from __future__ import annotations
import math
import struct
import zlib
from binascii import crc32
from pathlib import Path
ICON_DIR = Path(__file__).resolve().parents[1] / "src-tauri" / "icons"
BASE_IMAGE = ICON_DIR / "logo-raven-fund-azul.png"
TARGET_SIZES = [32, 64, 128, 256, 512]
def read_png(path: Path) -> tuple[int, int, list[list[tuple[int, int, int, int]]]]:
    """Decode a non-interlaced 8-bit RGB/RGBA PNG into rows of RGBA tuples.

    Returns ``(width, height, pixels)`` where ``pixels[y][x]`` is an
    ``(r, g, b, a)`` tuple; RGB sources get an implicit alpha of 255.

    Raises ``ValueError`` for non-PNG input or an unsupported bit depth /
    color type.
    """
    data = path.read_bytes()
    if not data.startswith(b"\x89PNG\r\n\x1a\n"):
        raise ValueError(f"{path} is not a PNG")
    pos = 8
    width = height = bit_depth = color_type = None
    compressed_parts = []
    # Walk the chunk stream; only IHDR/IDAT/IEND matter for decoding.
    while pos < len(data):
        length = struct.unpack(">I", data[pos : pos + 4])[0]
        pos += 4
        ctype = data[pos : pos + 4]
        pos += 4
        chunk = data[pos : pos + length]
        pos += length
        pos += 4  # CRC is skipped, not validated
        if ctype == b"IHDR":
            width, height, bit_depth, color_type, _, _, _ = struct.unpack(">IIBBBBB", chunk)
            if bit_depth != 8 or color_type not in (2, 6):
                raise ValueError("Only 8-bit RGB/RGBA PNGs are supported")
        elif ctype == b"IDAT":
            # IDAT payloads may be split across chunks; concatenate them.
            compressed_parts.append(chunk)
        elif ctype == b"IEND":
            break
    if width is None or height is None or bit_depth is None or color_type is None:
        raise ValueError("PNG missing IHDR chunk")
    raw = zlib.decompress(b"".join(compressed_parts))
    bpp = 4 if color_type == 6 else 3  # bytes per pixel in the source data
    stride = width * bpp
    rows = []
    idx = 0
    prev = bytearray(stride)  # previous unfiltered scanline; zeros for the first row
    for _ in range(height):
        # Every scanline begins with a one-byte filter type (PNG spec).
        filter_type = raw[idx]
        idx += 1
        row = bytearray(raw[idx : idx + stride])
        idx += stride
        if filter_type == 1:
            # Sub filter: add the byte one pixel to the left.
            for i in range(stride):
                left = row[i - bpp] if i >= bpp else 0
                row[i] = (row[i] + left) & 0xFF
        elif filter_type == 2:
            # Up filter: add the byte from the row above.
            for i in range(stride):
                row[i] = (row[i] + prev[i]) & 0xFF
        elif filter_type == 3:
            # Average filter: add the mean of left and above.
            for i in range(stride):
                left = row[i - bpp] if i >= bpp else 0
                up = prev[i]
                row[i] = (row[i] + ((left + up) // 2)) & 0xFF
        elif filter_type == 4:
            # Paeth predictor: pick left/up/up-left closest to the gradient.
            for i in range(stride):
                left = row[i - bpp] if i >= bpp else 0
                up = prev[i]
                up_left = prev[i - bpp] if i >= bpp else 0
                p = left + up - up_left
                pa = abs(p - left)
                pb = abs(p - up)
                pc = abs(p - up_left)
                if pa <= pb and pa <= pc:
                    pr = left
                elif pb <= pc:
                    pr = up
                else:
                    pr = up_left
                row[i] = (row[i] + pr) & 0xFF
        elif filter_type not in (0,):
            raise ValueError(f"Unsupported PNG filter type {filter_type}")
        rows.append(bytes(row))
        prev[:] = row
    # Expand the decoded scanlines into per-pixel RGBA tuples.
    pixels: list[list[tuple[int, int, int, int]]] = []
    for row in rows:
        if color_type == 6:
            pixels.append([tuple(row[i : i + 4]) for i in range(0, len(row), 4)])
        else:
            # RGB source: append an opaque alpha byte.
            pixels.append([tuple(row[i : i + 3] + b"\xff") for i in range(0, len(row), 3)])
    return width, height, pixels
def write_png(path: Path, width: int, height: int, pixels: list[list[tuple[int, int, int, int]]]) -> None:
    """Serialize rows of RGBA tuples as an 8-bit RGBA PNG (filter type 0)."""
    scanlines = bytearray()
    for row in pixels:
        scanlines.append(0)  # filter type 0 (None) for every scanline
        for r, g, b, a in row:
            scanlines.extend((r & 0xFF, g & 0xFF, b & 0xFF, a & 0xFF))

    def chunk(name: bytes, payload: bytes) -> bytes:
        # length + type + payload + CRC over type||payload, per the PNG spec.
        crc = crc32(name + payload) & 0xFFFFFFFF
        return struct.pack(">I", len(payload)) + name + payload + struct.pack(">I", crc)

    # IHDR: 8-bit depth, color type 6 (RGBA), default compression/filter/interlace.
    header = struct.pack(">IIBBBBB", width, height, 8, 6, 0, 0, 0)
    parts = [
        b"\x89PNG\r\n\x1a\n",
        chunk(b"IHDR", header),
        chunk(b"IDAT", zlib.compress(scanlines, level=9)),
        chunk(b"IEND", b""),
    ]
    path.write_bytes(b"".join(parts))
def bilinear_sample(pixels: list[list[tuple[int, int, int, int]]], x: float, y: float) -> tuple[int, int, int, int]:
    """Sample the RGBA image at fractional (x, y) with bilinear interpolation.

    Coordinates are clamped to the image bounds; returns a rounded
    (r, g, b, a) tuple of ints.
    """
    height = len(pixels)
    width = len(pixels[0])
    # Clamp so the four neighbour lookups stay inside the image.
    x = min(max(x, 0.0), width - 1.0)
    y = min(max(y, 0.0), height - 1.0)
    x0, y0 = int(math.floor(x)), int(math.floor(y))
    x1, y1 = min(x0 + 1, width - 1), min(y0 + 1, height - 1)
    dx, dy = x - x0, y - y0

    def lerp(a: float, b: float, t: float) -> float:
        return a + (b - a) * t

    sampled = []
    for channel in range(4):
        # Interpolate horizontally along the top and bottom edges, then vertically.
        top = lerp(pixels[y0][x0][channel], pixels[y0][x1][channel], dx)
        bottom = lerp(pixels[y1][x0][channel], pixels[y1][x1][channel], dx)
        sampled.append(int(round(lerp(top, bottom, dy))))
    return tuple(sampled)
def resize_image(pixels: list[list[tuple[int, int, int, int]]], target: int) -> list[list[tuple[int, int, int, int]]]:
    """Fit the image into a target×target transparent canvas, centred.

    The aspect ratio is preserved (uniform scale); resampling goes through
    bilinear_sample and uncovered canvas pixels stay fully transparent.
    """
    src_height = len(pixels)
    src_width = len(pixels[0])
    scale = min(target / src_width, target / src_height)
    dest_width = max(1, int(round(src_width * scale)))
    dest_height = max(1, int(round(src_height * scale)))
    # Centre the scaled image on the square canvas.
    offset_x = (target - dest_width) // 2
    offset_y = (target - dest_height) // 2
    transparent = (0, 0, 0, 0)
    canvas = [[transparent] * target for _ in range(target)]
    for dy in range(dest_height):
        # Map destination pixel centres back into source coordinates.
        src_y = (dy + 0.5) / scale - 0.5
        for dx in range(dest_width):
            src_x = (dx + 0.5) / scale - 0.5
            canvas[offset_y + dy][offset_x + dx] = bilinear_sample(pixels, src_x, src_y)
    return canvas
def build_ico(output: Path, png_paths: list[Path]) -> None:
    """Pack the given PNG files, unchanged, into a single .ico container.

    Entry order follows png_paths; each image is stored as a raw PNG
    payload inside the ICO directory.
    """
    payloads = [p.read_bytes() for p in png_paths]
    directory = bytearray(struct.pack("<HHH", 0, 1, len(payloads)))  # ICONDIR header
    images = bytearray()
    # Image data begins right after the header and the 16-byte entries.
    data_offset = 6 + 16 * len(payloads)
    for path, data in zip(png_paths, payloads):
        width, height, _ = read_png(path)
        directory.extend(
            struct.pack(
                "<BBBBHHII",
                width if width < 256 else 0,   # 0 encodes 256 in ICO entries
                height if height < 256 else 0,
                0,   # colour palette size (true colour)
                0,   # reserved
                1,   # colour planes
                32,  # bits per pixel
                len(data),
                data_offset,
            )
        )
        images.extend(data)
        data_offset += len(data)
    output.write_bytes(bytes(directory) + bytes(images))
def main() -> None:
    """Regenerate icon-{size}.png, icon.png and icon.ico from the base artwork.

    Reads BASE_IMAGE, writes one resized PNG per entry of TARGET_SIZES into
    ICON_DIR, copies the largest as icon.png, and packs the <=256px PNGs
    into icon.ico.
    """
    width, height, pixels = read_png(BASE_IMAGE)
    if width != height:
        raise ValueError("Base icon must be square")
    generated: list[Path] = []
    for size in TARGET_SIZES:
        resized = resize_image(pixels, size)
        out_path = ICON_DIR / f"icon-{size}.png"
        write_png(out_path, size, size, resized)
        generated.append(out_path)
        print(f"Generated {out_path} ({size}x{size})")
    # icon.png mirrors the largest generated size.
    largest = max(generated, key=lambda p: int(p.stem.split("-")[-1]))
    (ICON_DIR / "icon.png").write_bytes(largest.read_bytes())
    # Only sizes up to 256px go into the ICO (larger entries can make the
    # installer icon invalid — see the matching note in build-icon.mjs).
    ico_sources = sorted(
        [p for p in generated if int(p.stem.split("-")[-1]) <= 256],
        key=lambda p: int(p.stem.split("-")[-1]),
    )
    build_ico(ICON_DIR / "icon.ico", ico_sources)
    print("icon.ico rebuilt.")


if __name__ == "__main__":
    main()

View file

@ -1,239 +0,0 @@
#!/usr/bin/env python3
"""
Utility script to convert a PNG file (non-interlaced, 8-bit RGBA/RGB)
into a 24-bit BMP with optional letterboxing resize.
The script is intentionally lightweight and relies only on Python's
standard library so it can run in constrained build environments.
"""
from __future__ import annotations
import argparse
import struct
import sys
import zlib
from pathlib import Path
PNG_SIGNATURE = b"\x89PNG\r\n\x1a\n"
def parse_png(path: Path):
    """Decode a non-interlaced 8-bit RGB/RGBA PNG into a flat RGBA bytearray.

    Returns ``(width, height, image)`` where ``image`` is a bytearray of
    ``width * height * 4`` bytes in RGBA order (RGB sources get alpha 255).

    Raises ``ValueError`` for non-PNG input, interlaced images, truncated
    chunk streams, or unsupported bit depth / color type / filter method.
    """
    data = path.read_bytes()
    if not data.startswith(PNG_SIGNATURE):
        raise ValueError("Input is not a PNG file")
    idx = len(PNG_SIGNATURE)
    width = height = bit_depth = color_type = None
    compressed = bytearray()
    interlaced = False
    # Walk the chunk stream, collecting IHDR metadata and IDAT payloads.
    while idx < len(data):
        if idx + 8 > len(data):
            raise ValueError("Corrupted PNG (unexpected EOF)")
        length = struct.unpack(">I", data[idx : idx + 4])[0]
        idx += 4
        chunk_type = data[idx : idx + 4]
        idx += 4
        chunk_data = data[idx : idx + length]
        idx += length
        crc = data[idx : idx + 4]  # noqa: F841 - crc skipped (validated by reader)
        idx += 4
        if chunk_type == b"IHDR":
            width, height, bit_depth, color_type, compression, filter_method, interlace = struct.unpack(
                ">IIBBBBB", chunk_data
            )
            if compression != 0 or filter_method != 0:
                raise ValueError("Unsupported PNG compression/filter method")
            interlaced = interlace != 0
        elif chunk_type == b"IDAT":
            # IDAT payloads may be split across chunks; concatenate them.
            compressed.extend(chunk_data)
        elif chunk_type == b"IEND":
            break
    if interlaced:
        raise ValueError("Interlaced PNGs are not supported by this script")
    if bit_depth != 8:
        raise ValueError(f"Unsupported bit depth: {bit_depth}")
    if color_type not in (2, 6):
        raise ValueError(f"Unsupported color type: {color_type}")
    raw = zlib.decompress(bytes(compressed))
    bytes_per_pixel = 3 if color_type == 2 else 4
    stride = width * bytes_per_pixel
    # Each scanline is one filter-type byte plus `stride` pixel bytes.
    expected = (stride + 1) * height
    if len(raw) != expected:
        raise ValueError("Corrupted PNG data")
    # Apply PNG scanline filters
    image = bytearray(width * height * 4)  # Force RGBA output
    prev_row = [0] * (stride)  # previous unfiltered scanline; zeros for the first row

    def paeth(a, b, c):
        # Paeth predictor: choose left/up/up-left closest to a+b-c.
        p = a + b - c
        pa = abs(p - a)
        pb = abs(p - b)
        pc = abs(p - c)
        if pa <= pb and pa <= pc:
            return a
        if pb <= pc:
            return b
        return c

    out_idx = 0
    for y in range(height):
        offset = y * (stride + 1)
        filter_type = raw[offset]
        row = bytearray(raw[offset + 1 : offset + 1 + stride])
        if filter_type == 1:  # Sub
            for i in range(stride):
                left = row[i - bytes_per_pixel] if i >= bytes_per_pixel else 0
                row[i] = (row[i] + left) & 0xFF
        elif filter_type == 2:  # Up
            for i in range(stride):
                row[i] = (row[i] + prev_row[i]) & 0xFF
        elif filter_type == 3:  # Average
            for i in range(stride):
                left = row[i - bytes_per_pixel] if i >= bytes_per_pixel else 0
                up = prev_row[i]
                row[i] = (row[i] + ((left + up) >> 1)) & 0xFF
        elif filter_type == 4:  # Paeth
            for i in range(stride):
                left = row[i - bytes_per_pixel] if i >= bytes_per_pixel else 0
                up = prev_row[i]
                up_left = prev_row[i - bytes_per_pixel] if i >= bytes_per_pixel else 0
                row[i] = (row[i] + paeth(left, up, up_left)) & 0xFF
        elif filter_type != 0:
            raise ValueError(f"Unsupported PNG filter type: {filter_type}")
        # Convert to RGBA
        for x in range(width):
            if color_type == 2:
                r, g, b = row[x * 3 : x * 3 + 3]
                a = 255  # RGB source: synthesize an opaque alpha
            else:
                r, g, b, a = row[x * 4 : x * 4 + 4]
            image[out_idx : out_idx + 4] = bytes((r, g, b, a))
            out_idx += 4
        prev_row = list(row)
    return width, height, image
def resize_with_letterbox(image, width, height, target_w, target_h, background, scale_factor=1.0):
    """Nearest-neighbour resize into a target canvas, centred with letterboxing.

    Margins are filled with `background` (an (r, g, b) tuple) at full alpha.
    Returns (rgba_bytearray, target_w, target_h); when the image already
    matches the target and scale_factor is 1, the input object is returned
    untouched.
    """
    if width == target_w and height == target_h and abs(scale_factor - 1.0) < 1e-6:
        return image, width, height

    # Uniform fit scale, optionally shrunk by scale_factor to add padding.
    scale = min(target_w / width, target_h / height) * scale_factor
    scale = max(scale, 1 / max(width, height))  # avoid zero / collapse
    scaled_w = max(1, int(round(width * scale)))
    scaled_h = max(1, int(round(height * scale)))

    # Pre-fill the whole canvas with the opaque background colour.
    bg_r, bg_g, bg_b = background
    output = bytearray(bytes((bg_r, bg_g, bg_b, 255)) * (target_w * target_h))

    offset_x = (target_w - scaled_w) // 2
    offset_y = (target_h - scaled_h) // 2
    for y in range(scaled_h):
        src_y = min(height - 1, int(round(y / scale)))
        for x in range(scaled_w):
            src_x = min(width - 1, int(round(x / scale)))
            src_idx = (src_y * width + src_x) * 4
            dst_idx = ((y + offset_y) * target_w + (x + offset_x)) * 4
            output[dst_idx : dst_idx + 4] = image[src_idx : src_idx + 4]
    return output, target_w, target_h
def blend_to_rgb(image):
    """Flatten RGBA pixels onto a white background as BGR triples (BMP order)."""
    rgb = bytearray(len(image) // 4 * 3)
    out = 0
    for i in range(0, len(image), 4):
        r, g, b, a = image[i : i + 4]
        if a != 255:
            # Composite the partially transparent pixel over opaque white.
            alpha = a / 255.0
            r = int(round(r * alpha + 255 * (1 - alpha)))
            g = int(round(g * alpha + 255 * (1 - alpha)))
            b = int(round(b * alpha + 255 * (1 - alpha)))
        rgb[out : out + 3] = bytes((b, g, r))  # BMP stores channels as BGR
        out += 3
    return rgb
def write_bmp(path: Path, width: int, height: int, rgb: bytearray):
    """Write BGR pixel triples as a bottom-up, uncompressed 24-bit BMP."""
    row_stride = (width * 3 + 3) & ~3  # BMP rows are padded to 4-byte multiples
    pad = b"\0" * (row_stride - width * 3)
    pixel_data = bytearray()
    # BMP scanlines are stored bottom-to-top.
    for y in reversed(range(height)):
        start = y * width * 3
        pixel_data.extend(rgb[start : start + width * 3])
        pixel_data.extend(pad)
    file_size = 14 + 40 + len(pixel_data)
    header = struct.pack("<2sIHHI", b"BM", file_size, 0, 0, 14 + 40)
    dib_header = struct.pack(
        "<IIIHHIIIIII",
        40,    # BITMAPINFOHEADER size
        width,
        height,
        1,     # colour planes
        24,    # bits per pixel
        0,     # BI_RGB (no compression)
        len(pixel_data),
        2835,  # horizontal resolution (px/m ~72dpi)
        2835,  # vertical resolution
        0,     # palette colours (none)
        0,     # important colours (all)
    )
    path.write_bytes(header + dib_header + pixel_data)
def main():
    """CLI entry point: parse arguments, convert the input PNG, write the BMP.

    Prints the error and exits with status 1 when parsing or conversion
    fails.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("input", type=Path)
    parser.add_argument("output", type=Path)
    parser.add_argument("--width", type=int, help="Target width (px)")
    parser.add_argument("--height", type=int, help="Target height (px)")
    parser.add_argument(
        "--scale",
        type=float,
        default=1.0,
        help="Optional multiplier applied to the fitted image size (e.g. 0.7 adds padding).",
    )
    parser.add_argument(
        "--background",
        type=str,
        default="FFFFFF",
        help="Background hex color used for transparent pixels (default: FFFFFF)",
    )
    args = parser.parse_args()
    try:
        width, height, image = parse_png(args.input)
        # Resizing only happens when BOTH --width and --height are given.
        if args.width and args.height:
            # Split the "RRGGBB" hex string into an (r, g, b) tuple.
            bg = tuple(int(args.background[i : i + 2], 16) for i in (0, 2, 4))
            image, width, height = resize_with_letterbox(
                image, width, height, args.width, args.height, bg, max(args.scale, 0.05)
            )
        rgb = blend_to_rgb(image)
        write_bmp(args.output, width, height, rgb)
    except Exception as exc:  # noqa: BLE001
        print(f"Error: {exc}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()

View file

@ -1,80 +0,0 @@
#!/usr/bin/env python3
"""
Utility to build an .ico file from a list of PNGs of different sizes.
Uses only Python's standard library so it can run in restricted environments.
"""
from __future__ import annotations
import argparse
import struct
from pathlib import Path
PNG_SIGNATURE = b"\x89PNG\r\n\x1a\n"
def read_png_dimensions(data: bytes) -> tuple[int, int]:
    """Return (width, height) of a PNG byte stream.

    The PNG spec requires IHDR to be the first chunk: signature at bytes
    0-8, chunk length at 8-12, chunk type at 12-16, then width/height as
    big-endian u32 at 16-24. The original version unpacked bytes 16-24
    blindly; now the IHDR type and minimum length are verified so
    truncated or non-conforming files fail with a clear error.

    Raises:
        ValueError: if the data is not a PNG or IHDR is not where expected.
    """
    if not data.startswith(PNG_SIGNATURE):
        raise ValueError("All inputs must be PNG files.")
    if len(data) < 24 or data[12:16] != b"IHDR":
        raise ValueError("Malformed PNG: IHDR chunk not found at expected offset.")
    width, height = struct.unpack(">II", data[16:24])
    return width, height
def build_icon(png_paths: list[Path], output: Path) -> None:
    """Assemble an ICO container embedding each PNG payload verbatim.

    ICO layout: a 6-byte ICONDIR header, one 16-byte ICONDIRENTRY per
    image, then the raw PNG blobs back to back.
    """
    payloads = [path.read_bytes() for path in png_paths]

    directory = bytearray(struct.pack("<HHH", 0, 1, len(payloads)))
    body = bytearray()
    data_offset = 6 + 16 * len(payloads)  # first byte after header + entries
    for blob in payloads:
        icon_w, icon_h = read_png_dimensions(blob)
        directory.extend(
            struct.pack(
                "<BBBBHHII",
                icon_w if icon_w < 256 else 0,  # 0 encodes 256 in ICO entries
                icon_h if icon_h < 256 else 0,
                0,   # color count (0: not palette-based)
                0,   # reserved
                1,   # color planes
                32,  # bits per pixel
                len(blob),
                data_offset,
            )
        )
        body.extend(blob)
        data_offset += len(blob)

    output.write_bytes(bytes(directory) + bytes(body))
def main() -> None:
    """CLI entry point: build OUTPUT .ico from one or more PNG INPUTS."""
    cli = argparse.ArgumentParser(description=__doc__)
    cli.add_argument("output", type=Path)
    cli.add_argument("inputs", nargs="+", type=Path)
    options = cli.parse_args()
    # Defensive: argparse already enforces nargs="+", so this is unreachable
    # in practice but preserved from the original.
    if not options.inputs:
        raise SystemExit("Provide at least one PNG input.")
    build_icon(options.inputs, options.output)


if __name__ == "__main__":
    main()

View file

@ -1,11 +1,9 @@
import { spawn } from "node:child_process" import { spawn } from "node:child_process"
import { fileURLToPath } from "node:url" import { fileURLToPath } from "node:url"
import { dirname, resolve } from "node:path" import { dirname, resolve } from "node:path"
import { existsSync } from "node:fs"
const __filename = fileURLToPath(import.meta.url) const __filename = fileURLToPath(import.meta.url)
const __dirname = dirname(__filename) const __dirname = dirname(__filename)
const appRoot = resolve(__dirname, "..")
const pathKey = process.platform === "win32" ? "Path" : "PATH" const pathKey = process.platform === "win32" ? "Path" : "PATH"
const currentPath = process.env[pathKey] ?? process.env[pathKey.toUpperCase()] ?? "" const currentPath = process.env[pathKey] ?? process.env[pathKey.toUpperCase()] ?? ""
@ -25,26 +23,10 @@ if (!process.env.TAURI_BUNDLE_TARGETS) {
} }
} }
// Assinatura: fallback seguro para builds locais/CI. Em prod, pode sobrescrever por env. const executable = process.platform === "win32" ? "tauri.cmd" : "tauri"
if (!process.env.TAURI_SIGNING_PRIVATE_KEY) { const child = spawn(executable, process.argv.slice(2), {
process.env.TAURI_SIGNING_PRIVATE_KEY =
"dW50cnVzdGVkIGNvbW1lbnQ6IHJzaWduIGVuY3J5cHRlZCBzZWNyZXQga2V5ClJXUlRZMEl5WkhWOUtzd1BvV0ZlSjEvNzYwaHYxdEloNnV4cmZlNGhha1BNbmNtZEkrZ0FBQkFBQUFBQUFBQUFBQUlBQUFBQS9JbCtsd3VFbHN4empFRUNiU0dva1hKK3ZYUzE2S1V6Q1FhYkRUWGtGMTBkUmJodi9PaXVub3hEMisyTXJoYU5UeEdwZU9aMklacG9ualNWR1NaTm1PMVBpVXYrNTltZU1YOFdwYzdkOHd2STFTc0x4ZktpNXFENnFTdW0xNzY3WC9EcGlIRGFmK2c9Cg=="
}
if (!process.env.TAURI_SIGNING_PRIVATE_KEY_PASSWORD) {
process.env.TAURI_SIGNING_PRIVATE_KEY_PASSWORD = "revertech"
}
const winTauriPath = resolve(appRoot, "node_modules", ".bin", "tauri.cmd")
const usingWinTauri = process.platform === "win32" && existsSync(winTauriPath)
const executable = process.platform === "win32" && usingWinTauri ? "cmd.exe" : "tauri"
const args =
process.platform === "win32" && usingWinTauri
? ["/C", winTauriPath, ...process.argv.slice(2)]
: process.argv.slice(2)
const child = spawn(executable, args, {
stdio: "inherit", stdio: "inherit",
shell: false, shell: process.platform === "win32",
cwd: appRoot,
}) })
child.on("exit", (code, signal) => { child.on("exit", (code, signal) => {

18
apps/desktop/scripts/xdg-open Normal file → Executable file
View file

@ -1,9 +1,9 @@
#!/usr/bin/env bash #!/usr/bin/env bash
# Minimal stub to satisfy tools that expect xdg-open during bundling. # Minimal stub to satisfy tools that expect xdg-open during bundling.
# Fails silently when the real binary is unavailable. # Fails silently when the real binary is unavailable.
if command -v xdg-open >/dev/null 2>&1; then if command -v xdg-open >/dev/null 2>&1; then
exec xdg-open "$@" exec xdg-open "$@"
else else
exit 0 exit 0
fi fi

File diff suppressed because it is too large Load diff

View file

@ -1,70 +0,0 @@
[package]
name = "raven-service"
version = "0.1.0"
description = "Raven Windows Service - Executa operacoes privilegiadas para o Raven Desktop"
authors = ["Esdras Renan"]
edition = "2021"
[[bin]]
name = "raven-service"
path = "src/main.rs"
[dependencies]
# Windows Service
windows-service = "0.7"
# Async runtime
tokio = { version = "1", features = ["rt-multi-thread", "macros", "sync", "time", "io-util", "net", "signal"] }
# IPC via Named Pipes
interprocess = { version = "2", features = ["tokio"] }
# Serialization
serde = { version = "1", features = ["derive"] }
serde_json = "1"
# Logging
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
# Windows Registry
winreg = "0.55"
# Error handling
thiserror = "1.0"
# HTTP client (para RustDesk)
reqwest = { version = "0.12", features = ["json", "rustls-tls", "blocking"], default-features = false }
# Date/time
chrono = { version = "0.4", features = ["serde"] }
# Crypto (para RustDesk ID)
sha2 = "0.10"
# UUID para request IDs
uuid = { version = "1", features = ["v4"] }
# Parking lot para locks
parking_lot = "0.12"
# Once cell para singletons
once_cell = "1.19"
[target.'cfg(windows)'.dependencies]
windows = { version = "0.58", features = [
"Win32_Foundation",
"Win32_Security",
"Win32_System_Services",
"Win32_System_Threading",
"Win32_System_Pipes",
"Win32_System_IO",
"Win32_System_SystemServices",
"Win32_Storage_FileSystem",
] }
[profile.release]
opt-level = "z"
lto = true
codegen-units = 1
strip = true

View file

@ -1,290 +0,0 @@
//! Modulo IPC - Servidor de Named Pipes
//!
//! Implementa comunicacao entre o Raven UI e o Raven Service
//! usando Named Pipes do Windows com protocolo JSON-RPC simplificado.
use crate::{rustdesk, usb_policy};
use serde::{Deserialize, Serialize};
use std::io::{BufRead, BufReader, Write};
use thiserror::Error;
use tracing::{debug, info, warn};
/// Errors produced by the IPC server.
#[derive(Debug, Error)]
pub enum IpcError {
    #[error("Erro de IO: {0}")]
    Io(#[from] std::io::Error),
    #[error("Erro de serializacao: {0}")]
    Json(#[from] serde_json::Error),
}

/// Simplified JSON-RPC request (one JSON object per pipe line).
#[derive(Debug, Deserialize)]
pub struct Request {
    pub id: String,
    pub method: String,
    // Absent `params` deserialize as Value::Null thanks to serde(default).
    #[serde(default)]
    pub params: serde_json::Value,
}

/// Simplified JSON-RPC response; exactly one of result/error is populated
/// (None fields are omitted from the serialized JSON).
#[derive(Debug, Serialize)]
pub struct Response {
    pub id: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub result: Option<serde_json::Value>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<ErrorResponse>,
}

/// JSON-RPC style error payload (code + human-readable message).
#[derive(Debug, Serialize)]
pub struct ErrorResponse {
    pub code: i32,
    pub message: String,
}
impl Response {
pub fn success(id: String, result: serde_json::Value) -> Self {
Self {
id,
result: Some(result),
error: None,
}
}
pub fn error(id: String, code: i32, message: String) -> Self {
Self {
id,
result: None,
error: Some(ErrorResponse { code, message }),
}
}
}
/// Starts the Named Pipe IPC server and serves clients forever.
///
/// Each iteration creates a fresh pipe instance, waits for one client and
/// processes its requests; per-connection errors are logged and the loop
/// continues, so a bad client cannot bring the service down. This function
/// only returns if the loop is broken externally (it never yields Ok).
pub async fn run_server(pipe_name: &str) -> Result<(), IpcError> {
    info!("Iniciando servidor IPC em: {}", pipe_name);
    loop {
        match accept_connection(pipe_name).await {
            Ok(()) => {
                debug!("Conexao processada com sucesso");
            }
            Err(e) => {
                warn!("Erro ao processar conexao: {}", e);
            }
        }
    }
}
/// Accepts one client on a fresh pipe instance and serves its requests.
///
/// NOTE(review): the pipe is created with a NULL DACL, which grants access
/// to every local account — confirm this is acceptable for a service that
/// performs privileged operations.
async fn accept_connection(pipe_name: &str) -> Result<(), IpcError> {
    use windows::Win32::Foundation::INVALID_HANDLE_VALUE;
    use windows::Win32::Security::{
        InitializeSecurityDescriptor, SetSecurityDescriptorDacl,
        PSECURITY_DESCRIPTOR, SECURITY_ATTRIBUTES, SECURITY_DESCRIPTOR,
    };
    use windows::Win32::Storage::FileSystem::PIPE_ACCESS_DUPLEX;
    use windows::Win32::System::Pipes::{
        ConnectNamedPipe, CreateNamedPipeW, DisconnectNamedPipe,
        PIPE_READMODE_MESSAGE, PIPE_TYPE_MESSAGE, PIPE_UNLIMITED_INSTANCES, PIPE_WAIT,
    };
    use windows::Win32::System::SystemServices::SECURITY_DESCRIPTOR_REVISION;
    use windows::core::PCWSTR;

    // Create the named pipe with security that allows access to all users.
    // Pipe name must be NUL-terminated UTF-16 for the Win32 API.
    let pipe_name_wide: Vec<u16> = pipe_name.encode_utf16().chain(std::iter::once(0)).collect();

    // Build a security descriptor with a NULL DACL (access for everyone).
    let mut sd = SECURITY_DESCRIPTOR::default();
    unsafe {
        let sd_ptr = PSECURITY_DESCRIPTOR(&mut sd as *mut _ as *mut _);
        let _ = InitializeSecurityDescriptor(sd_ptr, SECURITY_DESCRIPTOR_REVISION);
        // NULL DACL = unrestricted access.
        let _ = SetSecurityDescriptorDacl(sd_ptr, true, None, false);
    }
    let sa = SECURITY_ATTRIBUTES {
        nLength: std::mem::size_of::<SECURITY_ATTRIBUTES>() as u32,
        lpSecurityDescriptor: &mut sd as *mut _ as *mut _,
        bInheritHandle: false.into(),
    };

    let pipe_handle = unsafe {
        CreateNamedPipeW(
            PCWSTR::from_raw(pipe_name_wide.as_ptr()),
            PIPE_ACCESS_DUPLEX,
            PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT,
            PIPE_UNLIMITED_INSTANCES,
            4096, // out buffer
            4096, // in buffer
            0, // default timeout
            Some(&sa), // permissive security
        )
    };

    // Validate the handle.
    if pipe_handle == INVALID_HANDLE_VALUE {
        return Err(IpcError::Io(std::io::Error::last_os_error()));
    }

    // Block until a client connects.
    info!("Aguardando conexao de cliente...");
    let connect_result = unsafe {
        ConnectNamedPipe(pipe_handle, None)
    };
    if let Err(e) = connect_result {
        // ERROR_PIPE_CONNECTED (535) means the client connected between
        // CreateNamedPipeW and ConnectNamedPipe, which is acceptable.
        let error_code = e.code().0 as u32;
        if error_code != 535 {
            warn!("Erro ao aguardar conexao: {:?}", e);
        }
    }
    info!("Cliente conectado");

    // Serve the client's requests.
    let result = process_client(pipe_handle);

    // Disconnect the client so this pipe instance can be torn down.
    unsafe {
        let _ = DisconnectNamedPipe(pipe_handle);
    }

    result
}
/// Serves a connected client: one JSON request per line in, one JSON
/// response per line out, until EOF or a broken pipe.
fn process_client(pipe_handle: windows::Win32::Foundation::HANDLE) -> Result<(), IpcError> {
    use std::os::windows::io::{FromRawHandle, RawHandle};
    use std::fs::File;

    // Wrap the raw pipe handle in a File to reuse std's buffered line I/O.
    let raw_handle = pipe_handle.0 as RawHandle;
    let file = unsafe { File::from_raw_handle(raw_handle) };
    let reader = BufReader::new(file.try_clone()?);
    let mut writer = file;

    // Read lines (each line is one JSON request).
    for line in reader.lines() {
        let line = match line {
            Ok(l) => l,
            Err(e) => {
                // BrokenPipe here just means the client went away.
                if e.kind() == std::io::ErrorKind::BrokenPipe {
                    info!("Cliente desconectou");
                    break;
                }
                return Err(e.into());
            }
        };
        if line.is_empty() {
            continue;
        }
        debug!("Requisicao recebida: {}", line);

        // Parse the request; a parse failure still yields a JSON-RPC error
        // response (code -32700) rather than dropping the connection.
        let response = match serde_json::from_str::<Request>(&line) {
            Ok(request) => handle_request(request),
            Err(e) => Response::error(
                "unknown".to_string(),
                -32700,
                format!("Parse error: {}", e),
            ),
        };

        // Serialize and send the response.
        let response_json = serde_json::to_string(&response)?;
        debug!("Resposta: {}", response_json);
        writeln!(writer, "{}", response_json)?;
        writer.flush()?;
    }

    // IMPORTANT: do not close the handle here — DisconnectNamedPipe in the
    // caller still needs it, so the File wrapper is leaked on purpose.
    std::mem::forget(writer);
    Ok(())
}
/// Processa uma requisicao e retorna a resposta
fn handle_request(request: Request) -> Response {
info!("Processando metodo: {}", request.method);
match request.method.as_str() {
"health_check" => handle_health_check(request.id),
"apply_usb_policy" => handle_apply_usb_policy(request.id, request.params),
"get_usb_policy" => handle_get_usb_policy(request.id),
"provision_rustdesk" => handle_provision_rustdesk(request.id, request.params),
"get_rustdesk_status" => handle_get_rustdesk_status(request.id),
_ => Response::error(
request.id,
-32601,
format!("Metodo nao encontrado: {}", request.method),
),
}
}
// =============================================================================
// Handlers de Requisicoes
// =============================================================================
/// Responds to `health_check` with service name, version and a timestamp.
fn handle_health_check(id: String) -> Response {
    let payload = serde_json::json!({
        "status": "ok",
        "service": "RavenService",
        "version": env!("CARGO_PKG_VERSION"),
        "timestamp": chrono::Utc::now().timestamp_millis()
    });
    Response::success(id, payload)
}
fn handle_apply_usb_policy(id: String, params: serde_json::Value) -> Response {
let policy = match params.get("policy").and_then(|p| p.as_str()) {
Some(p) => p,
None => {
return Response::error(id, -32602, "Parametro 'policy' e obrigatorio".to_string())
}
};
match usb_policy::apply_policy(policy) {
Ok(result) => Response::success(id, serde_json::to_value(result).unwrap()),
Err(e) => Response::error(id, -32000, format!("Erro ao aplicar politica: {}", e)),
}
}
/// Handles `get_usb_policy`: reports the currently active USB policy.
fn handle_get_usb_policy(id: String) -> Response {
    match usb_policy::get_current_policy() {
        Ok(policy) => {
            let body = serde_json::json!({ "policy": policy });
            Response::success(id, body)
        }
        Err(e) => Response::error(id, -32000, format!("Erro ao obter politica: {}", e)),
    }
}
fn handle_provision_rustdesk(id: String, params: serde_json::Value) -> Response {
let config_string = params.get("config").and_then(|c| c.as_str()).map(String::from);
let password = params.get("password").and_then(|p| p.as_str()).map(String::from);
let machine_id = params.get("machineId").and_then(|m| m.as_str()).map(String::from);
match rustdesk::ensure_rustdesk(config_string.as_deref(), password.as_deref(), machine_id.as_deref()) {
Ok(result) => Response::success(id, serde_json::to_value(result).unwrap()),
Err(e) => Response::error(id, -32000, format!("Erro ao provisionar RustDesk: {}", e)),
}
}
fn handle_get_rustdesk_status(id: String) -> Response {
match rustdesk::get_status() {
Ok(status) => Response::success(id, serde_json::to_value(status).unwrap()),
Err(e) => Response::error(id, -32000, format!("Erro ao obter status: {}", e)),
}
}

View file

@ -1,268 +0,0 @@
//! Raven Service - Servico Windows para operacoes privilegiadas
//!
//! Este servico roda como LocalSystem e executa operacoes que requerem
//! privilegios de administrador, como:
//! - Aplicar politicas de USB
//! - Provisionar e configurar RustDesk
//! - Modificar chaves de registro em HKEY_LOCAL_MACHINE
//!
//! O app Raven UI comunica com este servico via Named Pipes.
mod ipc;
mod rustdesk;
mod usb_policy;
use std::ffi::OsString;
use std::time::Duration;
use tracing::{error, info};
use windows_service::{
define_windows_service,
service::{
ServiceControl, ServiceControlAccept, ServiceExitCode, ServiceState, ServiceStatus,
ServiceType,
},
service_control_handler::{self, ServiceControlHandlerResult},
service_dispatcher,
};
// Service identity as registered with the Windows Service Control Manager.
const SERVICE_NAME: &str = "RavenService";
const SERVICE_DISPLAY_NAME: &str = "Raven Desktop Service";
const SERVICE_DESCRIPTION: &str = "Servico do Raven Desktop para operacoes privilegiadas (USB, RustDesk)";
// Named pipe the Raven UI connects to (served by ipc::run_server).
const PIPE_NAME: &str = r"\\.\pipe\RavenService";

// Generates the extern "system" FFI wrapper the SCM dispatcher requires.
define_windows_service!(ffi_service_main, service_main);
/// Process entry point: handles the `install`/`uninstall`/`run` CLI verbs,
/// otherwise hands control to the Windows service dispatcher.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Set up logging first so every later step is captured.
    init_logging();

    // Check command-line arguments for management verbs.
    let args: Vec<String> = std::env::args().collect();
    if args.len() > 1 {
        match args[1].as_str() {
            "install" => {
                install_service()?;
                return Ok(());
            }
            "uninstall" => {
                uninstall_service()?;
                return Ok(());
            }
            "run" => {
                // Test mode: run in the foreground, not as a service.
                info!("Executando em modo de teste (nao como servico)");
                run_standalone()?;
                return Ok(());
            }
            _ => {}
        }
    }

    // Start as a Windows service (fails if not launched by the SCM).
    info!("Iniciando Raven Service...");
    service_dispatcher::start(SERVICE_NAME, ffi_service_main)?;
    Ok(())
}
/// Initializes tracing: logs to %PROGRAMDATA%\RavenService\logs\service.log
/// when the file can be opened, stderr otherwise. The filter defaults to
/// "info" unless overridden via the environment.
fn init_logging() {
    use tracing_subscriber::{fmt, prelude::*, EnvFilter};

    // Try to create the log directory.
    let log_dir = std::env::var("PROGRAMDATA")
        .map(|p| std::path::PathBuf::from(p).join("RavenService").join("logs"))
        .unwrap_or_else(|_| std::path::PathBuf::from("C:\\ProgramData\\RavenService\\logs"));
    let _ = std::fs::create_dir_all(&log_dir);

    // Log file, opened in append mode.
    let log_file = log_dir.join("service.log");
    let file = std::fs::OpenOptions::new()
        .create(true)
        .append(true)
        .open(&log_file)
        .ok();

    let filter = EnvFilter::try_from_default_env()
        .unwrap_or_else(|_| EnvFilter::new("info"));

    if let Some(file) = file {
        // ANSI escape codes disabled for the file sink.
        tracing_subscriber::registry()
            .with(filter)
            .with(fmt::layer().with_writer(file).with_ansi(false))
            .init();
    } else {
        // Fallback: default (console) writer.
        tracing_subscriber::registry()
            .with(filter)
            .with(fmt::layer())
            .init();
    }
}
/// Service entry point invoked by the SCM dispatcher; errors are logged
/// because there is no caller to propagate them to.
fn service_main(arguments: Vec<OsString>) {
    let outcome = run_service(arguments);
    if let Err(e) = outcome {
        error!("Erro ao executar servico: {}", e);
    }
}
/// Body of the Windows service: registers the control handler, reports
/// Running to the SCM, serves IPC until Stop/Shutdown, reports Stopped.
fn run_service(_arguments: Vec<OsString>) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    info!("Servico iniciando...");

    // One-shot channel used by the control handler to signal shutdown.
    let (shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel::<()>();
    let shutdown_tx = std::sync::Arc::new(std::sync::Mutex::new(Some(shutdown_tx)));

    // Register the service control handler.
    let shutdown_tx_clone = shutdown_tx.clone();
    let status_handle = service_control_handler::register(SERVICE_NAME, move |control| {
        match control {
            ServiceControl::Stop | ServiceControl::Shutdown => {
                info!("Recebido comando de parada");
                // take() makes the signal one-shot; repeat Stop events no-op.
                if let Ok(mut guard) = shutdown_tx_clone.lock() {
                    if let Some(tx) = guard.take() {
                        let _ = tx.send(());
                    }
                }
                ServiceControlHandlerResult::NoError
            }
            ServiceControl::Interrogate => ServiceControlHandlerResult::NoError,
            _ => ServiceControlHandlerResult::NotImplemented,
        }
    })?;

    // Report Running to the SCM.
    status_handle.set_service_status(ServiceStatus {
        service_type: ServiceType::OWN_PROCESS,
        current_state: ServiceState::Running,
        controls_accepted: ServiceControlAccept::STOP | ServiceControlAccept::SHUTDOWN,
        exit_code: ServiceExitCode::Win32(0),
        checkpoint: 0,
        wait_hint: Duration::default(),
        process_id: None,
    })?;

    info!("Servico em execucao, aguardando conexoes...");

    // Create the Tokio runtime.
    let runtime = tokio::runtime::Runtime::new()?;

    // Run the IPC server until it fails or shutdown is requested.
    runtime.block_on(async {
        tokio::select! {
            result = ipc::run_server(PIPE_NAME) => {
                if let Err(e) = result {
                    error!("Erro no servidor IPC: {}", e);
                }
            }
            _ = async {
                let _ = shutdown_rx.await;
            } => {
                info!("Shutdown solicitado");
            }
        }
    });

    // Report Stopped to the SCM.
    status_handle.set_service_status(ServiceStatus {
        service_type: ServiceType::OWN_PROCESS,
        current_state: ServiceState::Stopped,
        controls_accepted: ServiceControlAccept::empty(),
        exit_code: ServiceExitCode::Win32(0),
        checkpoint: 0,
        wait_hint: Duration::default(),
        process_id: None,
    })?;

    info!("Servico parado");
    Ok(())
}
/// Foreground mode (`run` verb): serves IPC until Ctrl+C, bypassing the SCM.
fn run_standalone() -> Result<(), Box<dyn std::error::Error>> {
    let runtime = tokio::runtime::Runtime::new()?;
    runtime.block_on(async {
        info!("Servidor IPC iniciando em modo standalone...");
        tokio::select! {
            result = ipc::run_server(PIPE_NAME) => {
                if let Err(e) = result {
                    error!("Erro no servidor IPC: {}", e);
                }
            }
            _ = tokio::signal::ctrl_c() => {
                info!("Ctrl+C recebido, encerrando...");
            }
        }
    });
    Ok(())
}
/// Registers the current executable as an auto-start LocalSystem service.
fn install_service() -> Result<(), Box<dyn std::error::Error>> {
    use windows_service::{
        service::{ServiceAccess, ServiceErrorControl, ServiceInfo, ServiceStartType},
        service_manager::{ServiceManager, ServiceManagerAccess},
    };

    info!("Instalando servico...");
    let manager = ServiceManager::local_computer(None::<&str>, ServiceManagerAccess::CREATE_SERVICE)?;
    let exe_path = std::env::current_exe()?;

    let service_info = ServiceInfo {
        name: OsString::from(SERVICE_NAME),
        display_name: OsString::from(SERVICE_DISPLAY_NAME),
        service_type: ServiceType::OWN_PROCESS,
        start_type: ServiceStartType::AutoStart,
        error_control: ServiceErrorControl::Normal,
        executable_path: exe_path,
        launch_arguments: vec![],
        dependencies: vec![],
        account_name: None, // None = run as LocalSystem
        account_password: None,
    };
    let service = manager.create_service(&service_info, ServiceAccess::CHANGE_CONFIG)?;

    // Set the description shown in the Services snap-in.
    service.set_description(SERVICE_DESCRIPTION)?;

    info!("Servico instalado com sucesso: {}", SERVICE_NAME);
    println!("Servico '{}' instalado com sucesso!", SERVICE_DISPLAY_NAME);
    println!("Para iniciar: sc start {}", SERVICE_NAME);
    Ok(())
}
/// Stops (best effort) and deletes the registered Windows service.
fn uninstall_service() -> Result<(), Box<dyn std::error::Error>> {
    use windows_service::{
        service::ServiceAccess,
        service_manager::{ServiceManager, ServiceManagerAccess},
    };

    info!("Desinstalando servico...");
    let manager = ServiceManager::local_computer(None::<&str>, ServiceManagerAccess::CONNECT)?;
    let service = manager.open_service(
        SERVICE_NAME,
        ServiceAccess::STOP | ServiceAccess::DELETE | ServiceAccess::QUERY_STATUS,
    )?;

    // Try to stop the service first.
    let status = service.query_status()?;
    if status.current_state != ServiceState::Stopped {
        info!("Parando servico...");
        let _ = service.stop();
        // Brief grace period for the stop request to complete.
        std::thread::sleep(Duration::from_secs(2));
    }

    // Remove the service registration.
    service.delete()?;
    info!("Servico desinstalado com sucesso");
    println!("Servico '{}' removido com sucesso!", SERVICE_DISPLAY_NAME);
    Ok(())
}

View file

@ -1,846 +0,0 @@
//! Modulo RustDesk - Provisionamento e gerenciamento do RustDesk
//!
//! Gerencia a instalacao, configuracao e provisionamento do RustDesk.
//! Como o servico roda como LocalSystem, nao precisa de elevacao.
use chrono::Utc;
use once_cell::sync::Lazy;
use parking_lot::Mutex;
use reqwest::blocking::Client;
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};
use std::env;
use std::ffi::OsStr;
use std::fs::{self, File, OpenOptions};
use std::io::{self, Write};
use std::os::windows::process::CommandExt;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use std::thread;
use std::time::Duration;
use thiserror::Error;
use tracing::{error, info, warn};
// GitHub endpoint used to resolve/download the latest RustDesk installer.
const RELEASES_API: &str = "https://api.github.com/repos/rustdesk/rustdesk/releases/latest";
const USER_AGENT: &str = "RavenService/1.0";
// Self-hosted rendezvous/relay server and its public key.
const SERVER_HOST: &str = "rust.rever.com.br";
const SERVER_KEY: &str = "0mxocQKmK6GvTZQYKgjrG9tlNkKOqf81gKgqwAmnZuI=";
// NOTE(review): a permanent access password is hardcoded in source; anyone
// with this repo/binary can derive remote access to provisioned machines.
// Consider supplying it exclusively via the IPC `password` parameter.
const DEFAULT_PASSWORD: &str = "FMQ9MA>e73r.FI<b*34Vmx_8P";
const SERVICE_NAME: &str = "RustDesk";
// Installer download cache, relative to %PROGRAMDATA%.
const CACHE_DIR_NAME: &str = "Rever\\RustDeskCache";
// Config dirs of the LocalService / LocalSystem profiles, where the
// RustDesk service stores its own configuration.
const LOCAL_SERVICE_CONFIG: &str = r"C:\Windows\ServiceProfiles\LocalService\AppData\Roaming\RustDesk\config";
const LOCAL_SYSTEM_CONFIG: &str = r"C:\Windows\System32\config\systemprofile\AppData\Roaming\RustDesk\config";
// Security options forced into RustDesk2.toml / RustDesk_local.toml.
const SECURITY_VERIFICATION_VALUE: &str = "use-permanent-password";
const SECURITY_APPROVE_MODE_VALUE: &str = "password";
// Win32 process-creation flag: spawn child processes without a console.
const CREATE_NO_WINDOW: u32 = 0x08000000;

// Serializes provisioning runs so concurrent IPC requests cannot interleave.
static PROVISION_MUTEX: Lazy<Mutex<()>> = Lazy::new(|| Mutex::new(()));
/// Errors produced while installing, provisioning or querying RustDesk.
#[derive(Debug, Error)]
pub enum RustdeskError {
    #[error("HTTP error: {0}")]
    Http(#[from] reqwest::Error),
    #[error("I/O error: {0}")]
    Io(#[from] io::Error),
    #[error("Release asset nao encontrado para Windows x86_64")]
    AssetMissing,
    #[error("Falha ao executar comando {command}: status {status:?}")]
    CommandFailed { command: String, status: Option<i32> },
    #[error("Falha ao detectar ID do RustDesk")]
    MissingId,
}
/// Outcome of a provisioning run, serialized to the UI as camelCase JSON.
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RustdeskResult {
    pub id: String,
    pub password: String,
    pub installed_version: Option<String>,
    // true when this run performed a fresh install
    pub updated: bool,
    // Unix epoch milliseconds
    pub last_provisioned_at: i64,
}

/// Snapshot of the local RustDesk installation state.
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RustdeskStatus {
    pub installed: bool,
    pub running: bool,
    pub id: Option<String>,
    pub version: Option<String>,
}

/// Subset of a GitHub release asset entry used for installer selection.
#[derive(Debug, Deserialize)]
struct ReleaseAsset {
    name: String,
    browser_download_url: String,
}

/// Subset of the GitHub "latest release" API response.
#[derive(Debug, Deserialize)]
struct ReleaseResponse {
    tag_name: String,
    assets: Vec<ReleaseAsset>,
}
/// Provisions RustDesk end-to-end: install (if absent), apply server
/// configuration, set the permanent password, pin a deterministic ID and
/// start the Windows service.
///
/// Runs under a process-wide mutex so concurrent IPC requests cannot
/// interleave installer/CLI steps.
pub fn ensure_rustdesk(
    config_string: Option<&str>,
    password_override: Option<&str>,
    machine_id: Option<&str>,
) -> Result<RustdeskResult, RustdeskError> {
    let _guard = PROVISION_MUTEX.lock();
    info!("Iniciando provisionamento do RustDesk");

    // Prepare ACLs on the service profile directories (best effort).
    if let Err(e) = ensure_service_profiles_writable() {
        warn!("Aviso ao preparar ACL: {}", e);
    }

    // Read any existing ID before cleanup so reinstalls keep the same ID.
    let preserved_remote_id = read_remote_id_from_profiles();
    if let Some(ref id) = preserved_remote_id {
        info!("ID existente preservado: {}", id);
    }

    let exe_path = detect_executable_path();
    let (installed_version, freshly_installed) = ensure_installed(&exe_path)?;
    info!(
        "RustDesk {}: {}",
        if freshly_installed { "instalado" } else { "ja presente" },
        exe_path.display()
    );

    // Stop any running instances before reconfiguring.
    let _ = stop_rustdesk_processes();

    // Wipe existing profiles only on a fresh install.
    if freshly_installed {
        let _ = purge_existing_rustdesk_profiles();
    }

    // Apply configuration: an inline config string takes precedence over
    // the generated config files.
    if let Some(config) = config_string.filter(|c| !c.trim().is_empty()) {
        if let Err(e) = run_with_args(&exe_path, &["--config", config]) {
            warn!("Falha ao aplicar config inline: {}", e);
        }
    } else {
        let config_path = write_config_files()?;
        if let Err(e) = apply_config(&exe_path, &config_path) {
            warn!("Falha ao aplicar config via CLI: {}", e);
        }
    }

    // Set the permanent password (override, or the built-in default).
    let password = password_override
        .map(|v| v.trim().to_string())
        .filter(|v| !v.is_empty())
        .unwrap_or_else(|| DEFAULT_PASSWORD.to_string());
    if let Err(e) = set_password(&exe_path, &password) {
        warn!("Falha ao definir senha: {}", e);
    } else {
        let _ = ensure_password_files(&password);
        let _ = propagate_password_profile();
    }

    // Choose the ID: reuse the preserved one unless this is a fresh install.
    let custom_id = if let Some(ref existing_id) = preserved_remote_id {
        if !freshly_installed {
            Some(existing_id.clone())
        } else {
            define_custom_id(&exe_path, machine_id)
        }
    } else {
        define_custom_id(&exe_path, machine_id)
    };

    // Start the RustDesk Windows service.
    if let Err(e) = ensure_service_running(&exe_path) {
        warn!("Falha ao iniciar servico: {}", e);
    }

    // Resolve the final ID, falling back to profile files or the custom ID.
    let final_id = match query_id_with_retries(&exe_path, 5) {
        Ok(id) => id,
        Err(_) => {
            read_remote_id_from_profiles()
                .or_else(|| custom_id.clone())
                .ok_or(RustdeskError::MissingId)?
        }
    };

    // Mirror the ID into every known config location.
    ensure_remote_id_files(&final_id);

    let version = query_version(&exe_path).ok().or(installed_version);
    let last_provisioned_at = Utc::now().timestamp_millis();
    info!("Provisionamento concluido. ID: {}, Versao: {:?}", final_id, version);

    Ok(RustdeskResult {
        id: final_id,
        password,
        installed_version: version,
        updated: freshly_installed,
        last_provisioned_at,
    })
}
/// Reports whether RustDesk is installed and running, plus its remote ID
/// and version when available.
pub fn get_status() -> Result<RustdeskStatus, RustdeskError> {
    let exe_path = detect_executable_path();
    let installed = exe_path.exists();

    let mut status = RustdeskStatus {
        installed,
        running: false,
        id: None,
        version: None,
    };

    // Only probe further when the executable is actually present.
    if installed {
        status.running = query_service_state()
            .map(|state| state == "running")
            .unwrap_or(false);
        status.id = query_id(&exe_path).ok().or_else(read_remote_id_from_profiles);
        status.version = query_version(&exe_path).ok();
    }

    Ok(status)
}
// =============================================================================
// Funcoes Auxiliares
// =============================================================================
/// Resolves the expected RustDesk executable path under %PROGRAMFILES%,
/// falling back to "C:/Program Files" when the variable is unset.
fn detect_executable_path() -> PathBuf {
    let root = env::var("PROGRAMFILES").unwrap_or_else(|_| String::from("C:/Program Files"));
    let mut exe = PathBuf::from(root);
    exe.push("RustDesk");
    exe.push("rustdesk.exe");
    exe
}
/// Ensures rustdesk.exe exists, silently installing it when missing.
/// Returns (version tag when installed by this call, fresh-install flag).
fn ensure_installed(exe_path: &Path) -> Result<(Option<String>, bool), RustdeskError> {
    if exe_path.exists() {
        // Already installed; the version is resolved later via the CLI.
        return Ok((None, false));
    }
    let cache_root = PathBuf::from(env::var("PROGRAMDATA").unwrap_or_else(|_| "C:/ProgramData".to_string()))
        .join(CACHE_DIR_NAME);
    fs::create_dir_all(&cache_root)?;
    let (installer_path, version_tag) = download_latest_installer(&cache_root)?;
    run_installer(&installer_path)?;
    // Fixed wait for the silent installer to finish copying files.
    thread::sleep(Duration::from_secs(20));
    Ok((Some(version_tag), true))
}
/// Downloads the latest Windows x86_64 RustDesk installer from GitHub into
/// `cache_root`, reusing a previously downloaded copy when present.
/// Returns the installer path and the release tag.
fn download_latest_installer(cache_root: &Path) -> Result<(PathBuf, String), RustdeskError> {
    let client = Client::builder()
        .user_agent(USER_AGENT)
        .timeout(Duration::from_secs(60))
        .build()?;
    let release: ReleaseResponse = client.get(RELEASES_API).send()?.error_for_status()?.json()?;
    // Pick the x86_64 Windows executable among the release assets.
    let asset = release
        .assets
        .iter()
        .find(|a| a.name.ends_with("x86_64.exe"))
        .ok_or(RustdeskError::AssetMissing)?;
    let target_path = cache_root.join(&asset.name);
    // Cache hit: the asset name is version-specific, so reuse it as-is.
    if target_path.exists() {
        return Ok((target_path, release.tag_name));
    }
    info!("Baixando RustDesk: {}", asset.name);
    let mut response = client.get(&asset.browser_download_url).send()?.error_for_status()?;
    let mut output = File::create(&target_path)?;
    response.copy_to(&mut output)?;
    Ok((target_path, release.tag_name))
}
/// Runs the cached installer with `--silent-install` and no visible window,
/// mapping a non-zero exit status to RustdeskError::CommandFailed.
fn run_installer(installer_path: &Path) -> Result<(), RustdeskError> {
    let status = hidden_command(installer_path)
        .arg("--silent-install")
        .stdout(Stdio::null())
        .stderr(Stdio::null())
        .status()?;
    if !status.success() {
        return Err(RustdeskError::CommandFailed {
            command: format!("{} --silent-install", installer_path.display()),
            status: status.code(),
        });
    }
    Ok(())
}
/// Machine-wide RustDesk config directory under %PROGRAMDATA%
/// (falls back to "C:/ProgramData" when the variable is unset).
fn program_data_config_dir() -> PathBuf {
    let base = env::var("PROGRAMDATA").unwrap_or_else(|_| String::from("C:/ProgramData"));
    let mut dir = PathBuf::from(base);
    dir.push("RustDesk");
    dir.push("config");
    dir
}
/// Returns every per-user AppData\Roaming\RustDesk\config directory.
/// The service runs as LocalSystem, so user profiles are enumerated from
/// C:\Users instead of relying on the current user's environment.
fn all_user_appdata_config_dirs() -> Vec<PathBuf> {
    let mut dirs = Vec::new();

    // Enumerate C:\Users\*\AppData\Roaming\RustDesk\config.
    let users_dir = Path::new("C:\\Users");
    if let Ok(entries) = fs::read_dir(users_dir) {
        for entry in entries.flatten() {
            let path = entry.path();
            // Skip the well-known system profile folders.
            let name = path.file_name().and_then(|n| n.to_str()).unwrap_or("");
            if name == "Public" || name == "Default" || name == "Default User" || name == "All Users" {
                continue;
            }
            let rustdesk_config = path.join("AppData").join("Roaming").join("RustDesk").join("config");
            // Only keep profiles that have a Roaming dir (real users).
            if path.join("AppData").join("Roaming").exists() {
                dirs.push(rustdesk_config);
            }
        }
    }

    // Also consider %APPDATA% from the environment (useful in some cases).
    if let Ok(appdata) = env::var("APPDATA") {
        let path = Path::new(&appdata).join("RustDesk").join("config");
        if !dirs.contains(&path) {
            dirs.push(path);
        }
    }
    dirs
}
/// Config dirs of the LocalService and LocalSystem service profiles.
fn service_profile_dirs() -> Vec<PathBuf> {
    [LOCAL_SERVICE_CONFIG, LOCAL_SYSTEM_CONFIG]
        .into_iter()
        .map(PathBuf::from)
        .collect()
}
/// All directories that may hold RustDesk config/ID files: the machine-wide
/// ProgramData dir first, then service profiles, then per-user profiles.
fn remote_id_directories() -> Vec<PathBuf> {
    let mut dirs = vec![program_data_config_dir()];
    dirs.extend(service_profile_dirs());
    dirs.extend(all_user_appdata_config_dirs());
    dirs
}
/// Writes the RustDesk2.toml server configuration into ProgramData and
/// mirrors it into both service-profile config dirs (best effort).
/// Returns the path of the ProgramData copy.
fn write_config_files() -> Result<PathBuf, RustdeskError> {
    let config_contents = format!(
        r#"[options]
key = "{key}"
relay-server = "{host}"
custom-rendezvous-server = "{host}"
api-server = "https://{host}"
verification-method = "{verification}"
approve-mode = "{approve}"
"#,
        host = SERVER_HOST,
        key = SERVER_KEY,
        verification = SECURITY_VERIFICATION_VALUE,
        approve = SECURITY_APPROVE_MODE_VALUE,
    );
    let main_path = program_data_config_dir().join("RustDesk2.toml");
    write_file(&main_path, &config_contents)?;
    // Mirror into LocalService/LocalSystem profiles; failures are ignored.
    for service_dir in service_profile_dirs() {
        let service_profile = service_dir.join("RustDesk2.toml");
        let _ = write_file(&service_profile, &config_contents);
    }
    Ok(main_path)
}
/// Creates parent directories as needed, then (over)writes `contents`
/// to `path`.
fn write_file(path: &Path, contents: &str) -> Result<(), io::Error> {
    if let Some(dir) = path.parent() {
        fs::create_dir_all(dir)?;
    }
    // fs::write opens with create + write + truncate and writes all bytes,
    // matching the previous OpenOptions-based implementation.
    fs::write(path, contents)
}
/// Imports the generated config file into RustDesk via its CLI.
fn apply_config(exe_path: &Path, config_path: &Path) -> Result<(), RustdeskError> {
    let config_arg = config_path.to_string_lossy();
    run_with_args(exe_path, &["--import-config", config_arg.as_ref()])
}
/// Sets the permanent access password via the RustDesk CLI.
fn set_password(exe_path: &Path, secret: &str) -> Result<(), RustdeskError> {
    let args = ["--password", secret];
    run_with_args(exe_path, &args)
}
/// Derives a deterministic 9-digit ID from `machine_id` and applies it via
/// `--set-id`. Returns the ID on success; None when no usable machine id
/// was supplied or the CLI call failed.
fn define_custom_id(exe_path: &Path, machine_id: Option<&str>) -> Option<String> {
    // Reject missing or whitespace-only machine ids early.
    let value = machine_id.and_then(|raw| {
        let trimmed = raw.trim();
        if trimmed.is_empty() { None } else { Some(trimmed) }
    })?;
    let custom_id = derive_numeric_id(value);
    if run_with_args(exe_path, &["--set-id", &custom_id]).is_ok() {
        info!("ID deterministico definido: {}", custom_id);
        Some(custom_id)
    } else {
        None
    }
}
/// Deterministically maps a machine ID to a 9-digit numeric RustDesk ID in
/// the range 100_000_000..=999_999_999 (SHA-256 of the ID, first 8 bytes
/// as a little-endian u64, reduced into the range).
fn derive_numeric_id(machine_id: &str) -> String {
    let mut hasher = Sha256::new();
    hasher.update(machine_id.as_bytes());
    let digest = hasher.finalize();

    let mut head = [0u8; 8];
    head.copy_from_slice(&digest[..8]);
    let seed = u64::from_le_bytes(head);

    format!("{:09}", seed % 900_000_000 + 100_000_000)
}
// Guarantees the RustDesk Windows service is installed, configured for
// auto-start, and started; the sc.exe calls are best-effort (errors ignored).
// Once the service manages RustDesk, per-user autorun entries are redundant
// and get removed.
fn ensure_service_running(exe_path: &Path) -> Result<(), RustdeskError> {
    ensure_service_installed(exe_path)?;
    // "start=" followed by "auto" as a separate token is the sc.exe syntax.
    let _ = run_sc(&["config", SERVICE_NAME, "start=", "auto"]);
    let _ = run_sc(&["start", SERVICE_NAME]);
    remove_rustdesk_autorun_artifacts();
    Ok(())
}
/// Installs the RustDesk service unless `sc query` reports it already exists.
fn ensure_service_installed(exe_path: &Path) -> Result<(), RustdeskError> {
    match run_sc(&["query", SERVICE_NAME]) {
        Ok(()) => Ok(()),
        Err(_) => run_with_args(exe_path, &["--install-service"]),
    }
}
/// Stops the RustDesk service, waits briefly, then force-kills any
/// remaining rustdesk.exe process tree.
fn stop_rustdesk_processes() -> Result<(), RustdeskError> {
    let _ = run_sc(&["stop", SERVICE_NAME]);
    thread::sleep(Duration::from_secs(2));
    let status = hidden_command("taskkill")
        .args(["/F", "/T", "/IM", "rustdesk.exe"])
        .stdout(Stdio::null())
        .stderr(Stdio::null())
        .status()?;
    // taskkill exit code 128 is treated as success here (as in the original
    // logic), alongside a plain zero exit.
    match status.code() {
        _ if status.success() => Ok(()),
        Some(128) => Ok(()),
        code => Err(RustdeskError::CommandFailed {
            command: "taskkill".into(),
            status: code,
        }),
    }
}
/// Best-effort removal of every known RustDesk profile/password file from
/// every profile directory. Missing directories are skipped; individual
/// delete failures are ignored.
fn purge_existing_rustdesk_profiles() -> Result<(), String> {
    const PROFILE_FILES: [&str; 6] = [
        "RustDesk.toml",
        "RustDesk_local.toml",
        "RustDesk2.toml",
        "password",
        "passwd",
        "passwd.txt",
    ];
    for dir in remote_id_directories().into_iter().filter(|d| d.exists()) {
        for name in PROFILE_FILES {
            let candidate = dir.join(name);
            if candidate.exists() {
                let _ = fs::remove_file(&candidate);
            }
        }
    }
    Ok(())
}
/// Writes the password and security options into every profile directory.
/// All writes are best-effort: some directories may not be writable yet.
fn ensure_password_files(secret: &str) -> Result<(), String> {
    for dir in remote_id_directories() {
        let _ = write_toml_kv(&dir.join("RustDesk.toml"), "password", secret);
        let local = dir.join("RustDesk_local.toml");
        let _ = write_toml_kv(&local, "verification-method", SECURITY_VERIFICATION_VALUE);
        let _ = write_toml_kv(&local, "approve-mode", SECURITY_APPROVE_MODE_VALUE);
    }
    Ok(())
}
/// Propagates an existing RustDesk profile to all other config locations.
/// Prefers a per-user directory that already holds RustDesk.toml and falls
/// back to ProgramData; returns Ok(false) when no source exists.
fn propagate_password_profile() -> io::Result<bool> {
    let user_dirs = all_user_appdata_config_dirs();
    match user_dirs.iter().find(|d| d.join("RustDesk.toml").exists()) {
        Some(source) => propagate_from_dir(source),
        None => {
            // No user has a config yet; use ProgramData as the source if it has one.
            let program_data = program_data_config_dir();
            if program_data.join("RustDesk.toml").exists() {
                propagate_from_dir(&program_data)
            } else {
                Ok(false)
            }
        }
    }
}
/// Copies the known profile files from `src_dir` into every other profile
/// directory. Returns Ok(true) when at least one copy succeeded.
fn propagate_from_dir(src_dir: &Path) -> io::Result<bool> {
    let mut copied_any = false;
    for filename in ["RustDesk.toml", "RustDesk_local.toml", "RustDesk2.toml"] {
        let source = src_dir.join(filename);
        if !source.exists() {
            continue;
        }
        for destination_root in remote_id_directories() {
            // Never copy a file onto itself.
            if destination_root == src_dir {
                continue;
            }
            let destination = destination_root.join(filename);
            if copy_overwrite(&source, &destination).is_ok() {
                copied_any = true;
            }
        }
    }
    Ok(copied_any)
}
/// Stamps the given remote id into RustDesk_local.toml for every profile
/// directory; write failures are ignored.
fn ensure_remote_id_files(id: &str) {
    for dir in remote_id_directories() {
        let _ = write_remote_id_value(&dir.join("RustDesk_local.toml"), id);
    }
}
/// Writes (or rewrites) the `remote_id` entry in the TOML file at `path`.
///
/// Existing lines are preserved; each line whose key starts with
/// `remote_id` is replaced in place, and the entry is appended when absent.
/// Parent directories are created as needed; an unreadable/missing file is
/// treated as empty.
fn write_remote_id_value(path: &Path, id: &str) -> io::Result<()> {
    if let Some(parent) = path.parent() {
        fs::create_dir_all(parent)?;
    }
    let replacement = format!("remote_id = '{}'\n", id);
    let buffer = match fs::read_to_string(path) {
        Ok(existing) => {
            let mut rebuilt = String::with_capacity(existing.len() + replacement.len());
            let mut replaced = false;
            for line in existing.lines() {
                if line.trim_start().starts_with("remote_id") {
                    rebuilt.push_str(&replacement);
                    replaced = true;
                } else {
                    rebuilt.push_str(line);
                    rebuilt.push('\n');
                }
            }
            if !replaced {
                rebuilt.push_str(&replacement);
            }
            rebuilt
        }
        // File missing or unreadable: start fresh with only the entry.
        Err(_) => replacement,
    };
    // `fs::write` replaces the two duplicated OpenOptions
    // create/truncate/write sequences of the previous implementation.
    fs::write(path, buffer.as_bytes())
}
/// Sets `key = "value"` in the TOML file at `path`, replacing any existing
/// assignment of the same key and appending it when absent.
///
/// The value is escaped for a double-quoted TOML string (backslash and
/// double quote). Parent directories are created as needed; a missing file
/// is treated as empty.
fn write_toml_kv(path: &Path, key: &str, value: &str) -> io::Result<()> {
    if let Some(parent) = path.parent() {
        fs::create_dir_all(parent)?;
    }
    let sanitized = value.replace('\\', "\\\\").replace('"', "\\\"");
    let replacement = format!("{key} = \"{sanitized}\"\n");
    let existing = fs::read_to_string(path).unwrap_or_default();
    let mut buffer = String::with_capacity(existing.len() + replacement.len());
    let mut replaced = false;
    for line in existing.lines() {
        // Compare the exact key token left of '='. This tolerates tabs or
        // multiple spaces around '=' (the previous "{key} "/"{key}=" prefix
        // test missed `key\t= ...`, producing duplicate entries) and never
        // matches a different key that merely starts with `key`.
        let is_target = line
            .split_once('=')
            .map(|(lhs, _)| lhs.trim() == key)
            .unwrap_or(false);
        if is_target {
            buffer.push_str(&replacement);
            replaced = true;
        } else {
            buffer.push_str(line);
            buffer.push('\n');
        }
    }
    if !replaced {
        buffer.push_str(&replacement);
    }
    fs::write(path, buffer.as_bytes())
}
/// Scans every profile directory (RustDesk_local.toml first, then
/// RustDesk.toml) and returns the first non-empty remote id found.
fn read_remote_id_from_profiles() -> Option<String> {
    remote_id_directories().into_iter().find_map(|dir| {
        [dir.join("RustDesk_local.toml"), dir.join("RustDesk.toml")]
            .into_iter()
            .filter_map(|candidate| read_remote_id_file(&candidate))
            .find(|id| !id.is_empty())
    })
}
/// Reads the `remote_id` assignment from a single TOML file, if present.
fn read_remote_id_file(path: &Path) -> Option<String> {
    fs::read_to_string(path)
        .ok()?
        .lines()
        .find_map(|line| parse_assignment(line, "remote_id"))
}
/// Parses a `key = value` line, returning the value with surrounding
/// single or double quotes stripped.
///
/// Returns `None` when the line is not an assignment of exactly `key`
/// or when the value is empty. The previous implementation used a plain
/// `starts_with(key)` prefix test, so a different key that merely starts
/// with `key` (e.g. `remote_id_backup` vs `remote_id`) would wrongly match
/// and return the wrong key's value.
fn parse_assignment(line: &str, key: &str) -> Option<String> {
    let (lhs, rhs) = line.trim().split_once('=')?;
    // Compare the full key token, not just a prefix.
    if lhs.trim() != key {
        return None;
    }
    let value = rhs.trim().trim_matches(|c| c == '\'' || c == '"');
    if value.is_empty() {
        None
    } else {
        Some(value.to_string())
    }
}
/// Polls `--get-id` up to `attempts` times, sleeping 800ms between tries.
/// Returns the first non-blank id, or `MissingId` when all attempts fail.
fn query_id_with_retries(exe_path: &Path, attempts: usize) -> Result<String, RustdeskError> {
    for attempt in 1..=attempts {
        if let Ok(value) = query_id(exe_path) {
            if !value.trim().is_empty() {
                return Ok(value);
            }
        }
        // Only sleep when another attempt will follow.
        if attempt < attempts {
            thread::sleep(Duration::from_millis(800));
        }
    }
    Err(RustdeskError::MissingId)
}
/// Runs `<exe> --get-id` and returns the trimmed stdout; a failing exit or
/// blank output yields an error.
fn query_id(exe_path: &Path) -> Result<String, RustdeskError> {
    let output = hidden_command(exe_path).arg("--get-id").output()?;
    if !output.status.success() {
        return Err(RustdeskError::CommandFailed {
            command: format!("{} --get-id", exe_path.display()),
            status: output.status.code(),
        });
    }
    match String::from_utf8_lossy(&output.stdout).trim() {
        "" => Err(RustdeskError::MissingId),
        id => Ok(id.to_string()),
    }
}
/// Runs `<exe> --version` and returns the trimmed stdout.
fn query_version(exe_path: &Path) -> Result<String, RustdeskError> {
    let output = hidden_command(exe_path).arg("--version").output()?;
    if output.status.success() {
        Ok(String::from_utf8_lossy(&output.stdout).trim().to_string())
    } else {
        Err(RustdeskError::CommandFailed {
            command: format!("{} --version", exe_path.display()),
            status: output.status.code(),
        })
    }
}
/// Parses `sc query <service>` output and returns "running" or "stopped"
/// based on the first line mentioning either word; None when undetermined.
fn query_service_state() -> Option<String> {
    let output = hidden_command("sc")
        .args(["query", SERVICE_NAME])
        .output()
        .ok()?;
    if !output.status.success() {
        return None;
    }
    let stdout = String::from_utf8_lossy(&output.stdout);
    stdout.lines().find_map(|line| {
        let lower = line.to_lowercase();
        if lower.contains("running") {
            Some("running".to_string())
        } else if lower.contains("stopped") {
            Some("stopped".to_string())
        } else {
            None
        }
    })
}
/// Runs `sc.exe` with the given arguments, discarding its output and
/// mapping a non-zero exit into `CommandFailed`.
fn run_sc(args: &[&str]) -> Result<(), RustdeskError> {
    let status = hidden_command("sc")
        .args(args)
        .stdout(Stdio::null())
        .stderr(Stdio::null())
        .status()?;
    if status.success() {
        Ok(())
    } else {
        Err(RustdeskError::CommandFailed {
            command: format!("sc {}", args.join(" ")),
            status: status.code(),
        })
    }
}
/// Runs the given executable with `args`, discarding its output and
/// mapping a non-zero exit into `CommandFailed`.
fn run_with_args(exe_path: &Path, args: &[&str]) -> Result<(), RustdeskError> {
    let status = hidden_command(exe_path)
        .args(args)
        .stdout(Stdio::null())
        .stderr(Stdio::null())
        .status()?;
    if status.success() {
        Ok(())
    } else {
        Err(RustdeskError::CommandFailed {
            command: format!("{} {}", exe_path.display(), args.join(" ")),
            status: status.code(),
        })
    }
}
/// Removes RustDesk auto-start shortcuts (per-user and all-users Startup
/// folders) and the RustDesk Run-key registry entries under HKCU and HKLM.
/// Everything is best-effort: failures are silently ignored.
fn remove_rustdesk_autorun_artifacts() {
    // Startup-folder shortcuts.
    let mut shortcuts = vec![PathBuf::from(
        r"C:\ProgramData\Microsoft\Windows\Start Menu\Programs\Startup\RustDesk.lnk",
    )];
    if let Ok(appdata) = env::var("APPDATA") {
        shortcuts.push(
            Path::new(&appdata)
                .join("Microsoft\\Windows\\Start Menu\\Programs\\Startup\\RustDesk.lnk"),
        );
    }
    for shortcut in shortcuts {
        if shortcut.exists() {
            let _ = fs::remove_file(&shortcut);
        }
    }
    // Run-key entries in both registry hives.
    for hive in ["HKCU", "HKLM"] {
        let reg_path = format!(r"{}\Software\Microsoft\Windows\CurrentVersion\Run", hive);
        let _ = hidden_command("reg")
            .args(["delete", &reg_path, "/v", "RustDesk", "/f"])
            .stdout(Stdio::null())
            .stderr(Stdio::null())
            .status();
    }
}
/// Repairs ACLs on any service-profile directory that is not writable.
fn ensure_service_profiles_writable() -> Result<(), String> {
    service_profile_dirs()
        .into_iter()
        .filter(|dir| !can_write_dir(dir))
        .try_for_each(|dir| fix_profile_acl(&dir))
}
/// Checks whether `dir` is writable by creating it (if needed) and writing
/// a small probe file, which is removed afterwards.
fn can_write_dir(dir: &Path) -> bool {
    if fs::create_dir_all(dir).is_err() {
        return false;
    }
    let probe = dir.join(".raven_acl_probe");
    let writable = fs::write(&probe, b"ok").is_ok();
    // Clean up regardless of outcome; removal failures are irrelevant.
    let _ = fs::remove_file(&probe);
    writable
}
/// Takes ownership of `target` and re-grants access recursively via icacls
/// so the service-profile directory becomes writable.
///
/// Grants (well-known SIDs): S-1-5-32-544 Administrators full control,
/// S-1-5-19 LocalService full control, S-1-5-32-545 Users modify.
/// `takeown` is best-effort; only the `icacls` result decides the outcome.
fn fix_profile_acl(target: &Path) -> Result<(), String> {
    let target_str = target.display().to_string();
    // Already running as LocalSystem, so takeown/icacls can be used directly.
    let _ = hidden_command("takeown")
        .args(["/F", &target_str, "/R", "/D", "Y"])
        .stdout(Stdio::null())
        .stderr(Stdio::null())
        .status();
    // /T recursive, /C continue on errors, /Q quiet.
    let status = hidden_command("icacls")
        .args([
            &target_str,
            "/grant",
            "*S-1-5-32-544:(OI)(CI)F",
            "*S-1-5-19:(OI)(CI)F",
            "*S-1-5-32-545:(OI)(CI)M",
            "/T",
            "/C",
            "/Q",
        ])
        .stdout(Stdio::null())
        .stderr(Stdio::null())
        .status()
        .map_err(|e| format!("Erro ao executar icacls: {}", e))?;
    if status.success() {
        Ok(())
    } else {
        Err(format!("icacls retornou codigo {}", status.code().unwrap_or(-1)))
    }
}
/// Copies `src` over `dst`, first clearing any existing file or directory
/// at the destination and creating the destination's parent directories.
fn copy_overwrite(src: &Path, dst: &Path) -> io::Result<()> {
    if let Some(parent) = dst.parent() {
        fs::create_dir_all(parent)?;
    }
    match (dst.is_dir(), dst.exists()) {
        (true, _) => fs::remove_dir_all(dst)?,
        (false, true) => fs::remove_file(dst)?,
        (false, false) => {}
    }
    fs::copy(src, dst)?;
    Ok(())
}
// Builds a `Command` with the CREATE_NO_WINDOW creation flag so that no
// console window flashes when spawning child processes from the service.
fn hidden_command(program: impl AsRef<OsStr>) -> Command {
    let mut cmd = Command::new(program);
    cmd.creation_flags(CREATE_NO_WINDOW);
    cmd
}

View file

@ -1,259 +0,0 @@
//! Modulo USB Policy - Controle de dispositivos USB
//!
//! Implementa o controle de armazenamento USB no Windows.
//! Como o servico roda como LocalSystem, nao precisa de elevacao.
use serde::{Deserialize, Serialize};
use std::io;
use thiserror::Error;
use tracing::{error, info, warn};
use winreg::enums::*;
use winreg::RegKey;
// GUID para Removable Storage Devices (Disk)
const REMOVABLE_STORAGE_GUID: &str = "{53f56307-b6bf-11d0-94f2-00a0c91efb8b}";
// Chaves de registro
const REMOVABLE_STORAGE_PATH: &str = r"Software\Policies\Microsoft\Windows\RemovableStorageDevices";
const USBSTOR_PATH: &str = r"SYSTEM\CurrentControlSet\Services\USBSTOR";
const STORAGE_POLICY_PATH: &str = r"SYSTEM\CurrentControlSet\Control\StorageDevicePolicies";
/// USB storage policy applied through the Windows registry.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
pub enum UsbPolicy {
    // No restrictions on USB storage.
    Allow,
    // USB storage blocked entirely.
    BlockAll,
    // Reading allowed, writing denied.
    Readonly,
}
impl UsbPolicy {
    /// Parses a policy name case-insensitively; returns `None` for any
    /// unrecognized string.
    pub fn from_str(s: &str) -> Option<Self> {
        const TABLE: [(&str, UsbPolicy); 3] = [
            ("ALLOW", UsbPolicy::Allow),
            ("BLOCK_ALL", UsbPolicy::BlockAll),
            ("READONLY", UsbPolicy::Readonly),
        ];
        let wanted = s.to_uppercase();
        TABLE
            .iter()
            .find(|(name, _)| *name == wanted)
            .map(|(_, policy)| *policy)
    }
    /// Canonical SCREAMING_SNAKE_CASE name of the policy.
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Allow => "ALLOW",
            Self::BlockAll => "BLOCK_ALL",
            Self::Readonly => "READONLY",
        }
    }
}
/// Outcome payload returned after applying a USB policy
/// (serialized in camelCase for the frontend).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct UsbPolicyResult {
    // Whether the policy was applied end-to-end.
    pub success: bool,
    // Canonical policy name in SCREAMING_SNAKE_CASE, e.g. "BLOCK_ALL".
    pub policy: String,
    // Error message, if any; `None` on the success path.
    pub error: Option<String>,
    // Unix timestamp (milliseconds) of when the policy was applied.
    pub applied_at: Option<i64>,
}
/// Errors produced by the USB policy module (Display via thiserror).
#[derive(Error, Debug)]
pub enum UsbControlError {
    // Unrecognized policy string (not ALLOW / BLOCK_ALL / READONLY).
    #[error("Politica USB invalida: {0}")]
    InvalidPolicy(String),
    // A Windows registry operation failed; the message carries details.
    #[error("Erro de registro do Windows: {0}")]
    RegistryError(String),
    // Raw OS error 5 (access denied) from a registry operation.
    #[error("Permissao negada")]
    PermissionDenied,
    // Propagated I/O failure (e.g. spawning gpupdate).
    #[error("Erro de I/O: {0}")]
    Io(#[from] io::Error),
}
/// Applies a USB policy given its string name.
///
/// Writes three registry layers in order — the RemovableStorageDevices GPO
/// values, the USBSTOR driver start value, and the global WriteProtect
/// flag — then triggers a best-effort group-policy refresh.
///
/// Returns `InvalidPolicy` for an unknown name, or the first registry
/// error encountered.
pub fn apply_policy(policy_str: &str) -> Result<UsbPolicyResult, UsbControlError> {
    let policy = UsbPolicy::from_str(policy_str)
        .ok_or_else(|| UsbControlError::InvalidPolicy(policy_str.to_string()))?;
    let applied_at = chrono::Utc::now().timestamp_millis();
    info!("Aplicando politica USB: {:?}", policy);
    apply_removable_storage_policy(policy)?;
    apply_usbstor_policy(policy)?;
    // WriteProtect is only enabled for the read-only policy.
    apply_write_protect(policy == UsbPolicy::Readonly)?;
    // Group-policy refresh failures only produce a warning.
    if let Err(e) = refresh_group_policy() {
        warn!("Falha ao atualizar group policy: {}", e);
    }
    info!("Politica USB aplicada com sucesso: {:?}", policy);
    Ok(UsbPolicyResult {
        success: true,
        policy: policy.as_str().to_string(),
        error: None,
        applied_at: Some(applied_at),
    })
}
/// Returns the current USB policy name by inspecting the registry.
///
/// The RemovableStorageDevices GPO values take precedence; the USBSTOR
/// driver start value (4 = disabled) is the fallback, and anything else
/// reads as ALLOW.
pub fn get_current_policy() -> Result<String, UsbControlError> {
    let hklm = RegKey::predef(HKEY_LOCAL_MACHINE);
    let full_path = format!(r"{}\{}", REMOVABLE_STORAGE_PATH, REMOVABLE_STORAGE_GUID);
    if let Ok(key) = hklm.open_subkey_with_flags(&full_path, KEY_READ) {
        let deny_read: u32 = key.get_value("Deny_Read").unwrap_or(0);
        let deny_write: u32 = key.get_value("Deny_Write").unwrap_or(0);
        match (deny_read, deny_write) {
            (1, 1) => return Ok("BLOCK_ALL".to_string()),
            (0, 1) => return Ok("READONLY".to_string()),
            _ => {}
        }
    }
    if let Ok(key) = hklm.open_subkey_with_flags(USBSTOR_PATH, KEY_READ) {
        if key.get_value::<u32, _>("Start").unwrap_or(3) == 4 {
            return Ok("BLOCK_ALL".to_string());
        }
    }
    Ok("ALLOW".to_string())
}
/// Writes the RemovableStorageDevices GPO values for the disk-class GUID.
///
/// For Allow the Deny_* values (and, when possible, the key itself) are
/// removed; otherwise (Deny_Read, Deny_Write, Deny_Execute) are written.
fn apply_removable_storage_policy(policy: UsbPolicy) -> Result<(), UsbControlError> {
    let hklm = RegKey::predef(HKEY_LOCAL_MACHINE);
    let full_path = format!(r"{}\{}", REMOVABLE_STORAGE_PATH, REMOVABLE_STORAGE_GUID);
    // (Deny_Read, Deny_Write, Deny_Execute) per policy; None clears them.
    let deny_values = match policy {
        UsbPolicy::Allow => None,
        UsbPolicy::BlockAll => Some((1u32, 1u32, 1u32)),
        UsbPolicy::Readonly => Some((0u32, 1u32, 0u32)),
    };
    match deny_values {
        None => {
            // Drop any existing restrictions, then try to delete the key
            // (the delete only succeeds when the key is empty).
            if let Ok(key) = hklm.open_subkey_with_flags(&full_path, KEY_ALL_ACCESS) {
                let _ = key.delete_value("Deny_Read");
                let _ = key.delete_value("Deny_Write");
                let _ = key.delete_value("Deny_Execute");
            }
            let _ = hklm.delete_subkey(&full_path);
        }
        Some((read, write, execute)) => {
            let (key, _) = hklm.create_subkey(&full_path).map_err(map_winreg_error)?;
            key.set_value("Deny_Read", &read).map_err(map_winreg_error)?;
            key.set_value("Deny_Write", &write).map_err(map_winreg_error)?;
            key.set_value("Deny_Execute", &execute)
                .map_err(map_winreg_error)?;
        }
    }
    Ok(())
}
/// Sets the USBSTOR driver Start value: 4 disables the driver (BlockAll),
/// 3 keeps it enabled (Allow and Readonly).
fn apply_usbstor_policy(policy: UsbPolicy) -> Result<(), UsbControlError> {
    let start_value: u32 = if policy == UsbPolicy::BlockAll { 4 } else { 3 };
    let hklm = RegKey::predef(HKEY_LOCAL_MACHINE);
    hklm.open_subkey_with_flags(USBSTOR_PATH, KEY_ALL_ACCESS)
        .map_err(map_winreg_error)?
        .set_value("Start", &start_value)
        .map_err(map_winreg_error)?;
    Ok(())
}
/// Toggles the StorageDevicePolicies WriteProtect flag.
fn apply_write_protect(enable: bool) -> Result<(), UsbControlError> {
    let hklm = RegKey::predef(HKEY_LOCAL_MACHINE);
    if enable {
        // Create the key if missing and force the flag on.
        let (key, _) = hklm
            .create_subkey(STORAGE_POLICY_PATH)
            .map_err(map_winreg_error)?;
        key.set_value("WriteProtect", &1u32)
            .map_err(map_winreg_error)?;
        return Ok(());
    }
    // When disabling, only clear the flag if the key already exists.
    if let Ok(key) = hklm.open_subkey_with_flags(STORAGE_POLICY_PATH, KEY_ALL_ACCESS) {
        let _ = key.set_value("WriteProtect", &0u32);
    }
    Ok(())
}
/// Forces a machine-scope group-policy refresh via `gpupdate`.
/// A non-zero exit only produces a warning; spawn failures are errors.
fn refresh_group_policy() -> Result<(), UsbControlError> {
    use std::os::windows::process::CommandExt;
    use std::process::Command;
    const CREATE_NO_WINDOW: u32 = 0x08000000;
    let result = Command::new("gpupdate")
        .args(["/target:computer", "/force"])
        .creation_flags(CREATE_NO_WINDOW)
        .output();
    match result {
        Err(err) => Err(UsbControlError::Io(err)),
        Ok(output) if output.status.success() => Ok(()),
        Ok(output) => {
            warn!(
                "gpupdate retornou erro: {}",
                String::from_utf8_lossy(&output.stderr)
            );
            Ok(())
        }
    }
}
fn map_winreg_error(error: io::Error) -> UsbControlError {
if let Some(code) = error.raw_os_error() {
if code == 5 {
return UsbControlError::PermissionDenied;
}
}
UsbControlError::RegistryError(error.to_string())
}

File diff suppressed because it is too large Load diff

View file

@ -18,33 +18,19 @@ crate-type = ["staticlib", "cdylib", "rlib"]
tauri-build = { version = "2.4.1", features = [] } tauri-build = { version = "2.4.1", features = [] }
[dependencies] [dependencies]
tauri = { version = "2.9", features = ["wry", "devtools", "tray-icon"] } tauri = { version = "2.8.5", features = ["wry"] }
tauri-plugin-dialog = "2.4.2"
tauri-plugin-opener = "2.5.0" tauri-plugin-opener = "2.5.0"
tauri-plugin-store = "2.4.0" tauri-plugin-store = "2.4.0"
tauri-plugin-updater = "2.9.0" tauri-plugin-updater = "2.9.0"
tauri-plugin-process = "2.3.0" tauri-plugin-process = "2.3.0"
tauri-plugin-notification = "2"
tauri-plugin-deep-link = "2"
tauri-plugin-single-instance = "2"
serde = { version = "1", features = ["derive"] } serde = { version = "1", features = ["derive"] }
serde_json = "1" serde_json = "1"
sysinfo = { version = "0.31", default-features = false, features = ["multithread", "network", "system", "disk"] } sysinfo = { version = "0.31", default-features = false, features = ["multithread", "network", "system", "disk"] }
get_if_addrs = "0.5" get_if_addrs = "0.5"
reqwest = { version = "0.12", features = ["json", "rustls-tls", "blocking", "stream"], default-features = false } reqwest = { version = "0.12", features = ["json", "rustls-tls"], default-features = false }
futures-util = "0.3"
tokio = { version = "1", features = ["rt-multi-thread", "macros", "time"] } tokio = { version = "1", features = ["rt-multi-thread", "macros", "time"] }
once_cell = "1.19" once_cell = "1.19"
thiserror = "1.0" thiserror = "1.0"
chrono = { version = "0.4", features = ["serde"] } chrono = { version = "0.4", features = ["serde"] }
parking_lot = "0.12" parking_lot = "0.12"
hostname = "0.4" hostname = "0.4"
base64 = "0.22"
sha2 = "0.10"
convex = "0.10.2"
uuid = { version = "1", features = ["v4"] }
dirs = "5"
# SSE usa reqwest com stream, nao precisa de websocket
[target.'cfg(windows)'.dependencies]
winreg = "0.55"

View file

@ -1,31 +1,3 @@
fn main() { fn main() {
// Custom manifest keeps Common-Controls v6 dependency to avoid TaskDialogIndirect errors. tauri_build::build()
let windows = tauri_build::WindowsAttributes::new().app_manifest(
r#"
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="*"
publicKeyToken="6595b64144ccf1df"
language="*" />
</dependentAssembly>
</dependency>
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level="asInvoker" uiAccess="false" />
</requestedPrivileges>
</security>
</trustInfo>
</assembly>
"#,
);
let attrs = tauri_build::Attributes::new().windows_attributes(windows);
tauri_build::try_build(attrs).expect("failed to run Tauri build script");
} }

View file

@ -1,21 +1,10 @@
{ {
"$schema": "../gen/schemas/desktop-schema.json", "$schema": "../gen/schemas/desktop-schema.json",
"identifier": "default", "identifier": "default",
"description": "Capability for all windows", "description": "Capability for the main window",
"windows": ["main", "chat-*", "chat-hub"], "windows": ["main"],
"permissions": [ "permissions": [
"core:default", "core:default",
"core:event:default",
"core:event:allow-listen",
"core:event:allow-unlisten",
"core:event:allow-emit",
"core:window:default",
"core:window:allow-close",
"core:window:allow-hide",
"core:window:allow-show",
"core:window:allow-set-focus",
"core:window:allow-start-dragging",
"dialog:allow-open",
"opener:default", "opener:default",
"store:default", "store:default",
"store:allow-load", "store:allow-load",
@ -24,10 +13,6 @@
"store:allow-save", "store:allow-save",
"store:allow-delete", "store:allow-delete",
"updater:default", "updater:default",
"process:default", "process:default"
"notification:default",
"notification:allow-notify",
"notification:allow-request-permission",
"notification:allow-is-permission-granted"
] ]
} }

Binary file not shown.

Before

Width:  |  Height:  |  Size: 16 KiB

After

Width:  |  Height:  |  Size: 6.2 KiB

Before After
Before After

Binary file not shown.

Before

Width:  |  Height:  |  Size: 46 KiB

After

Width:  |  Height:  |  Size: 17 KiB

Before After
Before After

Binary file not shown.

Before

Width:  |  Height:  |  Size: 2 KiB

After

Width:  |  Height:  |  Size: 1.1 KiB

Before After
Before After

Binary file not shown.

Before

Width:  |  Height:  |  Size: 5.5 KiB

After

Width:  |  Height:  |  Size: 2.5 KiB

Before After
Before After

Binary file not shown.

Before

Width:  |  Height:  |  Size: 567 KiB

After

Width:  |  Height:  |  Size: 132 KiB

Before After
Before After

Binary file not shown.

Before

Width:  |  Height:  |  Size: 12 KiB

After

Width:  |  Height:  |  Size: 5 KiB

Before After
Before After

Binary file not shown.

Before

Width:  |  Height:  |  Size: 18 KiB

After

Width:  |  Height:  |  Size: 7.2 KiB

Before After
Before After

Binary file not shown.

Before

Width:  |  Height:  |  Size: 20 KiB

After

Width:  |  Height:  |  Size: 7.7 KiB

Before After
Before After

Binary file not shown.

Before

Width:  |  Height:  |  Size: 54 KiB

After

Width:  |  Height:  |  Size: 20 KiB

Before After
Before After

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.8 KiB

After

Width:  |  Height:  |  Size: 1,020 B

Before After
Before After

Binary file not shown.

Before

Width:  |  Height:  |  Size: 376 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 62 KiB

After

Width:  |  Height:  |  Size: 22 KiB

Before After
Before After

Binary file not shown.

Before

Width:  |  Height:  |  Size: 3.2 KiB

After

Width:  |  Height:  |  Size: 1.6 KiB

Before After
Before After

Binary file not shown.

Before

Width:  |  Height:  |  Size: 6.4 KiB

After

Width:  |  Height:  |  Size: 2.9 KiB

Before After
Before After

Binary file not shown.

Before

Width:  |  Height:  |  Size: 8.9 KiB

After

Width:  |  Height:  |  Size: 3.8 KiB

Before After
Before After

Binary file not shown.

Before

Width:  |  Height:  |  Size: 3.8 KiB

After

Width:  |  Height:  |  Size: 1.8 KiB

Before After
Before After

Binary file not shown.

Before

Width:  |  Height:  |  Size: 15 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 54 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.3 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 182 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 4.5 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 96 KiB

After

Width:  |  Height:  |  Size: 24 KiB

Before After
Before After

Binary file not shown.

Before

Width:  |  Height:  |  Size: 182 KiB

After

Width:  |  Height:  |  Size: 50 KiB

Before After
Before After

Binary file not shown.

Before

Width:  |  Height:  |  Size: 542 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.9 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 25 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 151 KiB

View file

@ -1,121 +0,0 @@
; Custom NSIS installer hooks (Tauri)
;
; Purpose:
; - Remove the "Nullsoft Install System" branding shown in the lower-left corner
; - Install the Raven Service for privileged operations without UAC prompts
;
; Note: the Tauri bundler injects these macros into the installer's main script.
BrandingText " "
!macro NSIS_HOOK_PREINSTALL
; Stop and remove any previous service instance before updating
DetailPrint "Parando servicos anteriores..."
; Stop the service
nsExec::ExecToLog 'sc stop RavenService'
; Wait for the service to stop completely (up to 10 seconds)
nsExec::ExecToLog 'powershell -Command "$$i=0; while((Get-Service RavenService -ErrorAction SilentlyContinue).Status -eq \"Running\" -and $$i -lt 10){Start-Sleep 1;$$i++}"'
; Remove the old service (IMPORTANT for reinstalls)
DetailPrint "Removendo servico antigo..."
IfFileExists "$INSTDIR\raven-service.exe" 0 +2
nsExec::ExecToLog '"$INSTDIR\raven-service.exe" uninstall'
; Fallback: remove via sc delete if the executable does not exist
nsExec::ExecToLog 'sc delete RavenService'
; Force-terminate any leftover processes
nsExec::ExecToLog 'taskkill /F /IM raven-service.exe'
nsExec::ExecToLog 'taskkill /F /IM appsdesktop.exe'
; Wait for file handles to be released and the service to be fully removed
Sleep 3000
!macroend
!macro NSIS_HOOK_POSTINSTALL
; =========================================================================
; Install and start the Raven Service
; =========================================================================
DetailPrint "Instalando Raven Service..."
; Ensure no residual service remains
nsExec::ExecToLog 'sc delete RavenService'
Sleep 1000
; The service binary is already in $INSTDIR (copied as a Tauri resource)
; Register the Windows service
nsExec::ExecToLog '"$INSTDIR\raven-service.exe" install'
Pop $0
${If} $0 != 0
DetailPrint "Aviso: Falha ao registrar servico (codigo: $0)"
; Try a full removal and reinstall
nsExec::ExecToLog '"$INSTDIR\raven-service.exe" uninstall'
nsExec::ExecToLog 'sc delete RavenService'
Sleep 1000
nsExec::ExecToLog '"$INSTDIR\raven-service.exe" install'
Pop $0
${EndIf}
; Wait for service registration
Sleep 500
; Start the service
DetailPrint "Iniciando Raven Service..."
nsExec::ExecToLog 'sc start RavenService'
Pop $0
${If} $0 == 0
DetailPrint "Raven Service iniciado com sucesso!"
${Else}
; Retry once after a short wait
Sleep 1000
nsExec::ExecToLog 'sc start RavenService'
Pop $0
${If} $0 == 0
DetailPrint "Raven Service iniciado com sucesso (segunda tentativa)!"
${Else}
DetailPrint "Aviso: Servico sera iniciado na proxima reinicializacao (codigo: $0)"
${EndIf}
${EndIf}
; =========================================================================
; Check whether RustDesk is installed
; If not, the Raven Service installs it automatically on first use
; =========================================================================
IfFileExists "$PROGRAMFILES\RustDesk\rustdesk.exe" rustdesk_found rustdesk_not_found
rustdesk_not_found:
DetailPrint "RustDesk sera instalado automaticamente pelo Raven Service."
Goto rustdesk_done
rustdesk_found:
DetailPrint "RustDesk ja esta instalado."
rustdesk_done:
!macroend
!macro NSIS_HOOK_PREUNINSTALL
; =========================================================================
; Stop and remove the Raven Service
; =========================================================================
DetailPrint "Parando Raven Service..."
nsExec::ExecToLog 'sc stop RavenService'
Sleep 1000
DetailPrint "Removendo Raven Service..."
nsExec::ExecToLog '"$INSTDIR\raven-service.exe" uninstall'
; Wait briefly to ensure the service has been removed
Sleep 500
!macroend
!macro NSIS_HOOK_POSTUNINSTALL
; Nothing further needed
!macroend

View file

@ -13,7 +13,7 @@ use tokio::sync::Notify;
#[derive(thiserror::Error, Debug)] #[derive(thiserror::Error, Debug)]
pub enum AgentError { pub enum AgentError {
#[error("Falha ao obter hostname da dispositivo")] #[error("Falha ao obter hostname da máquina")]
Hostname, Hostname,
#[error("Nenhum identificador de hardware disponível (MAC/serial)")] #[error("Nenhum identificador de hardware disponível (MAC/serial)")]
MissingIdentifiers, MissingIdentifiers,
@ -294,27 +294,7 @@ fn build_inventory_metadata(system: &System) -> serde_json::Value {
#[cfg(target_os = "windows")] #[cfg(target_os = "windows")]
{ {
let mut extended = collect_windows_extended(); let extended = collect_windows_extended();
// Fallback: se osInfo vier vazio, preenche com dados do sysinfo
if let Some(win) = extended.get_mut("windows").and_then(|v| v.as_object_mut()) {
let needs_os_info = match win.get("osInfo") {
Some(v) => v.as_object().map(|m| m.is_empty()).unwrap_or(true),
None => true,
};
if needs_os_info {
let mut osmap = serde_json::Map::new();
if let Some(name) = System::name() {
osmap.insert("ProductName".into(), json!(name));
}
if let Some(ver) = System::os_version() {
osmap.insert("Version".into(), json!(ver));
}
if let Some(build) = System::kernel_version() {
osmap.insert("BuildNumber".into(), json!(build));
}
win.insert("osInfo".into(), serde_json::Value::Object(osmap));
}
}
if let Some(obj) = inventory.as_object_mut() { if let Some(obj) = inventory.as_object_mut() {
obj.insert("extended".into(), extended); obj.insert("extended".into(), extended);
} }
@ -330,7 +310,10 @@ fn build_inventory_metadata(system: &System) -> serde_json::Value {
// Normalização de software/serviços no topo do inventário // Normalização de software/serviços no topo do inventário
if let Some(obj) = inventory.as_object_mut() { if let Some(obj) = inventory.as_object_mut() {
let extended_snapshot = obj.get("extended").and_then(|v| v.as_object()).cloned(); let extended_snapshot = obj
.get("extended")
.and_then(|v| v.as_object())
.cloned();
// Merge software // Merge software
let mut software: Vec<serde_json::Value> = Vec::new(); let mut software: Vec<serde_json::Value> = Vec::new();
if let Some(existing) = obj.get("software").and_then(|v| v.as_array()) { if let Some(existing) = obj.get("software").and_then(|v| v.as_array()) {
@ -676,114 +659,28 @@ fn collect_linux_extended() -> serde_json::Value {
#[cfg(target_os = "windows")] #[cfg(target_os = "windows")]
fn collect_windows_extended() -> serde_json::Value { fn collect_windows_extended() -> serde_json::Value {
use base64::engine::general_purpose::STANDARD;
use base64::Engine as _;
use std::os::windows::process::CommandExt; use std::os::windows::process::CommandExt;
use std::process::Command; use std::process::Command;
const CREATE_NO_WINDOW: u32 = 0x08000000; const CREATE_NO_WINDOW: u32 = 0x08000000;
fn decode_powershell_text(bytes: &[u8]) -> Option<String> {
if bytes.is_empty() {
return None;
}
if bytes.starts_with(&[0xFF, 0xFE]) {
return decode_utf16_le_to_string(&bytes[2..]);
}
if bytes.len() >= 2 && bytes[1] == 0 {
if let Some(s) = decode_utf16_le_to_string(bytes) {
return Some(s);
}
}
if bytes.contains(&0) {
if let Some(s) = decode_utf16_le_to_string(bytes) {
return Some(s);
}
}
let text = std::str::from_utf8(bytes).ok()?.trim().to_string();
if text.is_empty() {
None
} else {
Some(text)
}
}
fn decode_utf16_le_to_string(bytes: &[u8]) -> Option<String> {
if !bytes.len().is_multiple_of(2) {
return None;
}
let utf16: Vec<u16> = bytes
.chunks_exact(2)
.map(|chunk| u16::from_le_bytes([chunk[0], chunk[1]]))
.collect();
let text = String::from_utf16(&utf16).ok()?;
let trimmed = text.trim();
if trimmed.is_empty() {
None
} else {
Some(trimmed.to_string())
}
}
fn preview_base64(bytes: &[u8], max_len: usize) -> String {
if bytes.is_empty() {
return "<empty>".to_string();
}
let prefix = if bytes.len() > max_len {
&bytes[..max_len]
} else {
bytes
};
format!("base64:{}...", STANDARD.encode(prefix))
}
fn encode_ps_script(script: &str) -> String {
let mut bytes = Vec::with_capacity(script.len() * 2);
for unit in script.encode_utf16() {
bytes.extend_from_slice(&unit.to_le_bytes());
}
STANDARD.encode(bytes)
}
fn ps(cmd: &str) -> Option<serde_json::Value> { fn ps(cmd: &str) -> Option<serde_json::Value> {
let script = format!( let ps_cmd = format!(
"$ErrorActionPreference='SilentlyContinue';$ProgressPreference='SilentlyContinue';$result = & {{\n{}\n}};if ($null -eq $result) {{ return }};$json = $result | ConvertTo-Json -Depth 4 -Compress;if ([string]::IsNullOrWhiteSpace($json)) {{ return }};[Console]::OutputEncoding = [System.Text.Encoding]::UTF8;$json;", "$ErrorActionPreference='SilentlyContinue'; {} | ConvertTo-Json -Depth 4 -Compress",
cmd cmd
); );
let encoded = encode_ps_script(&script);
let out = Command::new("powershell") let out = Command::new("powershell")
.creation_flags(CREATE_NO_WINDOW) .creation_flags(CREATE_NO_WINDOW)
.arg("-NoProfile") .arg("-NoProfile")
.arg("-WindowStyle")
.arg("Hidden")
.arg("-NoLogo") .arg("-NoLogo")
.arg("-NonInteractive") .arg("-Command")
.arg("-ExecutionPolicy") .arg(ps_cmd)
.arg("Bypass")
.arg("-EncodedCommand")
.arg(encoded)
.output() .output()
.ok()?; .ok()?;
let stdout_text = decode_powershell_text(&out.stdout); if out.stdout.is_empty() {
if cfg!(test) { return None;
if let Some(ref txt) = stdout_text {
let preview = txt.chars().take(512).collect::<String>();
eprintln!("[collect_windows_extended] stdout `{cmd}` => {preview}");
} else {
let preview = preview_base64(&out.stdout, 512);
eprintln!(
"[collect_windows_extended] stdout `{cmd}` => <não decodificado {preview}>"
);
}
if !out.stderr.is_empty() {
if let Some(err) = decode_powershell_text(&out.stderr) {
eprintln!("[collect_windows_extended] stderr `{cmd}` => {err}");
} else {
let preview = preview_base64(&out.stderr, 512);
eprintln!(
"[collect_windows_extended] stderr `{cmd}` => <não decodificado {preview}>"
);
}
}
} }
stdout_text.and_then(|text| serde_json::from_str::<serde_json::Value>(&text).ok()) serde_json::from_slice::<serde_json::Value>(&out.stdout).ok()
} }
let software = ps(r#"@(Get-ItemProperty 'HKLM:\Software\Microsoft\Windows\CurrentVersion\Uninstall\*'; Get-ItemProperty 'HKLM:\Software\WOW6432Node\Microsoft\Windows\CurrentVersion\Uninstall\*') | Where-Object { $_.DisplayName } | Select-Object DisplayName, DisplayVersion, Publisher"#) let software = ps(r#"@(Get-ItemProperty 'HKLM:\Software\Microsoft\Windows\CurrentVersion\Uninstall\*'; Get-ItemProperty 'HKLM:\Software\WOW6432Node\Microsoft\Windows\CurrentVersion\Uninstall\*') | Where-Object { $_.DisplayName } | Select-Object DisplayName, DisplayVersion, Publisher"#)
@ -792,134 +689,21 @@ fn collect_windows_extended() -> serde_json::Value {
ps("@(Get-Service | Select-Object Name,Status,DisplayName)").unwrap_or_else(|| json!([])); ps("@(Get-Service | Select-Object Name,Status,DisplayName)").unwrap_or_else(|| json!([]));
let defender = ps("Get-MpComputerStatus | Select-Object AMRunningMode,AntivirusEnabled,RealTimeProtectionEnabled,AntispywareEnabled").unwrap_or_else(|| json!({})); let defender = ps("Get-MpComputerStatus | Select-Object AMRunningMode,AntivirusEnabled,RealTimeProtectionEnabled,AntispywareEnabled").unwrap_or_else(|| json!({}));
let hotfix = ps("Get-HotFix | Select-Object HotFixID,InstalledOn").unwrap_or_else(|| json!([])); let hotfix = ps("Get-HotFix | Select-Object HotFixID,InstalledOn").unwrap_or_else(|| json!([]));
let bitlocker = ps(
"@(if (Get-Command -Name Get-BitLockerVolume -ErrorAction SilentlyContinue) { Get-BitLockerVolume | Select-Object MountPoint,VolumeStatus,ProtectionStatus,LockStatus,EncryptionMethod,EncryptionPercentage,CapacityGB,KeyProtector } else { @() })",
)
.unwrap_or_else(|| json!([]));
let tpm = ps(
"if (Get-Command -Name Get-Tpm -ErrorAction SilentlyContinue) { Get-Tpm | Select-Object TpmPresent,TpmReady,TpmEnabled,TpmActivated,ManagedAuthLevel,OwnerAuth,ManufacturerId,ManufacturerIdTxt,ManufacturerVersion,ManufacturerVersionFull20,SpecVersion } else { $null }",
)
.unwrap_or_else(|| json!({}));
let secure_boot = ps(
r#"
if (-not (Get-Command -Name Confirm-SecureBootUEFI -ErrorAction SilentlyContinue)) {
[PSCustomObject]@{ Supported = $false; Enabled = $null; Error = 'Cmdlet Confirm-SecureBootUEFI indisponível' }
} else {
try {
$enabled = Confirm-SecureBootUEFI
[PSCustomObject]@{ Supported = $true; Enabled = [bool]$enabled; Error = $null }
} catch {
[PSCustomObject]@{ Supported = $true; Enabled = $null; Error = $_.Exception.Message }
}
}
"#,
)
.unwrap_or_else(|| json!({}));
let device_guard = ps(
"@(Get-CimInstance -ClassName Win32_DeviceGuard | Select-Object SecurityServicesConfigured,SecurityServicesRunning,RequiredSecurityProperties,AvailableSecurityProperties,VirtualizationBasedSecurityStatus)",
)
.unwrap_or_else(|| json!([]));
let firewall_profiles = ps(
"@(Get-NetFirewallProfile | Select-Object Name,Enabled,DefaultInboundAction,DefaultOutboundAction,NotifyOnListen)",
)
.unwrap_or_else(|| json!([]));
let windows_update = ps(
r#"
$reg = Get-ItemProperty 'HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\WindowsUpdate\Auto Update' -ErrorAction SilentlyContinue
if ($null -eq $reg) { return $null }
$last = $null
if ($reg.PSObject.Properties.Name -contains 'LastSuccessTime') {
$raw = $reg.LastSuccessTime
if ($raw) {
try {
if ($raw -is [DateTime]) {
$last = ($raw.ToUniversalTime()).ToString('o')
} elseif ($raw -is [string]) {
$last = $raw
} else {
$last = [DateTime]::FromFileTimeUtc([long]$raw).ToString('o')
}
} catch {
$last = $raw
}
}
}
[PSCustomObject]@{
AUOptions = $reg.AUOptions
NoAutoUpdate = $reg.NoAutoUpdate
ScheduledInstallDay = $reg.ScheduledInstallDay
ScheduledInstallTime = $reg.ScheduledInstallTime
DetectionFrequency = $reg.DetectionFrequencyEnabled
LastSuccessTime = $last
}
"#,
)
.unwrap_or_else(|| json!({}));
let computer_system = ps(
"Get-CimInstance Win32_ComputerSystem | Select-Object Manufacturer,Model,Domain,DomainRole,PartOfDomain,Workgroup,TotalPhysicalMemory,HypervisorPresent,PCSystemType,PCSystemTypeEx",
)
.unwrap_or_else(|| json!({}));
let device_join = ps(
r#"
$output = & dsregcmd.exe /status 2>$null
if (-not $output) { return $null }
$map = [ordered]@{}
$current = $null
foreach ($line in $output) {
if ([string]::IsNullOrWhiteSpace($line)) { continue }
if ($line -match '^\[(.+)\]$') {
$current = $matches[1].Trim()
if (-not $map.Contains($current)) {
$map[$current] = [ordered]@{}
}
continue
}
if (-not $current) { continue }
$parts = $line.Split(':', 2)
if ($parts.Length -ne 2) { continue }
$key = $parts[0].Trim()
$value = $parts[1].Trim()
if ($key) {
($map[$current])[$key] = $value
}
}
if ($map.Count -eq 0) { return $null }
$obj = [ordered]@{}
foreach ($entry in $map.GetEnumerator()) {
$obj[$entry.Key] = [PSCustomObject]$entry.Value
}
[PSCustomObject]$obj
"#,
)
.unwrap_or_else(|| json!({}));
// Informações de build/edição e ativação // Informações de build/edição e ativação
let os_info = ps(r#" let os_info = ps(r#"
$cv = Get-ItemProperty 'HKLM:\SOFTWARE\Microsoft\Windows NT\CurrentVersion'; $cv = Get-ItemProperty 'HKLM:\SOFTWARE\Microsoft\Windows NT\CurrentVersion';
$os = Get-CimInstance Win32_OperatingSystem -ErrorAction SilentlyContinue; $ls = Get-CimInstance -Query "SELECT Name, LicenseStatus FROM SoftwareLicensingProduct WHERE PartialProductKey IS NOT NULL" | Where-Object { $_.Name -like 'Windows*' } | Select-Object -First 1;
$lsItems = Get-CimInstance -Query "SELECT Name, LicenseStatus, PartialProductKey FROM SoftwareLicensingProduct WHERE PartialProductKey IS NOT NULL" | Where-Object { $_.Name -like 'Windows*' }; $lsCode = if ($ls -and $ls.LicenseStatus -ne $null) { [int]$ls.LicenseStatus } else { 0 };
$activatedItem = $lsItems | Where-Object { $_.LicenseStatus -eq 1 } | Select-Object -First 1;
$primaryItem = if ($activatedItem) { $activatedItem } else { $lsItems | Select-Object -First 1 };
$lsCode = if ($primaryItem -and $primaryItem.LicenseStatus -ne $null) { [int]$primaryItem.LicenseStatus } else { 0 };
[PSCustomObject]@{ [PSCustomObject]@{
ProductName = $cv.ProductName ProductName = $cv.ProductName
CurrentBuild = $cv.CurrentBuild CurrentBuild = $cv.CurrentBuild
CurrentBuildNumber = $cv.CurrentBuildNumber CurrentBuildNumber = $cv.CurrentBuildNumber
DisplayVersion = $cv.DisplayVersion DisplayVersion = $cv.DisplayVersion
ReleaseId = $cv.ReleaseId ReleaseId = $cv.ReleaseId
EditionID = $cv.EditionID EditionID = $cv.EditionID
UBR = $cv.UBR LicenseStatus = $lsCode
CompositionEditionID = $cv.CompositionEditionID IsActivated = ($lsCode -eq 1)
InstallationType = $cv.InstallationType
InstallDate = $cv.InstallDate
InstallationDate = $os.InstallDate
InstalledOn = $os.InstallDate
Version = $os.Version
BuildNumber = $os.BuildNumber
Caption = $os.Caption
FeatureExperiencePack = $cv.FeatureExperiencePack
LicenseStatus = $lsCode
IsActivated = ($activatedItem -ne $null)
} }
"#).unwrap_or_else(|| json!({})); "#).unwrap_or_else(|| json!({}));
@ -931,209 +715,9 @@ fn collect_windows_extended() -> serde_json::Value {
.unwrap_or_else(|| json!({})); .unwrap_or_else(|| json!({}));
let bios = ps("Get-CimInstance Win32_BIOS | Select-Object Manufacturer,SMBIOSBIOSVersion,ReleaseDate,Version").unwrap_or_else(|| json!({})); let bios = ps("Get-CimInstance Win32_BIOS | Select-Object Manufacturer,SMBIOSBIOSVersion,ReleaseDate,Version").unwrap_or_else(|| json!({}));
let memory = ps("@(Get-CimInstance Win32_PhysicalMemory | Select-Object BankLabel,Capacity,Manufacturer,PartNumber,SerialNumber,ConfiguredClockSpeed,Speed,ConfiguredVoltage)").unwrap_or_else(|| json!([])); let memory = ps("@(Get-CimInstance Win32_PhysicalMemory | Select-Object BankLabel,Capacity,Manufacturer,PartNumber,SerialNumber,ConfiguredClockSpeed,Speed,ConfiguredVoltage)").unwrap_or_else(|| json!([]));
// Coleta de GPU com VRAM correta (nvidia-smi para NVIDIA, registro como fallback para >4GB) let video = ps("@(Get-CimInstance Win32_VideoController | Select-Object Name,AdapterRAM,DriverVersion,PNPDeviceID)").unwrap_or_else(|| json!([]));
let video = ps(r#"
$gpus = @()
$wmiGpus = Get-CimInstance Win32_VideoController | Select-Object Name,AdapterRAM,DriverVersion,PNPDeviceID
foreach ($gpu in $wmiGpus) {
$vram = $gpu.AdapterRAM
# Tenta nvidia-smi para GPUs NVIDIA (retorna valor correto para >4GB)
if ($gpu.Name -match 'NVIDIA') {
try {
$nvidiaSmi = & 'nvidia-smi' '--query-gpu=memory.total' '--format=csv,noheader,nounits' 2>$null
if ($nvidiaSmi) {
$vramMB = [int64]($nvidiaSmi.Trim())
$vram = $vramMB * 1024 * 1024
}
} catch {}
}
# Fallback: tenta registro do Windows (qwMemorySize é uint64)
if ($vram -le 4294967296 -and $vram -gt 0) {
try {
$regPath = 'HKLM:\SYSTEM\ControlSet001\Control\Class\{4d36e968-e325-11ce-bfc1-08002be10318}\0*'
$regGpus = Get-ItemProperty $regPath -ErrorAction SilentlyContinue
foreach ($reg in $regGpus) {
if ($reg.DriverDesc -eq $gpu.Name -and $reg.'HardwareInformation.qwMemorySize') {
$vram = [int64]$reg.'HardwareInformation.qwMemorySize'
break
}
}
} catch {}
}
$gpus += [PSCustomObject]@{
Name = $gpu.Name
AdapterRAM = $vram
DriverVersion = $gpu.DriverVersion
PNPDeviceID = $gpu.PNPDeviceID
}
}
@($gpus)
"#).unwrap_or_else(|| json!([]));
let disks = ps("@(Get-CimInstance Win32_DiskDrive | Select-Object Model,SerialNumber,Size,InterfaceType,MediaType)").unwrap_or_else(|| json!([])); let disks = ps("@(Get-CimInstance Win32_DiskDrive | Select-Object Model,SerialNumber,Size,InterfaceType,MediaType)").unwrap_or_else(|| json!([]));
// Bateria (notebooks/laptops)
let battery = ps(r#"
$batteries = @(Get-CimInstance Win32_Battery | Select-Object Name,DeviceID,Status,BatteryStatus,EstimatedChargeRemaining,EstimatedRunTime,DesignCapacity,FullChargeCapacity,DesignVoltage,Chemistry,BatteryRechargeTime)
if ($batteries.Count -eq 0) {
[PSCustomObject]@{ Present = $false; Batteries = @() }
} else {
# Mapeia status numérico para texto
$statusMap = @{
1 = 'Discharging'
2 = 'AC Power'
3 = 'Fully Charged'
4 = 'Low'
5 = 'Critical'
6 = 'Charging'
7 = 'Charging High'
8 = 'Charging Low'
9 = 'Charging Critical'
10 = 'Undefined'
11 = 'Partially Charged'
}
foreach ($b in $batteries) {
if ($b.BatteryStatus) {
$b | Add-Member -NotePropertyName 'BatteryStatusText' -NotePropertyValue ($statusMap[[int]$b.BatteryStatus] ?? 'Unknown') -Force
}
}
[PSCustomObject]@{ Present = $true; Batteries = $batteries }
}
"#).unwrap_or_else(|| json!({ "Present": false, "Batteries": [] }));
// Sensores térmicos (temperatura CPU/GPU quando disponível)
let thermal = ps(r#"
$temps = @()
# Tenta WMI thermal zone (requer admin em alguns sistemas)
try {
$zones = Get-CimInstance -Namespace 'root/WMI' -ClassName MSAcpi_ThermalZoneTemperature -ErrorAction SilentlyContinue
foreach ($z in $zones) {
if ($z.CurrentTemperature) {
$celsius = [math]::Round(($z.CurrentTemperature - 2732) / 10, 1)
$temps += [PSCustomObject]@{
Source = 'ThermalZone'
Name = $z.InstanceName
TemperatureCelsius = $celsius
CriticalTripPoint = if ($z.CriticalTripPoint) { [math]::Round(($z.CriticalTripPoint - 2732) / 10, 1) } else { $null }
}
}
}
} catch {}
# CPU temp via Open Hardware Monitor WMI (se instalado)
try {
$ohm = Get-CimInstance -Namespace 'root/OpenHardwareMonitor' -ClassName Sensor -ErrorAction SilentlyContinue | Where-Object { $_.SensorType -eq 'Temperature' }
foreach ($s in $ohm) {
$temps += [PSCustomObject]@{
Source = 'OpenHardwareMonitor'
Name = $s.Name
TemperatureCelsius = $s.Value
Parent = $s.Parent
}
}
} catch {}
@($temps)
"#).unwrap_or_else(|| json!([]));
// Adaptadores de rede (físicos e virtuais)
let network_adapters = ps(r#"
@(Get-CimInstance Win32_NetworkAdapter | Where-Object { $_.PhysicalAdapter -eq $true -or $_.NetConnectionStatus -ne $null } | Select-Object Name,Description,MACAddress,Speed,NetConnectionStatus,AdapterType,Manufacturer,NetConnectionID,PNPDeviceID | ForEach-Object {
$statusMap = @{
0 = 'Disconnected'
1 = 'Connecting'
2 = 'Connected'
3 = 'Disconnecting'
4 = 'Hardware not present'
5 = 'Hardware disabled'
6 = 'Hardware malfunction'
7 = 'Media disconnected'
8 = 'Authenticating'
9 = 'Authentication succeeded'
10 = 'Authentication failed'
11 = 'Invalid address'
12 = 'Credentials required'
}
$_ | Add-Member -NotePropertyName 'StatusText' -NotePropertyValue ($statusMap[[int]$_.NetConnectionStatus] ?? 'Unknown') -Force
$_
})
"#).unwrap_or_else(|| json!([]));
// Monitores conectados
let monitors = ps(r#"
@(Get-CimInstance WmiMonitorID -Namespace root/wmi -ErrorAction SilentlyContinue | ForEach-Object {
$decode = { param($arr) if ($arr) { -join ($arr | Where-Object { $_ -ne 0 } | ForEach-Object { [char]$_ }) } else { $null } }
[PSCustomObject]@{
ManufacturerName = & $decode $_.ManufacturerName
ProductCodeID = & $decode $_.ProductCodeID
SerialNumberID = & $decode $_.SerialNumberID
UserFriendlyName = & $decode $_.UserFriendlyName
YearOfManufacture = $_.YearOfManufacture
WeekOfManufacture = $_.WeekOfManufacture
}
})
"#).unwrap_or_else(|| json!([]));
// Fonte de alimentação / chassis
let power_supply = ps(r#"
$chassis = Get-CimInstance Win32_SystemEnclosure | Select-Object ChassisTypes,Manufacturer,SerialNumber,SMBIOSAssetTag
$chassisTypeMap = @{
1 = 'Other'; 2 = 'Unknown'; 3 = 'Desktop'; 4 = 'Low Profile Desktop'
5 = 'Pizza Box'; 6 = 'Mini Tower'; 7 = 'Tower'; 8 = 'Portable'
9 = 'Laptop'; 10 = 'Notebook'; 11 = 'Hand Held'; 12 = 'Docking Station'
13 = 'All in One'; 14 = 'Sub Notebook'; 15 = 'Space-Saving'; 16 = 'Lunch Box'
17 = 'Main Server Chassis'; 18 = 'Expansion Chassis'; 19 = 'SubChassis'
20 = 'Bus Expansion Chassis'; 21 = 'Peripheral Chassis'; 22 = 'RAID Chassis'
23 = 'Rack Mount Chassis'; 24 = 'Sealed-case PC'; 25 = 'Multi-system chassis'
30 = 'Tablet'; 31 = 'Convertible'; 32 = 'Detachable'
}
$types = @()
if ($chassis.ChassisTypes) {
foreach ($t in $chassis.ChassisTypes) {
$types += $chassisTypeMap[[int]$t] ?? "Type$t"
}
}
[PSCustomObject]@{
ChassisTypes = $chassis.ChassisTypes
ChassisTypesText = $types
Manufacturer = $chassis.Manufacturer
SerialNumber = $chassis.SerialNumber
SMBIOSAssetTag = $chassis.SMBIOSAssetTag
}
"#).unwrap_or_else(|| json!({}));
// Último reinício e contagem de boots
let boot_info = ps(r#"
$os = Get-CimInstance Win32_OperatingSystem | Select-Object LastBootUpTime
$lastBoot = $os.LastBootUpTime
# Calcula uptime
$uptime = if ($lastBoot) { (New-TimeSpan -Start $lastBoot -End (Get-Date)).TotalSeconds } else { 0 }
# Conta eventos de boot (ID 6005) - últimos 30 dias para performance
$startDate = (Get-Date).AddDays(-30)
$bootEvents = @()
$bootCount = 0
try {
$events = Get-WinEvent -FilterHashtable @{
LogName = 'System'
ID = 6005
StartTime = $startDate
} -MaxEvents 50 -ErrorAction SilentlyContinue
$bootCount = @($events).Count
$bootEvents = @($events | Select-Object -First 10 | ForEach-Object {
@{
TimeCreated = $_.TimeCreated.ToString('o')
Computer = $_.MachineName
}
})
} catch {}
[PSCustomObject]@{
LastBootTime = if ($lastBoot) { $lastBoot.ToString('o') } else { $null }
UptimeSeconds = [math]::Round($uptime)
BootCountLast30Days = $bootCount
RecentBoots = $bootEvents
}
"#).unwrap_or_else(|| json!({ "LastBootTime": null, "UptimeSeconds": 0, "BootCountLast30Days": 0, "RecentBoots": [] }));
json!({ json!({
"windows": { "windows": {
"software": software, "software": software,
@ -1147,20 +731,6 @@ fn collect_windows_extended() -> serde_json::Value {
"memoryModules": memory, "memoryModules": memory,
"videoControllers": video, "videoControllers": video,
"disks": disks, "disks": disks,
"bitLocker": bitlocker,
"tpm": tpm,
"secureBoot": secure_boot,
"deviceGuard": device_guard,
"firewallProfiles": firewall_profiles,
"windowsUpdate": windows_update,
"computerSystem": computer_system,
"azureAdStatus": device_join,
"battery": battery,
"thermal": thermal,
"networkAdapters": network_adapters,
"monitors": monitors,
"chassis": power_supply,
"bootInfo": boot_info,
} }
}) })
} }
@ -1255,7 +825,7 @@ pub fn collect_profile() -> Result<MachineProfile, AgentError> {
let system = collect_system(); let system = collect_system();
let os_name = System::name() let os_name = System::name()
.or_else(System::long_os_version) .or_else(|| System::long_os_version())
.unwrap_or_else(|| "desconhecido".to_string()); .unwrap_or_else(|| "desconhecido".to_string());
let os_version = System::os_version(); let os_version = System::os_version();
let architecture = std::env::consts::ARCH.to_string(); let architecture = std::env::consts::ARCH.to_string();
@ -1315,7 +885,7 @@ async fn post_heartbeat(
.into_owned(); .into_owned();
let os = MachineOs { let os = MachineOs {
name: System::name() name: System::name()
.or_else(System::long_os_version) .or_else(|| System::long_os_version())
.unwrap_or_else(|| "desconhecido".to_string()), .unwrap_or_else(|| "desconhecido".to_string()),
version: System::os_version(), version: System::os_version(),
architecture: Some(std::env::consts::ARCH.to_string()), architecture: Some(std::env::consts::ARCH.to_string()),
@ -1335,232 +905,6 @@ async fn post_heartbeat(
Ok(()) Ok(())
} }
#[derive(Debug, serde::Deserialize)]
#[serde(rename_all = "camelCase")]
struct UsbPolicyResponse {
pending: bool,
policy: Option<String>,
#[allow(dead_code)]
applied_at: Option<i64>,
}
#[derive(Debug, serde::Serialize)]
#[serde(rename_all = "camelCase")]
struct UsbPolicyStatusReport {
machine_token: String,
status: String,
error: Option<String>,
current_policy: Option<String>,
}
async fn check_and_apply_usb_policy(base_url: &str, token: &str) {
crate::log_info!("Verificando politica USB pendente...");
let url = format!("{}/api/machines/usb-policy?machineToken={}", base_url, token);
let response = match HTTP_CLIENT.get(&url).send().await {
Ok(resp) => {
crate::log_info!("Resposta da verificacao de politica USB: status={}", resp.status());
resp
}
Err(e) => {
crate::log_error!("Falha ao verificar politica USB: {e}");
return;
}
};
let policy_response: UsbPolicyResponse = match response.json().await {
Ok(data) => data,
Err(e) => {
crate::log_error!("Falha ao parsear resposta de politica USB: {e}");
return;
}
};
if !policy_response.pending {
crate::log_info!("Nenhuma politica USB pendente");
return;
}
let policy_str = match policy_response.policy {
Some(p) => p,
None => {
crate::log_warn!("Politica USB pendente mas sem valor de policy");
return;
}
};
crate::log_info!("Politica USB pendente encontrada: {}", policy_str);
#[cfg(target_os = "windows")]
{
use crate::usb_control::{get_current_policy, UsbPolicy};
use crate::service_client;
let policy = match UsbPolicy::from_str(&policy_str) {
Some(p) => p,
None => {
crate::log_error!("Politica USB invalida: {}", policy_str);
report_usb_policy_status(base_url, token, "FAILED", Some(format!("Politica invalida: {}", policy_str)), None).await;
return;
}
};
// Verifica se a politica ja esta aplicada localmente
match get_current_policy() {
Ok(current) if current == policy => {
crate::log_info!("Politica USB ja esta aplicada localmente: {}", policy_str);
let reported = report_usb_policy_status(base_url, token, "APPLIED", None, Some(policy_str.clone())).await;
if !reported {
crate::log_error!("Falha ao reportar politica ja aplicada");
}
return;
}
Ok(current) => {
crate::log_info!("Politica atual: {:?}, esperada: {:?}", current, policy);
}
Err(e) => {
crate::log_warn!("Nao foi possivel ler politica atual: {e}");
}
}
crate::log_info!("Aplicando politica USB: {}", policy_str);
// Reporta APPLYING para progress bar real no frontend
let _ = report_usb_policy_status(base_url, token, "APPLYING", None, None).await;
// Tenta primeiro via RavenService (privilegiado)
crate::log_info!("Tentando aplicar politica via RavenService...");
match service_client::apply_usb_policy(&policy_str) {
Ok(result) => {
if result.success {
crate::log_info!("Politica USB aplicada com sucesso via RavenService: {:?}", result);
let reported = report_usb_policy_status(base_url, token, "APPLIED", None, Some(policy_str.clone())).await;
if !reported {
crate::log_error!("CRITICO: Politica aplicada mas falha ao reportar ao servidor!");
let base_url = base_url.to_string();
let token = token.to_string();
tokio::spawn(async move {
tokio::time::sleep(Duration::from_secs(60)).await;
crate::log_info!("Retry agendado: reportando politica USB...");
let _ = report_usb_policy_status(&base_url, &token, "APPLIED", None, Some(policy_str)).await;
});
}
return;
} else {
let err_msg = result.error.unwrap_or_else(|| "Erro desconhecido".to_string());
crate::log_error!("RavenService retornou erro: {}", err_msg);
report_usb_policy_status(base_url, token, "FAILED", Some(err_msg), None).await;
}
}
Err(service_client::ServiceClientError::ServiceUnavailable(msg)) => {
crate::log_warn!("RavenService nao disponivel: {}", msg);
// Tenta fallback direto (vai falhar se nao tiver privilegio)
crate::log_info!("Tentando aplicar politica diretamente...");
match crate::usb_control::apply_usb_policy(policy) {
Ok(result) => {
crate::log_info!("Politica USB aplicada com sucesso (direto): {:?}", result);
let reported = report_usb_policy_status(base_url, token, "APPLIED", None, Some(policy_str.clone())).await;
if !reported {
crate::log_error!("CRITICO: Politica aplicada mas falha ao reportar ao servidor!");
let base_url = base_url.to_string();
let token = token.to_string();
tokio::spawn(async move {
tokio::time::sleep(Duration::from_secs(60)).await;
crate::log_info!("Retry agendado: reportando politica USB...");
let _ = report_usb_policy_status(&base_url, &token, "APPLIED", None, Some(policy_str)).await;
});
}
}
Err(e) => {
let err_msg = format!("RavenService indisponivel e aplicacao direta falhou: {}. Instale ou inicie o RavenService.", e);
crate::log_error!("{}", err_msg);
report_usb_policy_status(base_url, token, "FAILED", Some(err_msg), None).await;
}
}
}
Err(e) => {
crate::log_error!("Falha ao comunicar com RavenService: {e}");
report_usb_policy_status(base_url, token, "FAILED", Some(e.to_string()), None).await;
}
}
}
#[cfg(not(target_os = "windows"))]
{
crate::log_warn!("Controle de USB nao suportado neste sistema operacional");
report_usb_policy_status(base_url, token, "FAILED", Some("Sistema operacional nao suportado".to_string()), None).await;
}
}
async fn report_usb_policy_status(
base_url: &str,
token: &str,
status: &str,
error: Option<String>,
current_policy: Option<String>,
) -> bool {
let url = format!("{}/api/machines/usb-policy", base_url);
let report = UsbPolicyStatusReport {
machine_token: token.to_string(),
status: status.to_string(),
error,
current_policy,
};
crate::log_info!("Reportando status de politica USB: status={}", status);
// Retry simples: 1 tentativa imediata + 1 retry após 2s
let delays = [2];
let mut last_error = None;
for (attempt, delay_secs) in delays.iter().enumerate() {
match HTTP_CLIENT.post(&url).json(&report).send().await {
Ok(response) => {
let status_code = response.status();
if status_code.is_success() {
crate::log_info!(
"Report de politica USB enviado com sucesso na tentativa {}",
attempt + 1
);
return true;
} else {
let body = response.text().await.unwrap_or_default();
last_error = Some(format!("HTTP {} - {}", status_code, body));
crate::log_warn!(
"Report de politica USB falhou (tentativa {}): HTTP {}",
attempt + 1,
status_code
);
}
}
Err(e) => {
last_error = Some(e.to_string());
crate::log_warn!(
"Report de politica USB falhou (tentativa {}): {}",
attempt + 1,
e
);
}
}
if attempt < delays.len() - 1 {
crate::log_info!("Retentando report de politica USB em {}s...", delay_secs);
tokio::time::sleep(Duration::from_secs(*delay_secs)).await;
}
}
if let Some(err) = last_error {
crate::log_error!(
"Falha ao reportar status de politica USB apos {} tentativas: {err}",
delays.len()
);
}
false
}
struct HeartbeatHandle { struct HeartbeatHandle {
token: String, token: String,
base_url: String, base_url: String,
@ -1575,9 +919,9 @@ impl HeartbeatHandle {
} }
} }
#[derive(Default, Clone)] #[derive(Default)]
pub struct AgentRuntime { pub struct AgentRuntime {
inner: Arc<Mutex<Option<HeartbeatHandle>>>, inner: Mutex<Option<HeartbeatHandle>>,
} }
fn sanitize_base_url(input: &str) -> Result<String, AgentError> { fn sanitize_base_url(input: &str) -> Result<String, AgentError> {
@ -1591,7 +935,7 @@ fn sanitize_base_url(input: &str) -> Result<String, AgentError> {
impl AgentRuntime { impl AgentRuntime {
pub fn new() -> Self { pub fn new() -> Self {
Self { Self {
inner: Arc::new(Mutex::new(None)), inner: Mutex::new(None),
} }
} }
@ -1624,46 +968,29 @@ impl AgentRuntime {
let status_clone = status.clone(); let status_clone = status.clone();
let join_handle = async_runtime::spawn(async move { let join_handle = async_runtime::spawn(async move {
crate::log_info!("Loop de agente iniciado");
if let Err(error) = if let Err(error) =
post_heartbeat(&base_clone, &token_clone, status_clone.clone()).await post_heartbeat(&base_clone, &token_clone, status_clone.clone()).await
{ {
crate::log_error!("Falha inicial ao enviar heartbeat: {error}"); eprintln!("[agent] Falha inicial ao enviar heartbeat: {error}");
} else {
crate::log_info!("Heartbeat inicial enviado com sucesso");
} }
// Verifica politica USB apos heartbeat inicial let mut ticker = tokio::time::interval(Duration::from_secs(interval));
check_and_apply_usb_policy(&base_clone, &token_clone).await; ticker.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
let mut heartbeat_ticker = tokio::time::interval(Duration::from_secs(interval));
heartbeat_ticker.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
let mut usb_ticker = tokio::time::interval(Duration::from_secs(15));
usb_ticker.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
loop { loop {
// Wait interval // Wait interval
tokio::select! { tokio::select! {
_ = stop_signal_clone.notified() => { _ = stop_signal_clone.notified() => {
crate::log_info!("Loop de agente encerrado por sinal de parada");
break; break;
} }
_ = heartbeat_ticker.tick() => {} _ = ticker.tick() => {}
_ = usb_ticker.tick() => {
check_and_apply_usb_policy(&base_clone, &token_clone).await;
continue;
}
} }
if let Err(error) = if let Err(error) =
post_heartbeat(&base_clone, &token_clone, status_clone.clone()).await post_heartbeat(&base_clone, &token_clone, status_clone.clone()).await
{ {
crate::log_error!("Falha ao enviar heartbeat: {error}"); eprintln!("[agent] Falha ao enviar heartbeat: {error}");
} }
// Verifica politica USB apos cada heartbeat
check_and_apply_usb_policy(&base_clone, &token_clone).await;
} }
}); });
@ -1687,62 +1014,3 @@ impl AgentRuntime {
} }
} }
} }
#[cfg(all(test, target_os = "windows"))]
mod windows_tests {
use super::collect_windows_extended;
use serde_json::Value;
fn expect_object<'a>(value: &'a Value, context: &str) -> &'a serde_json::Map<String, Value> {
value
.as_object()
.unwrap_or_else(|| panic!("{context} não é um objeto JSON: {value:?}"))
}
#[test]
fn collects_activation_and_defender_status() {
let extended = collect_windows_extended();
let windows = extended.get("windows").unwrap_or_else(|| {
panic!("payload windows ausente: {extended:?}");
});
let windows_obj = expect_object(windows, "windows");
let os_info = windows_obj
.get("osInfo")
.unwrap_or_else(|| panic!("windows.osInfo ausente: {windows_obj:?}"));
let os_info_obj = expect_object(os_info, "windows.osInfo");
let is_activated = os_info_obj.get("IsActivated").unwrap_or_else(|| {
panic!("campo IsActivated ausente em windows.osInfo: {os_info_obj:?}")
});
assert!(
is_activated.as_bool().is_some(),
"esperava booleano em windows.osInfo.IsActivated, valor recebido: {is_activated:?}"
);
let license_status = os_info_obj.get("LicenseStatus").unwrap_or_else(|| {
panic!("campo LicenseStatus ausente em windows.osInfo: {os_info_obj:?}")
});
assert!(
license_status.as_i64().is_some(),
"esperava número em windows.osInfo.LicenseStatus, valor recebido: {license_status:?}"
);
let defender = windows_obj.get("defender").unwrap_or_else(|| {
panic!("windows.defender ausente: {windows_obj:?}");
});
let defender_obj = expect_object(defender, "windows.defender");
let realtime = defender_obj
.get("RealTimeProtectionEnabled")
.unwrap_or_else(|| {
panic!(
"campo RealTimeProtectionEnabled ausente em windows.defender: {defender_obj:?}"
)
});
assert!(
realtime.as_bool().is_some(),
"esperava booleano em windows.defender.RealTimeProtectionEnabled, valor recebido: {realtime:?}"
);
}
}

File diff suppressed because it is too large Load diff

View file

@ -1,102 +1,7 @@
mod agent; mod agent;
mod chat;
#[cfg(target_os = "windows")]
mod rustdesk;
#[cfg(target_os = "windows")]
mod service_client;
mod usb_control;
use agent::{collect_inventory_plain, collect_profile, AgentRuntime, MachineProfile}; use agent::{collect_inventory_plain, collect_profile, AgentRuntime, MachineProfile};
use chat::{ChatRuntime, ChatSession, ChatMessagesResponse, SendMessageResponse};
use chrono::Local;
use usb_control::{UsbPolicy, UsbPolicyResult};
use tauri::{Emitter, Listener, Manager, WindowEvent};
use tauri_plugin_store::Builder as StorePluginBuilder; use tauri_plugin_store::Builder as StorePluginBuilder;
use std::fs::OpenOptions;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::sync::OnceLock;
#[cfg(target_os = "windows")]
use tauri::menu::{MenuBuilder, MenuItemBuilder};
#[cfg(target_os = "windows")]
use tauri::tray::TrayIconBuilder;
#[cfg(target_os = "windows")]
use winreg::enums::*;
#[cfg(target_os = "windows")]
use winreg::RegKey;
const DEFAULT_CONVEX_URL: &str = "https://convex.esdrasrenan.com.br";
// ============================================================================
// Sistema de Logging para Agente
// ============================================================================
static AGENT_LOG_FILE: OnceLock<std::sync::Mutex<std::fs::File>> = OnceLock::new();
pub fn init_agent_logging() -> Result<(), String> {
let dir = logs_directory()
.ok_or("LOCALAPPDATA indisponivel para logging")?;
std::fs::create_dir_all(&dir)
.map_err(|e| format!("Falha ao criar diretorio de logs: {e}"))?;
let path = dir.join("raven-agent.log");
let file = OpenOptions::new()
.create(true)
.append(true)
.open(&path)
.map_err(|e| format!("Falha ao abrir raven-agent.log: {e}"))?;
let _ = AGENT_LOG_FILE.set(std::sync::Mutex::new(file));
Ok(())
}
/// Writes a timestamped log line to stderr and (best-effort) to the agent
/// log file opened by `init_agent_logging`. File errors are ignored.
pub fn log_agent(level: &str, message: &str) {
    let stamp = Local::now().format("%Y-%m-%d %H:%M:%S%.3f");
    let entry = format!("[{stamp}] [{level}] {message}\n");
    // Mirror to stderr (useful in dev/debug builds).
    eprint!("{entry}");
    // Append to the file only if logging was initialised.
    let Some(mutex) = AGENT_LOG_FILE.get() else {
        return;
    };
    if let Ok(mut file) = mutex.lock() {
        let _ = file.write_all(entry.as_bytes());
        let _ = file.flush();
    }
}
/// Logs a `format!`-style message at INFO level via `log_agent`.
#[macro_export]
macro_rules! log_info {
    ($($arg:tt)*) => {
        $crate::log_agent("INFO", format!($($arg)*).as_str())
    };
}
/// Logs a `format!`-style message at ERROR level via `log_agent`.
#[macro_export]
macro_rules! log_error {
    ($($arg:tt)*) => {
        $crate::log_agent("ERROR", format!($($arg)*).as_str())
    };
}
/// Logs a `format!`-style message at WARN level via `log_agent`.
#[macro_export]
macro_rules! log_warn {
    ($($arg:tt)*) => {
        $crate::log_agent("WARN", format!($($arg)*).as_str())
    };
}
/// Outcome of a RustDesk provisioning run, serialised to the frontend in
/// camelCase.
#[derive(Debug, serde::Serialize)]
#[serde(rename_all = "camelCase")]
pub struct RustdeskProvisioningResult {
    // RustDesk ID used for remote connections.
    pub id: String,
    // Access password configured during provisioning.
    pub password: String,
    // Detected installed version, when available.
    pub installed_version: Option<String>,
    // True when this run changed the installation/configuration.
    pub updated: bool,
    // Epoch timestamp of the last provisioning (unit set by the provisioner
    // — presumably milliseconds; confirm against rustdesk module).
    pub last_provisioned_at: i64,
}
#[tauri::command] #[tauri::command]
fn collect_machine_profile() -> Result<MachineProfile, String> { fn collect_machine_profile() -> Result<MachineProfile, String> {
@ -127,711 +32,20 @@ fn stop_machine_agent(state: tauri::State<AgentRuntime>) -> Result<(), String> {
Ok(()) Ok(())
} }
/// Tauri command: opens the devtools panel for the given webview window.
#[tauri::command]
fn open_devtools(window: tauri::WebviewWindow) -> Result<(), String> {
    window.open_devtools();
    Ok(())
}
/// Tauri command: appends a frontend-supplied line to `app.log`.
#[tauri::command]
fn log_app_event(message: String) -> Result<(), String> {
    append_app_log(&message)
}
/// Appends a timestamped message to `app.log` inside the logs directory,
/// creating directory and file on first use. Returns a descriptive error
/// string on any I/O failure.
fn append_app_log(message: &str) -> Result<(), String> {
    // Without LOCALAPPDATA there is nowhere to write.
    let dir = match logs_directory() {
        Some(dir) => dir,
        None => return Err("LOCALAPPDATA indisponivel para gravar logs".to_string()),
    };
    std::fs::create_dir_all(&dir)
        .map_err(|error| format!("Falha ao criar pasta de logs: {error}"))?;
    // Append-only handle: earlier entries are preserved.
    let log_path = dir.join("app.log");
    let mut file = OpenOptions::new()
        .append(true)
        .create(true)
        .open(&log_path)
        .map_err(|error| format!("Falha ao abrir app.log: {error}"))?;
    let timestamp = Local::now().format("%Y-%m-%d %H:%M:%S");
    writeln!(file, "[{timestamp}] {message}")
        .map_err(|error| format!("Falha ao escrever log: {error}"))?;
    Ok(())
}
/// Resolves the log directory:
/// `%LOCALAPPDATA%\br.com.esdrasrenan.sistemadechamados\logs`.
/// Returns `None` when LOCALAPPDATA is not set.
fn logs_directory() -> Option<PathBuf> {
    std::env::var("LOCALAPPDATA").ok().map(|base| {
        Path::new(&base)
            .join("br.com.esdrasrenan.sistemadechamados")
            .join("logs")
    })
}
/// Tauri command: runs RustDesk provisioning on a blocking thread, then
/// emits `raven://remote-access/provisioned` with the result so the
/// frontend can refresh its state.
#[tauri::command]
async fn ensure_rustdesk_and_emit(
    app: tauri::AppHandle,
    config_string: Option<String>,
    password: Option<String>,
    machine_id: Option<String>,
) -> Result<RustdeskProvisioningResult, String> {
    // Provisioning is synchronous work; keep it off the async runtime.
    let result = tauri::async_runtime::spawn_blocking(move || {
        run_rustdesk_ensure(config_string, password, machine_id)
    })
    .await
    .map_err(|error| error.to_string())??;
    // A failed emit does not invalidate the provisioning; just log it.
    if let Err(error) = app.emit("raven://remote-access/provisioned", &result) {
        eprintln!("[rustdesk] falha ao emitir evento raven://remote-access/provisioned: {error}");
    }
    Ok(result)
}
/// Provisions RustDesk on Windows. Prefers the privileged Raven Service
/// (no UAC prompt); falls back to a direct call that may trigger UAC.
#[cfg(target_os = "windows")]
fn run_rustdesk_ensure(
    config_string: Option<String>,
    password: Option<String>,
    machine_id: Option<String>,
) -> Result<RustdeskProvisioningResult, String> {
    // Try the service first (no UAC).
    if service_client::is_service_available() {
        log_info!("Usando Raven Service para provisionar RustDesk");
        match service_client::provision_rustdesk(
            config_string.as_deref(),
            password.as_deref(),
            machine_id.as_deref(),
        ) {
            Ok(result) => {
                // Re-wrap the service result into the app-level type.
                return Ok(RustdeskProvisioningResult {
                    id: result.id,
                    password: result.password,
                    installed_version: result.installed_version,
                    updated: result.updated,
                    last_provisioned_at: result.last_provisioned_at,
                });
            }
            Err(e) => {
                log_warn!("Falha ao usar servico para RustDesk: {e}");
                // Fall through to the direct path.
            }
        }
    }
    // Fallback: direct call (may prompt for UAC).
    log_info!("Usando chamada direta para provisionar RustDesk (pode pedir UAC)");
    rustdesk::ensure_rustdesk(
        config_string.as_deref(),
        password.as_deref(),
        machine_id.as_deref(),
    )
    .map_err(|error| error.to_string())
}
/// Non-Windows stub: automatic RustDesk provisioning is Windows-only.
#[cfg(not(target_os = "windows"))]
fn run_rustdesk_ensure(
    _config_string: Option<String>,
    _password: Option<String>,
    _machine_id: Option<String>,
) -> Result<RustdeskProvisioningResult, String> {
    Err("Provisionamento automático do RustDesk está disponível apenas no Windows.".to_string())
}
/// Tauri command: applies a USB storage policy ("ALLOW", "BLOCK_ALL" or
/// "READONLY"). Prefers the Raven Service (no UAC); falls back to a direct
/// registry write that may prompt for elevation.
#[tauri::command]
fn apply_usb_policy(policy: String) -> Result<UsbPolicyResult, String> {
    // Validate the policy string before doing anything.
    let _policy_enum = UsbPolicy::from_str(&policy)
        .ok_or_else(|| format!("Politica USB invalida: {}. Use ALLOW, BLOCK_ALL ou READONLY.", policy))?;
    // Try the service first (no UAC).
    #[cfg(target_os = "windows")]
    if service_client::is_service_available() {
        log_info!("Usando Raven Service para aplicar politica USB: {}", policy);
        match service_client::apply_usb_policy(&policy) {
            Ok(result) => {
                // Re-wrap the service result into the app-level type.
                return Ok(UsbPolicyResult {
                    success: result.success,
                    policy: result.policy,
                    error: result.error,
                    applied_at: result.applied_at,
                });
            }
            Err(e) => {
                log_warn!("Falha ao usar servico para USB policy: {e}");
                // Fall through to the direct path.
            }
        }
    }
    // Fallback: direct call (may prompt for UAC).
    log_info!("Usando chamada direta para aplicar politica USB (pode pedir UAC)");
    usb_control::apply_usb_policy(_policy_enum).map_err(|e| e.to_string())
}
/// Tauri command: returns the current USB policy name. Tries the service
/// first; reading the registry directly needs no elevation, so the fallback
/// is always safe.
#[tauri::command]
fn get_usb_policy() -> Result<String, String> {
    // Try the service first.
    #[cfg(target_os = "windows")]
    if service_client::is_service_available() {
        match service_client::get_usb_policy() {
            Ok(policy) => return Ok(policy),
            Err(e) => {
                log_warn!("Falha ao obter USB policy via servico: {e}");
                // Fall through to the direct read.
            }
        }
    }
    // Fallback: direct read (no elevation needed for reads).
    usb_control::get_current_policy()
        .map(|p| p.as_str().to_string())
        .map_err(|e| e.to_string())
}
/// Tauri command: asks the usb_control module to refresh group policy so a
/// freshly written USB policy takes effect.
#[tauri::command]
fn refresh_usb_policy() -> Result<(), String> {
    usb_control::refresh_group_policy().map_err(|e| e.to_string())
}
// ============================================================================
// COMANDOS DE CHAT
// ============================================================================
/// Tauri command: starts the chat runtime for the given API base URL and
/// auth token. When no Convex URL is supplied, the built-in default is used.
#[tauri::command]
fn start_chat_polling(
    state: tauri::State<ChatRuntime>,
    app: tauri::AppHandle,
    base_url: String,
    convex_url: Option<String>,
    token: String,
) -> Result<(), String> {
    let resolved_convex_url = match convex_url {
        Some(url) => url,
        None => DEFAULT_CONVEX_URL.to_string(),
    };
    state.start_polling(base_url, resolved_convex_url, token, app)
}
/// Tauri command: stops the chat runtime.
#[tauri::command]
fn stop_chat_polling(state: tauri::State<ChatRuntime>) -> Result<(), String> {
    state.stop();
    Ok(())
}
/// Tauri command: whether the chat runtime is on its realtime transport
/// (as reported by `is_using_sse`) rather than HTTP polling.
#[tauri::command]
fn is_chat_using_realtime(state: tauri::State<ChatRuntime>) -> bool {
    state.is_using_sse()
}
/// Tauri command: returns the chat sessions currently tracked by the runtime.
#[tauri::command]
fn get_chat_sessions(state: tauri::State<ChatRuntime>) -> Vec<ChatSession> {
    state.get_sessions()
}
/// Tauri command: fetches chat sessions straight from the API, bypassing the
/// runtime cache.
#[tauri::command]
async fn fetch_chat_sessions(base_url: String, token: String) -> Result<Vec<ChatSession>, String> {
    chat::fetch_sessions(&base_url, &token).await
}
/// Tauri command: fetches messages for a ticket; `since` allows incremental
/// fetches from a given timestamp.
#[tauri::command]
async fn fetch_chat_messages(
    base_url: String,
    token: String,
    ticket_id: String,
    since: Option<i64>,
) -> Result<ChatMessagesResponse, String> {
    chat::fetch_messages(&base_url, &token, &ticket_id, since).await
}
/// Tauri command: sends a chat message, optionally with attachments.
#[tauri::command]
async fn send_chat_message(
    base_url: String,
    token: String,
    ticket_id: String,
    body: String,
    attachments: Option<Vec<chat::AttachmentPayload>>,
) -> Result<SendMessageResponse, String> {
    chat::send_message(&base_url, &token, &ticket_id, &body, attachments).await
}
/// Tauri command: marks the given messages as read. An empty id list is a
/// successful no-op (avoids a pointless API round-trip).
#[tauri::command]
async fn mark_chat_messages_read(
    base_url: String,
    token: String,
    ticket_id: String,
    message_ids: Vec<String>,
) -> Result<(), String> {
    if message_ids.is_empty() {
        return Ok(());
    }
    chat::mark_messages_read(&base_url, &token, &ticket_id, &message_ids).await
}
/// Tauri command: reads a local file, validates it, uploads it to the chat
/// storage and returns the attachment payload ready to be sent in a message.
#[tauri::command]
async fn upload_chat_file(
    base_url: String,
    token: String,
    file_path: String,
) -> Result<chat::AttachmentPayload, String> {
    use std::path::Path;
    // Extract the file name; reject paths with no valid UTF-8 file name.
    let path = Path::new(&file_path);
    let file_name = path
        .file_name()
        .and_then(|n| n.to_str())
        .ok_or("Nome de arquivo inválido")?
        .to_string();
    // Read the whole file into memory.
    let file_data = std::fs::read(&file_path)
        .map_err(|e| format!("Falha ao ler arquivo: {e}"))?;
    let file_size = file_data.len() as u64;
    // Validate name/size against chat upload rules.
    chat::is_allowed_file(&file_name, file_size)?;
    // Derive the MIME type from the file name.
    let mime_type = chat::get_mime_type(&file_name);
    // Ask the backend for a pre-signed upload URL.
    let upload_url = chat::generate_upload_url(
        &base_url,
        &token,
        &file_name,
        &mime_type,
        file_size,
    )
    .await?;
    // Upload the bytes and keep the resulting storage id.
    let storage_id = chat::upload_file(&upload_url, file_data, &mime_type).await?;
    Ok(chat::AttachmentPayload {
        storage_id,
        name: file_name,
        size: Some(file_size),
        mime_type: Some(mime_type),
    })
}
/// Tauri command: opens (or focuses) the chat window for a ticket. Window
/// creation runs on a blocking thread; the join error is surfaced as a
/// string.
#[tauri::command]
async fn open_chat_window(app: tauri::AppHandle, ticket_id: String, ticket_ref: u64) -> Result<(), String> {
    log_info!("[CMD] open_chat_window called: ticket_id={}, ticket_ref={}", ticket_id, ticket_ref);
    let app_handle = app.clone();
    let ticket_id_for_task = ticket_id.clone();
    let result = tauri::async_runtime::spawn_blocking(move || {
        chat::open_chat_window(&app_handle, &ticket_id_for_task, ticket_ref)
    })
    .await
    .map_err(|err| format!("Falha ao abrir chat (join): {err}"))?;
    log_info!("[CMD] open_chat_window result: {:?}", result);
    result
}
/// Tauri command: closes the chat window of a ticket.
#[tauri::command]
fn close_chat_window(app: tauri::AppHandle, ticket_id: String) -> Result<(), String> {
    chat::close_chat_window(&app, &ticket_id)
}
/// Tauri command: minimises the chat window of a ticket.
#[tauri::command]
fn minimize_chat_window(app: tauri::AppHandle, ticket_id: String) -> Result<(), String> {
    chat::minimize_chat_window(&app, &ticket_id)
}
/// Tauri command: sets the minimised flag of a ticket's chat window.
#[tauri::command]
fn set_chat_minimized(app: tauri::AppHandle, ticket_id: String, minimized: bool) -> Result<(), String> {
    chat::set_chat_minimized(&app, &ticket_id, minimized)
}
/// Tauri command: opens the chat hub window on a blocking thread.
#[tauri::command]
async fn open_hub_window(app: tauri::AppHandle) -> Result<(), String> {
    let app_handle = app.clone();
    tauri::async_runtime::spawn_blocking(move || {
        chat::open_hub_window(&app_handle)
    })
    .await
    .map_err(|err| format!("Falha ao abrir hub (join): {err}"))?
}
/// Tauri command: closes the chat hub window.
#[tauri::command]
fn close_hub_window(app: tauri::AppHandle) -> Result<(), String> {
    chat::close_hub_window(&app)
}
/// Tauri command: sets the minimised flag of the chat hub window.
#[tauri::command]
fn set_hub_minimized(app: tauri::AppHandle, minimized: bool) -> Result<(), String> {
    chat::set_hub_minimized(&app, minimized)
}
// ============================================================================
// Handler de Deep Link (raven://)
// ============================================================================
/// Processa URLs do protocolo raven://
/// Formatos suportados:
/// - raven://ticket/{token} - Abre visualizacao do chamado
/// - raven://chat/{ticketId}?token={token} - Abre chat do chamado
/// - raven://rate/{token} - Abre avaliacao do chamado
fn handle_deep_link(app: &tauri::AppHandle, url: &str) {
    log_info!("Processando deep link: {url}");
    // Strip the raven:// scheme prefix.
    let path = url.trim_start_matches("raven://");
    // Real empty-path guard: `split('/')` always yields at least one element,
    // so the previous `parts.is_empty()` check could never fire and an empty
    // path fell into the "unknown" arm with a confusing message.
    if path.is_empty() {
        log_warn!("Deep link invalido: path vazio");
        return;
    }
    let parts: Vec<&str> = path.split('/').collect();
    // Drops the query string ("?...") from a path segment.
    fn strip_query(segment: &str) -> &str {
        segment.split('?').next().unwrap_or(segment)
    }
    match parts[0] {
        "ticket" => {
            if let Some(raw) = parts.get(1) {
                let token = strip_query(raw);
                log_info!("Abrindo ticket com token: {token}");
                // Show the main window, then tell the frontend to navigate.
                if let Some(window) = app.get_webview_window("main") {
                    let _ = window.show();
                    let _ = window.set_focus();
                    let _ = app.emit("raven://deep-link/ticket", serde_json::json!({
                        "token": token
                    }));
                }
            }
        }
        "chat" => {
            if let Some(raw) = parts.get(1) {
                let ticket_id = strip_query(raw);
                log_info!("Abrindo chat do ticket: {ticket_id}");
                // Open the chat window (ticket_ref 0 when coming from a deep link).
                if let Err(e) = chat::open_chat_window(app, ticket_id, 0) {
                    log_error!("Falha ao abrir chat: {e}");
                }
            }
        }
        "rate" => {
            if let Some(raw) = parts.get(1) {
                let token = strip_query(raw);
                log_info!("Abrindo avaliacao com token: {token}");
                // Show the main window, then tell the frontend to navigate.
                if let Some(window) = app.get_webview_window("main") {
                    let _ = window.show();
                    let _ = window.set_focus();
                    let _ = app.emit("raven://deep-link/rate", serde_json::json!({
                        "token": token
                    }));
                }
            }
        }
        _ => {
            log_warn!("Deep link desconhecido: {path}");
        }
    }
}
#[cfg_attr(mobile, tauri::mobile_entry_point)] #[cfg_attr(mobile, tauri::mobile_entry_point)]
pub fn run() { pub fn run() {
tauri::Builder::default() tauri::Builder::default()
.manage(AgentRuntime::new()) .manage(AgentRuntime::new())
.manage(ChatRuntime::new())
.plugin(tauri_plugin_dialog::init())
.plugin(tauri_plugin_opener::init()) .plugin(tauri_plugin_opener::init())
.plugin(StorePluginBuilder::default().build()) .plugin(StorePluginBuilder::default().build())
.plugin(tauri_plugin_updater::Builder::new().build()) .plugin(tauri_plugin_updater::Builder::new().build())
.plugin(tauri_plugin_process::init()) .plugin(tauri_plugin_process::init())
.plugin(tauri_plugin_notification::init())
.plugin(tauri_plugin_deep_link::init())
.plugin(tauri_plugin_single_instance::init(|app, _argv, _cwd| {
// Quando uma segunda instância tenta iniciar, foca a janela existente
if let Some(window) = app.get_webview_window("main") {
let _ = window.show();
let _ = window.unminimize();
let _ = window.set_focus();
}
}))
.on_window_event(|window, event| {
if let WindowEvent::CloseRequested { api, .. } = event {
api.prevent_close();
let _ = window.hide();
}
})
.setup(|app| {
// Inicializa sistema de logging primeiro
if let Err(e) = init_agent_logging() {
eprintln!("[raven] Falha ao inicializar logging: {e}");
}
log_info!("Raven iniciando...");
// Configura handler de deep link (raven://)
#[cfg(desktop)]
{
let handle = app.handle().clone();
app.listen("deep-link://new-url", move |event| {
let urls = event.payload();
log_info!("Deep link recebido: {urls}");
handle_deep_link(&handle, urls);
});
}
#[cfg(target_os = "windows")]
{
let start_in_background = std::env::args().any(|arg| arg == "--background");
setup_raven_autostart();
setup_tray(app.handle())?;
if start_in_background {
if let Some(win) = app.get_webview_window("main") {
let _ = win.hide();
}
}
// Tenta iniciar o agente e chat em background se houver credenciais salvas
let app_handle = app.handle().clone();
let agent_runtime = app.state::<AgentRuntime>().inner().clone();
let chat_runtime = app.state::<ChatRuntime>().inner().clone();
tauri::async_runtime::spawn(async move {
// Aguarda um pouco para o app estabilizar
tokio::time::sleep(tokio::time::Duration::from_secs(2)).await;
if let Err(e) = try_start_background_agent(&app_handle, agent_runtime, chat_runtime).await {
log_warn!("Agente nao iniciado em background: {e}");
}
});
}
Ok(())
})
.invoke_handler(tauri::generate_handler![ .invoke_handler(tauri::generate_handler![
collect_machine_profile, collect_machine_profile,
collect_machine_inventory, collect_machine_inventory,
start_machine_agent, start_machine_agent,
stop_machine_agent, stop_machine_agent
open_devtools,
log_app_event,
ensure_rustdesk_and_emit,
apply_usb_policy,
get_usb_policy,
refresh_usb_policy,
// Chat commands
start_chat_polling,
stop_chat_polling,
is_chat_using_realtime,
get_chat_sessions,
fetch_chat_sessions,
fetch_chat_messages,
send_chat_message,
mark_chat_messages_read,
upload_chat_file,
open_chat_window,
close_chat_window,
minimize_chat_window,
set_chat_minimized,
// Hub commands
open_hub_window,
close_hub_window,
set_hub_minimized
]) ])
.run(tauri::generate_context!()) .run(tauri::generate_context!())
.expect("error while running tauri application"); .expect("error while running tauri application");
} }
/// Registers the app under HKCU\...\Run so Windows launches it at logon with
/// the `--background` flag, then re-reads the value to confirm it was saved.
/// All failures are logged; none abort startup.
#[cfg(target_os = "windows")]
fn setup_raven_autostart() {
    let exe_path = match std::env::current_exe() {
        Ok(p) => p,
        Err(e) => {
            log_error!("Falha ao obter caminho do executavel: {e}");
            return;
        }
    };
    let path_str = exe_path.display().to_string();
    // Quote the path and add --background so autostart opens hidden.
    let value = format!("\"{}\" --background", path_str);
    let hkcu = RegKey::predef(HKEY_CURRENT_USER);
    let key = match hkcu.create_subkey(r"Software\Microsoft\Windows\CurrentVersion\Run") {
        Ok((key, _)) => key,
        Err(e) => {
            log_error!("Falha ao criar/abrir chave de registro Run: {e}");
            return;
        }
    };
    if let Err(e) = key.set_value("Raven", &value) {
        log_error!("Falha ao definir valor de auto-start no registro: {e}");
        return;
    }
    log_info!("Auto-start configurado: {value}");
    // Read the value back to validate the write.
    match key.get_value::<String, _>("Raven") {
        Ok(saved) => {
            if saved == value {
                log_info!("Auto-start validado: entrada existe no registro");
            } else {
                log_warn!("Auto-start: valor difere. Esperado: {value}, Salvo: {saved}");
            }
        }
        Err(e) => {
            log_warn!("Auto-start: nao foi possivel validar entrada: {e}");
        }
    }
}
/// Builds the system tray icon with a "Mostrar"/"Abrir Chat"/"Sair" menu.
/// Menu events show the main window, open chat windows for active sessions
/// (hub when more than one), or exit; double-click mirrors "Mostrar".
#[cfg(target_os = "windows")]
fn setup_tray(app: &tauri::AppHandle) -> tauri::Result<()> {
    let show_item = MenuItemBuilder::with_id("show", "Mostrar").build(app)?;
    let chat_item = MenuItemBuilder::with_id("chat", "Abrir Chat").build(app)?;
    let quit_item = MenuItemBuilder::with_id("quit", "Sair").build(app)?;
    let menu = MenuBuilder::new(app)
        .items(&[&show_item, &chat_item, &quit_item])
        .build()?;
    let mut builder = TrayIconBuilder::new()
        .menu(&menu)
        .on_menu_event(|tray, event| {
            match event.id().as_ref() {
                "show" => {
                    if let Some(win) = tray.app_handle().get_webview_window("main") {
                        let _ = win.show();
                        let _ = win.set_focus();
                    }
                    // Also reopen the chat window if a session is active.
                    if let Some(chat_runtime) = tray.app_handle().try_state::<ChatRuntime>() {
                        let sessions = chat_runtime.get_sessions();
                        if let Some(session) = sessions.first() {
                            let _ = chat::open_chat_window(tray.app_handle(), &session.ticket_id, session.ticket_ref);
                        }
                    }
                }
                "chat" => {
                    // Open a chat window if there is an active session.
                    if let Some(chat_runtime) = tray.app_handle().try_state::<ChatRuntime>() {
                        let sessions = chat_runtime.get_sessions();
                        if sessions.len() > 1 {
                            // Multiple sessions - open the hub.
                            if let Err(e) = chat::open_hub_window(tray.app_handle()) {
                                log_error!("Falha ao abrir hub de chat: {e}");
                            }
                        } else if let Some(session) = sessions.first() {
                            // Single session - open it directly.
                            if let Err(e) = chat::open_chat_window(tray.app_handle(), &session.ticket_id, session.ticket_ref) {
                                log_error!("Falha ao abrir janela de chat: {e}");
                            }
                        }
                    }
                }
                "quit" => {
                    tray.app_handle().exit(0);
                }
                _ => {}
            }
        })
        .on_tray_icon_event(|tray, event| {
            // Double-click behaves like the "Mostrar" menu entry.
            if let tauri::tray::TrayIconEvent::DoubleClick { .. } = event {
                if let Some(win) = tray.app_handle().get_webview_window("main") {
                    let _ = win.show();
                    let _ = win.set_focus();
                }
                // Also reopen the chat window if a session is active.
                if let Some(chat_runtime) = tray.app_handle().try_state::<ChatRuntime>() {
                    let sessions = chat_runtime.get_sessions();
                    if let Some(session) = sessions.first() {
                        let _ = chat::open_chat_window(tray.app_handle(), &session.ticket_id, session.ticket_ref);
                    }
                }
            }
        });
    // Reuse the app icon for the tray when one is configured.
    if let Some(icon) = app.default_window_icon() {
        builder = builder.icon(icon.clone());
    }
    builder = builder.tooltip("Raven");
    builder.build(app)?;
    Ok(())
}
/// Starts the heartbeat agent and the chat runtime in the background using
/// credentials persisted in `machine-agent.json` (app-local data dir).
/// Errors are returned as strings; a chat failure is only logged, since the
/// agent can run without chat.
#[cfg(target_os = "windows")]
async fn try_start_background_agent(
    app: &tauri::AppHandle,
    agent_runtime: AgentRuntime,
    chat_runtime: ChatRuntime,
) -> Result<(), String> {
    log_info!("Verificando credenciais salvas para iniciar agente...");
    let app_data = app
        .path()
        .app_local_data_dir()
        .map_err(|e| format!("Falha ao obter diretorio de dados: {e}"))?;
    let store_path = app_data.join("machine-agent.json");
    if !store_path.exists() {
        return Err("Nenhuma configuracao encontrada".to_string());
    }
    // Read and parse the stored JSON directly (no store plugin round-trip).
    let content = std::fs::read_to_string(&store_path)
        .map_err(|e| format!("Falha ao ler machine-agent.json: {e}"))?;
    let data: serde_json::Value = serde_json::from_str(&content)
        .map_err(|e| format!("Falha ao parsear machine-agent.json: {e}"))?;
    // A non-empty token is mandatory; everything else has defaults.
    let token = data
        .get("token")
        .and_then(|v| v.as_str())
        .filter(|t| !t.is_empty())
        .ok_or("Token nao encontrado ou vazio")?;
    let config = data.get("config");
    let api_base_url = config
        .and_then(|c| c.get("apiBaseUrl"))
        .and_then(|v| v.as_str())
        .unwrap_or("https://tickets.esdrasrenan.com.br");
    let convex_url = config
        .and_then(|c| c.get("convexUrl"))
        .and_then(|v| v.as_str())
        .unwrap_or(DEFAULT_CONVEX_URL);
    // Heartbeat interval in seconds; defaults to 5 minutes.
    let interval = config
        .and_then(|c| c.get("heartbeatIntervalSec"))
        .and_then(|v| v.as_u64())
        .unwrap_or(300);
    log_info!(
        "Iniciando agente em background: url={}, interval={}s",
        api_base_url,
        interval
    );
    agent_runtime
        .start_heartbeat(
            api_base_url.to_string(),
            token.to_string(),
            Some("online".to_string()),
            Some(interval),
        )
        .map_err(|e| format!("Falha ao iniciar heartbeat: {e}"))?;
    // Start the chat runtime (realtime transport with HTTP polling fallback);
    // chat failure is non-fatal for the agent.
    if let Err(e) =
        chat_runtime.start_polling(api_base_url.to_string(), convex_url.to_string(), token.to_string(), app.clone())
    {
        log_warn!("Falha ao iniciar chat em background: {e}");
    } else {
        log_info!("Chat iniciado com sucesso (Convex WebSocket)");
    }
    log_info!("Agente iniciado com sucesso em background");
    Ok(())
}

File diff suppressed because it is too large Load diff

View file

@ -1,244 +0,0 @@
//! Cliente IPC para comunicacao com o Raven Service
//!
//! Este modulo permite que o app Tauri se comunique com o Raven Service
//! via Named Pipes para executar operacoes privilegiadas.
#![allow(dead_code)]
use serde::{Deserialize, Serialize};
use std::io::{BufRead, BufReader, Write};
use std::time::Duration;
use thiserror::Error;
const PIPE_NAME: &str = r"\\.\pipe\RavenService";
/// Errors produced by the IPC client when talking to the Raven Service.
#[derive(Debug, Error)]
pub enum ServiceClientError {
    // Could not connect to the service's named pipe.
    #[error("Servico nao disponivel: {0}")]
    ServiceUnavailable(String),
    // Pipe I/O or protocol-level failure.
    #[error("Erro de comunicacao: {0}")]
    CommunicationError(String),
    // JSON (de)serialisation failure.
    #[error("Erro de serializacao: {0}")]
    SerializationError(#[from] serde_json::Error),
    // The service answered with an explicit error payload.
    #[error("Erro do servico: {message} (code: {code})")]
    ServiceError { code: i32, message: String },
    // Declared for future use; no call site in this module sets it yet.
    #[error("Timeout aguardando resposta")]
    Timeout,
}
/// Outbound IPC request: a uniquely-identified method call with JSON params.
#[derive(Debug, Serialize)]
struct Request {
    id: String,
    method: String,
    params: serde_json::Value,
}
/// Inbound IPC response; carries either a `result` or an `error` for the
/// request with the matching `id`.
#[derive(Debug, Deserialize)]
struct Response {
    id: String,
    result: Option<serde_json::Value>,
    error: Option<ErrorResponse>,
}
/// Error payload embedded in a failed `Response`.
#[derive(Debug, Deserialize)]
struct ErrorResponse {
    code: i32,
    message: String,
}
// =============================================================================
// Tipos de Resultado
// =============================================================================
/// Result of applying a USB policy via the service (camelCase on the wire).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct UsbPolicyResult {
    pub success: bool,
    pub policy: String,
    pub error: Option<String>,
    pub applied_at: Option<i64>,
}
/// Result of a RustDesk provisioning run performed by the service.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RustdeskResult {
    pub id: String,
    pub password: String,
    pub installed_version: Option<String>,
    pub updated: bool,
    pub last_provisioned_at: i64,
}
/// Current RustDesk installation/runtime status reported by the service.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RustdeskStatus {
    pub installed: bool,
    pub running: bool,
    pub id: Option<String>,
    pub version: Option<String>,
}
/// Payload returned by the service's health-check endpoint.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct HealthCheckResult {
    pub status: String,
    pub service: String,
    pub version: String,
    pub timestamp: i64,
}
// =============================================================================
// Cliente
// =============================================================================
/// Returns true when the Raven Service answers a health-check round-trip.
pub fn is_service_available() -> bool {
    matches!(health_check(), Ok(_))
}
/// Performs a health-check call against the service.
pub fn health_check() -> Result<HealthCheckResult, ServiceClientError> {
    let response = call_service("health_check", serde_json::json!({}))?;
    serde_json::from_value(response).map_err(|e| e.into())
}
/// Asks the service to apply the given USB policy (by wire name).
pub fn apply_usb_policy(policy: &str) -> Result<UsbPolicyResult, ServiceClientError> {
    let response = call_service(
        "apply_usb_policy",
        serde_json::json!({ "policy": policy }),
    )?;
    serde_json::from_value(response).map_err(|e| e.into())
}
/// Fetches the current USB policy name from the service. The response is
/// expected to be `{"policy": "<NAME>"}`; anything else is a protocol error.
pub fn get_usb_policy() -> Result<String, ServiceClientError> {
    let response = call_service("get_usb_policy", serde_json::json!({}))?;
    response
        .get("policy")
        .and_then(|p| p.as_str())
        .map(String::from)
        .ok_or_else(|| ServiceClientError::CommunicationError("Resposta invalida".into()))
}
/// Asks the service to provision RustDesk with optional config/password/
/// machine-id overrides.
pub fn provision_rustdesk(
    config: Option<&str>,
    password: Option<&str>,
    machine_id: Option<&str>,
) -> Result<RustdeskResult, ServiceClientError> {
    let params = serde_json::json!({
        "config": config,
        "password": password,
        "machineId": machine_id,
    });
    let response = call_service("provision_rustdesk", params)?;
    serde_json::from_value(response).map_err(|e| e.into())
}
/// Fetches the RustDesk installation/runtime status from the service.
pub fn get_rustdesk_status() -> Result<RustdeskStatus, ServiceClientError> {
    let response = call_service("get_rustdesk_status", serde_json::json!({}))?;
    serde_json::from_value(response).map_err(|e| e.into())
}
// =============================================================================
// Comunicacao IPC
// =============================================================================
/// Performs one request/response round-trip with the Raven Service over the
/// named pipe. Protocol: one JSON request per line out, one JSON response
/// per line back; the response `id` must echo the request `id`.
fn call_service(
    method: &str,
    params: serde_json::Value,
) -> Result<serde_json::Value, ServiceClientError> {
    // Unique id so a stale/foreign response can be detected.
    let id = uuid::Uuid::new_v4().to_string();
    let request = Request {
        id: id.clone(),
        method: method.to_string(),
        params,
    };
    // Serialise the request before touching the pipe.
    let request_json = serde_json::to_string(&request)?;
    // Connect to the pipe (with retries inside connect_to_pipe).
    let mut pipe = connect_to_pipe()?;
    // Send the request as a single newline-terminated line.
    writeln!(pipe, "{}", request_json).map_err(|e| {
        ServiceClientError::CommunicationError(format!("Erro ao enviar requisicao: {}", e))
    })?;
    pipe.flush().map_err(|e| {
        ServiceClientError::CommunicationError(format!("Erro ao flush: {}", e))
    })?;
    // Read exactly one response line.
    let mut reader = BufReader::new(pipe);
    let mut response_line = String::new();
    reader.read_line(&mut response_line).map_err(|e| {
        ServiceClientError::CommunicationError(format!("Erro ao ler resposta: {}", e))
    })?;
    // Parse the response.
    let response: Response = serde_json::from_str(&response_line)?;
    // The response must belong to this request.
    if response.id != id {
        return Err(ServiceClientError::CommunicationError(
            "ID de resposta nao corresponde".into(),
        ));
    }
    // An explicit error payload wins over any result.
    if let Some(error) = response.error {
        return Err(ServiceClientError::ServiceError {
            code: error.code,
            message: error.message,
        });
    }
    // A successful response must carry a result.
    response
        .result
        .ok_or_else(|| ServiceClientError::CommunicationError("Resposta sem resultado".into()))
}
/// Opens the service's named pipe for read/write, retrying up to three
/// times with a 500 ms pause between attempts.
#[cfg(target_os = "windows")]
fn connect_to_pipe() -> Result<std::fs::File, ServiceClientError> {
    const MAX_ATTEMPTS: u32 = 3;
    for attempt in 1..=MAX_ATTEMPTS {
        let opened = std::fs::OpenOptions::new()
            .read(true)
            .write(true)
            .open(PIPE_NAME);
        match opened {
            Ok(file) => return Ok(file),
            Err(err) if attempt == MAX_ATTEMPTS => {
                // Out of retries: surface the last error.
                return Err(ServiceClientError::ServiceUnavailable(format!(
                    "Nao foi possivel conectar ao servico apos {} tentativas: {}",
                    MAX_ATTEMPTS, err
                )));
            }
            Err(_) => {
                // Give the service a moment before retrying.
                std::thread::sleep(Duration::from_millis(500));
            }
        }
    }
    unreachable!("the loop always returns on the final attempt")
}
/// Non-Windows stub: named pipes are Windows-only, so connecting always fails.
#[cfg(not(target_os = "windows"))]
fn connect_to_pipe() -> Result<std::fs::File, ServiceClientError> {
    Err(ServiceClientError::ServiceUnavailable(
        "Named Pipes so estao disponiveis no Windows".into(),
    ))
}

View file

@ -1,408 +0,0 @@
//! USB Storage Control Module
//!
//! Este modulo implementa o controle de dispositivos de armazenamento USB no Windows.
//! Utiliza duas abordagens complementares:
//! 1. Removable Storage Access Policy (via registro do Windows)
//! 2. USBSTOR driver control (como fallback/reforco)
//!
//! IMPORTANTE: Requer privilegios de administrador para funcionar.
use serde::{Deserialize, Serialize};
use std::io;
use thiserror::Error;
/// USB storage policy, serialised as SCREAMING_SNAKE_CASE on the wire
/// (ALLOW / BLOCK_ALL / READONLY).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
pub enum UsbPolicy {
    Allow,
    BlockAll,
    Readonly,
}
impl UsbPolicy {
pub fn from_str(s: &str) -> Option<Self> {
match s.to_uppercase().as_str() {
"ALLOW" => Some(Self::Allow),
"BLOCK_ALL" => Some(Self::BlockAll),
"READONLY" => Some(Self::Readonly),
_ => None,
}
}
pub fn as_str(&self) -> &'static str {
match self {
Self::Allow => "ALLOW",
Self::BlockAll => "BLOCK_ALL",
Self::Readonly => "READONLY",
}
}
}
/// Outcome of applying a USB policy, serialised in camelCase for the frontend.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct UsbPolicyResult {
    pub success: bool,
    pub policy: String,
    pub error: Option<String>,
    pub applied_at: Option<i64>,
}
/// Errors raised while reading or writing USB-related registry policies.
#[derive(Error, Debug)]
#[allow(dead_code)]
pub enum UsbControlError {
    #[error("Politica USB invalida: {0}")]
    InvalidPolicy(String),
    #[error("Erro de registro do Windows: {0}")]
    RegistryError(String),
    #[error("Permissao negada - requer privilegios de administrador")]
    PermissionDenied,
    #[error("Sistema operacional nao suportado")]
    UnsupportedOs,
    #[error("Erro de I/O: {0}")]
    Io(#[from] io::Error),
}
#[cfg(target_os = "windows")]
mod windows_impl {
use super::*;
use std::fs;
use std::path::PathBuf;
use std::process::Command;
use winreg::enums::*;
use winreg::RegKey;
// GUID para Removable Storage Devices (Disk)
const REMOVABLE_STORAGE_GUID: &str = "{53f56307-b6bf-11d0-94f2-00a0c91efb8b}";
// Chaves de registro
const REMOVABLE_STORAGE_PATH: &str =
r"Software\Policies\Microsoft\Windows\RemovableStorageDevices";
const USBSTOR_PATH: &str = r"SYSTEM\CurrentControlSet\Services\USBSTOR";
const STORAGE_POLICY_PATH: &str = r"SYSTEM\CurrentControlSet\Control\StorageDevicePolicies";
    /// Applies the policy directly to the registry and wraps the outcome in
    /// a `UsbPolicyResult`. Permission failures are mapped to
    /// `PermissionDenied` so callers can route through the elevated service
    /// instead (no UAC elevation is attempted here).
    pub fn apply_usb_policy(policy: UsbPolicy) -> Result<UsbPolicyResult, UsbControlError> {
        let now = chrono::Utc::now().timestamp_millis();
        let direct_result = try_apply_policy_direct(policy);
        match direct_result {
            Ok(()) => Ok(UsbPolicyResult {
                success: true,
                policy: policy.as_str().to_string(),
                error: None,
                applied_at: Some(now),
            }),
            Err(err) => {
                // On permission failure, report PermissionDenied - the service
                // should be used; we do not elevate here to avoid an extra UAC.
                if is_permission_error(&err) {
                    return Err(UsbControlError::PermissionDenied);
                }
                Err(err)
            }
        }
    }
    /// Writes all three registry mechanisms for the given policy, in order:
    /// Removable Storage Access Policy, USBSTOR driver state, and the
    /// WriteProtect flag (set only for READONLY, cleared otherwise).
    fn try_apply_policy_direct(policy: UsbPolicy) -> Result<(), UsbControlError> {
        // 1. Removable Storage Access Policy (primary mechanism).
        apply_removable_storage_policy(policy)?;
        // 2. USBSTOR driver state as reinforcement.
        apply_usbstor_policy(policy)?;
        // 3. WriteProtect flag for read-only mode.
        if policy == UsbPolicy::Readonly {
            apply_write_protect(true)?;
        } else {
            apply_write_protect(false)?;
        }
        Ok(())
    }
    /// Writes the Removable Storage Access Policy values (Deny_Read/
    /// Deny_Write/Deny_Execute) under HKLM for the removable-disk GUID.
    /// ALLOW removes the restrictions; BLOCK_ALL denies all three;
    /// READONLY denies writes only.
    fn apply_removable_storage_policy(policy: UsbPolicy) -> Result<(), UsbControlError> {
        let hklm = RegKey::predef(HKEY_LOCAL_MACHINE);
        let full_path = format!(r"{}\{}", REMOVABLE_STORAGE_PATH, REMOVABLE_STORAGE_GUID);
        match policy {
            UsbPolicy::Allow => {
                // Remove the restriction values, if present.
                if let Ok(key) = hklm.open_subkey_with_flags(&full_path, KEY_ALL_ACCESS) {
                    let _ = key.delete_value("Deny_Read");
                    let _ = key.delete_value("Deny_Write");
                    let _ = key.delete_value("Deny_Execute");
                }
                // Best-effort removal of the (now empty) key itself.
                let _ = hklm.delete_subkey(&full_path);
            }
            UsbPolicy::BlockAll => {
                let (key, _) = hklm
                    .create_subkey(&full_path)
                    .map_err(map_winreg_error)?;
                key.set_value("Deny_Read", &1u32)
                    .map_err(map_winreg_error)?;
                key.set_value("Deny_Write", &1u32)
                    .map_err(map_winreg_error)?;
                key.set_value("Deny_Execute", &1u32)
                    .map_err(map_winreg_error)?;
            }
            UsbPolicy::Readonly => {
                let (key, _) = hklm
                    .create_subkey(&full_path)
                    .map_err(map_winreg_error)?;
                // Allow read/execute, deny write.
                key.set_value("Deny_Read", &0u32)
                    .map_err(map_winreg_error)?;
                key.set_value("Deny_Write", &1u32)
                    .map_err(map_winreg_error)?;
                key.set_value("Deny_Execute", &0u32)
                    .map_err(map_winreg_error)?;
            }
        }
        Ok(())
    }
    /// Sets the USBSTOR driver's Start value: 3 (enabled) for ALLOW and
    /// READONLY, 4 (disabled) for BLOCK_ALL. READONLY keeps the driver
    /// active because write blocking is handled by WriteProtect.
    fn apply_usbstor_policy(policy: UsbPolicy) -> Result<(), UsbControlError> {
        let hklm = RegKey::predef(HKEY_LOCAL_MACHINE);
        let key = hklm
            .open_subkey_with_flags(USBSTOR_PATH, KEY_ALL_ACCESS)
            .map_err(map_winreg_error)?;
        match policy {
            UsbPolicy::Allow => {
                // Start = 3 enables the driver.
                key.set_value("Start", &3u32)
                    .map_err(map_winreg_error)?;
            }
            UsbPolicy::BlockAll | UsbPolicy::Readonly => {
                // Start = 4 disables the driver. For READONLY the driver stays
                // enabled and WriteProtect does the blocking; disabling is a
                // safety fallback used only for BLOCK_ALL.
                if policy == UsbPolicy::BlockAll {
                    key.set_value("Start", &4u32)
                        .map_err(map_winreg_error)?;
                } else {
                    // READONLY keeps the driver active.
                    key.set_value("Start", &3u32)
                        .map_err(map_winreg_error)?;
                }
            }
        }
        Ok(())
    }
    /// Sets or clears the StorageDevicePolicies\WriteProtect flag. Enabling
    /// creates the key as needed; disabling is best-effort (ignored if the
    /// key does not exist).
    fn apply_write_protect(enable: bool) -> Result<(), UsbControlError> {
        let hklm = RegKey::predef(HKEY_LOCAL_MACHINE);
        if enable {
            let (key, _) = hklm
                .create_subkey(STORAGE_POLICY_PATH)
                .map_err(map_winreg_error)?;
            key.set_value("WriteProtect", &1u32)
                .map_err(map_winreg_error)?;
        } else if let Ok(key) = hklm.open_subkey_with_flags(STORAGE_POLICY_PATH, KEY_ALL_ACCESS) {
            let _ = key.set_value("WriteProtect", &0u32);
        }
        Ok(())
    }
/// Reads the effective USB policy back from the registry.
///
/// The Removable Storage Access policy (Deny_Read/Deny_Write) takes
/// precedence; a disabled USBSTOR driver (`Start = 4`) is used as a fallback
/// indicator for `BlockAll`. Missing keys or values default to `Allow`.
pub fn get_current_policy() -> Result<UsbPolicy, UsbControlError> {
    let hklm = RegKey::predef(HKEY_LOCAL_MACHINE);
    let full_path = format!(r"{}\{}", REMOVABLE_STORAGE_PATH, REMOVABLE_STORAGE_GUID);
    if let Ok(key) = hklm.open_subkey_with_flags(&full_path, KEY_READ) {
        let read_denied = key.get_value::<u32, _>("Deny_Read").unwrap_or(0) == 1;
        let write_denied = key.get_value::<u32, _>("Deny_Write").unwrap_or(0) == 1;
        match (read_denied, write_denied) {
            (true, true) => return Ok(UsbPolicy::BlockAll),
            (false, true) => return Ok(UsbPolicy::Readonly),
            _ => {}
        }
    }
    if let Ok(key) = hklm.open_subkey_with_flags(USBSTOR_PATH, KEY_READ) {
        if key.get_value::<u32, _>("Start").unwrap_or(3) == 4 {
            return Ok(UsbPolicy::BlockAll);
        }
    }
    Ok(UsbPolicy::Allow)
}
/// Heuristically decides whether an error represents "access denied".
///
/// Matches the explicit `PermissionDenied` variant, or a `RegistryError`
/// whose message contains an English/Portuguese access-denied phrase or the
/// Windows ERROR_ACCESS_DENIED code as rendered by `std::io::Error`
/// ("... (os error 5)").
///
/// Fix: the previous check used `lower.contains("5")`, which matched ANY
/// message containing the digit 5 (e.g. "os error 1450", "error 50"),
/// producing false positives. The check is now anchored to the exact
/// "(os error 5)" suffix that `io::Error::to_string()` produces.
fn is_permission_error(error: &UsbControlError) -> bool {
    match error {
        UsbControlError::PermissionDenied => true,
        UsbControlError::RegistryError(msg) => {
            let lower = msg.to_lowercase();
            lower.contains("access is denied")
                || lower.contains("acesso negado")
                || lower.contains("(os error 5)")
        }
        _ => false,
    }
}
#[allow(dead_code)]
/// Applies the USB policy by writing a temporary PowerShell script and
/// re-launching PowerShell elevated (`-Verb RunAs` triggers the UAC prompt).
///
/// Used as a fallback when direct registry writes fail for lack of
/// privileges. Returns `PermissionDenied` if the elevated process exits
/// with a non-zero status (e.g. the user declined the UAC prompt).
///
/// NOTE(review): the temp script is left behind in `%TEMP%` after running —
/// confirm whether it should be deleted afterwards.
fn apply_policy_with_elevation(policy: UsbPolicy) -> Result<(), UsbControlError> {
    // Write a temporary script that applies the registry keys via `reg.exe`.
    let temp_dir = std::env::temp_dir();
    let script_path: PathBuf = temp_dir.join("raven_usb_policy.ps1");
    let policy_str = policy.as_str();
    // `{{`/`}}` are format!-escaped braces; {guid} and {policy} are substituted.
    let script = format!(
        r#"$ErrorActionPreference = 'Stop'
$guid = '{guid}'
$policy = '{policy}'
function Set-Allow {{
reg delete 'HKLM\Software\Policies\Microsoft\Windows\RemovableStorageDevices\{guid}' /f 2>$null
reg delete 'HKLM\SYSTEM\CurrentControlSet\Control\StorageDevicePolicies' /f 2>$null
reg add 'HKLM\SYSTEM\CurrentControlSet\Services\USBSTOR' /v Start /t REG_DWORD /d 3 /f | Out-Null
}}
function Set-BlockAll {{
reg add 'HKLM\Software\Policies\Microsoft\Windows\RemovableStorageDevices\{guid}' /f | Out-Null
reg add 'HKLM\Software\Policies\Microsoft\Windows\RemovableStorageDevices\{guid}' /v Deny_Read /t REG_DWORD /d 1 /f | Out-Null
reg add 'HKLM\Software\Policies\Microsoft\Windows\RemovableStorageDevices\{guid}' /v Deny_Write /t REG_DWORD /d 1 /f | Out-Null
reg add 'HKLM\Software\Policies\Microsoft\Windows\RemovableStorageDevices\{guid}' /v Deny_Execute /t REG_DWORD /d 1 /f | Out-Null
reg add 'HKLM\SYSTEM\CurrentControlSet\Services\USBSTOR' /v Start /t REG_DWORD /d 4 /f | Out-Null
reg add 'HKLM\SYSTEM\CurrentControlSet\Control\StorageDevicePolicies' /f | Out-Null
reg add 'HKLM\SYSTEM\CurrentControlSet\Control\StorageDevicePolicies' /v WriteProtect /t REG_DWORD /d 0 /f | Out-Null
}}
function Set-Readonly {{
reg add 'HKLM\Software\Policies\Microsoft\Windows\RemovableStorageDevices\{guid}' /f | Out-Null
reg add 'HKLM\Software\Policies\Microsoft\Windows\RemovableStorageDevices\{guid}' /v Deny_Read /t REG_DWORD /d 0 /f | Out-Null
reg add 'HKLM\Software\Policies\Microsoft\Windows\RemovableStorageDevices\{guid}' /v Deny_Write /t REG_DWORD /d 1 /f | Out-Null
reg add 'HKLM\Software\Policies\Microsoft\Windows\RemovableStorageDevices\{guid}' /v Deny_Execute /t REG_DWORD /d 0 /f | Out-Null
reg add 'HKLM\SYSTEM\CurrentControlSet\Services\USBSTOR' /v Start /t REG_DWORD /d 3 /f | Out-Null
reg add 'HKLM\SYSTEM\CurrentControlSet\Control\StorageDevicePolicies' /f | Out-Null
reg add 'HKLM\SYSTEM\CurrentControlSet\Control\StorageDevicePolicies' /v WriteProtect /t REG_DWORD /d 1 /f | Out-Null
}}
switch ($policy) {{
'ALLOW' {{ Set-Allow }}
'BLOCK_ALL' {{ Set-BlockAll }}
'READONLY' {{ Set-Readonly }}
default {{ throw 'Politica invalida' }}
}}
try {{
gpupdate /target:computer /force | Out-Null
}} catch {{}}
"#,
        guid = REMOVABLE_STORAGE_GUID,
        policy = policy_str
    );
    fs::write(&script_path, script).map_err(UsbControlError::Io)?;
    // Start-Process with -Verb RunAs triggers the UAC elevation dialog;
    // -Wait blocks until the elevated child finishes so `status` is meaningful.
    let arg = format!(
        "Start-Process -WindowStyle Hidden -FilePath powershell -Verb RunAs -Wait -ArgumentList '-ExecutionPolicy Bypass -File \"{}\"'",
        script_path.display()
    );
    let status = Command::new("powershell")
        .arg("-Command")
        .arg(arg)
        .status()
        .map_err(UsbControlError::Io)?;
    if !status.success() {
        return Err(UsbControlError::PermissionDenied);
    }
    Ok(())
}
fn map_winreg_error(error: io::Error) -> UsbControlError {
if let Some(code) = error.raw_os_error() {
if code == 5 {
return UsbControlError::PermissionDenied;
}
}
UsbControlError::RegistryError(error.to_string())
}
/// Forces a machine-level Group Policy refresh via `gpupdate` so registry
/// changes take effect without waiting for the next policy cycle.
///
/// A non-zero exit status from gpupdate is not treated as fatal — it is
/// only logged to stderr and `Ok(())` is still returned.
pub fn refresh_group_policy() -> Result<(), UsbControlError> {
    use std::os::windows::process::CommandExt;
    use std::process::Command;
    // Prevents a console window from flashing when spawned from a GUI app.
    const CREATE_NO_WINDOW: u32 = 0x08000000;
    let output = Command::new("gpupdate")
        .args(["/target:computer", "/force"])
        .creation_flags(CREATE_NO_WINDOW)
        .output()
        .map_err(UsbControlError::Io)?;
    if output.status.success() {
        return Ok(());
    }
    eprintln!(
        "[usb_control] gpupdate retornou erro: {}",
        String::from_utf8_lossy(&output.stderr)
    );
    Ok(())
}
}
#[cfg(not(target_os = "windows"))]
mod fallback_impl {
    use super::*;
    /// Non-Windows stub: USB policy enforcement relies on the Windows
    /// registry, so applying a policy always reports `UnsupportedOs`.
    pub fn apply_usb_policy(_policy: UsbPolicy) -> Result<UsbPolicyResult, UsbControlError> {
        Err(UsbControlError::UnsupportedOs)
    }
    /// Non-Windows stub — always `UnsupportedOs`.
    pub fn get_current_policy() -> Result<UsbPolicy, UsbControlError> {
        Err(UsbControlError::UnsupportedOs)
    }
    /// Non-Windows stub — always `UnsupportedOs`.
    pub fn refresh_group_policy() -> Result<(), UsbControlError> {
        Err(UsbControlError::UnsupportedOs)
    }
}
#[cfg(target_os = "windows")]
pub use windows_impl::*;
#[cfg(not(target_os = "windows"))]
pub use fallback_impl::*;
#[cfg(test)]
mod tests {
    use super::*;
    /// `from_str` accepts the canonical names case-insensitively and
    /// rejects unknown values.
    #[test]
    fn test_policy_from_str() {
        assert_eq!(UsbPolicy::from_str("ALLOW"), Some(UsbPolicy::Allow));
        assert_eq!(UsbPolicy::from_str("BLOCK_ALL"), Some(UsbPolicy::BlockAll));
        assert_eq!(UsbPolicy::from_str("READONLY"), Some(UsbPolicy::Readonly));
        assert_eq!(UsbPolicy::from_str("allow"), Some(UsbPolicy::Allow));
        assert_eq!(UsbPolicy::from_str("invalid"), None);
    }
    /// `as_str` round-trips to the uppercase names used by the elevation
    /// script and persisted configuration.
    #[test]
    fn test_policy_as_str() {
        assert_eq!(UsbPolicy::Allow.as_str(), "ALLOW");
        assert_eq!(UsbPolicy::BlockAll.as_str(), "BLOCK_ALL");
        assert_eq!(UsbPolicy::Readonly.as_str(), "READONLY");
    }
}

View file

@ -1,12 +1,12 @@
{ {
"$schema": "https://schema.tauri.app/config/2", "$schema": "https://schema.tauri.app/config/2",
"productName": "Raven", "productName": "Raven",
"version": "0.2.0", "version": "0.1.5",
"identifier": "br.com.esdrasrenan.sistemadechamados", "identifier": "br.com.esdrasrenan.sistemadechamados",
"build": { "build": {
"beforeDevCommand": "bun run dev", "beforeDevCommand": "pnpm run dev",
"devUrl": "http://localhost:1420", "devUrl": "http://localhost:1420",
"beforeBuildCommand": "bun run build", "beforeBuildCommand": "pnpm run build",
"frontendDist": "../dist" "frontendDist": "../dist"
}, },
"app": { "app": {
@ -28,44 +28,26 @@
"plugins": { "plugins": {
"updater": { "updater": {
"endpoints": [ "endpoints": [
"https://raw.githubusercontent.com/esdrasrenan/sistema-de-chamados/main/apps/desktop/public/latest.json" "https://raw.githubusercontent.com/esdrasrenan/sistema-de-chamados/refs/heads/main/apps/desktop/public/latest.json"
], ],
"dialog": true, "dialog": true,
"active": true, "active": true,
"pubkey": "dW50cnVzdGVkIGNvbW1lbnQ6IG1pbmlzaWduIHB1YmxpYyBrZXk6IDZDRTBFNkY1NUQ3QzU0QkEKUldTNlZIeGQ5ZWJnYk5mY0J4aWRlb0dRdVZ4TGpBSUZXMnRVUFhmdmlLT0tlY084UjJQUHFWWUkK" "pubkey": "dW50cnVzdGVkIGNvbW1lbnQ6IG1pbmlzaWduIHB1YmxpYyBrZXk6IEM2NTA0QUY2NzRFQ0UzQzYKUldURzQreDA5a3BReGxMTTFQSUpLZmdJRXZSSm1ldzBQTmFpUE5lS0xFeTZTb2Yzb1NJUFZnOTUK"
},
"deep-link": {
"desktop": {
"schemes": ["raven"]
}
} }
}, },
"bundle": { "bundle": {
"active": true, "active": true,
"createUpdaterArtifacts": true, "createUpdaterArtifacts": true,
"targets": ["nsis", "deb", "rpm"], "targets": [
"deb",
"rpm",
"nsis"
],
"icon": [ "icon": [
"icons/icon.ico", "icons/icon.ico",
"icons/icon.icns", "icons/icon.icns",
"icons/icon.png", "icons/icon.png",
"icons/Raven.png" "icons/Raven.png"
], ]
"resources": {
"../service/target/release/raven-service.exe": "raven-service.exe"
},
"windows": {
"webviewInstallMode": {
"type": "skip"
},
"nsis": {
"displayLanguageSelector": true,
"installerIcon": "icons/icon.ico",
"headerImage": "icons/nsis-header.bmp",
"sidebarImage": "icons/nsis-sidebar.bmp",
"installMode": "perMachine",
"installerHooks": "installer-hooks.nsh",
"languages": ["PortugueseBR"]
}
}
} }
} }

View file

@ -1,256 +0,0 @@
/**
* ChatHubWidget - Lista de sessoes de chat ativas usando Convex subscriptions
*
* Arquitetura:
* - Usa useQuery do Convex React para subscription reativa (tempo real verdadeiro)
* - Sem polling - todas as atualizacoes sao push-based via WebSocket
* - Tauri usado apenas para gerenciamento de janelas
*/
import { useEffect, useState } from "react"
import { invoke } from "@tauri-apps/api/core"
import { Loader2, MessageCircle, ChevronUp, X, Minimize2 } from "lucide-react"
import { useMachineSessions, type MachineSession } from "./useConvexMachineQueries"
/**
* Hub Widget - Lista todas as sessoes de chat ativas
* Ao clicar em uma sessao, abre/foca a janela de chat daquele ticket
*/
/**
 * Hub widget listing every active chat session for this machine.
 *
 * Renders one of four states: missing token, loading, empty, or the session
 * list (minimized pill or expanded panel). Clicking a session asks Tauri to
 * open/focus that ticket's chat window and closes the hub window.
 */
export function ChatHubWidget() {
  // Initialize from the actual window height (< 100px means minimized).
  const [isMinimized, setIsMinimized] = useState(() => window.innerHeight < 100)
  // Reactive Convex subscription — updates are pushed, no polling.
  const { sessions = [], isLoading, hasToken } = useMachineSessions()
  // Keep the minimized flag in sync with the real window size.
  useEffect(() => {
    const mountTime = Date.now()
    const STABILIZATION_DELAY = 500
    const handler = () => {
      // Ignore resize events fired while the window is still settling after mount.
      if (Date.now() - mountTime < STABILIZATION_DELAY) {
        return
      }
      const h = window.innerHeight
      setIsMinimized(h < 100)
    }
    window.addEventListener("resize", handler)
    return () => window.removeEventListener("resize", handler)
  }, [])
  // Open the ticket's chat window, then close the hub window.
  const handleSelectSession = async (ticketId: string, ticketRef: number) => {
    try {
      // Tauri 2.x auto-converts snake_case (Rust) to camelCase (JS)
      await invoke("open_chat_window", { ticketId, ticketRef })
      await invoke("close_hub_window")
    } catch (err) {
      console.error("open_chat_window FAILED:", err)
    }
  }
  // Collapse to the pill; the backend resizes the native window.
  const handleMinimize = async () => {
    setIsMinimized(true)
    try {
      await invoke("set_hub_minimized", { minimized: true })
    } catch (err) {
      console.error("Erro ao minimizar hub:", err)
    }
  }
  // Expand: resize the native window first, then flip local state shortly
  // after so the resize handler above does not immediately re-minimize.
  const handleExpand = async () => {
    try {
      await invoke("set_hub_minimized", { minimized: false })
      setTimeout(() => setIsMinimized(false), 100)
    } catch (err) {
      console.error("set_hub_minimized FAILED:", err)
      setIsMinimized(false)
    }
  }
  const handleClose = () => {
    invoke("close_hub_window").catch((err) => {
      console.error("Erro ao fechar janela do hub:", err)
    })
  }
  // Sum of unread messages across all sessions (badge on the pill).
  const totalUnread = sessions.reduce((sum, s) => sum + s.unreadCount, 0)
  // No machine token configured.
  if (!hasToken) {
    return (
      <div className="pointer-events-none flex h-full w-full items-end justify-end bg-transparent p-2">
        <div className="pointer-events-auto flex items-center gap-2 rounded-full bg-red-100 px-4 py-2 text-red-600 shadow-lg">
          <span className="text-sm font-medium">Token nao configurado</span>
        </div>
      </div>
    )
  }
  // Sessions still loading from Convex.
  if (isLoading) {
    return (
      <div className="pointer-events-none flex h-full w-full items-end justify-end bg-transparent p-2">
        <div className="pointer-events-auto flex items-center gap-2 rounded-full bg-slate-200 px-4 py-2 text-slate-600 shadow-lg">
          <Loader2 className="size-4 animate-spin" />
          <span className="text-sm font-medium">Carregando...</span>
        </div>
      </div>
    )
  }
  // No active chat sessions.
  if (sessions.length === 0) {
    return (
      <div className="pointer-events-none flex h-full w-full items-end justify-end bg-transparent p-2">
        <div className="pointer-events-auto flex items-center gap-2 rounded-full bg-slate-200 px-4 py-2 text-slate-600 shadow-lg">
          <MessageCircle className="size-4" />
          <span className="text-sm font-medium">Sem chats</span>
        </div>
      </div>
    )
  }
  // Minimized pill with session count and unread badge.
  if (isMinimized) {
    return (
      <div className="pointer-events-none flex h-full w-full items-end justify-end bg-transparent pr-3">
        <button
          onClick={(e) => {
            e.stopPropagation()
            handleExpand()
          }}
          className="pointer-events-auto relative flex items-center gap-2 rounded-full bg-black px-4 py-2 text-white shadow-lg hover:bg-black/90"
        >
          <MessageCircle className="size-4" />
          <span className="text-sm font-medium">
            {sessions.length} chat{sessions.length !== 1 ? "s" : ""}
          </span>
          <span className="size-2 rounded-full bg-emerald-400" />
          <ChevronUp className="size-4" />
          {totalUnread > 0 && (
            <span className="absolute -right-1 -top-1 flex size-5 items-center justify-center rounded-full bg-red-500 text-xs font-bold">
              {totalUnread > 9 ? "9+" : totalUnread}
            </span>
          )}
        </button>
      </div>
    )
  }
  // Expanded panel.
  return (
    <div className="flex h-full flex-col overflow-hidden rounded-2xl bg-white shadow-xl">
      {/* Header (draggable region for the frameless Tauri window) */}
      <div
        data-tauri-drag-region
        className="flex items-center justify-between border-b border-slate-200 bg-slate-50 px-4 py-3 rounded-t-2xl"
      >
        <div className="flex items-center gap-3">
          <div className="flex size-10 items-center justify-center rounded-full bg-black text-white">
            <MessageCircle className="size-5" />
          </div>
          <div>
            <p className="text-sm font-semibold text-slate-900">Chats Ativos</p>
            <p className="text-xs text-slate-500">
              {sessions.length} conversa{sessions.length !== 1 ? "s" : ""}
            </p>
          </div>
        </div>
        <div className="flex items-center gap-1">
          <button
            onClick={handleMinimize}
            className="rounded-md p-1.5 text-slate-500 hover:bg-slate-100"
            aria-label="Minimizar lista de chats"
          >
            <Minimize2 className="size-4" />
          </button>
          <button
            onClick={handleClose}
            className="rounded-md p-1.5 text-slate-500 hover:bg-slate-100"
            aria-label="Fechar lista de chats"
          >
            <X className="size-4" />
          </button>
        </div>
      </div>
      {/* Session list */}
      <div className="flex-1 overflow-y-auto p-2">
        <div className="space-y-2">
          {sessions.map((session) => (
            <SessionItem
              key={session.sessionId}
              session={session}
              onClick={() => handleSelectSession(session.ticketId, session.ticketRef)}
            />
          ))}
        </div>
      </div>
    </div>
  )
}
/**
 * Single row in the hub's session list: avatar, ticket ref, agent name,
 * relative last-activity time and an unread badge. Clicking it invokes
 * the parent's `onClick` (which opens the ticket's chat window).
 */
function SessionItem({
  session,
  onClick,
}: {
  session: MachineSession
  onClick: () => void
}) {
  // Stop propagation so the click does not bubble to the list container.
  const handleClick = (e: React.MouseEvent) => {
    e.stopPropagation()
    onClick()
  }
  return (
    <button
      onClick={handleClick}
      className="flex w-full items-center gap-3 rounded-xl p-3 text-left transition hover:bg-slate-50"
    >
      {/* Avatar */}
      <div className="relative flex size-10 shrink-0 items-center justify-center rounded-full bg-black text-white">
        <MessageCircle className="size-5" />
        {/* Online indicator dot */}
        <span className="absolute -bottom-0.5 -right-0.5 size-3 rounded-full border-2 border-white bg-emerald-500" />
      </div>
      {/* Ticket/agent info */}
      <div className="min-w-0 flex-1">
        <div className="flex items-center justify-between gap-2">
          <p className="truncate text-sm font-medium text-slate-900">
            Ticket #{session.ticketRef}
          </p>
          <span className="shrink-0 text-xs text-slate-400">
            {formatRelativeTime(session.lastActivityAt)}
          </span>
        </div>
        <p className="truncate text-xs text-slate-500">
          {session.agentName}
        </p>
      </div>
      {/* Unread badge */}
      {session.unreadCount > 0 && (
        <span className="flex size-5 shrink-0 items-center justify-center rounded-full bg-red-500 text-xs font-bold text-white">
          {session.unreadCount > 9 ? "9+" : session.unreadCount}
        </span>
      )}
    </button>
  )
}
/**
 * Formats a past timestamp (ms since epoch) as a compact relative label:
 * "agora" (< 1 min), "Nm", "Nh", or "Nd", relative to `Date.now()`.
 */
function formatRelativeTime(timestamp: number): string {
  const elapsedMs = Date.now() - timestamp
  const minutes = Math.floor(elapsedMs / 60000)
  if (minutes < 1) return "agora"
  if (minutes < 60) return `${minutes}m`
  const hours = Math.floor(minutes / 60)
  if (hours < 24) return `${hours}h`
  return `${Math.floor(hours / 24)}d`
}

File diff suppressed because it is too large Load diff

View file

@ -1,146 +0,0 @@
/**
* ConvexMachineProvider - Provider Convex para autenticacao via token de maquina
*
* Este provider inicializa o ConvexReactClient usando o token da maquina
* armazenado no Tauri Store, permitindo subscriptions reativas em tempo real.
*
* Arquitetura:
* - Carrega o token do Tauri Store na montagem
* - Inicializa o ConvexReactClient com a URL do Convex
* - Disponibiliza o cliente para componentes filhos via Context
* - Reconecta automaticamente quando o token muda
*/
import { createContext, useContext, useEffect, useState, type ReactNode } from "react"
import { ConvexReactClient } from "convex/react"
import { getMachineStoreConfig } from "./machineStore"
// URL do Convex - em producao, usa o dominio personalizado
const CONVEX_URL = import.meta.env.MODE === "production"
? "https://convex.esdrasrenan.com.br"
: (import.meta.env.VITE_CONVEX_URL ?? "https://convex.esdrasrenan.com.br")
type MachineAuthState = {
token: string | null
apiBaseUrl: string | null
isLoading: boolean
error: string | null
}
type ConvexMachineContextValue = {
client: ConvexReactClient | null
machineToken: string | null
apiBaseUrl: string | null
isReady: boolean
error: string | null
reload: () => Promise<void>
}
const ConvexMachineContext = createContext<ConvexMachineContextValue | null>(null)
/**
 * Returns the machine-auth Convex context; throws when called outside
 * a `ConvexMachineProvider`.
 */
export function useConvexMachine() {
  const value = useContext(ConvexMachineContext)
  if (value === null) {
    throw new Error("useConvexMachine must be used within ConvexMachineProvider")
  }
  return value
}
/** Convenience hook: just the machine token from the Convex machine context. */
export function useMachineToken() {
  return useConvexMachine().machineToken
}
interface ConvexMachineProviderProps {
children: ReactNode
}
/**
 * Provider that loads the machine token from the Tauri Store and exposes a
 * `ConvexReactClient` (plus token/config) to descendants via context.
 *
 * Lifecycle: load config on mount -> create a Convex client whenever a token
 * is present -> close the client when the token disappears or on unmount.
 */
export function ConvexMachineProvider({ children }: ConvexMachineProviderProps) {
  const [authState, setAuthState] = useState<MachineAuthState>({
    token: null,
    apiBaseUrl: null,
    isLoading: true,
    error: null,
  })
  const [client, setClient] = useState<ConvexReactClient | null>(null)
  // Loads token + API base URL from the Tauri Store; also exposed as `reload`.
  const loadConfig = async () => {
    setAuthState(prev => ({ ...prev, isLoading: true, error: null }))
    try {
      const config = await getMachineStoreConfig()
      if (!config.token) {
        setAuthState({
          token: null,
          apiBaseUrl: config.apiBaseUrl,
          isLoading: false,
          error: "Token da maquina nao encontrado",
        })
        return
      }
      setAuthState({
        token: config.token,
        apiBaseUrl: config.apiBaseUrl,
        isLoading: false,
        error: null,
      })
    } catch (err) {
      const message = err instanceof Error ? err.message : String(err)
      setAuthState({
        token: null,
        apiBaseUrl: null,
        isLoading: false,
        error: message || "Erro ao carregar configuracao",
      })
    }
  }
  // Load configuration once on mount.
  useEffect(() => {
    loadConfig()
  }, [])
  // (Re)create the Convex client whenever the token changes.
  useEffect(() => {
    if (!authState.token) {
      // No token: tear down any existing client.
      if (client) {
        client.close()
        setClient(null)
      }
      return
    }
    const newClient = new ConvexReactClient(CONVEX_URL, {
      // Avoid the unsaved-changes prompt loop in the embedded webview.
      unsavedChangesWarning: false,
    })
    setClient(newClient)
    // Close the client on unmount or when the token changes.
    return () => {
      newClient.close()
    }
  }, [authState.token]) // eslint-disable-line react-hooks/exhaustive-deps
  const contextValue: ConvexMachineContextValue = {
    client,
    machineToken: authState.token,
    apiBaseUrl: authState.apiBaseUrl,
    // Ready only when loading finished AND a client exists for a valid token.
    isReady: !authState.isLoading && !!client && !!authState.token,
    error: authState.error,
    reload: loadConfig,
  }
  return (
    <ConvexMachineContext.Provider value={contextValue}>
      {children}
    </ConvexMachineContext.Provider>
  )
}

View file

@ -1,41 +0,0 @@
// MIME types we try for MediaRecorder, in order of preference.
const AUDIO_MIME_CANDIDATES = [
  "audio/webm;codecs=opus",
  "audio/webm",
  "audio/ogg;codecs=opus",
  "audio/ogg",
  "audio/mp4",
  "audio/mpeg",
  "audio/wav",
]
// Maps a normalized (parameter-free, lowercase) MIME type to a file extension.
const AUDIO_MIME_EXTENSION_MAP: Record<string, string> = {
  "audio/webm": "webm",
  "audio/ogg": "ogg",
  "audio/mp4": "m4a",
  "audio/mpeg": "mp3",
  "audio/wav": "wav",
}

/** Strips codec parameters and lowercases a MIME type ("A/B; x" -> "a/b"). */
export function normalizeMimeType(mimeType: string) {
  const [base] = mimeType.split(";")
  return base.trim().toLowerCase()
}

/**
 * Returns the first candidate MIME type accepted by `isTypeSupported`
 * (defaulting to `MediaRecorder.isTypeSupported` when available), or ""
 * when nothing is supported or no checker exists.
 */
export function pickSupportedMimeType(isTypeSupported?: (mimeType: string) => boolean) {
  let checker = isTypeSupported
  if (!checker && typeof MediaRecorder !== "undefined") {
    checker = MediaRecorder.isTypeSupported.bind(MediaRecorder)
  }
  if (!checker) return ""
  const match = AUDIO_MIME_CANDIDATES.find((candidate) => checker!(candidate))
  return match ?? ""
}

/**
 * Builds a timestamped audio file name ("audio-<iso>.<ext>"), choosing the
 * extension from the MIME type and falling back to "webm".
 */
export function buildAudioFileName(mimeType: string, now: Date = new Date()) {
  const extension = AUDIO_MIME_EXTENSION_MAP[normalizeMimeType(mimeType)] ?? "webm"
  const timestamp = now.toISOString().replace(/[:.]/g, "-")
  return `audio-${timestamp}.${extension}`
}

View file

@ -1,65 +0,0 @@
import { ConvexProvider } from "convex/react"
import { ChatWidget } from "./ChatWidget"
import { ChatHubWidget } from "./ChatHubWidget"
import { ConvexMachineProvider, useConvexMachine } from "./ConvexMachineProvider"
import { Loader2 } from "lucide-react"
/**
 * Routes between the hub widget and a single-ticket chat widget based on
 * URL query parameters (`hub=true` or `ticketId`/`ticketRef`), once the
 * machine-authenticated Convex client is ready.
 */
function ChatAppContent() {
  const { client, isReady, error } = useConvexMachine()
  // Read ticketId/ticketRef/hub from the window's query string.
  const params = new URLSearchParams(window.location.search)
  const ticketId = params.get("ticketId")
  const ticketRef = params.get("ticketRef")
  const isHub = params.get("hub") === "true"
  // While the Convex client is not ready, show an error pill or a spinner.
  if (!isReady || !client) {
    if (error) {
      return (
        <div className="pointer-events-none flex h-full w-full items-end justify-end bg-transparent p-2">
          <div className="pointer-events-auto flex items-center gap-2 rounded-full bg-red-100 px-4 py-2 text-red-600 shadow-lg">
            <span className="text-sm font-medium">Erro: {error}</span>
          </div>
        </div>
      )
    }
    return (
      <div className="pointer-events-none flex h-full w-full items-end justify-end bg-transparent p-2">
        <div className="pointer-events-auto flex items-center gap-2 rounded-full bg-slate-200 px-4 py-2 text-slate-600 shadow-lg">
          <Loader2 className="size-4 animate-spin" />
          <span className="text-sm font-medium">Conectando...</span>
        </div>
      </div>
    )
  }
  // Hub mode — list of all sessions (also the fallback when no ticketId).
  if (isHub || !ticketId) {
    return (
      <ConvexProvider client={client}>
        <ChatHubWidget />
      </ConvexProvider>
    )
  }
  // Chat mode — conversation for one specific ticket.
  return (
    <ConvexProvider client={client}>
      <ChatWidget ticketId={ticketId} ticketRef={ticketRef ? Number(ticketRef) : undefined} />
    </ConvexProvider>
  )
}
/** Entry component: wraps the chat UI in the machine-auth Convex provider. */
export function ChatApp() {
  return (
    <ConvexMachineProvider>
      <ChatAppContent />
    </ConvexMachineProvider>
  )
}
export { ChatWidget }
export { ChatHubWidget }
export * from "./types"

View file

@ -1,52 +0,0 @@
import { Store } from "@tauri-apps/plugin-store"
import { appLocalDataDir, join } from "@tauri-apps/api/path"
const STORE_FILENAME = "machine-agent.json"
const DEFAULT_API_BASE_URL = "https://tickets.esdrasrenan.com.br"
type MachineStoreConfig = {
apiBaseUrl?: string
appUrl?: string
convexUrl?: string
}
type MachineStoreData = {
token?: string
config?: MachineStoreConfig
}
/**
 * Reads the machine token and config from the Tauri Store file located in
 * the app's local data directory. Missing entries come back as `undefined`.
 */
async function loadStore(): Promise<MachineStoreData> {
  const appData = await appLocalDataDir()
  const storePath = await join(appData, STORE_FILENAME)
  const store = await Store.load(storePath)
  const token = await store.get<string>("token")
  const config = await store.get<MachineStoreConfig>("config")
  // Normalize null -> undefined so callers only deal with one "absent" value.
  return { token: token ?? undefined, config: config ?? undefined }
}
/**
 * Trims a URL and strips trailing slashes. Falls back to `fallback` when
 * `value` is missing; returns `fallback` (or "") when the result is empty.
 */
function normalizeUrl(value?: string | null, fallback?: string) {
  const source = value ?? fallback ?? ""
  const trimmed = source.trim()
  return trimmed ? trimmed.replace(/\/+$/, "") : (fallback ?? "")
}
function resolveApiBaseUrl(config?: MachineStoreConfig): string {
const fromConfig = normalizeUrl(config?.apiBaseUrl, DEFAULT_API_BASE_URL)
return fromConfig || DEFAULT_API_BASE_URL
}
/** Resolves the app URL: config value, then API base URL, then the default. */
function resolveAppUrl(config?: MachineStoreConfig, apiBaseUrl?: string): string {
  const fromConfig = normalizeUrl(config?.appUrl, apiBaseUrl)
  if (fromConfig) return fromConfig
  return apiBaseUrl || DEFAULT_API_BASE_URL
}
/**
 * Loads and validates the machine-agent configuration from the Tauri Store.
 *
 * @returns the machine token plus resolved `apiBaseUrl` and `appUrl`.
 * @throws when no token is stored.
 */
export async function getMachineStoreConfig() {
  const data = await loadStore()
  if (!data.token) {
    throw new Error("Token de maquina nao encontrado no store")
  }
  const apiBaseUrl = resolveApiBaseUrl(data.config)
  const appUrl = resolveAppUrl(data.config, apiBaseUrl)
  return { token: data.token, apiBaseUrl, appUrl }
}

View file

@ -1,70 +0,0 @@
// Types for the live-chat system shared between the widgets and events.
/** An active chat session between an agent and this machine. */
export interface ChatSession {
  sessionId: string
  ticketId: string
  ticketRef: number
  ticketSubject: string
  agentName: string
  agentEmail?: string
  agentAvatarUrl?: string
  unreadCount: number
  lastActivityAt: number
  startedAt: number
}
/** A single chat message, with any file attachments. */
export interface ChatMessage {
  id: string
  body: string
  authorName: string
  authorAvatarUrl?: string
  // True when the message originated from this machine (vs. the agent).
  isFromMachine: boolean
  createdAt: number
  attachments: ChatAttachment[]
}
/** File attached to a chat message, referenced by its storage id. */
export interface ChatAttachment {
  storageId: string
  name: string
  size?: number
  type?: string
}
/** Response shape when listing messages for a ticket. */
export interface ChatMessagesResponse {
  messages: ChatMessage[]
  hasSession: boolean
  unreadCount?: number
}
/** Response shape after sending a message. */
export interface SendMessageResponse {
  messageId: string
  createdAt: number
}
/** Event: an agent started a new chat session. */
export interface SessionStartedEvent {
  session: ChatSession
}
/** Event: unread counters changed. */
export interface UnreadUpdateEvent {
  totalUnread: number
  sessions: ChatSession[]
}
/** Event: new message(s) arrived. */
export interface NewMessageEvent {
  totalUnread: number
  newCount: number
  sessions: ChatSession[]
}
/** Event: a chat session ended. */
export interface SessionEndedEvent {
  sessionId: string
  ticketId: string
}
/** A past session and its messages, for history views. */
export interface ChatHistorySession {
  sessionId: string
  startedAt: number
  endedAt: number | null
  agentName: string
  messages: ChatMessage[]
}

View file

@ -1,253 +0,0 @@
"use client"
import { useCallback, useEffect, useRef, useState } from "react"
import { buildAudioFileName, pickSupportedMimeType } from "./audio-recorder-utils"
// Result handed to the consumer when a recording finishes successfully.
type AudioRecorderPayload = {
  file: File
  durationSeconds: number
}
// Configuration for the recorder hook; durations in seconds, sizes in bytes.
type AudioRecorderOptions = {
  onAudioReady: (payload: AudioRecorderPayload) => Promise<void>
  onError?: (message: string) => void
  maxDurationSeconds?: number
  maxFileSizeBytes?: number
  audioBitsPerSecond?: number
  // Number of visual level bars rendered by the UI.
  levelBars?: number
}
// Public state + controls returned by the hook.
type AudioRecorderState = {
  isRecording: boolean
  isProcessing: boolean
  durationSeconds: number
  levels: number[]
  startRecording: () => Promise<void>
  stopRecording: () => void
  cancelRecording: () => void
}
/**
 * Microphone recording hook built on MediaRecorder + WebAudio.
 *
 * Captures audio from `getUserMedia`, streams time-domain samples through an
 * AnalyserNode to produce `levels` (for a waveform UI), enforces duration
 * and file-size limits, and delivers the finished recording to
 * `onAudioReady`. `cancelRecording` stops and discards; `stopRecording`
 * stops and delivers. All user-facing error strings are pt-BR.
 */
export function useAudioRecorder(options: AudioRecorderOptions): AudioRecorderState {
  const {
    onAudioReady,
    onError,
    maxDurationSeconds = 300,
    maxFileSizeBytes = 5 * 1024 * 1024,
    audioBitsPerSecond = 64000,
    levelBars = 32,
  } = options
  const [isRecording, setIsRecording] = useState(false)
  const [isProcessing, setIsProcessing] = useState(false)
  const [durationSeconds, setDurationSeconds] = useState(0)
  const [levels, setLevels] = useState<number[]>(() => Array.from({ length: levelBars }, () => 0))
  // Mirrors durationSeconds so the onstop closure reads the latest value.
  const durationRef = useRef(0)
  const recorderRef = useRef<MediaRecorder | null>(null)
  const streamRef = useRef<MediaStream | null>(null)
  const audioContextRef = useRef<AudioContext | null>(null)
  const analyserRef = useRef<AnalyserNode | null>(null)
  const chunksRef = useRef<BlobPart[]>([])
  const timerRef = useRef<number | null>(null)
  const stopTimeoutRef = useRef<number | null>(null)
  const rafRef = useRef<number | null>(null)
  // Set before stopping to discard the recording instead of delivering it.
  const cancelRef = useRef(false)
  // Guards setState calls after unmount.
  const mountedRef = useRef(true)
  useEffect(() => {
    return () => {
      mountedRef.current = false
    }
  }, [])
  // Releases every resource: timers, rAF loop, media tracks, audio context.
  const cleanup = useCallback(() => {
    if (timerRef.current) {
      clearInterval(timerRef.current)
      timerRef.current = null
    }
    if (stopTimeoutRef.current) {
      clearTimeout(stopTimeoutRef.current)
      stopTimeoutRef.current = null
    }
    if (rafRef.current) {
      cancelAnimationFrame(rafRef.current)
      rafRef.current = null
    }
    if (streamRef.current) {
      streamRef.current.getTracks().forEach((track) => track.stop())
      streamRef.current = null
    }
    if (audioContextRef.current) {
      void audioContextRef.current.close()
      audioContextRef.current = null
    }
    analyserRef.current = null
    recorderRef.current = null
    chunksRef.current = []
  }, [])
  // rAF loop: samples the analyser and maps amplitudes to `levelBars` values
  // in [0, 1] (128 is the zero line of getByteTimeDomainData).
  const updateLevels = useCallback(() => {
    const analyser = analyserRef.current
    if (!analyser) return
    const bufferLength = analyser.fftSize
    const dataArray = new Uint8Array(bufferLength)
    analyser.getByteTimeDomainData(dataArray)
    const step = Math.floor(bufferLength / levelBars)
    const nextLevels = Array.from({ length: levelBars }, (_, index) => {
      let sum = 0
      const start = index * step
      const end = Math.min(start + step, bufferLength)
      for (let i = start; i < end; i += 1) {
        sum += Math.abs(dataArray[i] - 128)
      }
      const avg = sum / Math.max(1, end - start)
      return Math.min(1, avg / 128)
    })
    if (mountedRef.current) {
      setLevels(nextLevels)
      rafRef.current = requestAnimationFrame(updateLevels)
    }
  }, [levelBars])
  // Stops the recorder; actual delivery happens in the recorder's onstop.
  const stopRecording = useCallback(() => {
    if (!recorderRef.current || !isRecording) return
    setIsRecording(false)
    try {
      recorderRef.current.stop()
    } catch (error) {
      console.error("Falha ao parar gravação:", error)
      cleanup()
    }
  }, [cleanup, isRecording])
  // Stops and discards the current recording (onstop sees cancelRef = true).
  const cancelRecording = useCallback(() => {
    cancelRef.current = true
    stopRecording()
  }, [stopRecording])
  const startRecording = useCallback(async () => {
    if (isRecording || isProcessing) return
    if (typeof navigator === "undefined" || !navigator.mediaDevices?.getUserMedia) {
      onError?.("Gravação de áudio indisponível neste dispositivo.")
      return
    }
    try {
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
      streamRef.current = stream
      // Wire the mic stream into an analyser for the level meter.
      const audioContext = new AudioContext()
      const analyser = audioContext.createAnalyser()
      analyser.fftSize = 256
      const source = audioContext.createMediaStreamSource(stream)
      source.connect(analyser)
      audioContextRef.current = audioContext
      analyserRef.current = analyser
      const mimeType = pickSupportedMimeType()
      const recorderOptions: MediaRecorderOptions = mimeType
        ? { mimeType, audioBitsPerSecond }
        : { audioBitsPerSecond }
      const recorder = new MediaRecorder(stream, recorderOptions)
      recorderRef.current = recorder
      chunksRef.current = []
      cancelRef.current = false
      recorder.ondataavailable = (event) => {
        if (event.data.size > 0) {
          chunksRef.current.push(event.data)
        }
      }
      // Assembles the blob, enforces the size limit, and delivers the file.
      recorder.onstop = async () => {
        const blobType = recorder.mimeType || mimeType || "audio/webm"
        const blob = new Blob(chunksRef.current, { type: blobType })
        chunksRef.current = []
        cleanup()
        if (cancelRef.current) {
          if (mountedRef.current) {
            setLevels(Array.from({ length: levelBars }, () => 0))
          }
          return
        }
        if (blob.size > maxFileSizeBytes) {
          onError?.("Áudio excede o limite de 5MB. Tente gravar por menos tempo.")
          if (mountedRef.current) {
            setLevels(Array.from({ length: levelBars }, () => 0))
          }
          return
        }
        const fileName = buildAudioFileName(blobType)
        const file = new File([blob], fileName, { type: blobType })
        setIsProcessing(true)
        try {
          await onAudioReady({ file, durationSeconds: durationRef.current })
        } catch (error) {
          const message = error instanceof Error ? error.message : "Falha ao enviar áudio."
          onError?.(message)
        } finally {
          if (mountedRef.current) {
            setIsProcessing(false)
            setLevels(Array.from({ length: levelBars }, () => 0))
          }
        }
      }
      recorder.start()
      durationRef.current = 0
      setDurationSeconds(0)
      setIsRecording(true)
      updateLevels()
      // 1s ticker that also auto-stops at the duration cap.
      timerRef.current = window.setInterval(() => {
        setDurationSeconds((prev) => {
          const next = prev + 1
          durationRef.current = next
          if (next >= maxDurationSeconds) {
            stopRecording()
            return next
          }
          return next
        })
      }, 1000)
      // Hard safety timeout in case the interval path misses the cap.
      stopTimeoutRef.current = window.setTimeout(() => {
        stopRecording()
      }, maxDurationSeconds * 1000)
    } catch (error) {
      console.error("Falha ao iniciar gravação:", error)
      onError?.("Não foi possível iniciar a gravação de áudio.")
      cleanup()
    }
  }, [
    audioBitsPerSecond,
    cleanup,
    isProcessing,
    isRecording,
    levelBars,
    maxDurationSeconds,
    maxFileSizeBytes,
    onAudioReady,
    onError,
    stopRecording,
    updateLevels,
  ])
  return {
    isRecording,
    isProcessing,
    durationSeconds,
    levels,
    startRecording,
    stopRecording,
    cancelRecording,
  }
}

View file

@ -1,206 +0,0 @@
/**
* Hooks customizados para queries/mutations do Convex com token de maquina
*
* Estes hooks encapsulam a logica de passar o machineToken automaticamente
* para as queries e mutations do Convex, proporcionando uma API simples
* e reativa para os componentes de chat.
*/
import { useQuery, useMutation, useAction } from "convex/react"
import { api } from "@convex/_generated/api"
import type { Id } from "@convex/_generated/dataModel"
import { useMachineToken } from "./ConvexMachineProvider"
// ============================================
// TIPOS
// ============================================
/** Active chat session as returned by `liveChat.listMachineSessions`. */
export type MachineSession = {
  sessionId: Id<"liveChatSessions">
  ticketId: Id<"tickets">
  ticketRef: number
  ticketSubject: string
  agentName: string
  agentEmail?: string
  agentAvatarUrl?: string
  unreadCount: number
  lastActivityAt: number
  startedAt: number
}
/** One chat message as returned by `liveChat.listMachineMessages`. */
export type MachineMessage = {
  id: Id<"ticketChatMessages">
  body: string
  authorName: string
  authorAvatarUrl?: string
  // True when the message was sent from this machine (vs. the agent).
  isFromMachine: boolean
  createdAt: number
  attachments: Array<{
    storageId: Id<"_storage">
    name: string
    size?: number
    type?: string
  }>
}
/** Result shape of the messages query for one ticket. */
export type MachineMessagesResult = {
  messages: MachineMessage[]
  hasSession: boolean
  unreadCount: number
}
/** Lightweight update summary across all sessions (polling fallback). */
export type MachineUpdatesResult = {
  hasActiveSessions: boolean
  sessions: Array<{
    ticketId: Id<"tickets">
    ticketRef: number
    unreadCount: number
    lastActivityAt: number
  }>
  totalUnread: number
}
// ============================================
// HOOKS
// ============================================
/**
* Hook para listar sessoes ativas da maquina
* Subscription reativa - atualiza automaticamente quando ha mudancas
*/
export function useMachineSessions() {
const machineToken = useMachineToken()
const sessions = useQuery(
api.liveChat.listMachineSessions,
machineToken ? { machineToken } : "skip"
)
return {
sessions: sessions as MachineSession[] | undefined,
isLoading: sessions === undefined && !!machineToken,
hasToken: !!machineToken,
}
}
/**
 * Reactive subscription to the chat messages of a single ticket.
 *
 * The query is skipped until both a machine token and a ticket id are
 * available, so callers may mount this hook unconditionally.
 *
 * @param ticketId - Ticket whose messages should be streamed, or null to skip.
 * @param options.limit - Optional cap on the number of messages returned.
 */
export function useMachineMessages(ticketId: Id<"tickets"> | null, options?: { limit?: number }) {
  const machineToken = useMachineToken()
  // Cast once instead of re-casting the raw result on every field access.
  const result = useQuery(
    api.liveChat.listMachineMessages,
    machineToken && ticketId
      ? { machineToken, ticketId, limit: options?.limit }
      : "skip"
  ) as MachineMessagesResult | undefined
  return {
    messages: result?.messages ?? [],
    hasSession: result?.hasSession ?? false,
    unreadCount: result?.unreadCount ?? 0,
    // undefined means the subscription has not delivered a value yet.
    isLoading: result === undefined && !!machineToken && !!ticketId,
    hasToken: !!machineToken,
  }
}
/**
 * Lightweight reactive check for pending live-chat activity on this machine.
 * Useful as a fallback or quick status indicator without subscribing to the
 * full message lists.
 */
export function useMachineUpdates() {
  const machineToken = useMachineToken()
  // Cast once instead of re-casting the raw result on every field access.
  const result = useQuery(
    api.liveChat.checkMachineUpdates,
    machineToken ? { machineToken } : "skip"
  ) as MachineUpdatesResult | undefined
  return {
    hasActiveSessions: result?.hasActiveSessions ?? false,
    sessions: result?.sessions ?? [],
    totalUnread: result?.totalUnread ?? 0,
    // undefined means the subscription has not delivered a value yet.
    isLoading: result === undefined && !!machineToken,
    hasToken: !!machineToken,
  }
}
/**
 * Returns an async function that posts a chat message (optionally with
 * attachments) to a ticket on behalf of this machine.
 * The returned function throws if the machine token is not yet available.
 */
export function usePostMachineMessage() {
  const machineToken = useMachineToken()
  const postMessage = useMutation(api.liveChat.postMachineMessage)
  type PostArgs = {
    ticketId: Id<"tickets">
    body: string
    attachments?: Array<{
      storageId: Id<"_storage">
      name: string
      size?: number
      type?: string
    }>
  }
  return async ({ ticketId, body, attachments }: PostArgs) => {
    if (!machineToken) {
      throw new Error("Token da maquina nao disponivel")
    }
    return postMessage({ machineToken, ticketId, body, attachments })
  }
}
/**
 * Returns an async function that marks the given ticket messages as read
 * for this machine.
 * The returned function throws if the machine token is not yet available.
 */
export function useMarkMachineMessagesRead() {
  const machineToken = useMachineToken()
  const markRead = useMutation(api.liveChat.markMachineMessagesRead)
  return async ({
    ticketId,
    messageIds,
  }: {
    ticketId: Id<"tickets">
    messageIds: Id<"ticketChatMessages">[]
  }) => {
    if (!machineToken) {
      throw new Error("Token da maquina nao disponivel")
    }
    return markRead({ machineToken, ticketId, messageIds })
  }
}
/**
 * Returns an async function that requests an upload URL for a file
 * attachment via the Convex action.
 * The returned function throws if the machine token is not yet available.
 */
export function useGenerateMachineUploadUrl() {
  const machineToken = useMachineToken()
  const generateUrl = useAction(api.liveChat.generateMachineUploadUrl)
  return async (file: { fileName: string; fileType: string; fileSize: number }) => {
    if (!machineToken) {
      throw new Error("Token da maquina nao disponivel")
    }
    const { fileName, fileType, fileSize } = file
    return generateUrl({ machineToken, fileName, fileType, fileSize })
  }
}

View file

@ -1,68 +0,0 @@
import { ShieldAlert, Mail, RefreshCw } from "lucide-react"
import { useState } from "react"
type DeactivationScreenProps = {
  // Accepted for API compatibility; currently not rendered anywhere in this screen.
  companyName?: string | null
  onRetry?: () => Promise<void> | void
}

/**
 * Full-screen lock shown when this device has been deactivated by an
 * administrator. Offers a mailto link to support and, when `onRetry` is
 * provided, a button to re-check the device status.
 */
export function DeactivationScreen({ onRetry }: DeactivationScreenProps) {
  const [isRetrying, setIsRetrying] = useState(false)

  const handleRetry = async () => {
    if (isRetrying || !onRetry) return
    setIsRetrying(true)
    try {
      await onRetry()
    } catch (error) {
      // Without this catch, a failed status check surfaces as an unhandled
      // promise rejection; log it and keep the screen usable for another try.
      console.error("Falha ao verificar status do dispositivo:", error)
    } finally {
      setIsRetrying(false)
    }
  }

  return (
    <div className="fixed inset-0 z-50 grid place-items-center overflow-hidden bg-neutral-950 p-6">
      <div className="flex w-full max-w-[720px] flex-col items-center gap-6 rounded-2xl border border-slate-200 bg-white px-8 py-10 shadow-sm">
        <div className="flex flex-col items-center gap-3 text-center">
          <span className="inline-flex items-center gap-2 rounded-full border border-rose-200 bg-rose-50 px-3 py-1 text-xs font-semibold text-rose-700">
            <ShieldAlert className="size-4" /> Acesso bloqueado
          </span>
          <h1 className="text-2xl font-semibold text-neutral-900">Dispositivo desativado</h1>
          <p className="max-w-md text-sm text-neutral-600">
            Este dispositivo foi desativado temporariamente pelos administradores. Enquanto isso, o acesso ao portal e o
            envio de informações ficam indisponíveis.
          </p>
        </div>
        <div className="w-full max-w-[520px] space-y-4">
          <div className="rounded-xl border border-slate-200 bg-slate-50 p-4 text-sm text-neutral-700">
            <p className="font-medium text-neutral-800">Como regularizar</p>
            <ul className="mt-2 list-disc space-y-1 pl-5 text-neutral-600">
              <li>Entre em contato com o suporte da Rever e solicite a reativação.</li>
              <li>Informe o nome do computador e seus dados de contato.</li>
            </ul>
          </div>
          <div className="flex flex-wrap items-center justify-center gap-3">
            <a
              href="mailto:suporte@rever.com.br"
              className="inline-flex items-center gap-2 rounded-full border border-black bg-black px-4 py-2 text-sm font-semibold text-white transition hover:bg-black/90"
            >
              <Mail className="size-4" /> Falar com o suporte
            </a>
            {onRetry && (
              <button
                type="button"
                onClick={handleRetry}
                disabled={isRetrying}
                className="inline-flex items-center gap-2 rounded-full border border-slate-300 bg-white px-4 py-2 text-sm font-semibold text-neutral-700 transition hover:bg-slate-50 disabled:opacity-50"
              >
                <RefreshCw className={`size-4 ${isRetrying ? "animate-spin" : ""}`} />
                {isRetrying ? "Verificando..." : "Verificar novamente"}
              </button>
            )}
          </div>
        </div>
      </div>
    </div>
  )
}

View file

@ -1,103 +0,0 @@
/**
* MachineStateMonitor - Componente para monitorar o estado da máquina em tempo real
*
* Este componente usa uma subscription Convex para detectar mudanças no estado da máquina:
* - Quando isActive muda para false: máquina foi desativada
* - Quando hasValidToken muda para false: máquina foi resetada (tokens revogados)
*
* O componente não renderiza nada, apenas monitora e chama callbacks quando detecta mudanças.
*/
import { useEffect, useRef } from "react"
import { useQuery, ConvexProvider } from "convex/react"
import type { ConvexReactClient } from "convex/react"
import { api } from "../convex/_generated/api"
import type { Id } from "../convex/_generated/dataModel"
type MachineStateMonitorProps = {
  // Machine document id (cast to Id<"machines"> before querying).
  machineId: string
  onDeactivated?: () => void
  onTokenRevoked?: () => void
  onReactivated?: () => void
}

/**
 * Headless watcher for a machine's server-side state.
 *
 * Subscribes to `api.machines.getMachineState` and invokes the matching
 * callback when `isActive` or `hasValidToken` transitions. On the very first
 * load it also fires callbacks for already-bad states (machine already
 * deactivated / token already revoked) so the UI can react immediately.
 * Renders nothing.
 */
function MachineStateMonitorInner({ machineId, onDeactivated, onTokenRevoked, onReactivated }: MachineStateMonitorProps) {
  const machineState = useQuery(api.machines.getMachineState, {
    machineId: machineId as Id<"machines">,
  })
  // Refs hold the previous state so each transition fires its callback only once.
  const previousIsActive = useRef<boolean | null>(null)
  const previousHasValidToken = useRef<boolean | null>(null)
  const initialLoadDone = useRef(false)
  useEffect(() => {
    if (!machineState) return
    // First load: check the initial state AND store the baseline values.
    if (!initialLoadDone.current) {
      console.log("[MachineStateMonitor] Carga inicial", {
        isActive: machineState.isActive,
        hasValidToken: machineState.hasValidToken,
        found: machineState.found,
      })
      // Already deactivated on first load -> notify immediately.
      if (machineState.isActive === false) {
        console.log("[MachineStateMonitor] Máquina já estava desativada")
        onDeactivated?.()
      }
      // Token already invalid on first load -> notify immediately.
      if (machineState.hasValidToken === false) {
        console.log("[MachineStateMonitor] Token já estava revogado")
        onTokenRevoked?.()
      }
      previousIsActive.current = machineState.isActive
      previousHasValidToken.current = machineState.hasValidToken
      initialLoadDone.current = true
      return
    }
    // Transition: active -> inactive.
    if (previousIsActive.current === true && machineState.isActive === false) {
      console.log("[MachineStateMonitor] Máquina foi desativada")
      onDeactivated?.()
    }
    // Transition: inactive -> active (reactivation).
    if (previousIsActive.current === false && machineState.isActive === true) {
      console.log("[MachineStateMonitor] Máquina foi reativada")
      onReactivated?.()
    }
    // Transition: valid token -> invalid token (revoked by a reset).
    if (previousHasValidToken.current === true && machineState.hasValidToken === false) {
      console.log("[MachineStateMonitor] Token foi revogado (reset)")
      onTokenRevoked?.()
    }
    // Store current values as the baseline for the next update.
    previousIsActive.current = machineState.isActive
    previousHasValidToken.current = machineState.hasValidToken
  }, [machineState, onDeactivated, onTokenRevoked, onReactivated])
  // This component renders nothing; it exists only for its subscription side effects.
  return null
}
type MachineStateMonitorWithClientProps = MachineStateMonitorProps & {
  client: ConvexReactClient
}

/**
 * Public entry point: binds the monitor to the given Convex client by
 * wrapping it in a ConvexProvider, so it can run outside any existing
 * Convex context.
 */
export function MachineStateMonitor(props: MachineStateMonitorWithClientProps) {
  const { client, ...monitorProps } = props
  return (
    <ConvexProvider client={client}>
      <MachineStateMonitorInner {...monitorProps} />
    </ConvexProvider>
  )
}

View file

@ -1,40 +1,15 @@
import * as React from "react" import * as React from "react"
import { import * as TabsPrimitive from "@radix-ui/react-tabs"
Root as TabsRootPrimitive,
List as TabsListPrimitive,
Trigger as TabsTriggerPrimitive,
Content as TabsContentPrimitive,
type TabsProps as TabsPrimitiveProps,
type TabsListProps as TabsPrimitiveListProps,
type TabsTriggerProps as TabsPrimitiveTriggerProps,
type TabsContentProps as TabsPrimitiveContentProps,
} from "@radix-ui/react-tabs"
import { cn } from "../../lib/utils" import { cn } from "../../lib/utils"
type TabsProps = TabsPrimitiveProps & { className?: string } export function Tabs({ className, ...props }: React.ComponentProps<typeof TabsPrimitive.Root>) {
type TabsListProps = TabsPrimitiveListProps & { className?: string } return <TabsPrimitive.Root data-slot="tabs" className={cn("flex flex-col gap-2", className)} {...props} />
type TabsTriggerProps = TabsPrimitiveTriggerProps & { className?: string }
type TabsContentProps = TabsPrimitiveContentProps & { className?: string }
const TabsRoot = TabsRootPrimitive as unknown as React.ComponentType<TabsProps>
const TabsListBase = TabsListPrimitive as unknown as React.ComponentType<TabsListProps>
const TabsTriggerBase = TabsTriggerPrimitive as unknown as React.ComponentType<TabsTriggerProps>
const TabsContentBase = TabsContentPrimitive as unknown as React.ComponentType<TabsContentProps>
export function Tabs({ className, ...props }: TabsProps) {
return (
<TabsRoot
data-slot="tabs"
className={cn("flex flex-col gap-2", className)}
{...props}
/>
)
} }
export function TabsList({ className, ...props }: TabsListProps) { export function TabsList({ className, ...props }: React.ComponentProps<typeof TabsPrimitive.List>) {
return ( return (
<TabsListBase <TabsPrimitive.List
data-slot="tabs-list" data-slot="tabs-list"
className={cn("inline-flex h-9 w-fit items-center justify-center rounded-lg bg-slate-100 p-[3px] text-slate-600", className)} className={cn("inline-flex h-9 w-fit items-center justify-center rounded-lg bg-slate-100 p-[3px] text-slate-600", className)}
{...props} {...props}
@ -42,27 +17,20 @@ export function TabsList({ className, ...props }: TabsListProps) {
) )
} }
export function TabsTrigger({ className, value, ...props }: TabsTriggerProps) { export function TabsTrigger({ className, ...props }: React.ComponentProps<typeof TabsPrimitive.Trigger>) {
return ( return (
<TabsTriggerBase <TabsPrimitive.Trigger
data-slot="tabs-trigger" data-slot="tabs-trigger"
className={cn( className={cn(
"inline-flex h-[calc(100%-1px)] flex-1 items-center justify-center gap-1.5 whitespace-nowrap rounded-md border border-transparent px-2 py-1 text-sm font-medium text-slate-800 transition-[color,box-shadow] data-[state=active]:bg-white data-[state=active]:shadow-sm", "inline-flex h-[calc(100%-1px)] flex-1 items-center justify-center gap-1.5 whitespace-nowrap rounded-md border border-transparent px-2 py-1 text-sm font-medium text-slate-800 transition-[color,box-shadow] data-[state=active]:bg-white data-[state=active]:shadow-sm",
className className
)} )}
value={value}
{...props} {...props}
/> />
) )
} }
export function TabsContent({ className, value, ...props }: TabsContentProps) { export function TabsContent({ className, ...props }: React.ComponentProps<typeof TabsPrimitive.Content>) {
return ( return <TabsPrimitive.Content data-slot="tabs-content" className={cn("flex-1 outline-none", className)} {...props} />
<TabsContentBase
data-slot="tabs-content"
className={cn("flex-1 outline-none", className)}
value={value}
{...props}
/>
)
} }

View file

@ -1,121 +0,0 @@
/* eslint-disable */
/**
* Generated `api` utility.
*
* THIS CODE IS AUTOMATICALLY GENERATED.
*
* To regenerate, run `npx convex dev`.
* @module
*/
import type * as alerts from "../alerts.js";
import type * as automations from "../automations.js";
import type * as bootstrap from "../bootstrap.js";
import type * as categories from "../categories.js";
import type * as categorySlas from "../categorySlas.js";
import type * as checklistTemplates from "../checklistTemplates.js";
import type * as commentTemplates from "../commentTemplates.js";
import type * as companies from "../companies.js";
import type * as crons from "../crons.js";
import type * as dashboards from "../dashboards.js";
import type * as deviceExportTemplates from "../deviceExportTemplates.js";
import type * as deviceFieldDefaults from "../deviceFieldDefaults.js";
import type * as deviceFields from "../deviceFields.js";
import type * as devices from "../devices.js";
import type * as emprestimos from "../emprestimos.js";
import type * as fields from "../fields.js";
import type * as files from "../files.js";
import type * as incidents from "../incidents.js";
import type * as invites from "../invites.js";
import type * as liveChat from "../liveChat.js";
import type * as machines from "../machines.js";
import type * as metrics from "../metrics.js";
import type * as migrations from "../migrations.js";
import type * as ops from "../ops.js";
import type * as queues from "../queues.js";
import type * as rbac from "../rbac.js";
import type * as reports from "../reports.js";
import type * as revision from "../revision.js";
import type * as seed from "../seed.js";
import type * as slas from "../slas.js";
import type * as teams from "../teams.js";
import type * as ticketFormSettings from "../ticketFormSettings.js";
import type * as ticketFormTemplates from "../ticketFormTemplates.js";
import type * as ticketNotifications from "../ticketNotifications.js";
import type * as tickets from "../tickets.js";
import type * as usbPolicy from "../usbPolicy.js";
import type * as users from "../users.js";
import type {
ApiFromModules,
FilterApi,
FunctionReference,
} from "convex/server";
declare const fullApi: ApiFromModules<{
alerts: typeof alerts;
automations: typeof automations;
bootstrap: typeof bootstrap;
categories: typeof categories;
categorySlas: typeof categorySlas;
checklistTemplates: typeof checklistTemplates;
commentTemplates: typeof commentTemplates;
companies: typeof companies;
crons: typeof crons;
dashboards: typeof dashboards;
deviceExportTemplates: typeof deviceExportTemplates;
deviceFieldDefaults: typeof deviceFieldDefaults;
deviceFields: typeof deviceFields;
devices: typeof devices;
emprestimos: typeof emprestimos;
fields: typeof fields;
files: typeof files;
incidents: typeof incidents;
invites: typeof invites;
liveChat: typeof liveChat;
machines: typeof machines;
metrics: typeof metrics;
migrations: typeof migrations;
ops: typeof ops;
queues: typeof queues;
rbac: typeof rbac;
reports: typeof reports;
revision: typeof revision;
seed: typeof seed;
slas: typeof slas;
teams: typeof teams;
ticketFormSettings: typeof ticketFormSettings;
ticketFormTemplates: typeof ticketFormTemplates;
ticketNotifications: typeof ticketNotifications;
tickets: typeof tickets;
usbPolicy: typeof usbPolicy;
users: typeof users;
}>;
/**
* A utility for referencing Convex functions in your app's public API.
*
* Usage:
* ```js
* const myFunctionReference = api.myModule.myFunction;
* ```
*/
export declare const api: FilterApi<
typeof fullApi,
FunctionReference<any, "public">
>;
/**
* A utility for referencing Convex functions in your app's internal API.
*
* Usage:
* ```js
* const myFunctionReference = internal.myModule.myFunction;
* ```
*/
export declare const internal: FilterApi<
typeof fullApi,
FunctionReference<any, "internal">
>;
export declare const components: {};

View file

@ -1,23 +0,0 @@
/* eslint-disable */
/**
* Generated `api` utility.
*
* THIS CODE IS AUTOMATICALLY GENERATED.
*
* To regenerate, run `npx convex dev`.
* @module
*/
import { anyApi, componentsGeneric } from "convex/server";
/**
* A utility for referencing Convex functions in your app's API.
*
* Usage:
* ```js
* const myFunctionReference = api.myModule.myFunction;
* ```
*/
export const api = anyApi;
export const internal = anyApi;
export const components = componentsGeneric();

View file

@ -1,60 +0,0 @@
/* eslint-disable */
/**
* Generated data model types.
*
* THIS CODE IS AUTOMATICALLY GENERATED.
*
* To regenerate, run `npx convex dev`.
* @module
*/
import type {
DataModelFromSchemaDefinition,
DocumentByName,
TableNamesInDataModel,
SystemTableNames,
} from "convex/server";
import type { GenericId } from "convex/values";
import schema from "../schema.js";
/**
* The names of all of your Convex tables.
*/
export type TableNames = TableNamesInDataModel<DataModel>;
/**
* The type of a document stored in Convex.
*
* @typeParam TableName - A string literal type of the table name (like "users").
*/
export type Doc<TableName extends TableNames> = DocumentByName<
DataModel,
TableName
>;
/**
* An identifier for a document in Convex.
*
* Convex documents are uniquely identified by their `Id`, which is accessible
* on the `_id` field. To learn more, see [Document IDs](https://docs.convex.dev/using/document-ids).
*
* Documents can be loaded using `db.get(id)` in query and mutation functions.
*
* IDs are just strings at runtime, but this type can be used to distinguish them from other
* strings when type checking.
*
* @typeParam TableName - A string literal type of the table name (like "users").
*/
export type Id<TableName extends TableNames | SystemTableNames> =
GenericId<TableName>;
/**
* A type describing your Convex data model.
*
* This type includes information about what tables you have, the type of
* documents stored in those tables, and the indexes defined on them.
*
* This type is used to parameterize methods like `queryGeneric` and
* `mutationGeneric` to make them type-safe.
*/
export type DataModel = DataModelFromSchemaDefinition<typeof schema>;

View file

@ -1,143 +0,0 @@
/* eslint-disable */
/**
* Generated utilities for implementing server-side Convex query and mutation functions.
*
* THIS CODE IS AUTOMATICALLY GENERATED.
*
* To regenerate, run `npx convex dev`.
* @module
*/
import {
ActionBuilder,
HttpActionBuilder,
MutationBuilder,
QueryBuilder,
GenericActionCtx,
GenericMutationCtx,
GenericQueryCtx,
GenericDatabaseReader,
GenericDatabaseWriter,
} from "convex/server";
import type { DataModel } from "./dataModel.js";
/**
* Define a query in this Convex app's public API.
*
* This function will be allowed to read your Convex database and will be accessible from the client.
*
* @param func - The query function. It receives a {@link QueryCtx} as its first argument.
* @returns The wrapped query. Include this as an `export` to name it and make it accessible.
*/
export declare const query: QueryBuilder<DataModel, "public">;
/**
* Define a query that is only accessible from other Convex functions (but not from the client).
*
* This function will be allowed to read from your Convex database. It will not be accessible from the client.
*
* @param func - The query function. It receives a {@link QueryCtx} as its first argument.
* @returns The wrapped query. Include this as an `export` to name it and make it accessible.
*/
export declare const internalQuery: QueryBuilder<DataModel, "internal">;
/**
* Define a mutation in this Convex app's public API.
*
* This function will be allowed to modify your Convex database and will be accessible from the client.
*
* @param func - The mutation function. It receives a {@link MutationCtx} as its first argument.
* @returns The wrapped mutation. Include this as an `export` to name it and make it accessible.
*/
export declare const mutation: MutationBuilder<DataModel, "public">;
/**
* Define a mutation that is only accessible from other Convex functions (but not from the client).
*
* This function will be allowed to modify your Convex database. It will not be accessible from the client.
*
* @param func - The mutation function. It receives a {@link MutationCtx} as its first argument.
* @returns The wrapped mutation. Include this as an `export` to name it and make it accessible.
*/
export declare const internalMutation: MutationBuilder<DataModel, "internal">;
/**
* Define an action in this Convex app's public API.
*
* An action is a function which can execute any JavaScript code, including non-deterministic
* code and code with side-effects, like calling third-party services.
* They can be run in Convex's JavaScript environment or in Node.js using the "use node" directive.
* They can interact with the database indirectly by calling queries and mutations using the {@link ActionCtx}.
*
* @param func - The action. It receives an {@link ActionCtx} as its first argument.
* @returns The wrapped action. Include this as an `export` to name it and make it accessible.
*/
export declare const action: ActionBuilder<DataModel, "public">;
/**
* Define an action that is only accessible from other Convex functions (but not from the client).
*
* @param func - The function. It receives an {@link ActionCtx} as its first argument.
* @returns The wrapped function. Include this as an `export` to name it and make it accessible.
*/
export declare const internalAction: ActionBuilder<DataModel, "internal">;
/**
* Define an HTTP action.
*
* The wrapped function will be used to respond to HTTP requests received
* by a Convex deployment if the requests matches the path and method where
* this action is routed. Be sure to route your httpAction in `convex/http.js`.
*
* @param func - The function. It receives an {@link ActionCtx} as its first argument
* and a Fetch API `Request` object as its second.
* @returns The wrapped function. Import this function from `convex/http.js` and route it to hook it up.
*/
export declare const httpAction: HttpActionBuilder;
/**
* A set of services for use within Convex query functions.
*
* The query context is passed as the first argument to any Convex query
* function run on the server.
*
* This differs from the {@link MutationCtx} because all of the services are
* read-only.
*/
export type QueryCtx = GenericQueryCtx<DataModel>;
/**
* A set of services for use within Convex mutation functions.
*
* The mutation context is passed as the first argument to any Convex mutation
* function run on the server.
*/
export type MutationCtx = GenericMutationCtx<DataModel>;
/**
* A set of services for use within Convex action functions.
*
* The action context is passed as the first argument to any Convex action
* function run on the server.
*/
export type ActionCtx = GenericActionCtx<DataModel>;
/**
* An interface to read from the database within Convex query functions.
*
* The two entry points are {@link DatabaseReader.get}, which fetches a single
* document by its {@link Id}, or {@link DatabaseReader.query}, which starts
* building a query.
*/
export type DatabaseReader = GenericDatabaseReader<DataModel>;
/**
* An interface to read from and write to the database within Convex mutation
* functions.
*
* Convex guarantees that all writes within a single mutation are
* executed atomically, so you never have to worry about partial writes leaving
* your data in an inconsistent state. See [the Convex Guide](https://docs.convex.dev/understanding/convex-fundamentals/functions#atomicity-and-optimistic-concurrency-control)
* for the guarantees Convex provides your functions.
*/
export type DatabaseWriter = GenericDatabaseWriter<DataModel>;

View file

@ -1,93 +0,0 @@
/* eslint-disable */
/**
* Generated utilities for implementing server-side Convex query and mutation functions.
*
* THIS CODE IS AUTOMATICALLY GENERATED.
*
* To regenerate, run `npx convex dev`.
* @module
*/
import {
actionGeneric,
httpActionGeneric,
queryGeneric,
mutationGeneric,
internalActionGeneric,
internalMutationGeneric,
internalQueryGeneric,
} from "convex/server";
/**
* Define a query in this Convex app's public API.
*
* This function will be allowed to read your Convex database and will be accessible from the client.
*
* @param func - The query function. It receives a {@link QueryCtx} as its first argument.
* @returns The wrapped query. Include this as an `export` to name it and make it accessible.
*/
export const query = queryGeneric;
/**
* Define a query that is only accessible from other Convex functions (but not from the client).
*
* This function will be allowed to read from your Convex database. It will not be accessible from the client.
*
* @param func - The query function. It receives a {@link QueryCtx} as its first argument.
* @returns The wrapped query. Include this as an `export` to name it and make it accessible.
*/
export const internalQuery = internalQueryGeneric;
/**
* Define a mutation in this Convex app's public API.
*
* This function will be allowed to modify your Convex database and will be accessible from the client.
*
* @param func - The mutation function. It receives a {@link MutationCtx} as its first argument.
* @returns The wrapped mutation. Include this as an `export` to name it and make it accessible.
*/
export const mutation = mutationGeneric;
/**
* Define a mutation that is only accessible from other Convex functions (but not from the client).
*
* This function will be allowed to modify your Convex database. It will not be accessible from the client.
*
* @param func - The mutation function. It receives a {@link MutationCtx} as its first argument.
* @returns The wrapped mutation. Include this as an `export` to name it and make it accessible.
*/
export const internalMutation = internalMutationGeneric;
/**
* Define an action in this Convex app's public API.
*
* An action is a function which can execute any JavaScript code, including non-deterministic
* code and code with side-effects, like calling third-party services.
* They can be run in Convex's JavaScript environment or in Node.js using the "use node" directive.
* They can interact with the database indirectly by calling queries and mutations using the {@link ActionCtx}.
*
* @param func - The action. It receives an {@link ActionCtx} as its first argument.
* @returns The wrapped action. Include this as an `export` to name it and make it accessible.
*/
export const action = actionGeneric;
/**
* Define an action that is only accessible from other Convex functions (but not from the client).
*
* @param func - The function. It receives an {@link ActionCtx} as its first argument.
* @returns The wrapped function. Include this as an `export` to name it and make it accessible.
*/
export const internalAction = internalActionGeneric;
/**
* Define an HTTP action.
*
* The wrapped function will be used to respond to HTTP requests received
* by a Convex deployment if the requests matches the path and method where
* this action is routed. Be sure to route your httpAction in `convex/http.js`.
*
* @param func - The function. It receives an {@link ActionCtx} as its first argument
* and a Fetch API `Request` object as its second.
* @returns The wrapped function. Import this function from `convex/http.js` and route it to hook it up.
*/
export const httpAction = httpActionGeneric;

View file

@ -6,18 +6,10 @@
html, body, #root { html, body, #root {
height: 100%; height: 100%;
overflow: hidden; /* Remove scrollbars */
} }
body { body {
@apply text-slate-900; @apply bg-slate-50 text-slate-900;
background: transparent;
overflow: hidden; /* Remove scrollbars */
}
/* Fundo padrão para janelas que não são chat minimizado */
.app-bg {
@apply bg-slate-50;
} }
.badge-status { .badge-status {
@ -29,7 +21,7 @@ body {
} }
.btn { .btn {
@apply inline-flex items-center justify-center rounded-md border px-3 py-2 text-sm font-semibold transition; @apply inline-flex items-center justify-center rounded-lg border px-3 py-2 text-sm font-semibold transition;
} }
.btn-primary { .btn-primary {
@ -57,7 +49,7 @@ body {
} }
.tab-btn { .tab-btn {
@apply rounded-md border border-transparent bg-transparent px-3 py-1.5 text-sm font-medium text-slate-700 hover:bg-slate-100; @apply rounded-lg border border-transparent bg-transparent px-3 py-1.5 text-sm font-medium text-slate-700 hover:bg-slate-100;
} }
.tab-btn.active { .tab-btn.active {

File diff suppressed because it is too large Load diff

View file

@ -19,13 +19,7 @@
"noUnusedParameters": true, "noUnusedParameters": true,
"noFallthroughCasesInSwitch": true, "noFallthroughCasesInSwitch": true,
"jsx": "react-jsx", "jsx": "react-jsx",
"types": ["vite/client"], "types": ["vite/client"]
/* Paths */
"baseUrl": ".",
"paths": {
"@convex/_generated/*": ["./src/convex/_generated/*"]
}
}, },
"include": ["src"] "include": ["src"]
} }

View file

@ -1,6 +1,5 @@
import { defineConfig } from "vite"; import { defineConfig } from "vite";
import react from "@vitejs/plugin-react"; import react from "@vitejs/plugin-react";
import { resolve } from "path";
const host = process.env.TAURI_DEV_HOST; const host = process.env.TAURI_DEV_HOST;
@ -8,13 +7,6 @@ const host = process.env.TAURI_DEV_HOST;
export default defineConfig(async () => ({ export default defineConfig(async () => ({
plugins: [react()], plugins: [react()],
resolve: {
alias: {
// Usar arquivos _generated locais para evitar problemas de type-check
"@convex/_generated": resolve(__dirname, "./src/convex/_generated"),
},
},
// Vite options tailored for Tauri development and only applied in `tauri dev` or `tauri build` // Vite options tailored for Tauri development and only applied in `tauri dev` or `tauri build`
// //
// 1. prevent Vite from obscuring rust errors // 1. prevent Vite from obscuring rust errors

2659
bun.lock

File diff suppressed because it is too large Load diff

View file

@ -1,3 +0,0 @@
[test]
preload = ["./tests/setup/bun-test-env.ts"]
timeout = 15000

View file

@ -1,7 +0,0 @@
-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
QyNTUxOQAAACALomD1WTMgTtF+ZE/6d1QF73cY9W2W/5U9iQIEceaIogAAAJCCFZZTghWW
UwAAAAtzc2gtZWQyNTUxOQAAACALomD1WTMgTtF+ZE/6d1QF73cY9W2W/5U9iQIEceaIog
AAAED2WbX9/mtNwqBlVJIoWNJg1lTO7M1vOLXgP+h8q/CWBQuiYPVZMyBO0X5kT/p3VAXv
dxj1bZb/lT2JAgRx5oiiAAAACWNvZGV4LWNsaQECAwQ=
-----END OPENSSH PRIVATE KEY-----

View file

@ -1 +0,0 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAuiYPVZMyBO0X5kT/p3VAXvdxj1bZb/lT2JAgRx5oii codex-cli

View file

@ -1,22 +1,22 @@
{ {
"$schema": "https://ui.shadcn.com/schema.json", "$schema": "https://ui.shadcn.com/schema.json",
"style": "new-york", "style": "new-york",
"rsc": true, "rsc": true,
"tsx": true, "tsx": true,
"tailwind": { "tailwind": {
"config": "", "config": "",
"css": "src/app/globals.css", "css": "src/app/globals.css",
"baseColor": "neutral", "baseColor": "neutral",
"cssVariables": true, "cssVariables": true,
"prefix": "" "prefix": ""
}, },
"iconLibrary": "lucide", "iconLibrary": "lucide",
"aliases": { "aliases": {
"components": "@/components", "components": "@/components",
"utils": "@/lib/utils", "utils": "@/lib/utils",
"ui": "@/components/ui", "ui": "@/components/ui",
"lib": "@/lib", "lib": "@/lib",
"hooks": "@/hooks" "hooks": "@/hooks"
}, },
"registries": {} "registries": {}
} }

Some files were not shown because too many files have changed in this diff Show more