feat(ci): add Forgejo Actions as an alternative to GitHub Actions
Sets up Forgejo as a self-hosted CI/CD platform to avoid upcoming GitHub Actions charges (starting March 2026).

Files added:
- .forgejo/workflows/ci-cd-web-desktop.yml: main deploy workflow
- .forgejo/workflows/quality-checks.yml: lint, test and build
- forgejo/stack.yml: Forgejo Docker stack for Swarm
- forgejo/setup-runner.sh: runner setup script
- docs/FORGEJO-CI-CD.md: full documentation

Forgejo is running at: https://git.esdrasrenan.com.br

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
parent 771e25798d
commit aaa64e339c
5 changed files with 1037 additions and 0 deletions
532  .forgejo/workflows/ci-cd-web-desktop.yml  Normal file
@@ -0,0 +1,532 @@
name: CI/CD Web + Desktop

on:
  push:
    branches: [ main ]
    tags:
      - 'v*.*.*'
  workflow_dispatch:
    inputs:
      force_web_deploy:
        description: 'Force the Web deploy (ignore the path filter)?'
        required: false
        default: 'false'
      force_convex_deploy:
        description: 'Force the Convex deploy (ignore the path filter)?'
        required: false
        default: 'false'

env:
  APP_DIR: /srv/apps/sistema
  VPS_UPDATES_DIR: /var/www/updates
  RUN_MACHINE_SMOKE: ${{ vars.RUN_MACHINE_SMOKE || 'false' }}

jobs:
  changes:
    name: Detect changes
    runs-on: ubuntu-latest
    timeout-minutes: 5
    outputs:
      convex: ${{ steps.filter.outputs.convex }}
      web: ${{ steps.filter.outputs.web }}
    steps:
      - name: Checkout
        uses: https://github.com/actions/checkout@v4
      - name: Paths filter
        id: filter
        uses: https://github.com/dorny/paths-filter@v3
        with:
          filters: |
            convex:
              - 'convex/**'
            web:
              - 'src/**'
              - 'public/**'
              - 'prisma/**'
              - 'next.config.ts'
              - 'package.json'
              - 'pnpm-lock.yaml'
              - 'tsconfig.json'
              - 'middleware.ts'
              - 'stack.yml'

  deploy:
    name: Deploy (VPS Linux)
    needs: changes
    timeout-minutes: 30
    if: ${{ github.event_name == 'workflow_dispatch' || github.ref == 'refs/heads/main' }}
    runs-on: [ self-hosted, linux, vps ]
    steps:
      - name: Checkout
        uses: https://github.com/actions/checkout@v4

      - name: Determine APP_DIR (fallback safe path)
        id: appdir
        run: |
          TS=$(date +%s)
          FALLBACK_DIR="$HOME/apps/web.build.$TS"
          mkdir -p "$FALLBACK_DIR"
          echo "Using APP_DIR (fallback)=$FALLBACK_DIR"
          echo "EFFECTIVE_APP_DIR=$FALLBACK_DIR" >> "$GITHUB_ENV"

      - name: Setup pnpm
        uses: https://github.com/pnpm/action-setup@v4
        with:
          version: 10.20.0

      - name: Setup Node.js
        uses: https://github.com/actions/setup-node@v4
        with:
          node-version: 20

      - name: Setup Bun
        uses: https://github.com/oven-sh/setup-bun@v2
        with:
          bun-version: 1.3.1

      - name: Verify Bun runtime
        run: bun --version

      - name: Permissions diagnostic (server paths)
        run: |
          set +e
          echo "== Basic context =="
          whoami || true
          id || true
          groups || true
          umask || true
          echo "HOME=$HOME"
          echo "APP_DIR(default)=${APP_DIR:-/srv/apps/sistema}"
          echo "EFFECTIVE_APP_DIR=$EFFECTIVE_APP_DIR"

          echo "\n== Permissions check =="
          check_path() {
            P="$1"
            echo "-- $P"
            if [ -e "$P" ]; then
              stat -c '%A %U:%G %n' "$P" 2>/dev/null || ls -ld "$P" || true
              echo -n "WRITABLE? "; [ -w "$P" ] && echo yes || echo no
              if command -v namei >/dev/null 2>&1; then
                namei -l "$P" || true
              fi
              TMP="$P/.permtest.$$"
              (echo test > "$TMP" 2>/dev/null && echo "CREATE_FILE: ok" && rm -f "$TMP") || echo "CREATE_FILE: failed"
            else
              echo "(missing)"
            fi
          }
          check_path "/srv/apps/sistema"
          check_path "/srv/apps/sistema/src/app/machines/handshake"
          check_path "/srv/apps/sistema/apps/desktop/node_modules"
          check_path "/srv/apps/sistema/node_modules"
          check_path "$EFFECTIVE_APP_DIR"
          check_path "$EFFECTIVE_APP_DIR/node_modules"

      - name: Sync workspace to APP_DIR (preserving local env)
        run: |
          mkdir -p "$EFFECTIVE_APP_DIR"
          RSYNC_FLAGS="-az --inplace --no-times --no-perms --no-owner --no-group --delete"
          EXCLUDE_ENV="--exclude '.env*' --exclude 'apps/desktop/.env*' --exclude 'convex/.env*'"
          if [ "$EFFECTIVE_APP_DIR" != "${APP_DIR:-/srv/apps/sistema}" ]; then
            EXCLUDE_ENV=""
          fi
          rsync $RSYNC_FLAGS \
            --filter='protect .next.old*' \
            --exclude '.next.old*' \
            --filter='protect node_modules' \
            --filter='protect node_modules/**' \
            --filter='protect .pnpm-store' \
            --filter='protect .pnpm-store/**' \
            --filter='protect .env' \
            --filter='protect .env*' \
            --filter='protect apps/desktop/.env*' \
            --filter='protect convex/.env*' \
            --exclude '.git' \
            --exclude '.next' \
            --exclude 'node_modules' \
            --exclude 'node_modules/**' \
            --exclude '.pnpm-store' \
            --exclude '.pnpm-store/**' \
            $EXCLUDE_ENV \
            ./ "$EFFECTIVE_APP_DIR"/

      - name: Acquire Convex admin key
        id: key
        run: |
          echo "Waiting for Convex container..."
          CID=""
          for attempt in $(seq 1 12); do
            CID=$(docker ps --format '{{.ID}} {{.Names}}' | awk '/sistema_convex_backend/{print $1; exit}')
            if [ -n "$CID" ]; then
              echo "Convex container ready (CID=$CID)"
              break
            fi
            echo "Attempt $attempt/12: container not ready yet; waiting 5s..."
            sleep 5
          done
          CONVEX_IMAGE="ghcr.io/get-convex/convex-backend:latest"
          if [ -n "$CID" ]; then
            KEY=$(docker exec -i "$CID" /bin/sh -lc './generate_admin_key.sh' | tr -d '\r' | grep -o 'convex-self-hosted|[^ ]*' | tail -n1)
          else
            echo "No running convex container detected; attempting offline admin key extraction..."
            VOLUME="sistema_convex_data"
            if docker volume inspect "$VOLUME" >/dev/null 2>&1; then
              KEY=$(docker run --rm --entrypoint /bin/sh -v "$VOLUME":/convex/data "$CONVEX_IMAGE" -lc './generate_admin_key.sh' | tr -d '\r' | grep -o 'convex-self-hosted|[^ ]*' | tail -n1)
            else
              echo "Volume $VOLUME not found; could not extract the admin key"
            fi
          fi
          echo "ADMIN_KEY=$KEY" >> $GITHUB_OUTPUT
          echo "Admin key acquired? $([ -n "$KEY" ] && echo yes || echo no)"
          if [ -z "$KEY" ]; then
            echo "ERROR: could not obtain the Convex admin key"
            docker service ps sistema_convex_backend || true
            exit 1
          fi

      - name: Copy production .env if present
        run: |
          DEFAULT_DIR="${APP_DIR:-/srv/apps/sistema}"
          if [ "$EFFECTIVE_APP_DIR" != "$DEFAULT_DIR" ] && [ -f "$DEFAULT_DIR/.env" ]; then
            echo "Copying production .env from $DEFAULT_DIR to $EFFECTIVE_APP_DIR"
            cp -f "$DEFAULT_DIR/.env" "$EFFECTIVE_APP_DIR/.env"
          fi

      - name: Prune workspace for server-only build
        run: |
          cd "$EFFECTIVE_APP_DIR"
          printf "packages:\n - .\n\nignoredBuiltDependencies:\n - '@prisma/client'\n - '@prisma/engines'\n - '@tailwindcss/oxide'\n - esbuild\n - prisma\n - sharp\n - unrs-resolver\n" > pnpm-workspace.yaml

      - name: Ensure Next.js cache directory exists and is writable
        run: |
          cd "$EFFECTIVE_APP_DIR"
          mkdir -p .next/cache
          chmod -R u+rwX .next || true

      - name: Cache Next.js build cache (.next/cache)
        uses: https://github.com/actions/cache@v4
        with:
          path: ${{ env.EFFECTIVE_APP_DIR }}/.next/cache
          key: ${{ runner.os }}-nextjs-${{ hashFiles('pnpm-lock.yaml', 'bun.lock') }}-${{ hashFiles('src/**/*.ts', 'src/**/*.tsx', 'src/**/*.js', 'src/**/*.jsx', 'next.config.ts') }}
          restore-keys: |
            ${{ runner.os }}-nextjs-${{ hashFiles('pnpm-lock.yaml', 'bun.lock') }}-

      - name: Lint check (fail fast before build)
        run: |
          cd "$EFFECTIVE_APP_DIR"
          docker run --rm \
            -v "$EFFECTIVE_APP_DIR":/app \
            -w /app \
            sistema_web:node22-bun \
            bash -lc "set -euo pipefail; bun install --frozen-lockfile --filter '!appsdesktop'; bun run lint"

      - name: Install and build (Next.js)
        env:
          PRISMA_ENGINES_CHECKSUM_IGNORE_MISSING: "1"
        run: |
          cd "$EFFECTIVE_APP_DIR"
          docker run --rm \
            -e PRISMA_ENGINES_CHECKSUM_IGNORE_MISSING="$PRISMA_ENGINES_CHECKSUM_IGNORE_MISSING" \
            -e NODE_OPTIONS="--max-old-space-size=4096" \
            -v "$EFFECTIVE_APP_DIR":/app \
            -w /app \
            sistema_web:node22-bun \
            bash -lc "set -euo pipefail; bun install --frozen-lockfile --filter '!appsdesktop'; bun run prisma:generate; bun run build:bun"

      - name: Publish build to stable APP_DIR directory
        run: |
          set -e
          DEST="$HOME/apps/sistema"
          mkdir -p "$DEST"
          mkdir -p "$DEST/.next/static"
          docker run --rm -v "$DEST":/target alpine:3 sh -lc 'chown -R 1000:1000 /target 2>/dev/null || true; chmod -R u+rwX /target 2>/dev/null || true' || true
          if [ -d "$EFFECTIVE_APP_DIR/.next/static" ]; then
            rsync -a \
              "$EFFECTIVE_APP_DIR/.next/static/" "$DEST/.next/static/"
          fi
          rsync -a --delete \
            --chown=1000:1000 \
            --exclude '.pnpm-store' --exclude '.pnpm-store/**' \
            --exclude '.next/static' \
            "$EFFECTIVE_APP_DIR"/ "$DEST"/
          echo "Published build to: $DEST"

      - name: Swarm deploy (stack.yml)
        run: |
          APP_DIR_STABLE="$HOME/apps/sistema"
          if [ ! -d "$APP_DIR_STABLE" ]; then
            echo "ERROR: Stable APP_DIR does not exist: $APP_DIR_STABLE" >&2; exit 1
          fi
          cd "$APP_DIR_STABLE"
          set -o allexport
          if [ -f .env ]; then
            echo "Loading .env from $APP_DIR_STABLE"
            . ./.env
          else
            echo "WARNING: No .env found at $APP_DIR_STABLE - stack vars may be empty!"
          fi
          set +o allexport
          echo "Using APP_DIR (stable)=$APP_DIR_STABLE"
          echo "NEXT_PUBLIC_CONVEX_URL=${NEXT_PUBLIC_CONVEX_URL:-<not set>}"
          echo "NEXT_PUBLIC_APP_URL=${NEXT_PUBLIC_APP_URL:-<not set>}"
          APP_DIR="$APP_DIR_STABLE" RELEASE_SHA=${{ github.sha }} docker stack deploy --with-registry-auth -c stack.yml sistema

      - name: Wait for services to be healthy
        run: |
          echo "Waiting for services to become healthy..."
          for i in $(seq 1 18); do
            WEB_STATUS=$(docker service ls --filter "name=sistema_web" --format "{{.Replicas}}" 2>/dev/null || echo "0/0")
            CONVEX_STATUS=$(docker service ls --filter "name=sistema_convex_backend" --format "{{.Replicas}}" 2>/dev/null || echo "0/0")
            echo "Attempt $i/18: web=$WEB_STATUS convex=$CONVEX_STATUS"
            if echo "$WEB_STATUS" | grep -q "2/2" && echo "$CONVEX_STATUS" | grep -q "1/1"; then
              echo "All services are healthy!"
              exit 0
            fi
            sleep 10
          done
          echo "WARNING: timed out waiting for services. Current status:"
          docker service ls --filter "label=com.docker.stack.namespace=sistema"

      - name: Smoke test - register + heartbeat
        run: |
          set -e
          if [ "${RUN_MACHINE_SMOKE:-false}" != "true" ]; then
            echo "RUN_MACHINE_SMOKE != true - skipping smoke test"; exit 0
          fi
          if [ -f /srv/apps/sistema/.env ]; then
            set -o allexport
            . /srv/apps/sistema/.env
            set +o allexport
          fi
          if [ -z "${MACHINE_PROVISIONING_SECRET:-}" ]; then
            echo "MACHINE_PROVISIONING_SECRET missing - skipping smoke test"; exit 0
          fi
          HOSTNAME_TEST="ci-smoke-$(date +%s)"
          BODY='{"provisioningSecret":"'"$MACHINE_PROVISIONING_SECRET"'","tenantId":"tenant-atlas","hostname":"'"$HOSTNAME_TEST"'","os":{"name":"Linux","version":"6.1.0","architecture":"x86_64"},"macAddresses":["AA:BB:CC:DD:EE:FF"],"serialNumbers":[],"metadata":{"inventory":{"cpu":"i7","ramGb":16}},"registeredBy":"ci-smoke"}'
          HTTP=$(curl -sS -o resp.json -w "%{http_code}" -H 'Content-Type: application/json' -d "$BODY" https://tickets.esdrasrenan.com.br/api/machines/register || true)
          echo "Register HTTP=$HTTP"
          if [ "$HTTP" != "201" ]; then
            echo "Register failed:"; tail -c 600 resp.json || true; exit 1; fi
          TOKEN=$(node -e 'try{const j=require("fs").readFileSync("resp.json","utf8");process.stdout.write(JSON.parse(j).machineToken||"");}catch(e){process.stdout.write("")}' )
          if [ -z "$TOKEN" ]; then echo "Missing token in register response"; exit 1; fi
          HB=$(curl -sS -o /dev/null -w "%{http_code}" -H 'Content-Type: application/json' -d '{"machineToken":"'"$TOKEN"'","status":"online","metrics":{"cpuPct":5,"memFreePct":70}}' https://tickets.esdrasrenan.com.br/api/machines/heartbeat || true)
          echo "Heartbeat HTTP=$HB"
          if [ "$HB" != "200" ]; then echo "Heartbeat failed"; exit 1; fi

      - name: Cleanup old build workdirs (keep last 2)
        run: |
          set -e
          ROOT="$HOME/apps"
          KEEP=2
          PATTERN='web.build.*'
          ACTIVE="$HOME/apps/sistema"
          echo "Scanning $ROOT for old $PATTERN dirs"
          LIST=$(find "$ROOT" -maxdepth 1 -type d -name "$PATTERN" | sort -r || true)
          echo "$LIST" | sed -n "1,${KEEP}p" | sed 's/^/Keeping: /' || true
          echo "$LIST" | sed "1,${KEEP}d" | while read dir; do
            [ -z "$dir" ] && continue
            if [ -n "$ACTIVE" ] && [ "$(readlink -f "$dir")" = "$ACTIVE" ]; then
              echo "Skipping active dir (in use by APP_DIR): $dir"; continue
            fi
            echo "Removing $dir"
            chmod -R u+rwX "$dir" 2>/dev/null || true
            rm -rf "$dir" || {
              echo "Local rm failed, falling back to docker (root) cleanup for $dir..."
              docker run --rm -v "$dir":/target alpine:3 sh -lc 'chown -R 1000:1000 /target 2>/dev/null || true; chmod -R u+rwX /target 2>/dev/null || true; rm -rf /target/* /target/.[!.]* /target/..?* 2>/dev/null || true' || true
              rm -rf "$dir" 2>/dev/null || rmdir "$dir" 2>/dev/null || true
            }
          done
          echo "Disk usage (top 10 under $ROOT):"
          du -sh "$ROOT"/* 2>/dev/null | sort -rh | head -n 10 || true

  convex_deploy:
    name: Deploy Convex functions
    needs: changes
    timeout-minutes: 20
    if: ${{ github.event_name == 'workflow_dispatch' || needs.changes.outputs.convex == 'true' }}
    runs-on: [ self-hosted, linux, vps ]
    env:
      APP_DIR: /srv/apps/sistema
    steps:
      - name: Checkout
        uses: https://github.com/actions/checkout@v4

      - name: Determine APP_DIR (fallback safe path)
        id: appdir
        run: |
          TS=$(date +%s)
          FALLBACK_DIR="$HOME/apps/convex.build.$TS"
          mkdir -p "$FALLBACK_DIR"
          echo "Using APP_DIR (fallback)=$FALLBACK_DIR"
          echo "EFFECTIVE_APP_DIR=$FALLBACK_DIR" >> "$GITHUB_ENV"

      - name: Sync workspace to APP_DIR (preserving local env)
        run: |
          mkdir -p "$EFFECTIVE_APP_DIR"
          RSYNC_FLAGS="-az --inplace --no-times --no-perms --no-owner --no-group --delete"
          rsync $RSYNC_FLAGS \
            --filter='protect .next.old*' \
            --exclude '.next.old*' \
            --exclude '.env*' \
            --exclude 'apps/desktop/.env*' \
            --exclude 'convex/.env*' \
            --filter='protect node_modules' \
            --filter='protect node_modules/**' \
            --filter='protect .pnpm-store' \
            --filter='protect .pnpm-store/**' \
            --exclude '.git' \
            --exclude '.next' \
            --exclude 'node_modules' \
            --exclude 'node_modules/**' \
            --exclude '.pnpm-store' \
            --exclude '.pnpm-store/**' \
            ./ "$EFFECTIVE_APP_DIR"/

      - name: Acquire Convex admin key
        id: key
        run: |
          echo "Waiting for Convex container..."
          CID=""
          for attempt in $(seq 1 12); do
            CID=$(docker ps --format '{{.ID}} {{.Names}}' | awk '/sistema_convex_backend/{print $1; exit}')
            if [ -n "$CID" ]; then
              echo "Convex container ready (CID=$CID)"
              break
            fi
            echo "Attempt $attempt/12: container not ready yet; waiting 5s..."
            sleep 5
          done
          CONVEX_IMAGE="ghcr.io/get-convex/convex-backend:latest"
          if [ -n "$CID" ]; then
            KEY=$(docker exec -i "$CID" /bin/sh -lc './generate_admin_key.sh' | tr -d '\r' | grep -o 'convex-self-hosted|[^ ]*' | tail -n1)
          else
            echo "No running convex container detected; attempting offline admin key extraction..."
            VOLUME="sistema_convex_data"
            if docker volume inspect "$VOLUME" >/dev/null 2>&1; then
              KEY=$(docker run --rm --entrypoint /bin/sh -v "$VOLUME":/convex/data "$CONVEX_IMAGE" -lc './generate_admin_key.sh' | tr -d '\r' | grep -o 'convex-self-hosted|[^ ]*' | tail -n1)
            else
              echo "Volume $VOLUME not found; could not extract the admin key"
            fi
          fi
          echo "ADMIN_KEY=$KEY" >> $GITHUB_OUTPUT
          echo "Admin key acquired? $([ -n "$KEY" ] && echo yes || echo no)"
          if [ -z "$KEY" ]; then
            echo "ERROR: could not obtain the Convex admin key"
            docker service ps sistema_convex_backend || true
            exit 1
          fi

      - name: Bring convex.json from live app if present
        run: |
          if [ -f "$APP_DIR/convex.json" ]; then
            echo "Copying $APP_DIR/convex.json -> $EFFECTIVE_APP_DIR/convex.json"
            cp -f "$APP_DIR/convex.json" "$EFFECTIVE_APP_DIR/convex.json"
          else
            echo "No existing convex.json found at $APP_DIR; convex CLI will need self-hosted vars"
          fi

      - name: Set Convex env vars (self-hosted)
        env:
          CONVEX_SELF_HOSTED_URL: https://convex.esdrasrenan.com.br
          CONVEX_SELF_HOSTED_ADMIN_KEY: ${{ steps.key.outputs.ADMIN_KEY }}
          MACHINE_PROVISIONING_SECRET: ${{ secrets.MACHINE_PROVISIONING_SECRET }}
          MACHINE_TOKEN_TTL_MS: ${{ secrets.MACHINE_TOKEN_TTL_MS }}
          FLEET_SYNC_SECRET: ${{ secrets.FLEET_SYNC_SECRET }}
        run: |
          set -e
          docker run --rm -i \
            -v "$EFFECTIVE_APP_DIR":/app \
            -w /app \
            -e CONVEX_SELF_HOSTED_URL \
            -e CONVEX_SELF_HOSTED_ADMIN_KEY \
            -e MACHINE_PROVISIONING_SECRET \
            -e MACHINE_TOKEN_TTL_MS \
            -e FLEET_SYNC_SECRET \
            -e CONVEX_TMPDIR=/app/.convex-tmp \
            node:20-bullseye bash -lc "set -euo pipefail; curl -fsSL https://bun.sh/install | bash >/tmp/bun-install.log; export BUN_INSTALL=\"\${BUN_INSTALL:-/root/.bun}\"; export PATH=\"\$BUN_INSTALL/bin:\$PATH\"; export CONVEX_TMPDIR=/app/.convex-tmp; bun install --frozen-lockfile; \
              if [ -n \"$MACHINE_PROVISIONING_SECRET\" ]; then bunx convex env set MACHINE_PROVISIONING_SECRET \"$MACHINE_PROVISIONING_SECRET\"; fi; \
              if [ -n \"$MACHINE_TOKEN_TTL_MS\" ]; then bunx convex env set MACHINE_TOKEN_TTL_MS \"$MACHINE_TOKEN_TTL_MS\"; fi; \
              if [ -n \"$FLEET_SYNC_SECRET\" ]; then bunx convex env set FLEET_SYNC_SECRET \"$FLEET_SYNC_SECRET\"; fi; \
              bunx convex env list"

      - name: Prepare Convex deploy workspace
        run: |
          cd "$EFFECTIVE_APP_DIR"
          if [ -f .env ]; then
            echo "Renaming .env -> .env.bak (Convex self-hosted deploy)"
            mv -f .env .env.bak
          fi
          mkdir -p .convex-tmp

      - name: Deploy functions to Convex self-hosted
        env:
          CONVEX_SELF_HOSTED_URL: https://convex.esdrasrenan.com.br
          CONVEX_SELF_HOSTED_ADMIN_KEY: ${{ steps.key.outputs.ADMIN_KEY }}
        run: |
          docker run --rm -i \
            -v "$EFFECTIVE_APP_DIR":/app \
            -w /app \
            -e CI=true \
            -e CONVEX_SELF_HOSTED_URL \
            -e CONVEX_SELF_HOSTED_ADMIN_KEY \
            -e CONVEX_TMPDIR=/app/.convex-tmp \
            node:20-bullseye bash -lc "set -euo pipefail; curl -fsSL https://bun.sh/install | bash >/tmp/bun-install.log; export BUN_INSTALL=\"\${BUN_INSTALL:-/root/.bun}\"; export PATH=\"\$BUN_INSTALL/bin:\$PATH\"; export CONVEX_TMPDIR=/app/.convex-tmp; bun install --frozen-lockfile; bunx convex deploy"

      - name: Cleanup old convex build workdirs (keep last 2)
        run: |
          set -e
          ROOT="$HOME/apps"
          KEEP=2
          PATTERN='convex.build.*'
          LIST=$(find "$ROOT" -maxdepth 1 -type d -name "$PATTERN" | sort -r || true)
          echo "$LIST" | sed -n "1,${KEEP}p" | sed 's/^/Keeping: /' || true
          echo "$LIST" | sed "1,${KEEP}d" | while read dir; do
            [ -z "$dir" ] && continue
            echo "Removing $dir"
            chmod -R u+rwX "$dir" 2>/dev/null || true
            rm -rf "$dir" || {
              echo "Local rm failed, falling back to docker (root) cleanup for $dir..."
              docker run --rm -v "$dir":/target alpine:3 sh -lc 'chown -R 1000:1000 /target 2>/dev/null || true; chmod -R u+rwX /target 2>/dev/null || true; rm -rf /target/* /target/.[!.]* /target/..?* 2>/dev/null || true' || true
              rm -rf "$dir" 2>/dev/null || rmdir "$dir" 2>/dev/null || true
            }
          done

  desktop_release:
    name: Desktop Release (Windows)
    timeout-minutes: 30
    if: ${{ startsWith(github.ref, 'refs/tags/v') }}
    runs-on: [ self-hosted, windows, desktop ]
    defaults:
      run:
        working-directory: apps/desktop
    steps:
      - name: Checkout
        uses: https://github.com/actions/checkout@v4

      - name: Setup pnpm
        uses: https://github.com/pnpm/action-setup@v4
        with:
          version: 10.20.0

      - name: Setup Node.js
        uses: https://github.com/actions/setup-node@v4
        with:
          node-version: 20

      - name: Install deps (desktop)
        run: pnpm install --frozen-lockfile

      - name: Build with Tauri
        uses: https://github.com/tauri-apps/tauri-action@v0
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          TAURI_PRIVATE_KEY: ${{ secrets.TAURI_PRIVATE_KEY }}
          TAURI_KEY_PASSWORD: ${{ secrets.TAURI_KEY_PASSWORD }}
        with:
          projectPath: apps/desktop

      - name: Upload bundles to VPS
        run: |
          # Upload via SCP (configure an SSH key on the Windows runner)
          # scp -r src-tauri/target/release/bundle/* user@vps:/var/www/updates/
          echo "TODO: configure the upload to the VPS"
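Note that `force_web_deploy` and `force_convex_deploy` are declared as `workflow_dispatch` inputs but are not yet referenced by any job condition. A minimal sketch of how a job's `if:` could consume them (hypothetical wiring, not part of this commit):

```yaml
# Hypothetical: let the manual input bypass the path filter for the Convex job
convex_deploy:
  if: ${{ needs.changes.outputs.convex == 'true' || github.event.inputs.force_convex_deploy == 'true' }}
```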
62  .forgejo/workflows/quality-checks.yml  Normal file
@@ -0,0 +1,62 @@
name: Quality Checks

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main

jobs:
  lint-test-build:
    name: Lint, Test and Build
    runs-on: ubuntu-latest
    env:
      BETTER_AUTH_SECRET: test-secret
      NEXT_PUBLIC_APP_URL: http://localhost:3000
      BETTER_AUTH_URL: http://localhost:3000
      NEXT_PUBLIC_CONVEX_URL: http://localhost:3210
      DATABASE_URL: file:./prisma/db.dev.sqlite
    steps:
      - name: Checkout
        uses: https://github.com/actions/checkout@v4

      - name: Setup Node.js
        uses: https://github.com/actions/setup-node@v4
        with:
          node-version: 20

      - name: Setup Bun
        uses: https://github.com/oven-sh/setup-bun@v2
        with:
          bun-version: 1.3.1

      - name: Verify Bun
        run: bun --version

      - name: Install dependencies
        run: bun install --frozen-lockfile

      - name: Cache Next.js build cache
        uses: https://github.com/actions/cache@v4
        with:
          path: |
            ${{ github.workspace }}/.next/cache
          key: ${{ runner.os }}-nextjs-${{ hashFiles('pnpm-lock.yaml', 'bun.lock') }}-${{ hashFiles('**/*.{js,jsx,ts,tsx}') }}
          restore-keys: |
            ${{ runner.os }}-nextjs-${{ hashFiles('pnpm-lock.yaml', 'bun.lock') }}-

      - name: Generate Prisma client
        env:
          PRISMA_ENGINES_CHECKSUM_IGNORE_MISSING: "1"
        run: bun run prisma:generate

      - name: Lint
        run: bun run lint

      - name: Test
        run: bun test

      - name: Build
        run: bun run build:bun
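The same checks can be reproduced locally with Bun before pushing (these are the commands the job runs, minus the cache step):

```bash
bun install --frozen-lockfile
bun run prisma:generate
bun run lint
bun test
bun run build:bun
```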
244  docs/FORGEJO-CI-CD.md  Normal file
@@ -0,0 +1,244 @@
# Forgejo CI/CD - Documentation

This document describes the setup of Forgejo as a self-hosted CI/CD alternative to GitHub Actions.

## Why Forgejo?

Starting in March 2026, GitHub will begin charging $0.002 per minute of execution on self-hosted runners. Forgejo Actions offers the same visual experience and functionality at no additional cost.

## Architecture

```
Claude Code / VS Code
          |
      Git local
          |
GitHub (main repo) <---> Forgejo (mirror/CI)
                              |
                       Forgejo Actions
                              |
                      Forgejo Runner (VPS)
                              |
                      Docker Swarm deploy
```

## URLs and Credentials

| Service | URL | User |
|---------|-----|------|
| Forgejo UI | https://git.esdrasrenan.com.br | esdras |
| Forgejo SSH | git@git.esdrasrenan.com.br:2222 | - |
| Actions | https://git.esdrasrenan.com.br/esdras/sistema-de-chamados/actions | - |

**Initial password:** `ForgejoAdmin2025!` (change it after the first login)

## File Layout

```
projeto/
├── .forgejo/
│   └── workflows/
│       ├── ci-cd-web-desktop.yml   # Main deploy (VPS + Convex)
│       └── quality-checks.yml      # Lint, test, build
├── .github/
│   └── workflows/                  # Original GitHub workflows
│       └── ...
└── forgejo/
    ├── stack.yml                   # Forgejo Docker stack
    └── setup-runner.sh             # Runner setup script
```

## VPS Configuration

### Forgejo Server

Runs as a Docker Swarm service:

```bash
# Stack location
/srv/forgejo/stack.yml

# Useful commands
docker service ls --filter "name=forgejo"
docker service logs forgejo_forgejo --tail 100
docker stack deploy -c /srv/forgejo/stack.yml forgejo
```

### Forgejo Runner

Runs as a systemd service:

```bash
# Location
/srv/forgejo-runner/

# Files
/srv/forgejo-runner/forgejo-runner   # Binary
/srv/forgejo-runner/config.yaml      # Configuration
/srv/forgejo-runner/.runner          # Registration

# Useful commands
systemctl status forgejo-runner
systemctl restart forgejo-runner
journalctl -u forgejo-runner -f

# Runner labels
- ubuntu-latest:docker://node:20-bookworm
- self-hosted:host
- linux:host
- vps:host
```

## Workflow

### Option 1: Push to both (recommended)

Configure git to push to GitHub and Forgejo automatically:

```bash
# Add Forgejo as a remote
git remote add forgejo https://git.esdrasrenan.com.br/esdras/sistema-de-chamados.git

# OR configure origin to push to both
git remote set-url --add --push origin https://git.esdrasrenan.com.br/esdras/sistema-de-chamados.git
git remote set-url --add --push origin https://github.com/esdrasrenan/sistema-de-chamados.git
```

### Option 2: Automatic mirror

Configure Forgejo to sync automatically from GitHub (a GitHub token is required for private repos).
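A pull mirror can be created through the UI (New Migration > GitHub, with the mirror option checked) or, as a minimal sketch, through the Gitea-compatible migrate API. The `FORGEJO_TOKEN` and `GITHUB_TOKEN` values and the `mirror_interval` below are placeholders, not part of this commit:

```bash
# Hypothetical example: create a pull mirror of the GitHub repo via the API
curl -sS -X POST "https://git.esdrasrenan.com.br/api/v1/repos/migrate" \
  -H "Authorization: token $FORGEJO_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{
        "clone_addr": "https://github.com/esdrasrenan/sistema-de-chamados.git",
        "auth_token": "'"$GITHUB_TOKEN"'",
        "repo_name": "sistema-de-chamados",
        "mirror": true,
        "mirror_interval": "10m"
      }'
```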
## Available Workflows

### ci-cd-web-desktop.yml

Triggers:
- Push to the `main` branch
- Tags `v*.*.*`
- workflow_dispatch (manual)

Jobs:
1. **changes** - Detects changed files
2. **deploy** - Deploys to the VPS (Next.js + Docker Swarm)
3. **convex_deploy** - Deploys the Convex functions
4. **desktop_release** - Builds the desktop app (tags `v*`)

### quality-checks.yml

Triggers:
- Push to the `main` branch
- Pull requests targeting `main`

Jobs:
1. **lint-test-build** - Lint, tests and build
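For example, a desktop release is triggered by pushing a version tag; pushing it to both remotes (remote names as configured in Option 1 above; the version number is just an example) keeps GitHub and Forgejo in sync:

```bash
# Tag a release and push the tag to both remotes
git tag v1.2.3
git push origin v1.2.3
git push forgejo v1.2.3
```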
## Differences from GitHub Actions

Forgejo workflows are nearly identical to GitHub Actions workflows. Main differences:

1. **Location:** `.forgejo/workflows/` instead of `.github/workflows/`
2. **Action URLs:** use the full `https://github.com/` prefix when referencing actions
```yaml
# GitHub Actions
uses: actions/checkout@v4

# Forgejo Actions
uses: https://github.com/actions/checkout@v4
```
3. **Secrets:** configure them under Settings > Actions > Secrets in Forgejo
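Secret references inside the workflow files keep the same `${{ secrets.* }}` syntax, for example as used by the deploy jobs in this commit:

```yaml
env:
  MACHINE_PROVISIONING_SECRET: ${{ secrets.MACHINE_PROVISIONING_SECRET }}
  FLEET_SYNC_SECRET: ${{ secrets.FLEET_SYNC_SECRET }}
```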
## Maintenance

### Updating Forgejo

```bash
ssh root@154.12.253.40
cd /srv/forgejo
# Edit stack.yml to point at the new image version
docker stack deploy -c stack.yml forgejo
```

### Updating the Runner

```bash
ssh root@154.12.253.40
cd /srv/forgejo-runner
systemctl stop forgejo-runner

# Download the new version
RUNNER_VERSION="6.2.2"  # adjust the version
curl -sL -o forgejo-runner "https://code.forgejo.org/forgejo/runner/releases/download/v${RUNNER_VERSION}/forgejo-runner-${RUNNER_VERSION}-linux-amd64"
chmod +x forgejo-runner

systemctl start forgejo-runner
```

### Re-registering the Runner

If the runner loses its connection:

```bash
ssh root@154.12.253.40
cd /srv/forgejo-runner

# Generate a new token in Forgejo
docker exec -u 1000:1000 $(docker ps -q --filter "name=forgejo_forgejo") \
  /usr/local/bin/gitea --config /data/gitea/conf/app.ini actions generate-runner-token

# Re-register
systemctl stop forgejo-runner
rm .runner
./forgejo-runner register \
  --instance https://git.esdrasrenan.com.br \
  --token "NEW_TOKEN" \
  --name "vps-runner" \
  --labels "ubuntu-latest:docker://node:20-bookworm,self-hosted:host,linux:host,vps:host" \
  --no-interactive
systemctl start forgejo-runner
```

### Backup

```bash
# Back up the Forgejo volume
docker run --rm -v forgejo_forgejo_data:/data -v /backup:/backup alpine \
  tar czf /backup/forgejo-backup-$(date +%Y%m%d).tar.gz /data
```
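A restore is the reverse operation; a minimal sketch, assuming the service is stopped first and the backup date below is replaced with a real one:

```bash
# Hypothetical restore: scale the service down, unpack the archive into the volume, bring it back
docker service scale forgejo_forgejo=0
docker run --rm -v forgejo_forgejo_data:/data -v /backup:/backup alpine \
  sh -c "rm -rf /data/* && tar xzf /backup/forgejo-backup-YYYYMMDD.tar.gz -C /"
docker service scale forgejo_forgejo=1
```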
## Troubleshooting

### Runner does not show up as online

```bash
# Check status
systemctl status forgejo-runner
journalctl -u forgejo-runner --no-pager -n 50

# Check connectivity
curl -s https://git.esdrasrenan.com.br/api/healthz
```

### Workflow does not trigger

1. Check that the file is under `.forgejo/workflows/`
2. Check that Actions is enabled for the repository (Settings > Actions)
3. Check that the runner is online (Settings > Actions > Runners)

### Permission error during deploy

The runner needs access to Docker:

```bash
# Check the docker group
groups runner
# Add it if needed
usermod -aG docker runner
systemctl restart forgejo-runner
```

## References

- [Forgejo Documentation](https://forgejo.org/docs/)
- [Forgejo Actions](https://forgejo.org/docs/latest/user/actions/)
- [Forgejo Runner](https://code.forgejo.org/forgejo/runner)
113  forgejo/setup-runner.sh  Normal file
@@ -0,0 +1,113 @@
#!/bin/bash
# Script to configure the Forgejo Runner
# Run on the VPS after Forgejo is up

set -e

FORGEJO_URL="${FORGEJO_URL:-https://git.esdrasrenan.com.br}"
RUNNER_NAME="${RUNNER_NAME:-vps-runner}"
RUNNER_DIR="/srv/forgejo-runner"
CONFIG_FILE="$RUNNER_DIR/config.yml"

echo "=== Forgejo Runner setup ==="
echo ""
echo "1. Open Forgejo: $FORGEJO_URL"
echo "2. Go to: Site Administration > Actions > Runners"
echo "3. Click 'Create new Runner'"
echo "4. Copy the registration token"
echo ""
read -p "Paste the registration token here: " REGISTRATION_TOKEN

if [ -z "$REGISTRATION_TOKEN" ]; then
  echo "ERROR: the token must not be empty"
  exit 1
fi

# Create the runner directory
mkdir -p "$RUNNER_DIR"
cd "$RUNNER_DIR"

# Download the runner if it is not present yet
if [ ! -f "./forgejo-runner" ]; then
  echo "Downloading Forgejo Runner..."
  RUNNER_VERSION="6.2.2"
  curl -L -o forgejo-runner "https://code.forgejo.org/forgejo/runner/releases/download/v${RUNNER_VERSION}/forgejo-runner-${RUNNER_VERSION}-linux-amd64"
  chmod +x forgejo-runner
fi

# Register the runner
echo "Registering runner..."
./forgejo-runner register \
  --instance "$FORGEJO_URL" \
  --token "$REGISTRATION_TOKEN" \
  --name "$RUNNER_NAME" \
  --labels "ubuntu-latest:docker://node:20-bookworm,self-hosted:host,linux:host,vps:host" \
  --no-interactive

# Create a customized config.yml
cat > "$CONFIG_FILE" << 'EOF'
log:
  level: info

runner:
  file: .runner
  capacity: 2
  timeout: 3h
  insecure: false
  fetch_timeout: 5s
  fetch_interval: 2s
  labels:
    - "ubuntu-latest:docker://node:20-bookworm"
    - "self-hosted:host"
    - "linux:host"
    - "vps:host"

cache:
  enabled: true
  dir: /tmp/forgejo-runner-cache
  host: ""
  port: 0
  external_server: ""

container:
  network: "host"
  privileged: false
  options: ""
  workdir_parent: /tmp/forgejo-runner-workdir
  valid_volumes:
    - /var/run/docker.sock
    - /home/runner/apps
    - /srv/apps
    - /tmp
  docker_host: ""
  force_pull: false

host:
  workdir_parent: /tmp/forgejo-runner-workdir
EOF

echo ""
echo "=== Runner registered successfully! ==="
echo ""
echo "To start the runner as a systemd service, run:"
echo ""
echo "sudo tee /etc/systemd/system/forgejo-runner.service << 'SYSTEMD'
[Unit]
Description=Forgejo Runner
After=docker.service network.target

[Service]
Type=simple
User=runner
WorkingDirectory=$RUNNER_DIR
ExecStart=$RUNNER_DIR/forgejo-runner daemon --config $CONFIG_FILE
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
SYSTEMD"
echo ""
echo "sudo systemctl daemon-reload"
echo "sudo systemctl enable forgejo-runner"
echo "sudo systemctl start forgejo-runner"
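Typical invocation on the VPS (the environment variables are optional; they default to the values shown at the top of the script):

```bash
# Run interactively as root on the VPS; paste the registration token when prompted
FORGEJO_URL="https://git.esdrasrenan.com.br" RUNNER_NAME="vps-runner" bash forgejo/setup-runner.sh
```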
86  forgejo/stack.yml  Normal file
@@ -0,0 +1,86 @@
version: "3.8"

# Forgejo for self-hosted CI/CD
# Replaces GitHub Actions without losing the visual experience
# NOTE: the runner runs as a systemd service, not as a container in the Swarm

services:
  forgejo:
    image: codeberg.org/forgejo/forgejo:11
    environment:
      - USER_UID=1000
      - USER_GID=1000
      # Forgejo settings
      - FORGEJO__database__DB_TYPE=sqlite3
      - FORGEJO__database__PATH=/data/gitea/forgejo.db
      - FORGEJO__server__DOMAIN=git.esdrasrenan.com.br
      - FORGEJO__server__ROOT_URL=https://git.esdrasrenan.com.br/
      - FORGEJO__server__SSH_DOMAIN=git.esdrasrenan.com.br
      - FORGEJO__server__SSH_PORT=2222
      - FORGEJO__server__HTTP_PORT=3000
      - FORGEJO__server__OFFLINE_MODE=false
      # Actions enabled
      - FORGEJO__actions__ENABLED=true
      - FORGEJO__actions__DEFAULT_ACTIONS_URL=https://code.forgejo.org
      # Security - INSTALL_LOCK=true after the initial installation
      - FORGEJO__security__INSTALL_LOCK=true
      - FORGEJO__service__DISABLE_REGISTRATION=true
      # Logs
      - FORGEJO__log__MODE=console
      - FORGEJO__log__LEVEL=Info
    volumes:
      - forgejo_data:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    networks:
      - traefik_public
      - forgejo_internal
    ports:
      # SSH for git clone over SSH (exposed directly)
      - "2222:2222"
    deploy:
      mode: replicated
      replicas: 1
      update_config:
        parallelism: 1
        order: start-first
        failure_action: rollback
        delay: 10s
        monitor: 30s
      resources:
        limits:
          memory: "1G"
        reservations:
          memory: "256M"
      restart_policy:
        condition: any
        delay: 5s
        max_attempts: 3
        window: 120s
      placement:
        constraints:
          - node.role == manager
      labels:
        - traefik.enable=true
        - traefik.docker.network=traefik_public
        # Web UI
        - traefik.http.routers.forgejo.rule=Host(`git.esdrasrenan.com.br`)
        - traefik.http.routers.forgejo.entrypoints=websecure
        - traefik.http.routers.forgejo.tls=true
        - traefik.http.routers.forgejo.tls.certresolver=le
        - traefik.http.services.forgejo.loadbalancer.server.port=3000
    healthcheck:
      test: ["CMD", "curl", "-fsSL", "http://localhost:3000/api/healthz"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s

volumes:
  forgejo_data:

networks:
  traefik_public:
    external: true
  forgejo_internal:
    driver: overlay
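The stack declares `traefik_public` as an external network, so it must exist before the first deploy; a minimal sketch of bringing the stack up (the network-create step only applies if the network is not already there):

```bash
# Create the external overlay network if it does not exist yet, then deploy the stack
docker network create --driver overlay traefik_public 2>/dev/null || true
docker stack deploy -c forgejo/stack.yml forgejo
```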