diff --git a/.gitea/workflows/picsur.yml b/.gitea/workflows/picsur.yml new file mode 100644 index 0000000..bf8cc7e --- /dev/null +++ b/.gitea/workflows/picsur.yml @@ -0,0 +1,100 @@ +name: Picsur Deploy + +on: + pull_request: + paths: + - 'services/dockerino/picsur/**' + push: + branches: + - main + paths: + - 'services/dockerino/picsur/**' + +env: + DOCKERINO_HOST: 10.0.0.50 + DOCKERINO_USER: root + PICSUR_PATH: /root/dockerino/picsur + +jobs: + # ============================================ + # JOB 1: VALIDATE (roda em todo PR e Push) + # ============================================ + validate: + name: Validate Picsur Compose + runs-on: gitea-runner-hestia + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Validate docker-compose syntax + run: | + cd services/dockerino/picsur + docker compose config --quiet + echo "✅ docker-compose.yml syntax is valid" + + - name: Pull image (dry-run) + run: | + cd services/dockerino/picsur + docker compose pull --quiet + echo "✅ Docker image pull successful" + + - name: Show compose file + run: | + echo "📄 docker-compose.yml content:" + cat services/dockerino/picsur/docker-compose.yml + + # ============================================ + # JOB 2: DEPLOY (só roda no push to main) + # ============================================ + deploy: + name: Deploy Picsur to Dockerino + needs: validate + runs-on: gitea-runner-hestia + if: github.event_name == 'push' && github.ref == 'refs/heads/main' + environment: production + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup SSH Key + uses: webfactory/ssh-agent@v0.8.0 + with: + ssh-private-key: ${{ secrets.DOCKERINO_SSH_KEY }} + + - name: Add Dockerino to known hosts + run: | + ssh-keyscan -H ${{ env.DOCKERINO_HOST }} >> ~/.ssh/known_hosts + + - name: Create Picsur directory + run: | + ssh ${{ env.DOCKERINO_USER }}@${{ env.DOCKERINO_HOST }} \ + "mkdir -p ${{ env.PICSUR_PATH 
}}/data" + + - name: Sync docker-compose.yml to Dockerino + run: | + rsync -avz --progress \ + services/dockerino/picsur/ \ + ${{ env.DOCKERINO_USER }}@${{ env.DOCKERINO_HOST }}:${{ env.PICSUR_PATH }}/ + + - name: Pull latest image on Dockerino + run: | + ssh ${{ env.DOCKERINO_USER }}@${{ env.DOCKERINO_HOST }} \ + "cd ${{ env.PICSUR_PATH }} && docker compose pull" + + - name: Restart Picsur service + run: | + ssh ${{ env.DOCKERINO_USER }}@${{ env.DOCKERINO_HOST }} \ + "cd ${{ env.PICSUR_PATH }} && docker compose up -d" + + - name: Verify container is running + run: | + ssh ${{ env.DOCKERINO_USER }}@${{ env.DOCKERINO_HOST }} \ + "docker ps --filter name=picsur --format '{{.Names}}: {{.Status}}'" + + - name: Report Success + run: | + echo "✅ Picsur deployed successfully!" + echo "🌐 Access: https://picsur.hackerfortress.cc" diff --git a/.gitignore b/.gitignore index 3be021d..dabda1d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,82 @@ -.runner -token.txt +# =========================================== +# Terraform +# =========================================== +*.tfstate +*.tfstate.* +*.tfplan +.terraform/ +.terraform.lock.hcl +crash.log +crash.*.log + +# Override files +override.tf +override.tf.json +*_override.tf +*_override.tf.json + +# Local .terraform directories +**/.terraform/* + +# .tfvars files (may contain sensitive data) +*.tfvars +*.tfvars.json + +# =========================================== +# Ansible +# =========================================== +*.retry +ansible vault password file +.vault_pass + +# =========================================== +# Gitea Runner +# =========================================== +gitea-runner/act_runner +gitea-runner/.runner +gitea-runner/data/ + +# =========================================== +# Environment & Secrets +# =========================================== +.env +.env.* +*.pem +*.key +id_rsa* +id_ed25519* +*.secret +*.token + +# =========================================== +# OS & Editor +# 
=========================================== +.DS_Store +Thumbs.db +*.swp +*.swo +*~ +.idea/ +.vscode/ +*.code-workspace + +# =========================================== +# Reports & Output (não é código) +# =========================================== +reports/ +output/ +tmp/ +temp/ + +# =========================================== +# Downloads & Build artifacts +# =========================================== +*.tar +*.tar.gz +*.zip +downloads/ + +# =========================================== +# Terraform Cloud / Remote +# =========================================== +.tfrun diff --git a/gitea-runner/README.md b/gitea-runner/README.md index 83884cc..a696866 100644 --- a/gitea-runner/README.md +++ b/gitea-runner/README.md @@ -4,66 +4,96 @@ Este diretório contém a configuração do runner de Gitea Actions para executa ## Visão Geral -O **act_runner** é o agente que executa os jobs definidos nos workflows `.gitea/workflows/*.yml`. Ele roda no Hestia (10.0.0.50) usando Docker. +O **act_runner** é o agente que executa os jobs definidos nos workflows `.gitea/workflows/*.yml`. Ele roda no Hestia (10.0.0.50) como serviço systemd. ## Arquivos ``` gitea-runner/ -├── docker-compose.yml # Serviço do runner -├── config.yaml # Configuração (gerado no registro) -├── register.sh # Script de registro -└── data/ # Dados persistentes do runner +├── config.yaml # Configuração do runner +├── gitea-runner.service # Serviço systemd +├── register.sh # Script de registro +└── README.md # Este arquivo ``` +## Pré-requisitos + +- Docker instalado e acessível ao usuário +- Usuário `iamferreirajp` no grupo `docker` +- Node.js em `$HOME/.local/bin` (path do Hermes) + ## Instalação -### 1. Obter Token de Registro - -Acesse o Gitea como admin: -``` -https://gitea.hackerfortress.cc/admin/runners -``` - -Clique em **"New Runner"** e copie o token. - -### 2. Registrar o Runner +### 1. 
Download do act_runner ```bash cd gitea-runner -export RUNNER_TOKEN="seu-token-aqui" +curl -L https://codeberg.org/pressman/act_runner/releases/latest/download/act_runner-linux-amd64 -o act_runner +chmod +x act_runner +``` + +### 2. Obter Token de Registro + +Acesse o Gitea como admin: +``` +https://gitea.hackerfortress.cc/gaia/homelab/settings/actions/runners +``` + +Clique em **"Create new Runner"**, configure o nome e copie o token. + +### 3. Registrar o Runner + +```bash +cd ~/homelab/gitea-runner +export RUNNER_TOKEN="" ./register.sh ``` -### 3. Iniciar o Runner +O `register.sh` vai: +- Baixar o act_runner (se não existir) +- Registrar no Gitea +- Gerar `config.yaml` e `.runner` + +### 4. Instalar o Serviço systemd ```bash -docker compose up -d +sudo cp gitea-runner.service /etc/systemd/system/ +sudo systemctl daemon-reload +sudo systemctl enable --now gitea-runner ``` -### 4. Verificar +### 5. Verificar + +```bash +sudo systemctl status gitea-runner +``` Acesse: ``` -https://gitea.hackerfortress.cc/admin/runners +https://gitea.hackerfortress.cc/gaia/homelab/settings/actions/runners ``` -O runner deve aparecer como **"Active"**. +O runner deve aparecer como **"Idle"**. -## Labels Disponíveis +## Configuração -| Label | Descrição | -|-------|-----------| -| `gitea-runner-Hestia` | Runner principal | -| `ubuntu-latest` | Container Ubuntu para jobs | +O `config.yaml` controla: +- Labels disponíveis (`ubuntu-latest`, etc.) 
+- Capacidade de jobs paralelos +- Docker host (unix socket) + +O PATH do serviço systemd inclui: +``` +/home/iamferreirajp/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin +``` ## Troubleshooting -### Runner não aparece como active +### Runner não aparece como idle ```bash # Ver logs -docker compose logs -f act_runner +journalctl -u gitea-runner -f --no-pager # Verificar configuração cat config.yaml @@ -72,20 +102,30 @@ cat config.yaml ### Docker socket permission denied ```bash -# No host (Hestia), adicionar usuário ao grupo docker -sudo usermod -aG docker $USER +# Adicionar usuário ao grupo docker +sudo usermod -aG docker iamferreirajp +# Faça logout e login novamente ``` -### Jobs ficam em "Pending" +### Jobs falham com "Cannot find: node in PATH" -- Verificar se runner está online -- Verificar se o token está correto -- Verificar se o runner tem labels necessárias +O PATH do systemd precisa incluir `~/.local/bin`. Verifique se o `gitea-runner.service` tem: + +```systemd +Environment=PATH=/home/iamferreirajp/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin +``` + +### Jobs ficam em "Waiting" + +Verificar se o runner está online no Gitea e se o workflow pede labels que o runner tem. 
## Atualização ```bash -cd gitea-runner -docker compose pull -docker compose up -d +cd ~/homelab/gitea-runner +# Baixar nova versão +curl -L https://codeberg.org/pressman/act_runner/releases/latest/download/act_runner-linux-amd64 -o act_runner +chmod +x act_runner +# Reiniciar +sudo systemctl restart gitea-runner ``` diff --git a/gitea-runner/act_runner b/gitea-runner/act_runner deleted file mode 100755 index 9c2429b..0000000 Binary files a/gitea-runner/act_runner and /dev/null differ diff --git a/gitea-runner/config.yaml b/gitea-runner/config.yaml index d047374..d04a499 100644 --- a/gitea-runner/config.yaml +++ b/gitea-runner/config.yaml @@ -1,5 +1,5 @@ log: - level: info + level: debug formatting: text runner: @@ -15,4 +15,4 @@ cache: docker: host: unix:///var/run/docker.sock network: "" - privileged: false + privileged: true diff --git a/gitea-runner/docker-compose.yml b/gitea-runner/docker-compose.yml deleted file mode 100644 index 70d1cb8..0000000 --- a/gitea-runner/docker-compose.yml +++ /dev/null @@ -1,18 +0,0 @@ -version: '3.8' - -services: - act_runner: - image: gitea/act_runner:latest - container_name: gitea-runner - restart: unless-stopped - environment: - - CONFIG_FILE=/runner/config.yaml - - INSTANCE_URL=https://gitea.hackerfortress.cc - - RUNNER_TOKEN=${RUNNER_TOKEN} - - RUNNER_NAME=gitea-runner-hestia - - RUNNER_LABELS=gitea-runner-hestia - volumes: - - ./data:/data - - ./config.yaml:/runner/config.yaml - - /var/run/docker.sock:/var/run/docker.sock - network_mode: host diff --git a/gitea-runner/gitea-runner.service b/gitea-runner/gitea-runner.service index f710867..ff86e34 100644 --- a/gitea-runner/gitea-runner.service +++ b/gitea-runner/gitea-runner.service @@ -5,8 +5,10 @@ After=network.target [Service] Type=simple User=iamferreirajp +SupplementaryGroups=docker WorkingDirectory=/home/iamferreirajp/homelab/gitea-runner ExecStart=/home/iamferreirajp/homelab/gitea-runner/act_runner daemon 
+Environment=PATH=/home/iamferreirajp/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin Restart=always RestartSec=5 diff --git a/gitea-runner/register.sh b/gitea-runner/register.sh index 7b011cb..2b0a367 100755 --- a/gitea-runner/register.sh +++ b/gitea-runner/register.sh @@ -3,32 +3,40 @@ # Gitea Runner Registration Script # ============================================ # Usage: -# 1. Get token from Gitea Admin > Runners -# 2. Run: RUNNER_TOKEN="your-token" ./register.sh +# 1. Get token from Gitea Settings > Actions > Runners +# 2. Run: RUNNER_TOKEN="" ./register.sh set -e -GITEA_URL="${INSTANCE_URL:-https://gitea.hackerfortress.cc}" -TOKEN="${RUNNER_TOKEN}" +GITEA_URL="https://gitea.hackerfortress.cc" +RUNNER_NAME="gitea-runner-hestia" +TOKEN="${RUNNER_TOKEN:-}" if [ -z "$TOKEN" ]; then echo "❌ RUNNER_TOKEN not set" - echo " Get token from: ${GITEA_URL}/admin/runners" + echo " Get token from: ${GITEA_URL}/gaia/homelab/settings/actions/runners" exit 1 fi -echo "📡 Registering runner with Gitea at ${GITEA_URL}..." +# Download act_runner if not present +if [ ! -f "./act_runner" ]; then + echo "📥 Downloading act_runner..." + curl -L https://codeberg.org/pressman/act_runner/releases/latest/download/act_runner-linux-amd64 -o act_runner + chmod +x act_runner +fi -# Register and get the runner config -docker compose run --rm act_runner \ - act_runner generate-config \ +echo "📡 Registering runner '${RUNNER_NAME}' with Gitea at ${GITEA_URL}..." + +./act_runner register \ --instance "${GITEA_URL}" \ --token "${TOKEN}" \ - --name "gitea-runner-dockerino" \ - > config.yaml + --name "${RUNNER_NAME}" \ + --no-interactive +echo "" echo "✅ Runner registered successfully!" echo "" echo "Next steps:" echo "1. Review config.yaml" -echo "2. Run: docker compose up -d" +echo "2. Install service: sudo cp gitea-runner.service /etc/systemd/system/" +echo "3. 
Enable: sudo systemctl enable --now gitea-runner" diff --git a/inventory/README.md b/inventory/README.md new file mode 100644 index 0000000..e095c3d --- /dev/null +++ b/inventory/README.md @@ -0,0 +1,113 @@ +# Inventory — Single Source of Truth + +Este diretório contém o inventário completo do homelab em arquivos YAML. + +## Arquivos + +| Arquivo | Conteúdo | +|---------|----------| +| `physical.yaml` | Máquinas físicas — hardware, MACs, IPs, localização | +| `proxmox.yaml` | VMs e LXC containers no Proxmox | +| `truenas.yaml` | Discos, pools ZFS, datasets, shares | +| `network.yaml` | VLANs, subnets, DNS, DHCP | + +## Princípio + +> **Sempre atualize o inventory PRIMEIRO**, antes de fazer qualquer mudança na infraestrutura real. + +Exemplo: +1. Você quer trocar o IP do Dockerino de 10.0.0.50 pra 10.0.0.51 +2. Edita `inventory/proxmox.yaml` → muda o IP do dockerino +3. Terraform/Ansible pegam o novo IP e aplicam +4. PR mostra: "Dockerino IP: 10.0.0.50 → 10.0.0.51" +5. Após merge, a mudança é aplicada automaticamente + +## Como preencher com dados reais + +### 1. Hestia (esta máquina) + +```bash +# IP e MAC +ip link show +hostname -I + +# CPU e RAM +lscpu | grep -E "^Model name|^CPU\(s\)" +free -h + +# Discos +lsblk -d -o NAME,SIZE,TYPE | grep disk +``` + +### 2. Proxmox + +```bash +# SSH para o Proxmox +ssh root@ + +# Listar VMs e containers +pvesh get /cluster/resources + +# Listar disks +pvesh get /cluster/disks/list + +# Detalhes de uma VM específica +pvesh get /qemu//config +pvesh get /lxc//config +``` + +### 3. TrueNAS + +```bash +# SSH para TrueNAS +ssh root@ + +# Status dos pools +zpool status -v + +# Datasets +zfs list -o name,mountpoint,used + +# Exports NFS +cat /etc/exports + +# Discos +lsblk -d -o NAME,SIZE,TYPE,ROTA | grep disk +smartctl -a /dev/sdX +``` + +### 4. 
ER605 (Router) + +Acesse a UI do Omada Controller (provavelmente em https://10.0.0.50:8043) e consulte: +- LAN Settings → DHCP +- VLANs +- Port Forwards + +## Formato dos arquivos + +Todos os arquivos usam YAML. Campos com `PLACEHOLDER` precisam ser preenchidos com dados reais. + +## Validação + +```bash +# Instalar yq se necessário +sudo apt install yq + +# Validar syntax do YAML +yq eval '.' inventory/physical.yaml + +# Extrair IPs de todas as máquinas +yq eval '.physical_hosts | to_entries | .[].value.network.ip' inventory/physical.yaml +``` + +## Ordem de leitura para Terraform/Ansible + +``` +physical.yaml (camada 0 — fatos) + ↓ +proxmox.yaml + truenas.yaml (camada 1 — provisionamento) + ↓ +ansible/ (camada 2 — configuração de OS) + ↓ +services/ (camada 3 — aplicações) +``` diff --git a/inventory/network.yaml b/inventory/network.yaml new file mode 100644 index 0000000..0791dcb --- /dev/null +++ b/inventory/network.yaml @@ -0,0 +1,78 @@ +# =========================================== +# INVENTÁRIO DE REDE +# =========================================== +# TODO: Descobrir IPs reais via: +# - ER605 Admin UI: LAN settings +# - AdGuard: 10.0.0.2 → Settings > DHCP +# =========================================== + +network: + domain: "hackerfortress.cc" + gateway: "PLACEHOLDER" # IP do ER605 na VLAN1 + +# =========================================== +# Subnets e VLANs +# =========================================== +vlans: + 1: + name: "infra" + subnet: "10.0.0.0/24" + gateway: "PLACEHOLDER" + dhcp_server: true + dhcp_range: + start: "10.0.0.100" + end: "10.0.0.200" + static_leases: + # TODO: Adicionar leases fixos + # "MAC_ADDRESS": "IP" + "hestia-mac": "PLACEHOLDER" + "truenas-mac": "PLACEHOLDER" + "proxmox-mac": "PLACEHOLDER" + + 10: + name: "geral" + subnet: "10.0.10.0/24" + gateway: "PLACEHOLDER" + dhcp_server: true + dhcp_range: + start: "10.0.10.100" + end: "10.0.10.200" + + 20: + name: "iot" + subnet: "10.0.20.0/24" + gateway: "PLACEHOLDER" + dhcp_server: true + 
dhcp_range: + start: "10.0.20.100" + end: "10.0.20.200" + + 30: + name: "guests" + subnet: "10.0.30.0/24" + gateway: "PLACEHOLDER" + dhcp_server: true + dhcp_range: + start: "10.0.30.100" + end: "10.0.30.200" + +# =========================================== +# DNS — Services +# =========================================== +dns_services: + adguard: + ip: "10.0.0.2" + port: 53 + web_ui: "http://10.0.0.2" + roles: + - dns-recursive + - dns-blocklist + +# =========================================== +# Port Forwards (ER605) +# =========================================== +# TODO: ER605 Admin UI > NAT > Port Forwarding +forwarding: + # external_port: [protocol, internal_ip, internal_port, description] + #443: ["TCP", "10.0.0.50", "443", "Picsur HTTPS"] + #80: ["TCP", "10.0.0.50", "80", "Picsur HTTP"] diff --git a/inventory/physical.yaml b/inventory/physical.yaml new file mode 100644 index 0000000..4e9e7ce --- /dev/null +++ b/inventory/physical.yaml @@ -0,0 +1,183 @@ +# =========================================== +# INVENTÁRIO FÍSICO — Single Source of Truth +# =========================================== +# Este arquivo mapeia TODAS as máquinas físicas do homelab. +# UPDATE: Sempre que mudar algo físico (IP, MAC, disco), atualize aqui PRIMEIRO. 
+# =========================================== + +physical_hosts: + + # =========================================== + # HESTIA — Notebook (esta máquina) + # =========================================== + hestia: + description: "Notebook Dell Latitude 5490 — usado como workstation e runner de CI/CD" + location: "rack caseiro" + hardware: + cpu: "Intel i5-8250U" + ram_gb: 16 + disk: + - device: /dev/sda + type: SSD + size_gb: 224 + mount: / + network: + mac: "74:86:7a:f9:15:c1" # enp9s0 (cabo, sem link atualmente) + wifi_mac: "0c:84:dc:d5:03:d1" # wlp6s0 (WiFi, IP atual) + ip: "10.0.10.100" # WiFi (sem cabo conectado) + gateway: "PLACEHOLDER" # IP do ER605 + dns: "10.0.0.2" # AdGuard + os: + distro: "Debian" + version: "13" + hostname: "hestia" + roles: + - runner-ci # Gitea Actions runner + - workstation + ssh: + user: "iamferreirajp" + port: 22 + + # =========================================== + # PROXMOX — Server principal + # =========================================== + proxmox: + description: "Servidor mini-ITX — Proxmox VE rodando VMs e containers" + location: "rack caseiro" + hardware: + cpu: "PLACEHOLDER" + ram_gb: 64 + disk: + - device: /dev/sda + type: SSD + size_gb: 512 + mount: / + role: "Proxmox OS" + network: + mac: "PLACEHOLDER" + ip: "PLACEHOLDER" + gateway: "PLACEHOLDER" + dns: "10.0.0.2" + os: + distro: "Proxmox VE" + version: "PLACEHOLDER" + hostname: "proxmox" + roles: + - hypervisor # Proxmox (gerencia VMs) + - nfs-client # Mount TrueNAS volumes + ssh: + user: "root" + port: 22 + + # =========================================== + # TRUENAS — Storage server + # =========================================== + truenas: + description: "Servidor de storage — TrueNAS Scale baremetal" + location: "rack caseiro" + hardware: + cpu: "PLACEHOLDER" + ram_gb: 32 + disk: + # TODO: lsblk -d -o NAME,SIZE,TYPE | grep disk + - device: /dev/sdb + type: HDD + size_tb: 4 + role: "data" + - device: /dev/sdc + type: HDD + size_tb: 4 + role: "data" + - device: /dev/sdd + 
type: HDD + size_tb: 4 + role: "data" + - device: /dev/sde + type: HDD + size_tb: 4 + role: "data" + - device: /dev/sdf + type: SSD + size_gb: 500 + role: "SLOG/Cache" + network: + mac: "PLACEHOLDER" + ip: "PLACEHOLDER" + gateway: "PLACEHOLDER" + dns: "10.0.0.2" + os: + distro: "TrueNAS Scale" + version: "PLACEHOLDER" + hostname: "truenas" + roles: + - storage # NFS/SMB shares + - nfs-server # Exporta volumes + ssh: + user: "root" + port: 22 + + # =========================================== + # ER605 — Router TP-Link (Omada) + # =========================================== + er605: + description: "Router TP-Link ER605 — gateway + DHCP + VLANs" + location: "rack caseiro" + hardware: + model: "TP-Link ER605" + wan_port: "1Gbps" + lan_ports: 4 + network: + mac: "PLACEHOLDER" + ip: "PLACEHOLDER" # Tipicamente .1 da subnet + gateway: "PLACEHOLDER" # WAN upstream + dns: "PLACEHOLDER" + os: + firmware: "Omada Controller" + controller_url: "http://10.0.0.50:8043" + roles: + - gateway + - dhcp-server + - firewall + management: + web_ui: "http://PLACEHOLDER" + ssh: "disabled" + +# =========================================== +# VLANs — mapeamento de rede +# =========================================== +vlans: + 1: + name: "infra" + subnet: "10.0.0.0/24" + dhcp_range: "10.0.0.100-10.0.0.200" + description: "Infraestrutura — Gitea, AdGuard, Omada Controller" + 10: + name: "geral" + subnet: "10.0.10.0/24" + dhcp_range: "10.0.10.100-10.0.10.200" + description: "Workstations e laptops" + 20: + name: "iot" + subnet: "10.0.20.0/24" + dhcp_range: "10.0.20.100-10.0.20.200" + description: "Dispositivos IoT — sensores, câmeras" + 30: + name: "guests" + subnet: "10.0.30.0/24" + dhcp_range: "10.0.30.100-10.0.30.200" + description: "Rede de visitantes" + +# =========================================== +# DNS — AdGuard +# =========================================== +dns: + adguard: + description: "DNS recursivo + bloqueador de ads" + ip: "10.0.0.2" + roles: + - dns-recursive + - dns-block + 
web_ui: "http://10.0.0.2" + upstream_dns: + - "1.1.1.1" + - "8.8.8.8" diff --git a/inventory/proxmox.yaml b/inventory/proxmox.yaml new file mode 100644 index 0000000..9bad83f --- /dev/null +++ b/inventory/proxmox.yaml @@ -0,0 +1,141 @@ +# =========================================== +# INVENTÁRIO PROXMOX — VMs e Containers +# =========================================== +# Máquinas virtuais e containers rodando no Proxmox. +# TODO: Preencher com dados reais via: pvesh get /qemu-auto, /lxc-auto +# =========================================== + +proxmox_node: "proxmox" + +# =========================================== +# Virtual Machines (VMs) +# =========================================== +vms: + + homeassistant: + description: "Home Assistant OS rodando como VM" + status: "running" + os_type: "qubes" # HAOS usa o tipo qubes + vmid: "PLACEHOLDER" + resources: + cpu_cores: 4 + ram_mb: 4096 + disk_gb: 32 + boot_order: "scsi0" + network: + bridge: "vmbr0" + vlan: 10 # Rede geral + volumes: + # TrueNAS NFS mounts dentro da VM + nfs_config: "/mnt/nfs/homeassistant/config" + nfs_media: "/mnt/nfs/media" + roles: + - home-automation + + # PLACEHOLDER — adicione mais VMs aqui + +# =========================================== +# Containers (LXC) +# =========================================== +containers: + + dockerino: + description: "Container principal — Docker + Docker Compose (swarm mode)" + status: "running" + os_type: "debian" + vmid: "PLACEHOLDER" + resources: + cpu_cores: 4 + ram_mb: 8192 + disk_gb: 64 + network: + ip: "10.0.0.50/24" + bridge: "vmbr0" + vlan: 1 # Rede infra + gateway: "PLACEHOLDER" # IP do ER605 + dns: "10.0.0.2" + volumes: + # Mounts do TrueNAS NFS + nfs_picsur: "/mnt/nfs/picsur/data" + nfs_docker_volumes: "/mnt/nfs/docker-volumes" + docker: + version: "PLACEHOLDER" + compose_version: "PLACEHOLDER" + services: + - picsur + - adguard #outro instance? 
+ - outline + - nginx-proxy + - homer + - bookstack + - flatnotes + - homebox + - speedtest + - omada-controller + - twingate + roles: + - docker-host + - reverse-proxy + - application-host + + media: + description: "Container — Jellyfin e serviços de mídia" + status: "running" + os_type: "debian" + vmid: "PLACEHOLDER" + resources: + cpu_cores: 4 + ram_mb: 8192 + disk_gb: 128 + network: + ip: "PLACEHOLDER" # TODO: Descobrir IP + bridge: "vmbr0" + vlan: 1 + gateway: "PLACEHOLDER" + dns: "10.0.0.2" + volumes: + nfs_media: "/mnt/nfs/media" + docker: + version: "PLACEHOLDER" + services: + - jellyfin + roles: + - media-server + +# =========================================== +# Storage Pools (Proxmox → TrueNAS) +# =========================================== +nfs_mounts: + nfs-media: + server: "PLACEHOLDER" # IP do TrueNAS + export: "/mnt/tank/media" + mount_point: "/mnt/nfs/media" + usage: "Jellyfin media files" + + nfs-picsur: + server: "PLACEHOLDER" + export: "/mnt/tank/picsur" + mount_point: "/mnt/nfs/picsur" + usage: "Picsur image storage" + + nfs-docker-volumes: + server: "PLACEHOLDER" + export: "/mnt/tank/docker-volumes" + mount_point: "/mnt/nfs/docker-volumes" + usage: "Docker named volumes (named volumes persistem entre recreações)" + + nfs-homeassistant: + server: "PLACEHOLDER" + export: "/mnt/tank/homeassistant" + mount_point: "/mnt/nfs/homeassistant" + usage: "Home Assistant config" + +# =========================================== +# Notes +# =========================================== +# Para descobrir IPs das VMs: +# pvesh get /qemu//agent/network-get-interfaces +# pvesh get /lxc//agent/network-get-interfaces +# +# Para listar todos os containers: +# pvesh get /cluster/resources diff --git a/inventory/truenas.yaml b/inventory/truenas.yaml new file mode 100644 index 0000000..74d7d1f --- /dev/null +++ b/inventory/truenas.yaml @@ -0,0 +1,163 @@ +# =========================================== +# INVENTÁRIO TRUENAS — Discos, Pools e Datasets +# 
=========================================== +# TODO: Obter dados reais via: +# 1. Web UI TrueNAS > Storage > Disks +# 2. CLI: zpool status, zfs list +# =========================================== + +truenas: + hostname: "truenas" + version: "PLACEHOLDER" + ip: "PLACEHOLDER" + +# =========================================== +# Discos Físicos +# =========================================== +disks: + # TODO: lsblk -d -o NAME,SIZE,TYPE,ROTA | grep disk + # TODO: smartctl -a /dev/sdX + + sdb: + size_tb: 4 + type: "HDD" + model: "PLACEHOLDER" + serial: "PLACEHOLDER" + role: "data" + pool: "tank" + + sdc: + size_tb: 4 + type: "HDD" + model: "PLACEHOLDER" + serial: "PLACEHOLDER" + role: "data" + pool: "tank" + + sdd: + size_tb: 4 + type: "HDD" + model: "PLACEHOLDER" + serial: "PLACEHOLDER" + role: "data" + pool: "tank" + + sde: + size_tb: 4 + type: "HDD" + model: "PLACEHOLDER" + serial: "PLACEHOLDER" + role: "data" + pool: "tank" + + sdf: + size_gb: 500 + type: "SSD" + model: "PLACEHOLDER" + serial: "PLACEHOLDER" + role: "SLOG/Cache" + pool: "tank" + +# =========================================== +# ZFS Pools +# =========================================== +pools: + tank: + description: "Pool principal — dados de todos os serviços" + vdev_type: "mirror" # mirror = 2x4TB espelhados (RAID-1) + # TODO: Descobrir configuração real — pode ser raidz ou mirror + disks: + - /dev/sdb + - /dev/sdc + - /dev/sdd + - /dev/sde + compression: "lz4" + ashift: 12 # 4K sectors + atime: "off" + +# =========================================== +# Datasets — o que existe hoje +# =========================================== +datasets: + tank: + docker-volumes: + description: "Volumes Docker do Dockerino (NFS-mounted)" + mount_point: "/mnt/tank/docker-volumes" + share: "docker-volumes" + nfs_export: "*(rw,no_root_squash,subtree_check)" + used_by: + - dockerino:/mnt/nfs/docker-volumes + + picsur: + description: "Dados do Picsur (imagens armazenadas)" + mount_point: "/mnt/tank/picsur" + share: "picsur" 
+ nfs_export: "*(rw,no_root_squash,subtree_check)" + used_by: + - dockerino:/mnt/nfs/picsur + + media: + description: "Biblioteca de mídia — Jellyfin" + mount_point: "/mnt/tank/media" + share: "media" + nfs_export: "*(rw,no_root_squash,subtree_check)" + used_by: + - media:/mnt/nfs/media + - homeassistant:/mnt/nfs/media + + homeassistant: + description: "Config do Home Assistant" + mount_point: "/mnt/tank/homeassistant" + share: "homeassistant" + nfs_export: "*(rw,no_root_squash,subtree_check)" + used_by: + - homeassistant:/mnt/nfs/homeassistant + + backup: + description: "Backups periódicos" + mount_point: "/mnt/tank/backup" + snapshot_enabled: true + snapshot_schedule: "daily" + +# =========================================== +# SMB Shares (se TrueNAS exportar via SMB) +# =========================================== +smb_shares: + # TODO: smbctl listshares + media: + dataset: "tank/media" + share_name: "media" + description: "Biblioteca de mídia" + + picsur: + dataset: "tank/picsur" + share_name: "picsur" + description: "Dados do Picsur" + +# =========================================== +# NFS Exports +# =========================================== +nfs_exports: + # TODO: cat /etc/exports + tank: + - path: "/mnt/tank/docker-volumes" + clients: "10.0.0.50" # Dockerino + options: "rw,no_root_squash,subtree_check" + - path: "/mnt/tank/picsur" + clients: "10.0.0.50" # Dockerino + options: "rw,no_root_squash,subtree_check" + - path: "/mnt/tank/media" + clients: "10.0.0.60,10.0.0.70" # Media VM, HA VM + options: "rw,no_root_squash,subtree_check" + - path: "/mnt/tank/homeassistant" + clients: "10.0.0.70" # HA VM + options: "rw,no_root_squash,subtree_check" + +# =========================================== +# Notes +# =========================================== +# Para descobrir configuração real: +# Web UI: Storage > Disks +# CLI: zpool status -v +# CLI: zfs list -o name,mountpoint,used,available +# CLI: cat /etc/exports diff --git 
a/docker/dockerino/adguard/docker-compose.yml b/services/dockerino/adguard/docker-compose.yml similarity index 100% rename from docker/dockerino/adguard/docker-compose.yml rename to services/dockerino/adguard/docker-compose.yml diff --git a/docker/dockerino/bookstack/docker-compose.yml b/services/dockerino/bookstack/docker-compose.yml similarity index 100% rename from docker/dockerino/bookstack/docker-compose.yml rename to services/dockerino/bookstack/docker-compose.yml diff --git a/docker/dockerino/flatnotes/docker-compose.yml b/services/dockerino/flatnotes/docker-compose.yml similarity index 100% rename from docker/dockerino/flatnotes/docker-compose.yml rename to services/dockerino/flatnotes/docker-compose.yml diff --git a/docker/dockerino/homebox/docker-compose.yml b/services/dockerino/homebox/docker-compose.yml similarity index 100% rename from docker/dockerino/homebox/docker-compose.yml rename to services/dockerino/homebox/docker-compose.yml diff --git a/docker/dockerino/homer/config.yml b/services/dockerino/homer/config.yml similarity index 100% rename from docker/dockerino/homer/config.yml rename to services/dockerino/homer/config.yml diff --git a/docker/dockerino/homer/docker-compose.yml b/services/dockerino/homer/docker-compose.yml similarity index 100% rename from docker/dockerino/homer/docker-compose.yml rename to services/dockerino/homer/docker-compose.yml diff --git a/docker/dockerino/nginx/.env.example b/services/dockerino/nginx/.env.example similarity index 100% rename from docker/dockerino/nginx/.env.example rename to services/dockerino/nginx/.env.example diff --git a/docker/dockerino/nginx/docker-compose.yml b/services/dockerino/nginx/docker-compose.yml similarity index 100% rename from docker/dockerino/nginx/docker-compose.yml rename to services/dockerino/nginx/docker-compose.yml diff --git a/docker/dockerino/omada-controller/docker-compose.yml b/services/dockerino/omada-controller/docker-compose.yml similarity index 100% rename from 
docker/dockerino/omada-controller/docker-compose.yml rename to services/dockerino/omada-controller/docker-compose.yml diff --git a/docker/dockerino/outline/docker-compose.yml b/services/dockerino/outline/docker-compose.yml similarity index 100% rename from docker/dockerino/outline/docker-compose.yml rename to services/dockerino/outline/docker-compose.yml diff --git a/docker/dockerino/picsur/docker-compose.yml b/services/dockerino/picsur/docker-compose.yml similarity index 68% rename from docker/dockerino/picsur/docker-compose.yml rename to services/dockerino/picsur/docker-compose.yml index e2cf853..87330bb 100644 --- a/docker/dockerino/picsur/docker-compose.yml +++ b/services/dockerino/picsur/docker-compose.yml @@ -8,9 +8,14 @@ services: ports: - "8091:8080" volumes: - - ./data:/data + - picsur-data:/data environment: - TZ: America/Sao_Paulo + - TZ=America/Sao_Paulo + +volumes: + picsur-data: + name: picsur-data + driver: local networks: default: diff --git a/docker/dockerino/speedtest/docker-compose.yml b/services/dockerino/speedtest/docker-compose.yml similarity index 100% rename from docker/dockerino/speedtest/docker-compose.yml rename to services/dockerino/speedtest/docker-compose.yml diff --git a/docker/dockerino/twingate/docker-compose.yml b/services/dockerino/twingate/docker-compose.yml similarity index 100% rename from docker/dockerino/twingate/docker-compose.yml rename to services/dockerino/twingate/docker-compose.yml diff --git a/docker/media/docker-compose.yml b/services/media/docker-compose.yml similarity index 100% rename from docker/media/docker-compose.yml rename to services/media/docker-compose.yml