wip: quadlets

This commit is contained in:
Thomas Rijpstra 2025-10-03 14:11:03 +02:00
parent b801f8f201
commit 344baa16aa
Signed by: thomas
SSH Key Fingerprint: SHA256:NoPljIC4A210B8B3jJovKUIFRtnYxYA4ej6sgkR/yWA
21 changed files with 1829 additions and 0 deletions

40 quadlets/.gitignore vendored Normal file

@@ -0,0 +1,40 @@
# Local .terraform directories
.terraform/
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Ignore transient lock info files created by terraform apply
.terraform.tfstate.lock.info
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
# Optional: ignore graph output files generated by `terraform graph`
*.dot

10 quadlets/.idea/.gitignore vendored Normal file

@@ -0,0 +1,10 @@
# Default ignored files
/shelf/
/workspace.xml
# Ignored default folder with query files
/queries/
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml
# Editor-based HTTP Client requests
/httpRequests/

@@ -0,0 +1,4 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="Encoding" addBOMForNewFiles="with BOM under Windows, with no BOM otherwise" />
</project>

@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/quadlets.iml" filepath="$PROJECT_DIR$/.idea/quadlets.iml" />
</modules>
</component>
</project>

@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="WEB_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
</module>

6 quadlets/.idea/vcs.xml Normal file

@@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="$PROJECT_DIR$/.." vcs="Git" />
</component>
</project>

66 quadlets/main.tf Normal file

@@ -0,0 +1,66 @@
variable "hcloud_token" {
description = "Hetzner Cloud API Token"
type = string
sensitive = true
}
variable "hdns_token" {
type = string
sensitive = true
}
variable "ssh_public_key_path" {
description = "Path to SSH public key"
type = string
}
variable "ssh_private_key_path" {
description = "Path to SSH private key"
type = string
}
variable "ghcr_username" {}
variable "ghcr_token" {}
module "hetzner" {
source = "./modules/hetzner"
hcloud_token = var.hcloud_token
ssh_public_key_path = var.ssh_public_key_path
ssh_private_key_path = var.ssh_private_key_path
name = "vw-hub"
datacenter = "nbg1-dc3"
hdns_token = var.hdns_token
ghcr_token = var.ghcr_token
ghcr_username = var.ghcr_username
}
module "minio" {
wait_on = module.hetzner.installed
source = "./modules/minio"
server_ip = module.hetzner.server_ip
server_domain = module.hetzner.server_domain
ssh_private_key_path = var.ssh_private_key_path
}
module "valkey" {
wait_on = module.hetzner.installed
source = "./modules/valkey"
server_ip = module.hetzner.server_ip
ssh_private_key_path = var.ssh_private_key_path
}
# module "vw-hub" {
# wait_on = module.minio.installed
#
# source = "./modules/vw-hub"
# server_ip = module.hetzner.server_ip
# ssh_private_key_path = var.ssh_private_key_path
# domain = "hub.${module.hetzner.server_domain}"
# s3_access_key = module.minio.access_key
# s3_secret_key = module.minio.secret_key
# s3_server = module.minio.server
# }
output "minio_app_urls" {
value = module.minio.app_urls
}
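
For local experimentation, a minimal terraform.tfvars sketch covering the variables declared above; every value is a placeholder and none are taken from this repository:

hcloud_token         = "hcloud-api-token-placeholder"
hdns_token           = "hetzner-dns-api-token-placeholder"
ssh_public_key_path  = "~/.ssh/id_ed25519.pub"
ssh_private_key_path = "~/.ssh/id_ed25519"
ghcr_username        = "example-user"
ghcr_token           = "ghcr-pat-placeholder"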

@@ -0,0 +1,36 @@
variable "wait_on" {
type = any
description = "Resources to wait on"
default = true
}
variable "server_ip" {
type = string
}
variable "ssh_private_key_path" {
type = string
}
module "redis" {
source = "../quadlet-app"
wait_on = var.wait_on
server_ip = var.server_ip
ssh_private_key_path = var.ssh_private_key_path
app_name = "redis"
image = "docker.io/redis:7-alpine"
ports = ["6379:6379"]
volumes = ["/opt/storage/data/redis:/data:Z"]
command = ["redis-server", "--appendonly", "yes"]
}
output "app_urls" {
value = module.redis.app_urls
}
output "installed" {
value = true
depends_on = [module.redis.installed]
}
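
The wait_on input and installed output used in this module are the ordering convention shared by every module in this commit; a short sketch of how the root module chains them (mirrors quadlets/main.tf):

# module "hetzner" { ... }                               # exposes output "installed"
# module "minio"   { wait_on = module.hetzner.installed  ... }
# module "valkey"  { wait_on = module.hetzner.installed  ... }
#
# The wait_on value is never read for its content; inside each module it is fed
# into a depends_on or null_resource trigger (see modules/quadlet-app), which is
# what defers the consumer until its predecessor has finished provisioning.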

@@ -0,0 +1,561 @@
#cloud-config
users:
- name: fourlights
sudo: ALL=(ALL) NOPASSWD:ALL
groups: users,admin,sudo
shell: /bin/bash
lock_passwd: false
ssh_authorized_keys:
- ${ssh_public_key}
packages:
- podman
- haproxy
- python3
- python3-requests
- curl
- wget
- jq
- socat
- nmap
package_update: true
package_upgrade: true
write_files:
- path: /etc/sudoers.d/fourlights-haproxy
permissions: '0440'
content: |
fourlights ALL=(root) NOPASSWD: /bin/systemctl reload haproxy
fourlights ALL=(root) NOPASSWD: /bin/systemctl restart haproxy
fourlights ALL=(root) NOPASSWD: /bin/systemctl stop haproxy
fourlights ALL=(root) NOPASSWD: /bin/systemctl start haproxy
fourlights ALL=(root) NOPASSWD: /bin/chown -R haproxy\:haproxy /etc/ssl/haproxy/*
fourlights ALL=(root) NOPASSWD: /bin/chmod 600 /etc/ssl/haproxy/*
# HAProxy main configuration
- path: /etc/haproxy/haproxy.cfg
content: |
global
daemon
stats socket /var/run/haproxy/admin.sock mode 660 level admin expose-fd listeners
stats timeout 30s
user haproxy
group haproxy
log stdout local0 info
defaults
mode http
timeout connect 5000ms
timeout client 50000ms
timeout server 50000ms
option httplog
log global
# Stats interface
frontend stats
bind *:8404
http-request use-service prometheus-exporter if { path /metrics }
stats enable
stats uri /stats
stats refresh 10s
# HTTP Frontend
frontend main
bind *:80
# ACL to detect ACME challenge requests
acl is_acme_challenge path_beg /.well-known/acme-challenge/
# Route ACME challenges to the acme_challenge backend
use_backend acme_challenge if is_acme_challenge
default_backend no_match
# HTTPS Frontend
frontend https_main
bind *:443
default_backend no_match
# ACME Challenge Backend
backend acme_challenge
mode http
server acme_server 127.0.0.1:8888
# Default backend
backend no_match
http-request return status 404 content-type text/plain string "No matching service found"
- path: /etc/dataplaneapi/dataplaneapi.yml
content: |
dataplaneapi:
host: 0.0.0.0
port: 5555
user:
- insecure: true
password: admin
username: admin
haproxy:
config_file: /etc/haproxy/haproxy.cfg
haproxy_bin: /usr/sbin/haproxy
reload:
reload_cmd: systemctl reload haproxy
restart_cmd: systemctl restart haproxy
stats_socket: /var/run/haproxy/admin.sock
- path: /usr/local/bin/podman-haproxy-acme-sync-wrapper.sh
permissions: '0755'
content: |
#!/bin/bash
set -e
MAX_WAIT=60
ELAPSED=0
# Wait for HAProxy
echo "Checking HAProxy status..."
while ! systemctl is-active --quiet haproxy; do
echo "Waiting for HAProxy to start..."
sleep 2
ELAPSED=$((ELAPSED + 2))
[ $ELAPSED -ge $MAX_WAIT ] && { echo "ERROR: HAProxy timeout"; exit 1; }
done
echo "HAProxy is active"
# Reset and wait for Data Plane API to actually respond
ELAPSED=0
echo "Checking Data Plane API readiness..."
while true; do
HTTP_CODE=$(curl -s -w "%%{http_code}" -o /dev/null \
--connect-timeout 5 \
--max-time 10 \
-u :admin \
http://localhost:5555/v3/services/haproxy/configuration/version 2>/dev/null || echo "000")
[ "$HTTP_CODE" = "200" ] && { echo "Data Plane API ready"; break; }
echo "Waiting for Data Plane API... (HTTP $HTTP_CODE)"
sleep 2
ELAPSED=$((ELAPSED + 2))
if [ $ELAPSED -ge $MAX_WAIT ]; then
echo "ERROR: Data Plane API not ready within $MAX_WAITs (HTTP $HTTP_CODE)"
journalctl -u dataplaneapi -n 50 --no-pager
exit 1
fi
done
sleep 2
exec /usr/local/bin/podman-haproxy-acme-sync.py
# Podman HAProxy ACME Sync Script
- path: /usr/local/bin/podman-haproxy-acme-sync.py
permissions: '0755'
content: |
#!/usr/bin/env python3
import json
import subprocess
import requests
import time
import os
import sys
HAPROXY_API_BASE = "http://:admin@127.0.0.1:5555/v3"
CERT_DIR = "/home/fourlights/.acme.sh"
ACME_SCRIPT = "/usr/local/bin/acme.sh"
class PodmanHAProxyACMESync:
def __init__(self):
self.ssl_services = set()
self.session = requests.Session()
self.session.headers.update({'Content-Type': 'application/json'})
def get_next_index(self, path):
response = self.session.get(f"{HAPROXY_API_BASE}/services/haproxy/configuration/{path}")
return len(response.json()) if response.status_code == 200 else None
def get_dataplaneapi_version(self):
response = self.session.get(f"{HAPROXY_API_BASE}/services/haproxy/configuration/version")
return response.json() if response.status_code == 200 else None
def get_container_labels(self, container_id):
try:
result = subprocess.run(['podman', 'inspect', container_id],
capture_output=True, text=True)
if result.returncode == 0:
data = json.loads(result.stdout)
return data[0]['Config']['Labels'] or {}
except Exception as e:
print(f"Error getting labels for {container_id}: {e}")
return {}
def request_certificate(self, domain):
print(f"[CERT-REQUEST] About to request certificate for {domain}")
sys.stdout.flush()
try:
cmd = [
ACME_SCRIPT,
"--issue",
"-d", domain,
"--standalone",
"--httpport", "8888",
"--server", "letsencrypt",
"--listen-v4",
"--debug", "2"
]
# Log the command being executed
print(f"[CERT-REQUEST] Executing: {' '.join(cmd)}")
sys.stdout.flush()
result = subprocess.run(cmd, capture_output=True, text=True)
# Log both stdout and stderr for complete debugging
if result.stdout:
print(f"[CERT-STDOUT] {result.stdout}")
sys.stdout.flush()
if result.stderr:
print(f"[CERT-STDERR] {result.stderr}")
sys.stderr.flush()
if result.returncode == 0:
print(f"[CERT-SUCCESS] Certificate obtained for {domain}")
sys.stdout.flush()
self.install_certificate(domain)
return True
else:
print(f"[CERT-FAILED] Failed to obtain certificate for {domain}")
print(f"[CERT-FAILED] Return code: {result.returncode}")
sys.stdout.flush()
return False
except Exception as e:
print(f"[CERT-ERROR] Error requesting certificate: {e}")
sys.stdout.flush()
return False
def install_certificate(self, domain):
cert_file = f"{CERT_DIR}/{domain}.pem"
try:
acme_cert_dir = f"/home/fourlights/.acme.sh/{domain}_ecc"
with open(cert_file, 'w') as outfile:
with open(f"{acme_cert_dir}/fullchain.cer") as cert:
outfile.write(cert.read())
with open(f"{acme_cert_dir}/{domain}.key") as key:
outfile.write(key.read())
try:
with open(f"{acme_cert_dir}/ca.cer") as ca:
outfile.write(ca.read())
except FileNotFoundError:
pass
os.chmod(cert_file, 0o600)
print(f"Certificate installed at {cert_file}")
self.update_haproxy_ssl_bind(domain)
except Exception as e:
print(f"Error installing certificate for {domain}: {e}")
def update_haproxy_ssl_bind(self, domain):
print(f"Updating ssl bind for {domain}")
try:
ssl_bind_data = {
"address": "*",
"port": 443,
"ssl": True,
"ssl_certificate": f"{CERT_DIR}/{domain}.pem",
}
response = self.session.post(f"{HAPROXY_API_BASE}/services/haproxy/configuration/frontends/https_main/binds?version={self.get_dataplaneapi_version()}",
json=ssl_bind_data)
print(response.json())
if response.status_code in [200, 201]:
print(f"Updated HAProxy SSL bind for {domain}")
except Exception as e:
print(f"Error updating HAProxy SSL bind: {e}")
def setup_certificate_renewal(self, domain):
renewal_script = f"/etc/cron.d/acme-{domain.replace('.', '-')}"
cron_content = f"""0 0 * * * root {ACME_SCRIPT} --renew -d {domain} --post-hook "systemctl reload haproxy" >/dev/null 2>&1
"""
with open(renewal_script, 'w') as f:
f.write(cron_content)
print(f"Setup automatic renewal for {domain}")
def update_haproxy_backend(self, service_name, host, port, action='add'):
backend_name = f"backend_{service_name}"
server_name = f"{service_name}_server"
if action == 'add':
backend_data = {
"name": backend_name,
"mode": "http",
"balance": {"algorithm": "roundrobin"},
}
backends = self.session.post(f"{HAPROXY_API_BASE}/services/haproxy/configuration/backends?version={self.get_dataplaneapi_version()}",
json=backend_data)
print(backends.json())
server_data = {
"name": server_name,
"address": host,
"port": int(port),
"check": "enabled",
}
tweak = self.session.post(f"{HAPROXY_API_BASE}/services/haproxy/configuration/backends/{backend_name}/servers?version={self.get_dataplaneapi_version()}",
json=server_data)
print(tweak.json())
elif action == 'remove':
self.session.delete(f"{HAPROXY_API_BASE}/services/haproxy/configuration/backends/{backend_name}/servers/{server_name}?version={self.get_dataplaneapi_version()}")
def update_haproxy_frontend_rule(self, service_name, domain, ssl_enabled=False, action='add'):
if action == 'add':
if ssl_enabled and domain and domain not in self.ssl_services:
print(f"Setting up SSL for {domain}")
if self.request_certificate(domain):
self.setup_certificate_renewal(domain)
self.ssl_services.add(domain)
acl_data = {
"acl_name": f"is_{service_name}",
"criterion": "hdr(host)",
"value": domain,
}
self.session.post(f"{HAPROXY_API_BASE}/services/haproxy/configuration/frontends/main/acls/{self.get_next_index('frontends/main/acls')}?version={self.get_dataplaneapi_version()}",
json=acl_data)
if ssl_enabled:
self.session.post(f"{HAPROXY_API_BASE}/services/haproxy/configuration/frontends/https_main/acls/{self.get_next_index('frontends/https_main/acls')}?version={self.get_dataplaneapi_version()}",
json=acl_data)
rule_data = {
"name": f"backend_{service_name}",
"cond": "if",
"cond_test": f"is_{service_name}",
}
self.session.post(f"{HAPROXY_API_BASE}/services/haproxy/configuration/frontends/main/backend_switching_rules/{self.get_next_index('frontends/main/backend_switching_rules')}?version={self.get_dataplaneapi_version()}",
json=rule_data)
if ssl_enabled:
self.session.post(f"{HAPROXY_API_BASE}/services/haproxy/configuration/frontends/https_main/backend_switching_rules/{self.get_next_index('frontends/https_main/backend_switching_rules')}?version={self.get_dataplaneapi_version()}",
json=rule_data)
redirect_rule = {
"type": "redirect",
"redirect_rule": {
"type": "scheme",
"value": "https",
"code": 301
},
"cond": "if",
"cond_test": f"is_{service_name}",
}
self.session.post(f"{HAPROXY_API_BASE}/services/haproxy/configuration/frontends/main/http_request_rules/{self.get_next_index('frontends/main/http_request_rules')}?version={self.get_dataplaneapi_version()}",
json=redirect_rule)
def process_container_event(self, event):
# DIAGNOSTIC: Log raw event structure
print(f"[EVENT-DEBUG] Received event - Type: {event.get('Type', 'MISSING')}, Action: {event.get('Action', 'MISSING')}")
sys.stdout.flush()
# DIAGNOSTIC: Check for Actor key
if 'Actor' not in event:
print(f"[EVENT-SKIP] Skipping event without 'Actor' key - Full event: {json.dumps(event)}")
sys.stdout.flush()
return
# DIAGNOSTIC: Check for ID in Actor
if 'ID' not in event['Actor']:
print(f"[EVENT-SKIP] Skipping event without 'Actor.ID' - Actor content: {json.dumps(event['Actor'])}")
sys.stdout.flush()
return
container_id = event['Actor']['ID'][:12]
action = event['Action']
print(f"[EVENT-PROCESS] Processing '{action}' event for container {container_id}")
sys.stdout.flush()
labels = self.get_container_labels(container_id)
# Dictionary to store discovered services
services = {}
# First, check for namespaced labels (haproxy.{service_name}.enable)
for label_key, label_value in labels.items():
if label_key.startswith('haproxy.') and label_key.endswith('.enable') and label_value.lower() == 'true':
# Extract service name from label key
parts = label_key.split('.')
if len(parts) == 3: # haproxy.{service_name}.enable
service_name = parts[1]
# Extract properties for this service namespace
service_config = {
'service_name': service_name,
'host': labels.get(f'haproxy.{service_name}.host', '127.0.0.1'),
'port': labels.get(f'haproxy.{service_name}.port', '8080'),
'domain': labels.get(f'haproxy.{service_name}.domain', None),
'ssl_enabled': labels.get(f'haproxy.{service_name}.tls', 'false').lower() == 'true'
}
services[service_name] = service_config
# Backward compatibility: If no namespaced labels found, check for flat labels
if not services and 'haproxy.enable' in labels and labels['haproxy.enable'].lower() == 'true':
service_name = labels.get('haproxy.service', container_id)
services[service_name] = {
'service_name': service_name,
'host': labels.get('haproxy.host', '127.0.0.1'),
'port': labels.get('haproxy.port', '8080'),
'domain': labels.get('haproxy.domain', None),
'ssl_enabled': labels.get('haproxy.tls', 'false').lower() == 'true'
}
# Process each discovered service
for service_name, config in services.items():
if action in ['start', 'restart']:
print(f"Adding service {config['service_name']} to HAProxy (SSL: {config['ssl_enabled']}, Domain: {config['domain']})")
sys.stdout.flush()
self.update_haproxy_backend(config['service_name'], config['host'], config['port'], 'add')
if config['domain']:
self.update_haproxy_frontend_rule(config['service_name'], config['domain'], config['ssl_enabled'], 'add')
elif action in ['stop', 'remove', 'died']:
print(f"Removing service {config['service_name']} from HAProxy")
sys.stdout.flush()
self.update_haproxy_backend(config['service_name'], config['host'], config['port'], 'remove')
def watch_events(self):
print("Starting Podman-HAProxy-ACME sync...")
# Track last sync time
last_full_sync = 0
SYNC_INTERVAL = 60 # Re-scan all containers every 60 seconds
def do_full_sync():
"""Perform a full sync of all running containers"""
print("Performing full container sync...")
try:
result = subprocess.run(['podman', 'ps', '--format', 'json'],
capture_output=True, text=True)
if result.returncode == 0:
containers = json.loads(result.stdout)
for container in containers:
event = {
'Type': 'container',
'Action': 'start',
'Actor': {'ID': container.get('Id', '')}
}
self.process_container_event(event)
print(f"Synced {len(containers)} containers")
except Exception as e:
print(f"Error during full sync: {e}")
# Initial sync
do_full_sync()
last_full_sync = time.time()
print("Watching for container events...")
cmd = ['podman', 'events', '--format', 'json']
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, text=True)
# Use select/poll for non-blocking read so we can do periodic syncs
import select
while True:
# Check if it's time for periodic sync
if time.time() - last_full_sync >= SYNC_INTERVAL:
do_full_sync()
last_full_sync = time.time()
# Check for events with timeout
ready, _, _ = select.select([process.stdout], [], [], 5)
if ready:
line = process.stdout.readline()
if line:
try:
event = json.loads(line.strip())
if event['Type'] == 'container':
self.process_container_event(event)
except json.JSONDecodeError as e:
print(f"[EVENT-ERROR] JSON decode error: {e} - Line: {line[:100]}")
sys.stdout.flush()
except KeyError as e:
print(f"[EVENT-ERROR] Missing key {e} in event: {json.dumps(event)}")
sys.stdout.flush()
except Exception as e:
print(f"[EVENT-ERROR] Error processing event: {e}")
print(f"[EVENT-ERROR] Event structure: {json.dumps(event)}")
sys.stdout.flush()
if __name__ == "__main__":
os.makedirs(CERT_DIR, exist_ok=True)
sync = PodmanHAProxyACMESync()
sync.watch_events()
runcmd:
# Create necessary directories
- mkdir -p /var/run/haproxy /etc/ssl/haproxy /etc/containers/systemd /etc/haproxy/dataplane /etc/dataplaneapi
- chown haproxy:haproxy /var/run/haproxy
# Install Data Plane API
- cd /tmp && curl -LO https://github.com/haproxytech/dataplaneapi/releases/download/v3.2.4/dataplaneapi_3.2.4_linux_amd64.deb
- env DEBIAN_FRONTEND=noninteractive apt install -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" /tmp/dataplaneapi_3.2.4_linux_amd64.deb
- rm /tmp/dataplaneapi_3.2.4_linux_amd64.deb
- mkdir -p /home/fourlights/.config/containers/systemd
- mkdir -p /home/fourlights/.config/systemd/user
- |
cat > /home/fourlights/.config/systemd/user/podman-haproxy-acme-sync.service << 'EOF'
[Unit]
Description=Podman HAProxy ACME Sync Service
After=network.target
[Service]
Type=simple
Environment="XDG_RUNTIME_DIR=/run/user/1000"
Environment="DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/1000/bus"
ExecStart=/usr/local/bin/podman-haproxy-acme-sync-wrapper.sh
StandardOutput=journal
StandardError=journal
Restart=always
RestartSec=10
[Install]
WantedBy=default.target
EOF
- chown -R fourlights:fourlights /home/fourlights
# Install ACME.sh
- su - fourlights -c 'curl https://get.acme.sh | sh -s email=${acme_email}'
- ln -sf /home/fourlights/.acme.sh/acme.sh /usr/local/bin/acme.sh
# Setup data directory and mount volume
- mkdir -p /opt/storage/data
- mkfs.ext4 -F /dev/sdb
- mount /dev/sdb /opt/storage/data
- echo '/dev/sdb /opt/storage/data ext4 defaults 0 2' >> /etc/fstab
- chown -R fourlights:fourlights /opt/storage/data
# Enable Podman for user services
- loginctl enable-linger fourlights
- su - fourlights -c 'podman login ghcr.io -u ${ghcr_username} -p ${ghcr_token}'
# Enable and start services
- systemctl daemon-reload
- systemctl enable --now haproxy
- systemctl enable --now dataplaneapi
- su - fourlights -c 'systemctl --user daemon-reload'
- su - fourlights -c 'systemctl --user enable --now podman-haproxy-acme-sync'
final_message: "Server setup complete with HAProxy, Podman, and ACME sync configured"
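
The sync script above keys off haproxy.* container labels discovered via podman inspect. In this commit those labels are not written by hand; the quadlet-app module derives them from its haproxy_services input. A hedged sketch of the correspondence, with an illustrative service name and domain:

# One haproxy_services entry ...
haproxy_services = [
  {
    name   = "example"                   # illustrative
    domain = "example.fourlights.dev"    # illustrative
    port   = "8080"
    host   = "127.0.0.1"
    tls    = true
  }
]

# ... is rendered into these quadlet Label= lines, which the watcher turns into a
# HAProxy backend, a host-header ACL, a backend switching rule, and (for tls = true)
# an acme.sh standalone certificate request answered through the :8888 ACME backend:
#   Label=haproxy.example.enable=true
#   Label=haproxy.example.domain=example.fourlights.dev
#   Label=haproxy.example.port=8080
#   Label=haproxy.example.host=127.0.0.1
#   Label=haproxy.example.tls=true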

@@ -0,0 +1,53 @@
variable "hdns_token" {}
variable "zone" { default = "fourlights.dev" }
variable "ipv4_address" {}
variable "ipv6_address" {}
variable "root" {}
terraform {
required_providers {
hetznerdns = {
source = "timohirt/hetznerdns"
version = "2.2.0"
}
}
}
provider "hetznerdns" {
apitoken = var.hdns_token
}
resource "hetznerdns_zone" "zone" {
name = var.zone
ttl = 300
}
resource "hetznerdns_record" "server_root_ipv4" {
zone_id = hetznerdns_zone.zone.id
name = var.root == null || var.root == "" ? "@" : var.root
value = var.ipv4_address
type = "A"
}
resource "hetznerdns_record" "server_root_ipv6" {
zone_id = hetznerdns_zone.zone.id
name = var.root == null || var.root == "" ? "@" : var.root
value = var.ipv6_address
type = "AAAA"
}
resource "hetznerdns_record" "server_wildcard" {
zone_id = hetznerdns_zone.zone.id
name = var.root == null || var.root == "" ? "*" : "*.${var.root}"
value = var.root
type = "CNAME"
}
locals {
root_suffix = var.root == null || var.root == "" ? "" : "."
}
output "server_domain" {
value = "${var.root}${local.root_suffix}${var.zone}"
}
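
A worked example of the server_domain output above, using the inputs the hetzner module supplies (root = "visualworkplace", zone left at its "fourlights.dev" default):

# root_suffix   = "."                                # root is non-empty
# server_domain = "visualworkplace.fourlights.dev"   # var.root + local.root_suffix + var.zone
# With root = "" or null, the suffix collapses and server_domain is just "fourlights.dev".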

@@ -0,0 +1,191 @@
terraform {
required_providers {
hcloud = {
source = "hetznercloud/hcloud"
version = "~> 1.0"
}
}
}
provider "hcloud" {
token = var.hcloud_token
}
variable "hcloud_token" {
description = "Hetzner Cloud API Token"
type = string
sensitive = true
}
variable "ssh_public_key_path" {
description = "Path to SSH public key"
type = string
}
variable "ssh_private_key_path" {
description = "Path to SSH private key"
type = string
}
# variable "acme_email" {
# description = "Email for Let's Encrypt certificates"
# type = string
# default = "engineering@fourlights.nl"
# }
variable "image" {
type = string
default = "ubuntu-24.04"
}
variable "location" {
type = string
default = "nbg1"
}
variable "server_type" {
type = string
default = "cx22"
}
variable "datacenter" {
type = string
default = "nbg1-dc3"
}
variable "name" {
type = string
default = "enterprise"
}
variable "zone" {
type = string
default = "fourlights.dev"
}
variable "hdns_token" {}
variable "ghcr_username" {}
variable "ghcr_token" {}
locals {
acme_email = "engineering+${var.name}@fourlights.nl"
}
resource "hcloud_primary_ip" "server_ipv4" {
name = "${var.name}-ipv4"
type = "ipv4"
assignee_type = "server"
datacenter = var.datacenter
auto_delete = false
}
resource "hcloud_primary_ip" "server_ipv6" {
name = "${var.name}-ipv6"
type = "ipv6"
assignee_type = "server"
datacenter = var.datacenter
auto_delete = false
}
module "dns" {
source = "./dns"
hdns_token = var.hdns_token
zone = var.zone
ipv4_address = hcloud_primary_ip.server_ipv4.ip_address
ipv6_address = hcloud_primary_ip.server_ipv6.ip_address
root = "visualworkplace"
}
# SSH Key
resource "hcloud_ssh_key" "default" {
name = "terraform-key"
public_key = file(var.ssh_public_key_path)
}
# Persistent volume for MinIO
resource "hcloud_volume" "minio_data" {
name = "minio-data"
size = 50
location = var.location
}
# Server with comprehensive cloud-init setup
resource "hcloud_server" "server" {
name = var.name
image = var.image
server_type = var.server_type
location = var.location
ssh_keys = [hcloud_ssh_key.default.id]
user_data = templatefile("${path.module}/cloud-init.yml", {
acme_email = local.acme_email
ssh_public_key = hcloud_ssh_key.default.public_key,
ghcr_username = var.ghcr_username
ghcr_token = var.ghcr_token
})
public_net {
ipv4_enabled = true
ipv6_enabled = true
ipv4 = hcloud_primary_ip.server_ipv4.id
ipv6 = hcloud_primary_ip.server_ipv6.id
}
lifecycle {
replace_triggered_by = [
# Add references here to force a server rebuild when user_data changes
]
}
}
# Attach volume
resource "hcloud_volume_attachment" "minio_data" {
volume_id = hcloud_volume.minio_data.id
server_id = hcloud_server.server.id
automount = false # We'll handle mounting in cloud-init
}
# Wait for cloud-init to complete
resource "null_resource" "wait_for_cloud_init" {
depends_on = [hcloud_server.server]
connection {
type = "ssh"
host = hcloud_server.server.ipv4_address
user = "fourlights"
timeout = "10m"
agent = true
agent_identity = var.ssh_private_key_path
}
provisioner "remote-exec" {
inline = [
"echo 'Waiting for cloud-init to complete...'",
"cloud-init status --wait",
"echo 'Cloud-init completed successfully'"
]
}
}
output "server_ip" {
value = hcloud_server.server.ipv4_address
}
output "haproxy_stats" {
value = "http://${hcloud_server.server.ipv4_address}:8404/stats"
}
output "haproxy_api" {
value = "http://${hcloud_server.server.ipv4_address}:5555"
}
output "server_domain" {
value = module.dns.server_domain
}
output "installed" {
value = true
depends_on = [null_resource.wait_for_cloud_init]
}

@@ -0,0 +1,92 @@
variable "wait_on" {
type = any
description = "Resources to wait on"
default = true
}
variable "server_ip" {
type = string
}
variable "ssh_private_key_path" {
type = string
}
variable "server_domain" {
type = string
}
resource "random_password" "minio_access_key" {
length = 20
special = false
}
resource "random_password" "minio_secret_key" {
length = 40
special = false
}
module "minio" {
wait_on = var.wait_on
source = "../quadlet-app"
server_ip = var.server_ip
ssh_private_key_path = var.ssh_private_key_path
app_name = "minio"
image = "docker.io/minio/minio:latest"
ports = [
"9000:9000", # API port
"9001:9001" # Console port
]
volumes = ["/opt/storage/data/minio:/data:Z"]
environment = {
MINIO_ROOT_USER = random_password.minio_access_key.result
MINIO_ROOT_PASSWORD = random_password.minio_secret_key.result
MINIO_CONSOLE_ADDRESS = ":9001"
MINIO_BROWSER_REDIRECT_URL = "http://storage.${var.server_domain}"
}
command = ["server", "/data", "--console-address", ":9001"]
healthcmd = "curl -f http://localhost:9001/minio/health/live || exit 1"
# Configure multiple HAProxy services for MinIO
haproxy_services = [
{
name = "minio_api"
domain = "storage-api.${var.server_domain}"
port = "9000"
host = "127.0.0.1"
tls = false
},
{
name = "minio_console"
domain = "storage.${var.server_domain}"
port = "9001"
host = "127.0.0.1"
tls = false
}
]
}
output "app_urls" {
value = module.minio.app_urls
}
output "server" {
value = "storage-api.${var.server_domain}"
}
output "access_key" {
value = random_password.minio_access_key.result
}
output "secret_key" {
value = random_password.minio_secret_key.result
}
output "installed" {
value = true
depends_on = [module.minio.installed]
}
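
The server, access_key, and secret_key outputs above are what the tenant provisioning downstream expects; a hedged sketch of the hand-off, mirroring the commented-out vw-hub block in quadlets/main.tf and the s3-tenant call inside modules/vw-hub:

# Root module:
# module "vw-hub" {
#   s3_server     = module.minio.server
#   s3_access_key = module.minio.access_key
#   s3_secret_key = module.minio.secret_key
#   ...
# }
#
# Inside modules/vw-hub these feed the aminueza/minio provider via the tenant module:
# module "s3-tenant" {
#   source     = "../minio/tenant"
#   server     = var.s3_server
#   access_key = var.s3_access_key
#   secret_key = var.s3_secret_key
#   name       = var.name
# }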

@@ -0,0 +1,221 @@
resource "null_resource" "health_check" {
depends_on = [var.wait_on]
provisioner "local-exec" {
command = <<-EOT
attempts=0
until curl -s -f "${var.tls ? "https" : "http" }://${var.server}/minio/health/live" || [[ $attempts -ge 60 ]]; do
sleep 10
attempts=$((attempts+1))
done
if [[ $attempts -ge 60 ]]; then
echo "Minio health check failed after maximum attempts"
exit 1
fi
EOT
}
}
resource "minio_s3_bucket" "overlay" {
depends_on = [var.wait_on]
bucket = var.name
acl = "private"
}
resource "minio_s3_bucket_policy" "overlay" {
depends_on = [minio_s3_bucket.overlay]
bucket = minio_s3_bucket.overlay.bucket
policy = jsonencode({
"Version" : "2012-10-17",
"Statement" : [
{
"Effect" : "Allow",
"Principal" : {
"AWS" : [
"*"
]
},
"Action" : [
"s3:GetBucketLocation"
],
"Resource" : [
minio_s3_bucket.overlay.arn,
]
},
{
"Effect" : "Allow",
"Principal" : {
"AWS" : [
"*"
]
},
"Action" : [
"s3:ListBucket"
],
"Resource" : [
minio_s3_bucket.overlay.arn,
],
"Condition" : {
"StringEquals" : {
"s3:prefix" : [
"*"
]
}
}
},
{
"Effect" : "Allow",
"Principal" : {
"AWS" : [
"*"
]
},
"Action" : [
"s3:GetObject"
],
"Resource" : [
"${minio_s3_bucket.overlay.arn}/**",
]
}
]
})
}
resource "minio_s3_bucket" "uploads" {
depends_on = [null_resource.health_check]
bucket = "uploads"
acl = "private"
}
resource "minio_s3_bucket_policy" "uploads" {
depends_on = [minio_s3_bucket.uploads]
bucket = minio_s3_bucket.uploads.bucket
policy = jsonencode({
"Version" : "2012-10-17",
"Statement" : [
{
"Effect" : "Allow",
"Principal" : {
"AWS" : [
"*"
]
},
"Action" : [
"s3:GetBucketLocation"
],
"Resource" : [
minio_s3_bucket.uploads.arn,
]
},
{
"Effect" : "Allow",
"Principal" : {
"AWS" : [
"*"
]
},
"Action" : [
"s3:ListBucket"
],
"Resource" : [
minio_s3_bucket.uploads.arn,
],
"Condition" : {
"StringEquals" : {
"s3:prefix" : [
"*"
]
}
}
},
{
"Effect" : "Allow",
"Principal" : {
"AWS" : [
"*"
]
},
"Action" : [
"s3:GetObject"
],
"Resource" : [
"${minio_s3_bucket.uploads.arn}/**",
]
}
]
})
}
resource "minio_iam_user" "overlay" {
depends_on = [null_resource.health_check]
name = var.name
}
resource "minio_iam_policy" "overlay" {
depends_on = [minio_s3_bucket.overlay, minio_s3_bucket.uploads]
name = minio_s3_bucket.overlay.bucket
policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Effect = "Allow"
Action = ["s3:ListBucket"]
Resource = [minio_s3_bucket.overlay.arn, minio_s3_bucket.uploads.arn, ]
},
{
Effect = "Allow"
Action = [
"s3:GetObject",
"s3:PutObject",
"s3:DeleteObject"
]
Resource = ["${minio_s3_bucket.overlay.arn}/*", "${minio_s3_bucket.uploads.arn}/*"]
}
]
})
}
resource "minio_iam_user_policy_attachment" "overlay" {
depends_on = [minio_iam_user.overlay, minio_iam_policy.overlay]
user_name = minio_iam_user.overlay.id
policy_name = minio_iam_policy.overlay.id
}
resource "minio_iam_service_account" "overlay" {
depends_on = [minio_iam_user.overlay, minio_s3_bucket.overlay, minio_s3_bucket.uploads]
target_user = minio_iam_user.overlay.name
policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Effect = "Allow"
Action = ["s3:ListBucket"]
Resource = [minio_s3_bucket.overlay.arn, minio_s3_bucket.uploads.arn]
},
{
Effect = "Allow"
Action = [
"s3:GetObject",
"s3:PutObject",
"s3:DeleteObject"
]
Resource = ["${minio_s3_bucket.overlay.arn}/*", "${minio_s3_bucket.uploads.arn}/*"]
}
]
})
}
output "bucket" {
value = var.name
}
output "access_key" {
value = minio_iam_service_account.overlay.access_key
sensitive = true
}
output "secret_key" {
value = minio_iam_service_account.overlay.secret_key
sensitive = true
}

@@ -0,0 +1,16 @@
terraform {
required_providers {
minio = {
source = "aminueza/minio"
version = "~> 3.3.0"
}
}
}
provider "minio" {
minio_server = var.server
minio_region = var.region
minio_user = var.access_key
minio_password = var.secret_key
minio_ssl = var.tls
}

@@ -0,0 +1,33 @@
variable "name" {
type = string
}
variable "server" {
type = string
}
variable "access_key" {
type = string
sensitive = true
}
variable "secret_key" {
type = string
sensitive = true
}
variable "region" {
type = string
default = "eu-central-1"
}
variable "wait_on" {
type = any
description = "Resources to wait on"
default = true
}
variable "tls" {
type = bool
default = false
}

@@ -0,0 +1,36 @@
variable "wait_on" {
type = any
description = "Resources to wait on"
default = true
}
variable "server_ip" {
type = string
}
variable "ssh_private_key_path" {
type = string
}
module "redis" {
source = "../quadlet-app"
wait_on = var.wait_on
server_ip = var.server_ip
ssh_private_key_path = var.ssh_private_key_path
app_name = "redis"
image = "docker.io/redis:7-alpine"
ports = ["6379:6379"]
volumes = ["/opt/storage/data/redis:/data:Z"]
command = ["redis-server", "--appendonly", "yes"]
}
output "app_urls" {
value = module.redis.app_urls
}
output "installed" {
value = true
depends_on = [module.redis.installed]
}

@@ -0,0 +1,220 @@
variable "wait_on" {
type = any
description = "Resources to wait on"
default = true
}
variable "server_ip" {
description = "Target server IP"
type = string
}
variable "ssh_private_key_path" {
description = "Path to SSH private key"
type = string
default = "~/.ssh/id_rsa"
}
variable "app_name" {
description = "Name of the application"
type = string
}
variable "image" {
description = "Container image"
type = string
}
variable "ports" {
description = "List of port mappings (e.g., ['8080:80', '8443:443'])"
type = list(string)
default = []
}
variable "volumes" {
description = "List of volume mounts (e.g., ['/host/path:/container/path:Z'])"
type = list(string)
default = []
}
variable "environment" {
description = "Environment variables as key-value pairs"
type = map(string)
default = {}
}
variable "command" {
description = "Command to run in container (list of strings)"
type = list(string)
default = []
}
variable "haproxy_services" {
description = "Multiple HAProxy service configurations"
type = list(object({
name = string
domain = string
port = string
host = optional(string, "127.0.0.1")
tls = optional(bool, false)
}))
default = []
}
variable "depends_on_services" {
description = "List of systemd services this app depends on"
type = list(string)
default = []
}
variable "restart_policy" {
description = "Systemd restart policy"
type = string
default = "always"
}
variable "healthcmd" {
default = ""
}
locals {
# Build all HAProxy labels for multiple services
haproxy_labels = flatten([
for svc in var.haproxy_services : [
"Label=haproxy.${svc.name}.enable=true",
"Label=haproxy.${svc.name}.domain=${svc.domain}",
"Label=haproxy.${svc.name}.port=${svc.port}",
"Label=haproxy.${svc.name}.host=${svc.host}",
"Label=haproxy.${svc.name}.tls=${svc.tls}"
]
])
}
resource "null_resource" "deploy_quadlet_app" {
depends_on = [var.wait_on]
triggers = {
app_name = var.app_name
image = var.image
server_ip = var.server_ip
ports = jsonencode(var.ports)
volumes = jsonencode(var.volumes)
environment = jsonencode(var.environment)
command = jsonencode(var.command)
haproxy_services = jsonencode(var.haproxy_services)
depends_on_services = jsonencode(var.depends_on_services)
ssh_private_key_path = var.ssh_private_key_path
restart_policy = var.restart_policy
}
provisioner "remote-exec" {
inline = compact(flatten([
[
# Wait for cloud-init to complete before proceeding
"cloud-init status --wait || true",
# Verify the user systemd session is ready and linger is enabled
"timeout 60 bash -c 'until loginctl show-user fourlights | grep -q \"Linger=yes\"; do sleep 2; done'",
# Create base quadlet file
"cat > /tmp/${var.app_name}.container << 'EOF'",
"[Unit]",
"Description=${var.app_name} Service",
"After=network-online.target",
"",
"[Container]",
"Image=${var.image}",
],
# Add ports (only if not empty)
length(var.ports) > 0 ? formatlist("PublishPort=127.0.0.1:%s", var.ports) : [],
# Add volumes (only if not empty)
length(var.volumes) > 0 ? formatlist("Volume=%s", var.volumes) : [],
# Add environment variables (only if not empty)
length(var.environment) > 0 ? formatlist("Environment=%s=%s", keys(var.environment), values(var.environment)) : [],
# Add command (only if not empty)
length(var.command) > 0 ? ["Exec=${join(" ", var.command)}"] : [],
# Add pre-computed HAProxy labels (only if not empty)
length(local.haproxy_labels) > 0 ? local.haproxy_labels : [],
# Add health checks if not empty
var.healthcmd != "" ? ["HealthCmd=${var.healthcmd}"] : [],
[
"",
"[Service]",
"Restart=${var.restart_policy}",
"",
"[Install]",
"WantedBy=default.target",
"EOF",
# Create volume directory
"mkdir -p /opt/storage/data/${var.app_name}",
# Move and activate
# Create directory more robustly
"test -d ~/.config/containers/systemd || mkdir -p ~/.config/containers/systemd",
"cp /tmp/${var.app_name}.container ~/.config/containers/systemd/${var.app_name}.container",
"systemctl --user daemon-reload",
"timeout 60 bash -c 'until systemctl --user list-unit-files | grep -q \"^${var.app_name}.service\"; do sleep 2; systemctl --user daemon-reload; done'",
"systemctl --user start ${var.app_name}",
"systemctl --user status ${var.app_name} --no-pager",
]
]))
connection {
type = "ssh"
host = var.server_ip
user = "fourlights"
agent = true
agent_identity = var.ssh_private_key_path
}
}
provisioner "remote-exec" {
when = destroy
inline = [
# Stop and remove the service
"systemctl --user stop ${self.triggers.app_name} || true",
# Remove the .container file
"rm -f ~/.config/containers/systemd/${self.triggers.app_name}.container",
# Reload systemd to remove the generated service
"systemctl --user daemon-reload",
# Force remove any lingering containers
"podman rm -f ${self.triggers.app_name} || true"
]
connection {
type = "ssh"
host = self.triggers.server_ip
user = "fourlights"
agent = true
agent_identity = self.triggers.ssh_private_key_path
}
}
}
output "app_name" {
value = var.app_name
}
output "service_status" {
value = "${var.app_name} deployed"
}
output "app_urls" {
value = [for svc in var.haproxy_services : format("%s://%s", (svc.tls == true ? "https" : "http"), svc.domain)]
}
output "installed" {
value = true
depends_on = [null_resource.deploy_quadlet_app]
}
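
For reference, a hedged sketch of what one call to this module produces on the host. The module call mirrors the valkey module elsewhere in this commit; the rendered unit is reconstructed from the heredoc above, not captured from a running server:

module "valkey" {
  source               = "../quadlet-app"
  server_ip            = var.server_ip
  ssh_private_key_path = var.ssh_private_key_path
  app_name             = "valkey"
  image                = "docker.io/valkey/valkey:7-alpine"
  ports                = ["6379:6379"]
  volumes              = ["/opt/storage/data/valkey:/data:Z"]
  command              = ["valkey-server", "--appendonly", "yes"]
}

# Rendered ~/.config/containers/systemd/valkey.container:
#   [Unit]
#   Description=valkey Service
#   After=network-online.target
#
#   [Container]
#   Image=docker.io/valkey/valkey:7-alpine
#   PublishPort=127.0.0.1:6379:6379
#   Volume=/opt/storage/data/valkey:/data:Z
#   Exec=valkey-server --appendonly yes
#
#   [Service]
#   Restart=always
#
#   [Install]
#   WantedBy=default.target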

@@ -0,0 +1,36 @@
variable "wait_on" {
type = any
description = "Resources to wait on"
default = true
}
variable "server_ip" {
type = string
}
variable "ssh_private_key_path" {
type = string
}
module "redis" {
source = "../quadlet-app"
wait_on = var.wait_on
server_ip = var.server_ip
ssh_private_key_path = var.ssh_private_key_path
app_name = "redis"
image = "docker.io/redis:7-alpine"
ports = ["6379:6379"]
volumes = ["/opt/storage/data/redis:/data:Z"]
command = ["redis-server", "--appendonly", "yes"]
}
output "app_urls" {
value = module.redis.app_urls
}
output "installed" {
value = true
depends_on = [module.redis.installed]
}

@@ -0,0 +1,36 @@
variable "wait_on" {
type = any
description = "Resources to wait on"
default = true
}
variable "server_ip" {
type = string
}
variable "ssh_private_key_path" {
type = string
}
module "redis" {
source = "../quadlet-app"
wait_on = var.wait_on
server_ip = var.server_ip
ssh_private_key_path = var.ssh_private_key_path
app_name = "redis"
image = "docker.io/redis:7-alpine"
ports = ["6379:6379"]
volumes = ["/opt/storage/data/redis:/data:Z"]
command = ["redis-server", "--appendonly", "yes"]
}
output "app_urls" {
value = module.redis.app_urls
}
output "installed" {
value = true
depends_on = [module.redis.installed]
}

@@ -0,0 +1,36 @@
variable "wait_on" {
type = any
description = "Resources to wait on"
default = true
}
variable "server_ip" {
type = string
}
variable "ssh_private_key_path" {
type = string
}
module "valkey" {
source = "../quadlet-app"
wait_on = var.wait_on
server_ip = var.server_ip
ssh_private_key_path = var.ssh_private_key_path
app_name = "valkey"
image = "docker.io/valkey/valkey:7-alpine"
ports = ["6379:6379"]
volumes = ["/opt/storage/data/valkey:/data:Z"]
command = ["valkey-server", "--appendonly", "yes"]
}
output "app_urls" {
value = module.valkey.app_urls
}
output "installed" {
value = true
depends_on = [module.valkey.installed]
}

@@ -0,0 +1,120 @@
variable "wait_on" {
type = any
description = "Resources to wait on"
default = true
}
variable "server_ip" {
type = string
}
variable "ssh_private_key_path" {
type = string
}
variable "domain" {
type = string
default = "hub.visualworkplace.fourlights.dev"
}
variable "name" {
type = string
default = "visualworkplace-hub"
}
variable "s3_access_key" {
type = string
}
variable "s3_secret_key" {
type = string
}
variable "s3_server" {
type = string
}
variable "valkey_host" {
type = string
default = "systemd-valkey"
}
variable "valkey_db" {
type = number
default = 0
}
module "s3-tenant" {
source = "../minio/tenant"
wait_on = var.wait_on
access_key = var.s3_access_key
secret_key = var.s3_secret_key
server = var.s3_server
name = var.name
}
module "vw-hub" {
source = "../quadlet-app"
wait_on = module.s3-tenant.secret_key
server_ip = var.server_ip
ssh_private_key_path = var.ssh_private_key_path
app_name = var.name
image = "ghcr.io/four-lights-nl/vw-hub:8edae556b9c64fb602b8a54e67c3d06656c4bb9e"
volumes = ["/opt/storage/data/vw-hub:/run/secrets:Z"]
ports = [
"3000:3000",
]
environment = {
NODE_ENV = "production"
LOG_LEVEL = "info"
OTEL_LOG_LEVEL = "info"
HOST = "0.0.0.0"
PORT = "3000"
OAUTH_CLIENT_ID = var.name
OAUTH_CLIENT_SECRET = "OGZ0IDpkWOJXaFQOr6mbIF7.l0rZLvxQDZPEGv6qHLLH/stP5vAIqHLZ2x05uQn9TFQHtsPkRysGM.RpKlWra0"
OAUTH_DOMAIN = "https://${var.domain}"
BASE_URL = "https://${var.domain}"
REDIS_HOST = var.valkey_host
REDIS_DB = var.valkey_db
KEYS_MASTER_KEY = "54dd59c1f1c94795a2b63b074a3943674e964b0225e58b7595762d237d9fdcda"
TOKEN_ENCRYPTION_KEY = "4d15791e50874fbe8af1a8d0fe2605d65bcf44737b7c36d9b2f99ec3367276c5"
ZOHO_CLIENT_ID = "1000.LFYZSCTUJLMUNUUBZX5PMYUXM6HOMP"
ZOHO_CLIENT_SECRET = "07093529734781706356ec4bb8ce7274f1df25cb2e"
ZOHO_REFRESH_TOKEN = "1000.0808eabe967955a24d403eabec6c0aa5.44fbbd0c6e98c476c6bb7bee70317f82"
ZOHO_ACCESS_TOKEN = ""
ZOHO_TOKEN_URI = "https://accounts.zoho.eu/oauth/v2/token"
ZOHO_API_URI = "https://www.zohoapis.eu/crm/v6"
EXACTONLINE_CLIENT_ID = "5c6b0dc4-2e78-4116-89c2-79e6e73356d8"
EXACTONLINE_CLIENT_SECRET = "XMSrmWMZkABv"
EXACTONLINE_WEBHOOK_SECRET = "8vXq0eEHEhEc6iwn"
EXACTONLINE_REDIRECT_URI = "https://${var.domain}/exactonline/callback"
EXACTONLINE_BASE_URL = "https://start.exactonline.nl"
EXACTONLINE_API_BASE = "https://start.exactonline.nl/api/v1/2655637"
EXACTONLINE_AUTHORIZE_PATH = "api/oauth2/auth"
EXACTONLINE_TOKEN_PATH = "api/oauth2/token"
EXACTONLINE_BASE_URI = "https://start.exactonline.nl"
EXACTONLINE_DIVISION = "2655637"
EXACTONLINE_LEAD_SOURCE_ID = "945be231-9588-413e-a6cd-53c190669ea7"
S3_ENDPOINT = var.s3_server
S3_ACCESS_KEY = module.s3-tenant.access_key
S3_SECRET_KEY = module.s3-tenant.secret_key
S3_BUCKET = module.s3-tenant.bucket
}
haproxy_services = [
{
name = var.name
domain = var.domain
port = "3000"
host = "127.0.0.1"
tls = true
}
]
}
output "app_urls" {
value = module.vw-hub.app_urls
}