This commit is contained in:
Thomas Rijpstra 2025-06-18 12:52:38 +02:00
parent 4abf8bc6a9
commit 426f5d9a21
Signed by: thomas
SSH Key Fingerprint: SHA256:au5M4TrfxCxk778HDa1d+VB33vzyetoOvL8zrsDkJt0
18 changed files with 391 additions and 155 deletions

View File

@@ -4,5 +4,6 @@
<file url="file://$PROJECT_DIR$/infra/modules/argocd/values.yaml" dialect="yaml" />
<file url="file://$PROJECT_DIR$/infra/modules/fusionauth/values.yaml" dialect="yaml" />
<file url="file://$PROJECT_DIR$/infra/modules/mongodb/values.yaml" dialect="yaml" />
<file url="file://$PROJECT_DIR$/infra/modules/zot/values.yaml.tftpl" dialect="TFTPL" />
</component>
</project>

View File

@@ -1,11 +1,7 @@
config:
bookmarks:
- Developer:
- Github:
- abbr: GH
href: https://github.com/
services:
widgets:
# show the kubernetes widget, with the cluster summary and individual nodes
- kubernetes:
cluster:
show: true
@@ -18,9 +14,6 @@ config:
cpu: true
memory: true
showLabel: true
- search:
provider: duckduckgo
target: _blank
kubernetes:
mode: cluster
settings:
@@ -36,13 +29,13 @@ enableRbac: true
ingress:
main:
enabled: true
annotations:
# Example annotations to add Homepage to your Homepage!
gethomepage.dev/enabled: "true"
gethomepage.dev/name: "Homepage"
gethomepage.dev/description: "Dynamically Detected Homepage"
gethomepage.dev/group: "Dynamic"
gethomepage.dev/icon: "homepage.png"
#annotations:
# # Example annotations to add Homepage to your Homepage!
# gethomepage.dev/enabled: "true"
# gethomepage.dev/name: "Homepage"
# gethomepage.dev/description: "Dynamically Detected Homepage"
# gethomepage.dev/group: "Dynamic"
# gethomepage.dev/icon: "homepage.png"
hosts:
- host: ${service_uri}
paths:

View File

@@ -6,15 +6,22 @@ resource "random_password" "minio_access_key" {
resource "random_password" "minio_secret_key" {
length = 40
special = true
#override_special = "!#$%&*()-_=+[]{}<>:?"
#min_special = 2
#min_upper = 2
#min_lower = 2
#min_numeric = 2
}
resource "helm_release" "minio" {
name = "minio"
repository = "https://charts.bitnami.com/bitnami"
repository = "oci://registry-1.docker.io/bitnamicharts"
chart = "minio"
namespace = var.namespace
create_namespace = true
version = "14.7.16"
version = "16.0.0"
wait = true
wait_for_jobs = true
set_sensitive {
name = "auth.rootUser"

View File

@@ -1,10 +1,28 @@
resource "minio_s3_bucket" "overlay" {
resource "null_resource" "health_check" {
depends_on = [var.wait_on]
provisioner "local-exec" {
command = <<-EOT
until curl -s -f "https://${var.server}/minio/health/live" || [[ $attempts -ge 10 ]]; do
sleep 10
attempts=$((attempts+1))
done
if [[ $attempts -ge 10 ]]; then
echo "Minio health check failed after maximum attempts"
exit 1
fi
EOT
}
}
resource "minio_s3_bucket" "overlay" {
depends_on = [null_resource.health_check]
bucket = var.name
acl = "private"
}
resource "minio_s3_bucket_policy" "overlay" {
depends_on = [minio_s3_bucket.overlay]
bucket = minio_s3_bucket.overlay.bucket
policy = jsonencode({
"Version" : "2012-10-17",
@@ -20,7 +38,7 @@ resource "minio_s3_bucket_policy" "overlay" {
"s3:GetBucketLocation"
],
"Resource" : [
"arn:aws:s3:::bouwroute"
minio_s3_bucket.overlay.arn
]
},
{
@@ -34,7 +52,7 @@ resource "minio_s3_bucket_policy" "overlay" {
"s3:ListBucket"
],
"Resource" : [
"arn:aws:s3:::bouwroute"
minio_s3_bucket.overlay.arn
],
"Condition" : {
"StringEquals" : {
@@ -55,7 +73,7 @@ resource "minio_s3_bucket_policy" "overlay" {
"s3:GetObject"
],
"Resource" : [
"arn:aws:s3:::bouwroute/**"
"${minio_s3_bucket.overlay.arn}/**"
]
}
]
@@ -63,10 +81,12 @@ resource "minio_s3_bucket_policy" "overlay" {
}
resource "minio_iam_user" "overlay" {
depends_on = [null_resource.health_check]
name = var.name
}
resource "minio_iam_policy" "overlay" {
depends_on = [minio_s3_bucket.overlay]
name = minio_s3_bucket.overlay.bucket
policy = jsonencode({
Version = "2012-10-17"
@@ -74,7 +94,7 @@ resource "minio_iam_policy" "overlay" {
{
Effect = "Allow"
Action = ["s3:ListBucket"]
Resource = ["arn:aws:s3:::${var.name}"]
Resource = [minio_s3_bucket.overlay.arn]
},
{
Effect = "Allow"
@@ -83,7 +103,7 @@ resource "minio_iam_policy" "overlay" {
"s3:PutObject",
"s3:DeleteObject"
]
Resource = ["arn:aws:s3:::${var.name}/*"]
Resource = ["${minio_s3_bucket.overlay.arn}/*"]
}
]
})
@@ -91,11 +111,14 @@ resource "minio_iam_policy" "overlay" {
resource "minio_iam_user_policy_attachment" "overlay" {
depends_on = [minio_iam_user.overlay, minio_iam_policy.overlay]
user_name = minio_iam_user.overlay.id
policy_name = minio_iam_policy.overlay.id
}
resource "minio_iam_service_account" "overlay" {
depends_on = [minio_iam_user.overlay, minio_s3_bucket.overlay]
target_user = minio_iam_user.overlay.name
policy = jsonencode({
Version = "2012-10-17"
@@ -103,7 +126,7 @@ resource "minio_iam_service_account" "overlay" {
{
Effect = "Allow"
Action = ["s3:ListBucket"]
Resource = ["arn:aws:s3:::${var.name}"]
Resource = [minio_s3_bucket.overlay.arn]
},
{
Effect = "Allow"
@@ -112,12 +135,16 @@ resource "minio_iam_service_account" "overlay" {
"s3:PutObject",
"s3:DeleteObject"
]
Resource = ["arn:aws:s3:::${var.name}/*"]
Resource = ["${minio_s3_bucket.overlay.arn}/*"]
}
]
})
}
output "bucket" {
value = var.name
}
output "access_key" {
value = minio_iam_service_account.overlay.access_key
sensitive = true

View File

@@ -2,7 +2,7 @@ terraform {
required_providers {
minio = {
source = "aminueza/minio"
version = "~> 2.5.0"
version = "~> 3.3.0"
}
}
}

View File

@@ -11,11 +11,11 @@ ports:
port: 8000
protocol: TCP
proxyProtocol:
trustedIPs: [127.0.0.1/8,10.0.0.0/8]
trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
forwardedHeaders:
trustedIPs: [127.0.0.1/8,10.0.0.0/8]
trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
transport:
respondingTimouts:
respondingTimeouts:
writeTimeout: 0
idleTimeout: 0
readTimeout: 0
@@ -26,11 +26,11 @@ ports:
port: 8443
protocol: TCP
proxyProtocol:
trustedIPs: [127.0.0.1/8,10.0.0.0/8]
trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
forwardedHeaders:
trustedIPs: [127.0.0.1/8,10.0.0.0/8]
trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
transport:
respondingTimouts:
respondingTimeouts:
writeTimeout: 0
idleTimeout: 0
readTimeout: 0
@@ -41,9 +41,9 @@ ports:
port: 2223
protocol: TCP
proxyProtocol:
trustedIPs: [127.0.0.1/8,10.0.0.0/8]
trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
transport:
respondingTimouts:
respondingTimeouts:
writeTimeout: 600s
idleTimeout: 60s
readTimeout: 600s
@@ -54,9 +54,9 @@ ports:
port: 8993
protocol: TCP
proxyProtocol:
trustedIPs: [127.0.0.1/8,10.0.0.0/8]
trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
transport:
respondingTimouts:
respondingTimeouts:
writeTimeout: 600s
idleTimeout: 300s
readTimeout: 600s
@@ -67,9 +67,9 @@ ports:
port: 8995
protocol: TCP
proxyProtocol:
trustedIPs: [127.0.0.1/8,10.0.0.0/8]
trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
transport:
respondingTimouts:
respondingTimeouts:
writeTimeout: 600s
idleTimeout: 300s
readTimeout: 600s
@@ -80,9 +80,9 @@ ports:
port: 4190
protocol: TCP
proxyProtocol:
trustedIPs: [127.0.0.1/8,10.0.0.0/8]
trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
transport:
respondingTimouts:
respondingTimeouts:
writeTimeout: 600s
idleTimeout: 300s
readTimeout: 600s
@@ -93,7 +93,7 @@ ports:
port: 8025
protocol: TCP
transport:
respondingTimouts:
respondingTimeouts:
writeTimeout: 300s
idleTimeout: 300s
readTimeout: 300s
@@ -104,9 +104,9 @@ ports:
port: 8465
protocol: TCP
proxyProtocol:
trustedIPs: [127.0.0.1/8,10.0.0.0/8]
trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
transport:
respondingTimouts:
respondingTimeouts:
writeTimeout: 300s
idleTimeout: 300s
readTimeout: 300s

View File

@@ -35,7 +35,7 @@ module "zitadel_project_application_ua" {
name = "${ var.name } (Swagger)"
redirect_uris = ["${local.uri}/swagger/oauth2-redirect.html"]
redirect_uris = ["${local.uri}/swagger/oauth2-redirect.html", "${local.uri}/hangfire/signin-oidc", "${local.uri}/signin-oidc"]
post_logout_redirect_uris = [local.uri]
}
@@ -67,6 +67,7 @@ resource "kubernetes_secret" "api" {
data = {
"authority" = local.authority
"audience" = var.project_id
"client_id" = module.zitadel_project_application_api.client_id
"client_secret" = module.zitadel_project_application_api.client_secret
}
@@ -113,6 +114,7 @@ resource "kubernetes_secret" "service-account" {
"audience" = var.project_id
"client_id" = module.zitadel_service_account[count.index].client_id
"client_secret" = module.zitadel_service_account[count.index].client_secret
"scope" = join(" ", concat(["openid", "profile", "urn:zitadel:iam:org:project:id:${var.project_id}:aud"], var.roles))
}
}

View File

@@ -0,0 +1,28 @@
/**
* Sets the roles as an additional claim ("groups") on the token, with the role names as the value.
*
* The role claims of the token look like the following:
*
* // added by the code below
* "groups": ["{roleName}", "{roleName}", ...],
*
* Flow: Complement token, Triggers: Pre Userinfo creation, Pre access token creation
*
* @param ctx
* @param api
*/
function groupsClaim(ctx, api) {
if (ctx.v1.user.grants === undefined || ctx.v1.user.grants.count == 0) {
return;
}
let grants = [];
ctx.v1.user.grants.grants.forEach((claim) => {
claim.roles.forEach((role) => {
grants.push(role);
});
});
api.v1.claims.setClaim("groups", grants);
api.v1.claims.setClaim("scope", grants);
}

View File

@@ -13,6 +13,29 @@ resource "zitadel_org" "default" {
is_default = true
}
// resource "zitadel_action" "groups-claim" {
// org_id = zitadel_org.default.id
// name = "groupsClaim"
// script = templatefile("${path.module}/groupsClaim.action.tftpl", {})
// allowed_to_fail = true
// timeout = "10s"
// }
//
// resource "zitadel_trigger_actions" "groups-claim-pre-user-info" {
// org_id = zitadel_org.default.id
// flow_type = "FLOW_TYPE_CUSTOMISE_TOKEN"
// trigger_type = "TRIGGER_TYPE_PRE_USERINFO_CREATION"
// action_ids = [zitadel_action.groups-claim.id]
// }
//
// resource "zitadel_trigger_actions" "groups-claim-pre-access-token" {
// org_id = zitadel_org.default.id
// flow_type = "FLOW_TYPE_CUSTOMISE_TOKEN"
// trigger_type = "TRIGGER_TYPE_PRE_ACCESS_TOKEN_CREATION"
// action_ids = [zitadel_action.groups-claim.id]
// }
output "org_id" {
value = zitadel_org.default.id
}

View File

@@ -10,7 +10,47 @@ resource "helm_release" "zot" {
]
}
resource "kubernetes_manifest" "traefik_middleware_request_body" {
depends_on = [helm_release.zot]
manifest = {
apiVersion = "traefik.io/v1alpha1"
kind = "Middleware"
metadata = {
name = "request-body"
namespace = "registry"
}
spec = {
buffering = {
maxRequestBodyBytes = 0
}
}
}
}
resource "kubernetes_manifest" "traefik_middleware_request_timeouts" {
depends_on = [helm_release.zot]
manifest = {
apiVersion = "traefik.io/v1alpha1"
kind = "Middleware"
metadata = {
name = "request-timeouts"
namespace = "registry"
}
spec = {
headers = {
customRequestHeaders = {
"X-Forwarded-Timeout-Read" = "3600s"
"X-Forwarded-Timeout-Write" = "3600s"
}
}
}
}
}
output "installed" {
value = true
depends_on = [helm_release.zot]
depends_on = [
kubernetes_manifest.traefik_middleware_request_body, kubernetes_manifest.traefik_middleware_request_timeouts,
helm_release.zot
]
}

View File

@@ -3,9 +3,36 @@ ingress:
className: "traefik"
annotations:
traefik.ingress.kubernetes.io/router.entrypoints: web
traefik.ingress.kubernetes.io/router.middlewares: default-preserve-host-headers@kubernetescrd
traefik.ingress.kubernetes.io/proxy-body-size: "0"
traefik.ingress.kubernetes.io/router.middlewares: registry-request-body@kubernetescrd,registry-request-timeouts@kubernetescrd,default-preserve-host-headers@kubernetescrd
gethomepage.dev/enabled: "true"
gethomepage.dev/name: "Registry"
gethomepage.dev/description: "OCI Registry"
gethomepage.dev/group: "Tools"
gethomepage.dev/icon: "docker.png"
hosts:
- host: ${ service_uri }
paths:
- path: /
persistence: true
pvc:
create: true
name: zot
accessMode: "ReadWriteOnce"
storage: 8Gi
service:
type: ClusterIP
port: 5000
mountConfig: true
configFiles:
config.json: |-
{
"storage": { "rootDirectory": "/var/lib/registry" },
"http": { "address": "0.0.0.0", "port": "5000" },
"log": { "level": "error" },
"extensions": {
"scrub": {
"enable": true,
"interval": "12h"
}
}
}

View File

@@ -3,6 +3,8 @@ locals {
}
resource "kubernetes_namespace" "tenant" {
depends_on = [var.wait_on]
metadata {
name = lower(local.name)
}
@@ -22,6 +24,15 @@ module "bootstrap-zitadel" {
}
// create uploads bucket in minio
module "minio" {
source = "../../modules/minio/tenant"
access_key = var.minio_access_key
secret_key = var.minio_secret_key
server = var.minio_server
name = "365zon"
}
// create minio secret
resource "kubernetes_secret" "storage" {
@@ -31,10 +42,10 @@ resource "kubernetes_secret" "storage" {
}
data = {
Storage__AccountName = var.minio_access_key
Storage__AccountKey = var.minio_secret_key
Storage__BlobUri = var.minio_service_uri
Storage__S3BucketName = "uploads"
Storage__AccountName = module.minio.access_key
Storage__AccountKey = module.minio.secret_key
Storage__BlobUri = var.minio_api_uri
Storage__S3BucketName = module.minio.bucket
}
}
@@ -54,3 +65,17 @@ resource "kubernetes_secret" "connection_strings" {
// next, we need to set-up:
// - the wildcard tls (*.365zon.venus.fourlights.dev)
// - argocd for all relevant apps
//
output "minio_access_key" {
value = module.minio.access_key
sensitive = true
}
output "minio_secret_key" {
value = module.minio.secret_key
sensitive = true
}
output "minio_bucket" {
value = module.minio.bucket
}

View File

@@ -10,7 +10,8 @@ variable "minio_secret_key" {
type = string
sensitive = true
}
variable "minio_service_uri" { type = string }
variable "minio_api_uri" { type = string }
variable "minio_server" { type = string }
variable "mongodb_connection_string" {
type = string
sensitive = true
@@ -19,3 +20,7 @@ variable "rabbitmq_connection_string" {
type = string
sensitive = true
}
variable "wait_on" {
type = any
default = true
}

View File

@@ -18,31 +18,31 @@ module "zitadel_project" {
module "zitadel_project_operator_roles" {
source = "../../../modules/zitadel/project/roles"
wait_on = [module.zitadel_project.installed]
wait_on = module.zitadel_project.installed
org_id = var.org_id
project_id = module.zitadel_project.project_id
group = "Operator"
roles = [
"manage:profiles", "manage:contacts", "manage:addresses", "manage:enquiries", "manage:flowstates",
"manage:flowevents", "manage:files"
"manage:flowevents", "manage:files", "manage:brands"
]
}
module "zitadel_project_configurator_roles" {
source = "../../../modules/zitadel/project/roles"
wait_on = [module.zitadel_project_operator_roles.installed]
source = "../../../modules/zitadel/project/roles"
wait_on = module.zitadel_project_operator_roles.installed
org_id = var.org_id
project_id = module.zitadel_project.project_id
group = "Configurator"
roles = [
"manage:brands", "manage:flows"
"manage:flows"
]
}
module "zitadel_project_developer_roles" {
source = "../../../modules/zitadel/project/roles"
wait_on = [module.zitadel_project_configurator_roles.installed]
source = "../../../modules/zitadel/project/roles"
wait_on = module.zitadel_project_configurator_roles.installed
org_id = var.org_id
project_id = module.zitadel_project.project_id
@@ -54,7 +54,7 @@ module "zitadel_project_developer_roles" {
module "zitadel_project_user_grant" {
source = "../../../modules/zitadel/project/user-grant"
wait_on = [module.zitadel_project_developer_roles.installed]
wait_on = module.zitadel_project_developer_roles.installed
org_id = var.org_id
project_id = module.zitadel_project.project_id
user_id = var.user_id
@@ -66,8 +66,8 @@ module "zitadel_project_user_grant" {
// TODO: Add read roles
module "zitadel_project_application_core" {
source = "../../../modules/zitadel/api-m2m-swagger"
wait_on = [module.zitadel_project_user_grant.installed]
source = "../../../modules/zitadel/api-m2m-swagger"
wait_on = module.zitadel_project_user_grant.installed
org_id = var.org_id
project_id = module.zitadel_project.project_id
@@ -84,8 +84,8 @@ module "zitadel_project_application_core" {
}
module "zitadel_project_application_salesforce" {
source = "../../../modules/zitadel/api-m2m-swagger"
wait_on = [module.zitadel_project_application_core.installed]
source = "../../../modules/zitadel/api-m2m-swagger"
wait_on = module.zitadel_project_application_core.installed
org_id = var.org_id
project_id = module.zitadel_project.project_id
@@ -101,8 +101,8 @@ module "zitadel_project_application_salesforce" {
}
module "zitadel_project_application_external" {
source = "../../../modules/zitadel/api-m2m-swagger"
wait_on = [module.zitadel_project_application_salesforce.installed]
source = "../../../modules/zitadel/api-m2m-swagger"
wait_on = module.zitadel_project_application_salesforce.installed
org_id = var.org_id
project_id = module.zitadel_project.project_id
@@ -118,8 +118,8 @@ module "zitadel_project_application_external" {
}
module "zitadel_project_application_module_internal" {
source = "../../../modules/zitadel/api-m2m-swagger"
wait_on = [module.zitadel_project_application_external.installed]
source = "../../../modules/zitadel/api-m2m-swagger"
wait_on = module.zitadel_project_application_external.installed
org_id = var.org_id
project_id = module.zitadel_project.project_id

Binary file not shown.

View File

@@ -35,12 +35,12 @@ async function executeCommand(
stdout?: "piped" | "inherit" | "null",
stderr?: "piped" | "inherit" | "null",
throwOnError?: boolean
} = {}
} = {stdout: "piped", stderr: "piped", throwOnError: true}
): Promise<{ success: boolean; output?: string; error?: string }> {
const { stdout = "piped", stderr = "piped", throwOnError = true } = options;
const {stdout = "piped", stderr = "piped", throwOnError = true} = options;
log.debug(`Executing: ${cmdArray.join(" ")}`);
try {
// Use Deno.Command API which is the modern replacement for Deno.run
const command = new Deno.Command(cmdArray[0], {
@@ -48,19 +48,19 @@ async function executeCommand(
stdout: stdout === "piped" ? "piped" : stdout === "inherit" ? "inherit" : "null",
stderr: stderr === "piped" ? "piped" : stderr === "inherit" ? "inherit" : "null",
});
const { code, stdout: stdoutOutput, stderr: stderrOutput } = await command.output();
const {code, stdout: stdoutOutput, stderr: stderrOutput} = await command.output();
const stdoutText = stdout === "piped" ? new TextDecoder().decode(stdoutOutput).trim() : "";
const stderrText = stderr === "piped" ? new TextDecoder().decode(stderrOutput).trim() : "";
if (code !== 0) {
log.error(`Failed to ${description}: ${stderrText || "Unknown error"}`);
if (throwOnError) {
throw new Error(`Command failed: ${cmdArray.join(" ")}\n${stderrText}`);
}
}
return {
success: code === 0,
output: stdoutText,
@@ -72,48 +72,48 @@ async function executeCommand(
if (throwOnError) {
throw error;
}
return { success: false, error: errorMessage };
return {success: false, error: errorMessage};
}
}
// Check if VM is ready for SSH connections
async function isVmReadyForSsh(ip: string, user: string, maxAttempts = 30): Promise<boolean> {
async function isVmReadyForSsh(ip: string, user: string, sshKeyPath: string, maxAttempts = 30): Promise<boolean> {
log.info(`Checking if VM at ${ip} is ready for SSH connections...`);
for (let attempt = 1; attempt <= maxAttempts; attempt++) {
log.debug(`SSH readiness check attempt ${attempt}/${maxAttempts}`);
const { success } = await executeCommand(
["ssh", "-o", "StrictHostKeyChecking=no", "-o", "ConnectTimeout=5", `${user}@${ip}`, "echo", "ready"],
const {success} = await executeCommand(
["ssh", "-o", "StrictHostKeyChecking=no", "-o", "ConnectTimeout=5", `${user}@${ip}`, "-i", sshKeyPath, "echo", "ready"],
`check SSH connectivity to ${ip}`,
{ throwOnError: false, stderr: "null" }
{throwOnError: false}
);
if (success) {
log.success(`VM at ${ip} is ready for SSH connections`);
return true;
}
log.debug(`VM at ${ip} not ready yet, waiting...`);
await delay(2000); // Wait 2 seconds between attempts
}
log.error(`VM at ${ip} is not ready for SSH connections after ${maxAttempts} attempts`);
return false;
}
// Check if VM is running
async function isVmRunning(vmName: string): Promise<boolean> {
const { success, output } = await executeCommand(
const {success, output} = await executeCommand(
["incus", "list", vmName, "--format", "json"],
`check if VM ${vmName} is running`,
{ throwOnError: false }
{throwOnError: false}
);
if (!success || !output) {
return false;
}
try {
const vmInfo = JSON.parse(output);
return vmInfo.length > 0 && vmInfo[0].status === "Running";
@@ -127,15 +127,17 @@ async function isVmRunning(vmName: string): Promise<boolean> {
// Cleanup function to handle failures
async function cleanup(vmNames: string[], shouldRemove = false): Promise<void> {
log.info("Starting cleanup process...");
for (const vmName of vmNames) {
// Check if VM exists
const { success, output } = await executeCommand(
const {success, output} = await executeCommand(
["incus", "list", vmName, "--format", "csv"],
`check if VM ${vmName} exists`,
{ throwOnError: false }
{throwOnError: false}
);
if (success && output) {
// Stop VM if it's running
const isRunning = await isVmRunning(vmName);
@@ -144,58 +146,58 @@ async function cleanup(vmNames: string[], shouldRemove = false): Promise<void> {
await executeCommand(
["incus", "stop", vmName, "--force"],
`stop VM ${vmName}`,
{ throwOnError: false }
{throwOnError: false}
);
}
// Remove VM if requested
if (shouldRemove) {
log.info(`Removing VM ${vmName}...`);
await executeCommand(
["incus", "delete", vmName],
`remove VM ${vmName}`,
{ throwOnError: false }
{throwOnError: false}
);
}
}
}
log.success("Cleanup completed");
}
const setupCluster = async (numMasters: number, forceCleanup = false) => {
log.info(`Starting setup of k3s cluster with ${numMasters} master nodes`);
const createdVMs: string[] = [];
try {
// Get hostname and user
const { output: hostname } = await executeCommand(
const {output: hostname} = await executeCommand(
["hostnamectl", "hostname"],
"get hostname"
);
const { output: user } = await executeCommand(
const {output: user} = await executeCommand(
["whoami"],
"get current user"
);
const sshKeyPubFileName = `/home/${user}/.ssh/nl.fourlights.${hostname}.pub`;
const sshKeyPrivateFileName = `/home/${user}/.ssh/nl.fourlights.${hostname}`;
// Check if SSH keys exist
if (!await exists(sshKeyPubFileName) || !await exists(sshKeyPrivateFileName)) {
log.error(`Required SSH keys not found: ${sshKeyPubFileName} or ${sshKeyPrivateFileName}`);
throw new Error("SSH keys not found");
}
// Step 1: Create Low-Resource Profile (if not exists)
const { success: profileExists } = await executeCommand(
const {success: profileExists} = await executeCommand(
["incus", "profile", "show", "low-resource"],
"check if low-resource profile exists",
{ stdout: "null", stderr: "null", throwOnError: false }
{throwOnError: false}
);
if (!profileExists) {
log.info("Creating low-resource profile...");
await executeCommand(
@@ -218,37 +220,37 @@ const setupCluster = async (numMasters: number, forceCleanup = false) => {
} else {
log.skip("Low-resource profile already exists");
}
// Read SSH key
const sshKey = (await Deno.readTextFile(sshKeyPubFileName)).trim();
// Step 3: Launch VMs (if not already running)
for (let i = 1; i <= numMasters; i++) {
const vmName = `k3s-master${i}`;
const { success: vmExists, output: vmOutput } = await executeCommand(
const {success: vmExists, output: vmOutput} = await executeCommand(
["incus", "list", vmName, "--format", "csv"],
`check if VM ${vmName} exists`,
{ throwOnError: false }
{throwOnError: false}
);
if (!vmExists || !vmOutput) {
log.info(`Creating VM ${vmName}...`);
await executeCommand(
["incus", "init", `images:${image}`, vmName, "--profile", "low-resource", "-c", "user.timezone=\"Europe/Amsterdam\"", "-c", `user.ssh_key=\"${sshKey}\"`, ...config],
`initialize VM ${vmName}`
);
await executeCommand(
["incus", "config", 'device', 'add', vmName, 'eth0', 'nic', 'nictype=bridged', 'parent=incusbr0', `ipv4.address=${getIp(i)}`],
`configure network for VM ${vmName}`
);
await executeCommand(
["incus", "start", vmName],
`start VM ${vmName}`
);
createdVMs.push(vmName);
log.success(`VM ${vmName} started`);
} else {
@@ -264,14 +266,14 @@ const setupCluster = async (numMasters: number, forceCleanup = false) => {
log.skip(`VM ${vmName} already exists`);
}
}
// Step 4: Install k3sup (if not installed)
const { success: k3supInstalled } = await executeCommand(
const {success: k3supInstalled} = await executeCommand(
["which", "k3sup"],
"check if k3sup is installed",
{ stdout: "null", stderr: "null", throwOnError: false }
{throwOnError: false}
);
if (!k3supInstalled) {
log.info("Installing k3sup...");
await executeCommand(
@@ -282,19 +284,19 @@ const setupCluster = async (numMasters: number, forceCleanup = false) => {
} else {
log.skip("k3sup already installed");
}
// Step 5: Wait for VMs to be ready
const firstMasterIP = getIp(1);
log.info(`Waiting for first master node (${firstMasterIP}) to be ready...`);
const vmReady = await isVmReadyForSsh(firstMasterIP, "picard");
const vmReady = await isVmReadyForSsh(firstMasterIP, "picard", sshKeyPrivateFileName);
if (!vmReady) {
throw new Error(`First master node at ${firstMasterIP} is not ready for SSH connections`);
}
// Check if kubeconfig exists
const kubeconfigExists = await exists("./kubeconfig");
if (!kubeconfigExists) {
log.info("Bootstrapping first master node...");
await executeCommand(
@@ -305,26 +307,26 @@ const setupCluster = async (numMasters: number, forceCleanup = false) => {
} else {
log.skip("First master node already bootstrapped");
}
// Step 6: Join Additional Master Nodes (if not already joined)
for (let i = 2; i <= numMasters; i++) {
const vmName = `k3s-master${i}`;
const vmIP = getIp(i);
// Wait for VM to be ready
log.info(`Waiting for ${vmName} (${vmIP}) to be ready...`);
const nodeReady = await isVmReadyForSsh(vmIP, "picard");
const nodeReady = await isVmReadyForSsh(vmIP, "picard", sshKeyPrivateFileName);
if (!nodeReady) {
log.warning(`VM ${vmName} is not ready for SSH connections, skipping join operation`);
continue;
}
const { success: joined } = await executeCommand(
const {success: joined} = await executeCommand(
["kubectl", "--kubeconfig=./kubeconfig", "get", "nodes", vmName],
`check if ${vmName} has joined the cluster`,
{ stdout: "null", stderr: "null", throwOnError: false }
{throwOnError: false}
);
if (!joined) {
log.info(`Joining ${vmName} to the cluster...`);
await executeCommand(
@@ -336,33 +338,33 @@ const setupCluster = async (numMasters: number, forceCleanup = false) => {
log.skip(`VM ${vmName} already joined the cluster`);
}
}
log.success("HA k3s cluster setup complete! 🚀");
// Verify cluster status
log.info("Verifying cluster status...");
const { success: clusterVerified, output: nodesOutput } = await executeCommand(
const {success: clusterVerified, output: nodesOutput} = await executeCommand(
["kubectl", "--kubeconfig=./kubeconfig", "get", "nodes", "-o", "wide"],
"verify cluster nodes",
{ throwOnError: false }
{throwOnError: false}
);
if (clusterVerified) {
log.info("Cluster nodes:");
console.log(nodesOutput);
} else {
log.warning("Could not verify cluster status");
}
} catch (error) {
const errorMessage = error instanceof Error ? error.message : String(error);
log.error(`Failed to set up cluster: ${errorMessage}`);
if (createdVMs.length > 0) {
log.warning("An error occurred during setup. Cleaning up created resources...");
await cleanup(createdVMs, forceCleanup);
}
Deno.exit(1);
}
};
@@ -371,7 +373,7 @@ await new Command()
.name("setup-k3s-cluster")
.version("0.1.0")
.description("Automate the setup of an HA k3s cluster using incus and k3sup")
.option("-m, --masters <numMasters:number>", "Number of master nodes", { default: 3 })
.option("-c, --cleanup", "Force cleanup of VMs if setup fails", { default: false })
.action(({ masters, cleanup }) => setupCluster(masters, cleanup))
.option("-m, --masters <numMasters:number>", "Number of master nodes", {default: 3})
.option("-c, --cleanup", "Force cleanup of VMs if setup fails", {default: false})
.action(({masters, cleanup}) => setupCluster(masters, cleanup))
.parse(Deno.args);

View File

@@ -2,13 +2,41 @@ locals {
tld = "fourlights.dev"
cluster_dns = "venus.${local.tld}"
is_installed = true
node_count = 3
node_count = 1
}
module "registry" {
source = "../../infra/modules/zot"
service_uri = "registry.${local.cluster_dns}"
resource "kubernetes_manifest" "traefik-helm-config" {
manifest = {
apiVersion = "helm.cattle.io/v1"
kind = "HelmChartConfig"
metadata = {
name = "traefik"
namespace = "kube-system"
}
spec = {
valuesContent = <<EOF
serversTransport:
forwardingTimeouts:
dialTimeout: 0
responseHeaderTimeout: 0
idleConnTimeout: 0
logs:
general:
level: ERROR
ports:
web:
proxyProtocol:
trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
forwardedHeaders:
trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
transport:
respondingTimeouts:
writeTimeout: 0
idleTimeout: 0
readTimeout: 0
EOF
}
}
}
resource "kubernetes_manifest" "preserve-host-middleware" {
@@ -154,14 +182,17 @@ module "redis" {
module "tenant-365zon" {
source = "../../infra/tenants/365zon"
wait_on = module.minio.installed
org_id = module.zitadel-bootstrap.org_id
user_id = module.zitadel-bootstrap.user_id
domain = module.zitadel.server
jwt_profile_file = module.zitadel.jwt_profile_file
minio_access_key = module.minio.minio_access_key
minio_secret_key = module.minio.minio_secret_key
minio_service_uri = module.minio.minio_api_uri
minio_access_key = module.minio.minio_access_key
minio_secret_key = module.minio.minio_secret_key
minio_server = module.minio.minio_server
minio_api_uri = module.minio.minio_api_uri
mongodb_connection_string = module.mongodb.connection_string
rabbitmq_connection_string = module.rabbitmq.connection_string
@@ -209,3 +240,28 @@ output "mongodb-connection-string" {
value = module.mongodb.connection_string
sensitive = true
}
output "rabbitmq-connection-string" {
value = module.rabbitmq.connection_string
sensitive = true
}
output "minio-access-key" {
value = module.tenant-365zon.minio_access_key
sensitive = true
}
output "minio-secret-key" {
value = module.tenant-365zon.minio_secret_key
sensitive = true
}
output "minio-root-access-key" {
value = module.minio.minio_access_key
sensitive = true
}
output "minio-root-secret-key" {
value = module.minio.minio_secret_key
sensitive = true
}

View File

@@ -1 +1 @@
{"type":"serviceaccount","keyId":"310142761184133898","key":"-----BEGIN RSA PRIVATE KEY-----\nMIIEowIBAAKCAQEApSaCjkOBVIe33bEIwENq1jGj6MgbN+NqYRN6EVDWVnESM10/\n188hB9UDCvNR5kcBbaz2bD/ymZ/ppWSLqmXONwc3/PtiOluBfkvR1q2pEh+f13wz\n72dbhOVqf+YhL3lndiQ/OupGzaCbmsBNPGag7mgaPxlgoWTWIItPnOomIhwtwXgy\nNyzt9Fmyh/4JsRlIYO90ZO32vKXABRMCGsKxvcY9CR4+LIqddns83YASGFnQ5oBo\nObc8EN2Di7uKWzNwxUJuZtFlHXp06su2mWDGJhKusHYW4KUIs2uwFtjJfAXG/adT\n8qVgi174m1jU2ocSd6o9IqDYf50arCinbgtAdwIDAQABAoIBABwrB1WQefya8Wdk\njKOOXCiQau6HQu0zYq+QDN/rM8OmoX4VR5Bdibq2QECb47otHjdAqv8noQ9G0Ske\njxvPJW8JUilaDxT5CosqD25YTGAE+NReINWSgW+XWaTa8YoRYO4rnIVF9DGaVS/9\n4K6OqqA/LUrZ3ztn4YXHfRq8bSif86GMo1GkwH8xOMJHdaxCs8YzAbpGURL03QtL\nemVNs9VwSWLmnK71FpXkko0aGi14naS7E4jv8uutykLQsc+QE7m9B4OiDkijKCP9\nQwvw/3RZYcrRuWz7uSANyxG4Uc8JhPdUIyvpkvUz8NfRLTDoSAEq1NQuxpyjLYYU\n7uzYcWECgYEAzKZ5wGTJBZafen2I61L8XAMk2df63nnEK+YuZqNZ6yH6IY7cCrlJ\n3LbeNoHNcGMXw1mf9Z9vvAjz7nbec2BYN1KRMR9QOTHcqwQZcOOJnwhdO4uAlsFZ\ngiyoLYCQP8Z6IIC4ht+2hmf8hS3CmWUPAXyLOcg4ok6SRdyNsfWiLwkCgYEAzpbL\n8szYqNY+r5n1DQ9d6zNb2cbkFfzZDxn64BA1xQZtRgxfzNAOvsGl5pPWve7oS/8Y\nmPx+1b08NvCcTuaow7CCw+IDHsI43TRNbvPQBWtINBE6eeBs3laaNvmxTZU5HGog\nt1yRtk0u64hKT7+L7Ku5JP79pxzNOIs1hnImU38CgYAaH84+/x6iNf4Ztti5oZhR\nbp1PqcB+kfC24eVeeM/LskSp8ACq5chGApoPPzaoeB3adCB1TGsJB+OLt2TiOZRJ\nS6L5MFQfWPwgYJ+Wx5UT1g+AwGgj1n7EnUrCtDy1x3Jjn8rufLRiJ/gWUCcdScdG\nm01yjNqd7YXCoUr9Qqv3cQKBgGd2klHZUbDNC7v6SQXvakP/BsM8nsJ8TWEIy+In\nfCZen59zVw9GK/xRE3s1E1kwK1rUOUd1PThie6OwQTgqwN6wqezcZl+jOcNfDGDC\n7q2oGxMohbbANQXtLXLW/nsyftXCOPxb+gXpBdSj/0ONVNCE+EaVBggJnqXw4i+h\nP5yVAoGBAIoXRgX3mSBsC/xgKIXQb4c9WT7W78IOpU43mbX9jC/emfLkOvuxR/Cv\nmJDgTv2zUq7uItbvXmxwmU7JVYlBFaWERsAqzzWUUsdfM3tBFdBbcH9fzoEG0j4u\nkqCwU1if6HTHCmunqt1ZQKN3oP1Uycn/1ZL6NR8ilqIcjCzh4JPQ\n-----END RSA PRIVATE KEY-----\n","expirationDate":"2026-01-01T00:00:00Z","userId":"310142761184068362"}
{"type":"serviceaccount","keyId":"313768085818048552","key":"-----BEGIN RSA PRIVATE KEY-----\nMIIEowIBAAKCAQEAtdNVdOOD80x5NDMusGpdfWvo91N9MnOETO0RLhhyrSRyO6vo\nfxKD68nKK/SwUpq8dl9vNCzBFqOsbz2tRPp3jeV+6YdMwGgnQQxEVpOfRftd0718\nycQcaWIauuU3xuyA+rj74CWjlg+R9b5dWbo2p/2Ey+ygO60LWrRhDWnNslRpbPco\nrw2StHnPqpORQLn0Oj7qyns6lSeq20hwWJu9+IbTYrWPA0HnO7WZZBUgukX0prai\n4edAZJfwoo7UJKH6ZETvQbCVuA8UMHqLbUB0U4se9+d+OuotoghVCMH3HAcVh/7A\ndQvTVa6ix6DiXjtqtQytt1+fRL7bCcntRn1kyQIDAQABAoIBAC3wPRERQo8/7QeB\nPvSXixNbJjsGvwT2JqEA7GxHBQI1yR7GajFgzi/OhePhKINWUPNfXUtDW22K4NAi\nNxrMZVRWfWAnLP8X0YMfxExTc9RMlAIhR9v6TmtZvAMoUpVRv6yY/Bo/qDsLqAb8\nl71JzPFYniqfmEQ7jjjWhgbLiorZVyZsRSBuaTYIqJccbq/zZ/O+D3xXdSEwbOri\nxPhqjsWQz6q3jxcc3FAAmzxEMwFBwx8pbocVUyCn43LifLjuXk831SMg6l9Q5mCd\nEi7UYXkZzcPtrdo3mg682FEsSna7VFUlBBl/kEXdSvuGRMZZfEYsx8TrI524sDe7\nPxN2LgECgYEAwHjNlgBox3kMg6uO5USQo5CzCHMOQhsIITwax353LPrsuhYssFUS\nvMyrSlZDvw87xuZEVEhUDuvMQlUOcCvNjHvstt1HPB3hku4KlZrgRUBmgsVGi2sG\nlAxczrp/1qenVU3z1Y2u6DCTM3ASPyb4smYvLDRgm1aTD8VY2pAfaaECgYEA8dbz\nUAEeagH6Au6VQ6404ZrjrrokzsjjLUzuhRah/zlpKWqaOEfldSCEHu0sZSPG20T/\nt2KhKqNzpTsSv6H0QmhD3k1/b42Sr2bu8WbKbQTOeuY64TA341PS7vGzVh5iXN5H\nAo0D5hUoiFhPlXI5Xzpo5sDy8pnX18DREnwnOikCgYBouNHTDcH01m3yrkN/hwDT\nngVrUX6uhRq1SoifhrW9KYHn9ey2vHMHeqywM6OErvstS1heK0RhIfbvGGxUp+Cc\n0UiIbnk1wmRbl1z27V+dDl84Q7IQZVkc8GUGrf6kgm/PQCytQvuppdRRpmanKcMi\n/eoouQ7fNgmqCQxBTy1oQQKBgAoFuneqVDRYeJ/+ezke0xo2bREkrbnUIXYTJh9l\n3LjTDESnIlUKxbug6VjOw3Q9k1Qq+94BvGQj+frzA8flUlYeNBHWbF2XJGkYfvaK\nKAF0nYoCWJZUhTxqkOdOJPyArdrja1XzuiGi5tcfscyjuzTzGr3VaTwcnBFY8FEv\nzABhAoGBAJDlWEJYN94VWkbYpA0ak4CopZONaK3zo+LPnd8RteRF1Bb7nbVLxoWK\n3JyRdjKBGgZHjJVjeVvfHZd2RcH/toKsJ8Oj3ImfMFLlBmx6C8nzLVvIKCjmGpRV\nVISNo5nUrEn9/9vghSlWTSiV/jDm4ExPqn4am6xNyscjNo8aPiNo\n-----END RSA PRIVATE KEY-----\n","expirationDate":"2026-01-01T00:00:00Z","userId":"313768085817983016"}