diff --git a/.idea/templateLanguages.xml b/.idea/templateLanguages.xml
index 8a69eb3..fa124ce 100644
--- a/.idea/templateLanguages.xml
+++ b/.idea/templateLanguages.xml
@@ -4,5 +4,6 @@
+
\ No newline at end of file
diff --git a/infra/modules/homepage/values.yaml.tftpl b/infra/modules/homepage/values.yaml.tftpl
index 9b86d45..a784e46 100644
--- a/infra/modules/homepage/values.yaml.tftpl
+++ b/infra/modules/homepage/values.yaml.tftpl
@@ -1,11 +1,7 @@
config:
bookmarks:
- - Developer:
- - Github:
- - abbr: GH
- href: https://github.com/
+ services:
widgets:
- # show the kubernetes widget, with the cluster summary and individual nodes
- kubernetes:
cluster:
show: true
@@ -18,9 +14,6 @@ config:
cpu: true
memory: true
showLabel: true
- - search:
- provider: duckduckgo
- target: _blank
kubernetes:
mode: cluster
settings:
@@ -36,13 +29,13 @@ enableRbac: true
ingress:
main:
enabled: true
- annotations:
- # Example annotations to add Homepage to your Homepage!
- gethomepage.dev/enabled: "true"
- gethomepage.dev/name: "Homepage"
- gethomepage.dev/description: "Dynamically Detected Homepage"
- gethomepage.dev/group: "Dynamic"
- gethomepage.dev/icon: "homepage.png"
+ #annotations:
+ # # Example annotations to add Homepage to your Homepage!
+ # gethomepage.dev/enabled: "true"
+ # gethomepage.dev/name: "Homepage"
+ # gethomepage.dev/description: "Dynamically Detected Homepage"
+ # gethomepage.dev/group: "Dynamic"
+ # gethomepage.dev/icon: "homepage.png"
hosts:
- host: ${service_uri}
paths:
diff --git a/infra/modules/minio/main.tf b/infra/modules/minio/main.tf
index cf2be2f..4b4a3ae 100644
--- a/infra/modules/minio/main.tf
+++ b/infra/modules/minio/main.tf
@@ -6,15 +6,22 @@ resource "random_password" "minio_access_key" {
resource "random_password" "minio_secret_key" {
length = 40
special = true
+ #override_special = "!#$%&*()-_=+[]{}<>:?"
+ #min_special = 2
+ #min_upper = 2
+ #min_lower = 2
+ #min_numeric = 2
}
resource "helm_release" "minio" {
name = "minio"
- repository = "https://charts.bitnami.com/bitnami"
+ repository = "oci://registry-1.docker.io/bitnamicharts"
chart = "minio"
namespace = var.namespace
create_namespace = true
- version = "14.7.16"
+ version = "16.0.0"
+ wait = true
+ wait_for_jobs = true
set_sensitive {
name = "auth.rootUser"
diff --git a/infra/modules/minio/tenant/main.tf b/infra/modules/minio/tenant/main.tf
index b28e7e9..d30b941 100644
--- a/infra/modules/minio/tenant/main.tf
+++ b/infra/modules/minio/tenant/main.tf
@@ -1,10 +1,28 @@
-resource "minio_s3_bucket" "overlay" {
+resource "null_resource" "health_check" {
depends_on = [var.wait_on]
+
+ provisioner "local-exec" {
+ command = <<-EOT
+ attempts=0
+ until curl -s -f "https://${var.server}/minio/health/live" || [ "$attempts" -ge 10 ]; do
+ sleep 10
+ attempts=$((attempts+1))
+ done
+ if [ "$attempts" -ge 10 ]; then
+ echo "Minio health check failed after maximum attempts"
+ exit 1
+ fi
+ EOT
+ }
+}
+
+resource "minio_s3_bucket" "overlay" {
+ depends_on = [null_resource.health_check]
bucket = var.name
acl = "private"
}
resource "minio_s3_bucket_policy" "overlay" {
+ depends_on = [minio_s3_bucket.overlay]
bucket = minio_s3_bucket.overlay.bucket
policy = jsonencode({
"Version" : "2012-10-17",
@@ -20,7 +38,7 @@ resource "minio_s3_bucket_policy" "overlay" {
"s3:GetBucketLocation"
],
"Resource" : [
- "arn:aws:s3:::bouwroute"
+ minio_s3_bucket.overlay.arn
]
},
{
@@ -34,7 +52,7 @@ resource "minio_s3_bucket_policy" "overlay" {
"s3:ListBucket"
],
"Resource" : [
- "arn:aws:s3:::bouwroute"
+ minio_s3_bucket.overlay.arn
],
"Condition" : {
"StringEquals" : {
@@ -55,7 +73,7 @@ resource "minio_s3_bucket_policy" "overlay" {
"s3:GetObject"
],
"Resource" : [
- "arn:aws:s3:::bouwroute/**"
+ "${minio_s3_bucket.overlay.arn}/**"
]
}
]
@@ -63,10 +81,12 @@ resource "minio_s3_bucket_policy" "overlay" {
}
resource "minio_iam_user" "overlay" {
+ depends_on = [null_resource.health_check]
name = var.name
}
resource "minio_iam_policy" "overlay" {
+ depends_on = [minio_s3_bucket.overlay]
name = minio_s3_bucket.overlay.bucket
policy = jsonencode({
Version = "2012-10-17"
@@ -74,7 +94,7 @@ resource "minio_iam_policy" "overlay" {
{
Effect = "Allow"
Action = ["s3:ListBucket"]
- Resource = ["arn:aws:s3:::${var.name}"]
+ Resource = [minio_s3_bucket.overlay.arn]
},
{
Effect = "Allow"
@@ -83,7 +103,7 @@ resource "minio_iam_policy" "overlay" {
"s3:PutObject",
"s3:DeleteObject"
]
- Resource = ["arn:aws:s3:::${var.name}/*"]
+ Resource = ["${minio_s3_bucket.overlay.arn}/*"]
}
]
})
@@ -91,11 +111,14 @@ resource "minio_iam_policy" "overlay" {
resource "minio_iam_user_policy_attachment" "overlay" {
+ depends_on = [minio_iam_user.overlay, minio_iam_policy.overlay]
+
user_name = minio_iam_user.overlay.id
policy_name = minio_iam_policy.overlay.id
}
resource "minio_iam_service_account" "overlay" {
+ depends_on = [minio_iam_user.overlay, minio_s3_bucket.overlay]
target_user = minio_iam_user.overlay.name
policy = jsonencode({
Version = "2012-10-17"
@@ -103,7 +126,7 @@ resource "minio_iam_service_account" "overlay" {
{
Effect = "Allow"
Action = ["s3:ListBucket"]
- Resource = ["arn:aws:s3:::${var.name}"]
+ Resource = [minio_s3_bucket.overlay.arn]
},
{
Effect = "Allow"
@@ -112,12 +135,16 @@ resource "minio_iam_service_account" "overlay" {
"s3:PutObject",
"s3:DeleteObject"
]
- Resource = ["arn:aws:s3:::${var.name}/*"]
+ Resource = ["${minio_s3_bucket.overlay.arn}/*"]
}
]
})
}
+output "bucket" {
+ value = var.name
+}
+
output "access_key" {
value = minio_iam_service_account.overlay.access_key
sensitive = true
diff --git a/infra/modules/minio/tenant/providers.tf b/infra/modules/minio/tenant/providers.tf
index 58e0984..6073068 100644
--- a/infra/modules/minio/tenant/providers.tf
+++ b/infra/modules/minio/tenant/providers.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
minio = {
source = "aminueza/minio"
- version = "~> 2.5.0"
+ version = "~> 3.3.0"
}
}
}
diff --git a/infra/modules/traefik/traefik-values.yaml b/infra/modules/traefik/traefik-values.yaml
index a97e673..fe8ff19 100644
--- a/infra/modules/traefik/traefik-values.yaml
+++ b/infra/modules/traefik/traefik-values.yaml
@@ -11,11 +11,11 @@ ports:
port: 8000
protocol: TCP
proxyProtocol:
- trustedIPs: [127.0.0.1/8,10.0.0.0/8]
+ trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
forwardedHeaders:
- trustedIPs: [127.0.0.1/8,10.0.0.0/8]
+ trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
transport:
- respondingTimouts:
+ respondingTimeouts:
writeTimeout: 0
idleTimeout: 0
readTimeout: 0
@@ -26,11 +26,11 @@ ports:
port: 8443
protocol: TCP
proxyProtocol:
- trustedIPs: [127.0.0.1/8,10.0.0.0/8]
+ trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
forwardedHeaders:
- trustedIPs: [127.0.0.1/8,10.0.0.0/8]
+ trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
transport:
- respondingTimouts:
+ respondingTimeouts:
writeTimeout: 0
idleTimeout: 0
readTimeout: 0
@@ -41,9 +41,9 @@ ports:
port: 2223
protocol: TCP
proxyProtocol:
- trustedIPs: [127.0.0.1/8,10.0.0.0/8]
+ trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
transport:
- respondingTimouts:
+ respondingTimeouts:
writeTimeout: 600s
idleTimeout: 60s
readTimeout: 600s
@@ -54,9 +54,9 @@ ports:
port: 8993
protocol: TCP
proxyProtocol:
- trustedIPs: [127.0.0.1/8,10.0.0.0/8]
+ trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
transport:
- respondingTimouts:
+ respondingTimeouts:
writeTimeout: 600s
idleTimeout: 300s
readTimeout: 600s
@@ -67,9 +67,9 @@ ports:
port: 8995
protocol: TCP
proxyProtocol:
- trustedIPs: [127.0.0.1/8,10.0.0.0/8]
+ trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
transport:
- respondingTimouts:
+ respondingTimeouts:
writeTimeout: 600s
idleTimeout: 300s
readTimeout: 600s
@@ -80,9 +80,9 @@ ports:
port: 4190
protocol: TCP
proxyProtocol:
- trustedIPs: [127.0.0.1/8,10.0.0.0/8]
+ trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
transport:
- respondingTimouts:
+ respondingTimeouts:
writeTimeout: 600s
idleTimeout: 300s
readTimeout: 600s
@@ -93,7 +93,7 @@ ports:
port: 8025
protocol: TCP
transport:
- respondingTimouts:
+ respondingTimeouts:
writeTimeout: 300s
idleTimeout: 300s
readTimeout: 300s
@@ -104,9 +104,9 @@ ports:
port: 8465
protocol: TCP
proxyProtocol:
- trustedIPs: [127.0.0.1/8,10.0.0.0/8]
+ trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
transport:
- respondingTimouts:
+ respondingTimeouts:
writeTimeout: 300s
idleTimeout: 300s
readTimeout: 300s
diff --git a/infra/modules/zitadel/api-m2m-swagger/main.tf b/infra/modules/zitadel/api-m2m-swagger/main.tf
index d16c8a3..765b3c5 100644
--- a/infra/modules/zitadel/api-m2m-swagger/main.tf
+++ b/infra/modules/zitadel/api-m2m-swagger/main.tf
@@ -35,7 +35,7 @@ module "zitadel_project_application_ua" {
name = "${ var.name } (Swagger)"
- redirect_uris = ["${local.uri}/swagger/oauth2-redirect.html"]
+ redirect_uris = ["${local.uri}/swagger/oauth2-redirect.html", "${local.uri}/hangfire/signin-oidc", "${local.uri}/signin-oidc"]
post_logout_redirect_uris = [local.uri]
}
@@ -67,6 +67,7 @@ resource "kubernetes_secret" "api" {
data = {
"authority" = local.authority
+ "audience" = var.project_id
"client_id" = module.zitadel_project_application_api.client_id
"client_secret" = module.zitadel_project_application_api.client_secret
}
@@ -113,6 +114,7 @@ resource "kubernetes_secret" "service-account" {
"audience" = var.project_id
"client_id" = module.zitadel_service_account[count.index].client_id
"client_secret" = module.zitadel_service_account[count.index].client_secret
+ "scope" = join(" ", concat(["openid", "profile", "urn:zitadel:iam:org:project:id:${var.project_id}:aud"], var.roles))
}
}
diff --git a/infra/modules/zitadel/tenant/groupsClaim.action.tftpl b/infra/modules/zitadel/tenant/groupsClaim.action.tftpl
new file mode 100644
index 0000000..5aa9094
--- /dev/null
+++ b/infra/modules/zitadel/tenant/groupsClaim.action.tftpl
@@ -0,0 +1,28 @@
+/**
+ * Sets the user's granted roles as an additional "groups" claim in the token.
+ *
+ * The role claims of the token look like the following:
+ *
+ * // added by the code below
+ * "groups": ["{roleName}", "{roleName}", ...],
+ *
+ * Flow: Complement token, Triggers: Pre Userinfo creation, Pre access token creation
+ *
+ * @param ctx
+ * @param api
+ */
+function groupsClaim(ctx, api) {
+ if (ctx.v1.user.grants === undefined || ctx.v1.user.grants.count == 0) {
+ return;
+ }
+
+ let grants = [];
+ ctx.v1.user.grants.grants.forEach((claim) => {
+ claim.roles.forEach((role) => {
+ grants.push(role);
+ });
+ });
+
+ api.v1.claims.setClaim("groups", grants);
+ api.v1.claims.setClaim("scope", grants);
+}
diff --git a/infra/modules/zitadel/tenant/main.tf b/infra/modules/zitadel/tenant/main.tf
index 6200890..052f2ee 100644
--- a/infra/modules/zitadel/tenant/main.tf
+++ b/infra/modules/zitadel/tenant/main.tf
@@ -13,6 +13,29 @@ resource "zitadel_org" "default" {
is_default = true
}
+// resource "zitadel_action" "groups-claim" {
+// org_id = zitadel_org.default.id
+// name = "groupsClaim"
+// script = templatefile("${path.module}/groupsClaim.action.tftpl", {})
+// allowed_to_fail = true
+// timeout = "10s"
+// }
+//
+// resource "zitadel_trigger_actions" "groups-claim-pre-user-info" {
+// org_id = zitadel_org.default.id
+// flow_type = "FLOW_TYPE_CUSTOMISE_TOKEN"
+// trigger_type = "TRIGGER_TYPE_PRE_USERINFO_CREATION"
+// action_ids = [zitadel_action.groups-claim.id]
+// }
+//
+// resource "zitadel_trigger_actions" "groups-claim-pre-access-token" {
+// org_id = zitadel_org.default.id
+// flow_type = "FLOW_TYPE_CUSTOMISE_TOKEN"
+// trigger_type = "TRIGGER_TYPE_PRE_ACCESS_TOKEN_CREATION"
+// action_ids = [zitadel_action.groups-claim.id]
+// }
+
+
output "org_id" {
value = zitadel_org.default.id
}
diff --git a/infra/modules/zot/main.tf b/infra/modules/zot/main.tf
index 4ee84b3..5c33a5f 100644
--- a/infra/modules/zot/main.tf
+++ b/infra/modules/zot/main.tf
@@ -10,7 +10,47 @@ resource "helm_release" "zot" {
]
}
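+# Buffering middleware for the registry ingress; maxRequestBodyBytes = 0 leaves the request body size unlimited so large image layers can be pushed.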
+resource "kubernetes_manifest" "traefik_middleware_request_body" {
+ depends_on = [helm_release.zot]
+ manifest = {
+ apiVersion = "traefik.io/v1alpha1"
+ kind = "Middleware"
+ metadata = {
+ name = "request-body"
+ namespace = "registry"
+ }
+ spec = {
+ buffering = {
+ maxRequestBodyBytes = 0
+ }
+ }
+ }
+}
+
+resource "kubernetes_manifest" "traefik_middleware_request_timeouts" {
+ depends_on = [helm_release.zot]
+ manifest = {
+ apiVersion = "traefik.io/v1alpha1"
+ kind = "Middleware"
+ metadata = {
+ name = "request-timeouts"
+ namespace = "registry"
+ }
+ spec = {
+ headers = {
+ customRequestHeaders = {
+ "X-Forwarded-Timeout-Read" = "3600s"
+ "X-Forwarded-Timeout-Write" = "3600s"
+ }
+ }
+ }
+ }
+}
+
output "installed" {
value = true
- depends_on = [helm_release.zot]
+ depends_on = [
+ kubernetes_manifest.traefik_middleware_request_body, kubernetes_manifest.traefik_middleware_request_timeouts,
+ helm_release.zot
+ ]
}
diff --git a/infra/modules/zot/values.yaml.tftpl b/infra/modules/zot/values.yaml.tftpl
index 9183d3c..51b0f5e 100644
--- a/infra/modules/zot/values.yaml.tftpl
+++ b/infra/modules/zot/values.yaml.tftpl
@@ -3,9 +3,36 @@ ingress:
className: "traefik"
annotations:
traefik.ingress.kubernetes.io/router.entrypoints: web
- traefik.ingress.kubernetes.io/router.middlewares: default-preserve-host-headers@kubernetescrd
- traefik.ingress.kubernetes.io/proxy-body-size: "0"
+ traefik.ingress.kubernetes.io/router.middlewares: registry-request-body@kubernetescrd,registry-request-timeouts@kubernetescrd,default-preserve-host-headers@kubernetescrd
+ gethomepage.dev/enabled: "true"
+ gethomepage.dev/name: "Registry"
+ gethomepage.dev/description: "OCI Registry"
+ gethomepage.dev/group: "Tools"
+ gethomepage.dev/icon: "docker.png"
hosts:
- host: ${ service_uri }
paths:
- path: /
+persistence: true
+pvc:
+ create: true
+ name: zot
+ accessMode: "ReadWriteOnce"
+ storage: 8Gi
+service:
+ type: ClusterIP
+ port: 5000
+mountConfig: true
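+# Inline zot config: keep image data under /var/lib/registry and run the scrub extension every 12h to check blob integrity.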
+configFiles:
+ config.json: |-
+ {
+ "storage": { "rootDirectory": "/var/lib/registry" },
+ "http": { "address": "0.0.0.0", "port": "5000" },
+ "log": { "level": "error" },
+ "extensions": {
+ "scrub": {
+ "enable": true,
+ "interval": "12h"
+ }
+ }
+ }
diff --git a/infra/tenants/365zon/main.tf b/infra/tenants/365zon/main.tf
index cb8eb99..1d34ea1 100644
--- a/infra/tenants/365zon/main.tf
+++ b/infra/tenants/365zon/main.tf
@@ -3,6 +3,8 @@ locals {
}
resource "kubernetes_namespace" "tenant" {
+ depends_on = [var.wait_on]
+
metadata {
name = lower(local.name)
}
@@ -22,6 +24,15 @@ module "bootstrap-zitadel" {
}
// create uploads bucket in minio
+module "minio" {
+ source = "../../modules/minio/tenant"
+
+ access_key = var.minio_access_key
+ secret_key = var.minio_secret_key
+ server = var.minio_server
+
+ name = "365zon"
+}
// create minio secret
resource "kubernetes_secret" "storage" {
@@ -31,10 +42,10 @@ resource "kubernetes_secret" "storage" {
}
data = {
- Storage__AccountName = var.minio_access_key
- Storage__AccountKey = var.minio_secret_key
- Storage__BlobUri = var.minio_service_uri
- Storage__S3BucketName = "uploads"
+ Storage__AccountName = module.minio.access_key
+ Storage__AccountKey = module.minio.secret_key
+ Storage__BlobUri = var.minio_api_uri
+ Storage__S3BucketName = module.minio.bucket
}
}
@@ -54,3 +65,17 @@ resource "kubernetes_secret" "connection_strings" {
// next, we need to set-up:
// - the wildcard tls (*.365zon.venus.fourlights.dev)
// - argocd for all relevant apps
+//
+output "minio_access_key" {
+ value = module.minio.access_key
+ sensitive = true
+}
+
+output "minio_secret_key" {
+ value = module.minio.secret_key
+ sensitive = true
+}
+
+output "minio_bucket" {
+ value = module.minio.bucket
+}
diff --git a/infra/tenants/365zon/variables.tf b/infra/tenants/365zon/variables.tf
index e6b03f3..2c44cb2 100644
--- a/infra/tenants/365zon/variables.tf
+++ b/infra/tenants/365zon/variables.tf
@@ -10,7 +10,8 @@ variable "minio_secret_key" {
type = string
sensitive = true
}
-variable "minio_service_uri" { type = string }
+variable "minio_api_uri" { type = string }
+variable "minio_server" { type = string }
variable "mongodb_connection_string" {
type = string
sensitive = true
@@ -19,3 +20,7 @@ variable "rabbitmq_connection_string" {
type = string
sensitive = true
}
+variable "wait_on" {
+ type = any
+ default = true
+}
diff --git a/infra/tenants/365zon/zitadel/main.tf b/infra/tenants/365zon/zitadel/main.tf
index 395263e..6274c71 100644
--- a/infra/tenants/365zon/zitadel/main.tf
+++ b/infra/tenants/365zon/zitadel/main.tf
@@ -18,31 +18,31 @@ module "zitadel_project" {
module "zitadel_project_operator_roles" {
source = "../../../modules/zitadel/project/roles"
- wait_on = [module.zitadel_project.installed]
+ wait_on = module.zitadel_project.installed
org_id = var.org_id
project_id = module.zitadel_project.project_id
group = "Operator"
roles = [
"manage:profiles", "manage:contacts", "manage:addresses", "manage:enquiries", "manage:flowstates",
- "manage:flowevents", "manage:files"
+ "manage:flowevents", "manage:files", "manage:brands"
]
}
module "zitadel_project_configurator_roles" {
- source = "../../../modules/zitadel/project/roles"
- wait_on = [module.zitadel_project_operator_roles.installed]
+ source = "../../../modules/zitadel/project/roles"
+ wait_on = module.zitadel_project_operator_roles.installed
org_id = var.org_id
project_id = module.zitadel_project.project_id
group = "Configurator"
roles = [
- "manage:brands", "manage:flows"
+ "manage:flows"
]
}
module "zitadel_project_developer_roles" {
- source = "../../../modules/zitadel/project/roles"
- wait_on = [module.zitadel_project_configurator_roles.installed]
+ source = "../../../modules/zitadel/project/roles"
+ wait_on = module.zitadel_project_configurator_roles.installed
org_id = var.org_id
project_id = module.zitadel_project.project_id
@@ -54,7 +54,7 @@ module "zitadel_project_developer_roles" {
module "zitadel_project_user_grant" {
source = "../../../modules/zitadel/project/user-grant"
- wait_on = [module.zitadel_project_developer_roles.installed]
+ wait_on = module.zitadel_project_developer_roles.installed
org_id = var.org_id
project_id = module.zitadel_project.project_id
user_id = var.user_id
@@ -66,8 +66,8 @@ module "zitadel_project_user_grant" {
// TODO: Add read roles
module "zitadel_project_application_core" {
- source = "../../../modules/zitadel/api-m2m-swagger"
- wait_on = [module.zitadel_project_user_grant.installed]
+ source = "../../../modules/zitadel/api-m2m-swagger"
+ wait_on = module.zitadel_project_user_grant.installed
org_id = var.org_id
project_id = module.zitadel_project.project_id
@@ -84,8 +84,8 @@ module "zitadel_project_application_core" {
}
module "zitadel_project_application_salesforce" {
- source = "../../../modules/zitadel/api-m2m-swagger"
- wait_on = [module.zitadel_project_application_core.installed]
+ source = "../../../modules/zitadel/api-m2m-swagger"
+ wait_on = module.zitadel_project_application_core.installed
org_id = var.org_id
project_id = module.zitadel_project.project_id
@@ -101,8 +101,8 @@ module "zitadel_project_application_salesforce" {
}
module "zitadel_project_application_external" {
- source = "../../../modules/zitadel/api-m2m-swagger"
- wait_on = [module.zitadel_project_application_salesforce.installed]
+ source = "../../../modules/zitadel/api-m2m-swagger"
+ wait_on = module.zitadel_project_application_salesforce.installed
org_id = var.org_id
project_id = module.zitadel_project.project_id
@@ -118,8 +118,8 @@ module "zitadel_project_application_external" {
}
module "zitadel_project_application_module_internal" {
- source = "../../../modules/zitadel/api-m2m-swagger"
- wait_on = [module.zitadel_project_application_external.installed]
+ source = "../../../modules/zitadel/api-m2m-swagger"
+ wait_on = module.zitadel_project_application_external.installed
org_id = var.org_id
project_id = module.zitadel_project.project_id
diff --git a/shuttles/k3sup b/shuttles/k3sup
deleted file mode 100755
index 937d761..0000000
Binary files a/shuttles/k3sup and /dev/null differ
diff --git a/shuttles/setup-cluster.ts b/shuttles/setup-cluster.ts
index 59a372f..ae4624a 100755
--- a/shuttles/setup-cluster.ts
+++ b/shuttles/setup-cluster.ts
@@ -35,12 +35,12 @@ async function executeCommand(
stdout?: "piped" | "inherit" | "null",
stderr?: "piped" | "inherit" | "null",
throwOnError?: boolean
- } = {}
+ } = {stdout: 'piped', stderr: 'piped', throwOnError: true}
): Promise<{ success: boolean; output?: string; error?: string }> {
- const { stdout = "piped", stderr = "piped", throwOnError = true } = options;
-
+ const {stdout = "piped", stderr = "piped", throwOnError = true} = options;
+
log.debug(`Executing: ${cmdArray.join(" ")}`);
-
+
try {
// Use Deno.Command API which is the modern replacement for Deno.run
const command = new Deno.Command(cmdArray[0], {
@@ -48,19 +48,19 @@ async function executeCommand(
stdout: stdout === "piped" ? "piped" : stdout === "inherit" ? "inherit" : "null",
stderr: stderr === "piped" ? "piped" : stderr === "inherit" ? "inherit" : "null",
});
-
- const { code, stdout: stdoutOutput, stderr: stderrOutput } = await command.output();
-
+
+ const {code, stdout: stdoutOutput, stderr: stderrOutput} = await command.output();
+
const stdoutText = stdout === "piped" ? new TextDecoder().decode(stdoutOutput).trim() : "";
const stderrText = stderr === "piped" ? new TextDecoder().decode(stderrOutput).trim() : "";
-
+
if (code !== 0) {
log.error(`Failed to ${description}: ${stderrText || "Unknown error"}`);
if (throwOnError) {
throw new Error(`Command failed: ${cmdArray.join(" ")}\n${stderrText}`);
}
}
-
+
return {
success: code === 0,
output: stdoutText,
@@ -72,48 +72,48 @@ async function executeCommand(
if (throwOnError) {
throw error;
}
- return { success: false, error: errorMessage };
+ return {success: false, error: errorMessage};
}
}
// Check if VM is ready for SSH connections
-async function isVmReadyForSsh(ip: string, user: string, maxAttempts = 30): Promise<boolean> {
+async function isVmReadyForSsh(ip: string, user: string, sshKeyPath: string, maxAttempts = 30): Promise<boolean> {
log.info(`Checking if VM at ${ip} is ready for SSH connections...`);
-
+
for (let attempt = 1; attempt <= maxAttempts; attempt++) {
log.debug(`SSH readiness check attempt ${attempt}/${maxAttempts}`);
-
- const { success } = await executeCommand(
- ["ssh", "-o", "StrictHostKeyChecking=no", "-o", "ConnectTimeout=5", `${user}@${ip}`, "echo", "ready"],
+
+ const {success} = await executeCommand(
+ ["ssh", "-o", "StrictHostKeyChecking=no", "-o", "ConnectTimeout=5", `${user}@${ip}`, "-i", sshKeyPath, "echo", "ready"],
`check SSH connectivity to ${ip}`,
- { throwOnError: false, stderr: "null" }
+ {throwOnError: false}
);
-
+
if (success) {
log.success(`VM at ${ip} is ready for SSH connections`);
return true;
}
-
+
log.debug(`VM at ${ip} not ready yet, waiting...`);
await delay(2000); // Wait 2 seconds between attempts
}
-
+
log.error(`VM at ${ip} is not ready for SSH connections after ${maxAttempts} attempts`);
return false;
}
// Check if VM is running
async function isVmRunning(vmName: string): Promise<boolean> {
- const { success, output } = await executeCommand(
+ const {success, output} = await executeCommand(
["incus", "list", vmName, "--format", "json"],
`check if VM ${vmName} is running`,
- { throwOnError: false }
+ {throwOnError: false}
);
-
+
if (!success || !output) {
return false;
}
-
+
try {
const vmInfo = JSON.parse(output);
return vmInfo.length > 0 && vmInfo[0].status === "Running";
@@ -127,15 +127,17 @@ async function isVmRunning(vmName: string): Promise<boolean> {
// Cleanup function to handle failures
async function cleanup(vmNames: string[], shouldRemove = false): Promise<void> {
log.info("Starting cleanup process...");
-
+
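+ // NB: this early return currently skips the cleanup below, so VMs are never stopped or removed.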
+ return;
+
for (const vmName of vmNames) {
// Check if VM exists
- const { success, output } = await executeCommand(
+ const {success, output} = await executeCommand(
["incus", "list", vmName, "--format", "csv"],
`check if VM ${vmName} exists`,
- { throwOnError: false }
+ {throwOnError: false}
);
-
+
if (success && output) {
// Stop VM if it's running
const isRunning = await isVmRunning(vmName);
@@ -144,58 +146,58 @@ async function cleanup(vmNames: string[], shouldRemove = false): Promise<void> {
await executeCommand(
["incus", "stop", vmName, "--force"],
`stop VM ${vmName}`,
- { throwOnError: false }
+ {throwOnError: false}
);
}
-
+
// Remove VM if requested
if (shouldRemove) {
log.info(`Removing VM ${vmName}...`);
await executeCommand(
["incus", "delete", vmName],
`remove VM ${vmName}`,
- { throwOnError: false }
+ {throwOnError: false}
);
}
}
}
-
+
log.success("Cleanup completed");
}
const setupCluster = async (numMasters: number, forceCleanup = false) => {
log.info(`Starting setup of k3s cluster with ${numMasters} master nodes`);
-
+
const createdVMs: string[] = [];
-
+
try {
// Get hostname and user
- const { output: hostname } = await executeCommand(
+ const {output: hostname} = await executeCommand(
["hostnamectl", "hostname"],
"get hostname"
);
-
- const { output: user } = await executeCommand(
+
+ const {output: user} = await executeCommand(
["whoami"],
"get current user"
);
-
+
const sshKeyPubFileName = `/home/${user}/.ssh/nl.fourlights.${hostname}.pub`;
const sshKeyPrivateFileName = `/home/${user}/.ssh/nl.fourlights.${hostname}`;
-
+
// Check if SSH keys exist
if (!await exists(sshKeyPubFileName) || !await exists(sshKeyPrivateFileName)) {
log.error(`Required SSH keys not found: ${sshKeyPubFileName} or ${sshKeyPrivateFileName}`);
throw new Error("SSH keys not found");
}
-
+
// Step 1: Create Low-Resource Profile (if not exists)
- const { success: profileExists } = await executeCommand(
+ const {success: profileExists} = await executeCommand(
["incus", "profile", "show", "low-resource"],
"check if low-resource profile exists",
- { stdout: "null", stderr: "null", throwOnError: false }
+ {throwOnError: false}
);
-
+
if (!profileExists) {
log.info("Creating low-resource profile...");
await executeCommand(
@@ -218,37 +220,37 @@ const setupCluster = async (numMasters: number, forceCleanup = false) => {
} else {
log.skip("Low-resource profile already exists");
}
-
+
// Read SSH key
const sshKey = await Deno.readTextFile(sshKeyPubFileName);
-
+
// Step 3: Launch VMs (if not already running)
for (let i = 1; i <= numMasters; i++) {
const vmName = `k3s-master${i}`;
-
- const { success: vmExists, output: vmOutput } = await executeCommand(
+
+ const {success: vmExists, output: vmOutput} = await executeCommand(
["incus", "list", vmName, "--format", "csv"],
`check if VM ${vmName} exists`,
- { throwOnError: false }
+ {throwOnError: false}
);
-
+
if (!vmExists || !vmOutput) {
log.info(`Creating VM ${vmName}...`);
await executeCommand(
["incus", "init", `images:${image}`, vmName, "--profile", "low-resource", "-c", "user.timezone=\"Europe/Amsterdam\"", "-c", `user.ssh_key=\"${sshKey}\"`, ...config],
`initialize VM ${vmName}`
);
-
+
await executeCommand(
["incus", "config", 'device', 'add', vmName, 'eth0', 'nic', 'nictype=bridged', 'parent=incusbr0', `ipv4.address=${getIp(i)}`],
`configure network for VM ${vmName}`
);
-
+
await executeCommand(
["incus", "start", vmName],
`start VM ${vmName}`
);
-
+
createdVMs.push(vmName);
log.success(`VM ${vmName} started`);
} else {
@@ -264,14 +266,14 @@ const setupCluster = async (numMasters: number, forceCleanup = false) => {
log.skip(`VM ${vmName} already exists`);
}
}
-
+
// Step 4: Install k3sup (if not installed)
- const { success: k3supInstalled } = await executeCommand(
+ const {success: k3supInstalled} = await executeCommand(
["which", "k3sup"],
"check if k3sup is installed",
- { stdout: "null", stderr: "null", throwOnError: false }
+ {throwOnError: false}
);
-
+
if (!k3supInstalled) {
log.info("Installing k3sup...");
await executeCommand(
@@ -282,19 +284,19 @@ const setupCluster = async (numMasters: number, forceCleanup = false) => {
} else {
log.skip("k3sup already installed");
}
-
+
// Step 5: Wait for VMs to be ready
const firstMasterIP = getIp(1);
log.info(`Waiting for first master node (${firstMasterIP}) to be ready...`);
-
- const vmReady = await isVmReadyForSsh(firstMasterIP, "picard");
+
+ const vmReady = await isVmReadyForSsh(firstMasterIP, "picard", sshKeyPrivateFileName);
if (!vmReady) {
throw new Error(`First master node at ${firstMasterIP} is not ready for SSH connections`);
}
-
+
// Check if kubeconfig exists
const kubeconfigExists = await exists("./kubeconfig");
-
+
if (!kubeconfigExists) {
log.info("Bootstrapping first master node...");
await executeCommand(
@@ -305,26 +307,26 @@ const setupCluster = async (numMasters: number, forceCleanup = false) => {
} else {
log.skip("First master node already bootstrapped");
}
-
+
// Step 6: Join Additional Master Nodes (if not already joined)
for (let i = 2; i <= numMasters; i++) {
const vmName = `k3s-master${i}`;
const vmIP = getIp(i);
-
+
// Wait for VM to be ready
log.info(`Waiting for ${vmName} (${vmIP}) to be ready...`);
- const nodeReady = await isVmReadyForSsh(vmIP, "picard");
+ const nodeReady = await isVmReadyForSsh(vmIP, "picard", sshKeyPrivateFileName);
if (!nodeReady) {
log.warning(`VM ${vmName} is not ready for SSH connections, skipping join operation`);
continue;
}
-
- const { success: joined } = await executeCommand(
+
+ const {success: joined} = await executeCommand(
["kubectl", "--kubeconfig=./kubeconfig", "get", "nodes", vmName],
`check if ${vmName} has joined the cluster`,
- { stdout: "null", stderr: "null", throwOnError: false }
+ {throwOnError: false}
);
-
+
if (!joined) {
log.info(`Joining ${vmName} to the cluster...`);
await executeCommand(
@@ -336,33 +338,33 @@ const setupCluster = async (numMasters: number, forceCleanup = false) => {
log.skip(`VM ${vmName} already joined the cluster`);
}
}
-
+
log.success("HA k3s cluster setup complete! 🚀");
-
+
// Verify cluster status
log.info("Verifying cluster status...");
- const { success: clusterVerified, output: nodesOutput } = await executeCommand(
+ const {success: clusterVerified, output: nodesOutput} = await executeCommand(
["kubectl", "--kubeconfig=./kubeconfig", "get", "nodes", "-o", "wide"],
"verify cluster nodes",
- { throwOnError: false }
+ {throwOnError: false}
);
-
+
if (clusterVerified) {
log.info("Cluster nodes:");
console.log(nodesOutput);
} else {
log.warning("Could not verify cluster status");
}
-
+
} catch (error) {
const errorMessage = error instanceof Error ? error.message : String(error);
log.error(`Failed to set up cluster: ${errorMessage}`);
-
+
if (createdVMs.length > 0) {
log.warning("An error occurred during setup. Cleaning up created resources...");
await cleanup(createdVMs, forceCleanup);
}
-
+
Deno.exit(1);
}
};
@@ -371,7 +373,7 @@ await new Command()
.name("setup-k3s-cluster")
.version("0.1.0")
.description("Automate the setup of an HA k3s cluster using incus and k3sup")
- .option("-m, --masters ", "Number of master nodes", { default: 3 })
- .option("-c, --cleanup", "Force cleanup of VMs if setup fails", { default: false })
- .action(({ masters, cleanup }) => setupCluster(masters, cleanup))
+ .option("-m, --masters ", "Number of master nodes", {default: 3})
+ .option("-c, --cleanup", "Force cleanup of VMs if setup fails", {default: false})
+ .action(({masters, cleanup}) => setupCluster(masters, cleanup))
.parse(Deno.args);
diff --git a/shuttles/terraform/main.tf b/shuttles/terraform/main.tf
index 07941b9..1716063 100644
--- a/shuttles/terraform/main.tf
+++ b/shuttles/terraform/main.tf
@@ -2,13 +2,41 @@ locals {
tld = "fourlights.dev"
cluster_dns = "venus.${local.tld}"
is_installed = true
- node_count = 3
+ node_count = 1
}
-module "registry" {
- source = "../../infra/modules/zot"
-
- service_uri = "registry.${local.cluster_dns}"
+resource "kubernetes_manifest" "traefik-helm-config" {
+ manifest = {
+ apiVersion = "helm.cattle.io/v1"
+ kind = "HelmChartConfig"
+ metadata = {
+ name = "traefik"
+ namespace = "kube-system"
+ }
+ spec = {
+ valuesContent = <