Merge pull request 'feat(infra): enhance infrastructure with Zitadel, tenant configs, and improved cluster automation' (#1) from shuttles into main

Reviewed-on: #1
Committed by Thomas Rijpstra on 2025-04-22 16:03:50 +00:00 (commit 4c390fa05a)
67 changed files with 2534 additions and 250 deletions

View File

@ -59,6 +59,7 @@ resource "helm_release" "argocd" {
oauth_client_id = var.oauth_client_id,
oauth_client_secret = var.oauth_client_secret,
oauth_redirect_uri = var.oauth_redirect_uri
tls = var.tls
})
]
}

View File

@ -42,14 +42,27 @@ server:
hostname: ${ service_uri }
annotations:
kubernetes.io/ingress.class: traefik
%{ if tls }
traefik.ingress.kubernetes.io/router.entrypoints: web,websecure
traefik.ingress.kubernetes.io/router.middlewares: default-redirect-to-https@kubernetescrd,default-preserve-host-headers@kubernetescrd
%{ else }
traefik.ingress.kubernetes.io/router.entrypoints: web
traefik.ingress.kubernetes.io/router.middlewares: default-preserve-host-headers@kubernetescrd
%{ endif }
%{ if tls }
extraTls:
- hosts:
- ${ service_uri }
secretName: argocd-tls
%{ endif }
config:
rbac: |
scopes: '[groups]'
"policy.csv": |
g, admin, role:admin
g, user, role:readonly
"policy.default": ''
%{ if oauth_uri != null }
dex.config: |
connectors:
@ -57,9 +70,9 @@ server:
id: oidc
name: OIDC
config:
issuer: "${ oauth_issuer }"
clientID: "${ oauth_client_id }"
clientSecret: "${ oauth_client_secret }"
insecureSkipEmailVerified: true
insecureEnableGroups: true
scopes:
@ -67,6 +80,7 @@ server:
- email
- openid
- groups
logoutURL: "${ oauth_redirect_uri }"
claimMapping:
name: fullName # ArgoCD expects 'name', FusionAuth provides 'fullName'
preferred_username: email

View File

@ -64,3 +64,8 @@ variable "oauth_redirect_uri" {
description = "OAuth redirect URI" description = "OAuth redirect URI"
default = null default = null
} }
variable "tls" {
type = bool
default = false
}
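Not part of this diff: a minimal sketch of how the new tls flag could be wired in when the ArgoCD module is instantiated; the module source path is an assumption.
module "argocd" {
  source              = "../../modules/argocd"   # illustrative path (assumption)
  oauth_client_id     = var.oauth_client_id
  oauth_client_secret = var.oauth_client_secret
  oauth_redirect_uri  = var.oauth_redirect_uri
  tls                 = true   # renders the websecure entrypoint, HTTPS redirect and extraTls block above
}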

View File

@ -4,21 +4,6 @@ config:
- Github:
- abbr: GH
href: https://github.com/
services:
- My First Group:
- My First Service:
href: http://localhost/
description: Homepage is awesome
- My Second Group:
- My Second Service:
href: http://localhost/
description: Homepage is the best
- My Third Group:
- My Third Service:
href: http://localhost/
description: Homepage is 😎
widgets:
# show the kubernetes widget, with the cluster summary and individual nodes
- kubernetes:

View File

@ -58,6 +58,7 @@ resource "helm_release" "minio" {
admin = var.admin,
tls = var.mode == "distributed" ? false : var.tls
ingressClass = var.ingressClass
displayOnHomepage = var.displayOnHomepage
})
]
}
@ -66,3 +67,13 @@ output "installed" {
value = true
depends_on = [helm_release.minio]
}
output "access_key" {
value = random_password.minio_access_key.result
sensitive = true
}
output "secret_key" {
value = random_password.minio_secret_key.result
sensitive = true
}
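Not part of this diff: a sketch of how these new credential outputs might feed the 365zon tenant configuration added later in this PR; the caller, the `module.minio` instance name, the source path and the service URI are assumptions.
module "tenant_365zon" {                              # hypothetical caller
  source            = "../tenants/365zon"             # illustrative path
  minio_access_key  = module.minio.access_key
  minio_secret_key  = module.minio.secret_key
  minio_service_uri = "https://minio.example.dev"     # illustrative value
}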

View File

@ -22,6 +22,13 @@ ingress:
ingress.kubernetes.io/proxy-body-size: "0"
nginx.ingress.kubernetes.io/proxy-body-size: "0"
%{ endif }
%{ if displayOnHomepage }
gethomepage.dev/enabled: "true"
gethomepage.dev/name: "Minio"
gethomepage.dev/description: "S3-Compatible cloud storage"
gethomepage.dev/group: "Tools"
gethomepage.dev/icon: "minio.png"
%{ endif }
apiIngress:
enabled: true

View File

@ -61,7 +61,11 @@ variable "ingressClass" {
}
variable "storageSize" {
type = string
default = "6Gi"
}
variable "displayOnHomepage" {
type = bool
default = false
}

View File

@ -56,3 +56,16 @@ output "installed" {
value = true
depends_on = [helm_release.mongodb]
}
output "connection_string" {
value = format(
"mongodb://%s:%s@%s/%s?replicaSet=rs0&authSource=admin",
"root",
random_password.mongodb_root_password.result,
join(",", [
for i in range(var.replicas) :format("mongodb-%d.mongodb-headless.mongodb.svc.cluster.local:27017", i)
]),
"admin"
)
sensitive = true
}
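For illustration (not part of this diff): assuming var.replicas = 2, the format() call above renders roughly the following value.
# mongodb://root:<root password>@mongodb-0.mongodb-headless.mongodb.svc.cluster.local:27017,mongodb-1.mongodb-headless.mongodb.svc.cluster.local:27017/admin?replicaSet=rs0&authSource=admin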

View File

@ -16,14 +16,14 @@ mongodb:
readinessProbe:
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 15
failureThreshold: 3
successThreshold: 1
livenessProbe:
initialDelaySeconds: 60
periodSeconds: 20
timeoutSeconds: 15
failureThreshold: 6
# Proper shutdown handling
@ -55,3 +55,11 @@ auth:
- ${ database }
%{ endfor ~}
%{ endif }
resources:
limits:
cpu: 1000m
memory: 1.5Gi
requests:
cpu: 500m
memory: 1Gi

View File

@ -1,4 +1,6 @@
resource "kubernetes_namespace" "postgresql" { resource "kubernetes_namespace" "postgresql" {
count = var.enabled ? 1 : 0
metadata {
name = var.namespace
}
@ -9,21 +11,32 @@ resource "kubernetes_namespace" "postgresql" {
}
resource "random_password" "postgresql_user_password" {
length = 40
special = true
override_special = "!#$%&*()-_=+[]{}<>:?"
min_special = 2
min_upper = 2
min_lower = 2
min_numeric = 2
}
resource "random_password" "postgresql_root_password" {
length = 40
special = true
override_special = "!#$%&*()-_=+[]{}<>:?"
min_special = 2
min_upper = 2
min_lower = 2
min_numeric = 2
}
resource "kubernetes_secret" "postgresql_auth" {
count = var.enabled ? 1 : 0
type = "generic"
depends_on = [var.wait_on]
metadata {
name = "postgresql-auth"
namespace = kubernetes_namespace.postgresql[count.index].metadata.0.name
}
data = {
@ -33,11 +46,12 @@ resource "kubernetes_secret" "postgresql_auth" {
}
resource "helm_release" "postgresql" {
count = var.enabled ? 1 : 0
depends_on = [var.wait_on, kubernetes_secret.postgresql_auth]
name = "postgresql"
repository = "https://charts.bitnami.com/bitnami"
chart = "postgresql"
namespace = kubernetes_namespace.postgresql[count.index].metadata.0.name
version = "16.0.5"
wait = true

View File

@ -17,6 +17,7 @@ resource "random_password" "tenant" {
}
resource "kubernetes_job" "create-tenant" {
count = var.enabled ? 1 : 0
depends_on = [var.wait_on]
metadata {
@ -108,5 +109,5 @@ output "username" {
}
output "job_name" {
value = var.enabled ? kubernetes_job.create-tenant[0].metadata[0].name : null
}

View File

@ -38,3 +38,8 @@ variable "k8s_config_yaml" {
description = "Content of k8s config yaml file" description = "Content of k8s config yaml file"
type = string type = string
} }
variable "enabled" {
type = bool
default = true
}

View File

@ -16,3 +16,8 @@ variable "namespace" {
variable "username" { variable "username" {
type = string type = string
} }
variable "enabled" {
type = bool
default = true
}

View File

@ -41,3 +41,8 @@ output "installed" {
value = true
depends_on = [helm_release.rabbitmq]
}
output "connection_string" {
value = "rabbitmq://user:${random_password.password.result}@rabbitmq-headless.${var.namespace}.svc.cluster.local:5672/"
sensitive = true
}
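Not part of this diff: the 365zon tenant configuration added later in this PR declares matching sensitive variables, so a caller could pass these outputs straight through, roughly as below; the `module.mongodb` and `module.rabbitmq` instance names are assumptions.
mongodb_connection_string  = module.mongodb.connection_string
rabbitmq_connection_string = module.rabbitmq.connection_string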

View File

@ -0,0 +1,122 @@
terraform {
required_providers {
slugify = {
source = "public-cloud-wl/slugify"
version = "0.1.1"
}
}
}
locals {
authority = "https://${var.zitadel_domain}"
slug_project = provider::slugify::slug(var.project)
slug_name = provider::slugify::slug(var.name)
cluster = "${local.slug_project}.${var.cluster_domain}"
uri = "https://${local.slug_name}.${local.cluster}"
}
module "zitadel_project_application_api" {
source = "../project/application/api"
wait_on = var.wait_on
org_id = var.org_id
project_id = var.project_id
name = "${var.name} API"
}
module "zitadel_project_application_ua" {
source = "../project/application/user-agent"
wait_on = module.zitadel_project_application_api.installed
org_id = var.org_id
project_id = var.project_id
name = "${ var.name } (Swagger)"
redirect_uris = ["${local.uri}/swagger/oauth2-redirect.html"]
post_logout_redirect_uris = [local.uri]
}
resource "kubernetes_secret" "user-agent" {
type = "Opaque"
depends_on = [module.zitadel_project_application_ua]
metadata {
name = "${local.slug_name}-user-agent"
namespace = var.namespace
}
data = {
"authority" = local.authority
"audience" = var.project_id
"client_id" = module.zitadel_project_application_ua.client_id
}
}
resource "kubernetes_secret" "api" {
type = "Opaque"
depends_on = [module.zitadel_project_application_api]
metadata {
name = "${local.slug_name}-api"
namespace = var.namespace
}
data = {
"authority" = local.authority
"client_id" = module.zitadel_project_application_api.client_id
"client_secret" = module.zitadel_project_application_api.client_secret
}
}
module "zitadel_service_account" {
count = var.service_account ? 1 : 0
wait_on = module.zitadel_project_application_api.installed
source = "../service-account"
org_id = var.org_id
user_name = "${local.slug_name}@${ local.cluster }"
name = "${var.name} @ ${var.project}"
with_secret = true
access_token_type = "ACCESS_TOKEN_TYPE_JWT"
}
module "zitadel_project_user_grant" {
count = var.service_account ? 1 : 0
source = "../project/user-grant"
org_id = var.org_id
project_id = var.project_id
user_id = module.zitadel_service_account[0].user_id
roles = var.roles
}
resource "kubernetes_secret" "service-account" {
count = var.service_account ? 1 : 0
type = "Opaque"
depends_on = [module.zitadel_service_account]
metadata {
name = "${local.slug_name}-service-account"
namespace = var.namespace
}
data = {
"authority" = local.authority
"audience" = var.project_id
"client_id" = module.zitadel_service_account[count.index].client_id
"client_secret" = module.zitadel_service_account[count.index].client_secret
}
}
output "installed" {
value = true
depends_on = [kubernetes_secret.service-account]
}
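Not part of this diff: the module above writes a "<slug>-api" secret (keys: authority, client_id, client_secret) into the tenant namespace. A hedged sketch of a workload consuming it via envFrom; the deployment, image, labels and the "core-api" secret name are assumptions.
resource "kubernetes_deployment" "api" {                 # hypothetical consumer
  metadata {
    name      = "core-api"
    namespace = var.namespace
  }
  spec {
    selector {
      match_labels = { app = "core-api" }
    }
    template {
      metadata {
        labels = { app = "core-api" }
      }
      spec {
        container {
          name  = "api"
          image = "registry.example.dev/core-api:latest"  # illustrative image
          env_from {
            secret_ref {
              name = "core-api"   # "<slug_name>-api" secret created by the module above
            }
          }
        }
      }
    }
  }
}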

View File

@ -0,0 +1,44 @@
variable "wait_on" {
type = any
description = "Resources to wait on"
default = true
}
variable "org_id" {
type = string
}
variable "project_id" {
type = string
}
variable "name" {
type = string
}
variable "project" {
type = string
}
variable "roles" {
type = list(string)
description = "Roles to be granted"
}
variable "namespace" {
type = string
}
variable "service_account" {
type = bool
default = true
}
variable "zitadel_domain" {
type = string
}
variable "cluster_domain" {
type = string
}

View File

@ -0,0 +1,82 @@
terraform {
required_providers {
zitadel = {
source = "zitadel/zitadel"
}
}
}
resource "zitadel_org_idp_google" "default" {
depends_on = [var.wait_on]
org_id = var.org_id
name = "Google"
client_id = var.client_id
client_secret = var.client_secret
scopes = var.options.scopes
is_linking_allowed = var.options.is_linking_allowed
is_creation_allowed = var.options.is_creation_allowed
is_auto_creation = var.options.is_auto_creation
is_auto_update = var.options.is_auto_update
auto_linking = var.options.auto_linking
}
resource "zitadel_login_policy" "default" {
depends_on = [zitadel_org_idp_google.default]
org_id = var.org_id
user_login = false
allow_register = true
allow_external_idp = true
force_mfa = false
force_mfa_local_only = false
passwordless_type = "PASSWORDLESS_TYPE_ALLOWED"
hide_password_reset = "false"
password_check_lifetime = "240h0m0s"
external_login_check_lifetime = "240h0m0s"
multi_factor_check_lifetime = "24h0m0s"
mfa_init_skip_lifetime = "720h0m0s"
second_factor_check_lifetime = "24h0m0s"
ignore_unknown_usernames = true
default_redirect_uri = "https://${var.domain}"
second_factors = ["SECOND_FACTOR_TYPE_OTP", "SECOND_FACTOR_TYPE_U2F"]
multi_factors = ["MULTI_FACTOR_TYPE_U2F_WITH_VERIFICATION"]
idps = [zitadel_org_idp_google.default.id]
allow_domain_discovery = true
disable_login_with_email = true
disable_login_with_phone = true
}
#resource "zitadel_action" "verify-email-from-google-idp" {
# org_id = var.org_id
# name = "trustEmailVerification"
# script = templatefile("${path.module}/verify-email.action.tftpl", {
# trusted_idp = zitadel_org_idp_google.default.id,
# })
# allowed_to_fail = false
# timeout = "10s"
#}
#resource "zitadel_trigger_actions" "verify-email-from-google-idp" {
# org_id = var.org_id
# flow_type = "FLOW_TYPE_EXTERNAL_AUTHENTICATION"
# trigger_type = "TRIGGER_TYPE_PRE_CREATION"
# action_ids = [zitadel_action.verify-email-from-google-idp.id]
#}
#
#resource "zitadel_trigger_actions" "internal" {
# org_id = var.org_id
# flow_type = "FLOW_TYPE_INTERNAL_AUTHENTICATION"
# trigger_type = "TRIGGER_TYPE_PRE_CREATION"
# action_ids = [zitadel_action.verify-email-from-google-idp.id]
#}
output "installed" {
value = true
depends_on = [
zitadel_org_idp_google.default, zitadel_login_policy.default,
]
}
output "idp_id" {
value = zitadel_org_idp_google.default.id
}

View File

@ -0,0 +1,43 @@
variable "wait_on" {
type = any
description = "Resources to wait on"
default = true
}
variable "org_id" {
type = string
description = "Organisation Id"
}
variable "client_id" {
type = string
description = "Google Client ID"
}
variable "client_secret" {
type = string
description = "Google Client Secret"
}
variable "options" {
type = object({
scopes = list(string)
is_linking_allowed = bool
is_creation_allowed = bool
is_auto_creation = bool
is_auto_update = bool
auto_linking = string
})
default = {
scopes = ["openid", "profile", "email"],
is_linking_allowed = true
is_creation_allowed = true
is_auto_creation = true
is_auto_update = true
auto_linking = "AUTO_LINKING_OPTION_USERNAME"
}
}
variable "domain" {
type = string
}

View File

@ -0,0 +1,15 @@
/**
* Sets the first and last name of a user on just-in-time provisioning for Okta.
* Useful if you want to fill the first and last name with the name stored on Okta, so the user doesn't have to fill them in.
* Also sets the email to verified, so the user doesn't get a verification email.
*
* Flow: External Authentication, Trigger: Post Authentication
*
* @param ctx
* @param api
*/
let logger = require("zitadel/log")
function trustEmailVerification(ctx, api) {
api.setEmailVerified(true);
}

View File

@ -0,0 +1,3 @@
locals {
service_uri = join(".", [var.service_name, var.server_dns])
}
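For illustration (not part of this diff): with the default service_name = "auth" and server_dns = "venus.fourlights.dev", this local evaluates to the value below.
# service_uri = "auth.venus.fourlights.dev"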

View File

@ -0,0 +1,90 @@
terraform {
required_providers {
kubernetes = {
source = "hashicorp/kubernetes"
version = "2.31.0"
}
}
}
resource "kubernetes_namespace" "zitadel" {
count = var.enabled ? 1 : 0
metadata {
name = var.namespace
}
lifecycle {
ignore_changes = [metadata]
}
}
resource "random_password" "zitadel_masterkey" {
length = 32
special = true
}
resource "kubernetes_secret" "zitadel" {
count = var.enabled ? 1 : 0
metadata {
name = "zitadel"
namespace = kubernetes_namespace.zitadel[count.index].metadata[0].name
}
data = {
masterkey = random_password.zitadel_masterkey.result
}
}
resource "helm_release" "zitadel" {
count = var.enabled ? 1 : 0
depends_on = [var.wait_on, kubernetes_secret.zitadel]
name = "zitadel"
repository = "https://charts.zitadel.com"
chart = "zitadel"
namespace = kubernetes_namespace.zitadel[count.index].metadata[0].name
version = "8.12.0"
create_namespace = false
wait = true
wait_for_jobs = true
values = [
templatefile("${path.module}/values.yaml.tftpl", {
service_uri = local.service_uri,
database = var.database,
database_username = var.database_username,
database_password = var.database_password,
database_root_username = var.database_root_password != null ? var.database_root_username : null,
database_root_password = var.database_root_password
display_on_homepage = var.display_on_homepage
})
]
}
data "kubernetes_secret" "zitadel_admin" {
depends_on = [helm_release.zitadel]
metadata {
name = "zitadel-admin-sa"
namespace = var.namespace
}
}
resource "local_file" "zitadel_jwt_profile_file" {
content = data.kubernetes_secret.zitadel_admin.data["zitadel-admin-sa.json"]
filename = format("%s/%s", path.root, "zitadel-admin-sa.json")
}
output "jwt_profile_file" {
value = local_file.zitadel_jwt_profile_file.filename
}
output "installed" {
value = true
depends_on = [helm_release.zitadel, local_file.zitadel_jwt_profile_file]
}
output "server" {
value = local.service_uri
}
output "uri" {
value = "https://${local.service_uri}"
}
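Not part of this diff: these outputs are what the shuttle configurations later in this PR feed into the Zitadel provider; a sketch assuming this module is instantiated as module.zitadel.
provider "zitadel" {
  domain           = module.zitadel.server            # e.g. "auth.venus.fourlights.dev"
  insecure         = "false"
  jwt_profile_file = module.zitadel.jwt_profile_file
}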

View File

@ -0,0 +1,38 @@
terraform {
required_providers {
zitadel = {
source = "zitadel/zitadel"
version = "2.0.2"
}
}
}
resource "zitadel_application_api" "default" {
depends_on = [var.wait_on]
org_id = var.org_id
project_id = var.project_id
name = var.name
auth_method_type = "API_AUTH_METHOD_TYPE_BASIC"
// TODO: Change this to private key jwt in the future
}
output "installed" {
value = true
depends_on = [zitadel_application_api.default]
}
output "application_id" {
value = zitadel_application_api.default.id
}
output "client_id" {
value = zitadel_application_api.default.client_id
sensitive = true
}
output "client_secret" {
value = zitadel_application_api.default.client_secret
sensitive = true
}

View File

@ -0,0 +1,20 @@
variable "wait_on" {
type = any
description = "Resources to wait on"
default = true
}
variable "org_id" {
type = string
description = "Organisation Id"
}
variable "project_id" {
type = string
description = "Project Id"
}
variable "name" {
type = string
description = "Application name"
}

View File

@ -0,0 +1,63 @@
terraform {
required_providers {
zitadel = {
source = "zitadel/zitadel"
version = "2.0.2"
}
}
}
resource "zitadel_application_oidc" "default" {
depends_on = [var.wait_on]
org_id = var.org_id
grant_types = ["OIDC_GRANT_TYPE_AUTHORIZATION_CODE"]
name = var.name
project_id = var.project_id
redirect_uris = var.redirect_uris
response_types = ["OIDC_RESPONSE_TYPE_CODE"]
# // If selected, the requested roles of the authenticated user are added to the access token.
access_token_type = "OIDC_TOKEN_TYPE_JWT"
access_token_role_assertion = true
# BEARER uses an Opaque token, which needs the introspection endpoint and `urn:zitadel:iam:org:project:id:<API_PROJECT_ID>:aud` scope
#access_token_type = "OIDC_TOKEN_TYPE_BEARER"
# // If you want to add additional Origins to your app which is not used as a redirect you can do that here.
#additional_origins = []
app_type = "OIDC_APP_TYPE_USER_AGENT"
auth_method_type = "OIDC_AUTH_METHOD_TYPE_NONE"
# // Redirect URIs must begin with https:// unless dev_mode is true
#dev_mode = false
# // If selected, the requested roles of the authenticated user are added to the ID token.
#id_token_role_assertion = false
# // Enables clients to retrieve profile, email, phone and address claims from ID token.
#id_token_userinfo_assertion = false
post_logout_redirect_uris = var.post_logout_redirect_uris
}
output "installed" {
value = true
depends_on = [zitadel_application_oidc.default]
}
output "application_id" {
value = zitadel_application_oidc.default.id
}
output "client_id" {
value = zitadel_application_oidc.default.client_id
sensitive = true
}
output "client_secret" {
value = zitadel_application_oidc.default.client_secret
sensitive = true
}

View File

@ -0,0 +1,30 @@
variable "wait_on" {
type = any
description = "Resources to wait on"
default = true
}
variable "org_id" {
type = string
description = "Organisation Id"
}
variable "project_id" {
type = string
description = "Project Id"
}
variable "name" {
type = string
description = "Application name"
}
variable "redirect_uris" {
type = list(string)
}
variable "post_logout_redirect_uris" {
type = list(string)
default = []
}

View File

@ -0,0 +1,61 @@
terraform {
required_providers {
zitadel = {
source = "zitadel/zitadel"
version = "2.0.2"
}
}
}
resource "zitadel_application_oidc" "default" {
depends_on = [var.wait_on]
org_id = var.org_id
grant_types = ["OIDC_GRANT_TYPE_AUTHORIZATION_CODE"]
name = var.name
project_id = var.project_id
redirect_uris = var.redirect_uris
response_types = ["OIDC_RESPONSE_TYPE_CODE"]
# // If selected, the requested roles of the authenticated user are added to the access token.
#access_token_type = "OIDC_TOKEN_TYPE_JWT"
#access_token_role_assertion = true
# BEARER uses an Opaque token, which needs the introspection endpoint and `urn:zitadel:iam:org:project:id:<API_PROJECT_ID>:aud` scope
access_token_type = "OIDC_TOKEN_TYPE_BEARER"
# // If you want to add additional Origins to your app which is not used as a redirect you can do that here.
#additional_origins = []
app_type = "OIDC_APP_TYPE_WEB"
auth_method_type = var.auth_method_type
# // Redirect URIs must begin with https:// unless dev_mode is true
#dev_mode = false
id_token_role_assertion = var.id_token_role_assertion
id_token_userinfo_assertion = var.id_token_userinfo_assertion
post_logout_redirect_uris = var.post_logout_redirect_uris
}
output "installed" {
value = true
depends_on = [zitadel_application_oidc.default]
}
output "application_id" {
value = zitadel_application_oidc.default.id
}
output "client_id" {
value = zitadel_application_oidc.default.client_id
sensitive = true
}
output "client_secret" {
value = zitadel_application_oidc.default.client_secret
sensitive = true
}

View File

@ -0,0 +1,47 @@
variable "wait_on" {
type = any
description = "Resources to wait on"
default = true
}
variable "org_id" {
type = string
description = "Organisation Id"
}
variable "project_id" {
type = string
description = "Project Id"
}
variable "name" {
type = string
description = "Application name"
}
variable "redirect_uris" {
type = list(string)
}
variable "post_logout_redirect_uris" {
type = list(string)
default = []
}
variable "auth_method_type" {
type = string
default = "OIDC_AUTH_METHOD_TYPE_NONE"
}
variable "id_token_role_assertion" {
type = bool
default = false
description = "If selected, the requested roles of the authenticated user are added to the ID token."
}
variable "id_token_userinfo_assertion" {
type = bool
default = false
description = "Enables clients to retrieve profile, email, phone and address claims from ID token."
}

View File

@ -0,0 +1,36 @@
terraform {
required_providers {
zitadel = {
source = "zitadel/zitadel"
version = "2.0.2"
}
}
}
resource "zitadel_project" "default" {
depends_on = [var.wait_on]
org_id = var.org_id
name = var.name
project_role_assertion = true
project_role_check = true
has_project_check = true
private_labeling_setting = "PRIVATE_LABELING_SETTING_ENFORCE_PROJECT_RESOURCE_OWNER_POLICY"
}
resource "zitadel_project_member" "default" {
count = length(var.owners)
org_id = var.org_id
project_id = zitadel_project.default.id
user_id = var.owners[count.index]
roles = ["PROJECT_OWNER"]
}
output "installed" {
value = true
depends_on = [zitadel_project.default, zitadel_project_member.default]
}
output "project_id" {
value = zitadel_project.default.id
}

View File

@ -0,0 +1,34 @@
terraform {
required_providers {
zitadel = {
source = "zitadel/zitadel"
version = "2.0.2"
}
}
}
resource "zitadel_project_role" "default" {
count = length(var.roles)
depends_on = [var.wait_on]
org_id = var.org_id
project_id = var.project_id
role_key = var.roles[count.index]
display_name = var.roles[count.index]
group = var.group
}
output "installed" {
value = true
depends_on = [zitadel_project_role.default]
}
output "role_ids" {
value = toset([
for role in zitadel_project_role.default : role.id
])
}
output "roles" {
value = var.roles
}

View File

@ -0,0 +1,27 @@
variable "wait_on" {
type = any
description = "Resources to wait on"
default = true
}
variable "org_id" {
type = string
description = "Organisation Id"
}
variable "project_id" {
type = string
description = "Project Id"
}
variable "group" {
type = string
description = "Optional group name"
default = null
}
variable "roles" {
type = list(string)
description = "Roles to be added"
default = []
}

View File

@ -0,0 +1,26 @@
terraform {
required_providers {
zitadel = {
source = "zitadel/zitadel"
version = "2.0.2"
}
}
}
resource "zitadel_user_grant" "default" {
depends_on = [var.wait_on]
org_id = var.org_id
project_id = var.project_id
user_id = var.user_id
role_keys = var.roles
}
output "installed" {
value = true
depends_on = [zitadel_user_grant.default]
}
output "user_grant_id" {
value = zitadel_user_grant.default.id
}

View File

@ -0,0 +1,26 @@
variable "wait_on" {
type = any
description = "Resources to wait on"
default = true
}
variable "org_id" {
type = string
description = "Organisation Id"
}
variable "project_id" {
type = string
description = "Project Id"
}
variable "user_id" {
type = string
description = "User Id"
}
variable "roles" {
type = list(string)
description = "Roles to be granted"
default = []
}

View File

@ -0,0 +1,21 @@
variable "wait_on" {
type = any
description = "Resources to wait on"
default = true
}
variable "org_id" {
type = string
description = "Organisation Id"
}
variable "name" {
type = string
description = "Name of the project"
}
variable "owners" {
type = list(string)
description = "User IDs to be granted `PROJECT_OWNER` role"
default = []
}

View File

@ -0,0 +1,33 @@
locals {
k8s_config = yamldecode(var.k8s_config_yaml)
k8s_host = local.k8s_config.clusters[0].cluster.server
k8s_auth = try(
{
token = local.k8s_config.users[0].user.token
using_token = true
},
{
client_certificate = base64decode(local.k8s_config.users[0].user["client-certificate-data"])
client_key = base64decode(local.k8s_config.users[0].user["client-key-data"])
using_token = false
}
)
}
provider "kubernetes" {
host = local.k8s_host
insecure = true
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
client_certificate = local.k8s_auth.using_token ? null : local.k8s_auth.client_certificate
client_key = local.k8s_auth.using_token ? null : local.k8s_auth.client_key
}
provider "helm" {
kubernetes {
host = local.k8s_host
insecure = true
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
client_certificate = local.k8s_auth.using_token ? null : local.k8s_auth.client_certificate
client_key = local.k8s_auth.using_token ? null : local.k8s_auth.client_key
}
}
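For illustration (not part of this diff): try() returns the first object that evaluates without an error, so the provider auth mode follows the shape of the kubeconfig.
# users[0].user.token present                                   -> token auth       (using_token = true)
# users[0].user["client-certificate-data"/"client-key-data"]    -> client-cert auth (using_token = false)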

View File

@ -0,0 +1,38 @@
terraform {
required_providers {
zitadel = {
source = "zitadel/zitadel"
version = "2.0.2"
}
}
}
resource "zitadel_machine_user" "default" {
depends_on = [var.wait_on]
org_id = var.org_id
user_name = var.user_name
name = var.name
description = var.description
with_secret = var.with_secret
access_token_type = var.access_token_type
}
output "installed" {
value = true
depends_on = [zitadel_machine_user.default]
}
output "user_id" {
value = zitadel_machine_user.default.id
}
output "client_id" {
value = zitadel_machine_user.default.client_id
sensitive = true
}
output "client_secret" {
value = zitadel_machine_user.default.client_secret
sensitive = true
}

View File

@ -0,0 +1,33 @@
variable "wait_on" {
type = any
description = "Resources to wait on"
default = true
}
variable "org_id" {
type = string
description = "Organisation Id"
}
variable "user_name" {
type = string
}
variable "name" {
type = string
}
variable "description" {
type = string
default = null
}
variable "with_secret" {
type = bool
default = false
}
variable "access_token_type" {
type = string
default = "ACCESS_TOKEN_TYPE_JWT"
}

View File

@ -0,0 +1,23 @@
terraform {
required_providers {
zitadel = {
source = "zitadel/zitadel"
version = "2.0.2"
}
}
}
resource "zitadel_org" "default" {
depends_on = [var.wait_on]
name = var.name
is_default = true
}
output "org_id" {
value = zitadel_org.default.id
}
output "installed" {
value = true
depends_on = [zitadel_org.default]
}

View File

@ -0,0 +1,20 @@
terraform {
required_providers {
zitadel = {
source = "zitadel/zitadel"
version = "2.0.2"
}
}
}
resource "zitadel_org_member" "default" {
depends_on = [var.wait_on]
org_id = var.org_id
user_id = var.user_id
roles = ["ORG_OWNER"]
}
output "installed" {
value = true
depends_on = [zitadel_org_member.default]
}

View File

@ -0,0 +1,15 @@
variable "wait_on" {
type = any
description = "Resources to wait on"
default = true
}
variable "org_id" {
type = string
description = "Zitadel Organization ID"
}
variable "user_id" {
type = string
description = "Zitadel User ID"
}

View File

@ -0,0 +1,11 @@
variable "wait_on" {
type = any
description = "Resources to wait on"
default = true
}
variable "name" {
type = string
description = "Name of the tenant"
default = "fourlights"
}

View File

@ -0,0 +1,31 @@
terraform {
required_providers {
zitadel = {
source = "zitadel/zitadel"
version = "2.0.2"
}
}
}
resource "zitadel_human_user" "default" {
depends_on = [var.wait_on]
org_id = var.org_id
email = var.email
user_name = var.user_name
first_name = var.first_name
last_name = var.last_name
is_email_verified = true
initial_password = "Password1!"
}
output "installed" {
value = true
depends_on = [zitadel_human_user.default]
}
output "user_id" {
value = zitadel_human_user.default.id
}

View File

@ -0,0 +1,26 @@
variable "wait_on" {
type = any
description = "Resources to wait on"
default = true
}
variable "org_id" {
type = string
description = "Organisation Id"
}
variable "user_name" {
type = string
}
variable "first_name" {
type = string
}
variable "last_name" {
type = string
}
variable "email" {
type = string
}

View File

@ -0,0 +1,76 @@
zitadel:
masterkeySecretName: "zitadel"
configmapConfig:
Log:
Level: 'info'
LogStore:
Access:
Stdout:
Enabled: true
ExternalSecure: true
ExternalDomain: ${ service_uri }
ExternalPort: 443
TLS:
Enabled: false
FirstInstance:
Org:
Machine:
Machine:
Username: zitadel-admin-sa
Name: Admin
MachineKey:
ExpirationDate: "2026-01-01T00:00:00Z"
Type: 1
Database:
Postgres:
Host: postgresql-hl.postgresql.svc.cluster.local
Port: 5432
Database: ${ database }
MaxOpenConns: 20
MaxIdleConns: 10
MaxConnLifetime: 30m
MaxConnIdleTime: 5m
User:
Username: ${ database_username }
Password: "${ database_password }"
SSL:
Mode: disable
%{ if database_root_username != null }Admin:
Username: ${ database_root_username }
Password: "${ database_root_password }"
SSL:
Mode: disable
%{ endif }
readinessProbe:
initialDelaySeconds: 5
periodSeconds: 5
failureThreshold: 10
startupProbe:
periodSeconds: 5
failureThreshold: 30
service:
annotations:
traefik.ingress.kubernetes.io/service.serversscheme: h2c
ingress:
enabled: true
className: traefik
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: web
traefik.ingress.kubernetes.io/router.middlewares: default-preserve-host-headers@kubernetescrd
%{ if display_on_homepage }gethomepage.dev/enabled: "true"
gethomepage.dev/name: "Zitadel"
gethomepage.dev/description: "Identity and Access Management"
gethomepage.dev/group: "Tools"
gethomepage.dev/icon: "zitadel.png"
%{ endif }
hosts:
- host: ${service_uri}
paths:
- path: /
pathType: Prefix

View File

@ -0,0 +1,61 @@
variable "service_name" {
type = string
description = "Name of the service"
default = "auth"
}
variable "server_dns" {
type = string
description = "Domain for the server"
}
variable "k8s_config_yaml" {
description = "Content of k8s config yaml file"
type = string
}
variable "wait_on" {
type = any
description = "Resources to wait on"
default = true
}
variable "namespace" {
type = string
}
variable "database" {
type = string
default = "zitadel"
}
variable "database_username" {
type = string
default = "zitadel"
}
variable "database_password" {
type = string
sensitive = true
}
variable "database_root_username" {
type = string
default = "postgres"
}
variable "database_root_password" {
type = string
sensitive = true
default = null
}
variable "display_on_homepage" {
type = bool
default = false
}
variable "enabled" {
type = bool
default = true
}

infra/modules/zot/main.tf (new file, 16 additions)
View File

@ -0,0 +1,16 @@
resource "helm_release" "zot" {
name = "zot"
repository = "https://zotregistry.dev/helm-charts"
chart = "zot"
namespace = "registry"
create_namespace = true
values = [
templatefile("${path.module}/values.yaml.tftpl", { service_uri = var.service_uri })
]
}
output "installed" {
value = true
depends_on = [helm_release.zot]
}
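Not part of this diff: a minimal sketch of instantiating the new zot module; the source path and registry hostname are assumptions.
module "zot" {
  source      = "../../modules/zot"                 # illustrative path
  service_uri = "registry.venus.fourlights.dev"     # illustrative hostname
}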

View File

@ -0,0 +1,11 @@
ingress:
enabled: true
className: "traefik"
annotations:
traefik.ingress.kubernetes.io/router.entrypoints: web
traefik.ingress.kubernetes.io/router.middlewares: default-preserve-host-headers@kubernetescrd
traefik.ingress.kubernetes.io/proxy-body-size: "0"
hosts:
- host: ${ service_uri }
paths:
- path: /

View File

@ -0,0 +1 @@
variable "service_uri" { type = string }

View File

@ -0,0 +1,56 @@
locals {
name = "365Zon"
}
resource "kubernetes_namespace" "tenant" {
metadata {
name = lower(local.name)
}
lifecycle {
ignore_changes = [metadata]
}
}
module "bootstrap-zitadel" {
source = "./zitadel"
namespace = kubernetes_namespace.tenant.metadata[0].name
org_id = var.org_id
user_id = var.user_id
name = local.name
}
// create uploads bucket in minio
// create minio secret
resource "kubernetes_secret" "storage" {
metadata {
name = "storage"
namespace = kubernetes_namespace.tenant.metadata[0].name
}
data = {
Storage__AccountName = var.minio_access_key
Storage__AccountKey = var.minio_secret_key
Storage__BlobUri = var.minio_service_uri
Storage__S3BucketName = "uploads"
}
}
resource "kubernetes_secret" "connection_strings" {
metadata {
name = "connection-strings"
namespace = kubernetes_namespace.tenant.metadata[0].name
}
data = {
ConnectionStrings__DocumentDb = var.mongodb_connection_string
ConnectionStrings__ServiceBus = var.rabbitmq_connection_string
}
}
// okay, so now we have the identity stuff in order, and we have secrets to use for that
// next, we need to set-up:
// - the wildcard tls (*.365zon.venus.fourlights.dev)
// - argocd for all relevant apps

View File

@ -0,0 +1,50 @@
terraform {
required_providers {
zitadel = {
source = "zitadel/zitadel"
version = "2.0.2"
}
}
}
provider "zitadel" {
domain = var.domain
insecure = "false"
jwt_profile_file = var.jwt_profile_file
}
locals {
k8s_config_path = format("%s/%s", path.root, "../kubeconfig")
k8s_config_yaml = file(local.k8s_config_path)
k8s_config = yamldecode(local.k8s_config_yaml)
k8s_host = local.k8s_config.clusters[0].cluster.server
k8s_auth = try(
{
token = local.k8s_config.users[0].user.token
using_token = true
},
{
client_certificate = base64decode(local.k8s_config.users[0].user["client-certificate-data"])
client_key = base64decode(local.k8s_config.users[0].user["client-key-data"])
using_token = false
}
)
}
provider "kubernetes" {
host = local.k8s_host
insecure = true
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
client_certificate = local.k8s_auth.using_token ? null : local.k8s_auth.client_certificate
client_key = local.k8s_auth.using_token ? null : local.k8s_auth.client_key
}
provider "helm" {
kubernetes {
host = local.k8s_host
insecure = true
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
client_certificate = local.k8s_auth.using_token ? null : local.k8s_auth.client_certificate
client_key = local.k8s_auth.using_token ? null : local.k8s_auth.client_key
}
}

View File

@ -0,0 +1,21 @@
variable "domain" { type = string }
variable "jwt_profile_file" { type = string }
variable "org_id" { type = string }
variable "user_id" { type = string }
variable "minio_access_key" {
type = string
sensitive = true
}
variable "minio_secret_key" {
type = string
sensitive = true
}
variable "minio_service_uri" { type = string }
variable "mongodb_connection_string" {
type = string
sensitive = true
}
variable "rabbitmq_connection_string" {
type = string
sensitive = true
}

View File

@ -0,0 +1,153 @@
locals {
tld = "fourlights.dev"
cluster_dns = "venus.${local.tld}"
domain = "zitadel.${local.cluster_dns}"
org_domain = "fourlights.${local.domain}"
}
module "zitadel_project" {
source = "../../../modules/zitadel/project"
org_id = var.org_id
name = var.name
owners = [var.user_id]
}
// TODO: add action for setting roles as scopes
module "zitadel_project_operator_roles" {
source = "../../../modules/zitadel/project/roles"
wait_on = [module.zitadel_project.installed]
org_id = var.org_id
project_id = module.zitadel_project.project_id
group = "Operator"
roles = [
"manage:profiles", "manage:contacts", "manage:addresses", "manage:enquiries", "manage:flowstates",
"manage:flowevents", "manage:files"
]
}
module "zitadel_project_configurator_roles" {
source = "../../../modules/zitadel/project/roles"
wait_on = [module.zitadel_project_operator_roles.installed]
org_id = var.org_id
project_id = module.zitadel_project.project_id
group = "Configurator"
roles = [
"manage:brands", "manage:flows"
]
}
module "zitadel_project_developer_roles" {
source = "../../../modules/zitadel/project/roles"
wait_on = [module.zitadel_project_configurator_roles.installed]
org_id = var.org_id
project_id = module.zitadel_project.project_id
group = "Developer"
roles = [
"manage:jobs", "manage:infrastructure"
]
}
module "zitadel_project_user_grant" {
source = "../../../modules/zitadel/project/user-grant"
wait_on = [module.zitadel_project_developer_roles.installed]
org_id = var.org_id
project_id = module.zitadel_project.project_id
user_id = var.user_id
roles = concat(module.zitadel_project_developer_roles.roles, module.zitadel_project_configurator_roles.roles, module.zitadel_project_operator_roles.roles)
}
// TODO: Move External (and 365zon Push service account) to own project
// TODO: Add grant for external project
// TODO: Add read roles
module "zitadel_project_application_core" {
source = "../../../modules/zitadel/api-m2m-swagger"
wait_on = [module.zitadel_project_user_grant.installed]
org_id = var.org_id
project_id = module.zitadel_project.project_id
name = "Core"
zitadel_domain = local.domain
cluster_domain = local.cluster_dns
namespace = var.namespace
project = var.name
service_account = false
roles = []
}
module "zitadel_project_application_salesforce" {
source = "../../../modules/zitadel/api-m2m-swagger"
wait_on = [module.zitadel_project_application_core.installed]
org_id = var.org_id
project_id = module.zitadel_project.project_id
name = "Salesforce"
zitadel_domain = local.domain
cluster_domain = local.cluster_dns
namespace = var.namespace
project = var.name
roles = module.zitadel_project_operator_roles.roles
}
module "zitadel_project_application_external" {
source = "../../../modules/zitadel/api-m2m-swagger"
wait_on = [module.zitadel_project_application_salesforce.installed]
org_id = var.org_id
project_id = module.zitadel_project.project_id
name = "External"
zitadel_domain = local.domain
cluster_domain = local.cluster_dns
namespace = var.namespace
project = var.name
roles = module.zitadel_project_operator_roles.roles
}
module "zitadel_project_application_module_internal" {
source = "../../../modules/zitadel/api-m2m-swagger"
wait_on = [module.zitadel_project_application_external.installed]
org_id = var.org_id
project_id = module.zitadel_project.project_id
name = "Internal"
zitadel_domain = local.domain
cluster_domain = local.cluster_dns
namespace = var.namespace
project = var.name
roles = module.zitadel_project_operator_roles.roles
}
// TODO: Application for Front-End (implicit, authorization_code, refresh_token)
// TODO: Update API applications with callback apiDomain/swagger/oauth2-redirect.html to allow logging in for Swagger (and probably Hangfire?)
// TODO: Put all the relevant secrets into secret manager
// TODO: Set up OpenTelemetry and update the appinsights integration to use that.
output "org_id" {
value = var.org_id
}
output "project_id" {
value = module.zitadel_project.project_id
}
output "installed" {
value = true
depends_on = [module.zitadel_project_application_external.installed]
}

View File

@ -0,0 +1,8 @@
terraform {
required_providers {
zitadel = {
source = "zitadel/zitadel"
version = "2.0.2"
}
}
}

View File

@ -0,0 +1,15 @@
variable "org_id" {
type = string
}
variable "user_id" {
type = string
}
variable "namespace" {
type = string
}
variable "name" {
type = string
}

View File

@ -0,0 +1,28 @@
/**
* Sets the roles as an additional claim ("groups") in the token, with the role names as the value.
*
* The role claims of the token look like the following:
*
* // added by the code below
* "groups": ["{roleName}", "{roleName}", ...],
*
* Flow: Complement token, Triggers: Pre Userinfo creation, Pre access token creation
*
* @param ctx
* @param api
*/
function groupsClaim(ctx, api) {
if (ctx.v1.user.grants === undefined || ctx.v1.user.grants.count == 0) {
return;
}
let grants = [];
ctx.v1.user.grants.grants.forEach((claim) => {
claim.roles.forEach((role) => {
grants.push(role);
});
});
api.v1.claims.setClaim("groups", grants);
api.v1.claims.setClaim("scope", grants);
}

View File

@ -0,0 +1,113 @@
locals {
argocd_uri = "https://${var.argocd_service_domain}"
}
module "zitadel_project" {
source = "../../../modules/zitadel/project"
org_id = var.org_id
name = var.name
owners = [var.user_id]
}
module "zitadel_project_roles_user" {
source = "../../../modules/zitadel/project/roles"
org_id = var.org_id
project_id = module.zitadel_project.project_id
group = "Users"
roles = ["user"]
}
module "zitadel_project_roles_admin" {
source = "../../../modules/zitadel/project/roles"
org_id = var.org_id
project_id = module.zitadel_project.project_id
group = "Admins"
roles = ["admin"]
}
module "zitadel_application_argocd" {
source = "../../../modules/zitadel/project/application/web"
name = "ArgoCD"
org_id = var.org_id
project_id = module.zitadel_project.project_id
redirect_uris = ["${ local.argocd_uri}/api/dex/callback"]
post_logout_redirect_uris = [local.argocd_uri]
auth_method_type = "OIDC_AUTH_METHOD_TYPE_BASIC"
id_token_role_assertion = true
id_token_userinfo_assertion = true
}
resource "zitadel_action" "groups-claim" {
org_id = var.org_id
name = "groupsClaim"
script = templatefile("${path.module}/groupsClaim.action.tftpl", {})
allowed_to_fail = true
timeout = "10s"
}
resource "zitadel_trigger_actions" "groups-claim-pre-user-info" {
org_id = var.org_id
flow_type = "FLOW_TYPE_CUSTOMISE_TOKEN"
trigger_type = "TRIGGER_TYPE_PRE_USERINFO_CREATION"
action_ids = [zitadel_action.groups-claim.id]
}
resource "zitadel_trigger_actions" "groups-claim-pre-access-token" {
org_id = var.org_id
flow_type = "FLOW_TYPE_CUSTOMISE_TOKEN"
trigger_type = "TRIGGER_TYPE_PRE_ACCESS_TOKEN_CREATION"
action_ids = [zitadel_action.groups-claim.id]
}
module "zitadel_project_user_grant" {
source = "../../../modules/zitadel/project/user-grant"
org_id = var.org_id
project_id = module.zitadel_project.project_id
user_id = var.user_id
roles = module.zitadel_project_roles_admin.roles
}
output "client_id" {
value = module.zitadel_application_argocd.client_id
}
output "client_secret" {
value = module.zitadel_application_argocd.client_secret
}
output "scopes" {
value = ["openid", "profile", "email", "groups"]
}
output "logoutSuffix" {
value = "oidc/v1/end_session"
}
output "user_roles" {
value = module.zitadel_project_roles_user.roles
}
output "admin_roles" {
value = module.zitadel_project_roles_admin.roles
}
output "project_id" {
value = module.zitadel_project.project_id
}
output "installed" {
value = true
depends_on = [
module.zitadel_project_user_grant.installed,
zitadel_trigger_actions.groups-claim-pre-access-token, zitadel_trigger_actions.groups-claim-pre-user-info
]
}
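Not part of this diff: the logoutSuffix output is meant to be appended to the Zitadel base URL when building ArgoCD's logout URL; a sketch of composing it, where the instance name and domain are assumptions.
locals {
  # hypothetical wiring: full OIDC end-session endpoint for ArgoCD
  argocd_logout_url = "https://zitadel.venus.fourlights.dev/${module.zitadel_argocd.logoutSuffix}"
}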

View File

@ -0,0 +1,14 @@
terraform {
required_providers {
zitadel = {
source = "zitadel/zitadel"
version = "2.0.2"
}
}
}
provider "zitadel" {
domain = var.domain
insecure = "false"
jwt_profile_file = var.jwt_profile_file
}

View File

@ -0,0 +1,17 @@
variable "org_id" {
type = string
}
variable "user_id" {
type = string
}
variable "name" {
type = string
default = "argocd"
}
variable "domain" { type = string }
variable "jwt_profile_file" { type = string }
variable "argocd_service_domain" { type = string }

View File

@ -0,0 +1,54 @@
module "zitadel-tenant" {
source = "../../../modules/zitadel/tenant"
name = "fourlights"
}
module "zitadel-idp-google" {
source = "../../../modules/zitadel/identity-provider/google"
wait_on = module.zitadel-tenant.installed
org_id = module.zitadel-tenant.org_id
client_id = "783390190667-quvko2l2kr9ksgeo3pn6pn6t8c1mai9n.apps.googleusercontent.com"
client_secret = "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"
domain = var.domain
options = {
scopes = ["openid", "profile", "email"]
is_auto_creation = true
is_auto_update = true
is_creation_allowed = true
is_linking_allowed = true
auto_linking = "AUTO_LINKING_OPTION_USERNAME"
}
}
module "zitadel-user" {
source = "../../../modules/zitadel/user"
wait_on = module.zitadel-tenant.installed
org_id = module.zitadel-tenant.org_id
first_name = "Thomas"
last_name = "Rijpstra"
user_name = "thomas@fourlights.nl"
email = "thomas@fourlights.nl"
}
module "zitadel-org-owner" {
source = "../../../modules/zitadel/tenant/role-owner"
wait_on = module.zitadel-user.installed
org_id = module.zitadel-tenant.org_id
user_id = module.zitadel-user.user_id
}
output "org_id" {
value = module.zitadel-tenant.org_id
}
output "user_id" {
value = module.zitadel-user.user_id
}
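Not part of this diff: the org and user created here are what the other shuttle configurations in this PR take as inputs; a sketch assuming the bootstrap outputs are passed straight through (the caller, source path and ArgoCD domain are assumptions).
module "argocd_sso" {                                     # hypothetical caller of the ArgoCD shuttle config
  source                = "../argocd"                     # illustrative path
  domain                = var.domain
  jwt_profile_file      = var.jwt_profile_file
  org_id                = module.zitadel_bootstrap.org_id
  user_id               = module.zitadel_bootstrap.user_id
  argocd_service_domain = "argocd.venus.fourlights.dev"   # illustrative
}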

View File

@ -0,0 +1,50 @@
terraform {
required_providers {
zitadel = {
source = "zitadel/zitadel"
version = "2.0.2"
}
}
}
provider "zitadel" {
domain = var.domain
insecure = "false"
jwt_profile_file = var.jwt_profile_file
}
locals {
k8s_config_path = format("%s/%s", path.root, "../kubeconfig")
k8s_config_yaml = file(local.k8s_config_path)
k8s_config = yamldecode(local.k8s_config_yaml)
k8s_host = local.k8s_config.clusters[0].cluster.server
k8s_auth = try(
{
token = local.k8s_config.users[0].user.token
using_token = true
},
{
client_certificate = base64decode(local.k8s_config.users[0].user["client-certificate-data"])
client_key = base64decode(local.k8s_config.users[0].user["client-key-data"])
using_token = false
}
)
}
provider "kubernetes" {
host = local.k8s_host
insecure = true
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
client_certificate = local.k8s_auth.using_token ? null : local.k8s_auth.client_certificate
client_key = local.k8s_auth.using_token ? null : local.k8s_auth.client_key
}
provider "helm" {
kubernetes {
host = local.k8s_host
insecure = true
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
client_certificate = local.k8s_auth.using_token ? null : local.k8s_auth.client_certificate
client_key = local.k8s_auth.using_token ? null : local.k8s_auth.client_key
}
}

View File

@ -0,0 +1,2 @@
variable "domain" { type = string }
variable "jwt_profile_file" { type = string }

ships/shuttle/setup-cluster.ts (new executable file, 115 additions)
View File

@ -0,0 +1,115 @@
#!/usr/bin/env -S deno run --allow-run --allow-read --allow-write
import { Command } from "https://deno.land/x/cliffy@v1.0.0-rc.4/command/mod.ts";
const setupCluster = async (numMasters: number) => {
// Step 1: Create Low-Resource Profile (if not exists)
const profileExists = await Deno.run({
cmd: ["incus", "profile", "show", "low-resource"],
stdout: "null",
stderr: "null",
}).status().then((status) => status.success);
if (!profileExists) {
await Deno.run({
cmd: ["incus", "profile", "create", "low-resource"],
}).status();
await Deno.run({
cmd: ["incus", "profile", "set", "low-resource", "limits.cpu=1", "limits.memory=512MB"],
}).status();
await Deno.run({
cmd: ["incus", "profile", "device", "add", "low-resource", "root", "disk", "pool=default", "path=/"],
}).status();
await Deno.run({
cmd: ["incus", "profile", "device", "add", "low-resource", "eth-0", "nic", "network=incusbr0"],
}).status();
console.log("✅ Low-resource profile created.");
} else {
console.log("⏩ Low-resource profile already exists.");
}
// Step 3: Launch VMs (if not already running)
for (let i = 1; i <= numMasters; i++) {
const vmName = `k3s-master${i}`;
const vmExists = await Deno.run({
cmd: ["incus", "list", vmName, "--format", "csv"],
stdout: "piped",
}).output().then((output) => new TextDecoder().decode(output).trim() !== "");
if (!vmExists) {
await Deno.run({
cmd: ["incus", "launch", "images:alpine/edge/cloud", vmName, "--profile", "low-resource"],
}).status();
console.log(`✅ VM ${vmName} launched.`);
} else {
console.log(`⏩ VM ${vmName} already exists.`);
}
}
// Step 4: Install k3sup (if not installed)
const k3supInstalled = await Deno.run({
cmd: ["which", "k3sup"],
stdout: "null",
stderr: "null",
}).status().then((status) => status.success);
if (!k3supInstalled) {
await Deno.run({
cmd: ["sh", "-c", "curl -sLS https://get.k3sup.dev | sh"],
}).status();
console.log("✅ k3sup installed.");
} else {
console.log("⏩ k3sup already installed.");
}
// Step 5: Bootstrap First Master Node (if not already bootstrapped)
const firstMasterIP = await Deno.run({
cmd: ["incus", "list", "k3s-master1", "--format", "csv", "--columns", "n4"],
stdout: "piped",
}).output().then((output) => new TextDecoder().decode(output).trim().split(",")[1].split(" ")[0])
const kubeconfigExists = await Deno.stat("./kubeconfig").then(() => true).catch(() => false);
if (!kubeconfigExists) {
await Deno.run({
cmd: ["k3sup", "install", "--ip", firstMasterIP, "--user", "root", "--cluster"],
}).status();
console.log("✅ First master node bootstrapped.");
} else {
console.log("⏩ First master node already bootstrapped.");
}
// Step 6: Join Additional Master Nodes (if not already joined)
for (let i = 2; i <= numMasters; i++) {
const vmName = `k3s-master${i}`;
const vmIP = await Deno.run({
cmd: ["incus", "list", vmName, "--format", "csv", "--columns", "n4"],
stdout: "piped",
}).output().then((output) => new TextDecoder().decode(output).trim().split(",")[1].split(" ")[0])
const joined = await Deno.run({
cmd: ["kubectl", "get", "nodes", vmName],
stdout: "null",
stderr: "null",
}).status().then((status) => status.success);
if (!joined) {
await Deno.run({
cmd: ["k3sup", "join", "--ip", vmIP, "--server-ip", firstMasterIP, "--user", "root"],
}).status();
console.log(`✅ VM ${vmName} joined the cluster.`);
} else {
console.log(`⏩ VM ${vmName} already joined the cluster.`);
}
}
console.log("🚀 HA k3s cluster setup complete!");
};
await new Command()
.name("setup-k3s-cluster")
.version("0.1.0")
.description("Automate the setup of an HA k3s cluster using incus and k3sup")
.option("-m, --masters <numMasters:number>", "Number of master nodes", { default: 3 })
.action(({ masters }) => setupCluster(masters))
.parse(Deno.args);

shuttles/.gitignore (vendored, new file, 2 additions)
View File

@ -0,0 +1,2 @@
kubeconfig
*.lock.hcl

View File

@ -1,19 +0,0 @@
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJlRENDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUzTXprM09ESTROelF3SGhjTk1qVXdNakUzTURrd01URTBXaGNOTXpVd01qRTFNRGt3TVRFMApXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUzTXprM09ESTROelF3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFUWVNEV1Jwbmd6TE5ySGphTmhqdmM1SU82a2dibVpwaER4WVROTG11MjAKaWxaQnZLRlZRdW5kV3ZEQ1VrcGJNRjNsOTRuSmxaYVByK3lDSnJpVVh0UjZvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVVQ5bVZxTGcvSFBCUS91L3MzbHAwCjhJQ0RDc013Q2dZSUtvWkl6ajBFQXdJRFNRQXdSZ0loQUpjMkJkMjd0SzNZTFpwa01yOFNMSEIvbngzd1E1MU0KRnRaYnBNVzJudVNXQWlFQTMyUmcyVHZNQW9LYll5bnhySkk3U3g5eWszZHFsSWd5TW15d2M5d1JicmM9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
server: https://10.110.36.47:6443
name: default
contexts:
- context:
cluster: default
user: default
name: default
current-context: default
kind: Config
preferences: {}
users:
- name: default
user:
client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrakNDQVRlZ0F3SUJBZ0lJZFh2OWlXRHR6SE13Q2dZSUtvWkl6ajBFQXdJd0l6RWhNQjhHQTFVRUF3d1kKYXpOekxXTnNhV1Z1ZEMxallVQXhOek01TnpneU9EYzBNQjRYRFRJMU1ESXhOekE1TURFeE5Gb1hEVEkyTURJeApOekE1TURFeE5Gb3dNREVYTUJVR0ExVUVDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhGVEFUQmdOVkJBTVRESE41CmMzUmxiVHBoWkcxcGJqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJKNlNVZm5ESVJndVRDMjkKaWFjVTdTM3VPWkw1RERGZjJPQi9IakdTWEErQlRGaE5VOGtMSHBxZlZYeWVKbHNkd09mR1QvL2JQbENsWFYvdQowc0wyTW5halNEQkdNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBakFmCkJnTlZIU01FR0RBV2dCUXdoZkJDTWRocVpXMW96WlEzZG84d1VYOEpCREFLQmdncWhrak9QUVFEQWdOSkFEQkcKQWlFQXczSFpKY1cwaGI3ZUwxSktvcTJ2cExFaFVxVncxRG1oTGJtcUNQTVdmcEFDSVFDRkhXcDhoTTNMdTROTgpGUnYxc2pkYS93VjdmSVpUcUsyZHVNOUNPQVc5emc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCi0tLS0tQkVHSU4gQ0VSVElGSUNBVEUtLS0tLQpNSUlCZHpDQ0FSMmdBd0lCQWdJQkFEQUtCZ2dxaGtqT1BRUURBakFqTVNFd0h3WURWUVFEREJock0zTXRZMnhwClpXNTBMV05oUURFM016azNPREk0TnpRd0hoY05NalV3TWpFM01Ea3dNVEUwV2hjTk16VXdNakUxTURrd01URTAKV2pBak1TRXdId1lEVlFRRERCaHJNM010WTJ4cFpXNTBMV05oUURFM016azNPREk0TnpRd1dUQVRCZ2NxaGtqTwpQUUlCQmdncWhrak9QUU1CQndOQ0FBUjJCcXE5cVhESmZGeVQ1VVpEY3Z6SHVPdDg2TEZ5WTlDb1oxL0xxeldGClZMdHVQYUFXc3BUdUtZckJieTRZRlBQQlQ1M0RkS1F5cjhhWG5HUDRWenlxbzBJd1FEQU9CZ05WSFE4QkFmOEUKQkFNQ0FxUXdEd1lEVlIwVEFRSC9CQVV3QXdFQi96QWRCZ05WSFE0RUZnUVVNSVh3UWpIWWFtVnRhTTJVTjNhUApNRkYvQ1FRd0NnWUlLb1pJemowRUF3SURTQUF3UlFJZ1lmS01YQ3lFelBmM05wN3paLzVYTnFxeTdjTDBpMXBWCkpjZzNzYmtMbXB3Q0lRRDlzYVpmekswRlUrNWljWFpLZmUyVFg0WW5sNS96aFVGR2FHb2RTb1ovUXc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
client-key-data: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUtlQVpqUzhNM1ZBd2l6cWo0UDN6RURuQmNaYldrcDJPekt2VlNpUSs0azRvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFbnBKUitjTWhHQzVNTGIySnB4VHRMZTQ1a3ZrTU1WL1k0SDhlTVpKY0Q0Rk1XRTFUeVFzZQptcDlWZko0bVd4M0E1OFpQLzlzK1VLVmRYKzdTd3ZZeWRnPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=

View File

@ -1,149 +1,377 @@
#!/usr/bin/env -S deno run --allow-run --allow-read --allow-write #!/usr/bin/env -S deno run --allow-run --allow-read --allow-write
// Note: TypeScript errors related to Deno imports and namespace can be safely ignored
// These are only relevant when running the script with the Deno runtime
import { Command } from "https://deno.land/x/cliffy@v1.0.0-rc.4/command/mod.ts"; import { Command } from "https://deno.land/x/cliffy@v1.0.0-rc.4/command/mod.ts";
import { delay } from "https://deno.land/std/async/mod.ts"; import { delay } from "https://deno.land/std/async/mod.ts";
import { exists } from "https://deno.land/std/fs/mod.ts";
const alpineImage = "alpine/edge/cloud" // Configuration constants
const alpineConfig = ['--profile', 'cloud-init-alpine'] const alpineImage = "alpine/edge/cloud";
const archImage = "archlinux/current/cloud" const alpineConfig = ['--profile', 'cloud-init-alpine'];
const archConfig = ['--profile', 'cloud-init-arch'] const archImage = "archlinux/current/cloud";
const archConfig = ['--profile', 'cloud-init-arch'];
const getIp = (i: number) => `10.110.36.${109 + i}`;
const image = archImage const image = archImage;
const config = archConfig const config = archConfig;
const findIP4 = (name: string, nodeList: any) => { // Enhanced logging function with timestamps and log levels
const ip4 = nodeList?.find((n) => n.name === name)?.state?.network?.eth0?.addresses?.find((n) => n.family === 'inet')?.address; const log = {
return ip4; debug: (message: string) => console.log(`[${new Date().toISOString()}] [DEBUG] ${message}`),
info: (message: string) => console.log(`[${new Date().toISOString()}] [INFO] ${message}`),
success: (message: string) => console.log(`[${new Date().toISOString()}] [SUCCESS] ✅ ${message}`),
warning: (message: string) => console.log(`[${new Date().toISOString()}] [WARNING] ⚠️ ${message}`),
error: (message: string) => console.error(`[${new Date().toISOString()}] [ERROR] ❌ ${message}`),
skip: (message: string) => console.log(`[${new Date().toISOString()}] [SKIP] ⏩ ${message}`),
};
// Helper function to execute commands with proper error handling
async function executeCommand(
cmdArray: string[],
description: string,
options: {
stdout?: "piped" | "inherit" | "null",
stderr?: "piped" | "inherit" | "null",
throwOnError?: boolean
} = {}
): Promise<{ success: boolean; output?: string; error?: string }> {
const { stdout = "piped", stderr = "piped", throwOnError = true } = options;
log.debug(`Executing: ${cmdArray.join(" ")}`);
try {
// Use Deno.Command API which is the modern replacement for Deno.run
const command = new Deno.Command(cmdArray[0], {
args: cmdArray.slice(1),
stdout: stdout === "piped" ? "piped" : stdout === "inherit" ? "inherit" : "null",
stderr: stderr === "piped" ? "piped" : stderr === "inherit" ? "inherit" : "null",
});
const { code, stdout: stdoutOutput, stderr: stderrOutput } = await command.output();
const stdoutText = stdout === "piped" ? new TextDecoder().decode(stdoutOutput).trim() : "";
const stderrText = stderr === "piped" ? new TextDecoder().decode(stderrOutput).trim() : "";
if (code !== 0) {
log.error(`Failed to ${description}: ${stderrText || "Unknown error"}`);
if (throwOnError) {
throw new Error(`Command failed: ${cmdArray.join(" ")}\n${stderrText}`);
}
}
return {
success: code === 0,
output: stdoutText,
error: stderrText
};
} catch (error) {
const errorMessage = error instanceof Error ? error.message : String(error);
log.error(`Exception while ${description}: ${errorMessage}`);
if (throwOnError) {
throw error;
}
return { success: false, error: errorMessage };
}
}
// Check if VM is ready for SSH connections
async function isVmReadyForSsh(ip: string, user: string, maxAttempts = 30): Promise<boolean> {
log.info(`Checking if VM at ${ip} is ready for SSH connections...`);
for (let attempt = 1; attempt <= maxAttempts; attempt++) {
log.debug(`SSH readiness check attempt ${attempt}/${maxAttempts}`);
const { success } = await executeCommand(
["ssh", "-o", "StrictHostKeyChecking=no", "-o", "ConnectTimeout=5", `${user}@${ip}`, "echo", "ready"],
`check SSH connectivity to ${ip}`,
{ throwOnError: false, stderr: "null" }
);
if (success) {
log.success(`VM at ${ip} is ready for SSH connections`);
return true;
}
log.debug(`VM at ${ip} not ready yet, waiting...`);
await delay(2000); // Wait 2 seconds between attempts
}
log.error(`VM at ${ip} is not ready for SSH connections after ${maxAttempts} attempts`);
return false;
}
// Check if VM is running
async function isVmRunning(vmName: string): Promise<boolean> {
const { success, output } = await executeCommand(
["incus", "list", vmName, "--format", "json"],
`check if VM ${vmName} is running`,
{ throwOnError: false }
);
if (!success || !output) {
return false;
}
try {
const vmInfo = JSON.parse(output);
return vmInfo.length > 0 && vmInfo[0].status === "Running";
} catch (error) {
const errorMessage = error instanceof Error ? error.message : String(error);
log.error(`Failed to parse VM status: ${errorMessage}`);
return false;
}
}
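// Illustrative usage sketch (not part of the committed script): how the executeCommand
// helper defined above is typically consumed; the command and description here are hypothetical.
// const { success, output } = await executeCommand(
//   ["incus", "version"],
//   "query the incus client version",
//   { throwOnError: false }
// );
// if (success) log.info(`incus reports: ${output}`);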
// Cleanup function to handle failures
async function cleanup(vmNames: string[], shouldRemove = false): Promise<void> {
log.info("Starting cleanup process...");
for (const vmName of vmNames) {
// Check if VM exists
const { success, output } = await executeCommand(
["incus", "list", vmName, "--format", "csv"],
`check if VM ${vmName} exists`,
{ throwOnError: false }
);
if (success && output) {
// Stop VM if it's running
const isRunning = await isVmRunning(vmName);
if (isRunning) {
log.info(`Stopping VM ${vmName}...`);
await executeCommand(
["incus", "stop", vmName, "--force"],
`stop VM ${vmName}`,
{ throwOnError: false }
);
}
// Remove VM if requested
if (shouldRemove) {
log.info(`Removing VM ${vmName}...`);
await executeCommand(
["incus", "delete", vmName],
`remove VM ${vmName}`,
{ throwOnError: false }
);
}
}
}
log.success("Cleanup completed");
}
const setupCluster = async (numMasters: number, forceCleanup = false) => {
log.info(`Starting setup of k3s cluster with ${numMasters} master nodes`);
const createdVMs: string[] = [];
try {
// Get hostname and user
const { output: hostname } = await executeCommand(
["hostnamectl", "hostname"],
"get hostname"
);
const { output: user } = await executeCommand(
["whoami"],
"get current user"
);
const sshKeyPubFileName = `/home/${user}/.ssh/nl.fourlights.${hostname}.pub`;
const sshKeyPrivateFileName = `/home/${user}/.ssh/nl.fourlights.${hostname}`;
// Check if SSH keys exist
if (!await exists(sshKeyPubFileName) || !await exists(sshKeyPrivateFileName)) {
log.error(`Required SSH keys not found: ${sshKeyPubFileName} or ${sshKeyPrivateFileName}`);
throw new Error("SSH keys not found");
}
// Step 1: Create Low-Resource Profile (if not exists)
const { success: profileExists } = await executeCommand(
["incus", "profile", "show", "low-resource"],
"check if low-resource profile exists",
{ stdout: "null", stderr: "null", throwOnError: false }
);
if (!profileExists) {
log.info("Creating low-resource profile...");
await executeCommand(
["incus", "profile", "create", "low-resource"],
"create low-resource profile"
);
await executeCommand(
["incus", "profile", "set", "low-resource", "limits.cpu=1", "limits.memory=512MB"],
"set low-resource profile limits"
);
await executeCommand(
["incus", "profile", "device", "add", "low-resource", "root", "disk", "pool=default", "path=/"],
"add root disk to low-resource profile"
);
// await executeCommand(
// ["incus", "profile", "device", "add", "low-resource", "eth-0", "nic", "network=incusbr0"],
// "add network interface to low-resource profile"
// );
log.success("Low-resource profile created");
} else {
log.skip("Low-resource profile already exists");
}
// Read SSH key
const sshKey = await Deno.readTextFile(sshKeyPubFileName);
// Step 3: Launch VMs (if not already running)
for (let i = 1; i <= numMasters; i++) {
const vmName = `k3s-master${i}`;
const { success: vmExists, output: vmOutput } = await executeCommand(
["incus", "list", vmName, "--format", "csv"],
`check if VM ${vmName} exists`,
{ throwOnError: false }
);
if (!vmExists || !vmOutput) {
log.info(`Creating VM ${vmName}...`);
await executeCommand(
["incus", "init", `images:${image}`, vmName, "--profile", "low-resource", "-c", "user.timezone=\"Europe/Amsterdam\"", "-c", `user.ssh_key=\"${sshKey}\"`, ...config],
`initialize VM ${vmName}`
);
await executeCommand(
["incus", "config", 'device', 'add', vmName, 'eth0', 'nic', 'nictype=bridged', 'parent=incusbr0', `ipv4.address=${getIp(i)}`],
`configure network for VM ${vmName}`
);
await executeCommand(
["incus", "start", vmName],
`start VM ${vmName}`
);
createdVMs.push(vmName);
log.success(`VM ${vmName} started`);
} else {
// Check if VM is running, if not, start it
const isRunning = await isVmRunning(vmName);
if (!isRunning) {
log.info(`Starting existing VM ${vmName}...`);
await executeCommand(
["incus", "start", vmName],
`start VM ${vmName}`
);
}
log.skip(`VM ${vmName} already exists`);
}
}
// Step 4: Install k3sup (if not installed)
const { success: k3supInstalled } = await executeCommand(
["which", "k3sup"],
"check if k3sup is installed",
{ stdout: "null", stderr: "null", throwOnError: false }
);
if (!k3supInstalled) {
log.info("Installing k3sup...");
await executeCommand(
["sh", "-c", "curl -sLS https://get.k3sup.dev | sh"],
"install k3sup"
);
log.success("k3sup installed");
} else {
log.skip("k3sup already installed");
}
// Step 5: Wait for VMs to be ready
const firstMasterIP = getIp(1);
log.info(`Waiting for first master node (${firstMasterIP}) to be ready...`);
const vmReady = await isVmReadyForSsh(firstMasterIP, "picard");
if (!vmReady) {
throw new Error(`First master node at ${firstMasterIP} is not ready for SSH connections`);
}
// Check if kubeconfig exists
const kubeconfigExists = await exists("./kubeconfig");
if (!kubeconfigExists) {
log.info("Bootstrapping first master node...");
await executeCommand(
["k3sup", "install", "--ip", firstMasterIP, "--user", "picard", "--cluster", "--ssh-key", sshKeyPrivateFileName],
"bootstrap first master node"
);
log.success("First master node bootstrapped");
} else {
log.skip("First master node already bootstrapped");
}
// Step 6: Join Additional Master Nodes (if not already joined)
for (let i = 2; i <= numMasters; i++) {
const vmName = `k3s-master${i}`;
const vmIP = getIp(i);
// Wait for VM to be ready
log.info(`Waiting for ${vmName} (${vmIP}) to be ready...`);
const nodeReady = await isVmReadyForSsh(vmIP, "picard");
if (!nodeReady) {
log.warning(`VM ${vmName} is not ready for SSH connections, skipping join operation`);
continue;
}
const { success: joined } = await executeCommand(
["kubectl", "--kubeconfig=./kubeconfig", "get", "nodes", vmName],
`check if ${vmName} has joined the cluster`,
{ stdout: "null", stderr: "null", throwOnError: false }
);
if (!joined) {
log.info(`Joining ${vmName} to the cluster...`);
await executeCommand(
["k3sup", "join", "--server", "--ip", vmIP, "--server-ip", firstMasterIP, "--user", "picard", "--ssh-key", sshKeyPrivateFileName],
`join ${vmName} to the cluster`
);
log.success(`VM ${vmName} joined the cluster`);
} else {
log.skip(`VM ${vmName} already joined the cluster`);
}
}
log.success("HA k3s cluster setup complete! 🚀");
// Verify cluster status
log.info("Verifying cluster status...");
const { success: clusterVerified, output: nodesOutput } = await executeCommand(
["kubectl", "--kubeconfig=./kubeconfig", "get", "nodes", "-o", "wide"],
"verify cluster nodes",
{ throwOnError: false }
);
if (clusterVerified) {
log.info("Cluster nodes:");
console.log(nodesOutput);
} else {
log.warning("Could not verify cluster status");
}
} catch (error) {
const errorMessage = error instanceof Error ? error.message : String(error);
log.error(`Failed to set up cluster: ${errorMessage}`);
if (createdVMs.length > 0) {
log.warning("An error occurred during setup. Cleaning up created resources...");
await cleanup(createdVMs, forceCleanup);
}
Deno.exit(1);
}
};
await new Command()
.name("setup-k3s-cluster")
.version("0.1.0")
.description("Automate the setup of an HA k3s cluster using incus and k3sup")
.option("-m, --masters <numMasters:number>", "Number of master nodes", { default: 3 })
.option("-c, --cleanup", "Force cleanup of VMs if setup fails", { default: false })
.action(({ masters, cleanup }) => setupCluster(masters, cleanup))
.parse(Deno.args);
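For reference, a hedged invocation sketch of the CLI defined above (the script file name is assumed; the permission flags mirror the shebang and the options come from the Command definition):

// deno run --allow-run --allow-read --allow-write setup-k3s-cluster.ts --masters 3 --cleanup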

View File

@ -1,77 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/argoproj-labs/argocd" {
version = "7.0.2"
constraints = "7.0.2"
hashes = [
"h1:4lbS20EczuzhSNSOjp1mJoe2YbcXniBTzxmJHd+rjIE=",
"zh:083686eaeaa7b51ebaac42c3c7b01a15f020a735dc8dbe50aa6a6bff16888943",
"zh:16b1b813f33874844fadc747c57ae99cf8f119c119b3776a105c154fc4a54488",
"zh:25ed8dca5da5faa52392c7938c61dd9a83bc6388ad771062cecfc15c44bc3d8e",
"zh:3907351bbcb6a0c1c1abeb33dac5d70f798b0ecc05559f2ede40ae84b9079983",
"zh:3a737237f03b9b28de26b1fe9d20bcfa53f580489fc28d774396e5de38906fd3",
"zh:64421961cc342cec8280899352444a96ad1b09144fa933dc3a0dfb9bbae809a9",
"zh:9702119789cc42b98dc9d1a8d7666b608a964cf1355e3cf500b82bed1898f2fd",
"zh:9cc9ad41a6ce25aac40b9dd2291fc4d90a223add197155decdca7d2d82fc60f1",
"zh:a239381a36bf6041d6520c8db83fb281fd2417f4540c895e07db052dd108a72f",
"zh:ecca66064fff07719eec2ef35cd62d1cb65cf4a11f9ce96f3a9b9b7c78d614a5",
]
}
provider "registry.terraform.io/hashicorp/helm" {
version = "2.17.0"
hashes = [
"h1:K5FEjxvDnxb1JF1kG1xr8J3pNGxoaR3Z0IBG9Csm/Is=",
"zh:06fb4e9932f0afc1904d2279e6e99353c2ddac0d765305ce90519af410706bd4",
"zh:104eccfc781fc868da3c7fec4385ad14ed183eb985c96331a1a937ac79c2d1a7",
"zh:129345c82359837bb3f0070ce4891ec232697052f7d5ccf61d43d818912cf5f3",
"zh:3956187ec239f4045975b35e8c30741f701aa494c386aaa04ebabffe7749f81c",
"zh:66a9686d92a6b3ec43de3ca3fde60ef3d89fb76259ed3313ca4eb9bb8c13b7dd",
"zh:88644260090aa621e7e8083585c468c8dd5e09a3c01a432fb05da5c4623af940",
"zh:a248f650d174a883b32c5b94f9e725f4057e623b00f171936dcdcc840fad0b3e",
"zh:aa498c1f1ab93be5c8fbf6d48af51dc6ef0f10b2ea88d67bcb9f02d1d80d3930",
"zh:bf01e0f2ec2468c53596e027d376532a2d30feb72b0b5b810334d043109ae32f",
"zh:c46fa84cc8388e5ca87eb575a534ebcf68819c5a5724142998b487cb11246654",
"zh:d0c0f15ffc115c0965cbfe5c81f18c2e114113e7a1e6829f6bfd879ce5744fbb",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
]
}
provider "registry.terraform.io/hashicorp/kubernetes" {
version = "2.35.1"
hashes = [
"h1:Av0Wk8g2XjY2oap7nyWNHEgfCRfphdJvrkqJjEM2ZKM=",
"zh:12212ca5ae47823ce14bfafb909eeb6861faf1e2435fb2fc4a8b334b3544b5f5",
"zh:3f49b3d77182df06b225ab266667de69681c2e75d296867eb2cf06a8f8db768c",
"zh:40832494d19f8a2b3cd0c18b80294d0b23ef6b82f6f6897b5fe00248a9997460",
"zh:739a5ddea61a77925ee7006a29c8717377a2e9d0a79a0bbd98738d92eec12c0d",
"zh:a02b472021753627c5c39447a56d125a32214c29ff9108fc499f2dcdf4f1cc4f",
"zh:b78865b3867065aa266d6758c9601a2756741478f5735a838c20d633d65e085b",
"zh:d362e87464683f5632790e66920ea803adb54c2bc0cb24b6fd9a314d2b1efffd",
"zh:d98206fe88c2c9a52b8d2d0cb2c877c812a4a51d19f9d8428e63cbd5fd8a304d",
"zh:dfa320946b1ce3f3615c42b3447a28dc9f604c06d8b9a6fe289855ab2ade4d11",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
"zh:fc1debd2e695b5222d2ccc8b24dab65baba4ee2418ecce944e64d42e79474cb5",
"zh:fdaf960443720a238c09e519aeb30faf74f027ac5d1e0a309c3b326888e031d7",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.6.3"
hashes = [
"h1:Fnaec9vA8sZ8BXVlN3Xn9Jz3zghSETIKg7ch8oXhxno=",
"zh:04ceb65210251339f07cd4611885d242cd4d0c7306e86dda9785396807c00451",
"zh:448f56199f3e99ff75d5c0afacae867ee795e4dfda6cb5f8e3b2a72ec3583dd8",
"zh:4b4c11ccfba7319e901df2dac836b1ae8f12185e37249e8d870ee10bb87a13fe",
"zh:4fa45c44c0de582c2edb8a2e054f55124520c16a39b2dfc0355929063b6395b1",
"zh:588508280501a06259e023b0695f6a18149a3816d259655c424d068982cbdd36",
"zh:737c4d99a87d2a4d1ac0a54a73d2cb62974ccb2edbd234f333abd079a32ebc9e",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:a357ab512e5ebc6d1fda1382503109766e21bbfdfaa9ccda43d313c122069b30",
"zh:c51bfb15e7d52cc1a2eaec2a903ac2aff15d162c172b1b4c17675190e8147615",
"zh:e0951ee6fa9df90433728b96381fb867e3db98f66f735e0c3e24f8f16903f0ad",
"zh:e3cdcb4e73740621dabd82ee6a37d6cfce7fee2a03d8074df65086760f5cf556",
"zh:eff58323099f1bd9a0bec7cb04f717e7f1b2774c7d612bf7581797e1622613a0",
]
}

View File

@ -1,11 +1,54 @@
locals {
tld = "fourlights.dev"
cluster_dns = "venus.${local.tld}"
bridge_dns = "bridge.${local.cluster_dns}"
is_installed = true
node_count = 3
}
module "registry" {
source = "../../infra/modules/zot"
service_uri = "registry.${local.cluster_dns}"
}
resource "kubernetes_manifest" "preserve-host-middleware" {
depends_on = [local.is_installed]
manifest = {
apiVersion = "traefik.io/v1alpha1"
kind = "Middleware"
metadata = {
name = "preserve-host-headers"
namespace = "default" # NOTE: Hardcoded by design
}
spec = {
headers = {
customRequestHeaders = {
"X-Forwarded-Proto" = "https"
"X-Forwarded-Port" = "443"
}
}
}
}
}
resource "kubernetes_manifest" "https-redirect-middleware" {
depends_on = [local.is_installed]
manifest = {
apiVersion = "traefik.io/v1alpha1"
kind = "Middleware"
metadata = {
name = "redirect-to-https"
namespace = "default" # NOTE: Hardcoded by design
}
spec = {
redirectScheme = {
permanent = true
scheme = "https"
}
}
}
}
module "homepage" { module "homepage" {
source = "../../infra/modules/homepage" source = "../../infra/modules/homepage"
wait_on = local.is_installed wait_on = local.is_installed
@ -32,6 +75,8 @@ module "minio" {
admin = true
ingressClass = "traefik"
storageSize = "10Gi"
displayOnHomepage = true
}
module "mongodb" {
@ -57,3 +102,110 @@ module "rabbitmq" {
admin = true
ingressClass = "traefik"
}
module "postgresql" {
source = "../../infra/modules/postgresql"
namespace = "postgresql"
k8s_config_yaml = local.k8s_config_yaml
username = "bridge"
}
module "zitadel-db" {
source = "../../infra/modules/postgresql/tenant"
wait_on = module.postgresql.installed
name = "zitadel"
root_password = module.postgresql.root_password
k8s_config_yaml = local.k8s_config_yaml
}
module "zitadel" {
source = "../../infra/modules/zitadel"
wait_on = module.zitadel-db.installed
k8s_config_yaml = local.k8s_config_yaml
server_dns = local.cluster_dns
service_name = "zitadel"
namespace = "zitadel"
database_password = module.zitadel-db.password
database_root_password = module.postgresql.root_password
display_on_homepage = true
}
module "zitadel-bootstrap" {
source = "../../infra/tenants/fourlights/zitadel"
domain = module.zitadel.server
jwt_profile_file = module.zitadel.jwt_profile_file
}
module "redis" {
source = "../../infra/modules/redis"
namespace = "redis"
k8s_config_yaml = local.k8s_config_yaml
}
module "tenant-365zon" {
source = "../../infra/tenants/365zon"
org_id = module.zitadel-bootstrap.org_id
user_id = module.zitadel-bootstrap.user_id
domain = module.zitadel.server
jwt_profile_file = module.zitadel.jwt_profile_file
minio_access_key = module.minio.minio_access_key
minio_secret_key = module.minio.minio_secret_key
minio_service_uri = module.minio.minio_api_uri
mongodb_connection_string = module.mongodb.connection_string
rabbitmq_connection_string = module.rabbitmq.connection_string
}
module "zitadel-argocd" {
source = "../../infra/tenants/argocd/zitadel"
org_id = module.zitadel-bootstrap.org_id
user_id = module.zitadel-bootstrap.user_id
domain = module.zitadel.server
jwt_profile_file = module.zitadel.jwt_profile_file
argocd_service_domain = "argocd.${ local.cluster_dns}"
}
module "argocd" {
source = "../../infra/modules/argocd"
wait_on = module.zitadel-argocd.installed
namespace = "argocd"
k8s_config_yaml = local.k8s_config_yaml
redis_db_start_index = 0
redis_password = module.redis.password
server_dns = local.cluster_dns
oauth_uri = module.zitadel.server
oauth_client_id = module.zitadel-argocd.client_id
oauth_client_secret = module.zitadel-argocd.client_secret
oauth_redirect_uri = "https://${module.zitadel.server}/${module.zitadel-argocd.logoutSuffix}"
oauth_issuer = "https://${module.zitadel.server}"
}
/*
argocd project
*/
output "argocd-root-password" {
value = module.argocd.admin_password
sensitive = true
}
output "mongodb-connection-string" {
value = module.mongodb.connection_string
sensitive = true
}

View File

@ -0,0 +1 @@
{"type":"serviceaccount","keyId":"310142761184133898","key":"-----BEGIN RSA PRIVATE KEY-----\nMIIEowIBAAKCAQEApSaCjkOBVIe33bEIwENq1jGj6MgbN+NqYRN6EVDWVnESM10/\n188hB9UDCvNR5kcBbaz2bD/ymZ/ppWSLqmXONwc3/PtiOluBfkvR1q2pEh+f13wz\n72dbhOVqf+YhL3lndiQ/OupGzaCbmsBNPGag7mgaPxlgoWTWIItPnOomIhwtwXgy\nNyzt9Fmyh/4JsRlIYO90ZO32vKXABRMCGsKxvcY9CR4+LIqddns83YASGFnQ5oBo\nObc8EN2Di7uKWzNwxUJuZtFlHXp06su2mWDGJhKusHYW4KUIs2uwFtjJfAXG/adT\n8qVgi174m1jU2ocSd6o9IqDYf50arCinbgtAdwIDAQABAoIBABwrB1WQefya8Wdk\njKOOXCiQau6HQu0zYq+QDN/rM8OmoX4VR5Bdibq2QECb47otHjdAqv8noQ9G0Ske\njxvPJW8JUilaDxT5CosqD25YTGAE+NReINWSgW+XWaTa8YoRYO4rnIVF9DGaVS/9\n4K6OqqA/LUrZ3ztn4YXHfRq8bSif86GMo1GkwH8xOMJHdaxCs8YzAbpGURL03QtL\nemVNs9VwSWLmnK71FpXkko0aGi14naS7E4jv8uutykLQsc+QE7m9B4OiDkijKCP9\nQwvw/3RZYcrRuWz7uSANyxG4Uc8JhPdUIyvpkvUz8NfRLTDoSAEq1NQuxpyjLYYU\n7uzYcWECgYEAzKZ5wGTJBZafen2I61L8XAMk2df63nnEK+YuZqNZ6yH6IY7cCrlJ\n3LbeNoHNcGMXw1mf9Z9vvAjz7nbec2BYN1KRMR9QOTHcqwQZcOOJnwhdO4uAlsFZ\ngiyoLYCQP8Z6IIC4ht+2hmf8hS3CmWUPAXyLOcg4ok6SRdyNsfWiLwkCgYEAzpbL\n8szYqNY+r5n1DQ9d6zNb2cbkFfzZDxn64BA1xQZtRgxfzNAOvsGl5pPWve7oS/8Y\nmPx+1b08NvCcTuaow7CCw+IDHsI43TRNbvPQBWtINBE6eeBs3laaNvmxTZU5HGog\nt1yRtk0u64hKT7+L7Ku5JP79pxzNOIs1hnImU38CgYAaH84+/x6iNf4Ztti5oZhR\nbp1PqcB+kfC24eVeeM/LskSp8ACq5chGApoPPzaoeB3adCB1TGsJB+OLt2TiOZRJ\nS6L5MFQfWPwgYJ+Wx5UT1g+AwGgj1n7EnUrCtDy1x3Jjn8rufLRiJ/gWUCcdScdG\nm01yjNqd7YXCoUr9Qqv3cQKBgGd2klHZUbDNC7v6SQXvakP/BsM8nsJ8TWEIy+In\nfCZen59zVw9GK/xRE3s1E1kwK1rUOUd1PThie6OwQTgqwN6wqezcZl+jOcNfDGDC\n7q2oGxMohbbANQXtLXLW/nsyftXCOPxb+gXpBdSj/0ONVNCE+EaVBggJnqXw4i+h\nP5yVAoGBAIoXRgX3mSBsC/xgKIXQb4c9WT7W78IOpU43mbX9jC/emfLkOvuxR/Cv\nmJDgTv2zUq7uItbvXmxwmU7JVYlBFaWERsAqzzWUUsdfM3tBFdBbcH9fzoEG0j4u\nkqCwU1if6HTHCmunqt1ZQKN3oP1Uycn/1ZL6NR8ilqIcjCzh4JPQ\n-----END RSA PRIVATE KEY-----\n","expirationDate":"2026-01-01T00:00:00Z","userId":"310142761184068362"}