Initial commit

Thomas Rijpstra 2024-12-05 06:23:53 +01:00
commit 23e9e00f35
Signed by: thomas
SSH Key Fingerprint: SHA256:sFF5HPNPaaW14qykTkmRi1FGGO0YMUPBenlKOqepUpw
178 changed files with 403546 additions and 0 deletions

.idea/.gitignore vendored Normal file
@@ -0,0 +1,8 @@
# Default ignored files
/shelf/
/workspace.xml
# Editor-based HTTP Client requests
/httpRequests/
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml

.idea/devops.iml Normal file
@@ -0,0 +1,12 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="WEB_MODULE" version="4">
  <component name="NewModuleRootManager">
    <content url="file://$MODULE_DIR$">
      <excludeFolder url="file://$MODULE_DIR$/.tmp" />
      <excludeFolder url="file://$MODULE_DIR$/temp" />
      <excludeFolder url="file://$MODULE_DIR$/tmp" />
    </content>
    <orderEntry type="inheritedJdk" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
</module>

.idea/modules.xml Normal file
@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectModuleManager">
    <modules>
      <module fileurl="file://$PROJECT_DIR$/.idea/devops.iml" filepath="$PROJECT_DIR$/.idea/devops.iml" />
    </modules>
  </component>
</project>

@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="TemplateDataLanguageMappings">
    <file url="file://$PROJECT_DIR$/infra/modules/argocd/values.yaml" dialect="yaml" />
    <file url="file://$PROJECT_DIR$/infra/modules/fusionauth/values.yaml" dialect="yaml" />
    <file url="file://$PROJECT_DIR$/infra/modules/mongodb/values.yaml" dialect="yaml" />
  </component>
</project>

@@ -0,0 +1,18 @@
AUTH_ORIGIN=https://mijn.365zon.nl
AUTH_SECRET=nRm0dT_SD{H<3%Z!
HOST=0.0.0.0
NEXTAUTH_URL=https://mijn.365zon.nl
NODE_ENV=production
NUXT_AUTH_ORIGIN=https://mijn.365zon.nl
NUXT_AUTH0_AUDIENCE=https://365zon-prod.giddix.io
NUXT_AUTH0_AUTHORITY=https://giddix-365zon.eu.auth0.com
NUXT_AUTH0_CLIENT_ID=z2k0ajnrbqDhPRgiqZxW7ODq9aU9jmLG
NUXT_AUTH0_CLIENT_SECRET=oIxleJ9QktIRhN3GhGOozVS0ot8HxWCA1eNeZclSu-MQxD0KvtL0H9rsElO9-tnD
NUXT_PUBLIC_API_BASE_URL=https://365zon-api.giddix.io
NUXT_PUBLIC_APP_DEBUG=false
NUXT_PUBLIC_APP_INSIGHTS_ENABLED=True
NUXT_PUBLIC_APP_INSIGHTS_INSTRUMENTATION_KEY=b852a92c-dfbb-4c47-9431-afb9db86d669
NUXT_PUBLIC_APP_INSIGHTS_ROLE_NAME=px-app-365zon-prod
NUXT_PUBLIC_BRAND_API_SLUG=365zon
NUXT_PUBLIC_BRAND_SLUG=365zon
NUXT_PUBLIC_FLOW_DEBUG=False

@@ -0,0 +1,33 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: frontend
  labels:
    app: frontend
spec:
  replicas: 1
  selector:
    matchLabels:
      app: frontend
  template:
    metadata:
      labels:
        app: frontend
    spec:
      imagePullSecrets:
        - name: ghcr.io
      containers:
        - name: frontend
          image: ghcr.io/four-lights-nl/mijn-365zon-frontend-365zon:latest
          ports:
            - containerPort: 80
          envFrom:
            - configMapRef:
                name: frontend
          resources:
            requests:
              cpu: 200m
              memory: 256Mi
            limits:
              cpu: 1000m
              memory: 1024Mi

@@ -0,0 +1,22 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: frontend
  annotations:
    kubernetes.io/ingress.class: traefik
    cert-manager.io/cluster-issuer: letsencrypt
    traefik.ingress.kubernetes.io/router.entrypoints: web,websecure
    traefik.ingress.kubernetes.io/router.middlewares: application-redirect-to-https@kubernetescrd,application-preserve-host-headers@kubernetescrd
spec:
  rules:
    - host: frontend.svc.cluster.local
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: frontend
                port:
                  number: 80

@@ -0,0 +1,18 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: application
images:
  - name: ghcr.io/four-lights-nl/mijn-365zon-frontend-365zon
    newTag: latest
resources:
  - deployment.yaml
  - service.yaml
  - ingress.yaml
configMapGenerator:
  - name: frontend
    envs:
      - config.env
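
The generated output can be inspected before anything is applied; a minimal sketch, assuming a checkout where this base lives at the hypothetical path/to/frontend/base:

# Render the base locally; the ConfigMap built from config.env gets a
# content-hash suffix, and kustomize rewrites the Deployment's configMapRef
# to match, so pods roll on every config change.
kubectl kustomize path/to/frontend/base
# Apply the rendered manifests into the 'application' namespace set above.
kubectl apply -k path/to/frontend/base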

@@ -0,0 +1,11 @@
apiVersion: v1
kind: Service
metadata:
  name: frontend
spec:
  selector:
    app: frontend
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80

@@ -0,0 +1,7 @@
# configmap-patch.yaml
- op: replace
  path: /data/BACKEND_API_URL
  value: "https://admin.bouwroute.nl"
- op: replace
  path: /data/NEXT_PUBLIC_GTM_ID
  value: "GTM-PG52DB9"

@@ -0,0 +1,6 @@
- op: replace
  path: /spec/replicas
  value: 3
- op: add
  path: /spec/template/spec/containers/0/imagePullPolicy
  value: Always

@@ -0,0 +1,16 @@
- op: replace
  path: /spec/rules/0/host
  value: app.365zon.fourlights.dev
- op: add
  path: /spec/rules/1
  value:
    host: mijn.365zon.nl
    http:
      paths:
        - path: /
          pathType: Prefix
          backend:
            service:
              name: frontend
              port:
                number: 80

@@ -0,0 +1,23 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - ../../base
images:
  - name: ghcr.io/four-lights-nl/mijn-365zon-frontend-365zon
    newTag: latest
patches:
  - target:
      kind: ConfigMap
      name: frontend
    path: configmap-patch.yaml
  - target:
      kind: Ingress
      name: frontend
    path: ingress-patch.yaml
  - target:
      kind: Deployment
      name: frontend
    path: deployment-patch.yaml

@@ -0,0 +1,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: frontend
data:
  BACKEND_API_URI: "https://admin.sandbox.bouwroute.app"
  NEXT_PUBLIC_GTM_ID: "GTM-DEV123"

@@ -0,0 +1,10 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: frontend
spec:
  replicas: 2
  template:
    metadata:
      annotations:
        vault.hashicorp.com/agent-inject-secret-frontend: "application/sandbox/frontend"

@@ -0,0 +1,7 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: frontend
spec:
  rules:
    - host: sandbox.bouwroute.app

@@ -0,0 +1,13 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - ../../base
images:
  # name inferred from the base kustomization; an images entry without a
  # name is invalid, and the newTag override would never match anything.
  - name: ghcr.io/four-lights-nl/mijn-365zon-frontend-365zon
    newTag: latest
patches:
  - path: configmap-patch.yaml
  - path: ingress-patch.yaml
  - path: deployment-patch.yaml

infra/clusters/app-365zon/.gitignore vendored Normal file
@@ -0,0 +1 @@
.terraform

@@ -0,0 +1,116 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/aminueza/minio" {
version = "2.5.1"
constraints = "~> 2.5.0"
hashes = [
"h1:03gfmXf78G9h9XCHwavPwAwCjg1xmQIp4e5aAv6xIbI=",
"zh:0710a1fcd8e3501237990344160b0193860c2e643e73c728bf832e3d3fde971a",
"zh:0b2f25fbb59d056299faec7fb09012ef0545bd25e7ffa55a04a5c10c28908713",
"zh:0e0179fe12c855bcf5dbcf6858373eaa6e9dd790010096a3fcc667a23224388d",
"zh:23f6118cefb2fae443de98197490a9ba56fa51f1e324d1811709e0fdfc22ed7d",
"zh:34875cbaf07fbed8b8c639f38146f19188e57fc2eac4cdeac638b3d675b82ad4",
"zh:5b0fc4934533557af0001630801e9e637ab0e1588fd086f0cd04a52f4a13474f",
"zh:5d8eda5611ce4017688694e566c00609508a2c3a0e9aa587f6a58dcd1cb9846c",
"zh:70855ab6327a1b79b0619d0ed3538513f98fdfadae6fe60e986dbbf2891151f8",
"zh:7330d66c56a67a4c36f2fc2f1d7042503f5b4d0ec66a9bbe2b72920fb56b85de",
"zh:764597f7be92426cd63f7ae82d2845a1f2677d2b86921f19facf93fdbb80f503",
"zh:7dd947c72366377a16adc7bf0c0d09c32ade09dcedbcbf411da057ca970fb9e8",
"zh:9db57839cdc1d667271d5589ca4d9e791b665c0248e37c9ccdc79c0cef39aaed",
]
}
provider "registry.terraform.io/argoproj-labs/argocd" {
version = "7.0.2"
constraints = "7.0.2"
hashes = [
"h1:4lbS20EczuzhSNSOjp1mJoe2YbcXniBTzxmJHd+rjIE=",
"zh:083686eaeaa7b51ebaac42c3c7b01a15f020a735dc8dbe50aa6a6bff16888943",
"zh:16b1b813f33874844fadc747c57ae99cf8f119c119b3776a105c154fc4a54488",
"zh:25ed8dca5da5faa52392c7938c61dd9a83bc6388ad771062cecfc15c44bc3d8e",
"zh:3907351bbcb6a0c1c1abeb33dac5d70f798b0ecc05559f2ede40ae84b9079983",
"zh:3a737237f03b9b28de26b1fe9d20bcfa53f580489fc28d774396e5de38906fd3",
"zh:64421961cc342cec8280899352444a96ad1b09144fa933dc3a0dfb9bbae809a9",
"zh:9702119789cc42b98dc9d1a8d7666b608a964cf1355e3cf500b82bed1898f2fd",
"zh:9cc9ad41a6ce25aac40b9dd2291fc4d90a223add197155decdca7d2d82fc60f1",
"zh:a239381a36bf6041d6520c8db83fb281fd2417f4540c895e07db052dd108a72f",
"zh:ecca66064fff07719eec2ef35cd62d1cb65cf4a11f9ce96f3a9b9b7c78d614a5",
]
}
provider "registry.terraform.io/hashicorp/helm" {
version = "2.16.1"
hashes = [
"h1:TerRBdq69SxIWg3ET2VE0bcP0BYRIWZOp1QxXj/14Fk=",
"zh:0003f6719a32aee9afaeeb001687fc0cfc8c2d5f54861298cf1dc5711f3b4e65",
"zh:16cd5bfee09e7bb081b8b4470f31a9af508e52220fd97fd81c6dda725d9422fe",
"zh:51817de8fdc2c2e36785f23fbf4ec022111bd1cf7679498c16ad0ad7471c16db",
"zh:51b95829b2873be40a65809294bffe349e40cfccc3ff6fee0f471d01770e0ebd",
"zh:56b158dde897c47e1460181fc472c3e920aa23db40579fdc2aad333c1456d2dd",
"zh:916641d26c386959eb982e680028aa677b787687ef7c1283241e45620bc8df50",
"zh:aec15ca8605babba77b283f2ca35daca53e006d567e1c3a3daf50497035b820b",
"zh:c2cecf710b87c8f3a4d186da2ea12cf08041f97ae0c6db82649720d6ed929d65",
"zh:dbdd96f17aea25c7db2d516ab8172a5e683c6686c72a1a44173d2fe96319be39",
"zh:de11e180368434a796b1ab6f20fde7554dc74f7800e063b8e4c8ec3a86d0be63",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
"zh:f827a9c1540d210c56053a2d5d5a6abda924896ffa8eeedc94054cf6d44c5f60",
]
}
provider "registry.terraform.io/hashicorp/kubernetes" {
version = "2.34.0"
hashes = [
"h1:QOiO85qZnkUm7kAtuPkfblchuKPWUqRdNVWE5agpr8k=",
"zh:076b451dc8629c49f4260de6d43595e98ac5f1bdbebb01d112659ef94d99451f",
"zh:0c29855dbd3c6ba82fce680fa5ac969d4e09e20fecb4ed40166b778bd19895a4",
"zh:583b4dfcea4d8392dd7904c00b2ff41bbae78d238e8b72e5ad580370a24a4ecb",
"zh:5e20844d8d1af052381d00de4febd4055ad0f3c3c02795c361265b9ef72a1075",
"zh:766b7ab7c4727c62b5887c3922e0467c4cc355ba0dc3aabe465ebb86bc1caabb",
"zh:776a5000b441d7c8262d17d4a4aa4aa9760ae64de4cb7172961d9e007e0be1e5",
"zh:7838f509235116e55adeeecbe6def3da1b66dd3c4ce0de02fc7dc66a60e1d630",
"zh:931e5581ec66c145c1d29198bd23fddc8d0c5cbf4cda22e02dba65644c7842f2",
"zh:95e728efa2a31a63b879fd093507466e509e3bfc9325eb35ea3dc28fed15c6f7",
"zh:972b9e3ca2b6a1057dcf5003fc78cabb0dd8847580bddeb52d885ebd64df38ea",
"zh:ef6114217965d55f5bddbd7a316b8f85f15b8a77c075fcbed95813039d522e0a",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.6.3"
hashes = [
"h1:Fnaec9vA8sZ8BXVlN3Xn9Jz3zghSETIKg7ch8oXhxno=",
"zh:04ceb65210251339f07cd4611885d242cd4d0c7306e86dda9785396807c00451",
"zh:448f56199f3e99ff75d5c0afacae867ee795e4dfda6cb5f8e3b2a72ec3583dd8",
"zh:4b4c11ccfba7319e901df2dac836b1ae8f12185e37249e8d870ee10bb87a13fe",
"zh:4fa45c44c0de582c2edb8a2e054f55124520c16a39b2dfc0355929063b6395b1",
"zh:588508280501a06259e023b0695f6a18149a3816d259655c424d068982cbdd36",
"zh:737c4d99a87d2a4d1ac0a54a73d2cb62974ccb2edbd234f333abd079a32ebc9e",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:a357ab512e5ebc6d1fda1382503109766e21bbfdfaa9ccda43d313c122069b30",
"zh:c51bfb15e7d52cc1a2eaec2a903ac2aff15d162c172b1b4c17675190e8147615",
"zh:e0951ee6fa9df90433728b96381fb867e3db98f66f735e0c3e24f8f16903f0ad",
"zh:e3cdcb4e73740621dabd82ee6a37d6cfce7fee2a03d8074df65086760f5cf556",
"zh:eff58323099f1bd9a0bec7cb04f717e7f1b2774c7d612bf7581797e1622613a0",
]
}
provider "registry.terraform.io/hashicorp/vault" {
version = "4.5.0"
hashes = [
"h1:oKiQcEqj/HTCMzgGtZ531D/jnnM0i7iguSM8pU7aK8U=",
"zh:0a9301aa6a9b59db97682be568329526033bb50a4a308ad695c2a1877c1241c3",
"zh:0f8fee69ea4eaa27b86a391edc7de8e8b215e3c48f7074bab799986d5f707014",
"zh:2a2e51fe280e07700920bc8ed29b77e5c79fada0e4d5315d55ec0d2893bb5eed",
"zh:3fc7d9016bebe26a4c779ce6b87b181ed6a1af12499419726b8b0a0e3eaa7234",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:813a9e4875e58dbca2526b3088c0f76dbb2a66b10b910497a0b703518eaa73cd",
"zh:889ed6f21b94f89b8cbc4224454ced01a2792f12f53379d2fb1a2f2749bf624a",
"zh:acf9c01d403584015005083e64d8479d167e4f54e87e540311010133fcb5b023",
"zh:b377945a4b6a75c79793cb92c873aacc9c087c2a6e5792a1613f3aa2f3693848",
"zh:be243567b2a76ba2a546449e89764f707477cf25dcdd6d7f3b808ddf40aaf9f6",
"zh:d879fa16f391fb75b779067c751f3b8f80f5f4d73b2ff86814662038878a0ce4",
"zh:e47fb3daac933f5dcb034379fe77c0bf834512da7348e7643457c9af3b2ab36b",
]
}

@@ -0,0 +1,10 @@
terraform {
  backend "s3" {
    #encrypt = false
    skip_region_validation      = true
    skip_metadata_api_check     = true
    skip_credentials_validation = true
    skip_requesting_account_id  = true
    use_path_style              = true
  }
}
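
This is a partial backend configuration: the endpoint, bucket, and credentials are supplied at init time from the backend.tfvars shown below. A minimal usage sketch:

# Feed the generated settings into the s3 backend; state then lives in MinIO.
terraform init -backend-config=backend.tfvars
terraform plan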

@@ -0,0 +1,6 @@
endpoints = { s3 = "https://storage.bridge.fourlights.dev" }
access_key = "Y2Y1EA9WES29336DP9SE"
secret_key = "EzdA75G2LMlWilphFq9snV6HW6KKSp+BmubF4c+5"
bucket = "app-365zon"
key = "terraform.tfstate"
region = "eu-central-1"

@@ -0,0 +1,69 @@
locals {
  tld               = "fourlights.dev"
  cluster_shortname = "365zon"
  cluster_dns       = "${local.cluster_shortname}.${local.tld}"
  node_count        = 3
  bridge_dns        = "bridge.${local.tld}"
}

module "cluster-init" {
  source          = "../../modules/cluster/init-rke2"
  k8s_config_yaml = data.minio_s3_object.k8s_yaml.content
}

module "minio" {
  source           = "../../modules/minio"
  wait_on          = module.cluster-init.installed
  k8s_config_yaml  = data.minio_s3_object.k8s_yaml.content
  server_dns       = local.cluster_dns
  service_name     = "storage"
  namespace        = "minio"
  admin_server_dns = "local" # Restricted admin access, access via bridge
  tls              = false   # TLS termination happens on the bridge ingress
  admin            = true
  ingressClass     = "nginx"
}

module "mongodb" {
  source          = "../../modules/mongodb"
  wait_on         = module.cluster-init.installed
  k8s_config_yaml = data.minio_s3_object.k8s_yaml.content
  namespace       = "mongodb"
  replicas        = local.node_count
}

#module "frontend-minio" {
#  source  = "../../modules/minio/overlay"
#  wait_on = module.minio.installed
#
#  name       = "frontend"
#  server     = module.minio.minio_server
#  access_key = module.minio.minio_access_key
#  secret_key = module.minio.minio_secret_key
#}

module "rabbitmq" {
  source          = "../../modules/rabbitmq"
  wait_on         = module.cluster-init.installed
  k8s_config_yaml = data.minio_s3_object.k8s_yaml.content
  server_dns      = "local" # Restricted admin access, access via bridge
  service_name    = "rabbitmq"
  namespace       = "rabbitmq"
  tls             = false # TLS termination happens on the bridge ingress
  admin           = true
  ingressClass    = "nginx"
}
# THESE SHOULD BE IN BRIDGE
# generate an ed25519 key for argocd: `ssh-keygen -t ed25519 -C "argocd.bridge.fourlights.dev" -f argocd.bridge.fourlights.dev`
# add argocd.bridge.fourlights.dev.pub to the GitHub repo's deploy keys
# add the argocd repo secret
# add updated github.com known hosts for argocd
# add the argocd application
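
A sketch of those steps as commands; the repository URL is a placeholder, and the argocd CLI is assumed to be logged in to the bridge instance:

# generate the deploy key (as in the note above)
ssh-keygen -t ed25519 -C "argocd.bridge.fourlights.dev" -f argocd.bridge.fourlights.dev
# after adding the .pub as a read-only GitHub deploy key, register the repo
argocd repo add git@github.com:example/devops.git --ssh-private-key-path argocd.bridge.fourlights.dev
# refresh ArgoCD's known_hosts entries for github.com
ssh-keyscan github.com | argocd cert add-ssh --batch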

@@ -0,0 +1,60 @@
terraform {
  required_providers {
    minio = {
      source  = "aminueza/minio"
      version = "~> 2.5.0"
    }
  }
}

provider "minio" {
  minio_server   = var.minio_server
  minio_region   = var.region
  minio_user     = var.access_key
  minio_password = var.secret_key
  minio_ssl      = true
}

data "minio_s3_object" "k8s_yaml" {
  bucket_name = var.bucket
  object_name = "kube_config.yaml"
}

locals {
  k8s_config = yamldecode(data.minio_s3_object.k8s_yaml.content)
  k8s_host   = local.k8s_config.clusters[0].cluster.server
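  # try() returns the first value that evaluates without an error, so token
  # auth is preferred when the kubeconfig carries one; otherwise it falls
  # back to the embedded client certificate pair.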
  k8s_auth = try(
    {
      token       = local.k8s_config.users[0].user.token
      using_token = true
    },
    {
      client_certificate = base64decode(local.k8s_config.users[0].user["client-certificate-data"])
      client_key         = base64decode(local.k8s_config.users[0].user["client-key-data"])
      using_token        = false
    }
  )
}

provider "kubernetes" {
  host               = local.k8s_host
  insecure           = true
  token              = local.k8s_auth.using_token ? local.k8s_auth.token : null
  client_certificate = local.k8s_auth.using_token ? null : local.k8s_auth.client_certificate
  client_key         = local.k8s_auth.using_token ? null : local.k8s_auth.client_key
}

provider "helm" {
  kubernetes {
    host               = local.k8s_host
    insecure           = true
    token              = local.k8s_auth.using_token ? local.k8s_auth.token : null
    client_certificate = local.k8s_auth.using_token ? null : local.k8s_auth.client_certificate
    client_key         = local.k8s_auth.using_token ? null : local.k8s_auth.client_key
  }
}

provider "vault" {
  address = var.vault_addr
  token   = var.vault_token
}

@@ -0,0 +1,32 @@
#!/bin/bash
set -euo pipefail # fail fast: every lookup below must succeed for the tfvars to be valid
CLUSTER="app-365zon"
VAULT_TOKEN=$(cd ../bridge && terraform output -raw cluster-${CLUSTER}-vault-token)
VAULT_ADDR=$(cd ../bridge/bootstrap && terraform output -raw vault_uri)
MINIO_ADDR=$(cd ../bridge/bootstrap && terraform output -raw minio_uri)
MINIO_SERVER=$(cd ../bridge/bootstrap && terraform output -raw minio_server)
VAULT_ADDR="$VAULT_ADDR" VAULT_TOKEN="$VAULT_TOKEN" vault token renew
MINIO_ACCESS_KEY=$(VAULT_TOKEN="$VAULT_TOKEN" VAULT_ADDR="$VAULT_ADDR" vault kv get -mount="$CLUSTER" -field="access_key" "minio")
MINIO_SECRET_KEY=$(VAULT_TOKEN="$VAULT_TOKEN" VAULT_ADDR="$VAULT_ADDR" vault kv get -mount="$CLUSTER" -field="secret_key" "minio")
cat << EOF > backend.tfvars
endpoints = { s3 = "${MINIO_ADDR}" }
access_key = "${MINIO_ACCESS_KEY}"
secret_key = "${MINIO_SECRET_KEY}"
bucket = "${CLUSTER}"
key = "terraform.tfstate"
region = "eu-central-1"
EOF
cat << EOF > terraform.tfvars
endpoints = { s3 = "${MINIO_ADDR}" }
access_key = "${MINIO_ACCESS_KEY}"
secret_key = "${MINIO_SECRET_KEY}"
bucket = "${CLUSTER}"
region = "eu-central-1"
minio_server = "${MINIO_SERVER}"
vault_token = "${VAULT_TOKEN}"
vault_addr = "${VAULT_ADDR}"
EOF
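
Typical flow, as a sketch (assumes the bridge state and its outputs referenced above already exist):

./prepare.sh                                  # renders backend.tfvars and terraform.tfvars
terraform init -backend-config=backend.tfvars # point state at the MinIO bucket
terraform apply                               # terraform.tfvars is picked up automatically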

@@ -0,0 +1,8 @@
endpoints = { s3 = "https://storage.bridge.fourlights.dev" }
access_key = "Y2Y1EA9WES29336DP9SE"
secret_key = "EzdA75G2LMlWilphFq9snV6HW6KKSp+BmubF4c+5"
bucket = "app-365zon"
region = "eu-central-1"
minio_server = "storage.bridge.fourlights.dev"
vault_token = "hvs.CAESIBeM5f-JB0KdnPeq61Nv1nkf01zlo_isKyi7qhEzx218Gh4KHGh2cy5HVHRkVW9lYWZ6bGFyUXg5V2U0ZmNaUlI"
vault_addr = "https://vault.bridge.fourlights.dev"

@@ -0,0 +1,34 @@
variable "endpoints" {
type = map(string)
}
variable "access_key" {
type = string
sensitive = true
}
variable "secret_key" {
type = string
sensitive = true
}
variable "bucket" {
type = string
}
variable "region" {
type = string
}
variable "minio_server" {
type = string
}
variable "vault_addr" {
type = string
}
variable "vault_token" {
type = string
sensitive = true
}

infra/clusters/bridge/.gitignore vendored Normal file
@@ -0,0 +1 @@
.terraform

@@ -0,0 +1,219 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/aminueza/minio" {
version = "2.5.1"
constraints = "~> 2.5.0"
hashes = [
"h1:03gfmXf78G9h9XCHwavPwAwCjg1xmQIp4e5aAv6xIbI=",
"zh:0710a1fcd8e3501237990344160b0193860c2e643e73c728bf832e3d3fde971a",
"zh:0b2f25fbb59d056299faec7fb09012ef0545bd25e7ffa55a04a5c10c28908713",
"zh:0e0179fe12c855bcf5dbcf6858373eaa6e9dd790010096a3fcc667a23224388d",
"zh:23f6118cefb2fae443de98197490a9ba56fa51f1e324d1811709e0fdfc22ed7d",
"zh:34875cbaf07fbed8b8c639f38146f19188e57fc2eac4cdeac638b3d675b82ad4",
"zh:5b0fc4934533557af0001630801e9e637ab0e1588fd086f0cd04a52f4a13474f",
"zh:5d8eda5611ce4017688694e566c00609508a2c3a0e9aa587f6a58dcd1cb9846c",
"zh:70855ab6327a1b79b0619d0ed3538513f98fdfadae6fe60e986dbbf2891151f8",
"zh:7330d66c56a67a4c36f2fc2f1d7042503f5b4d0ec66a9bbe2b72920fb56b85de",
"zh:764597f7be92426cd63f7ae82d2845a1f2677d2b86921f19facf93fdbb80f503",
"zh:7dd947c72366377a16adc7bf0c0d09c32ade09dcedbcbf411da057ca970fb9e8",
"zh:9db57839cdc1d667271d5589ca4d9e791b665c0248e37c9ccdc79c0cef39aaed",
]
}
provider "registry.terraform.io/argoproj-labs/argocd" {
version = "7.0.2"
constraints = "7.0.2"
hashes = [
"h1:4lbS20EczuzhSNSOjp1mJoe2YbcXniBTzxmJHd+rjIE=",
"zh:083686eaeaa7b51ebaac42c3c7b01a15f020a735dc8dbe50aa6a6bff16888943",
"zh:16b1b813f33874844fadc747c57ae99cf8f119c119b3776a105c154fc4a54488",
"zh:25ed8dca5da5faa52392c7938c61dd9a83bc6388ad771062cecfc15c44bc3d8e",
"zh:3907351bbcb6a0c1c1abeb33dac5d70f798b0ecc05559f2ede40ae84b9079983",
"zh:3a737237f03b9b28de26b1fe9d20bcfa53f580489fc28d774396e5de38906fd3",
"zh:64421961cc342cec8280899352444a96ad1b09144fa933dc3a0dfb9bbae809a9",
"zh:9702119789cc42b98dc9d1a8d7666b608a964cf1355e3cf500b82bed1898f2fd",
"zh:9cc9ad41a6ce25aac40b9dd2291fc4d90a223add197155decdca7d2d82fc60f1",
"zh:a239381a36bf6041d6520c8db83fb281fd2417f4540c895e07db052dd108a72f",
"zh:ecca66064fff07719eec2ef35cd62d1cb65cf4a11f9ce96f3a9b9b7c78d614a5",
]
}
provider "registry.terraform.io/fusionauth/fusionauth" {
version = "0.1.111"
constraints = "0.1.111"
hashes = [
"h1:JQTmlej2YeAK9Sp2XI8P/1oes9V72x/BfNWbrqjyTV4=",
"zh:0d5a26688b25de6ac9bd825af2f12399337926bab09d42bc749d52f8aa1b918e",
"zh:1816f9975fbddcd43426261d78baf8aec6e78b1a3b86c4a7b4aae146a642158c",
"zh:26ac71f6f7ba246718f41d0c1d9ce9d98e1e7569304bf15343af7e5d609fc20c",
"zh:26e8888275bf88f3f8d2a5ad566ebbc8cb3e60aa9ee887d5f555f5656a370904",
"zh:3352fca64f31314dc59edd05dac8c8b9c08d167b559f2c3c5cbfd32d6245ab6d",
"zh:60ab9bae71ea5eef53a7327aee614f5556fbdd1b82ad6d1cf37fa3ec9daea8b2",
"zh:6c3ffd37f4dce5432e138db0b44a7dd473933611a2e4d80d71643277ff1fcf07",
"zh:87e506be6c9a267f776d9b9e76ce9e0c0d7e2eefdc23abd96645e75fbb82ae83",
"zh:8837a7e6b3cf8acd0cb415eb8fd4d6255e4e736083d592e41e9c9bb971993160",
"zh:961e6506c90f7e2dabaade25f143967b1f2aa47ad07cb1de5fb3e58c359f0816",
"zh:aee902445d5435baf46a7a71c925e81b36ee005b5334382b1125d98959c14a75",
"zh:b14494deb1c3efe2dc7bcde4ce29e1f65fa4a0a99ebe18d6680b54fcd9b4ce0f",
"zh:d0e20d75a0311e27bcf031f4a15fc630c212284c186b4af3a27b39a852eab07c",
"zh:d1e95e895043b61f4d686af05206a343462fc3b0cb50f88d23b667ad4fd353cc",
]
}
provider "registry.terraform.io/hashicorp/helm" {
version = "2.16.1"
hashes = [
"h1:TerRBdq69SxIWg3ET2VE0bcP0BYRIWZOp1QxXj/14Fk=",
"zh:0003f6719a32aee9afaeeb001687fc0cfc8c2d5f54861298cf1dc5711f3b4e65",
"zh:16cd5bfee09e7bb081b8b4470f31a9af508e52220fd97fd81c6dda725d9422fe",
"zh:51817de8fdc2c2e36785f23fbf4ec022111bd1cf7679498c16ad0ad7471c16db",
"zh:51b95829b2873be40a65809294bffe349e40cfccc3ff6fee0f471d01770e0ebd",
"zh:56b158dde897c47e1460181fc472c3e920aa23db40579fdc2aad333c1456d2dd",
"zh:916641d26c386959eb982e680028aa677b787687ef7c1283241e45620bc8df50",
"zh:aec15ca8605babba77b283f2ca35daca53e006d567e1c3a3daf50497035b820b",
"zh:c2cecf710b87c8f3a4d186da2ea12cf08041f97ae0c6db82649720d6ed929d65",
"zh:dbdd96f17aea25c7db2d516ab8172a5e683c6686c72a1a44173d2fe96319be39",
"zh:de11e180368434a796b1ab6f20fde7554dc74f7800e063b8e4c8ec3a86d0be63",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
"zh:f827a9c1540d210c56053a2d5d5a6abda924896ffa8eeedc94054cf6d44c5f60",
]
}
provider "registry.terraform.io/hashicorp/kubernetes" {
version = "2.31.0"
constraints = "2.31.0"
hashes = [
"h1:wGHbATbv/pBVTST1MtEn0zyVhZbzZJD2NYq2EddASHY=",
"zh:0d16b861edb2c021b3e9d759b8911ce4cf6d531320e5dc9457e2ea64d8c54ecd",
"zh:1bad69ed535a5f32dec70561eb481c432273b81045d788eb8b37f2e4a322cc40",
"zh:43c58e3912fcd5bb346b5cb89f31061508a9be3ca7dd4cd8169c066203bcdfb3",
"zh:4778123da9206918a92dfa73cc711475d2b9a8275ff25c13a30513c523ac9660",
"zh:8bfa67d2db03b3bfae62beebe6fb961aee8d91b7a766efdfe4d337b33dfd23dd",
"zh:9020bb5729db59a520ade5e24984b737e65f8b81751fbbd343926f6d44d22176",
"zh:90431dbfc5b92498bfbce38f0b989978c84421a6c33245b97788a46b563fbd6e",
"zh:b71a061dda1244f6a52500e703a9524b851e7b11bbf238c17bbd282f27d51cb2",
"zh:d6232a7651b834b89591b94bf4446050119dcde740247e6083a4d55a2cefd28a",
"zh:d89fba43e699e28e2b5e92fff2f75fc03dbc8de0df9dacefe1a8836f8f430753",
"zh:ef85c0b744f5ba1b10dadc3c11e331ba4225c45bb733e024d7218c24b02b0512",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
]
}
provider "registry.terraform.io/hashicorp/null" {
version = "3.2.3"
hashes = [
"h1:+AnORRgFbRO6qqcfaQyeX80W0eX3VmjadjnUFUJTiXo=",
"zh:22d062e5278d872fe7aed834f5577ba0a5afe34a3bdac2b81f828d8d3e6706d2",
"zh:23dead00493ad863729495dc212fd6c29b8293e707b055ce5ba21ee453ce552d",
"zh:28299accf21763ca1ca144d8f660688d7c2ad0b105b7202554ca60b02a3856d3",
"zh:55c9e8a9ac25a7652df8c51a8a9a422bd67d784061b1de2dc9fe6c3cb4e77f2f",
"zh:756586535d11698a216291c06b9ed8a5cc6a4ec43eee1ee09ecd5c6a9e297ac1",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:9d5eea62fdb587eeb96a8c4d782459f4e6b73baeece4d04b4a40e44faaee9301",
"zh:a6355f596a3fb8fc85c2fb054ab14e722991533f87f928e7169a486462c74670",
"zh:b5a65a789cff4ada58a5baffc76cb9767dc26ec6b45c00d2ec8b1b027f6db4ed",
"zh:db5ab669cf11d0e9f81dc380a6fdfcac437aea3d69109c7aef1a5426639d2d65",
"zh:de655d251c470197bcbb5ac45d289595295acb8f829f6c781d4a75c8c8b7c7dd",
"zh:f5c68199f2e6076bce92a12230434782bf768103a427e9bb9abee99b116af7b5",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.6.3"
hashes = [
"h1:Fnaec9vA8sZ8BXVlN3Xn9Jz3zghSETIKg7ch8oXhxno=",
"zh:04ceb65210251339f07cd4611885d242cd4d0c7306e86dda9785396807c00451",
"zh:448f56199f3e99ff75d5c0afacae867ee795e4dfda6cb5f8e3b2a72ec3583dd8",
"zh:4b4c11ccfba7319e901df2dac836b1ae8f12185e37249e8d870ee10bb87a13fe",
"zh:4fa45c44c0de582c2edb8a2e054f55124520c16a39b2dfc0355929063b6395b1",
"zh:588508280501a06259e023b0695f6a18149a3816d259655c424d068982cbdd36",
"zh:737c4d99a87d2a4d1ac0a54a73d2cb62974ccb2edbd234f333abd079a32ebc9e",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:a357ab512e5ebc6d1fda1382503109766e21bbfdfaa9ccda43d313c122069b30",
"zh:c51bfb15e7d52cc1a2eaec2a903ac2aff15d162c172b1b4c17675190e8147615",
"zh:e0951ee6fa9df90433728b96381fb867e3db98f66f735e0c3e24f8f16903f0ad",
"zh:e3cdcb4e73740621dabd82ee6a37d6cfce7fee2a03d8074df65086760f5cf556",
"zh:eff58323099f1bd9a0bec7cb04f717e7f1b2774c7d612bf7581797e1622613a0",
]
}
provider "registry.terraform.io/hashicorp/tls" {
version = "4.0.6"
hashes = [
"h1:dYSb3V94K5dDMtrBRLPzBpkMTPn+3cXZ/kIJdtFL+2M=",
"zh:10de0d8af02f2e578101688fd334da3849f56ea91b0d9bd5b1f7a243417fdda8",
"zh:37fc01f8b2bc9d5b055dc3e78bfd1beb7c42cfb776a4c81106e19c8911366297",
"zh:4578ca03d1dd0b7f572d96bd03f744be24c726bfd282173d54b100fd221608bb",
"zh:6c475491d1250050765a91a493ef330adc24689e8837a0f07da5a0e1269e11c1",
"zh:81bde94d53cdababa5b376bbc6947668be4c45ab655de7aa2e8e4736dfd52509",
"zh:abdce260840b7b050c4e401d4f75c7a199fafe58a8b213947a258f75ac18b3e8",
"zh:b754cebfc5184873840f16a642a7c9ef78c34dc246a8ae29e056c79939963c7a",
"zh:c928b66086078f9917aef0eec15982f2e337914c5c4dbc31dd4741403db7eb18",
"zh:cded27bee5f24de6f2ee0cfd1df46a7f88e84aaffc2ecbf3ff7094160f193d50",
"zh:d65eb3867e8f69aaf1b8bb53bd637c99c6b649ba3db16ded50fa9a01076d1a27",
"zh:ecb0c8b528c7a619fa71852bb3fb5c151d47576c5aab2bf3af4db52588722eeb",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
]
}
provider "registry.terraform.io/hashicorp/vault" {
version = "4.5.0"
hashes = [
"h1:oKiQcEqj/HTCMzgGtZ531D/jnnM0i7iguSM8pU7aK8U=",
"zh:0a9301aa6a9b59db97682be568329526033bb50a4a308ad695c2a1877c1241c3",
"zh:0f8fee69ea4eaa27b86a391edc7de8e8b215e3c48f7074bab799986d5f707014",
"zh:2a2e51fe280e07700920bc8ed29b77e5c79fada0e4d5315d55ec0d2893bb5eed",
"zh:3fc7d9016bebe26a4c779ce6b87b181ed6a1af12499419726b8b0a0e3eaa7234",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:813a9e4875e58dbca2526b3088c0f76dbb2a66b10b910497a0b703518eaa73cd",
"zh:889ed6f21b94f89b8cbc4224454ced01a2792f12f53379d2fb1a2f2749bf624a",
"zh:acf9c01d403584015005083e64d8479d167e4f54e87e540311010133fcb5b023",
"zh:b377945a4b6a75c79793cb92c873aacc9c087c2a6e5792a1613f3aa2f3693848",
"zh:be243567b2a76ba2a546449e89764f707477cf25dcdd6d7f3b808ddf40aaf9f6",
"zh:d879fa16f391fb75b779067c751f3b8f80f5f4d73b2ff86814662038878a0ce4",
"zh:e47fb3daac933f5dcb034379fe77c0bf834512da7348e7643457c9af3b2ab36b",
]
}
provider "registry.terraform.io/hetznercloud/hcloud" {
version = "1.49.1"
constraints = "~> 1.45"
hashes = [
"h1:FKGRNHVbcfQJd8EWrb8Ze5QHkaGr8zI+ZKxBMjvOwPk=",
"zh:3d5f9773da4f8203cf625d04a5a0e4ff7e202684c010a801a945756140c61cde",
"zh:446305d492017cda91e5c15122ec16ff15bfe3ef4d3fd6bcea0cdf7742ab1b86",
"zh:44d4f9156ed8b4f0444bd4dc456825940be49048828565964a192286d28c9f20",
"zh:492ad893d2f89bb17c9beb877c8ceb4a16caf39db1a79030fefeada6c7aa217f",
"zh:68dc552c19ad9d209ec6018445df6e06fb77a637513a53cc66ddce1b024082be",
"zh:7492495ffda6f6c49ab38b539bd2eb965b1150a63fb6b191a27dec07d17601cb",
"zh:850fe92005981ea00db86c3e49ba5b49732fdf1f7bd5530a68f6e272847059fc",
"zh:8cb67f744c233acfb1d68a6c27686315439d944edf733b95f113b4aa63d86713",
"zh:8e13dac46e8c2497772ed1baee701b1d1c26bcc95a63b5c4566c83468f504868",
"zh:c44249c6a8ba931e208a334792686b5355ab2da465cadea03c1ea8e73c02db12",
"zh:d103125a28a85c89aea0cb0c534fe3f504416c4d4fc75c37364b9ec5f66dd77d",
"zh:ed8f64e826aa9bfca95b72892271678cb78411b40d7b404a52404141e05a4ab1",
"zh:f40efad816de00b279bd1e2cbf62c76b0e5b2da150a0764f259984b318e30945",
"zh:f5e912d0873bf4ecc43feba4ceccdf158048080c76d557e47f34749139fdd452",
]
}
provider "registry.terraform.io/rancher/rancher2" {
version = "3.0.0"
constraints = "3.0.0"
hashes = [
"h1:Qnc86BDThHGg+UqfK8Ssx7l+KcYg8wBDsMU3mCgUK6E=",
"zh:3f28e165f4e6dbfb3c6f57ea96571f907915cf9d3eaf0041054ec3c4e22cc14b",
"zh:4d71e727690d8691321c9591248599fdb38e09e27dace74da6dee16ec01351b0",
"zh:51dc86277205c7514cad0edd6e48a300a470a846a12927323b09fb1550891bcb",
"zh:5b240c5eefc5bcffcf851bd11dc913cff05a0fbf7539e966c7638894265a6297",
"zh:8f754482629b587083c1b9e0e0646a577a8defdf64d61ca12c853dd41ffbc1bb",
"zh:9a212e0dd166e2dc1ae3c13c99b07eb6f48e5ec4b6dcdca857d3f3d05b0fcabc",
"zh:a4e45342af8e9a8ab2be9a3ffd8a7df244519fade4901cc0b95328937e8b80ba",
"zh:af148901e447f97b844b5d5a81df5c7fce0432b3f0a42cb674196f0ff2ce1ded",
"zh:b11a97fc16b1fde2956906569bae890be59d444c192c560f00dca418b8184875",
"zh:b1588f6b704326ee6cf384c6d2542e4bd6f08b5324098cb6a7c126fb37112b28",
"zh:e63dd35d6f962e22561b3dd1b6fd8c23bb8154ca492a89e6b4693569974c971f",
"zh:f1eeae30b192f569f3e16061e28f1ce876a6f48eeab4c113e5f771809719090b",
]
}

@@ -0,0 +1,10 @@
terraform {
  backend "s3" {
    #encrypt = false
    skip_region_validation      = true
    skip_metadata_api_check     = true
    skip_credentials_validation = true
    skip_requesting_account_id  = true
    use_path_style              = true
  }
}

@@ -0,0 +1,6 @@
endpoints = { s3 = "https://storage.bridge.fourlights.dev" }
access_key = "T8V84SHIVT6MAV424ES0"
secret_key = "23+N28yBK+cL3O2t9xsstT8jr2TpK+SgORCVIuxc"
bucket = "management"
key = "terraform.tfstate"
region = "eu-central-1"

@@ -0,0 +1 @@
.terraform

@@ -0,0 +1,220 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/aminueza/minio" {
version = "2.5.1"
constraints = "~> 2.5.0"
hashes = [
"h1:03gfmXf78G9h9XCHwavPwAwCjg1xmQIp4e5aAv6xIbI=",
"zh:0710a1fcd8e3501237990344160b0193860c2e643e73c728bf832e3d3fde971a",
"zh:0b2f25fbb59d056299faec7fb09012ef0545bd25e7ffa55a04a5c10c28908713",
"zh:0e0179fe12c855bcf5dbcf6858373eaa6e9dd790010096a3fcc667a23224388d",
"zh:23f6118cefb2fae443de98197490a9ba56fa51f1e324d1811709e0fdfc22ed7d",
"zh:34875cbaf07fbed8b8c639f38146f19188e57fc2eac4cdeac638b3d675b82ad4",
"zh:5b0fc4934533557af0001630801e9e637ab0e1588fd086f0cd04a52f4a13474f",
"zh:5d8eda5611ce4017688694e566c00609508a2c3a0e9aa587f6a58dcd1cb9846c",
"zh:70855ab6327a1b79b0619d0ed3538513f98fdfadae6fe60e986dbbf2891151f8",
"zh:7330d66c56a67a4c36f2fc2f1d7042503f5b4d0ec66a9bbe2b72920fb56b85de",
"zh:764597f7be92426cd63f7ae82d2845a1f2677d2b86921f19facf93fdbb80f503",
"zh:7dd947c72366377a16adc7bf0c0d09c32ade09dcedbcbf411da057ca970fb9e8",
"zh:9db57839cdc1d667271d5589ca4d9e791b665c0248e37c9ccdc79c0cef39aaed",
]
}
provider "registry.terraform.io/hashicorp/helm" {
version = "2.10.1"
constraints = "2.10.1"
hashes = [
"h1:ctDhNJU4tEcyoUgPzwKuJmbDIqUl25mCY+s/lVHP6Sg=",
"zh:0717312baed39fb0a00576297241b69b419880cad8771bf72dec97ebdc96b200",
"zh:0e0e287b4e8429a0700143c8159764502eba0b33b1d094bf0d4ef4d93c7802cb",
"zh:4f74605377dab4065aaad35a2c5fa6186558c6e2e57b9058bdc8a62cf91857b9",
"zh:505f4af4dedb7a4f8f45b4201900b8e16216bdc2a01cc84fe13cdbf937570e7e",
"zh:83f37fe692513c0ce307d487248765383e00f9a84ed95f993ce0d3efdf4204d3",
"zh:840e5a84e1b5744f0211f611a2c6890da58016a40aafd5971f12285164d4e29b",
"zh:8c03d8dee292fa0367b0511cf3e95b706e034f78025f5dff0388116e1798bf47",
"zh:937800d1860f6b3adbb20e65f11e5fcd940b21ce8bdb48198630426244691325",
"zh:c1853aa5cbbdd1d46f4b169e84c3482103f0e8575a9bb044dbde908e27348c5d",
"zh:c9b0f640590da20931c30818b0b0587aa517d5606cb6e8052e4e4bf38f97b54d",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
"zh:fe8bd4dd09dc7ca218959eda1ced9115408c2cdc9b4a76964bfa455f3bcadfd3",
]
}
provider "registry.terraform.io/hashicorp/kubernetes" {
version = "2.34.0"
hashes = [
"h1:QOiO85qZnkUm7kAtuPkfblchuKPWUqRdNVWE5agpr8k=",
"zh:076b451dc8629c49f4260de6d43595e98ac5f1bdbebb01d112659ef94d99451f",
"zh:0c29855dbd3c6ba82fce680fa5ac969d4e09e20fecb4ed40166b778bd19895a4",
"zh:583b4dfcea4d8392dd7904c00b2ff41bbae78d238e8b72e5ad580370a24a4ecb",
"zh:5e20844d8d1af052381d00de4febd4055ad0f3c3c02795c361265b9ef72a1075",
"zh:766b7ab7c4727c62b5887c3922e0467c4cc355ba0dc3aabe465ebb86bc1caabb",
"zh:776a5000b441d7c8262d17d4a4aa4aa9760ae64de4cb7172961d9e007e0be1e5",
"zh:7838f509235116e55adeeecbe6def3da1b66dd3c4ce0de02fc7dc66a60e1d630",
"zh:931e5581ec66c145c1d29198bd23fddc8d0c5cbf4cda22e02dba65644c7842f2",
"zh:95e728efa2a31a63b879fd093507466e509e3bfc9325eb35ea3dc28fed15c6f7",
"zh:972b9e3ca2b6a1057dcf5003fc78cabb0dd8847580bddeb52d885ebd64df38ea",
"zh:ef6114217965d55f5bddbd7a316b8f85f15b8a77c075fcbed95813039d522e0a",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
]
}
provider "registry.terraform.io/hashicorp/local" {
version = "2.4.0"
constraints = "2.4.0"
hashes = [
"h1:R97FTYETo88sT2VHfMgkPU3lzCsZLunPftjSI5vfKe8=",
"zh:53604cd29cb92538668fe09565c739358dc53ca56f9f11312b9d7de81e48fab9",
"zh:66a46e9c508716a1c98efbf793092f03d50049fa4a83cd6b2251e9a06aca2acf",
"zh:70a6f6a852dd83768d0778ce9817d81d4b3f073fab8fa570bff92dcb0824f732",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:82a803f2f484c8b766e2e9c32343e9c89b91997b9f8d2697f9f3837f62926b35",
"zh:9708a4e40d6cc4b8afd1352e5186e6e1502f6ae599867c120967aebe9d90ed04",
"zh:973f65ce0d67c585f4ec250c1e634c9b22d9c4288b484ee2a871d7fa1e317406",
"zh:c8fa0f98f9316e4cfef082aa9b785ba16e36ff754d6aba8b456dab9500e671c6",
"zh:cfa5342a5f5188b20db246c73ac823918c189468e1382cb3c48a9c0c08fc5bf7",
"zh:e0e2b477c7e899c63b06b38cd8684a893d834d6d0b5e9b033cedc06dd7ffe9e2",
"zh:f62d7d05ea1ee566f732505200ab38d94315a4add27947a60afa29860822d3fc",
"zh:fa7ce69dde358e172bd719014ad637634bbdabc49363104f4fca759b4b73f2ce",
]
}
provider "registry.terraform.io/hashicorp/null" {
version = "3.2.2"
constraints = "3.2.2"
hashes = [
"h1:zT1ZbegaAYHwQa+QwIFugArWikRJI9dqohj8xb0GY88=",
"zh:3248aae6a2198f3ec8394218d05bd5e42be59f43a3a7c0b71c66ec0df08b69e7",
"zh:32b1aaa1c3013d33c245493f4a65465eab9436b454d250102729321a44c8ab9a",
"zh:38eff7e470acb48f66380a73a5c7cdd76cc9b9c9ba9a7249c7991488abe22fe3",
"zh:4c2f1faee67af104f5f9e711c4574ff4d298afaa8a420680b0cb55d7bbc65606",
"zh:544b33b757c0b954dbb87db83a5ad921edd61f02f1dc86c6186a5ea86465b546",
"zh:696cf785090e1e8cf1587499516b0494f47413b43cb99877ad97f5d0de3dc539",
"zh:6e301f34757b5d265ae44467d95306d61bef5e41930be1365f5a8dcf80f59452",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:913a929070c819e59e94bb37a2a253c228f83921136ff4a7aa1a178c7cce5422",
"zh:aa9015926cd152425dbf86d1abdbc74bfe0e1ba3d26b3db35051d7b9ca9f72ae",
"zh:bb04798b016e1e1d49bcc76d62c53b56c88c63d6f2dfe38821afef17c416a0e1",
"zh:c23084e1b23577de22603cff752e59128d83cfecc2e6819edadd8cf7a10af11e",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.6.3"
hashes = [
"h1:Fnaec9vA8sZ8BXVlN3Xn9Jz3zghSETIKg7ch8oXhxno=",
"zh:04ceb65210251339f07cd4611885d242cd4d0c7306e86dda9785396807c00451",
"zh:448f56199f3e99ff75d5c0afacae867ee795e4dfda6cb5f8e3b2a72ec3583dd8",
"zh:4b4c11ccfba7319e901df2dac836b1ae8f12185e37249e8d870ee10bb87a13fe",
"zh:4fa45c44c0de582c2edb8a2e054f55124520c16a39b2dfc0355929063b6395b1",
"zh:588508280501a06259e023b0695f6a18149a3816d259655c424d068982cbdd36",
"zh:737c4d99a87d2a4d1ac0a54a73d2cb62974ccb2edbd234f333abd079a32ebc9e",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:a357ab512e5ebc6d1fda1382503109766e21bbfdfaa9ccda43d313c122069b30",
"zh:c51bfb15e7d52cc1a2eaec2a903ac2aff15d162c172b1b4c17675190e8147615",
"zh:e0951ee6fa9df90433728b96381fb867e3db98f66f735e0c3e24f8f16903f0ad",
"zh:e3cdcb4e73740621dabd82ee6a37d6cfce7fee2a03d8074df65086760f5cf556",
"zh:eff58323099f1bd9a0bec7cb04f717e7f1b2774c7d612bf7581797e1622613a0",
]
}
provider "registry.terraform.io/hashicorp/tls" {
version = "4.0.4"
constraints = "4.0.4"
hashes = [
"h1:pe9vq86dZZKCm+8k1RhzARwENslF3SXb9ErHbQfgjXU=",
"zh:23671ed83e1fcf79745534841e10291bbf34046b27d6e68a5d0aab77206f4a55",
"zh:45292421211ffd9e8e3eb3655677700e3c5047f71d8f7650d2ce30242335f848",
"zh:59fedb519f4433c0fdb1d58b27c210b27415fddd0cd73c5312530b4309c088be",
"zh:5a8eec2409a9ff7cd0758a9d818c74bcba92a240e6c5e54b99df68fff312bbd5",
"zh:5e6a4b39f3171f53292ab88058a59e64825f2b842760a4869e64dc1dc093d1fe",
"zh:810547d0bf9311d21c81cc306126d3547e7bd3f194fc295836acf164b9f8424e",
"zh:824a5f3617624243bed0259d7dd37d76017097dc3193dac669be342b90b2ab48",
"zh:9361ccc7048be5dcbc2fafe2d8216939765b3160bd52734f7a9fd917a39ecbd8",
"zh:aa02ea625aaf672e649296bce7580f62d724268189fe9ad7c1b36bb0fa12fa60",
"zh:c71b4cd40d6ec7815dfeefd57d88bc592c0c42f5e5858dcc88245d371b4b8b1e",
"zh:dabcd52f36b43d250a3d71ad7abfa07b5622c69068d989e60b79b2bb4f220316",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
]
}
provider "registry.terraform.io/hashicorp/vault" {
version = "4.5.0"
hashes = [
"h1:oKiQcEqj/HTCMzgGtZ531D/jnnM0i7iguSM8pU7aK8U=",
"zh:0a9301aa6a9b59db97682be568329526033bb50a4a308ad695c2a1877c1241c3",
"zh:0f8fee69ea4eaa27b86a391edc7de8e8b215e3c48f7074bab799986d5f707014",
"zh:2a2e51fe280e07700920bc8ed29b77e5c79fada0e4d5315d55ec0d2893bb5eed",
"zh:3fc7d9016bebe26a4c779ce6b87b181ed6a1af12499419726b8b0a0e3eaa7234",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:813a9e4875e58dbca2526b3088c0f76dbb2a66b10b910497a0b703518eaa73cd",
"zh:889ed6f21b94f89b8cbc4224454ced01a2792f12f53379d2fb1a2f2749bf624a",
"zh:acf9c01d403584015005083e64d8479d167e4f54e87e540311010133fcb5b023",
"zh:b377945a4b6a75c79793cb92c873aacc9c087c2a6e5792a1613f3aa2f3693848",
"zh:be243567b2a76ba2a546449e89764f707477cf25dcdd6d7f3b808ddf40aaf9f6",
"zh:d879fa16f391fb75b779067c751f3b8f80f5f4d73b2ff86814662038878a0ce4",
"zh:e47fb3daac933f5dcb034379fe77c0bf834512da7348e7643457c9af3b2ab36b",
]
}
provider "registry.terraform.io/hetznercloud/hcloud" {
version = "1.49.1"
constraints = "~> 1.45"
hashes = [
"h1:FKGRNHVbcfQJd8EWrb8Ze5QHkaGr8zI+ZKxBMjvOwPk=",
"zh:3d5f9773da4f8203cf625d04a5a0e4ff7e202684c010a801a945756140c61cde",
"zh:446305d492017cda91e5c15122ec16ff15bfe3ef4d3fd6bcea0cdf7742ab1b86",
"zh:44d4f9156ed8b4f0444bd4dc456825940be49048828565964a192286d28c9f20",
"zh:492ad893d2f89bb17c9beb877c8ceb4a16caf39db1a79030fefeada6c7aa217f",
"zh:68dc552c19ad9d209ec6018445df6e06fb77a637513a53cc66ddce1b024082be",
"zh:7492495ffda6f6c49ab38b539bd2eb965b1150a63fb6b191a27dec07d17601cb",
"zh:850fe92005981ea00db86c3e49ba5b49732fdf1f7bd5530a68f6e272847059fc",
"zh:8cb67f744c233acfb1d68a6c27686315439d944edf733b95f113b4aa63d86713",
"zh:8e13dac46e8c2497772ed1baee701b1d1c26bcc95a63b5c4566c83468f504868",
"zh:c44249c6a8ba931e208a334792686b5355ab2da465cadea03c1ea8e73c02db12",
"zh:d103125a28a85c89aea0cb0c534fe3f504416c4d4fc75c37364b9ec5f66dd77d",
"zh:ed8f64e826aa9bfca95b72892271678cb78411b40d7b404a52404141e05a4ab1",
"zh:f40efad816de00b279bd1e2cbf62c76b0e5b2da150a0764f259984b318e30945",
"zh:f5e912d0873bf4ecc43feba4ceccdf158048080c76d557e47f34749139fdd452",
]
}
provider "registry.terraform.io/loafoe/ssh" {
version = "2.6.0"
constraints = "2.6.0"
hashes = [
"h1:80FIBNQBLj9p0j7EMQDWxzY6Fh0VODiflCww1/Wx6rk=",
"zh:1285448b69bd388a4a59ed170f9c07c641207a7291057860557937807d02da95",
"zh:2472c5dee4265cb555a627aa4ecc5702d32cd7aebe85722820df7499b7d4502a",
"zh:2a9b8f0ad446febb517a7fe38de9b02bc1bcceb90843a713c546770eff44aa84",
"zh:66e62d5bb280af7407315a62aee2ab35f8ce1b36f7400633f75f72111deede87",
"zh:832fc1213c3447fa831c1b2331cde71072c95a3f3eae04ff23dd09975d7c6577",
"zh:a4e19d6a6e776732cce70f350e8cf1954febf1e9281b4668f567636c7d0f75d8",
"zh:ac18abae233fe367f164c5a4492875a25e1c1de38a181876ffdc9f87c75abacf",
"zh:b44203b49cdef04f3e110923017a1e80c8b5588b91605e0c0985b3c2d839d6c0",
"zh:bf489e0f8ebc6f1d0d28cd6eadd871d6d63b952deaf10271765609fce417a5cf",
"zh:c41a209c6a4bf81309e573a53ad7b9e8d655bd7e81e40685214aeac92e682333",
"zh:ded134d1359bd39c2261ce1ed3bd468f8fac4fff09c07a213a3d281313d99d59",
]
}
provider "registry.terraform.io/rancher/rancher2" {
version = "3.0.0"
constraints = "3.0.0"
hashes = [
"h1:Qnc86BDThHGg+UqfK8Ssx7l+KcYg8wBDsMU3mCgUK6E=",
"zh:3f28e165f4e6dbfb3c6f57ea96571f907915cf9d3eaf0041054ec3c4e22cc14b",
"zh:4d71e727690d8691321c9591248599fdb38e09e27dace74da6dee16ec01351b0",
"zh:51dc86277205c7514cad0edd6e48a300a470a846a12927323b09fb1550891bcb",
"zh:5b240c5eefc5bcffcf851bd11dc913cff05a0fbf7539e966c7638894265a6297",
"zh:8f754482629b587083c1b9e0e0646a577a8defdf64d61ca12c853dd41ffbc1bb",
"zh:9a212e0dd166e2dc1ae3c13c99b07eb6f48e5ec4b6dcdca857d3f3d05b0fcabc",
"zh:a4e45342af8e9a8ab2be9a3ffd8a7df244519fade4901cc0b95328937e8b80ba",
"zh:af148901e447f97b844b5d5a81df5c7fce0432b3f0a42cb674196f0ff2ce1ded",
"zh:b11a97fc16b1fde2956906569bae890be59d444c192c560f00dca418b8184875",
"zh:b1588f6b704326ee6cf384c6d2542e4bd6f08b5324098cb6a7c126fb37112b28",
"zh:e63dd35d6f962e22561b3dd1b6fd8c23bb8154ca492a89e6b4693569974c971f",
"zh:f1eeae30b192f569f3e16061e28f1ce876a6f48eeab4c113e5f771809719090b",
]
}

@@ -0,0 +1,24 @@
module "cluster-app-365zon-bootstrap" {
source = "../../../modules/cluster/bootstrap"
wait_on = module.vault.installed
cluster = "app-365zon"
vault_server = module.vault.vault_uri
vault_root_token = module.vault.vault_root_token
minio_access_key = module.minio.minio_access_key
minio_secret_key = module.minio.minio_secret_key
minio_server = module.minio.minio_server
}
resource "vault_kv_secret_v2" "clusters" {
mount = "management"
name = "clusters"
delete_all_versions = true
data_json = jsonencode({
"app-365zon" = module.cluster-app-365zon-bootstrap.vault_token
})
depends_on = [module.vault.installed]
}
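
The per-cluster token written above can be read back with the same CLI pattern the app cluster's prepare.sh uses (VAULT_ADDR and VAULT_TOKEN must point at the bridge Vault):

vault kv get -mount=management -field=app-365zon clusters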

@@ -0,0 +1 @@
{"auths":{"ghcr.io":{"username":"thomas@fourlights.nl","password":"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","auth":"BASE64_ENCODED_USERNAME:PAT"}}}

@@ -0,0 +1,58 @@
public_ip4="$(ip -4 addr show dev eth0 | grep -oP '(?<=inet\s)\d+(\.\d+){3}')"
public_ip6="$(ip -6 addr show dev eth0 | grep -oP '(?<=inet6\s)[\da-f:]+')"
lan_ips4="10.0.0.0/8"

LAN="enp7s0"
WAN="eth0"
CONTAINERS="vxlan+ flannel+ cni+ cali+ docker+ podman+"

server_k3s_ports="tcp/6443"
client_k3s_ports="6443"

version 6

# use snat to outgoing ip4 address
ipv4 snat to "${public_ip4}" outface ${WAN}

# enable ipv6 basic stuff on all interfaces
ipv6 interface any v6interop proto icmpv6
    client ipv6neigh accept
    server ipv6neigh accept
    client ipv6router accept
    server ipv6router accept
    client ipv6mld accept
    server ipv6mld accept
    policy return

# enable dhcpv6 on all interfaces
ipv6 interface any dhcpv6
    server dhcpv6 accept
    client dhcpv6 accept
    policy return

# allow everything on lan and container interfaces
interface "${LAN} ${CONTAINERS}" lan
    policy accept

# protect from the internet
interface46 "${WAN}" internet src4 not "${UNROUTABLE_IPS}" dst4 "${public_ip4}" src6 not "${UNROUTABLE_IPS}" dst6 "${public_ip6}"
    protection strong
    client all accept
    server ident reject with tcp-reset
    server "http https" accept
    server "pop3 pop3s smtp smtps imap imaps sieve" accept
    server "k3s" accept

# Accept all traffic from the container interfaces towards the internet or towards
# other containers. Note that this doesn't mean all traffic is allowed: Kubernetes
# installs its own rules, which take precedence over these.
router containers inface "${LAN} ${CONTAINERS}" outface "${LAN} ${CONTAINERS} ${WAN}"
    policy accept

# Connection between internet and lan
router internet2lan inface ${WAN} outface ${LAN}
    client all accept # allow all outgoing traffic, though this may be redundant with the containers policy
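
New rule sets can be trialled with automatic rollback before being enabled at boot; a sketch using standard FireHOL subcommands:

firehol try    # activates the config, then reverts unless confirmed in time
firehol start  # activates permanently once the rules check out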

@@ -0,0 +1,83 @@
#cloud-config
package_update: true
package_upgrade: true

packages:
  - firehol

write_files:
  - path: /etc/sysctl.d/99-enable-ip-forward.conf
    content: net.ipv4.ip_forward = 1
  - path: /etc/default/firehol
    content: |
      START_FIREHOL=YES
      WAIT_FOR_IFACE=""
      FIREHOL_ESTABLISHED_ACTIVATION_ACCEPT=0
  - path: /etc/firehol/firehol.conf
    content: |
      public_ip4="$(ip -4 addr show dev eth0 | grep -oP '(?<=inet\s)\d+(\.\d+){3}')"
      public_ip6="$(ip -6 addr show dev eth0 | grep -oP '(?<=inet6\s)[\da-f:]+')"
      lan_ips4="10.0.0.0/8"

      server_sieve_ports="tcp/4190"
      client_sieve_ports="4190"
      server_ssh_alt_ports="tcp/2223"
      client_ssh_alt_ports="2223"
      server_k3s_ports="tcp/6443"
      client_k3s_ports="6443"

      LAN="enp7s0"
      WAN="eth0"
      CONTAINERS="vxlan+ flannel+ cni+ cali+ docker+ podman+"

      version 6

      # use snat to outgoing ip4 address
      ipv4 snat to "${public_ip4}" outface ${WAN}

      # enable ipv6 basic stuff on all interfaces
      ipv6 interface any v6interop proto icmpv6
          client ipv6neigh accept
          server ipv6neigh accept
          client ipv6router accept
          server ipv6router accept
          client ipv6mld accept
          server ipv6mld accept
          policy return

      # enable dhcpv6 on all interfaces
      ipv6 interface any dhcpv6
          server dhcpv6 accept
          client dhcpv6 accept
          policy return

      # allow everything on lan and container interfaces
      interface "${LAN} ${CONTAINERS}" lan
          policy accept

      # protect from the internet
      interface46 "${WAN}" internet src4 not "${UNROUTABLE_IPS}" dst4 "${public_ip4}" src6 not "${UNROUTABLE_IPS}" dst6 "${public_ip6}"
          protection strong
          client all accept
          server ident reject with tcp-reset
          server "ssh http https" accept
          server "pop3 pop3s smtp smtps imap imaps sieve ssh_alt k3s" accept

      # Accept all traffic from the container interfaces towards the internet or towards
      # other containers. Note that this doesn't mean all traffic is allowed: Kubernetes
      # installs its own rules, which take precedence over these.
      router containers inface "${LAN} ${CONTAINERS}" outface "${LAN} ${CONTAINERS} ${WAN}"
          policy accept

      # Connection between internet and lan
      router internet2lan inface ${WAN} outface ${LAN}
          client all accept # allow all outgoing traffic, though this may be redundant with the containers policy

runcmd:
  - systemctl enable --now firehol
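
The user-data can be linted before Terraform bakes it into the server resource; a sketch, assuming a current cloud-init release (which ships the schema subcommand) and the path used by main.tf below:

cloud-init schema --config-file files/userdata.template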

@@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAtSrideGwYPtqZLKmYpdKBYaRp3gLKclIC2W5CsivlnGpXbC8
8O3hmHOMWbzuABKuse7jFus8GLJgPNGRdg3kihcym9xfKbus1d4FyeDfuHcfo3Tx
3BYvPITUqjfpx584t9Iv+FYfQUeRdkDKQORgTC6xwq8e3RDhi3o5dW6tsndMfOYN
S4EpJIYaTUbz3GMt3T292ItKthc948nsEE4ancex3HXwdHaIE1Gyha8/NDoJepRA
eh/VJsDiEvO7falz7U7QocUfZs0dITRP9TtwpFwuW6OI0EEW4NuKv9aIgCsZoHAx
64UIS4GkDnrZy7pk8Jbiuqz4YJb6g/urL/m1IwIDAQABAoIBAFhq+qVPpZZffdhx
fFTnw1+ZAqM6KjCVpMyzM7E4qsGA7YjlRgFMB+swkZIGbB2rAImkrj40RltDS0bD
fPuh4iUnFzMMRYGHr92dAzQ4xNcxhEZxP80LIrill2kP6s/bPIIyWekYlx50AGE3
U76B9yGXNHhwoV6A66L33u0aPDm5v2pRbPAPhmsDKEscYfZr84du+qJA3Ysi2hyZ
JtFG2nTeWzzLF91ZWYCyoTLmvL084dZKIP6jAbfFelHvGxhwb1dUHWeTYD4fulK/
4+udtP3tvM0hv/Fm3VElV+FH15l3/F5SHhcF1jEXWWB/3hZh3U8Ab54tRJCH3IIb
oE2S3pkCgYEA2NlkarAe+91bhD6OU/4dE+DUtwr1qeLuP8xUmi7xhUrYsXUOw6/7
644nX/jVBTyJGnvO7bxxlqZRNyfIhu3gdEjLXHTf5Ua9E6yEDjvAWyhGCty3tRR8
g2U2JtCT8GbaNExN99A/M3sin+geRwudQob7j5EzGRcwZJJq4Bzq0wcCgYEA1eBP
6fDFlm9bSVr7yIt94UrVv75v+OHR4LLAwoeaKpJ4CHBApcXOTMEg3OQLu4ZD14LE
2UUWjeBuf5VAyowv7blgKM54CpQ2nIdoVnEzr4MHJEwVwAj3Ma9ghJOxAcEBhBKW
S0oacwI44fFqMu1yi4C7S31Q95RokgGKPi7MOgUCgYBVwsxbAWKPm8D48UCa6VIY
rX+TEMuNr8koBCilfdFZNPu0WyZGsFACngrmguPEmWdjyPlSvodx3D8cgChnbKar
dqqu2y49YBehM0Za1rjBlJD29MnW3M5NrPaYrT8FAbhknJTtYDfOpXFRKpzm3ioI
yJeu9O7vimNCM0H/dpBMewKBgQDL9j3fqJiVzR5uR1EkQDrMtlQ/pnocDCbdtTp0
iWLxRHsp+Pr93DTQJ8GSRfwWWoWqKXBWjzdiF+zoyMpgHu66M3XLlJMP66ZlAhDz
lsMmlrsh11lKqIIspMZTyJEeThSHAj7IECRHF5BtkI+pf54kAaEb7adgY+yRiNxw
Tqq+jQKBgHEPRb1HPmKT55PPSqm/P1jVP324DrhVulQrQ0Ww0G1fjTh0qMjWmT7S
3+CAEbAb/lzLhEXUTRcOo3oc5bZn+n5lAoDsBRP1O6rhQoC66eW6+B6TH54q+HD8
GBXz1Ki49XNAbtI97d5bJGVKOxmsz3F9BNtDH5KZx3eJjBm420DM
-----END RSA PRIVATE KEY-----

@@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC1KuJ14bBg+2pksqZil0oFhpGneAspyUgLZbkKyK+WcaldsLzw7eGYc4xZvO4AEq6x7uMW6zwYsmA80ZF2DeSKFzKb3F8pu6zV3gXJ4N+4dx+jdPHcFi88hNSqN+nHnzi30i/4Vh9BR5F2QMpA5GBMLrHCrx7dEOGLejl1bq2yd0x85g1LgSkkhhpNRvPcYy3dPb3Yi0q2Fz3jyewQThqdx7HcdfB0dogTUbKFrz80Ogl6lEB6H9UmwOIS87t9qXPtTtChxR9mzR0hNE/1O3CkXC5bo4jQQRbg24q/1oiAKxmgcDHrhQhLgaQOetnLumTwluK6rPhglvqD+6sv+bUj

@@ -0,0 +1,19 @@
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkakNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUzTXpJeE9EVXdPRFF3SGhjTk1qUXhNVEl4TVRBek1USTBXaGNOTXpReE1URTVNVEF6TVRJMApXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUzTXpJeE9EVXdPRFF3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFRVG1jZVdEcTZ4dG11Q1VvKzVMbURRemRWQTI3b05obnhTVm9qYVVROW0KNmxib29pbkpLZndLODNSek81Qlp0ZjNVSHhZRmQrOWV3d0Y3SVFoVktaOG9vMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVWt5cGhDU0ZjL1h6VDAxZ01FKytzCitENE5vZ0F3Q2dZSUtvWkl6ajBFQXdJRFJ3QXdSQUlnU2JKM2RiQjU4Wko1TFN2TmxQR1Y5cERoQ2FRR0JobGEKRWlWbXk3NU5rVGdDSUNvVVlVaml0bTJsakxiaHF3cVgwc05BTlBjSkxRaklncFdTdlQ4MnFmdHUKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
    server: https://49.13.215.164:6443
  name: default
contexts:
- context:
    cluster: default
    user: default
  name: default
current-context: default
kind: Config
preferences: {}
users:
- name: default
  user:
    client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrVENDQVRlZ0F3SUJBZ0lJREZDNFgxc2ZPT0l3Q2dZSUtvWkl6ajBFQXdJd0l6RWhNQjhHQTFVRUF3d1kKYXpOekxXTnNhV1Z1ZEMxallVQXhOek15TVRnMU1EZzBNQjRYRFRJME1URXlNVEV3TXpFeU5Gb1hEVEkxTVRFeQpNVEV3TXpFeU5Gb3dNREVYTUJVR0ExVUVDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhGVEFUQmdOVkJBTVRESE41CmMzUmxiVHBoWkcxcGJqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJGWFpLRzhwc3Y5a0p0aUkKUSszLzhkWXFpU1FvUW9RRDFWMHlSaWJFdDJGQjlYK0ZqNHQzMGpEeEoyZjRhVm1BdURQbFl2S0p4UE0yaUR1UwoxVFQ0T3RtalNEQkdNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBakFmCkJnTlZIU01FR0RBV2dCUUFVRWJYNUZZRnBiM0QyK0QrcWJOZS9nMFk0ekFLQmdncWhrak9QUVFEQWdOSUFEQkYKQWlFQTRDL0I3dWVmM0RMUDAwc25EeHo2UWR6Um1HN09BYVlVZVZQWEVYakMwVDRDSUZSeUFmdmYvcW9TS0J0WQo0ZEpXNEJUL0ZUUXhCd3VDSDhVWHFTVTRURkdVCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0KLS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkekNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdFkyeHAKWlc1MExXTmhRREUzTXpJeE9EVXdPRFF3SGhjTk1qUXhNVEl4TVRBek1USTBXaGNOTXpReE1URTVNVEF6TVRJMApXakFqTVNFd0h3WURWUVFEREJock0zTXRZMnhwWlc1MExXTmhRREUzTXpJeE9EVXdPRFF3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFSZVhmUmgvdlhWOUhKNnIvVWtkYVlZckk3S2tib3BTeUVMalZCcWZoZUkKUlBxNTYxL2pPUWxUWkNLQklsdU5WWGhsYnI0b0ZoeWJFQ2k3SVRwNkpGQWxvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVUFGQkcxK1JXQmFXOXc5dmcvcW16Clh2NE5HT013Q2dZSUtvWkl6ajBFQXdJRFNBQXdSUUloQUlaN1lYRlhVQmRLaHFUQ3hnSHBNRy9qaFE1ZmhPakEKOWlzdC9mSGNVWmJBQWlCbitXZ0VEaW1CVTRDWjh4Q3NVWTJCOVl3OGdmV3h2U2JDVE9INU9BNUVtZz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
    client-key-data: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSVBLamhPTnFGUUs3VkRkcWFuTjB4STB5S000MnRlN1NpWFd4VjdGZFg1QmFvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFVmRrb2J5bXkvMlFtMkloRDdmL3gxaXFKSkNoQ2hBUFZYVEpHSnNTM1lVSDFmNFdQaTNmUwpNUEVuWi9ocFdZQzRNK1ZpOG9uRTh6YUlPNUxWTlBnNjJRPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=
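
Pointing kubectl at this file is enough to reach the cluster (the server address is the node's public IP, so tcp/6443 must be reachable; see the k3s rules in the firewall configs above):

KUBECONFIG=kube_config.yaml kubectl get nodes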

@@ -0,0 +1,250 @@
locals {
  # Local variables used to reduce repetition
  node_username = "root"
  server_dns    = join(".", ["bridge", var.hostname])
}

resource "tls_private_key" "global_key" {
  algorithm = "RSA"
  rsa_bits  = 2048
}

resource "local_sensitive_file" "ssh_private_key_pem" {
  filename        = "${path.module}/id_rsa"
  content         = tls_private_key.global_key.private_key_pem
  file_permission = "0600"
}

resource "local_file" "ssh_public_key_openssh" {
  filename = "${path.module}/id_rsa.pub"
  content  = tls_private_key.global_key.public_key_openssh
}

resource "hcloud_network" "private" {
  name     = "${var.prefix}-private-network"
  ip_range = var.network_cidr
}

resource "hcloud_network_route" "egress" {
  network_id  = hcloud_network.private.id
  destination = "0.0.0.0/0"
  gateway     = "10.0.1.1"
}

resource "hcloud_network_subnet" "private" {
  type         = "cloud"
  network_id   = hcloud_network.private.id
  network_zone = var.network_zone
  ip_range     = var.network_ip_range
}

# Temporary key pair used for SSH access
resource "hcloud_ssh_key" "management_ssh_key" {
  name       = "${var.prefix}-management-ssh-key"
  public_key = tls_private_key.global_key.public_key_openssh
}

# HCloud instance for creating a single-node RKE cluster and installing the Rancher server
resource "hcloud_server" "management_server" {
  name        = "${var.prefix}-management-1"
  image       = "ubuntu-24.04"
  server_type = var.instance_type
  location    = var.hcloud_location
  ssh_keys    = [hcloud_ssh_key.management_ssh_key.id]

  network {
    network_id = hcloud_network.private.id
  }

  user_data = file(format("%s/files/userdata.template", path.module))

  provisioner "remote-exec" {
    inline = [
      "echo 'Waiting for cloud-init to complete...'",
      "cloud-init status --wait > /dev/null",
      "echo 'Completed cloud-init!'",
    ]
    connection {
      type        = "ssh"
      host        = self.ipv4_address
      user        = local.node_username
      private_key = tls_private_key.global_key.private_key_pem
    }
  }

  depends_on = [
    hcloud_network_subnet.private
  ]

  lifecycle {
    ignore_changes = [ssh_keys, network, user_data]
  }
}

module "k3s" {
  source              = "../../../modules/cluster/init-k3s"
  node_public_ip      = hcloud_server.management_server.ipv4_address
  node_internal_ip    = one(hcloud_server.management_server.network[*]).ip
  node_username       = local.node_username
  ssh_private_key_pem = tls_private_key.global_key.private_key_pem
}

# install traefik
module "traefik" {
  source          = "../../../modules/traefik"
  k8s_config_path = module.k3s.kube_config_server_yaml
}

# install cert-manager
module "cert_manager" {
  source          = "../../../modules/cert-manager"
  wait_on         = module.traefik.installed
  k8s_config_path = module.k3s.kube_config_server_yaml
}

# install the Hetzner DNS solver for cert-manager
module "cert_manager_hetzner" {
  source            = "../../../modules/cert-manager/hetzner"
  wait_on           = module.cert_manager.installed
  k8s_config_yaml   = file(module.k3s.kube_config_server_yaml)
  tld               = "fourlights.dev"
  hetzner_api_token = var.hdns_token
}

# install letsencrypt
module "letsencrypt" {
  source          = "../../../modules/letsencrypt"
  wait_on         = module.cert_manager_hetzner.installed
  k8s_config_path = module.k3s.kube_config_server_yaml
  extraSolvers    = [module.cert_manager_hetzner.solver]
}

module "bridge-tls" {
  source          = "../../../modules/cluster/tls"
  wait_on         = module.letsencrypt.installed
  name            = "bridge"
  namespace       = "cert-manager" # TODO: Get from cert-manager module
  hosts           = ["bridge.fourlights.dev", "*.bridge.fourlights.dev"]
  k8s_config_yaml = file(module.k3s.kube_config_server_yaml)
}

# install rancher
module "rancher" {
  source          = "../../../modules/rancher"
  wait_on         = module.bridge-tls.installed
  k8s_config_path = module.k3s.kube_config_server_yaml
  server_dns      = local.server_dns
}

# install minio
module "minio" {
  source          = "../../../modules/minio"
  wait_on         = module.rancher.installed
  k8s_config_yaml = file(module.k3s.kube_config_server_yaml)
  server_dns      = local.server_dns
  service_name    = "storage"
  namespace       = "minio"
  admin           = true
  tls             = true
  ingressClass    = "traefik"
}

# install vault
module "vault" {
  source          = "../../../modules/vault"
  wait_on         = module.rancher.installed
  k8s_config_path = module.k3s.kube_config_server_yaml
  server_dns      = local.server_dns
  service_name    = "vault"
  namespace       = "vault"

  aws = {
    access_key_id     = var.aws_access_key_id
    secret_access_key = var.aws_secret_access_key
    kms_key_id        = var.aws_kms_key_id
    region            = var.aws_region
  }

  ingress = {
    enabled   = true
    tls       = true
    className = "traefik"
    annotations = {
      "kubernetes.io/ingress.class"                      = "traefik"
      "cert-manager.io/cluster-issuer"                   = "letsencrypt"
      "traefik.ingress.kubernetes.io/router.entrypoints" = "web,websecure"
      "traefik.ingress.kubernetes.io/router.middlewares" = "default-redirect-to-https@kubernetescrd,default-preserve-host-headers@kubernetescrd"
    }
  }
}

# bootstrap
module "cluster-bootstrap" {
  source           = "../../../modules/cluster/bootstrap"
  cluster          = "management"
  wait_on          = module.vault.installed
  vault_server     = module.vault.vault_uri
  vault_root_token = module.vault.vault_root_token
  minio_server     = module.minio.minio_server
  minio_access_key = module.minio.minio_access_key
  minio_secret_key = module.minio.minio_secret_key
}

# management
module "cluster-management" {
  source            = "../../../modules/cluster/management"
  cluster           = "management"
  minio_server      = module.minio.minio_server
  minio_access_key  = module.cluster-bootstrap.minio_access_key
  minio_secret_key  = module.cluster-bootstrap.minio_secret_key
  vault_server      = module.vault.vault_uri
  vault_token       = module.cluster-bootstrap.vault_token
  k8s_config_yaml   = file(module.k3s.kube_config_server_yaml)
  ssh_private_key   = local_sensitive_file.ssh_private_key_pem.content
  ssh_public_key    = local_file.ssh_public_key_openssh.content
  hcloud_token      = var.hcloud_token
  hcloud_network_id = hcloud_network.private.id
  vault_secret_path = module.vault.vault_local_file
}

resource "vault_kv_secret_v2" "rancher" {
  mount               = "management"
  name                = "rancher"
  delete_all_versions = true
  data_json = jsonencode({
    token = module.rancher.rancher_server_admin_token
  })
  depends_on = [module.vault.installed]
}

# install mijn 365zon
module "mijn_365zon" {
  source          = "../../../modules/mijn-365zon-nl"
  wait_on         = module.rancher.installed
  k8s_config_path = module.k3s.kube_config_server_yaml
}

@@ -0,0 +1,42 @@
output "node_ip" {
value = hcloud_server.management_server.ipv4_address
}
output "hcloud_network_id" {
value = hcloud_network.private.id
}
output "hcloud_token" {
value = var.hcloud_token
sensitive = true
}
output "rancher_admin_password" {
value = module.rancher.rancher_server_admin_password
sensitive = true
}
output "rancher_uri" {
value = module.rancher.rancher_uri
}
output "minio_server" {
value = module.minio.minio_server
}
output "minio_uri" {
value = module.minio.minio_api_uri
}
output "vault_uri" {
value = module.vault.vault_uri
}
output "vault_token" {
value = module.cluster-bootstrap.vault_token
sensitive = true
}
output "cluster" {
value = "management"
}

View File

@ -0,0 +1,30 @@
terraform {
required_providers {
hcloud = {
source = "hetznercloud/hcloud"
version = "~> 1.45"
}
local = {
source = "hashicorp/local"
version = "2.4.0"
}
tls = {
source = "hashicorp/tls"
version = "4.0.4"
}
helm = {
source = "hashicorp/helm"
version = "2.10.1"
}
}
required_version = ">= 1.0.0"
}
provider "hcloud" {
token = var.hcloud_token
}
provider "vault" {
address = module.vault.vault_uri
token = module.cluster-bootstrap.vault_token
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,29 @@
hostname = "fourlights.dev"
# Hetzner Cloud API token used to create infrastructure
hcloud_token = "0sjklQ2TwTUheEbJsJKFjXppCDQBeBY4gdnLZMpTHVEedTtNzOr8HAsbk4jZO4UX"
# Hetzner location used for all resources: fsn1, nbg1, hel1
hcloud_location = "fsn1"
# Type of instance to be used for all instances: cpx21, cax21, ccx23
instance_type = "cx32"
# Network to create for private communication
network_cidr = "10.0.0.0/8"
# Subnet to create for private communication. Must be part of the CIDR defined in `network_cidr`.
network_ip_range = "10.0.1.0/24"
# Zone to create the network in
network_zone = "eu-central"
# Prefix added to names of all resources
prefix = "fourlights"
aws_region = "eu-central-1"
aws_kms_key_id = "8ab3fc07-23ba-434d-ac42-0413c360e011"
aws_access_key_id = "AKIAZ5TC5IUTGTK22BQO"
aws_secret_access_key = "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"
hdns_token = "M1ZnS462983FPSyJNGoMTawI6wgQZ3Ej"

View File

@ -0,0 +1,73 @@
variable "hostname" {
type = string
default = "fourlights.dev"
}
# Variables for Hetzner Cloud infrastructure module
variable "hcloud_token" {
type = string
description = "Hetzner Cloud API token used to create infrastructure"
}
variable "hcloud_location" {
type = string
description = "Hetzner location used for all resources"
default = "fsn1"
}
variable "hdns_token" {
type = string
description = "Hetzner DNS API token used to create DNS records"
}
variable "prefix" {
type = string
description = "Prefix added to names of all resources"
default = "quickstart"
}
variable "network_cidr" {
type = string
description = "Network to create for private communication"
default = "10.0.0.0/8"
}
variable "network_ip_range" {
type = string
description = "Subnet to create for private communication. Must be part of the CIDR defined in `network_cidr`."
default = "10.0.1.0/24"
}
variable "network_zone" {
type = string
description = "Zone to create the network in"
default = "eu-central"
}
variable "instance_type" {
type = string
description = "Type of instance to be used for all instances"
default = "cx32"
}
variable "aws_access_key_id" {
description = "AWS Access Key ID for Vault KMS access"
type = string
}
variable "aws_secret_access_key" {
description = "AWS Secret Access Key for Vault KMS access"
type = string
}
variable "aws_kms_key_id" {
description = "AWS KMS Key ID for Vault KMS access"
type = string
}
variable "aws_region" {
description = "AWS KMS Region for Vault KMS access"
type = string
default = "eu-central-1"
}

View File

@ -0,0 +1,23 @@
{
"unseal_keys_b64": [],
"unseal_keys_hex": [],
"unseal_shares": 1,
"unseal_threshold": 1,
"recovery_keys_b64": [
"yiOv4g3sW427xQvkirhjq8ixxzdXJxAGbEMLKKHpr76o",
"rJoWisrrK+YU9wJFTW5m1hp/NQL5Y/MR6ZhUY1W4mVDT",
"XK2MXdEo/9caLEbEH0boip96PXuTA56r95EP47C7oEkr",
"GfhfasWebTu1FYzIw4nMeb0NvbJxcVGW5lsXZMd+L9Pn",
"Ao6ublyrABJFKIah6Z7PlZqMfAFQqvkDLRlHp6eMZj8Z"
],
"recovery_keys_hex": [
"ca23afe20dec5b8dbbc50be48ab863abc8b1c737572710066c430b28a1e9afbea8",
"ac9a168acaeb2be614f702454d6e66d61a7f3502f963f311e998546355b89950d3",
"5cad8c5dd128ffd71a2c46c41f46e88a9f7a3d7b93039eabf7910fe3b0bba0492b",
"19f85f6ac59e6d3bb5158cc8c389cc79bd0dbdb271715196e65b1764c77e2fd3e7",
"028eae6e5cab0012452886a1e99ecf959a8c7c0150aaf9032d1947a7a78c663f19"
],
"recovery_keys_shares": 5,
"recovery_keys_threshold": 3,
"root_token": "hvs.BsIbdvXLdbQn0v4sR3jSWJa9"
}

View File

@ -0,0 +1,31 @@
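# Workload cluster for the 365zon app, created through the generic cluster
# module once ArgoCD on the management cluster is up.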
module "cluster-app-365zon" {
source = "../../modules/cluster"
wait_on = module.argocd.installed
prefix = "app"
name = "365zon"
k8s_config_yaml = data.minio_s3_object.k8s_yaml.content
hcloud_network_id = data.vault_kv_secret_v2.hcloud.data["network_id"]
hcloud_token = data.vault_kv_secret_v2.hcloud.data["token"]
minio_server = var.minio_server
vault_server = var.vault_addr
vault_token = data.vault_kv_secret_v2.clusters.data["app-365zon"]
rancher_admin_token = data.vault_kv_secret_v2.rancher.data["token"]
rancher_server_uri = var.rancher_server
hosts = [
"365zon.fourlights.dev",
"*.365zon.fourlights.dev",
]
node_instance_type = "cx32"
}
output "cluster-app-365zon-vault-token" {
value = data.vault_kv_secret_v2.clusters.data["app-365zon"]
sensitive = true
}

View File

@ -0,0 +1,124 @@
locals {
server_dns = "bridge.fourlights.dev"
}
module "postgresql" {
source = "../../modules/postgresql"
namespace = "postgresql"
k8s_config_yaml = data.minio_s3_object.k8s_yaml.content
username = "bridge"
}
module "redis" {
source = "../../modules/redis"
wait_on = true
namespace = "redis"
k8s_config_yaml = data.minio_s3_object.k8s_yaml.content
}
module "fusionauth-db" {
source = "../../modules/postgresql/tenant"
wait_on = module.postgresql.installed
name = "fusionauth"
root_password = module.postgresql.root_password
k8s_config_yaml = data.minio_s3_object.k8s_yaml.content
}
resource "null_resource" "fusionauth-wait" {
depends_on = [module.fusionauth-db.installed, module.redis.installed]
}
module "fusionauth" {
source = "../../modules/fusionauth"
wait_on = null_resource.fusionauth-wait.id
namespace = "fusionauth"
k8s_config_yaml = data.minio_s3_object.k8s_yaml.content
server_dns = local.server_dns
database_password = module.fusionauth-db.password
}
module "fusionauth-tenant-devops" {
source = "../../modules/fusionauth/tenant"
wait_on = module.fusionauth.installed
fusionauth_api_key = module.fusionauth.api_key
fusionauth_uri = module.fusionauth.uri
tenant_name = "devops"
theme_id = "cafafa30-c8de-40f1-b666-12d3fe361a0a"
}
module "fusionauth-application-argocd" {
source = "../../modules/fusionauth/application"
wait_on = module.fusionauth-tenant-devops.installed
fusionauth_api_key = module.fusionauth.api_key
fusionauth_uri = module.fusionauth.uri
tenant_id = module.fusionauth-tenant-devops.tenant_id
rbac_lambda_id = module.fusionauth-tenant-devops.rbac_lambda_id
oauth_redirect_uri = "https://argocd.${local.server_dns}/api/dex/callback"
name = "ArgoCD"
}
output "fusionauth-admin-password" {
value = module.fusionauth.admin_password
sensitive = true
}
output "fusionauth-api-key" {
value = module.fusionauth.api_key
sensitive = true
}
resource "null_resource" "argocd_wait" {
depends_on = [module.fusionauth-application-argocd.installed, module.redis.installed]
}
module "argocd" {
source = "../../modules/argocd"
wait_on = null_resource.argocd_wait.id
namespace = "argocd"
k8s_config_yaml = data.minio_s3_object.k8s_yaml.content
redis_db_start_index = 0
redis_password = module.redis.password
server_dns = local.server_dns
oauth_uri = module.fusionauth.server
oauth_client_id = module.fusionauth-application-argocd.client_id
oauth_client_secret = module.fusionauth-application-argocd.client_secret
oauth_redirect_uri = module.fusionauth-application-argocd.redirect_uri
oauth_issuer = module.fusionauth-tenant-devops.issuer
}
output "argocd-root-password" {
value = module.argocd.admin_password
sensitive = true
}
# Unfortunately, every additional application that should use Google sign-in has to be registered here as well
module "fusionauth-google" {
source = "../../modules/fusionauth/identity-provider/google"
wait_on = module.fusionauth.installed
fusionauth_uri = module.fusionauth.uri
fusionauth_api_key = module.fusionauth.api_key
google_client_id = "783390190667-0nkts50perpmhott4i7ro1ob5n7koi5i.apps.googleusercontent.com"
google_client_secret = "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"
applications = [
{
id = module.fusionauth-application-argocd.application_id,
create_registration = true,
enabled = true
}
]
}

View File

@ -0,0 +1,55 @@
terraform {
required_providers {
minio = {
source = "aminueza/minio"
version = "~> 2.5.0"
}
hcloud = {
source = "hetznercloud/hcloud"
version = "~> 1.45"
}
}
}
provider "minio" {
minio_server = var.minio_server
minio_region = var.region
minio_user = var.access_key
minio_password = var.secret_key
minio_ssl = true
}
data "minio_s3_object" "k8s_yaml" {
bucket_name = var.bucket
object_name = "kube_config.yaml"
}
data "minio_s3_object" "id_rsa" {
bucket_name = var.bucket
object_name = "id_rsa"
}
data "minio_s3_object" "id_rsa_pub" {
bucket_name = var.bucket
object_name = "id_rsa.pub"
}
provider "vault" {
address = var.vault_addr
token = var.vault_token
}
data "vault_kv_secret_v2" "hcloud" {
mount = var.bucket
name = "hcloud"
}
data "vault_kv_secret_v2" "clusters" {
mount = var.bucket
name = "clusters"
}
data "vault_kv_secret_v2" "rancher" {
mount = var.bucket
name = "rancher"
}

View File

@ -0,0 +1,7 @@
After the management cluster has been bootstrapped (see the `bootstrap` directory), run the following steps from the `infra/clusters/management` directory:
1. `./scripts/generate-tf-backend-config.sh`
2. `terraform init -backend-config=backend.tfvars -reconfigure`
3. `terraform apply`
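
For reference, step 1 writes a `backend.tfvars` of this shape (the values below are placeholders for the ones pulled from Vault and the bootstrap outputs):

```hcl
endpoints  = { s3 = "https://<minio-uri>" }
access_key = "<minio-access-key>"
secret_key = "<minio-secret-key>"
bucket     = "<cluster>"
key        = "terraform.tfstate"
region     = "eu-central-1"
```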

View File

@ -0,0 +1,34 @@
#!/bin/bash
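# Pulls the bootstrap outputs, renews the Vault token, reads the MinIO
# credentials from Vault, and writes backend.tfvars + terraform.tfvars for this cluster.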
CLUSTER=$(cd bootstrap && terraform output -raw cluster)
VAULT_TOKEN=$(cd bootstrap && terraform output -raw vault_token)
VAULT_ADDR=$(cd bootstrap && terraform output -raw vault_uri)
MINIO_ADDR=$(cd bootstrap && terraform output -raw minio_uri)
MINIO_SERVER=$(cd bootstrap && terraform output -raw minio_server)
RANCHER_SERVER=$(cd bootstrap && terraform output -raw rancher_uri)
VAULT_ADDR="$VAULT_ADDR" VAULT_TOKEN="$VAULT_TOKEN" vault token renew
MINIO_ACCESS_KEY=$(VAULT_TOKEN="$VAULT_TOKEN" VAULT_ADDR="$VAULT_ADDR" vault kv get -mount="$CLUSTER" -field="access_key" "minio")
MINIO_SECRET_KEY=$(VAULT_TOKEN="$VAULT_TOKEN" VAULT_ADDR="$VAULT_ADDR" vault kv get -mount="$CLUSTER" -field="secret_key" "minio")
cat << EOF > backend.tfvars
endpoints = { s3 = "${MINIO_ADDR}" }
access_key = "${MINIO_ACCESS_KEY}"
secret_key = "${MINIO_SECRET_KEY}"
bucket = "${CLUSTER}"
key = "terraform.tfstate"
region = "eu-central-1"
EOF
cat << EOF > terraform.tfvars
endpoints = { s3 = "${MINIO_ADDR}" }
access_key = "${MINIO_ACCESS_KEY}"
secret_key = "${MINIO_SECRET_KEY}"
bucket = "${CLUSTER}"
region = "eu-central-1"
minio_server = "${MINIO_SERVER}"
rancher_server = "${RANCHER_SERVER}"
vault_token = "${VAULT_TOKEN}"
vault_addr = "${VAULT_ADDR}"
EOF

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

View File

@ -0,0 +1,9 @@
endpoints = { s3 = "https://storage.bridge.fourlights.dev" }
access_key = "T8V84SHIVT6MAV424ES0"
secret_key = "23+N28yBK+cL3O2t9xsstT8jr2TpK+SgORCVIuxc"
bucket = "management"
region = "eu-central-1"
minio_server = "storage.bridge.fourlights.dev"
rancher_server = "https://rancher.bridge.fourlights.dev"
vault_token = "hvs.CAESIPcy0DY5Jc-d0P2ZRRhiLXr3DmOOawpoA6--QTCoRCqqGh4KHGh2cy5lZjhJdTRINEVKaU55Q21VUTg4ZzZwSWI"
vault_addr = "https://vault.bridge.fourlights.dev"

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,38 @@
variable "endpoints" {
type = map(string)
}
variable "access_key" {
type = string
sensitive = true
}
variable "secret_key" {
type = string
sensitive = true
}
variable "bucket" {
type = string
}
variable "region" {
type = string
}
variable "minio_server" {
type = string
}
variable "vault_addr" {
type = string
}
variable "vault_token" {
type = string
sensitive = true
}
variable "rancher_server" {
type = string
}

View File

@ -0,0 +1,4 @@
locals {
service_uri = join(".", [var.service_name, var.server_dns])
grpc_service_uri = join(".", ["${var.service_name}-grpc", var.server_dns])
}

View File

@ -0,0 +1,78 @@
resource "random_password" "admin_password" {
length = 48
special = true
}
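# Copy the wildcard "bridge" certificate from the cert-manager namespace into
# the argocd namespace so the ingress can terminate TLS with it.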
data "kubernetes_secret" "bridge-tls" {
metadata {
name = "bridge-tls"
namespace = "cert-manager"
}
}
resource "kubernetes_namespace" "argocd" {
metadata {
name = var.namespace
}
lifecycle {
ignore_changes = [metadata]
}
}
resource "kubernetes_secret" "argocd-tls" {
metadata {
name = "argocd-tls"
namespace = kubernetes_namespace.argocd.metadata[0].name
}
data = data.kubernetes_secret.bridge-tls.data
type = data.kubernetes_secret.bridge-tls.type
}
resource "helm_release" "argocd" {
depends_on = [var.wait_on, kubernetes_secret.argocd-tls]
name = "argocd"
repository = "https://charts.bitnami.com/bitnami"
chart = "argo-cd"
namespace = kubernetes_namespace.argocd.metadata[0].name
version = "7.0.20"
create_namespace = false
wait = true
wait_for_jobs = true
set_sensitive {
name = "config.secret.argocdServerAdminPassword"
value = random_password.admin_password.result
}
values = [
templatefile("${path.module}/values.yaml", {
service_uri = local.service_uri,
server_dns = var.server_dns,
grpc_service_uri = local.grpc_service_uri,
redis_index = var.redis_db_start_index,
redis_password = var.redis_password,
redis_service_uri = "redis-headless.redis.svc.cluster.local",
oauth_uri = var.oauth_uri,
oauth_issuer = var.oauth_issuer,
oauth_client_id = var.oauth_client_id,
oauth_client_secret = var.oauth_client_secret,
oauth_redirect_uri = var.oauth_redirect_uri
})
]
}
output "installed" {
value = true
depends_on = [helm_release.argocd]
}
output "admin_password" {
value = random_password.admin_password.result
sensitive = true
}
output "redis_db_next_start_index" {
value = var.redis_db_start_index + 1
}

View File

@ -0,0 +1,3 @@
resource "argocd_project" "project" {
}
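# The project is still an empty stub. A minimal definition (names below are
# illustrative) might look like:
#   metadata {
#     name      = "default"
#     namespace = "argocd"
#   }
#   spec {
#     source_repos = ["*"]
#     destination {
#       server    = "https://kubernetes.default.svc"
#       namespace = "*"
#     }
#   }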

View File

@ -0,0 +1,14 @@
terraform {
required_providers {
argocd = {
source = "oboukili/argocd"
version = "6.2.0"
}
}
}
provider "argocd" {
server_addr = var.server
username = var.username
password = var.password
}

View File

@ -0,0 +1,12 @@
variable "server" {
type = string
}
variable "password" {
type = string
}
variable "username" {
type = string
default = "admin"
}

View File

@ -0,0 +1,49 @@
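# Derive connection details from the kubeconfig: prefer a bearer token when
# one is present, otherwise fall back to client-certificate authentication.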
locals {
k8s_config = yamldecode(var.k8s_config_yaml)
k8s_host = local.k8s_config.clusters[0].cluster.server
k8s_auth = try(
{
token = local.k8s_config.users[0].user.token
using_token = true
},
{
client_certificate = base64decode(local.k8s_config.users[0].user["client-certificate-data"])
client_key = base64decode(local.k8s_config.users[0].user["client-key-data"])
using_token = false
}
)
}
provider "kubernetes" {
host = local.k8s_host
insecure = true
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
client_certificate = local.k8s_auth.using_token ? null : local.k8s_auth.client_certificate
client_key = local.k8s_auth.using_token ? null : local.k8s_auth.client_key
}
provider "helm" {
kubernetes {
host = local.k8s_host
insecure = true
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
client_certificate = local.k8s_auth.using_token ? null : local.k8s_auth.client_certificate
client_key = local.k8s_auth.using_token ? null : local.k8s_auth.client_key
}
}
terraform {
required_providers {
argocd = {
source = "argoproj-labs/argocd"
version = "7.0.2"
}
}
}
provider "argocd" {
server_addr = local.service_uri
username = "admin"
password = random_password.admin_password.result
}

View File

@ -0,0 +1,49 @@
locals {
k8s_config = yamldecode(var.k8s_config_yaml)
k8s_host = local.k8s_config.clusters[0].cluster.server
k8s_auth = try(
{
token = local.k8s_config.users[0].user.token
using_token = true
},
{
client_certificate = base64decode(local.k8s_config.users[0].user["client-certificate-data"])
client_key = base64decode(local.k8s_config.users[0].user["client-key-data"])
using_token = false
}
)
}
provider "kubernetes" {
host = local.k8s_host
insecure = true
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
client_certificate = local.k8s_auth.using_token ? null : local.k8s_auth.client_certificate
client_key = local.k8s_auth.using_token ? null : local.k8s_auth.client_key
}
provider "helm" {
kubernetes {
host = local.k8s_host
insecure = true
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
client_certificate = local.k8s_auth.using_token ? null : local.k8s_auth.client_certificate
client_key = local.k8s_auth.using_token ? null : local.k8s_auth.client_key
}
}
terraform {
required_providers {
argocd = {
source = "argoproj-labs/argocd"
version = "7.0.2"
}
}
}
provider "argocd" {
server_addr = local.service_uri
username = "admin"
password = random_password.admin_password.result
}

View File

@ -0,0 +1,27 @@
variable "k8s_config_yaml" {
description = "Content of k8s config yaml file"
type = string
}
variable "wait_on" {
type = any
description = "Resources to wait on"
default = true
}
variable "namespace" {
type = string
}
variable "uri" {
type = string
}
variable "name" {
type = string
}
variable "private-key" {
type = string
sensitive = true
}

View File

@ -0,0 +1,74 @@
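# Shared Redis credentials and CLI arguments, attached to the controller,
# repoServer and server components below via YAML anchors.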
commonEnvVars: &commonEnvVars
- name: REDIS_USERNAME
value: ""
- name: REDIS_PASSWORD
value: ${ redis_password }
commonArgs: &commonRedisArgs
- --redis=${ redis_service_uri }:6379
- --redisdb=${ redis_index }
redis:
enabled: false
redisWait:
enabled: false
externalRedis:
host: ${ redis_service_uri }
password: ${ redis_password }
database: ${ redis_index }
dex:
enabled: true
controller:
extraArgs: *commonRedisArgs
extraEnvVars: *commonEnvVars
repoServer:
extraArgs: *commonRedisArgs
extraEnvVars: *commonEnvVars
server:
extraArgs: *commonRedisArgs
extraEnvVars: *commonEnvVars
url: https://${ service_uri }
insecure: true
ingress:
enabled: true
ingressClassName: traefik
hostname: ${ service_uri }
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: web,websecure
traefik.ingress.kubernetes.io/router.middlewares: default-redirect-to-https@kubernetescrd,default-preserve-host-headers@kubernetescrd
extraTls:
- hosts:
- ${ service_uri }
secretName: argocd-tls
config:
%{ if oauth_uri != null }
dex.config: |
connectors:
- type: oidc
id: oidc
name: OIDC
config:
issuer: ${ oauth_issuer }
clientID: ${ oauth_client_id }
clientSecret: ${ oauth_client_secret }
insecureSkipEmailVerified: true
insecureEnableGroups: true
scopes:
- profile
- email
- openid
- groups
claimMapping:
name: fullName # ArgoCD expects 'name', FusionAuth provides 'fullName'
preferred_username: email
%{ endif }

View File

@ -0,0 +1,66 @@
variable "service_name" {
type = string
description = "Name of the service"
default = "argocd"
}
variable "server_dns" {
type = string
description = "Domain for the server"
}
variable "k8s_config_yaml" {
description = "Content of k8s config yaml file"
type = string
}
variable "wait_on" {
type = any
description = "Resources to wait on"
default = true
}
variable "namespace" {
type = string
}
variable "redis_db_start_index" {
type = number
description = "Start index for redis db"
default = 0
}
variable "redis_password" {
type = string
sensitive = true
}
variable "oauth_uri" {
type = string
description = "OAuth URI"
default = null
}
variable "oauth_issuer" {
type = string
description = "OAuth issuer"
default = null
}
variable "oauth_client_id" {
type = string
description = "OAuth client ID"
default = null
}
variable "oauth_client_secret" {
type = string
description = "OAuth client secret"
default = null
}
variable "oauth_redirect_uri" {
type = string
description = "OAuth redirect URI"
default = null
}

View File

@ -0,0 +1,46 @@
resource "helm_release" "cert-manager-webhook-hetzner" {
namespace = var.namespace
name = "cert-manager-webhook-hetzner"
repository = "https://vadimkim.github.io/cert-manager-webhook-hetzner"
chart = "cert-manager-webhook-hetzner"
set {
name = "groupName"
value = "acme.${var.tld}"
}
}
resource "kubernetes_secret" "hetzner-secret" {
type = "Opaque"
depends_on = [var.wait_on]
metadata {
name = "hetzner-secret"
namespace = var.namespace
}
data = {
"api-key" = var.hetzner_api_token
}
}
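# DNS-01 solver stanza, consumed by the letsencrypt module via `extraSolvers`.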
output "solver" {
value = {
dns01 = {
webhook = {
groupName = "acme.${var.tld}"
solverName = "hetzner"
config = {
secretName = kubernetes_secret.hetzner-secret.metadata[0].name
apiUrl = "https://dns.hetzner.com/api/v1"
}
}
}
}
}
output "installed" {
value = true
depends_on = [helm_release.cert-manager-webhook-hetzner]
}

View File

@ -0,0 +1,33 @@
locals {
k8s_config = yamldecode(var.k8s_config_yaml)
k8s_host = local.k8s_config.clusters[0].cluster.server
k8s_auth = try(
{
token = local.k8s_config.users[0].user.token
using_token = true
},
{
client_certificate = base64decode(local.k8s_config.users[0].user["client-certificate-data"])
client_key = base64decode(local.k8s_config.users[0].user["client-key-data"])
using_token = false
}
)
}
provider "kubernetes" {
host = local.k8s_host
insecure = true
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
client_certificate = local.k8s_auth.using_token ? null : local.k8s_auth.client_certificate
client_key = local.k8s_auth.using_token ? null : local.k8s_auth.client_key
}
provider "helm" {
kubernetes {
host = local.k8s_host
insecure = true
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
client_certificate = local.k8s_auth.using_token ? null : local.k8s_auth.client_certificate
client_key = local.k8s_auth.using_token ? null : local.k8s_auth.client_key
}
}

View File

@ -0,0 +1,25 @@
variable "wait_on" {
type = any
description = "Resource to wait on"
default = true
}
variable "namespace" {
type = string
default = "cert-manager"
}
variable "k8s_config_yaml" {
description = "Content of the k8s config yaml file"
type = string
}
variable "tld" {
description = "Top level domain"
type = string
}
variable "hetzner_api_token" {
sensitive = true
type = string
}

View File

@ -0,0 +1,27 @@
resource "helm_release" "cert_manager" {
name = "cert-manager"
chart = "https://charts.jetstack.io/charts/cert-manager-v${var.chart_version}.tgz"
namespace = var.namespace
create_namespace = true
wait = true
set {
name = "installCRDs"
value = "true"
}
set_list {
name = "dnsConfig.nameservers"
value = ["1.1.1.1", "8.8.8.8"]
}
set {
name = "email"
value = var.email
}
}
output "installed" {
value = true
depends_on = [helm_release.cert_manager]
}

View File

@ -0,0 +1,33 @@
locals {
k8s_config = yamldecode(file(var.k8s_config_path))
k8s_host = local.k8s_config.clusters[0].cluster.server
k8s_auth = try(
{
token = local.k8s_config.users[0].user.token
using_token = true
},
{
client_certificate = base64decode(local.k8s_config.users[0].user["client-certificate-data"])
client_key = base64decode(local.k8s_config.users[0].user["client-key-data"])
using_token = false
}
)
}
provider "kubernetes" {
host = local.k8s_host
insecure = true
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
client_certificate = local.k8s_auth.using_token ? null : local.k8s_auth.client_certificate
client_key = local.k8s_auth.using_token ? null : local.k8s_auth.client_key
}
provider "helm" {
kubernetes {
host = local.k8s_host
insecure = true
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
client_certificate = local.k8s_auth.using_token ? null : local.k8s_auth.client_certificate
client_key = local.k8s_auth.using_token ? null : local.k8s_auth.client_key
}
}

View File

@ -0,0 +1,26 @@
variable "chart_version" {
type = string
default = "1.11.0"
}
variable "email" {
type = string
description = "Email address to use for cert-manager"
default = "engineering@fourlights.nl"
}
variable "wait_on" {
type = any
description = "Resource to wait on"
default = true
}
variable "namespace" {
type = string
default = "cert-manager"
}
variable "k8s_config_path" {
description = "Path to the k8s config yaml file"
type = string
}

View File

@ -0,0 +1,134 @@
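# Per-cluster bootstrap: a dedicated KV v2 mount and scoped policy in Vault,
# a long-lived periodic token, and a MinIO bucket with matching service-account credentials.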
resource "vault_mount" "cluster" {
depends_on = [var.wait_on]
path = var.cluster
type = "kv"
options = { version = "2" }
description = "KV Version 2 secret engine mount for ${var.cluster}"
}
resource "vault_policy" "cluster" {
name = var.cluster
depends_on = [var.wait_on]
policy = <<EOT
path "${var.cluster}/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
path "auth/token/create" {
capabilities = ["create", "update", "sudo"]
}
path "auth/token/lookup-self" {
capabilities = ["read"]
}
path "auth/token/renew-self" {
capabilities = ["update"]
}
# Add other necessary permissions
EOT
}
resource "vault_token" "cluster" {
policies = [vault_policy.cluster.name]
renewable = true
ttl = "365d"
period = "30d"
no_parent = true
}
resource "minio_s3_bucket" "cluster" {
bucket = var.cluster
acl = "private"
}
# TODO: Enable encryption and versioning on the bucket
# resource "minio_s3_bucket_server_side_encryption" "encryption" {
# bucket = minio_s3_bucket.cluster.bucket
# encryption_type = "aws:kms"
# kms_key_id = var.aws_kms_key_id
# }
resource "minio_iam_user" "cluster" {
name = var.cluster
}
resource "minio_iam_policy" "cluster" {
name = minio_s3_bucket.cluster.bucket
policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Effect = "Allow"
Action = ["s3:ListBucket"]
Resource = ["arn:aws:s3:::${var.cluster}"]
},
{
Effect = "Allow"
Action = [
"s3:GetObject",
"s3:PutObject",
"s3:DeleteObject"
]
Resource = ["arn:aws:s3:::${var.cluster}/*"]
}
]
})
}
resource "minio_iam_user_policy_attachment" "cluster" {
user_name = minio_iam_user.cluster.id
policy_name = minio_iam_policy.cluster.id
}
resource "minio_iam_service_account" "cluster" {
target_user = minio_iam_user.cluster.name
policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Effect = "Allow"
Action = ["s3:ListBucket"]
Resource = ["arn:aws:s3:::${var.cluster}"]
},
{
Effect = "Allow"
Action = [
"s3:GetObject",
"s3:PutObject",
"s3:DeleteObject"
]
Resource = ["arn:aws:s3:::${var.cluster}/*"]
}
]
})
}
resource "vault_kv_secret_v2" "cluster" {
mount = var.cluster
name = "minio"
delete_all_versions = true
data_json = jsonencode({
access_key = minio_iam_service_account.cluster.access_key
secret_key = minio_iam_service_account.cluster.secret_key
})
depends_on = [
var.wait_on,
minio_iam_service_account.cluster
]
}
output "vault_token" {
value = vault_token.cluster.client_token
sensitive = true
}
output "minio_access_key" {
value = minio_iam_service_account.cluster.access_key
}
output "minio_secret_key" {
value = minio_iam_service_account.cluster.secret_key
sensitive = true
}

View File

@ -0,0 +1,21 @@
terraform {
required_providers {
minio = {
source = "aminueza/minio"
version = "~> 2.5.0"
}
}
}
provider "minio" {
minio_server = var.minio_server
minio_region = "not-used"
minio_user = var.minio_access_key
minio_password = var.minio_secret_key
minio_ssl = true
}
provider "vault" {
address = var.vault_server
token = var.vault_root_token
}

View File

@ -0,0 +1,30 @@
variable "cluster" {
description = "Name or ID of the cluster"
type = string
}
variable "vault_server" {
type = string
}
variable "vault_root_token" {
type = string
}
variable "minio_server" {
type = string
}
variable "minio_access_key" {
type = string
}
variable "minio_secret_key" {
type = string
}
variable "wait_on" {
type = any
description = "Resources to wait on"
default = true
}

View File

@ -0,0 +1,47 @@
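# Registers the workload cluster with ArgoCD: a Secret labelled
# "argocd.argoproj.io/secret-type=cluster" carrying the API server address
# and either a bearer token or client-certificate credentials.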
locals {
cluster_config = yamldecode(data.minio_s3_object.k8s_yaml.content)
cluster_server = local.cluster_config.clusters[0].cluster.server
cluster_auth = try(
{
token = local.cluster_config.users[0].user.token
using_token = true
},
{
client_certificate = base64decode(local.cluster_config.users[0].user["client-certificate-data"])
client_key = base64decode(local.cluster_config.users[0].user["client-key-data"])
using_token = false
}
)
}
resource "kubernetes_secret" "app-365zon-cluster" {
depends_on = [var.wait_on]
metadata {
name = "app-365zon-cluster-secret"
namespace = "argocd"
labels = {
"argocd.argoproj.io/secret-type" = "cluster"
}
}
type = "Opaque"
data = {
name = "app-365zon"
server = local.cluster_server
config = local.cluster_auth.using_token ? jsonencode({
"bearerToken" = local.cluster_auth.token
}) : jsonencode({
"tlsClientConfig" = {
"insecure" = true
"certData" = base64encode(local.cluster_auth.client_certificate)
"keyData" = base64encode(local.cluster_auth.client_key)
}
})
}
}
output "installed" {
value = true
depends_on = [kubernetes_secret.app-365zon-cluster]
}

View File

@ -0,0 +1,55 @@
locals {
k8s_config = yamldecode(var.k8s_config_yaml)
k8s_host = local.k8s_config.clusters[0].cluster.server
k8s_auth = try(
{
token = local.k8s_config.users[0].user.token
using_token = true
},
{
client_certificate = base64decode(local.k8s_config.users[0].user["client-certificate-data"])
client_key = base64decode(local.k8s_config.users[0].user["client-key-data"])
using_token = false
}
)
}
provider "kubernetes" {
host = local.k8s_host
insecure = true
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
client_certificate = local.k8s_auth.using_token ? null : local.k8s_auth.client_certificate
client_key = local.k8s_auth.using_token ? null : local.k8s_auth.client_key
}
provider "helm" {
kubernetes {
host = local.k8s_host
insecure = true
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
client_certificate = local.k8s_auth.using_token ? null : local.k8s_auth.client_certificate
client_key = local.k8s_auth.using_token ? null : local.k8s_auth.client_key
}
}
terraform {
required_providers {
minio = {
source = "aminueza/minio"
version = "~> 2.5.0"
}
}
}
provider "minio" {
minio_server = var.minio_server
minio_region = "not-used"
minio_user = var.minio_access_key
minio_password = var.minio_secret_key
minio_ssl = true
}
data "minio_s3_object" "k8s_yaml" {
bucket_name = var.cluster
object_name = "kube_config.yaml"
}

View File

@ -0,0 +1,28 @@
variable "k8s_config_yaml" {
description = "Content of k8s config yaml file"
type = string
}
variable "cluster" {
description = "Name or ID of the cluster"
type = string
}
variable "minio_server" {
type = string
}
variable "minio_access_key" {
type = string
}
variable "minio_secret_key" {
type = string
sensitive = true
}
variable "wait_on" {
type = any
description = "Resources to wait on"
default = true
}

View File

@ -0,0 +1,48 @@
#cloud-config
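# Brings up the private interface (enp7s0) with Hetzner's recursive DNS
# resolvers, upgrades the OS, and registers the node with the cluster.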
write_files:
- path: /etc/systemd/network/10-eth0.network
content: |
[Match]
Name=enp7s0
[Network]
DHCP=yes
Gateway=10.0.0.1
DNS=185.12.64.2
DNS=185.12.64.1
- path: /etc/netplan/50-cloud-init.yaml
content: |
network:
version: 2
ethernets:
enp7s0:
dhcp4: true
routes:
- to: default
via: 10.0.0.1
nameservers:
addresses:
- 185.12.64.2
- 185.12.64.1
runcmd:
- |
echo "Restarting network"
systemctl restart systemd-networkd
echo "Checking if we can reach the internet"
curl -fsIL https://google.com -o /dev/null && echo "Internet access is up." || echo "Internet access is down."
echo "Updating package cache"
apt-get update -y
echo "Upgrading packages"
apt-get upgrade -y
echo "Registering node with kubeadm"
${register_command} --etcd --controlplane --worker
users:
- name: thomas
sudo: ALL=(ALL) NOPASSWD:ALL
shell: /bin/bash
passwd: $6$gPaN6lngIIrrjMb1$wDrBxIDpGyJftsIQ5slQrEwXPxDOLXrFs2hGz640fyZLt1vSjtH8HPY6H4WwWpLKsKUEWCl/2hz4N57m98ESZ1
lock_passwd: false

View File

@ -0,0 +1,29 @@
resource "hcloud_server" "node" {
name = "${local.cluster_name}-${count.index + 1}"
image = var.node_image
server_type = var.instance_type
location = var.hcloud_location
ssh_keys = [var.ssh_key_id]
count = var.node_count
network {
network_id = var.hcloud_network_id
}
public_net {
ipv4_enabled = false
ipv6_enabled = false
}
user_data = templatefile(
format("%s/files/userdata_node.template", path.module),
{
username = local.node_username
register_command = var.cluster_registration_command
}
)
lifecycle {
ignore_changes = [ssh_keys, network]
}
}

View File

@ -0,0 +1,3 @@
output "ips" {
value = [for node in hcloud_server.node : [for ip in node.network : ip][0].ip]
}

View File

@ -0,0 +1,13 @@
terraform {
required_providers {
hcloud = {
source = "hetznercloud/hcloud"
version = "~> 1.45"
}
}
}
provider "hcloud" {
token = var.hcloud_token
}

View File

@ -0,0 +1,58 @@
variable "hcloud_token" {
type = string
description = "Hetzner Cloud API token used to create infrastructure"
}
variable "node_count" {
type = number
default = 1
}
variable "hcloud_location" {
type = string
description = "Hetzner location used for all resources"
default = "fsn1"
}
variable "hcloud_network_id" {
type = string
description = "ID of the network to use for all resources"
}
variable "ssh_key_id" {
type = string
description = "ID of the ssh key for this node"
}
variable "instance_type" {
type = string
description = "Type of instance to be used for all instances"
default = "cpx21"
}
variable "prefix" {
type = string
description = "Prefix added to names of all resources"
}
variable "name" {
type = string
description = "Name of the node"
}
variable "cluster_registration_command" {
type = string
description = "Command to register the node with the cluster"
}
variable "node_image" {
type = string
description = "Image to use for the node"
default = "ubuntu-22.04"
}
# Local variables used to reduce repetition
locals {
node_username = "root"
cluster_name = "${var.prefix}-${var.name}"
}

View File

@ -0,0 +1,47 @@
terraform {
required_providers {
local = {
source = "hashicorp/local"
version = "2.4.0"
}
ssh = {
source = "loafoe/ssh"
version = "2.6.0"
}
}
required_version = ">= 1.0.0"
}
resource "ssh_resource" "install_k3s" {
host = var.node_public_ip
commands = [
"bash -c 'curl https://get.k3s.io | INSTALL_K3S_EXEC=\"server --disable=traefik --node-external-ip ${var.node_public_ip} --node-ip ${var.node_internal_ip}\" INSTALL_K3S_VERSION=${var.rancher_kubernetes_version} sh -'"
]
user = var.node_username
private_key = var.ssh_private_key_pem
}
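# Fetch the kubeconfig and rewrite its server address from 127.0.0.1 to the
# node's public IP so it can be used from outside the node.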
resource "ssh_resource" "retrieve_config" {
depends_on = [
ssh_resource.install_k3s
]
host = var.node_public_ip
commands = [
"sudo sed \"s/127.0.0.1/${var.node_public_ip}/g\" /etc/rancher/k3s/k3s.yaml"
]
user = var.node_username
private_key = var.ssh_private_key_pem
}
resource "local_file" "kube_config_server_yaml" {
filename = format("%s/%s", path.root, "kube_config_server.yaml")
content = ssh_resource.retrieve_config.result
}
output "kube_config_server_yaml" {
value = local_file.kube_config_server_yaml.filename
}
output "installed" {
value = true
}

View File

@ -0,0 +1,26 @@
variable "node_public_ip" {
type = string
description = "Public IP of compute node for Rancher cluster"
}
variable "node_internal_ip" {
type = string
description = "Internal IP of compute node for Rancher cluster"
default = ""
}
variable "node_username" {
type = string
description = "Username used for SSH access to the Rancher server cluster node"
}
variable "ssh_private_key_pem" {
type = string
description = "Private key used for SSH access to the Rancher server cluster node"
}
variable "rancher_kubernetes_version" {
type = string
description = "Kubernetes version to use for Rancher server cluster"
default = "v1.31.2+k3s1"
}

View File

@ -0,0 +1,32 @@
# Longhorn
module "longhorn" {
source = "../../longhorn"
namespace = "longhorn-system"
k8s_config_yaml = var.k8s_config_yaml
wait_on = var.wait_on
}
# Configure ingress to allow forwarded headers
resource "kubernetes_manifest" "rke2-ingress-nginx-config" {
manifest = {
apiVersion = "helm.cattle.io/v1"
kind = "HelmChartConfig"
metadata = {
name = "rke2-ingress-nginx"
namespace = "kube-system"
}
spec = {
valuesContent = <<-EOT
controller:
config:
use-forwarded-headers: "true"
EOT
}
}
}
output "installed" {
value = true
depends_on = [module.longhorn.installed, kubernetes_manifest.rke2-ingress-nginx-config]
}

View File

@ -0,0 +1,33 @@
locals {
k8s_config = yamldecode(var.k8s_config_yaml)
k8s_host = local.k8s_config.clusters[0].cluster.server
k8s_auth = try(
{
token = local.k8s_config.users[0].user.token
using_token = true
},
{
client_certificate = base64decode(local.k8s_config.users[0].user["client-certificate-data"])
client_key = base64decode(local.k8s_config.users[0].user["client-key-data"])
using_token = false
}
)
}
provider "kubernetes" {
host = local.k8s_host
insecure = true
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
client_certificate = local.k8s_auth.using_token ? null : local.k8s_auth.client_certificate
client_key = local.k8s_auth.using_token ? null : local.k8s_auth.client_key
}
provider "helm" {
kubernetes {
host = local.k8s_host
insecure = true
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
client_certificate = local.k8s_auth.using_token ? null : local.k8s_auth.client_certificate
client_key = local.k8s_auth.using_token ? null : local.k8s_auth.client_key
}
}

View File

@ -0,0 +1,10 @@
variable "k8s_config_yaml" {
description = "Content of k8s config yaml file"
type = string
}
variable "wait_on" {
type = any
description = "Resources to wait on"
default = true
}

View File

@ -0,0 +1,195 @@
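# Provisions a Rancher-managed workload cluster on private Hetzner nodes,
# persists its access material, and exposes its hosts through the management
# cluster's Traefik ingress.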
locals {
cluster_name = "${var.prefix}-${var.name}"
cluster_addresses = module.cluster-hcloud.ips != null ? [for ip in module.cluster-hcloud.ips : { ip = ip }] : []
}
data "vault_kv_secret_v2" "minio" {
mount = local.cluster_name
name = "minio"
}
resource "tls_private_key" "cluster" {
algorithm = "RSA"
rsa_bits = 2048
}
resource "rancher2_cluster_v2" "cluster" {
depends_on = [var.wait_on]
provider = rancher2.admin
name = local.cluster_name
kubernetes_version = var.kubernetes_version
}
resource "hcloud_ssh_key" "cluster" {
name = "${local.cluster_name}-ssh-key"
public_key = tls_private_key.cluster.public_key_openssh
lifecycle {
ignore_changes = [public_key]
}
}
// then do management module
module "cluster-management" {
source = "./management"
cluster = local.cluster_name
k8s_config_yaml = rancher2_cluster_v2.cluster.kube_config
minio_access_key = data.vault_kv_secret_v2.minio.data["access_key"]
minio_secret_key = data.vault_kv_secret_v2.minio.data["secret_key"]
minio_server = var.minio_server
ssh_private_key = tls_private_key.cluster.private_key_pem
ssh_public_key = tls_private_key.cluster.public_key_openssh
vault_token = var.vault_token
vault_server = var.vault_server
}
module "devops" {
source = "./devops"
minio_access_key = data.vault_kv_secret_v2.minio.data["access_key"]
minio_secret_key = data.vault_kv_secret_v2.minio.data["secret_key"]
minio_server = var.minio_server
cluster = local.cluster_name
k8s_config_yaml = var.k8s_config_yaml
}
module "cluster-hcloud" {
source = "./hcloud"
node_count = var.node_count
instance_type = var.node_instance_type
cluster_registration_command = rancher2_cluster_v2.cluster.cluster_registration_token.0.insecure_node_command
hcloud_network_id = var.hcloud_network_id
hcloud_token = var.hcloud_token
ssh_key_id = hcloud_ssh_key.cluster.id
prefix = var.prefix
name = var.name
}
resource "kubernetes_namespace" "cluster" {
metadata {
name = local.cluster_name
}
lifecycle {
ignore_changes = [metadata]
}
}
module "cluster-tls" {
source = "./tls"
name = local.cluster_name
namespace = kubernetes_namespace.cluster.metadata[0].name
hosts = var.hosts
k8s_config_yaml = var.k8s_config_yaml
}
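# Manually managed Endpoints + Service let the management cluster's Traefik
# proxy the public hostnames to the workload cluster's private node IPs.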
resource "kubernetes_manifest" "cluster-endpoints" {
manifest = {
apiVersion = "v1"
kind = "Endpoints"
metadata = {
name = local.cluster_name
namespace = kubernetes_namespace.cluster.metadata[0].name
}
subsets = [
{
addresses = local.cluster_addresses,
ports = [
{
port = 80
}
]
}
]
}
}
resource "kubernetes_manifest" "cluster-service" {
depends_on = [kubernetes_manifest.cluster-endpoints]
manifest = {
apiVersion = "v1"
kind = "Service"
metadata = {
name = local.cluster_name
namespace = kubernetes_namespace.cluster.metadata[0].name
}
spec = {
ports = [
{
port = 80
protocol = "TCP"
targetPort = 80
}
]
type = "ClusterIP"
}
}
}
resource "kubernetes_manifest" "application_ingress" {
depends_on = [
kubernetes_manifest.cluster-endpoints,
kubernetes_manifest.cluster-service,
module.cluster-tls.installed
]
manifest = {
apiVersion = "networking.k8s.io/v1"
kind = "Ingress"
metadata = {
name = local.cluster_name
namespace = kubernetes_namespace.cluster.metadata[0].name
annotations = {
"kubernetes.io/ingress.class" = "traefik"
"cert-manager.io/cluster-issuer" = "letsencrypt"
"traefik.ingress.kubernetes.io/router.entrypoints" = "web,websecure"
"traefik.ingress.kubernetes.io/router.middlewares" = "default-redirect-to-https@kubernetescrd,default-preserve-host-headers@kubernetescrd"
"traefik.ingress.kubernetes.io/service.backend.loadbalancer.server.scheme" = "http"
"traefik.ingress.kubernetes.io/service.backend.loadbalancer.healthcheck.path" = "/healthz"
"traefik.ingress.kubernetes.io/service.backend.loadbalancer.healthcheck.interval" = "10s"
"traefik.ingress.kubernetes.io/service.backend.loadbalancer.healthcheck.timeout" = "3s"
#"traefik.ingress.kubernetes.io/service.backend.loadbalancer.sticky.cookie" = "true"
#"traefik.ingress.kubernetes.io/service.backend.loadbalancer.sticky.cookie.name" = "platform_sticky"
}
}
spec = {
ingressClassName = "traefik"
rules = var.hosts != null ? [
for host in var.hosts : {
host = host
http = {
paths = [
{
path = "/"
pathType = "Prefix"
backend = {
service = {
name = kubernetes_manifest.cluster-service.manifest.metadata.name
port = {
number = 80
}
}
}
}
]
}
}
] : [],
tls = [
{
hosts = var.hosts
secretName = "${local.cluster_name}-tls"
}
// TODO: Optional extra TLS from external secret
]
}
}
}

View File

@ -0,0 +1,46 @@
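# Persists the cluster's access material (kubeconfig, SSH keypair, optional
# Vault secret file) in its MinIO bucket and records the hcloud credentials in Vault.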
resource "minio_s3_object" "kube_config_cluster_yaml" {
bucket_name = var.cluster
object_name = "kube_config.yaml"
content = var.k8s_config_yaml
content_type = "text/plain"
}
resource "minio_s3_object" "ssh_cluster_private_key" {
bucket_name = var.cluster
object_name = "id_rsa"
content = var.ssh_private_key
content_type = "text/plain"
}
resource "minio_s3_object" "ssh_cluster_public_key" {
bucket_name = var.cluster
object_name = "id_rsa.pub"
content = var.ssh_public_key
content_type = "text/plain"
}
resource "minio_s3_object" "vault_secrets" {
count = var.vault_secret_path == null ? 0 : 1
bucket_name = var.cluster
object_name = "vault.secret"
content = file(var.vault_secret_path)
content_type = "text/plain"
}
resource "vault_kv_secret_v2" "hcloud" {
count = var.hcloud_network_id == null && var.hcloud_token == null ? 0 : 1
mount = var.cluster
name = "hcloud"
delete_all_versions = true
data_json = jsonencode({
network_id = var.hcloud_network_id
token = var.hcloud_token
})
depends_on = [var.wait_on]
}
output "installed" {
value = true
}

View File

@ -0,0 +1,21 @@
terraform {
required_providers {
minio = {
source = "aminueza/minio"
version = "~> 2.5.0"
}
}
}
provider "vault" {
address = var.vault_server
token = var.vault_token
}
provider "minio" {
minio_server = var.minio_server
minio_region = "not-used"
minio_user = var.minio_access_key
minio_password = var.minio_secret_key
minio_ssl = true
}

View File

@ -0,0 +1,63 @@
variable "cluster" {
description = "Name or ID of the cluster"
type = string
}
variable "k8s_config_yaml" {
description = "Content of the k8s config yaml file"
type = string
}
variable "wait_on" {
type = any
description = "Resources to wait on"
default = true
}
variable "ssh_private_key" {
description = "Contents of the ssh private key"
type = string
}
variable "ssh_public_key" {
description = "Contents of the ssh public key"
type = string
}
variable "vault_secret_path" {
description = "Path to the vault secret"
type = string
default = null
}
variable "hcloud_token" {
type = string
default = null
}
variable "hcloud_network_id" {
type = string
default = null
}
variable "vault_server" {
type = string
}
variable "vault_token" {
type = string
sensitive = true
}
variable "minio_server" {
type = string
}
variable "minio_access_key" {
type = string
}
variable "minio_secret_key" {
type = string
sensitive = true
}

View File

@ -0,0 +1,63 @@
locals {
k8s_config = yamldecode(var.k8s_config_yaml)
k8s_host = local.k8s_config.clusters[0].cluster.server
k8s_auth = try(
{
token = local.k8s_config.users[0].user.token
using_token = true
},
{
client_certificate = base64decode(local.k8s_config.users[0].user["client-certificate-data"])
client_key = base64decode(local.k8s_config.users[0].user["client-key-data"])
using_token = false
}
)
}
terraform {
required_providers {
rancher2 = {
source = "rancher/rancher2"
version = "3.0.0"
}
hcloud = {
source = "hetznercloud/hcloud"
version = "~> 1.45"
}
}
}
provider "kubernetes" {
host = local.k8s_host
insecure = true
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
client_certificate = local.k8s_auth.using_token ? null : local.k8s_auth.client_certificate
client_key = local.k8s_auth.using_token ? null : local.k8s_auth.client_key
}
provider "helm" {
kubernetes {
host = local.k8s_host
insecure = true
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
client_certificate = local.k8s_auth.using_token ? null : local.k8s_auth.client_certificate
client_key = local.k8s_auth.using_token ? null : local.k8s_auth.client_key
}
}
provider "vault" {
address = var.vault_server
token = var.vault_token
}
provider "rancher2" {
alias = "admin"
api_url = var.rancher_server_uri
insecure = true
token_key = var.rancher_admin_token
timeout = "300s"
}
provider "hcloud" {
token = var.hcloud_token
}

View File

@ -0,0 +1,26 @@
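# Requests a certificate for the given hosts from the "letsencrypt"
# ClusterIssuer; cert-manager stores it in the "<name>-tls" secret.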
resource "kubernetes_manifest" "cluster-tls" {
depends_on = [var.wait_on]
manifest = {
apiVersion = "cert-manager.io/v1"
kind = "Certificate"
metadata = {
name = "${var.name}-tls"
namespace = var.namespace
}
spec = {
secretName = "${var.name}-tls"
issuerRef = {
name = "letsencrypt"
kind = "ClusterIssuer"
}
dnsNames = var.hosts
}
}
}
output "installed" {
value = true
depends_on = [kubernetes_manifest.cluster-tls]
}

View File

@ -0,0 +1,33 @@
locals {
k8s_config = yamldecode(var.k8s_config_yaml)
k8s_host = local.k8s_config.clusters[0].cluster.server
k8s_auth = try(
{
token = local.k8s_config.users[0].user.token
using_token = true
},
{
client_certificate = base64decode(local.k8s_config.users[0].user["client-certificate-data"])
client_key = base64decode(local.k8s_config.users[0].user["client-key-data"])
using_token = false
}
)
}
provider "kubernetes" {
host = local.k8s_host
insecure = true
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
client_certificate = local.k8s_auth.using_token ? null : local.k8s_auth.client_certificate
client_key = local.k8s_auth.using_token ? null : local.k8s_auth.client_key
}
provider "helm" {
kubernetes {
host = local.k8s_host
insecure = true
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
client_certificate = local.k8s_auth.using_token ? null : local.k8s_auth.client_certificate
client_key = local.k8s_auth.using_token ? null : local.k8s_auth.client_key
}
}

View File

@ -0,0 +1,24 @@
variable "name" {
type = string
description = "Name of the cluster"
}
variable "k8s_config_yaml" {
description = "Content of k8s config yaml file"
type = string
}
variable "wait_on" {
type = any
description = "Resources to wait on"
default = true
}
variable "namespace" {
type = string
}
variable "hosts" {
type = list(string)
default = null
}

View File

@ -0,0 +1,74 @@
variable "prefix" {
type = string
default = "app"
}
variable "name" {
type = string
description = "Name of the cluster"
}
variable "k8s_config_yaml" {
description = "Content of k8s config yaml file"
type = string
}
variable "kubernetes_version" {
type = string
description = "Kubernetes version to use for managed workload cluster"
default = "v1.30.3+rke2r1"
}
variable "rancher_server_uri" {
type = string
}
variable "rancher_admin_token" {
type = string
sensitive = true
}
variable "hcloud_token" {
type = string
sensitive = true
}
variable "hcloud_network_id" {
type = string
}
variable "node_count" {
type = number
description = "Number of nodes in the cluster"
default = 3
}
variable "node_instance_type" {
type = string
description = "Type of instance to be used for all instances"
default = "cx22"
}
variable "minio_server" {
type = string
}
variable "vault_server" {
type = string
}
variable "vault_token" {
type = string
sensitive = true
}
variable "hosts" {
type = list(string)
default = null
}
variable "wait_on" {
type = any
description = "Resources to wait on"
default = true
}

View File

@ -0,0 +1,87 @@
terraform {
required_providers {
fusionauth = {
source = "FusionAuth/fusionauth"
version = "0.1.111"
}
}
}
provider "fusionauth" {
api_key = var.fusionauth_api_key
host = var.fusionauth_uri
}
resource "random_password" "client_secret" {
length = 32
special = true
}
resource "fusionauth_application" "app" {
depends_on = [var.wait_on]
tenant_id = var.tenant_id
name = var.name
lambda_configuration {
id_token_populate_id = var.rbac_lambda_id
}
oauth_configuration {
authorized_redirect_urls = [var.oauth_redirect_uri]
client_secret = random_password.client_secret.result
require_registration = var.oauth_require_registration
enabled_grants = var.oauth_enabled_grants
unknown_scope_policy = "Remove"
scope_handling_policy = "Strict"
provided_scope_policy {
address {
enabled = true
required = false
}
phone {
enabled = true
required = false
}
email {
enabled = true
required = true
}
profile {
enabled = true
required = true
}
}
}
}
resource "fusionauth_application_role" "admin" {
application_id = fusionauth_application.app.oauth_configuration[0].client_id
description = "Admin"
is_default = true # NOTE: This is obviously insecure
is_super_role = true
name = "admin"
}
output "application_id" {
value = fusionauth_application.app.oauth_configuration[0].client_id
}
output "client_id" {
value = fusionauth_application.app.oauth_configuration[0].client_id
}
output "client_secret" {
value = fusionauth_application.app.oauth_configuration[0].client_secret
sensitive = true
}
output "redirect_uri" {
value = var.oauth_redirect_uri
}
output "installed" {
value = true
depends_on = [fusionauth_application.app]
}

View File

@ -0,0 +1,48 @@
variable "name" {
type = string
description = "Name of the application"
}
variable "tenant_id" {
type = string
description = "FusionAuth Tenant ID"
}
variable "fusionauth_api_key" {
type = string
sensitive = true
description = "FusionAuth API Key"
}
variable "fusionauth_uri" {
type = string
description = "FusionAuth instance URI"
}
variable "oauth_redirect_uri" {
type = string
description = "Redirect URI for the tenant"
}
variable "oauth_enabled_grants" {
type = list(string)
description = "List of enabled grants"
default = ["authorization_code", "refresh_token"]
}
variable "oauth_require_registration" {
type = bool
description = "Require registration for the tenant"
default = false
}
variable "rbac_lambda_id" {
type = string
default = null
}
variable "wait_on" {
type = any
description = "Resources to wait on"
default = true
}

Some files were not shown because too many files have changed in this diff