added cluster functionality

This commit is contained in:
Thomas Rijpstra 2024-09-20 17:29:22 +02:00
parent 09d218f906
commit c745eb76a3
Signed by: thomas
SSH Key Fingerprint: SHA256:au5M4TrfxCxk778HDa1d+VB33vzyetoOvL8zrsDkJt0
41 changed files with 720 additions and 447 deletions

View File

@ -59,41 +59,22 @@ provider "registry.terraform.io/hashicorp/kubernetes" {
]
}
provider "registry.terraform.io/hashicorp/null" {
version = "3.2.3"
provider "registry.terraform.io/hashicorp/tls" {
version = "4.0.6"
hashes = [
"h1:+AnORRgFbRO6qqcfaQyeX80W0eX3VmjadjnUFUJTiXo=",
"zh:22d062e5278d872fe7aed834f5577ba0a5afe34a3bdac2b81f828d8d3e6706d2",
"zh:23dead00493ad863729495dc212fd6c29b8293e707b055ce5ba21ee453ce552d",
"zh:28299accf21763ca1ca144d8f660688d7c2ad0b105b7202554ca60b02a3856d3",
"zh:55c9e8a9ac25a7652df8c51a8a9a422bd67d784061b1de2dc9fe6c3cb4e77f2f",
"zh:756586535d11698a216291c06b9ed8a5cc6a4ec43eee1ee09ecd5c6a9e297ac1",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:9d5eea62fdb587eeb96a8c4d782459f4e6b73baeece4d04b4a40e44faaee9301",
"zh:a6355f596a3fb8fc85c2fb054ab14e722991533f87f928e7169a486462c74670",
"zh:b5a65a789cff4ada58a5baffc76cb9767dc26ec6b45c00d2ec8b1b027f6db4ed",
"zh:db5ab669cf11d0e9f81dc380a6fdfcac437aea3d69109c7aef1a5426639d2d65",
"zh:de655d251c470197bcbb5ac45d289595295acb8f829f6c781d4a75c8c8b7c7dd",
"zh:f5c68199f2e6076bce92a12230434782bf768103a427e9bb9abee99b116af7b5",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.6.3"
hashes = [
"h1:Fnaec9vA8sZ8BXVlN3Xn9Jz3zghSETIKg7ch8oXhxno=",
"zh:04ceb65210251339f07cd4611885d242cd4d0c7306e86dda9785396807c00451",
"zh:448f56199f3e99ff75d5c0afacae867ee795e4dfda6cb5f8e3b2a72ec3583dd8",
"zh:4b4c11ccfba7319e901df2dac836b1ae8f12185e37249e8d870ee10bb87a13fe",
"zh:4fa45c44c0de582c2edb8a2e054f55124520c16a39b2dfc0355929063b6395b1",
"zh:588508280501a06259e023b0695f6a18149a3816d259655c424d068982cbdd36",
"zh:737c4d99a87d2a4d1ac0a54a73d2cb62974ccb2edbd234f333abd079a32ebc9e",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:a357ab512e5ebc6d1fda1382503109766e21bbfdfaa9ccda43d313c122069b30",
"zh:c51bfb15e7d52cc1a2eaec2a903ac2aff15d162c172b1b4c17675190e8147615",
"zh:e0951ee6fa9df90433728b96381fb867e3db98f66f735e0c3e24f8f16903f0ad",
"zh:e3cdcb4e73740621dabd82ee6a37d6cfce7fee2a03d8074df65086760f5cf556",
"zh:eff58323099f1bd9a0bec7cb04f717e7f1b2774c7d612bf7581797e1622613a0",
"h1:dYSb3V94K5dDMtrBRLPzBpkMTPn+3cXZ/kIJdtFL+2M=",
"zh:10de0d8af02f2e578101688fd334da3849f56ea91b0d9bd5b1f7a243417fdda8",
"zh:37fc01f8b2bc9d5b055dc3e78bfd1beb7c42cfb776a4c81106e19c8911366297",
"zh:4578ca03d1dd0b7f572d96bd03f744be24c726bfd282173d54b100fd221608bb",
"zh:6c475491d1250050765a91a493ef330adc24689e8837a0f07da5a0e1269e11c1",
"zh:81bde94d53cdababa5b376bbc6947668be4c45ab655de7aa2e8e4736dfd52509",
"zh:abdce260840b7b050c4e401d4f75c7a199fafe58a8b213947a258f75ac18b3e8",
"zh:b754cebfc5184873840f16a642a7c9ef78c34dc246a8ae29e056c79939963c7a",
"zh:c928b66086078f9917aef0eec15982f2e337914c5c4dbc31dd4741403db7eb18",
"zh:cded27bee5f24de6f2ee0cfd1df46a7f88e84aaffc2ecbf3ff7094160f193d50",
"zh:d65eb3867e8f69aaf1b8bb53bd637c99c6b649ba3db16ded50fa9a01076d1a27",
"zh:ecb0c8b528c7a619fa71852bb3fb5c151d47576c5aab2bf3af4db52588722eeb",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
]
}
@ -115,3 +96,45 @@ provider "registry.terraform.io/hashicorp/vault" {
"zh:f5f6ae50a23a184d126832d688380e22311fa1b0192723507a790e57917c3e78",
]
}
provider "registry.terraform.io/hetznercloud/hcloud" {
version = "1.33.2"
constraints = "1.33.2"
hashes = [
"h1:3Hx8p9LbcnHfBhy3nT7+unlc5rwkiSZjLt9SVQOSpB8=",
"zh:0a5d0f332d7dfe77fa27301094af98a185aabfb9f56d71b81936e03211e4d66f",
"zh:0e047859ee7296f335881933ccf8ce8c07aa47bef56d5449a81b85a2d9dac93a",
"zh:1d3d0896f518df9e245c3207ed231e528f5dcfe628508e7c3ceba4a2bfefaa7a",
"zh:1d7a31c8c490512896ce327ab220e950f1a2e30ee83cc2e58e69bbbfbbb87e72",
"zh:67cbb2492683cb22f6c54f26bee72aec140c8dd2d0881b2815d2ef80959fc751",
"zh:771062815e662979204ac2dc91c34c893f27670d67e02370e48124483d3c9838",
"zh:957ebb146898cd059c0cc8b4c32e574b61041d8b6a11cd854b3cc1d3baaeb3a9",
"zh:95dbd8634000b979213cb97b5d869cad78299ac994d0665d150c8dafc1390429",
"zh:a21b22b2e9d835e1b8b3b7e0b41a4d199171d62e9e9be78c444c700e96b31316",
"zh:aead1ba50640a51f20d574374f2c6065d9bfa4eea5ef044d1475873c33e58239",
"zh:cefabd0a78af40ea5cd08e1ca436c753df9b1c6496eb27281b755a2de1f167ab",
"zh:d98cffc5206b9a7550a23e13031a6f53566bd1ed3bf65314bc55ef12404d49ce",
"zh:dddaaf95b6aba701153659feff12c7bce6acc78362cb5ff8321a1a1cbf780cd9",
"zh:fd662b483250326a1bfbe5684c22c5083955a43e0773347eea35cd4c2cfe700e",
]
}
provider "registry.terraform.io/rancher/rancher2" {
version = "3.0.0"
constraints = "3.0.0"
hashes = [
"h1:Qnc86BDThHGg+UqfK8Ssx7l+KcYg8wBDsMU3mCgUK6E=",
"zh:3f28e165f4e6dbfb3c6f57ea96571f907915cf9d3eaf0041054ec3c4e22cc14b",
"zh:4d71e727690d8691321c9591248599fdb38e09e27dace74da6dee16ec01351b0",
"zh:51dc86277205c7514cad0edd6e48a300a470a846a12927323b09fb1550891bcb",
"zh:5b240c5eefc5bcffcf851bd11dc913cff05a0fbf7539e966c7638894265a6297",
"zh:8f754482629b587083c1b9e0e0646a577a8defdf64d61ca12c853dd41ffbc1bb",
"zh:9a212e0dd166e2dc1ae3c13c99b07eb6f48e5ec4b6dcdca857d3f3d05b0fcabc",
"zh:a4e45342af8e9a8ab2be9a3ffd8a7df244519fade4901cc0b95328937e8b80ba",
"zh:af148901e447f97b844b5d5a81df5c7fce0432b3f0a42cb674196f0ff2ce1ded",
"zh:b11a97fc16b1fde2956906569bae890be59d444c192c560f00dca418b8184875",
"zh:b1588f6b704326ee6cf384c6d2542e4bd6f08b5324098cb6a7c126fb37112b28",
"zh:e63dd35d6f962e22561b3dd1b6fd8c23bb8154ca492a89e6b4693569974c971f",
"zh:f1eeae30b192f569f3e16061e28f1ce876a6f48eeab4c113e5f771809719090b",
]
}

View File

@ -0,0 +1,33 @@
# Provision the "application" downstream cluster (Vault mount, MinIO bucket,
# Rancher cluster, SSH key pair) via the shared cluster module.
module "application_cluster" {
source = "../../modules/cluster"
name = "application"
rancher_admin_token = data.vault_kv_secret_v2.rancher.data["token"]
rancher_server_uri = data.vault_kv_secret_v2.rancher.data["uri"]
}
# Fetch the SSH public key the cluster module stored in the "application" bucket.
data "minio_s3_object" "application_ssh_public_key" {
depends_on = [module.application_cluster]
bucket_name = "application"
object_name = "id_rsa.pub"
}
# Fetch the matching SSH private key (sensitive material) from the same bucket.
data "minio_s3_object" "application_ssh_private_key" {
depends_on = [module.application_cluster]
bucket_name = "application"
object_name = "id_rsa"
}
# First Hetzner Cloud node of the application cluster; it joins the cluster
# using the registration command emitted by the cluster module.
module "application_node_1" {
source = "../../modules/hcloud-node"
name = "application"
cluster_registration_command = module.application_cluster.cluster_registration_command
hcloud_network_id = data.vault_kv_secret_v2.hcloud.data["network_id"]
hcloud_token = data.vault_kv_secret_v2.hcloud.data["token"]
prefix = "bouwroute"
ssh_private_key = data.minio_s3_object.application_ssh_private_key.content
ssh_public_key = data.minio_s3_object.application_ssh_public_key.content
suffix = "1"
}

View File

@ -38,6 +38,25 @@ provider "registry.terraform.io/hashicorp/helm" {
]
}
provider "registry.terraform.io/hashicorp/kubernetes" {
version = "2.32.0"
hashes = [
"h1:HqeU0sZBh+2loFYqPMFx7jJamNUPEykyqJ9+CkMCYE0=",
"zh:0e715d7fb13a8ad569a5fdc937b488590633f6942e986196fdb17cd7b8f7720e",
"zh:495fc23acfe508ed981e60af9a3758218b0967993065e10a297fdbc210874974",
"zh:4b930a8619910ef528bc90dae739cb4236b9b76ce41367281e3bc3cf586101c7",
"zh:5344405fde7b1febf0734052052268ee24e7220818155702907d9ece1c0697c7",
"zh:92ee11e8c23bbac3536df7b124456407f35c6c2468bc0dbab15c3fc9f414bd0e",
"zh:a45488fe8d5bb59c49380f398da5d109a4ac02ebc10824567dabb87f6102fda8",
"zh:a4a0b57cf719a4c91f642436882b7bea24d659c08a5b6f4214ce4fe6a0204caa",
"zh:b7a27a6d11ba956a2d7b0f7389a46ec857ebe46ae3aeee537250e66cac15bf03",
"zh:bf94ce389028b686bfa70a90f536e81bb776c5c20ab70138bbe5c3d0a04c4253",
"zh:d965b2608da0212e26a65a0b3f33c5baae46cbe839196be15d93f70061516908",
"zh:f441fc793d03057a17af8bdca8b26d54916645bc5c148f54e22a54ed39089e83",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
]
}
provider "registry.terraform.io/hashicorp/local" {
version = "2.4.0"
constraints = "2.4.0"
@ -59,21 +78,21 @@ provider "registry.terraform.io/hashicorp/local" {
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.6.2"
version = "3.6.3"
hashes = [
"h1:wmG0QFjQ2OfyPy6BB7mQ57WtoZZGGV07uAPQeDmIrAE=",
"zh:0ef01a4f81147b32c1bea3429974d4d104bbc4be2ba3cfa667031a8183ef88ec",
"zh:1bcd2d8161e89e39886119965ef0f37fcce2da9c1aca34263dd3002ba05fcb53",
"zh:37c75d15e9514556a5f4ed02e1548aaa95c0ecd6ff9af1119ac905144c70c114",
"zh:4210550a767226976bc7e57d988b9ce48f4411fa8a60cd74a6b246baf7589dad",
"zh:562007382520cd4baa7320f35e1370ffe84e46ed4e2071fdc7e4b1a9b1f8ae9b",
"zh:5efb9da90f665e43f22c2e13e0ce48e86cae2d960aaf1abf721b497f32025916",
"zh:6f71257a6b1218d02a573fc9bff0657410404fb2ef23bc66ae8cd968f98d5ff6",
"h1:Fnaec9vA8sZ8BXVlN3Xn9Jz3zghSETIKg7ch8oXhxno=",
"zh:04ceb65210251339f07cd4611885d242cd4d0c7306e86dda9785396807c00451",
"zh:448f56199f3e99ff75d5c0afacae867ee795e4dfda6cb5f8e3b2a72ec3583dd8",
"zh:4b4c11ccfba7319e901df2dac836b1ae8f12185e37249e8d870ee10bb87a13fe",
"zh:4fa45c44c0de582c2edb8a2e054f55124520c16a39b2dfc0355929063b6395b1",
"zh:588508280501a06259e023b0695f6a18149a3816d259655c424d068982cbdd36",
"zh:737c4d99a87d2a4d1ac0a54a73d2cb62974ccb2edbd234f333abd079a32ebc9e",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:9647e18f221380a85f2f0ab387c68fdafd58af6193a932417299cdcae4710150",
"zh:bb6297ce412c3c2fa9fec726114e5e0508dd2638cad6a0cb433194930c97a544",
"zh:f83e925ed73ff8a5ef6e3608ad9225baa5376446349572c2449c0c0b3cf184b7",
"zh:fbef0781cb64de76b1df1ca11078aecba7800d82fd4a956302734999cfd9a4af",
"zh:a357ab512e5ebc6d1fda1382503109766e21bbfdfaa9ccda43d313c122069b30",
"zh:c51bfb15e7d52cc1a2eaec2a903ac2aff15d162c172b1b4c17675190e8147615",
"zh:e0951ee6fa9df90433728b96381fb867e3db98f66f735e0c3e24f8f16903f0ad",
"zh:e3cdcb4e73740621dabd82ee6a37d6cfce7fee2a03d8074df65086760f5cf556",
"zh:eff58323099f1bd9a0bec7cb04f717e7f1b2774c7d612bf7581797e1622613a0",
]
}

View File

@ -1,5 +1,6 @@
data "local_sensitive_file" "vault_keys" {
filename = "${path.module}/vault.secret"
depends_on = [module.rancher.vault_uri]
filename = "vault.secret"
}
locals {

View File

@ -29,17 +29,17 @@ resource "hcloud_network_subnet" "private" {
# Temporary key pair used for SSH access
resource "hcloud_ssh_key" "management_ssh_key" {
name = "${var.prefix}-instance-ssh-key"
name = "${var.prefix}-management-ssh-key"
public_key = tls_private_key.global_key.public_key_openssh
}
# HCloud Instance for creating a single node RKE cluster and installing the Rancher server
resource "hcloud_server" "management_server" {
name = "${var.prefix}-management-server"
name = "${var.prefix}-management-1"
image = "ubuntu-24.04"
server_type = var.instance_type
location = var.hcloud_location
ssh_keys = [hcloud_ssh_key.management_ssh_key.id]
ssh_keys = [hcloud_ssh_key.management_ssh_key.id]
network {
network_id = hcloud_network.private.id

View File

@ -7,7 +7,12 @@ output "rancher_server_uri" {
}
output "rancher_server_admin_password" {
value = module.rancher.rancher_server_admin_password
value = module.rancher.rancher_server_admin_password
sensitive = true
}
output "rancher_server_admin_token" {
value = module.rancher.rancher_server_admin_token
sensitive = true
}
@ -20,12 +25,12 @@ output "minio_admin_uri" {
}
output "minio_root_user" {
value = module.rancher.minio_root_user
value = module.rancher.minio_root_user
sensitive = true
}
output "minio_root_password" {
value = module.rancher.minio_root_password
value = module.rancher.minio_root_password
sensitive = true
}
@ -38,6 +43,15 @@ output "vault_uri" {
}
output "vault_root_token" {
value = local.vault_keys.root_token
value = local.vault_keys.root_token
sensitive = true
}
output "hcloud_network_id" {
value = hcloud_network.private.id
}
output "hcloud_token" {
value = var.hcloud_token
sensitive = true
}

View File

@ -6,12 +6,17 @@ locals {
"$1:443"
)
minio_region = try(data.terraform_remote_state.stage1.outputs.minio_region, "eu-central-1")
minio_root_user = data.terraform_remote_state.stage1.outputs.minio_root_user
minio_root_user = data.terraform_remote_state.stage1.outputs.minio_root_user
minio_root_password = data.terraform_remote_state.stage1.outputs.minio_root_password
vault_uri = data.terraform_remote_state.stage1.outputs.vault_uri
vault_root_token = data.terraform_remote_state.stage1.outputs.vault_root_token
vault_uri = data.terraform_remote_state.stage1.outputs.vault_uri
vault_root_token = data.terraform_remote_state.stage1.outputs.vault_root_token
node_ip = data.terraform_remote_state.stage1.outputs.rancher_node_ip
rancher_server = data.terraform_remote_state.stage1.outputs.rancher_server_uri
rancher_server_admin_token = data.terraform_remote_state.stage1.outputs.rancher_server_admin_token
node_ip = data.terraform_remote_state.stage1.outputs.rancher_node_ip
hcloud_network_id = data.terraform_remote_state.stage1.outputs.hcloud_network_id
hcloud_token = data.terraform_remote_state.stage1.outputs.hcloud_token
id_rsa = file("../stage1-create/id_rsa")
id_rsa_pub = file("../stage1-create/id_rsa.pub")

View File

@ -1,13 +1,13 @@
resource "vault_mount" "management" {
path = "management"
type = "kv"
options = { version = "2" }
options = { version = "2" }
description = "KV Version 2 secret engine mount for management"
}
resource "minio_s3_bucket" "management" {
bucket = "management"
acl = "private"
acl = "private"
}
# TODO: Enable encryption and versioning on the bucket
@ -18,27 +18,27 @@ resource "minio_s3_bucket" "management" {
# }
resource "minio_s3_object" "id_rsa" {
depends_on = [minio_s3_bucket.management]
bucket_name = minio_s3_bucket.management.bucket
object_name = "id_rsa"
content = local.id_rsa
content_type = "text/plain"
depends_on = [minio_s3_bucket.management]
bucket_name = minio_s3_bucket.management.bucket
object_name = "id_rsa"
content = local.id_rsa
content_type = "text/plain"
}
resource "minio_s3_object" "id_rsa_pub" {
depends_on = [minio_s3_bucket.management]
bucket_name = minio_s3_bucket.management.bucket
object_name = "id_rsa.pub"
content = local.id_rsa_pub
content_type = "text/plain"
depends_on = [minio_s3_bucket.management]
bucket_name = minio_s3_bucket.management.bucket
object_name = "id_rsa.pub"
content = local.id_rsa_pub
content_type = "text/plain"
}
resource "minio_s3_object" "k8s_yaml" {
depends_on = [minio_s3_bucket.management]
bucket_name = minio_s3_bucket.management.bucket
object_name = "kube_config_server.yaml"
content = local.k8s_yaml
content_type = "text/plain"
depends_on = [minio_s3_bucket.management]
bucket_name = minio_s3_bucket.management.bucket
object_name = "kube_config_server.yaml"
content = local.k8s_yaml
content_type = "text/plain"
}
resource "minio_iam_user" "management" {
@ -51,19 +51,42 @@ resource "minio_iam_policy" "management" {
Version = "2012-10-17"
Statement = [
{
Effect = "Allow"
Action = ["s3:ListBucket"]
Resource = ["arn:aws:s3:::management"]
Effect = "Allow"
Action = [
"s3:*",
]
Resource = [
"arn:aws:s3:::*"
]
},
{
Effect = "Allow"
Action = [
"s3:GetObject",
"s3:PutObject",
"s3:DeleteObject"
"admin:*"
]
Resource = ["arn:aws:s3:::management/*"]
}
Resource = [
"arn:aws:s3:::*"
]
},
{
Effect = "Deny"
Action = [
"s3:DeleteBucket",
]
Resource = [
"arn:aws:s3:::management"
]
},
{
Effect = "Deny"
Action = [
"admin:DeleteUser",
"admin:DeletePolicy",
]
Resource = [
"arn:aws:s3:::management"
]
},
]
})
}
@ -76,25 +99,7 @@ resource "minio_iam_user_policy_attachment" "management" {
resource "minio_iam_service_account" "management_service_account" {
target_user = minio_iam_user.management.name
policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Effect = "Allow"
Action = ["s3:ListBucket"]
Resource = ["arn:aws:s3:::management"]
},
{
Effect = "Allow"
Action = [
"s3:GetObject",
"s3:PutObject",
"s3:DeleteObject"
]
Resource = ["arn:aws:s3:::management/*"]
}
]
})
policy = minio_iam_policy.management.policy
}
resource "vault_kv_secret_v2" "minio_creds" {
@ -113,28 +118,71 @@ resource "vault_kv_secret_v2" "minio_creds" {
]
}
resource "vault_kv_secret_v2" "rancher_server" {
mount = "management"
name = "rancher"
delete_all_versions = true
data_json = jsonencode({
uri = local.rancher_server
token = local.rancher_server_admin_token
})
depends_on = [
vault_mount.management,
minio_iam_service_account.management_service_account
]
}
resource "vault_kv_secret_v2" "hcloud" {
mount = "management"
name = "hcloud"
delete_all_versions = true
data_json = jsonencode({
network_id = local.hcloud_network_id
token = local.hcloud_token
})
depends_on = [
vault_mount.management,
minio_iam_service_account.management_service_account
]
}
resource "vault_policy" "management" {
name = "management"
policy = <<EOT
path "management/*" {
# Grant full access to all KV2 mounts, including 'management' and any new mounts
path "+/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
# Allow creating and configuring new KV2 mounts
path "sys/mounts/*" {
capabilities = ["create", "read", "update", "delete"]
}
# Token management
path "auth/token/create" {
capabilities = ["create", "update", "sudo"]
capabilities = ["create", "update", "sudo"]
}
path "auth/token/lookup-self" {
capabilities = ["read"]
capabilities = ["read"]
}
path "auth/token/renew-self" {
capabilities = ["update"]
capabilities = ["update"]
}
# Add other necessary permissions
# Add other necessary permissions as needed
EOT
}
resource "vault_token" "management" {
policies = [vault_policy.management.name]
policies = [vault_policy.management.name]
renewable = true
ttl = "1h"
period = "15m"

View File

@ -3,7 +3,7 @@ output "vault_uri" {
}
output "vault_token" {
value = vault_token.management.client_token
value = vault_token.management.client_token
sensitive = true
}

View File

@ -1,4 +1,3 @@
# Rancher resources
module "rancher_common" {
source = "../rancher-common"
@ -14,7 +13,7 @@ module "rancher_common" {
rancher_helm_repository = var.rancher_helm_repository
rancher_server_dns = join(".", ["rancher", hcloud_server.rancher_server.ipv4_address, "sslip.io"])
admin_password = var.rancher_server_admin_password
admin_password = var.rancher_server_admin_password
workload_kubernetes_version = var.workload_kubernetes_version
workload_cluster_name = "quickstart-hcloud-custom"
@ -26,14 +25,14 @@ resource "hcloud_server" "quickstart_node" {
image = "ubuntu-20.04"
server_type = var.instance_type
location = var.hcloud_location
ssh_keys = [hcloud_ssh_key.quickstart_ssh_key.id]
ssh_keys = [hcloud_ssh_key.quickstart_ssh_key.id]
network {
network_id = hcloud_network.private.id
}
user_data = templatefile(
"${path.module}/files/userdata_quickstart_node.template",
"userdata_node.template",
{
username = local.node_username
register_command = module.rancher_common.custom_cluster_command

View File

@ -1,7 +1,7 @@
terraform {
required_providers {
minio = {
source = "aminueza/minio"
source = "aminueza/minio"
version = "~> 2.5.0"
}
}
@ -23,4 +23,9 @@ data "minio_s3_object" "k8s_yaml" {
data "minio_s3_object" "id_rsa" {
bucket_name = var.bucket
object_name = "id_rsa"
}
}
data "minio_s3_object" "id_rsa_pub" {
bucket_name = var.bucket
object_name = "id_rsa.pub"
}

View File

@ -0,0 +1,33 @@
# Provision the "monitoring" downstream cluster (Vault mount, MinIO bucket,
# Rancher cluster, SSH key pair) via the shared cluster module.
module "monitoring_cluster" {
source = "../../modules/cluster"
name = "monitoring"
rancher_admin_token = data.vault_kv_secret_v2.rancher.data["token"]
rancher_server_uri = data.vault_kv_secret_v2.rancher.data["uri"]
}
# Fetch the SSH public key the cluster module stored in the "monitoring" bucket.
data "minio_s3_object" "monitoring_ssh_public_key" {
depends_on = [module.monitoring_cluster]
bucket_name = "monitoring"
object_name = "id_rsa.pub"
}
# Fetch the matching SSH private key (sensitive material) from the same bucket.
data "minio_s3_object" "monitoring_ssh_private_key" {
depends_on = [module.monitoring_cluster]
bucket_name = "monitoring"
object_name = "id_rsa"
}
# First Hetzner Cloud node of the monitoring cluster; it joins the cluster
# using the registration command emitted by the cluster module.
module "monitoring_node_1" {
source = "../../modules/hcloud-node"
name = "monitoring"
cluster_registration_command = module.monitoring_cluster.cluster_registration_command
hcloud_network_id = data.vault_kv_secret_v2.hcloud.data["network_id"]
hcloud_token = data.vault_kv_secret_v2.hcloud.data["token"]
prefix = "bouwroute"
ssh_private_key = data.minio_s3_object.monitoring_ssh_private_key.content
ssh_public_key = data.minio_s3_object.monitoring_ssh_public_key.content
suffix = "1"
}

View File

@ -1,5 +1,7 @@
output "rancher_bootstrap_password" {
value = vault_kv_secret_v2.rancher_creds.data["admin_password"]
sensitive = true
output "platform_ip" {
value = module.platform_node_1.ip
}
#output "platform_ssh" {
# value = data.minio_s3_object.platform_ssh_private_key.content
#}

View File

@ -0,0 +1,33 @@
# Provision the "platform" downstream cluster (Vault mount, MinIO bucket,
# Rancher cluster, SSH key pair) via the shared cluster module.
module "platform_cluster" {
source = "../../modules/cluster"
name = "platform"
rancher_admin_token = data.vault_kv_secret_v2.rancher.data["token"]
rancher_server_uri = data.vault_kv_secret_v2.rancher.data["uri"]
}
# Fetch the SSH public key the cluster module stored in the "platform" bucket.
data "minio_s3_object" "platform_ssh_public_key" {
depends_on = [module.platform_cluster]
bucket_name = "platform"
object_name = "id_rsa.pub"
}
# Fetch the matching SSH private key (sensitive material) from the same bucket.
data "minio_s3_object" "platform_ssh_private_key" {
depends_on = [module.platform_cluster]
bucket_name = "platform"
object_name = "id_rsa"
}
# First Hetzner Cloud node of the platform cluster; it joins the cluster
# using the registration command emitted by the cluster module.
module "platform_node_1" {
source = "../../modules/hcloud-node"
name = "platform"
cluster_registration_command = module.platform_cluster.cluster_registration_command
hcloud_network_id = data.vault_kv_secret_v2.hcloud.data["network_id"]
hcloud_token = data.vault_kv_secret_v2.hcloud.data["token"]
prefix = "bouwroute"
ssh_private_key = data.minio_s3_object.platform_ssh_private_key.content
ssh_public_key = data.minio_s3_object.platform_ssh_public_key.content
suffix = "1"
}

View File

@ -1,51 +0,0 @@
resource "random_password" "rancher_admin_password" {
length = 20
special = false
}
resource "vault_kv_secret_v2" "rancher_creds" {
mount = "management"
name = "rancher"
delete_all_versions = true
data_json = jsonencode({
admin_password = random_password.rancher_admin_password.result
})
}
resource "kubernetes_secret" "bootstrap_secret" {
metadata {
name = "bootstrap-secret"
namespace = "cattle-system"
annotations = {
"field.cattle.io/projectId" = "local:p-q7vbv"
"helm.sh/hook" = "pre-install,pre-upgrade"
"helm.sh/hook-weight" = "-5"
"helm.sh/resource-policy" = "keep"
}
}
data = {
bootstrapPassword = vault_kv_secret_v2.rancher_creds.data["admin_password"]
}
type = "Opaque"
}
# Force a rollout of the Rancher deployment to pick up the new secret
resource "null_resource" "rancher_rollout" {
triggers = {
password_change = kubernetes_secret.bootstrap_secret.data["bootstrapPassword"]
}
provisioner "remote-exec" {
inline = ["kubectl rollout restart deployment rancher -n cattle-system"]
connection {
type = "ssh"
host = var.node_ip
user = var.node_username
private_key = data.minio_s3_object.id_rsa.content
}
}
depends_on = [kubernetes_secret.bootstrap_secret]
}

View File

@ -16,6 +16,15 @@ secret_key = "${MINIO_SECRET_KEY}"
bucket = "management"
key = "terraform.tfstate"
region = "eu-central-1"
EOF
cat << EOF > terraform.tfvars
endpoints = { s3 = "${MINIO_ADDR}" }
access_key = "${MINIO_ACCESS_KEY}"
secret_key = "${MINIO_SECRET_KEY}"
bucket = "management"
key = "terraform.tfstate"
region = "eu-central-1"
minio_server = "${MINIO_SERVER}"
vault_token = "${VAULT_TOKEN}"
vault_addr = "${VAULT_ADDR}"

View File

@ -1,4 +1,14 @@
provider "vault" {
address = var.vault_addr
token = var.vault_token
token = var.vault_token
}
data "vault_kv_secret_v2" "hcloud" {
mount = "management"
name = "hcloud"
}
data "vault_kv_secret_v2" "rancher" {
mount = "management"
name = "rancher"
}

View File

@ -1,18 +1,17 @@
# HCloud instance for creating a single node workload cluster
resource "hcloud_server" "quickstart_node" {
name = "${var.prefix}-worker"
image = "ubuntu-20.04"
server_type = var.instance_type
location = var.hcloud_location
ssh_keys = [hcloud_ssh_key.quickstart_ssh_key.id]
ssh_keys = [hcloud_ssh_key.quickstart_ssh_key.id]
network {
network_id = hcloud_network.private.id
}
user_data = templatefile(
"${path.module}/files/userdata_quickstart_node.template",
"userdata_node.template",
{
username = local.node_username
register_command = module.rancher_common.custom_cluster_command

View File

@ -0,0 +1,117 @@
# Per-cluster KV v2 secret mount; the mount path is the cluster name.
resource "vault_mount" "cluster" {
path = var.name
type = "kv"
options = { version = "2" }
description = "KV Version 2 secret engine mount for ${var.name}"
}
# Private MinIO bucket holding this cluster's artifacts (kubeconfig, SSH keys).
resource "minio_s3_bucket" "cluster" {
bucket = var.name
acl = "private"
}
# TODO: Enable encryption and versioning on the bucket
# resource "minio_s3_bucket_server_side_encryption" "encryption" {
# bucket = minio_s3_bucket.cluster.bucket
# encryption_type = "aws:kms"
# kms_key_id = var.aws_kms_key_id
# }
# MinIO user dedicated to this cluster.
resource "minio_iam_user" "cluster" {
name = var.name
}
# Least-privilege policy: list the cluster's own bucket and read/write/delete
# objects inside it — nothing outside "arn:aws:s3:::${var.name}".
resource "minio_iam_policy" "cluster" {
name = minio_s3_bucket.cluster.bucket
policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Effect = "Allow"
Action = ["s3:ListBucket"]
Resource = ["arn:aws:s3:::${var.name}"]
},
{
Effect = "Allow"
Action = [
"s3:GetObject",
"s3:PutObject",
"s3:DeleteObject"
]
Resource = ["arn:aws:s3:::${var.name}/*"]
}
]
})
}
# Bind the bucket policy to the cluster user.
resource "minio_iam_user_policy_attachment" "cluster" {
user_name = minio_iam_user.cluster.id
policy_name = minio_iam_policy.cluster.id
}
# Service account (access/secret key pair) for the cluster user; its inline
# policy duplicates minio_iam_policy.cluster above — keep the two in sync.
resource "minio_iam_service_account" "cluster" {
target_user = minio_iam_user.cluster.name
policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Effect = "Allow"
Action = ["s3:ListBucket"]
Resource = ["arn:aws:s3:::${var.name}"]
},
{
Effect = "Allow"
Action = [
"s3:GetObject",
"s3:PutObject",
"s3:DeleteObject"
]
Resource = ["arn:aws:s3:::${var.name}/*"]
}
]
})
}
# Publish the service-account credentials into the cluster's Vault mount
# under the "minio" secret.
resource "vault_kv_secret_v2" "cluster" {
mount = var.name
name = "minio"
delete_all_versions = true
data_json = jsonencode({
access_key = minio_iam_service_account.cluster.access_key
secret_key = minio_iam_service_account.cluster.secret_key
})
depends_on = [
vault_mount.cluster,
minio_iam_service_account.cluster
]
}
# Vault policy scoping tokens to this cluster's KV path plus basic
# self-service token operations (create/lookup/renew).
resource "vault_policy" "cluster" {
name = var.name
policy = <<EOT
path "${var.name}/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
path "auth/token/create" {
capabilities = ["create", "update", "sudo"]
}
path "auth/token/lookup-self" {
capabilities = ["read"]
}
path "auth/token/renew-self" {
capabilities = ["update"]
}
# Add other necessary permissions
EOT
}
# Renewable periodic token bound to the policy above (1h TTL, 15m period).
resource "vault_token" "cluster" {
policies = [vault_policy.cluster.name]
renewable = true
ttl = "1h"
period = "15m"
}

View File

@ -0,0 +1,13 @@
# Cluster name, echoed back so parent modules can reference it.
output "name" {
value = var.name
}
# Vault token scoped to this cluster's policy (sensitive).
output "vault_token" {
value = vault_token.cluster.client_token
sensitive = true
}
# Node-registration command for joining workers to the Rancher cluster.
# NOTE(review): this is the *insecure* variant (skips TLS verification) —
# confirm that is acceptable outside of bootstrap.
output "cluster_registration_command" {
value = rancher2_cluster_v2.cluster.cluster_registration_token.0.insecure_node_command
sensitive = true
}

View File

@ -0,0 +1,22 @@
# Provider requirements for the cluster module.
terraform {
required_providers {
minio = {
source = "aminueza/minio"
version = "~> 2.5.0"
}
rancher2 = {
source = "rancher/rancher2"
version = "3.0.0"
}
}
}
# Rancher provider in admin mode, used to create downstream clusters.
# NOTE(review): insecure = true disables TLS verification — consider wiring
# ca_certs (see commented line below) once the server certificate is available.
provider "rancher2" {
alias = "admin"
api_url = var.rancher_server_uri
insecure = true
token_key = var.rancher_admin_token
timeout = "300s"
# ca_certs = data.kubernetes_secret.rancher_cert.data["ca.crt"]
}

View File

@ -0,0 +1,35 @@
# Downstream cluster registered with the Rancher server (admin provider).
resource "rancher2_cluster_v2" "cluster" {
provider = rancher2.admin
name = var.name
kubernetes_version = var.kubernetes_version
}
# Persist the cluster's kubeconfig in its MinIO bucket.
resource "minio_s3_object" "kube_config_cluster_yaml" {
depends_on = [minio_s3_bucket.cluster, rancher2_cluster_v2.cluster]
bucket_name = minio_s3_bucket.cluster.bucket
object_name = "kube_config.yaml"
content = rancher2_cluster_v2.cluster.kube_config
content_type = "text/plain"
}
# Per-cluster SSH key pair used for node provisioning.
resource "tls_private_key" "cluster" {
algorithm = "RSA"
rsa_bits = 2048
}
# Store the private key in the bucket (sensitive; bucket is private).
resource "minio_s3_object" "ssh_cluster_private_key" {
depends_on = [tls_private_key.cluster, minio_s3_bucket.cluster]
bucket_name = minio_s3_bucket.cluster.bucket
object_name = "id_rsa"
content = tls_private_key.cluster.private_key_pem
content_type = "text/plain"
}
# Store the matching public key alongside it.
resource "minio_s3_object" "ssh_cluster_public_key" {
depends_on = [tls_private_key.cluster, minio_s3_bucket.cluster]
bucket_name = minio_s3_bucket.cluster.bucket
object_name = "id_rsa.pub"
content = tls_private_key.cluster.public_key_openssh
content_type = "text/plain"
}

View File

@ -0,0 +1,18 @@
# Name of the cluster; also used as the Vault mount path and MinIO bucket name.
variable "name" {
type = string
}
variable "kubernetes_version" {
type = string
description = "Kubernetes version to use for managed workload cluster"
default = "v1.30.3+rke2r1"
}
# Base URI of the Rancher server API.
variable "rancher_server_uri" {
type = string
}
# Admin API token used by the rancher2 provider (sensitive).
variable "rancher_admin_token" {
type = string
sensitive = true
}

View File

@ -0,0 +1,39 @@
# SSH key registered in Hetzner Cloud for accessing this cluster's nodes.
resource "hcloud_ssh_key" "cluster" {
name = "${var.prefix}-${var.name}-ssh-key"
public_key = var.ssh_public_key
}
# Worker node instance; cloud-init (user_data) runs the cluster
# registration command so the node joins on first boot.
resource "hcloud_server" "node" {
name = local.node_name
image = var.node_image
server_type = var.instance_type
location = var.hcloud_location
ssh_keys = [hcloud_ssh_key.cluster.id]
network {
network_id = var.hcloud_network_id
}
user_data = templatefile(
format("%s/files/userdata_node.template", path.module),
{
username = local.node_username
register_command = var.cluster_registration_command
}
)
# Block until cloud-init finishes so dependents see a fully-provisioned node.
provisioner "remote-exec" {
inline = [
"echo 'Waiting for cloud-init to complete...'",
"cloud-init status --wait > /dev/null",
"echo 'Completed cloud-init!'",
]
connection {
type = "ssh"
host = self.ipv4_address
user = local.node_username
private_key = var.ssh_private_key
}
}
}

View File

@ -0,0 +1,3 @@
# Public IPv4 address of the provisioned node.
output "ip" {
value = hcloud_server.node.ipv4_address
}

View File

@ -0,0 +1,13 @@
# Provider requirements for the hcloud-node module.
terraform {
required_providers {
hcloud = {
source = "hetznercloud/hcloud"
version = "1.33.2"
}
}
}
# Hetzner Cloud provider, authenticated with the token passed in by the caller.
provider "hcloud" {
token = var.hcloud_token
}

View File

@ -0,0 +1,67 @@
# Inputs for the hcloud-node module.
variable "hcloud_token" {
type = string
description = "Hetzner Cloud API token used to create infrastructure"
}
variable "hcloud_location" {
type = string
description = "Hetzner location used for all resources"
default = "fsn1"
}
variable "hcloud_network_id" {
type = string
description = "ID of the network to use for all resources"
}
variable "ssh_private_key" {
type = string
description = "Private key used for SSH access to instances"
sensitive = true
}
variable "ssh_public_key" {
type = string
description = "Public key used for SSH access to instances"
}
variable "instance_type" {
type = string
description = "Type of instance to be used for all instances"
default = "cpx21"
}
variable "prefix" {
type = string
description = "Prefix added to names of all resources"
}
variable "name" {
type = string
description = "Name of the node"
}
variable "suffix" {
type = string
description = "Suffix added to names of all resources"
}
variable "cluster_registration_command" {
type = string
description = "Command to register the node with the cluster"
sensitive = true
}
variable "node_image" {
type = string
description = "Image to use for the node"
default = "ubuntu-22.04"
}
# Local variables used to reduce repetition
locals {
node_username = "root"
# Node name is "<prefix>-<name>-<suffix>", e.g. "bouwroute-application-1".
cluster_name = "${var.prefix}-${var.name}"
node_name = "${local.cluster_name}-${var.suffix}"
}

View File

@ -1,128 +0,0 @@
# HCloud infrastructure resources
resource "tls_private_key" "global_key" {
algorithm = "RSA"
rsa_bits = 2048
}
resource "local_sensitive_file" "ssh_private_key_pem" {
filename = "${path.module}/id_rsa"
content = tls_private_key.global_key.private_key_pem
file_permission = "0600"
}
resource "local_file" "ssh_public_key_openssh" {
filename = "${path.module}/id_rsa.pub"
content = tls_private_key.global_key.public_key_openssh
}
resource "hcloud_network" "private" {
name = "${var.prefix}-private-network"
ip_range = var.network_cidr
}
resource "hcloud_network_subnet" "private" {
type = "cloud"
network_id = hcloud_network.private.id
network_zone = var.network_zone
ip_range = var.network_ip_range
}
# Temporary key pair used for SSH access
resource "hcloud_ssh_key" "quickstart_ssh_key" {
name = "${var.prefix}-instance-ssh-key"
public_key = tls_private_key.global_key.public_key_openssh
}
# HCloud Instance for creating a single node RKE cluster and installing the Rancher server
resource "hcloud_server" "rancher_server" {
name = "${var.prefix}-rancher-server"
# NOTE(review): ubuntu-20.04 is nearing end of standard support; consider a
# newer LTS image once the Rancher install is validated against it.
image = "ubuntu-20.04"
server_type = var.instance_type
location = var.hcloud_location
ssh_keys = [hcloud_ssh_key.quickstart_ssh_key.id]
network {
network_id = hcloud_network.private.id
}
# Block until cloud-init finishes so the rancher_common provisioning below
# does not race the instance's first-boot setup.
provisioner "remote-exec" {
inline = [
"echo 'Waiting for cloud-init to complete...'",
"cloud-init status --wait > /dev/null",
"echo 'Completed cloud-init!'",
]
connection {
type = "ssh"
host = self.ipv4_address
user = local.node_username
private_key = tls_private_key.global_key.private_key_pem
}
}
# The subnet must exist before the server can attach to the network.
depends_on = [
hcloud_network_subnet.private
]
}
# Rancher resources
# Shared module (../rancher-common) that provisions Rancher and its
# dependencies on the server above and defines the managed workload cluster.
module "rancher_common" {
source = "../rancher-common"
node_public_ip = hcloud_server.rancher_server.ipv4_address
# IP on the single attached private network of the server.
node_internal_ip = one(hcloud_server.rancher_server.network[*]).ip
node_username = local.node_username
ssh_private_key_pem = tls_private_key.global_key.private_key_pem
rancher_kubernetes_version = var.rancher_kubernetes_version
cert_manager_version = var.cert_manager_version
rancher_version = var.rancher_version
rancher_helm_repository = var.rancher_helm_repository
# rancher.<public-ip>.sslip.io — sslip.io resolves the embedded IP, giving a
# DNS name without managing a zone.
rancher_server_dns = join(".", ["rancher", hcloud_server.rancher_server.ipv4_address, "sslip.io"])
admin_password = var.rancher_server_admin_password
workload_kubernetes_version = var.workload_kubernetes_version
workload_cluster_name = "quickstart-hcloud-custom"
}
# HCloud instance for creating a single node workload cluster
resource "hcloud_server" "quickstart_node" {
name = "${var.prefix}-worker"
# NOTE(review): ubuntu-20.04 is nearing end of standard support; keep in
# sync with the image used by rancher_server above.
image = "ubuntu-20.04"
server_type = var.instance_type
location = var.hcloud_location
ssh_keys = [hcloud_ssh_key.quickstart_ssh_key.id]
network {
network_id = hcloud_network.private.id
}
# cloud-init user data runs the cluster registration command on first boot,
# joining this node to the workload cluster defined in rancher_common.
# NOTE(review): verify rancher-common still exports custom_cluster_command —
# the corresponding output appears to be commented out in its outputs file.
user_data = templatefile(
"${path.module}/files/userdata_quickstart_node.template",
{
username = local.node_username
register_command = module.rancher_common.custom_cluster_command
}
)
# Wait for cloud-init (and thus the registration above) to finish.
provisioner "remote-exec" {
inline = [
"echo 'Waiting for cloud-init to complete...'",
"cloud-init status --wait > /dev/null",
"echo 'Completed cloud-init!'",
]
connection {
type = "ssh"
host = self.ipv4_address
user = local.node_username
private_key = tls_private_key.global_key.private_key_pem
}
}
# The subnet must exist before the server can attach to the network.
depends_on = [
hcloud_network_subnet.private
]
}

View File

@ -1,12 +0,0 @@
# Root-module outputs; `description` is surfaced by `terraform output` and docs tooling.
output "rancher_server_url" {
  description = "URL of the Rancher server UI/API"
  value       = module.rancher_common.rancher_url
}

output "rancher_node_ip" {
  description = "Public IPv4 address of the Rancher server node"
  value       = hcloud_server.rancher_server.ipv4_address
}

output "workload_node_ip" {
  description = "Public IPv4 address of the workload cluster node"
  value       = hcloud_server.quickstart_node.ipv4_address
}

View File

@ -1,21 +0,0 @@
terraform {
  required_providers {
    hcloud = {
      source = "hetznercloud/hcloud"
      # Pessimistic constraints (~>) allow compatible patch/minor releases
      # instead of pinning one exact build; the lock file records the exact
      # selected versions. Exact pins here conflicted with the lock file
      # (tls pinned 4.0.4 while 4.0.6 was locked).
      version = "~> 1.33"
    }
    local = {
      source  = "hashicorp/local"
      version = "~> 2.4"
    }
    tls = {
      source  = "hashicorp/tls"
      version = "~> 4.0"
    }
  }

  required_version = ">= 1.0.0"
}
# Hetzner Cloud provider, authenticated with the project API token.
provider "hcloud" {
token = var.hcloud_token
}

View File

@ -1,82 +0,0 @@
# Variables for Hetzner Cloud infrastructure module
# Hetzner Cloud API token. Marked sensitive so Terraform redacts it from
# plan/apply output and logs (it grants full project access).
variable "hcloud_token" {
  type        = string
  description = "Hetzner Cloud API token used to create infrastructure"
  sensitive   = true
}
variable "hcloud_location" {
type = string
description = "Hetzner location used for all resources"
# NOTE(review): fsn1 is presumably Falkenstein; confirm it offers the chosen
# instance_type.
default = "fsn1"
}
variable "prefix" {
type = string
description = "Prefix added to names of all resources"
default = "quickstart"
}
variable "network_cidr" {
type = string
description = "Network to create for private communication"
default = "10.0.0.0/8"
}
# Must be contained in network_cidr (see description).
variable "network_ip_range" {
type = string
description = "Subnet to create for private communication. Must be part of the CIDR defined in `network_cidr`."
default = "10.0.1.0/24"
}
variable "network_zone" {
type = string
description = "Zone to create the network in"
default = "eu-central"
}
variable "instance_type" {
type = string
description = "Type of instance to be used for all instances"
# NOTE(review): cx21 has been retired from Hetzner's lineup; confirm it is
# still orderable in the chosen location or update the default (e.g. cx22).
default = "cx21"
}
# Pinned component versions. NOTE(review): these defaults (k3s/RKE2 v1.24,
# Rancher 2.7.9, cert-manager 1.11.0) are dated — verify them against the
# current Rancher support matrix before reuse.
variable "rancher_kubernetes_version" {
type = string
description = "Kubernetes version to use for Rancher server cluster"
default = "v1.24.14+k3s1"
}
variable "workload_kubernetes_version" {
type = string
description = "Kubernetes version to use for managed workload cluster"
default = "v1.24.14+rke2r1"
}
variable "cert_manager_version" {
type = string
description = "Version of cert-manager to install alongside Rancher (format: 0.0.0)"
default = "1.11.0"
}
variable "rancher_version" {
type = string
description = "Rancher server version (format: v0.0.0)"
default = "2.7.9"
}
variable "rancher_helm_repository" {
type = string
description = "The helm repository, where the Rancher helm chart is installed from"
default = "https://releases.rancher.com/server-charts/latest"
}
# Bootstrap admin password. Marked sensitive so Terraform redacts it from
# plan/apply output, matching the handling of other secrets in this module.
variable "rancher_server_admin_password" {
  type        = string
  description = "Admin password to use for Rancher server bootstrap, min. 12 characters"
  sensitive   = true
}
# Local variables used to reduce repetition
locals {
# SSH login user for provisioning connections to the servers.
node_username = "root"
}

View File

@ -4,11 +4,11 @@
# ----------------------------------------------------------
# # Rancher certificates
# data "kubernetes_secret" "rancher_cert" {
# depends_on = [helm_release.rancher_server]
# metadata {
# name = "tls-rancher-ingress"
# namespace = "cattle-system"
# }
# }
#data "kubernetes_secret" "rancher_cert" {
# depends_on = [helm_release.rancher_server]
#
# metadata {
# name = "tls-rancher-ingress"
# namespace = "cattle-system"
# }
#}

View File

@ -5,7 +5,7 @@ output "rancher_uri" {
}
output "rancher_server_admin_password" {
value = var.admin_password
value = var.admin_password
sensitive = true
}
@ -22,7 +22,7 @@ output "minio_root_user" {
}
output "minio_root_password" {
value = module.minio.minio_root_password
value = module.minio.minio_root_password
sensitive = true
}
@ -31,10 +31,14 @@ output "vault_uri" {
}
output "vault_root_token" {
value = module.vault.vault_root_token
value = module.vault.vault_root_token
sensitive = true
}
output "rancher_server_admin_token" {
value = rancher2_bootstrap.admin.token
sensitive = true
}
# output "custom_cluster_command" {
# value = rancher2_cluster_v2.quickstart_workload.cluster_registration_token.0.insecure_node_command

View File

@ -9,6 +9,7 @@ ingress:
traefik.ingress.kubernetes.io/router.entrypoints: web,websecure
replicas: 1
agentTLSMode: "system-store"
letsEncrypt:
environment: staging

View File

@ -11,11 +11,3 @@ resource "rancher2_bootstrap" "admin" {
password = var.admin_password
telemetry = true
}
#
## Create custom managed cluster for quickstart
#resource "rancher2_cluster_v2" "quickstart_workload" {
# provider = rancher2.admin
#
# name = var.workload_cluster_name
# kubernetes_version = var.workload_kubernetes_version
#}

View File

@ -1,4 +1,14 @@
# Namespace for the Vault deployment, created explicitly so dependent
# manifests (e.g. the AWS credentials Secret) can order themselves after it
# via depends_on.
resource "kubectl_manifest" "vault_namespace" {
yaml_body = <<YAML
apiVersion: v1
kind: Namespace
metadata:
name: vault
YAML
}
resource "kubectl_manifest" "vault_aws_creds" {
depends_on = [kubectl_manifest.vault_namespace]
yaml_body = <<YAML
apiVersion: v1
kind: Secret

View File

@ -1,12 +1,12 @@
resource "helm_release" "vault" {
name = "vault"
repository = "https://helm.releases.hashicorp.com"
chart = "vault"
namespace = "vault"
version = "0.28.1"
create_namespace = true
wait = true
depends_on = [kubectl_manifest.vault_aws_creds]
name = "vault"
repository = "https://helm.releases.hashicorp.com"
chart = "vault"
namespace = "vault"
version = "0.28.1"
create_namespace = true
wait = true
set {
name = "server.ha.enabled"
@ -53,8 +53,8 @@ resource "helm_release" "vault" {
resource "ssh_resource" "vault_init" {
depends_on = [helm_release.vault]
host = var.node_public_ip
user = var.node_username
host = var.node_public_ip
user = var.node_username
private_key = var.ssh_private_key_pem
commands = [
@ -65,5 +65,5 @@ resource "ssh_resource" "vault_init" {
resource "local_file" "vault-keys" {
depends_on = [ssh_resource.vault_init]
filename = format("%s/%s", path.root, "vault.secret")
content = ssh_resource.vault_init.result
content = ssh_resource.vault_init.result
}