add infra, update gitignores

Thomas Rijpstra 2024-09-13 13:03:52 +02:00
parent f605fcd24a
commit 2d3e95abd0
Signed by: thomas
SSH Key Fingerprint: SHA256:au5M4TrfxCxk778HDa1d+VB33vzyetoOvL8zrsDkJt0
91 changed files with 2765 additions and 5534 deletions

5593
.gitignore vendored

File diff suppressed because it is too large.

5
.idea/.gitignore vendored Normal file
@@ -0,0 +1,5 @@
# Default ignored files
/shelf/
/workspace.xml
# Editor-based HTTP Client requests
/httpRequests/

12
.idea/devops.iml Normal file
@@ -0,0 +1,12 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="WEB_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$">
<excludeFolder url="file://$MODULE_DIR$/.tmp" />
<excludeFolder url="file://$MODULE_DIR$/temp" />
<excludeFolder url="file://$MODULE_DIR$/tmp" />
</content>
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
</module>

8
.idea/modules.xml Normal file
@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/devops.iml" filepath="$PROJECT_DIR$/.idea/devops.iml" />
</modules>
</component>
</project>

6
.idea/vcs.xml Normal file
@@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="" vcs="Git" />
</component>
</project>

@@ -0,0 +1,9 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: backend-config
data:
  DB_HOST: "db.yourdomain.com"
  DB_PORT: "5432"
  API_KEY: "your-api-key"

@@ -0,0 +1,44 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: backend
  labels:
    app: backend
spec:
  replicas: 2
  selector:
    matchLabels:
      app: backend
  template:
    metadata:
      labels:
        app: backend
    spec:
      containers:
        - name: backend
          image: your-registry/backend:v1.0.0
          ports:
            - containerPort: 8080
          envFrom:
            - configMapRef:
                name: backend-config
          resources:
            requests:
              cpu: 200m
              memory: 256Mi
            limits:
              cpu: 1
              memory: 1Gi
          livenessProbe:
            httpGet:
              path: /healthz
              port: 8080
            initialDelaySeconds: 15
            periodSeconds: 20
          readinessProbe:
            httpGet:
              path: /ready
              port: 8080
            initialDelaySeconds: 5
            periodSeconds: 10

12
apps/backend/service.yaml Normal file
@@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
  name: backend
spec:
  selector:
    app: backend
  ports:
    - protocol: TCP
      port: 8080
      targetPort: 8080

@@ -0,0 +1,41 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: frontend
  labels:
    app: frontend
spec:
  replicas: 2
  selector:
    matchLabels:
      app: frontend
  template:
    metadata:
      labels:
        app: frontend
    spec:
      containers:
        - name: frontend
          image: your-registry/frontend:v1.0.0
          ports:
            - containerPort: 80
          resources:
            requests:
              cpu: 100m
              memory: 128Mi
            limits:
              cpu: 500m
              memory: 512Mi
          livenessProbe:
            httpGet:
              path: /healthz
              port: 80
            initialDelaySeconds: 10
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /ready
              port: 80
            initialDelaySeconds: 5
            periodSeconds: 10

@@ -0,0 +1,24 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: frontend
  annotations:
    kubernetes.io/ingress.class: nginx
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
spec:
  tls:
    - hosts:
        - frontend.yourdomain.com
      secretName: frontend-tls
  rules:
    - host: frontend.yourdomain.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: frontend
                port:
                  number: 80

@@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
  name: frontend
spec:
  selector:
    app: frontend
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80

10
base/kustomization.yaml Normal file
@@ -0,0 +1,10 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../apps/frontend/deployment.yaml
- ../apps/frontend/service.yaml
- ../apps/frontend/ingress.yaml
- ../apps/backend/deployment.yaml
- ../apps/backend/service.yaml
- ../apps/backend/configmap.yaml

37
infra/.gitignore vendored Normal file
@@ -0,0 +1,37 @@
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Ignore transient lock info files created by terraform apply
.terraform.tfstate.lock.info
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc

Binary file not shown.

@@ -0,0 +1,159 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/gavinbunney/kubectl" {
version = "1.14.0"
constraints = "1.14.0"
hashes = [
"h1:gLFn+RvP37sVzp9qnFCwngRjjFV649r6apjxvJ1E/SE=",
"zh:0350f3122ff711984bbc36f6093c1fe19043173fad5a904bce27f86afe3cc858",
"zh:07ca36c7aa7533e8325b38232c77c04d6ef1081cb0bac9d56e8ccd51f12f2030",
"zh:0c351afd91d9e994a71fe64bbd1662d0024006b3493bb61d46c23ea3e42a7cf5",
"zh:39f1a0aa1d589a7e815b62b5aa11041040903b061672c4cfc7de38622866cbc4",
"zh:428d3a321043b78e23c91a8d641f2d08d6b97f74c195c654f04d2c455e017de5",
"zh:4baf5b1de2dfe9968cc0f57fd4be5a741deb5b34ee0989519267697af5f3eee5",
"zh:6131a927f9dffa014ab5ca5364ac965fe9b19830d2bbf916a5b2865b956fdfcf",
"zh:c62e0c9fd052cbf68c5c2612af4f6408c61c7e37b615dc347918d2442dd05e93",
"zh:f0beffd7ce78f49ead612e4b1aefb7cb6a461d040428f514f4f9cc4e5698ac65",
]
}
provider "registry.terraform.io/hashicorp/helm" {
version = "2.10.1"
constraints = "2.10.1"
hashes = [
"h1:ctDhNJU4tEcyoUgPzwKuJmbDIqUl25mCY+s/lVHP6Sg=",
"zh:0717312baed39fb0a00576297241b69b419880cad8771bf72dec97ebdc96b200",
"zh:0e0e287b4e8429a0700143c8159764502eba0b33b1d094bf0d4ef4d93c7802cb",
"zh:4f74605377dab4065aaad35a2c5fa6186558c6e2e57b9058bdc8a62cf91857b9",
"zh:505f4af4dedb7a4f8f45b4201900b8e16216bdc2a01cc84fe13cdbf937570e7e",
"zh:83f37fe692513c0ce307d487248765383e00f9a84ed95f993ce0d3efdf4204d3",
"zh:840e5a84e1b5744f0211f611a2c6890da58016a40aafd5971f12285164d4e29b",
"zh:8c03d8dee292fa0367b0511cf3e95b706e034f78025f5dff0388116e1798bf47",
"zh:937800d1860f6b3adbb20e65f11e5fcd940b21ce8bdb48198630426244691325",
"zh:c1853aa5cbbdd1d46f4b169e84c3482103f0e8575a9bb044dbde908e27348c5d",
"zh:c9b0f640590da20931c30818b0b0587aa517d5606cb6e8052e4e4bf38f97b54d",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
"zh:fe8bd4dd09dc7ca218959eda1ced9115408c2cdc9b4a76964bfa455f3bcadfd3",
]
}
provider "registry.terraform.io/hashicorp/local" {
version = "2.4.0"
constraints = "2.4.0"
hashes = [
"h1:R97FTYETo88sT2VHfMgkPU3lzCsZLunPftjSI5vfKe8=",
"zh:53604cd29cb92538668fe09565c739358dc53ca56f9f11312b9d7de81e48fab9",
"zh:66a46e9c508716a1c98efbf793092f03d50049fa4a83cd6b2251e9a06aca2acf",
"zh:70a6f6a852dd83768d0778ce9817d81d4b3f073fab8fa570bff92dcb0824f732",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:82a803f2f484c8b766e2e9c32343e9c89b91997b9f8d2697f9f3837f62926b35",
"zh:9708a4e40d6cc4b8afd1352e5186e6e1502f6ae599867c120967aebe9d90ed04",
"zh:973f65ce0d67c585f4ec250c1e634c9b22d9c4288b484ee2a871d7fa1e317406",
"zh:c8fa0f98f9316e4cfef082aa9b785ba16e36ff754d6aba8b456dab9500e671c6",
"zh:cfa5342a5f5188b20db246c73ac823918c189468e1382cb3c48a9c0c08fc5bf7",
"zh:e0e2b477c7e899c63b06b38cd8684a893d834d6d0b5e9b033cedc06dd7ffe9e2",
"zh:f62d7d05ea1ee566f732505200ab38d94315a4add27947a60afa29860822d3fc",
"zh:fa7ce69dde358e172bd719014ad637634bbdabc49363104f4fca759b4b73f2ce",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.6.2"
hashes = [
"h1:wmG0QFjQ2OfyPy6BB7mQ57WtoZZGGV07uAPQeDmIrAE=",
"zh:0ef01a4f81147b32c1bea3429974d4d104bbc4be2ba3cfa667031a8183ef88ec",
"zh:1bcd2d8161e89e39886119965ef0f37fcce2da9c1aca34263dd3002ba05fcb53",
"zh:37c75d15e9514556a5f4ed02e1548aaa95c0ecd6ff9af1119ac905144c70c114",
"zh:4210550a767226976bc7e57d988b9ce48f4411fa8a60cd74a6b246baf7589dad",
"zh:562007382520cd4baa7320f35e1370ffe84e46ed4e2071fdc7e4b1a9b1f8ae9b",
"zh:5efb9da90f665e43f22c2e13e0ce48e86cae2d960aaf1abf721b497f32025916",
"zh:6f71257a6b1218d02a573fc9bff0657410404fb2ef23bc66ae8cd968f98d5ff6",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:9647e18f221380a85f2f0ab387c68fdafd58af6193a932417299cdcae4710150",
"zh:bb6297ce412c3c2fa9fec726114e5e0508dd2638cad6a0cb433194930c97a544",
"zh:f83e925ed73ff8a5ef6e3608ad9225baa5376446349572c2449c0c0b3cf184b7",
"zh:fbef0781cb64de76b1df1ca11078aecba7800d82fd4a956302734999cfd9a4af",
]
}
provider "registry.terraform.io/hashicorp/tls" {
version = "4.0.4"
constraints = "4.0.4"
hashes = [
"h1:pe9vq86dZZKCm+8k1RhzARwENslF3SXb9ErHbQfgjXU=",
"zh:23671ed83e1fcf79745534841e10291bbf34046b27d6e68a5d0aab77206f4a55",
"zh:45292421211ffd9e8e3eb3655677700e3c5047f71d8f7650d2ce30242335f848",
"zh:59fedb519f4433c0fdb1d58b27c210b27415fddd0cd73c5312530b4309c088be",
"zh:5a8eec2409a9ff7cd0758a9d818c74bcba92a240e6c5e54b99df68fff312bbd5",
"zh:5e6a4b39f3171f53292ab88058a59e64825f2b842760a4869e64dc1dc093d1fe",
"zh:810547d0bf9311d21c81cc306126d3547e7bd3f194fc295836acf164b9f8424e",
"zh:824a5f3617624243bed0259d7dd37d76017097dc3193dac669be342b90b2ab48",
"zh:9361ccc7048be5dcbc2fafe2d8216939765b3160bd52734f7a9fd917a39ecbd8",
"zh:aa02ea625aaf672e649296bce7580f62d724268189fe9ad7c1b36bb0fa12fa60",
"zh:c71b4cd40d6ec7815dfeefd57d88bc592c0c42f5e5858dcc88245d371b4b8b1e",
"zh:dabcd52f36b43d250a3d71ad7abfa07b5622c69068d989e60b79b2bb4f220316",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
]
}
provider "registry.terraform.io/hetznercloud/hcloud" {
version = "1.33.2"
constraints = "1.33.2"
hashes = [
"h1:3Hx8p9LbcnHfBhy3nT7+unlc5rwkiSZjLt9SVQOSpB8=",
"zh:0a5d0f332d7dfe77fa27301094af98a185aabfb9f56d71b81936e03211e4d66f",
"zh:0e047859ee7296f335881933ccf8ce8c07aa47bef56d5449a81b85a2d9dac93a",
"zh:1d3d0896f518df9e245c3207ed231e528f5dcfe628508e7c3ceba4a2bfefaa7a",
"zh:1d7a31c8c490512896ce327ab220e950f1a2e30ee83cc2e58e69bbbfbbb87e72",
"zh:67cbb2492683cb22f6c54f26bee72aec140c8dd2d0881b2815d2ef80959fc751",
"zh:771062815e662979204ac2dc91c34c893f27670d67e02370e48124483d3c9838",
"zh:957ebb146898cd059c0cc8b4c32e574b61041d8b6a11cd854b3cc1d3baaeb3a9",
"zh:95dbd8634000b979213cb97b5d869cad78299ac994d0665d150c8dafc1390429",
"zh:a21b22b2e9d835e1b8b3b7e0b41a4d199171d62e9e9be78c444c700e96b31316",
"zh:aead1ba50640a51f20d574374f2c6065d9bfa4eea5ef044d1475873c33e58239",
"zh:cefabd0a78af40ea5cd08e1ca436c753df9b1c6496eb27281b755a2de1f167ab",
"zh:d98cffc5206b9a7550a23e13031a6f53566bd1ed3bf65314bc55ef12404d49ce",
"zh:dddaaf95b6aba701153659feff12c7bce6acc78362cb5ff8321a1a1cbf780cd9",
"zh:fd662b483250326a1bfbe5684c22c5083955a43e0773347eea35cd4c2cfe700e",
]
}
provider "registry.terraform.io/loafoe/ssh" {
version = "2.6.0"
constraints = "2.6.0"
hashes = [
"h1:80FIBNQBLj9p0j7EMQDWxzY6Fh0VODiflCww1/Wx6rk=",
"zh:1285448b69bd388a4a59ed170f9c07c641207a7291057860557937807d02da95",
"zh:2472c5dee4265cb555a627aa4ecc5702d32cd7aebe85722820df7499b7d4502a",
"zh:2a9b8f0ad446febb517a7fe38de9b02bc1bcceb90843a713c546770eff44aa84",
"zh:66e62d5bb280af7407315a62aee2ab35f8ce1b36f7400633f75f72111deede87",
"zh:832fc1213c3447fa831c1b2331cde71072c95a3f3eae04ff23dd09975d7c6577",
"zh:a4e19d6a6e776732cce70f350e8cf1954febf1e9281b4668f567636c7d0f75d8",
"zh:ac18abae233fe367f164c5a4492875a25e1c1de38a181876ffdc9f87c75abacf",
"zh:b44203b49cdef04f3e110923017a1e80c8b5588b91605e0c0985b3c2d839d6c0",
"zh:bf489e0f8ebc6f1d0d28cd6eadd871d6d63b952deaf10271765609fce417a5cf",
"zh:c41a209c6a4bf81309e573a53ad7b9e8d655bd7e81e40685214aeac92e682333",
"zh:ded134d1359bd39c2261ce1ed3bd468f8fac4fff09c07a213a3d281313d99d59",
]
}
provider "registry.terraform.io/rancher/rancher2" {
version = "3.0.0"
constraints = "3.0.0"
hashes = [
"h1:Qnc86BDThHGg+UqfK8Ssx7l+KcYg8wBDsMU3mCgUK6E=",
"zh:3f28e165f4e6dbfb3c6f57ea96571f907915cf9d3eaf0041054ec3c4e22cc14b",
"zh:4d71e727690d8691321c9591248599fdb38e09e27dace74da6dee16ec01351b0",
"zh:51dc86277205c7514cad0edd6e48a300a470a846a12927323b09fb1550891bcb",
"zh:5b240c5eefc5bcffcf851bd11dc913cff05a0fbf7539e966c7638894265a6297",
"zh:8f754482629b587083c1b9e0e0646a577a8defdf64d61ca12c853dd41ffbc1bb",
"zh:9a212e0dd166e2dc1ae3c13c99b07eb6f48e5ec4b6dcdca857d3f3d05b0fcabc",
"zh:a4e45342af8e9a8ab2be9a3ffd8a7df244519fade4901cc0b95328937e8b80ba",
"zh:af148901e447f97b844b5d5a81df5c7fce0432b3f0a42cb674196f0ff2ce1ded",
"zh:b11a97fc16b1fde2956906569bae890be59d444c192c560f00dca418b8184875",
"zh:b1588f6b704326ee6cf384c6d2542e4bd6f08b5324098cb6a7c126fb37112b28",
"zh:e63dd35d6f962e22561b3dd1b6fd8c23bb8154ca492a89e6b4693569974c971f",
"zh:f1eeae30b192f569f3e16061e28f1ce876a6f48eeab4c113e5f771809719090b",
]
}

@@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAujkDLDgyrzbdQRk96ucXOB9aBKZAxoEiFqlGIY3Z65kB4o0C
4Oc8l74S5CPmQsxNJwfRWpSmzVNn3XiHGLcSpLF9NeKMYrWZ/uvZ7r/dZmeOkp+I
073Dkz9jU09HMlLZMuB7BNzEZmpO+1BrjYj9s76DwF9sjJlUYG88xBqOVp/7mqiX
RD+XLBdz090qx/JGOHMSxJECRq8AiWrp5Jp/garjcw7fOKiS1tQ4uiswWj+AfU3b
b9l3HpgyAcaFbg5sHllj5iSwlsMlEGjU/AeEpUkWjQHrv3hcwMIAua5qhC8az30D
fJ2kcjyg7J4CDqB6Jaxk9SWATO4jlQzEQd3reQIDAQABAoIBAB6AJALkGXkVNVvq
NHf67ZSlnerRCB77G4g2pWPthdnOr75ZdjU76nK4EQQI1TfvmltgLmv1mPRcStC9
YvMrWQRchbODrZalQ9m663DtP+f74iAlvRVhKV0+rpgAjhqx8x0gI0YCmrBjjRgg
bd06qML92zpw5qiInIdG8N71soaBWaAHPurMxQitGEiKCMXziRyNDJdt6vvuW+ne
K2x6CYP+4bqXDDKOLk2s8w1iG+50TMf28USSOVTORWu/fBoWzK56Ifhy3qHRRklF
U74jyolGo2fTyGCxdsjrjmkluVy7FYiTAKiSyQ7qPeTeFswE7Bko7xF690wp9rUP
Ecf33tkCgYEAxRCOWZX+EJ9MIqEc19lU/sWR+xMgnQoS2+I2u2KIGjCkIOqPssGj
qcGqLglp4SzZRvOAOMfU6dGZHsw+OjY50Nj8Z5irgps68wionB7eIzCWaJ5+FtZl
OpAquwWYUZbitdOMkn4vDoIjbkb7Lg8abK5593RHKTH7nzXmLckHG0cCgYEA8epi
xLcWdRr7R4YrSAJm7LkvAVvOr7LGmVG5tTHCgBMmxb3ac1reKJLF871FsKkTuVE9
LHhUvHNO3rExrRs8wzZRkykPHoL/xiuIxZagRyX3fiBU/IlxNU7S6W/fqShWyVnM
QZdeL6GB4KyBTcVWckBdx9XRxuJxFtSjcYdAoz8CgYAnCGl5uyjwt+zHqyvgYS7h
q9aHabl7AYTMwtjectzMezf3QR6nZI17UV3Y8opjTJPY5uBN3t8AVUDEBeaJdzYq
dOBB0M6f9wlNHDjxe3e6dqREIqiM83hniii8lJYgCwfqxfVBCvddI0/LRwn3jIxK
XKDR79ksgrb5SR7HkNr0fwKBgQDB/8CtGFY+GyEfvoy+9o4rkGYNpN4fMDLriQpw
MDNWGgWh1hp1H43A49MzH7nfbfePSYLRQr5RUfcRxu+bbphPxCOu6+JgCZWsCgVo
zt5BsETFNJBy5JWQtmGVKk4mjV7rocOXZhf2AoLw9eXLxFEK1mteci4EXFtsf6GC
zYAcaQKBgHrx0e5NHuSUYw0AYBd6CR4zIOdVPVTUEocAhjVvglEp5ApC0SQYr2bD
BjA2lsfYhtRrjciSAvDHRnK2pTWlbNZEQAert1z20+Hv7BLq376vgTjJCOXQYxUY
v3uf4s04ZJkDT5msmxvGQMZfjsMPqAPNNYyajUy+RdSU5DBNPv6G
-----END RSA PRIVATE KEY-----

@@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC6OQMsODKvNt1BGT3q5xc4H1oEpkDGgSIWqUYhjdnrmQHijQLg5zyXvhLkI+ZCzE0nB9FalKbNU2fdeIcYtxKksX014oxitZn+69nuv91mZ46Sn4jTvcOTP2NTT0cyUtky4HsE3MRmak77UGuNiP2zvoPAX2yMmVRgbzzEGo5Wn/uaqJdEP5csF3PT3SrH8kY4cxLEkQJGrwCJaunkmn+BquNzDt84qJLW1Di6KzBaP4B9Tdtv2XcemDIBxoVuDmweWWPmJLCWwyUQaNT8B4SlSRaNAeu/eFzAwgC5rmqELxrPfQN8naRyPKDsngIOoHolrGT1JYBM7iOVDMRB3et5

@@ -0,0 +1,19 @@
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkekNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUzTWpVNU56RXhNVGt3SGhjTk1qUXdPVEV3TVRJeU5URTVXaGNOTXpRd09UQTRNVEl5TlRFNQpXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUzTWpVNU56RXhNVGt3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFSeVY2RG5UUUkwUEEzNEdNU1FlYWNKZ0l0WWJjdThzZ2ZhODBoc3AxNEgKQ2Y5YWtsSkJVOFpZaGl4WnZTOS9tKzB3MXF2RkRnM2ZVSDd4QnQ2T3A0UXZvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVXRUcEk2MXNMemkzVlp3eisxY05qClRBWTN2K0V3Q2dZSUtvWkl6ajBFQXdJRFNBQXdSUUloQUtQZjgybTA3LzBqek1sdHkrcVU5SVQ0TTA1UkNmbmIKNmlHdWYzTlRLYS9NQWlCcW5PK3ZGSUZ1QWFsTUlvNFl1eWUzb0NZbkhwd3Z3aGxJNUdTSmU2Y2F5UT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
server: https://168.119.61.8:6443
name: default
contexts:
- context:
cluster: default
user: default
name: default
current-context: default
kind: Config
preferences: {}
users:
- name: default
user:
client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrakNDQVRlZ0F3SUJBZ0lJRXhBYnJ5RWprUzR3Q2dZSUtvWkl6ajBFQXdJd0l6RWhNQjhHQTFVRUF3d1kKYXpOekxXTnNhV1Z1ZEMxallVQXhOekkxT1RjeE1URTVNQjRYRFRJME1Ea3hNREV5TWpVeE9Wb1hEVEkxTURreApNREV5TWpVeE9Wb3dNREVYTUJVR0ExVUVDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhGVEFUQmdOVkJBTVRESE41CmMzUmxiVHBoWkcxcGJqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJFc2MwWVgxSlRqSSswR2gKd0tkRnZjU1BMK1p3dDhCN1hlNktMUVIyRU5TMkpuS3pVSG56QnhaWXVPY2RlR0I2OUZ1L3FoTCtHL1QySnA3KwoyQmRDcjdtalNEQkdNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBakFmCkJnTlZIU01FR0RBV2dCVDMxZi84OWsrY3BoOU9YTWVPNE5hLzdaeEhiREFLQmdncWhrak9QUVFEQWdOSkFEQkcKQWlFQTROZEtiLy84K2NSbzNmN3J4aFZOWjFIbG11cTVHUE5oUHl2cFl6YWE1aFVDSVFDME9SWng5UGdLTlB6NwpIZjdlcmxmdjJaVUw1c0tRQWhrdE9sOFJxSHpYTUE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCi0tLS0tQkVHSU4gQ0VSVElGSUNBVEUtLS0tLQpNSUlCZHpDQ0FSMmdBd0lCQWdJQkFEQUtCZ2dxaGtqT1BRUURBakFqTVNFd0h3WURWUVFEREJock0zTXRZMnhwClpXNTBMV05oUURFM01qVTVOekV4TVRrd0hoY05NalF3T1RFd01USXlOVEU1V2hjTk16UXdPVEE0TVRJeU5URTUKV2pBak1TRXdId1lEVlFRRERCaHJNM010WTJ4cFpXNTBMV05oUURFM01qVTVOekV4TVRrd1dUQVRCZ2NxaGtqTwpQUUlCQmdncWhrak9QUU1CQndOQ0FBVDVkQ2NhOU05eUFQakJjZnJGUkRwK08rclEwL3VXbDdzUjN5ODZQWlV6CmZBaUllMm5Gc2NVOEpadzVFN2svQXg3d2Z4ZWZGdjJyaHJvUmt5V0pZaHJvbzBJd1FEQU9CZ05WSFE4QkFmOEUKQkFNQ0FxUXdEd1lEVlIwVEFRSC9CQVV3QXdFQi96QWRCZ05WSFE0RUZnUVU5OVgvL1BaUG5LWWZUbHpIanVEVwp2KzJjUjJ3d0NnWUlLb1pJemowRUF3SURTQUF3UlFJaEFKSGlxa2lKQVFqT3RYK2tsQnhzbVU5WUZxb0Rud2xICkR0RnB3Rm1WM2ZVdkFpQURBbi84dVl3TkpjNU9uWE5SeDVUckVlc0JvcmRJem96dVZhRFV2a3VLM0E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
client-key-data: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUZEbVZrTFR0cjgvaGlEM0E4bXJWRkRRdTYyMCtqSU9zVHNRNlVPM21uRDZvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFU3h6UmhmVWxPTWo3UWFIQXAwVzl4STh2NW5DM3dIdGQ3b290QkhZUTFMWW1jck5RZWZNSApGbGk0NXgxNFlIcjBXNytxRXY0YjlQWW1udjdZRjBLdnVRPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=

@@ -0,0 +1,7 @@
data "local_sensitive_file" "vault_keys" {
filename = "${path.module}/vault.secret"
}
locals {
vault_keys = jsondecode(data.local_sensitive_file.vault_keys.content)
}

@@ -0,0 +1,67 @@
# HCloud infrastructure resources
resource "tls_private_key" "global_key" {
algorithm = "RSA"
rsa_bits = 2048
}
resource "local_sensitive_file" "ssh_private_key_pem" {
filename = "${path.module}/id_rsa"
content = tls_private_key.global_key.private_key_pem
file_permission = "0600"
}
resource "local_file" "ssh_public_key_openssh" {
filename = "${path.module}/id_rsa.pub"
content = tls_private_key.global_key.public_key_openssh
}
resource "hcloud_network" "private" {
name = "${var.prefix}-private-network"
ip_range = var.network_cidr
}
resource "hcloud_network_subnet" "private" {
type = "cloud"
network_id = hcloud_network.private.id
network_zone = var.network_zone
ip_range = var.network_ip_range
}
# Temporary key pair used for SSH access
resource "hcloud_ssh_key" "management_ssh_key" {
name = "${var.prefix}-instance-ssh-key"
public_key = tls_private_key.global_key.public_key_openssh
}
# HCloud Instance for creating a single node RKE cluster and installing the Rancher server
resource "hcloud_server" "management_server" {
name = "${var.prefix}-management-server"
image = "ubuntu-24.04"
server_type = var.instance_type
location = var.hcloud_location
ssh_keys = [hcloud_ssh_key.management_ssh_key.id]
network {
network_id = hcloud_network.private.id
}
provisioner "remote-exec" {
inline = [
"echo 'Waiting for cloud-init to complete...'",
"cloud-init status --wait > /dev/null",
"echo 'Completed cloud-init!'",
]
connection {
type = "ssh"
host = self.ipv4_address
user = local.node_username
private_key = tls_private_key.global_key.private_key_pem
}
}
depends_on = [
hcloud_network_subnet.private
]
}

@@ -0,0 +1,43 @@
output "rancher_node_ip" {
value = hcloud_server.management_server.ipv4_address
}
output "rancher_server_uri" {
value = module.rancher.rancher_uri
}
output "rancher_server_admin_password" {
value = module.rancher.rancher_server_admin_password
sensitive = true
}
output "minio_api_uri" {
value = module.rancher.minio_api_uri
}
output "minio_admin_uri" {
value = module.rancher.minio_admin_uri
}
output "minio_root_user" {
value = module.rancher.minio_root_user
sensitive = true
}
output "minio_root_password" {
value = module.rancher.minio_root_password
sensitive = true
}
output "minio_region" {
value = var.aws_region
}
output "vault_uri" {
value = module.rancher.vault_uri
}
output "vault_root_token" {
value = local.vault_keys.root_token
sensitive = true
}

@@ -0,0 +1,21 @@
terraform {
required_providers {
hcloud = {
source = "hetznercloud/hcloud"
version = "1.33.2"
}
local = {
source = "hashicorp/local"
version = "2.4.0"
}
tls = {
source = "hashicorp/tls"
version = "4.0.4"
}
}
required_version = ">= 1.0.0"
}
provider "hcloud" {
token = var.hcloud_token
}

@@ -0,0 +1,23 @@
module "rancher" {
source = "../../../../modules/rancher"
node_public_ip = hcloud_server.management_server.ipv4_address
node_internal_ip = one(hcloud_server.management_server.network[*]).ip
node_username = local.node_username
ssh_private_key_pem = tls_private_key.global_key.private_key_pem
rancher_kubernetes_version = var.rancher_kubernetes_version
cert_manager_version = var.cert_manager_version
rancher_version = var.rancher_version
rancher_helm_repository = var.rancher_helm_repository
// rancher_server_dns = join(".", ["rancher", hcloud_server.management_server.ipv4_address, "sslip.io"])
admin_password = var.rancher_server_admin_password
aws_access_key_id = var.aws_access_key_id
aws_secret_access_key = var.aws_secret_access_key
aws_region = var.aws_region
aws_kms_key_id = var.aws_kms_key_id
server_dns = join(".", [hcloud_server.management_server.ipv4_address, "sslip.io"])
}

@@ -0,0 +1,97 @@
# Variables for Hetzner Cloud infrastructure module
variable "hcloud_token" {
type = string
description = "Hetzner Cloud API token used to create infrastructure"
}
variable "hcloud_location" {
type = string
description = "Hetzner location used for all resources"
default = "fsn1"
}
variable "prefix" {
type = string
description = "Prefix added to names of all resources"
default = "quickstart"
}
variable "network_cidr" {
type = string
description = "Network to create for private communication"
default = "10.0.0.0/8"
}
variable "network_ip_range" {
type = string
description = "Subnet to create for private communication. Must be part of the CIDR defined in `network_cidr`."
default = "10.0.1.0/24"
}
variable "network_zone" {
type = string
description = "Zone to create the network in"
default = "eu-central"
}
variable "instance_type" {
type = string
description = "Type of instance to be used for all instances"
default = "cx21"
}
# Variables for Rancher server module
variable "rancher_kubernetes_version" {
type = string
description = "Kubernetes version to use for Rancher server cluster"
default = "v1.24.14+k3s1"
}
variable "cert_manager_version" {
type = string
description = "Version of cert-manager to install alongside Rancher (format: 0.0.0)"
default = "1.11.0"
}
variable "rancher_version" {
type = string
description = "Rancher server version (format: v0.0.0)"
default = "2.7.9"
}
variable "rancher_helm_repository" {
type = string
description = "The helm repository, where the Rancher helm chart is installed from"
default = "https://releases.rancher.com/server-charts/latest"
}
variable "rancher_server_admin_password" {
type = string
description = "Admin password to use for Rancher server bootstrap, min. 12 characters"
}
# Local variables used to reduce repetition
locals {
node_username = "root"
}
variable "aws_access_key_id" {
description = "AWS Access Key ID for Vault KMS access"
type = string
}
variable "aws_secret_access_key" {
description = "AWS Secret Access Key for Vault KMS access"
type = string
}
variable "aws_kms_key_id" {
description = "AWS KMS Key ID for Vault KMS access"
type = string
}
variable "aws_region" {
description = "AWS KMS Region for Vault KMS access"
type = string
default = "eu-central-1"
}

@@ -0,0 +1,23 @@
{
"unseal_keys_b64": [],
"unseal_keys_hex": [],
"unseal_shares": 1,
"unseal_threshold": 1,
"recovery_keys_b64": [
"V2OBJfg6VFyd8My0fFnQVnX0ehUIejdkiS0wIk/yj08L",
"Ub3NvB8i9wylI7GUhlHu1YayvrBL71eEC7+SmkXT9AWw",
"rL3HQKU0AEenP4y2UBD+ZlUrIY3LJfnA+eFKXi8IIowN",
"MldF1Cqi2FvLs4Z7yYyAxlZgNptVcUNRv4zU3wkD1RpT",
"SlGl9m9bYfD/xZ0Ia1RrPEFbxDPmnMdDjpV0S26BrY8O"
],
"recovery_keys_hex": [
"57638125f83a545c9df0ccb47c59d05675f47a15087a3764892d30224ff28f4f0b",
"51bdcdbc1f22f70ca523b1948651eed586b2beb04bef57840bbf929a45d3f405b0",
"acbdc740a5340047a73f8cb65010fe66552b218dcb25f9c0f9e14a5e2f08228c0d",
"325745d42aa2d85bcbb3867bc98c80c65660369b55714351bf8cd4df0903d51a53",
"4a51a5f66f5b61f0ffc59d086b546b3c415bc433e69cc7438e95744b6e81ad8f0e"
],
"recovery_keys_shares": 5,
"recovery_keys_threshold": 3,
"root_token": "hvs.mDJwUQczuCUrK5wu9HhnKsP5"
}

@@ -0,0 +1,41 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/aminueza/minio" {
version = "2.5.0"
constraints = "~> 2.5.0"
hashes = [
"h1:RrjfsRy+fBVh7VF3r9u7uCCSjAdR5APa6sqbc9b8GfU=",
"zh:066cdb289dbfd1675e22fe58c8b42e2732f24fc1528b1919a78dfe28f80e8b30",
"zh:26d5e55106259e69493b95058178ec3d6b2395f03a8fe832af1be0e4d89ef42c",
"zh:6247e19de9ec6ef719cfcb174b8f08085c0fd5118b3b0de3fb9bb150702b4ad8",
"zh:70c3cbab0ba8edeec0db2e175bcdb47255c92f3153f839c4e8f2b0fe8c1366f4",
"zh:713793b4b93ae62070b18983ff525390de6c84547cab4220aa068437149f5035",
"zh:72de3e532d4bc7c7a4a872aaf00d7e4dfa09f3730668a738bb881d6734248f02",
"zh:9090f9288d7bc9f23043c1e65d8535e91f10413a16699d4a18add811b25fa167",
"zh:9847284aecb52718468feccb914d67e8befb8bff8345275cb03c3209b338f68b",
"zh:aa09ba1aa6fec278198ff352cc7f2977cfe567d31fd948c54fba5db82b4cd7ec",
"zh:ca28efbf60400918b9dadd18ecbf683065bf9329b35cbf3826718d8d50f10263",
"zh:cb21b119202ac6a30724beb89aefbb8660762b0e9b7165f1e22d59720dd0f110",
"zh:f36b4c9fe4795e892b3be2c80a22461f373541f81d335b51afa963097ab29624",
]
}
provider "registry.terraform.io/hashicorp/vault" {
version = "4.4.0"
hashes = [
"h1:+OO0KHYslvmN+mgRi+v3B6Yg7CYJUyaFh0GIW2hQcCY=",
"zh:12758c5afc4160355c55e808f3d0e960a69ef285ddd57f29c3a775ac63c76135",
"zh:190c4fbb620bbc07ff850119e17ffbca9f4d81968e69436024fcfb20c69d177e",
"zh:2668d3f37e41a539ddca8507a2f8100711cbe54fd7de6d9e82e191c456999674",
"zh:59cf5fe3a5cff561c9d15b1b0748fdaeee8966537a5121a20178a1dd265cc22c",
"zh:6bf7107b56132281b05932aa8fce8851cd2351d2f6c7d0de4475b5dabf755d4f",
"zh:77ee85a529e9ae519aa63950960bd2c2056dd622ad32b08731cb5237e28a9200",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:c3ff4d9c123cc23f95813800c4cea69d1fab29c65b96de4a5932fc328275f527",
"zh:c5a6dd8530f720757861da38c16da6577f30cc00423471f8bcc5682c1852027f",
"zh:c5acd773d7d24a6116866bf0e4449a3be6849cd6bd2f87141289d47983a0f777",
"zh:ede501e9979a586be279b63e0cf5ce69fa89780e37791fe87d8b4283e3716c96",
"zh:f5f6ae50a23a184d126832d688380e22311fa1b0192723507a790e57917c3e78",
]
}

@@ -0,0 +1,13 @@
locals {
minio_uri = data.terraform_remote_state.stage1.outputs.minio_api_uri
# extract the host from the MinIO API URI and append the HTTPS port
minio_server = format("%s:443", regex("^https?://(?:www\\.)?([^/]+)/?$", local.minio_uri)[0])
minio_region = try(data.terraform_remote_state.stage1.outputs.minio_region, "eu-central-1")
minio_root_user = data.terraform_remote_state.stage1.outputs.minio_root_user
minio_root_password = data.terraform_remote_state.stage1.outputs.minio_root_password
vault_uri = data.terraform_remote_state.stage1.outputs.vault_uri
vault_root_token = data.terraform_remote_state.stage1.outputs.vault_root_token
}

@@ -0,0 +1,83 @@
resource "vault_mount" "management" {
path = "management"
type = "kv"
options = { version = "2" }
description = "KV Version 2 secret engine mount for management"
}
resource "minio_s3_bucket" "management" {
bucket = "management"
acl = "private"
}
# resource "minio_s3_bucket_server_side_encryption" "management" {
# bucket = minio_s3_bucket.management.bucket
# encryption_type = "aws:kms"
# kms_key_id = var.aws_kms_key_id
# }
resource "minio_iam_user" "management" {
name = "management"
}
resource "minio_iam_service_account" "management_service_account" {
target_user = minio_iam_user.management.name
policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Effect = "Allow"
Action = ["s3:ListBucket"]
Resource = ["arn:aws:s3:::management"]
},
{
Effect = "Allow"
Action = ["s3:GetObject", "s3:PutObject"]
Resource = ["arn:aws:s3:::management/*"]
}
]
})
}
resource "vault_kv_secret_v2" "minio_creds" {
mount = "management"
name = "minio"
delete_all_versions = true
data_json = jsonencode({
access_key = minio_iam_service_account.management_service_account.access_key
secret_key = minio_iam_service_account.management_service_account.secret_key
})
depends_on = [
vault_mount.management,
minio_iam_service_account.management_service_account
]
}
resource "vault_policy" "management" {
name = "management"
policy = <<EOT
path "management/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
path "auth/token/create" {
capabilities = ["create", "update", "sudo"]
}
path "auth/token/lookup-self" {
capabilities = ["read"]
}
path "auth/token/renew-self" {
capabilities = ["update"]
}
# Add other necessary permissions
EOT
}
resource "vault_token" "management" {
policies = [vault_policy.management.name]
renewable = true
ttl = "720h" # 30 days
period = "720h" # Will be renewed every 30 days
}

@@ -0,0 +1,8 @@
output "vault_uri" {
value = local.vault_uri
}
output "vault_token" {
value = vault_token.management.client_token
sensitive = true
}

@@ -0,0 +1,21 @@
terraform {
required_providers {
minio = {
source = "aminueza/minio"
version = "~> 2.5.0"
}
}
}
provider "minio" {
minio_server = local.minio_server
minio_region = local.minio_region
minio_user = local.minio_root_user
minio_password = local.minio_root_password
minio_ssl = true
}
provider "vault" {
address = local.vault_uri
token = local.vault_root_token
}

@@ -0,0 +1,7 @@
data "terraform_remote_state" "stage1" {
backend = "local"
config = {
path = "../stage1-create/terraform.tfstate"
}
}

@@ -0,0 +1,61 @@
# Rancher resources
module "rancher_common" {
source = "../rancher-common"
node_public_ip = hcloud_server.rancher_server.ipv4_address
node_internal_ip = one(hcloud_server.rancher_server.network[*]).ip
node_username = local.node_username
ssh_private_key_pem = tls_private_key.global_key.private_key_pem
rancher_kubernetes_version = var.rancher_kubernetes_version
cert_manager_version = var.cert_manager_version
rancher_version = var.rancher_version
rancher_helm_repository = var.rancher_helm_repository
rancher_server_dns = join(".", ["rancher", hcloud_server.rancher_server.ipv4_address, "sslip.io"])
admin_password = var.rancher_server_admin_password
workload_kubernetes_version = var.workload_kubernetes_version
workload_cluster_name = "quickstart-hcloud-custom"
}
# HCloud instance for creating a single node workload cluster
resource "hcloud_server" "quickstart_node" {
name = "${var.prefix}-worker"
image = "ubuntu-20.04"
server_type = var.instance_type
location = var.hcloud_location
ssh_keys = [hcloud_ssh_key.quickstart_ssh_key.id]
network {
network_id = hcloud_network.private.id
}
user_data = templatefile(
"${path.module}/files/userdata_quickstart_node.template",
{
username = local.node_username
register_command = module.rancher_common.custom_cluster_command
}
)
provisioner "remote-exec" {
inline = [
"echo 'Waiting for cloud-init to complete...'",
"cloud-init status --wait > /dev/null",
"echo 'Completed cloud-init!'",
]
connection {
type = "ssh"
host = self.ipv4_address
user = local.node_username
private_key = tls_private_key.global_key.private_key_pem
}
}
depends_on = [
hcloud_network_subnet.private
]
}

@@ -0,0 +1,5 @@
provider "helm" {
kubernetes {
config_path = "./bootstrap/stage1-initial-infra/kube_config_server.yaml"
}
}

@@ -0,0 +1,17 @@
terraform {
backend "s3" {
endpoints = {
s3 = "https://storage.168.119.61.8.sslip.io"
}
bucket = "management"
key = "terraform.tfstate"
region = "eu-central-1"
#encrypt = false
skip_region_validation = true
skip_metadata_api_check = true
skip_credentials_validation = true
skip_requesting_account_id = true
use_path_style = true
}
}

@@ -0,0 +1,34 @@
resource "random_password" "rancher_admin_password" {
length = 20
special = false
}
resource "vault_kv_secret_v2" "rancher_creds" {
mount = "management"
name = "rancher"
delete_all_versions = true
data_json = jsonencode({
admin_password = random_password.rancher_admin_password.result
})
}
resource "helm_release" "rancher" {
name = "rancher"
namespace = "cattle-system"
chart = "https://releases.rancher.com/server-charts/latest/rancher-2.9.1.tgz"
reuse_values = true
recreate_pods = false
set_sensitive {
name = "adminPassword"
value = vault_kv_secret_v2.rancher_creds.data["admin_password"]
}
lifecycle {
ignore_changes = [
set,
set_sensitive,
]
}
}

@@ -0,0 +1,11 @@
#!/bin/bash
MINIO_ACCESS_KEY=$(vault kv get -mount="management" -field="access_key" "minio")
MINIO_SECRET_KEY=$(vault kv get -mount="management" -field="secret_key" "minio")
cat << EOF > backend.tfvars
access_key = "${MINIO_ACCESS_KEY}"
secret_key = "${MINIO_SECRET_KEY}"
bucket = "${BUCKET}"
key = "terraform.tfstate"
region = "eu-central-1"
EOF
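# Usage sketch (assumes the Vault CLI is already authenticated and that BUCKET
# names the state bucket, e.g. `export BUCKET=management`): after running this
# script, hand the generated file to terraform init as a partial backend config:
#
#   terraform init -backend-config=backend.tfvars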

@@ -0,0 +1,13 @@
#!/bin/bash
# Generate new secrets
NEW_MINIO_ACCESS_KEY=$(openssl rand -base64 32 | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1)
NEW_MINIO_SECRET_KEY=$(openssl rand -base64 32 | tr -dc 'a-zA-Z0-9' | fold -w 40 | head -n 1)
NEW_RANCHER_PASSWORD=$(openssl rand -base64 32 | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1)
# Update secrets in Vault
vault kv put kvv2/minio access_key="$NEW_MINIO_ACCESS_KEY" secret_key="$NEW_MINIO_SECRET_KEY"
vault kv put kvv2/rancher admin_password="$NEW_RANCHER_PASSWORD"
# Apply Terraform configuration
terraform apply -var="minio_access_key=$NEW_MINIO_ACCESS_KEY" -var="minio_secret_key=$NEW_MINIO_SECRET_KEY" -var="rancher_admin_password=$NEW_RANCHER_PASSWORD"

@@ -0,0 +1,7 @@
data "terraform_remote_state" "stage2" {
backend = "local"
config = {
path = "./bootstrap/stage2-harden/terraform.tfstate"
}
}

@@ -0,0 +1,4 @@
provider "vault" {
address = "http://127.0.0.1:8200"
token = data.terraform_remote_state.stage4.outputs.vault_token
}

@@ -0,0 +1,47 @@
terraform {
required_providers {
minio = {
source = "aminueza/minio"
version = "~> 2.5.0"
}
}
}
provider "minio" {
minio_server = "localhost:9123"
minio_region = "eu-central-1"
minio_user = data.vault_kv_secret_v2.minio_creds.data["access_key"]
minio_password = data.vault_kv_secret_v2.minio_creds.data["secret_key"]
}
resource "minio_s3_bucket" "platform" {
depends_on = [data.vault_kv_secret_v2.minio_creds]
bucket = "platform"
acl = "private"
}
resource "minio_iam_user" "platform" {
depends_on = [data.vault_kv_secret_v2.minio_creds]
name = "platform"
}
resource "minio_iam_policy" "platform" {
depends_on = [minio_s3_bucket.platform]
name = "platform-policy"
policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Effect = "Allow"
Action = ["s3:GetObject", "s3:PutObject"]
Resource = ["arn:aws:s3:::platform/*"]
}
]
})
}
resource "minio_iam_user_policy_attachment" "policy_attachment" {
depends_on = [minio_iam_user.platform, minio_iam_policy.platform]
user_name = minio_iam_user.platform.name
policy_name = minio_iam_policy.platform.name
}

@@ -0,0 +1,17 @@
output "vault_token" {
value = vault_token.platform.client_token
sensitive = true
}
output "minio_user" {
value = minio_iam_user.platform.id
}
output "minio_user_status" {
value = minio_iam_user.platform.status
}
output "minio_user_secret" {
value = minio_iam_user.platform.secret
sensitive = true
}

@@ -0,0 +1,4 @@
variable "vault_root_token" {
description = "Vault (root) token to create secrets"
type = string
}

@@ -0,0 +1,34 @@
provider "vault" {
address = "http://127.0.0.1:8200"
token = var.vault_root_token
}
data "vault_kv_secret_v2" "minio_creds" {
mount = "management"
name = "minio"
}
resource "vault_mount" "platform" {
path = "platform"
type = "kv"
options = { version = "2" }
description = "KV Version 2 secret engine mount for platform"
}
resource "vault_policy" "platform" {
name = "platform"
policy = <<EOT
path "platform/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
# Add other necessary permissions
EOT
}
resource "vault_token" "platform" {
policies = [vault_policy.platform.name]
renewable = true
ttl = "720h" # 30 days
period = "720h" # Will be renewed every 30 days
}

@@ -0,0 +1,40 @@
# HCloud instance for creating a single node workload cluster
resource "hcloud_server" "quickstart_node" {
name = "${var.prefix}-worker"
image = "ubuntu-20.04"
server_type = var.instance_type
location = var.hcloud_location
ssh_keys = [hcloud_ssh_key.quickstart_ssh_key.id]
network {
network_id = hcloud_network.private.id
}
user_data = templatefile(
"${path.module}/files/userdata_quickstart_node.template",
{
username = local.node_username
register_command = module.rancher_common.custom_cluster_command
}
)
provisioner "remote-exec" {
inline = [
"echo 'Waiting for cloud-init to complete...'",
"cloud-init status --wait > /dev/null",
"echo 'Completed cloud-init!'",
]
connection {
type = "ssh"
host = self.ipv4_address
user = local.node_username
private_key = tls_private_key.global_key.private_key_pem
}
}
depends_on = [
hcloud_network_subnet.private
]
}

@@ -0,0 +1,8 @@
# Create custom managed cluster for quickstart
resource "rancher2_cluster_v2" "quickstart_workload" {
provider = rancher2.admin
name = var.workload_cluster_name
kubernetes_version = var.workload_kubernetes_version
}

15
infra/modules/MODULES.md Normal file
@@ -0,0 +1,15 @@
- consul: service discovery
- crdb: cockroachdb, a distributed postgresql-compatible database
- jaeger: distributed tracing
- harbor: registry
- keda: k8s scaling using events
- metabase: easy dashboarding
- minio: s3 compatible object storage
- novu: webhooks
- rabbitmq: message broker
- redis: in-memory key-value store
- stack-auth: auth0 alternative
- traefik: ingress
- umami: analytics
- vault: secrets management
- victoriametrics: prometheus compatible TSDB

15
infra/modules/argocd.tf Normal file
@@ -0,0 +1,15 @@
resource "helm_release" "argocd" {
name = "argocd"
repository = "https://argoproj.github.io/argo-helm"
chart = "argo-cd"
namespace = "argocd"
create_namespace = true
set {
name = "server.service.type"
value = "LoadBalancer"
}
# Add other configuration options as needed
}

18
infra/modules/dns.tf Normal file
@@ -0,0 +1,18 @@
# Note: the hashicorp/http provider only ships a data source (there is no "http" resource);
# this sketch assumes hashicorp/http >= 3.2, which supports method and request_body.
data "http" "create_dns_record" {
url = "https://dns.hetzner.com/api/v1/records"
method = "POST"
request_headers = {
"Auth-API-Token" = var.hetzner_dns_token
"Content-Type" = "application/json"
}
request_body = jsonencode({
zone_id = var.hetzner_zone_id
type = "A"
name = "rancher"
value = hcloud_server.master.ipv4_address
ttl = 3600
})
}

@@ -0,0 +1,19 @@
resource "harbor_project" "example_project" {
name = "example-project"
public = false
vulnerability_scanning = true
}
resource "harbor_robot_account" "example_robot" {
name = "example-robot"
description = "Robot account for example project"
level = "project"
permissions {
access {
action = "push"
resource = "repository"
}
kind = "project"
namespace = harbor_project.example_project.name
}
}

@@ -0,0 +1,19 @@
terraform {
required_providers {
harbor = {
source = "goharbor/harbor"
version = "~> 3.0" # Use the latest version available
}
}
}
provider "harbor" {
url = "https://your-harbor-instance-url"
username = var.harbor_username
password = var.harbor_password
# Alternatively, you can use a bearer token:
# bearer_token = var.harbor_token
# If you're using a self-signed certificate, you might need:
# insecure = true
}

@@ -0,0 +1,17 @@
variable "harbor_username" {
type = string
description = "Username for Harbor"
}
variable "harbor_password" {
type = string
description = "Password for Harbor"
sensitive = true
}
# If using bearer token instead of username/password:
# variable "harbor_token" {
# type = string
# description = "Bearer token for Harbor"
# sensitive = true
# }

@@ -0,0 +1,68 @@
# Hetzner Cloud Rancher Quickstart
Two single-node Kubernetes clusters will be created from two instances running Ubuntu 20.04 and Docker.
Both instances will be accessible over SSH using the auto-generated SSH keys `id_rsa` and `id_rsa.pub`.
<!-- BEGIN_TF_DOCS -->
## Requirements
| Name | Version |
|------|---------|
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 1.0.0 |
| <a name="requirement_hcloud"></a> [hcloud](#requirement\_hcloud) | 1.33.2 |
| <a name="requirement_local"></a> [local](#requirement\_local) | 2.4.0 |
| <a name="requirement_tls"></a> [tls](#requirement\_tls) | 4.0.4 |
## Providers
| Name | Version |
|------|---------|
| <a name="provider_hcloud"></a> [hcloud](#provider\_hcloud) | 1.33.2 |
| <a name="provider_local"></a> [local](#provider\_local) | 2.4.0 |
| <a name="provider_tls"></a> [tls](#provider\_tls) | 4.0.4 |
## Modules
| Name | Source | Version |
|------|--------|---------|
| <a name="module_rancher_common"></a> [rancher\_common](#module\_rancher\_common) | ../rancher-common | n/a |
## Resources
| Name | Type |
|------|------|
| [hcloud_network.private](https://registry.terraform.io/providers/hetznercloud/hcloud/1.33.2/docs/resources/network) | resource |
| [hcloud_network_subnet.private](https://registry.terraform.io/providers/hetznercloud/hcloud/1.33.2/docs/resources/network_subnet) | resource |
| [hcloud_server.quickstart_node](https://registry.terraform.io/providers/hetznercloud/hcloud/1.33.2/docs/resources/server) | resource |
| [hcloud_server.rancher_server](https://registry.terraform.io/providers/hetznercloud/hcloud/1.33.2/docs/resources/server) | resource |
| [hcloud_ssh_key.quickstart_ssh_key](https://registry.terraform.io/providers/hetznercloud/hcloud/1.33.2/docs/resources/ssh_key) | resource |
| [local_file.ssh_public_key_openssh](https://registry.terraform.io/providers/hashicorp/local/2.4.0/docs/resources/file) | resource |
| [local_sensitive_file.ssh_private_key_pem](https://registry.terraform.io/providers/hashicorp/local/2.4.0/docs/resources/sensitive_file) | resource |
| [tls_private_key.global_key](https://registry.terraform.io/providers/hashicorp/tls/4.0.4/docs/resources/private_key) | resource |
## Inputs
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
| <a name="input_hcloud_token"></a> [hcloud\_token](#input\_hcloud\_token) | Hetzner Cloud API token used to create infrastructure | `string` | n/a | yes |
| <a name="input_rancher_server_admin_password"></a> [rancher\_server\_admin\_password](#input\_rancher\_server\_admin\_password) | Admin password to use for Rancher server bootstrap, min. 12 characters | `string` | n/a | yes |
| <a name="input_cert_manager_version"></a> [cert\_manager\_version](#input\_cert\_manager\_version) | Version of cert-manager to install alongside Rancher (format: 0.0.0) | `string` | `"1.11.0"` | no |
| <a name="input_hcloud_location"></a> [hcloud\_location](#input\_hcloud\_location) | Hetzner location used for all resources | `string` | `"fsn1"` | no |
| <a name="input_instance_type"></a> [instance\_type](#input\_instance\_type) | Type of instance to be used for all instances | `string` | `"cx21"` | no |
| <a name="input_network_cidr"></a> [network\_cidr](#input\_network\_cidr) | Network to create for private communication | `string` | `"10.0.0.0/8"` | no |
| <a name="input_network_ip_range"></a> [network\_ip\_range](#input\_network\_ip\_range) | Subnet to create for private communication. Must be part of the CIDR defined in `network_cidr`. | `string` | `"10.0.1.0/24"` | no |
| <a name="input_network_zone"></a> [network\_zone](#input\_network\_zone) | Zone to create the network in | `string` | `"eu-central"` | no |
| <a name="input_prefix"></a> [prefix](#input\_prefix) | Prefix added to names of all resources | `string` | `"quickstart"` | no |
| <a name="input_rancher_helm_repository"></a> [rancher\_helm\_repository](#input\_rancher\_helm\_repository) | The helm repository, where the Rancher helm chart is installed from | `string` | `"https://releases.rancher.com/server-charts/latest"` | no |
| <a name="input_rancher_kubernetes_version"></a> [rancher\_kubernetes\_version](#input\_rancher\_kubernetes\_version) | Kubernetes version to use for Rancher server cluster | `string` | `"v1.24.14+k3s1"` | no |
| <a name="input_rancher_version"></a> [rancher\_version](#input\_rancher\_version) | Rancher server version (format: v0.0.0) | `string` | `"2.7.9"` | no |
| <a name="input_workload_kubernetes_version"></a> [workload\_kubernetes\_version](#input\_workload\_kubernetes\_version) | Kubernetes version to use for managed workload cluster | `string` | `"v1.24.14+rke2r1"` | no |
## Outputs
| Name | Description |
|------|-------------|
| <a name="output_rancher_node_ip"></a> [rancher\_node\_ip](#output\_rancher\_node\_ip) | n/a |
| <a name="output_rancher_server_url"></a> [rancher\_server\_url](#output\_rancher\_server\_url) | n/a |
| <a name="output_workload_node_ip"></a> [workload\_node\_ip](#output\_workload\_node\_ip) | n/a |
<!-- END_TF_DOCS -->
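## Usage

A minimal sketch, assuming the example variables file in this module has been copied to `terraform.tfvars` and filled in with a valid `hcloud_token` and a `rancher_server_admin_password` of at least 12 characters:

```bash
terraform init
terraform apply

# Both nodes are reachable over SSH with the auto-generated key once apply finishes.
ssh -i id_rsa root@"$(terraform output -raw rancher_node_ip)"
```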

@@ -0,0 +1,3 @@
#!/bin/bash -x
${register_command} --etcd --controlplane --worker

@@ -0,0 +1,128 @@
# HCloud infrastructure resources
resource "tls_private_key" "global_key" {
algorithm = "RSA"
rsa_bits = 2048
}
resource "local_sensitive_file" "ssh_private_key_pem" {
filename = "${path.module}/id_rsa"
content = tls_private_key.global_key.private_key_pem
file_permission = "0600"
}
resource "local_file" "ssh_public_key_openssh" {
filename = "${path.module}/id_rsa.pub"
content = tls_private_key.global_key.public_key_openssh
}
resource "hcloud_network" "private" {
name = "${var.prefix}-private-network"
ip_range = var.network_cidr
}
resource "hcloud_network_subnet" "private" {
type = "cloud"
network_id = hcloud_network.private.id
network_zone = var.network_zone
ip_range = var.network_ip_range
}
# Temporary key pair used for SSH access
resource "hcloud_ssh_key" "quickstart_ssh_key" {
name = "${var.prefix}-instance-ssh-key"
public_key = tls_private_key.global_key.public_key_openssh
}
# HCloud Instance for creating a single node RKE cluster and installing the Rancher server
resource "hcloud_server" "rancher_server" {
name = "${var.prefix}-rancher-server"
image = "ubuntu-20.04"
server_type = var.instance_type
location = var.hcloud_location
ssh_keys = [hcloud_ssh_key.quickstart_ssh_key.id]
network {
network_id = hcloud_network.private.id
}
provisioner "remote-exec" {
inline = [
"echo 'Waiting for cloud-init to complete...'",
"cloud-init status --wait > /dev/null",
"echo 'Completed cloud-init!'",
]
connection {
type = "ssh"
host = self.ipv4_address
user = local.node_username
private_key = tls_private_key.global_key.private_key_pem
}
}
depends_on = [
hcloud_network_subnet.private
]
}
# Rancher resources
module "rancher_common" {
source = "../rancher-common"
node_public_ip = hcloud_server.rancher_server.ipv4_address
node_internal_ip = one(hcloud_server.rancher_server.network[*]).ip
node_username = local.node_username
ssh_private_key_pem = tls_private_key.global_key.private_key_pem
rancher_kubernetes_version = var.rancher_kubernetes_version
cert_manager_version = var.cert_manager_version
rancher_version = var.rancher_version
rancher_helm_repository = var.rancher_helm_repository
rancher_server_dns = join(".", ["rancher", hcloud_server.rancher_server.ipv4_address, "sslip.io"])
admin_password = var.rancher_server_admin_password
workload_kubernetes_version = var.workload_kubernetes_version
workload_cluster_name = "quickstart-hcloud-custom"
}
# HCloud instance for creating a single node workload cluster
resource "hcloud_server" "quickstart_node" {
name = "${var.prefix}-worker"
image = "ubuntu-20.04"
server_type = var.instance_type
location = var.hcloud_location
ssh_keys = [hcloud_ssh_key.quickstart_ssh_key.id]
network {
network_id = hcloud_network.private.id
}
user_data = templatefile(
"${path.module}/files/userdata_quickstart_node.template",
{
username = local.node_username
register_command = module.rancher_common.custom_cluster_command
}
)
provisioner "remote-exec" {
inline = [
"echo 'Waiting for cloud-init to complete...'",
"cloud-init status --wait > /dev/null",
"echo 'Completed cloud-init!'",
]
connection {
type = "ssh"
host = self.ipv4_address
user = local.node_username
private_key = tls_private_key.global_key.private_key_pem
}
}
depends_on = [
hcloud_network_subnet.private
]
}

@@ -0,0 +1,12 @@
output "rancher_server_url" {
value = module.rancher_common.rancher_url
}
output "rancher_node_ip" {
value = hcloud_server.rancher_server.ipv4_address
}
output "workload_node_ip" {
value = hcloud_server.quickstart_node.ipv4_address
}

@@ -0,0 +1,21 @@
terraform {
required_providers {
hcloud = {
source = "hetznercloud/hcloud"
version = "1.33.2"
}
local = {
source = "hashicorp/local"
version = "2.4.0"
}
tls = {
source = "hashicorp/tls"
version = "4.0.4"
}
}
required_version = ">= 1.0.0"
}
provider "hcloud" {
token = var.hcloud_token
}

@@ -0,0 +1,39 @@
# Hetzner Cloud API token used to create infrastructure
hcloud_token = ""
# Admin password to use for Rancher server bootstrap, min. 12 characters
rancher_server_admin_password = ""
# Version of cert-manager to install alongside Rancher (format: 0.0.0)
cert_manager_version = "1.11.0"
# Hetzner location used for all resources
hcloud_location = "fsn1"
# Type of instance to be used for all instances
instance_type = "cx21"
# Network to create for private communication
network_cidr = "10.0.0.0/8"
# Subnet to create for private communication. Must be part of the CIDR defined in `network_cidr`.
network_ip_range = "10.0.1.0/24"
# Zone to create the network in
network_zone = "eu-central"
# Prefix added to names of all resources
prefix = "quickstart"
# The helm repository, where the Rancher helm chart is installed from
rancher_helm_repository = "https://releases.rancher.com/server-charts/latest"
# Kubernetes version to use for Rancher server cluster
rancher_kubernetes_version = "v1.24.14+k3s1"
# Rancher server version (format: v0.0.0)
rancher_version = "2.7.9"
# Kubernetes version to use for managed workload cluster
workload_kubernetes_version = "v1.24.14+rke2r1"

@@ -0,0 +1,82 @@
# Variables for Hetzner Cloud infrastructure module
variable "hcloud_token" {
type = string
description = "Hetzner Cloud API token used to create infrastructure"
}
variable "hcloud_location" {
type = string
description = "Hetzner location used for all resources"
default = "fsn1"
}
variable "prefix" {
type = string
description = "Prefix added to names of all resources"
default = "quickstart"
}
variable "network_cidr" {
type = string
description = "Network to create for private communication"
default = "10.0.0.0/8"
}
variable "network_ip_range" {
type = string
description = "Subnet to create for private communication. Must be part of the CIDR defined in `network_cidr`."
default = "10.0.1.0/24"
}
variable "network_zone" {
type = string
description = "Zone to create the network in"
default = "eu-central"
}
variable "instance_type" {
type = string
description = "Type of instance to be used for all instances"
default = "cx21"
}
variable "rancher_kubernetes_version" {
type = string
description = "Kubernetes version to use for Rancher server cluster"
default = "v1.24.14+k3s1"
}
variable "workload_kubernetes_version" {
type = string
description = "Kubernetes version to use for managed workload cluster"
default = "v1.24.14+rke2r1"
}
variable "cert_manager_version" {
type = string
description = "Version of cert-manager to install alongside Rancher (format: 0.0.0)"
default = "1.11.0"
}
variable "rancher_version" {
type = string
description = "Rancher server version (format: v0.0.0)"
default = "2.7.9"
}
variable "rancher_helm_repository" {
type = string
description = "The helm repository, where the Rancher helm chart is installed from"
default = "https://releases.rancher.com/server-charts/latest"
}
variable "rancher_server_admin_password" {
type = string
description = "Admin password to use for Rancher server bootstrap, min. 12 characters"
}
# Local variables used to reduce repetition
locals {
node_username = "root"
}

@@ -0,0 +1,4 @@
locals {
hostname_api = var.hostname
hostname_admin = join(".", ["admin", var.hostname])
}

@@ -0,0 +1,52 @@
resource "random_password" "minio_access_key" {
length = 20
special = false
}
resource "random_password" "minio_secret_key" {
length = 40
special = true
}
resource "helm_release" "minio" {
name = "minio"
repository = "https://charts.bitnami.com/bitnami"
chart = "minio"
namespace = "minio"
create_namespace = true
set_sensitive {
name = "auth.rootUser"
value = random_password.minio_access_key.result
}
set_sensitive {
name = "auth.rootPassword"
value = random_password.minio_secret_key.result
}
values = [
  <<-EOT
  ingress:
    enabled: false
    ingressClassName: traefik
    tls: true
    hostname: ${local.hostname_admin}
    annotations:
      kubernetes.io/ingress.class: traefik
      cert-manager.io/cluster-issuer: letsencrypt
      traefik.ingress.kubernetes.io/router.entrypoints: web,websecure
  apiIngress:
    enabled: true
    ingressClassName: traefik
    tls: true
    hostname: ${local.hostname_api}
    annotations:
      kubernetes.io/ingress.class: traefik
      cert-manager.io/cluster-issuer: letsencrypt
      traefik.ingress.kubernetes.io/router.entrypoints: web,websecure
  EOT
]
}

@@ -0,0 +1,17 @@
output "minio_api_uri" {
value = "https://${local.hostname_api}"
}
output "minio_admin_uri" {
value = "https://${local.hostname_admin}"
}
output "minio_root_user" {
value = random_password.minio_access_key.result
sensitive = true
}
output "minio_root_password" {
value = random_password.minio_secret_key.result
sensitive = true
}

@@ -0,0 +1,5 @@
provider "helm" {
kubernetes {
config_path = var.config_path
}
}

@@ -0,0 +1,9 @@
variable "config_path" {
description = "Path to the kubeconfig file"
type = string
}
variable "hostname" {
description = "Hostname for the Minio server"
type = string
}

@@ -0,0 +1,70 @@
# Rancher Common Terraform Module
The `rancher-common` module contains all resources that do not depend on a
specific cloud provider. The RKE, Kubernetes, Helm, and Rancher providers are
used, given the necessary information about the infrastructure created in a
cloud provider.
<!-- BEGIN_TF_DOCS -->
## Requirements
| Name | Version |
|------|---------|
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 1.0.0 |
| <a name="requirement_helm"></a> [helm](#requirement\_helm) | 2.10.1 |
| <a name="requirement_local"></a> [local](#requirement\_local) | 2.4.0 |
| <a name="requirement_rancher2"></a> [rancher2](#requirement\_rancher2) | 3.0.0 |
| <a name="requirement_ssh"></a> [ssh](#requirement\_ssh) | 2.6.0 |
## Providers
| Name | Version |
|------|---------|
| <a name="provider_helm"></a> [helm](#provider\_helm) | 2.10.1 |
| <a name="provider_local"></a> [local](#provider\_local) | 2.4.0 |
| <a name="provider_rancher2.admin"></a> [rancher2.admin](#provider\_rancher2.admin) | 3.0.0 |
| <a name="provider_rancher2.bootstrap"></a> [rancher2.bootstrap](#provider\_rancher2.bootstrap) | 3.0.0 |
| <a name="provider_ssh"></a> [ssh](#provider\_ssh) | 2.6.0 |
## Modules
No modules.
## Resources
| Name | Type |
|------|------|
| [helm_release.cert_manager](https://registry.terraform.io/providers/hashicorp/helm/2.10.1/docs/resources/release) | resource |
| [helm_release.rancher_server](https://registry.terraform.io/providers/hashicorp/helm/2.10.1/docs/resources/release) | resource |
| [local_file.kube_config_server_yaml](https://registry.terraform.io/providers/hashicorp/local/2.4.0/docs/resources/file) | resource |
| [local_file.kube_config_workload_yaml](https://registry.terraform.io/providers/hashicorp/local/2.4.0/docs/resources/file) | resource |
| [rancher2_bootstrap.admin](https://registry.terraform.io/providers/rancher/rancher2/3.0.0/docs/resources/bootstrap) | resource |
| [rancher2_cluster_v2.quickstart_workload](https://registry.terraform.io/providers/rancher/rancher2/3.0.0/docs/resources/cluster_v2) | resource |
| [ssh_resource.install_k3s](https://registry.terraform.io/providers/loafoe/ssh/2.6.0/docs/resources/resource) | resource |
| [ssh_resource.retrieve_config](https://registry.terraform.io/providers/loafoe/ssh/2.6.0/docs/resources/resource) | resource |
## Inputs
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
| <a name="input_admin_password"></a> [admin\_password](#input\_admin\_password) | Admin password to use for Rancher server bootstrap, min. 12 characters | `string` | n/a | yes |
| <a name="input_node_public_ip"></a> [node\_public\_ip](#input\_node\_public\_ip) | Public IP of compute node for Rancher cluster | `string` | n/a | yes |
| <a name="input_node_username"></a> [node\_username](#input\_node\_username) | Username used for SSH access to the Rancher server cluster node | `string` | n/a | yes |
| <a name="input_rancher_server_dns"></a> [rancher\_server\_dns](#input\_rancher\_server\_dns) | DNS host name of the Rancher server | `string` | n/a | yes |
| <a name="input_ssh_private_key_pem"></a> [ssh\_private\_key\_pem](#input\_ssh\_private\_key\_pem) | Private key used for SSH access to the Rancher server cluster node | `string` | n/a | yes |
| <a name="input_workload_cluster_name"></a> [workload\_cluster\_name](#input\_workload\_cluster\_name) | Name for created custom workload cluster | `string` | n/a | yes |
| <a name="input_cert_manager_version"></a> [cert\_manager\_version](#input\_cert\_manager\_version) | Version of cert-manager to install alongside Rancher (format: 0.0.0) | `string` | `"1.11.0"` | no |
| <a name="input_node_internal_ip"></a> [node\_internal\_ip](#input\_node\_internal\_ip) | Internal IP of compute node for Rancher cluster | `string` | `""` | no |
| <a name="input_rancher_helm_repository"></a> [rancher\_helm\_repository](#input\_rancher\_helm\_repository) | The helm repository, where the Rancher helm chart is installed from | `string` | `"https://releases.rancher.com/server-charts/latest"` | no |
| <a name="input_rancher_kubernetes_version"></a> [rancher\_kubernetes\_version](#input\_rancher\_kubernetes\_version) | Kubernetes version to use for Rancher server cluster | `string` | `"v1.24.14+k3s1"` | no |
| <a name="input_rancher_version"></a> [rancher\_version](#input\_rancher\_version) | Rancher server version (format v0.0.0) | `string` | `"2.7.9"` | no |
| <a name="input_workload_kubernetes_version"></a> [workload\_kubernetes\_version](#input\_workload\_kubernetes\_version) | Kubernetes version to use for managed workload cluster | `string` | `"v1.24.14+rke2r1"` | no |
## Outputs
| Name | Description |
|------|-------------|
| <a name="output_custom_cluster_command"></a> [custom\_cluster\_command](#output\_custom\_cluster\_command) | Docker command used to add a node to the quickstart cluster |
| <a name="output_custom_cluster_windows_command"></a> [custom\_cluster\_windows\_command](#output\_custom\_cluster\_windows\_command) | Docker command used to add a windows node to the quickstart cluster |
| <a name="output_rancher_url"></a> [rancher\_url](#output\_rancher\_url) | n/a |
<!-- END_TF_DOCS -->

View File

@ -0,0 +1,14 @@
# Data for rancher common module
# Kubernetes data
# ----------------------------------------------------------
# # Rancher certificates
# data "kubernetes_secret" "rancher_cert" {
# depends_on = [helm_release.rancher_server]
# metadata {
# name = "tls-rancher-ingress"
# namespace = "cattle-system"
# }
# }

View File

@ -0,0 +1,51 @@
# Helm resources
# Install cert-manager helm chart
resource "helm_release" "cert_manager" {
name = "cert-manager"
chart = "https://charts.jetstack.io/charts/cert-manager-v${var.cert_manager_version}.tgz"
namespace = "cert-manager"
create_namespace = true
wait = true
set {
name = "installCRDs"
value = "true"
}
set_list {
name = "dnsConfig.nameservers"
value = ["1.1.1.1", "8.8.8.8"]
}
set {
name = "email"
value = var.cert_manager_email
}
}
# Install Rancher helm chart
resource "helm_release" "rancher_server" {
depends_on = [
helm_release.cert_manager,
]
name = "rancher"
chart = "${var.rancher_helm_repository}/rancher-${var.rancher_version}.tgz"
namespace = "cattle-system"
create_namespace = true
wait = true
values = [ file("${path.module}/rancher-values.yaml") ]
set {
name = "hostname"
value = join(".", ["rancher", var.server_dns])
}
set {
name = "bootstrapPassword"
value = "admin" # TODO: change this once the terraform provider has been updated with the new pw bootstrap logic
}
}

View File

@ -0,0 +1,23 @@
# K3s cluster for Rancher
resource "ssh_resource" "install_k3s" {
host = var.node_public_ip
commands = [
"bash -c 'curl https://get.k3s.io | INSTALL_K3S_EXEC=\"server --disable=traefik --node-external-ip ${var.node_public_ip} --node-ip ${var.node_internal_ip}\" INSTALL_K3S_VERSION=${var.rancher_kubernetes_version} sh -'"
]
user = var.node_username
private_key = var.ssh_private_key_pem
}
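# Fetch the kubeconfig generated by k3s and rewrite the 127.0.0.1 API endpoint
# to the node's public IP so it is reachable from outside the node.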
resource "ssh_resource" "retrieve_config" {
depends_on = [
ssh_resource.install_k3s
]
host = var.node_public_ip
commands = [
"sudo sed \"s/127.0.0.1/${var.node_public_ip}/g\" /etc/rancher/k3s/k3s.yaml"
]
user = var.node_username
private_key = var.ssh_private_key_pem
}

View File

@ -0,0 +1,39 @@
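# cert-manager ClusterIssuer that answers ACME HTTP-01 challenges through the
# Traefik ingress class; referenced by the letsencrypt annotations on the ingresses.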
resource "kubectl_manifest" "clusterissuer-letsencrypt" {
depends_on = [
ssh_resource.retrieve_config,
helm_release.cert_manager,
module.traefik,
]
yaml_body = <<YAML
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: engineering@fourlights.nl
privateKeySecretRef:
name: letsencrypt
solvers:
- http01:
ingress:
ingressClassName: traefik
YAML
}
# resource "kubernetes_manifest" "traefik-config" {
# depends_on = [local_file.kube_config_server_yaml]
# manifest = {
# apiVersion = "helm.cattle.io/v1"
# kind = "HelmChartConfig"
# metadata = {
# name = "traefik"
# namespace = "kube-system"
# }
# spec = {
# valuesContent = file("${path.module}/traefik-config.yaml")
# }
# }
# }

View File

@ -0,0 +1,18 @@
# Local resources
# Save kubeconfig file for interacting with the RKE cluster on your local machine
resource "local_file" "kube_config_server_yaml" {
filename = format("%s/%s", path.root, "kube_config_server.yaml")
content = ssh_resource.retrieve_config.result
}
locals {
rancher_server_dns = join(".", ["rancher", var.server_dns])
minio_server_dns = join(".", ["storage", var.server_dns])
vault_server_dns = join(".", ["vault", var.server_dns])
}
# resource "local_file" "kube_config_workload_yaml" {
# filename = format("%s/%s", path.root, "kube_config_workload.yaml")
# content = rancher2_cluster_v2.quickstart_workload.kube_config
# }

View File

@ -0,0 +1,7 @@
module "minio" {
source = "../../modules/minio"
config_path = local_file.kube_config_server_yaml.filename
hostname = local.minio_server_dns
}

View File

@ -0,0 +1,47 @@
# Outputs
output "rancher_uri" {
value = "https://${local.rancher_server_dns}"
}
output "rancher_server_admin_password" {
value = var.admin_password
sensitive = true
}
output "minio_api_uri" {
value = module.minio.minio_api_uri
}
output "minio_admin_uri" {
value = module.minio.minio_admin_uri
}
output "minio_root_user" {
value = module.minio.minio_root_user
}
output "minio_root_password" {
value = module.minio.minio_root_password
sensitive = true
}
output "vault_uri" {
value = "https://${local.vault_server_dns}"
}
output "vault_root_token" {
value = module.vault.vault_root_token
sensitive = true
}
# output "custom_cluster_command" {
# value = rancher2_cluster_v2.quickstart_workload.cluster_registration_token.0.insecure_node_command
# description = "Docker command used to add a node to the quickstart cluster"
# }
#
# output "custom_cluster_windows_command" {
# value = rancher2_cluster_v2.quickstart_workload.cluster_registration_token.0.insecure_windows_node_command
# description = "Docker command used to add a windows node to the quickstart cluster"
# }

View File

@ -0,0 +1,56 @@
terraform {
required_providers {
helm = {
source = "hashicorp/helm"
version = "2.10.1"
}
local = {
source = "hashicorp/local"
version = "2.4.0"
}
rancher2 = {
source = "rancher/rancher2"
version = "3.0.0"
}
ssh = {
source = "loafoe/ssh"
version = "2.6.0"
}
kubectl = {
source = "gavinbunney/kubectl"
version = "1.14.0"
}
}
required_version = ">= 1.0.0"
}
provider "helm" {
kubernetes {
config_path = local_file.kube_config_server_yaml.filename
}
}
provider "kubectl" {
config_path = local_file.kube_config_server_yaml.filename
}
# Rancher2 bootstrapping provider
provider "rancher2" {
alias = "bootstrap"
api_url = "https://${local.rancher_server_dns}"
insecure = true
# ca_certs = data.kubernetes_secret.rancher_cert.data["ca.crt"]
bootstrap = true
}
# Rancher2 administration provider
provider "rancher2" {
alias = "admin"
api_url = "https://${local.rancher_server_dns}"
insecure = true
# ca_certs = data.kubernetes_secret.rancher_cert.data["ca.crt"]
token_key = rancher2_bootstrap.admin.token
timeout = "300s"
}

View File

@ -0,0 +1,17 @@
ingress:
tls:
source: secret
ingressClassName: traefik
includeDefaultExtraAnnotations: false
extraAnnotations:
kubernetes.io/ingress.class: traefik
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/router.entrypoints: web,websecure
replicas: 1
letsEncrypt:
environment: staging
email: engineering+bouwroute@fourlights.nl
ingress:
class: traefik

View File

@ -0,0 +1,21 @@
# Rancher resources
# Initialize Rancher server
resource "rancher2_bootstrap" "admin" {
depends_on = [
helm_release.rancher_server
]
provider = rancher2.bootstrap
password = var.admin_password
telemetry = true
}
#
## Create custom managed cluster for quickstart
#resource "rancher2_cluster_v2" "quickstart_workload" {
# provider = rancher2.admin
#
# name = var.workload_cluster_name
# kubernetes_version = var.workload_kubernetes_version
#}

View File

@ -0,0 +1,36 @@
# Admin password to use for Rancher server bootstrap, min. 12 characters
admin_password = ""
# Public IP of compute node for Rancher cluster
node_public_ip = ""
# Username used for SSH access to the Rancher server cluster node
node_username = ""
# DNS host name of the Rancher server
rancher_server_dns = ""
# Private key used for SSH access to the Rancher server cluster node
ssh_private_key_pem = ""
# Name for created custom workload cluster
workload_cluster_name = ""
# Version of cert-manager to install alongside Rancher (format: 0.0.0)
cert_manager_version = "1.11.0"
# Internal IP of compute node for Rancher cluster
node_internal_ip = ""
# The helm repository, where the Rancher helm chart is installed from
rancher_helm_repository = "https://releases.rancher.com/server-charts/latest"
# Kubernetes version to use for Rancher server cluster
rancher_kubernetes_version = "v1.24.14+k3s1"
# Rancher server version (format v0.0.0)
rancher_version = "2.7.9"
# Kubernetes version to use for managed workload cluster
workload_kubernetes_version = "v1.24.14+rke2r1"

View File

@ -0,0 +1,115 @@
apiVersion: helm.cattle.io/v1
kind: HelmChartConfig
metadata:
name: traefik
namespace: kube-system
spec:
valuesContent: |-
image:
name: traefik
tag: v3.1.2
ports:
web:
asDefault: true
expose:
default: true
exposedPort: 80
port: 8000
protocol: TCP
proxyProtocol:
trustedIPs: [127.0.0.1/8,10.0.0.0/8]
transport:
          respondingTimeouts:
writeTimeout: 0
idleTimeout: 0
readTimeout: 0
websecure:
expose:
default: true
exposedPort: 443
port: 8443
protocol: TCP
proxyProtocol:
trustedIPs: [127.0.0.1/8,10.0.0.0/8]
transport:
          respondingTimeouts:
writeTimeout: 0
idleTimeout: 0
readTimeout: 0
ssh:
expose:
default: true
exposedPort: 2223
port: 2223
protocol: TCP
proxyProtocol:
trustedIPs: [127.0.0.1/8,10.0.0.0/8]
transport:
          respondingTimeouts:
writeTimeout: 600s
idleTimeout: 60s
readTimeout: 600s
imaps:
expose:
default: true
exposedPort: 993
port: 8993
protocol: TCP
proxyProtocol:
trustedIPs: [127.0.0.1/8,10.0.0.0/8]
transport:
          respondingTimeouts:
writeTimeout: 600s
idleTimeout: 300s
readTimeout: 600s
pop3s:
expose:
default: true
exposedPort: 995
port: 8995
protocol: TCP
proxyProtocol:
trustedIPs: [127.0.0.1/8,10.0.0.0/8]
transport:
          respondingTimeouts:
writeTimeout: 600s
idleTimeout: 300s
readTimeout: 600s
sieve:
expose:
default: true
exposedPort: 4190
port: 4190
protocol: TCP
proxyProtocol:
trustedIPs: [127.0.0.1/8,10.0.0.0/8]
transport:
          respondingTimeouts:
writeTimeout: 600s
idleTimeout: 300s
readTimeout: 600s
smtp:
expose:
default: true
exposedPort: 25
port: 8025
protocol: TCP
transport:
          respondingTimeouts:
writeTimeout: 300s
idleTimeout: 300s
readTimeout: 300s
smtps:
expose:
default: true
exposedPort: 465
port: 8465
protocol: TCP
proxyProtocol:
trustedIPs: [127.0.0.1/8,10.0.0.0/8]
transport:
          respondingTimeouts:
writeTimeout: 300s
idleTimeout: 300s
readTimeout: 300s

View File

@ -0,0 +1,5 @@
module "traefik" {
source = "../../modules/traefik"
config_path = local_file.kube_config_server_yaml.filename
}

View File

@ -0,0 +1,102 @@
# Variables for rancher common module
# Required
variable "node_public_ip" {
type = string
description = "Public IP of compute node for Rancher cluster"
}
variable "node_internal_ip" {
type = string
description = "Internal IP of compute node for Rancher cluster"
default = ""
}
# Required
variable "node_username" {
type = string
description = "Username used for SSH access to the Rancher server cluster node"
}
# Required
variable "ssh_private_key_pem" {
type = string
description = "Private key used for SSH access to the Rancher server cluster node"
}
variable "rancher_kubernetes_version" {
type = string
description = "Kubernetes version to use for Rancher server cluster"
default = "v1.24.14+k3s1"
}
variable "cert_manager_version" {
type = string
description = "Version of cert-manager to install alongside Rancher (format: 0.0.0)"
default = "1.11.0"
}
variable "rancher_version" {
type = string
description = "Rancher server version (format v0.0.0)"
default = "2.7.9"
}
# Required
variable "server_dns" {
type = string
description = "DNS host name of the server"
}
# Required
variable "admin_password" {
type = string
description = "Admin password to use for Rancher server bootstrap, min. 12 characters"
}
# variable "workload_kubernetes_version" {
# type = string
# description = "Kubernetes version to use for managed workload cluster"
# default = "v1.24.14+rke2r1"
# }
# Required
# variable "workload_cluster_name" {
# type = string
# description = "Name for created custom workload cluster"
# }
variable "rancher_helm_repository" {
type = string
description = "The helm repository, where the Rancher helm chart is installed from"
default = "https://releases.rancher.com/server-charts/latest"
}
variable "cert_manager_email" {
type = string
description = "Email address to use for cert-manager"
default = "engineering+bouwroute@fourlights.nl"
}
# Required
variable "aws_access_key_id" {
description = "AWS Access Key ID for Vault KMS access"
type = string
}
# Required
variable "aws_secret_access_key" {
description = "AWS Secret Access Key for Vault KMS access"
type = string
}
# Required
variable "aws_kms_key_id" {
description = "AWS KMS Key ID for Vault KMS access"
type = string
}
# Required
variable "aws_region" {
description = "AWS KMS Region for Vault KMS access"
type = string
}

View File

@ -0,0 +1,16 @@
module "vault" {
source = "../../modules/vault"
config_path = local_file.kube_config_server_yaml.filename
aws_region = var.aws_region
aws_access_key_id = var.aws_access_key_id
aws_secret_access_key = var.aws_secret_access_key
aws_kms_key_id = var.aws_kms_key_id
node_public_ip = var.node_public_ip
node_username = var.node_username
ssh_private_key_pem = var.ssh_private_key_pem
hostname = local.vault_server_dns
}

View File

@ -0,0 +1,17 @@
provider "helm" {
kubernetes {
config_path = var.config_path
}
}
resource "helm_release" "traefik" {
name = "traefik"
repository = "https://helm.traefik.io/traefik"
chart = "traefik"
namespace = "kube-system"
version = "31.0.0"
values = [
file("${path.module}/traefik-values.yaml")
]
}

View File

@ -0,0 +1,104 @@
ports:
web:
asDefault: true
expose:
default: true
exposedPort: 80
port: 8000
protocol: TCP
proxyProtocol:
trustedIPs: [127.0.0.1/8,10.0.0.0/8]
transport:
      respondingTimeouts:
writeTimeout: 0
idleTimeout: 0
readTimeout: 0
websecure:
expose:
default: true
exposedPort: 443
port: 8443
protocol: TCP
proxyProtocol:
trustedIPs: [127.0.0.1/8,10.0.0.0/8]
transport:
      respondingTimeouts:
writeTimeout: 0
idleTimeout: 0
readTimeout: 0
ssh:
expose:
default: true
exposedPort: 2223
port: 2223
protocol: TCP
proxyProtocol:
trustedIPs: [127.0.0.1/8,10.0.0.0/8]
transport:
      respondingTimeouts:
writeTimeout: 600s
idleTimeout: 60s
readTimeout: 600s
imaps:
expose:
default: true
exposedPort: 993
port: 8993
protocol: TCP
proxyProtocol:
trustedIPs: [127.0.0.1/8,10.0.0.0/8]
transport:
      respondingTimeouts:
writeTimeout: 600s
idleTimeout: 300s
readTimeout: 600s
pop3s:
expose:
default: true
exposedPort: 995
port: 8995
protocol: TCP
proxyProtocol:
trustedIPs: [127.0.0.1/8,10.0.0.0/8]
transport:
      respondingTimeouts:
writeTimeout: 600s
idleTimeout: 300s
readTimeout: 600s
sieve:
expose:
default: true
exposedPort: 4190
port: 4190
protocol: TCP
proxyProtocol:
trustedIPs: [127.0.0.1/8,10.0.0.0/8]
transport:
      respondingTimeouts:
writeTimeout: 600s
idleTimeout: 300s
readTimeout: 600s
smtp:
expose:
default: true
exposedPort: 25
port: 8025
protocol: TCP
transport:
      respondingTimeouts:
writeTimeout: 300s
idleTimeout: 300s
readTimeout: 300s
smtps:
expose:
default: true
exposedPort: 465
port: 8465
protocol: TCP
proxyProtocol:
trustedIPs: [127.0.0.1/8,10.0.0.0/8]
transport:
      respondingTimeouts:
writeTimeout: 300s
idleTimeout: 300s
readTimeout: 300s

View File

@ -0,0 +1,4 @@
variable "config_path" {
description = "Path to the kubeconfig file"
type = string
}

View File

@ -0,0 +1,13 @@
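# Kubernetes Secret holding the AWS credentials consumed by Vault's awskms
# auto-unseal (referenced as extraSecretEnvironmentVars in the Helm values).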
resource "kubectl_manifest" "vault_aws_creds" {
yaml_body = <<YAML
apiVersion: v1
kind: Secret
metadata:
name: vault-aws-creds
namespace: vault
type: Opaque
stringData:
AWS_ACCESS_KEY_ID: ${var.aws_access_key_id}
AWS_SECRET_ACCESS_KEY: ${var.aws_secret_access_key}
YAML
}

View File

@ -0,0 +1,69 @@
resource "helm_release" "vault" {
name = "vault"
repository = "https://helm.releases.hashicorp.com"
chart = "vault"
namespace = "vault"
version = "0.28.1"
create_namespace = true
wait = true
set {
name = "server.ha.enabled"
value = "true"
}
set {
name = "server.ha.raft.enabled"
value = "true"
}
values = [
<<-EOT
server:
ingress:
enabled: true
ingressClassName: traefik
annotations:
kubernetes.io/ingress.class: traefik
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/router.entrypoints: web,websecure
hosts:
- host: ${var.hostname}
paths: []
tls:
- secretName: tls-vault
hosts:
- ${var.hostname}
extraEnvironmentVars:
VAULT_SEAL_TYPE: "awskms"
AWS_REGION: "${var.aws_region}"
VAULT_AWSKMS_SEAL_KEY_ID: "${var.aws_kms_key_id}"
extraSecretEnvironmentVars:
- envName: AWS_ACCESS_KEY_ID
secretName: vault-aws-creds
secretKey: AWS_ACCESS_KEY_ID
- envName: AWS_SECRET_ACCESS_KEY
secretName: vault-aws-creds
secretKey: AWS_SECRET_ACCESS_KEY
EOT
]
}
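# One-time Vault initialisation over SSH; the JSON output of `vault operator init`
# (root token and key material) is written to vault.secret in the Terraform root directory.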
resource "ssh_resource" "vault_init" {
depends_on = [helm_release.vault]
host = var.node_public_ip
user = var.node_username
private_key = var.ssh_private_key_pem
commands = [
"kubectl -n vault exec vault-0 -- vault operator init -format=json"
]
}
resource "local_file" "vault-keys" {
depends_on = [ssh_resource.vault_init]
filename = format("%s/%s", path.root, "vault.secret")
content = ssh_resource.vault_init.result
}

View File

@ -0,0 +1,7 @@
output "vault_uri" {
value = "https://${var.hostname}"
}
output "vault_root_token" {
value = ""
}

View File

@ -0,0 +1,22 @@
terraform {
required_providers {
ssh = {
source = "loafoe/ssh"
version = "2.6.0"
}
kubectl = {
source = "gavinbunney/kubectl"
version = "1.14.0"
}
}
}
provider "helm" {
kubernetes {
config_path = var.config_path
}
}
provider "kubectl" {
config_path = var.config_path
}

View File

@ -0,0 +1,52 @@
# Required
variable "config_path" {
description = "Path to the kubeconfig file"
type = string
}
# Required
variable "node_public_ip" {
type = string
description = "Public IP of compute node for Rancher cluster"
}
# Required
variable "node_username" {
type = string
description = "Username used for SSH access to the Rancher server cluster node"
}
# Required
variable "ssh_private_key_pem" {
type = string
description = "Private key used for SSH access to the Rancher server cluster node"
}
# Required
variable "aws_access_key_id" {
description = "AWS Access Key ID for Vault KMS access"
type = string
}
# Required
variable "aws_secret_access_key" {
description = "AWS Secret Access Key for Vault KMS access"
type = string
}
# Required
variable "aws_kms_key_id" {
description = "AWS KMS Key ID for Vault KMS access"
type = string
}
# Required
variable "aws_region" {
description = "AWS KMS Region for Vault KMS access"
type = string
}
variable "hostname" {
description = "Hostname for the Vault server"
type = string
}

View File

@ -0,0 +1,7 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base
patchesStrategicMerge:
- replica-count.yaml

View File

@ -0,0 +1,14 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: frontend
spec:
replicas: 1
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: backend
spec:
replicas: 1

View File

@ -0,0 +1,7 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base
patchesStrategicMerge:
- resource-limits.yaml

View File

@ -0,0 +1,28 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: frontend
spec:
template:
spec:
containers:
- name: frontend
resources:
limits:
cpu: 1
memory: 1Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: backend
spec:
template:
spec:
containers:
- name: backend
resources:
limits:
cpu: 2
memory: 2Gi