Update devops with local shuttles
This commit is contained in:
parent
23e9e00f35
commit
e368bbd94d
|
|
@ -0,0 +1,39 @@
|
|||
# Local .terraform directories
|
||||
**/.terraform/*
|
||||
|
||||
# .tfstate files
|
||||
*.tfstate
|
||||
*.tfstate.*
|
||||
|
||||
# Crash log files
|
||||
crash.log
|
||||
crash.*.log
|
||||
|
||||
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
|
||||
# password, private keys, and other secrets. These should not be part of version
|
||||
# control as they are data points which are potentially sensitive and subject
|
||||
# to change depending on the environment.
|
||||
*.tfvars
|
||||
*.tfvars.json
|
||||
|
||||
# Ignore override files as they are usually used to override resources locally and so
|
||||
# are not checked in
|
||||
override.tf
|
||||
override.tf.json
|
||||
*_override.tf
|
||||
*_override.tf.json
|
||||
|
||||
# Ignore transient lock info files created by terraform apply
|
||||
.terraform.tfstate.lock.info
|
||||
|
||||
# Include override files you do wish to add to version control using negated pattern
|
||||
# !example_override.tf
|
||||
|
||||
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
|
||||
# example: *tfplan*
|
||||
|
||||
# Ignore CLI configuration files
|
||||
.terraformrc
|
||||
terraform.rc
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,6 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="TerraformProjectSettings">
|
||||
<option name="toolPath" value="/usr/bin/terraform" />
|
||||
</component>
|
||||
</project>
|
||||
|
|
@ -0,0 +1,6 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="VcsDirectoryMappings">
|
||||
<mapping directory="" vcs="Git" />
|
||||
</component>
|
||||
</project>
|
||||
|
|
@ -0,0 +1,6 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="VcsDirectoryMappings">
|
||||
<mapping directory="$PROJECT_DIR$/.." vcs="Git" />
|
||||
</component>
|
||||
</project>
|
||||
|
|
@ -0,0 +1,61 @@
|
|||
{
|
||||
"version": 3,
|
||||
"terraform_version": "1.10.0",
|
||||
"backend": {
|
||||
"type": "s3",
|
||||
"config": {
|
||||
"access_key": "Y2Y1EA9WES29336DP9SE",
|
||||
"acl": null,
|
||||
"allowed_account_ids": null,
|
||||
"assume_role": null,
|
||||
"assume_role_with_web_identity": null,
|
||||
"bucket": "app-365zon",
|
||||
"custom_ca_bundle": null,
|
||||
"dynamodb_endpoint": null,
|
||||
"dynamodb_table": null,
|
||||
"ec2_metadata_service_endpoint": null,
|
||||
"ec2_metadata_service_endpoint_mode": null,
|
||||
"encrypt": null,
|
||||
"endpoint": null,
|
||||
"endpoints": {
|
||||
"dynamodb": null,
|
||||
"iam": null,
|
||||
"s3": "https://storage.bridge.fourlights.dev",
|
||||
"sso": null,
|
||||
"sts": null
|
||||
},
|
||||
"forbidden_account_ids": null,
|
||||
"force_path_style": null,
|
||||
"http_proxy": null,
|
||||
"https_proxy": null,
|
||||
"iam_endpoint": null,
|
||||
"insecure": null,
|
||||
"key": "terraform.tfstate",
|
||||
"kms_key_id": null,
|
||||
"max_retries": null,
|
||||
"no_proxy": null,
|
||||
"profile": null,
|
||||
"region": "eu-central-1",
|
||||
"retry_mode": null,
|
||||
"secret_key": "EzdA75G2LMlWilphFq9snV6HW6KKSp+BmubF4c+5",
|
||||
"shared_config_files": null,
|
||||
"shared_credentials_file": null,
|
||||
"shared_credentials_files": null,
|
||||
"skip_credentials_validation": true,
|
||||
"skip_metadata_api_check": true,
|
||||
"skip_region_validation": true,
|
||||
"skip_requesting_account_id": true,
|
||||
"skip_s3_checksum": null,
|
||||
"sse_customer_key": null,
|
||||
"sts_endpoint": null,
|
||||
"sts_region": null,
|
||||
"token": null,
|
||||
"use_dualstack_endpoint": null,
|
||||
"use_fips_endpoint": null,
|
||||
"use_lockfile": null,
|
||||
"use_path_style": true,
|
||||
"workspace_key_prefix": null
|
||||
},
|
||||
"hash": 1135345461
|
||||
}
|
||||
}
|
||||
|
|
@ -20,11 +20,12 @@ module "minio" {
|
|||
service_name = "storage"
|
||||
namespace = "minio"
|
||||
|
||||
admin_server_dns = "local" # Restricted admin access, access via bridge
|
||||
admin_server_dns = local.cluster_dns # Restricted admin access, access via bridge
|
||||
|
||||
tls = false # TLS termination happens on the bridge ingress
|
||||
admin = true
|
||||
ingressClass = "nginx"
|
||||
storageSize = "40Gi"
|
||||
}
|
||||
|
||||
module "mongodb" {
|
||||
|
|
@ -67,3 +68,22 @@ module "rabbitmq" {
|
|||
# add argocd repo secret
|
||||
# add argocd updated github.com known hosts
|
||||
# add argocd application
|
||||
|
||||
# setup secrets
|
||||
|
||||
resource "vault_kv_secret_v2" "cluster" {
|
||||
mount = var.cluster
|
||||
name = "minio"
|
||||
delete_all_versions = true
|
||||
|
||||
data_json = jsonencode({
|
||||
access_key = minio_iam_service_account.cluster.access_key
|
||||
secret_key = minio_iam_service_account.cluster.secret_key
|
||||
})
|
||||
|
||||
depends_on = [
|
||||
var.wait_on,
|
||||
minio_iam_service_account.cluster
|
||||
]
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
endpoints = { s3 = "https://storage.bridge.fourlights.dev" }
|
||||
access_key = "T8V84SHIVT6MAV424ES0"
|
||||
secret_key = "23+N28yBK+cL3O2t9xsstT8jr2TpK+SgORCVIuxc"
|
||||
access_key = ""
|
||||
secret_key = ""
|
||||
bucket = "management"
|
||||
key = "terraform.tfstate"
|
||||
region = "eu-central-1"
|
||||
|
|
|
|||
|
|
@ -0,0 +1 @@
|
|||
{"Modules":[{"Key":"","Source":"","Dir":"."},{"Key":"bridge-tls","Source":"../../../modules/cluster/tls","Dir":"../../../modules/cluster/tls"},{"Key":"cert_manager","Source":"../../../modules/cert-manager","Dir":"../../../modules/cert-manager"},{"Key":"cert_manager_hetzner","Source":"../../../modules/cert-manager/hetzner","Dir":"../../../modules/cert-manager/hetzner"},{"Key":"cluster-app-365zon","Source":"../../../modules/cluster","Dir":"../../../modules/cluster"},{"Key":"cluster-app-365zon-bootstrap","Source":"../../../modules/cluster/bootstrap","Dir":"../../../modules/cluster/bootstrap"},{"Key":"cluster-app-365zon-vault","Source":"../../../modules/cluster-bootstrap","Dir":"../../../modules/cluster-bootstrap"},{"Key":"cluster-app-365zon.cluster-hcloud","Source":"../cluster-hcloud","Dir":"../../../modules/cluster-hcloud"},{"Key":"cluster-app-365zon.cluster-management","Source":"../cluster-management","Dir":"../../../modules/cluster-management"},{"Key":"cluster-bootstrap","Source":"../../../modules/cluster/bootstrap","Dir":"../../../modules/cluster/bootstrap"},{"Key":"cluster-management","Source":"../../../modules/cluster/management","Dir":"../../../modules/cluster/management"},{"Key":"cluster-vault","Source":"../../../modules/cluster-vault","Dir":"../../../modules/cluster-vault"},{"Key":"cluster_management","Source":"../../../modules/cluster-management","Dir":"../../../modules/cluster-management"},{"Key":"k3s","Source":"../../../modules/cluster/init-k3s","Dir":"../../../modules/cluster/init-k3s"},{"Key":"letsencrypt","Source":"../../../modules/letsencrypt","Dir":"../../../modules/letsencrypt"},{"Key":"mijn_365zon","Source":"../../../modules/mijn-365zon-nl","Dir":"../../../modules/mijn-365zon-nl"},{"Key":"minio","Source":"../../../modules/minio","Dir":"../../../modules/minio"},{"Key":"rancher","Source":"../../../modules/rancher","Dir":"../../../modules/rancher"},{"Key":"traefik","Source":"../../../modules/traefik","Dir":"../../../modules/traefik"},{"Key":"vault","Sou
rce":"../../../modules/vault","Dir":"../../../modules/vault"}]}
|
||||
|
|
@ -1,6 +1,6 @@
|
|||
endpoints = { s3 = "https://storage.bridge.fourlights.dev" }
|
||||
access_key = "T8V84SHIVT6MAV424ES0"
|
||||
secret_key = "23+N28yBK+cL3O2t9xsstT8jr2TpK+SgORCVIuxc"
|
||||
access_key = ""
|
||||
secret_key = ""
|
||||
bucket = "management"
|
||||
region = "eu-central-1"
|
||||
minio_server = "storage.bridge.fourlights.dev"
|
||||
|
|
|
|||
|
|
@ -0,0 +1,3 @@
|
|||
locals {
|
||||
service_uri = var.service_uri == null ? join(".", [var.service_name, var.server_dns]) : var.service_uri
|
||||
}
|
||||
|
|
@ -0,0 +1,19 @@
|
|||
resource "helm_release" "homepage" {
|
||||
name = "homepage"
|
||||
repository = "https://jameswynn.github.io/helm-charts"
|
||||
chart = "homepage"
|
||||
namespace = var.namespace
|
||||
create_namespace = true
|
||||
version = "2.0.1"
|
||||
|
||||
values = [
|
||||
templatefile("${path.module}/values.yaml.tftpl", {
|
||||
service_uri = local.service_uri,
|
||||
})
|
||||
]
|
||||
}
|
||||
|
||||
output "installed" {
|
||||
value = true
|
||||
depends_on = [helm_release.homepage]
|
||||
}
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
output "homepage_uri" {
|
||||
value = "https://${local.service_uri}"
|
||||
}
|
||||
|
||||
output "homepage_server" {
|
||||
value = local.service_uri
|
||||
}
|
||||
|
|
@ -0,0 +1,33 @@
|
|||
locals {
|
||||
k8s_config = yamldecode(var.k8s_config_yaml)
|
||||
k8s_host = local.k8s_config.clusters[0].cluster.server
|
||||
k8s_auth = try(
|
||||
{
|
||||
token = local.k8s_config.users[0].user.token
|
||||
using_token = true
|
||||
},
|
||||
{
|
||||
client_certificate = base64decode(local.k8s_config.users[0].user["client-certificate-data"])
|
||||
client_key = base64decode(local.k8s_config.users[0].user["client-key-data"])
|
||||
using_token = false
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
provider "kubernetes" {
|
||||
host = local.k8s_host
|
||||
insecure = true
|
||||
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
||||
client_certificate = local.k8s_auth.using_token ? null : local.k8s_auth.client_certificate
|
||||
client_key = local.k8s_auth.using_token ? null : local.k8s_auth.client_key
|
||||
}
|
||||
|
||||
provider "helm" {
|
||||
kubernetes {
|
||||
host = local.k8s_host
|
||||
insecure = true
|
||||
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
||||
client_certificate = local.k8s_auth.using_token ? null : local.k8s_auth.client_certificate
|
||||
client_key = local.k8s_auth.using_token ? null : local.k8s_auth.client_key
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,65 @@
|
|||
config:
|
||||
bookmarks:
|
||||
- Developer:
|
||||
- Github:
|
||||
- abbr: GH
|
||||
href: https://github.com/
|
||||
services:
|
||||
- My First Group:
|
||||
- My First Service:
|
||||
href: http://localhost/
|
||||
description: Homepage is awesome
|
||||
|
||||
- My Second Group:
|
||||
- My Second Service:
|
||||
href: http://localhost/
|
||||
description: Homepage is the best
|
||||
|
||||
- My Third Group:
|
||||
- My Third Service:
|
||||
href: http://localhost/
|
||||
description: Homepage is 😎
|
||||
widgets:
|
||||
# show the kubernetes widget, with the cluster summary and individual nodes
|
||||
- kubernetes:
|
||||
cluster:
|
||||
show: true
|
||||
cpu: true
|
||||
memory: true
|
||||
showLabel: true
|
||||
label: "cluster"
|
||||
nodes:
|
||||
show: true
|
||||
cpu: true
|
||||
memory: true
|
||||
showLabel: true
|
||||
- search:
|
||||
provider: duckduckgo
|
||||
target: _blank
|
||||
kubernetes:
|
||||
mode: cluster
|
||||
settings:
|
||||
|
||||
# The service account is necessary to allow discovery of other services
|
||||
serviceAccount:
|
||||
create: true
|
||||
name: homepage
|
||||
|
||||
# This enables the service account to access the necessary resources
|
||||
enableRbac: true
|
||||
|
||||
ingress:
|
||||
main:
|
||||
enabled: true
|
||||
annotations:
|
||||
# Example annotations to add Homepage to your Homepage!
|
||||
gethomepage.dev/enabled: "true"
|
||||
gethomepage.dev/name: "Homepage"
|
||||
gethomepage.dev/description: "Dynamically Detected Homepage"
|
||||
gethomepage.dev/group: "Dynamic"
|
||||
gethomepage.dev/icon: "homepage.png"
|
||||
hosts:
|
||||
- host: ${service_uri}
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
|
|
@ -0,0 +1,41 @@
|
|||
variable "service_name" {
|
||||
type = string
|
||||
description = "Name of the service"
|
||||
default = "storage"
|
||||
}
|
||||
|
||||
variable "server_dns" {
|
||||
type = string
|
||||
description = "Domain for the server"
|
||||
}
|
||||
|
||||
variable "service_uri" {
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "k8s_config_yaml" {
|
||||
description = "Content of the k8s config yaml file"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "wait_on" {
|
||||
type = any
|
||||
description = "Resources to wait on"
|
||||
default = true
|
||||
}
|
||||
|
||||
variable "namespace" {
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "tls" {
|
||||
description = "Enable TLS"
|
||||
type = bool
|
||||
default = false
|
||||
}
|
||||
|
||||
variable "ingressClass" {
|
||||
description = "Ingress class to use"
|
||||
type = string
|
||||
default = "nginx"
|
||||
}
|
||||
|
|
@ -4,6 +4,9 @@ locals {
|
|||
ingress = {
|
||||
ingressClassName = "traefik"
|
||||
}
|
||||
selector = {
|
||||
dnsNames = ["mijn.365zon.nl"]
|
||||
}
|
||||
}
|
||||
}
|
||||
solvers = concat([for solver in var.extraSolvers : solver], [local.httpSolver])
|
||||
|
|
|
|||
|
|
@ -48,7 +48,7 @@ resource "helm_release" "minio" {
|
|||
|
||||
set {
|
||||
name = "persistence.size"
|
||||
value = "6Gi"
|
||||
value = var.storageSize
|
||||
}
|
||||
|
||||
values = [
|
||||
|
|
|
|||
|
|
@ -1,3 +1,6 @@
|
|||
tls:
|
||||
enabled: ${tobool(tls)}
|
||||
|
||||
ingress:
|
||||
enabled: ${tobool(admin)}
|
||||
tls: ${tobool(tls)}
|
||||
|
|
@ -15,6 +18,10 @@ ingress:
|
|||
traefik.ingress.kubernetes.io/router.middlewares: default-preserve-host-headers@kubernetescrd
|
||||
%{ endif }
|
||||
%{ endif }
|
||||
%{ if ingressClass == "nginx" }
|
||||
ingress.kubernetes.io/proxy-body-size: "0"
|
||||
nginx.ingress.kubernetes.io/proxy-body-size: "0"
|
||||
%{ endif }
|
||||
|
||||
apiIngress:
|
||||
enabled: true
|
||||
|
|
@ -33,3 +40,7 @@ apiIngress:
|
|||
traefik.ingress.kubernetes.io/router.middlewares: default-preserve-host-headers@kubernetescrd
|
||||
%{ endif }
|
||||
%{ endif }
|
||||
%{ if ingressClass == "nginx" }
|
||||
ingress.kubernetes.io/proxy-body-size: "0"
|
||||
nginx.ingress.kubernetes.io/proxy-body-size: "0"
|
||||
%{ endif }
|
||||
|
|
|
|||
|
|
@ -59,3 +59,9 @@ variable "ingressClass" {
|
|||
type = string
|
||||
default = "nginx"
|
||||
}
|
||||
|
||||
variable "storageSize" {
|
||||
type = string
|
||||
default = "6Gi"
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1,6 +1,43 @@
|
|||
architecture: replicaset
|
||||
replicaCount: ${ replicas }
|
||||
|
||||
commonAnnotations:
|
||||
mongodb.com/timeouts: "true"
|
||||
|
||||
mongodb:
|
||||
extraFlags:
|
||||
- "--oplogSizeMB=10240"
|
||||
- "--setParameter=electionTimeoutMillis=10000"
|
||||
- "--setParameter=heartbeatIntervalMillis=2000"
|
||||
- "--setParameter=catchUpTimeoutMillis=30000"
|
||||
- "--setParameter=catchUpTakeoverDelayMillis=30000"
|
||||
|
||||
# More forgiving probe settings
|
||||
readinessProbe:
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 10
|
||||
timeoutSeconds: 5
|
||||
failureThreshold: 3
|
||||
successThreshold: 1
|
||||
|
||||
livenessProbe:
|
||||
initialDelaySeconds: 60
|
||||
periodSeconds: 20
|
||||
timeoutSeconds: 5
|
||||
failureThreshold: 6
|
||||
|
||||
# Proper shutdown handling
|
||||
terminationGracePeriodSeconds: 300
|
||||
|
||||
# Ensure pods are distributed across nodes
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- labelSelector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/component: mongodb
|
||||
topologyKey: kubernetes.io/hostname
|
||||
|
||||
auth:
|
||||
enabled: true
|
||||
existingSecret: mongodb-auth
|
||||
|
|
|
|||
|
|
@ -0,0 +1,8 @@
|
|||
# Default ignored files
|
||||
/shelf/
|
||||
/workspace.xml
|
||||
# Editor-based HTTP Client requests
|
||||
/httpRequests/
|
||||
# Datasource local storage ignored files
|
||||
/dataSources/
|
||||
/dataSources.local.xml
|
||||
|
|
@ -0,0 +1,8 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="ProjectModuleManager">
|
||||
<modules>
|
||||
<module fileurl="file://$PROJECT_DIR$/.idea/shuttles.iml" filepath="$PROJECT_DIR$/.idea/shuttles.iml" />
|
||||
</modules>
|
||||
</component>
|
||||
</project>
|
||||
|
|
@ -0,0 +1,12 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<module type="WEB_MODULE" version="4">
|
||||
<component name="NewModuleRootManager">
|
||||
<content url="file://$MODULE_DIR$">
|
||||
<excludeFolder url="file://$MODULE_DIR$/.tmp" />
|
||||
<excludeFolder url="file://$MODULE_DIR$/temp" />
|
||||
<excludeFolder url="file://$MODULE_DIR$/tmp" />
|
||||
</content>
|
||||
<orderEntry type="inheritedJdk" />
|
||||
<orderEntry type="sourceFolder" forTests="false" />
|
||||
</component>
|
||||
</module>
|
||||
|
|
@ -0,0 +1,6 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="TerraformProjectSettings">
|
||||
<option name="toolPath" value="/usr/bin/terraform" />
|
||||
</component>
|
||||
</project>
|
||||
|
|
@ -0,0 +1,6 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="VcsDirectoryMappings">
|
||||
<mapping directory="$PROJECT_DIR$/.." vcs="Git" />
|
||||
</component>
|
||||
</project>
|
||||
|
|
@ -0,0 +1,97 @@
|
|||
name: cloud-init-arch
|
||||
description: Cloud-init profile for Arch instances
|
||||
devices:
|
||||
aadisable:
|
||||
path: /sys/module/nf_conntrack/parameters/hashsize
|
||||
source: /sys/module/nf_conntrack/parameters/hashsize
|
||||
type: disk
|
||||
aadisable2:
|
||||
path: /proc/sys/net/netfilter/nf_conntrack_max
|
||||
source: /proc/sys/net/netfilter/nf_conntrack_max
|
||||
type: disk
|
||||
aadisable3:
|
||||
path: /dev/kmsg
|
||||
source: /dev/kmsg
|
||||
type: unix-char
|
||||
aadisable4:
|
||||
path: /sys/fs/bpf
|
||||
source: /sys/fs/bpf
|
||||
type: disk
|
||||
config:
|
||||
security.privileged: 'true'
|
||||
security.nesting: 'true'
|
||||
cloud-init.user-data: |
|
||||
#cloud-config
|
||||
package_update: true
|
||||
package_upgrade: true
|
||||
package_reboot_if_required: true
|
||||
packages:
|
||||
- vim
|
||||
- zsh
|
||||
- bash
|
||||
- sudo
|
||||
- curl
|
||||
- openssh
|
||||
write_files:
|
||||
- path: /etc/sysctl.d/99-kubernetes-cri.conf
|
||||
content: |
|
||||
net.bridge.bridge-nf-call-iptables = 1
|
||||
net.ipv4.ip_forward = 1
|
||||
net.bridge.bridge-nf-call-ip6tables = 1
|
||||
- path: /etc/ssh/sshd_config
|
||||
permissions: '0600'
|
||||
content: |
|
||||
HostKey /etc/ssh/ssh_host_ed25519_key
|
||||
HostKey /etc/ssh/ssh_host_rsa_key
|
||||
|
||||
PermitRootLogin no
|
||||
PubkeyAuthentication yes
|
||||
PasswordAuthentication no
|
||||
PermitEmptyPasswords no
|
||||
ChallengeResponseAuthentication no
|
||||
KbdInteractiveAuthentication no
|
||||
UsePAM yes
|
||||
|
||||
Protocol 2
|
||||
KexAlgorithms curve25519-sha256@libssh.org,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512
|
||||
Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com
|
||||
MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com
|
||||
|
||||
SyslogFacility AUTH
|
||||
LogLevel VERBOSE
|
||||
|
||||
X11Forwarding no
|
||||
PrintMotd no
|
||||
TCPKeepAlive yes
|
||||
Compression no
|
||||
|
||||
MaxAuthTries 3
|
||||
MaxSessions 2
|
||||
LoginGraceTime 30
|
||||
ClientAliveInterval 300
|
||||
ClientAliveCountMax 2
|
||||
|
||||
AcceptEnv LANG LC_*
|
||||
|
||||
Subsystem sftp internal-sftp
|
||||
|
||||
AddressFamily inet
|
||||
users:
|
||||
- name: picard
|
||||
shell: /bin/zsh
|
||||
groups: [wheel]
|
||||
sudo: ALL=(ALL) NOPASSWD:ALL
|
||||
ssh_authorized_keys:
|
||||
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILwFgFIm6DIbm+t6kIR5YVdgLE+BmaxRzXFrvSkkCyPk thomas@fourlights.nl
|
||||
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKffoo0kKezQNLUOSawxDohmVtdor8mvzzItqrRXJvTW thomas@fourlights.nl
|
||||
runcmd:
|
||||
- systemctl enable --now sshd
|
||||
- pacman -Rdd iptables --noconfirm && pacman -S iptables-nft --noconfirm
|
||||
linux.kernel_modules: >-
|
||||
ip_vs,ip_vs_rr,ip_vs_wrr,ip_vs_sh,ip_tables,ip6_tables,netlink_diag,nf_nat,overlay,br_netfilter
|
||||
raw.lxc: |
|
||||
lxc.apparmor.profile=unconfined
|
||||
lxc.mount.auto=proc:rw sys:rw cgroup:rw
|
||||
lxc.cgroup.devices.allow=a
|
||||
lxc.cap.drop=
|
||||
project: default
|
||||
Binary file not shown.
|
|
@ -0,0 +1,19 @@
|
|||
apiVersion: v1
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJlRENDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUzTXprM09ESTROelF3SGhjTk1qVXdNakUzTURrd01URTBXaGNOTXpVd01qRTFNRGt3TVRFMApXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUzTXprM09ESTROelF3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFUWVNEV1Jwbmd6TE5ySGphTmhqdmM1SU82a2dibVpwaER4WVROTG11MjAKaWxaQnZLRlZRdW5kV3ZEQ1VrcGJNRjNsOTRuSmxaYVByK3lDSnJpVVh0UjZvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVVQ5bVZxTGcvSFBCUS91L3MzbHAwCjhJQ0RDc013Q2dZSUtvWkl6ajBFQXdJRFNRQXdSZ0loQUpjMkJkMjd0SzNZTFpwa01yOFNMSEIvbngzd1E1MU0KRnRaYnBNVzJudVNXQWlFQTMyUmcyVHZNQW9LYll5bnhySkk3U3g5eWszZHFsSWd5TW15d2M5d1JicmM9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
|
||||
server: https://10.110.36.47:6443
|
||||
name: default
|
||||
contexts:
|
||||
- context:
|
||||
cluster: default
|
||||
user: default
|
||||
name: default
|
||||
current-context: default
|
||||
kind: Config
|
||||
preferences: {}
|
||||
users:
|
||||
- name: default
|
||||
user:
|
||||
client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrakNDQVRlZ0F3SUJBZ0lJZFh2OWlXRHR6SE13Q2dZSUtvWkl6ajBFQXdJd0l6RWhNQjhHQTFVRUF3d1kKYXpOekxXTnNhV1Z1ZEMxallVQXhOek01TnpneU9EYzBNQjRYRFRJMU1ESXhOekE1TURFeE5Gb1hEVEkyTURJeApOekE1TURFeE5Gb3dNREVYTUJVR0ExVUVDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhGVEFUQmdOVkJBTVRESE41CmMzUmxiVHBoWkcxcGJqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJKNlNVZm5ESVJndVRDMjkKaWFjVTdTM3VPWkw1RERGZjJPQi9IakdTWEErQlRGaE5VOGtMSHBxZlZYeWVKbHNkd09mR1QvL2JQbENsWFYvdQowc0wyTW5halNEQkdNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBakFmCkJnTlZIU01FR0RBV2dCUXdoZkJDTWRocVpXMW96WlEzZG84d1VYOEpCREFLQmdncWhrak9QUVFEQWdOSkFEQkcKQWlFQXczSFpKY1cwaGI3ZUwxSktvcTJ2cExFaFVxVncxRG1oTGJtcUNQTVdmcEFDSVFDRkhXcDhoTTNMdTROTgpGUnYxc2pkYS93VjdmSVpUcUsyZHVNOUNPQVc5emc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCi0tLS0tQkVHSU4gQ0VSVElGSUNBVEUtLS0tLQpNSUlCZHpDQ0FSMmdBd0lCQWdJQkFEQUtCZ2dxaGtqT1BRUURBakFqTVNFd0h3WURWUVFEREJock0zTXRZMnhwClpXNTBMV05oUURFM016azNPREk0TnpRd0hoY05NalV3TWpFM01Ea3dNVEUwV2hjTk16VXdNakUxTURrd01URTAKV2pBak1TRXdId1lEVlFRRERCaHJNM010WTJ4cFpXNTBMV05oUURFM016azNPREk0TnpRd1dUQVRCZ2NxaGtqTwpQUUlCQmdncWhrak9QUU1CQndOQ0FBUjJCcXE5cVhESmZGeVQ1VVpEY3Z6SHVPdDg2TEZ5WTlDb1oxL0xxeldGClZMdHVQYUFXc3BUdUtZckJieTRZRlBQQlQ1M0RkS1F5cjhhWG5HUDRWenlxbzBJd1FEQU9CZ05WSFE4QkFmOEUKQkFNQ0FxUXdEd1lEVlIwVEFRSC9CQVV3QXdFQi96QWRCZ05WSFE0RUZnUVVNSVh3UWpIWWFtVnRhTTJVTjNhUApNRkYvQ1FRd0NnWUlLb1pJemowRUF3SURTQUF3UlFJZ1lmS01YQ3lFelBmM05wN3paLzVYTnFxeTdjTDBpMXBWCkpjZzNzYmtMbXB3Q0lRRDlzYVpmekswRlUrNWljWFpLZmUyVFg0WW5sNS96aFVGR2FHb2RTb1ovUXc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
|
||||
client-key-data: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUtlQVpqUzhNM1ZBd2l6cWo0UDN6RURuQmNaYldrcDJPekt2VlNpUSs0azRvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFbnBKUitjTWhHQzVNTGIySnB4VHRMZTQ1a3ZrTU1WL1k0SDhlTVpKY0Q0Rk1XRTFUeVFzZQptcDlWZko0bVd4M0E1OFpQLzlzK1VLVmRYKzdTd3ZZeWRnPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=
|
||||
|
|
@ -0,0 +1,149 @@
|
|||
#!/usr/bin/env -S deno run --allow-run --allow-read --allow-write
|
||||
|
||||
import { Command } from "https://deno.land/x/cliffy@v1.0.0-rc.4/command/mod.ts";
|
||||
import { delay } from "https://deno.land/std/async/mod.ts";
|
||||
|
||||
const alpineImage = "alpine/edge/cloud"
|
||||
const alpineConfig = ['--profile', 'cloud-init-alpine']
|
||||
const archImage = "archlinux/current/cloud"
|
||||
const archConfig = ['--profile', 'cloud-init-arch']
|
||||
|
||||
|
||||
const image = archImage
|
||||
const config = archConfig
|
||||
|
||||
const findIP4 = (name: string, nodeList: any) => {
|
||||
const ip4 = nodeList?.find((n) => n.name === name)?.state?.network?.eth0?.addresses?.find((n) => n.family === 'inet')?.address;
|
||||
return ip4;
|
||||
}
|
||||
|
||||
const setupCluster = async (numMasters: number) => {
|
||||
const hostname = await Deno.run({
|
||||
cmd: ["hostnamectl", "hostname"],
|
||||
stdout: "piped",
|
||||
}).output().then((output) => new TextDecoder().decode(output).trim());
|
||||
|
||||
const user = await Deno.run({
|
||||
cmd: ["whoami"],
|
||||
stdout: "piped",
|
||||
}).output().then((output) => new TextDecoder().decode(output).trim());
|
||||
|
||||
const sshKeyPubFileName = `/home/${user}/.ssh/nl.fourlights.${hostname}.pub`;
|
||||
const sshKeyPrivateFileName = `/home/${user}/.ssh/nl.fourlights.${hostname}`;
|
||||
|
||||
// Step 1: Create Low-Resource Profile (if not exists)
|
||||
const profileExists = await Deno.run({
|
||||
cmd: ["incus", "profile", "show", "low-resource"],
|
||||
stdout: "null",
|
||||
stderr: "null",
|
||||
}).status().then((status) => status.success);
|
||||
|
||||
if (!profileExists) {
|
||||
await Deno.run({
|
||||
cmd: ["incus", "profile", "create", "low-resource"],
|
||||
}).status();
|
||||
await Deno.run({
|
||||
cmd: ["incus", "profile", "set", "low-resource", "limits.cpu=1", "limits.memory=512MB"],
|
||||
}).status();
|
||||
await Deno.run({
|
||||
cmd: ["incus", "profile", "device", "add", "low-resource", "root", "disk", "pool=default", "path=/"],
|
||||
}).status();
|
||||
await Deno.run({
|
||||
cmd: ["incus", "profile", "device", "add", "low-resource", "eth-0", "nic", "network=incusbr0"],
|
||||
}).status();
|
||||
console.log("✅ Low-resource profile created.");
|
||||
} else {
|
||||
console.log("⏩ Low-resource profile already exists.");
|
||||
}
|
||||
|
||||
|
||||
const sshKey = await Deno.readTextFile(sshKeyPubFileName);
|
||||
|
||||
// Step 3: Launch VMs (if not already running)
|
||||
for (let i = 1; i <= numMasters; i++) {
|
||||
const vmName = `k3s-master${i}`;
|
||||
const vmExists = await Deno.run({
|
||||
cmd: ["incus", "list", vmName, "--format", "csv"],
|
||||
stdout: "piped",
|
||||
}).output().then((output) => new TextDecoder().decode(output).trim() !== "");
|
||||
|
||||
if (!vmExists) {
|
||||
await Deno.run({
|
||||
cmd: ["incus", "launch", `images:${image}`, vmName, "--profile", "low-resource", "-c", "user.timezone=\"Europe/Amsterdam\"", "-c", `user.ssh_key=\"${sshKey}\"`, ...config],
|
||||
}).status();
|
||||
console.log(`✅ VM ${vmName} launched.`);
|
||||
} else {
|
||||
console.log(`⏩ VM ${vmName} already exists.`);
|
||||
}
|
||||
}
|
||||
|
||||
// Step 4: Install k3sup (if not installed)
|
||||
const k3supInstalled = await Deno.run({
|
||||
cmd: ["which", "k3sup"],
|
||||
stdout: "null",
|
||||
stderr: "null",
|
||||
}).status().then((status) => status.success);
|
||||
|
||||
if (!k3supInstalled) {
|
||||
await Deno.run({
|
||||
cmd: ["sh", "-c", "curl -sLS https://get.k3sup.dev | sh"],
|
||||
}).status();
|
||||
console.log("✅ k3sup installed.");
|
||||
} else {
|
||||
console.log("⏩ k3sup already installed.");
|
||||
}
|
||||
|
||||
// Step 5: Bootstrap First Master Node (if not already bootstrapped)
|
||||
let firstMasterIP;
|
||||
let nodes;
|
||||
while (firstMasterIP === undefined) {
|
||||
nodes = await Deno.run({
|
||||
cmd: ["incus", "list", "--format", "json"],
|
||||
stdout: "piped",
|
||||
}).output().then((output) => JSON.parse(new TextDecoder().decode(output)));
|
||||
firstMasterIP = findIP4('k3s-master1', nodes)
|
||||
await delay(1000)
|
||||
}
|
||||
|
||||
const kubeconfigExists = await Deno.stat("./kubeconfig").then(() => true).catch(() => false);
|
||||
|
||||
if (!kubeconfigExists) {
|
||||
await Deno.run({
|
||||
cmd: ["k3sup", "install", "--ip", firstMasterIP, "--user", "picard", "--cluster", "--ssh-key", sshKeyPrivateFileName],
|
||||
}).status();
|
||||
console.log("✅ First master node bootstrapped.");
|
||||
} else {
|
||||
console.log("⏩ First master node already bootstrapped.");
|
||||
}
|
||||
|
||||
// Step 6: Join Additional Master Nodes (if not already joined)
|
||||
for (let i = 2; i <= numMasters; i++) {
|
||||
const vmName = `k3s-master${i}`;
|
||||
const vmIP = findIP4(vmName, nodes)
|
||||
|
||||
const joined = await Deno.run({
|
||||
cmd: ["kubectl", "get", "nodes", vmName],
|
||||
stdout: "null",
|
||||
stderr: "null",
|
||||
}).status().then((status) => status.success);
|
||||
|
||||
if (!joined) {
|
||||
await Deno.run({
|
||||
cmd: ["k3sup", "join", "--ip", vmIP, "--server-ip", firstMasterIP, "--user", "picard", "--ssh-key", sshKeyPrivateFileName],
|
||||
}).status();
|
||||
console.log(`✅ VM ${vmName} joined the cluster.`);
|
||||
} else {
|
||||
console.log(`⏩ VM ${vmName} already joined the cluster.`);
|
||||
}
|
||||
}
|
||||
|
||||
console.log("🚀 HA k3s cluster setup complete!");
|
||||
};
|
||||
|
||||
await new Command()
|
||||
.name("setup-k3s-cluster")
|
||||
.version("0.1.0")
|
||||
.description("Automate the setup of an HA k3s cluster using incus and k3sup")
|
||||
.option("-m, --masters <numMasters:number>", "Number of master nodes", {default: 3})
|
||||
.action(({masters}) => setupCluster(masters))
|
||||
.parse(Deno.args);
|
||||
|
|
@ -0,0 +1,77 @@
|
|||
# This file is maintained automatically by "terraform init".
|
||||
# Manual edits may be lost in future updates.
|
||||
|
||||
provider "registry.terraform.io/argoproj-labs/argocd" {
|
||||
version = "7.0.2"
|
||||
constraints = "7.0.2"
|
||||
hashes = [
|
||||
"h1:4lbS20EczuzhSNSOjp1mJoe2YbcXniBTzxmJHd+rjIE=",
|
||||
"zh:083686eaeaa7b51ebaac42c3c7b01a15f020a735dc8dbe50aa6a6bff16888943",
|
||||
"zh:16b1b813f33874844fadc747c57ae99cf8f119c119b3776a105c154fc4a54488",
|
||||
"zh:25ed8dca5da5faa52392c7938c61dd9a83bc6388ad771062cecfc15c44bc3d8e",
|
||||
"zh:3907351bbcb6a0c1c1abeb33dac5d70f798b0ecc05559f2ede40ae84b9079983",
|
||||
"zh:3a737237f03b9b28de26b1fe9d20bcfa53f580489fc28d774396e5de38906fd3",
|
||||
"zh:64421961cc342cec8280899352444a96ad1b09144fa933dc3a0dfb9bbae809a9",
|
||||
"zh:9702119789cc42b98dc9d1a8d7666b608a964cf1355e3cf500b82bed1898f2fd",
|
||||
"zh:9cc9ad41a6ce25aac40b9dd2291fc4d90a223add197155decdca7d2d82fc60f1",
|
||||
"zh:a239381a36bf6041d6520c8db83fb281fd2417f4540c895e07db052dd108a72f",
|
||||
"zh:ecca66064fff07719eec2ef35cd62d1cb65cf4a11f9ce96f3a9b9b7c78d614a5",
|
||||
]
|
||||
}
|
||||
|
||||
provider "registry.terraform.io/hashicorp/helm" {
|
||||
version = "2.17.0"
|
||||
hashes = [
|
||||
"h1:K5FEjxvDnxb1JF1kG1xr8J3pNGxoaR3Z0IBG9Csm/Is=",
|
||||
"zh:06fb4e9932f0afc1904d2279e6e99353c2ddac0d765305ce90519af410706bd4",
|
||||
"zh:104eccfc781fc868da3c7fec4385ad14ed183eb985c96331a1a937ac79c2d1a7",
|
||||
"zh:129345c82359837bb3f0070ce4891ec232697052f7d5ccf61d43d818912cf5f3",
|
||||
"zh:3956187ec239f4045975b35e8c30741f701aa494c386aaa04ebabffe7749f81c",
|
||||
"zh:66a9686d92a6b3ec43de3ca3fde60ef3d89fb76259ed3313ca4eb9bb8c13b7dd",
|
||||
"zh:88644260090aa621e7e8083585c468c8dd5e09a3c01a432fb05da5c4623af940",
|
||||
"zh:a248f650d174a883b32c5b94f9e725f4057e623b00f171936dcdcc840fad0b3e",
|
||||
"zh:aa498c1f1ab93be5c8fbf6d48af51dc6ef0f10b2ea88d67bcb9f02d1d80d3930",
|
||||
"zh:bf01e0f2ec2468c53596e027d376532a2d30feb72b0b5b810334d043109ae32f",
|
||||
"zh:c46fa84cc8388e5ca87eb575a534ebcf68819c5a5724142998b487cb11246654",
|
||||
"zh:d0c0f15ffc115c0965cbfe5c81f18c2e114113e7a1e6829f6bfd879ce5744fbb",
|
||||
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
|
||||
]
|
||||
}
|
||||
|
||||
provider "registry.terraform.io/hashicorp/kubernetes" {
|
||||
version = "2.35.1"
|
||||
hashes = [
|
||||
"h1:Av0Wk8g2XjY2oap7nyWNHEgfCRfphdJvrkqJjEM2ZKM=",
|
||||
"zh:12212ca5ae47823ce14bfafb909eeb6861faf1e2435fb2fc4a8b334b3544b5f5",
|
||||
"zh:3f49b3d77182df06b225ab266667de69681c2e75d296867eb2cf06a8f8db768c",
|
||||
"zh:40832494d19f8a2b3cd0c18b80294d0b23ef6b82f6f6897b5fe00248a9997460",
|
||||
"zh:739a5ddea61a77925ee7006a29c8717377a2e9d0a79a0bbd98738d92eec12c0d",
|
||||
"zh:a02b472021753627c5c39447a56d125a32214c29ff9108fc499f2dcdf4f1cc4f",
|
||||
"zh:b78865b3867065aa266d6758c9601a2756741478f5735a838c20d633d65e085b",
|
||||
"zh:d362e87464683f5632790e66920ea803adb54c2bc0cb24b6fd9a314d2b1efffd",
|
||||
"zh:d98206fe88c2c9a52b8d2d0cb2c877c812a4a51d19f9d8428e63cbd5fd8a304d",
|
||||
"zh:dfa320946b1ce3f3615c42b3447a28dc9f604c06d8b9a6fe289855ab2ade4d11",
|
||||
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
|
||||
"zh:fc1debd2e695b5222d2ccc8b24dab65baba4ee2418ecce944e64d42e79474cb5",
|
||||
"zh:fdaf960443720a238c09e519aeb30faf74f027ac5d1e0a309c3b326888e031d7",
|
||||
]
|
||||
}
|
||||
|
||||
provider "registry.terraform.io/hashicorp/random" {
|
||||
version = "3.6.3"
|
||||
hashes = [
|
||||
"h1:Fnaec9vA8sZ8BXVlN3Xn9Jz3zghSETIKg7ch8oXhxno=",
|
||||
"zh:04ceb65210251339f07cd4611885d242cd4d0c7306e86dda9785396807c00451",
|
||||
"zh:448f56199f3e99ff75d5c0afacae867ee795e4dfda6cb5f8e3b2a72ec3583dd8",
|
||||
"zh:4b4c11ccfba7319e901df2dac836b1ae8f12185e37249e8d870ee10bb87a13fe",
|
||||
"zh:4fa45c44c0de582c2edb8a2e054f55124520c16a39b2dfc0355929063b6395b1",
|
||||
"zh:588508280501a06259e023b0695f6a18149a3816d259655c424d068982cbdd36",
|
||||
"zh:737c4d99a87d2a4d1ac0a54a73d2cb62974ccb2edbd234f333abd079a32ebc9e",
|
||||
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
|
||||
"zh:a357ab512e5ebc6d1fda1382503109766e21bbfdfaa9ccda43d313c122069b30",
|
||||
"zh:c51bfb15e7d52cc1a2eaec2a903ac2aff15d162c172b1b4c17675190e8147615",
|
||||
"zh:e0951ee6fa9df90433728b96381fb867e3db98f66f735e0c3e24f8f16903f0ad",
|
||||
"zh:e3cdcb4e73740621dabd82ee6a37d6cfce7fee2a03d8074df65086760f5cf556",
|
||||
"zh:eff58323099f1bd9a0bec7cb04f717e7f1b2774c7d612bf7581797e1622613a0",
|
||||
]
|
||||
}
|
||||
|
|
@ -0,0 +1,59 @@
|
|||
locals {
  # Root domain shared by all services.
  tld = "fourlights.dev"

  # DNS name of this ("venus") cluster, and the bridge endpoint under it.
  cluster_dns = "venus.${local.tld}"
  bridge_dns  = "bridge.${local.cluster_dns}"

  # Gate passed to modules as `wait_on`, so they only deploy once the
  # cluster install is considered complete.
  is_installed = true

  # Cluster node count; also reused as the replica count for stateful
  # services (see the mongodb module).
  node_count = 3
}
|
||||
|
||||
# Homepage dashboard, served at the cluster's root DNS name.
module "homepage" {
  source          = "../../infra/modules/homepage"
  wait_on         = local.is_installed
  k8s_config_yaml = local.k8s_config_yaml

  server_dns   = local.cluster_dns
  service_name = "homepage"
  service_uri  = local.cluster_dns
  namespace    = "homepage"
}
|
||||
|
||||
# MinIO object storage ("storage" service). The admin console is kept on
# the cluster DNS name and reached through the bridge rather than being
# exposed publicly.
module "minio" {
  source          = "../../infra/modules/minio"
  wait_on         = local.is_installed
  k8s_config_yaml = local.k8s_config_yaml

  server_dns   = local.cluster_dns
  service_name = "storage"
  namespace    = "minio"

  admin_server_dns = local.cluster_dns # Restricted admin access, access via bridge

  tls          = false
  admin        = true
  ingressClass = "traefik"
  storageSize  = "10Gi"
}
|
||||
|
||||
# MongoDB replica set sized to the cluster: one replica per node.
module "mongodb" {
  source          = "../../infra/modules/mongodb"
  wait_on         = local.is_installed
  k8s_config_yaml = local.k8s_config_yaml

  namespace = "mongodb"
  replicas  = local.node_count
}
|
||||
|
||||
# RabbitMQ message broker. Not published on a public DNS name; the
# management UI is reached through the bridge.
module "rabbitmq" {
  source          = "../../infra/modules/rabbitmq"
  wait_on         = local.is_installed
  k8s_config_yaml = local.k8s_config_yaml

  server_dns = "local" # Restricted admin access, access via bridge

  service_name = "rabbitmq"
  namespace    = "rabbitmq"

  tls          = false
  admin        = true
  ingressClass = "traefik"
}
|
||||
|
|
@ -0,0 +1,35 @@
|
|||
locals {
  # Kubeconfig written by the cluster install, one directory up from
  # this configuration.
  k8s_config_path = "../kubeconfig"
  k8s_config_yaml = file(local.k8s_config_path)
  k8s_config      = yamldecode(local.k8s_config_yaml)

  # API server endpoint of the first cluster entry in the kubeconfig.
  k8s_host = local.k8s_config.clusters[0].cluster.server

  # Credential selection: try() returns the first variant whose
  # attributes all resolve, so token auth is preferred when the
  # kubeconfig carries a bearer token, with client-certificate auth as
  # the fallback.
  k8s_auth = try(
    {
      token       = local.k8s_config.users[0].user.token
      using_token = true
    },
    {
      client_certificate = base64decode(local.k8s_config.users[0].user["client-certificate-data"])
      client_key         = base64decode(local.k8s_config.users[0].user["client-key-data"])
      using_token        = false
    }
  )
}
|
||||
|
||||
# Kubernetes provider wired to the local cluster's kubeconfig, using
# whichever credential variant local.k8s_auth resolved to.
provider "kubernetes" {
  host = local.k8s_host

  # NOTE(review): TLS verification is disabled — presumably acceptable
  # for a local/dev cluster only; confirm this config never targets a
  # shared environment.
  insecure = true

  token              = local.k8s_auth.using_token ? local.k8s_auth.token : null
  client_certificate = local.k8s_auth.using_token ? null : local.k8s_auth.client_certificate
  client_key         = local.k8s_auth.using_token ? null : local.k8s_auth.client_key
}
|
||||
|
||||
# Helm provider using the same connection settings as the kubernetes
# provider above.
provider "helm" {
  kubernetes {
    host = local.k8s_host

    # NOTE(review): TLS verification disabled — confirm local-only use.
    insecure = true

    token              = local.k8s_auth.using_token ? local.k8s_auth.token : null
    client_certificate = local.k8s_auth.using_token ? null : local.k8s_auth.client_certificate
    client_key         = local.k8s_auth.using_token ? null : local.k8s_auth.client_key
  }
}
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
# Hetzner DNS API token consumed by DNS-record resources.
variable "hdns_token" {
  type        = string
  description = "Hetzner DNS API token used to create DNS records"

  # This is a credential: mark it sensitive so Terraform redacts its
  # value from plan/apply output and module call logs.
  sensitive = true
}
|
||||
Loading…
Reference in New Issue