diff --git a/ships/shuttle/setup-cluster.ts b/ships/shuttle/setup-cluster.ts
new file mode 100755
index 0000000..c969cad
--- /dev/null
+++ b/ships/shuttle/setup-cluster.ts
@@ -0,0 +1,115 @@
+#!/usr/bin/env -S deno run --allow-run --allow-read --allow-write
+
+import { Command } from "https://deno.land/x/cliffy@v1.0.0-rc.4/command/mod.ts";
+
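+// NOTE (editorial): this script is built on the legacy Deno.run() API, which is
+// deprecated and was removed in Deno 2; the shuttles/setup-cluster.ts rewrite later
+// in this change migrates the same flow to the newer Deno.Command API.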
"--server-ip", firstMasterIP, "--user", "root"], + }).status(); + console.log(`✅ VM ${vmName} joined the cluster.`); + } else { + console.log(`⏩ VM ${vmName} already joined the cluster.`); + } + } + + console.log("🚀 HA k3s cluster setup complete!"); +}; + +await new Command() + .name("setup-k3s-cluster") + .version("0.1.0") + .description("Automate the setup of an HA k3s cluster using incus and k3sup") + .option("-m, --masters ", "Number of master nodes", { default: 3 }) + .action(({ masters }) => setupCluster(masters)) + .parse(Deno.args); diff --git a/shuttles/.gitignore b/shuttles/.gitignore new file mode 100644 index 0000000..2171f23 --- /dev/null +++ b/shuttles/.gitignore @@ -0,0 +1,2 @@ +kubeconfig +*.lock.hcl diff --git a/shuttles/kubeconfig b/shuttles/kubeconfig deleted file mode 100644 index 342348c..0000000 --- a/shuttles/kubeconfig +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: v1 -clusters: -- cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJlRENDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUzTXprM09ESTROelF3SGhjTk1qVXdNakUzTURrd01URTBXaGNOTXpVd01qRTFNRGt3TVRFMApXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUzTXprM09ESTROelF3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFUWVNEV1Jwbmd6TE5ySGphTmhqdmM1SU82a2dibVpwaER4WVROTG11MjAKaWxaQnZLRlZRdW5kV3ZEQ1VrcGJNRjNsOTRuSmxaYVByK3lDSnJpVVh0UjZvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVVQ5bVZxTGcvSFBCUS91L3MzbHAwCjhJQ0RDc013Q2dZSUtvWkl6ajBFQXdJRFNRQXdSZ0loQUpjMkJkMjd0SzNZTFpwa01yOFNMSEIvbngzd1E1MU0KRnRaYnBNVzJudVNXQWlFQTMyUmcyVHZNQW9LYll5bnhySkk3U3g5eWszZHFsSWd5TW15d2M5d1JicmM9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K - server: https://10.110.36.47:6443 - name: default -contexts: -- context: - cluster: default - user: default - name: default -current-context: default -kind: Config -preferences: {} -users: -- name: default - user: - client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrakNDQVRlZ0F3SUJBZ0lJZFh2OWlXRHR6SE13Q2dZSUtvWkl6ajBFQXdJd0l6RWhNQjhHQTFVRUF3d1kKYXpOekxXTnNhV1Z1ZEMxallVQXhOek01TnpneU9EYzBNQjRYRFRJMU1ESXhOekE1TURFeE5Gb1hEVEkyTURJeApOekE1TURFeE5Gb3dNREVYTUJVR0ExVUVDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhGVEFUQmdOVkJBTVRESE41CmMzUmxiVHBoWkcxcGJqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJKNlNVZm5ESVJndVRDMjkKaWFjVTdTM3VPWkw1RERGZjJPQi9IakdTWEErQlRGaE5VOGtMSHBxZlZYeWVKbHNkd09mR1QvL2JQbENsWFYvdQowc0wyTW5halNEQkdNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBakFmCkJnTlZIU01FR0RBV2dCUXdoZkJDTWRocVpXMW96WlEzZG84d1VYOEpCREFLQmdncWhrak9QUVFEQWdOSkFEQkcKQWlFQXczSFpKY1cwaGI3ZUwxSktvcTJ2cExFaFVxVncxRG1oTGJtcUNQTVdmcEFDSVFDRkhXcDhoTTNMdTROTgpGUnYxc2pkYS93VjdmSVpUcUsyZHVNOUNPQVc5emc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCi0tLS0tQkVHSU4gQ0VSVElGSUNBVEUtLS0tLQpNSUlCZHpDQ0FSMmdBd0lCQWdJQkFEQUtCZ2dxaGtqT1BRUURBakFqTVNFd0h3WURWUVFEREJock0zTXRZMnhwClpXNTBMV05oUURFM016azNPREk0TnpRd0hoY05NalV3TWpFM01Ea3dNVEUwV2hjTk16VXdNakUxTURrd01URTAKV2pBak1TRXdId1lEVlFRRERCaHJNM010WTJ4cFpXNTBMV05oUURFM016azNPREk0TnpRd1dUQVRCZ2NxaGtqTwpQUUlCQmdncWhrak9QUU1CQndOQ0FBUjJCcXE5cVhESmZGeVQ1VVpEY3Z6SHVPdDg2TEZ5WTlDb1oxL0xxeldGClZMdHVQYUFXc3BUdUtZckJieTRZRlBQQlQ1M0RkS1F5cjhhWG5HUDRWenlxbzBJd1FEQU9CZ05WSFE4QkFmOEUKQkFNQ0FxUXdEd1lEVlIwVEFRSC9CQVV3QXdFQi96QWRCZ05WSFE0RUZnUVVNSVh3UWpIWWFtVnRhTTJVTjNhUApNRkYvQ1FRd0NnWUlLb1pJemowRUF3SURTQUF3UlFJZ1lmS01YQ3lFelBmM05wN3paLzVYTnFxeTdjTDBpMXBWCkpjZzNzYmtMbXB3Q0lRRDlzYVpmekswRlUrNWljWFpLZmUyVFg0WW5sNS96aFVGR2FHb2RTb1ovUXc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== - client-key-data: 
-    client-key-data: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUtlQVpqUzhNM1ZBd2l6cWo0UDN6RURuQmNaYldrcDJPekt2VlNpUSs0azRvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFbnBKUitjTWhHQzVNTGIySnB4VHRMZTQ1a3ZrTU1WL1k0SDhlTVpKY0Q0Rk1XRTFUeVFzZQptcDlWZko0bVd4M0E1OFpQLzlzK1VLVmRYKzdTd3ZZeWRnPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=
diff --git a/shuttles/setup-cluster.ts b/shuttles/setup-cluster.ts
index 69c4479..59a372f 100755
--- a/shuttles/setup-cluster.ts
+++ b/shuttles/setup-cluster.ts
@@ -1,149 +1,377 @@
 #!/usr/bin/env -S deno run --allow-run --allow-read --allow-write
+// Note: TypeScript errors related to Deno imports and namespace can be safely ignored
+// These are only relevant when running the script with the Deno runtime
 
 import { Command } from "https://deno.land/x/cliffy@v1.0.0-rc.4/command/mod.ts";
 import { delay } from "https://deno.land/std/async/mod.ts";
+import { exists } from "https://deno.land/std/fs/mod.ts";
 
-const alpineImage = "alpine/edge/cloud"
-const alpineConfig = ['--profile', 'cloud-init-alpine']
-const archImage = "archlinux/current/cloud"
-const archConfig = ['--profile', 'cloud-init-arch']
+// Configuration constants
+const alpineImage = "alpine/edge/cloud";
+const alpineConfig = ['--profile', 'cloud-init-alpine'];
+const archImage = "archlinux/current/cloud";
+const archConfig = ['--profile', 'cloud-init-arch'];
+const getIp = (i: number) => `10.110.36.${109 + i}`;
 
-const image = archImage
-const config = archConfig
+const image = archImage;
+const config = archConfig;
 
-const findIP4 = (name: string, nodeList: any) => {
-  const ip4 = nodeList?.find((n) => n.name === name)?.state?.network?.eth0?.addresses?.find((n) => n.family === 'inet')?.address;
-  return ip4;
-}
+// Enhanced logging function with timestamps and log levels
+const log = {
+  debug: (message: string) => console.log(`[${new Date().toISOString()}] [DEBUG] ${message}`),
+  info: (message: string) => console.log(`[${new Date().toISOString()}] [INFO] ${message}`),
+  success: (message: string) => console.log(`[${new Date().toISOString()}] [SUCCESS] ✅ ${message}`),
+  warning: (message: string) => console.log(`[${new Date().toISOString()}] [WARNING] ⚠️ ${message}`),
+  error: (message: string) => console.error(`[${new Date().toISOString()}] [ERROR] ❌ ${message}`),
+  skip: (message: string) => console.log(`[${new Date().toISOString()}] [SKIP] ⏩ ${message}`),
+};
+
+// Helper function to execute commands with proper error handling
+async function executeCommand(
+  cmdArray: string[],
+  description: string,
+  options: {
+    stdout?: "piped" | "inherit" | "null",
+    stderr?: "piped" | "inherit" | "null",
+    throwOnError?: boolean
+  } = {}
+): Promise<{ success: boolean; output?: string; error?: string }> {
+  const { stdout = "piped", stderr = "piped", throwOnError = true } = options;
+
+  log.debug(`Executing: ${cmdArray.join(" ")}`);
+
+  try {
+    // Use Deno.Command API which is the modern replacement for Deno.run
+    const command = new Deno.Command(cmdArray[0], {
+      args: cmdArray.slice(1),
+      stdout: stdout === "piped" ? "piped" : stdout === "inherit" ? "inherit" : "null",
+      stderr: stderr === "piped" ? "piped" : stderr === "inherit" ? "inherit" : "null",
+    });
+
+    const { code, stdout: stdoutOutput, stderr: stderrOutput } = await command.output();
+
+    const stdoutText = stdout === "piped" ? new TextDecoder().decode(stdoutOutput).trim() : "";
+    const stderrText = stderr === "piped" ? new TextDecoder().decode(stderrOutput).trim() : "";
+
+    if (code !== 0) {
+      log.error(`Failed to ${description}: ${stderrText || "Unknown error"}`);
+      if (throwOnError) {
+        throw new Error(`Command failed: ${cmdArray.join(" ")}\n${stderrText}`);
+      }
+    }
+
+    return {
+      success: code === 0,
+      output: stdoutText,
+      error: stderrText
+    };
+  } catch (error) {
+    const errorMessage = error instanceof Error ? error.message : String(error);
+    log.error(`Exception while ${description}: ${errorMessage}`);
+    if (throwOnError) {
+      throw error;
+    }
+    return { success: false, error: errorMessage };
+  }
+}
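+// Usage sketch (editorial, illustrative only): callers can let failures throw, or
+// opt out with throwOnError and branch on `success`, e.g.:
+//
+//   const { success, output } = await executeCommand(
+//     ["incus", "list", "--format", "json"],
+//     "list instances",
+//     { throwOnError: false },
+//   );
+//   if (success && output) console.log(JSON.parse(output).length, "instances");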
+
+// Check if VM is ready for SSH connections
+async function isVmReadyForSsh(ip: string, user: string, maxAttempts = 30): Promise<boolean> {
+  log.info(`Checking if VM at ${ip} is ready for SSH connections...`);
+
+  for (let attempt = 1; attempt <= maxAttempts; attempt++) {
+    log.debug(`SSH readiness check attempt ${attempt}/${maxAttempts}`);
+
+    const { success } = await executeCommand(
+      ["ssh", "-o", "StrictHostKeyChecking=no", "-o", "ConnectTimeout=5", `${user}@${ip}`, "echo", "ready"],
+      `check SSH connectivity to ${ip}`,
+      { throwOnError: false, stderr: "null" }
+    );
+
+    if (success) {
+      log.success(`VM at ${ip} is ready for SSH connections`);
+      return true;
+    }
+
+    log.debug(`VM at ${ip} not ready yet, waiting...`);
+    await delay(2000); // Wait 2 seconds between attempts
+  }
+
+  log.error(`VM at ${ip} is not ready for SSH connections after ${maxAttempts} attempts`);
+  return false;
+}
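+// Note (editorial): the check below assumes `incus list --format json` returns an
+// array of instance objects with a top-level `status` string such as "Running".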
+// Check if VM is running
+async function isVmRunning(vmName: string): Promise<boolean> {
+  const { success, output } = await executeCommand(
+    ["incus", "list", vmName, "--format", "json"],
+    `check if VM ${vmName} is running`,
+    { throwOnError: false }
+  );
+
+  if (!success || !output) {
+    return false;
+  }
+
+  try {
+    const vmInfo = JSON.parse(output);
+    return vmInfo.length > 0 && vmInfo[0].status === "Running";
+  } catch (error) {
+    const errorMessage = error instanceof Error ? error.message : String(error);
+    log.error(`Failed to parse VM status: ${errorMessage}`);
+    return false;
+  }
+}
 
-  const sshKey = await Deno.readTextFile(sshKeyPubFileName);
-
-  // Step 3: Launch VMs (if not already running)
-  for (let i = 1; i <= numMasters; i++) {
-    const vmName = `k3s-master${i}`;
-    const vmExists = await Deno.run({
-      cmd: ["incus", "list", vmName, "--format", "csv"],
-      stdout: "piped",
-    }).output().then((output) => new TextDecoder().decode(output).trim() !== "");
-
-    if (!vmExists) {
-      await Deno.run({
-        cmd: ["incus", "launch", `images:${image}`, vmName, "--profile", "low-resource", "-c", "user.timezone=\"Europe/Amsterdam\"", "-c", `user.ssh_key=\"${sshKey}\"`, ...config],
-      }).status();
-      console.log(`✅ VM ${vmName} launched.`);
-    } else {
-      console.log(`⏩ VM ${vmName} already exists.`);
-    }
-  }
+// Cleanup function to handle failures
+async function cleanup(vmNames: string[], shouldRemove = false): Promise<void> {
+  log.info("Starting cleanup process...");
+
+  for (const vmName of vmNames) {
+    // Check if VM exists
+    const { success, output } = await executeCommand(
+      ["incus", "list", vmName, "--format", "csv"],
+      `check if VM ${vmName} exists`,
+      { throwOnError: false }
+    );
+
+    if (success && output) {
+      // Stop VM if it's running
+      const isRunning = await isVmRunning(vmName);
+      if (isRunning) {
+        log.info(`Stopping VM ${vmName}...`);
+        await executeCommand(
+          ["incus", "stop", vmName, "--force"],
+          `stop VM ${vmName}`,
+          { throwOnError: false }
+        );
+      }
+
+      // Remove VM if requested
+      if (shouldRemove) {
+        log.info(`Removing VM ${vmName}...`);
+        await executeCommand(
+          ["incus", "delete", vmName],
+          `remove VM ${vmName}`,
+          { throwOnError: false }
+        );
+      }
+    }
+  }
+
+  log.success("Cleanup completed");
+}
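+// Design note (editorial): instances are stopped before deletion because
+// `incus delete` refuses a running instance unless it is forced; stopping first
+// keeps the later delete call safe without passing --force to it as well.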
-  // Step 4: Install k3sup (if not installed)
-  const k3supInstalled = await Deno.run({
-    cmd: ["which", "k3sup"],
-    stdout: "null",
-    stderr: "null",
-  }).status().then((status) => status.success);
-
-  if (!k3supInstalled) {
-    await Deno.run({
-      cmd: ["sh", "-c", "curl -sLS https://get.k3sup.dev | sh"],
-    }).status();
-    console.log("✅ k3sup installed.");
-  } else {
-    console.log("⏩ k3sup already installed.");
-  }
-
-  // Step 5: Bootstrap First Master Node (if not already bootstrapped)
-  let firstMasterIP;
-  let nodes;
-  while (firstMasterIP === undefined) {
-    nodes = await Deno.run({
-      cmd: ["incus", "list", "--format", "json"],
-      stdout: "piped",
-    }).output().then((output) => JSON.parse(new TextDecoder().decode(output)));
-    firstMasterIP = findIP4('k3s-master1', nodes)
-    await delay(1000)
-  }
-
-  const kubeconfigExists = await Deno.stat("./kubeconfig").then(() => true).catch(() => false);
-
-  if (!kubeconfigExists) {
-    await Deno.run({
-      cmd: ["k3sup", "install", "--ip", firstMasterIP, "--user", "picard", "--cluster", "--ssh-key", sshKeyPrivateFileName],
-    }).status();
-    console.log("✅ First master node bootstrapped.");
-  } else {
-    console.log("⏩ First master node already bootstrapped.");
-  }
-
-  // Step 6: Join Additional Master Nodes (if not already joined)
-  for (let i = 2; i <= numMasters; i++) {
-    const vmName = `k3s-master${i}`;
-    const vmIP = findIP4(vmName, nodes)
-
-    const joined = await Deno.run({
-      cmd: ["kubectl", "get", "nodes", vmName],
-      stdout: "null",
-      stderr: "null",
-    }).status().then((status) => status.success);
-
-    if (!joined) {
-      await Deno.run({
-        cmd: ["k3sup", "join", "--ip", vmIP, "--server-ip", firstMasterIP, "--user", "picard", "--ssh-key", sshKeyPrivateFileName],
-      }).status();
-      console.log(`✅ VM ${vmName} joined the cluster.`);
-    } else {
-      console.log(`⏩ VM ${vmName} already joined the cluster.`);
+const setupCluster = async (numMasters: number, forceCleanup = false) => {
+  log.info(`Starting setup of k3s cluster with ${numMasters} master nodes`);
+
+  const createdVMs: string[] = [];
+
+  try {
+    // Get hostname and user
+    const { output: hostname } = await executeCommand(
+      ["hostnamectl", "hostname"],
+      "get hostname"
+    );
+
+    const { output: user } = await executeCommand(
+      ["whoami"],
+      "get current user"
+    );
+
+    const sshKeyPubFileName = `/home/${user}/.ssh/nl.fourlights.${hostname}.pub`;
+    const sshKeyPrivateFileName = `/home/${user}/.ssh/nl.fourlights.${hostname}`;
+
+    // Check if SSH keys exist
+    if (!await exists(sshKeyPubFileName) || !await exists(sshKeyPrivateFileName)) {
+      log.error(`Required SSH keys not found: ${sshKeyPubFileName} or ${sshKeyPrivateFileName}`);
+      throw new Error("SSH keys not found");
+    }
+
+    // Step 1: Create Low-Resource Profile (if not exists)
+    const { success: profileExists } = await executeCommand(
+      ["incus", "profile", "show", "low-resource"],
+      "check if low-resource profile exists",
+      { stdout: "null", stderr: "null", throwOnError: false }
+    );
+
+    if (!profileExists) {
+      log.info("Creating low-resource profile...");
+      await executeCommand(
+        ["incus", "profile", "create", "low-resource"],
+        "create low-resource profile"
+      );
+      await executeCommand(
+        ["incus", "profile", "set", "low-resource", "limits.cpu=1", "limits.memory=512MB"],
+        "set low-resource profile limits"
+      );
+      await executeCommand(
+        ["incus", "profile", "device", "add", "low-resource", "root", "disk", "pool=default", "path=/"],
+        "add root disk to low-resource profile"
+      );
+      // await executeCommand(
+      //   ["incus", "profile", "device", "add", "low-resource", "eth-0", "nic", "network=incusbr0"],
+      //   "add network interface to low-resource profile"
+      // );
+      log.success("Low-resource profile created");
+    } else {
+      log.skip("Low-resource profile already exists");
+    }
+
+    // Read SSH key
+    const sshKey = await Deno.readTextFile(sshKeyPubFileName);
+
+    // Step 3: Launch VMs (if not already running)
+    for (let i = 1; i <= numMasters; i++) {
+      const vmName = `k3s-master${i}`;
+
+      const { success: vmExists, output: vmOutput } = await executeCommand(
+        ["incus", "list", vmName, "--format", "csv"],
+        `check if VM ${vmName} exists`,
+        { throwOnError: false }
+      );
+
+      if (!vmExists || !vmOutput) {
+        log.info(`Creating VM ${vmName}...`);
+        await executeCommand(
+          ["incus", "init", `images:${image}`, vmName, "--profile", "low-resource", "-c", "user.timezone=\"Europe/Amsterdam\"", "-c", `user.ssh_key=\"${sshKey}\"`, ...config],
+          `initialize VM ${vmName}`
+        );
+
+        await executeCommand(
+          ["incus", "config", 'device', 'add', vmName, 'eth0', 'nic', 'nictype=bridged', 'parent=incusbr0', `ipv4.address=${getIp(i)}`],
+          `configure network for VM ${vmName}`
+        );
+
+        await executeCommand(
+          ["incus", "start", vmName],
+          `start VM ${vmName}`
+        );
+
+        createdVMs.push(vmName);
+        log.success(`VM ${vmName} started`);
+      } else {
+        // Check if VM is running, if not, start it
+        const isRunning = await isVmRunning(vmName);
+        if (!isRunning) {
+          log.info(`Starting existing VM ${vmName}...`);
+          await executeCommand(
+            ["incus", "start", vmName],
+            `start VM ${vmName}`
+          );
+        }
+        log.skip(`VM ${vmName} already exists`);
+      }
+    }
+
+    // Step 4: Install k3sup (if not installed)
+    const { success: k3supInstalled } = await executeCommand(
+      ["which", "k3sup"],
+      "check if k3sup is installed",
+      { stdout: "null", stderr: "null", throwOnError: false }
+    );
+
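+    // Note (editorial): the get.k3sup.dev script downloads the binary to the
+    // working directory and typically suggests `sudo install k3sup /usr/local/bin/`
+    // afterwards; the `which k3sup` check above only sees binaries already on PATH.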
+    if (!k3supInstalled) {
+      log.info("Installing k3sup...");
+      await executeCommand(
+        ["sh", "-c", "curl -sLS https://get.k3sup.dev | sh"],
+        "install k3sup"
+      );
+      log.success("k3sup installed");
+    } else {
+      log.skip("k3sup already installed");
+    }
+
+    // Step 5: Wait for VMs to be ready
+    const firstMasterIP = getIp(1);
+    log.info(`Waiting for first master node (${firstMasterIP}) to be ready...`);
+
+    const vmReady = await isVmReadyForSsh(firstMasterIP, "picard");
+    if (!vmReady) {
+      throw new Error(`First master node at ${firstMasterIP} is not ready for SSH connections`);
+    }
+
+    // Check if kubeconfig exists
+    const kubeconfigExists = await exists("./kubeconfig");
+
+    if (!kubeconfigExists) {
+      log.info("Bootstrapping first master node...");
+      await executeCommand(
+        ["k3sup", "install", "--ip", firstMasterIP, "--user", "picard", "--cluster", "--ssh-key", sshKeyPrivateFileName],
+        "bootstrap first master node"
+      );
+      log.success("First master node bootstrapped");
+    } else {
+      log.skip("First master node already bootstrapped");
+    }
+
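+    // Note (editorial): `k3sup join` is invoked with --server below, which enrolls
+    // each additional node as a control-plane/etcd member for HA rather than as a
+    // worker agent.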
+    // Step 6: Join Additional Master Nodes (if not already joined)
+    for (let i = 2; i <= numMasters; i++) {
+      const vmName = `k3s-master${i}`;
+      const vmIP = getIp(i);
+
+      // Wait for VM to be ready
+      log.info(`Waiting for ${vmName} (${vmIP}) to be ready...`);
+      const nodeReady = await isVmReadyForSsh(vmIP, "picard");
+      if (!nodeReady) {
+        log.warning(`VM ${vmName} is not ready for SSH connections, skipping join operation`);
+        continue;
+      }
+
+      const { success: joined } = await executeCommand(
+        ["kubectl", "--kubeconfig=./kubeconfig", "get", "nodes", vmName],
+        `check if ${vmName} has joined the cluster`,
+        { stdout: "null", stderr: "null", throwOnError: false }
+      );
+
+      if (!joined) {
+        log.info(`Joining ${vmName} to the cluster...`);
+        await executeCommand(
+          ["k3sup", "join", "--server", "--ip", vmIP, "--server-ip", firstMasterIP, "--user", "picard", "--ssh-key", sshKeyPrivateFileName],
+          `join ${vmName} to the cluster`
+        );
+        log.success(`VM ${vmName} joined the cluster`);
+      } else {
+        log.skip(`VM ${vmName} already joined the cluster`);
+      }
+    }
+
+    log.success("HA k3s cluster setup complete! 🚀");
+
+    // Verify cluster status
+    log.info("Verifying cluster status...");
+    const { success: clusterVerified, output: nodesOutput } = await executeCommand(
+      ["kubectl", "--kubeconfig=./kubeconfig", "get", "nodes", "-o", "wide"],
+      "verify cluster nodes",
+      { throwOnError: false }
+    );
+
+    if (clusterVerified) {
+      log.info("Cluster nodes:");
+      console.log(nodesOutput);
+    } else {
+      log.warning("Could not verify cluster status");
+    }
+
+  } catch (error) {
+    const errorMessage = error instanceof Error ? error.message : String(error);
+    log.error(`Failed to set up cluster: ${errorMessage}`);
+
+    if (createdVMs.length > 0) {
+      log.warning("An error occurred during setup. Cleaning up created resources...");
+      await cleanup(createdVMs, forceCleanup);
+    }
+
+    Deno.exit(1);
+  }
-
-  console.log("🚀 HA k3s cluster setup complete!");
 };
 
 await new Command()
   .name("setup-k3s-cluster")
   .version("0.1.0")
   .description("Automate the setup of an HA k3s cluster using incus and k3sup")
-  .option("-m, --masters <masters:number>", "Number of master nodes", {default: 3})
-  .action(({masters}) => setupCluster(masters))
+  .option("-m, --masters <masters:number>", "Number of master nodes", { default: 3 })
+  .option("-c, --cleanup", "Force cleanup of VMs if setup fails", { default: false })
+  .action(({ masters, cleanup }) => setupCluster(masters, cleanup))
   .parse(Deno.args);
diff --git a/shuttles/terraform/.terraform.lock.hcl b/shuttles/terraform/.terraform.lock.hcl
deleted file mode 100644
index 8c5ae0a..0000000
--- a/shuttles/terraform/.terraform.lock.hcl
+++ /dev/null
@@ -1,77 +0,0 @@
-# This file is maintained automatically by "terraform init".
-# Manual edits may be lost in future updates.
-
-provider "registry.terraform.io/argoproj-labs/argocd" {
-  version = "7.0.2"
-  constraints = "7.0.2"
-  hashes = [
-    "h1:4lbS20EczuzhSNSOjp1mJoe2YbcXniBTzxmJHd+rjIE=",
-    "zh:083686eaeaa7b51ebaac42c3c7b01a15f020a735dc8dbe50aa6a6bff16888943",
-    "zh:16b1b813f33874844fadc747c57ae99cf8f119c119b3776a105c154fc4a54488",
-    "zh:25ed8dca5da5faa52392c7938c61dd9a83bc6388ad771062cecfc15c44bc3d8e",
-    "zh:3907351bbcb6a0c1c1abeb33dac5d70f798b0ecc05559f2ede40ae84b9079983",
-    "zh:3a737237f03b9b28de26b1fe9d20bcfa53f580489fc28d774396e5de38906fd3",
-    "zh:64421961cc342cec8280899352444a96ad1b09144fa933dc3a0dfb9bbae809a9",
-    "zh:9702119789cc42b98dc9d1a8d7666b608a964cf1355e3cf500b82bed1898f2fd",
-    "zh:9cc9ad41a6ce25aac40b9dd2291fc4d90a223add197155decdca7d2d82fc60f1",
-    "zh:a239381a36bf6041d6520c8db83fb281fd2417f4540c895e07db052dd108a72f",
-    "zh:ecca66064fff07719eec2ef35cd62d1cb65cf4a11f9ce96f3a9b9b7c78d614a5",
-  ]
-}
-
-provider "registry.terraform.io/hashicorp/helm" {
-  version = "2.17.0"
-  hashes = [
-    "h1:K5FEjxvDnxb1JF1kG1xr8J3pNGxoaR3Z0IBG9Csm/Is=",
-    "zh:06fb4e9932f0afc1904d2279e6e99353c2ddac0d765305ce90519af410706bd4",
-    "zh:104eccfc781fc868da3c7fec4385ad14ed183eb985c96331a1a937ac79c2d1a7",
-    "zh:129345c82359837bb3f0070ce4891ec232697052f7d5ccf61d43d818912cf5f3",
-    "zh:3956187ec239f4045975b35e8c30741f701aa494c386aaa04ebabffe7749f81c",
-    "zh:66a9686d92a6b3ec43de3ca3fde60ef3d89fb76259ed3313ca4eb9bb8c13b7dd",
-    "zh:88644260090aa621e7e8083585c468c8dd5e09a3c01a432fb05da5c4623af940",
-    "zh:a248f650d174a883b32c5b94f9e725f4057e623b00f171936dcdcc840fad0b3e",
-    "zh:aa498c1f1ab93be5c8fbf6d48af51dc6ef0f10b2ea88d67bcb9f02d1d80d3930",
-    "zh:bf01e0f2ec2468c53596e027d376532a2d30feb72b0b5b810334d043109ae32f",
-    "zh:c46fa84cc8388e5ca87eb575a534ebcf68819c5a5724142998b487cb11246654",
-    "zh:d0c0f15ffc115c0965cbfe5c81f18c2e114113e7a1e6829f6bfd879ce5744fbb",
-    "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
-  ]
-}
-
-provider "registry.terraform.io/hashicorp/kubernetes" {
-  version = "2.35.1"
-  hashes = [
-    "h1:Av0Wk8g2XjY2oap7nyWNHEgfCRfphdJvrkqJjEM2ZKM=",
-    "zh:12212ca5ae47823ce14bfafb909eeb6861faf1e2435fb2fc4a8b334b3544b5f5",
-    "zh:3f49b3d77182df06b225ab266667de69681c2e75d296867eb2cf06a8f8db768c",
-    "zh:40832494d19f8a2b3cd0c18b80294d0b23ef6b82f6f6897b5fe00248a9997460",
-    "zh:739a5ddea61a77925ee7006a29c8717377a2e9d0a79a0bbd98738d92eec12c0d",
-    "zh:a02b472021753627c5c39447a56d125a32214c29ff9108fc499f2dcdf4f1cc4f",
-    "zh:b78865b3867065aa266d6758c9601a2756741478f5735a838c20d633d65e085b",
"zh:d362e87464683f5632790e66920ea803adb54c2bc0cb24b6fd9a314d2b1efffd", - "zh:d98206fe88c2c9a52b8d2d0cb2c877c812a4a51d19f9d8428e63cbd5fd8a304d", - "zh:dfa320946b1ce3f3615c42b3447a28dc9f604c06d8b9a6fe289855ab2ade4d11", - "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", - "zh:fc1debd2e695b5222d2ccc8b24dab65baba4ee2418ecce944e64d42e79474cb5", - "zh:fdaf960443720a238c09e519aeb30faf74f027ac5d1e0a309c3b326888e031d7", - ] -} - -provider "registry.terraform.io/hashicorp/random" { - version = "3.6.3" - hashes = [ - "h1:Fnaec9vA8sZ8BXVlN3Xn9Jz3zghSETIKg7ch8oXhxno=", - "zh:04ceb65210251339f07cd4611885d242cd4d0c7306e86dda9785396807c00451", - "zh:448f56199f3e99ff75d5c0afacae867ee795e4dfda6cb5f8e3b2a72ec3583dd8", - "zh:4b4c11ccfba7319e901df2dac836b1ae8f12185e37249e8d870ee10bb87a13fe", - "zh:4fa45c44c0de582c2edb8a2e054f55124520c16a39b2dfc0355929063b6395b1", - "zh:588508280501a06259e023b0695f6a18149a3816d259655c424d068982cbdd36", - "zh:737c4d99a87d2a4d1ac0a54a73d2cb62974ccb2edbd234f333abd079a32ebc9e", - "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", - "zh:a357ab512e5ebc6d1fda1382503109766e21bbfdfaa9ccda43d313c122069b30", - "zh:c51bfb15e7d52cc1a2eaec2a903ac2aff15d162c172b1b4c17675190e8147615", - "zh:e0951ee6fa9df90433728b96381fb867e3db98f66f735e0c3e24f8f16903f0ad", - "zh:e3cdcb4e73740621dabd82ee6a37d6cfce7fee2a03d8074df65086760f5cf556", - "zh:eff58323099f1bd9a0bec7cb04f717e7f1b2774c7d612bf7581797e1622613a0", - ] -} diff --git a/shuttles/terraform/main.tf b/shuttles/terraform/main.tf index 957d491..07941b9 100644 --- a/shuttles/terraform/main.tf +++ b/shuttles/terraform/main.tf @@ -1,11 +1,54 @@ locals { tld = "fourlights.dev" cluster_dns = "venus.${local.tld}" - bridge_dns = "bridge.${local.cluster_dns}" is_installed = true node_count = 3 } +module "registry" { + source = "../../infra/modules/zot" + + service_uri = "registry.${local.cluster_dns}" +} + +resource "kubernetes_manifest" "preserve-host-middleware" { + depends_on = [local.is_installed] + manifest = { + apiVersion = "traefik.io/v1alpha1" + kind = "Middleware" + metadata = { + name = "preserve-host-headers" + namespace = "default" # NOTE: Hardcoded by design + } + spec = { + headers = { + customRequestHeaders = { + "X-Forwarded-Proto" = "https" + "X-Forwarded-Port" = "443" + } + } + } + } +} + +resource "kubernetes_manifest" "https-redirect-middleware" { + depends_on = [local.is_installed] + manifest = { + apiVersion = "traefik.io/v1alpha1" + kind = "Middleware" + metadata = { + name = "redirect-to-https" + namespace = "default" # NOTE: Hardcoded by design + } + spec = { + redirectScheme = { + permanent = true + scheme = "https" + } + } + } +} + module "homepage" { source = "../../infra/modules/homepage" wait_on = local.is_installed @@ -32,6 +75,8 @@ module "minio" { admin = true ingressClass = "traefik" storageSize = "10Gi" + + displayOnHomepage = true } module "mongodb" { @@ -57,3 +102,110 @@ module "rabbitmq" { admin = true ingressClass = "traefik" } + +module "postgresql" { + source = "../../infra/modules/postgresql" + + namespace = "postgresql" + k8s_config_yaml = local.k8s_config_yaml + username = "bridge" +} + +module "zitadel-db" { + source = "../../infra/modules/postgresql/tenant" + wait_on = module.postgresql.installed + + name = "zitadel" + root_password = module.postgresql.root_password + k8s_config_yaml = local.k8s_config_yaml +} + +module "zitadel" { + source = "../../infra/modules/zitadel" + wait_on = module.zitadel-db.installed + k8s_config_yaml = local.k8s_config_yaml + + server_dns = 
+
 module "homepage" {
   source = "../../infra/modules/homepage"
   wait_on = local.is_installed
@@ -32,6 +75,8 @@ module "minio" {
   admin = true
   ingressClass = "traefik"
   storageSize = "10Gi"
+
+  displayOnHomepage = true
 }
 
 module "mongodb" {
@@ -57,3 +102,110 @@ module "rabbitmq" {
   admin = true
   ingressClass = "traefik"
 }
+
+module "postgresql" {
+  source = "../../infra/modules/postgresql"
+
+  namespace = "postgresql"
+  k8s_config_yaml = local.k8s_config_yaml
+  username = "bridge"
+}
+
+module "zitadel-db" {
+  source = "../../infra/modules/postgresql/tenant"
+  wait_on = module.postgresql.installed
+
+  name = "zitadel"
+  root_password = module.postgresql.root_password
+  k8s_config_yaml = local.k8s_config_yaml
+}
+
+module "zitadel" {
+  source = "../../infra/modules/zitadel"
+  wait_on = module.zitadel-db.installed
+  k8s_config_yaml = local.k8s_config_yaml
+
+  server_dns = local.cluster_dns
+
+  service_name = "zitadel"
+  namespace = "zitadel"
+
+  database_password = module.zitadel-db.password
+  database_root_password = module.postgresql.root_password
+
+  display_on_homepage = true
+}
+
+module "zitadel-bootstrap" {
+  source = "../../infra/tenants/fourlights/zitadel"
+
+  domain = module.zitadel.server
+  jwt_profile_file = module.zitadel.jwt_profile_file
+}
+
+
+module "redis" {
+  source = "../../infra/modules/redis"
+
+  namespace = "redis"
+  k8s_config_yaml = local.k8s_config_yaml
+}
+
+module "tenant-365zon" {
+  source = "../../infra/tenants/365zon"
+
+  org_id = module.zitadel-bootstrap.org_id
+  user_id = module.zitadel-bootstrap.user_id
+  domain = module.zitadel.server
+  jwt_profile_file = module.zitadel.jwt_profile_file
+
+  minio_access_key = module.minio.minio_access_key
+  minio_secret_key = module.minio.minio_secret_key
+  minio_service_uri = module.minio.minio_api_uri
+
+  mongodb_connection_string = module.mongodb.connection_string
+  rabbitmq_connection_string = module.rabbitmq.connection_string
+}
+
+module "zitadel-argocd" {
+  source = "../../infra/tenants/argocd/zitadel"
+
+  org_id = module.zitadel-bootstrap.org_id
+  user_id = module.zitadel-bootstrap.user_id
+  domain = module.zitadel.server
+  jwt_profile_file = module.zitadel.jwt_profile_file
+
+  argocd_service_domain = "argocd.${local.cluster_dns}"
+}
+
+module "argocd" {
+  source = "../../infra/modules/argocd"
+  wait_on = module.zitadel-argocd.installed
+
+  namespace = "argocd"
+  k8s_config_yaml = local.k8s_config_yaml
+
+  redis_db_start_index = 0
+  redis_password = module.redis.password
+  server_dns = local.cluster_dns
+
+  oauth_uri = module.zitadel.server
+  oauth_client_id = module.zitadel-argocd.client_id
+  oauth_client_secret = module.zitadel-argocd.client_secret
+  oauth_redirect_uri = "https://${module.zitadel.server}/${module.zitadel-argocd.logoutSuffix}"
+  oauth_issuer = "https://${module.zitadel.server}"
+}
+
+/*
+  argocd project
+ */
+
+output "argocd-root-password" {
+  value = module.argocd.admin_password
+  sensitive = true
+}
+
+output "mongodb-connection-string" {
+  value = module.mongodb.connection_string
+  sensitive = true
+}
diff --git a/shuttles/terraform/zitadel-admin-sa.json b/shuttles/terraform/zitadel-admin-sa.json
new file mode 100755
index 0000000..a05b37c
--- /dev/null
+++ b/shuttles/terraform/zitadel-admin-sa.json
@@ -0,0 +1 @@
+{"type":"serviceaccount","keyId":"310142761184133898","key":"-----BEGIN RSA PRIVATE KEY-----\nMIIEowIBAAKCAQEApSaCjkOBVIe33bEIwENq1jGj6MgbN+NqYRN6EVDWVnESM10/\n188hB9UDCvNR5kcBbaz2bD/ymZ/ppWSLqmXONwc3/PtiOluBfkvR1q2pEh+f13wz\n72dbhOVqf+YhL3lndiQ/OupGzaCbmsBNPGag7mgaPxlgoWTWIItPnOomIhwtwXgy\nNyzt9Fmyh/4JsRlIYO90ZO32vKXABRMCGsKxvcY9CR4+LIqddns83YASGFnQ5oBo\nObc8EN2Di7uKWzNwxUJuZtFlHXp06su2mWDGJhKusHYW4KUIs2uwFtjJfAXG/adT\n8qVgi174m1jU2ocSd6o9IqDYf50arCinbgtAdwIDAQABAoIBABwrB1WQefya8Wdk\njKOOXCiQau6HQu0zYq+QDN/rM8OmoX4VR5Bdibq2QECb47otHjdAqv8noQ9G0Ske\njxvPJW8JUilaDxT5CosqD25YTGAE+NReINWSgW+XWaTa8YoRYO4rnIVF9DGaVS/9\n4K6OqqA/LUrZ3ztn4YXHfRq8bSif86GMo1GkwH8xOMJHdaxCs8YzAbpGURL03QtL\nemVNs9VwSWLmnK71FpXkko0aGi14naS7E4jv8uutykLQsc+QE7m9B4OiDkijKCP9\nQwvw/3RZYcrRuWz7uSANyxG4Uc8JhPdUIyvpkvUz8NfRLTDoSAEq1NQuxpyjLYYU\n7uzYcWECgYEAzKZ5wGTJBZafen2I61L8XAMk2df63nnEK+YuZqNZ6yH6IY7cCrlJ\n3LbeNoHNcGMXw1mf9Z9vvAjz7nbec2BYN1KRMR9QOTHcqwQZcOOJnwhdO4uAlsFZ\ngiyoLYCQP8Z6IIC4ht+2hmf8hS3CmWUPAXyLOcg4ok6SRdyNsfWiLwkCgYEAzpbL\n8szYqNY+r5n1DQ9d6zNb2cbkFfzZDxn64BA1xQZtRgxfzNAOvsGl5pPWve7oS/8Y\nmPx+1b08NvCcTuaow7CCw+IDHsI43TRNbvPQBWtINBE6eeBs3laaNvmxTZU5HGog\nt1yRtk0u64hKT7+L7Ku5JP79pxzNOIs1hnImU38CgYAaH84+/x6iNf4Ztti5oZhR\nbp1PqcB+kfC24eVeeM/LskSp8ACq5chGApoPPzaoeB3adCB1TGsJB+OLt2TiOZRJ\nS6L5MFQfWPwgYJ+Wx5UT1g+AwGgj1n7EnUrCtDy1x3Jjn8rufLRiJ/gWUCcdScdG\nm01yjNqd7YXCoUr9Qqv3cQKBgGd2klHZUbDNC7v6SQXvakP/BsM8nsJ8TWEIy+In\nfCZen59zVw9GK/xRE3s1E1kwK1rUOUd1PThie6OwQTgqwN6wqezcZl+jOcNfDGDC\n7q2oGxMohbbANQXtLXLW/nsyftXCOPxb+gXpBdSj/0ONVNCE+EaVBggJnqXw4i+h\nP5yVAoGBAIoXRgX3mSBsC/xgKIXQb4c9WT7W78IOpU43mbX9jC/emfLkOvuxR/Cv\nmJDgTv2zUq7uItbvXmxwmU7JVYlBFaWERsAqzzWUUsdfM3tBFdBbcH9fzoEG0j4u\nkqCwU1if6HTHCmunqt1ZQKN3oP1Uycn/1ZL6NR8ilqIcjCzh4JPQ\n-----END RSA PRIVATE KEY-----\n","expirationDate":"2026-01-01T00:00:00Z","userId":"310142761184068362"}