feat: add provisioner support with incus and lxc
Introduced a Provisioner enum type and refactored the VM setup logic to support multiple provisioners (incus and lxc). Added a bootstrap function for incus.
This commit is contained in:
parent
426f5d9a21
commit
24dd89d071
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
// Note: TypeScript errors related to Deno imports and namespace can be safely ignored
|
||||
// These are only relevant when running the script with the Deno runtime
|
||||
import { Command } from "https://deno.land/x/cliffy@v1.0.0-rc.4/command/mod.ts";
|
||||
import { Command, EnumType } from "https://deno.land/x/cliffy@v1.0.0-rc.4/command/mod.ts";
|
||||
import { delay } from "https://deno.land/std/async/mod.ts";
|
||||
import { exists } from "https://deno.land/std/fs/mod.ts";
|
||||
|
||||
|
|
@ -12,6 +12,8 @@ const alpineConfig = ['--profile', 'cloud-init-alpine'];
|
|||
const archImage = "archlinux/current/cloud";
|
||||
const archConfig = ['--profile', 'cloud-init-arch'];
|
||||
|
||||
const Provisioner = new EnumType(["incus", "lxc"])
|
||||
|
||||
// Static IPv4 address for the 1-based node index `i`: node 1 → 10.110.36.110,
// node 2 → 10.110.36.111, and so on.
const getIp = (i: number): string => `10.110.36.${String(109 + i)}`;
|
||||
|
||||
// Image used when launching cluster VMs; currently pinned to the Arch cloud image.
const image = archImage;
|
||||
|
|
@ -165,32 +167,7 @@ async function cleanup(vmNames: string[], shouldRemove = false): Promise<void> {
|
|||
log.success("Cleanup completed");
|
||||
}
|
||||
|
||||
const setupCluster = async (numMasters: number, forceCleanup = false) => {
|
||||
log.info(`Starting setup of k3s cluster with ${numMasters} master nodes`);
|
||||
|
||||
const createdVMs: string[] = [];
|
||||
|
||||
try {
|
||||
// Get hostname and user
|
||||
const {output: hostname} = await executeCommand(
|
||||
["hostnamectl", "hostname"],
|
||||
"get hostname"
|
||||
);
|
||||
|
||||
const {output: user} = await executeCommand(
|
||||
["whoami"],
|
||||
"get current user"
|
||||
);
|
||||
|
||||
const sshKeyPubFileName = `/home/${user}/.ssh/nl.fourlights.${hostname}.pub`;
|
||||
const sshKeyPrivateFileName = `/home/${user}/.ssh/nl.fourlights.${hostname}`;
|
||||
|
||||
// Check if SSH keys exist
|
||||
if (!await exists(sshKeyPubFileName) || !await exists(sshKeyPrivateFileName)) {
|
||||
log.error(`Required SSH keys not found: ${sshKeyPubFileName} or ${sshKeyPrivateFileName}`);
|
||||
throw new Error("SSH keys not found");
|
||||
}
|
||||
|
||||
const bootstrapIncus = async () => {
|
||||
// Step 1: Create Low-Resource Profile (if not exists)
|
||||
const {success: profileExists} = await executeCommand(
|
||||
["incus", "profile", "show", "low-resource"],
|
||||
|
|
@ -220,14 +197,20 @@ const setupCluster = async (numMasters: number, forceCleanup = false) => {
|
|||
} else {
|
||||
log.skip("Low-resource profile already exists");
|
||||
}
|
||||
}
|
||||
|
||||
// Read SSH key
|
||||
const sshKey = await Deno.readTextFile(sshKeyPubFileName);
|
||||
|
||||
// Step 3: Launch VMs (if not already running)
|
||||
for (let i = 1; i <= numMasters; i++) {
|
||||
const vmName = `k3s-master${i}`;
|
||||
const makeSureVMExists = async (method: "incus" | "lxc", idx: number, vmName: string, sshKey: string) => {
|
||||
switch (method) {
|
||||
case "incus":
|
||||
return makeSureVMExistsIncus(idx, vmName, sshKey);
|
||||
case "lxc":
|
||||
return makeSureVMExistsLxc(idx, vmName, sshKey);
|
||||
default:
|
||||
throw new Error(`Unknown VM method: ${method}`);
|
||||
}
|
||||
}
|
||||
|
||||
const makeSureVMExistsIncus = async (idx: number, vmName: string, sshKey: string) => {
|
||||
const {success: vmExists, output: vmOutput} = await executeCommand(
|
||||
["incus", "list", vmName, "--format", "csv"],
|
||||
`check if VM ${vmName} exists`,
|
||||
|
|
@ -242,7 +225,7 @@ const setupCluster = async (numMasters: number, forceCleanup = false) => {
|
|||
);
|
||||
|
||||
await executeCommand(
|
||||
["incus", "config", 'device', 'add', vmName, 'eth0', 'nic', 'nictype=bridged', 'parent=incusbr0', `ipv4.address=${getIp(i)}`],
|
||||
["incus", "config", 'device', 'add', vmName, 'eth0', 'nic', 'nictype=bridged', 'parent=incusbr0', `ipv4.address=${getIp(idx)}`],
|
||||
`configure network for VM ${vmName}`
|
||||
);
|
||||
|
||||
|
|
@ -251,9 +234,10 @@ const setupCluster = async (numMasters: number, forceCleanup = false) => {
|
|||
`start VM ${vmName}`
|
||||
);
|
||||
|
||||
createdVMs.push(vmName);
|
||||
log.success(`VM ${vmName} started`);
|
||||
} else {
|
||||
return true
|
||||
}
|
||||
|
||||
// Check if VM is running, if not, start it
|
||||
const isRunning = await isVmRunning(vmName);
|
||||
if (!isRunning) {
|
||||
|
|
@ -264,7 +248,52 @@ const setupCluster = async (numMasters: number, forceCleanup = false) => {
|
|||
);
|
||||
}
|
||||
log.skip(`VM ${vmName} already exists`);
|
||||
return false
|
||||
}
|
||||
|
||||
const makeSureVMExistsLxc = async (idx: number, vmName: string, sshKey: string) => {
|
||||
throw new Error("Not implemented yet")
|
||||
return false
|
||||
}
|
||||
|
||||
const setupCluster = async (provisioner: Provisioner, numMasters: number, forceCleanup = false) => {
|
||||
log.info(`Starting setup of k3s cluster with ${numMasters} master nodes`);
|
||||
|
||||
const createdVMs: string[] = [];
|
||||
|
||||
try {
|
||||
// Get hostname and user
|
||||
const {output: hostname} = await executeCommand(
|
||||
["hostnamectl", "hostname"],
|
||||
"get hostname"
|
||||
);
|
||||
|
||||
const {output: user} = await executeCommand(
|
||||
["whoami"],
|
||||
"get current user"
|
||||
);
|
||||
|
||||
const sshKeyPubFileName = `/home/${user}/.ssh/nl.fourlights.${hostname}.pub`;
|
||||
const sshKeyPrivateFileName = `/home/${user}/.ssh/nl.fourlights.${hostname}`;
|
||||
|
||||
// Check if SSH keys exist
|
||||
if (!await exists(sshKeyPubFileName) || !await exists(sshKeyPrivateFileName)) {
|
||||
log.error(`Required SSH keys not found: ${sshKeyPubFileName} or ${sshKeyPrivateFileName}`);
|
||||
throw new Error("SSH keys not found");
|
||||
}
|
||||
|
||||
// Bootstrap our provisioner
|
||||
if (method === "incus") await bootstrapIncus();
|
||||
|
||||
// Read SSH key
|
||||
const sshKey = await Deno.readTextFile(sshKeyPubFileName);
|
||||
|
||||
// Step 3: Launch VMs (if not already running)
|
||||
for (let i = 1; i <= numMasters; i++) {
|
||||
const vmName = `k3s-master${i}`;
|
||||
|
||||
const created = await makeSureVMExists(method, i, vmName, sshKey);
|
||||
if (created) createdVMs.push(vmName);
|
||||
}
|
||||
|
||||
// Step 4: Install k3sup (if not installed)
|
||||
|
|
@ -372,8 +401,10 @@ const setupCluster = async (numMasters: number, forceCleanup = false) => {
|
|||
await new Command()
|
||||
.name("setup-k3s-cluster")
|
||||
.version("0.1.0")
|
||||
.type("provisioner", Provisioner)
|
||||
.description("Automate the setup of an HA k3s cluster using incus and k3sup")
|
||||
.option("-p, --provisioner <provisioner:provisioner>", "Provisioner of VMs", {default: "incus"})
|
||||
.option("-m, --masters <numMasters:number>", "Number of master nodes", {default: 3})
|
||||
.option("-c, --cleanup", "Force cleanup of VMs if setup fails", {default: false})
|
||||
.action(({masters, cleanup}) => setupCluster(masters, cleanup))
|
||||
.action(({provisioner, masters, cleanup}) => setupCluster(provisioner, masters, cleanup))
|
||||
.parse(Deno.args);
|
||||
|
|
|
|||
Loading…
Reference in New Issue