feat: add provisioner support with incus and lxc
Introduce a Provisioner enum type and refactor the VM setup logic to support different provisioners (incus and lxc): VM creation now goes through a makeSureVMExists dispatcher, and the incus-specific profile setup moves into a new bootstrapIncus function. The lxc path is stubbed out for now, and the CLI gains a -p/--provisioner option that defaults to incus.
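For example, assuming the script is saved as setup-k3s-cluster.ts and run with --allow-all (both the filename and the permission flag are assumptions, not part of this commit), the new option would be used as:

    deno run --allow-all setup-k3s-cluster.ts --provisioner incus --masters 3
    deno run --allow-all setup-k3s-cluster.ts -p lxc --cleanup

Because the option defaults to incus, existing invocations behave as before.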
parent 426f5d9a21
commit 24dd89d071
@@ -2,7 +2,7 @@
 // Note: TypeScript errors related to Deno imports and namespace can be safely ignored
 // These are only relevant when running the script with the Deno runtime
-import { Command } from "https://deno.land/x/cliffy@v1.0.0-rc.4/command/mod.ts";
+import { Command, EnumType } from "https://deno.land/x/cliffy@v1.0.0-rc.4/command/mod.ts";
 import { delay } from "https://deno.land/std/async/mod.ts";
 import { exists } from "https://deno.land/std/fs/mod.ts";
 
 
@@ -12,6 +12,8 @@ const alpineConfig = ['--profile', 'cloud-init-alpine'];
 const archImage = "archlinux/current/cloud";
 const archConfig = ['--profile', 'cloud-init-arch'];
 
+const Provisioner = new EnumType(["incus", "lxc"])
+
 const getIp = (i: number) => `10.110.36.${109 + i}`;
 
 const image = archImage;
@@ -165,32 +167,7 @@ async function cleanup(vmNames: string[], shouldRemove = false): Promise<void> {
   log.success("Cleanup completed");
 }
 
-const setupCluster = async (numMasters: number, forceCleanup = false) => {
-  log.info(`Starting setup of k3s cluster with ${numMasters} master nodes`);
-
-  const createdVMs: string[] = [];
-
-  try {
-    // Get hostname and user
-    const {output: hostname} = await executeCommand(
-      ["hostnamectl", "hostname"],
-      "get hostname"
-    );
-
-    const {output: user} = await executeCommand(
-      ["whoami"],
-      "get current user"
-    );
-
-    const sshKeyPubFileName = `/home/${user}/.ssh/nl.fourlights.${hostname}.pub`;
-    const sshKeyPrivateFileName = `/home/${user}/.ssh/nl.fourlights.${hostname}`;
-
-    // Check if SSH keys exist
-    if (!await exists(sshKeyPubFileName) || !await exists(sshKeyPrivateFileName)) {
-      log.error(`Required SSH keys not found: ${sshKeyPubFileName} or ${sshKeyPrivateFileName}`);
-      throw new Error("SSH keys not found");
-    }
-
+const bootstrapIncus = async () => {
   // Step 1: Create Low-Resource Profile (if not exists)
   const {success: profileExists} = await executeCommand(
     ["incus", "profile", "show", "low-resource"],
@@ -220,14 +197,20 @@ const setupCluster = async (numMasters: number, forceCleanup = false) => {
   } else {
     log.skip("Low-resource profile already exists");
   }
+}
 
-    // Read SSH key
-    const sshKey = await Deno.readTextFile(sshKeyPubFileName);
-
-    // Step 3: Launch VMs (if not already running)
-    for (let i = 1; i <= numMasters; i++) {
-      const vmName = `k3s-master${i}`;
+const makeSureVMExists = async (method: "incus" | "lxc", idx: number, vmName: string, sshKey: string) => {
+  switch (method) {
+    case "incus":
+      return makeSureVMExistsIncus(idx, vmName, sshKey);
+    case "lxc":
+      return makeSureVMExistsLxc(idx, vmName, sshKey);
+    default:
+      throw new Error(`Unknown VM method: ${method}`);
+  }
+}
 
+const makeSureVMExistsIncus = async (idx: number, vmName: string, sshKey: string) => {
   const {success: vmExists, output: vmOutput} = await executeCommand(
     ["incus", "list", vmName, "--format", "csv"],
     `check if VM ${vmName} exists`,
@@ -242,7 +225,7 @@
     );
 
     await executeCommand(
-      ["incus", "config", 'device', 'add', vmName, 'eth0', 'nic', 'nictype=bridged', 'parent=incusbr0', `ipv4.address=${getIp(i)}`],
+      ["incus", "config", 'device', 'add', vmName, 'eth0', 'nic', 'nictype=bridged', 'parent=incusbr0', `ipv4.address=${getIp(idx)}`],
       `configure network for VM ${vmName}`
     );
 
@@ -251,9 +234,10 @@
       `start VM ${vmName}`
     );
 
-    createdVMs.push(vmName);
     log.success(`VM ${vmName} started`);
-  } else {
+    return true
+  }
+
   // Check if VM is running, if not, start it
   const isRunning = await isVmRunning(vmName);
   if (!isRunning) {
@@ -264,7 +248,52 @@
       );
   }
   log.skip(`VM ${vmName} already exists`);
+  return false
+}
+
+const makeSureVMExistsLxc = async (idx: number, vmName: string, sshKey: string) => {
+  throw new Error("Not implemented yet")
+  return false
+}
+
+const setupCluster = async (provisioner: Provisioner, numMasters: number, forceCleanup = false) => {
+  log.info(`Starting setup of k3s cluster with ${numMasters} master nodes`);
+
+  const createdVMs: string[] = [];
+
+  try {
+    // Get hostname and user
+    const {output: hostname} = await executeCommand(
+      ["hostnamectl", "hostname"],
+      "get hostname"
+    );
+
+    const {output: user} = await executeCommand(
+      ["whoami"],
+      "get current user"
+    );
+
+    const sshKeyPubFileName = `/home/${user}/.ssh/nl.fourlights.${hostname}.pub`;
+    const sshKeyPrivateFileName = `/home/${user}/.ssh/nl.fourlights.${hostname}`;
+
+    // Check if SSH keys exist
+    if (!await exists(sshKeyPubFileName) || !await exists(sshKeyPrivateFileName)) {
+      log.error(`Required SSH keys not found: ${sshKeyPubFileName} or ${sshKeyPrivateFileName}`);
+      throw new Error("SSH keys not found");
     }
+
+    // Bootstrap our provisioner
+    if (method === "incus") await bootstrapIncus();
+
+    // Read SSH key
+    const sshKey = await Deno.readTextFile(sshKeyPubFileName);
+
+    // Step 3: Launch VMs (if not already running)
+    for (let i = 1; i <= numMasters; i++) {
+      const vmName = `k3s-master${i}`;
+
+      const created = await makeSureVMExists(method, i, vmName, sshKey);
+      if (created) createdVMs.push(vmName);
     }
 
     // Step 4: Install k3sup (if not installed)
@@ -372,8 +401,10 @@
 await new Command()
   .name("setup-k3s-cluster")
   .version("0.1.0")
+  .type("provisioner", Provisioner)
   .description("Automate the setup of an HA k3s cluster using incus and k3sup")
+  .option("-p, --provisioner <provisioner:provisioner>", "Provisioner of VMs", {default: "incus"})
   .option("-m, --masters <numMasters:number>", "Number of master nodes", {default: 3})
   .option("-c, --cleanup", "Force cleanup of VMs if setup fails", {default: false})
-  .action(({masters, cleanup}) => setupCluster(masters, cleanup))
+  .action(({provisioner, masters, cleanup}) => setupCluster(provisioner, masters, cleanup))
   .parse(Deno.args);
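The lxc path is only stubbed out in this commit (makeSureVMExistsLxc throws "Not implemented yet"). Below is a minimal sketch of what that branch could look like, assuming the LXD lxc client mirrors the incus subcommands used above and reusing the script's existing helpers (executeCommand, log, getIp, image). The lxdbr0 bridge name, the images: remote prefix, the --vm flag, and the omission of the cloud-init/SSH-key wiring are all assumptions, not part of this commit.

// Hypothetical sketch only -- not part of this commit; mirrors the incus flow with the `lxc` client.
const makeSureVMExistsLxc = async (idx: number, vmName: string, sshKey: string) => {
  const {success: vmExists} = await executeCommand(
    ["lxc", "list", vmName, "--format", "csv"],
    `check if VM ${vmName} exists`,
  );

  if (vmExists) {
    log.skip(`VM ${vmName} already exists`);
    return false;
  }

  // Create the instance as a VM without starting it, so the NIC can be attached first.
  await executeCommand(
    ["lxc", "init", `images:${image}`, vmName, "--vm"],
    `create VM ${vmName}`,
  );
  await executeCommand(
    ["lxc", "config", "device", "add", vmName, "eth0", "nic", "nictype=bridged", "parent=lxdbr0", `ipv4.address=${getIp(idx)}`],
    `configure network for VM ${vmName}`,
  );
  await executeCommand(["lxc", "start", vmName], `start VM ${vmName}`);

  // TODO: inject sshKey, presumably via a cloud-init profile as on the incus side.
  log.success(`VM ${vmName} started`);
  return true;
};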