Compare commits
5 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 4c390fa05a | |
| | f18e1c55e3 | |
| | ed1eef9db0 | |
| | f17e210f3e | |
| | 4c4e74ff8d | |
@@ -1,9 +1,8 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <project version="4">
 <component name="TemplateDataLanguageMappings">
+<file url="file://$PROJECT_DIR$/infra/modules/argocd/values.yaml" dialect="yaml" />
 <file url="file://$PROJECT_DIR$/infra/modules/fusionauth/values.yaml" dialect="yaml" />
 <file url="file://$PROJECT_DIR$/infra/modules/mongodb/values.yaml" dialect="yaml" />
-<file url="file://$PROJECT_DIR$/infra/modules/monitoring/monitoring-values.yaml.tftpl" dialect="TFTPL" />
-<file url="file://$PROJECT_DIR$/infra/modules/zot/values.yaml.tftpl" dialect="TFTPL" />
 </component>
 </project>
@@ -2,10 +2,5 @@
 <project version="4">
 <component name="TerraformProjectSettings">
 <option name="toolPath" value="/usr/bin/terraform" />
-<option name="ignoredTemplateCandidatePaths">
-<set>
-<option value="file://$PROJECT_DIR$/infra/modules/monitoring/monitoring-values.yaml.tftpl" />
-</set>
-</option>
 </component>
 </project>
@@ -2,6 +2,5 @@
 <project version="4">
 <component name="VcsDirectoryMappings">
 <mapping directory="" vcs="Git" />
-<mapping directory="$PROJECT_DIR$/shuttles/terraform/terraform-provider-helm" vcs="Git" />
 </component>
 </project>
@@ -1,70 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project version="4">
-<component name="AutoImportSettings">
-<option name="autoReloadType" value="SELECTIVE" />
-</component>
-<component name="ChangeListManager">
-<list default="true" id="009bc178-658e-4c81-9bb8-8d7bf6b8cbc6" name="Changes" comment="">
-<change beforePath="$PROJECT_DIR$/../infra/clusters/app-365zon/.terraform/terraform.tfstate" beforeDir="false" afterPath="$PROJECT_DIR$/../infra/clusters/app-365zon/.terraform/terraform.tfstate" afterDir="false" />
-<change beforePath="$PROJECT_DIR$/../infra/clusters/app-365zon/main.tf" beforeDir="false" afterPath="$PROJECT_DIR$/../infra/clusters/app-365zon/main.tf" afterDir="false" />
-<change beforePath="$PROJECT_DIR$/../infra/modules/mijn-365zon-nl/main.tf" beforeDir="false" afterPath="$PROJECT_DIR$/../infra/modules/mijn-365zon-nl/main.tf" afterDir="false" />
-<change beforePath="$PROJECT_DIR$/../infra/modules/minio/main.tf" beforeDir="false" afterPath="$PROJECT_DIR$/../infra/modules/minio/main.tf" afterDir="false" />
-<change beforePath="$PROJECT_DIR$/../infra/modules/minio/values.yaml.tftpl" beforeDir="false" afterPath="$PROJECT_DIR$/../infra/modules/minio/values.yaml.tftpl" afterDir="false" />
-<change beforePath="$PROJECT_DIR$/../infra/modules/minio/variables.tf" beforeDir="false" afterPath="$PROJECT_DIR$/../infra/modules/minio/variables.tf" afterDir="false" />
-<change beforePath="$PROJECT_DIR$/../infra/modules/mongodb/values.yaml" beforeDir="false" afterPath="$PROJECT_DIR$/../infra/modules/mongodb/values.yaml" afterDir="false" />
-</list>
-<option name="SHOW_DIALOG" value="false" />
-<option name="HIGHLIGHT_CONFLICTS" value="true" />
-<option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
-<option name="LAST_RESOLUTION" value="IGNORE" />
-</component>
-<component name="Git.Settings">
-<option name="RECENT_GIT_ROOT_PATH" value="$PROJECT_DIR$/.." />
-</component>
-<component name="ProjectColorInfo">{
-"associatedIndex": 1
-}</component>
-<component name="ProjectId" id="2oqTXEtODybqnAKfjaqPi9uslRP" />
-<component name="ProjectViewState">
-<option name="hideEmptyMiddlePackages" value="true" />
-<option name="showLibraryContents" value="true" />
-</component>
-<component name="PropertiesComponent"><![CDATA[{
-"keyToString": {
-"RunOnceActivity.ShowReadmeOnStart": "true",
-"RunOnceActivity.git.unshallow": "true",
-"git-widget-placeholder": "main",
-"last_opened_file_path": "/home/lamelos/Projects/fourlights/devops",
-"node.js.detected.package.eslint": "true",
-"node.js.detected.package.tslint": "true",
-"node.js.selected.package.eslint": "(autodetect)",
-"node.js.selected.package.tslint": "(autodetect)",
-"nodejs_package_manager_path": "npm",
-"vue.rearranger.settings.migration": "true"
-}
-}]]></component>
-<component name="SharedIndexes">
-<attachedChunks>
-<set>
-<option value="bundled-js-predefined-d6986cc7102b-deb605915726-JavaScript-WS-243.22562.222" />
-</set>
-</attachedChunks>
-</component>
-<component name="SpellCheckerSettings" RuntimeDictionaries="0" Folders="0" CustomDictionaries="0" DefaultDictionary="application-level" UseSingleDictionary="true" transferred="true" />
-<component name="TaskManager">
-<task active="true" id="Default" summary="Default task">
-<changelist id="009bc178-658e-4c81-9bb8-8d7bf6b8cbc6" name="Changes" comment="" />
-<created>1731596143702</created>
-<option name="number" value="Default" />
-<option name="presentableId" value="Default" />
-<updated>1731596143702</updated>
-<workItem from="1731596144788" duration="1417000" />
-<workItem from="1736261138378" duration="1228000" />
-<workItem from="1736775177111" duration="7000" />
-</task>
-<servers />
-</component>
-<component name="TypeScriptGeneratedFilesManager">
-<option name="version" value="3" />
-</component>
-</project>
@@ -2,22 +2,22 @@
|
||||||
# Manual edits may be lost in future updates.
|
# Manual edits may be lost in future updates.
|
||||||
|
|
||||||
provider "registry.terraform.io/aminueza/minio" {
|
provider "registry.terraform.io/aminueza/minio" {
|
||||||
version = "3.3.0"
|
version = "2.5.1"
|
||||||
constraints = "~> 3.3.0"
|
constraints = "~> 2.5.0"
|
||||||
hashes = [
|
hashes = [
|
||||||
"h1:apkVsmgFVWd1jpCMnPR1Kd8WJB2UkYRiS2kc1Meefz8=",
|
"h1:03gfmXf78G9h9XCHwavPwAwCjg1xmQIp4e5aAv6xIbI=",
|
||||||
"zh:0c0ac1602465eaeb7045410a8ad22ee6eb82233f7bfda78bb07c58d3697bf62b",
|
"zh:0710a1fcd8e3501237990344160b0193860c2e643e73c728bf832e3d3fde971a",
|
||||||
"zh:26a097f7523b222bb7808b76ec9fdac8c5974e440dc2438d16ef8fa4562bf297",
|
"zh:0b2f25fbb59d056299faec7fb09012ef0545bd25e7ffa55a04a5c10c28908713",
|
||||||
"zh:47ade5b7a7ce2755291e0e4ae2125298bef682190a9917a4ca384edb23a338f4",
|
"zh:0e0179fe12c855bcf5dbcf6858373eaa6e9dd790010096a3fcc667a23224388d",
|
||||||
"zh:74769c5e1615b3fc930b9acc2f966dd7c053c31146d1eca19666bee8a7512088",
|
"zh:23f6118cefb2fae443de98197490a9ba56fa51f1e324d1811709e0fdfc22ed7d",
|
||||||
"zh:8b4a72b05ce50f41b6b1c8f2cd692509c0814f2d5fb4869a98e42b9eb22430f3",
|
"zh:34875cbaf07fbed8b8c639f38146f19188e57fc2eac4cdeac638b3d675b82ad4",
|
||||||
"zh:8bcb1c844ab14b780c4547834d4f7755e4f2ac643f8061e8cfaa98becc6a78b5",
|
"zh:5b0fc4934533557af0001630801e9e637ab0e1588fd086f0cd04a52f4a13474f",
|
||||||
"zh:8e44bae37b1f984908f427d64154c090136b6e8b8e0c1229df7b03945e59509d",
|
"zh:5d8eda5611ce4017688694e566c00609508a2c3a0e9aa587f6a58dcd1cb9846c",
|
||||||
"zh:974bba8e806aa3265ddc558657f93b0465877a8687f691d366dd34a90e059f97",
|
"zh:70855ab6327a1b79b0619d0ed3538513f98fdfadae6fe60e986dbbf2891151f8",
|
||||||
"zh:a5d029fb41b6e0f1f4d742e326918e725f3ebd57cb5170fdb39f82a26ce52d1c",
|
"zh:7330d66c56a67a4c36f2fc2f1d7042503f5b4d0ec66a9bbe2b72920fb56b85de",
|
||||||
"zh:a7a405574406ff525105880ca7b86614b5bced701f74b7e748d8e2976b5880cd",
|
"zh:764597f7be92426cd63f7ae82d2845a1f2677d2b86921f19facf93fdbb80f503",
|
||||||
"zh:f83cf18fd194129ca06dfa3fc5bc8b951df71fa04cdbcb0bf651277f06c17d5f",
|
"zh:7dd947c72366377a16adc7bf0c0d09c32ade09dcedbcbf411da057ca970fb9e8",
|
||||||
"zh:fa2eefadf213ad934c75bc1679408ad4b765ff86853b473f837ad83b68a44c77",
|
"zh:9db57839cdc1d667271d5589ca4d9e791b665c0248e37c9ccdc79c0cef39aaed",
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@@ -39,183 +39,78 @@ provider "registry.terraform.io/argoproj-labs/argocd" {
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
provider "registry.terraform.io/hashicorp/external" {
|
|
||||||
version = "2.3.5"
|
|
||||||
hashes = [
|
|
||||||
"h1:smKSos4zs57pJjQrNuvGBpSWth2el9SgePPbPHo0aps=",
|
|
||||||
"zh:6e89509d056091266532fa64de8c06950010498adf9070bf6ff85bc485a82562",
|
|
||||||
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
|
|
||||||
"zh:86868aec05b58dc0aa1904646a2c26b9367d69b890c9ad70c33c0d3aa7b1485a",
|
|
||||||
"zh:a2ce38fda83a62fa5fb5a70e6ca8453b168575feb3459fa39803f6f40bd42154",
|
|
||||||
"zh:a6c72798f4a9a36d1d1433c0372006cc9b904e8cfd60a2ae03ac5b7d2abd2398",
|
|
||||||
"zh:a8a3141d2fc71c86bf7f3c13b0b3be8a1b0f0144a47572a15af4dfafc051e28a",
|
|
||||||
"zh:aa20a1242eb97445ad26ebcfb9babf2cd675bdb81cac5f989268ebefa4ef278c",
|
|
||||||
"zh:b58a22445fb8804e933dcf835ab06c29a0f33148dce61316814783ee7f4e4332",
|
|
||||||
"zh:cb5626a661ee761e0576defb2a2d75230a3244799d380864f3089c66e99d0dcc",
|
|
||||||
"zh:d1acb00d20445f682c4e705c965e5220530209c95609194c2dc39324f3d4fcce",
|
|
||||||
"zh:d91a254ba77b69a29d8eae8ed0e9367cbf0ea6ac1a85b58e190f8cb096a40871",
|
|
||||||
"zh:f6592327673c9f85cdb6f20336faef240abae7621b834f189c4a62276ea5db41",
|
|
||||||
]
|
|
||||||
}
|
|
||||||
|
|
||||||
provider "registry.terraform.io/hashicorp/helm" {
|
provider "registry.terraform.io/hashicorp/helm" {
|
||||||
version = "3.0.2"
|
version = "2.16.1"
|
||||||
constraints = ">= 2.0.0"
|
|
||||||
hashes = [
|
hashes = [
|
||||||
"h1:+tHGl509bhyUrvvj9GQTBsdK+ImHJnRuo6ppDZPavqY=",
|
"h1:TerRBdq69SxIWg3ET2VE0bcP0BYRIWZOp1QxXj/14Fk=",
|
||||||
"zh:2778de76c7dfb2e85c75fe6de3c11172a25551ed499bfb9e9f940a5be81167b0",
|
"zh:0003f6719a32aee9afaeeb001687fc0cfc8c2d5f54861298cf1dc5711f3b4e65",
|
||||||
"zh:3b4c436a41e4fbae5f152852a9bd5c97db4460af384e26977477a40adf036690",
|
"zh:16cd5bfee09e7bb081b8b4470f31a9af508e52220fd97fd81c6dda725d9422fe",
|
||||||
"zh:617a372f5bb2288f3faf5fd4c878a68bf08541cf418a3dbb8a19bc41ad4a0bf2",
|
"zh:51817de8fdc2c2e36785f23fbf4ec022111bd1cf7679498c16ad0ad7471c16db",
|
||||||
"zh:84de431479548c96cb61c495278e320f361e80ab4f8835a5425ece24a9b6d310",
|
"zh:51b95829b2873be40a65809294bffe349e40cfccc3ff6fee0f471d01770e0ebd",
|
||||||
"zh:8b4cf5f81d10214e5e1857d96cff60a382a22b9caded7f5d7a92e5537fc166c1",
|
"zh:56b158dde897c47e1460181fc472c3e920aa23db40579fdc2aad333c1456d2dd",
|
||||||
"zh:baeb26a00ffbcf3d507cdd940b2a2887eee723af5d3319a53eec69048d5e341e",
|
"zh:916641d26c386959eb982e680028aa677b787687ef7c1283241e45620bc8df50",
|
||||||
"zh:ca05a8814e9bf5fbffcd642df3a8d9fae9549776c7057ceae6d6f56471bae80f",
|
"zh:aec15ca8605babba77b283f2ca35daca53e006d567e1c3a3daf50497035b820b",
|
||||||
"zh:ca4bf3f94dedb5c5b1a73568f2dad7daf0ef3f85e688bc8bc2d0e915ec148366",
|
"zh:c2cecf710b87c8f3a4d186da2ea12cf08041f97ae0c6db82649720d6ed929d65",
|
||||||
"zh:d331f2129fd3165c4bda875c84a65555b22eb007801522b9e017d065ac69b67e",
|
"zh:dbdd96f17aea25c7db2d516ab8172a5e683c6686c72a1a44173d2fe96319be39",
|
||||||
"zh:e583b2b478dde67da28e605ab4ef6521c2e390299b471d7d8ef05a0b608dcdad",
|
"zh:de11e180368434a796b1ab6f20fde7554dc74f7800e063b8e4c8ec3a86d0be63",
|
||||||
"zh:f238b86611647c108c073d265f8891a2738d3158c247468ae0ff5b1a3ac4122a",
|
|
||||||
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
|
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
|
||||||
|
"zh:f827a9c1540d210c56053a2d5d5a6abda924896ffa8eeedc94054cf6d44c5f60",
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
provider "registry.terraform.io/hashicorp/kubernetes" {
|
provider "registry.terraform.io/hashicorp/kubernetes" {
|
||||||
version = "2.31.0"
|
version = "2.34.0"
|
||||||
constraints = ">= 2.0.0, 2.31.0"
|
|
||||||
hashes = [
|
hashes = [
|
||||||
"h1:wGHbATbv/pBVTST1MtEn0zyVhZbzZJD2NYq2EddASHY=",
|
"h1:QOiO85qZnkUm7kAtuPkfblchuKPWUqRdNVWE5agpr8k=",
|
||||||
"zh:0d16b861edb2c021b3e9d759b8911ce4cf6d531320e5dc9457e2ea64d8c54ecd",
|
"zh:076b451dc8629c49f4260de6d43595e98ac5f1bdbebb01d112659ef94d99451f",
|
||||||
"zh:1bad69ed535a5f32dec70561eb481c432273b81045d788eb8b37f2e4a322cc40",
|
"zh:0c29855dbd3c6ba82fce680fa5ac969d4e09e20fecb4ed40166b778bd19895a4",
|
||||||
"zh:43c58e3912fcd5bb346b5cb89f31061508a9be3ca7dd4cd8169c066203bcdfb3",
|
"zh:583b4dfcea4d8392dd7904c00b2ff41bbae78d238e8b72e5ad580370a24a4ecb",
|
||||||
"zh:4778123da9206918a92dfa73cc711475d2b9a8275ff25c13a30513c523ac9660",
|
"zh:5e20844d8d1af052381d00de4febd4055ad0f3c3c02795c361265b9ef72a1075",
|
||||||
"zh:8bfa67d2db03b3bfae62beebe6fb961aee8d91b7a766efdfe4d337b33dfd23dd",
|
"zh:766b7ab7c4727c62b5887c3922e0467c4cc355ba0dc3aabe465ebb86bc1caabb",
|
||||||
"zh:9020bb5729db59a520ade5e24984b737e65f8b81751fbbd343926f6d44d22176",
|
"zh:776a5000b441d7c8262d17d4a4aa4aa9760ae64de4cb7172961d9e007e0be1e5",
|
||||||
"zh:90431dbfc5b92498bfbce38f0b989978c84421a6c33245b97788a46b563fbd6e",
|
"zh:7838f509235116e55adeeecbe6def3da1b66dd3c4ce0de02fc7dc66a60e1d630",
|
||||||
"zh:b71a061dda1244f6a52500e703a9524b851e7b11bbf238c17bbd282f27d51cb2",
|
"zh:931e5581ec66c145c1d29198bd23fddc8d0c5cbf4cda22e02dba65644c7842f2",
|
||||||
"zh:d6232a7651b834b89591b94bf4446050119dcde740247e6083a4d55a2cefd28a",
|
"zh:95e728efa2a31a63b879fd093507466e509e3bfc9325eb35ea3dc28fed15c6f7",
|
||||||
"zh:d89fba43e699e28e2b5e92fff2f75fc03dbc8de0df9dacefe1a8836f8f430753",
|
"zh:972b9e3ca2b6a1057dcf5003fc78cabb0dd8847580bddeb52d885ebd64df38ea",
|
||||||
"zh:ef85c0b744f5ba1b10dadc3c11e331ba4225c45bb733e024d7218c24b02b0512",
|
"zh:ef6114217965d55f5bddbd7a316b8f85f15b8a77c075fcbed95813039d522e0a",
|
||||||
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
|
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
provider "registry.terraform.io/hashicorp/local" {
|
|
||||||
version = "2.5.3"
|
|
||||||
hashes = [
|
|
||||||
"h1:1Nkh16jQJMp0EuDmvP/96f5Unnir0z12WyDuoR6HjMo=",
|
|
||||||
"zh:284d4b5b572eacd456e605e94372f740f6de27b71b4e1fd49b63745d8ecd4927",
|
|
||||||
"zh:40d9dfc9c549e406b5aab73c023aa485633c1b6b730c933d7bcc2fa67fd1ae6e",
|
|
||||||
"zh:6243509bb208656eb9dc17d3c525c89acdd27f08def427a0dce22d5db90a4c8b",
|
|
||||||
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
|
|
||||||
"zh:885d85869f927853b6fe330e235cd03c337ac3b933b0d9ae827ec32fa1fdcdbf",
|
|
||||||
"zh:bab66af51039bdfcccf85b25fe562cbba2f54f6b3812202f4873ade834ec201d",
|
|
||||||
"zh:c505ff1bf9442a889ac7dca3ac05a8ee6f852e0118dd9a61796a2f6ff4837f09",
|
|
||||||
"zh:d36c0b5770841ddb6eaf0499ba3de48e5d4fc99f4829b6ab66b0fab59b1aaf4f",
|
|
||||||
"zh:ddb6a407c7f3ec63efb4dad5f948b54f7f4434ee1a2607a49680d494b1776fe1",
|
|
||||||
"zh:e0dafdd4500bec23d3ff221e3a9b60621c5273e5df867bc59ef6b7e41f5c91f6",
|
|
||||||
"zh:ece8742fd2882a8fc9d6efd20e2590010d43db386b920b2a9c220cfecc18de47",
|
|
||||||
"zh:f4c6b3eb8f39105004cf720e202f04f57e3578441cfb76ca27611139bc116a82",
|
|
||||||
]
|
|
||||||
}
|
|
||||||
|
|
||||||
provider "registry.terraform.io/hashicorp/null" {
|
|
||||||
version = "3.2.4"
|
|
||||||
hashes = [
|
|
||||||
"h1:hkf5w5B6q8e2A42ND2CjAvgvSN3puAosDmOJb3zCVQM=",
|
|
||||||
"zh:59f6b52ab4ff35739647f9509ee6d93d7c032985d9f8c6237d1f8a59471bbbe2",
|
|
||||||
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
|
|
||||||
"zh:795c897119ff082133150121d39ff26cb5f89a730a2c8c26f3a9c1abf81a9c43",
|
|
||||||
"zh:7b9c7b16f118fbc2b05a983817b8ce2f86df125857966ad356353baf4bff5c0a",
|
|
||||||
"zh:85e33ab43e0e1726e5f97a874b8e24820b6565ff8076523cc2922ba671492991",
|
|
||||||
"zh:9d32ac3619cfc93eb3c4f423492a8e0f79db05fec58e449dee9b2d5873d5f69f",
|
|
||||||
"zh:9e15c3c9dd8e0d1e3731841d44c34571b6c97f5b95e8296a45318b94e5287a6e",
|
|
||||||
"zh:b4c2ab35d1b7696c30b64bf2c0f3a62329107bd1a9121ce70683dec58af19615",
|
|
||||||
"zh:c43723e8cc65bcdf5e0c92581dcbbdcbdcf18b8d2037406a5f2033b1e22de442",
|
|
||||||
"zh:ceb5495d9c31bfb299d246ab333f08c7fb0d67a4f82681fbf47f2a21c3e11ab5",
|
|
||||||
"zh:e171026b3659305c558d9804062762d168f50ba02b88b231d20ec99578a6233f",
|
|
||||||
"zh:ed0fe2acdb61330b01841fa790be00ec6beaac91d41f311fb8254f74eb6a711f",
|
|
||||||
]
|
|
||||||
}
|
|
||||||
|
|
||||||
provider "registry.terraform.io/hashicorp/random" {
|
provider "registry.terraform.io/hashicorp/random" {
|
||||||
version = "3.7.2"
|
version = "3.6.3"
|
||||||
hashes = [
|
hashes = [
|
||||||
"h1:356j/3XnXEKr9nyicLUufzoF4Yr6hRy481KIxRVpK0c=",
|
"h1:Fnaec9vA8sZ8BXVlN3Xn9Jz3zghSETIKg7ch8oXhxno=",
|
||||||
"zh:14829603a32e4bc4d05062f059e545a91e27ff033756b48afbae6b3c835f508f",
|
"zh:04ceb65210251339f07cd4611885d242cd4d0c7306e86dda9785396807c00451",
|
||||||
"zh:1527fb07d9fea400d70e9e6eb4a2b918d5060d604749b6f1c361518e7da546dc",
|
"zh:448f56199f3e99ff75d5c0afacae867ee795e4dfda6cb5f8e3b2a72ec3583dd8",
|
||||||
"zh:1e86bcd7ebec85ba336b423ba1db046aeaa3c0e5f921039b3f1a6fc2f978feab",
|
"zh:4b4c11ccfba7319e901df2dac836b1ae8f12185e37249e8d870ee10bb87a13fe",
|
||||||
"zh:24536dec8bde66753f4b4030b8f3ef43c196d69cccbea1c382d01b222478c7a3",
|
"zh:4fa45c44c0de582c2edb8a2e054f55124520c16a39b2dfc0355929063b6395b1",
|
||||||
"zh:29f1786486759fad9b0ce4fdfbbfece9343ad47cd50119045075e05afe49d212",
|
"zh:588508280501a06259e023b0695f6a18149a3816d259655c424d068982cbdd36",
|
||||||
"zh:4d701e978c2dd8604ba1ce962b047607701e65c078cb22e97171513e9e57491f",
|
"zh:737c4d99a87d2a4d1ac0a54a73d2cb62974ccb2edbd234f333abd079a32ebc9e",
|
||||||
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
|
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
|
||||||
"zh:7b8434212eef0f8c83f5a90c6d76feaf850f6502b61b53c329e85b3b281cba34",
|
"zh:a357ab512e5ebc6d1fda1382503109766e21bbfdfaa9ccda43d313c122069b30",
|
||||||
"zh:ac8a23c212258b7976e1621275e3af7099e7e4a3d4478cf8d5d2a27f3bc3e967",
|
"zh:c51bfb15e7d52cc1a2eaec2a903ac2aff15d162c172b1b4c17675190e8147615",
|
||||||
"zh:b516ca74431f3df4c6cf90ddcdb4042c626e026317a33c53f0b445a3d93b720d",
|
"zh:e0951ee6fa9df90433728b96381fb867e3db98f66f735e0c3e24f8f16903f0ad",
|
||||||
"zh:dc76e4326aec2490c1600d6871a95e78f9050f9ce427c71707ea412a2f2f1a62",
|
"zh:e3cdcb4e73740621dabd82ee6a37d6cfce7fee2a03d8074df65086760f5cf556",
|
||||||
"zh:eac7b63e86c749c7d48f527671c7aee5b4e26c10be6ad7232d6860167f99dbb0",
|
"zh:eff58323099f1bd9a0bec7cb04f717e7f1b2774c7d612bf7581797e1622613a0",
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
provider "registry.terraform.io/hashicorp/vault" {
|
provider "registry.terraform.io/hashicorp/vault" {
|
||||||
version = "5.1.0"
|
version = "4.5.0"
|
||||||
hashes = [
|
hashes = [
|
||||||
"h1:x9cfzSpsdqUfrKM4qD/Mfqvy66ZWKrLtA+PZx7HhqZ8=",
|
"h1:oKiQcEqj/HTCMzgGtZ531D/jnnM0i7iguSM8pU7aK8U=",
|
||||||
"zh:121c84975a3732d2c68db6b555c37a4520f3c283fd916e25e472e784518662a7",
|
"zh:0a9301aa6a9b59db97682be568329526033bb50a4a308ad695c2a1877c1241c3",
|
||||||
"zh:2a80da4424db091d3b9846a569b0ae3e60f3e95b0a988ff94f3986391a62c93d",
|
"zh:0f8fee69ea4eaa27b86a391edc7de8e8b215e3c48f7074bab799986d5f707014",
|
||||||
"zh:2bcb3aadf97aecf0b9f98393affd766b929eafd3cb68ed4f26419405c3e8ec64",
|
"zh:2a2e51fe280e07700920bc8ed29b77e5c79fada0e4d5315d55ec0d2893bb5eed",
|
||||||
"zh:5a5f11db49784e9be251fbad2bb3a46c5f9999ab4e1ea7940f120b3743afca28",
|
"zh:3fc7d9016bebe26a4c779ce6b87b181ed6a1af12499419726b8b0a0e3eaa7234",
|
||||||
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
|
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
|
||||||
"zh:976b000cf6fa75c33c3b3a3c2e5c67a8c1958744b0521a1f807de9f8855dc961",
|
"zh:813a9e4875e58dbca2526b3088c0f76dbb2a66b10b910497a0b703518eaa73cd",
|
||||||
"zh:a4321ce4ff9422d90c3c85bb4835a84563eb761d0714d1db9c81ca810a48fd7c",
|
"zh:889ed6f21b94f89b8cbc4224454ced01a2792f12f53379d2fb1a2f2749bf624a",
|
||||||
"zh:a7464751eaf0b9cc7afb03e098d7efcf8c559215f3de7f34a56458e75709c94d",
|
"zh:acf9c01d403584015005083e64d8479d167e4f54e87e540311010133fcb5b023",
|
||||||
"zh:ae434febd2590e58040308d18bf772f796b2fad90670be263acdd447db8fb106",
|
"zh:b377945a4b6a75c79793cb92c873aacc9c087c2a6e5792a1613f3aa2f3693848",
|
||||||
"zh:c61a27d8c9daa483feb4e3fecd42fa9f2887c5588433bb15df6d62572a7bb6f4",
|
"zh:be243567b2a76ba2a546449e89764f707477cf25dcdd6d7f3b808ddf40aaf9f6",
|
||||||
"zh:dd2e8bdc76f09f8a47485d129140cd6126ad722014f6704ad5d8c4f18014571d",
|
"zh:d879fa16f391fb75b779067c751f3b8f80f5f4d73b2ff86814662038878a0ce4",
|
||||||
"zh:f15d32b1eaeb419533e586b0c2c1e2b2f732925b3a094e31e9669cd6e6e735f0",
|
"zh:e47fb3daac933f5dcb034379fe77c0bf834512da7348e7643457c9af3b2ab36b",
|
||||||
]
|
|
||||||
}
|
|
||||||
|
|
||||||
provider "registry.terraform.io/public-cloud-wl/slugify" {
|
|
||||||
version = "0.1.1"
|
|
||||||
constraints = "0.1.1"
|
|
||||||
hashes = [
|
|
||||||
"h1:iOJEMYX1bLfUnKjSxluQkKijr5NgWSqb2lU9Ag2Q12w=",
|
|
||||||
"zh:13f77dedcc74256053ac51512372510d722116bf58e119fac203fe599d667720",
|
|
||||||
"zh:2223be634f684f76e265efdaafdf95a948ba9e44f09f8a89540bdb564eff17f1",
|
|
||||||
"zh:73e8b763c796d57186756cf0bab75323e2d92c873f1df8eccd8a7e336a2e3e81",
|
|
||||||
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
|
|
||||||
"zh:9f83adcf17de03afb5c27111cb26c580dc5296dffd40fca4571e81ad0bad3bad",
|
|
||||||
"zh:a5414ade8cbae9aea10dee79e43da247ceecb7e4a54e76d39906ee60b7365a7d",
|
|
||||||
"zh:bd118ead731e129c92c0dfe3c9a2ebbd8fa25ba6508deaaaccb9ac3a7f70af2d",
|
|
||||||
"zh:c8ce48ad921956edcee0643cb6184442f3deb438e5605a53794dfd6e8f89a559",
|
|
||||||
"zh:d96da8a32ef2b807ed3bd943294c6e1d0bd5fc3a793deb762f74d0c54aeff335",
|
|
||||||
"zh:e30a218b474afe082e005faf51c323ed8747d46845bfacab4cd3adc0c51704ec",
|
|
||||||
"zh:e3cd265c38da6e65974ac1b9b6be608ba0534178f16f059ad13672de6846e32e",
|
|
||||||
"zh:f2ded7f8c771a603ad3e2df84986b5f175c38049b7a9ab4a3cd384abafb33dff",
|
|
||||||
"zh:f2ece1996cf686583afd19384041204a32e08389dc6f4f105501584e653e797d",
|
|
||||||
"zh:fa2418b74cea55d29dad24f5095aaf30d6253d63ebac3c0c47949b3de8087c88",
|
|
||||||
"zh:fdc8d3fbca6a19db203802e7a7337075e39b9ffb7a3887a7583e379be61bde17",
|
|
||||||
]
|
|
||||||
}
|
|
||||||
|
|
||||||
provider "registry.terraform.io/zitadel/zitadel" {
|
|
||||||
version = "2.0.2"
|
|
||||||
constraints = "2.0.2"
|
|
||||||
hashes = [
|
|
||||||
"h1:iymeaNBrZ4smcr7eHrxO4gbXQ6bx/enKyj3RQ6xZRYA=",
|
|
||||||
"zh:01e16af0dda9372696b5e1d43ec709aed79829b49ee69a4f9606a248752f672d",
|
|
||||||
"zh:048c4e726fb846cfe9ab0a0a1f86d3f8922442154b086e2bd8e389b32f69f2f0",
|
|
||||||
"zh:3a3f6bea621c9d480f1f288cffebace8620979b9260cfeae8f9af5d9a25ed490",
|
|
||||||
"zh:4d349e584786589bc2037cee691ff1678296f5351e6491aa34dcb08ecbe1dcb7",
|
|
||||||
"zh:80741c78179788be8d7e33e471e1311197cd4e1067803d438463d0a8ac871a60",
|
|
||||||
"zh:89178d30f5ec49551e6a6ebc5eb589ab6631012dcec0d03ea7130b1029890e51",
|
|
||||||
"zh:94cd3b1fe3d1d39bcb3b70208b044bde4c5ce5152e12b29f0fa0ff1085e12863",
|
|
||||||
"zh:97299c172ada852705f8ca9fa91eeee12c6259263baae3ca53cf41e3130b1731",
|
|
||||||
"zh:a33d53acc640dc93b81352ba633cf392bc8c7614a72d320d59d3dcdb22d73fc4",
|
|
||||||
"zh:a95c15960baf8157f79a6490361455767d48e4dd3ce2ef1d0051743f6152733b",
|
|
||||||
"zh:ae66ad95c7039e6ef844c39389c9077ce7dbb501b6af02afb26a223fd289dbcb",
|
|
||||||
"zh:b8a9cb3b53653c06d52607368c406112ee1abc6d66dc4aedaedddbb46a66ea8f",
|
|
||||||
"zh:d48693ecdc985bb4167af0c3164240c13c4ea48167d28f706e7893cbdb20540a",
|
|
||||||
"zh:f6db1ec30bfbcf4423ab2d29979b775423ba37008fd48a766b5a1cf87a131859",
|
|
||||||
"zh:fed4e95dc9aaf361c8ff57f819d31fa25152b9e6cb90b7202d8be9ab1446b081",
|
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@@ -1,6 +1,6 @@
 {
 "version": 3,
-"terraform_version": "1.12.1",
+"terraform_version": "1.10.0",
 "backend": {
 "type": "s3",
 "config": {
@@ -1,20 +0,0 @@
-apiVersion: v1
-kind: Config
-clusters:
-- name: "app-365zon"
-cluster:
-server: "https://rancher.bridge.fourlights.dev/k8s/clusters/c-m-fh8pgmtb"
-
-users:
-- name: "app-365zon"
-user:
token: "kubeconfig-user-9slw5nc6r2:7xq7zjsdfsvnjx9wnhh78r8nwmgqjfk4272mjsc4vwzhh5tcqv5swb"
-
-
-contexts:
-- name: "app-365zon"
-context:
-user: "app-365zon"
-cluster: "app-365zon"
-
-current-context: "app-365zon"
@@ -1,20 +0,0 @@
-apiVersion: v1
-kind: Config
-clusters:
-- name: "app-365zon"
-cluster:
-server: "https://rancher.bridge.fourlights.dev/k8s/clusters/c-m-fh8pgmtb"
-
-users:
-- name: "app-365zon"
-user:
token: "kubeconfig-user-9slw5k78ws:dvh4q8rn5d6w9tqc5vpl6mnnr8p9b7pxzjn9n6sxmxqqnhpqc54dzz"
-
-
-contexts:
-- name: "app-365zon"
-context:
-user: "app-365zon"
-cluster: "app-365zon"
-
-current-context: "app-365zon"
@@ -8,32 +8,30 @@ locals {
 
 module "cluster-init" {
 source          = "../../modules/cluster/init-rke2"
-k8s_config_yaml = local.k8s_config_yaml
+k8s_config_yaml = data.minio_s3_object.k8s_yaml.content
 }
 
 module "minio" {
 source          = "../../modules/minio"
 wait_on         = module.cluster-init.installed
-k8s_config_yaml = local.k8s_config_yaml
+k8s_config_yaml = data.minio_s3_object.k8s_yaml.content
 
 server_dns   = local.cluster_dns
 service_name = "storage"
 namespace    = "minio"
-mode         = "distributed"
-replicas     = local.node_count
 
 admin_server_dns = local.cluster_dns # Restricted admin access, access via bridge
 
 tls          = false # TLS termination happens on the bridge ingress
 admin        = true
 ingressClass = "nginx"
-storageSize  = "20Gi"
+storageSize  = "40Gi"
 }
 
 module "mongodb" {
 source          = "../../modules/mongodb"
 wait_on         = module.cluster-init.installed
-k8s_config_yaml = local.k8s_config_yaml
+k8s_config_yaml = data.minio_s3_object.k8s_yaml.content
 
 namespace = "mongodb"
 replicas  = local.node_count
@@ -52,7 +50,7 @@ module "mongodb" {
 module "rabbitmq" {
 source          = "../../modules/rabbitmq"
 wait_on         = module.cluster-init.installed
-k8s_config_yaml = local.k8s_config_yaml
+k8s_config_yaml = data.minio_s3_object.k8s_yaml.content
 
 server_dns = "local" # Restricted admin access, access via bridge
 
@@ -64,155 +62,28 @@ module "rabbitmq" {
 ingressClass = "nginx"
 }
 
+# THESE SHOULD BE IN BRIDGE
+# generate ed25519 for argocd: `ssh-keygen -t ed25519 -C "argocd.bridge.fourlights.dev" -f argocd.bridge.fourlights.dev
+# add ed25519.pub to github repo deploy keys
+# add argocd repo secret
+# add argocd updated github.com known hosts
+# add argocd application
 
-module "postgresql" {
+# setup secrets
-source = "../../modules/postgresql"
 
-namespace       = "postgresql"
+resource "vault_kv_secret_v2" "cluster" {
-k8s_config_yaml = local.k8s_config_yaml
+mount = var.cluster
-username        = "bridge"
+name = "minio"
-}
+delete_all_versions = true
 
-module "zitadel-db" {
+data_json = jsonencode({
-source  = "../../modules/postgresql/tenant"
+access_key = minio_iam_service_account.cluster.access_key
-wait_on = module.postgresql.installed
+secret_key = minio_iam_service_account.cluster.secret_key
+})
-name            = "zitadel"
-root_password   = module.postgresql.root_password
+depends_on = [
-k8s_config_yaml = local.k8s_config_yaml
+var.wait_on,
-}
+minio_iam_service_account.cluster
+]
-module "zitadel" {
-source          = "../../modules/zitadel"
-wait_on         = module.zitadel-db.installed
-k8s_config_yaml = local.k8s_config_yaml
 
-server_dns = local.cluster_dns
 
-service_name = "zitadel"
-namespace    = "zitadel"
 
-database_password      = module.zitadel-db.password
-database_root_password = module.postgresql.root_password
 
-display_on_homepage = true
-ingressClass        = "nginx"
-}
 
-module "zitadel-bootstrap" {
-source = "../../tenants/fourlights/zitadel"
 
-domain           = module.zitadel.server
-jwt_profile_file = module.zitadel.jwt_profile_file
-k8s_config_yaml  = local.k8s_config_yaml
-}
 
-module "redis" {
-source = "../../modules/redis"
 
-namespace       = "redis"
-k8s_config_yaml = local.k8s_config_yaml
-}
 
-module "tenant-365zon" {
-source = "../../tenants/365zon"
 
-wait_on = module.minio.installed
 
-k8s_config_yaml  = local.k8s_config_yaml
-org_id           = module.zitadel-bootstrap.org_id
-user_id          = module.zitadel-bootstrap.user_id
-domain           = module.zitadel.server
-jwt_profile_file = module.zitadel.jwt_profile_file
 
-minio_access_key = module.minio.minio_access_key
-minio_secret_key = module.minio.minio_secret_key
-minio_server     = module.minio.minio_server
-minio_api_uri    = module.minio.minio_api_uri
 
-mongodb_connection_string  = module.mongodb.connection_string
-rabbitmq_connection_string = module.rabbitmq.connection_string
-}
 
-module "zitadel-argocd" {
-source = "../../tenants/argocd/zitadel"
 
-org_id           = module.zitadel-bootstrap.org_id
-user_id          = module.zitadel-bootstrap.user_id
-domain           = module.zitadel.server
-jwt_profile_file = module.zitadel.jwt_profile_file
 
-argocd_service_domain = "argocd.${ local.cluster_dns}"
-}
 
-module "argocd" {
-source  = "../../modules/argocd"
-wait_on = module.zitadel-argocd.installed
 
-namespace       = "argocd"
-k8s_config_yaml = local.k8s_config_yaml
 
-redis_db_start_index = 0
-redis_password       = module.redis.password
-server_dns           = local.cluster_dns
 
-oauth_uri           = module.zitadel.server
-oauth_client_id     = module.zitadel-argocd.client_id
-oauth_client_secret = module.zitadel-argocd.client_secret
-oauth_redirect_uri  = "https://${module.zitadel.server}/${module.zitadel-argocd.logoutSuffix}"
-oauth_issuer        = "https://${module.zitadel.server}"
 
-ingressClass = "nginx"
-}
 
-module "monitoring" {
-source  = "../../modules/monitoring"
-wait_on = module.argocd.installed
 
-namespace       = "monitoring"
-k8s_config_yaml = local.k8s_config_yaml
 
-server_dns   = local.cluster_dns
-ingressClass = "nginx"
-}
 
-output "argocd-root-password" {
-value     = module.argocd.admin_password
-sensitive = true
-}
 
-output "mongodb-connection-string" {
-value     = module.mongodb.connection_string
-sensitive = true
-}
 
-output "rabbitmq-connection-string" {
-value     = module.rabbitmq.connection_string
-sensitive = true
-}
 
-output "minio-access-key" {
-value     = module.tenant-365zon.minio_access_key
-sensitive = true
-}
 
-output "minio-secret-key" {
-value     = module.tenant-365zon.minio_secret_key
-sensitive = true
-}
 
-output "monitoring" {
-value     = module.monitoring.access_instructions
-sensitive = true
-}
 
-output "minio-root-access-key" {
-value     = module.minio.minio_access_key
-sensitive = true
-}
 
-output "minio-root-secret-key" {
-value     = module.minio.minio_secret_key
-sensitive = true
 }
@@ -2,7 +2,7 @@ terraform {
 required_providers {
 minio = {
 source  = "aminueza/minio"
-version = "~> 3.3.0"
+version = "~> 2.5.0"
 }
 }
 }
@@ -21,8 +21,7 @@ data "minio_s3_object" "k8s_yaml" {
 }
 
 locals {
-k8s_config_yaml = file("./kubeconfig")
+k8s_config      = yamldecode(data.minio_s3_object.k8s_yaml.content)
-k8s_config      = yamldecode(local.k8s_config_yaml)
 k8s_host        = local.k8s_config.clusters[0].cluster.server
 k8s_auth = try(
 {
File diff suppressed because one or more lines are too long
@@ -1 +0,0 @@
{"type":"serviceaccount","keyId":"328362785044575662","key":"-----BEGIN RSA PRIVATE KEY-----\nMIIEowIBAAKCAQEA1+ggA05PsZZzpklcqEAck8UUkn1H5HJXiD3to0IU2vnLQYk9\n/pxHM30n2uWLOflL3OWeqy0rDfR2tp3a0gziGO3MV6EuDPg73WZqHMwxa4RkPOMO\nLcHcfnp4bG9RxZgbvj2pFkvPJ/j5KC54HDyol2urWuGSVKKIGY7bUE4HD0n+ejr6\nLoEeGtIV17ttipqvF2jPMXHvH/yjsQWPofRQh6kqnxTuwyesjlEXyezMP1+WfpCa\n/kmGfowocyIuMNHUSeymLfBX3KIYVWp+/HukP1J781RQLPivdh8hiPZT560gXh72\niSKCLgTLuUt72LFs+1XnKuq2gIOFvP8jpae+GQIDAQABAoIBAEkc+SJFdaVMrpMk\nCrWLKGI9/e5xcFQDCHNBjWSYrxlMN7FUuWKsJ0xfWIy+6LvUGQWP2IBdBlRucXj7\n0asJ49LPTEBse3y23wbldI+V8jJXnd4kgZurJ3DJJliKBeXk0ZhFpym+uELwA+fA\nfLoLLIhCVL+s3XG9rFsDTm9OTQO2ykPvwGAx151nitBnLJm6ms+meBOPc/f/nsH+\nIR6W3Fm4hWjSeKdeWSJG6ePkJqbXLTNYhxnkbgNg4fz4CCPf39aqS3NbwiUv/dQG\ni8cp/UnsZGF0IlmSyipQirFk9wLd9iX9vNorPPMosD68m/plC3eXFIQATbBXEGaf\nelMUmeUCgYEA2j0M42iiBQ0qsuj4rfdCv8rjaIva5EG6Jp2AMtXmQQulrp31iuKw\njfpMoUAip5HS1PdQuXCK6m7DVS3Nr39pBcGrsumU/nW1h3AWTR4GeWFtcCj5ek7q\nitJKfpGnyfWlpPeluXfGmS6npA36URlhX+FfaE6vEHVa7o/hcw4bF6sCgYEA/UPK\nHCwJNBPP04Q7zGF2ivl/zz9WDwnp3d9QZzpmjCZa3J52tLShs5URC8VeSH6mWh78\nfMoFb8fFiJUSjA1Dh0X1pbhXLeFnJmobHAlHZvdMgGDQSe0VfnmC7tJxwEQ3tdxu\n/E95kNFvXUSq0lSICG8TFwr2zHueJf8sPHvsbUsCgYEAzut3nt6ET/nN9FhEAN4h\n4atCtstzvVw8XYSUGp8Ic7/RVaDbQNXyxIr/wRwIP41ZsybS2SBF9kUS215UWQOa\n1iRs4XPrGuEMPuAdpPzE8q1XkDKpaDgO1ZTRoPQFfM12QtWzwgg4uuCrfjbqkZ5Y\n3wnW5hVEk3xRvUOaZPaI5YUCgYAQC5NqZX/PxNGN3PrM3//aniPyxVh2a3JdlCBL\n6e9FQcjP4Voz6J21+uOxkzd98EgpvXzRixr9EfL5zDf4l36Hu27sqNkdFwKp9F7z\nT8MuSsyV9Yw8osCR2rDo4HxEag39f5GoeoCBJEOh7Q/Fc4WKwz66Xv9zxQEn9xqe\nWluIMQKBgEFkltf0957pEC7pPb/t9/20j7ODKSKrSYE3+SGi9XwUeT74HkU43Euf\na3KWlwMmJcYvtc4RDNuYJ6F9/eAq5c5MqgySEHoMh386u3YzzZBcI5L06pMI4kYb\njhK63OCrlzeILxwcwc00ztYHbjxk/yFOUWQ/OknCzlGE0o8TOPyu\n-----END RSA PRIVATE KEY-----\n","expirationDate":"2026-01-01T00:00:00Z","userId":"328362785044510126"}
@@ -61,41 +61,22 @@ provider "registry.terraform.io/fusionauth/fusionauth" {
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
provider "registry.terraform.io/hashicorp/external" {
|
|
||||||
version = "2.3.5"
|
|
||||||
hashes = [
|
|
||||||
"h1:smKSos4zs57pJjQrNuvGBpSWth2el9SgePPbPHo0aps=",
|
|
||||||
"zh:6e89509d056091266532fa64de8c06950010498adf9070bf6ff85bc485a82562",
|
|
||||||
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
|
|
||||||
"zh:86868aec05b58dc0aa1904646a2c26b9367d69b890c9ad70c33c0d3aa7b1485a",
|
|
||||||
"zh:a2ce38fda83a62fa5fb5a70e6ca8453b168575feb3459fa39803f6f40bd42154",
|
|
||||||
"zh:a6c72798f4a9a36d1d1433c0372006cc9b904e8cfd60a2ae03ac5b7d2abd2398",
|
|
||||||
"zh:a8a3141d2fc71c86bf7f3c13b0b3be8a1b0f0144a47572a15af4dfafc051e28a",
|
|
||||||
"zh:aa20a1242eb97445ad26ebcfb9babf2cd675bdb81cac5f989268ebefa4ef278c",
|
|
||||||
"zh:b58a22445fb8804e933dcf835ab06c29a0f33148dce61316814783ee7f4e4332",
|
|
||||||
"zh:cb5626a661ee761e0576defb2a2d75230a3244799d380864f3089c66e99d0dcc",
|
|
||||||
"zh:d1acb00d20445f682c4e705c965e5220530209c95609194c2dc39324f3d4fcce",
|
|
||||||
"zh:d91a254ba77b69a29d8eae8ed0e9367cbf0ea6ac1a85b58e190f8cb096a40871",
|
|
||||||
"zh:f6592327673c9f85cdb6f20336faef240abae7621b834f189c4a62276ea5db41",
|
|
||||||
]
|
|
||||||
}
|
|
||||||
|
|
||||||
provider "registry.terraform.io/hashicorp/helm" {
|
provider "registry.terraform.io/hashicorp/helm" {
|
||||||
version = "3.0.2"
|
version = "2.16.1"
|
||||||
hashes = [
|
hashes = [
|
||||||
"h1:+tHGl509bhyUrvvj9GQTBsdK+ImHJnRuo6ppDZPavqY=",
|
"h1:TerRBdq69SxIWg3ET2VE0bcP0BYRIWZOp1QxXj/14Fk=",
|
||||||
"zh:2778de76c7dfb2e85c75fe6de3c11172a25551ed499bfb9e9f940a5be81167b0",
|
"zh:0003f6719a32aee9afaeeb001687fc0cfc8c2d5f54861298cf1dc5711f3b4e65",
|
||||||
"zh:3b4c436a41e4fbae5f152852a9bd5c97db4460af384e26977477a40adf036690",
|
"zh:16cd5bfee09e7bb081b8b4470f31a9af508e52220fd97fd81c6dda725d9422fe",
|
||||||
"zh:617a372f5bb2288f3faf5fd4c878a68bf08541cf418a3dbb8a19bc41ad4a0bf2",
|
"zh:51817de8fdc2c2e36785f23fbf4ec022111bd1cf7679498c16ad0ad7471c16db",
|
||||||
"zh:84de431479548c96cb61c495278e320f361e80ab4f8835a5425ece24a9b6d310",
|
"zh:51b95829b2873be40a65809294bffe349e40cfccc3ff6fee0f471d01770e0ebd",
|
||||||
"zh:8b4cf5f81d10214e5e1857d96cff60a382a22b9caded7f5d7a92e5537fc166c1",
|
"zh:56b158dde897c47e1460181fc472c3e920aa23db40579fdc2aad333c1456d2dd",
|
||||||
"zh:baeb26a00ffbcf3d507cdd940b2a2887eee723af5d3319a53eec69048d5e341e",
|
"zh:916641d26c386959eb982e680028aa677b787687ef7c1283241e45620bc8df50",
|
||||||
"zh:ca05a8814e9bf5fbffcd642df3a8d9fae9549776c7057ceae6d6f56471bae80f",
|
"zh:aec15ca8605babba77b283f2ca35daca53e006d567e1c3a3daf50497035b820b",
|
||||||
"zh:ca4bf3f94dedb5c5b1a73568f2dad7daf0ef3f85e688bc8bc2d0e915ec148366",
|
"zh:c2cecf710b87c8f3a4d186da2ea12cf08041f97ae0c6db82649720d6ed929d65",
|
||||||
"zh:d331f2129fd3165c4bda875c84a65555b22eb007801522b9e017d065ac69b67e",
|
"zh:dbdd96f17aea25c7db2d516ab8172a5e683c6686c72a1a44173d2fe96319be39",
|
||||||
"zh:e583b2b478dde67da28e605ab4ef6521c2e390299b471d7d8ef05a0b608dcdad",
|
"zh:de11e180368434a796b1ab6f20fde7554dc74f7800e063b8e4c8ec3a86d0be63",
|
||||||
"zh:f238b86611647c108c073d265f8891a2738d3158c247468ae0ff5b1a3ac4122a",
|
|
||||||
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
|
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
|
||||||
|
"zh:f827a9c1540d210c56053a2d5d5a6abda924896ffa8eeedc94054cf6d44c5f60",
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@@ -120,100 +101,100 @@ provider "registry.terraform.io/hashicorp/kubernetes" {
|
||||||
}
|
}
|
||||||
|
|
||||||
provider "registry.terraform.io/hashicorp/null" {
|
provider "registry.terraform.io/hashicorp/null" {
|
||||||
version = "3.2.4"
|
version = "3.2.3"
|
||||||
hashes = [
|
hashes = [
|
||||||
"h1:hkf5w5B6q8e2A42ND2CjAvgvSN3puAosDmOJb3zCVQM=",
|
"h1:+AnORRgFbRO6qqcfaQyeX80W0eX3VmjadjnUFUJTiXo=",
|
||||||
"zh:59f6b52ab4ff35739647f9509ee6d93d7c032985d9f8c6237d1f8a59471bbbe2",
|
"zh:22d062e5278d872fe7aed834f5577ba0a5afe34a3bdac2b81f828d8d3e6706d2",
|
||||||
|
"zh:23dead00493ad863729495dc212fd6c29b8293e707b055ce5ba21ee453ce552d",
|
||||||
|
"zh:28299accf21763ca1ca144d8f660688d7c2ad0b105b7202554ca60b02a3856d3",
|
||||||
|
"zh:55c9e8a9ac25a7652df8c51a8a9a422bd67d784061b1de2dc9fe6c3cb4e77f2f",
|
||||||
|
"zh:756586535d11698a216291c06b9ed8a5cc6a4ec43eee1ee09ecd5c6a9e297ac1",
|
||||||
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
|
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
|
||||||
"zh:795c897119ff082133150121d39ff26cb5f89a730a2c8c26f3a9c1abf81a9c43",
|
"zh:9d5eea62fdb587eeb96a8c4d782459f4e6b73baeece4d04b4a40e44faaee9301",
|
||||||
"zh:7b9c7b16f118fbc2b05a983817b8ce2f86df125857966ad356353baf4bff5c0a",
|
"zh:a6355f596a3fb8fc85c2fb054ab14e722991533f87f928e7169a486462c74670",
|
||||||
"zh:85e33ab43e0e1726e5f97a874b8e24820b6565ff8076523cc2922ba671492991",
|
"zh:b5a65a789cff4ada58a5baffc76cb9767dc26ec6b45c00d2ec8b1b027f6db4ed",
|
||||||
"zh:9d32ac3619cfc93eb3c4f423492a8e0f79db05fec58e449dee9b2d5873d5f69f",
|
"zh:db5ab669cf11d0e9f81dc380a6fdfcac437aea3d69109c7aef1a5426639d2d65",
|
||||||
"zh:9e15c3c9dd8e0d1e3731841d44c34571b6c97f5b95e8296a45318b94e5287a6e",
|
"zh:de655d251c470197bcbb5ac45d289595295acb8f829f6c781d4a75c8c8b7c7dd",
|
||||||
"zh:b4c2ab35d1b7696c30b64bf2c0f3a62329107bd1a9121ce70683dec58af19615",
|
"zh:f5c68199f2e6076bce92a12230434782bf768103a427e9bb9abee99b116af7b5",
|
||||||
"zh:c43723e8cc65bcdf5e0c92581dcbbdcbdcf18b8d2037406a5f2033b1e22de442",
|
|
||||||
"zh:ceb5495d9c31bfb299d246ab333f08c7fb0d67a4f82681fbf47f2a21c3e11ab5",
|
|
||||||
"zh:e171026b3659305c558d9804062762d168f50ba02b88b231d20ec99578a6233f",
|
|
||||||
"zh:ed0fe2acdb61330b01841fa790be00ec6beaac91d41f311fb8254f74eb6a711f",
|
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
provider "registry.terraform.io/hashicorp/random" {
|
provider "registry.terraform.io/hashicorp/random" {
|
||||||
version = "3.7.2"
|
version = "3.6.3"
|
||||||
hashes = [
|
hashes = [
|
||||||
"h1:356j/3XnXEKr9nyicLUufzoF4Yr6hRy481KIxRVpK0c=",
|
"h1:Fnaec9vA8sZ8BXVlN3Xn9Jz3zghSETIKg7ch8oXhxno=",
|
||||||
"zh:14829603a32e4bc4d05062f059e545a91e27ff033756b48afbae6b3c835f508f",
|
"zh:04ceb65210251339f07cd4611885d242cd4d0c7306e86dda9785396807c00451",
|
||||||
"zh:1527fb07d9fea400d70e9e6eb4a2b918d5060d604749b6f1c361518e7da546dc",
|
"zh:448f56199f3e99ff75d5c0afacae867ee795e4dfda6cb5f8e3b2a72ec3583dd8",
|
||||||
"zh:1e86bcd7ebec85ba336b423ba1db046aeaa3c0e5f921039b3f1a6fc2f978feab",
|
"zh:4b4c11ccfba7319e901df2dac836b1ae8f12185e37249e8d870ee10bb87a13fe",
|
||||||
"zh:24536dec8bde66753f4b4030b8f3ef43c196d69cccbea1c382d01b222478c7a3",
|
"zh:4fa45c44c0de582c2edb8a2e054f55124520c16a39b2dfc0355929063b6395b1",
|
||||||
"zh:29f1786486759fad9b0ce4fdfbbfece9343ad47cd50119045075e05afe49d212",
|
"zh:588508280501a06259e023b0695f6a18149a3816d259655c424d068982cbdd36",
|
||||||
"zh:4d701e978c2dd8604ba1ce962b047607701e65c078cb22e97171513e9e57491f",
|
"zh:737c4d99a87d2a4d1ac0a54a73d2cb62974ccb2edbd234f333abd079a32ebc9e",
|
||||||
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
|
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
|
||||||
"zh:7b8434212eef0f8c83f5a90c6d76feaf850f6502b61b53c329e85b3b281cba34",
|
"zh:a357ab512e5ebc6d1fda1382503109766e21bbfdfaa9ccda43d313c122069b30",
|
||||||
"zh:ac8a23c212258b7976e1621275e3af7099e7e4a3d4478cf8d5d2a27f3bc3e967",
|
"zh:c51bfb15e7d52cc1a2eaec2a903ac2aff15d162c172b1b4c17675190e8147615",
|
||||||
"zh:b516ca74431f3df4c6cf90ddcdb4042c626e026317a33c53f0b445a3d93b720d",
|
"zh:e0951ee6fa9df90433728b96381fb867e3db98f66f735e0c3e24f8f16903f0ad",
|
||||||
"zh:dc76e4326aec2490c1600d6871a95e78f9050f9ce427c71707ea412a2f2f1a62",
|
"zh:e3cdcb4e73740621dabd82ee6a37d6cfce7fee2a03d8074df65086760f5cf556",
|
||||||
"zh:eac7b63e86c749c7d48f527671c7aee5b4e26c10be6ad7232d6860167f99dbb0",
|
"zh:eff58323099f1bd9a0bec7cb04f717e7f1b2774c7d612bf7581797e1622613a0",
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
provider "registry.terraform.io/hashicorp/tls" {
|
provider "registry.terraform.io/hashicorp/tls" {
|
||||||
version = "4.1.0"
|
version = "4.0.6"
|
||||||
hashes = [
|
hashes = [
|
||||||
"h1:Ka8mEwRFXBabR33iN/WTIEW6RP0z13vFsDlwn11Pf2I=",
|
"h1:dYSb3V94K5dDMtrBRLPzBpkMTPn+3cXZ/kIJdtFL+2M=",
|
||||||
"zh:14c35d89307988c835a7f8e26f1b83ce771e5f9b41e407f86a644c0152089ac2",
|
"zh:10de0d8af02f2e578101688fd334da3849f56ea91b0d9bd5b1f7a243417fdda8",
|
||||||
"zh:2fb9fe7a8b5afdbd3e903acb6776ef1be3f2e587fb236a8c60f11a9fa165faa8",
|
"zh:37fc01f8b2bc9d5b055dc3e78bfd1beb7c42cfb776a4c81106e19c8911366297",
|
||||||
"zh:35808142ef850c0c60dd93dc06b95c747720ed2c40c89031781165f0c2baa2fc",
|
"zh:4578ca03d1dd0b7f572d96bd03f744be24c726bfd282173d54b100fd221608bb",
|
||||||
"zh:35b5dc95bc75f0b3b9c5ce54d4d7600c1ebc96fbb8dfca174536e8bf103c8cdc",
|
"zh:6c475491d1250050765a91a493ef330adc24689e8837a0f07da5a0e1269e11c1",
|
||||||
"zh:38aa27c6a6c98f1712aa5cc30011884dc4b128b4073a4a27883374bfa3ec9fac",
|
"zh:81bde94d53cdababa5b376bbc6947668be4c45ab655de7aa2e8e4736dfd52509",
|
||||||
"zh:51fb247e3a2e88f0047cb97bb9df7c228254a3b3021c5534e4563b4007e6f882",
|
"zh:abdce260840b7b050c4e401d4f75c7a199fafe58a8b213947a258f75ac18b3e8",
|
||||||
"zh:62b981ce491e38d892ba6364d1d0cdaadcee37cc218590e07b310b1dfa34be2d",
|
"zh:b754cebfc5184873840f16a642a7c9ef78c34dc246a8ae29e056c79939963c7a",
|
||||||
"zh:bc8e47efc611924a79f947ce072a9ad698f311d4a60d0b4dfff6758c912b7298",
|
"zh:c928b66086078f9917aef0eec15982f2e337914c5c4dbc31dd4741403db7eb18",
|
||||||
"zh:c149508bd131765d1bc085c75a870abb314ff5a6d7f5ac1035a8892d686b6297",
|
"zh:cded27bee5f24de6f2ee0cfd1df46a7f88e84aaffc2ecbf3ff7094160f193d50",
|
||||||
"zh:d38d40783503d278b63858978d40e07ac48123a2925e1a6b47e62179c046f87a",
|
"zh:d65eb3867e8f69aaf1b8bb53bd637c99c6b649ba3db16ded50fa9a01076d1a27",
|
||||||
|
"zh:ecb0c8b528c7a619fa71852bb3fb5c151d47576c5aab2bf3af4db52588722eeb",
|
||||||
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
|
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
|
||||||
"zh:fb07f708e3316615f6d218cec198504984c0ce7000b9f1eebff7516e384f4b54",
|
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
provider "registry.terraform.io/hashicorp/vault" {
|
provider "registry.terraform.io/hashicorp/vault" {
|
||||||
version = "5.1.0"
|
version = "4.5.0"
|
||||||
hashes = [
|
hashes = [
|
||||||
"h1:x9cfzSpsdqUfrKM4qD/Mfqvy66ZWKrLtA+PZx7HhqZ8=",
|
"h1:oKiQcEqj/HTCMzgGtZ531D/jnnM0i7iguSM8pU7aK8U=",
|
||||||
"zh:121c84975a3732d2c68db6b555c37a4520f3c283fd916e25e472e784518662a7",
|
"zh:0a9301aa6a9b59db97682be568329526033bb50a4a308ad695c2a1877c1241c3",
|
||||||
"zh:2a80da4424db091d3b9846a569b0ae3e60f3e95b0a988ff94f3986391a62c93d",
|
"zh:0f8fee69ea4eaa27b86a391edc7de8e8b215e3c48f7074bab799986d5f707014",
|
||||||
"zh:2bcb3aadf97aecf0b9f98393affd766b929eafd3cb68ed4f26419405c3e8ec64",
|
"zh:2a2e51fe280e07700920bc8ed29b77e5c79fada0e4d5315d55ec0d2893bb5eed",
|
||||||
"zh:5a5f11db49784e9be251fbad2bb3a46c5f9999ab4e1ea7940f120b3743afca28",
|
"zh:3fc7d9016bebe26a4c779ce6b87b181ed6a1af12499419726b8b0a0e3eaa7234",
|
||||||
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
|
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
|
||||||
"zh:976b000cf6fa75c33c3b3a3c2e5c67a8c1958744b0521a1f807de9f8855dc961",
|
"zh:813a9e4875e58dbca2526b3088c0f76dbb2a66b10b910497a0b703518eaa73cd",
|
||||||
"zh:a4321ce4ff9422d90c3c85bb4835a84563eb761d0714d1db9c81ca810a48fd7c",
|
"zh:889ed6f21b94f89b8cbc4224454ced01a2792f12f53379d2fb1a2f2749bf624a",
|
||||||
"zh:a7464751eaf0b9cc7afb03e098d7efcf8c559215f3de7f34a56458e75709c94d",
|
"zh:acf9c01d403584015005083e64d8479d167e4f54e87e540311010133fcb5b023",
|
||||||
"zh:ae434febd2590e58040308d18bf772f796b2fad90670be263acdd447db8fb106",
|
"zh:b377945a4b6a75c79793cb92c873aacc9c087c2a6e5792a1613f3aa2f3693848",
|
||||||
"zh:c61a27d8c9daa483feb4e3fecd42fa9f2887c5588433bb15df6d62572a7bb6f4",
|
"zh:be243567b2a76ba2a546449e89764f707477cf25dcdd6d7f3b808ddf40aaf9f6",
|
||||||
"zh:dd2e8bdc76f09f8a47485d129140cd6126ad722014f6704ad5d8c4f18014571d",
|
"zh:d879fa16f391fb75b779067c751f3b8f80f5f4d73b2ff86814662038878a0ce4",
|
||||||
"zh:f15d32b1eaeb419533e586b0c2c1e2b2f732925b3a094e31e9669cd6e6e735f0",
|
"zh:e47fb3daac933f5dcb034379fe77c0bf834512da7348e7643457c9af3b2ab36b",
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
provider "registry.terraform.io/hetznercloud/hcloud" {
|
provider "registry.terraform.io/hetznercloud/hcloud" {
|
||||||
version = "1.51.0"
|
version = "1.49.1"
|
||||||
constraints = "~> 1.45"
|
constraints = "~> 1.45"
|
||||||
hashes = [
|
hashes = [
|
||||||
"h1:yER+O3OKYfxBAO7KVYZzH+4EYrmorCO0J0hlnRUfH00=",
|
"h1:FKGRNHVbcfQJd8EWrb8Ze5QHkaGr8zI+ZKxBMjvOwPk=",
|
||||||
"zh:0e8e78084c12866e8e3873011bcac125780b62afeaa518d4749b9a063ae6e32b",
|
"zh:3d5f9773da4f8203cf625d04a5a0e4ff7e202684c010a801a945756140c61cde",
|
||||||
"zh:145738cee21bcdeea1cf82f0d44f7f239c27c2214249e5e5079668c479522a8a",
|
"zh:446305d492017cda91e5c15122ec16ff15bfe3ef4d3fd6bcea0cdf7742ab1b86",
|
||||||
"zh:164406be8ee83952f58a449d514837cc6d9763b6d29e72262d5582d5d5b89315",
|
"zh:44d4f9156ed8b4f0444bd4dc456825940be49048828565964a192286d28c9f20",
|
||||||
"zh:1a0e6ffab3196b35ca65eb445622615bb8dddd68d0bf350ed60d25e1e74f67dc",
|
"zh:492ad893d2f89bb17c9beb877c8ceb4a16caf39db1a79030fefeada6c7aa217f",
|
||||||
"zh:3b7729d1bb5cc7a5af60b42a607f7b3fec690192b1efb55e2341cee88405ecb0",
|
"zh:68dc552c19ad9d209ec6018445df6e06fb77a637513a53cc66ddce1b024082be",
|
||||||
"zh:3bcfc5c40d1b7702f39dac5d2dd9eef58c9c934effb4676e26fbe85fe2057e8f",
|
"zh:7492495ffda6f6c49ab38b539bd2eb965b1150a63fb6b191a27dec07d17601cb",
|
||||||
"zh:3ce193892dca025b804de6d99316c50a33462eb36336006a9db7ea44be439eba",
|
"zh:850fe92005981ea00db86c3e49ba5b49732fdf1f7bd5530a68f6e272847059fc",
|
||||||
"zh:4f92437e1eba8eafe4417f8b61d557ed47f121622305ee2b3c13c31e45c69ca4",
|
"zh:8cb67f744c233acfb1d68a6c27686315439d944edf733b95f113b4aa63d86713",
|
||||||
"zh:554c308bf64b603a075a8f13a151a136b68ba382c2d83977a0df26de7dea2d3d",
|
"zh:8e13dac46e8c2497772ed1baee701b1d1c26bcc95a63b5c4566c83468f504868",
|
||||||
"zh:8c57aa6032fed5da43a0102a4f26262c0496803b99f2f92e5ceb02c80161e291",
|
"zh:c44249c6a8ba931e208a334792686b5355ab2da465cadea03c1ea8e73c02db12",
|
||||||
"zh:99cd4d246d0ad3a3529176df22a47f254700f8c4fc33f62c14464259284945b7",
|
"zh:d103125a28a85c89aea0cb0c534fe3f504416c4d4fc75c37364b9ec5f66dd77d",
|
||||||
"zh:af38a4d1e93f2392a296970ba4ecea341204e888d579cd74642e9f23a94b3b06",
|
"zh:ed8f64e826aa9bfca95b72892271678cb78411b40d7b404a52404141e05a4ab1",
|
||||||
"zh:f0766d42dd97b3eac6fa614fa5809ff2511c9104f3834d0d4b6e84674f13f092",
|
"zh:f40efad816de00b279bd1e2cbf62c76b0e5b2da150a0764f259984b318e30945",
|
||||||
"zh:f20f7379876ede225f3b6f0719826706a171ea4c1dd438a8a3103dee8fe43ccc",
|
"zh:f5e912d0873bf4ecc43feba4ceccdf158048080c76d557e47f34749139fdd452",
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@@ -1,6 +1,6 @@
 endpoints = { s3 = "https://storage.bridge.fourlights.dev" }
access_key = "T8V84SHIVT6MAV424ES0"
+access_key = ""
secret_key = "23+N28yBK+cL3O2t9xsstT8jr2TpK+SgORCVIuxc"
+secret_key = ""
 bucket = "management"
 key = "terraform.tfstate"
 region = "eu-central-1"
@@ -1,9 +1,9 @@
 endpoints = { s3 = "https://storage.bridge.fourlights.dev" }
access_key = "T8V84SHIVT6MAV424ES0"
+access_key = ""
secret_key = "23+N28yBK+cL3O2t9xsstT8jr2TpK+SgORCVIuxc"
+secret_key = ""
 bucket = "management"
 region = "eu-central-1"
 minio_server = "storage.bridge.fourlights.dev"
 rancher_server = "https://rancher.bridge.fourlights.dev"
vault_token = "hvs.BsIbdvXLdbQn0v4sR3jSWJa9"
vault_token = "hvs.CAESIPcy0DY5Jc-d0P2ZRRhiLXr3DmOOawpoA6--QTCoRCqqGh4KHGh2cy5lZjhJdTRINEVKaU55Q21VUTg4ZzZwSWI"
 vault_addr = "https://vault.bridge.fourlights.dev"
@@ -31,23 +31,23 @@ resource "kubernetes_secret" "argocd-tls" {
 }
 
 resource "helm_release" "argocd" {
-depends_on = [var.wait_on]
+depends_on = [var.wait_on, kubernetes_secret.argocd-tls]
 name       = "argocd"
 repository = "https://charts.bitnami.com/bitnami"
 chart      = "argo-cd"
 namespace  = kubernetes_namespace.argocd.metadata[0].name
-version    = "9.0.29"
+version    = "7.0.20"
 create_namespace = false
 wait             = true
 wait_for_jobs    = true
 
-set_sensitive = [{
+set_sensitive {
 name  = "config.secret.argocdServerAdminPassword"
 value = random_password.admin_password.result
-}]
+}
 
 values = [
-templatefile("${path.module}/values.yaml.tftpl", {
+templatefile("${path.module}/values.yaml", {
 service_uri      = local.service_uri,
 server_dns       = var.server_dns,
 grpc_service_uri = local.grpc_service_uri,
@@ -58,9 +58,8 @@ resource "helm_release" "argocd" {
 oauth_issuer        = var.oauth_issuer,
 oauth_client_id     = var.oauth_client_id,
 oauth_client_secret = var.oauth_client_secret,
-oauth_redirect_uri  = var.oauth_redirect_uri,
+oauth_redirect_uri  = var.oauth_redirect_uri
-tls = var.tls,
+tls = var.tls
-ingress_class = var.ingressClass
 })
 ]
 }
@@ -23,7 +23,7 @@ provider "kubernetes" {
 }
 
 provider "helm" {
-kubernetes = {
+kubernetes {
 host     = local.k8s_host
 insecure = true
 token    = local.k8s_auth.using_token ? local.k8s_auth.token : null
@@ -23,7 +23,7 @@ provider "kubernetes" {
 }
 
 provider "helm" {
-kubernetes = {
+kubernetes {
 host     = local.k8s_host
 insecure = true
 token    = local.k8s_auth.using_token ? local.k8s_auth.token : null
@@ -1,3 +1,13 @@
+commonEnvVars: &commonEnvVars
+- name: REDIS_USERNAME
+value: ""
+- name: REDIS_PASSWORD
+value: ${ redis_password }
 
+commonArgs: &commonRedisArgs
+- --redis=${ redis_service_uri }:6379
+- --redisdb=${ redis_index }
 
 redis:
 enabled: false
@ -13,45 +23,25 @@ dex:
|
||||||
enabled: true
|
enabled: true
|
||||||
|
|
||||||
controller:
|
controller:
|
||||||
extraArgs:
|
extraArgs: *commonRedisArgs
|
||||||
- --redis=${ redis_service_uri }:6379
|
extraEnvVars: *commonEnvVars
|
||||||
- --redisdb=${ redis_index }
|
|
||||||
extraEnvVars:
|
|
||||||
- name: REDIS_USERNAME
|
|
||||||
value: ""
|
|
||||||
- name: REDIS_PASSWORD
|
|
||||||
value: ${ redis_password }
|
|
||||||
|
|
||||||
repoServer:
|
repoServer:
|
||||||
extraArgs:
|
extraArgs: *commonRedisArgs
|
||||||
- --redis=${ redis_service_uri }:6379
|
extraEnvVars: *commonEnvVars
|
||||||
- --redisdb=${ redis_index }
|
|
||||||
extraEnvVars:
|
|
||||||
- name: REDIS_USERNAME
|
|
||||||
value: ""
|
|
||||||
- name: REDIS_PASSWORD
|
|
||||||
value: ${ redis_password }
|
|
||||||
|
|
||||||
server:
|
server:
|
||||||
extraArgs:
|
extraArgs: *commonRedisArgs
|
||||||
- --redis=${ redis_service_uri }:6379
|
extraEnvVars: *commonEnvVars
|
||||||
- --redisdb=${ redis_index }
|
|
||||||
extraEnvVars:
|
|
||||||
- name: REDIS_USERNAME
|
|
||||||
value: ""
|
|
||||||
- name: REDIS_PASSWORD
|
|
||||||
value: ${ redis_password }
|
|
||||||
|
|
||||||
url: https://${ service_uri }
|
url: https://${ service_uri }
|
||||||
insecure: true
|
insecure: true
|
||||||
ingress:
|
ingress:
|
||||||
enabled: true
|
enabled: true
|
||||||
ingressClassName: ${ingress_class}
|
ingressClassName: traefik
|
||||||
hostname: ${ service_uri }
|
hostname: ${ service_uri }
|
||||||
annotations:
|
annotations:
|
||||||
kubernetes.io/ingress.class: ${ingress_class}
|
kubernetes.io/ingress.class: traefik
|
||||||
cert-manager.io/cluster-issuer: letsencrypt
|
|
||||||
%{ if ingress_class == "traefik" }
|
|
||||||
%{ if tls }
|
%{ if tls }
|
||||||
traefik.ingress.kubernetes.io/router.entrypoints: web,websecure
|
traefik.ingress.kubernetes.io/router.entrypoints: web,websecure
|
||||||
traefik.ingress.kubernetes.io/router.middlewares: default-redirect-to-https@kubernetescrd,default-preserve-host-headers@kubernetescrd
|
traefik.ingress.kubernetes.io/router.middlewares: default-redirect-to-https@kubernetescrd,default-preserve-host-headers@kubernetescrd
|
||||||
|
|
@ -59,10 +49,6 @@ server:
|
||||||
traefik.ingress.kubernetes.io/router.entrypoints: web
|
traefik.ingress.kubernetes.io/router.entrypoints: web
|
||||||
traefik.ingress.kubernetes.io/router.middlewares: default-preserve-host-headers@kubernetescrd
|
traefik.ingress.kubernetes.io/router.middlewares: default-preserve-host-headers@kubernetescrd
|
||||||
%{ endif }
|
%{ endif }
|
||||||
%{ else }
|
|
||||||
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
|
|
||||||
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
|
|
||||||
%{ endif }
|
|
||||||
%{ if tls }
|
%{ if tls }
|
||||||
extraTls:
|
extraTls:
|
||||||
- hosts:
|
- hosts:
|
||||||
|
|
@ -95,10 +81,8 @@ server:
|
||||||
- openid
|
- openid
|
||||||
- groups
|
- groups
|
||||||
logoutURL: "${ oauth_redirect_uri }"
|
logoutURL: "${ oauth_redirect_uri }"
|
||||||
getUserInfo: true
|
|
||||||
claimMapping:
|
claimMapping:
|
||||||
name: fullName
|
name: fullName # ArgoCD expects 'name', FusionAuth provides 'fullName'
|
||||||
groups: "urn:zitadel:iam:org:project:roles"
|
|
||||||
preferred_username: email
|
preferred_username: email
|
||||||
|
|
||||||
%{ endif }
|
%{ endif }
|
||||||
|
|
||||||
|
|
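The values template above branches on the ingress class with %{ if } directives. A minimal sketch of that templating mechanism using an inline heredoc instead of the module's values.yaml.tftpl; inside a .tftpl the names come from the templatefile() vars map rather than var.*, and the variable names, defaults and annotation values here are illustrative:

variable "ingress_class" {
  type    = string
  default = "nginx"
}

variable "tls" {
  type    = bool
  default = false
}

# Terraform string templates accept the same %{ if } directives as .tftpl files.
locals {
  ingress_annotations = <<-EOT
    kubernetes.io/ingress.class: ${var.ingress_class}
    %{~ if var.ingress_class == "traefik" ~}
    traefik.ingress.kubernetes.io/router.entrypoints: ${var.tls ? "web,websecure" : "web"}
    %{~ else ~}
    nginx.ingress.kubernetes.io/force-ssl-redirect: "${var.tls}"
    %{~ endif ~}
  EOT
}

output "ingress_annotations_preview" {
  value = local.ingress_annotations
}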
@ -69,9 +69,3 @@ variable "tls" {
|
||||||
type = bool
|
type = bool
|
||||||
default = false
|
default = false
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "ingressClass" {
|
|
||||||
description = "Ingress class to use"
|
|
||||||
type = string
|
|
||||||
default = "nginx"
|
|
||||||
}
|
|
||||||
|
|
|
||||||
|
|
@ -4,10 +4,10 @@ resource "helm_release" "cert-manager-webhook-hetzner" {
|
||||||
repository = "https://vadimkim.github.io/cert-manager-webhook-hetzner"
|
repository = "https://vadimkim.github.io/cert-manager-webhook-hetzner"
|
||||||
chart = "cert-manager-webhook-hetzner"
|
chart = "cert-manager-webhook-hetzner"
|
||||||
|
|
||||||
set = [{
|
set {
|
||||||
name = "groupName"
|
name = "groupName"
|
||||||
value = "acme.${var.tld}"
|
value = "acme.${var.tld}"
|
||||||
}]
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "kubernetes_secret" "hetzner-secret" {
|
resource "kubernetes_secret" "hetzner-secret" {
|
||||||
|
|
|
||||||
|
|
@ -23,7 +23,7 @@ provider "kubernetes" {
|
||||||
}
|
}
|
||||||
|
|
||||||
provider "helm" {
|
provider "helm" {
|
||||||
kubernetes = {
|
kubernetes {
|
||||||
host = local.k8s_host
|
host = local.k8s_host
|
||||||
insecure = true
|
insecure = true
|
||||||
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
||||||
|
|
|
||||||
|
|
@ -5,21 +5,21 @@ resource "helm_release" "cert_manager" {
|
||||||
create_namespace = true
|
create_namespace = true
|
||||||
wait = true
|
wait = true
|
||||||
|
|
||||||
set = [{
|
set {
|
||||||
name = "installCRDs"
|
name = "installCRDs"
|
||||||
value = "true"
|
value = "true"
|
||||||
},
|
}
|
||||||
{
|
|
||||||
name = "email"
|
|
||||||
value = var.email
|
|
||||||
}]
|
|
||||||
|
|
||||||
set_list = [{
|
set_list {
|
||||||
name = "dnsConfig.nameservers"
|
name = "dnsConfig.nameservers"
|
||||||
value = ["1.1.1.1", "8.8.8.8"]
|
value = ["1.1.1.1", "8.8.8.8"]
|
||||||
}]
|
}
|
||||||
}
|
|
||||||
|
|
||||||
|
set {
|
||||||
|
name = "email"
|
||||||
|
value = var.email
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
output "installed" {
|
output "installed" {
|
||||||
value = true
|
value = true
|
||||||
|
|
|
||||||
|
|
@ -23,7 +23,7 @@ provider "kubernetes" {
|
||||||
}
|
}
|
||||||
|
|
||||||
provider "helm" {
|
provider "helm" {
|
||||||
kubernetes = {
|
kubernetes {
|
||||||
host = local.k8s_host
|
host = local.k8s_host
|
||||||
insecure = true
|
insecure = true
|
||||||
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
||||||
|
|
|
||||||
|
|
@ -23,7 +23,7 @@ provider "kubernetes" {
|
||||||
}
|
}
|
||||||
|
|
||||||
provider "helm" {
|
provider "helm" {
|
||||||
kubernetes = {
|
kubernetes {
|
||||||
host = local.k8s_host
|
host = local.k8s_host
|
||||||
insecure = true
|
insecure = true
|
||||||
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
||||||
|
|
|
||||||
|
|
@ -7,106 +7,6 @@ module "longhorn" {
|
||||||
wait_on = var.wait_on
|
wait_on = var.wait_on
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "helm_release" "descheduler" {
|
|
||||||
name = "descheduler"
|
|
||||||
repository = "https://kubernetes-sigs.github.io/descheduler/"
|
|
||||||
chart = "descheduler"
|
|
||||||
namespace = "kube-system"
|
|
||||||
version = "0.33.0"
|
|
||||||
|
|
||||||
values = [
|
|
||||||
yamlencode({
|
|
||||||
deschedulerPolicy = {
|
|
||||||
# Only evict pods older than 5 minutes
|
|
||||||
maxPodLifeTimeSeconds = 300
|
|
||||||
|
|
||||||
# Respect PodDisruptionBudgets
|
|
||||||
evictLocalStoragePods = false
|
|
||||||
ignorePvcPods = true
|
|
||||||
|
|
||||||
strategies = {
|
|
||||||
LowNodeUtilization = {
|
|
||||||
enabled = true
|
|
||||||
params = {
|
|
||||||
nodeResourceUtilizationThresholds = {
|
|
||||||
thresholds = {
|
|
||||||
cpu = 30
|
|
||||||
memory = 30
|
|
||||||
}
|
|
||||||
targetThresholds = {
|
|
||||||
cpu = 50
|
|
||||||
memory = 50
|
|
||||||
}
|
|
||||||
}
|
|
||||||
evictableNamespaces = {
|
|
||||||
exclude = ["kube-system", "longhorn-system"]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
# Additional settings
|
|
||||||
schedule = "*/10 * * * *" # Run every 10 minutes
|
|
||||||
|
|
||||||
# Don't run on control plane nodes
|
|
||||||
nodeSelector = {
|
|
||||||
"node-role.kubernetes.io/control-plane" = null
|
|
||||||
}
|
|
||||||
|
|
||||||
# Resource limits for the descheduler pod itself
|
|
||||||
resources = {
|
|
||||||
requests = {
|
|
||||||
cpu = "100m"
|
|
||||||
memory = "100Mi"
|
|
||||||
}
|
|
||||||
limits = {
|
|
||||||
cpu = "500m"
|
|
||||||
memory = "256Mi"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
]
|
|
||||||
}
|
|
||||||
|
|
||||||
# resource "kubernetes_config_map" "scheduler_config" {
|
|
||||||
# metadata {
|
|
||||||
# name = "scheduler-config"
|
|
||||||
# namespace = "kube-system"
|
|
||||||
# }
|
|
||||||
#
|
|
||||||
# data = {
|
|
||||||
# "config.yaml" = yamlencode({
|
|
||||||
# apiVersion = "kubescheduler.config.k8s.io/v1beta3"
|
|
||||||
# kind = "KubeSchedulerConfiguration"
|
|
||||||
# profiles = [{
|
|
||||||
# schedulerName = "default-scheduler"
|
|
||||||
# plugins = {
|
|
||||||
# score = {
|
|
||||||
# enabled = [
|
|
||||||
# { name = "NodeResourcesFit", weight = 100 },
|
|
||||||
# { name = "NodeResourcesBalancedAllocation", weight = 100 },
|
|
||||||
# { name = "NodeAffinity", weight = 50 },
|
|
||||||
# { name = "InterPodAffinity", weight = 50 },
|
|
||||||
# { name = "NodePreferAvoidPods", weight = 10000 },
|
|
||||||
# { name = "TaintToleration", weight = 100 }
|
|
||||||
# ]
|
|
||||||
# }
|
|
||||||
# }
|
|
||||||
# pluginConfig = [{
|
|
||||||
# name = "NodeResourcesBalancedAllocation"
|
|
||||||
# args = {
|
|
||||||
# resources = [
|
|
||||||
# { name = "cpu", weight = 100 },
|
|
||||||
# { name = "memory", weight = 100 }
|
|
||||||
# ]
|
|
||||||
# }
|
|
||||||
# }]
|
|
||||||
# }]
|
|
||||||
# })
|
|
||||||
# }
|
|
||||||
# }
|
|
||||||
|
|
||||||
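The removed descheduler release above feeds its chart configuration through yamlencode(), which turns an HCL object into the YAML document Helm expects in values. A trimmed sketch of that pattern, with the chart coordinates copied from the removed block and only a subset of its values retained:

resource "helm_release" "descheduler" {
  name       = "descheduler"
  repository = "https://kubernetes-sigs.github.io/descheduler/"
  chart      = "descheduler"
  namespace  = "kube-system"
  version    = "0.33.0"

  values = [
    yamlencode({
      schedule = "*/10 * * * *" # run every 10 minutes, as in the removed block

      # resource limits for the descheduler pod itself
      resources = {
        requests = { cpu = "100m", memory = "100Mi" }
        limits   = { cpu = "500m", memory = "256Mi" }
      }
    })
  ]
}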
# Configure ingress to allow forwarded headers
|
# Configure ingress to allow forwarded headers
|
||||||
resource "kubernetes_manifest" "rke2-ingress-nginx-config" {
|
resource "kubernetes_manifest" "rke2-ingress-nginx-config" {
|
||||||
manifest = {
|
manifest = {
|
||||||
|
|
@ -121,25 +21,6 @@ resource "kubernetes_manifest" "rke2-ingress-nginx-config" {
|
||||||
controller:
|
controller:
|
||||||
config:
|
config:
|
||||||
use-forwarded-headers: "true"
|
use-forwarded-headers: "true"
|
||||||
|
|
||||||
# Buffer settings to prevent "upstream sent too big header" errors
|
|
||||||
proxy-buffer-size: "16k"
|
|
||||||
proxy-buffers: "8 16k"
|
|
||||||
proxy-busy-buffers-size: "32k"
|
|
||||||
large-client-header-buffers: "4 16k"
|
|
||||||
client-header-buffer-size: "16k"
|
|
||||||
client-body-buffer-size: "16k"
|
|
||||||
|
|
||||||
# File upload settings for production
|
|
||||||
client-max-body-size: "100m"
|
|
||||||
proxy-body-size: "100m"
|
|
||||||
proxy-request-buffering: "off"
|
|
||||||
|
|
||||||
# Additional production timeouts
|
|
||||||
proxy-connect-timeout: "600"
|
|
||||||
proxy-send-timeout: "600"
|
|
||||||
proxy-read-timeout: "600"
|
|
||||||
client-body-timeout: "600"
|
|
||||||
EOT
|
EOT
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
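The rke2-ingress-nginx-config resource above injects nginx controller settings as a YAML string (the EOT heredoc) inside a kubernetes_manifest. A sketch of its likely shape, assuming the manifest is RKE2's helm.cattle.io/v1 HelmChartConfig in kube-system; neither the apiVersion/kind nor the metadata are visible in the hunk, so treat those fields as assumptions:

resource "kubernetes_manifest" "rke2_ingress_nginx_config" {
  manifest = {
    apiVersion = "helm.cattle.io/v1" # assumed
    kind       = "HelmChartConfig"   # assumed
    metadata = {
      name      = "rke2-ingress-nginx" # assumed
      namespace = "kube-system"        # assumed
    }
    spec = {
      # valuesContent carries plain YAML as a string, hence the heredoc.
      valuesContent = <<-EOT
        controller:
          config:
            use-forwarded-headers: "true"
      EOT
    }
  }
}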
@ -23,7 +23,7 @@ provider "kubernetes" {
|
||||||
}
|
}
|
||||||
|
|
||||||
provider "helm" {
|
provider "helm" {
|
||||||
kubernetes = {
|
kubernetes {
|
||||||
host = local.k8s_host
|
host = local.k8s_host
|
||||||
insecure = true
|
insecure = true
|
||||||
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
||||||
|
|
|
||||||
|
|
@ -36,7 +36,7 @@ provider "kubernetes" {
|
||||||
}
|
}
|
||||||
|
|
||||||
provider "helm" {
|
provider "helm" {
|
||||||
kubernetes = {
|
kubernetes {
|
||||||
host = local.k8s_host
|
host = local.k8s_host
|
||||||
insecure = true
|
insecure = true
|
||||||
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
||||||
|
|
|
||||||
|
|
@ -23,7 +23,7 @@ provider "kubernetes" {
|
||||||
}
|
}
|
||||||
|
|
||||||
provider "helm" {
|
provider "helm" {
|
||||||
kubernetes = {
|
kubernetes {
|
||||||
host = local.k8s_host
|
host = local.k8s_host
|
||||||
insecure = true
|
insecure = true
|
||||||
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
||||||
|
|
|
||||||
|
|
@ -23,7 +23,7 @@ provider "kubernetes" {
|
||||||
}
|
}
|
||||||
|
|
||||||
provider "helm" {
|
provider "helm" {
|
||||||
kubernetes = {
|
kubernetes {
|
||||||
host = local.k8s_host
|
host = local.k8s_host
|
||||||
insecure = true
|
insecure = true
|
||||||
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
||||||
|
|
|
||||||
|
|
@ -4,7 +4,7 @@ resource "helm_release" "homepage" {
|
||||||
chart = "homepage"
|
chart = "homepage"
|
||||||
namespace = var.namespace
|
namespace = var.namespace
|
||||||
create_namespace = true
|
create_namespace = true
|
||||||
version = "2.1.0"
|
version = "2.0.1"
|
||||||
|
|
||||||
values = [
|
values = [
|
||||||
templatefile("${path.module}/values.yaml.tftpl", {
|
templatefile("${path.module}/values.yaml.tftpl", {
|
||||||
|
|
|
||||||
|
|
@ -23,7 +23,7 @@ provider "kubernetes" {
|
||||||
}
|
}
|
||||||
|
|
||||||
provider "helm" {
|
provider "helm" {
|
||||||
kubernetes = {
|
kubernetes {
|
||||||
host = local.k8s_host
|
host = local.k8s_host
|
||||||
insecure = true
|
insecure = true
|
||||||
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
||||||
|
|
|
||||||
|
|
@ -1,7 +1,11 @@
|
||||||
config:
|
config:
|
||||||
bookmarks:
|
bookmarks:
|
||||||
services:
|
- Developer:
|
||||||
|
- Github:
|
||||||
|
- abbr: GH
|
||||||
|
href: https://github.com/
|
||||||
widgets:
|
widgets:
|
||||||
|
# show the kubernetes widget, with the cluster summary and individual nodes
|
||||||
- kubernetes:
|
- kubernetes:
|
||||||
cluster:
|
cluster:
|
||||||
show: true
|
show: true
|
||||||
|
|
@ -14,6 +18,9 @@ config:
|
||||||
cpu: true
|
cpu: true
|
||||||
memory: true
|
memory: true
|
||||||
showLabel: true
|
showLabel: true
|
||||||
|
- search:
|
||||||
|
provider: duckduckgo
|
||||||
|
target: _blank
|
||||||
kubernetes:
|
kubernetes:
|
||||||
mode: cluster
|
mode: cluster
|
||||||
settings:
|
settings:
|
||||||
|
|
@ -26,20 +33,16 @@ serviceAccount:
|
||||||
# This enables the service account to access the necessary resources
|
# This enables the service account to access the necessary resources
|
||||||
enableRbac: true
|
enableRbac: true
|
||||||
|
|
||||||
env:
|
|
||||||
- name: HOMEPAGE_ALLOWED_HOSTS
|
|
||||||
value: ${service_uri}
|
|
||||||
|
|
||||||
ingress:
|
ingress:
|
||||||
main:
|
main:
|
||||||
enabled: true
|
enabled: true
|
||||||
#annotations:
|
annotations:
|
||||||
# # Example annotations to add Homepage to your Homepage!
|
# Example annotations to add Homepage to your Homepage!
|
||||||
# gethomepage.dev/enabled: "true"
|
gethomepage.dev/enabled: "true"
|
||||||
# gethomepage.dev/name: "Homepage"
|
gethomepage.dev/name: "Homepage"
|
||||||
# gethomepage.dev/description: "Dynamically Detected Homepage"
|
gethomepage.dev/description: "Dynamically Detected Homepage"
|
||||||
# gethomepage.dev/group: "Dynamic"
|
gethomepage.dev/group: "Dynamic"
|
||||||
# gethomepage.dev/icon: "homepage.png"
|
gethomepage.dev/icon: "homepage.png"
|
||||||
hosts:
|
hosts:
|
||||||
- host: ${service_uri}
|
- host: ${service_uri}
|
||||||
paths:
|
paths:
|
||||||
|
|
|
||||||
|
|
@ -23,7 +23,7 @@ provider "kubernetes" {
|
||||||
}
|
}
|
||||||
|
|
||||||
provider "helm" {
|
provider "helm" {
|
||||||
kubernetes = {
|
kubernetes {
|
||||||
host = local.k8s_host
|
host = local.k8s_host
|
||||||
insecure = true
|
insecure = true
|
||||||
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
||||||
|
|
|
||||||
|
|
@ -7,38 +7,50 @@ resource "helm_release" "longhorn" {
|
||||||
create_namespace = true
|
create_namespace = true
|
||||||
version = "1.7.1"
|
version = "1.7.1"
|
||||||
|
|
||||||
set = [{
|
set {
|
||||||
name = "defaultSettings.defaultDataPath"
|
name = "defaultSettings.defaultDataPath"
|
||||||
value = "/var/lib/longhorn/"
|
value = "/var/lib/longhorn/"
|
||||||
},
|
}
|
||||||
{
|
|
||||||
name = "defaultSettings.defaultDataLocality"
|
set {
|
||||||
value = "best-effort"
|
name = "defaultSettings.defaultDataLocality"
|
||||||
},
|
value = "best-effort"
|
||||||
{
|
}
|
||||||
name = "defaultSettings.storageOverProvisioningPercentage"
|
|
||||||
value = "90"
|
set {
|
||||||
},
|
name = "defaultSettings.storageOverProvisioningPercentage"
|
||||||
{
|
value = "90"
|
||||||
name = "csi.kubeletRootDir"
|
}
|
||||||
value = "/var/lib/kubelet" # Adjust if your Rancher setup uses a different path
|
|
||||||
},
|
# set {
|
||||||
{
|
# name = "global.cattle.systemDefaultRegistry"
|
||||||
name = "enablePSP"
|
# value = "" # Set this to your private registry if you're using one
|
||||||
value = "false"
|
# }
|
||||||
},
|
|
||||||
{
|
set {
|
||||||
name = "serviceMonitor.enabled"
|
name = "csi.kubeletRootDir"
|
||||||
value = "true"
|
value = "/var/lib/kubelet" # Adjust if your Rancher setup uses a different path
|
||||||
},
|
}
|
||||||
{
|
|
||||||
name = "persistence.defaultClassReplicaCount"
|
set {
|
||||||
value = "1"
|
name = "enablePSP"
|
||||||
},
|
value = "false"
|
||||||
{
|
}
|
||||||
name = "persistence.defaultDataLocality"
|
|
||||||
value = "best-effort"
|
set {
|
||||||
}]
|
name = "serviceMonitor.enabled"
|
||||||
|
value = "true"
|
||||||
|
}
|
||||||
|
|
||||||
|
set {
|
||||||
|
name = "persistence.defaultClassReplicaCount"
|
||||||
|
value = "1"
|
||||||
|
}
|
||||||
|
|
||||||
|
set {
|
||||||
|
name = "persistence.defaultDataLocality"
|
||||||
|
value = "best-effort"
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
output "installed" {
|
output "installed" {
|
||||||
|
|
|
||||||
|
|
@ -23,7 +23,7 @@ provider "kubernetes" {
|
||||||
}
|
}
|
||||||
|
|
||||||
provider "helm" {
|
provider "helm" {
|
||||||
kubernetes = {
|
kubernetes {
|
||||||
host = local.k8s_host
|
host = local.k8s_host
|
||||||
insecure = true
|
insecure = true
|
||||||
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
||||||
|
|
|
||||||
|
|
@ -23,7 +23,7 @@ provider "kubernetes" {
|
||||||
}
|
}
|
||||||
|
|
||||||
provider "helm" {
|
provider "helm" {
|
||||||
kubernetes = {
|
kubernetes {
|
||||||
host = local.k8s_host
|
host = local.k8s_host
|
||||||
insecure = true
|
insecure = true
|
||||||
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
||||||
|
|
|
||||||
|
|
@ -6,56 +6,50 @@ resource "random_password" "minio_access_key" {
|
||||||
resource "random_password" "minio_secret_key" {
|
resource "random_password" "minio_secret_key" {
|
||||||
length = 40
|
length = 40
|
||||||
special = true
|
special = true
|
||||||
#override_special = "!#$%&*()-_=+[]{}<>:?"
|
|
||||||
#min_special = 2
|
|
||||||
#min_upper = 2
|
|
||||||
#min_lower = 2
|
|
||||||
#min_numeric = 2
|
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "helm_release" "minio" {
|
resource "helm_release" "minio" {
|
||||||
name = "minio"
|
name = "minio"
|
||||||
repository = "oci://registry-1.docker.io/bitnamicharts"
|
repository = "https://charts.bitnami.com/bitnami"
|
||||||
chart = "minio"
|
chart = "minio"
|
||||||
namespace = var.namespace
|
namespace = var.namespace
|
||||||
create_namespace = true
|
create_namespace = true
|
||||||
version = "16.0.0"
|
version = "14.7.16"
|
||||||
wait = true
|
|
||||||
wait_for_jobs = true
|
|
||||||
|
|
||||||
set_sensitive = [{
|
set_sensitive {
|
||||||
name = "auth.rootUser"
|
name = "auth.rootUser"
|
||||||
value = random_password.minio_access_key.result
|
value = random_password.minio_access_key.result
|
||||||
},
|
}
|
||||||
{
|
|
||||||
name = "auth.rootPassword"
|
|
||||||
value = random_password.minio_secret_key.result
|
|
||||||
}]
|
|
||||||
|
|
||||||
set = [{
|
set_sensitive {
|
||||||
|
name = "auth.rootPassword"
|
||||||
|
value = random_password.minio_secret_key.result
|
||||||
|
}
|
||||||
|
|
||||||
|
set {
|
||||||
name = "mode"
|
name = "mode"
|
||||||
value = var.mode
|
value = var.mode
|
||||||
},
|
}
|
||||||
|
|
||||||
{
|
set {
|
||||||
name = "resourcesPreset"
|
name = "resourcesPreset"
|
||||||
value = "nano"
|
value = "nano"
|
||||||
},
|
}
|
||||||
|
|
||||||
{
|
set {
|
||||||
name = "statefulset.replicaCount"
|
name = "statefulset.replicaCount"
|
||||||
value = var.replicas
|
value = var.replicas
|
||||||
},
|
}
|
||||||
|
|
||||||
{
|
set {
|
||||||
name = "statefulset.drivesPerNode"
|
name = "statefulset.drivesPerNode"
|
||||||
value = var.replicas < 4 ? 2 : 1
|
value = var.replicas < 4 ? 2 : 1
|
||||||
},
|
}
|
||||||
|
|
||||||
{
|
set {
|
||||||
name = "persistence.size"
|
name = "persistence.size"
|
||||||
value = var.storageSize
|
value = var.storageSize
|
||||||
}]
|
}
|
||||||
|
|
||||||
values = [
|
values = [
|
||||||
templatefile("${path.module}/values.yaml.tftpl", {
|
templatefile("${path.module}/values.yaml.tftpl", {
|
||||||
|
|
|
||||||
|
|
@ -23,7 +23,7 @@ provider "kubernetes" {
|
||||||
}
|
}
|
||||||
|
|
||||||
provider "helm" {
|
provider "helm" {
|
||||||
kubernetes = {
|
kubernetes {
|
||||||
host = local.k8s_host
|
host = local.k8s_host
|
||||||
insecure = true
|
insecure = true
|
||||||
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
||||||
|
|
|
||||||
|
|
@ -1,28 +1,10 @@
|
||||||
resource "null_resource" "health_check" {
|
|
||||||
depends_on = [var.wait_on]
|
|
||||||
|
|
||||||
provisioner "local-exec" {
|
|
||||||
command = <<-EOT
|
|
||||||
until curl -s -f "https://${var.server}/minio/health/live" || [[ $attempts -ge 60 ]]; do
|
|
||||||
sleep 10
|
|
||||||
attempts=$((attempts+1))
|
|
||||||
done
|
|
||||||
if [[ $attempts -ge 60 ]]; then
|
|
||||||
echo "Minio health check failed after maximum attempts"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
EOT
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
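The removed health_check resource above gates the MinIO Terraform resources on the server's liveness endpoint. A sketch of the same gate with the retry counter initialised explicitly and the shell pinned to bash (the loop uses [[ ]]); var.server and var.wait_on are the module's own inputs:

resource "null_resource" "health_check" {
  depends_on = [var.wait_on]

  provisioner "local-exec" {
    interpreter = ["/bin/bash", "-c"]
    command     = <<-EOT
      attempts=0
      until curl -s -f "https://${var.server}/minio/health/live" || [[ $attempts -ge 60 ]]; do
        sleep 10
        attempts=$((attempts + 1))
      done
      if [[ $attempts -ge 60 ]]; then
        echo "MinIO health check failed after maximum attempts"
        exit 1
      fi
    EOT
  }
}

Downstream resources then reference it, e.g. depends_on = [null_resource.health_check] on the buckets and IAM user, as the lines above do.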
||||||
resource "minio_s3_bucket" "overlay" {
|
resource "minio_s3_bucket" "overlay" {
|
||||||
depends_on = [null_resource.health_check]
|
depends_on = [var.wait_on]
|
||||||
bucket = var.name
|
bucket = var.name
|
||||||
acl = "private"
|
acl = "private"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "minio_s3_bucket_policy" "overlay" {
|
resource "minio_s3_bucket_policy" "overlay" {
|
||||||
depends_on = [minio_s3_bucket.overlay]
|
|
||||||
bucket = minio_s3_bucket.overlay.bucket
|
bucket = minio_s3_bucket.overlay.bucket
|
||||||
policy = jsonencode({
|
policy = jsonencode({
|
||||||
"Version" : "2012-10-17",
|
"Version" : "2012-10-17",
|
||||||
|
|
@ -38,7 +20,7 @@ resource "minio_s3_bucket_policy" "overlay" {
|
||||||
"s3:GetBucketLocation"
|
"s3:GetBucketLocation"
|
||||||
],
|
],
|
||||||
"Resource" : [
|
"Resource" : [
|
||||||
minio_s3_bucket.overlay.arn,
|
"arn:aws:s3:::bouwroute"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|
@ -52,7 +34,7 @@ resource "minio_s3_bucket_policy" "overlay" {
|
||||||
"s3:ListBucket"
|
"s3:ListBucket"
|
||||||
],
|
],
|
||||||
"Resource" : [
|
"Resource" : [
|
||||||
minio_s3_bucket.overlay.arn,
|
"arn:aws:s3:::bouwroute"
|
||||||
],
|
],
|
||||||
"Condition" : {
|
"Condition" : {
|
||||||
"StringEquals" : {
|
"StringEquals" : {
|
||||||
|
|
@ -73,72 +55,7 @@ resource "minio_s3_bucket_policy" "overlay" {
|
||||||
"s3:GetObject"
|
"s3:GetObject"
|
||||||
],
|
],
|
||||||
"Resource" : [
|
"Resource" : [
|
||||||
"${minio_s3_bucket.overlay.arn}/**",
|
"arn:aws:s3:::bouwroute/**"
|
||||||
]
|
|
||||||
}
|
|
||||||
]
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "minio_s3_bucket" "uploads" {
|
|
||||||
depends_on = [null_resource.health_check]
|
|
||||||
bucket = "uploads"
|
|
||||||
acl = "private"
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "minio_s3_bucket_policy" "uploads" {
|
|
||||||
depends_on = [minio_s3_bucket.uploads]
|
|
||||||
bucket = minio_s3_bucket.uploads.bucket
|
|
||||||
policy = jsonencode({
|
|
||||||
"Version" : "2012-10-17",
|
|
||||||
"Statement" : [
|
|
||||||
{
|
|
||||||
"Effect" : "Allow",
|
|
||||||
"Principal" : {
|
|
||||||
"AWS" : [
|
|
||||||
"*"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"Action" : [
|
|
||||||
"s3:GetBucketLocation"
|
|
||||||
],
|
|
||||||
"Resource" : [
|
|
||||||
minio_s3_bucket.uploads.arn,
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"Effect" : "Allow",
|
|
||||||
"Principal" : {
|
|
||||||
"AWS" : [
|
|
||||||
"*"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"Action" : [
|
|
||||||
"s3:ListBucket"
|
|
||||||
],
|
|
||||||
"Resource" : [
|
|
||||||
minio_s3_bucket.uploads.arn,
|
|
||||||
],
|
|
||||||
"Condition" : {
|
|
||||||
"StringEquals" : {
|
|
||||||
"s3:prefix" : [
|
|
||||||
"*"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"Effect" : "Allow",
|
|
||||||
"Principal" : {
|
|
||||||
"AWS" : [
|
|
||||||
"*"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"Action" : [
|
|
||||||
"s3:GetObject"
|
|
||||||
],
|
|
||||||
"Resource" : [
|
|
||||||
"${minio_s3_bucket.uploads.arn}/**",
|
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
|
|
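One side of the policy hunks above hardcodes arn:aws:s3:::bouwroute while the other derives the ARNs from the bucket resource itself. A sketch of the attribute-reference form for the anonymous-read policy, using the minio provider resources and var.name as the module does; the statement list is trimmed to two entries:

variable "name" {
  type = string
}

resource "minio_s3_bucket" "overlay" {
  bucket = var.name
  acl    = "private"
}

resource "minio_s3_bucket_policy" "overlay" {
  bucket = minio_s3_bucket.overlay.bucket
  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Effect    = "Allow"
        Principal = { AWS = ["*"] }
        Action    = ["s3:GetBucketLocation", "s3:ListBucket"]
        Resource  = [minio_s3_bucket.overlay.arn] # bucket-level actions
      },
      {
        Effect    = "Allow"
        Principal = { AWS = ["*"] }
        Action    = ["s3:GetObject"]
        Resource  = ["${minio_s3_bucket.overlay.arn}/*"] # object-level actions
      }
    ]
  })
}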
@ -146,12 +63,10 @@ resource "minio_s3_bucket_policy" "uploads" {
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "minio_iam_user" "overlay" {
|
resource "minio_iam_user" "overlay" {
|
||||||
depends_on = [null_resource.health_check]
|
|
||||||
name = var.name
|
name = var.name
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "minio_iam_policy" "overlay" {
|
resource "minio_iam_policy" "overlay" {
|
||||||
depends_on = [minio_s3_bucket.overlay, minio_s3_bucket.uploads]
|
|
||||||
name = minio_s3_bucket.overlay.bucket
|
name = minio_s3_bucket.overlay.bucket
|
||||||
policy = jsonencode({
|
policy = jsonencode({
|
||||||
Version = "2012-10-17"
|
Version = "2012-10-17"
|
||||||
|
|
@ -159,7 +74,7 @@ resource "minio_iam_policy" "overlay" {
|
||||||
{
|
{
|
||||||
Effect = "Allow"
|
Effect = "Allow"
|
||||||
Action = ["s3:ListBucket"]
|
Action = ["s3:ListBucket"]
|
||||||
Resource = [minio_s3_bucket.overlay.arn, minio_s3_bucket.uploads.arn, ]
|
Resource = ["arn:aws:s3:::${var.name}"]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Effect = "Allow"
|
Effect = "Allow"
|
||||||
|
|
@ -168,7 +83,7 @@ resource "minio_iam_policy" "overlay" {
|
||||||
"s3:PutObject",
|
"s3:PutObject",
|
||||||
"s3:DeleteObject"
|
"s3:DeleteObject"
|
||||||
]
|
]
|
||||||
Resource = ["${minio_s3_bucket.overlay.arn}/*", "${minio_s3_bucket.uploads.arn}/*"]
|
Resource = ["arn:aws:s3:::${var.name}/*"]
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
})
|
})
|
||||||
|
|
@ -176,14 +91,11 @@ resource "minio_iam_policy" "overlay" {
|
||||||
|
|
||||||
|
|
||||||
resource "minio_iam_user_policy_attachment" "overlay" {
|
resource "minio_iam_user_policy_attachment" "overlay" {
|
||||||
depends_on = [minio_iam_user.overlay, minio_iam_policy.overlay]
|
|
||||||
|
|
||||||
user_name = minio_iam_user.overlay.id
|
user_name = minio_iam_user.overlay.id
|
||||||
policy_name = minio_iam_policy.overlay.id
|
policy_name = minio_iam_policy.overlay.id
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "minio_iam_service_account" "overlay" {
|
resource "minio_iam_service_account" "overlay" {
|
||||||
depends_on = [minio_iam_user.overlay, minio_s3_bucket.overlay, minio_s3_bucket.uploads]
|
|
||||||
target_user = minio_iam_user.overlay.name
|
target_user = minio_iam_user.overlay.name
|
||||||
policy = jsonencode({
|
policy = jsonencode({
|
||||||
Version = "2012-10-17"
|
Version = "2012-10-17"
|
||||||
|
|
@ -191,7 +103,7 @@ resource "minio_iam_service_account" "overlay" {
|
||||||
{
|
{
|
||||||
Effect = "Allow"
|
Effect = "Allow"
|
||||||
Action = ["s3:ListBucket"]
|
Action = ["s3:ListBucket"]
|
||||||
Resource = [minio_s3_bucket.overlay.arn, minio_s3_bucket.uploads.arn]
|
Resource = ["arn:aws:s3:::${var.name}"]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Effect = "Allow"
|
Effect = "Allow"
|
||||||
|
|
@ -200,16 +112,12 @@ resource "minio_iam_service_account" "overlay" {
|
||||||
"s3:PutObject",
|
"s3:PutObject",
|
||||||
"s3:DeleteObject"
|
"s3:DeleteObject"
|
||||||
]
|
]
|
||||||
Resource = ["${minio_s3_bucket.overlay.arn}/*", "${minio_s3_bucket.uploads.arn}/*"]
|
Resource = ["arn:aws:s3:::${var.name}/*"]
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
output "bucket" {
|
|
||||||
value = var.name
|
|
||||||
}
|
|
||||||
|
|
||||||
output "access_key" {
|
output "access_key" {
|
||||||
value = minio_iam_service_account.overlay.access_key
|
value = minio_iam_service_account.overlay.access_key
|
||||||
sensitive = true
|
sensitive = true
|
||||||
|
|
|
||||||
|
|
@ -2,7 +2,7 @@ terraform {
|
||||||
required_providers {
|
required_providers {
|
||||||
minio = {
|
minio = {
|
||||||
source = "aminueza/minio"
|
source = "aminueza/minio"
|
||||||
version = "~> 3.3.0"
|
version = "~> 2.5.0"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -1,12 +1,6 @@
|
||||||
tls:
|
tls:
|
||||||
enabled: ${tobool(tls)}
|
enabled: ${tobool(tls)}
|
||||||
|
|
||||||
metrics:
|
|
||||||
enabled: true
|
|
||||||
serviceMonitor:
|
|
||||||
enabled: true
|
|
||||||
namespace: monitoring
|
|
||||||
|
|
||||||
ingress:
|
ingress:
|
||||||
enabled: ${tobool(admin)}
|
enabled: ${tobool(admin)}
|
||||||
tls: ${tobool(tls)}
|
tls: ${tobool(tls)}
|
||||||
|
|
@ -57,13 +51,3 @@ apiIngress:
|
||||||
ingress.kubernetes.io/proxy-body-size: "0"
|
ingress.kubernetes.io/proxy-body-size: "0"
|
||||||
nginx.ingress.kubernetes.io/proxy-body-size: "0"
|
nginx.ingress.kubernetes.io/proxy-body-size: "0"
|
||||||
%{ endif }
|
%{ endif }
|
||||||
|
|
||||||
affinity:
|
|
||||||
podAntiAffinity:
|
|
||||||
preferredDuringSchedulingIgnoredDuringExecution:
|
|
||||||
- weight: 100
|
|
||||||
podAffinityTerm:
|
|
||||||
labelSelector:
|
|
||||||
matchLabels:
|
|
||||||
app.kubernetes.io/name: minio
|
|
||||||
topologyKey: kubernetes.io/hostname
|
|
||||||
|
|
@ -23,7 +23,7 @@ provider "kubernetes" {
|
||||||
}
|
}
|
||||||
|
|
||||||
provider "helm" {
|
provider "helm" {
|
||||||
kubernetes = {
|
kubernetes {
|
||||||
host = local.k8s_host
|
host = local.k8s_host
|
||||||
insecure = true
|
insecure = true
|
||||||
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
||||||
|
|
|
||||||
|
|
@ -1,3 +0,0 @@
|
||||||
locals {
|
|
||||||
service_uri = join(".", [var.service_name, var.server_dns])
|
|
||||||
}
|
|
||||||
|
|
@ -1,92 +0,0 @@
|
||||||
# Monitoring stack for k3s cluster with Thanos
|
|
||||||
terraform {
|
|
||||||
required_providers {
|
|
||||||
helm = {
|
|
||||||
source = "hashicorp/helm"
|
|
||||||
version = ">= 2.0.0"
|
|
||||||
}
|
|
||||||
kubernetes = {
|
|
||||||
source = "hashicorp/kubernetes"
|
|
||||||
version = ">= 2.0.0"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
# Create monitoring namespace
|
|
||||||
resource "kubernetes_namespace" "monitoring" {
|
|
||||||
metadata {
|
|
||||||
name = "monitoring"
|
|
||||||
}
|
|
||||||
|
|
||||||
lifecycle {
|
|
||||||
ignore_changes = [metadata]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "random_password" "grafana_admin_password" {
|
|
||||||
length = 40
|
|
||||||
special = false
|
|
||||||
}
|
|
||||||
|
|
||||||
# Create secret for remote write authentication
|
|
||||||
resource "kubernetes_secret" "prometheus_remote_write_auth" {
|
|
||||||
metadata {
|
|
||||||
name = "prometheus-remote-write-auth"
|
|
||||||
namespace = kubernetes_namespace.monitoring.metadata[0].name
|
|
||||||
}
|
|
||||||
|
|
||||||
data = {
|
|
||||||
username = var.remote_write_username
|
|
||||||
password = var.remote_write_password
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
# Prometheus + Grafana + Alertmanager stack
|
|
||||||
resource "helm_release" "kube_prometheus_stack" {
|
|
||||||
depends_on = [var.wait_on, kubernetes_secret.prometheus_remote_write_auth]
|
|
||||||
|
|
||||||
name = "monitoring"
|
|
||||||
repository = "https://prometheus-community.github.io/helm-charts"
|
|
||||||
chart = "kube-prometheus-stack"
|
|
||||||
namespace = kubernetes_namespace.monitoring.metadata[0].name
|
|
||||||
version = "75.9.0" # Specify version for reproducibility
|
|
||||||
|
|
||||||
# Use values from template file
|
|
||||||
values = [
|
|
||||||
templatefile("${path.module}/monitoring-values.yaml.tftpl", {
|
|
||||||
remote_write_url = var.remote_write_url
|
|
||||||
remote_read_url = var.remote_read_url
|
|
||||||
grafana_admin_password = random_password.grafana_admin_password.result
|
|
||||||
})
|
|
||||||
]
|
|
||||||
}
|
|
||||||
|
|
||||||
# Output important endpoints
|
|
||||||
output "grafana_url" {
|
|
||||||
value = "http://monitoring-grafana.${kubernetes_namespace.monitoring.metadata[0].name}.svc.cluster.local"
|
|
||||||
}
|
|
||||||
|
|
||||||
output "alertmanager_url" {
|
|
||||||
value = "http://monitoring-kube-prometheus-alertmanager.${kubernetes_namespace.monitoring.metadata[0].name}.svc.cluster.local:9093"
|
|
||||||
}
|
|
||||||
|
|
||||||
output "prometheus_url" {
|
|
||||||
value = "http://monitoring-kube-prometheus-prometheus.${kubernetes_namespace.monitoring.metadata[0].name}.svc.cluster.local:9090"
|
|
||||||
}
|
|
||||||
|
|
||||||
# Instructions for accessing services
|
|
||||||
output "access_instructions" {
|
|
||||||
value = <<-EOT
|
|
||||||
To access services from outside the cluster:
|
|
||||||
|
|
||||||
Grafana:
|
|
||||||
kubectl port-forward -n ${kubernetes_namespace.monitoring.metadata[0].name} svc/monitoring-grafana 3000:80
|
|
||||||
|
|
||||||
Alertmanager:
|
|
||||||
kubectl port-forward -n ${kubernetes_namespace.monitoring.metadata[0].name} svc/monitoring-kube-prometheus-alertmanager 9093:9093
|
|
||||||
|
|
||||||
Default Grafana credentials:
|
|
||||||
Username: admin
|
|
||||||
Password: ${random_password.grafana_admin_password.result}
|
|
||||||
EOT
|
|
||||||
}
|
|
||||||
|
|
@ -1,135 +0,0 @@
|
||||||
additionalPrometheusRulesMap:
|
|
||||||
custom-app-rules:
|
|
||||||
groups:
|
|
||||||
- name: aspnetcore
|
|
||||||
interval: 5m
|
|
||||||
rules:
|
|
||||||
- alert: HighRequestLatency
|
|
||||||
expr: histogram_quantile(0.95, sum by (job, instance) (rate(http_request_duration_seconds_bucket[5m]))) > 0.5
|
|
||||||
for: 5m
|
|
||||||
labels:
|
|
||||||
severity: warning
|
|
||||||
annotations:
|
|
||||||
summary: "High request latency on {{ $labels.instance }}"
|
|
||||||
description: "95th percentile latency is above 500ms (current value: {{ $value }}s)"
|
|
||||||
- alert: HighErrorRate
|
|
||||||
expr: 'rate(http_requests_total{status=~"5.."}[5m]) > 0.05'
|
|
||||||
for: 5m
|
|
||||||
labels:
|
|
||||||
severity: critical
|
|
||||||
annotations:
|
|
||||||
summary: "High error rate on {{ $labels.instance }}"
|
|
||||||
description: "Error rate is above 5% (current value: {{ $value }})"
|
|
||||||
|
|
||||||
prometheus:
|
|
||||||
prometheusSpec:
|
|
||||||
retention: 24h
|
|
||||||
retentionSize: 10GB
|
|
||||||
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
memory: 200Mi
|
|
||||||
cpu: 100m
|
|
||||||
limits:
|
|
||||||
memory: 500Mi
|
|
||||||
cpu: 500m
|
|
||||||
|
|
||||||
# Remote write to VictoriaMetrics
|
|
||||||
remoteWrite:
|
|
||||||
- url: ${remote_write_url}
|
|
||||||
queueConfig:
|
|
||||||
maxSamplesPerSend: 10000
|
|
||||||
maxShards: 5
|
|
||||||
minShards: 1
|
|
||||||
batchSendDeadline: 5s
|
|
||||||
basicAuth:
|
|
||||||
username:
|
|
||||||
name: prometheus-remote-write-auth
|
|
||||||
key: username
|
|
||||||
password:
|
|
||||||
name: prometheus-remote-write-auth
|
|
||||||
key: password
|
|
||||||
writeRelabelConfigs:
|
|
||||||
- sourceLabels: ["__name__"]
|
|
||||||
regex: "(up|kube_.*|container_.*|node_.*|http_.*|process_.*)"
|
|
||||||
action: keep
|
|
||||||
|
|
||||||
# Remote read from VictoriaMetrics for old data
|
|
||||||
remoteRead:
|
|
||||||
- url: ${remote_read_url}
|
|
||||||
basicAuth:
|
|
||||||
username:
|
|
||||||
name: prometheus-remote-write-auth
|
|
||||||
key: username
|
|
||||||
password:
|
|
||||||
name: prometheus-remote-write-auth
|
|
||||||
key: password
|
|
||||||
readRecent: false # Only read data older than local retention
|
|
||||||
|
|
||||||
alertmanager:
|
|
||||||
enabled: true
|
|
||||||
alertmanagerSpec:
|
|
||||||
replicas: 1
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
memory: 50Mi
|
|
||||||
cpu: 10m
|
|
||||||
limits:
|
|
||||||
memory: 150Mi
|
|
||||||
cpu: 100m
|
|
||||||
retention: 24h
|
|
||||||
|
|
||||||
grafana:
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
memory: 100Mi
|
|
||||||
cpu: 50m
|
|
||||||
limits:
|
|
||||||
memory: 300Mi
|
|
||||||
cpu: 200m
|
|
||||||
persistence:
|
|
||||||
enabled: true
|
|
||||||
size: 1Gi
|
|
||||||
adminUser: admin
|
|
||||||
adminPassword: ${grafana_admin_password}
|
|
||||||
|
|
||||||
kubeStateMetrics:
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
memory: 50Mi
|
|
||||||
cpu: 10m
|
|
||||||
limits:
|
|
||||||
memory: 150Mi
|
|
||||||
cpu: 100m
|
|
||||||
|
|
||||||
nodeExporter:
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
memory: 30Mi
|
|
||||||
cpu: 10m
|
|
||||||
limits:
|
|
||||||
memory: 100Mi
|
|
||||||
cpu: 100m
|
|
||||||
|
|
||||||
prometheusOperator:
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
memory: 100Mi
|
|
||||||
cpu: 50m
|
|
||||||
limits:
|
|
||||||
memory: 300Mi
|
|
||||||
cpu: 200m
|
|
||||||
|
|
||||||
defaultRules:
|
|
||||||
create: true
|
|
||||||
rules:
|
|
||||||
alertmanager: true
|
|
||||||
etcd: false
|
|
||||||
general: true
|
|
||||||
k8s: true
|
|
||||||
kubernetesApps: true
|
|
||||||
kubernetesResources: true
|
|
||||||
kubernetesStorage: true
|
|
||||||
kubernetesSystem: true
|
|
||||||
node: true
|
|
||||||
prometheus: true
|
|
||||||
|
|
@ -1,33 +0,0 @@
|
||||||
locals {
|
|
||||||
k8s_config = yamldecode(var.k8s_config_yaml)
|
|
||||||
k8s_host = local.k8s_config.clusters[0].cluster.server
|
|
||||||
k8s_auth = try(
|
|
||||||
{
|
|
||||||
token = local.k8s_config.users[0].user.token
|
|
||||||
using_token = true
|
|
||||||
},
|
|
||||||
{
|
|
||||||
client_certificate = base64decode(local.k8s_config.users[0].user["client-certificate-data"])
|
|
||||||
client_key = base64decode(local.k8s_config.users[0].user["client-key-data"])
|
|
||||||
using_token = false
|
|
||||||
}
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
provider "kubernetes" {
|
|
||||||
host = local.k8s_host
|
|
||||||
insecure = true
|
|
||||||
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
|
||||||
client_certificate = local.k8s_auth.using_token ? null : local.k8s_auth.client_certificate
|
|
||||||
client_key = local.k8s_auth.using_token ? null : local.k8s_auth.client_key
|
|
||||||
}
|
|
||||||
|
|
||||||
provider "helm" {
|
|
||||||
kubernetes = {
|
|
||||||
host = local.k8s_host
|
|
||||||
insecure = true
|
|
||||||
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
|
||||||
client_certificate = local.k8s_auth.using_token ? null : local.k8s_auth.client_certificate
|
|
||||||
client_key = local.k8s_auth.using_token ? null : local.k8s_auth.client_key
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
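The removed providers.tf above decides between token and client-certificate authentication with try(): arguments are evaluated in order and the first one that does not raise an error wins, so a kubeconfig whose first user carries a token selects the first object, while one that only carries client-certificate-data / client-key-data errors on .token and falls through to the second. A compact restatement of that selection, assuming the same var.k8s_config_yaml input:

locals {
  k8s_config = yamldecode(var.k8s_config_yaml)

  k8s_auth = try(
    # preferred: bearer token straight from the kubeconfig
    {
      token       = local.k8s_config.users[0].user.token
      using_token = true
    },
    # fallback: mTLS client certificate and key
    {
      client_certificate = base64decode(local.k8s_config.users[0].user["client-certificate-data"])
      client_key         = base64decode(local.k8s_config.users[0].user["client-key-data"])
      using_token        = false
    },
  )
}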
@ -1,60 +0,0 @@
|
||||||
variable "remote_write_url" {
|
|
||||||
description = "URL for remote write endpoint on local machine"
|
|
||||||
type = string
|
|
||||||
default = "https://metrics.binarysunset.dev/api/v1/write"
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "remote_read_url" {
|
|
||||||
description = "URL for remote read endpoint on local machine"
|
|
||||||
type = string
|
|
||||||
default = "https://metrics.binarysunset.dev/api/v1/read"
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "remote_write_username" {
|
|
||||||
description = "Username for remote write authentication"
|
|
||||||
type = string
|
|
||||||
default = "prometheus"
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "remote_write_password" {
|
|
||||||
description = "Password for remote write authentication"
|
|
||||||
type = string
|
|
||||||
default = "your-secure-password"
|
|
||||||
sensitive = true
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "service_name" {
|
|
||||||
type = string
|
|
||||||
description = "Name of the service"
|
|
||||||
default = "auth"
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "server_dns" {
|
|
||||||
type = string
|
|
||||||
description = "Domain for the server"
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "k8s_config_yaml" {
|
|
||||||
description = "Content of k8s config yaml file"
|
|
||||||
type = string
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "wait_on" {
|
|
||||||
type = any
|
|
||||||
description = "Resources to wait on"
|
|
||||||
default = true
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "namespace" {
|
|
||||||
type = string
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "enabled" {
|
|
||||||
type = bool
|
|
||||||
default = true
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "ingressClass" {
|
|
||||||
type = string
|
|
||||||
default = "nginx"
|
|
||||||
}
|
|
||||||
|
|
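The wait_on variable above is deliberately typed any with a default, so a caller can hand in any upstream value purely to order module installs. A sketch of that wiring from a cluster root module; the source paths and the exact inputs each module needs are illustrative, while the installed output and the monitoring inputs appear elsewhere in this compare:

module "longhorn" {
  source          = "../modules/longhorn"
  k8s_config_yaml = var.k8s_config_yaml
}

module "monitoring" {
  source          = "../modules/monitoring"
  k8s_config_yaml = var.k8s_config_yaml
  server_dns      = var.server_dns
  namespace       = "monitoring"

  # Any value works; passing longhorn's output makes monitoring wait for storage.
  wait_on = module.longhorn.installed
}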
@ -23,7 +23,7 @@ provider "kubernetes" {
|
||||||
}
|
}
|
||||||
|
|
||||||
provider "helm" {
|
provider "helm" {
|
||||||
kubernetes = {
|
kubernetes {
|
||||||
host = local.k8s_host
|
host = local.k8s_host
|
||||||
insecure = true
|
insecure = true
|
||||||
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
||||||
|
|
|
||||||
|
|
@ -23,7 +23,7 @@ provider "kubernetes" {
|
||||||
}
|
}
|
||||||
|
|
||||||
provider "helm" {
|
provider "helm" {
|
||||||
kubernetes = {
|
kubernetes {
|
||||||
host = local.k8s_host
|
host = local.k8s_host
|
||||||
insecure = true
|
insecure = true
|
||||||
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
||||||
|
|
|
||||||
|
|
@ -12,19 +12,20 @@ resource "helm_release" "rabbitmq" {
|
||||||
create_namespace = true
|
create_namespace = true
|
||||||
version = "15.1.0"
|
version = "15.1.0"
|
||||||
|
|
||||||
set_sensitive = [{
|
set_sensitive {
|
||||||
name = "auth.password"
|
name = "auth.password"
|
||||||
value = random_password.password.result
|
value = random_password.password.result
|
||||||
}]
|
}
|
||||||
|
|
||||||
set = [{
|
set {
|
||||||
name = "replicaCount"
|
name = "replicaCount"
|
||||||
value = var.replicas
|
value = var.replicas
|
||||||
},
|
}
|
||||||
{
|
|
||||||
|
set {
|
||||||
name = "persistence.size"
|
name = "persistence.size"
|
||||||
value = "4Gi"
|
value = "4Gi"
|
||||||
}]
|
}
|
||||||
|
|
||||||
values = [
|
values = [
|
||||||
templatefile("${path.module}/values.yaml.tftpl", {
|
templatefile("${path.module}/values.yaml.tftpl", {
|
||||||
|
|
|
||||||
|
|
@ -23,7 +23,7 @@ provider "kubernetes" {
|
||||||
}
|
}
|
||||||
|
|
||||||
provider "helm" {
|
provider "helm" {
|
||||||
kubernetes = {
|
kubernetes {
|
||||||
host = local.k8s_host
|
host = local.k8s_host
|
||||||
insecure = true
|
insecure = true
|
||||||
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
||||||
|
|
|
||||||
|
|
@ -10,15 +10,15 @@ resource "helm_release" "rancher_server" {
|
||||||
|
|
||||||
values = [file("${path.module}/rancher-values.yaml")]
|
values = [file("${path.module}/rancher-values.yaml")]
|
||||||
|
|
||||||
set = [{
|
set {
|
||||||
name = "hostname"
|
name = "hostname"
|
||||||
value = join(".", ["rancher", var.server_dns])
|
value = join(".", ["rancher", var.server_dns])
|
||||||
},
|
}
|
||||||
|
|
||||||
{
|
set {
|
||||||
name = "bootstrapPassword"
|
name = "bootstrapPassword"
|
||||||
value = "admin" # TODO: change this once the terraform provider has been updated with the new pw bootstrap logic
|
value = "admin" # TODO: change this once the terraform provider has been updated with the new pw bootstrap logic
|
||||||
}]
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "random_password" "admin_password" {
|
resource "random_password" "admin_password" {
|
||||||
|
|
|
||||||
|
|
@ -23,7 +23,7 @@ provider "kubernetes" {
|
||||||
}
|
}
|
||||||
|
|
||||||
provider "helm" {
|
provider "helm" {
|
||||||
kubernetes = {
|
kubernetes {
|
||||||
host = local.k8s_host
|
host = local.k8s_host
|
||||||
insecure = true
|
insecure = true
|
||||||
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
||||||
|
|
|
||||||
|
|
@ -23,7 +23,7 @@ provider "kubernetes" {
|
||||||
}
|
}
|
||||||
|
|
||||||
provider "helm" {
|
provider "helm" {
|
||||||
kubernetes = {
|
kubernetes {
|
||||||
host = local.k8s_host
|
host = local.k8s_host
|
||||||
insecure = true
|
insecure = true
|
||||||
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
||||||
|
|
|
||||||
|
|
@ -23,7 +23,7 @@ provider "kubernetes" {
|
||||||
}
|
}
|
||||||
|
|
||||||
provider "helm" {
|
provider "helm" {
|
||||||
kubernetes = {
|
kubernetes {
|
||||||
host = local.k8s_host
|
host = local.k8s_host
|
||||||
insecure = true
|
insecure = true
|
||||||
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
||||||
|
|
|
||||||
|
|
@ -11,11 +11,11 @@ ports:
|
||||||
port: 8000
|
port: 8000
|
||||||
protocol: TCP
|
protocol: TCP
|
||||||
proxyProtocol:
|
proxyProtocol:
|
||||||
trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
|
trustedIPs: [127.0.0.1/8,10.0.0.0/8]
|
||||||
forwardedHeaders:
|
forwardedHeaders:
|
||||||
trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
|
trustedIPs: [127.0.0.1/8,10.0.0.0/8]
|
||||||
transport:
|
transport:
|
||||||
respondingTimeouts:
|
respondingTimeouts:
|
||||||
writeTimeout: 0
|
writeTimeout: 0
|
||||||
idleTimeout: 0
|
idleTimeout: 0
|
||||||
readTimeout: 0
|
readTimeout: 0
|
||||||
|
|
@ -26,11 +26,11 @@ ports:
|
||||||
port: 8443
|
port: 8443
|
||||||
protocol: TCP
|
protocol: TCP
|
||||||
proxyProtocol:
|
proxyProtocol:
|
||||||
trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
|
trustedIPs: [127.0.0.1/8,10.0.0.0/8]
|
||||||
forwardedHeaders:
|
forwardedHeaders:
|
||||||
trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
|
trustedIPs: [127.0.0.1/8,10.0.0.0/8]
|
||||||
transport:
|
transport:
|
||||||
respondingTimeouts:
|
respondingTimeouts:
|
||||||
writeTimeout: 0
|
writeTimeout: 0
|
||||||
idleTimeout: 0
|
idleTimeout: 0
|
||||||
readTimeout: 0
|
readTimeout: 0
|
||||||
|
|
@ -41,9 +41,9 @@ ports:
|
||||||
port: 2223
|
port: 2223
|
||||||
protocol: TCP
|
protocol: TCP
|
||||||
proxyProtocol:
|
proxyProtocol:
|
||||||
trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
|
trustedIPs: [127.0.0.1/8,10.0.0.0/8]
|
||||||
transport:
|
transport:
|
||||||
respondingTimeouts:
|
respondingTimeouts:
|
||||||
writeTimeout: 600s
|
writeTimeout: 600s
|
||||||
idleTimeout: 60s
|
idleTimeout: 60s
|
||||||
readTimeout: 600s
|
readTimeout: 600s
|
||||||
|
|
@ -54,9 +54,9 @@ ports:
|
||||||
port: 8993
|
port: 8993
|
||||||
protocol: TCP
|
protocol: TCP
|
||||||
proxyProtocol:
|
proxyProtocol:
|
||||||
trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
|
trustedIPs: [127.0.0.1/8,10.0.0.0/8]
|
||||||
transport:
|
transport:
|
||||||
respondingTimeouts:
|
respondingTimeouts:
|
||||||
writeTimeout: 600s
|
writeTimeout: 600s
|
||||||
idleTimeout: 300s
|
idleTimeout: 300s
|
||||||
readTimeout: 600s
|
readTimeout: 600s
|
||||||
|
|
@ -67,9 +67,9 @@ ports:
|
||||||
port: 8995
|
port: 8995
|
||||||
protocol: TCP
|
protocol: TCP
|
||||||
proxyProtocol:
|
proxyProtocol:
|
||||||
trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
|
trustedIPs: [127.0.0.1/8,10.0.0.0/8]
|
||||||
transport:
|
transport:
|
||||||
respondingTimeouts:
|
respondingTimeouts:
|
||||||
writeTimeout: 600s
|
writeTimeout: 600s
|
||||||
idleTimeout: 300s
|
idleTimeout: 300s
|
||||||
readTimeout: 600s
|
readTimeout: 600s
|
||||||
|
|
@ -80,9 +80,9 @@ ports:
|
||||||
port: 4190
|
port: 4190
|
||||||
protocol: TCP
|
protocol: TCP
|
||||||
proxyProtocol:
|
proxyProtocol:
|
||||||
trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
|
trustedIPs: [127.0.0.1/8,10.0.0.0/8]
|
||||||
transport:
|
transport:
|
||||||
respondingTimeouts:
|
respondingTimeouts:
|
||||||
writeTimeout: 600s
|
writeTimeout: 600s
|
||||||
idleTimeout: 300s
|
idleTimeout: 300s
|
||||||
readTimeout: 600s
|
readTimeout: 600s
|
||||||
|
|
@ -93,7 +93,7 @@ ports:
|
||||||
port: 8025
|
port: 8025
|
||||||
protocol: TCP
|
protocol: TCP
|
||||||
transport:
|
transport:
|
||||||
respondingTimeouts:
|
respondingTimeouts:
|
||||||
writeTimeout: 300s
|
writeTimeout: 300s
|
||||||
idleTimeout: 300s
|
idleTimeout: 300s
|
||||||
readTimeout: 300s
|
readTimeout: 300s
|
||||||
|
|
@ -104,9 +104,9 @@ ports:
|
||||||
port: 8465
|
port: 8465
|
||||||
protocol: TCP
|
protocol: TCP
|
||||||
proxyProtocol:
|
proxyProtocol:
|
||||||
trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
|
trustedIPs: [127.0.0.1/8,10.0.0.0/8]
|
||||||
transport:
|
transport:
|
||||||
respondingTimeouts:
|
respondingTimeouts:
|
||||||
writeTimeout: 300s
|
writeTimeout: 300s
|
||||||
idleTimeout: 300s
|
idleTimeout: 300s
|
||||||
readTimeout: 300s
|
readTimeout: 300s
|
||||||
|
|
|
||||||
|
|
@ -40,18 +40,20 @@ resource "helm_release" "vault" {
|
||||||
create_namespace = false
|
create_namespace = false
|
||||||
wait = true
|
wait = true
|
||||||
|
|
||||||
set = [{
|
set {
|
||||||
name = "server.ha.enabled"
|
name = "server.ha.enabled"
|
||||||
value = "false"
|
value = "false"
|
||||||
},
|
}
|
||||||
{
|
|
||||||
|
set {
|
||||||
name = "server.ha.replicas"
|
name = "server.ha.replicas"
|
||||||
value = "1"
|
value = "1"
|
||||||
},
|
}
|
||||||
{
|
|
||||||
|
set {
|
||||||
name = "server.ha.raft.enabled"
|
name = "server.ha.raft.enabled"
|
||||||
value = "false"
|
value = "false"
|
||||||
}]
|
}
|
||||||
|
|
||||||
values = [
|
values = [
|
||||||
templatefile("${path.module}/values.yaml.tftpl", {
|
templatefile("${path.module}/values.yaml.tftpl", {
|
||||||
|
|
|
||||||
|
|
@ -23,7 +23,7 @@ provider "kubernetes" {
|
||||||
}
|
}
|
||||||
|
|
||||||
provider "helm" {
|
provider "helm" {
|
||||||
kubernetes = {
|
kubernetes {
|
||||||
host = local.k8s_host
|
host = local.k8s_host
|
||||||
insecure = true
|
insecure = true
|
||||||
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
||||||
|
|
|
||||||
|
|
@ -13,7 +13,7 @@ locals {
|
||||||
slug_name = provider::slugify::slug(var.name)
|
slug_name = provider::slugify::slug(var.name)
|
||||||
|
|
||||||
cluster = "${local.slug_project}.${var.cluster_domain}"
|
cluster = "${local.slug_project}.${var.cluster_domain}"
|
||||||
uri = var.uri
|
uri = "https://${local.slug_name}.${local.cluster}"
|
||||||
}
|
}
|
||||||
|
|
||||||
module "zitadel_project_application_api" {
|
module "zitadel_project_application_api" {
|
||||||
|
|
@ -35,7 +35,7 @@ module "zitadel_project_application_ua" {
|
||||||
|
|
||||||
name = "${ var.name } (Swagger)"
|
name = "${ var.name } (Swagger)"
|
||||||
|
|
||||||
redirect_uris = ["${local.uri}/swagger/oauth2-redirect.html", "${local.uri}/hangfire/signin-oidc", "${local.uri}/signin-oidc"]
|
redirect_uris = ["${local.uri}/swagger/oauth2-redirect.html"]
|
||||||
post_logout_redirect_uris = [local.uri]
|
post_logout_redirect_uris = [local.uri]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -67,7 +67,6 @@ resource "kubernetes_secret" "api" {
|
||||||
|
|
||||||
data = {
|
data = {
|
||||||
"authority" = local.authority
|
"authority" = local.authority
|
||||||
"audience" = var.project_id
|
|
||||||
"client_id" = module.zitadel_project_application_api.client_id
|
"client_id" = module.zitadel_project_application_api.client_id
|
||||||
"client_secret" = module.zitadel_project_application_api.client_secret
|
"client_secret" = module.zitadel_project_application_api.client_secret
|
||||||
}
|
}
|
||||||
|
|
@ -114,7 +113,6 @@ resource "kubernetes_secret" "service-account" {
|
||||||
"audience" = var.project_id
|
"audience" = var.project_id
|
||||||
"client_id" = module.zitadel_service_account[count.index].client_id
|
"client_id" = module.zitadel_service_account[count.index].client_id
|
||||||
"client_secret" = module.zitadel_service_account[count.index].client_secret
|
"client_secret" = module.zitadel_service_account[count.index].client_secret
|
||||||
"scope" = join(" ", concat(["openid", "profile", "urn:zitadel:iam:org:project:id:${var.project_id}:aud"], var.roles))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
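The locals hunk above switches between taking the application address verbatim from a uri variable and deriving it from slugified names. provider::slugify::slug is a provider-defined function (Terraform 1.8 or newer) supplied by a slugify provider this module already configures; a sketch of the derived form, where var.project and the example values in comments are assumptions:

locals {
  slug_project = provider::slugify::slug(var.project) # assumed to be defined like slug_name
  slug_name    = provider::slugify::slug(var.name)

  cluster = "${local.slug_project}.${var.cluster_domain}" # e.g. my-project.cluster.example
  uri     = "https://${local.slug_name}.${local.cluster}"

  # The other side of the hunk takes the address as-is instead:
  # uri = var.uri
}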
||||||
|
|
@ -42,7 +42,3 @@ variable "zitadel_domain" {
|
||||||
variable "cluster_domain" {
|
variable "cluster_domain" {
|
||||||
type = string
|
type = string
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "uri" {
|
|
||||||
type = string
|
|
||||||
}
|
|
||||||
|
|
|
||||||
|
|
@ -40,7 +40,7 @@ resource "helm_release" "zitadel" {
|
||||||
name = "zitadel"
|
name = "zitadel"
|
||||||
repository = "https://charts.zitadel.com"
|
repository = "https://charts.zitadel.com"
|
||||||
chart = "zitadel"
|
chart = "zitadel"
|
||||||
namespace = kubernetes_namespace.zitadel[count.index].metadata[0].name
|
namespace = kubernetes_namespace.zitadel[count.index].metadata[0].name
|
||||||
version = "8.12.0"
|
version = "8.12.0"
|
||||||
create_namespace = false
|
create_namespace = false
|
||||||
wait = true
|
wait = true
|
||||||
|
|
@ -55,7 +55,6 @@ resource "helm_release" "zitadel" {
|
||||||
database_root_username = var.database_root_password != null ? var.database_root_username : null,
|
database_root_username = var.database_root_password != null ? var.database_root_username : null,
|
||||||
database_root_password = var.database_root_password
|
database_root_password = var.database_root_password
|
||||||
display_on_homepage = var.display_on_homepage
|
display_on_homepage = var.display_on_homepage
|
||||||
ingressClass = var.ingressClass
|
|
||||||
})
|
})
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -23,7 +23,7 @@ provider "kubernetes" {
|
||||||
}
|
}
|
||||||
|
|
||||||
provider "helm" {
|
provider "helm" {
|
||||||
kubernetes = {
|
kubernetes {
|
||||||
host = local.k8s_host
|
host = local.k8s_host
|
||||||
insecure = true
|
insecure = true
|
||||||
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
token = local.k8s_auth.using_token ? local.k8s_auth.token : null
|
||||||
|
|
|
||||||
|
|
@ -1,28 +0,0 @@
|
||||||
/**
|
|
||||||
* sets the roles as an additional claim in the token, with roles as value and project as key
|
|
||||||
*
|
|
||||||
* The role claims of the token look like the following:
|
|
||||||
*
|
|
||||||
* // added by the code below
|
|
||||||
* "groups": ["{roleName}", "{roleName}", ...],
|
|
||||||
*
|
|
||||||
* Flow: Complement token, Triggers: Pre Userinfo creation, Pre access token creation
|
|
||||||
*
|
|
||||||
* @param ctx
|
|
||||||
* @param api
|
|
||||||
*/
|
|
||||||
function groupsClaim(ctx, api) {
|
|
||||||
if (ctx.v1.user.grants === undefined || ctx.v1.user.grants.count == 0) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
let grants = [];
|
|
||||||
ctx.v1.user.grants.grants.forEach((claim) => {
|
|
||||||
claim.roles.forEach((role) => {
|
|
||||||
grants.push(role);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
api.v1.claims.setClaim("groups", grants);
|
|
||||||
api.v1.claims.setClaim("scope", grants);
|
|
||||||
}
|
|
||||||
|
|
@ -13,29 +13,6 @@ resource "zitadel_org" "default" {
|
||||||
is_default = true
|
is_default = true
|
||||||
}
|
}
|
||||||
|
|
||||||
// resource "zitadel_action" "groups-claim" {
|
|
||||||
// org_id = zitadel_org.default.id
|
|
||||||
// name = "groupsClaim"
|
|
||||||
// script = templatefile("${path.module}/groupsClaim.action.tftpl", {})
|
|
||||||
// allowed_to_fail = true
|
|
||||||
// timeout = "10s"
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// resource "zitadel_trigger_actions" "groups-claim-pre-user-info" {
|
|
||||||
// org_id = zitadel_org.default.id
|
|
||||||
// flow_type = "FLOW_TYPE_CUSTOMISE_TOKEN"
|
|
||||||
// trigger_type = "TRIGGER_TYPE_PRE_USERINFO_CREATION"
|
|
||||||
// action_ids = [zitadel_action.groups-claim.id]
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// resource "zitadel_trigger_actions" "groups-claim-pre-access-token" {
|
|
||||||
// org_id = zitadel_org.default.id
|
|
||||||
// flow_type = "FLOW_TYPE_CUSTOMISE_TOKEN"
|
|
||||||
// trigger_type = "TRIGGER_TYPE_PRE_ACCESS_TOKEN_CREATION"
|
|
||||||
// action_ids = [zitadel_action.groups-claim.id]
|
|
||||||
// }
|
|
||||||
|
|
||||||
|
|
||||||
output "org_id" {
|
output "org_id" {
|
||||||
value = zitadel_org.default.id
|
value = zitadel_org.default.id
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -57,17 +57,11 @@ service:
|
||||||
|
|
||||||
ingress:
|
ingress:
|
||||||
enabled: true
|
enabled: true
|
||||||
className: ${ingressClass}
|
className: traefik
|
||||||
annotations:
|
annotations:
|
||||||
kubernetes.io/ingress.class: ${ingressClass}
|
kubernetes.io/ingress.class: traefik
|
||||||
cert-manager.io/cluster-issuer: letsencrypt
|
traefik.ingress.kubernetes.io/router.entrypoints: web
|
||||||
%{ if ingressClass == "traefik" }
|
traefik.ingress.kubernetes.io/router.middlewares: default-preserve-host-headers@kubernetescrd
|
||||||
traefik.ingress.kubernetes.io/router.entrypoints: web,websecure
|
|
||||||
traefik.ingress.kubernetes.io/router.middlewares: default-redirect-to-https@kubernetescrd,default-preserve-host-headers@kubernetescrd
|
|
||||||
%{ else }
|
|
||||||
nginx.ingress.kubernetes.io/backend-protocol: "GRPC"
|
|
||||||
nginx.ingress.kubernetes.io/grpc-backend: "true"
|
|
||||||
%{ endif }
|
|
||||||
%{ if display_on_homepage }gethomepage.dev/enabled: "true"
|
%{ if display_on_homepage }gethomepage.dev/enabled: "true"
|
||||||
gethomepage.dev/name: "Zitadel"
|
gethomepage.dev/name: "Zitadel"
|
||||||
gethomepage.dev/description: "Identity and Access Management"
|
gethomepage.dev/description: "Identity and Access Management"
|
||||||
|
|
|
||||||
|
|
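For context, a sketch of how the removed ingressClass value used to reach this template, assuming the arguments shown at the top of this diff belong to the templatefile call that renders this values file (file name and the surrounding helm_release values list are assumptions); before this change the %{ if ingressClass == "traefik" } branches above picked the matching annotations:

    values = [
      templatefile("${path.module}/values.yaml.tftpl", {
        database_root_username = var.database_root_password != null ? var.database_root_username : null
        database_root_password = var.database_root_password
        display_on_homepage    = var.display_on_homepage
        ingressClass           = var.ingressClass  # removed by this change; the template now hardcodes traefik
      })
    ]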
@ -59,9 +59,3 @@ variable "enabled" {
|
||||||
type = bool
|
type = bool
|
||||||
default = true
|
default = true
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "ingressClass" {
|
|
||||||
description = "Ingress class to use"
|
|
||||||
type = string
|
|
||||||
default = "nginx"
|
|
||||||
}
|
|
||||||
|
|
|
||||||
|
|
@ -10,47 +10,7 @@ resource "helm_release" "zot" {
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "kubernetes_manifest" "traefik_middleware_request_body" {
|
|
||||||
depends_on = [helm_release.zot]
|
|
||||||
manifest = {
|
|
||||||
apiVersion = "traefik.io/v1alpha1"
|
|
||||||
kind = "Middleware"
|
|
||||||
metadata = {
|
|
||||||
name = "request-body"
|
|
||||||
namespace = "registry"
|
|
||||||
}
|
|
||||||
spec = {
|
|
||||||
buffering = {
|
|
||||||
maxRequestBodyBytes = 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "kubernetes_manifest" "traefik_middleware_request_timeouts" {
|
|
||||||
depends_on = [helm_release.zot]
|
|
||||||
manifest = {
|
|
||||||
apiVersion = "traefik.io/v1alpha1"
|
|
||||||
kind = "Middleware"
|
|
||||||
metadata = {
|
|
||||||
name = "request-timeouts"
|
|
||||||
namespace = "registry"
|
|
||||||
}
|
|
||||||
spec = {
|
|
||||||
headers = {
|
|
||||||
customRequestHeaders = {
|
|
||||||
"X-Forwarded-Timeout-Read" = "3600s"
|
|
||||||
"X-Forwarded-Timeout-Write" = "3600s"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
output "installed" {
|
output "installed" {
|
||||||
value = true
|
value = true
|
||||||
depends_on = [
|
depends_on = [helm_release.zot]
|
||||||
kubernetes_manifest.traefik_middleware_request_body, kubernetes_manifest.traefik_middleware_request_timeouts,
|
|
||||||
helm_release.zot
|
|
||||||
]
|
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -3,36 +3,9 @@ ingress:
|
||||||
className: "traefik"
|
className: "traefik"
|
||||||
annotations:
|
annotations:
|
||||||
traefik.ingress.kubernetes.io/router.entrypoints: web
|
traefik.ingress.kubernetes.io/router.entrypoints: web
|
||||||
traefik.ingress.kubernetes.io/router.middlewares: registry-request-body@kubernetescrd,registry-request-timeouts@kubernetescrd,default-preserve-host-headers@kubernetescrd
|
traefik.ingress.kubernetes.io/router.middlewares: default-preserve-host-headers@kubernetescrd
|
||||||
gethomepage.dev/enabled: "true"
|
traefik.ingress.kubernetes.io/proxy-body-size: "0"
|
||||||
gethomepage.dev/name: "Registry"
|
|
||||||
gethomepage.dev/description: "OCI Registry"
|
|
||||||
gethomepage.dev/group: "Tools"
|
|
||||||
gethomepage.dev/icon: "docker.png"
|
|
||||||
hosts:
|
hosts:
|
||||||
- host: ${ service_uri }
|
- host: ${ service_uri }
|
||||||
paths:
|
paths:
|
||||||
- path: /
|
- path: /
|
||||||
persistence: true
|
|
||||||
pvc:
|
|
||||||
create: true
|
|
||||||
name: zot
|
|
||||||
accessMode: "ReadWriteOnce"
|
|
||||||
storage: 8Gi
|
|
||||||
service:
|
|
||||||
type: ClusterIP
|
|
||||||
port: 5000
|
|
||||||
mountConfig: true
|
|
||||||
configFiles:
|
|
||||||
config.json: |-
|
|
||||||
{
|
|
||||||
"storage": { "rootDirectory": "/var/lib/registry" },
|
|
||||||
"http": { "address": "0.0.0.0", "port": "5000" },
|
|
||||||
"log": { "level": "error" },
|
|
||||||
"extensions": {
|
|
||||||
"scrub": {
|
|
||||||
"enable": true,
|
|
||||||
"interval": "12h"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
|
||||||
|
|
@ -3,8 +3,6 @@ locals {
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "kubernetes_namespace" "tenant" {
|
resource "kubernetes_namespace" "tenant" {
|
||||||
depends_on = [var.wait_on]
|
|
||||||
|
|
||||||
metadata {
|
metadata {
|
||||||
name = lower(local.name)
|
name = lower(local.name)
|
||||||
}
|
}
|
||||||
|
|
@ -24,15 +22,6 @@ module "bootstrap-zitadel" {
|
||||||
}
|
}
|
||||||
|
|
||||||
// create uploads bucket in minio
|
// create uploads bucket in minio
|
||||||
module "minio" {
|
|
||||||
source = "../../modules/minio/tenant"
|
|
||||||
|
|
||||||
access_key = var.minio_access_key
|
|
||||||
secret_key = var.minio_secret_key
|
|
||||||
server = var.minio_server
|
|
||||||
|
|
||||||
name = "365zon"
|
|
||||||
}
|
|
||||||
|
|
||||||
// create minio secret
|
// create minio secret
|
||||||
resource "kubernetes_secret" "storage" {
|
resource "kubernetes_secret" "storage" {
|
||||||
|
|
@ -42,10 +31,10 @@ resource "kubernetes_secret" "storage" {
|
||||||
}
|
}
|
||||||
|
|
||||||
data = {
|
data = {
|
||||||
Storage__AccountName = module.minio.access_key
|
Storage__AccountName = var.minio_access_key
|
||||||
Storage__AccountKey = module.minio.secret_key
|
Storage__AccountKey = var.minio_secret_key
|
||||||
Storage__BlobUri = var.minio_api_uri
|
Storage__BlobUri = var.minio_service_uri
|
||||||
Storage__S3BucketName = module.minio.bucket
|
Storage__S3BucketName = "uploads"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -65,17 +54,3 @@ resource "kubernetes_secret" "connection_strings" {
|
||||||
// next, we need to set-up:
|
// next, we need to set-up:
|
||||||
// - the wildcard tls (*.365zon.venus.fourlights.dev)
|
// - the wildcard tls (*.365zon.venus.fourlights.dev)
|
||||||
// - argocd for all relevant apps
|
// - argocd for all relevant apps
|
||||||
//
|
|
||||||
output "minio_access_key" {
|
|
||||||
value = module.minio.access_key
|
|
||||||
sensitive = true
|
|
||||||
}
|
|
||||||
|
|
||||||
output "minio_secret_key" {
|
|
||||||
value = module.minio.secret_key
|
|
||||||
sensitive = true
|
|
||||||
}
|
|
||||||
|
|
||||||
output "minio_bucket" {
|
|
||||||
value = module.minio.bucket
|
|
||||||
}
|
|
||||||
|
|
|
||||||
|
|
@ -14,7 +14,9 @@ provider "zitadel" {
|
||||||
}
|
}
|
||||||
|
|
||||||
locals {
|
locals {
|
||||||
k8s_config = yamldecode(var.k8s_config_yaml)
|
k8s_config_path = format("%s/%s", path.root, "../kubeconfig")
|
||||||
|
k8s_config_yaml = file(local.k8s_config_path)
|
||||||
|
k8s_config = yamldecode(local.k8s_config_yaml)
|
||||||
k8s_host = local.k8s_config.clusters[0].cluster.server
|
k8s_host = local.k8s_config.clusters[0].cluster.server
|
||||||
k8s_auth = try(
|
k8s_auth = try(
|
||||||
{
|
{
|
||||||
|
|
|
||||||
|
|
@ -10,8 +10,7 @@ variable "minio_secret_key" {
|
||||||
type = string
|
type = string
|
||||||
sensitive = true
|
sensitive = true
|
||||||
}
|
}
|
||||||
variable "minio_api_uri" { type = string }
|
variable "minio_service_uri" { type = string }
|
||||||
variable "minio_server" { type = string }
|
|
||||||
variable "mongodb_connection_string" {
|
variable "mongodb_connection_string" {
|
||||||
type = string
|
type = string
|
||||||
sensitive = true
|
sensitive = true
|
||||||
|
|
@ -20,13 +19,3 @@ variable "rabbitmq_connection_string" {
|
||||||
type = string
|
type = string
|
||||||
sensitive = true
|
sensitive = true
|
||||||
}
|
}
|
||||||
variable "wait_on" {
|
|
||||||
type = any
|
|
||||||
default = true
|
|
||||||
}
|
|
||||||
variable "k8s_config_yaml" {
|
|
||||||
description = "Content of k8s config yaml file"
|
|
||||||
type = string
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -1,7 +1,7 @@
|
||||||
locals {
|
locals {
|
||||||
tld = "fourlights.dev"
|
tld = "fourlights.dev"
|
||||||
cluster_dns = "365zon"
|
cluster_dns = "venus.${local.tld}"
|
||||||
domain = "zitadel.${local.cluster_dns}.${local.tld}"
|
domain = "zitadel.${local.cluster_dns}"
|
||||||
org_domain = "fourlights.${local.domain}"
|
org_domain = "fourlights.${local.domain}"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
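With the new locals, the rendered hostnames change shape: cluster_dns now already contains the tld, so domain no longer appends it. A small worked example of what each version renders to (the concrete hostnames follow directly from the values above):

    # before: cluster_dns = "365zon"
    #   domain = "zitadel.${local.cluster_dns}.${local.tld}"  =>  zitadel.365zon.fourlights.dev
    # after:  cluster_dns = "venus.fourlights.dev"
    #   domain = "zitadel.${local.cluster_dns}"               =>  zitadel.venus.fourlights.dev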
@ -18,31 +18,31 @@ module "zitadel_project" {
|
||||||
module "zitadel_project_operator_roles" {
|
module "zitadel_project_operator_roles" {
|
||||||
source = "../../../modules/zitadel/project/roles"
|
source = "../../../modules/zitadel/project/roles"
|
||||||
|
|
||||||
wait_on = module.zitadel_project.installed
|
wait_on = [module.zitadel_project.installed]
|
||||||
org_id = var.org_id
|
org_id = var.org_id
|
||||||
project_id = module.zitadel_project.project_id
|
project_id = module.zitadel_project.project_id
|
||||||
group = "Operator"
|
group = "Operator"
|
||||||
roles = [
|
roles = [
|
||||||
"manage:profiles", "manage:contacts", "manage:addresses", "manage:enquiries", "manage:flowstates",
|
"manage:profiles", "manage:contacts", "manage:addresses", "manage:enquiries", "manage:flowstates",
|
||||||
"manage:flowevents", "manage:files", "manage:brands"
|
"manage:flowevents", "manage:files"
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
module "zitadel_project_configurator_roles" {
|
module "zitadel_project_configurator_roles" {
|
||||||
source = "../../../modules/zitadel/project/roles"
|
source = "../../../modules/zitadel/project/roles"
|
||||||
wait_on = module.zitadel_project_operator_roles.installed
|
wait_on = [module.zitadel_project_operator_roles.installed]
|
||||||
|
|
||||||
org_id = var.org_id
|
org_id = var.org_id
|
||||||
project_id = module.zitadel_project.project_id
|
project_id = module.zitadel_project.project_id
|
||||||
group = "Configurator"
|
group = "Configurator"
|
||||||
roles = [
|
roles = [
|
||||||
"manage:flows"
|
"manage:brands", "manage:flows"
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
module "zitadel_project_developer_roles" {
|
module "zitadel_project_developer_roles" {
|
||||||
source = "../../../modules/zitadel/project/roles"
|
source = "../../../modules/zitadel/project/roles"
|
||||||
wait_on = module.zitadel_project_configurator_roles.installed
|
wait_on = [module.zitadel_project_configurator_roles.installed]
|
||||||
|
|
||||||
org_id = var.org_id
|
org_id = var.org_id
|
||||||
project_id = module.zitadel_project.project_id
|
project_id = module.zitadel_project.project_id
|
||||||
|
|
@ -54,7 +54,7 @@ module "zitadel_project_developer_roles" {
|
||||||
|
|
||||||
module "zitadel_project_user_grant" {
|
module "zitadel_project_user_grant" {
|
||||||
source = "../../../modules/zitadel/project/user-grant"
|
source = "../../../modules/zitadel/project/user-grant"
|
||||||
wait_on = module.zitadel_project_developer_roles.installed
|
wait_on = [module.zitadel_project_developer_roles.installed]
|
||||||
org_id = var.org_id
|
org_id = var.org_id
|
||||||
project_id = module.zitadel_project.project_id
|
project_id = module.zitadel_project.project_id
|
||||||
user_id = var.user_id
|
user_id = var.user_id
|
||||||
|
|
@ -66,8 +66,8 @@ module "zitadel_project_user_grant" {
|
||||||
// TODO: Add read roles
|
// TODO: Add read roles
|
||||||
|
|
||||||
module "zitadel_project_application_core" {
|
module "zitadel_project_application_core" {
|
||||||
source = "../../../modules/zitadel/api-m2m-swagger"
|
source = "../../../modules/zitadel/api-m2m-swagger"
|
||||||
wait_on = module.zitadel_project_user_grant.installed
|
wait_on = [module.zitadel_project_user_grant.installed]
|
||||||
|
|
||||||
org_id = var.org_id
|
org_id = var.org_id
|
||||||
project_id = module.zitadel_project.project_id
|
project_id = module.zitadel_project.project_id
|
||||||
|
|
@ -75,7 +75,6 @@ module "zitadel_project_application_core" {
|
||||||
name = "Core"
|
name = "Core"
|
||||||
zitadel_domain = local.domain
|
zitadel_domain = local.domain
|
||||||
cluster_domain = local.cluster_dns
|
cluster_domain = local.cluster_dns
|
||||||
uri = "https://api.${local.cluster_dns}.${local.tld}"
|
|
||||||
|
|
||||||
namespace = var.namespace
|
namespace = var.namespace
|
||||||
project = var.name
|
project = var.name
|
||||||
|
|
@ -85,8 +84,8 @@ module "zitadel_project_application_core" {
|
||||||
}
|
}
|
||||||
|
|
||||||
module "zitadel_project_application_salesforce" {
|
module "zitadel_project_application_salesforce" {
|
||||||
source = "../../../modules/zitadel/api-m2m-swagger"
|
source = "../../../modules/zitadel/api-m2m-swagger"
|
||||||
wait_on = module.zitadel_project_application_core.installed
|
wait_on = [module.zitadel_project_application_core.installed]
|
||||||
|
|
||||||
org_id = var.org_id
|
org_id = var.org_id
|
||||||
project_id = module.zitadel_project.project_id
|
project_id = module.zitadel_project.project_id
|
||||||
|
|
@ -94,7 +93,6 @@ module "zitadel_project_application_salesforce" {
|
||||||
name = "Salesforce"
|
name = "Salesforce"
|
||||||
zitadel_domain = local.domain
|
zitadel_domain = local.domain
|
||||||
cluster_domain = local.cluster_dns
|
cluster_domain = local.cluster_dns
|
||||||
uri = "https://salesforce.${local.cluster_dns}.${local.tld}"
|
|
||||||
|
|
||||||
namespace = var.namespace
|
namespace = var.namespace
|
||||||
project = var.name
|
project = var.name
|
||||||
|
|
@ -103,8 +101,8 @@ module "zitadel_project_application_salesforce" {
|
||||||
}
|
}
|
||||||
|
|
||||||
module "zitadel_project_application_external" {
|
module "zitadel_project_application_external" {
|
||||||
source = "../../../modules/zitadel/api-m2m-swagger"
|
source = "../../../modules/zitadel/api-m2m-swagger"
|
||||||
wait_on = module.zitadel_project_application_salesforce.installed
|
wait_on = [module.zitadel_project_application_salesforce.installed]
|
||||||
|
|
||||||
org_id = var.org_id
|
org_id = var.org_id
|
||||||
project_id = module.zitadel_project.project_id
|
project_id = module.zitadel_project.project_id
|
||||||
|
|
@ -112,7 +110,6 @@ module "zitadel_project_application_external" {
|
||||||
name = "External"
|
name = "External"
|
||||||
zitadel_domain = local.domain
|
zitadel_domain = local.domain
|
||||||
cluster_domain = local.cluster_dns
|
cluster_domain = local.cluster_dns
|
||||||
uri = "https://external.${local.cluster_dns}.${local.tld}"
|
|
||||||
|
|
||||||
namespace = var.namespace
|
namespace = var.namespace
|
||||||
project = var.name
|
project = var.name
|
||||||
|
|
@ -121,8 +118,8 @@ module "zitadel_project_application_external" {
|
||||||
}
|
}
|
||||||
|
|
||||||
module "zitadel_project_application_module_internal" {
|
module "zitadel_project_application_module_internal" {
|
||||||
source = "../../../modules/zitadel/api-m2m-swagger"
|
source = "../../../modules/zitadel/api-m2m-swagger"
|
||||||
wait_on = module.zitadel_project_application_external.installed
|
wait_on = [module.zitadel_project_application_external.installed]
|
||||||
|
|
||||||
org_id = var.org_id
|
org_id = var.org_id
|
||||||
project_id = module.zitadel_project.project_id
|
project_id = module.zitadel_project.project_id
|
||||||
|
|
@ -130,7 +127,6 @@ module "zitadel_project_application_module_internal" {
|
||||||
name = "Internal"
|
name = "Internal"
|
||||||
zitadel_domain = local.domain
|
zitadel_domain = local.domain
|
||||||
cluster_domain = local.cluster_dns
|
cluster_domain = local.cluster_dns
|
||||||
uri = "https://internal.${local.cluster_dns}.${local.tld}"
|
|
||||||
|
|
||||||
namespace = var.namespace
|
namespace = var.namespace
|
||||||
project = var.name
|
project = var.name
|
||||||
|
|
|
||||||
|
|
@ -15,4 +15,3 @@ variable "domain" { type = string }
|
||||||
variable "jwt_profile_file" { type = string }
|
variable "jwt_profile_file" { type = string }
|
||||||
|
|
||||||
variable "argocd_service_domain" { type = string }
|
variable "argocd_service_domain" { type = string }
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -14,7 +14,9 @@ provider "zitadel" {
|
||||||
}
|
}
|
||||||
|
|
||||||
locals {
|
locals {
|
||||||
k8s_config = yamldecode(var.k8s_config_yaml)
|
k8s_config_path = format("%s/%s", path.root, "../kubeconfig")
|
||||||
|
k8s_config_yaml = file(local.k8s_config_path)
|
||||||
|
k8s_config = yamldecode(local.k8s_config_yaml)
|
||||||
k8s_host = local.k8s_config.clusters[0].cluster.server
|
k8s_host = local.k8s_config.clusters[0].cluster.server
|
||||||
k8s_auth = try(
|
k8s_auth = try(
|
||||||
{
|
{
|
||||||
|
|
|
||||||
|
|
@ -1,7 +1,2 @@
|
||||||
variable "domain" { type = string }
|
variable "domain" { type = string }
|
||||||
variable "jwt_profile_file" { type = string }
|
variable "jwt_profile_file" { type = string }
|
||||||
variable "k8s_config_yaml" {
|
|
||||||
description = "Content of k8s config yaml file"
|
|
||||||
type = string
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -1,40 +0,0 @@
|
||||||
# Local .terraform directories
|
|
||||||
.terraform/
|
|
||||||
|
|
||||||
# .tfstate files
|
|
||||||
*.tfstate
|
|
||||||
*.tfstate.*
|
|
||||||
|
|
||||||
# Crash log files
|
|
||||||
crash.log
|
|
||||||
crash.*.log
|
|
||||||
|
|
||||||
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
|
|
||||||
# password, private keys, and other secrets. These should not be part of version
|
|
||||||
# control as they are data points which are potentially sensitive and subject
|
|
||||||
# to change depending on the environment.
|
|
||||||
*.tfvars
|
|
||||||
*.tfvars.json
|
|
||||||
|
|
||||||
# Ignore override files as they are usually used to override resources locally and so
|
|
||||||
# are not checked in
|
|
||||||
override.tf
|
|
||||||
override.tf.json
|
|
||||||
*_override.tf
|
|
||||||
*_override.tf.json
|
|
||||||
|
|
||||||
# Ignore transient lock info files created by terraform apply
|
|
||||||
.terraform.tfstate.lock.info
|
|
||||||
|
|
||||||
# Include override files you do wish to add to version control using negated pattern
|
|
||||||
# !example_override.tf
|
|
||||||
|
|
||||||
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
|
|
||||||
# example: *tfplan*
|
|
||||||
|
|
||||||
# Ignore CLI configuration files
|
|
||||||
.terraformrc
|
|
||||||
terraform.rc
|
|
||||||
|
|
||||||
# Optional: ignore graph output files generated by `terraform graph`
|
|
||||||
*.dot
|
|
||||||
|
|
@ -1,10 +0,0 @@
|
||||||
# Default ignored files
|
|
||||||
/shelf/
|
|
||||||
/workspace.xml
|
|
||||||
# Ignored default folder with query files
|
|
||||||
/queries/
|
|
||||||
# Datasource local storage ignored files
|
|
||||||
/dataSources/
|
|
||||||
/dataSources.local.xml
|
|
||||||
# Editor-based HTTP Client requests
|
|
||||||
/httpRequests/
|
|
||||||
|
|
@ -1,4 +0,0 @@
|
||||||
<?xml version="1.0" encoding="UTF-8"?>
|
|
||||||
<project version="4">
|
|
||||||
<component name="Encoding" addBOMForNewFiles="with BOM under Windows, with no BOM otherwise" />
|
|
||||||
</project>
|
|
||||||
|
|
@ -1,8 +0,0 @@
|
||||||
<?xml version="1.0" encoding="UTF-8"?>
|
|
||||||
<project version="4">
|
|
||||||
<component name="ProjectModuleManager">
|
|
||||||
<modules>
|
|
||||||
<module fileurl="file://$PROJECT_DIR$/.idea/quadlets.iml" filepath="$PROJECT_DIR$/.idea/quadlets.iml" />
|
|
||||||
</modules>
|
|
||||||
</component>
|
|
||||||
</project>
|
|
||||||
|
|
@ -1,8 +0,0 @@
|
||||||
<?xml version="1.0" encoding="UTF-8"?>
|
|
||||||
<module type="WEB_MODULE" version="4">
|
|
||||||
<component name="NewModuleRootManager">
|
|
||||||
<content url="file://$MODULE_DIR$" />
|
|
||||||
<orderEntry type="inheritedJdk" />
|
|
||||||
<orderEntry type="sourceFolder" forTests="false" />
|
|
||||||
</component>
|
|
||||||
</module>
|
|
||||||
|
|
@ -1,6 +0,0 @@
|
||||||
<?xml version="1.0" encoding="UTF-8"?>
|
|
||||||
<project version="4">
|
|
||||||
<component name="VcsDirectoryMappings">
|
|
||||||
<mapping directory="$PROJECT_DIR$/.." vcs="Git" />
|
|
||||||
</component>
|
|
||||||
</project>
|
|
||||||
|
|
@ -1,66 +0,0 @@
|
||||||
variable "hcloud_token" {
|
|
||||||
description = "Hetzner Cloud API Token"
|
|
||||||
type = string
|
|
||||||
sensitive = true
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "hdns_token" {
|
|
||||||
type = string
|
|
||||||
sensitive = true
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "ssh_public_key_path" {
|
|
||||||
description = "Path to SSH public key"
|
|
||||||
type = string
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "ssh_private_key_path" {
|
|
||||||
description = "Path to SSH private key"
|
|
||||||
type = string
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "ghcr_username" {}
|
|
||||||
variable "ghcr_token" {}
|
|
||||||
|
|
||||||
module "hetzner" {
|
|
||||||
source = "./modules/hetzner"
|
|
||||||
hcloud_token = var.hcloud_token
|
|
||||||
ssh_public_key_path = var.ssh_public_key_path
|
|
||||||
ssh_private_key_path = var.ssh_private_key_path
|
|
||||||
name = "vw-hub"
|
|
||||||
datacenter = "nbg1-dc3"
|
|
||||||
hdns_token = var.hdns_token
|
|
||||||
ghcr_token = var.ghcr_token
|
|
||||||
ghcr_username = var.ghcr_username
|
|
||||||
}
|
|
||||||
|
|
||||||
module "minio" {
|
|
||||||
wait_on = module.hetzner.installed
|
|
||||||
source = "./modules/minio"
|
|
||||||
server_ip = module.hetzner.server_ip
|
|
||||||
server_domain = module.hetzner.server_domain
|
|
||||||
ssh_private_key_path = var.ssh_private_key_path
|
|
||||||
}
|
|
||||||
|
|
||||||
module "valkey" {
|
|
||||||
wait_on = module.hetzner.installed
|
|
||||||
source = "./modules/valkey"
|
|
||||||
server_ip = module.hetzner.server_ip
|
|
||||||
ssh_private_key_path = var.ssh_private_key_path
|
|
||||||
}
|
|
||||||
|
|
||||||
# module "vw-hub" {
|
|
||||||
# wait_on = module.minio.installed
|
|
||||||
#
|
|
||||||
# source = "./modules/vw-hub"
|
|
||||||
# server_ip = module.hetzner.server_ip
|
|
||||||
# ssh_private_key_path = var.ssh_private_key_path
|
|
||||||
# domain = "hub.${module.hetzner.server_domain}"
|
|
||||||
# s3_access_key = module.minio.access_key
|
|
||||||
# s3_secret_key = module.minio.secret_key
|
|
||||||
# s3_server = module.minio.server
|
|
||||||
# }
|
|
||||||
|
|
||||||
output "minio_app_urls" {
|
|
||||||
value = module.minio.app_urls
|
|
||||||
}
|
|
||||||
|
|
@ -1,36 +0,0 @@
|
||||||
variable "wait_on" {
|
|
||||||
type = any
|
|
||||||
description = "Resources to wait on"
|
|
||||||
default = true
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "server_ip" {
|
|
||||||
type = string
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "ssh_private_key_path" {
|
|
||||||
type = string
|
|
||||||
}
|
|
||||||
|
|
||||||
module "redis" {
|
|
||||||
source = "../quadlet-app"
|
|
||||||
wait_on = var.wait_on
|
|
||||||
|
|
||||||
server_ip = var.server_ip
|
|
||||||
ssh_private_key_path = var.ssh_private_key_path
|
|
||||||
|
|
||||||
app_name = "redis"
|
|
||||||
image = "docker.io/redis:7-alpine"
|
|
||||||
ports = ["6379:6379"]
|
|
||||||
volumes = ["/opt/storage/data/redis:/data:Z"]
|
|
||||||
command = ["redis-server", "--appendonly", "yes"]
|
|
||||||
}
|
|
||||||
|
|
||||||
output "app_urls" {
|
|
||||||
value = module.redis.app_urls
|
|
||||||
}
|
|
||||||
|
|
||||||
output "installed" {
|
|
||||||
value = true
|
|
||||||
depends_on = [module.redis.installed]
|
|
||||||
}
|
|
||||||
|
|
@ -1,561 +0,0 @@
|
||||||
#cloud-config
|
|
||||||
users:
|
|
||||||
- name: fourlights
|
|
||||||
sudo: ALL=(ALL) NOPASSWD:ALL
|
|
||||||
groups: users,admin,sudo
|
|
||||||
shell: /bin/bash
|
|
||||||
lock_passwd: false
|
|
||||||
ssh_authorized_keys:
|
|
||||||
- ${ssh_public_key}
|
|
||||||
|
|
||||||
packages:
|
|
||||||
- podman
|
|
||||||
- haproxy
|
|
||||||
- python3
|
|
||||||
- python3-requests
|
|
||||||
- curl
|
|
||||||
- wget
|
|
||||||
- jq
|
|
||||||
- socat
|
|
||||||
- nmap
|
|
||||||
|
|
||||||
package_update: true
|
|
||||||
package_upgrade: true
|
|
||||||
|
|
||||||
write_files:
|
|
||||||
- path: /etc/sudoers.d/fourlights-haproxy
|
|
||||||
permissions: '0440'
|
|
||||||
content: |
|
|
||||||
fourlights ALL=(root) NOPASSWD: /bin/systemctl reload haproxy
|
|
||||||
fourlights ALL=(root) NOPASSWD: /bin/systemctl restart haproxy
|
|
||||||
fourlights ALL=(root) NOPASSWD: /bin/systemctl stop haproxy
|
|
||||||
fourlights ALL=(root) NOPASSWD: /bin/systemctl start haproxy
|
|
||||||
fourlights ALL=(root) NOPASSWD: /bin/chown -R haproxy\:haproxy /etc/ssl/haproxy/*
|
|
||||||
fourlights ALL=(root) NOPASSWD: /bin/chmod 600 /etc/ssl/haproxy/*
|
|
||||||
# HAProxy main configuration
|
|
||||||
- path: /etc/haproxy/haproxy.cfg
|
|
||||||
content: |
|
|
||||||
global
|
|
||||||
daemon
|
|
||||||
stats socket /var/run/haproxy/admin.sock mode 660 level admin expose-fd listeners
|
|
||||||
stats timeout 30s
|
|
||||||
user haproxy
|
|
||||||
group haproxy
|
|
||||||
log stdout local0 info
|
|
||||||
|
|
||||||
defaults
|
|
||||||
mode http
|
|
||||||
timeout connect 5000ms
|
|
||||||
timeout client 50000ms
|
|
||||||
timeout server 50000ms
|
|
||||||
option httplog
|
|
||||||
log global
|
|
||||||
|
|
||||||
# Stats interface
|
|
||||||
frontend stats
|
|
||||||
bind *:8404
|
|
||||||
http-request use-service prometheus-exporter if { path /metrics }
|
|
||||||
stats enable
|
|
||||||
stats uri /stats
|
|
||||||
stats refresh 10s
|
|
||||||
|
|
||||||
# HTTP Frontend
|
|
||||||
frontend main
|
|
||||||
bind *:80
|
|
||||||
# ACL to detect ACME challenge requests
|
|
||||||
acl is_acme_challenge path_beg /.well-known/acme-challenge/
|
|
||||||
# Route ACME challenges to the acme_challenge backend
|
|
||||||
use_backend acme_challenge if is_acme_challenge
|
|
||||||
default_backend no_match
|
|
||||||
|
|
||||||
# HTTPS Frontend
|
|
||||||
frontend https_main
|
|
||||||
bind *:443
|
|
||||||
default_backend no_match
|
|
||||||
|
|
||||||
# ACME Challenge Backend
|
|
||||||
backend acme_challenge
|
|
||||||
mode http
|
|
||||||
server acme_server 127.0.0.1:8888
|
|
||||||
|
|
||||||
# Default backend
|
|
||||||
backend no_match
|
|
||||||
http-request return status 404 content-type text/plain string "No matching service found"
|
|
||||||
|
|
||||||
- path: /etc/dataplaneapi/dataplaneapi.yml
|
|
||||||
content: |
|
|
||||||
dataplaneapi:
|
|
||||||
host: 0.0.0.0
|
|
||||||
port: 5555
|
|
||||||
user:
|
|
||||||
- insecure: true
|
|
||||||
password: admin
|
|
||||||
username: admin
|
|
||||||
|
|
||||||
haproxy:
|
|
||||||
config_file: /etc/haproxy/haproxy.cfg
|
|
||||||
haproxy_bin: /usr/sbin/haproxy
|
|
||||||
reload:
|
|
||||||
reload_cmd: systemctl reload haproxy
|
|
||||||
restart_cmd: systemctl restart haproxy
|
|
||||||
stats_socket: /var/run/haproxy/admin.sock
|
|
||||||
|
|
||||||
- path: /usr/local/bin/podman-haproxy-acme-sync-wrapper.sh
|
|
||||||
permissions: '0755'
|
|
||||||
content: |
|
|
||||||
#!/bin/bash
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
MAX_WAIT=60
|
|
||||||
ELAPSED=0
|
|
||||||
|
|
||||||
# Wait for HAProxy
|
|
||||||
echo "Checking HAProxy status..."
|
|
||||||
while ! systemctl is-active --quiet haproxy; do
|
|
||||||
echo "Waiting for HAProxy to start..."
|
|
||||||
sleep 2
|
|
||||||
ELAPSED=$((ELAPSED + 2))
|
|
||||||
[ $ELAPSED -ge $MAX_WAIT ] && { echo "ERROR: HAProxy timeout"; exit 1; }
|
|
||||||
done
|
|
||||||
echo "HAProxy is active"
|
|
||||||
|
|
||||||
# Reset and wait for Data Plane API to actually respond
|
|
||||||
ELAPSED=0
|
|
||||||
echo "Checking Data Plane API readiness..."
|
|
||||||
while true; do
|
|
||||||
HTTP_CODE=$(curl -s -w "%%{http_code}" -o /dev/null \
|
|
||||||
--connect-timeout 5 \
|
|
||||||
--max-time 10 \
|
|
||||||
-u :admin \
|
|
||||||
http://localhost:5555/v3/services/haproxy/configuration/version 2>/dev/null || echo "000")
|
|
||||||
|
|
||||||
[ "$HTTP_CODE" = "200" ] && { echo "Data Plane API ready"; break; }
|
|
||||||
|
|
||||||
echo "Waiting for Data Plane API... (HTTP $HTTP_CODE)"
|
|
||||||
sleep 2
|
|
||||||
ELAPSED=$((ELAPSED + 2))
|
|
||||||
|
|
||||||
if [ $ELAPSED -ge $MAX_WAIT ]; then
|
|
||||||
echo "ERROR: Data Plane API not ready within $MAX_WAITs (HTTP $HTTP_CODE)"
|
|
||||||
journalctl -u dataplaneapi -n 50 --no-pager
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
sleep 2
|
|
||||||
exec /usr/local/bin/podman-haproxy-acme-sync.py
|
|
||||||
|
|
||||||
# Podman HAProxy ACME Sync Script
|
|
||||||
- path: /usr/local/bin/podman-haproxy-acme-sync.py
|
|
||||||
permissions: '0755'
|
|
||||||
content: |
|
|
||||||
#!/usr/bin/env python3
|
|
||||||
|
|
||||||
import json
|
|
||||||
import subprocess
|
|
||||||
import requests
|
|
||||||
import time
|
|
||||||
import os
|
|
||||||
import sys
|
|
||||||
|
|
||||||
HAPROXY_API_BASE = "http://:admin@127.0.0.1:5555/v3"
|
|
||||||
CERT_DIR = "/home/fourlights/.acme.sh"
|
|
||||||
ACME_SCRIPT = "/usr/local/bin/acme.sh"
|
|
||||||
|
|
||||||
class PodmanHAProxyACMESync:
|
|
||||||
def __init__(self):
|
|
||||||
self.ssl_services = set()
|
|
||||||
self.session = requests.Session()
|
|
||||||
self.session.headers.update({'Content-Type': 'application/json'})
|
|
||||||
|
|
||||||
def get_next_index(self, path):
|
|
||||||
response = self.session.get(f"{HAPROXY_API_BASE}/services/haproxy/configuration/{path}")
|
|
||||||
return len(response.json()) if response.status_code == 200 else None
|
|
||||||
|
|
||||||
def get_dataplaneapi_version(self):
|
|
||||||
response = self.session.get(f"{HAPROXY_API_BASE}/services/haproxy/configuration/version")
|
|
||||||
return response.json() if response.status_code == 200 else None
|
|
||||||
|
|
||||||
def get_container_labels(self, container_id):
|
|
||||||
try:
|
|
||||||
result = subprocess.run(['podman', 'inspect', container_id],
|
|
||||||
capture_output=True, text=True)
|
|
||||||
if result.returncode == 0:
|
|
||||||
data = json.loads(result.stdout)
|
|
||||||
return data[0]['Config']['Labels'] or {}
|
|
||||||
except Exception as e:
|
|
||||||
print(f"Error getting labels for {container_id}: {e}")
|
|
||||||
return {}
|
|
||||||
|
|
||||||
def request_certificate(self, domain):
|
|
||||||
print(f"[CERT-REQUEST] About to request certificate for {domain}")
|
|
||||||
sys.stdout.flush()
|
|
||||||
|
|
||||||
try:
|
|
||||||
cmd = [
|
|
||||||
ACME_SCRIPT,
|
|
||||||
"--issue",
|
|
||||||
"-d", domain,
|
|
||||||
"--standalone",
|
|
||||||
"--httpport", "8888",
|
|
||||||
"--server", "letsencrypt",
|
|
||||||
"--listen-v4",
|
|
||||||
"--debug", "2"
|
|
||||||
]
|
|
||||||
|
|
||||||
# Log the command being executed
|
|
||||||
print(f"[CERT-REQUEST] Executing: {' '.join(cmd)}")
|
|
||||||
sys.stdout.flush()
|
|
||||||
|
|
||||||
result = subprocess.run(cmd, capture_output=True, text=True)
|
|
||||||
|
|
||||||
# Log both stdout and stderr for complete debugging
|
|
||||||
if result.stdout:
|
|
||||||
print(f"[CERT-STDOUT] {result.stdout}")
|
|
||||||
sys.stdout.flush()
|
|
||||||
if result.stderr:
|
|
||||||
print(f"[CERT-STDERR] {result.stderr}")
|
|
||||||
sys.stderr.flush()
|
|
||||||
|
|
||||||
if result.returncode == 0:
|
|
||||||
print(f"[CERT-SUCCESS] Certificate obtained for {domain}")
|
|
||||||
sys.stdout.flush()
|
|
||||||
self.install_certificate(domain)
|
|
||||||
return True
|
|
||||||
else:
|
|
||||||
print(f"[CERT-FAILED] Failed to obtain certificate for {domain}")
|
|
||||||
print(f"[CERT-FAILED] Return code: {result.returncode}")
|
|
||||||
sys.stdout.flush()
|
|
||||||
return False
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
print(f"[CERT-ERROR] Error requesting certificate: {e}")
|
|
||||||
sys.stdout.flush()
|
|
||||||
return False
|
|
||||||
|
|
||||||
def install_certificate(self, domain):
|
|
||||||
cert_file = f"{CERT_DIR}/{domain}.pem"
|
|
||||||
|
|
||||||
try:
|
|
||||||
acme_cert_dir = f"/home/fourlights/.acme.sh/{domain}_ecc"
|
|
||||||
|
|
||||||
with open(cert_file, 'w') as outfile:
|
|
||||||
with open(f"{acme_cert_dir}/fullchain.cer") as cert:
|
|
||||||
outfile.write(cert.read())
|
|
||||||
with open(f"{acme_cert_dir}/{domain}.key") as key:
|
|
||||||
outfile.write(key.read())
|
|
||||||
try:
|
|
||||||
with open(f"{acme_cert_dir}/ca.cer") as ca:
|
|
||||||
outfile.write(ca.read())
|
|
||||||
except FileNotFoundError:
|
|
||||||
pass
|
|
||||||
|
|
||||||
os.chmod(cert_file, 0o600)
|
|
||||||
print(f"Certificate installed at {cert_file}")
|
|
||||||
|
|
||||||
self.update_haproxy_ssl_bind(domain)
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
print(f"Error installing certificate for {domain}: {e}")
|
|
||||||
|
|
||||||
def update_haproxy_ssl_bind(self, domain):
|
|
||||||
print(f"Updating ssl bind for {domain}")
|
|
||||||
try:
|
|
||||||
ssl_bind_data = {
|
|
||||||
"address": "*",
|
|
||||||
"port": 443,
|
|
||||||
"ssl": True,
|
|
||||||
"ssl_certificate": f"{CERT_DIR}/{domain}.pem",
|
|
||||||
}
|
|
||||||
|
|
||||||
response = self.session.post(f"{HAPROXY_API_BASE}/services/haproxy/configuration/frontends/https_main/binds?version={self.get_dataplaneapi_version()}",
|
|
||||||
json=ssl_bind_data)
|
|
||||||
print(response.json())
|
|
||||||
|
|
||||||
if response.status_code in [200, 201]:
|
|
||||||
print(f"Updated HAProxy SSL bind for {domain}")
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
print(f"Error updating HAProxy SSL bind: {e}")
|
|
||||||
|
|
||||||
def setup_certificate_renewal(self, domain):
|
|
||||||
renewal_script = f"/etc/cron.d/acme-{domain.replace('.', '-')}"
|
|
||||||
|
|
||||||
cron_content = f"""0 0 * * * root {ACME_SCRIPT} --renew -d {domain} --post-hook "systemctl reload haproxy" >/dev/null 2>&1
|
|
||||||
"""
|
|
||||||
|
|
||||||
with open(renewal_script, 'w') as f:
|
|
||||||
f.write(cron_content)
|
|
||||||
|
|
||||||
print(f"Setup automatic renewal for {domain}")
|
|
||||||
|
|
||||||
def update_haproxy_backend(self, service_name, host, port, action='add'):
|
|
||||||
backend_name = f"backend_{service_name}"
|
|
||||||
server_name = f"{service_name}_server"
|
|
||||||
|
|
||||||
if action == 'add':
|
|
||||||
backend_data = {
|
|
||||||
"name": backend_name,
|
|
||||||
"mode": "http",
|
|
||||||
"balance": {"algorithm": "roundrobin"},
|
|
||||||
}
|
|
||||||
backends = self.session.post(f"{HAPROXY_API_BASE}/services/haproxy/configuration/backends?version={self.get_dataplaneapi_version()}",
|
|
||||||
json=backend_data)
|
|
||||||
print(backends.json())
|
|
||||||
|
|
||||||
server_data = {
|
|
||||||
"name": server_name,
|
|
||||||
"address": host,
|
|
||||||
"port": int(port),
|
|
||||||
"check": "enabled",
|
|
||||||
}
|
|
||||||
tweak = self.session.post(f"{HAPROXY_API_BASE}/services/haproxy/configuration/backends/{backend_name}/servers?version={self.get_dataplaneapi_version()}",
|
|
||||||
json=server_data)
|
|
||||||
print(tweak.json())
|
|
||||||
|
|
||||||
elif action == 'remove':
|
|
||||||
self.session.delete(f"{HAPROXY_API_BASE}/services/haproxy/configuration/backends/{backend_name}/servers/{server_name}?version={self.get_dataplaneapi_version()}")
|
|
||||||
|
|
||||||
def update_haproxy_frontend_rule(self, service_name, domain, ssl_enabled=False, action='add'):
|
|
||||||
if action == 'add':
|
|
||||||
if ssl_enabled and domain and domain not in self.ssl_services:
|
|
||||||
print(f"Setting up SSL for {domain}")
|
|
||||||
if self.request_certificate(domain):
|
|
||||||
self.setup_certificate_renewal(domain)
|
|
||||||
self.ssl_services.add(domain)
|
|
||||||
|
|
||||||
acl_data = {
|
|
||||||
"acl_name": f"is_{service_name}",
|
|
||||||
"criterion": "hdr(host)",
|
|
||||||
"value": domain,
|
|
||||||
}
|
|
||||||
self.session.post(f"{HAPROXY_API_BASE}/services/haproxy/configuration/frontends/main/acls/{self.get_next_index('frontends/main/acls')}?version={self.get_dataplaneapi_version()}",
|
|
||||||
json=acl_data)
|
|
||||||
|
|
||||||
if ssl_enabled:
|
|
||||||
self.session.post(f"{HAPROXY_API_BASE}/services/haproxy/configuration/frontends/https_main/acls/{self.get_next_index('frontends/https_main/acls')}?version={self.get_dataplaneapi_version()}",
|
|
||||||
json=acl_data)
|
|
||||||
|
|
||||||
rule_data = {
|
|
||||||
"name": f"backend_{service_name}",
|
|
||||||
"cond": "if",
|
|
||||||
"cond_test": f"is_{service_name}",
|
|
||||||
}
|
|
||||||
self.session.post(f"{HAPROXY_API_BASE}/services/haproxy/configuration/frontends/main/backend_switching_rules/{self.get_next_index('frontends/main/backend_switching_rules')}?version={self.get_dataplaneapi_version()}",
|
|
||||||
json=rule_data)
|
|
||||||
|
|
||||||
if ssl_enabled:
|
|
||||||
self.session.post(f"{HAPROXY_API_BASE}/services/haproxy/configuration/frontends/https_main/backend_switching_rules/{self.get_next_index('frontends/https_main/backend_switching_rules')}?version={self.get_dataplaneapi_version()}",
|
|
||||||
json=rule_data)
|
|
||||||
|
|
||||||
redirect_rule = {
|
|
||||||
"type": "redirect",
|
|
||||||
"redirect_rule": {
|
|
||||||
"type": "scheme",
|
|
||||||
"value": "https",
|
|
||||||
"code": 301
|
|
||||||
},
|
|
||||||
"cond": "if",
|
|
||||||
"cond_test": f"is_{service_name}",
|
|
||||||
}
|
|
||||||
self.session.post(f"{HAPROXY_API_BASE}/services/haproxy/configuration/frontends/main/http_request_rules/{self.get_next_index('frontends/main/http_request_rules')}?version={self.get_dataplaneapi_version()}",
|
|
||||||
json=redirect_rule)
|
|
||||||
|
|
||||||
def process_container_event(self, event):
|
|
||||||
# DIAGNOSTIC: Log raw event structure
|
|
||||||
print(f"[EVENT-DEBUG] Received event - Type: {event.get('Type', 'MISSING')}, Action: {event.get('Action', 'MISSING')}")
|
|
||||||
sys.stdout.flush()
|
|
||||||
|
|
||||||
# DIAGNOSTIC: Check for Actor key
|
|
||||||
if 'Actor' not in event:
|
|
||||||
print(f"[EVENT-SKIP] Skipping event without 'Actor' key - Full event: {json.dumps(event)}")
|
|
||||||
sys.stdout.flush()
|
|
||||||
return
|
|
||||||
|
|
||||||
# DIAGNOSTIC: Check for ID in Actor
|
|
||||||
if 'ID' not in event['Actor']:
|
|
||||||
print(f"[EVENT-SKIP] Skipping event without 'Actor.ID' - Actor content: {json.dumps(event['Actor'])}")
|
|
||||||
sys.stdout.flush()
|
|
||||||
return
|
|
||||||
|
|
||||||
container_id = event['Actor']['ID'][:12]
|
|
||||||
action = event['Action']
|
|
||||||
|
|
||||||
print(f"[EVENT-PROCESS] Processing '{action}' event for container {container_id}")
|
|
||||||
sys.stdout.flush()
|
|
||||||
|
|
||||||
labels = self.get_container_labels(container_id)
|
|
||||||
|
|
||||||
# Dictionary to store discovered services
|
|
||||||
services = {}
|
|
||||||
|
|
||||||
# First, check for namespaced labels (haproxy.{service_name}.enable)
|
|
||||||
for label_key, label_value in labels.items():
|
|
||||||
if label_key.startswith('haproxy.') and label_key.endswith('.enable') and label_value.lower() == 'true':
|
|
||||||
# Extract service name from label key
|
|
||||||
parts = label_key.split('.')
|
|
||||||
if len(parts) == 3: # haproxy.{service_name}.enable
|
|
||||||
service_name = parts[1]
|
|
||||||
|
|
||||||
# Extract properties for this service namespace
|
|
||||||
service_config = {
|
|
||||||
'service_name': service_name,
|
|
||||||
'host': labels.get(f'haproxy.{service_name}.host', '127.0.0.1'),
|
|
||||||
'port': labels.get(f'haproxy.{service_name}.port', '8080'),
|
|
||||||
'domain': labels.get(f'haproxy.{service_name}.domain', None),
|
|
||||||
'ssl_enabled': labels.get(f'haproxy.{service_name}.tls', 'false').lower() == 'true'
|
|
||||||
}
|
|
||||||
services[service_name] = service_config
|
|
||||||
|
|
||||||
# Backward compatibility: If no namespaced labels found, check for flat labels
|
|
||||||
if not services and 'haproxy.enable' in labels and labels['haproxy.enable'].lower() == 'true':
|
|
||||||
service_name = labels.get('haproxy.service', container_id)
|
|
||||||
services[service_name] = {
|
|
||||||
'service_name': service_name,
|
|
||||||
'host': labels.get('haproxy.host', '127.0.0.1'),
|
|
||||||
'port': labels.get('haproxy.port', '8080'),
|
|
||||||
'domain': labels.get('haproxy.domain', None),
|
|
||||||
'ssl_enabled': labels.get('haproxy.tls', 'false').lower() == 'true'
|
|
||||||
}
|
|
||||||
|
|
||||||
# Process each discovered service
|
|
||||||
for service_name, config in services.items():
|
|
||||||
if action in ['start', 'restart']:
|
|
||||||
print(f"Adding service {config['service_name']} to HAProxy (SSL: {config['ssl_enabled']}, Domain: {config['domain']})")
|
|
||||||
sys.stdout.flush()
|
|
||||||
self.update_haproxy_backend(config['service_name'], config['host'], config['port'], 'add')
|
|
||||||
if config['domain']:
|
|
||||||
self.update_haproxy_frontend_rule(config['service_name'], config['domain'], config['ssl_enabled'], 'add')
|
|
||||||
|
|
||||||
elif action in ['stop', 'remove', 'died']:
|
|
||||||
print(f"Removing service {config['service_name']} from HAProxy")
|
|
||||||
sys.stdout.flush()
|
|
||||||
self.update_haproxy_backend(config['service_name'], config['host'], config['port'], 'remove')
|
|
||||||
|
|
||||||
def watch_events(self):
|
|
||||||
print("Starting Podman-HAProxy-ACME sync...")
|
|
||||||
|
|
||||||
# Track last sync time
|
|
||||||
last_full_sync = 0
|
|
||||||
SYNC_INTERVAL = 60 # Re-scan all containers every 60 seconds
|
|
||||||
|
|
||||||
def do_full_sync():
|
|
||||||
"""Perform a full sync of all running containers"""
|
|
||||||
print("Performing full container sync...")
|
|
||||||
try:
|
|
||||||
result = subprocess.run(['podman', 'ps', '--format', 'json'],
|
|
||||||
capture_output=True, text=True)
|
|
||||||
if result.returncode == 0:
|
|
||||||
containers = json.loads(result.stdout)
|
|
||||||
for container in containers:
|
|
||||||
event = {
|
|
||||||
'Type': 'container',
|
|
||||||
'Action': 'start',
|
|
||||||
'Actor': {'ID': container.get('Id', '')}
|
|
||||||
}
|
|
||||||
self.process_container_event(event)
|
|
||||||
print(f"Synced {len(containers)} containers")
|
|
||||||
except Exception as e:
|
|
||||||
print(f"Error during full sync: {e}")
|
|
||||||
|
|
||||||
# Initial sync
|
|
||||||
do_full_sync()
|
|
||||||
last_full_sync = time.time()
|
|
||||||
|
|
||||||
print("Watching for container events...")
|
|
||||||
|
|
||||||
cmd = ['podman', 'events', '--format', 'json']
|
|
||||||
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, text=True)
|
|
||||||
|
|
||||||
# Use select/poll for non-blocking read so we can do periodic syncs
|
|
||||||
import select
|
|
||||||
|
|
||||||
while True:
|
|
||||||
# Check if it's time for periodic sync
|
|
||||||
if time.time() - last_full_sync >= SYNC_INTERVAL:
|
|
||||||
do_full_sync()
|
|
||||||
last_full_sync = time.time()
|
|
||||||
|
|
||||||
# Check for events with timeout
|
|
||||||
ready, _, _ = select.select([process.stdout], [], [], 5)
|
|
||||||
|
|
||||||
if ready:
|
|
||||||
line = process.stdout.readline()
|
|
||||||
if line:
|
|
||||||
try:
|
|
||||||
event = json.loads(line.strip())
|
|
||||||
if event['Type'] == 'container':
|
|
||||||
self.process_container_event(event)
|
|
||||||
except json.JSONDecodeError as e:
|
|
||||||
print(f"[EVENT-ERROR] JSON decode error: {e} - Line: {line[:100]}")
|
|
||||||
sys.stdout.flush()
|
|
||||||
except KeyError as e:
|
|
||||||
print(f"[EVENT-ERROR] Missing key {e} in event: {json.dumps(event)}")
|
|
||||||
sys.stdout.flush()
|
|
||||||
except Exception as e:
|
|
||||||
print(f"[EVENT-ERROR] Error processing event: {e}")
|
|
||||||
print(f"[EVENT-ERROR] Event structure: {json.dumps(event)}")
|
|
||||||
sys.stdout.flush()
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
os.makedirs(CERT_DIR, exist_ok=True)
|
|
||||||
sync = PodmanHAProxyACMESync()
|
|
||||||
sync.watch_events()
|
|
||||||
|
|
||||||
runcmd:
|
|
||||||
# Create necessary directories
|
|
||||||
- mkdir -p /var/run/haproxy /etc/ssl/haproxy /etc/containers/systemd /etc/haproxy/dataplane /etc/dataplaneapi
|
|
||||||
- chown haproxy:haproxy /var/run/haproxy
|
|
||||||
|
|
||||||
# Install Data Plane API
|
|
||||||
- cd /tmp && curl -LO https://github.com/haproxytech/dataplaneapi/releases/download/v3.2.4/dataplaneapi_3.2.4_linux_amd64.deb
|
|
||||||
- env DEBIAN_FRONTEND=noninteractive apt install -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" /tmp/dataplaneapi_3.2.4_linux_amd64.deb
|
|
||||||
- rm /tmp/dataplaneapi_3.2.4_linux_amd64.deb
|
|
||||||
|
|
||||||
- mkdir -p /home/fourlights/.config/containers/systemd
|
|
||||||
- mkdir -p /home/fourlights/.config/systemd/user
|
|
||||||
- |
|
|
||||||
cat > /home/fourlights/.config/systemd/user/podman-haproxy-acme-sync.service << 'EOF'
|
|
||||||
[Unit]
|
|
||||||
Description=Podman HAProxy ACME Sync Service
|
|
||||||
After=network.target
|
|
||||||
|
|
||||||
[Service]
|
|
||||||
Type=simple
|
|
||||||
Environment="XDG_RUNTIME_DIR=/run/user/1000"
|
|
||||||
Environment="DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/1000/bus"
|
|
||||||
ExecStart=/usr/local/bin/podman-haproxy-acme-sync-wrapper.sh
|
|
||||||
StandardOutput=journal
|
|
||||||
StandardError=journal
|
|
||||||
Restart=always
|
|
||||||
RestartSec=10
|
|
||||||
|
|
||||||
[Install]
|
|
||||||
WantedBy=default.target
|
|
||||||
EOF
|
|
||||||
- chown -R fourlights:fourlights /home/fourlights
|
|
||||||
|
|
||||||
# Install ACME.sh
|
|
||||||
- su - fourlights -c 'curl https://get.acme.sh | sh -s email=${acme_email}'
|
|
||||||
- ln -sf /home/fourlights/.acme.sh/acme.sh /usr/local/bin/acme.sh
|
|
||||||
|
|
||||||
# Setup data directory and mount volume
|
|
||||||
- mkdir -p /opt/storage/data
|
|
||||||
- mkfs.ext4 -F /dev/sdb
|
|
||||||
- mount /dev/sdb /opt/storage/data
|
|
||||||
- echo '/dev/sdb /opt/storage/data ext4 defaults 0 2' >> /etc/fstab
|
|
||||||
- chown -R fourlights:fourlights /opt/storage/data
|
|
||||||
|
|
||||||
# Enable Podman for user services
|
|
||||||
- loginctl enable-linger fourlights
|
|
||||||
- su - fourlights -c 'podman login ghcr.io -u ${ghcr_username} -p ${ghcr_token}'
|
|
||||||
|
|
||||||
# Enable and start services
|
|
||||||
- systemctl daemon-reload
|
|
||||||
- systemctl enable --now haproxy
|
|
||||||
- systemctl enable --now dataplaneapi
|
|
||||||
- su - fourlights -c 'systemctl --user daemon-reload'
|
|
||||||
- su - fourlights -c 'systemctl --user enable --now podman-haproxy-acme-sync'
|
|
||||||
|
|
||||||
final_message: "Server setup complete with HAProxy, Podman, and ACME sync configured"
|
|
||||||
|
|
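The sync script above discovers services from container labels of the form haproxy.<service>.enable / host / port / domain / tls (with flat haproxy.* labels as a fallback). A minimal sketch of one such label set, expressed as a Terraform map; the idea that the quadlet-app module (not shown in this diff) turns its haproxy_services entries into exactly these labels is an assumption:

    locals {
      service = "minio_console"

      # hypothetical labels for one container, matching what the sync script parses
      haproxy_labels = {
        "haproxy.${local.service}.enable" = "true"
        "haproxy.${local.service}.host"   = "127.0.0.1"
        "haproxy.${local.service}.port"   = "9001"
        "haproxy.${local.service}.domain" = "storage.example.fourlights.dev"  # hypothetical domain
        "haproxy.${local.service}.tls"    = "false"
      }
    }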
@ -1,53 +0,0 @@
|
||||||
variable "hdns_token" {}
|
|
||||||
variable "zone" { default = "fourlights.dev" }
|
|
||||||
variable "ipv4_address" {}
|
|
||||||
variable "ipv6_address" {}
|
|
||||||
|
|
||||||
variable "root" {}
|
|
||||||
|
|
||||||
terraform {
|
|
||||||
required_providers {
|
|
||||||
hetznerdns = {
|
|
||||||
source = "timohirt/hetznerdns"
|
|
||||||
version = "2.2.0"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
provider "hetznerdns" {
|
|
||||||
apitoken = var.hdns_token
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "hetznerdns_zone" "zone" {
|
|
||||||
name = var.zone
|
|
||||||
ttl = 300
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "hetznerdns_record" "server_root_ipv4" {
|
|
||||||
zone_id = hetznerdns_zone.zone.id
|
|
||||||
name = var.root == null || var.root == "" ? "@" : var.root
|
|
||||||
value = var.ipv4_address
|
|
||||||
type = "A"
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "hetznerdns_record" "server_root_ipv6" {
|
|
||||||
zone_id = hetznerdns_zone.zone.id
|
|
||||||
name = var.root == null || var.root == "" ? "@" : var.root
|
|
||||||
value = var.ipv6_address
|
|
||||||
type = "AAAA"
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "hetznerdns_record" "server_wildcard" {
|
|
||||||
zone_id = hetznerdns_zone.zone.id
|
|
||||||
name = var.root == null || var.root == "" ? "*" : "*.${var.root}"
|
|
||||||
value = var.root
|
|
||||||
type = "CNAME"
|
|
||||||
}
|
|
||||||
|
|
||||||
locals {
|
|
||||||
root_suffix = var.root == null || var.root == "" ? "" : "."
|
|
||||||
}
|
|
||||||
|
|
||||||
output "server_domain" {
|
|
||||||
value = "${var.root}${local.root_suffix}${var.zone}"
|
|
||||||
}
|
|
||||||
|
|
@ -1,191 +0,0 @@
|
||||||
terraform {
|
|
||||||
required_providers {
|
|
||||||
hcloud = {
|
|
||||||
source = "hetznercloud/hcloud"
|
|
||||||
version = "~> 1.0"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
provider "hcloud" {
|
|
||||||
token = var.hcloud_token
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "hcloud_token" {
|
|
||||||
description = "Hetzner Cloud API Token"
|
|
||||||
type = string
|
|
||||||
sensitive = true
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "ssh_public_key_path" {
|
|
||||||
description = "Path to SSH public key"
|
|
||||||
type = string
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "ssh_private_key_path" {
|
|
||||||
description = "Path to SSH private key"
|
|
||||||
type = string
|
|
||||||
}
|
|
||||||
|
|
||||||
# variable "acme_email" {
|
|
||||||
# description = "Email for Let's Encrypt certificates"
|
|
||||||
# type = string
|
|
||||||
# default = "engineering@fourlights.nl"
|
|
||||||
# }
|
|
||||||
|
|
||||||
variable "image" {
|
|
||||||
type = string
|
|
||||||
default = "ubuntu-24.04"
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "location" {
|
|
||||||
type = string
|
|
||||||
default = "nbg1"
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "server_type" {
|
|
||||||
type = string
|
|
||||||
default = "cx22"
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "datacenter" {
|
|
||||||
type = string
|
|
||||||
default = "nbg1-dc3"
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "name" {
|
|
||||||
type = string
|
|
||||||
default = "enterprise"
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "zone" {
|
|
||||||
type = string
|
|
||||||
default = "fourlights.dev"
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "hdns_token" {}
|
|
||||||
variable "ghcr_username" {}
|
|
||||||
variable "ghcr_token" {}
|
|
||||||
|
|
||||||
locals {
|
|
||||||
acme_email = "engineering+${var.name}@fourlights.nl"
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "hcloud_primary_ip" "server_ipv4" {
|
|
||||||
name = "${var.name}-ipv4"
|
|
||||||
type = "ipv4"
|
|
||||||
assignee_type = "server"
|
|
||||||
datacenter = var.datacenter
|
|
||||||
auto_delete = false
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "hcloud_primary_ip" "server_ipv6" {
|
|
||||||
name = "${var.name}-ipv6"
|
|
||||||
type = "ipv6"
|
|
||||||
assignee_type = "server"
|
|
||||||
datacenter = var.datacenter
|
|
||||||
auto_delete = false
|
|
||||||
}
|
|
||||||
|
|
||||||
module "dns" {
|
|
||||||
source = "./dns"
|
|
||||||
|
|
||||||
hdns_token = var.hdns_token
|
|
||||||
zone = var.zone
|
|
||||||
ipv4_address = hcloud_primary_ip.server_ipv4.ip_address
|
|
||||||
ipv6_address = hcloud_primary_ip.server_ipv6.ip_address
|
|
||||||
root = "visualworkplace"
|
|
||||||
}
|
|
||||||
|
|
||||||
# SSH Key
|
|
||||||
resource "hcloud_ssh_key" "default" {
|
|
||||||
name = "terraform-key"
|
|
||||||
public_key = file(var.ssh_public_key_path)
|
|
||||||
}
|
|
||||||
|
|
||||||
# Persistent volume for MinIO
|
|
||||||
resource "hcloud_volume" "minio_data" {
|
|
||||||
name = "minio-data"
|
|
||||||
size = 50
|
|
||||||
location = var.location
|
|
||||||
}
|
|
||||||
|
|
||||||
# Server with comprehensive cloud-init setup
|
|
||||||
resource "hcloud_server" "server" {
|
|
||||||
name = var.name
|
|
||||||
image = var.image
|
|
||||||
server_type = var.server_type
|
|
||||||
location = var.location
|
|
||||||
ssh_keys = [hcloud_ssh_key.default.id]
|
|
||||||
|
|
||||||
user_data = templatefile("${path.module}/cloud-init.yml", {
|
|
||||||
acme_email = local.acme_email
|
|
||||||
ssh_public_key = hcloud_ssh_key.default.public_key,
|
|
||||||
ghcr_username = var.ghcr_username
|
|
||||||
ghcr_token = var.ghcr_token
|
|
||||||
})
|
|
||||||
|
|
||||||
public_net {
|
|
||||||
ipv4_enabled = true
|
|
||||||
ipv6_enabled = true
|
|
||||||
|
|
||||||
ipv4 = hcloud_primary_ip.server_ipv4.id
|
|
||||||
ipv6 = hcloud_primary_ip.server_ipv6.id
|
|
||||||
}
|
|
||||||
|
|
||||||
lifecycle {
|
|
||||||
replace_triggered_by = [
|
|
||||||
# This ensures server gets rebuilt when user_data changes
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
# Attach volume
|
|
||||||
resource "hcloud_volume_attachment" "minio_data" {
|
|
||||||
volume_id = hcloud_volume.minio_data.id
|
|
||||||
server_id = hcloud_server.server.id
|
|
||||||
automount = false # We'll handle mounting in cloud-init
|
|
||||||
}
|
|
||||||
|
|
||||||
# Wait for cloud-init to complete
|
|
||||||
resource "null_resource" "wait_for_cloud_init" {
|
|
||||||
depends_on = [hcloud_server.server]
|
|
||||||
|
|
||||||
connection {
|
|
||||||
type = "ssh"
|
|
||||||
host = hcloud_server.server.ipv4_address
|
|
||||||
user = "fourlights"
|
|
||||||
timeout = "10m"
|
|
||||||
agent = true
|
|
||||||
agent_identity = var.ssh_private_key_path
|
|
||||||
}
|
|
||||||
|
|
||||||
provisioner "remote-exec" {
|
|
||||||
inline = [
|
|
||||||
"echo 'Waiting for cloud-init to complete...'",
|
|
||||||
"cloud-init status --wait",
|
|
||||||
"echo 'Cloud-init completed successfully'"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
output "server_ip" {
|
|
||||||
value = hcloud_server.server.ipv4_address
|
|
||||||
}
|
|
||||||
|
|
||||||
output "haproxy_stats" {
|
|
||||||
value = "http://${hcloud_server.server.ipv4_address}:8404/stats"
|
|
||||||
}
|
|
||||||
|
|
||||||
output "haproxy_api" {
|
|
||||||
value = "http://${hcloud_server.server.ipv4_address}:5555"
|
|
||||||
}
|
|
||||||
|
|
||||||
output "server_domain" {
|
|
||||||
value = module.dns.server_domain
|
|
||||||
}
|
|
||||||
|
|
||||||
output "installed" {
|
|
||||||
value = true
|
|
||||||
depends_on = [null_resource.wait_for_cloud_init]
|
|
||||||
}
|
|
||||||
|
|
@ -1,92 +0,0 @@
|
||||||
variable "wait_on" {
|
|
||||||
type = any
|
|
||||||
description = "Resources to wait on"
|
|
||||||
default = true
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "server_ip" {
|
|
||||||
type = string
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "ssh_private_key_path" {
|
|
||||||
type = string
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "server_domain" {
|
|
||||||
type = string
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "random_password" "minio_access_key" {
|
|
||||||
length = 20
|
|
||||||
special = false
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "random_password" "minio_secret_key" {
|
|
||||||
length = 40
|
|
||||||
special = false
|
|
||||||
}
|
|
||||||
|
|
||||||
module "minio" {
|
|
||||||
wait_on = var.wait_on
|
|
||||||
source = "../quadlet-app"
|
|
||||||
|
|
||||||
server_ip = var.server_ip
|
|
||||||
ssh_private_key_path = var.ssh_private_key_path
|
|
||||||
|
|
||||||
app_name = "minio"
|
|
||||||
image = "docker.io/minio/minio:latest"
|
|
||||||
ports = [
|
|
||||||
"9000:9000", # API port
|
|
||||||
"9001:9001" # Console port
|
|
||||||
]
|
|
||||||
volumes = ["/opt/storage/data/minio:/data:Z"]
|
|
||||||
|
|
||||||
environment = {
|
|
||||||
MINIO_ROOT_USER = random_password.minio_access_key.result
|
|
||||||
MINIO_ROOT_PASSWORD = random_password.minio_secret_key.result
|
|
||||||
MINIO_CONSOLE_ADDRESS = ":9001"
|
|
||||||
MINIO_BROWSER_REDIRECT_URL = "http://storage.${var.server_domain}"
|
|
||||||
}
|
|
||||||
|
|
||||||
command = ["server", "/data", "--console-address", ":9001"]
|
|
||||||
healthcmd = "curl -f http://localhost:9001/minio/health/live || exit 1"
|
|
||||||
|
|
||||||
# Configure multiple HAProxy services for MinIO
|
|
||||||
haproxy_services = [
|
|
||||||
{
|
|
||||||
name = "minio_api"
|
|
||||||
domain = "storage-api.${var.server_domain}"
|
|
||||||
port = "9000"
|
|
||||||
host = "127.0.0.1"
|
|
||||||
tls = false
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name = "minio_console"
|
|
||||||
domain = "storage.${var.server_domain}"
|
|
||||||
port = "9001"
|
|
||||||
host = "127.0.0.1"
|
|
||||||
tls = false
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
|
|
||||||
output "app_urls" {
|
|
||||||
value = module.minio.app_urls
|
|
||||||
}
|
|
||||||
|
|
||||||
output "server" {
|
|
||||||
value = "storage-api.${var.server_domain}"
|
|
||||||
}
|
|
||||||
|
|
||||||
output "access_key" {
|
|
||||||
value = random_password.minio_access_key.result
|
|
||||||
}
|
|
||||||
|
|
||||||
output "secret_key" {
|
|
||||||
value = random_password.minio_secret_key.result
|
|
||||||
}
|
|
||||||
|
|
||||||
output "installed" {
|
|
||||||
value = true
|
|
||||||
depends_on = [module.minio.installed]
|
|
||||||
}
|
|
||||||
|
|
@ -1,221 +0,0 @@
|
||||||
resource "null_resource" "health_check" {
|
|
||||||
depends_on = [var.wait_on]
|
|
||||||
|
|
||||||
provisioner "local-exec" {
|
|
||||||
command = <<-EOT
|
|
||||||
attempts=0  # initialise the retry counter
until curl -s -f "${var.tls ? "https" : "http" }://${var.server}/minio/health/live" || [[ $attempts -ge 60 ]]; do
|
|
||||||
sleep 10
|
|
||||||
attempts=$((attempts+1))
|
|
||||||
done
|
|
||||||
if [[ $attempts -ge 60 ]]; then
|
|
||||||
echo "Minio health check failed after maximum attempts"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
EOT
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "minio_s3_bucket" "overlay" {
|
|
||||||
depends_on = [var.wait_on]
|
|
||||||
bucket = var.name
|
|
||||||
acl = "private"
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "minio_s3_bucket_policy" "overlay" {
|
|
||||||
depends_on = [minio_s3_bucket.overlay]
|
|
||||||
bucket = minio_s3_bucket.overlay.bucket
|
|
||||||
policy = jsonencode({
|
|
||||||
"Version" : "2012-10-17",
|
|
||||||
"Statement" : [
|
|
||||||
{
|
|
||||||
"Effect" : "Allow",
|
|
||||||
"Principal" : {
|
|
||||||
"AWS" : [
|
|
||||||
"*"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"Action" : [
|
|
||||||
"s3:GetBucketLocation"
|
|
||||||
],
|
|
||||||
"Resource" : [
|
|
||||||
minio_s3_bucket.overlay.arn,
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"Effect" : "Allow",
|
|
||||||
"Principal" : {
|
|
||||||
"AWS" : [
|
|
||||||
"*"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"Action" : [
|
|
||||||
"s3:ListBucket"
|
|
||||||
],
|
|
||||||
"Resource" : [
|
|
||||||
minio_s3_bucket.overlay.arn,
|
|
||||||
],
|
|
||||||
"Condition" : {
|
|
||||||
"StringEquals" : {
|
|
||||||
"s3:prefix" : [
|
|
||||||
"*"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"Effect" : "Allow",
|
|
||||||
"Principal" : {
|
|
||||||
"AWS" : [
|
|
||||||
"*"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"Action" : [
|
|
||||||
"s3:GetObject"
|
|
||||||
],
|
|
||||||
"Resource" : [
|
|
||||||
"${minio_s3_bucket.overlay.arn}/**",
|
|
||||||
]
|
|
||||||
}
|
|
||||||
]
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "minio_s3_bucket" "uploads" {
|
|
||||||
depends_on = [null_resource.health_check]
|
|
||||||
bucket = "uploads"
|
|
||||||
acl = "private"
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "minio_s3_bucket_policy" "uploads" {
|
|
||||||
depends_on = [minio_s3_bucket.uploads]
|
|
||||||
bucket = minio_s3_bucket.uploads.bucket
|
|
||||||
policy = jsonencode({
|
|
||||||
"Version" : "2012-10-17",
|
|
||||||
"Statement" : [
|
|
||||||
{
|
|
||||||
"Effect" : "Allow",
|
|
||||||
"Principal" : {
|
|
||||||
"AWS" : [
|
|
||||||
"*"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"Action" : [
|
|
||||||
"s3:GetBucketLocation"
|
|
||||||
],
|
|
||||||
"Resource" : [
|
|
||||||
minio_s3_bucket.uploads.arn,
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"Effect" : "Allow",
|
|
||||||
"Principal" : {
|
|
||||||
"AWS" : [
|
|
||||||
"*"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"Action" : [
|
|
||||||
"s3:ListBucket"
|
|
||||||
],
|
|
||||||
"Resource" : [
|
|
||||||
minio_s3_bucket.uploads.arn,
|
|
||||||
],
|
|
||||||
"Condition" : {
|
|
||||||
"StringEquals" : {
|
|
||||||
"s3:prefix" : [
|
|
||||||
"*"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"Effect" : "Allow",
|
|
||||||
"Principal" : {
|
|
||||||
"AWS" : [
|
|
||||||
"*"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"Action" : [
|
|
||||||
"s3:GetObject"
|
|
||||||
],
|
|
||||||
"Resource" : [
|
|
||||||
"${minio_s3_bucket.uploads.arn}/**",
|
|
||||||
]
|
|
||||||
}
|
|
||||||
]
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "minio_iam_user" "overlay" {
|
|
||||||
depends_on = [null_resource.health_check]
|
|
||||||
name = var.name
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "minio_iam_policy" "overlay" {
|
|
||||||
depends_on = [minio_s3_bucket.overlay, minio_s3_bucket.uploads]
|
|
||||||
name = minio_s3_bucket.overlay.bucket
|
|
||||||
policy = jsonencode({
|
|
||||||
Version = "2012-10-17"
|
|
||||||
Statement = [
|
|
||||||
{
|
|
||||||
Effect = "Allow"
|
|
||||||
Action = ["s3:ListBucket"]
|
|
||||||
Resource = [minio_s3_bucket.overlay.arn, minio_s3_bucket.uploads.arn, ]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Effect = "Allow"
|
|
||||||
Action = [
|
|
||||||
"s3:GetObject",
|
|
||||||
"s3:PutObject",
|
|
||||||
"s3:DeleteObject"
|
|
||||||
]
|
|
||||||
Resource = ["${minio_s3_bucket.overlay.arn}/*", "${minio_s3_bucket.uploads.arn}/*"]
|
|
||||||
}
|
|
||||||
]
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
resource "minio_iam_user_policy_attachment" "overlay" {
|
|
||||||
depends_on = [minio_iam_user.overlay, minio_iam_policy.overlay]
|
|
||||||
|
|
||||||
user_name = minio_iam_user.overlay.id
|
|
||||||
policy_name = minio_iam_policy.overlay.id
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "minio_iam_service_account" "overlay" {
|
|
||||||
depends_on = [minio_iam_user.overlay, minio_s3_bucket.overlay, minio_s3_bucket.uploads]
|
|
||||||
target_user = minio_iam_user.overlay.name
|
|
||||||
policy = jsonencode({
|
|
||||||
Version = "2012-10-17"
|
|
||||||
Statement = [
|
|
||||||
{
|
|
||||||
Effect = "Allow"
|
|
||||||
Action = ["s3:ListBucket"]
|
|
||||||
Resource = [minio_s3_bucket.overlay.arn, minio_s3_bucket.uploads.arn]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Effect = "Allow"
|
|
||||||
Action = [
|
|
||||||
"s3:GetObject",
|
|
||||||
"s3:PutObject",
|
|
||||||
"s3:DeleteObject"
|
|
||||||
]
|
|
||||||
Resource = ["${minio_s3_bucket.overlay.arn}/*", "${minio_s3_bucket.uploads.arn}/*"]
|
|
||||||
}
|
|
||||||
]
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
output "bucket" {
|
|
||||||
value = var.name
|
|
||||||
}
|
|
||||||
|
|
||||||
output "access_key" {
|
|
||||||
value = minio_iam_service_account.overlay.access_key
|
|
||||||
sensitive = true
|
|
||||||
}
|
|
||||||
|
|
||||||
output "secret_key" {
|
|
||||||
value = minio_iam_service_account.overlay.secret_key
|
|
||||||
sensitive = true
|
|
||||||
}
|
|
||||||
|
|
@@ -1,16 +0,0 @@
terraform {
  required_providers {
    minio = {
      source  = "aminueza/minio"
      version = "~> 3.3.0"
    }
  }
}

provider "minio" {
  minio_server   = var.server
  minio_region   = var.region
  minio_user     = var.access_key
  minio_password = var.secret_key
  minio_ssl      = var.tls
}
@@ -1,33 +0,0 @@
variable "name" {
  type = string
}

variable "server" {
  type = string
}

variable "access_key" {
  type      = string
  sensitive = true
}

variable "secret_key" {
  type      = string
  sensitive = true
}

variable "region" {
  type    = string
  default = "eu-central-1"
}

variable "wait_on" {
  type        = any
  description = "Resources to wait on"
  default     = true
}

variable "tls" {
  type    = bool
  default = false
}
@@ -1,36 +0,0 @@
variable "wait_on" {
  type        = any
  description = "Resources to wait on"
  default     = true
}

variable "server_ip" {
  type = string
}

variable "ssh_private_key_path" {
  type = string
}

module "redis" {
  source  = "../quadlet-app"
  wait_on = var.wait_on

  server_ip            = var.server_ip
  ssh_private_key_path = var.ssh_private_key_path

  app_name = "redis"
  image    = "docker.io/redis:7-alpine"
  ports    = ["6379:6379"]
  volumes  = ["/opt/storage/data/redis:/data:Z"]
  command  = ["redis-server", "--appendonly", "yes"]
}

output "app_urls" {
  value = module.redis.app_urls
}

output "installed" {
  value      = true
  depends_on = [module.redis.installed]
}
@@ -1,220 +0,0 @@
variable "wait_on" {
  type        = any
  description = "Resources to wait on"
  default     = true
}

variable "server_ip" {
  description = "Target server IP"
  type        = string
}

variable "ssh_private_key_path" {
  description = "Path to SSH private key"
  type        = string
  default     = "~/.ssh/id_rsa"
}

variable "app_name" {
  description = "Name of the application"
  type        = string
}

variable "image" {
  description = "Container image"
  type        = string
}

variable "ports" {
  description = "List of port mappings (e.g., ['8080:80', '8443:443'])"
  type        = list(string)
  default     = []
}

variable "volumes" {
  description = "List of volume mounts (e.g., ['/host/path:/container/path:Z'])"
  type        = list(string)
  default     = []
}

variable "environment" {
  description = "Environment variables as key-value pairs"
  type        = map(string)
  default     = {}
}

variable "command" {
  description = "Command to run in container (list of strings)"
  type        = list(string)
  default     = []
}

variable "haproxy_services" {
  description = "Multiple HAProxy service configurations"
  type = list(object({
    name   = string
    domain = string
    port   = string
    host   = optional(string, "127.0.0.1")
    tls    = optional(bool, false)
  }))
  default = []
}

variable "depends_on_services" {
  description = "List of systemd services this app depends on"
  type        = list(string)
  default     = []
}

variable "restart_policy" {
  description = "Systemd restart policy"
  type        = string
  default     = "always"
}

variable "healthcmd" {
  default = ""
}

locals {
  # Build all HAProxy labels for multiple services
  haproxy_labels = flatten([
    for svc in var.haproxy_services : [
      "Label=haproxy.${svc.name}.enable=true",
      "Label=haproxy.${svc.name}.domain=${svc.domain}",
      "Label=haproxy.${svc.name}.port=${svc.port}",
      "Label=haproxy.${svc.name}.host=${svc.host}",
      "Label=haproxy.${svc.name}.tls=${svc.tls}"
    ]
  ])
}

resource "null_resource" "deploy_quadlet_app" {
  depends_on = [var.wait_on]
  triggers = {
    app_name             = var.app_name
    image                = var.image
    server_ip            = var.server_ip
    ports                = jsonencode(var.ports)
    volumes              = jsonencode(var.volumes)
    environment          = jsonencode(var.environment)
    command              = jsonencode(var.command)
    haproxy_services     = jsonencode(var.haproxy_services)
    depends_on_services  = jsonencode(var.depends_on_services)
    ssh_private_key_path = var.ssh_private_key_path
    restart_policy       = var.restart_policy
  }

  provisioner "remote-exec" {
    inline = compact(flatten([
      [
        # Wait for cloud-init to complete before proceeding
        "cloud-init status --wait || true",

        # Verify the user systemd session is ready and linger is enabled
        "timeout 60 bash -c 'until loginctl show-user fourlights | grep -q \"Linger=yes\"; do sleep 2; done'",

        # Create base quadlet file
        "cat > /tmp/${var.app_name}.container << 'EOF'",
        "[Unit]",
        "Description=${var.app_name} Service",
        "After=network-online.target",
        "",
        "[Container]",
        "Image=${var.image}",
      ],

      # Add ports (only if not empty)
      length(var.ports) > 0 ? formatlist("PublishPort=127.0.0.1:%s", var.ports) : [],

      # Add volumes (only if not empty)
      length(var.volumes) > 0 ? formatlist("Volume=%s", var.volumes) : [],

      # Add environment variables (only if not empty)
      length(var.environment) > 0 ? formatlist("Environment=%s=%s", keys(var.environment), values(var.environment)) : [],

      # Add command (only if not empty)
      length(var.command) > 0 ? ["Exec=${join(" ", var.command)}"] : [],

      # Add pre-computed HAProxy labels (only if not empty)
      length(local.haproxy_labels) > 0 ? local.haproxy_labels : [],

      # Add health checks if not empty
      var.healthcmd != "" ? ["HealthCmd=${var.healthcmd}"] : [],

      [
        "",
        "[Service]",
        "Restart=${var.restart_policy}",
        "",
        "[Install]",
        "WantedBy=default.target",
        "EOF",

        # Create volume directory
        "mkdir -p /opt/storage/data/${var.app_name}",

        # Move and activate
        # Create directory more robustly
        "test -d ~/.config/containers/systemd || mkdir -p ~/.config/containers/systemd",
        "cp /tmp/${var.app_name}.container ~/.config/containers/systemd/${var.app_name}.container",
        "systemctl --user daemon-reload",
        "timeout 60 bash -c 'until systemctl --user list-unit-files | grep -q \"^${var.app_name}.service\"; do sleep 2; systemctl --user daemon-reload; done'",

        "systemctl --user start ${var.app_name}",
        "systemctl --user status ${var.app_name} --no-pager",
      ]
    ]))

    connection {
      type           = "ssh"
      host           = var.server_ip
      user           = "fourlights"
      agent          = true
      agent_identity = var.ssh_private_key_path
    }
  }

  provisioner "remote-exec" {
    when = destroy
    inline = [
      # Stop and remove the service
      "systemctl --user stop ${self.triggers.app_name} || true",

      # Remove the .container file
      "rm -f ~/.config/containers/systemd/${self.triggers.app_name}.container",

      # Reload systemd to remove the generated service
      "systemctl --user daemon-reload",

      # Force remove any lingering containers
      "podman rm -f ${self.triggers.app_name} || true"
    ]
    connection {
      type           = "ssh"
      host           = self.triggers.server_ip
      user           = "fourlights"
      agent          = true
      agent_identity = self.triggers.ssh_private_key_path
    }
  }
}

output "app_name" {
  value = var.app_name
}

output "service_status" {
  value = "${var.app_name} deployed"
}

output "app_urls" {
  value = [for svc in var.haproxy_services : format("%s://%s", (svc.tls == true ? "https" : "http"), svc.domain)]
}

output "installed" {
  value      = true
  depends_on = [null_resource.deploy_quadlet_app]
}
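For reference, a minimal deployment through this module, such as the redis example in the next file, would assemble roughly the following unit at ~/.config/containers/systemd/redis.container. This is a sketch pieced together by hand from the heredoc above, not output captured from a real host:

[Unit]
Description=redis Service
After=network-online.target

[Container]
Image=docker.io/redis:7-alpine
PublishPort=127.0.0.1:6379:6379
Volume=/opt/storage/data/redis:/data:Z
Exec=redis-server --appendonly yes

[Service]
Restart=always

[Install]
WantedBy=default.target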
@@ -1,36 +0,0 @@
variable "wait_on" {
  type        = any
  description = "Resources to wait on"
  default     = true
}

variable "server_ip" {
  type = string
}

variable "ssh_private_key_path" {
  type = string
}

module "redis" {
  source  = "../quadlet-app"
  wait_on = var.wait_on

  server_ip            = var.server_ip
  ssh_private_key_path = var.ssh_private_key_path

  app_name = "redis"
  image    = "docker.io/redis:7-alpine"
  ports    = ["6379:6379"]
  volumes  = ["/opt/storage/data/redis:/data:Z"]
  command  = ["redis-server", "--appendonly", "yes"]
}

output "app_urls" {
  value = module.redis.app_urls
}

output "installed" {
  value      = true
  depends_on = [module.redis.installed]
}
@@ -1,36 +0,0 @@
variable "wait_on" {
  type        = any
  description = "Resources to wait on"
  default     = true
}

variable "server_ip" {
  type = string
}

variable "ssh_private_key_path" {
  type = string
}

module "redis" {
  source  = "../quadlet-app"
  wait_on = var.wait_on

  server_ip            = var.server_ip
  ssh_private_key_path = var.ssh_private_key_path

  app_name = "redis"
  image    = "docker.io/redis:7-alpine"
  ports    = ["6379:6379"]
  volumes  = ["/opt/storage/data/redis:/data:Z"]
  command  = ["redis-server", "--appendonly", "yes"]
}

output "app_urls" {
  value = module.redis.app_urls
}

output "installed" {
  value      = true
  depends_on = [module.redis.installed]
}
@@ -1,36 +0,0 @@
variable "wait_on" {
  type        = any
  description = "Resources to wait on"
  default     = true
}

variable "server_ip" {
  type = string
}

variable "ssh_private_key_path" {
  type = string
}

module "valkey" {
  source  = "../quadlet-app"
  wait_on = var.wait_on

  server_ip            = var.server_ip
  ssh_private_key_path = var.ssh_private_key_path

  app_name = "valkey"
  image    = "docker.io/valkey/valkey:7-alpine"
  ports    = ["6379:6379"]
  volumes  = ["/opt/storage/data/valkey:/data:Z"]
  command  = ["valkey-server", "--appendonly", "yes"]
}

output "app_urls" {
  value = module.valkey.app_urls
}

output "installed" {
  value      = true
  depends_on = [module.valkey.installed]
}
@@ -1,120 +0,0 @@
variable "wait_on" {
  type        = any
  description = "Resources to wait on"
  default     = true
}

variable "server_ip" {
  type = string
}

variable "ssh_private_key_path" {
  type = string
}

variable "domain" {
  type    = string
  default = "hub.visualworkplace.fourlights.dev"
}

variable "name" {
  type    = string
  default = "visualworkplace-hub"
}

variable "s3_access_key" {
  type = string
}

variable "s3_secret_key" {
  type = string
}

variable "s3_server" {
  type = string
}

variable "valkey_host" {
  type    = string
  default = "systemd-valkey"
}

variable "valkey_db" {
  type    = number
  default = 0
}

module "s3-tenant" {
  source  = "../minio/tenant"
  wait_on = var.wait_on

  access_key = var.s3_access_key
  secret_key = var.s3_secret_key
  server     = var.s3_server
  name       = var.name
}

module "vw-hub" {
  source  = "../quadlet-app"
  wait_on = module.s3-tenant.secret_key

  server_ip            = var.server_ip
  ssh_private_key_path = var.ssh_private_key_path

  app_name = var.name
  image    = "ghcr.io/four-lights-nl/vw-hub:8edae556b9c64fb602b8a54e67c3d06656c4bb9e"
  volumes  = ["/opt/storage/data/vw-hub:/run/secrets:Z"]
  ports = [
    "3000:3000",
  ]

  environment = {
    NODE_ENV                   = "production"
    LOG_LEVEL                  = "info"
    OTEL_LOG_LEVEL             = "info"
    HOST                       = "0.0.0.0"
    PORT                       = "3000"
    OAUTH_CLIENT_ID            = var.name
    OAUTH_CLIENT_SECRET        = "OGZ0IDpkWOJXaFQOr6mbIF7.l0rZLvxQDZPEGv6qHLLH/stP5vAIqHLZ2x05uQn9TFQHtsPkRysGM.RpKlWra0"
    OAUTH_DOMAIN               = "https://${var.domain}"
    BASE_URL                   = "https://${var.domain}"
    REDIS_HOST                 = var.valkey_host
    REDIS_DB                   = var.valkey_db
    KEYS_MASTER_KEY            = "54dd59c1f1c94795a2b63b074a3943674e964b0225e58b7595762d237d9fdcda"
    TOKEN_ENCRYPTION_KEY       = "4d15791e50874fbe8af1a8d0fe2605d65bcf44737b7c36d9b2f99ec3367276c5"
    ZOHO_CLIENT_ID             = "1000.LFYZSCTUJLMUNUUBZX5PMYUXM6HOMP"
    ZOHO_CLIENT_SECRET         = "07093529734781706356ec4bb8ce7274f1df25cb2e"
    ZOHO_REFRESH_TOKEN         = "1000.0808eabe967955a24d403eabec6c0aa5.44fbbd0c6e98c476c6bb7bee70317f82"
    ZOHO_ACCESS_TOKEN          = ""
    ZOHO_TOKEN_URI             = "https://accounts.zoho.eu/oauth/v2/token"
    ZOHO_API_URI               = "https://www.zohoapis.eu/crm/v6"
    EXACTONLINE_CLIENT_ID      = "5c6b0dc4-2e78-4116-89c2-79e6e73356d8"
    EXACTONLINE_CLIENT_SECRET  = "XMSrmWMZkABv"
    EXACTONLINE_WEBHOOK_SECRET = "8vXq0eEHEhEc6iwn"
    EXACTONLINE_REDIRECT_URI   = "https://${var.domain}/exactonline/callback"
    EXACTONLINE_BASE_URL       = "https://start.exactonline.nl"
    EXACTONLINE_API_BASE       = "https://start.exactonline.nl/api/v1/2655637"
    EXACTONLINE_AUTHORIZE_PATH = "api/oauth2/auth"
    EXACTONLINE_TOKEN_PATH     = "api/oauth2/token"
    EXACTONLINE_BASE_URI       = "https://start.exactonline.nl"
    EXACTONLINE_DIVISION       = "2655637"
    EXACTONLINE_LEAD_SOURCE_ID = "945be231-9588-413e-a6cd-53c190669ea7"
    S3_ENDPOINT                = var.s3_server
    S3_ACCESS_KEY              = module.s3-tenant.access_key
    S3_SECRET_KEY              = module.s3-tenant.secret_key
    S3_BUCKET                  = module.s3-tenant.bucket
  }

  haproxy_services = [
    {
      name   = var.name
      domain = var.domain
      port   = "3000"
      host   = "127.0.0.1"
      tls    = true
    }
  ]
}

output "app_urls" {
  value = module.vw-hub.app_urls
}
@@ -0,0 +1,115 @@
#!/usr/bin/env -S deno run --allow-run --allow-read --allow-write

import { Command } from "https://deno.land/x/cliffy@v1.0.0-rc.4/command/mod.ts";

const setupCluster = async (numMasters: number) => {
  // Step 1: Create Low-Resource Profile (if not exists)
  const profileExists = await Deno.run({
    cmd: ["incus", "profile", "show", "low-resource"],
    stdout: "null",
    stderr: "null",
  }).status().then((status) => status.success);

  if (!profileExists) {
    await Deno.run({
      cmd: ["incus", "profile", "create", "low-resource"],
    }).status();
    await Deno.run({
      cmd: ["incus", "profile", "set", "low-resource", "limits.cpu=1", "limits.memory=512MB"],
    }).status();
    await Deno.run({
      cmd: ["incus", "profile", "device", "add", "low-resource", "root", "disk", "pool=default", "path=/"],
    }).status();
    await Deno.run({
      cmd: ["incus", "profile", "device", "add", "low-resource", "eth-0", "nic", "network=incusbr0"],
    }).status();
    console.log("✅ Low-resource profile created.");
  } else {
    console.log("⏩ Low-resource profile already exists.");
  }

  // Step 3: Launch VMs (if not already running)
  for (let i = 1; i <= numMasters; i++) {
    const vmName = `k3s-master${i}`;
    const vmExists = await Deno.run({
      cmd: ["incus", "list", vmName, "--format", "csv"],
      stdout: "piped",
    }).output().then((output) => new TextDecoder().decode(output).trim() !== "");

    if (!vmExists) {
      await Deno.run({
        cmd: ["incus", "launch", "images:alpine/edge/cloud", vmName, "--profile", "low-resource"],
      }).status();
      console.log(`✅ VM ${vmName} launched.`);
    } else {
      console.log(`⏩ VM ${vmName} already exists.`);
    }
  }

  // Step 4: Install k3sup (if not installed)
  const k3supInstalled = await Deno.run({
    cmd: ["which", "k3sup"],
    stdout: "null",
    stderr: "null",
  }).status().then((status) => status.success);

  if (!k3supInstalled) {
    await Deno.run({
      cmd: ["sh", "-c", "curl -sLS https://get.k3sup.dev | sh"],
    }).status();
    console.log("✅ k3sup installed.");
  } else {
    console.log("⏩ k3sup already installed.");
  }

  // Step 5: Bootstrap First Master Node (if not already bootstrapped)
  const firstMasterIP = await Deno.run({
    cmd: ["incus", "list", "k3s-master1", "--format", "csv", "--columns", "n4"],
    stdout: "piped",
  }).output().then((output) => new TextDecoder().decode(output).trim().split(",")[1].split(" ")[0]);

  const kubeconfigExists = await Deno.stat("./kubeconfig").then(() => true).catch(() => false);

  if (!kubeconfigExists) {
    await Deno.run({
      cmd: ["k3sup", "install", "--ip", firstMasterIP, "--user", "root", "--cluster"],
    }).status();
    console.log("✅ First master node bootstrapped.");
  } else {
    console.log("⏩ First master node already bootstrapped.");
  }

  // Step 6: Join Additional Master Nodes (if not already joined)
  for (let i = 2; i <= numMasters; i++) {
    const vmName = `k3s-master${i}`;
    const vmIP = await Deno.run({
      cmd: ["incus", "list", vmName, "--format", "csv", "--columns", "n4"],
      stdout: "piped",
    }).output().then((output) => new TextDecoder().decode(output).trim().split(",")[1].split(" ")[0]);

    const joined = await Deno.run({
      cmd: ["kubectl", "get", "nodes", vmName],
      stdout: "null",
      stderr: "null",
    }).status().then((status) => status.success);

    if (!joined) {
      await Deno.run({
        cmd: ["k3sup", "join", "--ip", vmIP, "--server-ip", firstMasterIP, "--user", "root"],
      }).status();
      console.log(`✅ VM ${vmName} joined the cluster.`);
    } else {
      console.log(`⏩ VM ${vmName} already joined the cluster.`);
    }
  }

  console.log("🚀 HA k3s cluster setup complete!");
};

await new Command()
  .name("setup-k3s-cluster")
  .version("0.1.0")
  .description("Automate the setup of an HA k3s cluster using incus and k3sup")
  .option("-m, --masters <numMasters:number>", "Number of master nodes", { default: 3 })
  .action(({ masters }) => setupCluster(masters))
  .parse(Deno.args);
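Assuming Deno is available on the PATH, the script added above could be invoked roughly as follows; the file name here is an assumption, while the --masters flag comes from the Command definition above:

deno run --allow-run --allow-read --allow-write setup-k3s-cluster.ts --masters 3

Note that the Deno.run subprocess API used throughout this file has since been deprecated in favor of Deno.Command in newer Deno releases; the larger setup script modified below routes its subprocess calls through an executeCommand helper instead.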
BIN shuttles/k3sup
Binary file not shown.
@@ -2,7 +2,7 @@
 
 // Note: TypeScript errors related to Deno imports and namespace can be safely ignored
 // These are only relevant when running the script with the Deno runtime
-import { Command, EnumType } from "https://deno.land/x/cliffy@v1.0.0-rc.4/command/mod.ts";
+import { Command } from "https://deno.land/x/cliffy@v1.0.0-rc.4/command/mod.ts";
 import { delay } from "https://deno.land/std/async/mod.ts";
 import { exists } from "https://deno.land/std/fs/mod.ts";
 
@@ -12,9 +12,7 @@ const alpineConfig = ['--profile', 'cloud-init-alpine'];
 const archImage = "archlinux/current/cloud";
 const archConfig = ['--profile', 'cloud-init-arch'];
 
-const Provisioner = new EnumType(["incus", "qemu"])
-
-const getIp = (i: number) => `192.168.100.${9 + i}`;
+const getIp = (i: number) => `10.110.36.${109 + i}`;
 
 const image = archImage;
 const config = archConfig;
@@ -37,9 +35,9 @@ async function executeCommand(
     stdout?: "piped" | "inherit" | "null",
     stderr?: "piped" | "inherit" | "null",
     throwOnError?: boolean
-  } = {stdout: 'piped', stderr: 'piped', throwOnError: true}
+  } = {}
 ): Promise<{ success: boolean; output?: string; error?: string }> {
-  const {stdout = "piped", stderr = "piped", throwOnError = true} = options;
+  const { stdout = "piped", stderr = "piped", throwOnError = true } = options;
 
   log.debug(`Executing: ${cmdArray.join(" ")}`);
 
@@ -51,7 +49,7 @@ async function executeCommand(
     stderr: stderr === "piped" ? "piped" : stderr === "inherit" ? "inherit" : "null",
   });
 
-  const {code, stdout: stdoutOutput, stderr: stderrOutput} = await command.output();
+  const { code, stdout: stdoutOutput, stderr: stderrOutput } = await command.output();
 
   const stdoutText = stdout === "piped" ? new TextDecoder().decode(stdoutOutput).trim() : "";
   const stderrText = stderr === "piped" ? new TextDecoder().decode(stderrOutput).trim() : "";
@@ -74,21 +72,21 @@ async function executeCommand(
     if (throwOnError) {
       throw error;
     }
-    return {success: false, error: errorMessage};
+    return { success: false, error: errorMessage };
   }
 }
 
 // Check if VM is ready for SSH connections
-async function isVmReadyForSsh(ip: string, user: string, sshKeyPath: string, maxAttempts = 30): Promise<boolean> {
+async function isVmReadyForSsh(ip: string, user: string, maxAttempts = 30): Promise<boolean> {
   log.info(`Checking if VM at ${ip} is ready for SSH connections...`);
 
   for (let attempt = 1; attempt <= maxAttempts; attempt++) {
     log.debug(`SSH readiness check attempt ${attempt}/${maxAttempts}`);
 
-    const {success} = await executeCommand(
-      ["ssh", "-o", "StrictHostKeyChecking=no", "-o", "ConnectTimeout=5", `${user}@${ip}`, "-i", sshKeyPath, "echo", "ready"],
+    const { success } = await executeCommand(
+      ["ssh", "-o", "StrictHostKeyChecking=no", "-o", "ConnectTimeout=5", `${user}@${ip}`, "echo", "ready"],
       `check SSH connectivity to ${ip}`,
-      {throwOnError: false}
+      { throwOnError: false, stderr: "null" }
     );
 
     if (success) {
@@ -106,10 +104,10 @@ async function isVmReadyForSsh(ip: string, user: string, sshKeyPath: string, max
 
 // Check if VM is running
 async function isVmRunning(vmName: string): Promise<boolean> {
-  const {success, output} = await executeCommand(
+  const { success, output } = await executeCommand(
     ["incus", "list", vmName, "--format", "json"],
     `check if VM ${vmName} is running`,
-    {throwOnError: false}
+    { throwOnError: false }
   );
 
   if (!success || !output) {
@@ -130,14 +128,12 @@ async function isVmRunning(vmName: string): Promise<boolean> {
 async function cleanup(vmNames: string[], shouldRemove = false): Promise<void> {
   log.info("Starting cleanup process...");
 
-  return;
-
   for (const vmName of vmNames) {
     // Check if VM exists
-    const {success, output} = await executeCommand(
+    const { success, output } = await executeCommand(
       ["incus", "list", vmName, "--format", "csv"],
       `check if VM ${vmName} exists`,
-      {throwOnError: false}
+      { throwOnError: false }
     );
 
     if (success && output) {
@@ -148,7 +144,7 @@ async function cleanup(vmNames: string[], shouldRemove = false): Promise<void> {
         await executeCommand(
           ["incus", "stop", vmName, "--force"],
           `stop VM ${vmName}`,
-          {throwOnError: false}
+          { throwOnError: false }
         );
       }
 
@@ -158,7 +154,7 @@ async function cleanup(vmNames: string[], shouldRemove = false): Promise<void> {
         await executeCommand(
           ["incus", "delete", vmName],
           `remove VM ${vmName}`,
-          {throwOnError: false}
+          { throwOnError: false }
         );
       }
     }
@@ -167,12 +163,37 @@ async function cleanup(vmNames: string[], shouldRemove = false): Promise<void> {
   log.success("Cleanup completed");
 }
 
-const bootstrapIncus = async () => {
+const setupCluster = async (numMasters: number, forceCleanup = false) => {
+  log.info(`Starting setup of k3s cluster with ${numMasters} master nodes`);
+
+  const createdVMs: string[] = [];
+
+  try {
+    // Get hostname and user
+    const { output: hostname } = await executeCommand(
+      ["hostnamectl", "hostname"],
+      "get hostname"
+    );
+
+    const { output: user } = await executeCommand(
+      ["whoami"],
+      "get current user"
+    );
+
+    const sshKeyPubFileName = `/home/${user}/.ssh/nl.fourlights.${hostname}.pub`;
+    const sshKeyPrivateFileName = `/home/${user}/.ssh/nl.fourlights.${hostname}`;
+
+    // Check if SSH keys exist
+    if (!await exists(sshKeyPubFileName) || !await exists(sshKeyPrivateFileName)) {
+      log.error(`Required SSH keys not found: ${sshKeyPubFileName} or ${sshKeyPrivateFileName}`);
+      throw new Error("SSH keys not found");
+    }
+
     // Step 1: Create Low-Resource Profile (if not exists)
-    const {success: profileExists} = await executeCommand(
+    const { success: profileExists } = await executeCommand(
       ["incus", "profile", "show", "low-resource"],
       "check if low-resource profile exists",
-      {throwOnError: false}
+      { stdout: "null", stderr: "null", throwOnError: false }
     );
 
     if (!profileExists) {
@@ -197,24 +218,18 @@ const bootstrapIncus = async () => {
     } else {
       log.skip("Low-resource profile already exists");
     }
-}
-
-const makeSureVMExists = async (provisioner: "incus" | "qemu", idx: number, vmName: string, sshKey: string) => {
-  switch (provisioner) {
-    case "incus":
-      return makeSureVMExistsIncus(idx, vmName, sshKey);
-    case "qemu":
-      return makeSureVMExistsQEMU(idx, vmName, sshKey);
-    default:
-      throw new Error(`Unknown VM method: ${method}`);
-  }
-}
-
-const makeSureVMExistsIncus = async (idx: number, vmName: string, sshKey: string) => {
-  const {success: vmExists, output: vmOutput} = await executeCommand(
+
+    // Read SSH key
+    const sshKey = await Deno.readTextFile(sshKeyPubFileName);
+
+    // Step 3: Launch VMs (if not already running)
+    for (let i = 1; i <= numMasters; i++) {
+      const vmName = `k3s-master${i}`;
+
+      const { success: vmExists, output: vmOutput } = await executeCommand(
         ["incus", "list", vmName, "--format", "csv"],
         `check if VM ${vmName} exists`,
-        {throwOnError: false}
+        { throwOnError: false }
       );
 
       if (!vmExists || !vmOutput) {
@@ -225,7 +240,7 @@ const makeSureVMExistsIncus = async (idx: number, vmName: string, sshKey: string
         );
 
         await executeCommand(
-          ["incus", "config", 'device', 'add', vmName, 'eth0', 'nic', 'nictype=bridged', 'parent=incusbr0', `ipv4.address=${getIp(idx)}`],
+          ["incus", "config", 'device', 'add', vmName, 'eth0', 'nic', 'nictype=bridged', 'parent=incusbr0', `ipv4.address=${getIp(i)}`],
           `configure network for VM ${vmName}`
         );
 
@@ -234,72 +249,27 @@ const makeSureVMExistsIncus = async (idx: number, vmName: string, sshKey: string
           `start VM ${vmName}`
         );
 
+        createdVMs.push(vmName);
         log.success(`VM ${vmName} started`);
-    return true
+      } else {
+        // Check if VM is running, if not, start it
+        const isRunning = await isVmRunning(vmName);
+        if (!isRunning) {
+          log.info(`Starting existing VM ${vmName}...`);
+          await executeCommand(
+            ["incus", "start", vmName],
+            `start VM ${vmName}`
+          );
+        }
+        log.skip(`VM ${vmName} already exists`);
       }
 
-  // Check if VM is running, if not, start it
-  const isRunning = await isVmRunning(vmName);
-  if (!isRunning) {
-    log.info(`Starting existing VM ${vmName}...`);
-    await executeCommand(
-      ["incus", "start", vmName],
-      `start VM ${vmName}`
-    );
-  }
-  log.skip(`VM ${vmName} already exists`);
-  return false
-}
-
-const makeSureVMExistsQEMU = async (idx: number, vmName: string, sshKey: string) => {
-  return false
-}
-
-const setupCluster = async (provisioner: Provisioner, numMasters: number, forceCleanup = false) => {
-  log.info(`Starting setup of k3s cluster with ${numMasters} master nodes`);
-
-  const createdVMs: string[] = [];
-
-  try {
-    // Get hostname and user
-    const {output: hostname} = await executeCommand(
-      ["hostnamectl", "hostname"],
-      "get hostname"
-    );
-
-    const {output: user} = await executeCommand(
-      ["whoami"],
-      "get current user"
-    );
-
-    const sshKeyPubFileName = `/home/${user}/.ssh/nl.fourlights.${hostname}.pub`;
-    const sshKeyPrivateFileName = `/home/${user}/.ssh/nl.fourlights.${hostname}`;
-
-    // Check if SSH keys exist
-    if (!await exists(sshKeyPubFileName) || !await exists(sshKeyPrivateFileName)) {
-      log.error(`Required SSH keys not found: ${sshKeyPubFileName} or ${sshKeyPrivateFileName}`);
-      throw new Error("SSH keys not found");
-    }
-
-    // Bootstrap our provisioner
-    if (provisioner === "incus") await bootstrapIncus();
-
-    // Read SSH key
-    const sshKey = await Deno.readTextFile(sshKeyPubFileName);
-
-    // Step 3: Launch VMs (if not already running)
-    for (let i = 1; i <= numMasters; i++) {
-      const vmName = `shuttle-${i}`;
-
-      const created = await makeSureVMExists(provisioner, i, vmName, sshKey);
-      if (created) createdVMs.push(vmName);
     }
 
     // Step 4: Install k3sup (if not installed)
-    const {success: k3supInstalled} = await executeCommand(
+    const { success: k3supInstalled } = await executeCommand(
       ["which", "k3sup"],
       "check if k3sup is installed",
-      {throwOnError: false}
+      { stdout: "null", stderr: "null", throwOnError: false }
     );
 
     if (!k3supInstalled) {
@@ -308,7 +278,6 @@ const setupCluster = async (provisioner: Provisioner, numMasters: number, forceC
         ["sh", "-c", "curl -sLS https://get.k3sup.dev | sh"],
         "install k3sup"
       );
-      await executeCommand(["chmod", "+x", "./k3sup"], "make executable");
       log.success("k3sup installed");
     } else {
       log.skip("k3sup already installed");
@@ -318,7 +287,7 @@ const setupCluster = async (provisioner: Provisioner, numMasters: number, forceC
     const firstMasterIP = getIp(1);
     log.info(`Waiting for first master node (${firstMasterIP}) to be ready...`);
 
-    const vmReady = await isVmReadyForSsh(firstMasterIP, "picard", sshKeyPrivateFileName);
+    const vmReady = await isVmReadyForSsh(firstMasterIP, "picard");
     if (!vmReady) {
       throw new Error(`First master node at ${firstMasterIP} is not ready for SSH connections`);
     }
@@ -329,9 +298,8 @@ const setupCluster = async (provisioner: Provisioner, numMasters: number, forceC
     if (!kubeconfigExists) {
       log.info("Bootstrapping first master node...");
       await executeCommand(
-        ["./k3sup", "install", "--host", firstMasterIP, "--user", "picard", "--cluster", "--ssh-key", sshKeyPrivateFileName],
-        "bootstrap first master node",
-        {throwOnError: false}
+        ["k3sup", "install", "--ip", firstMasterIP, "--user", "picard", "--cluster", "--ssh-key", sshKeyPrivateFileName],
+        "bootstrap first master node"
       );
       log.success("First master node bootstrapped");
     } else {
@@ -340,29 +308,28 @@ const setupCluster = async (provisioner: Provisioner, numMasters: number, forceC
 
     // Step 6: Join Additional Master Nodes (if not already joined)
     for (let i = 2; i <= numMasters; i++) {
-      const vmName = `shuttle-${i}`;
+      const vmName = `k3s-master${i}`;
       const vmIP = getIp(i);
 
       // Wait for VM to be ready
       log.info(`Waiting for ${vmName} (${vmIP}) to be ready...`);
-      const nodeReady = await isVmReadyForSsh(vmIP, "picard", sshKeyPrivateFileName);
+      const nodeReady = await isVmReadyForSsh(vmIP, "picard");
       if (!nodeReady) {
        log.warning(`VM ${vmName} is not ready for SSH connections, skipping join operation`);
        continue;
       }
 
-      const {success: joined} = await executeCommand(
+      const { success: joined } = await executeCommand(
        ["kubectl", "--kubeconfig=./kubeconfig", "get", "nodes", vmName],
        `check if ${vmName} has joined the cluster`,
-       {throwOnError: false}
+       { stdout: "null", stderr: "null", throwOnError: false }
      );
 
      if (!joined) {
        log.info(`Joining ${vmName} to the cluster...`);
        await executeCommand(
-         ["./k3sup", "join", "--server", "--host", vmIP, "--server-ip", firstMasterIP, "--user", "picard", "--ssh-key", sshKeyPrivateFileName],
-         `join ${vmName} to the cluster`,
-         {throwOnError: false}
+         ["k3sup", "join", "--server", "--ip", vmIP, "--server-ip", firstMasterIP, "--user", "picard", "--ssh-key", sshKeyPrivateFileName],
+         `join ${vmName} to the cluster`
        );
        log.success(`VM ${vmName} joined the cluster`);
      } else {
@@ -374,10 +341,10 @@ const setupCluster = async (provisioner: Provisioner, numMasters: number, forceC
 
     // Verify cluster status
     log.info("Verifying cluster status...");
-    const {success: clusterVerified, output: nodesOutput} = await executeCommand(
+    const { success: clusterVerified, output: nodesOutput } = await executeCommand(
       ["kubectl", "--kubeconfig=./kubeconfig", "get", "nodes", "-o", "wide"],
       "verify cluster nodes",
-      {throwOnError: false}
+      { throwOnError: false }
     );
 
     if (clusterVerified) {
@@ -390,7 +357,6 @@ const setupCluster = async (provisioner: Provisioner, numMasters: number, forceC
   } catch (error) {
     const errorMessage = error instanceof Error ? error.message : String(error);
     log.error(`Failed to set up cluster: ${errorMessage}`);
-    throw error
 
     if (createdVMs.length > 0) {
       log.warning("An error occurred during setup. Cleaning up created resources...");
@@ -404,10 +370,8 @@ const setupCluster = async (provisioner: Provisioner, numMasters: number, forceC
 await new Command()
   .name("setup-k3s-cluster")
   .version("0.1.0")
-  .type("provisioner", Provisioner)
   .description("Automate the setup of an HA k3s cluster using incus and k3sup")
-  .option("-p, --provisioner <provisioner:provisioner>", "Provisioner of VMs", {default: "incus"})
-  .option("-m, --masters <numMasters:number>", "Number of master nodes", {default: 3})
-  .option("-c, --cleanup", "Force cleanup of VMs if setup fails", {default: false})
-  .action(({provisioner, masters, cleanup}) => setupCluster(provisioner, masters, cleanup))
+  .option("-m, --masters <numMasters:number>", "Number of master nodes", { default: 3 })
+  .option("-c, --cleanup", "Force cleanup of VMs if setup fails", { default: false })
+  .action(({ masters, cleanup }) => setupCluster(masters, cleanup))
   .parse(Deno.args);
Some files were not shown because too many files have changed in this diff.