WIP
parent 4abf8bc6a9
commit 426f5d9a21
@@ -4,5 +4,6 @@
   <file url="file://$PROJECT_DIR$/infra/modules/argocd/values.yaml" dialect="yaml" />
   <file url="file://$PROJECT_DIR$/infra/modules/fusionauth/values.yaml" dialect="yaml" />
   <file url="file://$PROJECT_DIR$/infra/modules/mongodb/values.yaml" dialect="yaml" />
+  <file url="file://$PROJECT_DIR$/infra/modules/zot/values.yaml.tftpl" dialect="TFTPL" />
 </component>
 </project>
@@ -1,11 +1,7 @@
 config:
-  bookmarks:
-    - Developer:
-        - Github:
-            - abbr: GH
-              href: https://github.com/
   services:
   widgets:
     # show the kubernetes widget, with the cluster summary and individual nodes
     - kubernetes:
         cluster:
           show: true
@@ -18,9 +14,6 @@ config:
           cpu: true
           memory: true
           showLabel: true
-    - search:
-        provider: duckduckgo
-        target: _blank
 kubernetes:
   mode: cluster
 settings:
@@ -36,13 +29,13 @@ enableRbac: true
 ingress:
   main:
     enabled: true
+    annotations:
+      # Example annotations to add Homepage to your Homepage!
+      gethomepage.dev/enabled: "true"
+      gethomepage.dev/name: "Homepage"
+      gethomepage.dev/description: "Dynamically Detected Homepage"
+      gethomepage.dev/group: "Dynamic"
+      gethomepage.dev/icon: "homepage.png"
-    #annotations:
-    #  # Example annotations to add Homepage to your Homepage!
-    #  gethomepage.dev/enabled: "true"
-    #  gethomepage.dev/name: "Homepage"
-    #  gethomepage.dev/description: "Dynamically Detected Homepage"
-    #  gethomepage.dev/group: "Dynamic"
-    #  gethomepage.dev/icon: "homepage.png"
     hosts:
       - host: ${service_uri}
         paths:
@@ -6,15 +6,22 @@ resource "random_password" "minio_access_key" {
 resource "random_password" "minio_secret_key" {
   length  = 40
   special = true
+  #override_special = "!#$%&*()-_=+[]{}<>:?"
+  #min_special = 2
+  #min_upper = 2
+  #min_lower = 2
+  #min_numeric = 2
 }
 
 resource "helm_release" "minio" {
   name             = "minio"
-  repository       = "https://charts.bitnami.com/bitnami"
+  repository       = "oci://registry-1.docker.io/bitnamicharts"
   chart            = "minio"
   namespace        = var.namespace
   create_namespace = true
-  version          = "14.7.16"
+  version          = "16.0.0"
+  wait             = true
+  wait_for_jobs    = true
 
   set_sensitive {
     name = "auth.rootUser"
@@ -1,10 +1,28 @@
-resource "minio_s3_bucket" "overlay" {
+resource "null_resource" "health_check" {
+  depends_on = [var.wait_on]
+
+  provisioner "local-exec" {
+    command = <<-EOT
+      until curl -s -f "https://${var.server}/minio/health/live" || [[ $attempts -ge 10 ]]; do
+        sleep 10
+        attempts=$((attempts+1))
+      done
+      if [[ $attempts -ge 10 ]]; then
+        echo "Minio health check failed after maximum attempts"
+        exit 1
+      fi
+    EOT
+  }
+}
+
+resource "minio_s3_bucket" "overlay" {
+  depends_on = [null_resource.health_check]
   bucket = var.name
   acl    = "private"
 }
 
 resource "minio_s3_bucket_policy" "overlay" {
   depends_on = [minio_s3_bucket.overlay]
   bucket     = minio_s3_bucket.overlay.bucket
   policy     = jsonencode({
     "Version" : "2012-10-17",
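Note: the new `null_resource` gates every MinIO resource behind a liveness probe, retrying up to ten times at ten-second intervals; the shell loop leans on bash treating the unset `attempts` variable as 0 on the first comparison. A minimal TypeScript sketch of the same poll-until-healthy pattern (the URL and attempt budget are illustrative):

```typescript
// Poll a MinIO-style liveness endpoint until it answers 200, or give up.
async function waitForHealthy(url: string, maxAttempts = 10, delayMs = 10_000): Promise<void> {
  for (let attempt = 1; attempt <= maxAttempts; attempt++) {
    try {
      const res = await fetch(url);
      await res.body?.cancel(); // don't leak the response body
      if (res.ok) return;       // healthy: dependent resources may proceed
    } catch {
      // connection refused or DNS not ready yet: fall through and retry
    }
    await new Promise((resolve) => setTimeout(resolve, delayMs));
  }
  throw new Error("Minio health check failed after maximum attempts");
}

// Example: await waitForHealthy("https://minio.example.test/minio/health/live");
```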
@@ -20,7 +38,7 @@ resource "minio_s3_bucket_policy" "overlay" {
         "s3:GetBucketLocation"
       ],
       "Resource" : [
-        "arn:aws:s3:::bouwroute"
+        minio_s3_bucket.overlay.arn
       ]
     },
     {
@@ -34,7 +52,7 @@ resource "minio_s3_bucket_policy" "overlay" {
         "s3:ListBucket"
       ],
       "Resource" : [
-        "arn:aws:s3:::bouwroute"
+        minio_s3_bucket.overlay.arn
       ],
       "Condition" : {
         "StringEquals" : {
@@ -55,7 +73,7 @@ resource "minio_s3_bucket_policy" "overlay" {
         "s3:GetObject"
       ],
       "Resource" : [
-        "arn:aws:s3:::bouwroute/**"
+        "${minio_s3_bucket.overlay.arn}/**"
       ]
     }
   ]
@@ -63,10 +81,12 @@ resource "minio_s3_bucket_policy" "overlay" {
 }
 
 resource "minio_iam_user" "overlay" {
+  depends_on = [null_resource.health_check]
   name = var.name
 }
 
 resource "minio_iam_policy" "overlay" {
+  depends_on = [minio_s3_bucket.overlay]
   name   = minio_s3_bucket.overlay.bucket
   policy = jsonencode({
     Version = "2012-10-17"
@@ -74,7 +94,7 @@ resource "minio_iam_policy" "overlay" {
     {
       Effect   = "Allow"
       Action   = ["s3:ListBucket"]
-      Resource = ["arn:aws:s3:::${var.name}"]
+      Resource = [minio_s3_bucket.overlay.arn]
     },
     {
       Effect = "Allow"
@@ -83,7 +103,7 @@ resource "minio_iam_policy" "overlay" {
       "s3:PutObject",
       "s3:DeleteObject"
     ]
-      Resource = ["arn:aws:s3:::${var.name}/*"]
+      Resource = ["${minio_s3_bucket.overlay.arn}/*"]
     }
   ]
   })
@@ -91,11 +111,14 @@ resource "minio_iam_policy" "overlay" {
 
 
 resource "minio_iam_user_policy_attachment" "overlay" {
+  depends_on = [minio_iam_user.overlay, minio_iam_policy.overlay]
+
   user_name   = minio_iam_user.overlay.id
   policy_name = minio_iam_policy.overlay.id
 }
 
 resource "minio_iam_service_account" "overlay" {
+  depends_on  = [minio_iam_user.overlay, minio_s3_bucket.overlay]
   target_user = minio_iam_user.overlay.name
   policy = jsonencode({
     Version = "2012-10-17"
@@ -103,7 +126,7 @@ resource "minio_iam_service_account" "overlay" {
     {
       Effect   = "Allow"
       Action   = ["s3:ListBucket"]
-      Resource = ["arn:aws:s3:::${var.name}"]
+      Resource = [minio_s3_bucket.overlay.arn]
     },
     {
       Effect = "Allow"
@@ -112,12 +135,16 @@ resource "minio_iam_service_account" "overlay" {
       "s3:PutObject",
       "s3:DeleteObject"
     ]
-    Resource = ["arn:aws:s3:::${var.name}/*"]
+    Resource = ["${minio_s3_bucket.overlay.arn}/*"]
     }
   ]
   })
 }
+
+output "bucket" {
+  value = var.name
+}
 
 output "access_key" {
   value     = minio_iam_service_account.overlay.access_key
   sensitive = true
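Note: from here on the module derives every Resource from `minio_s3_bucket.overlay.arn` instead of the hardcoded `bouwroute` ARNs, so the same module can provision any tenant bucket. A sketch of the policy document the `jsonencode` calls produce, assuming only the bucket ARN as input:

```typescript
// Shape of the least-privilege policy attached to the tenant user and service account.
function bucketPolicy(bucketArn: string) {
  return {
    Version: "2012-10-17",
    Statement: [
      // Listing is granted on the bucket itself...
      { Effect: "Allow", Action: ["s3:ListBucket"], Resource: [bucketArn] },
      // ...while object reads and writes are granted on keys under it.
      {
        Effect: "Allow",
        Action: ["s3:GetObject", "s3:PutObject", "s3:DeleteObject"],
        Resource: [`${bucketArn}/*`],
      },
    ],
  };
}

console.log(JSON.stringify(bucketPolicy("arn:aws:s3:::uploads"), null, 2));
```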
@@ -2,7 +2,7 @@ terraform {
   required_providers {
     minio = {
       source  = "aminueza/minio"
-      version = "~> 2.5.0"
+      version = "~> 3.3.0"
     }
   }
 }
@@ -11,11 +11,11 @@ ports:
     port: 8000
     protocol: TCP
     proxyProtocol:
-      trustedIPs: [127.0.0.1/8,10.0.0.0/8]
+      trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
     forwardedHeaders:
-      trustedIPs: [127.0.0.1/8,10.0.0.0/8]
+      trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
     transport:
-      respondingTimouts:
+      respondingTimeouts:
        writeTimeout: 0
        idleTimeout: 0
        readTimeout: 0
@@ -26,11 +26,11 @@ ports:
     port: 8443
     protocol: TCP
     proxyProtocol:
-      trustedIPs: [127.0.0.1/8,10.0.0.0/8]
+      trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
     forwardedHeaders:
-      trustedIPs: [127.0.0.1/8,10.0.0.0/8]
+      trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
     transport:
-      respondingTimouts:
+      respondingTimeouts:
        writeTimeout: 0
        idleTimeout: 0
        readTimeout: 0
@@ -41,9 +41,9 @@ ports:
     port: 2223
     protocol: TCP
     proxyProtocol:
-      trustedIPs: [127.0.0.1/8,10.0.0.0/8]
+      trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
     transport:
-      respondingTimouts:
+      respondingTimeouts:
        writeTimeout: 600s
        idleTimeout: 60s
        readTimeout: 600s
@@ -54,9 +54,9 @@ ports:
     port: 8993
     protocol: TCP
     proxyProtocol:
-      trustedIPs: [127.0.0.1/8,10.0.0.0/8]
+      trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
     transport:
-      respondingTimouts:
+      respondingTimeouts:
        writeTimeout: 600s
        idleTimeout: 300s
        readTimeout: 600s
@@ -67,9 +67,9 @@ ports:
     port: 8995
     protocol: TCP
     proxyProtocol:
-      trustedIPs: [127.0.0.1/8,10.0.0.0/8]
+      trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
     transport:
-      respondingTimouts:
+      respondingTimeouts:
        writeTimeout: 600s
        idleTimeout: 300s
        readTimeout: 600s
@@ -80,9 +80,9 @@ ports:
     port: 4190
     protocol: TCP
     proxyProtocol:
-      trustedIPs: [127.0.0.1/8,10.0.0.0/8]
+      trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
     transport:
-      respondingTimouts:
+      respondingTimeouts:
        writeTimeout: 600s
        idleTimeout: 300s
        readTimeout: 600s
@@ -93,7 +93,7 @@ ports:
     port: 8025
     protocol: TCP
     transport:
-      respondingTimouts:
+      respondingTimeouts:
        writeTimeout: 300s
        idleTimeout: 300s
        readTimeout: 300s
@@ -104,9 +104,9 @@ ports:
     port: 8465
     protocol: TCP
     proxyProtocol:
-      trustedIPs: [127.0.0.1/8,10.0.0.0/8]
+      trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
     transport:
-      respondingTimouts:
+      respondingTimeouts:
        writeTimeout: 300s
        idleTimeout: 300s
        readTimeout: 300s
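Note: nearly all of this file is one fix, `respondingTimouts` to `respondingTimeouts`. The misspelled key was silently ignored, so none of these timeouts had ever taken effect. A small sketch of the kind of check that catches silently-dropped config keys (the known-key set here is illustrative, not Traefik's full schema):

```typescript
// Report config keys the consumer would silently ignore.
function unknownKeys(section: Record<string, unknown>, known: Set<string>): string[] {
  return Object.keys(section).filter((key) => !known.has(key));
}

const knownTransportKeys = new Set(["respondingTimeouts", "lifeCycle"]); // illustrative subset
const transport = { respondingTimouts: { writeTimeout: 0 } };            // the old typo

console.log(unknownKeys(transport, knownTransportKeys)); // ["respondingTimouts"]
```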
@@ -35,7 +35,7 @@ module "zitadel_project_application_ua" {
 
   name = "${ var.name } (Swagger)"
 
-  redirect_uris             = ["${local.uri}/swagger/oauth2-redirect.html"]
+  redirect_uris             = ["${local.uri}/swagger/oauth2-redirect.html", "${local.uri}/hangfire/signin-oidc", "${local.uri}/signin-oidc"]
   post_logout_redirect_uris = [local.uri]
 }
 
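Note: the Hangfire and plain `signin-oidc` callbacks need their own entries because OIDC redirect URIs are matched by exact string comparison, never by prefix. A minimal illustration (the host is hypothetical):

```typescript
// redirect_uri validation is an exact match against the registered list.
const registered = [
  "https://api.example.test/swagger/oauth2-redirect.html",
  "https://api.example.test/hangfire/signin-oidc",
  "https://api.example.test/signin-oidc",
];

const isAllowedRedirect = (uri: string): boolean => registered.includes(uri);

console.log(isAllowedRedirect("https://api.example.test/hangfire/signin-oidc")); // true
console.log(isAllowedRedirect("https://api.example.test/hangfire/extra"));       // false
```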
@@ -67,6 +67,7 @@ resource "kubernetes_secret" "api" {
 
   data = {
     "authority"     = local.authority
+    "audience"      = var.project_id
     "client_id"     = module.zitadel_project_application_api.client_id
     "client_secret" = module.zitadel_project_application_api.client_secret
   }
@@ -113,6 +114,7 @@ resource "kubernetes_secret" "service-account" {
+    "audience"      = var.project_id
     "client_id"     = module.zitadel_service_account[count.index].client_id
     "client_secret" = module.zitadel_service_account[count.index].client_secret
     "scope"         = join(" ", concat(["openid", "profile", "urn:zitadel:iam:org:project:id:${var.project_id}:aud"], var.roles))
   }
 }
 
@@ -0,0 +1,28 @@
+/**
+ * sets the roles as an additional claim in the token, with roles as value and project as key
+ *
+ * The role claims of the token look like the following:
+ *
+ * // added by the code below
+ * "groups": ["{roleName}", "{roleName}", ...],
+ *
+ * Flow: Complement token, Triggers: Pre Userinfo creation, Pre access token creation
+ *
+ * @param ctx
+ * @param api
+ */
+function groupsClaim(ctx, api) {
+  if (ctx.v1.user.grants === undefined || ctx.v1.user.grants.count == 0) {
+    return;
+  }
+
+  let grants = [];
+  ctx.v1.user.grants.grants.forEach((claim) => {
+    claim.roles.forEach((role) => {
+      grants.push(role);
+    });
+  });
+
+  api.v1.claims.setClaim("groups", grants);
+  api.v1.claims.setClaim("scope", grants);
+}
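Note: the action flattens every role from every user grant into one array and writes it to both the `groups` and `scope` claims (mirroring into `scope` is unusual, but serves consumers that only inspect scopes). For a user granted a few roles, the decoded token would carry roughly this, as a sketch:

```typescript
// Sketch of the claims added by groupsClaim for a user with three granted roles.
interface AugmentedClaims {
  sub: string;
  groups: string[]; // api.v1.claims.setClaim("groups", grants)
  scope: string[];  // api.v1.claims.setClaim("scope", grants)
}

const example: AugmentedClaims = {
  sub: "example-user-id", // hypothetical
  groups: ["manage:profiles", "manage:files", "manage:flows"],
  scope: ["manage:profiles", "manage:files", "manage:flows"],
};

console.log(example.groups.includes("manage:flows")); // consumer-side role check
```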
@@ -13,6 +13,29 @@ resource "zitadel_org" "default" {
   is_default = true
 }
 
+// resource "zitadel_action" "groups-claim" {
+//   org_id          = zitadel_org.default.id
+//   name            = "groupsClaim"
+//   script          = templatefile("${path.module}/groupsClaim.action.tftpl", {})
+//   allowed_to_fail = true
+//   timeout         = "10s"
+// }
+//
+// resource "zitadel_trigger_actions" "groups-claim-pre-user-info" {
+//   org_id       = zitadel_org.default.id
+//   flow_type    = "FLOW_TYPE_CUSTOMISE_TOKEN"
+//   trigger_type = "TRIGGER_TYPE_PRE_USERINFO_CREATION"
+//   action_ids   = [zitadel_action.groups-claim.id]
+// }
+//
+// resource "zitadel_trigger_actions" "groups-claim-pre-access-token" {
+//   org_id       = zitadel_org.default.id
+//   flow_type    = "FLOW_TYPE_CUSTOMISE_TOKEN"
+//   trigger_type = "TRIGGER_TYPE_PRE_ACCESS_TOKEN_CREATION"
+//   action_ids   = [zitadel_action.groups-claim.id]
+// }
+
+
 output "org_id" {
   value = zitadel_org.default.id
 }
@@ -10,7 +10,47 @@ resource "helm_release" "zot" {
   ]
 }
 
+resource "kubernetes_manifest" "traefik_middleware_request_body" {
+  depends_on = [helm_release.zot]
+  manifest = {
+    apiVersion = "traefik.io/v1alpha1"
+    kind       = "Middleware"
+    metadata = {
+      name      = "request-body"
+      namespace = "registry"
+    }
+    spec = {
+      buffering = {
+        maxRequestBodyBytes = 0
+      }
+    }
+  }
+}
+
+resource "kubernetes_manifest" "traefik_middleware_request_timeouts" {
+  depends_on = [helm_release.zot]
+  manifest = {
+    apiVersion = "traefik.io/v1alpha1"
+    kind       = "Middleware"
+    metadata = {
+      name      = "request-timeouts"
+      namespace = "registry"
+    }
+    spec = {
+      headers = {
+        customRequestHeaders = {
+          "X-Forwarded-Timeout-Read"  = "3600s"
+          "X-Forwarded-Timeout-Write" = "3600s"
+        }
+      }
+    }
+  }
+}
+
 output "installed" {
   value = true
-  depends_on = [helm_release.zot]
+  depends_on = [
+    kubernetes_manifest.traefik_middleware_request_body, kubernetes_manifest.traefik_middleware_request_timeouts,
+    helm_release.zot
+  ]
 }
@@ -3,9 +3,36 @@ ingress:
   className: "traefik"
   annotations:
     traefik.ingress.kubernetes.io/router.entrypoints: web
-    traefik.ingress.kubernetes.io/router.middlewares: default-preserve-host-headers@kubernetescrd
-    traefik.ingress.kubernetes.io/proxy-body-size: "0"
+    traefik.ingress.kubernetes.io/router.middlewares: registry-request-body@kubernetescrd,registry-request-timeouts@kubernetescrd,default-preserve-host-headers@kubernetescrd
+    gethomepage.dev/enabled: "true"
+    gethomepage.dev/name: "Registry"
+    gethomepage.dev/description: "OCI Registry"
+    gethomepage.dev/group: "Tools"
+    gethomepage.dev/icon: "docker.png"
   hosts:
     - host: ${ service_uri }
       paths:
         - path: /
+persistence: true
+pvc:
+  create: true
+  name: zot
+  accessMode: "ReadWriteOnce"
+  storage: 8Gi
+service:
+  type: ClusterIP
+  port: 5000
+mountConfig: true
+configFiles:
+  config.json: |-
+    {
+      "storage": { "rootDirectory": "/var/lib/registry" },
+      "http": { "address": "0.0.0.0", "port": "5000" },
+      "log": { "level": "error" },
+      "extensions": {
+        "scrub": {
+          "enable": true,
+          "interval": "12h"
+        }
+      }
+    }
@@ -3,6 +3,8 @@ locals {
 }
 
 resource "kubernetes_namespace" "tenant" {
+  depends_on = [var.wait_on]
+
   metadata {
     name = lower(local.name)
   }
@@ -22,6 +24,15 @@ module "bootstrap-zitadel" {
 }
 
+// create uploads bucket in minio
+module "minio" {
+  source = "../../modules/minio/tenant"
+
+  access_key = var.minio_access_key
+  secret_key = var.minio_secret_key
+  server     = var.minio_server
+
+  name = "365zon"
+}
 
 // create minio secret
 resource "kubernetes_secret" "storage" {
@@ -31,10 +42,10 @@ resource "kubernetes_secret" "storage" {
   }
 
   data = {
-    Storage__AccountName  = var.minio_access_key
-    Storage__AccountKey   = var.minio_secret_key
-    Storage__BlobUri      = var.minio_service_uri
-    Storage__S3BucketName = "uploads"
+    Storage__AccountName  = module.minio.access_key
+    Storage__AccountKey   = module.minio.secret_key
+    Storage__BlobUri      = var.minio_api_uri
+    Storage__S3BucketName = module.minio.bucket
   }
 }
 
|
@ -54,3 +65,17 @@ resource "kubernetes_secret" "connection_strings" {
|
|||
// next, we need to set-up:
|
||||
// - the wildcard tls (*.365zon.venus.fourlights.dev)
|
||||
// - argocd for all relevant apps
|
||||
//
|
||||
output "minio_access_key" {
|
||||
value = module.minio.access_key
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
output "minio_secret_key" {
|
||||
value = module.minio.secret_key
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
output "minio_bucket" {
|
||||
value = module.minio.bucket
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -10,7 +10,8 @@ variable "minio_secret_key" {
   type      = string
   sensitive = true
 }
-variable "minio_service_uri" { type = string }
+variable "minio_api_uri" { type = string }
+variable "minio_server" { type = string }
 variable "mongodb_connection_string" {
   type      = string
   sensitive = true
@@ -19,3 +20,7 @@ variable "rabbitmq_connection_string" {
   type      = string
   sensitive = true
 }
+variable "wait_on" {
+  type    = any
+  default = true
+}
@@ -18,31 +18,31 @@ module "zitadel_project" {
 module "zitadel_project_operator_roles" {
   source = "../../../modules/zitadel/project/roles"
 
-  wait_on    = [module.zitadel_project.installed]
+  wait_on    = module.zitadel_project.installed
   org_id     = var.org_id
   project_id = module.zitadel_project.project_id
   group      = "Operator"
   roles = [
     "manage:profiles", "manage:contacts", "manage:addresses", "manage:enquiries", "manage:flowstates",
-    "manage:flowevents", "manage:files"
+    "manage:flowevents", "manage:files", "manage:brands"
   ]
 }
 
 module "zitadel_project_configurator_roles" {
-  source  = "../../../modules/zitadel/project/roles"
-  wait_on = [module.zitadel_project_operator_roles.installed]
+  source  = "../../../modules/zitadel/project/roles"
+  wait_on = module.zitadel_project_operator_roles.installed
 
   org_id     = var.org_id
   project_id = module.zitadel_project.project_id
   group      = "Configurator"
   roles = [
-    "manage:brands", "manage:flows"
+    "manage:flows"
   ]
 }
 
 module "zitadel_project_developer_roles" {
-  source  = "../../../modules/zitadel/project/roles"
-  wait_on = [module.zitadel_project_configurator_roles.installed]
+  source  = "../../../modules/zitadel/project/roles"
+  wait_on = module.zitadel_project_configurator_roles.installed
 
   org_id     = var.org_id
   project_id = module.zitadel_project.project_id
@@ -54,7 +54,7 @@ module "zitadel_project_developer_roles" {
 
 module "zitadel_project_user_grant" {
   source     = "../../../modules/zitadel/project/user-grant"
-  wait_on    = [module.zitadel_project_developer_roles.installed]
+  wait_on    = module.zitadel_project_developer_roles.installed
   org_id     = var.org_id
   project_id = module.zitadel_project.project_id
   user_id    = var.user_id
@@ -66,8 +66,8 @@ module "zitadel_project_user_grant" {
 // TODO: Add read roles
 
 module "zitadel_project_application_core" {
-  source  = "../../../modules/zitadel/api-m2m-swagger"
-  wait_on = [module.zitadel_project_user_grant.installed]
+  source  = "../../../modules/zitadel/api-m2m-swagger"
+  wait_on = module.zitadel_project_user_grant.installed
 
   org_id     = var.org_id
   project_id = module.zitadel_project.project_id
@@ -84,8 +84,8 @@ module "zitadel_project_application_core" {
 }
 
 module "zitadel_project_application_salesforce" {
-  source  = "../../../modules/zitadel/api-m2m-swagger"
-  wait_on = [module.zitadel_project_application_core.installed]
+  source  = "../../../modules/zitadel/api-m2m-swagger"
+  wait_on = module.zitadel_project_application_core.installed
 
   org_id     = var.org_id
   project_id = module.zitadel_project.project_id
@@ -101,8 +101,8 @@ module "zitadel_project_application_salesforce" {
 }
 
 module "zitadel_project_application_external" {
-  source  = "../../../modules/zitadel/api-m2m-swagger"
-  wait_on = [module.zitadel_project_application_salesforce.installed]
+  source  = "../../../modules/zitadel/api-m2m-swagger"
+  wait_on = module.zitadel_project_application_salesforce.installed
 
   org_id     = var.org_id
   project_id = module.zitadel_project.project_id
@@ -118,8 +118,8 @@ module "zitadel_project_application_external" {
 }
 
 module "zitadel_project_application_module_internal" {
-  source  = "../../../modules/zitadel/api-m2m-swagger"
-  wait_on = [module.zitadel_project_application_external.installed]
+  source  = "../../../modules/zitadel/api-m2m-swagger"
+  wait_on = module.zitadel_project_application_external.installed
 
   org_id     = var.org_id
   project_id = module.zitadel_project.project_id
BIN shuttles/k3sup (binary file not shown)
@@ -35,9 +35,9 @@ async function executeCommand(
     stdout?: "piped" | "inherit" | "null",
     stderr?: "piped" | "inherit" | "null",
     throwOnError?: boolean
-  } = {}
+  } = {stdout: 'piped', stderr: 'piped', throwOnError: true}
 ): Promise<{ success: boolean; output?: string; error?: string }> {
-  const { stdout = "piped", stderr = "piped", throwOnError = true } = options;
+  const {stdout = "piped", stderr = "piped", throwOnError = true} = options;
 
   log.debug(`Executing: ${cmdArray.join(" ")}`);
 
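Note: the new parameter-level default only kicks in when the caller omits the options argument entirely; when a partial object is passed, the per-field fallbacks still come from the destructuring defaults on the next line. A quick demonstration of that interplay:

```typescript
// Parameter default vs. destructuring defaults.
function run(options: { stdout?: string; throwOnError?: boolean } = { stdout: "piped", throwOnError: true }) {
  const { stdout = "piped", throwOnError = true } = options;
  return { stdout, throwOnError };
}

console.log(run());                        // { stdout: "piped", throwOnError: true }  <- parameter default
console.log(run({ throwOnError: false })); // { stdout: "piped", throwOnError: false } <- destructuring fills stdout
```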
@@ -49,7 +49,7 @@ async function executeCommand(
     stderr: stderr === "piped" ? "piped" : stderr === "inherit" ? "inherit" : "null",
   });
 
-  const { code, stdout: stdoutOutput, stderr: stderrOutput } = await command.output();
+  const {code, stdout: stdoutOutput, stderr: stderrOutput} = await command.output();
 
   const stdoutText = stdout === "piped" ? new TextDecoder().decode(stdoutOutput).trim() : "";
   const stderrText = stderr === "piped" ? new TextDecoder().decode(stderrOutput).trim() : "";
@@ -72,21 +72,21 @@ async function executeCommand(
     if (throwOnError) {
       throw error;
     }
-    return { success: false, error: errorMessage };
+    return {success: false, error: errorMessage};
   }
 }
 
 // Check if VM is ready for SSH connections
-async function isVmReadyForSsh(ip: string, user: string, maxAttempts = 30): Promise<boolean> {
+async function isVmReadyForSsh(ip: string, user: string, sshKeyPath: string, maxAttempts = 30): Promise<boolean> {
   log.info(`Checking if VM at ${ip} is ready for SSH connections...`);
 
   for (let attempt = 1; attempt <= maxAttempts; attempt++) {
     log.debug(`SSH readiness check attempt ${attempt}/${maxAttempts}`);
 
-    const { success } = await executeCommand(
-      ["ssh", "-o", "StrictHostKeyChecking=no", "-o", "ConnectTimeout=5", `${user}@${ip}`, "echo", "ready"],
+    const {success} = await executeCommand(
+      ["ssh", "-o", "StrictHostKeyChecking=no", "-o", "ConnectTimeout=5", `${user}@${ip}`, "-i", sshKeyPath, "echo", "ready"],
       `check SSH connectivity to ${ip}`,
-      { throwOnError: false, stderr: "null" }
+      {throwOnError: false}
     );
 
     if (success) {
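Note: the readiness check now threads the private key through to ssh, but OpenSSH stops parsing options at the destination, so flags placed after `user@host` become part of the remote command. If the key is meant to authenticate the connection, the `-i` pair likely belongs before the destination, roughly:

```typescript
// Sketch: keep ssh options ahead of the destination so they are parsed as options.
function sshReadinessArgv(user: string, ip: string, sshKeyPath: string): string[] {
  return [
    "ssh",
    "-i", sshKeyPath,                  // identity file: must precede user@host
    "-o", "StrictHostKeyChecking=no",
    "-o", "ConnectTimeout=5",
    `${user}@${ip}`,
    "echo", "ready",                   // everything after the destination is the remote command
  ];
}

console.log(sshReadinessArgv("picard", "10.0.0.11", "./id_ed25519").join(" "));
```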
@@ -104,10 +104,10 @@ async function isVmReadyForSsh(ip: string, user: string, maxAttempts = 30): Prom
 
 // Check if VM is running
 async function isVmRunning(vmName: string): Promise<boolean> {
-  const { success, output } = await executeCommand(
+  const {success, output} = await executeCommand(
     ["incus", "list", vmName, "--format", "json"],
     `check if VM ${vmName} is running`,
-    { throwOnError: false }
+    {throwOnError: false}
   );
 
   if (!success || !output) {
@@ -128,12 +128,14 @@ async function isVmRunning(vmName: string): Promise<boolean> {
 async function cleanup(vmNames: string[], shouldRemove = false): Promise<void> {
   log.info("Starting cleanup process...");
 
+  return;
+
   for (const vmName of vmNames) {
     // Check if VM exists
-    const { success, output } = await executeCommand(
+    const {success, output} = await executeCommand(
       ["incus", "list", vmName, "--format", "csv"],
       `check if VM ${vmName} exists`,
-      { throwOnError: false }
+      {throwOnError: false}
     );
 
     if (success && output) {
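Note: the bare `return;` after the opening log line short-circuits the whole cleanup routine, leaving all VMs in place. That fits the WIP commit message, but it is easy to forget; an explicit flag keeps the escape hatch visible, as a sketch:

```typescript
// Sketch: make the temporary "skip cleanup" escape hatch explicit and loud.
async function cleanup(vmNames: string[], opts: { skipCleanup?: boolean } = {}): Promise<void> {
  if (opts.skipCleanup) {
    console.warn(`cleanup skipped; VMs left running: ${vmNames.join(", ")}`);
    return;
  }
  for (const vmName of vmNames) {
    console.log(`would stop and delete ${vmName} here`);
  }
}

await cleanup(["k3s-master1"], { skipCleanup: true });
```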
@@ -144,7 +146,7 @@ async function cleanup(vmNames: string[], shouldRemove = false): Promise<void> {
         await executeCommand(
           ["incus", "stop", vmName, "--force"],
           `stop VM ${vmName}`,
-          { throwOnError: false }
+          {throwOnError: false}
         );
       }
 
@@ -154,7 +156,7 @@ async function cleanup(vmNames: string[], shouldRemove = false): Promise<void> {
         await executeCommand(
           ["incus", "delete", vmName],
           `remove VM ${vmName}`,
-          { throwOnError: false }
+          {throwOnError: false}
         );
       }
     }
@@ -170,12 +172,12 @@ const setupCluster = async (numMasters: number, forceCleanup = false) => {
 
   try {
     // Get hostname and user
-    const { output: hostname } = await executeCommand(
+    const {output: hostname} = await executeCommand(
       ["hostnamectl", "hostname"],
       "get hostname"
     );
 
-    const { output: user } = await executeCommand(
+    const {output: user} = await executeCommand(
       ["whoami"],
       "get current user"
     );
@@ -190,10 +192,10 @@ const setupCluster = async (numMasters: number, forceCleanup = false) => {
     }
 
     // Step 1: Create Low-Resource Profile (if not exists)
-    const { success: profileExists } = await executeCommand(
+    const {success: profileExists} = await executeCommand(
       ["incus", "profile", "show", "low-resource"],
       "check if low-resource profile exists",
-      { stdout: "null", stderr: "null", throwOnError: false }
+      {throwOnError: false}
     );
 
     if (!profileExists) {
@@ -226,10 +228,10 @@ const setupCluster = async (numMasters: number, forceCleanup = false) => {
     for (let i = 1; i <= numMasters; i++) {
       const vmName = `k3s-master${i}`;
 
-      const { success: vmExists, output: vmOutput } = await executeCommand(
+      const {success: vmExists, output: vmOutput} = await executeCommand(
         ["incus", "list", vmName, "--format", "csv"],
         `check if VM ${vmName} exists`,
-        { throwOnError: false }
+        {throwOnError: false}
       );
 
       if (!vmExists || !vmOutput) {
@@ -266,10 +268,10 @@ const setupCluster = async (numMasters: number, forceCleanup = false) => {
     }
 
     // Step 4: Install k3sup (if not installed)
-    const { success: k3supInstalled } = await executeCommand(
+    const {success: k3supInstalled} = await executeCommand(
       ["which", "k3sup"],
       "check if k3sup is installed",
-      { stdout: "null", stderr: "null", throwOnError: false }
+      {throwOnError: false}
     );
 
     if (!k3supInstalled) {
@@ -287,7 +289,7 @@ const setupCluster = async (numMasters: number, forceCleanup = false) => {
     const firstMasterIP = getIp(1);
     log.info(`Waiting for first master node (${firstMasterIP}) to be ready...`);
 
-    const vmReady = await isVmReadyForSsh(firstMasterIP, "picard");
+    const vmReady = await isVmReadyForSsh(firstMasterIP, "picard", sshKeyPrivateFileName);
     if (!vmReady) {
       throw new Error(`First master node at ${firstMasterIP} is not ready for SSH connections`);
     }
@@ -313,16 +315,16 @@ const setupCluster = async (numMasters: number, forceCleanup = false) => {
 
       // Wait for VM to be ready
       log.info(`Waiting for ${vmName} (${vmIP}) to be ready...`);
-      const nodeReady = await isVmReadyForSsh(vmIP, "picard");
+      const nodeReady = await isVmReadyForSsh(vmIP, "picard", sshKeyPrivateFileName);
       if (!nodeReady) {
         log.warning(`VM ${vmName} is not ready for SSH connections, skipping join operation`);
         continue;
       }
 
-      const { success: joined } = await executeCommand(
+      const {success: joined} = await executeCommand(
         ["kubectl", "--kubeconfig=./kubeconfig", "get", "nodes", vmName],
         `check if ${vmName} has joined the cluster`,
-        { stdout: "null", stderr: "null", throwOnError: false }
+        {throwOnError: false}
       );
 
       if (!joined) {
|
@ -341,10 +343,10 @@ const setupCluster = async (numMasters: number, forceCleanup = false) => {
|
|||
|
||||
// Verify cluster status
|
||||
log.info("Verifying cluster status...");
|
||||
const { success: clusterVerified, output: nodesOutput } = await executeCommand(
|
||||
const {success: clusterVerified, output: nodesOutput} = await executeCommand(
|
||||
["kubectl", "--kubeconfig=./kubeconfig", "get", "nodes", "-o", "wide"],
|
||||
"verify cluster nodes",
|
||||
{ throwOnError: false }
|
||||
{throwOnError: false}
|
||||
);
|
||||
|
||||
if (clusterVerified) {
|
||||
|
|
@@ -371,7 +373,7 @@ await new Command()
   .name("setup-k3s-cluster")
   .version("0.1.0")
   .description("Automate the setup of an HA k3s cluster using incus and k3sup")
-  .option("-m, --masters <numMasters:number>", "Number of master nodes", { default: 3 })
-  .option("-c, --cleanup", "Force cleanup of VMs if setup fails", { default: false })
-  .action(({ masters, cleanup }) => setupCluster(masters, cleanup))
+  .option("-m, --masters <numMasters:number>", "Number of master nodes", {default: 3})
+  .option("-c, --cleanup", "Force cleanup of VMs if setup fails", {default: false})
+  .action(({masters, cleanup}) => setupCluster(masters, cleanup))
   .parse(Deno.args);
@@ -2,13 +2,41 @@ locals {
   tld          = "fourlights.dev"
   cluster_dns  = "venus.${local.tld}"
   is_installed = true
-  node_count   = 3
+  node_count   = 1
 }
 
-module "registry" {
-  source = "../../infra/modules/zot"
-
-  service_uri = "registry.${local.cluster_dns}"
+resource "kubernetes_manifest" "traefik-helm-config" {
+  manifest = {
+    apiVersion = "helm.cattle.io/v1"
+    kind       = "HelmChartConfig"
+    metadata = {
+      name      = "traefik"
+      namespace = "kube-system"
+    }
+    spec = {
+      valuesContent = <<EOF
+serversTransport:
+  forwardingTimeouts:
+    dialTimeout: 0
+    responseHeaderTimeout: 0
+    idleConnTimeout: 0
+logs:
+  general:
+    level: ERROR
+ports:
+  web:
+    proxyProtocol:
+      trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
+    forwardedHeaders:
+      trustedIPs: [ 127.0.0.1/8,10.0.0.0/8 ]
+    transport:
+      respondingTimeouts:
+        writeTimeout: 0
+        idleTimeout: 0
+        readTimeout: 0
+EOF
+    }
+  }
+}
 
 resource "kubernetes_manifest" "preserve-host-middleware" {
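Note: the standalone `module "registry"` gives way to a `HelmChartConfig`, the k3s mechanism for overriding values of its bundled Traefik chart: k3s watches the object in `kube-system` and re-renders the chart with `valuesContent` merged in. A sketch of the object as data, with an abbreviated values payload:

```typescript
// The HelmChartConfig k3s watches; valuesContent carries raw chart values as YAML.
const traefikHelmConfig = {
  apiVersion: "helm.cattle.io/v1",
  kind: "HelmChartConfig",
  metadata: { name: "traefik", namespace: "kube-system" },
  spec: {
    valuesContent: [
      "ports:",
      "  web:",
      "    transport:",
      "      respondingTimeouts:",
      "        writeTimeout: 0",
    ].join("\n"), // abbreviated; the hunk above carries the full values
  },
};

console.log(JSON.stringify(traefikHelmConfig, null, 2));
```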
@@ -154,14 +182,17 @@ module "redis" {
 module "tenant-365zon" {
   source = "../../infra/tenants/365zon"
 
+  wait_on = module.minio.installed
+
   org_id           = module.zitadel-bootstrap.org_id
   user_id          = module.zitadel-bootstrap.user_id
   domain           = module.zitadel.server
   jwt_profile_file = module.zitadel.jwt_profile_file
 
-  minio_access_key  = module.minio.minio_access_key
-  minio_secret_key  = module.minio.minio_secret_key
-  minio_service_uri = module.minio.minio_api_uri
+  minio_access_key = module.minio.minio_access_key
+  minio_secret_key = module.minio.minio_secret_key
+  minio_server     = module.minio.minio_server
+  minio_api_uri    = module.minio.minio_api_uri
 
   mongodb_connection_string  = module.mongodb.connection_string
   rabbitmq_connection_string = module.rabbitmq.connection_string
@@ -209,3 +240,28 @@ output "mongodb-connection-string" {
   value     = module.mongodb.connection_string
   sensitive = true
 }
+
+output "rabbitmq-connection-string" {
+  value     = module.rabbitmq.connection_string
+  sensitive = true
+}
+
+output "minio-access-key" {
+  value     = module.tenant-365zon.minio_access_key
+  sensitive = true
+}
+
+output "minio-secret-key" {
+  value     = module.tenant-365zon.minio_secret_key
+  sensitive = true
+}
+
+output "minio-root-access-key" {
+  value     = module.minio.minio_access_key
+  sensitive = true
+}
+
+output "minio-root-secret-key" {
+  value     = module.minio.minio_secret_key
+  sensitive = true
+}
@@ -1 +1 @@
-{"type":"serviceaccount","keyId":"310142761184133898","key":"-----BEGIN RSA PRIVATE KEY-----\nMIIEowIBAAKCAQEApSaCjkOBVIe33bEIwENq1jGj6MgbN+NqYRN6EVDWVnESM10/\n188hB9UDCvNR5kcBbaz2bD/ymZ/ppWSLqmXONwc3/PtiOluBfkvR1q2pEh+f13wz\n72dbhOVqf+YhL3lndiQ/OupGzaCbmsBNPGag7mgaPxlgoWTWIItPnOomIhwtwXgy\nNyzt9Fmyh/4JsRlIYO90ZO32vKXABRMCGsKxvcY9CR4+LIqddns83YASGFnQ5oBo\nObc8EN2Di7uKWzNwxUJuZtFlHXp06su2mWDGJhKusHYW4KUIs2uwFtjJfAXG/adT\n8qVgi174m1jU2ocSd6o9IqDYf50arCinbgtAdwIDAQABAoIBABwrB1WQefya8Wdk\njKOOXCiQau6HQu0zYq+QDN/rM8OmoX4VR5Bdibq2QECb47otHjdAqv8noQ9G0Ske\njxvPJW8JUilaDxT5CosqD25YTGAE+NReINWSgW+XWaTa8YoRYO4rnIVF9DGaVS/9\n4K6OqqA/LUrZ3ztn4YXHfRq8bSif86GMo1GkwH8xOMJHdaxCs8YzAbpGURL03QtL\nemVNs9VwSWLmnK71FpXkko0aGi14naS7E4jv8uutykLQsc+QE7m9B4OiDkijKCP9\nQwvw/3RZYcrRuWz7uSANyxG4Uc8JhPdUIyvpkvUz8NfRLTDoSAEq1NQuxpyjLYYU\n7uzYcWECgYEAzKZ5wGTJBZafen2I61L8XAMk2df63nnEK+YuZqNZ6yH6IY7cCrlJ\n3LbeNoHNcGMXw1mf9Z9vvAjz7nbec2BYN1KRMR9QOTHcqwQZcOOJnwhdO4uAlsFZ\ngiyoLYCQP8Z6IIC4ht+2hmf8hS3CmWUPAXyLOcg4ok6SRdyNsfWiLwkCgYEAzpbL\n8szYqNY+r5n1DQ9d6zNb2cbkFfzZDxn64BA1xQZtRgxfzNAOvsGl5pPWve7oS/8Y\nmPx+1b08NvCcTuaow7CCw+IDHsI43TRNbvPQBWtINBE6eeBs3laaNvmxTZU5HGog\nt1yRtk0u64hKT7+L7Ku5JP79pxzNOIs1hnImU38CgYAaH84+/x6iNf4Ztti5oZhR\nbp1PqcB+kfC24eVeeM/LskSp8ACq5chGApoPPzaoeB3adCB1TGsJB+OLt2TiOZRJ\nS6L5MFQfWPwgYJ+Wx5UT1g+AwGgj1n7EnUrCtDy1x3Jjn8rufLRiJ/gWUCcdScdG\nm01yjNqd7YXCoUr9Qqv3cQKBgGd2klHZUbDNC7v6SQXvakP/BsM8nsJ8TWEIy+In\nfCZen59zVw9GK/xRE3s1E1kwK1rUOUd1PThie6OwQTgqwN6wqezcZl+jOcNfDGDC\n7q2oGxMohbbANQXtLXLW/nsyftXCOPxb+gXpBdSj/0ONVNCE+EaVBggJnqXw4i+h\nP5yVAoGBAIoXRgX3mSBsC/xgKIXQb4c9WT7W78IOpU43mbX9jC/emfLkOvuxR/Cv\nmJDgTv2zUq7uItbvXmxwmU7JVYlBFaWERsAqzzWUUsdfM3tBFdBbcH9fzoEG0j4u\nkqCwU1if6HTHCmunqt1ZQKN3oP1Uycn/1ZL6NR8ilqIcjCzh4JPQ\n-----END RSA PRIVATE KEY-----\n","expirationDate":"2026-01-01T00:00:00Z","userId":"310142761184068362"}
+{"type":"serviceaccount","keyId":"313768085818048552","key":"-----BEGIN RSA PRIVATE KEY-----\nMIIEowIBAAKCAQEAtdNVdOOD80x5NDMusGpdfWvo91N9MnOETO0RLhhyrSRyO6vo\nfxKD68nKK/SwUpq8dl9vNCzBFqOsbz2tRPp3jeV+6YdMwGgnQQxEVpOfRftd0718\nycQcaWIauuU3xuyA+rj74CWjlg+R9b5dWbo2p/2Ey+ygO60LWrRhDWnNslRpbPco\nrw2StHnPqpORQLn0Oj7qyns6lSeq20hwWJu9+IbTYrWPA0HnO7WZZBUgukX0prai\n4edAZJfwoo7UJKH6ZETvQbCVuA8UMHqLbUB0U4se9+d+OuotoghVCMH3HAcVh/7A\ndQvTVa6ix6DiXjtqtQytt1+fRL7bCcntRn1kyQIDAQABAoIBAC3wPRERQo8/7QeB\nPvSXixNbJjsGvwT2JqEA7GxHBQI1yR7GajFgzi/OhePhKINWUPNfXUtDW22K4NAi\nNxrMZVRWfWAnLP8X0YMfxExTc9RMlAIhR9v6TmtZvAMoUpVRv6yY/Bo/qDsLqAb8\nl71JzPFYniqfmEQ7jjjWhgbLiorZVyZsRSBuaTYIqJccbq/zZ/O+D3xXdSEwbOri\nxPhqjsWQz6q3jxcc3FAAmzxEMwFBwx8pbocVUyCn43LifLjuXk831SMg6l9Q5mCd\nEi7UYXkZzcPtrdo3mg682FEsSna7VFUlBBl/kEXdSvuGRMZZfEYsx8TrI524sDe7\nPxN2LgECgYEAwHjNlgBox3kMg6uO5USQo5CzCHMOQhsIITwax353LPrsuhYssFUS\nvMyrSlZDvw87xuZEVEhUDuvMQlUOcCvNjHvstt1HPB3hku4KlZrgRUBmgsVGi2sG\nlAxczrp/1qenVU3z1Y2u6DCTM3ASPyb4smYvLDRgm1aTD8VY2pAfaaECgYEA8dbz\nUAEeagH6Au6VQ6404ZrjrrokzsjjLUzuhRah/zlpKWqaOEfldSCEHu0sZSPG20T/\nt2KhKqNzpTsSv6H0QmhD3k1/b42Sr2bu8WbKbQTOeuY64TA341PS7vGzVh5iXN5H\nAo0D5hUoiFhPlXI5Xzpo5sDy8pnX18DREnwnOikCgYBouNHTDcH01m3yrkN/hwDT\nngVrUX6uhRq1SoifhrW9KYHn9ey2vHMHeqywM6OErvstS1heK0RhIfbvGGxUp+Cc\n0UiIbnk1wmRbl1z27V+dDl84Q7IQZVkc8GUGrf6kgm/PQCytQvuppdRRpmanKcMi\n/eoouQ7fNgmqCQxBTy1oQQKBgAoFuneqVDRYeJ/+ezke0xo2bREkrbnUIXYTJh9l\n3LjTDESnIlUKxbug6VjOw3Q9k1Qq+94BvGQj+frzA8flUlYeNBHWbF2XJGkYfvaK\nKAF0nYoCWJZUhTxqkOdOJPyArdrja1XzuiGi5tcfscyjuzTzGr3VaTwcnBFY8FEv\nzABhAoGBAJDlWEJYN94VWkbYpA0ak4CopZONaK3zo+LPnd8RteRF1Bb7nbVLxoWK\n3JyRdjKBGgZHjJVjeVvfHZd2RcH/toKsJ8Oj3ImfMFLlBmx6C8nzLVvIKCjmGpRV\nVISNo5nUrEn9/9vghSlWTSiV/jDm4ExPqn4am6xNyscjNo8aPiNo\n-----END RSA PRIVATE KEY-----\n","expirationDate":"2026-01-01T00:00:00Z","userId":"313768085817983016"}
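Note: the replaced line swaps in a freshly issued Zitadel service-account key (new `keyId` and `userId`, same `expirationDate`). A sketch for sanity-checking such a profile before use, assuming it sits on disk as `jwt-profile.json`:

```typescript
// Read a Zitadel service-account JWT profile and warn when the key nears expiry.
interface JwtProfile {
  type: string;
  keyId: string;
  key: string;
  expirationDate: string;
  userId: string;
}

const profile: JwtProfile = JSON.parse(await Deno.readTextFile("jwt-profile.json"));
const daysLeft = (new Date(profile.expirationDate).getTime() - Date.now()) / 86_400_000;

if (daysLeft < 30) {
  console.warn(`service account key ${profile.keyId} expires in ${Math.floor(daysLeft)} days`);
}
```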