commit 15ecaf906d
parent de6ceed232
Author: Karsten Gorskowski
Date:   2023-12-06 23:39:12 +01:00

    rewrite yaml merge

6 changed files with 184 additions and 111 deletions

View File

@@ -1,8 +1,11 @@
 ---
 defaults:
   elasticsearch:
     version: 8.11.1
   kibana:
     version: 8.11.1
+  logstash:
+    version: 8.11.1
 S:
   elasticsearch:
     coordinator:
@@ -15,19 +18,16 @@ S:
         requests:
           cpu: 1
           memory: 2Gi
-          storage: 1Gi
-          storageClass: topolvm-provisioner
     data:
-      replicas: 3
+      replicas: 4
       resources:
         limits:
-          cpu: 1
+          cpu: 2
           memory: 4Gi
         requests:
-          cpu: 1
+          cpu: 2
           memory: 4Gi
-          storage: 30Gi
-          storageClass: topolvm-provisioner
+          storage: 50Gi
     master:
       dedicated: true
       replicas: 3
@@ -38,16 +38,14 @@ S:
         requests:
           cpu: 500m
           memory: 2Gi
-          storage: 1Gi
-          storageClass: topolvm-provisioner
   kafka:
     replicas: 3
     resources:
       limits:
-        cpu: 750m
+        cpu: 1
         memory: 2Gi
       requests:
-        cpu: 750m
+        cpu: 1
         memory: 2Gi
         storage: 30Gi
         storageClass: topolvm-provisioner
@@ -59,11 +57,21 @@ S:
         retentionMs: "86400000"
         segmentBytes: "250000000"
         segmentMs: "3600000"
-  pipelines:
-    replicas: 2
-    logging:
-      name: simple
-      workers: 3
+  logstash:
+    ingest:
+      replicas: 2
+      resources:
+        limits:
+          cpu: 2
+          memory: 4Gi
+        requests:
+          cpu: 2
+          memory: 4Gi
+    pipelines:
+      replicas: 2
+      logging:
+        workers: 3
 M:
   elasticsearch:
@@ -80,15 +88,15 @@ M:
           storage: 1Gi
           storageClass: topolvm-provisioner
     data:
-      replicas: 5
+      replicas: 6
       resources:
         limits:
-          cpu: 2
-          memory: 4Gi
+          cpu: 4
+          memory: 8Gi
         requests:
-          cpu: 2
-          memory: 4Gi
-          storage: 50Gi
+          cpu: 4
+          memory: 8Gi
+          storage: 500Gi
           storageClass: topolvm-provisioner
     master:
       dedicated: true
@@ -106,10 +114,10 @@ M:
     replicas: 3
     resources:
       limits:
-        cpu: 1
+        cpu: 2
         memory: 4Gi
       requests:
-        cpu: 1
+        cpu: 2
         memory: 4Gi
         storage: 50Gi
         storageClass: topolvm-provisioner
@@ -121,8 +129,25 @@ M:
         retentionMs: "86400000"
         segmentBytes: "250000000"
         segmentMs: "3600000"
-  pipelines:
-    replicas: 3
-    logging:
-      name: simple
-      workers: 3
+  logstash:
+    ingest:
+      replicas: 3
+      resources:
+        limits:
+          cpu: 4
+          memory: 8Gi
+        requests:
+          cpu: 4
+          memory: 8Gi
+    pipelines:
+      replicas: 3
+      logging:
+        workers: 3
+        resources:
+          limits:
+            cpu: 4
+            memory: 8Gi
+          requests:
+            cpu: 4
+            memory: 8Gi

View File

@@ -1,6 +1,15 @@
 # set standard values for new elastic tenant helm values
 ---
 elasticsearch:
+  data:
+    resources:
+      requests:
+        storageClass: topolvm-provisioner
+  master:
+    resources:
+      requests:
+        storage: 1Gi
+        storageClass: topolvm-provisioner
   config:
     flavor: placeholder
   logging:
@@ -144,7 +153,7 @@ kafka:
   logging:
     enabled: true
   javaOpts:
-    heap: -Xmx1g -Xms1g
+    heap: -Xms -Xms
   replicas: 3
   # TODO: should be removed and replaced with "normal" kafka img when all tenants run `self-managed/kafka`
   setup:
@@ -249,13 +258,6 @@ logstash:
   nodeSelectors: null
   port: 8443
   replicas: 2
-  resources:
-    limits:
-      cpu: 2
-      memory: 4Gi
-    requests:
-      cpu: 2
-      memory: 4Gi
   pipeline:
     config:
       deadLetterQueue: false
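The new `heap: -Xms -Xms` default is only a placeholder: template_values overwrites it with the output of calculate_java_settings from tenant/utils/common, whose implementation is not part of this diff. A minimal sketch of what such a helper could look like, assuming the heap is sized to half of the Kubernetes memory request (consistent with the old 2Gi request alongside the -Xmx1g default):

# Hypothetical sketch of calculate_java_settings; the real helper lives in
# tenant/utils/common and is not shown in this diff. Assumes heap = half of
# the memory request, with -Xms pinned to -Xmx.
def calculate_java_settings(memory_request: str) -> str:
    # "2Gi" -> 2048 MiB, "512Mi" -> 512 MiB
    if memory_request.endswith("Gi"):
        mebibytes = int(memory_request[:-2]) * 1024
    elif memory_request.endswith("Mi"):
        mebibytes = int(memory_request[:-2])
    else:
        raise ValueError(f"unsupported memory quantity: {memory_request}")
    heap = mebibytes // 2
    return f"-Xmx{heap}m -Xms{heap}m"

# e.g. calculate_java_settings("2Gi") -> "-Xmx1024m -Xms1024m"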

View File

@@ -1,5 +1,6 @@
 # tenant/commands/init.py
 import os
+import shutil
 from tenant.utils.common import get_secure_password, generate_key, generate_csr
 from tenant.utils.terraform import create_tfvars_file
 import tenant.utils.generate_secrets_file
@@ -27,71 +28,62 @@ def execute(args):
     ingress = input(
         "Please enter the FQDN of the Kibana ingress, without the 'kibana' prefix: "
     )
+    tenant_size = input("Please enter the desired size of the tenant (S/M/L): ").upper()
     target_directory = args.target
     tenant_directory = os.path.join(target_directory, tenant_name)
-    # Check if the tenant directory already exists
-    if os.path.exists(tenant_directory):
-        print(
-            f"Error: Tenant directory '{tenant_directory}' already exists. Init aborted."
-        )
-        return
-    # Prompt the user for the GitSync password securely
-    git_sync_password = get_secure_password(
-        prompt="Please insert predefined password for GitSync: "
-    )
     # define and create necessary folder structure
     terraform_directory = os.path.join(tenant_directory, "00-terraform")
     certificates_directory = os.path.join(tenant_directory, "01-certificates")
     kubernetes_directory = os.path.join(tenant_directory, "02-kubernetes")
     helm_directory = os.path.join(tenant_directory, "03-helm")
-    os.makedirs(certificates_directory)
-    os.makedirs(terraform_directory)
-    os.makedirs(kubernetes_directory)
-    os.makedirs(helm_directory)
+    values_file = os.path.join(helm_directory, tenant_name + ".values.yaml")
     # generate key and csr if not exist
     keyfile = os.path.join(certificates_directory, ingress + ".key")
     csrfile = os.path.join(certificates_directory, ingress + ".csr")
-    if os.path.exists(keyfile):
-        print("Keyfile file already exists")
-        print(keyfile)
-        exit(1)
-    else:
-        generate_key(keyfile)
-        generate_csr(csrfile, ingress)
+    # Check if the tenant directory already exists
+    if os.path.exists(tenant_directory):
+        user_input = input(
+            f"Attention: Tenant directory '{tenant_directory}' already exists. Do you want to continue? (y/n): "
+        )
+        if user_input.lower() == "y":
+            user_input = input(
+                "Should the directory be deleted? Otherwise we will use existing values to resize tenant. (y/n): "
+            )
+            if user_input.lower() != "y":
+                if os.path.exists(values_file):
+                    template_values(
+                        tenant_name, tenant_size, "laas", ingress, values_file
+                    )
+                    print(f"templated new values file in '{values_file}'")
+                exit(0)
+            else:
+                shutil.rmtree(tenant_directory)
+        else:
+            exit(1)
+    # Prompt the user for the GitSync password securely
+    git_sync_password = get_secure_password(
+        prompt="Please insert predefined password for GitSync: "
+    )
+    user_confirmation = input(f"Do you want a CSR/Keyfile to be created (y/n): ")
+    if user_confirmation == "y":
+        if os.path.exists(keyfile):
+            print("Keyfile file already exists")
+            print(keyfile)
+            exit(1)
+        else:
+            generate_key(keyfile)
+            generate_csr(csrfile, ingress)
     # Create symbolic links for *.tf files in tenant directory
     source_tf_dir = os.path.join(target_directory, "terraform")
     target_tf_dir = terraform_directory
-    # for filename in os.listdir(source_tf_dir):
-    #     if filename.endswith(".tf"):
-    #         source_path = os.path.join(source_tf_dir, filename)
-    #         target_path = os.path.join(target_tf_dir, filename)
-    #         # Ensure the source path is correct before creating the symbolic link
-    #         if os.path.exists(source_path):
-    #             relative_path = os.path.relpath(source_path, target_tf_dir)
-    #             os.symlink(relative_path, target_path)
-    #         else:
-    #             print(
-    #                 f"Warning: Source file '{filename}' not found in '{source_tf_dir}'."
-    #             )
-    variables = {
-        "tenant_name": tenant_name,
-    }
-    tfvars_filepath = os.path.join(terraform_directory, tenant_name + ".tfvars")
-    create_tfvars_file(variables, tfvars_filepath)
-    values_file = os.path.join(helm_directory, tenant_name + ".values.yaml")
     if os.path.exists(values_file):
        print("Values file already exists")
        print(values_file)
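With this change, rerunning init against an existing tenant directory can re-template the values file instead of aborting. A hypothetical invocation of the re-template path (tenant name, size, ingress, and path below are illustrative, not from the diff):

# Illustrative call matching the 5-argument signature shown in
# tenant/utils/template_values.py; all concrete values are made up.
from tenant.utils.template_values import template_values

template_values(
    "acme",                           # tenant_name
    "S",                              # tenant_size, prompted as S/M/L
    "laas",                           # flavor
    "acme.example.com",               # ingress FQDN without the 'kibana' prefix
    "acme/03-helm/acme.values.yaml",  # values_file
)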

View File

@@ -4,6 +4,7 @@ from tenant.utils.common import get_secure_password
 import os
+from tenant.utils.template_values import template_values

 def add_subparser(subparsers):
     resize_parser = subparsers.add_parser(
         "resize", help="Resize resources for a tenant"
@@ -15,17 +16,16 @@ def add_subparser(subparsers):
         "--target", default=".", help="Target directory (default: current directory)"
     )

 def execute(args):
-    ingress = input(
-        "Please enter the FQDN of the Kibana ingress, without the 'kibana' prefix: "
-    )
     target_directory = args.target
     tenant_name = args.tenant_name
-    values_file = os.path.join(helm_directory, tenant_name + ".values.yaml")
     new_size = args.new_size
     tenant_directory = os.path.join(target_directory, tenant_name)
     helm_directory = os.path.join(tenant_directory, "03-helm")
     values_file = os.path.join(helm_directory, tenant_name + ".values.yaml")
-    template_values(tenant_name, new_size, "laas", ingress, values_file)
+    template_values(tenant_name, new_size, "laas", values_file)

     # Use the shared function
     # shared_function()

View File

@@ -72,7 +72,7 @@ def generate_secrets_file(secrets_file, git_sync_password):
             "sops",
             "-e",
             "--in-place",
-            "--hc-vault-transit=https://vault.laas.cloud.itz.in.bund.de/v1/sops/key/laas-bn-infra",
+            "--age=age1gpnes7752n47qutltpy0trtz0wvdgtuudluuxde6efjysmrw03sqlp34z4",
             secrets_file,
         ],
         check=True,
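Switching from the Vault transit key to an age recipient also changes how operators decrypt: sops resolves the age private key from SOPS_AGE_KEY_FILE (or SOPS_AGE_KEY) rather than from Vault. A minimal decrypt counterpart to the call above, sketched under that assumption and not part of this diff:

# Hypothetical decrypt helper mirroring the encrypt call; sops looks up the
# age private key via the SOPS_AGE_KEY_FILE environment variable.
import os
import subprocess

def decrypt_secrets_file(secrets_file, age_key_file="~/.config/sops/age/keys.txt"):
    env = dict(os.environ, SOPS_AGE_KEY_FILE=os.path.expanduser(age_key_file))
    subprocess.run(["sops", "-d", "--in-place", secrets_file], env=env, check=True)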

View File

@ -1,15 +1,42 @@
import ruamel.yaml import ruamel.yaml
import os import os
import sys
from tenant.utils.common import calculate_java_settings from tenant.utils.common import calculate_java_settings
from OpenSSL import crypto
TYPE_RSA = crypto.TYPE_RSA
TYPE_DSA = crypto.TYPE_DSA
key = crypto.PKey()
yaml = ruamel.yaml.YAML() yaml = ruamel.yaml.YAML()
def merge_data(original_data, additional_data):
merged_data = original_data.copy()
for key, value in additional_data.items():
if (
key in merged_data
and isinstance(merged_data[key], dict)
and isinstance(value, dict)
):
# Recursively merge dictionaries
merged_data[key] = merge_data(merged_data[key], value)
elif (
key in merged_data
and isinstance(merged_data[key], list)
and isinstance(value, list)
):
# Extend lists
if key == "pipelines":
# Handle the "pipelines" key to ensure uniqueness
for item in value:
if item not in merged_data[key]:
merged_data[key].append(item)
else:
merged_data[key].extend(value)
else:
# Overwrite or add the key-value pair
merged_data[key] = value
return merged_data
def recursively_sort_dict(input_dict): def recursively_sort_dict(input_dict):
sorted_dict = dict(sorted(input_dict.items())) sorted_dict = dict(sorted(input_dict.items()))
for key, value in sorted_dict.items(): for key, value in sorted_dict.items():
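The merge semantics this helper gives template_values: nested dicts merge recursively, lists under a "pipelines" key are deduplicated, and every other key is overwritten by the size mapping. A small example exercising the merge_data defined above (the values are illustrative):

# Example of merge_data semantics, using the function added in this commit.
existing = {
    "kafka": {"replicas": 3, "javaOpts": {"heap": "-Xms -Xms"}},
    "pipelines": [{"name": "logging"}],
}
override = {
    "kafka": {"replicas": 5},
    "pipelines": [{"name": "logging"}, {"name": "audit"}],
}
merged = merge_data(existing, override)
# kafka.javaOpts survives, kafka.replicas is overwritten, and the
# "pipelines" list gains only the entry it did not already contain.
# Note: .copy() is shallow, so the input list is extended in place.
assert merged["kafka"] == {"replicas": 5, "javaOpts": {"heap": "-Xms -Xms"}}
assert merged["pipelines"] == [{"name": "logging"}, {"name": "audit"}]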
@@ -30,8 +57,22 @@ def template_values(tenant_name, tenant_size, flavor, ingress, values_file):
     if flavor not in ["laas", "daas"]:
         print("Invalid flavor")
     else:
+        # map "selected_values" to "mapping" in reference to the tenant size
         mapped_values = mapping[tenant_size]
+        # merge "kafka" entries
+        existing_values["kafka"] = merge_data(
+            existing_values.get("kafka", {}), mapped_values.get("kafka", {})
+        )
+        # merge "elasticsearch" entries
+        existing_values["elasticsearch"] = merge_data(
+            existing_values.get("elasticsearch", {}),
+            mapped_values.get("elasticsearch", {}),
+        )
+        # Merge the "logstash" entries
+        existing_values["logstash"] = merge_data(
+            existing_values.get("logstash", {}),
+            mapped_values.get("logstash", {}),
+        )

         # setting default values from defaults block
         existing_values["oauthProxy"]["clientId"] = tenant_name + "-logging-client"
@@ -42,18 +83,22 @@ def template_values(tenant_name, tenant_size, flavor, ingress, values_file):
             "elasticsearch"
         ]["version"]
+        existing_values["kafka"]["zookeeper"]["javaOpts"] = calculate_java_settings(
+            existing_values["kafka"]["zookeeper"]["resources"]["requests"]["memory"]
+        )

         # configure resource sizing of elasticsearch components
         existing_values["elasticsearch"]["config"]["flavor"] = flavor
-        existing_values["elasticsearch"]["data"] = mapped_values["elasticsearch"][
-            "data"
-        ]
-        existing_values["elasticsearch"]["coordinator"] = mapped_values[
-            "elasticsearch"
-        ]["coordinator"]
-        existing_values["elasticsearch"]["master"] = mapped_values["elasticsearch"][
-            "master"
-        ]
+        # existing_values["elasticsearch"]["data"] = mapped_values["elasticsearch"][
+        #     "data"
+        # ]
+        # existing_values["elasticsearch"]["coordinator"] = mapped_values[
+        #     "elasticsearch"
+        # ]["coordinator"]
+        # existing_values["elasticsearch"]["master"] = mapped_values["elasticsearch"][
+        #     "master"
+        # ]
         # configure kibana
         existing_values["kibana"]["image"]["version"] = mapping["defaults"][
             "kibana"
@@ -61,10 +106,15 @@ def template_values(tenant_name, tenant_size, flavor, ingress, values_file):
         # configure resource sizing of kafka components
         existing_values["kafka"]["resources"] = mapped_values["kafka"]["resources"]
         existing_values["kafka"]["javaOpts"]["heap"] = calculate_java_settings(
             mapped_values["kafka"]["resources"]["requests"]["memory"]
         )
+        existing_values["logstash"]["image"]["version"] = mapping["defaults"][
+            "logstash"
+        ]["version"]

         # explicitly set ingress domain
         existing_values["ingress"]["domain"] = ingress
@@ -80,17 +130,21 @@ def template_values(tenant_name, tenant_size, flavor, ingress, values_file):
             "bn-" + tenant_name + "-pki-INT-cert-signers"
         )
-        existing_values["logstash"]["pipeline"]["pipelines"][0][
-            "workers"
-        ] = mapped_values["pipelines"]["logging"]["workers"]
-        existing_values["logstash"]["pipeline"]["replicas"] = mapped_values[
-            "pipelines"
-        ]["replicas"]
+        # existing_values["logstash"]["pipeline"]["pipelines"][0][
+        #     "workers"
+        # ] = mapped_values["logstash"]["pipelines"]["logging"]["workers"]
+        # existing_values["logstash"]["pipeline"]["replicas"] = mapped_values[
+        #     "logstash"
+        # ]["pipelines"]["replicas"]
+        existing_values["logstash"]["ingest"]["javaOpts"] = calculate_java_settings(
+            existing_values["logstash"]["ingest"]["resources"]["requests"]["memory"]
+        )
         existing_values["kafka"]["topics"]["logging"]["partitions"] = (
-            mapped_values["pipelines"]["replicas"]
-            * mapped_values["pipelines"]["logging"]["workers"]
+            existing_values["logstash"]["pipelines"]["replicas"]
+            * existing_values["logstash"]["pipelines"]["logging"]["workers"]
        )

         # order the values in the "existing_values" dictionary alphabetically
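The Kafka partition count now derives from the merged logstash values rather than the raw size mapping. With the S sizing from the mapping file above, that works out as follows:

# Partition sizing after the merge, using the size-S values:
replicas = 2    # logstash.pipelines.replicas (size S)
workers = 3     # logstash.pipelines.logging.workers (size S)
partitions = replicas * workers
assert partitions == 6  # size M: 3 replicas * 3 workers = 9 partitions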