rewrite yaml merge

commit 15ecaf906d (parent de6ceed232)
@@ -1,8 +1,11 @@
+---
 defaults:
   elasticsearch:
     version: 8.11.1
   kibana:
     version: 8.11.1
+  logstash:
+    version: 8.11.1
 S:
   elasticsearch:
     coordinator:
@@ -15,19 +18,16 @@ S:
       requests:
         cpu: 1
         memory: 2Gi
-        storage: 1Gi
-        storageClass: topolvm-provisioner
     data:
-      replicas: 3
+      replicas: 4
       resources:
         limits:
-          cpu: 1
+          cpu: 2
           memory: 4Gi
         requests:
-          cpu: 1
+          cpu: 2
           memory: 4Gi
-          storage: 30Gi
-          storageClass: topolvm-provisioner
+          storage: 50Gi
     master:
       dedicated: true
       replicas: 3
@@ -38,16 +38,14 @@ S:
       requests:
         cpu: 500m
         memory: 2Gi
         storage: 1Gi
         storageClass: topolvm-provisioner
   kafka:
     replicas: 3
     resources:
       limits:
-        cpu: 750m
+        cpu: 1
         memory: 2Gi
       requests:
-        cpu: 750m
+        cpu: 1
         memory: 2Gi
-      storage: 30Gi
-      storageClass: topolvm-provisioner
@@ -59,11 +57,21 @@ S:
       retentionMs: "86400000"
       segmentBytes: "250000000"
       segmentMs: "3600000"
-  pipelines:
-    replicas: 2
-    logging:
-      name: simple
-      workers: 3
+
+  logstash:
+    ingest:
+      replicas: 2
+      resources:
+        limits:
+          cpu: 2
+          memory: 4Gi
+        requests:
+          cpu: 2
+          memory: 4Gi
+    pipelines:
+      replicas: 2
+      logging:
+        workers: 3

 M:
   elasticsearch:
@@ -80,15 +88,15 @@ M:
         storage: 1Gi
         storageClass: topolvm-provisioner
     data:
-      replicas: 5
+      replicas: 6
       resources:
         limits:
-          cpu: 2
-          memory: 4Gi
+          cpu: 4
+          memory: 8Gi
         requests:
-          cpu: 2
-          memory: 4Gi
-          storage: 50Gi
+          cpu: 4
+          memory: 8Gi
+          storage: 500Gi
           storageClass: topolvm-provisioner
     master:
       dedicated: true
@@ -106,10 +114,10 @@ M:
     replicas: 3
     resources:
       limits:
-        cpu: 1
+        cpu: 2
         memory: 4Gi
       requests:
-        cpu: 1
+        cpu: 2
         memory: 4Gi
         storage: 50Gi
         storageClass: topolvm-provisioner
@@ -121,8 +129,25 @@ M:
       retentionMs: "86400000"
       segmentBytes: "250000000"
       segmentMs: "3600000"
-  pipelines:
-    replicas: 3
-    logging:
-      name: simple
-      workers: 3
+
+  logstash:
+    ingest:
+      replicas: 3
+      resources:
+        limits:
+          cpu: 4
+          memory: 8Gi
+        requests:
+          cpu: 4
+          memory: 8Gi
+    pipelines:
+      replicas: 3
+      logging:
+        workers: 3
+      resources:
+        limits:
+          cpu: 4
+          memory: 8Gi
+        requests:
+          cpu: 4
+          memory: 8Gi
@@ -1,6 +1,15 @@
 # set standard values for new elastic tenant helm values
 ---
 elasticsearch:
+  data:
+    resources:
+      requests:
+        storageClass: topolvm-provisioner
+  master:
+    resources:
+      requests:
+        storage: 1Gi
+        storageClass: topolvm-provisioner
   config:
     flavor: placeholder
   logging:
@@ -144,7 +153,7 @@ kafka:
   logging:
     enabled: true
   javaOpts:
-    heap: -Xmx1g -Xms1g
+    heap: -Xms -Xms
   replicas: 3
   # TODO: should be removed and replaced with "normal" kafka img when all tenants run `self-managed/kafka`
   setup:
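Note that the new `heap: -Xms -Xms` value is only a placeholder: `template_values` later overwrites `javaOpts.heap` via `calculate_java_settings(...)` from the component's memory request (see the Python hunks below). The helper's internals are not part of this diff; the following is a hypothetical sketch of such a function, where the give-the-JVM-half-the-request heuristic is an assumption, not the repo's actual logic:

# Hypothetical sketch of calculate_java_settings — NOT the repo's implementation.
# Assumes Kubernetes-style memory requests such as "2Gi" or "512Mi".
def calculate_java_settings(memory_request: str) -> str:
    units = {"Gi": 1024, "Mi": 1}
    for suffix, factor in units.items():
        if memory_request.endswith(suffix):
            mebibytes = int(memory_request[: -len(suffix)]) * factor
            heap = max(mebibytes // 2, 256)  # leave headroom for non-heap memory
            return f"-Xms{heap}m -Xmx{heap}m"
    raise ValueError(f"unsupported memory request: {memory_request}")

print(calculate_java_settings("2Gi"))  # -Xms1024m -Xmx1024m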
@@ -249,13 +258,6 @@ logstash:
     nodeSelectors: null
     port: 8443
     replicas: 2
-    resources:
-      limits:
-        cpu: 2
-        memory: 4Gi
-      requests:
-        cpu: 2
-        memory: 4Gi
     pipeline:
       config:
         deadLetterQueue: false
@@ -1,5 +1,6 @@
 # tenant/commands/init.py
 import os
+import shutil
 from tenant.utils.common import get_secure_password, generate_key, generate_csr
 from tenant.utils.terraform import create_tfvars_file
 import tenant.utils.generate_secrets_file
@@ -27,71 +28,62 @@ def execute(args):
     ingress = input(
         "Please enter the FQDN of the Kibana ingress, without the 'kibana' prefix: "
    )
+    tenant_size = input("Please enter the desired size of the tenant (S/M/L): ").upper()

     target_directory = args.target

     tenant_directory = os.path.join(target_directory, tenant_name)

-    # Check if the tenant directory already exists
-    if os.path.exists(tenant_directory):
-        print(
-            f"Error: Tenant directory '{tenant_directory}' already exists. Init aborted."
-        )
-        return
-
-    # Prompt the user for the GitSync password securely
-    git_sync_password = get_secure_password(
-        prompt="Please insert predefined password for GitSync: "
-    )
-
     # define and create necessary folder structure
     terraform_directory = os.path.join(tenant_directory, "00-terraform")
     certificates_directory = os.path.join(tenant_directory, "01-certificates")
     kubernetes_directory = os.path.join(tenant_directory, "02-kubernetes")
     helm_directory = os.path.join(tenant_directory, "03-helm")

     os.makedirs(certificates_directory)
     os.makedirs(terraform_directory)
     os.makedirs(kubernetes_directory)
     os.makedirs(helm_directory)
+    values_file = os.path.join(helm_directory, tenant_name + ".values.yaml")

     # generate key and csr if not exist
     keyfile = os.path.join(certificates_directory, ingress + ".key")
     csrfile = os.path.join(certificates_directory, ingress + ".csr")

-    if os.path.exists(keyfile):
-        print("Keyfile file already exists")
-        print(keyfile)
-        exit(1)
-    else:
-        generate_key(keyfile)
-        generate_csr(csrfile, ingress)
+    # Check if the tenant directory already exists
+    if os.path.exists(tenant_directory):
+        user_input = input(
+            f"Attention: Tenant directory '{tenant_directory}' already exists. Do you want to continue? (y/n): "
+        )
+        if user_input.lower() == "y":
+            user_input = input(
+                "Should the directory be deleted? Otherwise we will use existing values to resize tenant. (y/n): "
+            )
+            if user_input.lower() != "y":
+                if os.path.exists(values_file):
+                    template_values(
+                        tenant_name, tenant_size, "laas", ingress, values_file
+                    )
+                    print(f"templated new values file in '{values_file}'")
+                    exit(0)
+            else:
+                shutil.rmtree(tenant_directory)
+        else:
+            exit(1)
+
+    # Prompt the user for the GitSync password securely
+    git_sync_password = get_secure_password(
+        prompt="Please insert predefined password for GitSync: "
+    )
+
+    user_confirmation = input(f"Do you want a CSR/Keyfile to be created (y/n): ")
+    if user_confirmation == "y":
+        if os.path.exists(keyfile):
+            print("Keyfile file already exists")
+            print(keyfile)
+            exit(1)
+        else:
+            generate_key(keyfile)
+            generate_csr(csrfile, ingress)

     # Create symbolic links for *.tf files in tenant directory
     source_tf_dir = os.path.join(target_directory, "terraform")
     target_tf_dir = terraform_directory

     # for filename in os.listdir(source_tf_dir):
     #     if filename.endswith(".tf"):
     #         source_path = os.path.join(source_tf_dir, filename)
     #         target_path = os.path.join(target_tf_dir, filename)
     #         # Ensure the source path is correct before creating the symbolic link
     #         if os.path.exists(source_path):
     #             relative_path = os.path.relpath(source_path, target_tf_dir)
     #             os.symlink(relative_path, target_path)
     #         else:
     #             print(
     #                 f"Warning: Source file '{filename}' not found in '{source_tf_dir}'."
     #             )

     variables = {
         "tenant_name": tenant_name,
     }

     tfvars_filepath = os.path.join(terraform_directory, tenant_name + ".tfvars")
     create_tfvars_file(variables, tfvars_filepath)

-    values_file = os.path.join(helm_directory, tenant_name + ".values.yaml")
     if os.path.exists(values_file):
         print("Values file already exists")
         print(values_file)
@@ -4,6 +4,7 @@ from tenant.utils.common import get_secure_password
 import os
+from tenant.utils.template_values import template_values


 def add_subparser(subparsers):
     resize_parser = subparsers.add_parser(
         "resize", help="Resize resources for a tenant"
@@ -15,17 +16,16 @@ def add_subparser(subparsers):
         "--target", default=".", help="Target directory (default: current directory)"
     )


 def execute(args):
     ingress = input(
         "Please enter the FQDN of the Kibana ingress, without the 'kibana' prefix: "
     )
     target_directory = args.target
     tenant_name = args.tenant_name
-    values_file = os.path.join(helm_directory, tenant_name + ".values.yaml")
     new_size = args.new_size
     tenant_directory = os.path.join(target_directory, tenant_name)
     helm_directory = os.path.join(tenant_directory, "03-helm")
     values_file = os.path.join(helm_directory, tenant_name + ".values.yaml")
-    template_values(tenant_name, new_size, "laas", ingress, values_file)
+    template_values(tenant_name, new_size, "laas", values_file)

     # Use the shared function
     # shared_function()
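For context, the resize flow is driven by an argparse subcommand. Only the subcommand name, the `--target` option, and the `args.tenant_name` / `args.new_size` attributes are visible in this diff, so the following wiring is an assumption (positional order and the `choices` list are illustrative):

# Hypothetical CLI wiring for the "resize" subcommand — argument names inferred
# from args.tenant_name / args.new_size / args.target used in execute().
import argparse

parser = argparse.ArgumentParser(prog="tenant")
subparsers = parser.add_subparsers(dest="command")
resize = subparsers.add_parser("resize", help="Resize resources for a tenant")
resize.add_argument("tenant_name")
resize.add_argument("new_size", choices=["S", "M", "L"])
resize.add_argument("--target", default=".", help="Target directory (default: current directory)")

args = parser.parse_args(["resize", "acme", "M", "--target", "./tenants"])
print(args.tenant_name, args.new_size, args.target)  # acme M ./tenants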
@@ -72,7 +72,7 @@ def generate_secrets_file(secrets_file, git_sync_password):
                 "sops",
                 "-e",
                 "--in-place",
-                "--hc-vault-transit=https://vault.laas.cloud.itz.in.bund.de/v1/sops/key/laas-bn-infra",
+                "--age=age1gpnes7752n47qutltpy0trtz0wvdgtuudluuxde6efjysmrw03sqlp34z4",
                 secrets_file,
             ],
             check=True,
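This switches the sops encryption backend from a Vault transit key to an age recipient, so decrypting these secrets now requires the matching age private key. A minimal sketch of the decryption side (the key path and file name are placeholders, not values from this repo):

# Decrypt a sops+age encrypted file — sketch; paths are placeholders.
import os
import subprocess

env = dict(os.environ, SOPS_AGE_KEY_FILE="/path/to/age/keys.txt")  # private key
subprocess.run(["sops", "-d", "secrets.yaml"], env=env, check=True)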
@@ -1,15 +1,42 @@
 import ruamel.yaml
 import os
 import sys
 from tenant.utils.common import calculate_java_settings
 from OpenSSL import crypto

 TYPE_RSA = crypto.TYPE_RSA
 TYPE_DSA = crypto.TYPE_DSA
 key = crypto.PKey()

 yaml = ruamel.yaml.YAML()


+def merge_data(original_data, additional_data):
+    merged_data = original_data.copy()
+
+    for key, value in additional_data.items():
+        if (
+            key in merged_data
+            and isinstance(merged_data[key], dict)
+            and isinstance(value, dict)
+        ):
+            # Recursively merge dictionaries
+            merged_data[key] = merge_data(merged_data[key], value)
+        elif (
+            key in merged_data
+            and isinstance(merged_data[key], list)
+            and isinstance(value, list)
+        ):
+            # Extend lists
+            if key == "pipelines":
+                # Handle the "pipelines" key to ensure uniqueness
+                for item in value:
+                    if item not in merged_data[key]:
+                        merged_data[key].append(item)
+            else:
+                merged_data[key].extend(value)
+        else:
+            # Overwrite or add the key-value pair
+            merged_data[key] = value
+
+    return merged_data
+
+
 def recursively_sort_dict(input_dict):
     sorted_dict = dict(sorted(input_dict.items()))
     for key, value in sorted_dict.items():
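`merge_data` is the core of this rewrite: nested dicts are merged recursively, lists are extended (with a uniqueness guard for the `pipelines` key), and scalars from the additional data win. A quick illustration of the three branches using merge_data as added above; the toy values are not taken from the real mapping:

# Toy inputs to illustrate merge_data's three branches (dict / list / scalar).
original = {
    "replicas": 2,
    "resources": {"limits": {"cpu": 1}},
    "pipelines": [{"name": "logging"}],
}
override = {
    "replicas": 3,                                           # scalar: overwritten
    "resources": {"limits": {"memory": "4Gi"}},              # dict: merged recursively
    "pipelines": [{"name": "logging"}, {"name": "audit"}],   # list: deduplicated
}

merged = merge_data(original, override)
print(merged)
# {'replicas': 3,
#  'resources': {'limits': {'cpu': 1, 'memory': '4Gi'}},
#  'pipelines': [{'name': 'logging'}, {'name': 'audit'}]}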
@@ -30,8 +57,22 @@ def template_values(tenant_name, tenant_size, flavor, ingress, values_file):
     if flavor not in ["laas", "daas"]:
         print("Invalid flavor")
     else:
         # map "selected_values" to "mapping" in reference to the tenant size
         mapped_values = mapping[tenant_size]
+        # merge "kafka" entries
+        existing_values["kafka"] = merge_data(
+            existing_values.get("kafka", {}), mapped_values.get("kafka", {})
+        )
+        # merge "elasticsearch" entries
+        existing_values["elasticsearch"] = merge_data(
+            existing_values.get("elasticsearch", {}),
+            mapped_values.get("elasticsearch", {}),
+        )
+
+        # Merge the "logstash" entries
+        existing_values["logstash"] = merge_data(
+            existing_values.get("logstash", {}),
+            mapped_values.get("logstash", {}),
+        )
+
         # setting default values from defaults block
         existing_values["oauthProxy"]["clientId"] = tenant_name + "-logging-client"
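The effect per stack section: the size profile wins wherever it sets a value, while keys it does not touch survive from the tenant's existing values file, and `.get(..., {})` keeps the merge safe when either side is missing a section entirely. A sketch with stand-in values (the real `mapping` is loaded from the sizes YAML shown above):

# Stand-in values; the real "mapping" comes from the sizes YAML shown above.
existing_kafka = {"replicas": 5, "logging": {"enabled": True}}
size_kafka = {"replicas": 3, "resources": {"requests": {"cpu": 1}}}

merged = merge_data(existing_kafka, size_kafka)
# Size profile wins where it sets a value (replicas -> 3); keys it does not
# touch (logging.enabled) survive from the tenant's existing values file.
print(merged)
# {'replicas': 3, 'logging': {'enabled': True}, 'resources': {'requests': {'cpu': 1}}}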
@@ -42,18 +83,22 @@ def template_values(tenant_name, tenant_size, flavor, ingress, values_file):
             "elasticsearch"
         ]["version"]

+        existing_values["kafka"]["zookeeper"]["javaOpts"] = calculate_java_settings(
+            existing_values["kafka"]["zookeeper"]["resources"]["requests"]["memory"]
+        )
+
         # configure resource sizing of elasticsearch components
         existing_values["elasticsearch"]["config"]["flavor"] = flavor

-        existing_values["elasticsearch"]["data"] = mapped_values["elasticsearch"][
-            "data"
-        ]
-        existing_values["elasticsearch"]["coordinator"] = mapped_values[
-            "elasticsearch"
-        ]["coordinator"]
-        existing_values["elasticsearch"]["master"] = mapped_values["elasticsearch"][
-            "master"
-        ]
+        # existing_values["elasticsearch"]["data"] = mapped_values["elasticsearch"][
+        #     "data"
+        # ]
+        # existing_values["elasticsearch"]["coordinator"] = mapped_values[
+        #     "elasticsearch"
+        # ]["coordinator"]
+        # existing_values["elasticsearch"]["master"] = mapped_values["elasticsearch"][
+        #     "master"
+        # ]
         # configure kibana
         existing_values["kibana"]["image"]["version"] = mapping["defaults"][
             "kibana"
@@ -61,10 +106,15 @@ def template_values(tenant_name, tenant_size, flavor, ingress, values_file):

         # configure resource sizing of kafka components
         existing_values["kafka"]["resources"] = mapped_values["kafka"]["resources"]

         existing_values["kafka"]["javaOpts"]["heap"] = calculate_java_settings(
             mapped_values["kafka"]["resources"]["requests"]["memory"]
         )

+        existing_values["logstash"]["image"]["version"] = mapping["defaults"][
+            "logstash"
+        ]["version"]
+
+        # explicitly set ingress domain
+        existing_values["ingress"]["domain"] = ingress

@@ -80,17 +130,21 @@ def template_values(tenant_name, tenant_size, flavor, ingress, values_file):
             "bn-" + tenant_name + "-pki-INT-cert-signers"
         )

-        existing_values["logstash"]["pipeline"]["pipelines"][0][
-            "workers"
-        ] = mapped_values["pipelines"]["logging"]["workers"]
+        # existing_values["logstash"]["pipeline"]["pipelines"][0][
+        #     "workers"
+        # ] = mapped_values["logstash"]["pipelines"]["logging"]["workers"]

-        existing_values["logstash"]["pipeline"]["replicas"] = mapped_values[
-            "pipelines"
-        ]["replicas"]
+        # existing_values["logstash"]["pipeline"]["replicas"] = mapped_values[
+        #     "logstash"
+        # ]["pipelines"]["replicas"]

+        existing_values["logstash"]["ingest"]["javaOpts"] = calculate_java_settings(
+            existing_values["logstash"]["ingest"]["resources"]["requests"]["memory"]
+        )
+
         existing_values["kafka"]["topics"]["logging"]["partitions"] = (
-            mapped_values["pipelines"]["replicas"]
-            * mapped_values["pipelines"]["logging"]["workers"]
+            existing_values["logstash"]["pipelines"]["replicas"]
+            * existing_values["logstash"]["pipelines"]["logging"]["workers"]
         )

         # order the values in the "existing_values" dictionary alphabetically
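The partition count is derived rather than hard-coded: partitions = pipeline replicas × workers per pipeline, so every Logstash worker across all pipeline replicas can own one Kafka partition. Worked out with the values from the sizes YAML above:

# Partition math using the replicas/workers values from the sizes YAML above.
sizes = {"S": {"replicas": 2, "workers": 3}, "M": {"replicas": 3, "workers": 3}}
for name, s in sizes.items():
    print(name, s["replicas"] * s["workers"])  # S -> 6 partitions, M -> 9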