Compare commits

...

3 Commits

Author SHA1 Message Date
Karsten Gorskowski 15ecaf906d rewrite yaml merge 2023-12-06 23:39:12 +01:00
Karsten Gorskowski de6ceed232 enable logging per default 2023-12-04 16:44:05 +01:00
Karsten Gorskowski 4891693218 rewrite 2023-12-04 15:30:36 +01:00
7 changed files with 702 additions and 46 deletions

153
helm/mapping-values.yaml Normal file
View File

@ -0,0 +1,153 @@
---
# helm/mapping-values.yaml
# Maps tenant size classes to per-component resource settings; consumed by
# tenant.utils.template_values to merge into the template values.
# NOTE(review): indentation reconstructed from a flush-left dump — verify all
# nesting against the original file; in particular whether storage/storageClass
# belong under resources.requests (assumed here, matching template-values.yaml)
# or at component level.
# NOTE(review): the init CLI prompts for size S/M/L, but no "L" mapping exists
# here — confirm whether an "L" class should be added.
defaults:
  # Stack versions copied into image.version fields at templating time.
  elasticsearch:
    version: 8.11.1
  kibana:
    version: 8.11.1
  logstash:
    version: 8.11.1
S:
  elasticsearch:
    coordinator:
      # No dedicated coordinator nodes in the small size.
      dedicated: false
      replicas: 0
      resources:
        limits:
          cpu: 1
          memory: 2Gi
        requests:
          cpu: 1
          memory: 2Gi
    data:
      replicas: 4
      resources:
        limits:
          cpu: 2
          memory: 4Gi
        requests:
          cpu: 2
          memory: 4Gi
          storage: 50Gi
    master:
      dedicated: true
      replicas: 3
      resources:
        limits:
          cpu: 500m
          memory: 2Gi
        requests:
          cpu: 500m
          memory: 2Gi
  kafka:
    replicas: 3
    resources:
      limits:
        cpu: 1
        memory: 2Gi
      requests:
        cpu: 1
        memory: 2Gi
        storage: 30Gi
        storageClass: topolvm-provisioner
    topics:
      logging:
        partitions: 3
        replication: 2
        retentionBytes: "1000000000"
        retentionMs: "86400000"
        segmentBytes: "250000000"
        segmentMs: "3600000"
  logstash:
    ingest:
      replicas: 2
      resources:
        limits:
          cpu: 2
          memory: 4Gi
        requests:
          cpu: 2
          memory: 4Gi
    pipelines:
      replicas: 2
      logging:
        workers: 3
M:
  elasticsearch:
    coordinator:
      dedicated: true
      replicas: 3
      resources:
        limits:
          cpu: 1
          memory: 2Gi
        requests:
          cpu: 1
          memory: 2Gi
          storage: 1Gi
          storageClass: topolvm-provisioner
    data:
      replicas: 6
      resources:
        limits:
          cpu: 4
          memory: 8Gi
        requests:
          cpu: 4
          memory: 8Gi
          storage: 500Gi
          storageClass: topolvm-provisioner
    master:
      dedicated: true
      replicas: 3
      resources:
        limits:
          cpu: 1
          memory: 4Gi
        requests:
          cpu: 1
          memory: 4Gi
          storage: 1Gi
          storageClass: topolvm-provisioner
  kafka:
    replicas: 3
    resources:
      limits:
        cpu: 2
        memory: 4Gi
      requests:
        cpu: 2
        memory: 4Gi
        storage: 50Gi
        storageClass: topolvm-provisioner
    topics:
      logging:
        partitions: 3
        replication: 2
        retentionBytes: "1000000000"
        retentionMs: "86400000"
        segmentBytes: "250000000"
        segmentMs: "3600000"
  logstash:
    ingest:
      replicas: 3
      resources:
        limits:
          cpu: 4
          memory: 8Gi
        requests:
          cpu: 4
          memory: 8Gi
    pipelines:
      replicas: 3
      logging:
        workers: 3
      # NOTE(review): placement of this resources block is ambiguous in the
      # dump — could belong under "logging" instead; confirm against original.
      resources:
        limits:
          cpu: 4
          memory: 8Gi
        requests:
          cpu: 4
          memory: 8Gi

318
helm/template-values.yaml Normal file
View File

@ -0,0 +1,318 @@
# set standard values for new elastic tenant helm values
---
# NOTE(review): indentation reconstructed from a flush-left dump — verify
# nesting against the original file before use.
elasticsearch:
  data:
    resources:
      requests:
        storageClass: topolvm-provisioner
  master:
    resources:
      requests:
        storage: 1Gi
        storageClass: topolvm-provisioner
  config:
    # "placeholder" is overwritten with laas/daas by tenant.utils.template_values
    flavor: placeholder
    logging:
      enabled: true
  rbac:
    customUsers:
      logstash_writer:
        # NOTE(review): "email.enabled" nesting assumed — confirm schema
        email:
          enabled: true
        roles:
          - logstash_admin_user
          - logstash_internal
    # Role definitions follow the Elasticsearch security role JSON layout.
    userRoles:
      elastic-superuser:
        applications:
          - application: "*"
            privileges:
              - "*"
            resources:
              - "*"
        cluster:
          - all
        indices:
          - allow_restricted_indices: true
            names: "*"
            privileges:
              - all
        run_as:
          - "*"
      logstash_writer:
        applications: [ ]
        cluster:
          - manage_index_templates
          - monitor
          - manage_ilm
        indices:
          - field_security:
              grant:
                - "*"
            names:
              - pn-*
              - pits-*
              - test-*
              - dead-letter-*
              - customer-*
            privileges:
              - write
              - create
              - create_index
              - manage
              - manage_ilm
        metadata: { }
        run_as: [ ]
        transient_metadata:
          enabled: true
      prometheus_reader:
        applications: [ ]
        cluster:
          - cluster:monitor/prometheus/metrics
          - cluster:monitor/health
          - cluster:monitor/nodes/stats
          - cluster:monitor/state
        indices:
          - names: "*"
            privileges:
              - monitor
  xpack:
    license:
      self_generated_type: basic
    security:
      # Login banner shown before access (GDPR notice, German).
      accessAgreement: |
        Sie sind dabei, auf potentiell durch die DSGVO geschützte Daten
        zuzugreifen.
        Durch Einloggen in dieses System bestätigen Sie, Kenntnis genommen
        zu haben, dass Ihre Nutzung zwecks Auditierung protokolliert wird.
        Sie sind dem Datengeheimnis iSd Art. 29 DSGVO verpflichtet.
        Unautorisierte Nutzung des Systems ist strikt untersagt und kann
        rechtliche Konsequenzen zur Folge haben.
        Vor einer dienstlich notwendigen Übermittlung von Log-Daten mit
        personenbezogenen Daten ist eine (Pseudo-)Anonymisierung
        durchzuführen. Hierzu ist eine Datenschutzfolgeabschätzung beim
        Datenschutzbeauftragten ITZ vorzulegen.
        Für nicht anonymisierte Daten ist ein solcher Versand nur zulässig,
        wenn mit dem Hersteller eine Auftragsdatenvereinbarung nach Art. 28
        DSGVO oder § 62 BDSG, bei Auslandsbezug gem. Art. 44 - 46 DSGVO
        und/oder § 79 - 83 BDSG, abgeschlossen wurde. Zuwiderhandlung stellt
        regelmäßig eine Datenschutzverletzung dar.
      # Security audit logging is off by default.
      audit:
        enabled: false
  # Default ILM policy: keep logging indices hot, delete after 7 days.
  ilmPolicies:
    logging-std-7d:
      policy:
        phases:
          delete:
            actions:
              delete:
                delete_searchable_snapshot: true
            min_age: 7d
          hot:
            actions: { }
            min_age: 0ms
  image:
    # "placeholder" replaced with defaults.elasticsearch.version at templating time
    version: placeholder
  indexTemplates:
    logging:
      index_patterns:
        - logging-*
      template:
        settings:
          index:
            lifecycle:
              name: logging-std-7d
            number_of_replicas: 1
            number_of_shards: 5
            unassigned:
              node_left:
                delayed_timeout: 15m
  ml:
    dedicated: false
epr:
  enabled: false
# Per-service ingress toggles; ingress.domain is injected at templating time.
ingress:
  elasticsearch:
    enabled: false
    inspect: false
  epr:
    enabled: false
  kibana:
    enabled: true
    inspect: false
  kowl:
    enabled: false
  logstash:
    inspect: false
kafka:
  config:
    logging:
      enabled: true
  javaOpts:
    # NOTE(review): "-Xms -Xms" looks like a typo for "-Xms -Xmx", but this is
    # a placeholder — template_values() overwrites kafka.javaOpts.heap via
    # calculate_java_settings(); confirm it is never consumed as-is.
    heap: -Xms -Xms
  replicas: 3
  # TODO: should be removed and replaced with "normal" kafka img when all tenants run `self-managed/kafka`
  setup:
    image:
      name: laas-craas.bcsv.cloud.itzbund.net/laas/self-managed/kafka
      version: 7.4.0-1
  topics:
    # TODO: 1) retention time of 7d is sufficient for §100/§5 tenants
    # TODO: 2) decide whether topics should be created here or in Logstash
    logging:
      # partitions is recomputed at templating time from logstash workers.
      partitions: 3
      replication: 2
      # NOTICE: max capacity: repl * part * rBytes + influx(gcFreq = 5min)
      retentionBytes: "1400000000"
      retentionMs: "86400000"
      segmentBytes: "500000000"
      segmentMs: "3600000"
  zookeeper:
    config:
      logging:
        enabled: true
    javaOpts:
      # Replaced by calculate_java_settings() at templating time.
      heapSize: 500m
    replicas: 3
    resources:
      limits:
        cpu: 300m
        memory: 1000Mi
      requests:
        cpu: 300m
        memory: 1000Mi
        storage: 1Gi
        storageClass: topolvm-provisioner
kibana:
  apiVersion: post710
  config:
    logging:
      enabled: true
    monitoring:
      enabled: true
  rbac:
    # Kibana role in the Kibana role-API layout (elasticsearch + kibana parts).
    userRoles:
      kibana-user:
        elasticsearch:
          cluster: [ ]
          indices:
            - allow_restricted_indices: false
              names:
                - logging-*
              privileges:
                - read
                - monitor
                - view_index_metadata
          run_as: [ ]
        kibana:
          - base: [ ]
            feature:
              dashboard:
                - all
              discover:
                - all
              savedObjectsManagement:
                - all
              savedObjectsTagging:
                - all
              visualize:
                - all
  xpack:
    security:
      cookieName: sid
      enabled: true
  image:
    # "placeholder" replaced with defaults.kibana.version at templating time
    version: placeholder
  replicas: 1
  resources:
    limits:
      cpu: 500m
      memory: 2Gi
    requests:
      cpu: 500m
      memory: 2Gi
logstash:
  config:
    logging:
      enabled: true
  gitSync:
    # TODO: use either branch name for floating versions,
    # TODO: tag/commit as fixed version equivalents
    branch: itzb/release
    username: tenant-reader
  image:
    # Overwritten with defaults.logstash.version at templating time.
    version: 8.7.1
  ingest:
    codec: json
    enabled: true
    hostNetwork: false
    inProto: http
    javaOpts:
      # Replaced by calculate_java_settings() at templating time.
      heap: -Xms2000m -Xmx2000m
    nodePort: null
    nodeSelectors: null
    port: 8443
    replicas: 2
  pipeline:
    config:
      deadLetterQueue: false
      enabled: true
    javaOpts:
      heap: -Xms2000m -Xmx2000m
    memcached:
      enabled: false
    pipelines:
      - customer: laas
        id: simple
        # NOTE(review): bare key parses as null — confirm whether an empty
        # mapping/list was intended here.
        injections:
        inputThreads: 3
        name: simple
        workers: 3
        pseudonymize: false
    replicas: 3
    resources:
      limits:
        cpu: 2
        memory: 4Gi
      requests:
        cpu: 2
        memory: 4Gi
  syncPipelines: false
# oauth2-proxy in front of Kibana; clientId and issuerUrl.realmPath are
# injected per tenant at templating time.
oauthProxy:
  allowedGroups: [ ]
  emailClaim: email
  groupsClaim: groups
  image:
    name: laas-craas.bcsv.cloud.itzbund.net/laas/self-managed/oauth2-proxy
    version: v7.4.0
  issuerUrl:
    host: idp.laas.cloud.itz.in.bund.de
  request_logging: true
  scope:
    - email
    - groups
    - openid
    - profile
tls:
  externalCertificates:
    kibana:
      tls_crt: |
        -----BEGIN CERTIFICATE-----
        # Insert Certificate here when obtained from DTRust
        -----END CERTIFICATE-----
      tls_key: placeholderOverwriteFromSecret
  # issuer placeholders are filled per tenant by template_values()
  # (name, auth.path, auth.role, secret.role).
  issuer:
    auth:
      path: placeholder
      role: placeholder
    name: placeholder
    secret:
      role: placeholder
    serviceAccount: default
  # NOTE(review): nesting of "organizations" is ambiguous in the dump — placed
  # under tls here; confirm against the original file.
  organizations:
    - ITZB

View File

@ -1,9 +1,10 @@
# tenant/commands/init.py
import os
import shutil
from tenant.utils.common import get_secure_password, generate_key, generate_csr
from tenant.utils.terraform import create_tfvars_file
import tenant.utils.generatecsr
import tenant.utils.generate_secrets_file
from tenant.utils.template_values import template_values
def add_subparser(subparsers):
@ -27,69 +28,67 @@ def execute(args):
ingress = input(
"Please enter the FQDN of the Kibana ingress, without the 'kibana' prefix: "
)
tenant_size = input("Please enter the desired size of the tenant (S/M/L): ").upper()
target_directory = args.target
tenant_directory = os.path.join(target_directory, tenant_name)
# Check if the tenant directory already exists
if os.path.exists(tenant_directory):
print(
f"Error: Tenant directory '{tenant_directory}' already exists. Init aborted."
)
return
# Prompt the user for the GitSync password securely
git_sync_password = get_secure_password(
prompt="Please insert predefined password for GitSync: "
)
# define and create necessary folder structure
terraform_directory = os.path.join(tenant_directory, "00-terraform")
certificates_directory = os.path.join(tenant_directory, "01-certificates")
kubernetes_directory = os.path.join(tenant_directory, "02-kubernetes")
helm_directory = os.path.join(tenant_directory, "03-helm")
os.makedirs(certificates_directory)
os.makedirs(terraform_directory)
os.makedirs(kubernetes_directory)
os.makedirs(helm_directory)
values_file = os.path.join(helm_directory, tenant_name + ".values.yaml")
# generate key and csr if not exist
keyfile = os.path.join(certificates_directory, ingress + ".key")
csrfile = os.path.join(certificates_directory, ingress + ".csr")
if os.path.exists(keyfile):
print("Keyfile file already exists")
print(keyfile)
exit(1)
else:
generate_key(keyfile)
generate_csr(csrfile, ingress)
# Check if the tenant directory already exists
if os.path.exists(tenant_directory):
user_input = input(
f"Attention: Tenant directory '{tenant_directory}' already exists. Do you want to continue? (y/n): "
)
if user_input.lower() == "y":
user_input = input(
"Should the directory be deleted? Otherwise we will use existing values to resize tenant. (y/n): "
)
if user_input.lower() != "y":
if os.path.exists(values_file):
template_values(
tenant_name, tenant_size, "laas", ingress, values_file
)
print(f"templated new values file in '{values_file}'")
exit(0)
else:
shutil.rmtree(tenant_directory)
else:
exit(1)
# Prompt the user for the GitSync password securely
git_sync_password = get_secure_password(
prompt="Please insert predefined password for GitSync: "
)
user_confirmation = input(f"Do you want a CSR/Keyfile to be created (y/n): ")
if user_confirmation == "y":
if os.path.exists(keyfile):
print("Keyfile file already exists")
print(keyfile)
exit(1)
else:
generate_key(keyfile)
generate_csr(csrfile, ingress)
# Create symbolic links for *.tf files in tenant directory
source_tf_dir = os.path.join(target_directory, "terraform")
target_tf_dir = terraform_directory
for filename in os.listdir(source_tf_dir):
if filename.endswith(".tf"):
source_path = os.path.join(source_tf_dir, filename)
target_path = os.path.join(target_tf_dir, filename)
# Ensure the source path is correct before creating the symbolic link
if os.path.exists(source_path):
relative_path = os.path.relpath(source_path, target_tf_dir)
os.symlink(relative_path, target_path)
else:
print(
f"Warning: Source file '{filename}' not found in '{source_tf_dir}'."
)
variables = {
"tenant_name": tenant_name,
}
tfvars_filepath = os.path.join(terraform_directory, tenant_name + ".tfvars")
create_tfvars_file(variables, tfvars_filepath)
if os.path.exists(values_file):
print("Values file already exists")
print(values_file)
else:
template_values(tenant_name, "S", "laas", ingress, values_file)
# generate secrets file if not already exist, not yet encrypted on the fly
secrets_file = os.path.join(helm_directory, tenant_name + ".secrets.yaml")

View File

@ -1,18 +1,31 @@
# tenant/commands/resize.py
from tenant.utils.common import get_secure_password
import os
from tenant.utils.template_values import template_values
def add_subparser(subparsers):
resize_parser = subparsers.add_parser(
"resize", help="Resize resources for a tenant"
)
resize_parser.add_argument("tenant_name", help="Name of the tenant")
resize_parser.add_argument("--new-size", type=int, help="New size for resources")
resize_parser.add_argument("--new-size", help="New size for resources")
resize_parser.add_argument(
"--target", default=".", help="Target directory (default: current directory)"
)
def execute(args):
target_directory = args.target
tenant_name = args.tenant_name
values_file = os.path.join(helm_directory, tenant_name + ".values.yaml")
new_size = args.new_size
tenant_directory = os.path.join(target_directory, tenant_name)
helm_directory = os.path.join(tenant_directory, "03-helm")
values_file = os.path.join(helm_directory, tenant_name + ".values.yaml")
template_values(tenant_name, new_size, "laas", values_file)
# Use the shared function
# shared_function()

View File

@ -1,5 +1,6 @@
# tenant/utils/common.py
import getpass
import re
from OpenSSL import crypto
TYPE_RSA = crypto.TYPE_RSA

View File

@ -2,6 +2,7 @@ import random
import os
import ruamel.yaml
import string
import subprocess
yaml = ruamel.yaml.YAML()
@ -65,3 +66,16 @@ def generate_secrets_file(secrets_file, git_sync_password):
},
file,
)
try:
subprocess.run(
[
"sops",
"-e",
"--in-place",
"--age=age1gpnes7752n47qutltpy0trtz0wvdgtuudluuxde6efjysmrw03sqlp34z4",
secrets_file,
],
check=True,
)
except subprocess.CalledProcessError as e:
print(f"Error: {e.returncode}\n{e.stderr}")

View File

@ -0,0 +1,158 @@
import ruamel.yaml
import os
import sys
from tenant.utils.common import calculate_java_settings
yaml = ruamel.yaml.YAML()
def merge_data(original_data, additional_data):
    """Recursively merge ``additional_data`` into a copy of ``original_data``.

    Nested dicts are merged key by key; lists are concatenated, except under
    the ``"pipelines"`` key, where items already present are not appended
    again.  Scalar (or type-mismatched) values from ``additional_data``
    overwrite the originals.  Neither input mapping is mutated.

    :param original_data: base mapping (e.g. values loaded from the template)
    :param additional_data: overriding mapping (e.g. size-specific values)
    :return: a new dict containing the merged content
    """
    merged_data = original_data.copy()
    for key, value in additional_data.items():
        if (
            key in merged_data
            and isinstance(merged_data[key], dict)
            and isinstance(value, dict)
        ):
            # Recursively merge dictionaries
            merged_data[key] = merge_data(merged_data[key], value)
        elif (
            key in merged_data
            and isinstance(merged_data[key], list)
            and isinstance(value, list)
        ):
            # BUGFIX: .copy() above is shallow, so extending merged_data[key]
            # in place would also mutate the list inside original_data.
            # Work on a fresh list instead.
            combined = list(merged_data[key])
            if key == "pipelines":
                # Handle the "pipelines" key to ensure uniqueness
                for item in value:
                    if item not in combined:
                        combined.append(item)
            else:
                combined.extend(value)
            merged_data[key] = combined
        else:
            # Overwrite or add the key-value pair
            merged_data[key] = value
    return merged_data
def recursively_sort_dict(input_dict):
    """Return a new dict with keys sorted alphabetically at every nesting level.

    Non-dict values (including lists) are carried over unchanged.

    :param input_dict: mapping to sort
    :return: a plain dict with recursively sorted keys
    """
    return {
        key: recursively_sort_dict(value) if isinstance(value, dict) else value
        for key, value in sorted(input_dict.items())
    }
def template_values(tenant_name, tenant_size, flavor, ingress, values_file):
    """Render a tenant helm values file from the template and size mapping.

    Loads ``helm/template-values.yaml`` (base values) and
    ``helm/mapping-values.yaml`` (per-size overrides) relative to the current
    working directory, merges the size-specific elasticsearch/kafka/logstash
    sections into the template, fills in tenant-specific settings (oauth
    client, ingress domain, TLS issuer, image versions, java heap options,
    kafka partition count) and writes the alphabetically sorted result to
    ``values_file``.  On an unknown ``tenant_size`` or ``flavor`` a message is
    printed and nothing is written.

    :param tenant_name: tenant name, used for oauth and TLS issuer naming
    :param tenant_size: size-class key in mapping-values.yaml (e.g. "S", "M")
    :param flavor: deployment flavor, must be "laas" or "daas"
    :param ingress: ingress domain for the tenant
    :param values_file: path of the values file to write
    """
    print(os.getcwd())
    with open("./helm/template-values.yaml", "r", encoding="utf-8") as file:
        existing_values = yaml.load(file)
    # BUGFIX: encoding was missing here, unlike the other open() calls.
    with open("./helm/mapping-values.yaml", "r", encoding="utf-8") as file:
        mapping = yaml.load(file)
    if tenant_size in mapping:
        if flavor not in ["laas", "daas"]:
            print("Invalid flavor")
        else:
            mapped_values = mapping[tenant_size]
            # Merge the size-specific component settings into the template.
            # This also covers the per-component elasticsearch/logstash sizing
            # that used to be assigned individually.
            existing_values["kafka"] = merge_data(
                existing_values.get("kafka", {}), mapped_values.get("kafka", {})
            )
            existing_values["elasticsearch"] = merge_data(
                existing_values.get("elasticsearch", {}),
                mapped_values.get("elasticsearch", {}),
            )
            existing_values["logstash"] = merge_data(
                existing_values.get("logstash", {}),
                mapped_values.get("logstash", {}),
            )
            # tenant-specific oauth settings
            existing_values["oauthProxy"]["clientId"] = tenant_name + "-logging-client"
            existing_values["oauthProxy"]["issuerUrl"]["realmPath"] = (
                "/realms/" + tenant_name
            )
            # stack image versions come from the "defaults" block of the mapping
            existing_values["elasticsearch"]["image"]["version"] = mapping["defaults"][
                "elasticsearch"
            ]["version"]
            existing_values["kafka"]["zookeeper"]["javaOpts"] = calculate_java_settings(
                existing_values["kafka"]["zookeeper"]["resources"]["requests"]["memory"]
            )
            # configure resource sizing of elasticsearch components
            existing_values["elasticsearch"]["config"]["flavor"] = flavor
            # configure kibana
            existing_values["kibana"]["image"]["version"] = mapping["defaults"][
                "kibana"
            ]["version"]
            # configure resource sizing of kafka components; heap is derived
            # from the kafka memory request
            existing_values["kafka"]["resources"] = mapped_values["kafka"]["resources"]
            existing_values["kafka"]["javaOpts"]["heap"] = calculate_java_settings(
                mapped_values["kafka"]["resources"]["requests"]["memory"]
            )
            existing_values["logstash"]["image"]["version"] = mapping["defaults"][
                "logstash"
            ]["version"]
            # explicitly set ingress domain
            existing_values["ingress"]["domain"] = ingress
            # template tls configuration
            existing_values["tls"]["issuer"]["name"] = tenant_name + "-issuer"
            existing_values["tls"]["issuer"]["auth"]["path"] = (
                "/v1/auth/bn-" + tenant_name + "-cert-manager"
            )
            existing_values["tls"]["issuer"]["auth"]["role"] = (
                "bn-" + tenant_name + "-pki-INT-cert-signers"
            )
            existing_values["tls"]["issuer"]["secret"]["role"] = (
                "bn-" + tenant_name + "-pki-INT-cert-signers"
            )
            # ingest heap is derived from the ingest memory request (supplied
            # by the size mapping merged above)
            existing_values["logstash"]["ingest"]["javaOpts"] = calculate_java_settings(
                existing_values["logstash"]["ingest"]["resources"]["requests"]["memory"]
            )
            # one kafka partition per logstash worker across all replicas
            existing_values["kafka"]["topics"]["logging"]["partitions"] = (
                existing_values["logstash"]["pipelines"]["replicas"]
                * existing_values["logstash"]["pipelines"]["logging"]["workers"]
            )
            # order the values in the "existing_values" dictionary alphabetically
            existing_values = recursively_sort_dict(existing_values)
            # write ordered values to the tenant values file
            with open(values_file, "w", encoding="utf-8") as new_file:
                yaml.dump(existing_values, new_file)
    else:
        print("Invalid tenant sizing")