Compare commits

2 Commits: main...rework

Author               SHA1          Message                      Date
Karsten Gorskowski   de6ceed232    enable logging per default   2023-12-04 16:44:05 +01:00
Karsten Gorskowski   4891693218    rewrite                      2023-12-04 15:30:36 +01:00
7 changed files with 597 additions and 14 deletions

helm/mapping-values.yaml (new file, 128 lines added)

@@ -0,0 +1,128 @@
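# Tenant-size presets consumed by tenant/utils/template_values.py: the
# "defaults" block pins the Elasticsearch and Kibana versions, while "S" and
# "M" hold the per-size resource settings.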
defaults:
elasticsearch:
version: 8.11.1
kibana:
version: 8.11.1
S:
elasticsearch:
coordinator:
dedicated: false
replicas: 0
resources:
limits:
cpu: 1
memory: 2Gi
requests:
cpu: 1
memory: 2Gi
storage: 1Gi
storageClass: topolvm-provisioner
data:
replicas: 3
resources:
limits:
cpu: 1
memory: 4Gi
requests:
cpu: 1
memory: 4Gi
storage: 30Gi
storageClass: topolvm-provisioner
master:
dedicated: true
replicas: 3
resources:
limits:
cpu: 500m
memory: 2Gi
requests:
cpu: 500m
memory: 2Gi
storage: 1Gi
storageClass: topolvm-provisioner
kafka:
replicas: 3
resources:
limits:
cpu: 750m
memory: 2Gi
requests:
cpu: 750m
memory: 2Gi
storage: 30Gi
storageClass: topolvm-provisioner
topics:
logging:
partitions: 3
replication: 2
retentionBytes: "1000000000"
retentionMs: "86400000"
segmentBytes: "250000000"
segmentMs: "3600000"
pipelines:
replicas: 2
logging:
name: simple
workers: 3
M:
elasticsearch:
coordinator:
dedicated: true
replicas: 3
resources:
limits:
cpu: 1
memory: 2Gi
requests:
cpu: 1
memory: 2Gi
storage: 1Gi
storageClass: topolvm-provisioner
data:
replicas: 5
resources:
limits:
cpu: 2
memory: 4Gi
requests:
cpu: 2
memory: 4Gi
storage: 50Gi
storageClass: topolvm-provisioner
master:
dedicated: true
replicas: 3
resources:
limits:
cpu: 1
memory: 4Gi
requests:
cpu: 1
memory: 4Gi
storage: 1Gi
storageClass: topolvm-provisioner
kafka:
replicas: 3
resources:
limits:
cpu: 1
memory: 4Gi
requests:
cpu: 1
memory: 4Gi
storage: 50Gi
storageClass: topolvm-provisioner
topics:
logging:
partitions: 3
replication: 2
retentionBytes: "1000000000"
retentionMs: "86400000"
segmentBytes: "250000000"
segmentMs: "3600000"
pipelines:
replicas: 3
logging:
name: simple
workers: 3

helm/template-values.yaml (new file, 316 lines added)

@@ -0,0 +1,316 @@
# Standard values for a new Elastic tenant's Helm values file
---
elasticsearch:
config:
flavor: placeholder
logging:
enabled: true
rbac:
customUsers:
logstash_writer:
email:
enabled: true
roles:
- logstash_admin_user
- logstash_internal
userRoles:
elastic-superuser:
applications:
- application: "*"
privileges:
- "*"
resources:
- "*"
cluster:
- all
indices:
- allow_restricted_indices: true
names: "*"
privileges:
- all
run_as:
- "*"
logstash_writer:
applications: [ ]
cluster:
- manage_index_templates
- monitor
- manage_ilm
indices:
- field_security:
grant:
- "*"
names:
- pn-*
- pits-*
- test-*
- dead-letter-*
- customer-*
privileges:
- write
- create
- create_index
- manage
- manage_ilm
metadata: { }
run_as: [ ]
transient_metadata:
enabled: true
prometheus_reader:
applications: [ ]
cluster:
- cluster:monitor/prometheus/metrics
- cluster:monitor/health
- cluster:monitor/nodes/stats
- cluster:monitor/state
indices:
- names: "*"
privileges:
- monitor
xpack:
license:
self_generated_type: basic
security:
accessAgreement: |
Sie sind dabei, auf potentiell durch die DSGVO geschützte Daten
zuzugreifen.
Durch Einloggen in dieses System bestätigen Sie, Kenntnis genommen
zu haben, dass ihre Nutzung zwecks Auditierung protokolliert wird.
Sie sind dem Datengeheimnis iSd Art. 29 DSGVO verpflichtet.
Unautorisierte Nutzung des Systems ist strikt untersagt und kann
rechtliche Konsequenzen zur Folge haben.
Vor einer dienstlich notwendigen Übermittlung von Log-Daten mit
personenbezogenen Daten ist eine (Pseudo-)Anonymisierung
durchzuführen. Hierzu ist eine Datenschutzfolgeabschätzung beim
Datenschutzbeauftragten ITZ vorzulegen.
Für nicht anonymisierte Daten ist ein solcher Versand nur zulässig,
wenn mit dem Hersteller eine Auftragsdatenvereinbarung nach Art. 28
DSGVO oder § 62 BDSG, bei Auslandsbezug gem. Art. 44 - 46 DSGVO
und/oder § 79 - 83 BDSG, abgeschlossen wurde. Zuwiderhandlung stellt
regelmäßig eine Datenschutzverletzung dar.
audit:
enabled: false
ilmPolicies:
logging-std-7d:
policy:
phases:
delete:
actions:
delete:
delete_searchable_snapshot: true
min_age: 7d
hot:
actions: { }
min_age: 0ms
image:
version: placeholder
indexTemplates:
logging:
index_patterns:
- logging-*
template:
settings:
index:
lifecycle:
name: logging-std-7d
number_of_replicas: 1
number_of_shards: 5
unassigned:
node_left:
delayed_timeout: 15m
ml:
dedicated: false
epr:
enabled: false
ingress:
elasticsearch:
enabled: false
inspect: false
epr:
enabled: false
kibana:
enabled: true
inspect: false
kowl:
enabled: false
logstash:
inspect: false
kafka:
config:
logging:
enabled: true
javaOpts:
heap: -Xmx1g -Xms1g
replicas: 3
# TODO: should be removed and replaced with "normal" kafka img when all tenants run `self-managed/kafka`
setup:
image:
name: laas-craas.bcsv.cloud.itzbund.net/laas/self-managed/kafka
version: 7.4.0-1
topics:
# TODO: 1) retention time of 7d is sufficient for §100/§5 tenants
# TODO: 2) decide whether topics should be created here or in Logstash
logging:
partitions: 3
replication: 2
# NOTICE: max capacity: repl * part * rBytes + influx(gcFreq = 5min)
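      # e.g. with this topic's values (assumed reading of the formula above):
      #   2 (replication) * 3 (partitions) * 1.4 GB (retentionBytes) ≈ 8.4 GB,
      #   plus whatever arrives within the 5 min cleanup interval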
retentionBytes: "1400000000"
retentionMs: "86400000"
segmentBytes: "500000000"
segmentMs: "3600000"
zookeeper:
config:
logging:
enabled: true
javaOpts:
heapSize: 500m
replicas: 3
resources:
limits:
cpu: 300m
memory: 1000Mi
requests:
cpu: 300m
memory: 1000Mi
storage: 1Gi
storageClass: topolvm-provisioner
kibana:
apiVersion: post710
config:
logging:
enabled: true
monitoring:
enabled: true
rbac:
userRoles:
kibana-user:
elasticsearch:
cluster: [ ]
indices:
- allow_restricted_indices: false
names:
- logging-*
privileges:
- read
- monitor
- view_index_metadata
run_as: [ ]
kibana:
- base: [ ]
feature:
dashboard:
- all
discover:
- all
savedObjectsManagement:
- all
savedObjectsTagging:
- all
visualize:
- all
xpack:
security:
cookieName: sid
enabled: true
image:
version: placeholder
replicas: 1
resources:
limits:
cpu: 500m
memory: 2Gi
requests:
cpu: 500m
memory: 2Gi
logstash:
config:
logging:
enabled: true
gitSync:
# TODO: use either branch name for floating versions,
# TODO: tag/commit as fixed version equivalents
branch: itzb/release
username: tenant-reader
image:
version: 8.7.1
ingest:
codec: json
enabled: true
hostNetwork: false
inProto: http
javaOpts:
heap: -Xms2000m -Xmx2000m
nodePort: null
nodeSelectors: null
port: 8443
replicas: 2
resources:
limits:
cpu: 2
memory: 4Gi
requests:
cpu: 2
memory: 4Gi
pipeline:
config:
deadLetterQueue: false
enabled: true
javaOpts:
heap: -Xms2000m -Xmx2000m
memcached:
enabled: false
pipelines:
- customer: laas
id: simple
injections:
inputThreads: 3
name: simple
workers: 3
pseudonymize: false
replicas: 3
resources:
limits:
cpu: 2
memory: 4Gi
requests:
cpu: 2
memory: 4Gi
syncPipelines: false
oauthProxy:
allowedGroups: [ ]
emailClaim: email
groupsClaim: groups
image:
name: laas-craas.bcsv.cloud.itzbund.net/laas/self-managed/oauth2-proxy
version: v7.4.0
issuerUrl:
host: idp.laas.cloud.itz.in.bund.de
request_logging: true
scope:
- email
- groups
- openid
- profile
tls:
externalCertificates:
kibana:
tls_crt: |
-----BEGIN CERTIFICATE-----
# Insert Certificate here when obtained from DTRust
-----END CERTIFICATE-----
tls_key: placeholderOverwriteFromSecret
issuer:
auth:
path: placeholder
role: placeholder
name: placeholder
secret:
role: placeholder
serviceAccount: default
organizations:
- ITZB

Changed file under tenant/commands/ (file name not captured)

@@ -2,8 +2,8 @@
import os
from tenant.utils.common import get_secure_password, generate_key, generate_csr
from tenant.utils.terraform import create_tfvars_file
import tenant.utils.generatecsr
import tenant.utils.generate_secrets_file
from tenant.utils.template_values import template_values
def add_subparser(subparsers):
@@ -71,18 +71,18 @@ def execute(args):
source_tf_dir = os.path.join(target_directory, "terraform")
target_tf_dir = terraform_directory
for filename in os.listdir(source_tf_dir):
if filename.endswith(".tf"):
source_path = os.path.join(source_tf_dir, filename)
target_path = os.path.join(target_tf_dir, filename)
# Ensure the source path is correct before creating the symbolic link
if os.path.exists(source_path):
relative_path = os.path.relpath(source_path, target_tf_dir)
os.symlink(relative_path, target_path)
else:
print(
f"Warning: Source file '{filename}' not found in '{source_tf_dir}'."
)
# for filename in os.listdir(source_tf_dir):
# if filename.endswith(".tf"):
# source_path = os.path.join(source_tf_dir, filename)
# target_path = os.path.join(target_tf_dir, filename)
# # Ensure the source path is correct before creating the symbolic link
# if os.path.exists(source_path):
# relative_path = os.path.relpath(source_path, target_tf_dir)
# os.symlink(relative_path, target_path)
# else:
# print(
# f"Warning: Source file '{filename}' not found in '{source_tf_dir}'."
# )
variables = {
"tenant_name": tenant_name,
@@ -91,6 +91,13 @@ def execute(args):
tfvars_filepath = os.path.join(terraform_directory, tenant_name + ".tfvars")
create_tfvars_file(variables, tfvars_filepath)
values_file = os.path.join(helm_directory, tenant_name + ".values.yaml")
if os.path.exists(values_file):
print("Values file already exists")
print(values_file)
else:
template_values(tenant_name, "S", "laas", ingress, values_file)
# generate secrets file if not already exist, not yet encrypted on the fly
secrets_file = os.path.join(helm_directory, tenant_name + ".secrets.yaml")
if os.path.exists(secrets_file):

tenant/commands/resize.py

@@ -1,18 +1,31 @@
# tenant/commands/resize.py
from tenant.utils.common import get_secure_password
import os
from tenant.utils.template_values import template_values
def add_subparser(subparsers):
resize_parser = subparsers.add_parser(
"resize", help="Resize resources for a tenant"
)
resize_parser.add_argument("tenant_name", help="Name of the tenant")
resize_parser.add_argument("--new-size", type=int, help="New size for resources")
resize_parser.add_argument("--new-size", help="New size for resources")
resize_parser.add_argument(
"--target", default=".", help="Target directory (default: current directory)"
)
def execute(args):
ingress = input(
"Please enter the FQDN of the Kibana ingress, without the 'kibana' prefix: "
)
target_directory = args.target
tenant_name = args.tenant_name
new_size = args.new_size
tenant_directory = os.path.join(target_directory, tenant_name)
helm_directory = os.path.join(tenant_directory, "03-helm")
values_file = os.path.join(helm_directory, tenant_name + ".values.yaml")
template_values(tenant_name, new_size, "laas", ingress, values_file)
# Use the shared function
# shared_function()
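
For orientation, a minimal sketch of how this subcommand could be wired up and invoked, assuming a top-level argparse entry point named "tenant" that is not part of this diff:

import argparse
from tenant.commands import resize  # the module shown above

parser = argparse.ArgumentParser(prog="tenant")
subparsers = parser.add_subparsers(dest="command")
resize.add_subparser(subparsers)

# equivalent to: tenant resize acme --new-size M --target ./tenants
args = parser.parse_args(["resize", "acme", "--new-size", "M", "--target", "./tenants"])
resize.execute(args)  # prompts for the Kibana ingress FQDN, then templates acme.values.yaml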

tenant/utils/common.py

@@ -1,5 +1,6 @@
# tenant/utils/common.py
import getpass
import re
from OpenSSL import crypto
TYPE_RSA = crypto.TYPE_RSA

tenant/utils/generate_secrets_file.py

@@ -2,6 +2,7 @@ import random
import os
import ruamel.yaml
import string
import subprocess
yaml = ruamel.yaml.YAML()
@@ -65,3 +66,16 @@ def generate_secrets_file(secrets_file, git_sync_password):
},
file,
)
    try:
        subprocess.run(
            [
                "sops",
                "-e",
                "--in-place",
                "--hc-vault-transit=https://vault.laas.cloud.itz.in.bund.de/v1/sops/key/laas-bn-infra",
                secrets_file,
            ],
            check=True,
            capture_output=True,
            text=True,
        )
    except subprocess.CalledProcessError as e:
        # stderr is captured above so it can be shown on failure
        print(f"Error: sops encryption failed (exit {e.returncode})\n{e.stderr}")

tenant/utils/template_values.py (new file)

@@ -0,0 +1,104 @@
import ruamel.yaml
import os
from tenant.utils.common import calculate_java_settings
yaml = ruamel.yaml.YAML()
def recursively_sort_dict(input_dict):
sorted_dict = dict(sorted(input_dict.items()))
for key, value in sorted_dict.items():
if isinstance(value, dict):
sorted_dict[key] = recursively_sort_dict(value)
return sorted_dict
def template_values(tenant_name, tenant_size, flavor, ingress, values_file):
print(os.getcwd())
with open("./helm/template-values.yaml", "r", encoding="utf-8") as file:
existing_values = yaml.load(file)
with open("./helm/mapping-values.yaml", "r") as file:
mapping = yaml.load(file)
if tenant_size in mapping:
if flavor not in ["laas", "daas"]:
print("Invalid flavor")
else:
# map "selected_values" to "mapping" in reference to the tenant size
mapped_values = mapping[tenant_size]
# set tenant-specific OAuth proxy values, then pull versions from the defaults block
existing_values["oauthProxy"]["clientId"] = tenant_name + "-logging-client"
existing_values["oauthProxy"]["issuerUrl"]["realmPath"] = (
"/realms/" + tenant_name
)
existing_values["elasticsearch"]["image"]["version"] = mapping["defaults"][
"elasticsearch"
]["version"]
# configure resource sizing of elasticsearch components
existing_values["elasticsearch"]["config"]["flavor"] = flavor
existing_values["elasticsearch"]["data"] = mapped_values["elasticsearch"][
"data"
]
existing_values["elasticsearch"]["coordinator"] = mapped_values[
"elasticsearch"
]["coordinator"]
existing_values["elasticsearch"]["master"] = mapped_values["elasticsearch"][
"master"
]
# configure kibana
existing_values["kibana"]["image"]["version"] = mapping["defaults"][
"kibana"
]["version"]
# configure resource sizing of kafka components
existing_values["kafka"]["resources"] = mapped_values["kafka"]["resources"]
existing_values["kafka"]["javaOpts"]["heap"] = calculate_java_settings(
mapped_values["kafka"]["resources"]["requests"]["memory"]
)
# explicitly set ingress domain
existing_values["ingress"]["domain"] = ingress
# template tls configuration
existing_values["tls"]["issuer"]["name"] = tenant_name + "-issuer"
existing_values["tls"]["issuer"]["auth"]["path"] = (
"/v1/auth/bn-" + tenant_name + "-cert-manager"
)
existing_values["tls"]["issuer"]["auth"]["role"] = (
"bn-" + tenant_name + "-pki-INT-cert-signers"
)
existing_values["tls"]["issuer"]["secret"]["role"] = (
"bn-" + tenant_name + "-pki-INT-cert-signers"
)
existing_values["logstash"]["pipeline"]["pipelines"][0][
"workers"
] = mapped_values["pipelines"]["logging"]["workers"]
existing_values["logstash"]["pipeline"]["replicas"] = mapped_values[
"pipelines"
]["replicas"]
existing_values["kafka"]["topics"]["logging"]["partitions"] = (
mapped_values["pipelines"]["replicas"]
* mapped_values["pipelines"]["logging"]["workers"]
)
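            # e.g. size S: 2 replicas * 3 workers = 6 partitions; size M: 3 * 3 = 9
            # (overriding the default of 3 set in template-values.yaml)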
# order the values in the "existing_values" dictionary alphabetically
existing_values = recursively_sort_dict(existing_values)
# write the ordered values to the tenant values file
with open(values_file, "w", encoding="utf-8") as new_file:
yaml.dump(existing_values, new_file)
else:
print("Invalid tenant sizing")