Sample configuration for AWS with an ALB Ingress controller.
Download this sample ALB Ingress configuration, provided by ITRS, for installations with High Availability (HA) disabled.
# Example Obcerv configuration for AWS with ALB ingress controller.
#
# Intended for demo installations with HA disabled.
#
# The resource requests total ~20 cores and ~43GiB memory (assuming collection-agent DaemonSet runs on 3 nodes)
# and includes Linkerd resources.
#
# Disk requirements:
# - Timescale:
#   - 1 TiB data disk
#   - 30 GiB WAL disk
# - Kafka: 140 GiB
# - Loki: 30 GiB
# - Zookeeper: 1 GiB
# - etcd: 1 GiB
# - Downsampled Metrics:
#   - Raw: 5 GiB
#   - Bucketed: 5 GiB
#
# The AWS Load Balancer Controller is required in order to support external ingestion. This example assumes version
# 2.3.0 or later is installed. See https://kubernetes-sigs.github.io/aws-load-balancer-controller/.
#
# The AWS Load Balancer Controller requires annotations for each ingress configured below.
# Be sure to change the certificate ARN and group names. The group name can be any unique value (for example
# use the same value you set for externalHostname) but it must be the same for the `apps` and `iam` ingresses.
#
# The `alb.ingress.kubernetes.io/target-type` annotation controls how traffic is routed to pods. The simplest
# option ("ip") is used below. If this is not supported in your cluster, the default setting of "instance" must be
# used instead and all services backed by each ingress must be changed to NodePort instead of the default ClusterIP.
#
# Storage class used for all persistent volumes (AWS EBS gp2).
defaultStorageClass: "gp2"

# Web applications: served through an internet-facing ALB with HTTP->HTTPS redirect.
# Change the certificate ARN and group.name; group.name must match the `iam` ingress.
apps:
  externalHostname: "obcerv.mydomain.internal"
  ingress:
    annotations:
      kubernetes.io/ingress.class: alb
      alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:...
      alb.ingress.kubernetes.io/group.name: obcerv.mydomain.internal
      alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]'
      alb.ingress.kubernetes.io/scheme: internet-facing
      alb.ingress.kubernetes.io/ssl-redirect: "443"
      alb.ingress.kubernetes.io/target-type: ip
# External ingestion endpoint: gRPC over HTTPS only, terminated at the ALB.
ingestion:
  externalHostname: "obcerv-ingestion.mydomain.internal"
  replicas: 2
  ingress:
    annotations:
      kubernetes.io/ingress.class: alb
      # gRPC requires the backend-protocol-version annotation (ALB controller >= 2.3.0).
      alb.ingress.kubernetes.io/backend-protocol-version: GRPC
      alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:...
      alb.ingress.kubernetes.io/listen-ports: '[{"HTTPS":443}]'
      alb.ingress.kubernetes.io/scheme: internet-facing
      alb.ingress.kubernetes.io/target-type: ip
  resources:
    requests:
      memory: "512Mi"
      cpu: "500m"
    limits:
      memory: "512Mi"
      cpu: "500m"
# IAM service ingress: shares the ALB with `apps` via the same group.name.
iam:
  ingress:
    annotations:
      kubernetes.io/ingress.class: alb
      alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:...
      alb.ingress.kubernetes.io/group.name: obcerv.mydomain.internal
      alb.ingress.kubernetes.io/ssl-redirect: "443"
      alb.ingress.kubernetes.io/target-type: ip
# Zookeeper: single replica (HA disabled).
zookeeper:
  replicas: 1
  resources:
    requests:
      memory: "256Mi"
      cpu: "200m"
    limits:
      memory: "512Mi"
      cpu: "200m"
# Kafka: single broker with a 140 GiB data disk.
kafka:
  replicas: 1
  diskSize: "140Gi"
  consumer:
    # Batch fetches up to 1 s / 4 MiB to reduce broker load on a single node.
    fetchMaxWaitMs: 1000
    fetchMinBytes: 4194304
  # NOTE(review): `resources` placed at the broker level, not under `consumer` —
  # confirm against the Obcerv chart schema.
  resources:
    requests:
      memory: "3Gi"
      cpu: "1"
    limits:
      memory: "3Gi"
      cpu: "2"
# TimescaleDB: 1 TiB data disk plus a dedicated 30 GiB WAL disk.
timescale:
  dataDiskSize: "1Ti"
  walDiskSize: "30Gi"
  resources:
    requests:
      memory: "14Gi"
      cpu: "2"
    limits:
      memory: "14Gi"
      cpu: "3"
  # Compress chunks one hour after they close.
  compressAfter: 1h
  # Per-hypertable chunk intervals; coarser rollups use larger chunks.
  retention:
    entity_attributes:
      chunkSize: 2d
    metrics:
      chunkSize: 20m
    metrics_5m:
      chunkSize: 1h
    metrics_15m:
      chunkSize: 2h
    metrics_1h:
      chunkSize: 6h
    metrics_3h:
      chunkSize: 12h
    metrics_12h:
      chunkSize: 2d
    metrics_1d:
      chunkSize: 3d
    statuses:
      chunkSize: 7d
# Loki log store: 30 GiB disk.
loki:
  diskSize: "30Gi"
# Metrics sink: separate replica counts and resources for the normal and raw sinks.
sinkd:
  replicas: 1
  rawReplicas: 1
  resources:
    requests:
      memory: "1Gi"
      cpu: "250m"
    limits:
      memory: "1Gi"
      cpu: "400m"
  rawResources:
    requests:
      memory: "1Gi"
      cpu: "250m"
    limits:
      memory: "1Gi"
      cpu: "400m"
# Platform daemon: two replicas even in this non-HA profile.
platformd:
  replicas: 2
  resources:
    requests:
      memory: "1536Mi"
      cpu: "1"
    limits:
      memory: "2Gi"
      cpu: "1500m"
# Data-pipeline daemon: JVM heap pinned to 1 GiB, below the 1100 MiB request.
dpd:
  replicas: 1
  jvmOpts: "-Xms1G -Xmx1G"
  resources:
    requests:
      memory: "1100Mi"
      cpu: "750m"
    limits:
      memory: "1500Mi"
      cpu: "1250m"
# Metric forecasting daemon.
metricForecastd:
  resources:
    requests:
      memory: "512Mi"
      cpu: "250m"
    limits:
      memory: "768Mi"
      cpu: "500m"
# Downsampled metrics stream: raw and bucketed processors sized independently.
downsampledMetricsStream:
  replicas: 1
  bucketedReplicas: 1
  resources:
    requests:
      memory: "1Gi"
      cpu: "750m"
    limits:
      memory: "2Gi"
      cpu: "2"
  bucketedResources:
    requests:
      memory: "1536Mi"
      cpu: "1"
    limits:
      memory: "3Gi"
      cpu: "2"
# Entity stream: intermediate and final processing stages.
entityStream:
  intermediate:
    resources:
      requests:
        memory: "768Mi"
        cpu: "300m"
      limits:
        memory: "1Gi"
        cpu: "500m"
  final:
    resources:
      requests:
        memory: "512Mi"
        cpu: "300m"
      limits:
        memory: "1400Mi"
        cpu: "500m"
# Collection agent (DaemonSet) metrics pipeline; requests apply per node.
collection:
  metrics:
    resources:
      requests:
        memory: "768Mi"
        cpu: "200m"
      limits:
        memory: "1Gi"
        cpu: "250m"
# ["Obcerv"]
# ["User Guide", "Technical Reference"]