Sample configuration for AWS with NGINX Ingress controller (extra small, no HA)
Download this sample configuration, provided by ITRS, for AWS installations using the NGINX Ingress controller with High Availability (HA) disabled.
# Example Obcerv configuration for AWS with nginx ingress controller.
#
# Intended for demo/micro installations with HA disabled.
#
# The resource requests total ~20 cores and ~40GiB memory
# and includes Linkerd resources.
#
# Disk requirements:
# - Timescale:
#   - 500 GiB data disk
#   - 30 GiB WAL disk
# - Kafka: 100 GiB
# - Loki: 10 GiB
# - Zookeeper: 1 GiB
# - etcd: 1 GiB
# - Downsampled Metrics:
#   - Raw: 5 GiB
#   - Bucketed: 5 GiB
#
defaultStorageClass: "gp3"
apps:
  externalHostname: "obcerv.mydomain.internal"
  ingress:
    annotations:
      kubernetes.io/ingress.class: "nginx"
      nginx.org/mergeable-ingress-type: "master"
ingestion:
  externalHostname: "obcerv-ingestion.mydomain.internal"
  replicas: 1
  ingress:
    annotations:
      kubernetes.io/ingress.class: "nginx"
      # Ingestion endpoint is gRPC; nginx must proxy HTTP/2 to the backend.
      nginx.ingress.kubernetes.io/backend-protocol: "GRPC"
  resources:
    requests:
      memory: "512Mi"
      cpu: "500m"
    limits:
      memory: "512Mi"
      cpu: "500m"
iam:
  ingress:
    annotations:
      kubernetes.io/ingress.class: "nginx"
      nginx.org/mergeable-ingress-type: "minion"
zookeeper:
  replicas: 1
  resources:
    requests:
      memory: "256Mi"
      cpu: "200m"
    limits:
      memory: "512Mi"
      cpu: "200m"
kafka:
  replicas: 1
  diskSize: "100Gi"
  consumer:
    fetchMaxWaitMs: 250
    fetchMinBytes: 524288
  resources:
    requests:
      memory: "3Gi"
      cpu: "1"
    limits:
      memory: "3Gi"
      cpu: "2"
timescale:
  dataDiskSize: "500Gi"
  walDiskSize: "30Gi"
  resources:
    requests:
      memory: "14Gi"
      cpu: "2"
    limits:
      memory: "14Gi"
      cpu: "4"
  compressAfter: 3h
  # Per-table chunk sizing and retention windows.
  retention:
    entity_attributes:
      chunkSize: 2d
      retention: 1y
    metrics:
      chunkSize: 8h
      retention: 30d
    metrics_5m:
      chunkSize: 1d
      retention: 90d
    metrics_1h:
      chunkSize: 5d
      retention: 180d
    metrics_1d:
      chunkSize: 20d
      retention: 1y
    statuses:
      chunkSize: 7d
      retention: 1y
    signal_details:
      chunkSize: 7d
      retention: 30d
loki:
  diskSize: "10Gi"
sinkd:
  replicas: 1
  rawReplicas: 1
  resources:
    requests:
      memory: "1Gi"
      cpu: "250m"
    limits:
      memory: "1Gi"
      cpu: "400m"
  rawResources:
    requests:
      memory: "1Gi"
      cpu: "250m"
    limits:
      memory: "1Gi"
      cpu: "400m"
platformd:
  replicas: 1
  resources:
    requests:
      memory: "1536Mi"
      cpu: "1"
    limits:
      memory: "2Gi"
      cpu: "1500m"
dpd:
  replicas: 1
  jvmOpts: "-Xmx2G -XX:NewSize=1G"
  metricsMultiplexer:
    maxFilterResultCacheSize: 200000
    maxConcurrentOps: 100
    localParallelism: 6
  selfMonitoringThresholds:
    metrics_partition_lag_warn: 100000
    metrics_partition_lag_critical: 500000
  resources:
    requests:
      memory: "3Gi"
      cpu: "2"
    limits:
      memory: "3500Mi"
      cpu: "3"
metricForecastd:
  resources:
    requests:
      memory: "512Mi"
      cpu: "250m"
    limits:
      memory: "768Mi"
      cpu: "500m"
downsampledMetricsStream:
  replicas: 1
  bucketedReplicas: 1
  jvmOpts: "-XX:InitialRAMPercentage=50 -XX:MaxRAMPercentage=50"
  resources:
    requests:
      memory: "1Gi"
      cpu: "750m"
    limits:
      memory: "1536Mi"
      cpu: "1"
  bucketedResources:
    requests:
      memory: "1536Mi"
      cpu: "1"
    limits:
      memory: "1536Mi"
      cpu: "1500m"
entityStream:
  intermediate:
    resources:
      requests:
        memory: "768Mi"
        cpu: "300m"
      limits:
        memory: "1Gi"
        cpu: "500m"
  final:
    resources:
      requests:
        memory: "512Mi"
        cpu: "300m"
      limits:
        memory: "1536Mi"
        cpu: "500m"
signalsStream:
  resources:
    requests:
      memory: "512Mi"
      cpu: "150m"
    limits:
      memory: "768Mi"
      cpu: "300m"
etcd:
  replicas: 1
collection:
  metrics:
    resources:
      requests:
        memory: "768Mi"
        cpu: "200m"
      limits:
        memory: "1Gi"
        cpu: "250m"
Product: Obcerv
Categories: User Guide, Technical Reference