# Sample configuration for AWS EC2 handling 50k metrics/sec (medium) with an NGINX Ingress controller.
#
# This sample configuration for AWS EC2 handling 50k metrics/sec (medium) is provided by ITRS.
---

# Example Obcerv configuration for AWS EC2 handling 50k metrics/sec.
#
# Nodes:
# - (3) m5.4xlarge (16 CPU, 64GiB Memory) for Timescale
# - (3) c5.4xlarge (16 CPU, 32GiB Memory) for all other workloads
#
# The resource requests for Timescale total 24 cores and 182GiB memory.
# The resource requests for the other workloads total ~37 cores and ~82GiB memory.
# These totals include Linkerd resources.
#
# Disk requirements:
# - Timescale:
#   - 16 TiB data disk for each replica (x3)
#   - 50 GiB WAL disk for each replica (x3)
# - Kafka: 200 GiB for each replica (x3)
# - Loki: 30 GiB for each replica (x1)
# - Zookeeper: 1 GiB for each replica (x3)
# - etcd: 1 GiB for each replica (x3)
# - Downsampled Metrics:
#   - Raw: 5 GiB for each replica (x3)
#   - Bucketed: 5 GiB for each replica (x3)
#
# The configuration references a StorageClass named `io1-25` which uses io1 with 25 iopsPerGB - you can create
# this class or change the config to use a class of your own, but it should be similar in performance.
#
# This configuration is based upon a certain number of Obcerv entities, average metrics per entity, and
# average metrics collection interval. The following function can be used to figure out what type of load to expect:
#
# metrics/sec = (Obcerv entities * metrics/entity) / average metrics collection interval
#
# In this example configuration, we have the following:
#
# 50,000 metrics/sec = (125,000 Obcerv entities * 4 metrics/entity) / 10 seconds average metrics collection interval
#

# StorageClass used by any workload that does not set its own storageClass below (AWS gp2).
defaultStorageClass: "gp2"
# Web apps/UI: external hostname and ingress configuration.
apps:
  externalHostname: "obcerv.mydomain.internal"
  ingress:
    annotations:
      # NOTE(review): the kubernetes.io/ingress.class annotation is deprecated in
      # newer Kubernetes in favor of spec.ingressClassName — confirm what the chart
      # and the installed controller version support before changing it.
      kubernetes.io/ingress.class: "nginx"
      # NGINX Inc. controller "mergeable ingress": this resource is the master that
      # minion ingresses (e.g. iam below) attach their paths to.
      nginx.org/mergeable-ingress-type: "master"
# Ingestion endpoint, exposed on its own hostname.
ingestion:
  externalHostname: "obcerv-ingestion.mydomain.internal"
  replicas: 2
  ingress:
    annotations:
      kubernetes.io/ingress.class: "nginx"
      # Tells the controller the backend speaks gRPC.
      # NOTE(review): this nginx.ingress.kubernetes.io/* annotation belongs to the
      # community ingress-nginx controller, while apps/iam use nginx.org/* (NGINX Inc.)
      # annotations — verify which controller is actually deployed.
      nginx.ingress.kubernetes.io/backend-protocol: "GRPC"
  # Requests equal limits -> Guaranteed QoS for ingestion pods.
  resources:
    requests:
      memory: "512Mi"
      cpu: "500m"
    limits:
      memory: "512Mi"
      cpu: "500m"
# Identity and access management endpoint; joins the apps ingress as a mergeable "minion".
iam:
  ingress:
    annotations:
      kubernetes.io/ingress.class: "nginx"
      nginx.org/mergeable-ingress-type: "minion"
# ZooKeeper ensemble (3 replicas for quorum; 1 GiB disk each per header notes).
zookeeper:
  replicas: 3
  resources:
    requests:
      memory: "256Mi"
      cpu: "200m"
    # Memory limit is 2x the request; CPU limit equals the request.
    limits:
      memory: "512Mi"
      cpu: "200m"
# Kafka brokers (3 replicas, 200 GiB each) on the high-IOPS io1-25 StorageClass.
kafka:
  replicas: 3
  diskSize: "200Gi"
  storageClass: "io1-25"
  # Default number of partitions for topics.
  defaultPartitions: 12
  consumer:
    # Consumers wait up to 500 ms or until fetchMinBytes is available, whichever
    # comes first, before a fetch returns.
    fetchMaxWaitMs: 500
    fetchMinBytes: 524288
  resources:
    requests:
      memory: "6Gi"
      cpu: "2"
    # CPU limit allows bursting to 3 cores above the 2-core request.
    limits:
      memory: "6Gi"
      cpu: "3"
# TimescaleDB cluster (3 nodes) pinned to the dedicated m5.4xlarge node group.
timescale:
  clusterSize: 3
  dataDiskSize: "50Gi"
  dataStorageClass: "io1-25"
  # 4 disks x 4Ti = 16Ti of time-series storage per replica (the "16 TiB data disk"
  # in the header notes).
  timeseriesDiskCount: 4
  timeseriesDiskSize: "4Ti" # Max disk size for AWS io1 is 16Ti
  timeseriesStorageClass: "io1-25"
  walDiskSize: "50Gi"
  walStorageClass: "io1-25"
  # 60Gi / 8 CPU per replica with requests == limits (Guaranteed QoS).
  # 3 x 60Gi = 180Gi; the header's 182GiB total presumably includes Linkerd sidecars.
  resources:
    requests:
      memory: "60Gi"
      cpu: "8"
    limits:
      memory: "60Gi"
      cpu: "8"
  compressAfter: 1h
  # Schedule only onto the dedicated Timescale node group and tolerate its taint;
  # the selector and toleration must match the labels/taints applied to those nodes.
  nodeSelector:
    instancegroup: timescale-nodes
  tolerations:
  - key: dedicated
    operator: Equal
    value: timescale-nodes
    effect: NoSchedule
  # Per-table chunk sizing and retention. Downsampled tables (metrics_5m through
  # metrics_1d) use progressively larger chunks and longer retention than raw metrics.
  retention:
    entity_attributes:
      chunkSize: 2d
    metrics:
      chunkSize: 20m
      retention: 30d
    metrics_5m:
      chunkSize: 1h
      retention: 60d
    metrics_15m:
      chunkSize: 2h
      retention: 120d
    metrics_1h:
      chunkSize: 6h
      retention: 180d
    metrics_3h:
      chunkSize: 12h
      retention: 180d
    metrics_12h:
      chunkSize: 2d
      retention: 1y
    metrics_1d:
      chunkSize: 3d
      retention: 2y
    statuses:
      chunkSize: 7d
      retention: 2y
# Loki log storage (single replica, 30 GiB per the header notes).
loki:
  diskSize: "30Gi"
  storageClass: "io1-25"
  # NOTE(review): units for burst size / rate limit are not stated here
  # (Loki's own limits are MB-based) — confirm against the chart documentation.
  ingestionBurstSize: 9
  ingestionRateLimit: 6
  # 6291456 bytes = 6 MiB maximum payload.
  maxPayloadSize: 6291456
  resources:
    requests:
      memory: "512Mi"
      cpu: "250m"
    limits:
      memory: "512Mi"
      cpu: "300m"
# sinkd: 2 regular replicas plus 3 raw-stream replicas.
sinkd:
  replicas: 2
  rawReplicas: 3
  # Fixed 768M JVM heap + 100M direct memory = 868M, leaving headroom inside the
  # 1152Mi container limit for JVM metaspace/stack overhead.
  jvmOpts: "-Xms768M -Xmx768M -XX:MaxDirectMemorySize=100M"
  metrics:
    # Max records pulled per Kafka poll.
    maxPollRecords: 25000
  resources:
    requests:
      memory: "1152Mi"
      cpu: "250m"
    limits:
      memory: "1152Mi"
      cpu: "400m"
  # Separate resource envelope for the raw-stream replicas.
  rawResources:
    requests:
      memory: "1Gi"
      cpu: "250m"
    limits:
      memory: "1Gi"
      cpu: "400m"
# platformd (2 replicas); limits allow bursting above requests on both memory and CPU.
platformd:
  replicas: 2
  resources:
    requests:
      memory: "1536Mi"
      cpu: "1"
    limits:
      memory: "2Gi"
      cpu: "1500m"
# dpd (2 replicas).
dpd:
  replicas: 2
  # 3500M max heap with a 2G young generation reserved via NewSize; fits inside
  # the 4Gi request / 5Gi limit below.
  jvmOpts: "-Xmx3500M -XX:NewSize=2G"
  kafkaConsumerMaxPollRecords: 25000
  metricsMultiplexer:
    maxFilterResultCacheSize: 500000
    maxConcurrentOps: 500
    localParallelism: 6
  # Self-monitoring thresholds on metrics partition lag (warn at 10k, critical at 25k).
  selfMonitoringThresholds:
    metrics_partition_lag_warn: 10000
    metrics_partition_lag_critical: 25000
  resources:
    requests:
      memory: "4Gi"
      cpu: "1500m"
    limits:
      memory: "5Gi"
      cpu: "2"
# metricForecastd resource envelope (replica count left at the chart default).
metricForecastd:
  resources:
    requests:
      memory: "512Mi"
      cpu: "250m"
    limits:
      memory: "768Mi"
      cpu: "500m"
# Downsampled metrics stream: 3 raw and 3 bucketed replicas (5 GiB disk each per
# the header notes), with separate RocksDB tuning for the raw and bucketed stores.
downsampledMetricsStream:
  replicas: 3
  storageClass: "io1-25"
  bucketedReplicas: 3
  maxPollRecords: 25000
  # Raw store RocksDB tuning (values in bytes):
  rawRocksdb:
    totalOffHeapMemory: 134217728    # 128 MiB
    indexFilterRatio: 0.25
    totalMemTableMemory: 100663296   # 96 MiB
    blockSize: 16384                 # 16 KiB
    writeBufferSize: 16777216        # 16 MiB
  # Bucketed store RocksDB tuning — much smaller than the raw store (values in bytes):
  bucketedRocksdb:
    totalOffHeapMemory: 16777216     # 16 MiB
    indexFilterRatio: 0.25
    totalMemTableMemory: 12582912    # 12 MiB
    blockSize: 8192                  # 8 KiB
    writeBufferSize: 4194304         # 4 MiB
  resources:
    requests:
      memory: "3Gi"
      cpu: "1"
    limits:
      memory: "3Gi"
      cpu: "1500m"
  # Bucketed replicas get a larger envelope than the raw replicas above.
  bucketedResources:
    requests:
      memory: "4Gi"
      cpu: "1500m"
    limits:
      memory: "4Gi"
      cpu: "3"
# Entity stream: separate sizing for the intermediate and final processing stages.
entityStream:
  intermediate:
    resources:
      requests:
        memory: "1Gi"
        cpu: "750m"
      limits:
        memory: "1536Mi"
        cpu: "1"
    # RocksDB budget for the intermediate stage, in MiB.
    rocksdb:
      memoryMib: 200
  final:
    resources:
      requests:
        memory: "512Mi"
        cpu: "300m"
      limits:
        memory: "1850Mi"
        cpu: "500m"
# etcd (3 replicas for quorum; 1 GiB disk each per the header notes).
etcd:
  replicas: 3
# Self-monitoring metrics collection resource envelope.
collection:
  metrics:
    resources:
      requests:
        memory: "768Mi"
        cpu: "200m"
      limits:
        memory: "1Gi"
        cpu: "250m"
# Tags: Obcerv — User Guide, Technical Reference