// ==============================================================================
// OUTPUT: LOKI
// Defines the destination for all processed logs
// ==============================================================================
// Single Loki receiver; every log pipeline below forwards into this component.
loki.write "local_loki" {
  endpoint {
    // Loki push API endpoint; "loki" presumably resolves on the shared
    // container network — verify against the compose/pod definition.
    url = "http://loki:3100/loki/api/v1/push"
  }
}

// ==============================================================================
// STREAM 1: CONTAINER LOGS (VIA PODMAN SOCKET)
// Discovers containers and extracts rich metadata (e.g., pod_name, image)
// ==============================================================================

// 1. Discover all containers via the socket
discovery.docker "podman_containers" {
  // Podman's Docker-compatible API socket.
  // NOTE(review): assumes the Podman socket is bind-mounted at the default
  // Docker path inside this container — confirm in the deployment config.
  host = "unix:///var/run/docker.sock"
}

// 2. Enrich metadata with useful labels for filtering in Grafana
// 2. Enrich metadata with useful labels for filtering in Grafana.
// Rule order matters: later rules writing the same target_label overwrite
// earlier ones, and replacing a label with "" deletes it entirely.
discovery.relabel "podman_metadata" {
  targets = discovery.docker.podman_containers.targets

  // Container Name: remove the slash (/) that the docker API adds (e.g. /my-app -> my-app)
  rule {
    source_labels = ["__meta_docker_container_name"]
    regex         = "/(.*)"
    target_label  = "container_name"
  }

  // Container ID: extract the short container ID (first 12 chars) for precise debugging
  rule {
    source_labels = ["__meta_docker_container_id"]
    regex         = "(.{12}).*"
    target_label  = "container_id"
  }

  // Image Name: extract the image used by the container
  rule {
    source_labels = ["__meta_docker_image_name"]
    target_label  = "image"
  }

  // POD NAME: Podman sets the pod name in the label 'io.kubernetes.pod.name'.
  // The "(.+)" guard makes the rule fire only when the label has a value.
  rule {
    source_labels = ["__meta_docker_container_label_io_kubernetes_pod_name"]
    regex         = "(.+)"
    target_label  = "pod_name"
  }

  // Alternative Pod Name (some Podman versions use io.podman.pod.name).
  // BUGFIX: without "(.+)" the default regex "(.*)" matches the empty string,
  // so for containers lacking this label the rule would set pod_name to "",
  // which DELETES the pod_name produced by the previous rule.
  rule {
    source_labels = ["__meta_docker_container_label_io_podman_pod_name"]
    regex         = "(.+)"
    target_label  = "pod_name"
  }

  // Docker Compose Project (useful if you use compose groups instead of pods).
  // Same "(.+)" guard: only set the label when the compose label is present.
  rule {
    source_labels = ["__meta_docker_container_label_com_docker_compose_project"]
    regex         = "(.+)"
    target_label  = "project"
  }
}

// 3. Read the logs from the discovered containers
// 3. Read the logs from the discovered containers.
loki.source.docker "podman_stream" {
  // Same socket used by discovery.docker above; log tailing is a separate connection.
  host       = "unix:///var/run/docker.sock"
  // Use the relabeled output so the enriched labels land on every stream.
  targets    = discovery.relabel.podman_metadata.output
  forward_to = [loki.write.local_loki.receiver]

  // Static label attached to every container log line.
  labels = {
    job = "podman-containers",
  }
}

// ==============================================================================
// STREAM 2: HOST LOGS (VIA JOURNALD)
// Reads host-level system and service logs
// ==============================================================================

// 1. Processor for Journald logs to extract metadata and prevent duplicates
// 1. Processor for Journald logs to extract metadata and prevent duplicates.
loki.relabel "journal_relabel" {
  forward_to = [loki.write.local_loki.receiver]

  // Extract Systemd Unit name (e.g., sshd.service, NetworkManager.service).
  // The double underscore after "journal" is intentional: the journald field
  // is _SYSTEMD_UNIT (leading underscore), mapped to __journal__systemd_unit.
  rule {
    source_labels = ["__journal__systemd_unit"]
    target_label  = "unit"
  }

  // Extract Syslog Identifier (useful for identifying non-systemd processes)
  rule {
    source_labels = ["__journal_syslog_identifier"]
    target_label  = "syslog_identifier"
  }

  // Extract log priority/level (e.g., info, warning, err) for easy filtering
  rule {
    source_labels = ["__journal_priority_keyword"]
    target_label  = "level"
  }

  // IMPORTANT: Deduplication Filter!
  // Because Podman also writes container logs to Journald, we would get duplicates.
  // We throw away everything here that has a container ID (handled by Stream 1).
  rule {
    source_labels = ["__journal_container_id"]
    regex         = ".+"     // If it contains any value...
    action        = "drop"   // ...drop the log entry from this stream.
  }
}

// 2. Source: Read the journal files from the host
// 2. Source: Read the journal files from the host.
loki.source.journal "read_host_journal" {
  // Ignore entries older than 12h (avoids replaying the whole journal on first start).
  max_age = "12h"
  // Host journal directory; presumably bind-mounted into the container — verify.
  path    = "/var/log/journal"

  // Static labels attached to every journal entry.
  labels = {
    job      = "fedora-journal",
    // NOTE(review): hostname is hard-coded; update if this config is reused
    // on another machine, or derive it dynamically.
    nodename = "workstation",
  }

  // Route through the relabel pipeline (metadata extraction + container
  // dedup) before the entries reach Loki.
  forward_to = [loki.relabel.journal_relabel.receiver]
}

// ==============================================================================
// STREAM 3: CONTINUOUS PROFILING (PYROSCOPE)
// Scrapes pprof endpoints from monitoring tools and sends them to Pyroscope
// ==============================================================================

// 1. Output: Define the destination of profiles
// 1. Output: Define the destination of profiles.
pyroscope.write "backend" {
  endpoint {
    // Pyroscope server; "pyroscope" presumably resolves on the shared
    // container network — verify against the deployment config.
    url = "http://pyroscope:4040"
  }
}

// 2. Source: Define which containers need to be scraped.
// 2. Source: Define which containers need to be scraped.
// NOTE(review): each target is assumed to expose Go pprof endpoints on the
// listed port (several are the app's main HTTP port, e.g. loki:3100,
// prometheus:9090) — confirm pprof is enabled on each service.
pyroscope.scrape "monitoring_tools" {
  targets = [
    {"__address__" = "alertmanager:9093", "service_name" = "alertmanager"},
    {"__address__" = "alloy:12345", "service_name" = "alloy"},
    {"__address__" = "blackbox-exporter:9115", "service_name" = "blackbox-exporter"},
    {"__address__" = "grafana:6060", "service_name" = "grafana"},
    {"__address__" = "host.containers.internal:9100", "service_name" = "node-exporter"},
    {"__address__" = "loki:3100", "service_name" = "loki"},
    {"__address__" = "otel-collector:1777", "service_name" = "otel-collector"},
    {"__address__" = "prometheus:9090", "service_name" = "prometheus"},
    {"__address__" = "tempo:3200", "service_name" = "tempo"},
    {"__address__" = "traefik:8083", "service_name" = "traefik"},
  ]

  // Send collected data to the backend defined in step 1.
  forward_to = [pyroscope.write.backend.receiver]

  // Profile types to collect from every target above.
  profiling_config {
    profile.process_cpu { enabled = true }
    profile.memory { enabled = true }
    profile.mutex { enabled = true }
    profile.block { enabled = true }
    profile.goroutine { enabled = true }
  }
}

// Self-profiling: scrape Pyroscope's own pprof endpoints.
// Kept as a separate scrape block so CPU profiling can be disabled just here.
pyroscope.scrape "pyroscope_self" {
  targets = [
    {"__address__" = "pyroscope:4040", "service_name" = "pyroscope"},
  ]
  forward_to = [pyroscope.write.backend.receiver]

  profiling_config {
    profile.process_cpu { enabled = false }  // conflicts with Pyroscope's internal profiler
    profile.memory      { enabled = true }
    profile.mutex       { enabled = true }
    profile.block       { enabled = true }
    profile.goroutine   { enabled = true }
  }
}