Getting the ECK stack up and running and collecting logs with Fluentd in Kubernetes

Use Elastic Cloud on Kubernetes (ECK) as the logging and APM platform. Fluentd collects the Kubernetes container logs and ships them to Elasticsearch, and Kibana is used to search and visualize them.

The ECK operator configuration

The ECK custom resources are applied with the kubectl Terraform provider, and the operator itself is installed from its Helm chart.

terraform {
  required_providers {
    kubectl = {
      source = "gavinbunney/kubectl"
      version = "1.11.1"
    }
  }
}

resource "helm_release" "eck-operator" {
  chart = "${path.module}/../../../../../charts/eck-operator"
  name = "eck-operator"
  namespace = "kube-monitor"

  values = [
    <<-EOF
    image:
      repository: "${var.repository_prefix}/public/eck-operator"
    config:
      containerRegistry: "${var.repository_prefix}/public"
    internal:
      kubeVersion: 1.21.1
    EOF
  ]
}

locals {
  template_vars = {
    namespace = "kube-monitor"
    elasticsearch_version = "7.12.1"
    elasticsearch_image = "${var.repository_prefix}/public/elasticsearch:7.12.1"
    master_node_count = var.node_count.master
    storage_size = var.es_storage_size
    default_storage_class = var.default_storage_class_name
    data_node_count = var.node_count.data
  }
}

resource "kubectl_manifest" "elasticsearch" {
  yaml_body = templatefile("${path.module}/elastic_cluster.yaml", local.template_vars)
}

The Elasticsearch custom resource definition

apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
  name: elasticsearch-cluster
  namespace: ${namespace}
spec:
  version: ${elasticsearch_version}
  volumeClaimDeletePolicy: DeleteOnScaledownOnly
  nodeSets:
    - name: masters
      count: ${master_node_count}
      config:
        node.roles: ["master"]
      podTemplate:
        metadata:
          labels:
            name: elasticsearch
        spec:
          containers:
            - name: elasticsearch
              image: ${elasticsearch_image}
              resources:
                requests:
                  memory: 1Gi
                  cpu: 1
                limits:
                  memory: 4Gi
                  cpu: 2
      volumeClaimTemplates:
        - metadata:
            name: elasticsearch-data
          spec:
            accessModes:
              - ReadWriteMany
            resources:
              requests:
                storage: ${storage_size}
            storageClassName: ${default_storage_class}
    - name: data
      count: ${data_node_count}
      config:
        node.roles: ["data", "ingest", "ml", "transform"]
      podTemplate:
        metadata:
          labels:
            name: elasticsearch
        spec:
          containers:
            - name: elasticsearch
              image: ${elasticsearch_image}
              resources:
                requests:
                  memory: 1Gi
                  cpu: 1
                limits:
                  memory: 4Gi
                  cpu: 2
      volumeClaimTemplates:
        - metadata:
            name: elasticsearch-data
          spec:
            accessModes:
              - ReadWriteMany
            resources:
              requests:
                storage: ${storage_size}
            storageClassName: ${default_storage_class}
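
The ECK operator generates a password for the built-in elastic user and stores it in the secret elasticsearch-cluster-es-elastic-user (the same secret the Fluentd aggregator references further down). If other Terraform resources need that credential, a minimal sketch using the kubernetes_secret data source from the hashicorp/kubernetes provider (an assumption, not part of the original module) could look like this:

# Assumed helper: read the elastic user's password created by the ECK operator.
data "kubernetes_secret" "elastic_user" {
  metadata {
    name      = "elasticsearch-cluster-es-elastic-user"
    namespace = "kube-monitor"
  }
}

output "elastic_password" {
  value     = data.kubernetes_secret.elastic_user.data["elastic"]
  sensitive = true
}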

The Kibana Terraform code

locals {
  kibana_template_vars = {
    namespace = "kube-monitor"
    version = "7.12.1"
    image = "${var.repository_prefix}/public/kibana:7.12.1"
    node_count = var.node_count.kibana
  }
}

resource "kubectl_manifest" "kibana" {
  yaml_body = templatefile("${path.module}/kibana.yaml", local.kibana_template_vars)
}

resource "kubernetes_ingress" "kibana" {
  metadata {
    name = "kibana-ingress"
    namespace = "kube-monitor"
    labels = {
      app = "kibana-ingress"
    }
  }
  spec {
    rule {
      host = var.kibana_domain
      http {
        path {
          path = "/"
          backend {
            service_name = "kibana-kb-http"
            service_port = 5601
          }
        }
      }
    }
  }
}

The Kibana custom resource

apiVersion: kibana.k8s.elastic.co/v1
kind: Kibana
metadata:
  name: kibana
  namespace: ${namespace}
spec:
  version: ${version}
  count: ${node_count}
  image: ${image}
  elasticsearchRef:
    name: elasticsearch-cluster
  podTemplate:
    metadata:
      labels:
        name: kibana
    spec:
      containers:
        - name: kibana
          resources:
            requests:
              memory: 1Gi
              cpu: 0.5
            limits:
              memory: 2Gi
              cpu: 2
  http:
    tls:
      selfSignedCertificate:
        disabled: true

The Fluentd Terraform configuration

The forwarder uses a TCP input and tails the Kubernetes container logs, forwards everything to the Fluentd aggregator, and the aggregator sends it on to Elasticsearch.

resource "helm_release" "fluentd" {
  chart = "${path.module}/../../../../charts/fluentd"
  name = "fluentd"
  namespace = var.namespace

  values = [
    <<-EOF
      image:
        registry: "${var.repository_prefix}"
        repository: "public/fluentd"
        tag: "1.13.1-debian-10-r0"
      forwarder:
        persistence:
          enabled: true
        extraVolumes:
         - name: dockerlink
           hostPath:
              path: "/data2/docker"
              type: "Directory"
        extraVolumeMounts:
         - name: dockerlink
           mountPath: "/data2/docker"
        extraEnv:
          - name: FLUENTD_UID
            value: "0"
        configMapFiles:
          fluentd-inputs.conf: |
            <source>
              @type tcp
              bind 0.0.0.0
              port 24224
              delimiter "\n"
              source_address_key client_addr
              tag ues.services
              <parse>
                @type json
              </parse>
            </source>
            <source>
              @type http
              port 9880
            </source>
            <source>
              @type tail
              path /var/log/containers/*.log
              exclude_path /var/log/containers/*fluentd*.log
              pos_file /opt/bitnami/fluentd/logs/buffers/fluentd-docker.pos
              tag kubernetes.*
              read_from_head true
              <parse>
                @type multi_format
                <pattern>
                  format json
                  time_key time
                  time_format %Y-%m-%dT%H:%M:%S.%NZ
                </pattern>
                <pattern>
                  format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
                  time_format %Y-%m-%dT%H:%M:%S.%N%:z
                </pattern>
              </parse>
            </source>
            <filter kubernetes.**>
              @type kubernetes_metadata
            </filter>
        containerPorts:
           - name: tcp
             containerPort: 24224
             protocol: TCP
           - name: http
             containerPort: 9880
             protocol: TCP
        service:
          ports:
             tcp:
               port: 24224
               targetPort: tcp
               protocol: TCP
             http:
               port: 9880
               targetPort: http
               protocol: TCP
        resources:
          requests:
            cpu: 100m
            memory: 500Mi
          limits:
            cpu: 1000m
            memory: 2Gi
      aggregator:
        extraEnv:
          - name: ELASTIC_PASSWORD
            valueFrom:
              secretKeyRef:
                name: "elasticsearch-cluster-es-elastic-user"
                key: "elastic"
        configMapFiles:
          fluentd-output.conf: |
            <match fluentd.healthcheck>
              @type stdout
            </match>
            <match kubernetes.**>
              @type elasticsearch
              include_tag_key true
              suppress_type_name true
              host "elasticsearch-cluster-es-data"
              port "9200"
              scheme "https"
              ssl_verify false
              user "elastic"
              password "#{ENV['ELASTIC_PASSWORD']}"
              logstash_format true
              logstash_prefix kubernetes
              logstash_dateformat %Y.%m
              include_timestamp true
              reconnect_on_error true
              reload_on_failure true
              reload_connections false
              <buffer>
                @type memory
              </buffer>
            </match>
            <match ues.**>
              @type elasticsearch
              include_tag_key true
              suppress_type_name true
              host "elasticsearch-cluster-es-data"
              port "9200"
              scheme "https"
              ssl_verify false
              user "elastic"
              password "#{ENV['ELASTIC_PASSWORD']}"
              logstash_format true
              logstash_prefix ues
              logstash_dateformat %Y.%m
              include_timestamp true
              reconnect_on_error true
              reload_on_failure true
              reload_connections false
              <buffer>
                @type memory
              </buffer>
            </match>
        resources:
          requests:
            cpu: 100m
            memory: 500Mi
          limits:
            cpu: 1000m
            memory: 2Gi
    EOF
  ]
}
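
With logstash_format enabled and the logstash_prefix/logstash_dateformat settings above, the aggregator writes monthly indices named kubernetes-YYYY.MM and ues-YYYY.MM, so Kibana index patterns such as kubernetes-* and ues-* will match the container logs and the TCP-ingested service logs respectively.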