Launch and set up the ELK stack using the Docker Compose service.
Install the ELK stack using docker-compose.
Install and set up the ELK stack on a single machine using the Docker Compose service, and use Filebeat to
ship the logs.
- Set up the CentOS system
# Install Docker CE from the official repository.
sudo yum install -y yum-utils
sudo yum-config-manager \
    --add-repo \
    https://download.docker.com/linux/centos/docker-ce.repo
sudo yum install -y docker-ce docker-ce-cli containerd.io
sudo systemctl enable docker
sudo systemctl start docker

# Install docker-compose.
sudo curl -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose

# Adjust the system parameters (Elasticsearch requires vm.max_map_count >= 262144).
echo "vm.max_map_count=262144" >> /etc/sysctl.conf
sysctl -p
systemctl restart docker

# Raise the open-file limits for Elasticsearch.
echo "
root soft nofile 65535
root hard nofile 65535
* soft nofile 65535
* hard nofile 65535
" >> /etc/security/limits.conf
- Download the ELK stack Docker images
# Pull the ELK stack Docker images (keep all three components on the same version).
docker pull docker.elastic.co/elasticsearch/elasticsearch:6.3.1
docker pull docker.elastic.co/kibana/kibana:6.3.1
docker pull docker.elastic.co/logstash/logstash:6.3.1

# Install elasticsearch-curator from the official Elastic yum repository.
rpm --import https://packages.elastic.co/GPG-KEY-elasticsearch
echo "
[curator-5]
name=CentOS/RHEL 7 repository for Elasticsearch Curator 5.x packages
baseurl=https://packages.elastic.co/curator/5/centos/7
gpgcheck=1
gpgkey=https://packages.elastic.co/GPG-KEY-elasticsearch
enabled=1
" > /etc/yum.repos.d/curator.repo
yum install -y elasticsearch-curator
- Create the directory structure
# Create the directory layout referenced by the compose file and config files below.
mkdir -p /data/elkstack/{elasticsearch/{data,package},curator,kibana/data,logstash/{conf,plugins}}

# The resulting folder tree (commented so this stays a runnable script):
# .
# └── elkstack
#     ├── curator
#     ├── elasticsearch
#     │   ├── data
#     │   └── package
#     ├── kibana
#     │   └── data
#     └── logstash
#         ├── conf
#         └── plugins

# UID/GID 1000 is the user the official Elastic images run as inside the containers.
chown 1000:1000 -R /data/elkstack
- Prepare the docker-compose.yml file
version: '2'

networks:
  elk-net:
    driver: bridge

services:

  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:6.3.1
    container_name: elasticsearch
    environment:
      - cluster.name=docker-cluster
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms1024m -Xmx1024m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    mem_limit: 16000m
    cap_add:
      - IPC_LOCK
    restart: always
    networks:
      - elk-net
    ports:
      - "9200:9200"
      - "9300:9300"
    volumes:
      - /data/elkstack/elasticsearch/data:/usr/share/elasticsearch/data
      - /data/elkstack/elasticsearch/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - /data/elkstack/elasticsearch/package:/tmp/package
    user: elasticsearch
    # To add SQL-query support, install the plugin before launching, e.g.:
    # command: bash -c "elasticsearch-plugin install file:///tmp/package/elasticsearch-sql-6.3.1.1.zip; elasticsearch"
    command: "elasticsearch"

  logstash:
    image: docker.elastic.co/logstash/logstash:6.3.1
    container_name: logstash
    restart: always
    networks:
      - elk-net
    mem_limit: 1300m
    ports:
      # Quoted to avoid YAML's sexagesimal/number parsing of port mappings.
      - "5044:5044"
    volumes:
      - /data/elkstack/logstash/conf:/config-dir
      - /data/elkstack/logstash/plugins:/tmp/plugins
      - /data/elkstack/logstash/logstash.yml:/usr/share/logstash/config/logstash.yml
      - /etc/localtime:/etc/localtime
    links:
      - 'elasticsearch:elasticsearch'
    user: logstash
    command: bash -c "logstash -f /config-dir --config.reload.automatic"

  kibana:
    image: docker.elastic.co/kibana/kibana:6.3.1
    container_name: kibana
    restart: always
    mem_limit: 2000m
    environment:
      SERVER_NAME: test.kibana.com
    networks:
      - elk-net
    ports:
      - "5601:5601"
    links:
      - elasticsearch:elasticsearch
    volumes:
      - /data/elkstack/kibana/kibana.yml:/usr/share/kibana/config/kibana.yml
      - /data/elkstack/kibana/data:/usr/share/kibana/data
- The ELK stack's related config files look like the following:
the /data/elkstack/elasticsearch/elasticsearch.yml contents:
cluster.name: "docker-cluster"
network.host: 0.0.0.0

discovery.zen.minimum_master_nodes: 1
http.port: 9200
transport.tcp.port: 9300
# Unicast hosts for discovery/health-check pings when running multiple nodes:
# discovery.zen.ping.unicast.hosts: ["172.16.1.3:9300", "172.16.1.4:9300"]
discovery.zen.fd.ping_timeout: 120s
discovery.zen.fd.ping_retries: 3
discovery.zen.fd.ping_interval: 30s
cluster.info.update.interval: 1m
xpack.security.enabled: false
indices.fielddata.cache.size: 20%
indices.breaker.total.limit: 60%
indices.recovery.max_bytes_per_sec: 100mb
indices.memory.index_buffer_size: 20%
script.painless.regex.enabled: true
the /data/elkstack/logstash/conf folder contents:
# cat 01_input.conf
input {
  beats {
    port => 5044
  }
}

# cat 02_filter.conf
filter {
  if [fields][log_format] =~ /test_logs/ {

    # Extract the application id from the shipped file path.
    grok {
      match => { "source" => "/data/var/log/apps/(?<app_id>[^/]+)"}
    }

    # Flatten tabs/newlines so the message parses as a single JSON document.
    mutate {
      gsub => ["message", "\t", " ", "message", "\n", ""]
    }

    json {
      source => "message"
    }
  }
}

# cat 03_output.conf
output {
  # Filebeat places custom fields under [fields] (no fields_under_root in the
  # filebeat config below), so test the same path the filter uses.
  if [fields][log_format] =~ "test" {
    stdout { codec => rubydebug }
  }
  else {
    elasticsearch {
      hosts => ["elasticsearch:9200"]
      index => "test-%{+YYYY.MM}"
      #user => user
      #password => password
    }
  }
}

# /data/elkstack/logstash/logstash.yml
http.host: "0.0.0.0"
the /data/elkstack/kibana/kibana.yml contents:
server.name: kibana
server.host: "0"
elasticsearch.url: http://elasticsearch:9200
xpack.security.enabled: false
- Configure the curator
mkdir -p /data/elkstack/curator
# Curator client configuration. Note: `logging:` must be a TOP-LEVEL key,
# a sibling of `client:`, not nested under it.
echo "
client:
  hosts:
    - 172.16.1.3
  port: 9200
  url_prefix:
  use_ssl: False
  certificate:
  client_cert:
  client_key:
  ssl_no_validate: False
  http_auth:
  timeout: 150
  master_only: False

logging:
  loglevel: INFO
  logfile:
  logformat: default
  blacklist: ['elasticsearch', 'urllib3']
" > /data/elkstack/curator/config.yml

# Add the cleanup rules.
echo "
actions:
  1:
    action: delete_indices
    description: >-
      Delete indices older than 60 days. Ignore the error if the filter does not result in an actionable list of indices (ignore_empty_list) and exit cleanly.
    options:
      ignore_empty_list: True
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      # retain kibana|json|monitoring|metadata, don't clean them
      value: '^((?!(kibana|json|monitoring|metadata)).)*$'
    - filtertype: age
      source: creation_date
      direction: older
      #timestring: '%Y-%m-%d'
      unit: days
      unit_count: 60
" > /data/elkstack/curator/action.yml

# Schedule curator daily at midnight. Append non-interactively:
# `crontab -e` opens an editor and cannot take the job on the command line.
(crontab -l 2>/dev/null; echo "0 0 * * * /usr/bin/curator --config /data/elkstack/curator/config.yml /data/elkstack/curator/action.yml > /tmp/curator.log 2>&1") | crontab -
- Set up Filebeat on the server to collect the logs
tar -xzvf filebeat-6.3.1-linux-x86_64.tar.gz
vi /lib/systemd/system/filebeat.service
systemctl daemon-reload
systemctl enable filebeat
systemctl start filebeat
the filebeat.service systemd unit file looks like the following:
[Unit]
Description=filebeat
Documentation=https://www.elastic.co/guide/en/beats/filebeat/index.html
After=network.target

[Service]
Type=simple
ExecStart=/usr/local/filebeat-6.3.1/filebeat -c /usr/local/filebeat-6.3.1/test-filebeat.yml
Restart=on-failure

[Install]
WantedBy=multi-user.target
the test-filebeat.yml configuration file contents:
filebeat.inputs:

- type: log
  enabled: true
  paths:
#    - /data/var/log/apps/*.log
    - /data/var/log/apps/**/logs/*.log
#    - /data/var/log/apps/**/*.log

  # NOTE(review): the logstash filter matches [fields][log_format] against
  # /test_logs/, which 'ues_mgr' never satisfies — verify the intended value.
  fields:
    log_format: ues_mgr

  # Join continuation lines: a new event starts with '{', '[' or a leading date.
  multiline.pattern: '^{|^\[|^\d+-\d+-\d+'
  multiline.negate: true
  multiline.match: after

filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false

output.logstash:
  hosts: ["localhost:5044"]
Don't forget to enable the ports provided by the services
firewall-cmd --zone=public --add-port=5044/tcp --permanent   # beats -> logstash
firewall-cmd --zone=public --add-port=9200/tcp --permanent   # elasticsearch HTTP
firewall-cmd --zone=public --add-port=5601/tcp --permanent   # kibana web UI (exposed by compose, must be opened too)
firewall-cmd --reload