docker-logstash.yml
version: '3.2'
services:
  logstash:
    image: logstash:latest
    networks:
      - net
    volumes:
      - /app/elk/logstash/logstash.conf:/some/config-dir/logstash.conf
      - /app/elk/filebeat/filebeat_logstash_out.conf:/some/config-dir/filebeat_logstash_out.conf
      - /app/elk/city/GeoLite2-City.mmdb:/GeoLite2-City.mmdb
      # - /etc/timezone:/etc/timezone:ro
      # - /etc/localtime:/etc/localtime:ro
    ports:
      - target: 5044
        published: 5044
        protocol: tcp
        mode: host
    command: -f /some/config-dir/
    deploy:
      mode: global
networks:
  net:
    driver: overlay
    internal: false
    ipam:
      driver: default
      config:
        - subnet: 10.0.66.0/24
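With Swarm mode already initialized, this stack can be deployed and checked as follows; the stack name elk here is an arbitrary choice, not something required by the compose file:

# Deploy the Logstash service to the swarm; "elk" is an arbitrary stack name.
docker stack deploy -c docker-logstash.yml elk
# mode: global means one task per node; verify with:
docker service ps elk_logstash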
logstash.conf
input {
  kafka {
    codec => "json"
    bootstrap_servers => "192.168.31.146:9092,192.168.31.147:9092,192.168.31.167:9092"
    #topics => ["backstage_logs","logstash-.*"]
    topics_pattern => "backstage_.*"
    group_id => "logstash"
    auto_offset_reset => "latest"
    type => "logs"
  }
}
#filter {
#  if "nginx-accesslog" in [tags] {
#    grok {
#      match => { "message" => "%{IPORHOST:http_host} %{IPORHOST:clientip} - %{USERNAME:remote_user} \[%{HTTPDATE:timestamp}\] \"(?:%{WORD:http_verb} %{NOTSPACE:http_request}(?: HTTP/%{NUMBER:http_version})?|%{DATA:raw_http_request})\" %{NUMBER:response} (?:%{NUMBER:bytes_read}|-) %{QS:referrer} %{QS:agent} %{QS:xforwardedfor} %{NUMBER:request_time:float} %{GREEDYDATA:traceID}" }
#    }
#    mutate {
#      convert => ["status","integer"]
#      convert => ["body_bytes_sent","integer"]
#      convert => ["request_time","float"]
#    }
#    geoip {
#      source => "remote_addr"
#    }
#    date {
#      match => [ "timestamp","dd/MMM/yyyy:HH:mm:ss Z" ]
#    }
#    useragent {
#      source => "http_user_agent"
#    }
#  }
#
#  if "tomcat-accesslog" in [tags] {
#    grok {
#      match => { "message" => "%{IPORHOST:clientip} \[%{HTTPDATE:timestamp}\] \"(?:%{WORD:http_verb} %{NOTSPACE:http_request}(?: HTTP/%{NUMBER:http_version})?|%{DATA:raw_http_request})\" %{NUMBER:response} (?:%{NUMBER:bytes_read}|-) %{QS:referrer} %{NUMBER:request_time:float} %{GREEDYDATA:traceID}" }
#    }
#    date {
#      match => [ "timestamp","dd/MMM/yyyy:HH:mm:ss Z" ]
#    }
#  }
#}
output {
  if [type] == "logs" {
    elasticsearch {
      hosts => ["192.168.31.146:9200","192.168.31.147:9200","192.168.31.167:9200"]
      index => "backstage_%{type}-%{+YYYY.MM.dd}"
      manage_template => true
      document_type => "%{type}"
    }
  }
}
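Before restarting the service it is worth validating the pipeline syntax, and once events flow from Kafka you can confirm that the daily indices are being created. A quick sketch, assuming Logstash 5.x or later (older releases use -t instead of --config.test_and_exit) and that 192.168.31.146 is reachable from where you run curl:

# Validate the pipeline configuration without actually starting it.
logstash -f /some/config-dir/ --config.test_and_exit
# List the indices produced by the elasticsearch output above.
curl 'http://192.168.31.146:9200/_cat/indices/backstage_*?v'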
filebeat_logstash_out.conf
input {
  beats {
    port => 5044
    codec => json  # parse the JSON string that Filebeat stores in the message field
    type => "logs"
  }
}
filter {
  if [type] == "nginxaccess" {
    mutate {
      remove_field => ["tags", "beat"]
      # remove the fields that Filebeat adds automatically
      ## note from testing: replacing this with drop { remove_field => ... }
      ## produces no output at all
    }
  }
}
output {
  if [type] == "nginxaccess" {
    elasticsearch {
      hosts => ["192.168.31.146:9200","192.168.31.147:9200","192.168.31.167:9200"]
      index => "filebeat-%{type}-%{+YYYY.MM.dd}"
      manage_template => true
      document_type => "%{type}"
    }
    stdout {
      codec => rubydebug
    }
  }
}
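To check the Beats pipeline end to end, you can watch the stdout { codec => rubydebug } output of the running service and query Elasticsearch for the resulting indices. A sketch, assuming the service name elk_logstash from the stack deployed earlier:

# Tail the rubydebug output of the running Logstash service.
docker service logs -f elk_logstash
# Confirm that documents are landing in the filebeat-* indices.
curl 'http://192.168.31.146:9200/_cat/indices/filebeat-*?v'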
The above configures the Logstash consumer side. If you also need Filebeat to collect log files from other machines, continue with the configuration below.
docker-filebeat.yml
version: '3.2'
services:
  filebeat:
    image: prima/filebeat:latest
    networks:
      - net
    volumes:
      - /docker-swarm/ELK/prod/filebeat.yml:/filebeat.yml
      - /nginx/logs/:/nginx/logs/
      # - /etc/timezone:/etc/timezone:ro
      # - /etc/localtime:/etc/localtime:ro
    deploy:
      mode: global
networks:
  net:
    driver: overlay
    internal: false
    ipam:
      driver: default
      config:
        - subnet: 10.0.66.0/24
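Deploying this collector stack follows the same pattern as the Logstash stack above; the stack name beats is again an arbitrary choice:

# Deploy the Filebeat collector; "beats" is an arbitrary stack name.
docker stack deploy -c docker-filebeat.yml beats
# mode: global should place one Filebeat task on every node:
docker service ps beats_filebeat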
filebeat.yml
filebeat.prospectors:
- input_type: log
  paths:
    - /nginx/logs/*.log
  tags: ["nginx-accesslog"]
  document_type: nginxaccess

#output.kafka:
#  enabled: true
#  hosts: ["192.168.31.163:9092"]
#  # topic: logstash-%{[type]}
#  topic: backstage_%{[type]}

output:
  logstash:
    hosts: ["192.168.31.146:5044","192.168.31.147:5044","192.168.31.167:5044"]
    # worker: 1
    # loadbalance: true
    # index: filebeat
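The filebeat.prospectors and document_type keys suggest a Filebeat 5.x image, so the configuration check differs by version; a sketch, with the exact flag depending on your Filebeat release:

# Filebeat 5.x (matches the filebeat.prospectors syntax above):
filebeat -c filebeat.yml -configtest
# Filebeat 6.x and later use the test subcommands instead:
# filebeat test config -c filebeat.yml
# filebeat test output -c filebeat.yml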