1. Add a Tomcat monitoring template
yum install java-1.8.0-openjdk tomcat-webapps tomcat-admin-webapps tomcat-docs-webapp -y # install the Tomcat packages on the monitored node
Talking to the JMX interface requires a dedicated client component, zabbix-java-gateway, installed on the Zabbix server or some other node; here it goes directly on the server node.
yum install zabbix-java-gateway -y # install on the Zabbix server node
systemctl start zabbix-java-gateway # start the service
Edit the Zabbix server configuration file and add the Java gateway's address, port, and poller count.
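For example, in /etc/zabbix/zabbix_server.conf (assuming the gateway runs on the server itself; the poller count is an arbitrary choice), then restart zabbix-server:
JavaGateway=127.0.0.1 # address of the zabbix-java-gateway host
JavaGatewayPort=10052 # default gateway port
StartJavaPollers=5 # number of Java poller processes to start
systemctl restart zabbix-server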
Edit the Tomcat configuration file to enable JMX (e.g. /etc/sysconfig/tomcat or /etc/tomcat/tomcat.conf on CentOS):
CATALINA_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.manageme
nt.jmxremote.port=12345 -Dcom.sun.management.jmxremote.ssh=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rm
i.server.hostname=192.168.1.197"
Add the JMX host in the Zabbix frontend, giving it a JMX interface on port 12345.
Attach the JMX template.
2. Set up zabbix-proxy and test it
Install the zabbix-proxy package on the proxy node, plus mariadb-server to store the monitoring data.
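A minimal install (assuming the MySQL-backed proxy variant, which matches the schema path used below):
yum install zabbix-proxy-mysql mariadb-server -y
systemctl start mariadb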
vim /etc/my.cnf # edit the database configuration file on the proxy node
skip_name_resolve=ON
Then create the database, grant an account, and import the table schema:
MariaDB [(none)]> create database zabbix character set utf8 collate utf8_bin;
Query OK, 1 row affected (0.01 sec)
MariaDB [(none)]> grant all privileges on zabbix.* to zabbix@'192.168.1.%' identified by '123456';
Query OK, 0 rows affected (0.00 sec)
zcat /usr/share/doc/zabbix-proxy-mysql-3.0.28/schema.sql.gz | mysql -uzabbix -h 192.168.1.197 -p zabbix
ServerActive=192.168.1.197 # update the agent configuration, replacing the server address
Server=192.168.1.196,192.168.1.197
vim /etc/zabbix/zabbix_proxy.conf
Server=192.168.1.196
Hostname=node2
DBName=zabbix
DBUser=zabbix
DBPassword=123456
HeartbeatFrequency=60 # send a heartbeat to the server every 60 seconds
ConfigFrequency=5 # how often (seconds) the proxy pulls its configuration from the server
DataSenderFrequency=1 # send collected data to the server every second
Add the proxy on the Zabbix server.
Create hosts, putting both node1 and node2 (the proxy itself) behind the node2 proxy so that it monitors them.
Attach templates.
3. Add a Redis monitoring template (low-level discovery: one host, multiple Redis ports)
1. Install the Zabbix server and its database.
Script contents:
# vim redis.sh
#!/bin/bash
# Fetch one field from a local Redis instance's INFO output.
redis_status(){
R_PORT=$1
R_COMMAND=$2
(echo -en "INFO\r\n"; sleep 1) | nc 127.0.0.1 "$R_PORT" > /usr/local/zabbix/redis_"$R_PORT".tmp
REDIS_STAT_VALUE=$(grep "^$R_COMMAND:" /usr/local/zabbix/redis_"$R_PORT".tmp | cut -d ':' -f2)
echo "$REDIS_STAT_VALUE"
}
help(){
echo "Usage: ${0} redis_status PORT COMMAND"
}
main(){
case $1 in
redis_status)
redis_status "$2" "$3"
;;
*)
help
;;
esac
}
main "$1" "$2" "$3"
Verify the script (locally, then from the Zabbix server):
# bash redis.sh redis_status 6379 connected_clients
# zabbix_get -s 192.168.15.205 -p 10050 -k "redis_status[redis_status,6379,connected_clients]"
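The item key above assumes a UserParameter entry in zabbix_agentd.conf along these lines (the script path is an assumption):
UserParameter=redis_status[*],/bin/bash /etc/zabbix/scripts/redis.sh $1 $2 $3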
Add a low-level discovery rule.
Define the filter conditions; a sketch of a port-discovery script follows below.
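A minimal sketch of such a discovery script (the {#REDIS_PORT} macro name and the ss-based port detection are assumptions), printing the JSON a Zabbix 3.0 discovery rule expects:
#!/bin/bash
# Emit the listening redis-server ports as Zabbix low-level discovery JSON.
PORTS=$(ss -tnlp 2>/dev/null | awk '/redis-server/ {n=split($4,a,":"); print a[n]}' | sort -u)
first=1
printf '{"data":['
for port in $PORTS; do
    [ $first -eq 0 ] && printf ','
    printf '{"{#REDIS_PORT}":"%s"}' "$port"
    first=0
done
printf ']}\n'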
4. Build an ELK log collection system
yum install java-1.8.0-openjdk-devel -y # Elasticsearch is written in Java and runs on the JVM, so install the JDK first
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-5.6.12.rpm # download the official prebuilt rpm package
ansible all -m yum -a 'name=elasticsearch-5.6.12.rpm state=installed' # install the package on all nodes
Three hosts form the cluster; install the Elasticsearch package above on all of them:
192.168.1.195 node1
192.168.1.196 node2
192.168.1.198 node3
vim /etc/elasticsearch/elasticsearch.yml # edit the configuration file
cluster.name: myels # cluster name
node.name: node1 # node name
path.data: /els/data # data storage path
path.logs: /els/logs
network.host: 192.168.1.196 # address used for communication with the outside
http.port: 9200 # listening port
discovery.zen.ping.unicast.hosts: ["node1","node2", "node3"] # nodes probed for cluster discovery; IPs or hostnames
discovery.zen.minimum_master_nodes: 1 # quorum for electing the internal coordinating master (for a three-node cluster, 2 is usually recommended to avoid split-brain); externally ES exposes no single master
# create the storage directories and fix their ownership
ansible all -m shell -a 'mkdir -p /els/{data,logs}'
ansible all -m shell -a 'chown elasticsearch:elasticsearch /els/*'
The service starts up successfully.
Port 9200 serves clients; port 9300 carries intra-cluster traffic.
Test access:
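For example, a quick reachability and health check against any node:
curl http://node1:9200/_cluster/health?pretty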
Query nodes via the RESTful API:
curl -X<VERB> '<PROTOCOL>://<HOST>:<PORT>/<PATH>?<QUERY_STRING>' -d '<BODY>'
<BODY>: the request body, in JSON format;
curl -XPUT http://node1:9200/myindex/students/1 -d ' # create a document
> {"name":"GUO","age":17,"kf":"ab"}'
{"_index":"myindex","_type":"students","_id":"1","_version":1,"result":"created","_shards":{"total":2,"successful":1,"failed":0},"created":true}[root@centos7 els]#
[root@centos7 els]# curl http://node1:9200/myindex/students/1?pretty # fetch the document
{
"_index" : "myindex",
"_type" : "students",
"_id" : "1",
"_version" : 1,
"found" : true,
"_source" : {
"name" : "GUO",
"age" : 17,
"kf" : "ab"
}
}
/_search: search all indices and types;
/INDEX_NAME/_search: search a single named index;
/INDEX1,INDEX2/_search: search several named indices;
/s*/_search: search all indices whose names start with s;
/INDEX_NAME/TYPE_NAME/_search: search a single type within a single index;
The Logstash component:
Set up a web service whose logs we will collect.
Install the JDK and download the Logstash package:
wget https://artifacts.elastic.co/downloads/logstash/logstash-6.8.1.rpm # download the Logstash package
yum install logstash-6.8.1.rpm # install from the local rpm
/usr/share/logstash/bin/logstash # the program binary; put its directory on the PATH:
[root@node2 ~]# vim /etc/profile.d/logstash.sh
export PATH=$PATH:/usr/share/logstash/bin/
### Add a configuration file with input and output plugin sections, plus a filter section for massaging the data
Official documentation: https://www.elastic.co/guide/en/logstash/5.6/index.html
cd /etc/logstash/conf.d/
[root@node2 conf.d]# vim ss.conf # a simple config: interactive stdin input, structured (rubydebug) output
input {
stdin {}
}
output {
stdout {
codec => rubydebug
}
}
Logstash must be given the path to its config file; since I am in the config directory here, the bare filename suffices.
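For example (assuming the ss.conf above; -t only validates the config and exits):
logstash -f ss.conf -t
logstash -f ss.conf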
Events in this format can then be stored in Elasticsearch.
# Modify the config to read from the web server's log instead
input {
file {
path => ["/var/log/httpd/access_log"]
start_position => "beginning"
}
}
output {
stdout {
codec => rubydebug
}
}
Data is ingested successfully. Next, use the filter section to transform it.
Using the grok plugin:
Grok matches fields against patterns; Logstash ships a library of named patterns, referenced by uppercase names, each standing for a regular expression.
%{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}
less /usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-patterns-core-4.1.2/patterns/grok-patterns # the pattern file shipped with Logstash
Configuration examples; match applies a pattern to the message field and splits it into named fields:
1. input { file { path => "/var/log/http.log" } } filter { grok { match => { "message" => "%{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}" } } }
2. input {
file {
path => ["/var/log/httpd/access_log"]
start_position => "beginning"
}
}
filter {
grok {
match => { "message" => "%{HTTPD_COMBINEDLOG}" }
}
}
output {
stdout {
codec => rubydebug
}
}
Splitting the message field apart and keying each extracted value:
filter {
grok {
match => { "message" => "%{HTTPD_COMBINEDLOG}" }
remove_field => "message"
}
}
After the fields are extracted, the original message field is redundant; adding remove_field => "message" inside grok deletes it.
The geoip plugin: the GeoIP filter adds information about the geographic location of IP addresses, based on the MaxMind GeoLite2 database.
wget https://geolite.maxmind.com/download/geoip/database/GeoLite2-City.tar.gz # download the MaxMind database locally
Configuration example, placed in the filter section:
geoip {
source => "clientip" # field containing the IP address to look up
target => "geoip" # field that receives the result
database => "/etc/logstash/maxmind/GeoLite2-City.mmdb" # MaxMind database path; .mmdb is its binary format
}
tar xf GeoLite2-City.tar.gz
mkdir /etc/logstash/maxmind/
[root@centos7 ~]# mv GeoLite2-City_20191015/ /etc/logstash/maxmind/
[root@centos7 ~]# cd /etc/logstash/maxmind/
[root@centos7 maxmind]# ln -s GeoLite2-City_20191015/GeoLite2-City.mmdb ./
This turns clientip into a geoip object carrying timezone, city, country, and other details.
The mutate plugin: the mutate filter performs general mutations on fields; you can rename, remove, replace, and modify fields in events.
filter {
mutate {
split => ["hostname", "."]
add_field => { "shortHostname" => "%{hostname[0]}" }
}
mutate {
rename => ["shortHostname", "hostname"]
}
}
The elasticsearch output plugin:
output { # ship events to Elasticsearch instead of the screen
elasticsearch {
hosts => ["http://node1:9200","http://node2:9200"] # cluster node URLs
index => "logstash-%{+YYYY.MM.dd}" # daily index name
document_type => "httpd_access_logs" # type description
}
}
Check the index status:
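For example, listing every index with its health, document count, and size:
curl node2:9200/_cat/indices?v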
curl node2:9200/logstash-*/_search?q=157.84.* | jq . # search the logstash-* indices for entries matching an IP-address prefix
5. Collect system logs with Filebeat, ship them into ELK, and display them
wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-5.6.8-x86_64.rpm # download and install the Filebeat component
vim filebeat.yml
- /var/log/httpd/access_log
hosts: ["192.168.1.197:5044"] #修改logstash主机地址,
修改logstash的配置文件,将输入替换为beats。端口改为5044,如果不在同一台机器也可加hosts => "IPADDR"
input {
beats {
port => 5044
}
}
logstash -f beatels.conf # start Logstash
Start Filebeat (systemctl start filebeat).
Using Redis as a message queue:
vim /etc/redis.conf # adjust the bind address and set an authentication password
bind 0.0.0.0
requirepass 123456
Modify the Filebeat config to ship to Redis instead of Logstash, along these lines:
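A sketch of the corresponding Filebeat output section (assuming Filebeat 5.x; the password must match the requirepass set above):
output.redis:
  hosts: ["192.168.1.197"]
  password: "123456"
  key: "filebeat" # the Redis list the Logstash input reads below
  db: 0
  datatype: "list"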
Change the Logstash input plugin to redis:
input {
redis {
host => "192.168.1.197"
port => 6379
password => "centos"
db => 0
key => "filebeat"
data_type => "list"
}
}
Before Logstash is started, the log entries Filebeat collects queue up in Redis; once it starts, Logstash pulls the data from Redis and stores it in Elasticsearch.
The data stored in Elasticsearch grows steadily, and its shards are distributed across the other nodes.
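Shard placement across the nodes can be verified with the _cat API, for example:
curl node2:9200/_cat/shards?v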
Using conditional expressions to test fields:
Edit the Filebeat config, tagging each prospector with a custom field:
- input_type: log
  paths:
    - /var/log/httpd/access_log
  fields:
    log_type: access
- input_type: log
  paths:
    - /var/log/httpd/error_log
  fields:
    log_type: errors
Modify the Logstash config so access logs and error logs are stored separately:
input {
redis {
host => "192.168.1.197"
port => 6379
password => "centos"
db => 0
key => "filebeat"
data_type => "list"
}
}
filter {
if [fields][log_type] == "access" {
grok {
match => { "message" => "%{HTTPD_COMBINEDLOG}" }
remove_field => "message"
}
geoip {
source => "clientip"
target => "geoip"
database => "/etc/logstash/maxmind/GeoLite2-City.mmdb"
}
}
}
output {
if [fields][log_type] == "access" {
elasticsearch {
hosts => ["http://node1:9200/","http://node2:9200/","http://node3:9200"]
index => "logstash-%{+YYYY.MM.dd}"
document_type => "httpd_access_logs"
}
} else {
elasticsearch {
hosts => ["http://node1:9200/","http://node2:9200/","http://node3:9200"]
index => "logstash-%{+YYYY.MM.dd}"
document_type => "httpd_error_logs"
}
}
}
6. Collect nginx, Tomcat, and PHP error logs, and chart the PHP error log in Kibana
wget https://artifacts.elastic.co/downloads/kibana/kibana-5.6.8-x86_64.rpm # download and install the Kibana web UI
Modify the basic settings in its configuration file.
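A sketch of the relevant kibana.yml settings (the listen address and ES URL are assumptions for this lab):
server.host: "0.0.0.0" # listen on all interfaces
server.port: 5601 # default Kibana port
elasticsearch.url: "http://node1:9200" # any cluster node works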
1 nginx log format configuration
[root@elk-5-10 config]# cd /usr/local/nginx/conf/
[root@elk-5-10 conf]# vi nginx.conf
log_format access '$http_host $remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" $http_x_forwarded_for';
2 Sample log data
2.1 Access log:
ss00.xxxxxx.me 150.138.154.157 - - [25/Jul/2017:03:02:35 +0800] "GET /csm/7_527.html HTTP/1.1" 304 0 "http://www.twww.com/tetris/page/64000159042/?ad_id=62928537191&cid=62928889880&req_id=0" "Mozilla/5.0 (Linux; Android 6.0.1; Redmi 4X Build/MMB29M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/55.0.2883.91 Mobile Safari/537.36" 183.204.183.118
2.2 Error log:
2017/07/25 15:23:29 [error] 24881#0: *33 open() "/usr/local/nginx-1.12.0/html/favicon.ico" failed (2: No such file or directory), client: 192.168.1.103, server: www.zyb.com, request: "GET /favicon.ico HTTP/1.1", host: "www.zyb.com"
3 logstash configuration file
input {
file {
type => "nginx-access"
path => "/data/weixin.sys.mingyaohui.com.log"
start_position => "beginning"
}
file {
type => "nginx-error"
path => "/data/nginx_error.log"
start_position => "beginning"
}
}
filter {
if [type] == "nginx-access" {
grok {
match => [ "message", "%{IPORHOST:clientip} %{NGUSER:ident} %{NGUSER:auth} \[%{HTTPDATE:timestamp}\] \"%{WORD:verb} %{URIPATHPARAM:request} HTTP/%{NUMBER:httpversion}\" %{NUMBER:response} (?:%{NUMBER:bytes}|-) (?:\"(?:%{URI:referrer}|-)\"|%{QS:referrer}) %{QS:agent} %{IPORHOST:forwordip}" ]
}
} else if [type] == "nginx-error" {
grok {
match => [ "message", "(?<timestamp>%{YEAR}[./-]%{MONTHNUM}[./-]%{MONTHDAY}[- ]%{TIME}) %{POSINT:pid}#%{NUMBER}: %{GREEDYDATA:errormessage}(?:, client: (?<clientip>%{IP}|%{HOSTNAME}))(?:, server: %{IPORHOST:server}?)(?:, request: %{QS:request})?(?:, upstream: (?<upstream>\"%{URI}\"|%{QS}))?(?:, host: %{QS:request_host})?(?:, referrer: \"%{URI:referrer}\")?" ]
}
}
# add geo-location info
geoip {
source => "clientip"
}
}
output {
elasticsearch {
hosts => ["10.0.0.10"]
index => "%{type}-%{+YYYY.MM.dd}"
}
}
4 Kibana analysis charts
References
https://grokdebug.herokuapp.com/patterns#
https://github.com/adventure-yunfei/ELK-for-nginx-log/blob/master/logstash.conf
Source: https://blog.csdn.net/zyb378747350/article/details/76084840
tomcat:
Modify the Tomcat access log format; here I change it to a JSON string, as sketched below.
Configure Logstash with the Tomcat access log file as input and Redis as output. Logstash is written in Java, but its configuration format feels like Ruby syntax; the logs are collected by Filebeat and transformed by Logstash.
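One common way to get JSON access logs (a sketch for server.xml; the JSON field names are my own choice) is to rewrite the AccessLogValve pattern, escaping the quotes as &quot; inside the XML attribute:
<Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
       prefix="tomcat_access_log" suffix=".log"
       pattern="{&quot;clientip&quot;:&quot;%h&quot;,&quot;timestamp&quot;:&quot;%t&quot;,&quot;request&quot;:&quot;%r&quot;,&quot;status&quot;:&quot;%s&quot;,&quot;bytes&quot;:&quot;%b&quot;}" />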
Collecting PHP logs:
filebeat.prospectors:
- input_type: log
paths:
- /alidata1/www/timecash22/api3/application/logs/api3/2017/*/*.php
document_type: api3_json
multiline.pattern: '^[0-9]{4}-[0-9]{2}-[0-9]{2}'
multiline.negate: true
multiline.match: after
- input_type: log
paths:
- /alidata1/www/timecash22/api3/application/logs/2017/*/*.php
document_type: api3_error_log
multiline.pattern: '^[0-9]{4}-[0-9]{2}-[0-9]{2}'
multiline.negate: true
multiline.match: after
- input_type: log
paths:
- /alidata1/www/timecash22/wx/application/logs/2017/*/*.php
document_type: wx_error_log # whatever this value is becomes the key in Redis
multiline.pattern: '^[0-9]{4}-[0-9]{2}-[0-9]{2}'
multiline.negate: true
multiline.match: after
output.redis:
hosts: ["10.45.40.112"]
password: "timecash2016"
#key: "api3_json"
keys:
- key: "%{[type]}"
Configure the Logstash config file:
input {
redis {
host => "redis的iP"
password => "redis的密码"
port => 6379
key => "api3_json"
type => "api3_json"
data_type => "list"
}
}
filter {
date {
match => [ "timestamp" , "YYYY-MM-dd HH:mm:ss" ] #时间,从kibana上显示每条日志的时间
}
}
output {
elasticsearch {
hosts => "elasticsearch的IP"
#protocol =>"http"
index=>"api3_json_%{+YYYY.MM.dd}" #存到es里索引的名称
document_type=>"api3_json"
}
stdout{
codec => rubydebug
}
}