I. Deploy the ES Cluster
1. Install the JDK
1) Upload the JDK package:
[root@iZuf6e8932c78hsjvmunulZ ~]# ls
jdk-8u211-linux-x64.tar.gz
[root@iZuf6e8932c78hsjvmunulZ ~]# tar xzf jdk-8u211-linux-x64.tar.gz -C /usr/local/
[root@iZuf6e8932c78hsjvmunulZ ~]# cd /usr/local/
[root@iZuf6e8932c78hsjvmunulZ local]# ls
aegis bin etc games include jdk1.8.0_211 lib lib64 libexec sbin share src
[root@iZuf6e8932c78hsjvmunulZ local]# mv jdk1.8.0_211/ java
2) Set the environment variables:
[root@iZuf6e8932c78hsjvmunulZ local]# vim /etc/profile.d/java.sh
JAVA_HOME=/usr/local/java
PATH=$JAVA_HOME/bin:$PATH
export JAVA_HOME PATH
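Apply the script in the current shell and confirm the JDK is on the PATH (the same check is shown again in the Kafka section):
[root@iZuf6e8932c78hsjvmunulZ local]# source /etc/profile.d/java.sh
[root@iZuf6e8932c78hsjvmunulZ local]# java -version
java version "1.8.0_211"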
2. Install and configure ES
1) Create a user:
[root@iZuf6e8932c78hsjvmunulZ ~]# useradd elsearch
2) Extract the package and edit the configuration:
[root@iZuf6e8932c78hsjvmunulZ ~]# tar xzf elasticsearch-6.5.4.tar.gz -C /usr/local/
[root@iZuf6e8932c78hsjvmunulZ ~]# vim /usr/local/elasticsearch-6.5.4/config/elasticsearch.yml
cluster.name: bjbpe01-elk
node.name: elk01
node.master: true
node.data: true
path.data: /data/elasticsearch/data
path.logs: /data/elasticsearch/logs
bootstrap.memory_lock: false
bootstrap.system_call_filter: false
network.host: 0.0.0.0
http.port: 9200
discovery.zen.ping.unicast.hosts: ["172.19.136.69", "172.19.136.70"]
discovery.zen.minimum_master_nodes: 2
discovery.zen.ping_timeout: 150s
discovery.zen.fd.ping_retries: 10
client.transport.ping_timeout: 60s
http.cors.enabled: true
http.cors.allow-origin: "*"
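Note: discovery.zen.minimum_master_nodes should follow the quorum formula (master-eligible nodes / 2) + 1 to avoid split brain; with the two master-eligible hosts listed above, (2 / 2) + 1 = 2 matches the setting. On the other node, typically only node.name needs to change (e.g. to elk02).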
3. Set the heap size
Keep the minimum heap (Xms) and the maximum heap (Xmx) equal, so the JVM does not resize the heap at runtime.
If the machine has plenty of memory, cap both values at 31G: beyond roughly 32G the JVM loses compressed object pointers, so larger heaps can actually perform worse.
Do not give the heap more than 50% of total system memory.
[root@iZuf6e8932c78hsjvmunulZ ~]# sed -i 's/-Xms1g/-Xms2g/' /usr/local/elasticsearch-6.5.4/config/jvm.options
[root@iZuf6e8932c78hsjvmunulZ ~]# sed -i 's/-Xmx1g/-Xmx2g/' /usr/local/elasticsearch-6.5.4/config/jvm.options
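A quick way to confirm the substitution took effect:
[root@iZuf6e8932c78hsjvmunulZ ~]# grep -E "^-Xm[sx]" /usr/local/elasticsearch-6.5.4/config/jvm.options
-Xms2g
-Xmx2g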
4. Create the ES data and log directories and set their ownership
[root@iZuf6e8932c78hsjvmunulZ ~]# mkdir -p /data/elasticsearch/{data,logs}
[root@iZuf6e8932c78hsjvmunulZ ~]# chown -R elsearch:elsearch /data/elasticsearch
[root@iZuf6e8932c78hsjvmunulZ ~]# chown -R elsearch:elsearch /usr/local/elasticsearch-6.5.4
5. System tuning
Raise the maximum number of open files:
[root@iZuf6e8932c78hsjvmunumZ ~]# echo "* - nofile 65536" >> /etc/security/limits.conf
Raise the maximum number of processes:
[root@iZuf6e8932c78hsjvmunumZ ~]# echo "* soft nproc 31717" >> /etc/security/limits.conf
Raise the maximum number of memory map areas:
[root@iZuf6e8932c78hsjvmunumZ ~]# echo "vm.max_map_count=262144" >> /etc/sysctl.conf
[root@iZuf6e8932c78hsjvmunumZ ~]# sysctl -p
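ES refuses to run as root, so start it on each ES node as the elsearch user created earlier (-d daemonizes the process); a minimal sketch:
[root@iZuf6e8932c78hsjvmunulZ ~]# su - elsearch -c "/usr/local/elasticsearch-6.5.4/bin/elasticsearch -d"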
Test: open http://101.132.107.146:9200/ in a browser.
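The same check from the shell, if no browser is handy:
[root@iZuf6e8932c78hsjvmunulZ ~]# curl http://127.0.0.1:9200/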
6. Install and configure the Head monitoring plugin
1) Install Node.js
[root@iZuf6e8932c78hsjvmunulZ ~]# ls
elasticsearch-6.5.4.tar.gz jdk-8u211-linux-x64.tar.gz
elasticsearch-head-master.zip node-v4.4.7-linux-x64.tar.gz
[root@iZuf6e8932c78hsjvmunulZ ~]# tar xzf node-v4.4.7-linux-x64.tar.gz -C /usr/local/
[root@iZuf6e8932c78hsjvmunulZ ~]# vim /etc/profile
NODE_HOME=/usr/local/node-v4.4.7-linux-x64
PATH=$NODE_HOME/bin:$PATH
export NODE_HOME PATH
[root@iZuf6e8932c78hsjvmunulZ ~]# source /etc/profile
[root@iZuf6e8932c78hsjvmunulZ ~]# node --version
v4.4.7
2) Unpack the head plugin
[root@iZuf6e8932c78hsjvmunulZ ~]# ls
elasticsearch-6.5.4.tar.gz node-v4.4.7-linux-x64.tar.gz
elasticsearch-head-master.zip phantomjs-2.1.1-linux-x86_64.tar.bz2
jdk-8u211-linux-x64.tar.gz
[root@iZuf6e8932c78hsjvmunulZ ~]# yum -y install unzip
[root@iZuf6e8932c78hsjvmunulZ ~]# unzip -d /usr/local/ elasticsearch-head-master.zip
3) Install grunt
[root@iZuf6e8932c78hsjvmunulZ ~]# cd /usr/local/elasticsearch-head-master/
[root@iZuf6e8932c78hsjvmunulZ elasticsearch-head-master]# yum -y install npm
[root@iZuf6e8932c78hsjvmunulZ elasticsearch-head-master]# npm install -g grunt-cli
[root@iZuf6e8932c78hsjvmunulZ elasticsearch-head-master]# grunt --version # check the grunt version
grunt-cli v1.3.2
4) Modify the head source
vim /usr/local/elasticsearch-head-master/Gruntfile.js +95
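The edit at line 95 typically adds hostname: '*' to the connect server options so head listens on all interfaces instead of only localhost; after the change the block looks roughly like this:
connect: {
        server: {
                options: {
                        hostname: '*',
                        port: 9100,
                        base: '.',
                        keepalive: true
                }
        }
}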
5) Unpack the PhantomJS archive that head needs
[root@iZuf6e8932c78hsjvmunulZ ~]# ls
elasticsearch-6.5.4.tar.gz node-v4.4.7-linux-x64.tar.gz
elasticsearch-head-master.zip phantomjs-2.1.1-linux-x86_64.tar.bz2
jdk-8u211-linux-x64.tar.gz
[root@iZuf6e8932c78hsjvmunulZ ~]# yum -y install bzip2
[root@iZuf6e8932c78hsjvmunulZ ~]# tar -jxf phantomjs-2.1.1-linux-x86_64.tar.bz2 -C /tmp/
6) Run head
[root@iZuf6e8932c78hsjvmunulZ ~]# yum -y install fontconfig
[root@iZuf6e8932c78hsjvmunulZ ~]# cd /tmp/phantomjs-2.1.1-linux-x86_64/bin/
[root@iZuf6e8932c78hsjvmunulZ bin]# mv phantomjs /usr/bin/
[root@iZuf6e8932c78hsjvmunulZ bin]# cd /usr/local/elasticsearch-head-master/
[root@iZuf6e8932c78hsjvmunulZ elasticsearch-head-master]# npm install
[root@iZuf6e8932c78hsjvmunulZ elasticsearch-head-master]# nohup grunt server &
7) Test: open http://101.132.107.146:9100/ in a browser.
II. Deploy Kibana
1. Install and configure
[root@iZuf6e8932c78hsjvmunumZ ~]# ls
elasticsearch-6.5.4.tar.gz jdk-8u211-linux-x64.tar.gz kibana-6.5.4-linux-x86_64.tar.gz
[root@iZuf6e8932c78hsjvmunumZ ~]# tar xzf kibana-6.5.4-linux-x86_64.tar.gz -C /usr/local/
[root@iZuf6e8932c78hsjvmunumZ ~]# vim /usr/local/kibana-6.5.4-linux-x86_64/config/kibana.yml
server.port: 5601
server.host: "172.19.136.70"
elasticsearch.url: "http://172.19.136.70:9200"
kibana.index: ".kibana"
2. Start
[root@iZuf6e8932c78hsjvmunumZ ~]# cd /usr/local/kibana-6.5.4-linux-x86_64/
[root@iZuf6e8932c78hsjvmunumZ kibana-6.5.4-linux-x86_64]# nohup ./bin/kibana &
[root@iZuf6e8932c78hsjvmunumZ kibana-6.5.4-linux-x86_64]# ss -lntp
State Recv-Q Send-Q Local Address:Port Peer Address:Port
LISTEN 0 128 *:9200 *:* users:(("java",pid=21948,fd=215))
LISTEN 0 128 *:9300 *:* users:(("java",pid=21948,fd=200))
LISTEN 0 128 *:22 *:* users:(("sshd",pid=1209,fd=3))
LISTEN 0 128 172.19.136.70:5601 *:* users:(("node",pid=25494,fd=10))
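A quick status probe against the Kibana port from the shell (any HTTP status code indicates the server is answering):
[root@iZuf6e8932c78hsjvmunumZ ~]# curl -s -o /dev/null -w "%{http_code}\n" http://172.19.136.70:5601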
III. Deploy Kafka
1. Install the JDK
[root@iZuf68bqc6wjofphl70uonZ ~]# ls
jdk-8u211-linux-x64.tar.gz kafka_2.11-2.0.0.tgz
[root@iZuf68bqc6wjofphl70uonZ ~]# tar xzf jdk-8u211-linux-x64.tar.gz -C /usr/local/
[root@iZuf68bqc6wjofphl70uonZ ~]# cd /usr/local/
[root@iZuf68bqc6wjofphl70uonZ local]# mv jdk1.8.0_211/ java
[root@iZuf68bqc6wjofphl70uonZ local]# vim /etc/profile.d/java.sh
JAVA_HOME=/usr/local/java
PATH=$JAVA_HOME/bin:$PATH
export JAVA_HOME PATH
[root@iZuf68bqc6wjofphl70uonZ local]# source /etc/profile.d/java.sh
[root@iZuf68bqc6wjofphl70uonZ local]# java -version
java version "1.8.0_211"
Java(TM) SE Runtime Environment (build 1.8.0_211-b12)
Java HotSpot(TM) 64-Bit Server VM (build 25.211-b12, mixed mode)
2. Install and configure ZooKeeper
Kafka depends on ZooKeeper to run. The official Kafka tarball already bundles ZK, so there is no need to download it separately.
[root@iZuf68bqc6wjofphl70uonZ ~]# tar zxf kafka_2.11-2.0.0.tgz -C /usr/local/
[root@iZuf68bqc6wjofphl70uonZ ~]# sed -i 's/^[^#]/#&/' /usr/local/kafka_2.11-2.0.0/config/zookeeper.properties
[root@iZuf68bqc6wjofphl70uonZ ~]# vim /usr/local/kafka_2.11-2.0.0/config/zookeeper.properties
dataDir=/opt/data/zookeeper/data
dataLogDir=/opt/data/zookeeper/logs
clientPort=2181
tickTime=2000
initLimit=20
syncLimit=10
server.1=172.19.136.71:2888:3888
server.2=172.19.136.72:2888:3888
server.3=172.19.136.73:2888:3888
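In the server.N entries, the first port (2888) is used by followers to connect to the leader and the second (3888) is used for leader election; N must match the value written to each node's myid file, as done for node 1 below.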
[root@iZuf68bqc6wjofphl70uonZ ~]# mkdir -p /opt/data/zookeeper/{data,logs}
[root@iZuf68bqc6wjofphl70uonZ ~]# echo 1 > /opt/data/zookeeper/data/myid
3. Configure Kafka
[root@iZuf68bqc6wjofphl70uonZ ~]# sed -i 's/^[^#]/#&/' /usr/local/kafka_2.11-2.0.0/config/server.properties
[root@iZuf68bqc6wjofphl70uonZ ~]# vim /usr/local/kafka_2.11-2.0.0/config/server.properties
broker.id=1
listeners=PLAINTEXT://172.19.136.71:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/opt/data/kafka/logs
num.partitions=6
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=2
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=536870912
log.retention.check.interval.ms=300000
zookeeper.connect=172.19.136.71:2181,172.19.136.72:2181,172.19.136.73:2181
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
[root@iZuf68bqc6wjofphl70uonZ ~]# mkdir -p /opt/data/kafka/logs
4. Configure the other nodes
Simply copy the configured Kafka directory to the other nodes, then change ZK's myid and Kafka's broker.id and listeners on each node, as sketched below.
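A minimal sketch for the second node, 172.19.136.72 (the third node follows the same pattern with 3):
# on 172.19.136.72
echo 2 > /opt/data/zookeeper/data/myid
sed -i 's/^broker.id=1/broker.id=2/' /usr/local/kafka_2.11-2.0.0/config/server.properties
sed -i 's/172.19.136.71:9092/172.19.136.72:9092/' /usr/local/kafka_2.11-2.0.0/config/server.properties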
5. Start and verify the ZK cluster
1) Start
[root@iZuf68bqc6wjofphl70uooZ ~]# cd /usr/local/kafka_2.11-2.0.0/ && nohup bin/zookeeper-server-start.sh config/zookeeper.properties &
2) Verify
[root@iZuf68bqc6wjofphl70uonZ ~]# echo conf | nc 127.0.0.1 2181
clientPort=2181
dataDir=/opt/data/zookeeper/data/version-2
dataLogDir=/opt/data/zookeeper/logs/version-2
tickTime=2000
maxClientCnxns=60
minSessionTimeout=4000
maxSessionTimeout=40000
serverId=1
initLimit=20
syncLimit=10
electionAlg=3
electionPort=3888
quorumPort=2888
peerType=0
[root@iZuf68bqc6wjofphl70uonZ ~]# echo stat | nc 127.0.0.1 2181
Zookeeper version: 3.4.13-2d71af4dbe22557fda74f9a9b4309b15a7487f03, built on 06/29/2018 00:39 GMT
Clients:
/127.0.0.1:38412[0](queued=0,recved=1,sent=0)
Latency min/avg/max: 0/0/0
Received: 2
Sent: 1
Connections: 1
Outstanding: 0
Zxid: 0x0
Mode: follower
Node count: 4
Check the port:
[root@iZuf68bqc6wjofphl70uomZ ~]# lsof -i:2181
COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
java 27255 root 95u IPv4 1178057 0t0 TCP *:eforward (LISTEN)
6. Start and verify Kafka
1) Start (run on each of the three nodes in turn)
[root@iZuf68bqc6wjofphl70uonZ ~]# cd /usr/local/kafka_2.11-2.0.0/
[root@iZuf68bqc6wjofphl70uonZ kafka_2.11-2.0.0]# nohup bin/kafka-server-start.sh config/server.properties &
2) Verify
Create a topic on 172.19.136.71:
[root@iZuf68bqc6wjofphl70uonZ kafka_2.11-2.0.0]# bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic llf123
Created topic "llf123".
List the topic from 172.19.136.72:
[root@iZuf68bqc6wjofphl70uomZ kafka_2.11-2.0.0]# bin/kafka-topics.sh --zookeeper 172.19.136.72:2181 --list
llf123
List the topic from 172.19.136.73:
[root@iZuf68bqc6wjofphl70uooZ kafka_2.11-2.0.0]# bin/kafka-topics.sh --zookeeper 172.19.136.73:2181 --list
llf123
Simulate message production and consumption
1) Send a message to 172.19.136.71:
[root@iZuf68bqc6wjofphl70uonZ kafka_2.11-2.0.0]# bin/kafka-console-producer.sh --broker-list 172.19.136.71:9092 --topic llf123
>Hello World!
2) Receive the message on 172.19.136.72:
[root@iZuf68bqc6wjofphl70uomZ kafka_2.11-2.0.0]# bin/kafka-console-consumer.sh --bootstrap-server 172.19.136.72:9092 --topic llf123 --from-beginning
Hello World!
IV. Deploy Logstash
- Install
[root@iZuf6e8932c78hsjvmunukZ ~]# ls
elasticsearch-6.5.4.tar.gz jdk-8u211-linux-x64.tar.gz logstash-6.5.4.tar.gz
[root@iZuf6e8932c78hsjvmunukZ ~]# tar xzf logstash-6.5.4.tar.gz -C /usr/local/
- Configure
[root@iZuf6e8932c78hsjvmunukZ ~]# mkdir -p /usr/local/logstash-6.5.4/etc/conf.d
[root@iZuf6e8932c78hsjvmunukZ ~]# vim /usr/local/logstash-6.5.4/etc/conf.d/input.conf
input {
  kafka {
    type => "audit_log"
    codec => "json"
    topics => "nginx"
    decorate_events => true
    bootstrap_servers => "172.19.136.71:9092,172.19.136.72:9092,172.19.136.73:9092"
  }
}
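decorate_events => true attaches Kafka metadata (topic, partition, offset) to each event under [@metadata][kafka], and codec => "json" parses the JSON documents that Filebeat (configured in the next section) ships into the topic.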
[root@iZuf6e8932c78hsjvmunukZ ~]# vim /usr/local/logstash-6.5.4/etc/conf.d/output.conf
output {
  if [type] == "audit_log" {
    elasticsearch {
      hosts => ["172.19.136.68","172.19.136.69","172.19.136.70"]
      index => 'logstash-audit_log-%{+YYYY-MM-dd}'
    }
  }
}
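Optionally validate the pipeline files before starting; --config.test_and_exit checks the configuration syntax and exits:
[root@iZuf6e8932c78hsjvmunukZ ~]# /usr/local/logstash-6.5.4/bin/logstash -f /usr/local/logstash-6.5.4/etc/conf.d/ --config.test_and_exit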
- Start
[root@iZuf6e8932c78hsjvmunukZ logstash-6.5.4]# cd /usr/local/logstash-6.5.4/
[root@iZuf6e8932c78hsjvmunukZ logstash-6.5.4]# nohup bin/logstash -f etc/conf.d/ --config.reload.automatic &
V. Deploy Filebeat
- Install
[root@iZuf68bqc6wjofphl70uooZ ~]# ls
filebeat-6.5.4-linux-x86_64.tar.gz jdk-8u211-linux-x64.tar.gz kafka_2.11-2.0.0.tgz
[root@iZuf68bqc6wjofphl70uooZ ~]# tar xzf filebeat-6.5.4-linux-x86_64.tar.gz
[root@iZuf68bqc6wjofphl70uooZ ~]# mv filebeat-6.5.4-linux-x86_64 filebeat
- Configure
[root@iZuf68bqc6wjofphl70uooZ ~]# cd filebeat
[root@iZuf68bqc6wjofphl70uooZ filebeat]# vim filebeat.yml
filebeat.prospectors:
- input_type: log
  paths:
    - /var/log/nginx/access.log
  json.keys_under_root: true
  json.add_error_key: true
  json.message_key: log
output.kafka:
  hosts: ["172.19.136.71:9092","172.19.136.72:9092","172.19.136.73:9092"]
  topic: 'nginx'
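Note: filebeat.prospectors and input_type are deprecated aliases in Filebeat 6.x (the current names are filebeat.inputs and type); the form above should still work in 6.5.4, just with a deprecation warning. The config and the Kafka connection can be checked before starting:
[root@iZuf68bqc6wjofphl70uooZ filebeat]# ./filebeat test config -c filebeat.yml
[root@iZuf68bqc6wjofphl70uooZ filebeat]# ./filebeat test output -c filebeat.yml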
- Start
[root@iZuf68bqc6wjofphl70uooZ filebeat]# nohup ./filebeat -e -c filebeat.yml &
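To confirm log lines are reaching Kafka, consume the nginx topic from one of the brokers, reusing the console consumer from the Kafka section:
[root@iZuf68bqc6wjofphl70uonZ kafka_2.11-2.0.0]# bin/kafka-console-consumer.sh --bootstrap-server 172.19.136.71:9092 --topic nginx --from-beginning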
For testing Logstash interactively:
First make sure no other Logstash instance is already running, to avoid a port-in-use error:
bin/logstash -e 'input { stdin{} } output { elasticsearch { hosts => ["192.168.1.160:9200"]} }'