在前次文章(https://www.jianshu.com/p/c83921f848ef)中介绍了NFS+keepalived+Sersync搭建nfs群集实现k8s存储的过程,但在使用中发现,遇到大量频繁的文件更新时,Sersync和sync这种同步方案性能不够,所以这次又尝试了使用drbd方案。
drbd的介绍请自行百度,不作详述。
原理
本方案为 NFS 的高可用方案:两台文件服务器分别为 Master(192.168.53.148)和
Slave(192.168.53.149),使用 keepalived 生成一个虚拟 IP(192.168.53.150),使用
nfs+drbd+keepalived 进行 Master 与 Slave 之间文件相互同步,确保高可用。
1、添加虚拟机k8s-nfs-Master并配置
1.1 添加虚拟机
从Redhat7.4 Template(centos机器内网安装drbd不成功)模板机克隆到虚拟机k8s-nfs-Master,ip修改为192.168.53.148,并关闭防火墙
1.2 配置NFS服务
[root@k8s-nfs-Master ~]# rpm -qa nfs-utils rpcbind # 确认未安装nfs服务
[root@k8s-nfs-Master ~]# yum install -y nfs-utils rpcbind
已加载插件:fastestmirror.......完毕!
[root@k8s-nfs-Master ~]# systemctl start nfs
[root@k8s-nfs-Master ~]# systemctl status nfs
......
5月 18 10:47:46 k8s-nfsServer systemd[1]: Starting NFS server and services...
5月 18 10:47:47 k8s-nfsServer systemd[1]: Started NFS server and services.
[root@k8s-nfs-Master ~]# systemctl enable nfs # 设置开机启动
Created symlink from /etc/systemd/system/multi-user.target.wants/nfs-server.service to /usr/lib/systemd/system/nfs-server.service.
[root@k8s-nfs-Master ~]# systemctl start rpcbind
[root@k8s-nfs-Master ~]# systemctl status rpcbind
......
5月 18 10:47:36 k8s-nfsServer systemd[1]: Starting RPC bind service...
5月 18 10:47:36 k8s-nfsServer systemd[1]: Started RPC bind service.
[root@k8s-nfs-Master ~]# systemctl enable rpcbind
[root@k8s-nfs-Master ~]# mkdir /nfs #创建共享目录
[root@k8s-nfs-Master ~]# echo '/nfs 192.168.53.150/24(rw,sync,all_squash)' >/etc/exports # 注意:路径与客户端网段必须在同一行,否则/etc/exports格式非法
[root@k8s-nfs-Master ~]# systemctl restart rpcbind
[root@k8s-nfs-Master ~]# systemctl restart nfs
1.3 配置drbd(文件同步)
采用单主模式:典型的高可靠性集群方案。DRBD负责接收数据,把数据写到本地磁盘,然后发送给另一个主机,另一个主机再将数据存到自己的磁盘中。
1.3.1 配置hosts文件
[root@k8s-nfs-Master ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
k8s-nfs-Master
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
k8s-nfs-Master
192.168.53.148 k8s-nfs-Master
192.168.53.149 k8s-nfs-Slave
1.3.2 安装相关支撑程序
到 http://oss.linbit.com/drbd 下载drbd-9.0.19-1.tar.gz、drbd-utils-9.12.2.tar.gz,再将drbd-9.0.19-1.tar.gz、drbd-utils-9.12.2.tar.gz上传到虚拟机,再装一些支撑软件
[root@k8s-nfs-Master ~]# yum -y install gcc make automake autoconf kernel-devel
kernel-headers flex libxslt libxslt-devel asciidoc
1.3.3 编译drbd-utils
[root@k8s-nfs-Master ~]# tar zxvf drbd-utils-9.12.2.tar.gz
[root@k8s-nfs-Master ~]# cd drbd-utils-9.12.2
[root@k8s-nfs-Master drbd-utils-9.12.2]# ./autogen.sh
[root@k8s-nfs-Master drbd-utils-9.12.2]# ./configure --prefix=/usr/ --localstatedir=/var --sysconfdir=/etc
[root@k8s-nfs-Master drbd-utils-9.12.2]# make KDIR=/usr/src/kernels/$(uname -r)
[root@k8s-nfs-Master drbd-utils-9.12.2]# make install
[root@k8s-nfs-Master drbd-utils-9.12.2]# cp scripts/drbd /etc/init.d/. ##复制启动脚本
1.3.4 编译drbd
[root@k8s-nfs-Master ~]# tar -zxvf drbd-9.0.19-1.tar.gz
# cd drbd-9.0.19-1
# make KDIR=/usr/src/kernels/$(uname -r)
# make install
# make -C drbd install
1.3.5 安装drbd模块
# cd drbd-9.0.19-1/drbd
# cp drbd.ko /lib/modules/$(uname -r)/kernel/lib/
# cp drbd_transport_tcp.ko /lib/modules/$(uname -r)/kernel/lib/
# depmod ##更新内核包关联文件
# modprobe drbd ##安装内核模块
# modprobe drbd_transport_tcp
# lsmod |grep drbd ##查看模块是否加载成功
drbd_transport_tcp 22144 1
drbd 555120 2 drbd_transport_tcp
libcrc32c 12644 4 xfs,drbd,ip_vs,nf_conntrack
1.3.6 查看drbd版本及路径
# cat /proc/drbd
version: 9.0.19-1 (api:2/proto:86-115)
GIT-hash: 6f5fa5d348a99e5eeb09d83c49853d72e614fd07 build by root@k8s-nfs-slave,
2020-06-29 02:58:39
Transports (api:16): tcp (9.0.19-1)
1.3.7 添加新磁盘并分区
关闭虚拟机,为其添加一块200G的新硬盘再上电
[root@k8s-nfs-Master ~]# fdisk -l # 新硬盘已识别
Disk /dev/sdb: 214.7 GB, 214748364800 bytes, 419430400 sectors
.......
Disk identifier: 0xcb65f098
Device Boot Start End Blocks Id System
[root@k8s-nfs-Master ~]# fdisk /dev/sdb # 开始分区
# 依次:n,n,p,回车,回车,w
[root@k8s-nfs-Master ~]# partprobe
[root@k8s-nfs-Master ~]# fdisk -l
1.3.8 配置drbd资源文件
[root@k8s-nfs-Master ~]# vi /etc/drbd.d/drbd.res
resource r1 { #定义资源组名称为r1
net {
cram-hmac-alg sha1; #使用sha1加密
shared-secret "123456"; #生成加密密钥
}
volume 1 { #定义卷组
device /dev/drbd1; #逻辑设备路径(建立块设备)
disk /dev/sdb1; #物理设备路径(用于复制的分区)
meta-disk internal; #meta data信息存放的方式,该处为内部存储,即和真实数据放在一起存储
}
on k8s-nfs-Master { #on开头,后面是主机名称(需与uname -n相同)
node-id 0;
address 192.168.53.148:7000; #设置drbd监听地址和端口
}
on k8s-nfs-Slave {
node-id 1;
address 192.168.53.149:7000;
}
connection {
host k8s-nfs-Master port 7000;
host k8s-nfs-Slave port 7000;
net {
protocol C; #使用drbd的第三种同步协议,表示收到对方主机写入确认后,则认为写入完成
}
}
}
1.3.9 配置资源
# dd if=/dev/zero of=/dev/sdb1 bs=1M count=100 ##用指定大小的块拷贝一个文件
# drbdadm -c /etc/drbd.conf create-md all ##创建设备元数据
initializing activity log
......
success
# drbdadm create-md r1 ##创建r1设备元
You want me to create a v09 style flexible-size internal meta data block.
......
New drbd meta data block successfully created.
# drbdadm up r1 ##启用该资源
Device '1' is configured!
Command 'drbdmeta 1 v09 /dev/sdb1 internal apply-al' terminated with exit code
20
# /root/drbd-utils-9.12.2/scripts/drbd-overview.pl ##查看状态
...... 1:r1/1 Connected(1*) Second/Primar UpToDa/UpToD
1.3.10 格式化新分区并挂载
# mkfs.ext4 /dev/drbd1 ##格式化/dev/drbd1
# mount /dev/drbd1 /nfs ##挂载
1.4 安装并配置Keepalived
[root@k8s-nfs-Master ~]# yum -y install keepalived.x86_64
已加载插件:fastestmirror
......
完毕!
[root@k8s-nfs-Master ~]# rm -f /etc/keepalived/keepalived.conf
# NOTE: delimiter quoted ('EOF') so nothing in the body is expanded by the
# current shell before being written to the file.
[root@k8s-nfs-Master ~]# cat << 'EOF' | tee /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id NFS-Master
}
vrrp_script chk_nfs
{
script "/etc/keepalived/nfs_check.sh" #监控脚本
interval 2 # 2秒一次
weight -20 # keepalived部署了两台所以设为20,如果三台就设为30
}
vrrp_instance VI_1 {
state BACKUP # 两台主机都设为backup非抢占模式
interface ens192
virtual_router_id 51
priority 100
advert_int 1
nopreempt # 设置为非抢占模式必须要该参数
authentication {
auth_type PASS
auth_pass abcdef
}
track_script {
chk_nfs
}
notify_stop /etc/keepalived/notify_stop.sh # keepalived停服时调用
virtual_ipaddress {
192.168.53.150/24
}
}
EOF
# 配置监控脚本
# NOTE: delimiter MUST be quoted ('EOF') — the original unquoted heredoc let the
# current shell expand $1, $?, $vip, $time and run every `…` substitution at
# write time, producing a broken script on disk.
[root@k8s-nfs-Master ~]# cat << 'EOF' | tee /etc/keepalived/nfs_check.sh
#!/bin/bash
# keepalived health-check script (vrrp_script chk_nfs), run every 2s.
# VIP holder : keep NFS running and DRBD Primary + mounted on /nfs;
#              if NFS cannot be revived, demote DRBD and bounce keepalived
#              so the peer takes over the VIP.
# Backup node: NFS must be stopped and DRBD must be Secondary/unmounted.
log=/tmp/nfs-chk.log
[ -f "$log" ] || touch "$log"    # first run: du below fails on a missing file
# 日志文件大于5M就只保留最后50行
[ "$(du -m "$log" | awk '{print $1}')" -gt 5 ] && tail -50 "$log" >/tmp/nfs-tmp && mv /tmp/nfs-tmp "$log"
vip=$(ip a | grep -c 53.150)
if [ "$vip" -eq 1 ]; then    # 主keepalived机器检查
    if ! service nfs status &>/dev/null; then    # 如果服务状态不正常,先尝试重启服务
        time=$(date "+%F %H:%M:%S")
        echo -e "$time ------主机NFS服务故障,重启之!------\n" >>"$log"
        service nfs start &>/dev/null
    fi
    nfsStatus=$(ps -C nfsd --no-header | wc -l)
    if [ "$nfsStatus" -eq 0 ]; then    # 若重启nfs服务后,仍不正常
        time=$(date "+%F %H:%M:%S")
        echo -e "$time ------nfs服务故障且重启失败,切换到备用服务器------\n" >>"$log"
        service nfs stop &>>"$log"           # 停止nfs服务
        umount /dev/drbd1 &>>"$log"          # 卸载drbd设备
        drbdadm secondary r1 &>>"$log"       # 将drbd主降级为备
        service keepalived stop &>>"$log"    # 关闭keepalived(切换)
        time=$(date "+%F %H:%M:%S")
        echo -e "$time ------切换结束!------\n" >>"$log"
        sleep 2
        service keepalived start &>>"$log"   # 再开启keepalived服务
    else
        # NFS正常:确保drbd已置主且已挂载
        if ! drbdadm role r1 | grep Primary &>/dev/null; then    # drbd未置Primary
            time=$(date "+%F %H:%M:%S")
            echo -e "$time ------将本机置为DRBD主机并挂载/nfs目录------\n" >>"$log"
            drbdadm primary r1 &>>"$log"     # 将drbd置为主
            mount /dev/drbd1 /nfs &>>"$log"  # 挂载drbd设备
        fi
    fi
else    # keepalived备机检查
    if service nfs status &>/dev/null; then    # NFS服务必须处于关闭状态
        time=$(date "+%F %H:%M:%S")
        echo -e "$time ------关闭备机NFS服务------\n" >>"$log"
        service nfs stop &>>"$log"
    fi
    if drbdadm role r1 | grep Primary &>/dev/null; then    # drbd必须置备并卸载drbd设备
        time=$(date "+%F %H:%M:%S")
        echo -e "$time ------备机置secondary并卸载备机drbd设备------\n" >>"$log"
        drbdadm secondary r1 &>>"$log"
        umount /dev/drbd1 &>>"$log"
    fi
fi
EOF
# 配置keepalived停服脚本
# NOTE: delimiter MUST be quoted ('EOF') — unquoted, the `date` substitutions
# would run at write time (freezing $time to a constant) instead of at failover.
[root@k8s-nfs-Master ~]# cat << 'EOF' | tee /etc/keepalived/notify_stop.sh
#!/bin/bash
# Invoked by keepalived's notify_stop when the service stops:
# release all master-role resources so the peer can take over the VIP,
# then restart keepalived so this node rejoins the cluster as a backup.
time=$(date "+%F %H:%M:%S")
echo -e "$time ------开始切换到备用服务器------\n" >>/tmp/nfs-chk.log
service nfs stop &>>/tmp/nfs-chk.log      # 停止nfs服务
service smb stop &>>/tmp/nfs-chk.log      # 停止smb服务
umount /dev/drbd1 &>>/tmp/nfs-chk.log     # 卸载drbd设备
drbdadm secondary r1 &>>/tmp/nfs-chk.log  # 将drbd主降级为备
time=$(date "+%F %H:%M:%S")
echo -e "$time ------切换结束!------\n" >>/tmp/nfs-chk.log
sleep 2
service keepalived start &>>/tmp/nfs-chk.log  # 再开启keepalived
EOF
# 两个脚本都必须可执行,否则keepalived无法调用notify_stop脚本
[root@k8s-nfs-Master ~]# chmod +x /etc/keepalived/nfs_check.sh /etc/keepalived/notify_stop.sh
# 启动服务
[root@k8s-nfs-Master ~]# systemctl start keepalived.service && systemctl enable
keepalived.service
说明:在实际工作中,一定要连接到VIP进行操作,如果直连到slave机上对同步目录进行操作,是不会同步到master上的
2、添加虚拟机k8s-nfs-Slave并配置
将k8s-nfs-Master关机并复制到k8s-nfs-Slave,再打开k8s-nfs-Slave,修改一些配置
- 修改机器名与IP
[root@k8s-nfs-Slave ~]# nmcli general hostname k8s-nfs-Slave
[root@k8s-nfs-Slave ~]# sed -i 's/148/149/' /etc/sysconfig/network-scripts/ifcfg-ens192
[root@k8s-nfs-Slave ~]# service network restart
[root@k8s-nfs-Slave drbd]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4
localhost4.localdomain4 k8s-nfs-Slave
::1 localhost localhost.localdomain localhost6
localhost6.localdomain6 k8s-nfs-Slave
192.168.53.148 k8s-nfs-Master
192.168.53.149 k8s-nfs-Slave
- 修改/etc/keepalived/keepalived.conf
router_id NFS-Slave
priority 80 #从节点的权重要比主节点低 - 重启keepalived服务
[root@k8s-nfs-Slave ~]# service keepalived restart
Redirecting to /bin/systemctl restart keepalived.service
PS: 在keepalived.conf文档中,两台机state都设为“BACKUP”,这样,当发生切换后,即使master的keepalived服务恢复了,也不会再次抢过vip,减少了一次ip漂移的事件
3、文件同步与高可用测试
3.1 keepalived切换测试
找到VIP所在机器,进行切换
# service keepalived stop
# 用下面几个命令分别在两台机查看状态
# ps -C nfsd --no-header | wc -l # 查看nfs服务状态
# ip a |grep 53.150 # 查看是否vip
# ps -C keepalived --no-header | wc -l # 查看keepalived状态
# drbdadm role r1 # 查看drbd主备状态
# mount -l |grep drbd1 # 查看挂载情况
# cat /tmp/nfs-chk.log # 查看切换日志
3.2 查看切换前后EFK日志系统数
efk日志的存储挂载在了/nfs/elasticsearch-data目录,尝试在切换前面查看kibana界面的“索引管理”中的日志文件大小及“Discover”页面中的数据是否正常显示(缓冲1分钟左右)
3.3 查看切换前后grafana中监控数据
查看“Kubernetes Deployment(prometheus)“的数据是否正常,是否有中断
注意:1)因采用了自动化脚本,所以最好不要手工切换drbd的状态!
2)drbd同时只有主机才可能访问,备机无法访问共享目录,所以不要直接登录到备机对共享目录(/nfs)
中的文件进行增删更新操作!
4、配置NFS客户端
注意:在K8S集群中的每台机器上都要执行!
[root@k8s-master0 ~]# yum -y install nfs-utils
[root@k8s-master0 ~]# systemctl start nfs
[root@k8s-master0 ~]# systemctl enable nfs
[root@k8s-master0 ~]# systemctl status nfs
[root@k8s-master0 ~]# showmount -e 192.168.53.150
Export list for 192.168.53.150:
/nfs 192.168.53.150/24
[root@k8s-master0 ~]# mkdir /nfs
[root@k8s-master0 ~]# mount -t nfs 192.168.53.150:/nfs /nfs # 挂载
# 再配置开机自动挂载
[root@k8s-master0 ~]# echo '192.168.53.150:/nfs /nfs nfs rw,tcp,intr 0 0' >> /etc/fstab
[root@k8s-master0 ~]# df /nfs -hl # 查看挂载状况
......
网友评论