Installation and configuration (performed on node1/node2)
1. Preparation:
Both nodes, node1 and node2, are installed with CentOS 6.5. Each node has two disks: one holds the operating system, the other is used for DRBD.
192.168.1.151  node1
192.168.1.203  node2
Change the hostnames:
# hostnamectl set-hostname node1
# hostnamectl set-hostname node2
2. The disk layout is as follows
[root@node1 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 20G 0 disk
├─sda1 8:1 0 500M 0 part /boot
└─sda2 8:2 0 19.5G 0 part
├─centos-swap 253:0 0 2G 0 lvm [SWAP]
└─centos-root 253:1 0 17.5G 0 lvm /
sdb 8:16 0 20G 0 disk
sr0 11:0 1 1024M 0 rom
[root@node2 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 20G 0 disk
├─sda1 8:1 0 500M 0 part /boot
└─sda2 8:2 0 19.5G 0 part
├─centos-swap 253:0 0 2G 0 lvm [SWAP]
└─centos-root 253:1 0 17.5G 0 lvm /
sdb 8:16 0 20G 0 disk
sr0 11:0 1 1024M 0 rom
3. Create the LVM volume (run on both nodes)
# pvcreate /dev/sdb
# vgcreate data /dev/sdb
# lvcreate -L 10G -n mysql data
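The commands above turn /dev/sdb into a physical volume, create the volume group data, and carve out a 10G logical volume named mysql. A quick sanity check before handing the volume to DRBD (a minimal sketch; the exact output depends on your layout):
# lvs data                  # expect a logical volume "mysql" with LSize 10.00g
# ls -l /dev/data/mysql     # the device-mapper link that DRBD will use as its backing disk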
4. Disable the firewall (run on both nodes)
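The original does not list the commands for this step. A minimal sketch, assuming a systemd-based setup with firewalld (on an older init-based system the equivalent would be "service iptables stop" and "chkconfig iptables off"):
# systemctl stop firewalld
# systemctl disable firewalld
If you would rather keep the firewall running, open only the DRBD replication port used later in this guide:
# firewall-cmd --permanent --add-port=7789/tcp
# firewall-cmd --reload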
5. Configure the hosts file (run on both nodes)
192.168.1.151  node1  mysql1
192.168.1.203  node2  mysql2
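One way to append these entries on both nodes (a sketch; skip any line that already exists in /etc/hosts):
# cat >> /etc/hosts <<EOF
192.168.1.151  node1  mysql1
192.168.1.203  node2  mysql2
EOF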
6. Configure NTP (run on both nodes)
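The original does not list the commands for this step. A minimal sketch using chrony (an assumption; classic ntpd or a periodic ntpdate cron job works just as well, the point is simply that both nodes agree on the time):
# yum install -y chrony
# systemctl enable chronyd
# systemctl start chronyd
# chronyc sources           # verify that at least one time source is being used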
7. Configure SSH mutual trust (run on both nodes)
# ssh-keygen -t dsa -f ~/.ssh/id_dsa -N ""
# ssh-copy-id node1
# ssh-copy-id node2
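A quick check that the trust works in both directions (a sketch):
# ssh node2 hostname        # run on node1; should print "node2" without asking for a password
# ssh node1 hostname        # run on node2; should print "node1"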
8. Install DRBD
# rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
# rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-2.el7.elrepo.noarch.rpm
# yum install -y kmod-drbd84 drbd84-utils
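Before continuing, it is worth confirming that the packages landed and that the kernel module can be found (a sketch; step 14 below shows what happens when it cannot):
# rpm -qa | grep drbd       # expect kmod-drbd84 and drbd84-utils
# modinfo drbd              # should print the module path and version; an error means the kmod does not match the running kernel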
9. Overview of the configuration files
a. /etc/drbd.conf // main configuration file
The main configuration file pulls in the global configuration file and the files ending in .res under the drbd.d/ directory:
# You can find an example in /usr/share/doc/drbd.../drbd.conf.example
include "drbd.d/global_common.conf";
include "drbd.d/*.res";
b. /etc/drbd.d/global_common.conf // global configuration file
global {
usage-count no; # whether to take part in DRBD usage statistics (default yes); the project uses it to count DRBD installations
# minor-count dialog-refresh disable-ip-verification
}
common {
protocol C; # DRBD replication protocol
handlers {
# These are EXAMPLE handlers only.
# They may have severe implications,
# like hard resetting the node under certain circumstances.
# Be careful when chosing your poison.
pri-on-incon-degr "/usr/lib/drbd/notify-pri-on-incon-degr.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
pri-lost-after-sb "/usr/lib/drbd/notify-pri-lost-after-sb.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
local-io-error "/usr/lib/drbd/notify-io-error.sh; /usr/lib/drbd/notify-emergency-shutdown.sh; echo o > /proc/sysrq-trigger ; halt -f";
# fence-peer "/usr/lib/drbd/crm-fence-peer.sh";
# split-brain "/usr/lib/drbd/notify-split-brain.sh root";
# out-of-sync "/usr/lib/drbd/notify-out-of-sync.sh root";
# before-resync-target "/usr/lib/drbd/snapshot-resync-target-lvm.sh -p 15 -- -c 16k";
# after-resync-target /usr/lib/drbd/unsnapshot-resync-target-lvm.sh;
}
startup {
# wfc-timeout degr-wfc-timeout outdated-wfc-timeout wait-after-sb
}
options {
# cpu-mask on-no-data-accessible
}
disk {
on-io-error detach; # I/O error handling policy: detach
# size max-bio-bvecs on-io-error fencing disk-barrier disk-flushes
# disk-drain md-flushes resync-rate resync-after al-extents
# c-plan-ahead c-delay-target c-fill-target c-max-rate
# c-min-rate disk-timeout
}
net {
# protocol timeout max-epoch-size max-buffers unplug-watermark
# connect-int ping-int sndbuf-size rcvbuf-size ko-count
# allow-two-primaries cram-hmac-alg shared-secret after-sb-0pri
# after-sb-1pri after-sb-2pri always-asbp rr-conflict
# ping-timeout data-integrity-alg tcp-cork on-congestion
# congestion-fill congestion-extents csums-alg verify-alg
# use-rle
}
syncer {
rate 1024M; # network rate used when the primary and secondary synchronize
}
}
Note: the on-io-error policy can be one of the following options
detach: the default and recommended option. If a lower-level disk I/O error occurs on a node, DRBD detaches the backing device and keeps running in diskless mode.
pass_on: DRBD reports the I/O error to the upper layer. On the primary node it is passed to the mounted filesystem; on the secondary node it is usually ignored, because there is no upper layer to report it to.
call-local-io-error: invokes the command defined by the local-io-error handler; this requires a local-io-error handler to be configured for the resource, which gives the administrator full freedom to handle I/O errors with a command or script of their choice.
c. Create the resource file /etc/drbd.d/mysql.res
resource mysql { # resource name
protocol C; # replication protocol
meta-disk internal;
device /dev/drbd1; # DRBD device name
syncer {
verify-alg sha1; # digest algorithm used for online verification
}
net {
allow-two-primaries;
}
on node1 {
disk /dev/data/mysql; # backing disk partition used by drbd1
address 192.168.1.151:7789; # DRBD listen address and port
}
on node2 {
disk /dev/data/mysql;
address 192.168.1.203:7789;
}
}
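Before distributing the file you can ask drbdadm to parse it back; a parse error at this point indicates a typo in the configuration rather than a missing kernel module (a sketch):
# drbdadm dump mysql        # prints the parsed resource definition if the syntax is valid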
10. Copy the configuration files to node2
# scp -rp /etc/drbd.d/* node2:/etc/drbd.d/
11. Enable DRBD [node1]
# drbdadm create-md mysql
# modprobe drbd
# drbdadm up mysql
# drbdadm -- --force primary mysql
Check the status:
# cat /proc/drbd
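Besides /proc/drbd, drbdadm can report the individual state fields, which is handy in scripts (a sketch):
# drbdadm role mysql        # expect Primary/Secondary once the peer is up
# drbdadm dstate mysql      # disk state, e.g. UpToDate/UpToDate after the initial sync finishes
# drbdadm cstate mysql      # connection state, e.g. Connected or SyncSource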
12. Configure the peer node
# ssh node2 "drbdadm create-md mysql"
# ssh node2 "modprobe drbd"
# ssh node2 "drbdadm up mysql"
13. Format the device and mount it [node1]
# mkfs.ext4 /dev/drbd1
# mount /dev/drbd1 /mnt
Check the status:
service drbd status (on the primary node)
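To convince yourself the block device really is replicated, you can drop a marker file on the mounted filesystem now and look for it on node2 after the failover in step 15 (a sketch; the file name is arbitrary):
# echo "written on node1" > /mnt/drbd-test.txt
# sync                      # make sure the write reaches the DRBD device before any failover test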
14. If the following errors occur, update the kernel and reboot the server
[root@node1 ~]# drbdadm create-md mysql
ERROR: modinfo: could not find module drbd
drbd.d/global_common.conf:55: Parse error: 'a syncer option keyword' expected,
but got 'rate'
[root@node1]# lsmod |grep drbd
[root@node1]#
# yum install gcc gcc-c++ make glibc flex kernel kernel-devel kernel-headers
Rebooting resolves the problem; make sure the directory below can be found. (A reboot is required.)
[root@node1]# ls -ld /usr/src/kernels/$(uname -r)/
drwxr-xr-x 22 root root 4096 Aug 18 13:59 /usr/src/kernels/2.6.32-573.3.1.el6.x86_64/
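After the reboot, confirm that the module now loads before retrying the metadata creation (a sketch):
# modprobe drbd
# lsmod | grep drbd         # a non-empty result means the module matches the running kernel
# drbdadm create-md mysql   # should now succeed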
15. Failure recovery
In a real production environment, if the DRBD1 node goes down, the ro value in the DRBD2 node's status shows Secondary/Unknown; all that is needed is to promote DRBD on the surviving node.
# umount /data
# drbdsetup /dev/drbd1 secondary (after a failure, demote to secondary)
# drbdsetup /dev/drbd1 primary (promote to primary)
# mount /dev/drbd1 /data
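The drbdsetup calls above address the device directly; an equivalent sequence on the surviving node uses drbdadm and the resource name (a sketch, assuming /data exists on node2 as the mount point):
# drbdadm primary mysql     # promote the surviving node to primary
# mount /dev/drbd1 /data    # mount the replicated filesystem and resume service
Once the failed node is back, unmount, demote with "drbdadm secondary mysql", and promote the original primary again.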