一、Lab Environment
1、The cluster consists of four CentOS 7 machines
2、Disable selinux, iptables, and firewalld
3、Set the hostnames to node[1-4] and add them to /etc/hosts
4、Synchronize time across all nodes
5、Decide whether a quorum device is needed (not used here); a command sketch of these prep steps follows this list
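A minimal sketch of the prep work on one node (repeat on every node with its own hostname; only node1's address 192.168.66.129 appears later in this lab, the other three IPs are placeholders):
hostnamectl set-hostname node1
setenforce 0                                    # and set SELINUX=disabled in /etc/selinux/config
systemctl stop firewalld && systemctl disable firewalld
cat >> /etc/hosts <<EOF
192.168.66.129 node1
192.168.66.130 node2
192.168.66.131 node3
192.168.66.132 node4
EOF
yum install -y chrony && systemctl start chronyd && systemctl enable chronyd   # time sync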
二、Installation and Configuration
CentOS 7: corosync v2 + pacemaker
corosync v2: the messaging layer, with a built-in vote (quorum) system
pacemaker: the cluster resource manager, running as a standalone service
Full-lifecycle cluster management tools:
pcs (see yum info pcs)
crmsh
1、Install and start pcsd (all nodes)
[root@node1 ~]# yum install pcs -y
[root@node1 ~]# rpm -ql pcs #list the files installed by the package
[root@node1 ~]# systemctl start pcsd && systemctl enable pcsd
[root@node1 ~]# echo "test123456" | passwd --stdin hacluster #set a password for the hacluster user
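The same three steps must also run on node2-node4. A convenience loop, assuming root ssh keys are already distributed (the loop is a sketch, not part of the original procedure):
[root@node1 ~]# for n in node2 node3 node4; do
>   ssh $n 'yum install -y pcs && systemctl start pcsd && systemctl enable pcsd && echo "test123456" | passwd --stdin hacluster'
> done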
2、Configure corosync (on any one node; node1 here)
[root@node1 ~]# pcs cluster auth node1 node2 node3 node4 -u hacluster #enter the password set above; on success each node is reported as authorized
[root@node1 ~]# pcs cluster setup --name mycluster node1 node2 node3 node4 #create the cluster and push the generated config to /etc/corosync/ on every node
View the generated configuration file (no changes are needed here):
[root@node1 ~]# cat /etc/corosync/corosync.conf
totem {
    version: 2
    secauth: off
    cluster_name: mycluster
    transport: udpu
}

nodelist {
    node {
        ring0_addr: node1
        nodeid: 1
    }

    node {
        ring0_addr: node2
        nodeid: 2
    }

    node {
        ring0_addr: node3
        nodeid: 3
    }

    node {
        ring0_addr: node4
        nodeid: 4
    }
}

quorum {
    provider: corosync_votequorum
}

logging {
    to_logfile: yes
    logfile: /var/log/cluster/corosync.log
    to_syslog: yes
}
[root@node1 ~]#
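One optional change worth knowing about (not used in this lab): corosync traffic can be authenticated by turning secauth on. A rough sketch:
[root@node1 ~]# corosync-keygen     # writes a shared key to /etc/corosync/authkey
[root@node1 ~]# for n in node2 node3 node4; do scp /etc/corosync/authkey $n:/etc/corosync/; done
# then set "secauth: on" in corosync.conf on every node and restart the cluster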
3、Start the cluster
[root@node1 ~]# pcs cluster start --all
node1: Starting Cluster...
node2: Starting Cluster...
node3: Starting Cluster...
node4: Starting Cluster...
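This starts the daemons but does not enable them at boot, which is why the Daemon Status section below shows corosync and pacemaker as active/disabled; to have the cluster come up automatically after a reboot you could additionally run:
[root@node1 ~]# pcs cluster enable --all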
4、Verify the cluster is up
[root@node1 ~]# corosync-cfgtool -s
Printing ring status.
Local node ID 1
RING ID 0
id = 192.168.66.129
status = ring 0 active with no faults
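Membership can also be read from corosync's runtime database; with all four nodes joined, the members keys should list four node IDs and their addresses:
[root@node1 ~]# corosync-cmapctl | grep members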
[root@node1 ~]# pcs status
Cluster name: mycluster
WARNING: no stonith devices and stonith-enabled is not false
Stack: corosync
Current DC: node1 (version 1.1.16-12.el7_4.8-94ff4df) - partition with quorum #the DC (Designated Controller) makes cluster-wide decisions
Last updated: Wed Apr 18 14:40:39 2018
Last change: Wed Apr 18 14:37:50 2018 by hacluster via crmd on node1
4 nodes configured
0 resources configured
Online: [ node1 node2 node3 node4 ]
No resources
Daemon Status:
corosync: active/disabled
pacemaker: active/disabled
pcsd: active/enabled
#pcsd shows active/enabled, which is the expected state here; corosync and pacemaker show active/disabled because we started them via pcs rather than enabling them at boot
#disable stonith to clear the WARNING shown above
[root@node1 ~]# pcs property set stonith-enabled=false
[root@node1 ~]# pcs property list
Cluster Properties:
cluster-infrastructure: corosync
cluster-name: mycluster
dc-version: 1.1.16-12.el7_4.8-94ff4df
have-watchdog: false
stonith-enabled: false
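Disabling stonith is fine for a lab, but a production cluster needs real fencing. A rough sketch of adding a fence device later (the agent name, address, and credentials below are hypothetical; run pcs stonith describe <agent> for the real parameter names of your version):
[root@node1 ~]# pcs stonith list              # fence agents available on this host
[root@node1 ~]# pcs stonith create myfence fence_ipmilan \
    ipaddr=192.168.66.250 login=admin passwd=secret \
    pcmk_host_list="node1 node2 node3 node4"
[root@node1 ~]# pcs property set stonith-enabled=true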
5、Install and configure crmsh
[root@node1 ~]# yum install wget -y
[root@node1 ~]# cd /etc/yum.repos.d/
[root@node1 yum.repos.d]# wget http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-7/network:ha-clustering:Stable.repo
[root@node1 yum.repos.d]# yum install crmsh -y
#the install fails on a missing dependency; download and install it by hand, then retry
python-parallax-1.0.1-29.1.noarch: [Errno 256] No more mirrors to try.
[root@node1 ~]# wget ftp://ftp.pbone.net/mirror/ftp5.gwdg.de/pub/opensuse/repositories/devel:/languages:/python/SLE_11_SP3/x86_64/python-parallax-1.0.1-9.8.x86_64.rpm
[root@node1 ~]# yum install -y python-parallax-1.0.1-9.8.x86_64.rpm
[root@node1 ~]# yum install crmsh -y
With the base cluster configured, we can now set up a highly available httpd.
三、Highly Available Web Service
1、The resources are httpd and a VIP
2、Install httpd on all nodes
[root@node1 ~]# yum install httpd -y
#for demo purposes, give each node its own page (repeat on every node with its own name)
[root@node1 ~]# echo '<h1>node1</h1>' >> /var/www/html/index.html
[root@node1 ~]# systemctl disable httpd #which node runs httpd is decided by the resource manager, so it must not be enabled at boot; the cluster starts the systemd unit itself when needed
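To apply the same install, page, and disable steps to the remaining nodes, a convenience loop (again assuming root ssh keys; $n expands locally, so each node gets its own name in the page):
[root@node1 ~]# for n in node2 node3 node4; do
>   ssh $n "yum install -y httpd && echo '<h1>$n</h1>' > /var/www/html/index.html && systemctl disable httpd"
> done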
3、Configure the resources
[root@node1 ~]# crm ra
crm(live)ra# list systemctl
ERROR: ra.list: class systemctl does not exist
crm(live)ra# list systemd #httpd shows up under the systemd class, which is how it is managed on CentOS 7
getty@tty1 gssproxy
httpd ip6tables
ipset iptables
crm(live)ra#
#configure the VIP resource
crm(live)ra# info ocf:heartbeat:IPaddr
crm(live)ra# cd
crm(live)# configure
crm(live)configure# primitive webip ocf:heartbeat:IPaddr params ip=192.168.66.200
crm(live)configure# show
node 1: node1
node 2: node2
node 3: node3
node 4: node4
primitive webip IPaddr \
        params ip=192.168.66.200
property cib-bootstrap-options: \
        have-watchdog=false \
        dc-version=1.1.16-12.el7_4.8-94ff4df \
        cluster-infrastructure=corosync \
        cluster-name=mycluster \
        stonith-enabled=false
#verify the webip definition and commit it
crm(live)configure# verify
crm(live)configure# commit
crm(live)configure# cd
crm(live)# status
Stack: corosync
Current DC: node1 (version 1.1.16-12.el7_4.8-94ff4df) - partition with quorum
Last updated: Wed Apr 18 15:46:56 2018
Last change: Wed Apr 18 15:46:51 2018 by root via cibadmin on node1
4 nodes configured
1 resource configured
Online: [ node1 node2 node3 node4 ]
Full list of resources:
webip (ocf::heartbeat:IPaddr): Started node1
crm(live)#
#node standby soft-offlines a node and is an easy way to test resource failover
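A quick sketch of such a test (not part of the original transcript); webip is on node1 at this point:
crm(live)# node standby node1     # soft-offline node1; webip moves to a surviving node
crm(live)# status
crm(live)# node online node1      # bring node1 back into service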
#define the httpd resource in the same way
crm(live)# configure
crm(live)configure# primitive webserver systemd:httpd
#verify the webserver definition and commit it
crm(live)configure# verify
crm(live)configure# commit
crm(live)configure# cd
crm(live)# status
Stack: corosync
Current DC: node1 (version 1.1.16-12.el7_4.8-94ff4df) - partition with quorum
Last updated: Wed Apr 18 15:55:57 2018
Last change: Wed Apr 18 15:55:09 2018 by root via cibadmin on node1
4 nodes configured
2 resources configured
Online: [ node1 node2 node3 node4 ]
Full list of resources:
webip (ocf::heartbeat:IPaddr): Started node2
webserver (systemd:httpd): Started node1
#webip and webserver ended up on different nodes, which is not what we want; define a group resource below so they stay on the same node
crm(live)# configure
crm(live)configure# group webservice webip webserver #group members start in the listed order: the IP first
crm(live)configure# verify
crm(live)configure# commit
crm(live)configure# cd
crm(live)# status
Stack: corosync
Current DC: node1 (version 1.1.16-12.el7_4.8-94ff4df) - partition with quorum
Last updated: Wed Apr 18 15:59:54 2018
Last change: Wed Apr 18 15:59:49 2018 by root via cibadmin on node1
4 nodes configured
2 resources configured
Online: [ node1 node2 node3 node4 ]
Full list of resources:
Resource Group: webservice
webip (ocf::heartbeat:IPaddr): Started node2
webserver (systemd:httpd): Started node2
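To confirm the service actually follows the VIP, a quick test from node1 (any host that can reach 192.168.66.200 works):
[root@node1 ~]# curl http://192.168.66.200      # expect <h1>node2</h1>
[root@node1 ~]# crm node standby node2          # the whole group migrates to another node
[root@node1 ~]# curl http://192.168.66.200      # the page is now served by the new owner
[root@node1 ~]# crm node online node2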
This completes the highly available httpd setup.
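As an aside, the same placement and start order can be expressed with explicit constraints instead of a group. A minimal crmsh sketch (the constraint IDs are made up; older crmsh versions take a score such as inf: in place of Mandatory: for order):
crm(live)configure# colocation webserver_with_webip inf: webserver webip
crm(live)configure# order webip_before_webserver Mandatory: webip webserver
crm(live)configure# verify
crm(live)configure# commit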