目录
一、LVS DR模式搭建
二、keepalived + LVS
一、LVS DR模式搭建
实验环境:
#假设以下IP均为公网IP
分发器dir
主机名:minglinux-03
内网IP 192.168.162.128
真实服务器rs1
主机名:minglinux-01
内网IP 192.168.162.130
真实服务器rs2
主机名:minglinux-02
内网IP 192.168.162.132
VIP(虚拟ip):192.168.162.200
DR模式下的所有机器都需要绑定该IP
- dir配置
//编写脚本 vim /usr/local/sbin/lvs_dr.sh
[root@minglinux-03 ~] vim /usr/local/sbin/lvs_dr.sh
#写入以下内容
#!/bin/bash
# Director (dir) setup script for LVS DR mode:
# binds the VIP on an ens33 alias and installs ipvsadm rules.
echo 1 > /proc/sys/net/ipv4/ip_forward # enable kernel IP (routing) forwarding on the director
ipv=/usr/sbin/ipvsadm
vip=192.168.162.200
rs1=192.168.162.130
rs2=192.168.162.132
# NOTE: adjust the NIC name (ens33) to match your own machine
ifdown ens33 # bounce the NIC so it releases any previously bound virtual IP
ifup ens33
ifconfig ens33:2 $vip broadcast $vip netmask 255.255.255.255 up # bind the VIP on alias ens33:2
route add -host $vip dev ens33:2 # add a host route for the VIP
$ipv -C # flush any existing ipvsadm rules
$ipv -A -t $vip:80 -s wrr # add the virtual service with weighted round-robin scheduling
$ipv -a -t $vip:80 -r $rs1:80 -g -w 1 # -g = DR (direct routing) mode, -w = weight 1
$ipv -a -t $vip:80 -r $rs2:80 -g -w 1
[root@minglinux-03 ~] sh !$ #执行脚本
sh /usr/local/sbin/lvs_dr.sh
[root@minglinux-03 ~] ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.162.200:80 wrr
-> 192.168.162.130:80 Route 1 0 0
-> 192.168.162.132:80 Route 1 0 0
[root@minglinux-03 ~] route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
0.0.0.0 192.168.162.2 0.0.0.0 UG 0 0 0 ens33
169.254.0.0 0.0.0.0 255.255.0.0 U 1002 0 0 ens33
169.254.0.0 0.0.0.0 255.255.0.0 U 1003 0 0 ens37
192.168.150.0 0.0.0.0 255.255.255.0 U 0 0 0 ens37
192.168.162.0 0.0.0.0 255.255.255.0 U 0 0 0 ens33
192.168.162.200 0.0.0.0 255.255.255.255 UH 0 0 0 ens33
[root@minglinux-03 ~] ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:7a:0f:05 brd ff:ff:ff:ff:ff:ff
inet 192.168.162.128/24 brd 192.168.162.255 scope global ens33
valid_lft forever preferred_lft forever
inet 192.168.162.200/32 brd 192.168.162.200 scope global ens33:2
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fe7a:f05/64 scope link
valid_lft forever preferred_lft forever
3: ens37: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:7a:0f:0f brd ff:ff:ff:ff:ff:ff
inet 192.168.150.123/24 brd 192.168.150.255 scope global ens37
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fe7a:f0f/64 scope link
valid_lft forever preferred_lft forever
- rs配置
#首先将两台rs网关改回
#在两台rs上也编写脚本/usr/local/sbin/lvs_rs.sh
[root@minglinux-01 ~] vim /usr/local/sbin/lvs_rs.sh
#写入以下内容
#!/bin/bash
# Real-server (rs) setup script for LVS DR mode:
# binds the VIP on lo and tunes ARP kernel parameters.
vip=192.168.162.200
# Bind the VIP on the loopback interface so each RS accepts packets
# addressed to the VIP and replies directly to the client (DR mode).
ifdown lo
ifup lo
ifconfig lo:0 $vip broadcast $vip netmask 255.255.255.255 up
route add -host $vip dev lo:0
# Tune ARP kernel parameters so the RSes neither answer nor announce
# ARP for the VIP — only the director's MAC must be seen on the LAN.
# Reference: www.cnblogs.com/lgfeng/archive/2012/10/16/2726308.html
echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore
echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce
echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore
echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce
[root@minglinux-01 ~] sh /usr/local/sbin/lvs_rs.sh
[root@minglinux-01 ~] route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
0.0.0.0 192.168.162.2 0.0.0.0 UG 0 0 0 ens33
169.254.0.0 0.0.0.0 255.255.0.0 U 1002 0 0 ens33
192.168.162.0 0.0.0.0 255.255.255.0 U 0 0 0 ens33
192.168.162.200 0.0.0.0 255.255.255.255 UH 0 0 0 lo
[root@minglinux-02 ~] sh /usr/local/sbin/lvs_rs.sh
[root@minglinux-02 ~] route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
0.0.0.0 192.168.162.2 0.0.0.0 UG 0 0 0 ens33
169.254.0.0 0.0.0.0 255.255.0.0 U 1002 0 0 ens33
192.168.162.0 0.0.0.0 255.255.255.0 U 0 0 0 ens33
192.168.162.200 0.0.0.0 255.255.255.255 UH 0 0 0 lo
- 测试
windows浏览器访问VIP并刷新,可看到访问的rs发生切换(Ctrl+F5强制刷新,避免浏览器使用缓存数据)
二、keepalived + LVS
LVS架构中,不管是NAT模式还是DR模式,当后端的RS宕掉时,调度器依然会把请求转发到宕掉的RS上,这不是我们想要的。
keepalived+LVS的架构可以避免这个问题。
完整的keepalived+LVS架构需要有两台调度器实现高可用,提供调度服务的只需要一台,另外一台作为备用。但keepalived本身也有负载均衡的功能,所以本次实验可以只安装一台keepalived
keepalived内置了ipvsadm的功能,所以不需要再安装ipvsadm包,也不用编写和执行那个lvs_dir的脚本
- 实验环境:
分发器dir(安装keepalived)
主机名:minglinux-03
内网IP 192.168.162.128
真实服务器rs1
主机名:minglinux-01
内网IP 192.168.162.130
真实服务器rs2
主机名:minglinux-02
内网IP 192.168.162.132
VIP(虚拟ip):192.168.162.200
- 编辑keepalived配置文件
[root@minglinux-03 ~] vim /etc/keepalived/keepalived.conf
#输入以下内容
vrrp_instance VI_1 {
    # on the backup server set this to BACKUP
    state MASTER
    # NIC the VIP is bound to; change ens33 to match your own machine
    interface ens33
    virtual_router_id 51
    # on the backup server use a lower priority, e.g. 90
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass aminglinux
    }
    virtual_ipaddress {
        192.168.162.200
    }
}
virtual_server 192.168.162.200 80 {
    # poll realserver health every 10 seconds
    delay_loop 10
    # LVS scheduling algorithm (weighted least-connection)
    lb_algo wlc
    # LVS forwarding mode: DR
    lb_kind DR
    # 0 disables persistence; set e.g. 60 to pin a client IP
    # to the same realserver for 60 seconds
    persistence_timeout 0
    # health-check realservers over TCP
    protocol TCP
    real_server 192.168.162.130 80 {
        # realserver weight
        weight 100
        TCP_CHECK {
            # connect timeout: 10 seconds
            connect_timeout 10
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
    real_server 192.168.162.132 80 {
        weight 100
        TCP_CHECK {
            connect_timeout 10
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
}
[root@minglinux-03 ~] ipvsadm -C #清空之前的ipvsadm规则和VIP
[root@minglinux-03 ~] systemctl restart network
[root@minglinux-03 ~] systemctl start keepalived
[root@minglinux-03 ~] ps aux |grep keepalived
root 4881 0.0 0.0 118676 1400 ? Ss 17:31 0:00 /usr/sbin/keepalived -D
root 4882 0.0 0.1 129608 3332 ? S 17:31 0:00 /usr/sbin/keepalived -D
root 4883 0.0 0.1 129476 2616 ? S 17:31 0:00 /usr/sbin/keepalived -D
root 5602 0.0 0.0 112720 988 pts/1 S+ 17:43 0:00 grep --color=auto keepalived
[root@minglinux-03 ~] ipvsadm -ln #查看ipvsadm规则
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.162.200:80 wlc
-> 192.168.162.130:80 Route 100 0 0
-> 192.168.162.132:80 Route 100 0 0
- 两台rs上执行/usr/local/sbin/lvs_rs.sh脚本
[root@minglinux-01 ~] sh /usr/local/sbin/lvs_rs.sh
[root@minglinux-02 ~] sh /usr/local/sbin/lvs_rs.sh
- 测试
#将 rs1 的 nginx 服务关闭
[root@minglinux-01 ~] systemctl stop nginx
[root@minglinux-01 ~] ps aux |grep nginx
root 3184 0.0 0.0 112720 980 pts/0 S+ 21:58 0:00 grep --color=auto nginx
#查看 dir 上的 ipvsadm 规则,只剩下 RS2 的规则了,说明调度器已经不会把请求转发到“宕掉”的RS1上
[root@minglinux-03 ~] ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.162.200:80 wlc
-> 192.168.162.132:80 Route 100 0 1
# 重新将RS1的 nginx 服务开启
[root@minglinux-01 ~] systemctl start nginx
[root@minglinux-01 ~] ps aux |grep nginx
root 3218 0.0 0.0 46028 1300 ? Ss 22:03 0:00 nginx: master process /usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/nginx.conf
nobody 3219 0.0 0.2 48516 3944 ? S 22:03 0:00 nginx: worker process
nobody 3220 0.0 0.2 48516 3944 ? S 22:03 0:00 nginx: worker process
root 3222 0.0 0.0 112720 984 pts/0 S+ 22:03 0:00 grep --color=auto nginx
[root@minglinux-03 ~] ipvsadm -ln #此时dir 的ipvsadm规则又有了两台 rs
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.162.200:80 wlc
-> 192.168.162.130:80 Route 100 0 0
-> 192.168.162.132:80 Route 100 0 0
扩展
haproxy+keepalived http://blog.csdn.net/xrt95050/article/details/40926255
nginx、lvs、haproxy比较 http://www.csdn.net/article/2014-07-24/2820837
keepalived中自定义脚本 vrrp_script http://my.oschina.net/hncscwc/blog/158746
lvs dr模式只使用一个公网ip的实现方法 http://storysky.blog.51cto.com/628458/338726
网友评论