LVS (Dual-Master Hot Standby) + keepalived + Nginx Based on DR Mode

Author: liurongming | Published 2021-12-12 19:08

    I. Environment Preparation

    1. Operating system: CentOS Linux release 7.9.2009 (Core);
    2. Nginx package: nginx-1.20.1-1.el7.ngx.x86_64.rpm;
    3. Two virtual IP addresses: 172.18.5.100 and 172.18.5.200 (currently unused IPs);
    4. This setup uses LVS in DR mode within a single subnet, planned across 4 machines and 2 VIPs:

    Machine   IP              Role
    Host 1    172.18.5.110    LVS master
    Host 2    172.18.5.148    LVS slave
    Host 3    172.18.5.223    nginx01
    Host 4    172.18.5.221    nginx02
    -         172.18.5.100    VIP
    -         172.18.5.200    VIP

    Note: because DR mode is used, every Nginx server must also carry the VIPs on its loopback interface, i.e. the VIP addresses are configured on lo.
    A. For ease of testing, the hosts are VirtualBox VMs with two NICs (bridged mode + host-only mode [the latter is optional]);
    B. Disable the firewall and the conflicting network configuration tool ahead of time:

    # Flush and disable the firewall
    iptables -F                   # flush existing firewall rules
    systemctl stop firewalld      # stop the firewalld service (systemd style)
    service firewalld stop        # stop the firewalld service (legacy style)
    systemctl disable firewalld   # keep firewalld from starting at boot
    
    # Stop NetworkManager
    systemctl stop NetworkManager
    systemctl disable NetworkManager
    
    # Disable SELinux
    # vi /etc/sysconfig/selinux
    #   enforcing  - SELinux security policy is enforced.
    #   permissive - SELinux prints warnings instead of enforcing.
    #   disabled   - No SELinux policy is loaded.
    SELINUX=disabled   # change the value to disabled
    
    # This permanently changes the SELinux state, but it only takes effect after the system is rebooted
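    
    If a reboot is not convenient right away, SELinux can also be switched to permissive for the current boot with the standard setenforce command (added here as a convenience, not part of the original steps):
    
    setenforce 0   # temporary; the SELINUX=disabled edit above still needs a reboot to fully disable SELinux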
    

    II. Deployment Steps

    1. First, install ipvsadm and keepalived on the LVS master and slave nodes:

    # Install ipvsadm and keepalived
    yum install ipvsadm keepalived  -y
    
    # Installed versions
    [root@localhost ~]# ipvsadm -v
    ipvsadm v1.27 2008/5/15 (compiled with popt and IPVS v1.2.1)
    [root@localhost ~]# keepalived  -v
    Keepalived v1.3.5 (03/19,2017), git commit v1.3.5-6-g6fa32f2
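    
    # Optionally, confirm that the ip_vs kernel module is available on both directors
    # (it is normally auto-loaded the first time ipvsadm touches the IPVS table; shown only as a sanity check)
    modprobe ip_vs            # load the IPVS module if it is not loaded yet
    lsmod | grep -E '^ip_vs'  # should list ip_vs once it is loaded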
    

    2. Install nginx on the nginx01 and nginx02 nodes:

    # Install nginx
    rpm -ivh nginx-1.20.1-1.el7.ngx.x86_64.rpm
    systemctl start nginx
    systemctl status nginx
    systemctl enable nginx
    
    # Edit index.html and append something that identifies each node
    vim /usr/share/nginx/html/index.html
    

    Once installed, test each node from inside the network and confirm it responds.
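    A minimal way to make the two backends distinguishable during these tests (a sketch, assuming the default document root installed by the nginx RPM above):
    
    # Run on each nginx node; appends one of that node's IP addresses to the test page
    echo "served by $(hostname -I | awk '{print $1}')" >> /usr/share/nginx/html/index.html
    curl -s http://127.0.0.1/ | tail -n 1   # should echo this node's IP back
    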
    3. Configure keepalived on both LVS nodes
    The key settings are commented inline; forwarding is enabled for both HTTP and HTTPS, and the remaining settings can be used as a reference.
    On the node that is MASTER for VIP .100 (and BACKUP for .200):

    ! Configuration File for keepalived
    
    global_defs {
        router_id LVS_MASTER_106 # must be unique on the LAN
    }
    
    vrrp_instance VI_1 { # each instance name must be unique
        state MASTER
        interface enp0s8 # replace with this host's NIC
        virtual_router_id 51 # unique VRRP router ID shared by the peers of this instance
        priority 100
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 1234 # change this password
        }
        virtual_ipaddress {
            172.18.5.100 # the virtual IP
        }
    }
    
    vrrp_instance VI_2 { # each instance name must be unique
        state BACKUP
        interface enp0s8
        virtual_router_id 52
        priority 50
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 1234
        }
        virtual_ipaddress {
            172.18.5.200
        }
    }
    
    virtual_server 172.18.5.100 80 { # VIP and port to listen on
        delay_loop 6
        lb_algo wrr # weighted round robin
        lb_kind DR # DR mode
        persistence_timeout 50 # session persistence window (seconds)
        protocol TCP # protocol
    
        real_server 172.18.5.221 80 {
            weight 1
            TCP_CHECK { # health check
                connect_timeout 3 
                retry 3 
                delay_before_retry 3
                connect_port 80
            }
        }
    
        real_server 172.18.5.223 80 {
            weight 1
            TCP_CHECK {
                connect_timeout 3
                retry 3
                delay_before_retry 3
                connect_port 80
            }
        }
    }
    
    virtual_server 172.18.5.100 443 {
        delay_loop 6
        lb_algo wrr
        lb_kind DR
        persistence_timeout 50
        protocol TCP
    
        real_server 172.18.5.221 443 {
            weight 1
            TCP_CHECK {
                connect_timeout 3
                retry 3
                delay_before_retry 3
                connect_port 443
            }
        }
    
        real_server 172.18.5.223 443 {
            weight 1
            TCP_CHECK {
                connect_timeout 3
                retry 3
                delay_before_retry 3
                connect_port 443
            }
        }
    }
    
    virtual_server 172.18.5.200 80 {
        delay_loop 6
        lb_algo wrr
        lb_kind DR
        persistence_timeout 50
        protocol TCP
    
        real_server 172.18.5.221 80 {
            weight 1
            TCP_CHECK {
                connect_timeout 3
                retry 3
                delay_before_retry 3
                connect_port 80
            }
        }
    
        real_server 172.18.5.223 80 {
            weight 1
            TCP_CHECK {
                connect_timeout 3
                retry 3
                delay_before_retry 3
                connect_port 80
            }
        }
    }
    
    virtual_server 172.18.5.200 443 {
        delay_loop 6
        lb_algo wrr
        lb_kind DR
        persistence_timeout 50
        protocol TCP
    
        real_server 172.18.5.221 443 {
            weight 1
            TCP_CHECK {
                connect_timeout 3
                retry 3
                delay_before_retry 3
                connect_port 443
            }
        }
    
        real_server 172.18.5.223 443 {
            weight 1
            TCP_CHECK {
                connect_timeout 3
                retry 3
                delay_before_retry 3
                connect_port 443
            }
        }
    }
    

    On the node that is MASTER for VIP .200 (and BACKUP for .100):

    ! Configuration File for keepalived
    
    global_defs {
        router_id LVS_SLAVE_106
    }
    
    vrrp_instance VI_1 {
        state BACKUP
        interface enp0s8
        virtual_router_id 51
        priority 50
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 1234
        }
        virtual_ipaddress {
            172.18.5.100
        }
    }
    
    vrrp_instance VI_2 {
        state BACKUP
        interface enp0s8
        virtual_router_id 52
        priority 100
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 1234
        }
        virtual_ipaddress {
            172.18.5.200
        }
    }
    
    virtual_server 172.18.5.100 80 {
        delay_loop 6
        lb_algo wrr
        lb_kind DR
        persistence_timeout 50
        protocol TCP
    
        real_server 172.18.5.221 80 {
            weight 1
            TCP_CHECK {
                connect_timeout 3
                retry 3
                delay_before_retry 3
                connect_port 80
            }
        }
    
        real_server 172.18.5.223 80 {
            weight 1
            TCP_CHECK {
                connect_timeout 3
                retry 3
                delay_before_retry 3
                connect_port 80
            }
        }
    }
    
    virtual_server 172.18.5.100 443 {
        delay_loop 6
        lb_algo wrr
        lb_kind DR
        persistence_timeout 50
        protocol TCP
    
        real_server 172.18.5.221 443 {
            weight 1
            TCP_CHECK {
                connect_timeout 3
                retry 3
                delay_before_retry 3
                connect_port 443
            }
        }
    
        real_server 172.18.5.223 443 {
            weight 1
            TCP_CHECK {
                connect_timeout 3
                retry 3
                delay_before_retry 3
                connect_port 443
            }
        }
    }
    
    virtual_server 172.18.5.200 80 {
        delay_loop 6
        lb_algo wrr
        lb_kind DR
        persistence_timeout 50
        protocol TCP
    
        real_server 172.18.5.221 80 {
            weight 1
            TCP_CHECK {
                connect_timeout 3
                retry 3
                delay_before_retry 3
                connect_port 80
            }
        }
    
        real_server 172.18.5.223 80 {
            weight 1
            TCP_CHECK {
                connect_timeout 3
                retry 3
                delay_before_retry 3
                connect_port 80
            }
        }
    }
    
    virtual_server 172.18.5.200 443 {
        delay_loop 6
        lb_algo wrr
        lb_kind DR
        persistence_timeout 50
        protocol TCP
    
        real_server 172.18.5.221 443 {
            weight 1
            TCP_CHECK {
                connect_timeout 3
                retry 3
                delay_before_retry 3
                connect_port 443
            }
        }
    
        real_server 172.18.5.223 443 {
            weight 1
            TCP_CHECK {
                connect_timeout 3
                retry 3
                delay_before_retry 3
                connect_port 443
            }
        }
    }
    

    Once the above is in place, start keepalived on both LVS machines with systemctl start keepalived; each machine will then bring up its own VIP automatically. Note that at this point requests to either the .100 or the .200 VIP will still fail: in DR mode the configuration does not stop at the LVS machines, the real servers (RIPs) must also be given the VIPs and the corresponding routing rules. The remaining configuration follows below.
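    A quick sanity check on each director after starting the service (a sketch, using the interface and VIPs from this setup):
    
    systemctl start keepalived
    ip addr show enp0s8 | grep 172.18.5   # the VIP this node currently owns should appear here
    ipvsadm -L -n                         # the virtual/real server table built by keepalived
    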
    4. Configure both RIP machines (the Nginx nodes)
    The main steps are:
    A. Tune ARP so the node neither answers nor announces ARP for the VIPs, to avoid VIP conflicts with the director;
    B. Enable IP forwarding (not required when everything stays on the same subnet);
    C. Create two alias sub-interfaces on the loopback interface;
    D. Add host routes for the virtual IPs (optional when on the same subnet);

    # Suppress ARP replies/announcements for the VIPs (arp_ignore / arp_announce)
    echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore
    echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce
    echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore
    echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce
    
    # Enable IP forwarding (can be skipped when not crossing subnets)
    sysctl -w net.ipv4.ip_forward=1
    
    # Create loopback alias interfaces (numbering may start at lo:0)
    ifconfig lo:1 172.18.5.100 broadcast 172.18.5.100 netmask 255.255.255.255 up
    ifconfig lo:2 172.18.5.200 broadcast 172.18.5.200 netmask 255.255.255.255 up
    
    # Add host routes for the VIPs (optional on the same subnet)
    route add -host 172.18.5.100 dev lo:1
    route add -host 172.18.5.200 dev lo:2
    

    At this point both 172.18.5.100 and 172.18.5.200 are active:

    [root@localhost ~]# ip addr
    1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
        link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
        inet 127.0.0.1/8 scope host lo
           valid_lft forever preferred_lft forever
        inet 172.18.5.100/32 brd 172.18.5.100 scope global lo:1
           valid_lft forever preferred_lft forever
        inet 172.18.5.200/32 brd 172.18.5.200 scope global lo:2
           valid_lft forever preferred_lft forever
        inet6 ::1/128 scope host
           valid_lft forever preferred_lft forever
    2: enp0s3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
        link/ether 08:00:27:59:ae:de brd ff:ff:ff:ff:ff:ff
        inet 192.168.56.108/24 brd 192.168.56.255 scope global noprefixroute enp0s3
           valid_lft forever preferred_lft forever
        inet6 fe80::b7a8:d918:1fcc:330/64 scope link noprefixroute
           valid_lft forever preferred_lft forever
    3: enp0s8: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
        link/ether 08:00:27:b2:99:8e brd ff:ff:ff:ff:ff:ff
        inet 172.18.5.223/24 brd 172.18.5.255 scope global noprefixroute enp0s8
           valid_lft forever preferred_lft forever
        inet6 fe80::5b5a:76e1:4465:ee8/64 scope link noprefixroute
           valid_lft forever preferred_lft forever
    

    At this point, LVS + keepalived + Nginx in DR mode is up and running.

    III. Production Hardening

    Although the configuration above works and the effect is visible, it is not yet fit for production, because everything was set up with temporary commands. For production we want the configuration persisted and reloaded automatically when the systems reboot.
    1. The two director (DIP) machines are already managed by keepalived, so it only needs to be enabled at boot (see the one-liner after this list);
    2. nginx on the two RIPs was already enabled at boot earlier, so nothing more is needed there;
    3. The remaining RIP settings still need to be persisted and applied at boot.
    The relevant configuration is as follows:
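    For item 1, the standard way to enable the service at boot on both directors:
    
    systemctl enable keepalived   # or: systemctl enable --now keepalived
    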

    • A. Configure the ARP kernel parameters on both RIP machines
      vim /etc/sysctl.conf
    # enable IP forwarding
    net.ipv4.ip_forward=1
    
    # configuration for lvs
    net.ipv4.conf.all.arp_ignore = 1
    net.ipv4.conf.default.arp_ignore = 1
    net.ipv4.conf.lo.arp_ignore = 1
    net.ipv4.conf.all.arp_announce = 2
    net.ipv4.conf.default.arp_announce = 2
    net.ipv4.conf.lo.arp_announce = 2
    

    Save and exit, then apply immediately with: sysctl -p
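    To confirm the values actually took effect, the keys can be read back (a quick sketch):
    
    sysctl net.ipv4.conf.all.arp_ignore net.ipv4.conf.all.arp_announce
    # expected:
    # net.ipv4.conf.all.arp_ignore = 1
    # net.ipv4.conf.all.arp_announce = 2
    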

    • B. Configure the virtual IPs on both RIP machines
      Go to the interface configuration directory: cd /etc/sysconfig/network-scripts
      Copy the loopback configuration: cp ifcfg-lo ifcfg-lo:1
      Edit ifcfg-lo:1 as follows:
    DEVICE=lo:1 
    IPADDR=172.18.5.100
    NETMASK=255.255.255.255
    ONBOOT=yes
    

    Copy it again: cp ifcfg-lo ifcfg-lo:2
    Edit ifcfg-lo:2 as follows:

    DEVICE=lo:2 
    IPADDR=172.18.5.200
    NETMASK=255.255.255.255
    ONBOOT=yes
    

    Copy the files over to the other RIP machine:

    cd /etc/sysconfig/network-scripts/
    scp ./ifcfg-lo:1 root@192.168.56.109:/etc/sysconfig/network-scripts/
    scp ./ifcfg-lo:2 root@192.168.56.109:/etc/sysconfig/network-scripts/
    

    Once the files are in place, restart the network service with systemctl restart network; make sure the ARP parameters above are applied before doing this.
    Because there are two VIPs, two loopback sub-interfaces are needed.
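    After the restart (or a reboot) it is worth confirming that both VIPs come back on lo, for example:
    
    ip addr show lo | grep -E '172\.18\.5\.(100|200)'
    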

    • C. Apply the VIP host routes at boot on both RIP machines
      On CentOS 7, /etc/rc.local is not executed at boot by default, because the file lacks the execute permission.
    [root@localhost rc.d]# ll /etc/rc.local
    lrwxrwxrwx. 1 root root 13 Dec 11 14:28 /etc/rc.local -> rc.d/rc.local
    [root@localhost  rc.d]# ll /etc/rc.d/rc.local
    -rw-r--r--. 1 root root 473 Oct  2  2020 /etc/rc.d/rc.local
    

    So the approach is to make the file executable with chmod +x /etc/rc.d/rc.local, edit it with vi /etc/rc.d/rc.local to add the commands, and test whether that actually works; if it does, we keep this approach because it is the simplest.
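    One way to confirm the hook will actually run at boot (assuming systemd's rc-local compatibility unit, which stock CentOS 7 ships and which is conditioned on the file being executable):
    
    chmod +x /etc/rc.d/rc.local
    systemctl status rc-local
    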

    Testing confirms this approach works:

    [root@localhost  ~]# route -n
    Kernel IP routing table
    Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
    172.18.5.100    0.0.0.0         255.255.255.255 UH    0      0        0 lo
    172.18.5.200    0.0.0.0         255.255.255.255 UH    0      0        0 lo
    

    So append the following to /etc/rc.d/rc.local:
    route add -host 172.18.5.100 dev lo:1
    route add -host 172.18.5.200 dev lo:2

    [root@localhost ~]# cat /etc/rc.d/rc.local
    #!/bin/bash
    # THIS FILE IS ADDED FOR COMPATIBILITY PURPOSES
    #
    # It is highly advisable to create own systemd services or udev rules
    # to run scripts during boot instead of using this file.
    #
    # In contrast to previous versions due to parallel execution during boot
    # this script will NOT be run after all other services.
    #
    # Please note that you must run 'chmod +x /etc/rc.d/rc.local' to ensure
    # that this script will be executed during boot.
    
    touch /var/lock/subsys/local
    
    # configuration for lvs
    route add -host 172.18.5.100 dev lo:1
    route add -host 172.18.5.200 dev lo:2
    [root@localhost ~]#
    

    At this point the LVS (dual-master hot standby) + keepalived + Nginx high-availability setup based on DR mode is complete and ready for production use.

    PS: A summary of commonly used ipvsadm commands:

    # Common inspection commands
    ipvsadm -L -n
    ipvsadm -Lnc
    ipvsadm -S -n
    ipvsadm -L --timeout
    
    # Manual creation; can be used instead of letting keepalived build the table.
    ipvsadm -A -t 172.18.5.100:80 -s wrr -p 50
    ipvsadm -a -t 172.18.5.100:80 -r 172.18.5.221:80 -g -w 1
    ipvsadm -a -t 172.18.5.100:80 -r 172.18.5.223:80 -g -w 1
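    
    # For completeness, the matching teardown commands (standard ipvsadm flags):
    ipvsadm -d -t 172.18.5.100:80 -r 172.18.5.221:80   # remove one real server from a service
    ipvsadm -D -t 172.18.5.100:80                      # remove the virtual service
    ipvsadm -C                                         # clear the entire IPVS table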
    

    Common problems and fixes:
    1. Sessions lost across different ports
    Scenario: a user browsing a shopping site uses both HTTP (80) and HTTPS (443); we need both protocols to land on the same real server, while other services remain unrestricted.
    Solution: combine persistence with firewall marks.
    Combining persistence with firewall marks gives "port affinity": all requests from a given client to a given service, even across different ports, are directed to the same real server. The configuration is as follows (note that it only needs to be done on the LVS machines):
    Separate per-port configuration

    iptables -t mangle -A PREROUTING -d 172.18.5.1 -i eth0 -p tcp --dport 80 -j MARK --set-mark 80443
    iptables -t mangle -A PREROUTING -d 172.18.5.1 -i eth0 -p tcp --dport 443 -j MARK --set-mark 80443
    ipvsadm -A -f 80443  -s rr -p 600
    ipvsadm -a -f 80443  -r 172.18.5.221 -g -w 1
    ipvsadm -a -f 80443  -r 172.18.5.223 -g -w 1  
    

    Combined multi-port configuration

    # FWM: firewall mark
    # How to set the mark:
    iptables -t mangle -A PREROUTING -d $vip -p $protocol --dport $port -j MARK --set-mark NUMBER
    
    # Define the cluster based on the mark:
    ipvsadm -A -f NUMBER [option]
    
    # Define the LVS cluster using this marking approach:
    iptables -t mangle -A PREROUTING -d 172.18.5.100  -p tcp -m multiport --dports 80,443 -j MARK --set-mark 80443  
    ipvsadm -A -f 80443  -s rr -p 600
    ipvsadm -a -f 80443  -r 172.18.5.221 -g -w 1
    ipvsadm -a -f 80443  -r 172.18.5.223 -g -w 1
    iptables-save > /etc/sysconfig/iptables
    ipvsadm-save > /etc/sysconfig/ipvsadm
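    
    # To have both restored at boot, the saved files can be loaded by the matching services.
    # Assumption to verify on your system: iptables.service is provided by the iptables-services
    # package, and on CentOS 7 the ipvsadm package ships ipvsadm.service, which restores
    # /etc/sysconfig/ipvsadm.
    systemctl enable iptables
    systemctl enable ipvsadm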
    

    If ldirectord is used for persistence instead, the configuration would be:

    vim /etc/ha.d/ldirectord.cf
    virtual=80443    # changed from the VIP address to the firewall mark
    protocol=fwm     # changed from tcp to fwm
    

    With keepalived, the configuration is:
    A. Group-based configuration
    This part is optional; its main purpose is to let a service on one real server belong to several virtual servers while being health-checked only once.

    virtual_server_group <STRING> {
      # VIP port
      <IPADDR> <PORT>
      <IPADDR> <PORT>
      fwmark <INT>
    }
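    
    A concrete sketch of how the two VIPs from this article could be grouped (the group name is illustrative; real_server blocks abbreviated):
    
    virtual_server_group WEB_GROUP {
        172.18.5.100 80
        172.18.5.100 443
        172.18.5.200 80
        172.18.5.200 443
    }
    
    virtual_server group WEB_GROUP {
        lb_algo wrr
        lb_kind DR
        protocol TCP
        real_server 172.18.5.221 80 { ... }
        real_server 172.18.5.223 80 { ... }
    }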
    

    B. Virtual server declaration
    A virtual server can be declared in any of the following three forms:

    1. virtual_server IP port {}
    2. virtual_server fwmark int {}
    3. virtual_server group string {}
    

    Example based on fwmark:

    # Prepare the firewall marks
    iptables -t mangle -A PREROUTING -d 172.18.5.100  -p tcp -m multiport --dports 80,443 -j MARK --set-mark 180443  
    iptables -t mangle -A PREROUTING -d 172.18.5.200  -p tcp -m multiport --dports 80,443 -j MARK --set-mark 280443  
    # iptables -t mangle -L -n
    Chain PREROUTING (policy ACCEPT)
    target     prot opt source               destination
    MARK       tcp  --  0.0.0.0/0            172.18.5.100         multiport dports 80,443 MARK set 0x2c0db
    MARK       tcp  --  0.0.0.0/0            172.18.5.200         multiport dports 80,443 MARK set 0x4477b
    
    # fwmark-based keepalived configuration
    ! Configuration File for keepalived
    
    global_defs {
        router_id LVS_MASTER_106
    }
    
    vrrp_instance VI_1 {
        state MASTER
        interface enp0s8
        virtual_router_id 51
        priority 100
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 1234
        }
        virtual_ipaddress {
            172.18.5.100
        }
    }
    
    vrrp_instance VI_2 {
        state BACKUP
        interface enp0s8
        virtual_router_id 52
        priority 50
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 1234
        }
        virtual_ipaddress {
            172.18.5.200
        }
    }
    
    virtual_server fwmark 180443 {
        delay_loop 6
        lb_algo wrr
        lb_kind DR
        persistence_timeout 50
        protocol TCP
    
        real_server 172.18.5.221 80 {
            weight 1
            TCP_CHECK {
                connect_timeout 3
                retry 3
                delay_before_retry 3
                connect_port 80
            }
        }
    
        real_server 172.18.5.223 80 {
            weight 1
            TCP_CHECK {
                connect_timeout 3
                retry 3
                delay_before_retry 3
                connect_port 80
            }
        }
    }
    
    virtual_server fwmark 180443 {
        delay_loop 6
        lb_algo wrr
        lb_kind DR
        persistence_timeout 50
        protocol TCP
    
        real_server 172.18.5.221 443 {
            weight 1
            TCP_CHECK {
                connect_timeout 3
                retry 3
                delay_before_retry 3
                connect_port 443
            }
        }
    
        real_server 172.18.5.223 443 {
            weight 1
            TCP_CHECK {
                connect_timeout 3
                retry 3
                delay_before_retry 3
                connect_port 443
            }
        }
    }
    
    virtual_server fwmark 280443 {
        delay_loop 6
        lb_algo wrr
        lb_kind DR
        persistence_timeout 50
        protocol TCP
    
        real_server 172.18.5.221 80 {
            weight 1
            TCP_CHECK {
                connect_timeout 3
                retry 3
                delay_before_retry 3
                connect_port 80
            }
        }
    
        real_server 172.18.5.223 80 {
            weight 1
            TCP_CHECK {
                connect_timeout 3
                retry 3
                delay_before_retry 3
                connect_port 80
            }
        }
    }
    
    virtual_server fwmark 280443 {
        delay_loop 6
        lb_algo wrr
        lb_kind DR
        persistence_timeout 50
        protocol TCP
    
        real_server 172.18.5.221 443 {
            weight 1
            TCP_CHECK {
                connect_timeout 3
                retry 3
                delay_before_retry 3
                connect_port 443
            }
        }
    
        real_server 172.18.5.223 443 {
            weight 1
            TCP_CHECK {
                connect_timeout 3
                retry 3
                delay_before_retry 3
                connect_port 443
            }
        }
    }
    # The other LVS node (107) is configured the same way, by analogy
    
    [root@lvs106-master keepalived]# systemctl restart keepalived
    [root@lvs106-master keepalived]# ipvsadm -S -n
    
    -A -f 180443 -s wrr -p 50
    -a -f 180443 -r 172.18.5.221:80 -g -w 1
    -a -f 180443 -r 172.18.5.221:443 -g -w 1
    -a -f 180443 -r 172.18.5.223:80 -g -w 1
    -a -f 180443 -r 172.18.5.223:443 -g -w 1
    -A -f 280443 -s wrr -p 50
    -a -f 280443 -r 172.18.5.221:80 -g -w 1
    -a -f 280443 -r 172.18.5.221:443 -g -w 1
    -a -f 280443 -r 172.18.5.223:80 -g -w 1
    -a -f 280443 -r 172.18.5.223:443 -g -w 1
    

    Configure both LVS machines to reapply this automatically at boot:

    # Make rc.local executable
    chmod +x /etc/rc.d/rc.local
    
    # Edit the boot-time configuration
    vim /etc/rc.d/rc.local
    #!/bin/bash
    # THIS FILE IS ADDED FOR COMPATIBILITY PURPOSES
    #
    # It is highly advisable to create own systemd services or udev rules
    # to run scripts during boot instead of using this file.
    #
    # In contrast to previous versions due to parallel execution during boot
    # this script will NOT be run after all other services.
    #
    # Please note that you must run 'chmod +x /etc/rc.d/rc.local' to ensure
    # that this script will be executed during boot.
    
    touch /var/lock/subsys/local
    # configured for lvs
    iptables -t mangle -F
    iptables -t mangle -A PREROUTING -d 172.18.5.100  -p tcp -m multiport --dports 80,443 -j MARK --set-mark 180443
    iptables -t mangle -A PREROUTING -d 172.18.5.200  -p tcp -m multiport --dports 80,443 -j MARK --set-mark 280443
    

    After binding ports 80 and 443 together with firewall marks, one small issue may remain. This setup exposes two VIPs, so if DNS round robin is also used, two consecutive requests may resolve to different VIPs and therefore hit different persistence state. For production LVS it is therefore preferable to use a single VIP in a plain master/backup arrangement rather than the dual-master scheme.
