Load Balancing

Author: 挑战_bae7 | Published 2021-01-29 11:15

    1. Four ways to configure a virtual IP address (VIP)

    yum install net-tools -y
    ifconfig eth0:1 192.168.199.2 netmask 255.255.255.0 up
    ifconfig eth0:1 down
    
    ip addr add 192.168.199.3/24 dev eth0
    ip addr del 192.168.199.3/24 dev eth0
    
    [root@vip1 ~]# cat  /etc/sysconfig/network-scripts/ifcfg-eth0:1
    DEVICE="eth0:1"
    NAME="eth0:1"
    IPADDR=192.168.199.5
    NETMASK=255.255.255.0
    systemctl restart network
    
    [root@vip1 ~]# vim /etc/sysconfig/network-scripts/ifcfg-eth0
    TYPE=Ethernet
    PROXY_METHOD=none
    BROWSER_ONLY=no
    BOOTPROTO=none
    DEFROUTE=yes
    IPV4_FAILURE_FATAL=no
    IPV6INIT=yes
    IPV6_AUTOCONF=yes
    IPV6_DEFROUTE=yes
    IPV6_FAILURE_FATAL=no
    IPV6_ADDR_GEN_MODE=stable-privacy
    NAME=eth0
    UUID=48635c6d-a8ef-4a3c-80b2-b36b77553f27
    DEVICE=eth0
    ONBOOT=yes
    IPV6_PRIVACY=no
    IPADDR=192.168.199.30
    IPADDR1=192.168.199.6  ## add this line (IPADDR1) to put a second address on eth0
    PREFIX=24
    GATEWAY=192.168.199.1
    DNS1=192.168.199.1
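    
    Whichever of the four methods is used (after restarting the network service for the two persistent ones), the result can be verified the same way; a minimal check, assuming the example addresses above:
    ip -4 addr show dev eth0     # the secondary address should be listed next to 192.168.199.30
    ping -c 1 192.168.199.6      # the VIP should answer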
    

    2. LVS

    Role  IP address      OS         VIP            Configuration
    vip1  192.168.199.30  centos7.x  192.168.199.2  ipvsadm, VIP
    vip2  192.168.199.31  centos7.x  192.168.199.2  nginx, VIP, ARP suppression
    vip3  192.168.199.32  centos7.x  192.168.199.2  nginx, VIP, ARP suppression

    2.1 LVS DR (direct routing) mode: needs ARP suppression; requests enter through the LVS director, the real servers reply to clients directly

    In DR mode the director only rewrites the destination MAC address and forwards the request to a backend real server, so the director and the real servers must sit in the same layer-2 network.
    In DR mode the real servers also need the VIP, but only so they will accept the forwarded requests; the director's VIP is the one that actually serves clients. All three machines therefore carry the same VIP, which is awkward and, without ARP suppression, easily leads to IP conflicts.

    cat > /etc/yum.repos.d/nginx.repo <<EOF
    [nginx-stable]
    name=nginx stable repo
    baseurl=http://nginx.org/packages/centos/\$releasever/\$basearch/
    gpgcheck=1
    enabled=1
    gpgkey=https://nginx.org/keys/nginx_signing.key
    module_hotfixes=true
    EOF
    yum install nginx -y
    systemctl start nginx
    systemctl enable nginx
    systemctl stop firewalld
    systemctl disable firewalld
    echo `hostname` >/usr/share/nginx/html/index.html
    
    ARP suppression (on the real servers)
    cat >> /etc/sysctl.conf <<EOF
    net.ipv4.conf.all.arp_ignore=1
    net.ipv4.conf.lo.arp_ignore=1
    net.ipv4.conf.all.arp_announce=2
    net.ipv4.conf.lo.arp_announce=2
    EOF
    sysctl -p
    To revert to the kernel defaults later, set the values back to 0:
    net.ipv4.conf.all.arp_ignore=0
    net.ipv4.conf.lo.arp_ignore=0
    net.ipv4.conf.all.arp_announce=0
    net.ipv4.conf.lo.arp_announce=0
    sysctl -a | grep arp_ignore    # list all matching kernel parameters
    arp_ignore = 1
        only answer an ARP request if its target IP is configured on the interface the request arrived on
    arp_announce = 2
        when sending ARP requests, always use an address configured on the outgoing interface
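    
    If you prefer not to edit /etc/sysctl.conf, the same kernel parameters can be applied at runtime (a non-persistent sketch, lost on reboot):
    sysctl -w net.ipv4.conf.all.arp_ignore=1
    sysctl -w net.ipv4.conf.lo.arp_ignore=1
    sysctl -w net.ipv4.conf.all.arp_announce=2
    sysctl -w net.ipv4.conf.lo.arp_announce=2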
    
    VIP configuration
    ifconfig lo:1 192.168.199.2 netmask 255.255.255.255 up
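    
    Equivalently with the ip command (a sketch; the /32 mask keeps the host from treating lo as a route to the whole subnet):
    ip addr add 192.168.199.2/32 dev lo label lo:1
    ip addr del 192.168.199.2/32 dev lo     # to remove it again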
    
    Install ipvsadm (on the director, vip1)
    yum install -y ipvsadm
    ipvsadm --help
       -l   list the load-balancing rules
       -n   numeric output, no DNS resolution; usually combined as -ln
       -C   clear all rules
       -A   add a virtual server
       -a   add a real server
       -S   save the rules
       -R   restore rules
       -t   TCP service
       -D   delete a virtual server
    [root@vip1 ~]# ipvsadm -A -t 192.168.199.2:80 -s rr
    [root@vip1 ~]# ipvsadm -a -t 192.168.199.2:80 -r 192.168.199.31:80 -g -w 1
    [root@vip1 ~]# ipvsadm -a -t 192.168.199.2:80 -r 192.168.199.32:80 -g -w 1
    Deployment note: in DR mode the real-server port must match the VIP port, otherwise forwarding will not work.
    In ipvsadm -A, -s selects the scheduling algorithm:
    rr:  round robin
    wrr: weighted round robin
    lc:  least connections
    wlc: weighted least connections
    sh:  source-address hash
    In ipvsadm -a, -g selects DR mode, -m NAT mode, -i TUN mode.
    [root@localhost ~]# curl 192.168.199.2
    vip3
    [root@localhost ~]# curl 192.168.199.2
    vip2
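    
    To confirm on the director that both real servers are actually receiving traffic, ipvsadm can list the rules together with counters:
    ipvsadm -ln            # current virtual and real servers
    ipvsadm -ln --stats    # per-real-server connection/packet/byte counters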
    
    Other useful ipvsadm commands
    ipvsadm -C                     # clear all rules
    ipvsadm -Sn                    # dump the current rules
    ipvsadm -Sn >/tmp/ipvs         # save them to a file
    cat /tmp/ipvs | ipvsadm -R     # restore the rules from that file
    
    Recap of the -s scheduling algorithms:
    rr:  round robin                 --- real servers are used strictly in turn
    wrr: weighted round robin        --- round robin adjusted by weight
    lc:  least connections           --- the server with the fewest connections gets the next request
    wlc: weighted least connections  --- least connections adjusted by weight
    sh:  source-address hash         --- the server a client IP hits first keeps serving that IP
    
    Weighted round robin (wrr) in practice (the weight-4 server should get about four times the traffic of the weight-1 server):
    ipvsadm -C
    ipvsadm -A -t 192.168.199.2:80 -s wrr
    ipvsadm -a -t 192.168.199.2:80 -r 192.168.199.31:80 -g -w 4
    ipvsadm -a -t 192.168.199.2:80 -r 192.168.199.32:80 -g -w 1
    
    Least connections (lc) in practice, here balancing SSH on port 22:
    ipvsadm -C
    ipvsadm -A -t 192.168.199.2:22 -s lc
    ipvsadm -a -t 192.168.199.2:22 -r 192.168.199.31:22 -g -w 1
    ipvsadm -a -t 192.168.199.2:22 -r 192.168.199.32:22 -g -w 1
    
    Source-address hash (sh) in practice:
    ipvsadm -C
    ipvsadm -A -t 192.168.199.2:80 -s sh
    ipvsadm -a -t 192.168.199.2:80 -r 192.168.199.31:80 -g -w 1
    ipvsadm -a -t 192.168.199.2:80 -r 192.168.199.32:80 -g -w 1
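    
    A quick sanity check for the sh scheduler: repeated requests from the same client IP should always return the same hostname, while a different client may land on the other real server.
    for i in $(seq 5); do curl -s 192.168.199.2; done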
    

    2.2 LVS NAT mode

    Role  External IP     OS         Internal IP  Configuration
    vip1  192.168.199.30  centos7.x  172.16.1.1   ipvsadm, VIP, two NICs, SNAT
    vip2  -               centos7.x  172.16.1.2   nginx, gateway 172.16.1.1
    vip3  -               centos7.x  172.16.1.3   nginx, gateway 172.16.1.1
    Configure the IP addresses
    [root@vip1 ~]#nmcli connection add ipv4.method manual con-name ens7  ifname ens7 ipv4.addresses 172.16.1.1/24  type ethernet
    [root@vip1 ~]#nmcli connection up ens7
    
    [root@vip2 ~]# nmcli connection modify eth0 ipv4.method  manual ipv4.addresses 172.16.1.2/24 ipv4.gateway 172.16.1.1 ipv4.dns 223.5.5.5
    [root@vip2 ~]# nmcli connection up eth0
    
    [root@vip3 ~]# nmcli connection modify eth0 ipv4.method  manual ipv4.addresses 172.16.1.3/24 ipv4.gateway 172.16.1.1 ipv4.dns 223.5.5.5
    [root@vip3 ~]# nmcli connection up eth0
    
    Configure SNAT on vip1 so the real servers can reach the internet through the director
    sysctl -w net.ipv4.ip_forward=1
    iptables -t nat -A POSTROUTING -s 172.16.1.0/24 -o eth0 -j MASQUERADE
    [root@vip3 ~]# ping www.qq.com
    PING ins-r23tsuuf.ias.tencent-cloud.net (112.53.42.52) 56(84) bytes of data.
    64 bytes from 112.53.42.52 (112.53.42.52): icmp_seq=1 ttl=51 time=33.1 ms
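    
    sysctl -w only changes the running kernel; to keep forwarding enabled after a reboot and to confirm the MASQUERADE rule is matching, a sketch:
    echo 'net.ipv4.ip_forward = 1' >> /etc/sysctl.conf
    sysctl -p
    iptables -t nat -L POSTROUTING -nv    # the packet counters show the SNAT rule being hit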
    
    VIP configuration (on vip1, the director)
    ifconfig lo:1 192.168.199.2 netmask 255.255.255.255 up
    ipvsadm -C
    ipvsadm -A -t 192.168.199.2:80 -s rr
    ipvsadm -a -t 192.168.199.2:80 -r 172.16.1.2:82 -m -w 1 # in NAT mode the real-server port may differ from the VIP port (the backend must actually listen on it)
    ipvsadm -a -t 192.168.199.2:80 -r 172.16.1.3:82 -m -w 1
    [root@localhost ~]# curl 192.168.199.2
    vip2
    [root@localhost ~]# curl 192.168.199.2
    vip3
    

    3. keepalived (VRRP, Virtual Router Redundancy Protocol)

    https://www.keepalived.org/download.html

    Role  IP address      OS         Configuration
    vip2  192.168.199.31  centos7.x  keepalived
    vip3  192.168.199.32  centos7.x  keepalived
    yum -y install wget gcc make tar openssl openssl-devel libnl libnl-devel libnfnetlink-devel
    cd /usr/local/src
    wget https://www.keepalived.org/software/keepalived-2.2.1.tar.gz
    tar -zxvf keepalived-2.2.1.tar.gz
    cd keepalived-2.2.1
    ./configure --prefix=/usr/local/keepalived
    make && make install
    
    mv /usr/local/keepalived/etc/keepalived/keepalived.conf{,.bak}
    vrrp_instance block: defines the VIP
    state               MASTER or BACKUP
    interface           physical NIC the VIP is bound to
    virtual_router_id   virtual router ID (the same on both nodes)
    priority            priority; the master's must be higher than the backup's
    advert_int          advertisement interval; here the master sends a VRRP packet every 2 s to say it is alive
    authentication      authentication settings
    virtual_ipaddress   the VIP(s) to manage
    Master node (vip2)
    vim /usr/local/keepalived/etc/keepalived/keepalived.conf
    vrrp_instance VI_1 {
        state MASTER
        interface eth0
        virtual_router_id 31
        priority 100
        advert_int 2
        notify_master "/root/wx.py Yaohui vip2-is-now-master"
        notify_backup "/root/wx.py Yaohui vip2-entered-backup"
        notify_fault "/root/wx.py Yaohui vip2-fault"
        authentication {
            auth_type PASS
            auth_pass 666666
        }
        virtual_ipaddress {
            192.168.199.2/24
        }
    }
    
    Backup node (vip3)
    vim /usr/local/keepalived/etc/keepalived/keepalived.conf
    vrrp_instance VI_1 {
        state BACKUP
        interface eth0
        virtual_router_id 31
        priority 99
        advert_int 2
        notify_master "/root/wx.py Yaohui vip3-is-now-master"
        notify_backup "/root/wx.py Yaohui vip3-entered-backup"
        notify_fault "/root/wx.py Yaohui vip3-fault"
        authentication {
            auth_type PASS
            auth_pass 666666
        }
        virtual_ipaddress {
            192.168.199.2/24
        }
    }
    Check the configuration, then start keepalived:
    /usr/local/keepalived/sbin/keepalived -t -f /usr/local/keepalived/etc/keepalived/keepalived.conf
    /usr/local/keepalived/sbin/keepalived -f /usr/local/keepalived/etc/keepalived/keepalived.conf
    pkill keepalived    # kill the process on the master and watch the VIP drift to the backup
    [root@vip3 ~]# tailf /var/log/messages
    Jan 29 15:36:52 vip3 Keepalived[10447]: Command line: '/usr/local/keepalived/sbin/keepalived' '-f'
    Jan 29 15:36:52 vip3 Keepalived[10447]:              '/usr/local/keepalived/etc/keepalived/keepalived.conf'
    Jan 29 15:36:52 vip3 Keepalived[10447]: Configuration file /usr/local/keepalived/etc/keepalived/keepalived.conf
    Jan 29 15:36:52 vip3 Keepalived[10448]: NOTICE: setting config option max_auto_priority should result in better keepalived performance
    Jan 29 15:36:52 vip3 Keepalived[10448]: Starting VRRP child process, pid=10449
    Jan 29 15:36:52 vip3 Keepalived_vrrp[10449]: WARNING - default user 'keepalived_script' for script execution does not exist - please create.
    Jan 29 15:36:52 vip3 Keepalived_vrrp[10449]: SECURITY VIOLATION - scripts are being executed but script_security not enabled.
    Jan 29 15:36:52 vip3 Keepalived_vrrp[10449]: (VI_1) Entering BACKUP STATE (init)
    Jan 29 15:37:12 vip3 Keepalived_vrrp[10449]: (VI_1) Backup received priority 0 advertisement
    Jan 29 15:37:12 vip3 Keepalived_vrrp[10449]: (VI_1) Entering MASTER STATE
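    
    During the failover test it helps to run the following on both nodes to see where the VIP currently lives (it should disappear from the killed node and show up on the new master):
    ip -4 addr show dev eth0 | grep 192.168.199.2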
    
    By default keepalived preempts: when the failed master comes back up it reclaims the VIP based on priority, so the VIP floats back and forth. To disable preemption, adjust the master's configuration:
    Master node (vip2)
    vim /usr/local/keepalived/etc/keepalived/keepalived.conf
    vrrp_instance VI_1 {
        state BACKUP   ## must stay BACKUP here; nopreempt is ignored if the state is MASTER
        nopreempt ## add this line
        interface eth0
        virtual_router_id 31
        priority 100
        advert_int 2
        notify_master "/root/wx.py Yaohui vip2-is-now-master"
        notify_backup "/root/wx.py Yaohui vip2-entered-backup"
        notify_fault "/root/wx.py Yaohui vip2-fault"
        authentication {
            auth_type PASS
            auth_pass 666666
        }
        virtual_ipaddress {
            192.168.199.2/24
        }
    }
    
    keepalived split brain
    pkill -9 keepalived    # force-kill the process, roughly equivalent to a sudden power loss
    Both machines then carry the same VIP at the same time.
    systemctl restart network    # recovers; alternatively fix keepalived and start it again
    A firewall dropping the VRRP protocol also causes split brain, so rule out network and firewall issues first. The rules below reproduce it:
    iptables -A INPUT -p vrrp -j DROP  
    iptables -A OUTPUT -p vrrp -j DROP
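    
    To recover from the simulated split brain, remove the test rules again; if a firewall has to stay enabled, VRRP (IP protocol 112) must be allowed explicitly, e.g. with a firewalld rich rule (a sketch):
    iptables -D INPUT -p vrrp -j DROP
    iptables -D OUTPUT -p vrrp -j DROP
    firewall-cmd --permanent --add-rich-rule='rule protocol value="vrrp" accept'
    firewall-cmd --reload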
    

    4. keepalived with LVS: health-checking backend servers by TCP port, HTTP status, or custom script

    Role  IP address      OS         VIP            Configuration
    vip1  192.168.199.30  centos7.x  192.168.199.2  keepalived, ipvsadm
    vip2  192.168.199.31  centos7.x  192.168.199.2  nginx, VIP, ARP suppression
    vip3  192.168.199.32  centos7.x  192.168.199.2  nginx, VIP, ARP suppression

    Restore the LVS DR prerequisites from section 2.1 on the real servers (VIP on lo plus ARP suppression). keepalived has built-in LVS support, so the ipvsadm rules no longer have to be added by hand.

    4.1 keepalived configuration

    vim /usr/local/keepalived/etc/keepalived/keepalived.conf 
    vrrp_instance VI_1 {
        state BACKUP
        interface eth0
        nopreempt
        virtual_router_id 51
        priority 100
        advert_int 2
        authentication {
            auth_type PASS
            auth_pass 666666
        }
        virtual_ipaddress {
            192.168.199.2/24
        }
    }
    
    virtual_server 192.168.199.2 80 {
        delay_loop 3
        lb_algo rr
        lb_kind DR
        protocol TCP
    
        real_server 192.168.199.31 80 {
            weight 1
            TCP_CHECK {
            connect_port 80
            connect_timeout 6
            retry 3
            delay_before_retry 3
          }
        }
        real_server 192.168.199.32 80 {
            weight 1
            TCP_CHECK {
            connect_port 80
            connect_timeout 6
            retry 3
            delay_before_retry 3
          }
        }
    }
    /usr/local/keepalived/sbin/keepalived -f /usr/local/keepalived/etc/keepalived/keepalived.conf
    keepalived + LVS configuration notes
    delay_loop      health-check interval under normal conditions
    lb_algo         scheduling algorithm (rr, wrr, ...)
    lb_kind         LVS forwarding mode (DR here)
    real_server     defines a backend real server
    
    real_server options
    weight          weight of this real server
    TCP_CHECK       TCP health check against this real server
    
    TCP_CHECK options
    connect_port        port to probe
    connect_timeout     connection timeout
    retry               number of retries
    delay_before_retry  delay between retries
    
    Verification:
    [root@vip1 ~]# ipvsadm -ln
    IP Virtual Server version 1.2.1 (size=4096)
    Prot LocalAddress:Port Scheduler Flags
      -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
    TCP  192.168.199.2:80 rr
      -> 192.168.199.31:80            Route   1      0          0         
      -> 192.168.199.32:80            Route   1      0          0        
    [root@localhost ~]# curl 192.168.199.2
    vip2
    [root@localhost ~]# curl 192.168.199.2
    vip3
    
    cat > /usr/lib/systemd/system/keepalived.service <<EOF
    [Unit]
    Description=LVS and VRRP High Availability Monitor
    After=network-online.target syslog.target
    Wants=network-online.target
    
    [Service]
    Type=forking
    PIDFile=/run/keepalived.pid
    KillMode=process
    EnvironmentFile=-/usr/local/keepalived/etc/sysconfig/keepalived
    ExecStart=/usr/local/keepalived/sbin/keepalived \$KEEPALIVED_OPTIONS -f /usr/local/keepalived/etc/keepalived/keepalived.conf
    ExecReload=/bin/kill -HUP \$MAINPID
    
    [Install]
    WantedBy=multi-user.target
    EOF
    systemctl daemon-reload
    systemctl start keepalived
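    With the unit file in place, keepalived can also be enabled at boot and inspected like any other service:
    systemctl enable keepalived
    systemctl status keepalived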
    

    4.2 keepalived health-check methods

    keepalived supports three kinds of health checks:
    TCP_CHECK       TCP port check
    HTTP_GET        HTTP URL/status-code check
    MISC_CHECK      custom script check
    
    HTTP check
    HTTP_GET {
              url {
                path /index.html
                status_code 200
              }  
              connect_timeout 3
              retry 3
              delay_before_retry 3
            }
    
    Custom script check
    MISC_CHECK {
              misc_path "/root/cs.sh 192.168.199.2"
              misc_timeout 30
            }
    
    cat cs.sh
    #!/bin/sh
    # exit 0 if the web server answers within 2 seconds, otherwise exit 1
    serverip=$1
    curl -s -m 2 http://$serverip
    if [ $? -eq 0 ];then
      exit 0
    else
      exit 1
    fi
    chmod +x /root/cs.sh
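    
    Before wiring the script into MISC_CHECK it is worth running it by hand; keepalived treats exit code 0 as healthy and any non-zero exit as a failed check:
    /root/cs.sh 192.168.199.31; echo $?    # 0 = healthy, 1 = the real server would be removed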
    
    vim /usr/local/keepalived/etc/keepalived/keepalived.conf
    vrrp_instance VI_1 {
        state BACKUP
        interface eth0
        nopreempt
        virtual_router_id 51
        priority 100
        advert_int 2
        authentication {
            auth_type PASS
            auth_pass 666666
        }
        virtual_ipaddress {
            192.168.199.2/24
        }
    }
    
    virtual_server 192.168.199.2 80 {
        delay_loop 3
        lb_algo rr
        lb_kind DR
        protocol TCP
    
        real_server 192.168.199.31 80 {
            weight 1
            MISC_CHECK {
              misc_path "/root/cs.sh 192.168.199.31"
              misc_timeout 30
            }
        }
        real_server 192.168.199.32 80 {
            weight 1
             MISC_CHECK {
              misc_path "/root/cs.sh 192.168.199.32"
              misc_timeout 30
    
            }
          }
    }
    

    5. haproxy

    https://www.haproxy.com/

    Role  IP address      OS         Configuration
    vip1  192.168.199.30  centos7.x  haproxy
    vip2  192.168.199.31  centos7.x  nginx
    vip3  192.168.199.32  centos7.x  nginx

    5.1 Building haproxy from source

    useradd haproxy -s /sbin/nologin
    wget https://www.haproxy.org/download/2.3/src/haproxy-2.3.4.tar.gz
    tar xf haproxy-2.3.4.tar.gz
    cd haproxy-2.3.4/
    make TARGET=linux-glibc USE_PCRE=1 USE_OPENSSL=1 USE_ZLIB=1 PREFIX=/usr/local/haproxy
    make install PREFIX=/usr/local/haproxy
    Verify the installation
    /usr/local/haproxy/sbin/haproxy -v
    /usr/local/haproxy/sbin/haproxy -h
    -f  #specify the haproxy configuration file
    -c  #check the configuration file without starting
    
    Kernel and limits tuning
    vim /etc/sysctl.conf            # raise the connection backlog
    net.core.somaxconn = 10240
    sysctl -p
    ulimit -n                       # current max open files
    vim /etc/security/limits.conf
    * - nofile 10240
    Log out and log back in for the new limit to take effect.
    ulimit -n                       # confirm the change
    
    haproxy logs through the system syslog service, so configure /etc/rsyslog.conf:
    vim /etc/rsyslog.conf
    *.info;mail.none;authpriv.none;cron.none;local2.none                /var/log/messages   # append local2.none so haproxy logs do not also end up in /var/log/messages
    local2.*   /var/log/haproxy.log
    $ModLoad imudp
    $UDPServerRun 514
    $AllowedSender udp, 127.0.0.1/32
    systemctl restart rsyslog
    [root@vip1 haproxy]# ss -lntup |grep 514
    udp    UNCONN     0      0         *:514                   *:*                   users:(("rsyslogd",pid=21799,fd=3))
    udp    UNCONN     0      0      [::]:514                [::]:*                   users:(("rsyslogd",pid=21799,fd=4))
    
    mkdir /usr/local/haproxy/conf -p
    vim /usr/local/haproxy/conf/haproxy.conf
    global
        log         127.0.0.1 local2
        chroot      /usr/local/haproxy
        pidfile     /usr/local/haproxy/haproxy.pid
        maxconn     10240
        user        haproxy
        group       haproxy
        daemon
    
    defaults
        mode  tcp
        log  global
        timeout client 3600s
        timeout connect 2s
        timeout server 3600s
        maxconn  10240
    
    listen testgetcp ## the name is arbitrary
        bind 0.0.0.0:80
        balance roundrobin ## scheduling algorithm
        server tcp31 192.168.199.31:80 weight 1
        server tcp32 192.168.199.32:80 weight 1
    
    haproxy configuration notes
    timeout client      #inactivity timeout between the client and haproxy
    timeout connect     #timeout for haproxy connecting to a real server
    timeout server      #inactivity timeout between haproxy and the real server
    bind                #address and port haproxy listens on
    balance             #load-balancing algorithm
    server              #defines a real (backend) server
    
    /usr/local/haproxy/sbin/haproxy -c -f /usr/local/haproxy/conf/haproxy.conf    # check the configuration
    /usr/local/haproxy/sbin/haproxy -f /usr/local/haproxy/conf/haproxy.conf       # start it by hand
    
    Starting via systemd
    cat >/usr/lib/systemd/system/haproxy.service <<EOF
    [Unit]
    Description=haproxy
    After=network.target
    [Service]
    Type=forking
    ExecStart=/usr/local/haproxy/sbin/haproxy -f /usr/local/haproxy/conf/haproxy.conf
    [Install]
    WantedBy=multi-user.target
    EOF
     systemctl daemon-reload
     systemctl start haproxy
    curl 192.168.199.30
    tailf /var/log/haproxy.log
    Jan 30 10:05:02 localhost haproxy[21409]: Connect from 192.168.199.1:48574 to 192.168.199.30:80 (testgetcp/TCP)
    Jan 30 10:05:02 localhost haproxy[21409]: Connect from 192.168.199.1:48576 to 192.168.199.30:80 (testgetcp/TCP)
    

    5.2 haproxy HTTP load balancing

    vim /usr/local/haproxy/conf/haproxy.conf
    global
        log         127.0.0.1 local2
        chroot      /usr/local/haproxy
        pidfile     /usr/local/haproxy/haproxy.pid
        maxconn     10240
        user        haproxy
        group       haproxy
        daemon
    
    defaults
        mode http ## changed: HTTP mode instead of tcp
        log global
        option  forwardfor
        option httplog   ## changed: HTTP log format
        timeout client 3600s
        timeout connect 2s
        timeout server 3600s
        maxconn  10240
    
    listen httpforward
        bind 0.0.0.0:80
        balance roundrobin
        server http31 192.168.199.31:80 weight 1
        server http32 192.168.199.32:80 weight 1
    
    curl 192.168.199.30      # after a request, the HTTP log shows which backend handled it:
    tailf /var/log/haproxy.log
    Jan 30 10:11:11 localhost haproxy[22584]: 192.168.199.1:48632 [30/Jan/2021:10:11:11.974] httpforward httpforward/http31 0/0/0/1/1 200 207 - - ---- 1/1/0/0/0 0/0 "GET / HTTP/1.1"
    Jan 30 10:11:12 localhost haproxy[22584]: 192.168.199.1:48634 [30/Jan/2021:10:11:12.980] httpforward httpforward/http32 0/0/0/0/0 200 207 - - ---- 1/1/0/0/0 0/0 "GET / HTTP/1.1"
    

    5.3 haproxy load-balancing algorithms

    balance roundrobin                      #dynamic weighted round robin
    balance static-rr                       #static weighted round robin
    balance source hash-type map-based      #source-address hash; useful when sessions must stick to one server
    balance uri len 5                       #HTTP only: hash on the first 5 characters of the URI (useful in front of caches)
    balance uri depth 2                     #HTTP only: hash on the first 2 directory levels of the URI
    balance leastconn                       #least connections; haproxy tracks the connection count of each real server
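    
    Note that in an actual configuration file "balance source" and "hash-type map-based" are two separate directives. A sketch of a sticky-by-source listen section (the name stickyhttp is arbitrary):
    listen stickyhttp
        bind 0.0.0.0:80
        balance source
        hash-type map-based
        server http31 192.168.199.31:80 weight 1
        server http32 192.168.199.32:80 weight 1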
    

    5.4 haproxy high availability (backend health checks)

    vim /usr/local/haproxy/conf/haproxy.conf
    global
        log         127.0.0.1 local2
        chroot      /usr/local/haproxy
        pidfile     /usr/local/haproxy/haproxy.pid
        maxconn     10240
        user        haproxy
        group       haproxy
        daemon
    
    defaults
        mode http
        log global
        option  forwardfor
        option httplog
        timeout client 3600s
        timeout connect 2s
        timeout server 3600s
        maxconn  10240
    
    listen httpforward
        bind 0.0.0.0:80
        balance roundrobin
        server http31 192.168.199.31:80 weight 1 check inter 2000 fall 3 rise 3
        server http32 192.168.199.32:80 weight 1 check inter 2000 fall 3 rise 3
    High-availability options
    check   #enable health checks for this backend server
    inter   #interval between checks, in milliseconds
    fall    #after 3 consecutive failures the server is marked down and removed from rotation
    rise    #after 3 consecutive successes the server is marked up and put back into rotation
    
    [root@vip3 ~]# tcpdump -i any -nn 'port 80'    # the capture shows haproxy constantly probing whether the backend is alive
    tcpdump: verbose output suppressed, use -v or -vv for full protocol decode
    listening on any, link-type LINUX_SLL (Linux cooked), capture size 262144 bytes
    10:27:23.144177 IP 192.168.199.30.51856 > 192.168.199.32.80: Flags [S], seq 39644271, win 29200, options [mss 1460,sackOK,TS val 81483483 ecr 0,nop,wscale 7], length 0
    10:27:23.144223 IP 192.168.199.32.80 > 192.168.199.30.51856: Flags [S.], seq 3196914195, ack 39644272, win 28960, options [mss 1460,sackOK,TS val 81480204 ecr 81483483,nop,wscale 7], length 0
    
    haproxy HTTP health check
    vim /usr/local/haproxy/conf/haproxy.conf
    global
        log         127.0.0.1 local2
        chroot      /usr/local/haproxy
        pidfile     /usr/local/haproxy/haproxy.pid
        maxconn     10240
        user        haproxy
        group       haproxy
        daemon
    
    defaults
        mode http
        log global
        option forwardfor
        option httplog
        timeout client 3600s
        timeout connect 2s
        timeout server 3600s
        maxconn  10240
    
    listen httpforward
        bind 0.0.0.0:80
        option httpchk GET /index.html  ## add this line
        balance roundrobin
        server http31 192.168.199.31:80 weight 1 check inter 2000 fall 3 rise 3
        server http32 192.168.199.32:80 weight 1 check inter 2000 fall 3 rise 3
    On the backend servers the access log shows haproxy requesting the check URL over and over; a server counts as healthy as long as it returns 200:
    [root@vip3 ~]# tailf /var/log/nginx/access.log 
    192.168.199.30 - - [30/Jan/2021:10:30:14 +0800] "GET /index.html HTTP/1.0" 200 5 "-" "-" "-"
    192.168.199.30 - - [30/Jan/2021:10:30:16 +0800] "GET /index.html HTTP/1.0" 200 5 "-" "-" "-"
    

    5.5 haproxy: separating static and dynamic requests with ACLs

    global
        log         127.0.0.1 local2
        chroot      /usr/local/haproxy
        pidfile     /usr/local/haproxy/haproxy.pid
        maxconn     10240
        user        haproxy
        group       haproxy
        daemon
    
    defaults
        mode http
        log global
        option  forwardfor
        option httplog
        timeout connect 2s
        timeout client 360s
        timeout server 360s
        maxconn  10240
    
    frontend read_write
        bind 0.0.0.0:80
        acl dynamic path_beg -i /java  ## match paths beginning with /java
        acl dynamic path_end  -i .php ## match paths ending in .php
        use_backend dynamicserver if dynamic ## matching (dynamic) requests go to dynamicserver
        default_backend staticserver
    
    backend staticserver
        balance roundrobin
        server tcp50 192.168.199.31:80 weight 1 check inter 3000 fall 3 rise 3
    
    backend dynamicserver
        balance roundrobin
        server tcp51 192.168.199.32:80 weight 1 check inter 3000 fall 3 rise 3
    
    Notes on the static/dynamic split
    acl dynamic path_beg -i /action     #matches paths that begin with /action
    acl dynamic path_end  -i .php       #matches paths that end in .php
    
    ACLs also accept regular expressions, e.g. \.php$
    acl dynamic url_reg  /shijiange[0-9]        #plain regex
    acl dynamic url_reg  /shijiange(00|99)      #extended regex is supported
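    
    A quick way to confirm the ACLs route as intended; /java/demo and /test.php are arbitrary test paths (the backend may return 404, which is fine, only the chosen backend matters), and the httplog line shows which backend/server handled each request:
    curl http://192.168.199.30/index.html    # default_backend  -> staticserver (192.168.199.31)
    curl http://192.168.199.30/java/demo     # path_beg /java   -> dynamicserver (192.168.199.32)
    curl http://192.168.199.30/test.php      # path_end .php    -> dynamicserver (192.168.199.32)
    tailf /var/log/haproxy.log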
    

    6. haproxy with keepalived

    Role  IP address      OS         Configuration
    vip1  192.168.199.30  centos7.x  haproxy, keepalived
    vip2  192.168.199.31  centos7.x  haproxy, keepalived
    vip3  192.168.199.32  centos7.x  nginx
    vip4  192.168.199.33  centos7.x  nginx

    6.1 keepalived configuration

    [root@vip1 ~]# vim /usr/local/keepalived/etc/keepalived/keepalived.conf 
    vrrp_script check_haproxy
    {
        script "/root/check.sh"
        interval 3
        weight -20  # if the script exits non-zero, this node's priority is reduced by 20
    }
    vrrp_instance VI_1 {
        state BACKUP
        interface eth0
        virtual_router_id 51
        priority 100
        advert_int 2
        authentication {
            auth_type PASS
            auth_pass 666666
        }
        virtual_ipaddress {
            192.168.199.2/24
        }
        track_script
        {
            check_haproxy
        }
    }
    
    [root@vip1 ~]# vim /root/check.sh 
    #!/bin/sh
    curl -s -m 2 http://192.168.199.30 # check the local haproxy; on vip2 use 192.168.199.31 instead
    if [ $? -eq 0 ];then
      exit 0
    else
      exit 1
    fi
    
    chmod +x /root/check.sh 
    systemctl start keepalived.service
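    
    To exercise the whole chain, stop haproxy on the node that currently holds the VIP: check.sh starts exiting 1, the node's effective priority drops by 20, and (assuming the peer's priority is configured less than 20 below it, e.g. 90, which is not shown above) the peer takes over the VIP. A sketch:
    ip -4 addr show dev eth0 | grep 192.168.199.2    # run on both nodes: who has the VIP now?
    systemctl stop haproxy                           # on the VIP holder; simulate a haproxy failure
    ip -4 addr show dev eth0 | grep 192.168.199.2    # a few seconds later the VIP is on the peer
    systemctl start haproxy                          # restore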
    

    6.2 haproxy configuration

    [root@vip1 ~]# vim /usr/local/haproxy/conf/haproxy.conf 
    global
        log         127.0.0.1 local2
        chroot      /usr/local/haproxy
        pidfile     /usr/local/haproxy/haproxy.pid
        maxconn     10240
        user        haproxy
        group       haproxy
        daemon
    
    defaults
        mode http
        log global
        option forwardfor
        option httplog
        timeout client 3600s
        timeout connect 2s
        timeout server 3600s
        maxconn  10240
    
    listen httpforward
        bind 0.0.0.0:80
        option httpchk GET /index.html 
        balance roundrobin
        server http32 192.168.199.32:80 weight 1 check inter 2000 fall 3 rise 3
        server http33 192.168.199.33:80 weight 1 check inter 2000 fall 3 rise 3
    
    systemctl start haproxy.service
    curl 192.168.199.30  # test each haproxy node directly
    curl 192.168.199.31
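    
    Since the point of adding keepalived is the floating address, the test that matters most goes to the VIP itself; requests keep working even when one haproxy node is down:
    curl 192.168.199.2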
    
