Cluster cases


1. health-check
Lab environment

envoy: Front Proxy, at 172.31.18.2
webserver01: the first backend service
webserver01-sidecar: Sidecar Proxy for the first backend service, at 172.31.18.11
webserver02: the second backend service
webserver02-sidecar: Sidecar Proxy for the second backend service, at 172.31.18.12
front-envoy.yaml

admin:
  profile_path: /tmp/envoy.prof
  access_log_path: /tmp/admin_access.log
  address:
    socket_address: { address: 0.0.0.0, port_value: 9901 }

static_resources:
  listeners:
  - name: listener_0
    address:
      socket_address: { address: 0.0.0.0, port_value: 80 }
    filter_chains:
    - filters:
      - name: envoy.filters.network.http_connection_manager
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
          stat_prefix: ingress_http
          codec_type: AUTO
          route_config:
            name: local_route
            virtual_hosts:
            - name: webservice
              domains: ["*"]
              routes:
              - match: { prefix: "/" }
                route: { cluster: web_cluster_01 }
          http_filters:
          - name: envoy.filters.http.router
  clusters:
  - name: web_cluster_01
    connect_timeout: 0.25s
    type: STRICT_DNS
    lb_policy: ROUND_ROBIN
    load_assignment:
      cluster_name: web_cluster_01
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address: { address: myservice, port_value: 80 }
    health_checks:
    - timeout: 5s
      interval: 10s
      unhealthy_threshold: 2
      healthy_threshold: 2
      http_health_check:
        path: /livez
        expected_statuses:
        - start: 200
          end: 399

front-envoy-with-tcp-check.yaml

admin:
  profile_path: /tmp/envoy.prof
  access_log_path: /tmp/admin_access.log
  address:
    socket_address: { address: 0.0.0.0, port_value: 9901 }

static_resources:
  listeners:
  - name: listener_0
    address:
      socket_address: { address: 0.0.0.0, port_value: 80 }
    filter_chains:
    - filters:
      - name: envoy.filters.network.http_connection_manager
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
          stat_prefix: ingress_http
          codec_type: AUTO
          route_config:
            name: local_route
            virtual_hosts:
            - name: webservice
              domains: ["*"]
              routes:
              - match: { prefix: "/" }
                route: { cluster: web_cluster_01 }
          http_filters:
          - name: envoy.filters.http.router
  clusters:
  - name: web_cluster_01
    connect_timeout: 0.25s
    type: STRICT_DNS
    lb_policy: ROUND_ROBIN
    load_assignment:
      cluster_name: web_cluster_01
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address: { address: myservice, port_value: 80 }
    health_checks:
    - timeout: 5s
      interval: 10s
      unhealthy_threshold: 2
      healthy_threshold: 2
      tcp_health_check: {}
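
Note: the tcp_health_check variant above only verifies that the sidecar's TCP port accepts connections; changing the application's /livez response (as done in the verification below) will not fail it. Use the HTTP check configuration to observe endpoint ejection.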

envoy-sidecar-proxy.yaml

admin:
  profile_path: /tmp/envoy.prof
  access_log_path: /tmp/admin_access.log
  address:
    socket_address:
       address: 0.0.0.0
       port_value: 9901

static_resources:
  listeners:
  - name: listener_0
    address:
      socket_address: { address: 0.0.0.0, port_value: 80 }
    filter_chains:
    - filters:
      - name: envoy.filters.network.http_connection_manager
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
          stat_prefix: ingress_http
          codec_type: AUTO
          route_config:
            name: local_route
            virtual_hosts:
            - name: local_service 
              domains: ["*"]
              routes:
              - match: { prefix: "/" }
                route: { cluster: local_cluster }
          http_filters:
          - name: envoy.filters.http.router

  clusters:
  - name: local_cluster
    connect_timeout: 0.25s
    type: STATIC
    lb_policy: ROUND_ROBIN
    load_assignment:
      cluster_name: local_cluster
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address: { address: 127.0.0.1, port_value: 8080 }

docker-compose.yaml

version: '3.3'

services:
  envoy:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./front-envoy.yaml:/etc/envoy/envoy.yaml
    # - ./front-envoy-with-tcp-check.yaml:/etc/envoy/envoy.yaml
    networks:
      envoymesh:
        ipv4_address: 172.31.18.2
        aliases:
        - front-proxy
    depends_on:
    - webserver01-sidecar
    - webserver02-sidecar

  webserver01-sidecar:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: red
    networks:
      envoymesh:
        ipv4_address: 172.31.18.11
        aliases:
        - myservice

  webserver01:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver01-sidecar"
    depends_on:
    - webserver01-sidecar

  webserver02-sidecar:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: blue
    networks:
      envoymesh:
        ipv4_address: 172.31.18.12
        aliases:
        - myservice

  webserver02:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver02-sidecar"
    depends_on:
    - webserver02-sidecar

networks:
  envoymesh:
    driver: bridge
    ipam:
      config:
        - subnet: 172.31.18.0/24

Verification

docker-compose up
Test from a cloned terminal window

# Continuously send requests to the service:
root@test:~# while true; do curl 172.31.18.2; sleep 1; done
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: red, ServerIP: 172.31.18.11!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: blue, ServerIP: 172.31.18.12!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: red, ServerIP: 172.31.18.11!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: blue, ServerIP: 172.31.18.12!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: red, ServerIP: 172.31.18.11!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: blue, ServerIP: 172.31.18.12!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: red, ServerIP: 172.31.18.11!
......

# Once scheduling has settled, open another terminal and change the /livez response of either backend to a non-"OK" value; for example, the first endpoint:
root@test:~# curl -X POST -d 'livez=FAIL' http://172.31.18.11/livez

# The responses then show how scheduling has changed:
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: blue, ServerIP: 172.31.18.12!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: blue, ServerIP: 172.31.18.12!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: blue, ServerIP: 172.31.18.12!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: blue, ServerIP: 172.31.18.12!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: blue, ServerIP: 172.31.18.12!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: blue, ServerIP: 172.31.18.12!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: blue, ServerIP: 172.31.18.12!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: blue, ServerIP: 172.31.18.12!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: blue, ServerIP: 172.31.18.12!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: blue, ServerIP: 172.31.18.12!
# No longer scheduled to 172.31.18.11

# The first endpoint failed the active health check and was automatically removed from the cluster; it stays out until it turns healthy again.
# Restore a normal response with a command like the following:
root@test:~# curl -X POST -d 'livez=OK' http://172.31.18.11/livez

iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: blue, ServerIP: 172.31.18.12!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: red, ServerIP: 172.31.18.11!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: blue, ServerIP: 172.31.18.12!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: red, ServerIP: 172.31.18.11!
# 172.31.18.11 has recovered and is back in rotation
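
As a cross-check (assuming the environment above is still running), the front proxy's admin interface on port 9901 reports per-endpoint health state and health-check counters:

root@test:~# curl -s http://172.31.18.2:9901/clusters | grep health_flags
root@test:~# curl -s http://172.31.18.2:9901/stats | grep 'web_cluster_01.health_check'
# An endpoint that failed the active health check shows the /failed_active_hc flag.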

2. outlier-detection
Lab environment

envoy: Front Proxy, at 172.31.20.2
webserver01: the first backend service
webserver01-sidecar: Sidecar Proxy for the first backend service, at 172.31.20.11
webserver02: the second backend service
webserver02-sidecar: Sidecar Proxy for the second backend service, at 172.31.20.12
webserver03: the third backend service
webserver03-sidecar: Sidecar Proxy for the third backend service, at 172.31.20.13

front-envoy.yaml

admin:
  profile_path: /tmp/envoy.prof
  access_log_path: /tmp/admin_access.log
  address:
    socket_address: { address: 0.0.0.0, port_value: 9901 }

static_resources:
  listeners:
  - name: listener_0
    address:
      socket_address: { address: 0.0.0.0, port_value: 80 }
    filter_chains:
    - filters:
      - name: envoy.filters.network.http_connection_manager
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
          stat_prefix: ingress_http
          codec_type: AUTO
          route_config:
            name: local_route
            virtual_hosts:
            - name: webservice
              domains: ["*"]
              routes:
              - match: { prefix: "/" }
                route: { cluster: web_cluster_01 }
          http_filters:
          - name: envoy.filters.http.router
  clusters:
  - name: web_cluster_01
    connect_timeout: 0.25s
    type: STRICT_DNS
    lb_policy: ROUND_ROBIN
    load_assignment:
      cluster_name: web_cluster_01
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address: { address: myservice, port_value: 80 }
    outlier_detection:
      consecutive_5xx: 3
      base_ejection_time: 10s
      max_ejection_percent: 10

envoy-sidecar-proxy.yaml

admin:
  profile_path: /tmp/envoy.prof
  access_log_path: /tmp/admin_access.log
  address:
    socket_address:
       address: 0.0.0.0
       port_value: 9901

static_resources:
  listeners:
  - name: listener_0
    address:
      socket_address: { address: 0.0.0.0, port_value: 80 }
    filter_chains:
    - filters:
      - name: envoy.filters.network.http_connection_manager
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
          stat_prefix: ingress_http
          codec_type: AUTO
          route_config:
            name: local_route
            virtual_hosts:
            - name: local_service 
              domains: ["*"]
              routes:
              - match: { prefix: "/" }
                route: { cluster: local_cluster }
          http_filters:
          - name: envoy.filters.http.router

  clusters:
  - name: local_cluster
    connect_timeout: 0.25s
    type: STATIC
    lb_policy: ROUND_ROBIN
    load_assignment:
      cluster_name: local_cluster
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address: { address: 127.0.0.1, port_value: 8080 }

docker-compose.yaml

version: '3.3'

services:
  envoy:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./front-envoy.yaml:/etc/envoy/envoy.yaml
    networks:
      envoymesh:
        ipv4_address: 172.31.20.2
        aliases:
        - front-proxy
    depends_on:
    - webserver01-sidecar
    - webserver02-sidecar
    - webserver03-sidecar

  webserver01-sidecar:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: red
    networks:
      envoymesh:
        ipv4_address: 172.31.20.11
        aliases:
        - myservice

  webserver01:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver01-sidecar"
    depends_on:
    - webserver01-sidecar

  webserver02-sidecar:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: blue
    networks:
      envoymesh:
        ipv4_address: 172.31.20.12
        aliases:
        - myservice

  webserver02:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver02-sidecar"
    depends_on:
    - webserver02-sidecar

  webserver03-sidecar:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: green
    networks:
      envoymesh:
        ipv4_address: 172.31.20.13
        aliases:
        - myservice

  webserver03:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver03-sidecar"
    depends_on:
    - webserver03-sidecar

networks:
  envoymesh:
    driver: bridge
    ipam:
      config:
        - subnet: 172.31.20.0/24

Verification

docker-compose up
Test from a cloned terminal window

# Continuously request the /livez path on the service:
root@test:~# while true; do curl 172.31.20.2/livez && echo; sleep 1; done
OK
OK
OK
OK
OK
......

# Once scheduling has settled, open another terminal and change the /livez response of any backend to a non-"OK" value; for example, the first endpoint:
root@test:~# curl -X POST -d 'livez=FAIL' http://172.31.20.11/livez

# Then watch the docker-compose console (or the request responses directly) to observe how scheduling reacts:
webserver01_1          | 127.0.0.1 - - [02/Dec/2021 13:43:54] "POST /livez HTTP/1.1" 200 -
webserver02_1          | 127.0.0.1 - - [02/Dec/2021 13:43:55] "GET /livez HTTP/1.1" 200 -
webserver03_1          | 127.0.0.1 - - [02/Dec/2021 13:43:56] "GET /livez HTTP/1.1" 200 -
webserver01_1          | 127.0.0.1 - - [02/Dec/2021 13:43:57] "GET /livez HTTP/1.1" 506 -
webserver02_1          | 127.0.0.1 - - [02/Dec/2021 13:43:58] "GET /livez HTTP/1.1" 200 -
webserver03_1          | 127.0.0.1 - - [02/Dec/2021 13:43:59] "GET /livez HTTP/1.1" 200 -
webserver01_1          | 127.0.0.1 - - [02/Dec/2021 13:44:00] "GET /livez HTTP/1.1" 506 -
webserver02_1          | 127.0.0.1 - - [02/Dec/2021 13:44:01] "GET /livez HTTP/1.1" 200 -
webserver03_1          | 127.0.0.1 - - [02/Dec/2021 13:44:02] "GET /livez HTTP/1.1" 200 -
webserver01_1          | 127.0.0.1 - - [02/Dec/2021 13:44:03] "GET /livez HTTP/1.1" 506 -
webserver02_1          | 127.0.0.1 - - [02/Dec/2021 13:44:04] "GET /livez HTTP/1.1" 200 -
webserver03_1          | 127.0.0.1 - - [02/Dec/2021 13:44:05] "GET /livez HTTP/1.1" 200 -

# Because the first endpoint keeps answering with a 5xx response code, it is ejected again each time it is added back, until its normal response is restored with a command like the following:
root@test:~# curl -X POST -d 'livez=OK' http://172.31.20.11/livez

webserver03_1          | 127.0.0.1 - - [02/Dec/2021 13:45:32] "GET /livez HTTP/1.1" 200 -
webserver03_1          | 127.0.0.1 - - [02/Dec/2021 13:45:33] "GET /livez HTTP/1.1" 200 -
webserver01_1          | 127.0.0.1 - - [02/Dec/2021 13:45:34] "GET /livez HTTP/1.1" 200 -
webserver02_1          | 127.0.0.1 - - [02/Dec/2021 13:45:35] "GET /livez HTTP/1.1" 200 -
webserver03_1          | 127.0.0.1 - - [02/Dec/2021 13:45:36] "GET /livez HTTP/1.1" 200 -
webserver01_1          | 127.0.0.1 - - [02/Dec/2021 13:45:37] "GET /livez HTTP/1.1" 200 -
webserver02_1          | 127.0.0.1 - - [02/Dec/2021 13:45:38] "GET /livez HTTP/1.1" 200 -
webserver03_1          | 127.0.0.1 - - [02/Dec/2021 13:45:39] "GET /livez HTTP/1.1" 200 -
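
Ejections can also be watched through the front proxy's admin interface (assuming the environment above is running); an ejected host carries the /failed_outlier_check health flag:

root@test:~# curl -s http://172.31.20.2:9901/stats | grep outlier_detection
root@test:~# curl -s http://172.31.20.2:9901/clusters | grep health_flags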

3. least-requests
Lab environment

envoy: Front Proxy, at 172.31.22.2
webserver01: the first backend service
webserver01-sidecar: Sidecar Proxy for the first backend service, at 172.31.22.11
webserver02: the second backend service
webserver02-sidecar: Sidecar Proxy for the second backend service, at 172.31.22.12
webserver03: the third backend service
webserver03-sidecar: Sidecar Proxy for the third backend service, at 172.31.22.13

front-envoy.yaml

admin:
  profile_path: /tmp/envoy.prof
  access_log_path: /tmp/admin_access.log
  address:
    socket_address: { address: 0.0.0.0, port_value: 9901 }

static_resources:
  listeners:
  - name: listener_0
    address:
      socket_address: { address: 0.0.0.0, port_value: 80 }
    filter_chains:
    - filters:
      - name: envoy.filters.network.http_connection_manager
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
          stat_prefix: ingress_http
          codec_type: AUTO
          route_config:
            name: local_route
            virtual_hosts:
            - name: webservice
              domains: ["*"]
              routes:
              - match: { prefix: "/" }
                route: { cluster: web_cluster_01 }
          http_filters:
          - name: envoy.filters.http.router

  clusters:
  - name: web_cluster_01
    connect_timeout: 0.25s
    type: STRICT_DNS
    lb_policy: LEAST_REQUEST
    load_assignment:
      cluster_name: web_cluster_01
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address:
                address: red
                port_value: 80
          load_balancing_weight: 1
        - endpoint:
            address:
              socket_address:
                address: blue
                port_value: 80
          load_balancing_weight: 3
        - endpoint:
            address:
              socket_address:
                address: green
                port_value: 80
          load_balancing_weight: 5
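
For context: with equal endpoint weights, Envoy's least-request balancer samples two random healthy hosts per pick and chooses the one with fewer active requests (P2C); with unequal weights, as configured here, it instead dynamically scales each host's weight by its active-request count. A sketch of the optional P2C tuning knob, shown for reference only (not used in this lab):

    lb_policy: LEAST_REQUEST
    least_request_lb_config:
      choice_count: 3   # sample 3 hosts per pick instead of the default 2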

envoy-sidecar-proxy.yaml

admin:
  profile_path: /tmp/envoy.prof
  access_log_path: /tmp/admin_access.log
  address:
    socket_address:
       address: 0.0.0.0
       port_value: 9901

static_resources:
  listeners:
  - name: listener_0
    address:
      socket_address: { address: 0.0.0.0, port_value: 80 }
    filter_chains:
    - filters:
      - name: envoy.filters.network.http_connection_manager
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
          stat_prefix: ingress_http
          codec_type: AUTO
          route_config:
            name: local_route
            virtual_hosts:
            - name: local_service 
              domains: ["*"]
              routes:
              - match: { prefix: "/" }
                route: { cluster: local_cluster }
          http_filters:
          - name: envoy.filters.http.router

  clusters:
  - name: local_cluster
    connect_timeout: 0.25s
    type: STATIC
    lb_policy: ROUND_ROBIN
    load_assignment:
      cluster_name: local_cluster
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address: { address: 127.0.0.1, port_value: 8080 }

send-request.sh

#!/bin/bash
declare -i red=0
declare -i blue=0
declare -i green=0

#interval="0.1"
counts=300

echo "Send 300 requests, and print the result. This will take a while."
echo ""
echo "Weight of all endpoints:"
echo "Red:Blue:Green = 1:3:5"

for ((i=1; i<=${counts}; i++)); do
    if curl -s http://$1/hostname | grep "red" &> /dev/null; then
        # $1 is the host address of the front-envoy.
        red=$[$red+1]
    elif curl -s http://$1/hostname | grep "blue" &> /dev/null; then
        blue=$[$blue+1]
    else
        green=$[$green+1]
    fi
#    sleep $interval
done

echo ""
echo "Response from:"
echo "Red:Blue:Green = $red:$blue:$green"

docker-compose.yaml

version: '3.3'

services:
  envoy:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./front-envoy.yaml:/etc/envoy/envoy.yaml
    networks:
      envoymesh:
        ipv4_address: 172.31.22.2
        aliases:
        - front-proxy
    depends_on:
    - webserver01-sidecar
    - webserver02-sidecar
    - webserver03-sidecar

  webserver01-sidecar:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: red
    networks:
      envoymesh:
        ipv4_address: 172.31.22.11
        aliases:
        - myservice
        - red

  webserver01:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver01-sidecar"
    depends_on:
    - webserver01-sidecar

  webserver02-sidecar:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: blue
    networks:
      envoymesh:
        ipv4_address: 172.31.22.12
        aliases:
        - myservice
        - blue

  webserver02:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver02-sidecar"
    depends_on:
    - webserver02-sidecar

  webserver03-sidecar:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: green
    networks:
      envoymesh:
        ipv4_address: 172.31.22.13
        aliases:
        - myservice
        - green

  webserver03:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver03-sidecar"
    depends_on:
    - webserver03-sidecar

networks:
  envoymesh:
    driver: bridge
    ipam:
      config:
        - subnet: 172.31.22.0/24

Verification

docker-compose up
Test from a cloned terminal window

# Run the following script to send a batch of requests; from the rough per-endpoint response ratio, check whether scheduling approximates the weighted least-request policy:
./send-request.sh 172.31.22.2

root@test:/apps/servicemesh_in_practise-develop/Cluster-Manager/least-requests# ./send-request.sh 172.31.22.2
Send 300 requests, and print the result. This will take a while.

Weight of all endpoints:
Red:Blue:Green = 1:3:5

Response from:
Red:Blue:Green = 56:80:164
root@test:/apps/servicemesh_in_practise-develop/Cluster-Manager/least-requests# ./send-request.sh 172.31.22.2
Send 300 requests, and print the result. This will take a while.

Weight of all endpoints:
Red:Blue:Green = 1:3:5

Response from:
Red:Blue:Green = 59:104:137
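
The cumulative per-endpoint request counters on the admin interface offer the same view without the script (assuming the environment is up); matching lines have the form web_cluster_01::<ip>:80::rq_total::<count>:

root@test:~# curl -s http://172.31.22.2:9901/clusters | grep rq_total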

4. weighted-rr
Lab environment

envoy: Front Proxy, at 172.31.27.2
webserver01: the first backend service
webserver01-sidecar: Sidecar Proxy for the first backend service, at 172.31.27.11
webserver02: the second backend service
webserver02-sidecar: Sidecar Proxy for the second backend service, at 172.31.27.12
webserver03: the third backend service
webserver03-sidecar: Sidecar Proxy for the third backend service, at 172.31.27.13

front-envoy.yaml

admin:
  profile_path: /tmp/envoy.prof
  access_log_path: /tmp/admin_access.log
  address:
    socket_address: { address: 0.0.0.0, port_value: 9901 }

static_resources:
  listeners:
  - name: listener_0
    address:
      socket_address: { address: 0.0.0.0, port_value: 80 }
    filter_chains:
    - filters:
      - name: envoy.filters.network.http_connection_manager
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
          stat_prefix: ingress_http
          codec_type: AUTO
          route_config:
            name: local_route
            virtual_hosts:
            - name: webservice
              domains: ["*"]
              routes:
              - match: { prefix: "/" }
                route: { cluster: web_cluster_01 }
          http_filters:
          - name: envoy.filters.http.router

  clusters:
  - name: web_cluster_01
    connect_timeout: 0.25s
    type: STRICT_DNS
    lb_policy: ROUND_ROBIN
    load_assignment:
      cluster_name: web_cluster_01
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address:
                address: red
                port_value: 80
          load_balancing_weight: 1
        - endpoint:
            address:
              socket_address:
                address: blue
                port_value: 80
          load_balancing_weight: 3
        - endpoint:
            address:
              socket_address:
                address: green
                port_value: 80
          load_balancing_weight: 5

envoy-sidecar-proxy.yaml

admin:
  profile_path: /tmp/envoy.prof
  access_log_path: /tmp/admin_access.log
  address:
    socket_address:
       address: 0.0.0.0
       port_value: 9901

static_resources:
  listeners:
  - name: listener_0
    address:
      socket_address: { address: 0.0.0.0, port_value: 80 }
    filter_chains:
    - filters:
      - name: envoy.filters.network.http_connection_manager
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
          stat_prefix: ingress_http
          codec_type: AUTO
          route_config:
            name: local_route
            virtual_hosts:
            - name: local_service 
              domains: ["*"]
              routes:
              - match: { prefix: "/" }
                route: { cluster: local_cluster }
          http_filters:
          - name: envoy.filters.http.router

  clusters:
  - name: local_cluster
    connect_timeout: 0.25s
    type: STATIC
    lb_policy: ROUND_ROBIN
    load_assignment:
      cluster_name: local_cluster
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address: { address: 127.0.0.1, port_value: 8080 }

send-request.sh

#!/bin/bash
declare -i red=0
declare -i blue=0
declare -i green=0

#interval="0.1"
counts=300

echo "Send 300 requests, and print the result. This will take a while."
echo ""
echo "Weight of all endpoints:"
echo "Red:Blue:Green = 1:3:5"

for ((i=1; i<=${counts}; i++)); do
    if curl -s http://$1/hostname | grep "red" &> /dev/null; then
        # $1 is the host address of the front-envoy.
        red=$[$red+1]
    elif curl -s http://$1/hostname | grep "blue" &> /dev/null; then
        blue=$[$blue+1]
    else
        green=$[$green+1]
    fi
#    sleep $interval
done

echo ""
echo "Response from:"
echo "Red:Blue:Green = $red:$blue:$green"

docker-compose.yaml

version: '3.3'

services:
  envoy:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./front-envoy.yaml:/etc/envoy/envoy.yaml
    networks:
      envoymesh:
        ipv4_address: 172.31.27.2
        aliases:
        - front-proxy
    depends_on:
    - webserver01-sidecar
    - webserver02-sidecar
    - webserver03-sidecar

  webserver01-sidecar:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: red
    networks:
      envoymesh:
        ipv4_address: 172.31.27.11
        aliases:
        - myservice
        - red

  webserver01:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver01-sidecar"
    depends_on:
    - webserver01-sidecar

  webserver02-sidecar:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: blue
    networks:
      envoymesh:
        ipv4_address: 172.31.27.12
        aliases:
        - myservice
        - blue

  webserver02:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver02-sidecar"
    depends_on:
    - webserver02-sidecar

  webserver03-sidecar:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: green
    networks:
      envoymesh:
        ipv4_address: 172.31.27.13
        aliases:
        - myservice
        - green

  webserver03:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver03-sidecar"
    depends_on:
    - webserver03-sidecar

networks:
  envoymesh:
    driver: bridge
    ipam:
      config:
        - subnet: 172.31.27.0/24

Verification

docker-compose up
Test from a cloned terminal window

# Run the following script to send a batch of requests; from the rough per-endpoint response ratio, check whether scheduling approximates the weighted round-robin policy:
root@test:/apps/servicemesh_in_practise-develop/Cluster-Manager/weighted-rr# ./send-request.sh 172.31.27.2
Send 300 requests, and print the result. This will take a while.

Weight of all endpoints:
Red:Blue:Green = 1:3:5

Response from:
Red:Blue:Green = 55:81:164
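
For reference, a perfectly proportional 1:3:5 split of 300 requests would be Red ≈ 33 (300 × 1/9), Blue = 100 (300 × 3/9) and Green ≈ 167 (300 × 5/9). The measured counts are only an approximation, partly because send-request.sh can issue up to two requests per loop iteration (the elif branch performs a second curl).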

5. locality-weighted
Lab environment

envoy: Front Proxy, at 172.31.31.2
webserver01: the first backend service
webserver01-sidecar: Sidecar Proxy for the first backend service, at 172.31.31.11, aliased as red and webservice1
webserver02: the second backend service
webserver02-sidecar: Sidecar Proxy for the second backend service, at 172.31.31.12, aliased as blue and webservice1
webserver03: the third backend service
webserver03-sidecar: Sidecar Proxy for the third backend service, at 172.31.31.13, aliased as green and webservice1
webserver04: the fourth backend service
webserver04-sidecar: Sidecar Proxy for the fourth backend service, at 172.31.31.14, aliased as gray and webservice2
webserver05: the fifth backend service
webserver05-sidecar: Sidecar Proxy for the fifth backend service, at 172.31.31.15, aliased as black and webservice2

front-envoy.yaml

admin:
  access_log_path: "/dev/null"
  address:
    socket_address: { address: 0.0.0.0, port_value: 9901 }

static_resources:
  listeners:
  - address:
      socket_address: { address: 0.0.0.0, port_value: 80 }
    name: listener_http
    filter_chains:
    - filters:
      - name: envoy.filters.network.http_connection_manager
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
          codec_type: auto
          stat_prefix: ingress_http
          route_config:
            name: local_route
            virtual_hosts:
            - name: backend
              domains:
              - "*"
              routes:
              - match:
                  prefix: "/"
                route:
                  cluster: webcluster1
          http_filters:
          - name: envoy.filters.http.router

  clusters:
  - name: webcluster1
    connect_timeout: 0.25s
    type: STRICT_DNS
    lb_policy: ROUND_ROBIN
    common_lb_config:
      # Locality weights are honored only when locality-weighted LB is enabled.
      locality_weighted_lb_config: {}
    http2_protocol_options: {}
    load_assignment:
      cluster_name: webcluster1
      policy:
        overprovisioning_factor: 140
      endpoints:
      - locality:
          region: cn-north-1
        priority: 0
        load_balancing_weight: 10
        lb_endpoints:
        - endpoint:
            address:
              socket_address: { address: webservice1, port_value: 80 }
      - locality:
          region: cn-north-2
        priority: 0
        load_balancing_weight: 20
        lb_endpoints:
        - endpoint:
            address:
              socket_address: { address: webservice2, port_value: 80 }
    health_checks:
    - timeout: 5s
      interval: 10s
      unhealthy_threshold: 2
      healthy_threshold: 1
      http_health_check:
        path: /livez
        expected_statuses:
        - start: 200
          end: 399

envoy-sidecar-proxy.yaml

admin:
  profile_path: /tmp/envoy.prof
  access_log_path: /tmp/admin_access.log
  address:
    socket_address:
       address: 0.0.0.0
       port_value: 9901

static_resources:
  listeners:
  - name: listener_0
    address:
      socket_address: { address: 0.0.0.0, port_value: 80 }
    filter_chains:
    - filters:
      - name: envoy.filters.network.http_connection_manager
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
          stat_prefix: ingress_http
          codec_type: AUTO
          route_config:
            name: local_route
            virtual_hosts:
            - name: local_service 
              domains: ["*"]
              routes:
              - match: { prefix: "/" }
                route: { cluster: local_cluster }
          http_filters:
          - name: envoy.filters.http.router

  clusters:
  - name: local_cluster
    connect_timeout: 0.25s
    type: STATIC
    lb_policy: ROUND_ROBIN
    load_assignment:
      cluster_name: local_cluster
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address: { address: 127.0.0.1, port_value: 8080 }

send-request.sh

#!/bin/bash
declare -i colored=0
declare -i colorless=0

interval="0.1"

while true; do
    if curl -s http://$1/hostname | grep -E "red|blue|green" &> /dev/null; then
        # $1 is the host address of the front-envoy.
        colored=$[$colored+1]
    else
        colorless=$[$colorless+1]
    fi
    echo $colored:$colorless
    sleep $interval
done

docker-compose.yaml

version: '3'

services:
  front-envoy:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
      - ./front-envoy.yaml:/etc/envoy/envoy.yaml
    networks:
      envoymesh:
        ipv4_address: 172.31.31.2
        aliases:
        - front-proxy
    expose:
      # Expose ports 80 (for general traffic) and 9901 (for the admin server)
      - "80"
      - "9901"
  webserver01-sidecar:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: red
    networks:
      envoymesh:
        ipv4_address: 172.31.31.11
        aliases:
        - webservice1
        - red

  webserver01:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver01-sidecar"
    depends_on:
    - webserver01-sidecar

  webserver02-sidecar:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: blue
    networks:
      envoymesh:
        ipv4_address: 172.31.31.12
        aliases:
        - webservice1
        - blue

  webserver02:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver02-sidecar"
    depends_on:
    - webserver02-sidecar

  webserver03-sidecar:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: green
    networks:
      envoymesh:
        ipv4_address: 172.31.31.13
        aliases:
        - webservice1
        - green

  webserver03:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver03-sidecar"
    depends_on:
    - webserver03-sidecar

  webserver04-sidecar:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: gray
    networks:
      envoymesh:
        ipv4_address: 172.31.31.14
        aliases:
        - webservice2
        - gray

  webserver04:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver04-sidecar"
    depends_on:
    - webserver04-sidecar

  webserver05-sidecar:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: black
    networks:
      envoymesh:
        ipv4_address: 172.31.31.15
        aliases:
        - webservice2
        - black

  webserver05:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver05-sidecar"
    depends_on:
    - webserver05-sidecar

networks:
  envoymesh:
    driver: bridge
    ipam:
      config:
        - subnet: 172.31.31.0/24

Verification

docker-compose up
Test from a cloned terminal window

# Testing with the send-request.sh script shows that user requests are distributed across the localities by weight, and then balanced within each locality by its load-balancing policy:
root@test:/apps/servicemesh_in_practise-develop/Cluster-Manager/locality-weighted# ./send-request.sh 172.31.31.2

......
283:189
283:190
283:191
284:191
285:191
286:191
286:192
286:193
287:193
......

# You can also try marking one host in the higher-weighted locality as unhealthy; see the example below.
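
# For example (a hypothetical follow-up in the spirit of the other labs), fail
# gray in the weight-20 locality:
root@test:~# curl -X POST -d 'livez=FAIL' http://172.31.31.14/livez
# With one of webservice2's two hosts unhealthy, that locality's effective
# weight drops to roughly 20 * min(1, 0.5 * 1.4) = 14, shifting the split
# toward webservice1 (10:14 instead of 10:20).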

6. ring-hash
Lab environment

envoy: Front Proxy, at 172.31.25.2
webserver01: the first backend service
webserver01-sidecar: Sidecar Proxy for the first backend service, at 172.31.25.11
webserver02: the second backend service
webserver02-sidecar: Sidecar Proxy for the second backend service, at 172.31.25.12
webserver03: the third backend service
webserver03-sidecar: Sidecar Proxy for the third backend service, at 172.31.25.13

front-envoy.yaml

admin:
  profile_path: /tmp/envoy.prof
  access_log_path: /tmp/admin_access.log
  address:
    socket_address: { address: 0.0.0.0, port_value: 9901 }

static_resources:
  listeners:
  - name: listener_0
    address:
      socket_address: { address: 0.0.0.0, port_value: 80 }
    filter_chains:
    - filters:
      - name: envoy.filters.network.http_connection_manager
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
          stat_prefix: ingress_http
          codec_type: AUTO
          route_config:
            name: local_route
            virtual_hosts:
            - name: webservice
              domains: ["*"]
              routes:
              - match: { prefix: "/" }
                route:
                  cluster: web_cluster_01
                  hash_policy:
                  # - connection_properties:
                  #     source_ip: true
                  - header:
                      header_name: User-Agent
          http_filters:
          - name: envoy.filters.http.router

  clusters:
  - name: web_cluster_01
    connect_timeout: 0.5s
    type: STRICT_DNS
    lb_policy: RING_HASH
    ring_hash_lb_config:
      maximum_ring_size: 1048576
      minimum_ring_size: 512
    load_assignment:
      cluster_name: web_cluster_01
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address:
                address: myservice
                port_value: 80
    health_checks:
    - timeout: 5s
      interval: 10s
      unhealthy_threshold: 2
      healthy_threshold: 2
      http_health_check:
        path: /livez
        expected_statuses:
        - start: 200
          end: 399

envoy-sidecar-proxy.yaml

admin:
  profile_path: /tmp/envoy.prof
  access_log_path: /tmp/admin_access.log
  address:
    socket_address:
       address: 0.0.0.0
       port_value: 9901

static_resources:
  listeners:
  - name: listener_0
    address:
      socket_address: { address: 0.0.0.0, port_value: 80 }
    filter_chains:
    - filters:
      - name: envoy.filters.network.http_connection_manager
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
          stat_prefix: ingress_http
          codec_type: AUTO
          route_config:
            name: local_route
            virtual_hosts:
            - name: local_service 
              domains: ["*"]
              routes:
              - match: { prefix: "/" }
                route: { cluster: local_cluster }
          http_filters:
          - name: envoy.filters.http.router

  clusters:
  - name: local_cluster
    connect_timeout: 0.25s
    type: STATIC
    lb_policy: ROUND_ROBIN
    load_assignment:
      cluster_name: local_cluster
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address: { address: 127.0.0.1, port_value: 8080 }

send-request.sh

#!/bin/bash
declare -i red=0
declare -i blue=0
declare -i green=0

interval="0.1"
counts=200

echo "Send 300 requests, and print the result. This will take a while."

for ((i=1; i<=${counts}; i++)); do
    if curl -s http://$1/hostname | grep "red" &> /dev/null; then
        # $1 is the host address of the front-envoy.
        red=$[$red+1]
    elif curl -s http://$1/hostname | grep "blue" &> /dev/null; then
        blue=$[$blue+1]
    else
        green=$[$green+1]
    fi
    sleep $interval
done

echo ""
echo "Response from:"
echo "Red:Blue:Green = $red:$blue:$green"

docker-compose.yaml

version: '3.3'

services:
  envoy:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./front-envoy.yaml:/etc/envoy/envoy.yaml
    networks:
      envoymesh:
        ipv4_address: 172.31.25.2
        aliases:
        - front-proxy
    depends_on:
    - webserver01-sidecar
    - webserver02-sidecar
    - webserver03-sidecar

  webserver01-sidecar:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: red
    networks:
      envoymesh:
        ipv4_address: 172.31.25.11
        aliases:
        - myservice
        - red

  webserver01:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver01-sidecar"
    depends_on:
    - webserver01-sidecar

  webserver02-sidecar:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: blue
    networks:
      envoymesh:
        ipv4_address: 172.31.25.12
        aliases:
        - myservice
        - blue

  webserver02:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver02-sidecar"
    depends_on:
    - webserver02-sidecar

  webserver03-sidecar:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: green
    networks:
      envoymesh:
        ipv4_address: 172.31.25.13
        aliases:
        - myservice
        - green

  webserver03:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver03-sidecar"
    depends_on:
    - webserver03-sidecar

networks:
  envoymesh:
    driver: bridge
    ipam:
      config:
        - subnet: 172.31.25.0/24

Verification

docker-compose up
Test from a cloned terminal window

# The route's hash policy hashes on the client's browser type (the User-Agent header), so the requests issued by the following command are always directed to the same backend endpoint: the browser type never changes.
while true; do curl 172.31.25.2; sleep .3; done

# Simulate a different browser and send requests again; they may be scheduled to another endpoint, or still to the same one as before, depending on the hash result:
root@test:/apps/servicemesh_in_practise-develop/Cluster-Manager/ring-hash# while true; do curl 172.31.25.2; sleep .3; done
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: red, ServerIP: 172.31.25.11!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: red, ServerIP: 172.31.25.11!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: red, ServerIP: 172.31.25.11!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: red, ServerIP: 172.31.25.11!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: red, ServerIP: 172.31.25.11!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: red, ServerIP: 172.31.25.11!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: red, ServerIP: 172.31.25.11!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: red, ServerIP: 172.31.25.11!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: red, ServerIP: 172.31.25.11!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: red, ServerIP: 172.31.25.11!
......

# The following loop verifies that requests from the same browser always go to the same backend endpoint, while different browsers may be scheduled elsewhere:
root@test:~# while true; do index=$[$RANDOM%10]; curl -H "User-Agent: Browser_${index}" 172.31.25.2/user-agent && curl -H "User-Agent: Browser_${index}" 172.31.25.2/hostname && echo ; sleep .1; done
User-Agent: Browser_0
ServerName: green

User-Agent: Browser_0
ServerName: green

User-Agent: Browser_2
ServerName: red

User-Agent: Browser_2
ServerName: red

User-Agent: Browser_5
ServerName: blue

User-Agent: Browser_9
ServerName: red

# You can also fail one endpoint's health check to change the endpoint set dynamically, then check whether requests previously hashed to that node are redistributed to the other endpoints:
root@test:~# curl -X POST -d 'livez=FAIL' http://172.31.25.11/livez

iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: blue, ServerIP: 172.31.25.12!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: blue, ServerIP: 172.31.25.12!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: blue, ServerIP: 172.31.25.12!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: blue, ServerIP: 172.31.25.12!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: blue, ServerIP: 172.31.25.12!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: blue, ServerIP: 172.31.25.12!
# 172.31.25.11 is down; its requests are now scheduled to 172.31.25.12
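
# To bring red back (assuming the same session), restore its /livez response;
# after two consecutive passing checks (healthy_threshold: 2 at a 10s interval,
# i.e. up to ~20s), requests hashing to red return to it:
root@test:~# curl -X POST -d 'livez=OK' http://172.31.25.11/livez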

7. priority-levels
Lab environment

envoy: Front Proxy, at 172.31.29.2
webserver01: the first backend service
webserver01-sidecar: Sidecar Proxy for the first backend service, at 172.31.29.11, aliased as red and webservice1
webserver02: the second backend service
webserver02-sidecar: Sidecar Proxy for the second backend service, at 172.31.29.12, aliased as blue and webservice1
webserver03: the third backend service
webserver03-sidecar: Sidecar Proxy for the third backend service, at 172.31.29.13, aliased as green and webservice1
webserver04: the fourth backend service
webserver04-sidecar: Sidecar Proxy for the fourth backend service, at 172.31.29.14, aliased as gray and webservice2
webserver05: the fifth backend service
webserver05-sidecar: Sidecar Proxy for the fifth backend service, at 172.31.29.15, aliased as black and webservice2

front-envoy.yaml

admin:
  access_log_path: "/dev/null"
  address:
    socket_address:
      address: 0.0.0.0
      port_value: 9901

static_resources:
  listeners:
  - address:
      socket_address:
        address: 0.0.0.0
        port_value: 80
    name: listener_http
    filter_chains:
    - filters:
      - name: envoy.filters.network.http_connection_manager
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
          codec_type: auto
          stat_prefix: ingress_http
          route_config:
            name: local_route
            virtual_hosts:
            - name: backend
              domains:
              - "*"
              routes:
              - match:
                  prefix: "/"
                route:
                  cluster: webcluster1
          http_filters:
          - name: envoy.filters.http.router

  clusters:
  - name: webcluster1
    connect_timeout: 0.25s
    type: STRICT_DNS
    lb_policy: ROUND_ROBIN
    http2_protocol_options: {}
    load_assignment:
      cluster_name: webcluster1
      policy:
        overprovisioning_factor: 140
      endpoints:
      - locality:
          region: cn-north-1
        priority: 0
        lb_endpoints:
        - endpoint:
            address:
              socket_address:
                address: webservice1
                port_value: 80
      - locality:
          region: cn-north-2
        priority: 1
        lb_endpoints:
        - endpoint:
            address:
              socket_address:
                address: webservice2
                port_value: 80
    health_checks:
    - timeout: 5s
      interval: 10s
      unhealthy_threshold: 2
      healthy_threshold: 1
      http_health_check:
        path: /livez
        expected_statuses:
        - start: 200
          end: 399

front-envoy-v2.yaml

admin:
  access_log_path: "/dev/null"
  address:
    socket_address:
      address: 0.0.0.0
      port_value: 9901

static_resources:
  listeners:
  - address:
      socket_address:
        address: 0.0.0.0
        port_value: 80
    name: listener_http
    filter_chains:
    - filters:
      - name: envoy.http_connection_manager
        typed_config:
          "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager
          codec_type: auto
          stat_prefix: ingress_http
          route_config:
            name: local_route
            virtual_hosts:
            - name: backend
              domains:
              - "*"
              routes:
              - match:
                  prefix: "/"
                route:
                  cluster: webcluster1
          http_filters:
          - name: envoy.router

  clusters:
  - name: webcluster1
    connect_timeout: 0.5s
    type: STRICT_DNS
    lb_policy: ROUND_ROBIN
    http2_protocol_options: {}
    load_assignment:
      cluster_name: webcluster1
      policy:
        overprovisioning_factor: 140
      endpoints:
      - locality:
          region: cn-north-1
        priority: 0
        lb_endpoints:
        - endpoint:
            address:
              socket_address:
                address: webservice1
                port_value: 80
      - locality:
          region: cn-north-2
        priority: 1
        lb_endpoints:
        - endpoint:
            address:
              socket_address:
                address: webservice2
                port_value: 80
    health_checks:
    - timeout: 5s
      interval: 10s
      unhealthy_threshold: 2
      healthy_threshold: 1
      http_health_check:
        path: /livez

envoy-sidecar-proxy.yaml

admin:
  profile_path: /tmp/envoy.prof
  access_log_path: /tmp/admin_access.log
  address:
    socket_address:
       address: 0.0.0.0
       port_value: 9901

static_resources:
  listeners:
  - name: listener_0
    address:
      socket_address: { address: 0.0.0.0, port_value: 80 }
    filter_chains:
    - filters:
      - name: envoy.filters.network.http_connection_manager
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
          stat_prefix: ingress_http
          codec_type: AUTO
          route_config:
            name: local_route
            virtual_hosts:
            - name: local_service 
              domains: ["*"]
              routes:
              - match: { prefix: "/" }
                route: { cluster: local_cluster }
          http_filters:
          - name: envoy.filters.http.router

  clusters:
  - name: local_cluster
    connect_timeout: 0.25s
    type: STATIC
    lb_policy: ROUND_ROBIN
    load_assignment:
      cluster_name: local_cluster
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address: { address: 127.0.0.1, port_value: 8080 }

docker-compose.yaml

version: '3'

services:
  front-envoy:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
      - ./front-envoy.yaml:/etc/envoy/envoy.yaml
      # - ./front-envoy-v2.yaml:/etc/envoy/envoy.yaml
    networks:
      envoymesh:
        ipv4_address: 172.31.29.2
        aliases:
        - front-proxy
    expose:
      # Expose ports 80 (for general traffic) and 9901 (for the admin server)
      - "80"
      - "9901"
  webserver01-sidecar:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: red
    networks:
      envoymesh:
        ipv4_address: 172.31.29.11
        aliases:
        - webservice1
        - red

  webserver01:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver01-sidecar"
    depends_on:
    - webserver01-sidecar

  webserver02-sidecar:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: blue
    networks:
      envoymesh:
        ipv4_address: 172.31.29.12
        aliases:
        - webservice1
        - blue

  webserver02:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver02-sidecar"
    depends_on:
    - webserver02-sidecar

  webserver03-sidecar:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: green
    networks:
      envoymesh:
        ipv4_address: 172.31.29.13
        aliases:
        - webservice1
        - green

  webserver03:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver03-sidecar"
    depends_on:
    - webserver03-sidecar

  webserver04-sidecar:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: gray
    networks:
      envoymesh:
        ipv4_address: 172.31.29.14
        aliases:
        - webservice2
        - gray

  webserver04:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver04-sidecar"
    depends_on:
    - webserver04-sidecar

  webserver05-sidecar:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: black
    networks:
      envoymesh:
        ipv4_address: 172.31.29.15
        aliases:
        - webservice2
        - black

  webserver05:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver05-sidecar"
    depends_on:
    - webserver05-sidecar

networks:
  envoymesh:
    driver: bridge
    ipam:
      config:
        - subnet: 172.31.29.0/24

Verification

docker-compose up
Test from a cloned terminal window

# Keep requesting the service; all requests are scheduled to the priority-0 endpoints behind webservice1:
while true; do curl 172.31.29.2; sleep .5; done

# Once the scheduling result is confirmed, open another terminal and change the /livez response of any webservice1 endpoint to a non-"OK" value; for example, the first endpoint:
curl -X POST -d 'livez=FAIL' http://172.31.29.11/livez

# The responses then show that, owing to the overprovisioning factor of 1.4, client requests still go almost exclusively to webservice1's remaining endpoints blue and green;

# After confirming that result, fail the /livez response of a second webservice1 endpoint as well, e.g. blue:
curl -X POST -d 'livez=FAIL' http://172.31.29.12/livez

# As before, an endpoint stays out of rotation until its normal response is restored with a command like the following:
curl -X POST -d 'livez=OK' http://172.31.29.11/livez

# With two of the three webservice1 endpoints failed, the responses show that an overprovisioning factor of 1.4 can no longer keep all client requests on the priority-0 webservice1 group, so part of the traffic is forwarded to webservice2's endpoints; see the arithmetic below.
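
# The numbers behind this (Envoy computes each priority's load as
# load(P0) = min(100%, healthy% * overprovisioning_factor)):
#   3 of 3 webservice1 endpoints healthy: 100% * 1.4, capped at 100% -> P1 idle
#   2 of 3 healthy: 66.7% * 1.4 ~= 93% -> only a small share spills to webservice2
#   1 of 3 healthy: 33.3% * 1.4 ~= 47% -> the remaining ~53% goes to webservice2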

8. lb-subsets
Lab environment

envoy: Front Proxy, at 172.31.33.2
e1 to e7: seven backend services
front-envoy.yaml

admin:
  access_log_path: "/dev/null"
  address:
    socket_address: { address: 0.0.0.0, port_value: 9901 }

static_resources:
  listeners:
  - address:
      socket_address: { address: 0.0.0.0, port_value: 80 }
    name: listener_http
    filter_chains:
    - filters:
      - name: envoy.filters.network.http_connection_manager
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
          codec_type: auto
          stat_prefix: ingress_http
          route_config:
            name: local_route
            virtual_hosts:
            - name: backend
              domains:
              - "*"
              routes:
              - match:
                  prefix: "/"
                  headers:
                  - name: x-custom-version
                    exact_match: pre-release
                route:
                  cluster: webcluster1
                  metadata_match:
                    filter_metadata:
                      envoy.lb:
                        version: "1.2-pre"
                        stage: "dev"
              - match:
                  prefix: "/"
                  headers:
                  - name: x-hardware-test
                    exact_match: memory
                route:
                  cluster: webcluster1
                  metadata_match:
                    filter_metadata:
                      envoy.lb:
                        type: "bigmem"
                        stage: "prod"
              - match:
                  prefix: "/"
                route:
                  weighted_clusters:
                    clusters:
                    - name: webcluster1
                      weight: 90
                      metadata_match:
                        filter_metadata:
                          envoy.lb:
                            version: "1.0"
                    - name: webcluster1
                      weight: 10
                      metadata_match:
                        filter_metadata:
                          envoy.lb:
                            version: "1.1"
                  metadata_match:
                    filter_metadata:
                      envoy.lb:
                        stage: "prod"
          http_filters:
          - name: envoy.filters.http.router

  clusters:
  - name: webcluster1
    connect_timeout: 0.5s
    type: STRICT_DNS
    lb_policy: ROUND_ROBIN
    load_assignment:
      cluster_name: webcluster1
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address:
                address: e1
                port_value: 80
          metadata:
            filter_metadata:
              envoy.lb:
                stage: "prod"
                version: "1.0"
                type: "std"
                xlarge: true
        - endpoint:
            address:
              socket_address:
                address: e2
                port_value: 80
          metadata:
            filter_metadata:
              envoy.lb:
                stage: "prod"
                version: "1.0"
                type: "std"
        - endpoint:
            address:
              socket_address:
                address: e3
                port_value: 80
          metadata:
            filter_metadata:
              envoy.lb:
                stage: "prod"
                version: "1.1"
                type: "std"
        - endpoint:
            address:
              socket_address:
                address: e4
                port_value: 80
          metadata:
            filter_metadata:
              envoy.lb:
                stage: "prod"
                version: "1.1"
                type: "std"
        - endpoint:
            address:
              socket_address:
                address: e5
                port_value: 80
          metadata:
            filter_metadata:
              envoy.lb:
                stage: "prod"
                version: "1.0"
                type: "bigmem"
        - endpoint:
            address:
              socket_address:
                address: e6
                port_value: 80
          metadata:
            filter_metadata:
              envoy.lb:
                stage: "prod"
                version: "1.1"
                type: "bigmem"
        - endpoint:
            address:
              socket_address:
                address: e7
                port_value: 80
          metadata:
            filter_metadata:
              envoy.lb:
                stage: "dev"
                version: "1.2-pre"
                type: "std"
    lb_subset_config:
      fallback_policy: DEFAULT_SUBSET            
      default_subset:
        stage: "prod"
        version: "1.0"
        type: "std"
      subset_selectors:
      - keys: ["stage", "type"]
      - keys: ["stage", "version"]
      - keys: ["version"]
      - keys: ["xlarge", "version"]
    health_checks:
    - timeout: 5s
      interval: 10s
      unhealthy_threshold: 2
      healthy_threshold: 1
      http_health_check:
        path: /livez
        expected_statuses:   # repeated range field; "end" is exclusive
        - start: 200
          end: 400
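
Envoy precomputes one subset for every combination of values found among the endpoints for each selector's keys, and exposes counters for subset activity. A quick sketch for confirming that subsets are built and hit, assuming the admin listener on port 9901 is reachable at the front-envoy address used below:

# lb_subsets_active/created show how many subsets exist; lb_subsets_selected
# counts requests served from a matching subset; lb_subsets_fallback counts
# requests that fell back to the default subset.
curl -s http://172.31.33.2:9901/stats | grep 'webcluster1.lb_subsets'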

test.sh

#!/bin/bash
declare -i v10=0
declare -i v11=0

# $1 is the address of the front-envoy; e1, e2 and e5 are the endpoints
# labeled version 1.0 (within stage=prod, spanning types std and bigmem).
for ((counts=0; counts<200; counts++)); do
    if curl -s "http://$1/hostname" | grep -E "e[125]" &> /dev/null; then
        (( v10++ ))
    else
        (( v11++ ))
    fi
done

echo "Requests: v1.0:v1.1 = $v10:$v11"

docker-compose.yaml

version: '3'

services:
  front-envoy:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
      - ./front-envoy.yaml:/etc/envoy/envoy.yaml
    networks:
      envoymesh:
        ipv4_address: 172.31.33.2
    expose:
      # Expose ports 80 (for general traffic) and 9901 (for the admin server)
      - "80"
      - "9901"

  e1:
    image: ikubernetes/demoapp:v1.0
    hostname: e1
    networks:
      envoymesh:
        ipv4_address: 172.31.33.11
        aliases:
          - e1
    expose:
      - "80"

  e2:
    image: ikubernetes/demoapp:v1.0
    hostname: e2
    networks:
      envoymesh:
        ipv4_address: 172.31.33.12
        aliases:
          - e2
    expose:
      - "80"

  e3:
    image: ikubernetes/demoapp:v1.0
    hostname: e3
    networks:
      envoymesh:
        ipv4_address: 172.31.33.13
        aliases:
          - e3
    expose:
      - "80"

  e4:
    image: ikubernetes/demoapp:v1.0
    hostname: e4
    networks:
      envoymesh:
        ipv4_address: 172.31.33.14
        aliases:
          - e4
    expose:
      - "80"

  e5:
    image: ikubernetes/demoapp:v1.0
    hostname: e5
    networks:
      envoymesh:
        ipv4_address: 172.31.33.15
        aliases:
          - e5
    expose:
      - "80"

  e6:
    image: ikubernetes/demoapp:v1.0
    hostname: e6
    networks:
      envoymesh:
        ipv4_address: 172.31.33.16
        aliases:
          - e6
    expose:
      - "80"

  e7:
    image: ikubernetes/demoapp:v1.0
    hostname: e7
    networks:
      envoymesh:
        ipv4_address: 172.31.33.17
        aliases:
          - e7
    expose:
      - "80"

networks:
  envoymesh:
    driver: bridge
    ipam:
      config:
        - subnet: 172.31.33.0/24

Verification

docker-compose up
Test from a second (cloned) terminal

# The test.sh script takes the front-envoy address as its argument, keeps sending requests to it, and then prints the resulting traffic split. Per the routing rules, requests that set neither x-hardware-test nor x-custom-version to a matching value fall through to the catch-all route and are split between the two prod subsets; with 200 requests and 90/10 weights, the expected ratio is roughly 180:20.
root@test:/apps/servicemesh_in_practise-develop/Cluster-Manager/lb-subsets# ./test.sh 172.31.33.2
Requests: v1.0:v1.1 = 183:17


# Requests can also be steered with specific headers. For example, requests carrying "x-hardware-test: memory" are dispatched to the subset whose type label is bigmem and whose stage label is prod; that subset has two endpoints, e5 and e6.
root@test:/apps/servicemesh_in_practise-develop/Cluster-Manager/lb-subsets# curl -H "x-hardware-test: memory" 172.31.33.2/hostname
ServerName: e6
root@test:/apps/servicemesh_in_practise-develop/Cluster-Manager/lb-subsets# curl -H "x-hardware-test: memory" 172.31.33.2/hostname
ServerName: e5
root@test:/apps/servicemesh_in_practise-develop/Cluster-Manager/lb-subsets# curl -H "x-hardware-test: memory" 172.31.33.2/hostname
ServerName: e6
root@test:/apps/servicemesh_in_practise-develop/Cluster-Manager/lb-subsets# curl -H "x-hardware-test: memory" 172.31.33.2/hostname
ServerName: e5


# Likewise, requests carrying "x-custom-version: pre-release" are dispatched to the subset whose version label is 1.2-pre and whose stage label is dev; that subset has a single endpoint, e7.
root@test:/apps/servicemesh_in_practise-develop/Cluster-Manager/lb-subsets# curl -H "x-custom-version: pre-release" 172.31.33.2/hostname
ServerName: e7
root@test:/apps/servicemesh_in_practise-develop/Cluster-Manager/lb-subsets# curl -H "x-custom-version: pre-release" 172.31.33.2/hostname
ServerName: e7
root@test:/apps/servicemesh_in_practise-develop/Cluster-Manager/lb-subsets# curl -H "x-custom-version: pre-release" 172.31.33.2/hostname
ServerName: e7
root@test:/apps/servicemesh_in_practise-develop/Cluster-Manager/lb-subsets# curl -H "x-custom-version: pre-release" 172.31.33.2/hostname
ServerName: e7
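
The ["xlarge", "version"] selector declared in lb_subset_config is never exercised by the routes above. A hypothetical route that would use it might look like the sketch below (not part of this lab; the header value "xlarge" is made up here). Only e1 carries the xlarge: true label, so it would always answer:

              - match:
                  prefix: "/"
                  headers:
                  - name: x-hardware-test
                    exact_match: xlarge
                route:
                  cluster: webcluster1
                  metadata_match:
                    filter_metadata:
                      envoy.lb:
                        xlarge: true
                        version: "1.0"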

9、circuit-breaker
Lab environment

envoy: Front Proxy, at 172.31.35.2
webserver01: first backend service
webserver01-sidecar: Sidecar Proxy for the first backend service, at 172.31.35.11, aliased as red and webservice1
webserver02: second backend service
webserver02-sidecar: Sidecar Proxy for the second backend service, at 172.31.35.12, aliased as blue and webservice1
webserver03: third backend service
webserver03-sidecar: Sidecar Proxy for the third backend service, at 172.31.35.13, aliased as green and webservice1
webserver04: fourth backend service
webserver04-sidecar: Sidecar Proxy for the fourth backend service, at 172.31.35.14, aliased as gray and webservice2
webserver05: fifth backend service
webserver05-sidecar: Sidecar Proxy for the fifth backend service, at 172.31.35.15, aliased as black and webservice2

front-envoy.yaml

admin:
  access_log_path: "/dev/null"
  address:
    socket_address: { address: 0.0.0.0, port_value: 9901 }

static_resources:
  listeners:
  - address:
      socket_address: { address: 0.0.0.0, port_value: 80 }
    name: listener_http
    filter_chains:
    - filters:
      - name: envoy.filters.network.http_connection_manager
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
          codec_type: auto
          stat_prefix: ingress_http
          route_config:
            name: local_route
            virtual_hosts:
            - name: backend
              domains:
              - "*"
              routes:
              - match:
                  prefix: "/livez"
                route:
                  cluster: webcluster2
              - match:
                  prefix: "/"
                route:
                  cluster: webcluster1
          http_filters:
          - name: envoy.filters.http.router

  clusters:
  - name: webcluster1
    connect_timeout: 0.25s
    type: STRICT_DNS
    lb_policy: ROUND_ROBIN
    load_assignment:
      cluster_name: webcluster1
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address:
                address: webservice1
                port_value: 80
    circuit_breakers:
      thresholds:    # repeated field: each entry is one priority's thresholds
      - max_connections: 1
        max_pending_requests: 1
        max_retries: 3

  - name: webcluster2
    connect_timeout: 0.25s
    type: STRICT_DNS
    lb_policy: ROUND_ROBIN
    load_assignment:
      cluster_name: webcluster2
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address:
                address: webservice2
                port_value: 80
    outlier_detection:
      interval: 1s
      consecutive_5xx: 3
      consecutive_gateway_failure: 3
      base_ejection_time: 10s
      enforcing_consecutive_gateway_failure: 100
      max_ejection_percent: 30
      success_rate_minimum_hosts: 2
envoy-sidecar-proxy.yaml

admin:
  profile_path: /tmp/envoy.prof
  access_log_path: /tmp/admin_access.log
  address:
    socket_address:
       address: 0.0.0.0
       port_value: 9901

static_resources:
  listeners:
  - name: listener_0
    address:
      socket_address: { address: 0.0.0.0, port_value: 80 }
    filter_chains:
    - filters:
      - name: envoy.filters.network.http_connection_manager
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
          stat_prefix: ingress_http
          codec_type: AUTO
          route_config:
            name: local_route
            virtual_hosts:
            - name: local_service 
              domains: ["*"]
              routes:
              - match: { prefix: "/" }
                route: { cluster: local_cluster }
          http_filters:
          - name: envoy.filters.http.router

  clusters:
  - name: local_cluster
    connect_timeout: 0.25s
    type: STATIC
    lb_policy: ROUND_ROBIN
    load_assignment:
      cluster_name: local_cluster
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address: { address: 127.0.0.1, port_value: 8080 }
    circuit_breakers:
      thresholds:    # repeated field: each entry is one priority's thresholds
      - max_connections: 1
        max_pending_requests: 1
        max_retries: 2

send-requests.sh

#!/bin/bash
#
if [ $# -ne 2 ]
then
    echo "USAGE: $0 <URL> <COUNT>"
    exit 1;
fi

URL=$1
COUNT=$2
c=1
#interval="0.2"

while [[ ${c} -le ${COUNT} ]];
do
  #echo "Sending GET request: ${URL}"
  curl -o /dev/null -w '%{http_code}\n' -s ${URL} &
  (( c++ ))
#  sleep $interval
done

wait
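
Because every curl is launched in the background and the script only waits once at the end, up to COUNT requests can be in flight at the same time; with max_connections: 1 and max_pending_requests: 1 on webcluster1, most of such a burst overflows the pending queue and is rejected with a 503. One way to tell breaker rejections apart from application errors is the x-envoy-overloaded response header, which Envoy sets on requests it drops due to circuit breaking; a sketch:

# Fire 50 parallel requests and count how many carry the breaker marker:
seq 1 50 | xargs -P 50 -I{} curl -s -o /dev/null -D - http://172.31.35.2/ \
  | grep -ci 'x-envoy-overloaded'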

docker-compose.yaml

version: '3'

services:
  front-envoy:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
      - ./front-envoy.yaml:/etc/envoy/envoy.yaml
    networks:
      - envoymesh
    expose:
      # Expose ports 80 (for general traffic) and 9901 (for the admin server)
      - "80"
      - "9901"

  webserver01-sidecar:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: red
    networks:
      envoymesh:
        ipv4_address: 172.31.35.11
        aliases:
        - webservice1
        - red

  webserver01:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver01-sidecar"
    depends_on:
    - webserver01-sidecar

  webserver02-sidecar:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: blue
    networks:
      envoymesh:
        ipv4_address: 172.31.35.12
        aliases:
        - webservice1
        - blue

  webserver02:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver02-sidecar"
    depends_on:
    - webserver02-sidecar

  webserver03-sidecar:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: green
    networks:
      envoymesh:
        ipv4_address: 172.31.35.13
        aliases:
        - webservice1
        - green

  webserver03:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver03-sidecar"
    depends_on:
    - webserver03-sidecar

  webserver04-sidecar:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: gray
    networks:
      envoymesh:
        ipv4_address: 172.31.35.14
        aliases:
        - webservice2
        - gray

  webserver04:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver04-sidecar"
    depends_on:
    - webserver04-sidecar

  webserver05-sidecar:
    image: envoyproxy/envoy-alpine:v1.20.0
    environment:
      - ENVOY_UID=0
    volumes:
    - ./envoy-sidecar-proxy.yaml:/etc/envoy/envoy.yaml
    hostname: black
    networks:
      envoymesh:
        ipv4_address: 172.31.35.15
        aliases:
        - webservice2
        - black

  webserver05:
    image: ikubernetes/demoapp:v1.0
    environment:
      - PORT=8080
      - HOST=127.0.0.1
    network_mode: "service:webserver05-sidecar"
    depends_on:
    - webserver05-sidecar

networks:
  envoymesh:
    driver: bridge
    ipam:
      config:
        - subnet: 172.31.35.0/24

Verification

docker-compose up
Test from a second (cloned) terminal

# Test webcluster1 with the send-requests.sh script; some requests come back with a 5xx response code, which is the circuit breaker rejecting them.
root@test:/apps/servicemesh_in_practise-develop/Cluster-Manager/circuit-breaker# ./send-requests.sh http://172.31.35.2/ 300

200
200
200
503  # rejected by the circuit breaker
200
200
