
Envoy Filtering and Monitoring Examples

Author: Walker_7797 | Published 2022-04-29 17:10

    1. HTTP-Connection-Manager

    • httproute-simple-match
    cd servicemesh_in_practise/HTTP-Connection-Manager/httproute-simple-match
    # Start
    docker-compose up
    
    # Verify
    curl 172.31.50.10 # default route
    Hello from App behind Envoy! Hostname: ed9dbef5686c, Address: 172.31.50.5!
    curl -H "Host: ilinux.io" 172.31.50.10 # 匹配主机头规则
    Hello from App behind Envoy! Hostname: acb8353a7ebc, Address: 172.31.50.4!
    curl -H "Host: ilinux.io" 172.31.50.10/service/blue # 匹配主机名和服务名后缀规则
    Hello from App behind Envoy (service light_blue)! hostname: 733ad649eb51 resolved hostname: 172.31.50.6
    curl -I -H "Host: ilinux.io" 172.31.50.10/service/helloblue # 匹配主机名和服务名后缀重定向规则
    HTTP/1.1 301 Moved Permanently
    location: http://ilinux.io/service/blue
    date: Thu, 28 Apr 20xx xx:19:52 GMT
    server: envoy
    transfer-encoding: chunked
    curl -I -H "Host: ilinux.io" 172.31.50.10/service/yellow
    This page will be provided soon later.
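
    The matching behavior above is driven by the route table in the listener's HTTP connection manager. A minimal sketch of what the ilinux.io virtual host might look like (names and values are illustrative, not the repository's exact file):

    virtual_hosts:
    - name: ilinux_io
      domains: ["ilinux.io"]                  # selected by the "Host: ilinux.io" header
      routes:
      - match:
          path: "/service/blue"               # exact-path match
        route:
          cluster: light_blue
      - match:
          prefix: "/service/helloblue"
        redirect:
          path_redirect: "/service/blue"      # returns 301 with the new location
      - match:
          prefix: "/service/yellow"
        direct_response:                      # static placeholder response
          status: 200
          body:
            inline_string: "This page will be provided soon later.\n"
      - match:
          prefix: "/"                         # catch-all for this host
        route:
          cluster: blue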
    
    • httproute-headers-match
    cd servicemesh_in_practise/HTTP-Connection-Manager/httproute-headers-match
    # Start
    docker-compose up
    
    # Verify
    curl 172.31.52.10 # default route
    iKubernetes demoapp v1.0 !! ClientIP: 172.31.52.10, ServerName: demoapp-v1.0-1, ServerIP: 172.31.52.5!
    curl -H "X-Canary: true" 172.31.52.10 # 匹配主机头规则
    iKubernetes demoapp v1.2 !! ClientIP: 172.31.52.10, ServerName: demoapp-v1.2-1, ServerIP: 172.31.52.2!
    curl 172.31.52.10?username=vip_linux # matches the query-string prefix rule
    iKubernetes demoapp v1.1 !! ClientIP: 172.31.52.10, ServerName: demoapp-v1.1-1, ServerIP: 172.31.52.6!
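
    A sketch of what the header and query-string matchers might look like (route and cluster names are assumptions; query_parameters uses the v3 string_match form):

    routes:
    - match:
        prefix: "/"
        headers:
        - name: X-Canary                      # hit by: curl -H "X-Canary: true"
          exact_match: "true"
      route:
        cluster: demoappv12
    - match:
        prefix: "/"
        query_parameters:
        - name: username                      # hit by: curl 172.31.52.10?username=vip_linux
          string_match:
            prefix: "vip_"
      route:
        cluster: demoappv11
    - match:
        prefix: "/"                           # default route
      route:
        cluster: demoappv10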
    
    • http-traffic-shifting
    cd servicemesh_in_practise/HTTP-Connection-Manager/http-traffic-shifting
    # Start
    docker-compose up
    
    # Verify
    ./send-request.sh 172.31.55.10 # check the traffic ratio between the old and new versions
    curl -XPOST http://172.31.55.10:9901/runtime_modify?routing.traffic_shift.demoapp=90 # shift 10% of traffic to the new version
    OK
    curl -XPOST http://172.31.55.10:9901/runtime_modify?routing.traffic_shift.demoapp=0 # shift all traffic to the new version
    OK
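
    Traffic shifting like this is typically implemented with a runtime_fraction route match whose runtime key is the one modified above. A minimal sketch (cluster names are assumptions):

    routes:
    - match:
        prefix: "/"
        runtime_fraction:                     # fraction of requests kept on the old version
          default_value:
            numerator: 100
            denominator: HUNDRED
          runtime_key: routing.traffic_shift.demoapp
      route:
        cluster: demoappv10
    - match:
        prefix: "/"                           # requests not selected above fall through
      route:
        cluster: demoappv11

    Setting the runtime key to 90 therefore keeps 90% of requests on demoappv10 and lets the remaining 10% fall through to demoappv11; setting it to 0 sends everything to the new version.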
    
    • http-traffic-splitting
    cd servicemesh_in_practise/HTTP-Connection-Manager/http-traffic-splitting
    # Start
    docker-compose up
    
    # Verify
    ./send-request.sh 172.31.57.10 # check the traffic ratio between the old and new versions
    curl -XPOST 'http://172.31.57.10:9901/runtime_modify?routing.traffic_split.demoapp.demoappv10=10&routing.traffic_split.demoapp.demoappv11=90' # shift 90% of traffic to the new version
    OK
    curl -XPOST 'http://172.31.57.10:9901/runtime_modify?routing.traffic_split.demoapp.demoappv10=0&routing.traffic_split.demoapp.demoappv11=100' # shift all traffic to the new version
    OK
    

    The traffic weights of the two clusters must sum to 100, as in the sketch below.
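
    A sketch of the weighted_clusters route this example likely uses; runtime_key_prefix makes each cluster's weight adjustable via the runtime keys shown above (cluster names are assumptions):

    routes:
    - match:
        prefix: "/"
      route:
        weighted_clusters:
          runtime_key_prefix: routing.traffic_split.demoapp   # weights read from <prefix>.<cluster-name>
          clusters:
          - name: demoappv10
            weight: 90
          - name: demoappv11
            weight: 10                        # the weights must add up to 100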

    • http-request-mirror
    cd servicemesh_in_practise/HTTP-Connection-Manager/http-request-mirror
    # Start
    docker-compose up
    
    # Verify
    ./send-request.sh 172.31.60.10 # send requests; by default 20% of traffic is mirrored to the test version, visible in its logs
    curl -XPOST 'http://172.31.60.10:9901/runtime_modify?routing.request_mirror.demoapp=50' # mirror 50% of traffic to the test version
    OK
    curl -XPOST 'http://172.31.60.10:9901/runtime_modify?routing.request_mirror.demoapp=100' # mirror all traffic to the test version
    OK
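
    Mirroring is configured with request_mirror_policies on the route: the shadow cluster receives a copy of each sampled request, and its responses are discarded. A minimal sketch (cluster names are assumptions):

    routes:
    - match:
        prefix: "/"
      route:
        cluster: demoappv10                   # production traffic
        request_mirror_policies:
        - cluster: demoappv11                 # test version receiving mirrored copies
          runtime_fraction:
            default_value:
              numerator: 20                   # default 20%, adjustable via the runtime key
              denominator: HUNDRED
            runtime_key: routing.request_mirror.demoapp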
    
    • fault-injection
    cd servicemesh_in_practise/HTTP-Connection-Manager/fault-injection
    # Start
    docker-compose up
    
    # Verify
    curl -w"@curl_format.txt" -o /dev/null -s "http://172.31.62.10/service/red" # a delay fault is injected into 10% of requests; an affected request takes about 10s longer
        time_namelookup:  0.000023
           time_connect:  0.000195
        time_appconnect:  0.000000
       time_pretransfer:  0.000496
          time_redirect:  0.000000
     time_starttransfer:  10.012071
                        ----------
             time_total:  10.012236
    curl -w '%{http_code}\n' -o /dev/null -s "http://172.31.62.10/service/blue" # a 503 fault is injected into 10% of requests; an affected request returns a 503 response code
    503
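
    Both faults come from the envoy.filters.http.fault HTTP filter, here sketched as per-route overrides via typed_per_filter_config (cluster names are assumptions, not the repository's exact file):

    routes:
    - match:
        prefix: "/service/red"
      route:
        cluster: red
      typed_per_filter_config:
        envoy.filters.http.fault:
          "@type": type.googleapis.com/envoy.extensions.filters.http.fault.v3.HTTPFault
          delay:
            fixed_delay: 10s                  # 10s delay on 10% of requests
            percentage: {numerator: 10, denominator: HUNDRED}
    - match:
        prefix: "/service/blue"
      route:
        cluster: blue
      typed_per_filter_config:
        envoy.filters.http.fault:
          "@type": type.googleapis.com/envoy.extensions.filters.http.fault.v3.HTTPFault
          abort:
            http_status: 503                  # 503 on 10% of requests
            percentage: {numerator: 10, denominator: HUNDRED}

    For per-route overrides to take effect, envoy.filters.http.fault must also be listed in the connection manager's http_filters chain ahead of the router.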
    
    • timeout-retries
    cd servicemesh_in_practise/HTTP-Connection-Manager/timeout-retries
    # Start
    docker-compose up
    
    # Verify
    curl -w"@curl_format.txt" -o /dev/null -s "http://172.31.65.10/service/red" # 50% delay fault injection with a 1s route timeout; an affected request returns after 1s
        time_namelookup:  0.000022
           time_connect:  0.000158
        time_appconnect:  0.000000
       time_pretransfer:  0.000195
          time_redirect:  0.000000
     time_starttransfer:  1.001305
                        ----------
             time_total:  1.001412
    ./send-requests.sh http://172.31.65.10/service/blue 100 # 50% 503 fault injection with retries enabled; the 503 rate drops sharply
    200
    200
    200
    200
    503
    200
    200
    200
    200
    200
    200
    200
    200
    503
    200
    ./send-requests.sh http://172.31.65.10/service/colors 100
    200
    200
    200
    200
    200
    200
    200
    504 # the 504 response code is caused by an upstream request timeout
    200
    200
    200
    504
    200
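
    A sketch of the route-level timeout and retry policy behind these results (the paths mirror the commands above; cluster names and exact values are assumptions):

    routes:
    - match:
        prefix: "/service/red"
      route:
        cluster: red
        timeout: 1s                           # give up after 1s; a timed-out upstream yields 504
    - match:
        prefix: "/service/blue"
      route:
        cluster: blue
        retry_policy:
          retry_on: "5xx"                     # transparently retry failed attempts
          num_retries: 3
          per_try_timeout: 0.5s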
    

    2. Monitoring-and-Tracing

    • monitoring
    cd servicemesh_in_practise/Monitoring-and-Tracing/monitoring
    # Start
    docker-compose up
    
    # Verify
    while true; do curl 172.31.70.10; sleep 0.$RANDOM; done
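
    Envoy exposes Prometheus-format metrics on its admin port at /stats/prometheus, so the Prometheus container presumably scrapes it with a job along these lines (the target is this example's admin listener; the job name is illustrative):

    scrape_configs:
    - job_name: envoy
      metrics_path: /stats/prometheus         # Envoy's Prometheus-format stats endpoint
      static_configs:
      - targets: ['172.31.70.10:9901']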
    

    Access Prometheus (screenshot)

    Access Grafana (screenshot)
    • access-log
    cd servicemesh_in_practise/Monitoring-and-Tracing/access-log
    # Start
    docker-compose up
    
    # Verify
    curl 172.31.73.10 # check the access log
    

    Edit the front_envoy.yaml file: comment out the json_format block and enable the text_format block, roughly as sketched below.
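
    A minimal sketch of the relevant access_log block (assuming the stdout access logger; the actual format fields in the repository may differ):

    access_log:
    - name: envoy.access_loggers.stdout
      typed_config:
        "@type": type.googleapis.com/envoy.extensions.access_loggers.stream.v3.StdoutAccessLog
        log_format:
          # json_format:                      # structured variant, commented out
          #   status: "%RESPONSE_CODE%"
          #   path: "%REQ(:PATH)%"
          text_format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(:PATH)% %PROTOCOL%\" %RESPONSE_CODE%\n"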

    # Start
    docker-compose up
    
    # Verify
    curl 172.31.73.10 # check the access log
    
    • accesslog-with-efk
    cd servicemesh_in_practise/Monitoring-and-Tracing/accesslog-with-efk
    # Start
    docker-compose up
    
    # Verify
    while true; do curl 172.31.76.10/service/colors; sleep 0.$RANDOM; done
    # Verify Elasticsearch
    curl 172.31.76.15:9200
    {
      "name" : "myes01",
      "cluster_name" : "myes",
      "cluster_uuid" : "H_iE6pcgSgixypqBZFrzuA",
      "version" : {
        "number" : "7.14.2",
        "build_flavor" : "default",
        "build_type" : "docker",
        "build_hash" : "6bc13727ce758c0e943c3c21653b3da82f627f75",
        "build_date" : "2021-09-15T10:18:09.722761972Z",
        "build_snapshot" : false,
        "lucene_version" : "8.9.0",
        "minimum_wire_compatibility_version" : "6.8.0",
        "minimum_index_compatibility_version" : "6.0.0-beta1"
      },
      "tagline" : "You Know, for Search"
    }
    # List Elasticsearch indices
    curl 172.31.76.15:9200/_cat/indices
    green  open .geoip_databases                  ysusGG6bQrSQie3VgRDhuw 1 0  40   0  37.7mb  37.7mb
    yellow open filebeat-7.14.2-20xx.xx.28-000001 KC1dWFZ4TtOvFXudTtl_gw 1 1   0   0    208b    208b
    green  open .apm-custom-link                  EL91AX5VShGzoKJZqjhTow 1 0   0   0    208b    208b
    green  open .kibana_task_manager_7.14.2_001   Z1h_EiWdSTalLMGbu_R_lA 1 0  14 183  77.2kb  77.2kb
    green  open .apm-agent-configuration          9B_-D92yQaS17Di44e4gFw 1 0   0   0    208b    208b
    green  open .kibana_7.14.2_001                P1tti9iQS0iTjfoqlpiGww 1 0  10   0   2.3mb   2.3mb
    yellow open filebeat-2022.04.28               rzRoPfUFTWuNK_wWlCASPQ 1 1 292   0 290.8kb 290.8kb
    green  open .kibana-event-log-7.14.2-000001   dg4EalAiRbGMvPyos1ZHow 1 0   1   0   5.5kb   5.5kb
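
    To inspect the shipped access-log documents themselves, the standard Elasticsearch search API can be queried against the filebeat indices, e.g.:

    # fetch one access-log document from the filebeat indices
    curl '172.31.76.15:9200/filebeat-*/_search?size=1&pretty'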
    

    Kibana view (screenshot)
    • monitoring-and-accesslog
    cd servicemesh_in_practise/Monitoring-and-Tracing/monitoring-and-accesslog
    # Start
    docker-compose up
    
    # Verify
    while true; do curl 172.31.79.10/service/colors; sleep 0.$RANDOM; done
    # Verify Elasticsearch
    curl 172.31.79.15:9200
    {
      "name" : "myes01",
      "cluster_name" : "myes",
      "cluster_uuid" : "SMKEiNPeQe2eTFExMT5p9A",
      "version" : {
        "number" : "7.14.2",
        "build_flavor" : "default",
        "build_type" : "docker",
        "build_hash" : "6bc13727ce758c0e943c3c21653b3da82f627f75",
        "build_date" : "2021-09-15T10:18:09.722761972Z",
        "build_snapshot" : false,
        "lucene_version" : "8.9.0",
        "minimum_wire_compatibility_version" : "6.8.0",
        "minimum_index_compatibility_version" : "6.0.0-beta1"
      },
      "tagline" : "You Know, for Search"
    }
    # List Elasticsearch indices
    curl 172.31.79.15:9200/_cat/indices
    green  open .geoip_databases                  hrkkroDNRaKu2a0JGN-nSA 1 0  40  0  37.7mb  37.7mb
    yellow open filebeat-7.14.2-20xx.xx.28-000001 VbsXUmPoTvuPagf9Etxtzg 1 1   0  0    208b    208b
    green  open .apm-custom-link                  pFrJAe0BRhm7DrCcodldVg 1 0   0  0    208b    208b
    green  open .apm-agent-configuration          aHQxL3AmQFWqYmJeoi1fzQ 1 0   0  0    208b    208b
    green  open .kibana_task_manager_7.14.2_001   8Gq6PvpzQValMTAuHuDu8Q 1 0  14 87 235.3kb 235.3kb
    green  open .kibana_7.14.2_001                CW4oUQQESFmp3_Zn5QBQiQ 1 0  11  1   4.6mb   4.6mb
    yellow open filebeat-2022.04.28               UDHC5YUjS_C-KyUAx9G2RA 1 1 275  0 432.6kb 432.6kb
    green  open .kibana-event-log-7.14.2-000001   _bGofajrQLCqVKMktz0zpg 1 0   2  0  10.9kb  10.9kb
    green  open .tasks                            BWvcKq2ESI2vUhjt02ItLw 1 0   2  0   7.7kb   7.7kb
    

    Kibana view (screenshot)

    Grafana view (screenshot)
    • zipkin-tracing
    cd servicemesh_in_practise/Monitoring-and-Tracing/zipkin-tracing
    # Start
    docker-compose up
    
    # Verify
    while true; do curl 172.31.79.10/service/colors; sleep 0.$RANDOM; done
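
    The trace spans come from a Zipkin tracing provider in the HTTP connection manager, configured roughly like this sketch (the cluster name and endpoint version are assumptions):

    tracing:
      provider:
        name: envoy.tracers.zipkin
        typed_config:
          "@type": type.googleapis.com/envoy.config.trace.v3.ZipkinConfig
          collector_cluster: zipkin           # cluster pointing at the Zipkin container
          collector_endpoint: "/api/v2/spans"
          collector_endpoint_version: HTTP_JSON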
    

    Zipkin view (screenshot)

    3. Monitoring and Tracing

    • Custom log format
      Modify the front-envoy.yaml file of the access-log example in Monitoring-and-Tracing to define the log format
    vim front-envoy.yaml # change text_format to the following
    text_format: "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% - - [%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %BYTES_SENT% \"-\" \"%REQ(USER-AGENT)%\"\n"
    
    # Verify
    172.31.73.1 - - [20xx-xx-28Txx:24:31.520Z] "GET / HTTP/1.1" 200 75 "-" "curl/7.68.0"
    
    • SkyWalking monitoring
    # The docker-compose file is as follows
    cat docker-compose.yml
    version: '3.3'
    
    services:
      front-envoy:
        image: envoyproxy/envoy-alpine:v1.21-latest
        environment:
          - ENVOY_UID=0
          - ENVOY_GID=0
        volumes:
        - "./front_envoy/envoy-config.yaml:/etc/envoy/envoy.yaml"
        networks:
          envoymesh:
            ipv4_address: 172.31.85.10
            aliases:
            - front-envoy
            - front
        ports:
        - 8080:80
        - 9901:9901
    
      service_a_envoy:
        image: envoyproxy/envoy-alpine:v1.21-latest
        environment:
          - ENVOY_UID=0
          - ENVOY_GID=0
        volumes:
        - "./service_a/envoy-config.yaml:/etc/envoy/envoy.yaml"
        networks:
          envoymesh:
            aliases:
            - service_a_envoy
            - service-a-envoy
        ports:
        - 8786
        - 8788
        - 8791
    
      service_a:
        build: service_a/
        network_mode: "service:service_a_envoy"
        #ports:
        #- 8081
        depends_on:
        - service_a_envoy
    
      service_b_envoy:
        image: envoyproxy/envoy-alpine:v1.21-latest
        environment:
          - ENVOY_UID=0
          - ENVOY_GID=0
        volumes:
        - "./service_b/envoy-config.yaml:/etc/envoy/envoy.yaml"
        networks:
          envoymesh:
            aliases:
            - service_b_envoy
            - service-b-envoy
        ports:
        - 8789
    
      service_b:
        build: service_b/
        network_mode: "service:service_b_envoy"
        #ports:
        #- 8082
        depends_on:
        - service_b_envoy
    
      service_c_envoy:
        image: envoyproxy/envoy-alpine:v1.21-latest
        environment:
          - ENVOY_UID=0
          - ENVOY_GID=0
        volumes:
        - "./service_c/envoy-config.yaml:/etc/envoy/envoy.yaml"
        networks:
          envoymesh:
            aliases:
            - service_c_envoy
            - service-c-envoy
        ports:
        - 8790
    
      service_c:
        build: service_c/
        network_mode: "service:service_c_envoy"
        #ports:
        #- 8083
        depends_on:
        - service_c_envoy
    
      es7:
        image: elasticsearch:7.17.3
        container_name: es7
        ports:
          - 9200:9200
          - 9300:9300
        environment:
          - discovery.type=single-node # single-node mode
          - bootstrap.memory_lock=true # lock the process memory to prevent swapping
          - "ES_JAVA_OPTS=-Xms1048m -Xmx1048m" # JVM heap size
          - TZ=Asia/Shanghai
        ulimits:
          memlock:
            soft: -1
            hard: -1
        networks:
          envoymesh:
            ipv4_address: 172.31.85.15
    
      skywalking-oap:
        image: apache/skywalking-oap-server:8.6.0-es7
        container_name: skywalking-oap
        restart: always
        depends_on:
          - es7
        links:
          - es7
        ports:
          - 11800:11800
          - 12800:12800
        environment:
          TZ: Asia/Shanghai
          SW_STORAGE: elasticsearch7
          SW_STORAGE_ES_CLUSTER_NODES: es7:9200
        networks:
          envoymesh:
            ipv4_address: 172.31.85.16
    
      skywalking-ui:
        image: apache/skywalking-ui:8.6.0
        container_name: skywalking-ui
        restart: always
        depends_on:
          - skywalking-oap
        links:
          - skywalking-oap
        ports:
          - 8081:8080
        environment:
          TZ: Asia/Shanghai
          SW_OAP_ADDRESS: skywalking-oap:12800
        networks:
          envoymesh:
            ipv4_address: 172.31.85.17
    
    networks:
      envoymesh:
        driver: bridge
        ipam:
          config:
            - subnet: 172.31.85.0/24
    
    # The Envoy config file is as follows
    cat envoy-config.yaml
    node:
      id: front-envoy
      cluster: front-envoy
    
    admin:
      profile_path: /tmp/envoy.prof
      access_log_path: /tmp/admin_access.log
      address:
        socket_address:
           address: 0.0.0.0
           port_value: 9901
    
    layered_runtime:
      layers:
      - name: admin
        admin_layer: {}
    
    static_resources:
      listeners:
      - name: http_listener-service_a
        address:
          socket_address:
            address: 0.0.0.0
            port_value: 80
        traffic_direction: OUTBOUND
        filter_chains:
        - filters:
          - name: envoy.filters.network.http_connection_manager
            typed_config:
              "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
              generate_request_id: true
              tracing:
                provider:
                  name: envoy.tracers.skywalking
                  typed_config:
                    "@type": type.googleapis.com/envoy.config.trace.v3.SkyWalkingConfig
                    grpc_service:
                      envoy_grpc:
                        cluster_name: skywalking
                      timeout: 0.250s
                    client_config:
                      service_name: front-envoy
                      instance_name: front-envoy-1
              codec_type: AUTO
              stat_prefix: ingress_http
              route_config:
                name: local_route
                virtual_hosts:
                - name: backend
                  domains:
                  - "*"
                  routes:
                  - match:
                      prefix: "/"
                    route:
                      cluster: service_a
                    decorator:
                      operation: checkAvailability
                  response_headers_to_add:
                  - header:
                      key: "x-b3-traceid"
                      value: "%REQ(x-b3-traceid)%"
                  - header:
                      key: "x-request-id"
                      value: "%REQ(x-request-id)%"
              http_filters:
              - name: envoy.filters.http.router
    
      clusters:
      - name: skywalking
        type: STRICT_DNS
        lb_policy: ROUND_ROBIN
        typed_extension_protocol_options:
          envoy.extensions.upstreams.http.v3.HttpProtocolOptions:
            "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions
            explicit_http_config:
              http2_protocol_options: {}
        load_assignment:
          cluster_name: skywalking
          endpoints:
          - lb_endpoints:
            - endpoint:
                address:
                  socket_address:
                    address: skywalking-oap
                    port_value: 11800
    
      - name: service_a
        connect_timeout: 0.25s
        type: STRICT_DNS
        lb_policy: ROUND_ROBIN
        load_assignment:
          cluster_name: service_a
          endpoints:
          - lb_endpoints:
            - endpoint:
                address:
                  socket_address:
                    address: service_a_envoy
                    port_value: 8786
    # Start
    docker-compose up
    
    # Verify
    while true; do curl 172.31.85.10; sleep 0.$RANDOM; done
    

    Reference config: https://github.com/envoyproxy/envoy/blob/main/examples/skywalking-tracing/front-envoy-skywalking.yaml
    Note: be sure to use matching versions of SkyWalking and Elasticsearch. With apache/skywalking-oap-server:8.7.0-es7 and apache/skywalking-ui:8.7.0, the containers start normally but the UI logs errors and the overview page stays blank. Also make sure the ports exposed by SkyWalking do not conflict with those exposed by front-envoy. With the versions in the YAML above, the startup logs show some errors, but they do not affect usage. Once everything is up, run the access command to generate logs and traces, set the matching time zone in the skywalking-ui, click refresh, and wait a moment for data to appear.

    SkyWalking view (screenshots)
