1. Envoy xDS Examples
For cloning the example code, refer to the earlier Envoy examples blog post.
- eds-filesystem
cd servicemesh_in_practise/Dynamic-Configuration/eds-filesystem
# Edit the docker-compose configuration file
vim docker-compose.yaml
services:
  envoy:
    image: envoyproxy/envoy-alpine:v1.21-latest
    environment:       # add the environment variable
      - ENVOY_UID=0    # run Envoy as root (UID 0)
# Start
docker-compose up
# Verify
curl 172.31.11.2
Note: the docker-compose file in this example references the envoy image in three places, so the change must be made three times. Also, the port that Envoy's listener binds and the port of the endpoints discovered via EDS may be identical; they do not conflict.
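For context, file-based EDS means the cluster's eds_config points at a local file that Envoy watches for changes. Below is a minimal sketch of the two pieces involved; the cluster name and endpoint IP are assumptions for illustration, not copied from the repo:
# Bootstrap side (sketch): an EDS cluster whose endpoints come from a watched file
clusters:
- name: webcluster                          # assumed cluster name
  connect_timeout: 0.25s
  type: EDS
  eds_cluster_config:
    eds_config:
      resource_api_version: V3
      path: /etc/envoy/eds.conf.d/eds.yaml  # the file Envoy watches (by inode)
# eds.yaml / eds.yaml.v2 (sketch): a ClusterLoadAssignment listing the endpoints
resources:
- "@type": type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment
  cluster_name: webcluster
  endpoints:
  - lb_endpoints:
    - endpoint:
        address:
          socket_address: { address: 172.31.11.11, port_value: 80 }   # assumed endpoint IP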
Edit the configuration file to simulate adding an endpoint and let EDS discover it dynamically
docker exec -it edsfilesystem_envoy_1 sh
cd /etc/envoy/eds.conf.d
cat eds.yaml.v2 > eds.yaml
mv eds.yaml bak && mv bak eds.yaml # this step is only needed inside the container, not on the host; it forcibly replaces the file so that Envoy's inode-based file watch is triggered and the newly added endpoint is picked up
curl 172.31.11.2:9901/clusters # check the newly added endpoint: before the mv the endpoint is already in the file but Envoy has not noticed it; after the mv it shows up
# Verify
curl 172.31.11.2
- lds-cds-filesystem
cd servicemesh_in_practise/Dynamic-Configuration/lds-cds-filesystem
# Edit the docker-compose configuration file
vim docker-compose.yaml
services:
  envoy:
    image: envoyproxy/envoy-alpine:v1.21-latest
    environment:       # add the environment variable
      - ENVOY_UID=0    # run Envoy as root (UID 0)
...
      - webserver01-sidecar
      - webserver      # add an alias, used later when changing the domain name in cds.yaml
...
      - webserver02-sidecar
      - webserver      # add an alias, used later when changing the domain name in cds.yaml
# Start
docker-compose up
# Verify
curl 172.31.12.2
The environment variable must be added three times.
Edit the configuration file to verify changing the listener port
docker exec -it ldscdsfilesystem_envoy_1 sh
cd /etc/envoy/conf.d
# Change the listener port from 80 to 10080
vi lds.yaml
resources:
- "@type": type.googleapis.com/envoy.config.listener.v3.Listener
  name: listener_http
  address:
    socket_address: { address: 0.0.0.0, port_value: 10080 }
mv lds.yaml bak && mv bak lds.yaml
curl 172.31.12.2:9901/listeners # view the listeners
# Verify
curl 172.31.12.2:10080
Edit the configuration file to verify removing an endpoint (leave only webserver01 in cds.yaml)
vi cds.yaml
resources:
- "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
  name: webcluster
  connect_timeout: 1s
  type: STRICT_DNS
  load_assignment:
    cluster_name: webcluster
    endpoints:
    - lb_endpoints:
      - endpoint:
          address:
            socket_address:
              address: webserver01
              port_value: 80
mv cds.yaml bak && mv bak cds.yaml
curl 172.31.12.2:9901/clusters # check the cluster's endpoint information
# Verify
curl 172.31.12.2:10080
Verify by changing the resolved domain name: edit the endpoint address in cds.yaml to the webserver alias added in docker-compose above (see the sketch after this block)
vi cds.yaml
mv cds.yaml bak && mv bak cds.yaml
curl 172.31.12.2:9901/clusters # check the cluster's endpoint information
# Verify
curl 172.31.12.2:10080
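For reference, the domain-name change verified here is presumably just pointing the single endpoint at the shared alias, so STRICT_DNS resolves it to both sidecars. A sketch of the relevant part of cds.yaml, not the repo's literal file:
load_assignment:
  cluster_name: webcluster
  endpoints:
  - lb_endpoints:
    - endpoint:
        address:
          socket_address:
            address: webserver   # the alias added in docker-compose; resolves to both webserver01 and webserver02 sidecars
            port_value: 80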
- lds-cds-grpc
cd servicemesh_in_practise/Dynamic-Configuration/lds-cds-grpc
# Edit the docker-compose configuration file
vim docker-compose.yaml
services:
  envoy:
    image: envoyproxy/envoy-alpine:v1.21-latest
    environment:       # add the environment variable
      - ENVOY_UID=0    # run Envoy as root (UID 0)
# Start
docker-compose up
curl 172.31.15.2:9901/clusters # view cluster information
curl 172.31.15.2:9901/listeners # view listener information
# Verify
curl 172.31.15.2
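For context, with gRPC-based xDS the bootstrap file defines no listeners or application clusters statically; it only tells Envoy where to fetch LDS/CDS from, plus a statically defined cluster for the management server itself. A minimal sketch of that wiring, assuming the xDS server is reachable at xdsserver:18000 (only the xds_cluster name is confirmed by the config_dump output further below):
dynamic_resources:
  lds_config:
    resource_api_version: V3
    api_config_source:
      api_type: GRPC
      transport_api_version: V3
      grpc_services:
      - envoy_grpc:
          cluster_name: xds_cluster
  cds_config:
    resource_api_version: V3
    api_config_source:
      api_type: GRPC
      transport_api_version: V3
      grpc_services:
      - envoy_grpc:
          cluster_name: xds_cluster
static_resources:
  clusters:
  - name: xds_cluster              # the management server itself must be a static cluster
    connect_timeout: 0.25s
    type: STRICT_DNS
    typed_extension_protocol_options:
      envoy.extensions.upstreams.http.v3.HttpProtocolOptions:
        "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions
        explicit_http_config:
          http2_protocol_options: {}   # xDS uses gRPC, so the connection must be HTTP/2
    load_assignment:
      cluster_name: xds_cluster
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address: { address: xdsserver, port_value: 18000 }   # assumed host/port of the xDS server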
Edit the configuration file to verify changing the listener port and adding endpoints
docker exec -it ldscdsgrpc_xdsserver_1 sh
cd /etc/envoy-xds-server/config
cat config.yaml-v2 > config.yaml # the file takes effect in real time, so avoid editing config.yaml directly; vi's auto-save would make the xDS server push a half-edited configuration
curl 172.31.15.2:9901/clusters # view cluster information
curl 172.31.15.2:9901/listeners # view listener information
yum install jq -y # install the jq command
# Use jq to filter the dynamically discovered clusters out of the config_dump output
curl -s 172.31.15.2:9901/config_dump | jq '.configs[1].dynamic_active_clusters'
[
  {
    "version_info": "411",
    "cluster": {
      "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
      "name": "webcluster",                 # name of the cluster
      "type": "EDS",
      "eds_cluster_config": {
        "eds_config": {
          "api_config_source": {
            "api_type": "GRPC",             # API protocol
            "grpc_services": [
              {
                "envoy_grpc": {
                  "cluster_name": "xds_cluster"   # the upstream xDS-server cluster from which webcluster is discovered
                }
              }
            ],
            "set_node_on_first_message_only": true,
            "transport_api_version": "V3"   # API version
          },
          "resource_api_version": "V3"
        }
      },
      "connect_timeout": "5s",
      "dns_lookup_family": "V4_ONLY"
    },
    "last_updated": "20xx-04-25Txx:13:45.024Z"
  }
]
# View detailed listener information
curl -s 172.31.15.2:9901/config_dump?resource=dynamic_listeners | jq '.configs[0].active_state.listener'
{
  "@type": "type.googleapis.com/envoy.config.listener.v3.Listener",
  "name": "listener_http",
  "address": {
    "socket_address": {
      "address": "0.0.0.0",
      "port_value": 10080
    }
  },
  "filter_chains": [
    {
      "filters": [
        {
          "name": "envoy.filters.network.http_connection_manager",
          "typed_config": {
            "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager",
            "stat_prefix": "http",
            "rds": {
              "config_source": {
                "api_config_source": {
                  "api_type": "GRPC",
                  "grpc_services": [
                    {
                      "envoy_grpc": {
                        "cluster_name": "xds_cluster"
                      }
                    }
                  ],
                  "set_node_on_first_message_only": true,
                  "transport_api_version": "V3"
                },
                "resource_api_version": "V3"
              },
              "route_config_name": "local_route"
            },
            "http_filters": [
              {
                "name": "envoy.filters.http.router"
              }
            ]
          }
        }
      ]
    }
  ]
}
# Verify
curl 172.31.15.2:10080
Edit the configuration file to verify renaming the listener
vi config.yaml
name: myconfig
spec:
  listeners:
  - name: listener_http1 # change the name
    address: 0.0.0.0
    port: 10081          # change the port number
curl 172.31.15.2:9901/listeners # view listener information
# Verify
curl 172.31.15.2:10080
curl 172.31.15.2:10081
Renaming the listener creates a new listener; the original listener still exists and remains reachable.
- Health checking (health-check)
cd servicemesh_in_practise/Cluster-Manager/health-check
# Edit the docker-compose configuration file
vim docker-compose.yaml
services:
  envoy:
    image: envoyproxy/envoy-alpine:v1.21-latest
    environment:       # add the environment variable
      - ENVOY_UID=0    # run Envoy as root (UID 0)
# Start
docker-compose up
curl 172.31.18.2:9901/clusters # view cluster information
curl 172.31.18.2:9901/listeners # view listener information
# Verify
curl 172.31.18.2
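For reference, the behaviour verified below comes from active health checking declared on the cluster. A hedged sketch of such a block, assuming the check probes the sidecar's /livez path used in this demo (timeouts, intervals and thresholds here are illustrative values, not the demo's exact ones):
health_checks:
- timeout: 1s
  interval: 5s
  unhealthy_threshold: 2           # consecutive failures before marking the host unhealthy
  healthy_threshold: 2             # consecutive successes before marking it healthy again
  http_health_check:
    path: /livez
    expected_statuses:
    - start: 200                   # range is [start, end): 200-399 counts as healthy,
      end: 400                     # so the 506 returned after livez=FAIL marks the host unhealthy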
Set the livez check value to FAIL and verify
curl -XPOST -d "livez=FAIL" 172.31.18.11/livez # change the livez value on the web service's sidecar
curl -I 172.31.18.11/livez # after the change, the response code becomes 506
HTTP/1.1 506 Variant Also Negotiates
content-type: text/html; charset=utf-8
content-length: 4
server: envoy
date: Tue, 26 Apr 20xx xx:54:29 GMT
x-envoy-upstream-service-time: 1
# Verify
curl 172.31.18.2
Set the livez check value back to OK and verify
curl -XPOST -d "livez=OK" 172.31.18.11/livez
curl -I 172.31.18.11/livez
HTTP/1.1 200 OK
content-type: text/html; charset=utf-8
content-length: 2
server: envoy
date: Tue, 26 Apr 20xx xx:57:38 GMT
x-envoy-upstream-service-time: 1
# Verify
curl 172.31.18.2
- Outlier detection (outlier-detection)
cd servicemesh_in_practise/Cluster-Manager/outlier-detection
# Edit the docker-compose configuration file
vim docker-compose.yaml
services:
  envoy:
    image: envoyproxy/envoy-alpine:v1.21-latest
    environment:       # add the environment variable
      - ENVOY_UID=0    # run Envoy as root (UID 0)
# Start
docker-compose up
# Verify
curl 172.31.20.2
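Unlike the previous demo's active health checks, outlier detection is passive: Envoy watches the results of real requests and temporarily ejects hosts that keep returning errors, which is why the loop below shows errors for a short while and then stops hitting the failed host. A hedged sketch of such a cluster block (the values are illustrative, not the demo's exact ones):
outlier_detection:
  consecutive_5xx: 3              # eject a host after 3 consecutive 5xx responses
  interval: 10s                   # how often the ejection sweep runs
  base_ejection_time: 10s         # ejection time grows with the number of times a host has been ejected
  max_ejection_percent: 100       # allow ejecting all hosts if necessary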
Simulate a failure: set livez to FAIL and verify
curl -XPOST -d 'livez=FAIL' 172.31.20.12/livez
curl -I 172.31.20.12/livez
# Verify
while true; do curl 172.31.20.2/livez; sleep 0.5; done
Recover from the failure and verify
curl -XPOST -d 'livez=OK' 172.31.20.12/livez
curl -I 172.31.20.12/livez
# Verify
while true; do curl 172.31.20.2/livez; sleep 0.5; done
- least-requests
cd servicemesh_in_practise/Cluster-Manager/least-requests
# Edit the docker-compose configuration file
vim docker-compose.yaml
services:
  envoy:
    image: envoyproxy/envoy-alpine:v1.21-latest
    environment:       # add the environment variable
      - ENVOY_UID=0    # run Envoy as root (UID 0)
# Start
docker-compose up
# Verify
./send-request.sh 172.31.22.2 # send test requests with the script
Send 300 requests, and print the result. This will take a while.
Weight of all endpoints:
Red:Blue:Green = 1:3:5
Response from:
Red:Blue:Green = 35:92:173
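The roughly 1:3:5 distribution above comes from the weighted least-request policy: the cluster sets lb_policy LEAST_REQUEST and each endpoint carries a load_balancing_weight. A sketch of the relevant part, with hostnames assumed from the Red/Blue/Green labels rather than taken from the repo:
- name: webcluster                 # assumed cluster name
  connect_timeout: 0.25s
  type: STRICT_DNS
  lb_policy: LEAST_REQUEST
  load_assignment:
    cluster_name: webcluster
    endpoints:
    - lb_endpoints:
      - endpoint:
          address:
            socket_address: { address: red, port_value: 80 }
        load_balancing_weight: 1
      - endpoint:
          address:
            socket_address: { address: blue, port_value: 80 }
        load_balancing_weight: 3
      - endpoint:
          address:
            socket_address: { address: green, port_value: 80 }
        load_balancing_weight: 5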
- ring-hash
cd servicemesh_in_practise/Cluster-Manager/ring-hash
# Edit the docker-compose configuration file
vim docker-compose.yaml
services:
  envoy:
    image: envoyproxy/envoy-alpine:v1.21-latest
    environment:       # add the environment variable
      - ENVOY_UID=0    # run Envoy as root (UID 0)
# Start
docker-compose up
# Verify
while true; do index=$[$RANDOM%3]; curl -H "User-Agent: Browser_${index}" 172.31.25.2/user-agent && curl -H "User-Agent: Browser_${index}" 172.31.25.2/hostname && echo; sleep .1; done # test in a loop
User-Agent: Browser_0
ServerName: green
User-Agent: Browser_0
ServerName: green
User-Agent: Browser_2
ServerName: red
User-Agent: Browser_0
ServerName: green
User-Agent: Browser_2
ServerName: red
User-Agent: Browser_0
ServerName: green
User-Agent: Browser_2
ServerName: red
User-Agent: Browser_1
ServerName: blue
Requests carrying the same browser ID always land on the same host.
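That stickiness needs two pieces of configuration: the cluster uses the RING_HASH policy, and the route defines a hash_policy telling Envoy what to hash — evidently the User-Agent header here, which is why the same Browser_N always maps to the same backend. A sketch (ring sizes and the cluster name are illustrative assumptions):
# Cluster side
lb_policy: RING_HASH
ring_hash_lb_config:
  minimum_ring_size: 512
  maximum_ring_size: 1048576
# Route side: use the User-Agent header as the hash key
route:
  cluster: webcluster
  hash_policy:
  - header:
      header_name: User-Agent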
- priority-levels
cd servicemesh_in_practise/Cluster-Manager/priority-levels
# Edit the docker-compose configuration file
vim docker-compose.yaml
services:
  envoy:
    image: envoyproxy/envoy-alpine:v1.21-latest
    environment:       # add the environment variable
      - ENVOY_UID=0    # run Envoy as root (UID 0)
# Start
docker-compose up
# Verify
while true; do curl 172.31.29.2; sleep .5;done # with all five nodes healthy, traffic goes only to the three nodes configured with the higher priority
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: green, ServerIP: 172.31.29.13!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: red, ServerIP: 172.31.29.11!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: blue, ServerIP: 172.31.29.12!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: green, ServerIP: 172.31.29.13!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: green, ServerIP: 172.31.29.13!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: red, ServerIP: 172.31.29.11!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: blue, ServerIP: 172.31.29.12!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: red, ServerIP: 172.31.29.11!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: green, ServerIP: 172.31.29.13!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: blue, ServerIP: 172.31.29.12!
Simulate a failure and verify
curl -XPOST -d 'livez=FAIL' 172.31.29.13/livez
while true; do curl 172.31.29.2; sleep .5;done # with the default overprovisioning factor of 1.4, roughly 6.7% of the traffic is scheduled onto the two lower-priority nodes
I have not yet managed to get priority-based scheduling working with the v3 API.
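For reference, priorities are assigned per endpoint group inside load_assignment: priority 0 receives all traffic while it is healthy enough, and the overprovisioning factor (140 by default, i.e. 1.4) decides how much spills over. With one of the three priority-0 hosts failed, the effective capacity is 2/3 x 1.4 ≈ 93.3%, which is where the ~6.7% spill-over comes from. A sketch with assumed endpoint names (the priority-1 hostnames are hypothetical):
load_assignment:
  cluster_name: webcluster
  policy:
    overprovisioning_factor: 140   # the default value, shown here only for clarity
  endpoints:
  - priority: 0                    # preferred group: red, blue, green
    lb_endpoints:
    - endpoint:
        address:
          socket_address: { address: red, port_value: 80 }
    - endpoint:
        address:
          socket_address: { address: blue, port_value: 80 }
    - endpoint:
        address:
          socket_address: { address: green, port_value: 80 }
  - priority: 1                    # fallback group, used only when priority 0 degrades
    lb_endpoints:
    - endpoint:
        address:
          socket_address: { address: gray, port_value: 80 }
    - endpoint:
        address:
          socket_address: { address: black, port_value: 80 }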
- lb-subsets
Environment overview
| endpoint | stage | version | type | xlarge |
|---|---|---|---|---|
| e1 | prod | 1.0 | std | true |
| e2 | prod | 1.0 | std | |
| e3 | prod | 1.1 | std | |
| e4 | prod | 1.1 | std | |
| e5 | prod | 1.0 | bigmem | |
| e6 | prod | 1.1 | bigmem | |
| e7 | dev | 1.2-pre | std | |
Subset classification
keys: [stage, type], subsets as follows
[prod,std] - e1,e2,e3,e4
[prod,bigmem] - e5,e6
[dev,std] - e7
[dev,bigmem] - none
keys: [stage,version]
[prod,1.0] - e1,e2,e5
[prod,1.1] - e3,e4,e6
[prod,1.2-pre] - none
[dev,1.0] - none
[dev,1.1] - none
[dev,1.2-pre] - e7
keys:[version]
[1.0] - e1,e2,e5
[1.1] - e3,e4,e6
[1.2-pre] - e7
keys:[xlarge,version]
[true,1.0] - e1
[true,1.1] - none
[true,1.2-pre] - none
In total, the 10 non-empty subsets above are generated, plus one default subset:
stage=prod, type=std, version=1.0 - e1,e2
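These subsets are generated by the cluster's lb_subset_config: one subset_selector per key set listed above, plus a default_subset that is used as the fallback when a route carries no matching metadata. A sketch consistent with the description above (check the repo for the exact file):
lb_subset_config:
  fallback_policy: DEFAULT_SUBSET
  default_subset:                  # the default subset mentioned above: e1, e2
    stage: "prod"
    type: "std"
    version: "1.0"
  subset_selectors:
  - keys: ["stage", "type"]
  - keys: ["stage", "version"]
  - keys: ["version"]
  - keys: ["xlarge", "version"]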
cd servicemesh_in_practise/Cluster-Manager/lb-subsets
# Edit the docker-compose configuration file
vim docker-compose.yaml
services:
  envoy:
    image: envoyproxy/envoy-alpine:v1.21-latest
    environment:       # add the environment variable
      - ENVOY_UID=0    # run Envoy as root (UID 0)
# Replace the routes section with the following
routes:
- match:
    prefix: "/"
    headers:
    - name: x-custom-version
      exact_match: pre-release
    - name: x-environment-state
      exact_match: dev
  route:
    cluster: webcluster1
    metadata_match:
      filter_metadata:
        envoy.lb:
          version: "1.2-pre"
          stage: "dev"
- match:
    prefix: "/"
    headers:
    - name: x-custom-version
      exact_match: v1.0
    - name: x-environment-state
      exact_match: prod
  route:
    cluster: webcluster1
    metadata_match:
      filter_metadata:
        envoy.lb:
          version: "1.0"
          stage: "prod"
- match:
    prefix: "/"
    headers:
    - name: x-custom-version
      exact_match: v1.1
    - name: x-environment-state
      exact_match: prod
  route:
    cluster: webcluster1
    metadata_match:
      filter_metadata:
        envoy.lb:
          version: "1.1"
          stage: "prod"
- match:
    prefix: "/"
    headers:
    - name: x-hardware-test
      exact_match: memory
    - name: x-environment-state
      exact_match: prod
  route:
    cluster: webcluster1
    metadata_match:
      filter_metadata:
        envoy.lb:
          type: "bigmem"
          stage: "prod"
- match:
    prefix: "/"
    headers:
    - name: x-hardware-test
      exact_match: std
    - name: x-environment-state
      exact_match: prod
  route:
    cluster: webcluster1
    metadata_match:
      filter_metadata:
        envoy.lb:
          type: "std"
          stage: "prod"
- match:
    prefix: "/"
    headers:
    - name: x-hardware-test
      exact_match: std
    - name: x-environment-state
      exact_match: dev
  route:
    cluster: webcluster1
    metadata_match:
      filter_metadata:
        envoy.lb:
          type: "std"
          stage: "dev"
- match:
    prefix: "/"
    headers:
    - name: x-custom-version
      exact_match: v1.0
  route:
    cluster: webcluster1
    metadata_match:
      filter_metadata:
        envoy.lb:
          version: "1.0"
- match:
    prefix: "/"
    headers:
    - name: x-custom-version
      exact_match: v1.1
  route:
    cluster: webcluster1
    metadata_match:
      filter_metadata:
        envoy.lb:
          version: "1.1"
- match:
    prefix: "/"
    headers:
    - name: x-custom-version
      exact_match: pre-release
  route:
    cluster: webcluster1
    metadata_match:
      filter_metadata:
        envoy.lb:
          version: "1.2-pre"
- match:
    prefix: "/"
    headers:
    - name: x-custom-xlarge
      exact_match: isTrue
    - name: x-custom-version
      exact_match: pre-release
  route:
    cluster: webcluster1
    metadata_match:
      filter_metadata:
        envoy.lb:
          version: "1.0"
          xlarge: "true"
- match:
    prefix: "/"
  route:
    weighted_clusters:
      clusters:
      - name: webcluster1
        weight: 90
        metadata_match:
          filter_metadata:
            envoy.lb:
              version: "1.0"
      - name: webcluster1
        weight: 10
        metadata_match:
          filter_metadata:
            envoy.lb:
              version: "1.1"
    metadata_match:
      filter_metadata:
        envoy.lb:
          stage: "prod"
http_filters:
- name: envoy.filters.http.router
# Start
docker-compose up
# Verify
./test.sh 172.31.33.2 # send 200 requests; per the weighted-cluster ratio, roughly 9:1
Requests: v1.0:v1.1 = 184:16
curl -H "x-custom-version: v1.0" -H "x-environment-state: prod" 172.31.33.2/hostname # 调用1.0,prod子集
ServerName: e1
ServerName: e2
ServerName: e5
curl -H "x-custom-version: v1.1" -H "x-environment-state: prod" 172.31.33.2/hostname # 调用1.1,prod子集
curl -H "x-custom-version: pre-release" -H "x-environment-state: dev" 172.31.33.2/hostname # 调用1.2-pre,dev子集
ServerName: e7
curl -H "x-environment-state: prod" -H "x-hardware-test: memory" 172.31.33.2/hostname # 调用prod,bigmem子集
ServerName: e5
ServerName: e6
curl -H "x-environment-state: prod" -H "x-hardware-test: std" 172.31.33.2/hostname # 调用prod,std子集
ServerName: e1
ServerName: e2
ServerName: e3
ServerName: e4
curl -H "x-environment-state: dev" -H "x-hardware-test: std" 172.31.33.2/hostname # 调用dev,std子集
ServerName: e7
curl -H "x-custom-version: v1.0" 172.31.33.2/hostname # 调用1.0子集
ServerName: e1
ServerName: e2
ServerName: e5
curl -H "x-custom-version: v1.1" 172.31.33.2/hostname # 调用1.1子集
ServerName: e3
ServerName: e4
ServerName: e6
curl -H "x-custom-version: pre-release" 172.31.33.2/hostname # 调用1.2-pre子集
ServerName: e7
curl -H "x-custom-version: pre-release" -H "x-custom-xlarge: isTrue" 172.31.33.2/hostname # 调用true,1.0子集
ServerName: e1
curl 172.31.33.2/hostname # hits the default subset
ServerName: e1
ServerName: e2
- circuit-breaker
cd servicemesh_in_practise/Cluster-Manager/circuit-breaker
# Edit the docker-compose configuration file
vim docker-compose.yaml
services:
  envoy:
    image: envoyproxy/envoy-alpine:v1.21-latest
    environment:       # add the environment variable
      - ENVOY_UID=0    # run Envoy as root (UID 0)
# Start
docker-compose up
# Verify
./send-requests.sh http://172.31.35.2 300 # send 300 requests to the first cluster; once the maximum connection limit is exceeded, 503 responses start to appear
./send-requests.sh http://172.31.35.2/livez 300 # send 300 requests to the second cluster; likewise, 503 responses appear once the limit is exceeded
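The 503s above are produced by the cluster's circuit_breakers thresholds: once the connection or pending-request limits are exceeded, Envoy rejects the overflow locally with 503 instead of queueing it. A hedged sketch of such a block (the demo's actual limits may differ):
circuit_breakers:
  thresholds:
  - priority: DEFAULT
    max_connections: 1            # at most 1 upstream connection
    max_pending_requests: 1       # at most 1 request waiting for a connection
    max_retries: 3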
Simulate a failure and verify
curl -XPOST -d 'livez=FAIL' 172.31.35.14/livez
./send-requests.sh http://172.31.35.2/livez 300 # sending requests now also triggers the outlier_detection settings: the logs show that after three consecutive 506 errors the failed host is ejected from the cluster, so even more of the requests return 503