提示:为便于建立测试环境,示例将以 docker-compose工具编排运行,相关的代码位于
[https://github.com/ikubernetes/servicemesh_in_practise.git]
1、envoy-echo
telnet ip 端口,输入什么信息,会显示什么信息
envoy.yaml
static_resources:
listeners: - name: listener_0
address:
socket_address:
address: 0.0.0.0 port_value: 8080 filter_chains: - filters: - name: envoy.filters.network.echo
Dockerfile
FROM envoyproxy/envoy-alpine:v1.20.0 ADD envoy.yaml /etc/envoy/
docker-compose.yaml
version: '3.3' services:
envoy:
image: envoyproxy/envoy-alpine:v1.20.0 volumes: - ./envoy.yaml:/etc/envoy/envoy.yaml
networks:
envoymesh:
ipv4_address: 172.31.4.2 aliases: - envoy-echo networks:
envoymesh:
driver: bridge
ipam:
config: - subnet: 172.31.4.0/24
运行envoy-echo实验
root@test:/apps/servicemesh_in_practise/Envoy-Basics/envoy-echo# docker-compose up
telnet envoy的ip+端口
root@test:~# telnet 172.31.4.2 8080 Trying 172.31.4.2...
Connected to 172.31.4.2.
Escape character is '^]'.
#输入什么,会显示什么
root@test:~# telnet 172.31.4.2 8080 Trying 172.31.4.2...
Connected to 172.31.4.2.
Escape character is '^]'.
abc
abc
ni hao
ni hao
修改envoy配置文件在envoy容器中测试
root@test:/apps/servicemesh_in_practise/Envoy-Basics/envoy-echo# docker-compose down
编辑envoy-v2.yaml
admin:
access_log_path: /dev/null address:
socket_address:
address: 127.0.0.1 port_value: 0 static_resources:
clusters:
name: cluster_0
connect_timeout: 0.25s
load_assignment:
cluster_name: cluster_0
endpoints: - lb_endpoints: - endpoint:
address:
socket_address:
address: 127.0.0.1 port_value: 0 listeners: - name: listener_0
address:
socket_address:
address: 127.0.0.1 port_value: 8080 filter_chains: - filters: - name: envoy.filters.network.echo
docker-compose.yaml
version: '3.3' services:
envoy:
image: envoyproxy/envoy-alpine:v1.20.0 volumes: - ./envoy-v2.yaml:/etc/envoy/envoy.yaml #使用envoy-v2.yaml
networks:
envoymesh:
ipv4_address: 172.31.4.2 aliases: - envoy-echo networks:
envoymesh:
driver: bridge
ipam:
config: - subnet: 172.31.4.0/24
再次运行
root@test:/apps/servicemesh_in_practise/Envoy-Basics/envoy-echo# docker-compose up
进入容器
root@test:/apps/servicemesh_in_practise/Envoy-Basics/envoy-echo# docker-compose exec envoy sh
/ # nc 127.0.0.1 8080 abv
abv
ni hao world
ni hao world
#输入什么,就显示什么
2、http-ingress
实验环境
两个Service:
envoy:Sidecar Proxy
webserver01:第一个后端服务,地址为127.0.0.1
访问envoy的ip+端口,便可访问envoy后端的服务
envoy.yaml
static_resources:
listeners: - name: listener_0
address:
socket_address: { address: 0.0.0.0, port_value: 80 }
filter_chains: - filters: - name: envoy.filters.network.http_connection_manager
typed_config: "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
stat_prefix: ingress_http
codec_type: AUTO
route_config:
name: local_route
virtual_hosts: - name: web_service_1
domains: ["*"]
routes: - match: { prefix: "/" }
route: { cluster: local_cluster }
http_filters: - name: envoy.filters.http.router
clusters: - name: local_cluster
connect_timeout: 0.25s
type: STATIC
lb_policy: ROUND_ROBIN
load_assignment:
cluster_name: local_cluster
endpoints: - lb_endpoints: - endpoint:
address:
socket_address: { address: 127.0.0.1, port_value: 8080 }
docker-compose.yaml
version: '3' services:
envoy:
image: envoyproxy/envoy-alpine:v1.20.0 environment: - ENVOY_UID=0 #docker-compose up报error initializing configuration '/etc/envoy/envoy.yaml': cannot bind '0.0.0.0:80': Permission denied需要添加该环境变量
volumes: - ./envoy.yaml:/etc/envoy/envoy.yaml
networks:
envoymesh:
ipv4_address: 172.31.3.2 aliases: - ingress
webserver01:
image: ikubernetes/demoapp:v1.0 environment: - PORT=8080
- HOST=127.0.0.1 network_mode: "service:envoy" depends_on: - envoy
networks:
envoymesh:
driver: bridge
ipam:
config: - subnet: 172.31.3.0/24
访问172.31.3.2:80,可以被envoy转发到后端webserver01上
root@test:/apps/servicemesh_in_practise-develop/Envoy-Basics/http-ingress# docker-compose up
#重新克隆一个窗口多访问几次
root@test:/apps/servicemesh_in_practise/Envoy-Basics/http-ingress# curl 172.31.3.2 iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: d4eda0b2b84c, ServerIP: 172.31.3.2! root@test:/apps/servicemesh_in_practise/Envoy-Basics/http-ingress# curl 172.31.3.2 iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: dc4bd7a1316f, ServerIP: 172.31.3.2! root@test:/apps/servicemesh_in_practise/Envoy-Basics/http-ingress# curl 172.31.3.2 iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: dc4bd7a1316f, ServerIP: 172.31.3.2! root@test:/apps/servicemesh_in_practise/Envoy-Basics/http-ingress# curl 172.31.3.2 iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.1, ServerName: dc4bd7a1316f, ServerIP: 172.31.3.2! #在前台运行的envoy程序上查看日志信息
......
webserver01_1 | * Running on http://127.0.0.1:8080/ (Press CTRL+C to quit)
webserver01_1 | 127.0.0.1 - - [01/Dec/2021 08:36:37] "GET / HTTP/1.1" 200 - webserver01_1 | 127.0.0.1 - - [01/Dec/2021 08:38:49] "GET / HTTP/1.1" 200 - webserver01_1 | 127.0.0.1 - - [01/Dec/2021 08:38:50] "GET / HTTP/1.1" 200 -
3、http-egress
实验环境
三个Service:
envoy:Front Proxy,地址为172.31.4.2 webserver01:第一个外部服务,地址为172.31.4.11 webserver02:第二个外部服务,地址为172.31.4.12
访问envoy的ip,可以转发到webserver01和webserver02上
envoy.yaml
static_resources:
listeners: - name: listener_0
address:
socket_address: { address: 127.0.0.1, port_value: 80 }
filter_chains: - filters: - name: envoy.filters.network.http_connection_manager
typed_config: "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
stat_prefix: ingress_http
codec_type: AUTO
route_config:
name: local_route
virtual_hosts: - name: web_service_1
domains: ["*"]
routes: - match: { prefix: "/" }
route: { cluster: web_cluster }
http_filters: - name: envoy.filters.http.router
clusters: - name: web_cluster
connect_timeout: 0.25s
type: STATIC
lb_policy: ROUND_ROBIN
load_assignment:
cluster_name: web_cluster
endpoints: - lb_endpoints: - endpoint:
address:
socket_address: { address: 172.31.4.11, port_value: 80 } - endpoint:
address:
socket_address: { address: 172.31.4.12, port_value: 80 }
docker-compose.yaml
version: '3.3' services:
envoy:
image: envoyproxy/envoy-alpine:v1.20.0 environment: - ENVOY_UID=0 volumes: - ./envoy.yaml:/etc/envoy/envoy.yaml
networks:
envoymesh:
ipv4_address: 172.31.4.2 aliases: - front-proxy
depends_on: - webserver01 - webserver02
client:
image: ikubernetes/admin-toolbox:v1.0 network_mode: "service:envoy" depends_on: - envoy
webserver01:
image: ikubernetes/demoapp:v1.0
hostname: webserver01
networks:
envoymesh:
ipv4_address: 172.31.4.11 aliases: - webserver01
webserver02:
image: ikubernetes/demoapp:v1.0
hostname: webserver02
networks:
envoymesh:
ipv4_address: 172.31.4.12 aliases: - webserver02
networks:
envoymesh:
driver: bridge
ipam:
config: - subnet: 172.31.4.0/24
实验验证
root@test:/apps/servicemesh_in_practise-develop/Envoy-Basics/http-egress# docker-compose up
另外克隆一个窗口,进入容器
root@test:/apps/servicemesh_in_practise-develop/Envoy-Basics/http-egress# docker-compose exec client sh [root@b8f9b62f2771 /]#
[root@b8f9b62f2771 /]# curl 127.0.0.1 iKubernetes demoapp v1.0 !! ClientIP: 172.31.4.2, ServerName: webserver01, ServerIP: 172.31.4.11! [root@b8f9b62f2771 /]# curl 127.0.0.1 iKubernetes demoapp v1.0 !! ClientIP: 172.31.4.2, ServerName: webserver02, ServerIP: 172.31.4.12! [root@b8f9b62f2771 /]# curl 127.0.0.1 iKubernetes demoapp v1.0 !! ClientIP: 172.31.4.2, ServerName: webserver01, ServerIP: 172.31.4.11! [root@b8f9b62f2771 /]# curl 127.0.0.1 iKubernetes demoapp v1.0 !! ClientIP: 172.31.4.2, ServerName: webserver02, ServerIP: 172.31.4.12! #在容器访问127.0.0.1,envoy会把请求以轮询的方式转发到webserver01和webserver02上
4、http-front-proxy
实验环境
三个Service:
envoy:Front Proxy,地址为172.31.2.2 webserver01:第一个后端服务,地址为172.31.2.11 webserver02:第二个后端服务,地址为172.31.2.12 #把域名www.ik8s.io和www.magedu.com映射到172.31.2.2 #访问域名www.ik8s.io会轮询转发到webserver01和webserver02上
#访问域名www.magedu.com会跳转到www.ik8s.io,并轮询转发到webserver01和webserver02上
envoy.yaml
static_resources:
listeners: - name: listener_0
address:
socket_address: { address: 0.0.0.0, port_value: 80 }
filter_chains: - filters: - name: envoy.filters.network.http_connection_manager
typed_config: "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
stat_prefix: ingress_http
codec_type: AUTO
route_config:
name: local_route
virtual_hosts: - name: web_service_1
domains: ["*.ik8s.io", "ik8s.io"]
routes: - match: { prefix: "/" }
route: { cluster: local_cluster } - name: web_service_2
domains: ["*.magedu.com", "magedu.com"]
routes: - match: { prefix: "/" }
redirect:
host_redirect: "www.ik8s.io" http_filters: - name: envoy.filters.http.router
clusters: - name: local_cluster
connect_timeout: 0.25s
type: STATIC
lb_policy: ROUND_ROBIN
load_assignment:
cluster_name: local_cluster
endpoints: - lb_endpoints: - endpoint:
address:
socket_address: { address: 172.31.2.11, port_value: 8080 } - endpoint:
address:
socket_address: { address: 172.31.2.12, port_value: 8080 }
docker-compose.yaml
version: '3.3' services:
envoy:
image: envoyproxy/envoy-alpine:v1.20.0 environment: - ENVOY_UID=0 volumes: - ./envoy.yaml:/etc/envoy/envoy.yaml
networks:
envoymesh:
ipv4_address: 172.31.2.2 aliases: - front-proxy
depends_on: - webserver01 - webserver02
webserver01:
image: ikubernetes/demoapp:v1.0 environment: - PORT=8080
hostname: webserver01
networks:
envoymesh:
ipv4_address: 172.31.2.11 aliases: - webserver01
webserver02:
image: ikubernetes/demoapp:v1.0 environment: - PORT=8080
hostname: webserver02
networks:
envoymesh:
ipv4_address: 172.31.2.12 aliases: - webserver02
networks:
envoymesh:
driver: bridge
ipam:
config: - subnet: 172.31.2.0/24
实验验证
root@test:/apps/servicemesh_in_practise-develop/Envoy-Basics/http-front-proxy# docker-compose up
另外克隆一个窗口
#访问域名www.ik8s.io,envoy会以轮询的方式转发到webserver01和webserver02上
root@test:/apps/servicemesh_in_practise-develop/Envoy-Basics/http-front-proxy# curl -H "host: www.ik8s.io" 172.31.2.2 iKubernetes demoapp v1.0 !! ClientIP: 172.31.2.2, ServerName: webserver01, ServerIP: 172.31.2.11! root@test:/apps/servicemesh_in_practise-develop/Envoy-Basics/http-front-proxy# curl -H "host: www.ik8s.io" 172.31.2.2 iKubernetes demoapp v1.0 !! ClientIP: 172.31.2.2, ServerName: webserver02, ServerIP: 172.31.2.12! #访问域名www.magedu.com会跳转到www.ik8s.io上
root@test:/apps/servicemesh_in_practise-develop/Envoy-Basics/http-front-proxy# curl -I -H "host: www.magedu.com" 172.31.2.2 HTTP/1.1 301 Moved Permanently
location: http://www.ik8s.io/ #跳转到了www.ik8s.io上
date: Wed, 01 Dec 2021 14:07:55 GMT
server: envoy
transfer-encoding: chunked
5、tcp-front-proxy
实验环境
三个Service:
envoy:Front Proxy,地址为172.31.1.2 webserver01:第一个后端服务,地址为172.31.1.11 webserver02:第二个后端服务,地址为172.31.1.12 #访问envoy的ip:172.31.1.2,会以轮询的方式转发到webserver01和webserver02上
envoy.yaml
static_resources:
listeners:
name: listener_0
address:
socket_address: { address: 0.0.0.0, port_value: 80 }
filter_chains: - filters: - name: envoy.tcp_proxy
typed_config: "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy
stat_prefix: tcp
cluster: local_cluster
clusters: - name: local_cluster
connect_timeout: 0.25s
type: STATIC
lb_policy: ROUND_ROBIN
load_assignment:
cluster_name: local_cluster
endpoints: - lb_endpoints: - endpoint:
address:
socket_address: { address: 172.31.1.11, port_value: 8080 } - endpoint:
address:
socket_address: { address: 172.31.1.12, port_value: 8080 }
docker-compose.yaml
version: '3.3' services:
envoy:
image: envoyproxy/envoy-alpine:v1.20.0 environment: - ENVOY_UID=0 volumes: - ./envoy.yaml:/etc/envoy/envoy.yaml
networks:
envoymesh:
ipv4_address: 172.31.1.2 aliases: - front-proxy
depends_on: - webserver01 - webserver02
webserver01:
image: ikubernetes/demoapp:v1.0 environment: - PORT=8080
hostname: webserver01
networks:
envoymesh:
ipv4_address: 172.31.1.11 aliases: - webserver01
webserver02:
image: ikubernetes/demoapp:v1.0 environment: - PORT=8080
hostname: webserver02
networks:
envoymesh:
ipv4_address: 172.31.1.12 aliases: - webserver02
networks:
envoymesh:
driver: bridge
ipam:
config: - subnet: 172.31.1.0/24
实验验证
root@test:/apps/servicemesh_in_practise-develop/Envoy-Basics/tcp-front-proxy# docker-compose up
另外克隆一个窗口,访问envoy的ip:172.31.1.2会以轮询的方式转发到webserver01和webserver02上
root@test:/apps/servicemesh_in_practise-develop/Envoy-Basics/tcp-front-proxy# curl 172.31.1.2 iKubernetes demoapp v1.0 !! ClientIP: 172.31.1.2, ServerName: webserver01, ServerIP: 172.31.1.11! root@test:/apps/servicemesh_in_practise-develop/Envoy-Basics/tcp-front-proxy# curl 172.31.1.2 iKubernetes demoapp v1.0 !! ClientIP: 172.31.1.2, ServerName: webserver02, ServerIP: 172.31.1.12!
6、admin-interface
实验环境
三个Service:
envoy:Front Proxy,地址为172.31.5.2 webserver01:第一个后端服务,地址为172.31.5.11 webserver02:第二个后端服务,地址为172.31.5.12 #访问envoy的9901端口可以获取相应的信息
envoy.yaml
admin:
profile_path: /tmp/envoy.prof
access_log_path: /tmp/admin_access.log
address:
socket_address:
address: 0.0.0.0 #在生产环境配置127.0.0.1;否则不安全
port_value: 9901 static_resources:
listeners: - name: listener_0
address:
socket_address: { address: 0.0.0.0, port_value: 80 }
filter_chains: - filters: - name: envoy.filters.network.http_connection_manager
typed_config: "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
stat_prefix: ingress_http
codec_type: AUTO
route_config:
name: local_route
virtual_hosts: - name: web_service_1
domains: ["*.ik8s.io", "ik8s.io"]
routes: - match: { prefix: "/" }
route: { cluster: local_cluster } - name: web_service_2
domains: ["*.magedu.com", "magedu.com"]
routes: - match: { prefix: "/" }
redirect:
host_redirect: "www.ik8s.io" http_filters: - name: envoy.filters.http.router
clusters: - name: local_cluster
connect_timeout: 0.25s
type: STATIC
lb_policy: ROUND_ROBIN
load_assignment:
cluster_name: local_cluster
endpoints: - lb_endpoints: - endpoint:
address:
socket_address: { address: 172.31.5.11, port_value: 8080 } - endpoint:
address:
socket_address: { address: 172.31.5.12, port_value: 8080 }
docker-compose.yaml
services:
envoy:
image: envoyproxy/envoy-alpine:v1.20.0 environment: - ENVOY_UID=0 volumes: - ./envoy.yaml:/etc/envoy/envoy.yaml
networks:
envoymesh:
ipv4_address: 172.31.5.2 aliases: - front-proxy
depends_on: - webserver01 - webserver02
webserver01:
image: ikubernetes/demoapp:v1.0 environment: - PORT=8080
hostname: webserver01
networks:
envoymesh:
ipv4_address: 172.31.5.11 aliases: - webserver01
webserver02:
image: ikubernetes/demoapp:v1.0 environment: - PORT=8080
hostname: webserver02
networks:
envoymesh:
ipv4_address: 172.31.5.12 aliases: - webserver02
networks:
envoymesh:
driver: bridge
ipam:
config: - subnet: 172.31.5.0/24
实验验证
root@test:/apps/servicemesh_in_practise-develop/Envoy-Basics/admin-interface# docker-compose up
另外克隆一个窗口访问172.31.5.2:9901
#显示帮助信息
root@test:/apps/servicemesh_in_practise-develop/Envoy-Basics/admin-interface# curl 172.31.5.2:9901/help
admin commands are: /: Admin home page /certs: print certs on machine /clusters: upstream cluster status /config_dump: dump current Envoy configs (experimental) /contention: dump current Envoy mutex contention stats (if enabled) /cpuprofiler: enable/disable the CPU profiler /drain_listeners: drain listeners /healthcheck/fail: cause the server to fail health checks /healthcheck/ok: cause the server to pass health checks /heapprofiler: enable/disable the heap profiler /help: print out list of admin commands /hot_restart_version: print the hot restart compatibility version /init_dump: dump current Envoy init manager information (experimental) /listeners: print listener info
/logging: query/change logging levels /memory: print current allocation/heap usage /quitquitquit: exit the server /ready: print server state, return 200 if LIVE, otherwise return 503
/reopen_logs: reopen access logs /reset_counters: reset all counters to zero /runtime: print runtime values /runtime_modify: modify runtime values /server_info: print server version/status information /stats: print server stats /stats/prometheus: print server stats in prometheus format /stats/recentlookups: Show recent stat-name lookups /stats/recentlookups/clear: clear list of stat-name lookups and counter /stats/recentlookups/disable: disable recording of reset stat-name lookup names /stats/recentlookups/enable: enable recording of reset stat-name lookup names
# 查看完整的配置信息
root@test:/apps/servicemesh_in_practise-develop/Envoy-Basics/admin-interface# curl 172.31.5.2:9901/config_dump
......
}
]
},
{ "name": "web_service_2", "domains": [ "*.magedu.com", "“magedu.com\"" ], "routes": [
{ "match": { "prefix": "/" }, "redirect": { "host_redirect": "www.ik8s.io" }
}
]
}
]
}, "last_updated": "2021-12-01T14:26:32.586Z" ......
#列出各Listener
root@test:/apps/servicemesh_in_practise-develop/Envoy-Basics/admin-interface# curl 172.31.5.2:9901/listeners
listener_0::0.0.0.0:80 #列出各cluster
root@test:/apps/servicemesh_in_practise-develop/Envoy-Basics/admin-interface# curl 172.31.5.2:9901/clusters
local_cluster::observability_name::local_cluster
local_cluster::default_priority::max_connections::1024 local_cluster::default_priority::max_pending_requests::1024 local_cluster::default_priority::max_requests::1024 local_cluster::default_priority::max_retries::3 local_cluster::high_priority::max_connections::1024 local_cluster::high_priority::max_pending_requests::1024 local_cluster::high_priority::max_requests::1024 local_cluster::high_priority::max_retries::3 local_cluster::added_via_api::false local_cluster::172.31.5.11:8080::cx_active::0 local_cluster::172.31.5.11:8080::cx_connect_fail::0 local_cluster::172.31.5.11:8080::cx_total::0 local_cluster::172.31.5.11:8080::rq_active::0 local_cluster::172.31.5.11:8080::rq_error::0 local_cluster::172.31.5.11:8080::rq_success::0 local_cluster::172.31.5.11:8080::rq_timeout::0 local_cluster::172.31.5.11:8080::rq_total::0 local_cluster::172.31.5.11:8080::hostname::
local_cluster::172.31.5.11:8080::health_flags::healthy
local_cluster::172.31.5.11:8080::weight::1 local_cluster::172.31.5.11:8080::region::
local_cluster::172.31.5.11:8080::zone::
local_cluster::172.31.5.11:8080::sub_zone::
local_cluster::172.31.5.11:8080::canary::false local_cluster::172.31.5.11:8080::priority::0 local_cluster::172.31.5.11:8080::success_rate::-1.0 local_cluster::172.31.5.11:8080::local_origin_success_rate::-1.0 local_cluster::172.31.5.12:8080::cx_active::0 local_cluster::172.31.5.12:8080::cx_connect_fail::0 local_cluster::172.31.5.12:8080::cx_total::0 local_cluster::172.31.5.12:8080::rq_active::0 local_cluster::172.31.5.12:8080::rq_error::0 local_cluster::172.31.5.12:8080::rq_success::0 local_cluster::172.31.5.12:8080::rq_timeout::0 local_cluster::172.31.5.12:8080::rq_total::0 local_cluster::172.31.5.12:8080::hostname::
local_cluster::172.31.5.12:8080::health_flags::healthy
local_cluster::172.31.5.12:8080::weight::1 local_cluster::172.31.5.12:8080::region::
local_cluster::172.31.5.12:8080::zone::
local_cluster::172.31.5.12:8080::sub_zone::
local_cluster::172.31.5.12:8080::canary::false local_cluster::172.31.5.12:8080::priority::0 local_cluster::172.31.5.12:8080::success_rate::-1.0 local_cluster::172.31.5.12:8080::local_origin_success_rate::-1.0
7、layered-runtime
实验环境
三个Service:
envoy:Front Proxy,地址为172.31.14.2 webserver01:第一个后端服务,地址为172.31.14.11 webserver02:第二个后端服务,地址为172.31.14.12
envoy.yaml
admin:
profile_path: /tmp/envoy.prof
access_log_path: /tmp/admin_access.log
address:
socket_address:
address: 0.0.0.0 port_value: 9901 layered_runtime:
layers: - name: static_layer_0
static_layer:
health_check:
min_interval: 5
- name: admin_layer_0
admin_layer: {}
static_resources:
listeners: - name: listener_0
address:
socket_address: { address: 0.0.0.0, port_value: 80 }
filter_chains: - filters: - name: envoy.filters.network.http_connection_manager
typed_config: "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
stat_prefix: ingress_http
codec_type: AUTO
route_config:
name: local_route
virtual_hosts: - name: web_service_1
domains: ["*.ik8s.io", "ik8s.io"]
routes: - match: { prefix: "/" }
route: { cluster: local_cluster } - name: web_service_2
domains: ["*.magedu.com", "magedu.com"]
routes: - match: { prefix: "/" }
redirect:
host_redirect: "www.ik8s.io" http_filters: - name: envoy.filters.http.router
clusters: - name: local_cluster
connect_timeout: 0.25s
type: STATIC
lb_policy: ROUND_ROBIN
load_assignment:
cluster_name: local_cluster
endpoints: - lb_endpoints: - endpoint:
address:
socket_address: { address: 172.31.14.11, port_value: 8080 } - endpoint:
address:
socket_address: { address: 172.31.14.12, port_value: 8080 }
docker-compose.yaml
version: '3.3' services:
envoy:
image: envoyproxy/envoy-alpine:v1.20.0 environment: - ENVOY_UID=0 volumes: - ./envoy.yaml:/etc/envoy/envoy.yaml
networks:
envoymesh:
ipv4_address: 172.31.14.2 aliases: - front-proxy
depends_on: - webserver01 - webserver02
webserver01:
image: ikubernetes/demoapp:v1.0 environment: - PORT=8080
hostname: webserver01
networks:
envoymesh:
ipv4_address: 172.31.14.11 aliases: - webserver01
webserver02:
image: ikubernetes/demoapp:v1.0 environment: - PORT=8080
hostname: webserver02
networks:
envoymesh:
ipv4_address: 172.31.14.12 aliases: - webserver02
networks:
envoymesh:
driver: bridge
ipam:
config: - subnet: 172.31.14.0/24
实验验证
root@test:/apps/servicemesh_in_practise-develop/Envoy-Basics/layered-runtime# docker-compose up
另外克隆一个窗口
root@test:/apps/servicemesh_in_practise-develop/Envoy-Basics/layered-runtime# curl 172.31.14.2:9901/runtime
{ "entries": { "health_check.min_interval": { "final_value": "5", "layer_values": [ "5", "" ]
}
}, "layers": [ "static_layer_0", "admin_layer_0" ]
}
网友评论