1. Background
In practice there are applications where, at any given time, only one pod in a group is active while the others stay in standby. This requires leader election, so that only the leader's IP is exposed to the outside.
For non-containerized deployments, leader election usually relies on an external coordination component such as ZooKeeper, Consul, or etcd. Containerized applications, however, have no such component available inside the cluster (unless one is deployed separately). The method introduced here works through kube-apiserver, which in effect reuses the cluster's own store (etcd). To minimize changes to the application itself and keep the approach non-invasive, it uses the sidecar pattern (common in service meshes such as Istio). A sketch of the underlying mechanism is shown below.
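To make the mechanism concrete, here is a minimal sketch of apiserver-mediated election using client-go's leaderelection package. This is not the leader-elector source itself; it assumes an older client-go release that still ships the Endpoints-based resource lock (newer releases only offer Lease-based locks), and the names default/example mirror the manifest used later in this post.

package main

import (
    "context"
    "log"
    "os"
    "time"

    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/leaderelection"
    "k8s.io/client-go/tools/leaderelection/resourcelock"
)

func main() {
    cfg, err := rest.InClusterConfig() // running inside the cluster
    if err != nil {
        log.Fatal(err)
    }
    client := kubernetes.NewForConfigOrDie(cfg)

    id, _ := os.Hostname() // the pod name doubles as the candidate identity

    // The lock is an Endpoints object named "example" in namespace "default".
    // The current holder is recorded on that object through kube-apiserver,
    // which ultimately persists it in etcd.
    lock, err := resourcelock.New(
        resourcelock.EndpointsResourceLock,
        "default", "example",
        client.CoreV1(), client.CoordinationV1(),
        resourcelock.ResourceLockConfig{Identity: id},
    )
    if err != nil {
        log.Fatal(err)
    }

    leaderelection.RunOrDie(context.Background(), leaderelection.LeaderElectionConfig{
        Lock:          lock,
        LeaseDuration: 15 * time.Second, // how long an acquired lease is valid
        RenewDeadline: 10 * time.Second, // leader must renew before this elapses
        RetryPeriod:   2 * time.Second,  // candidates retry at this interval
        Callbacks: leaderelection.LeaderCallbacks{
            OnStartedLeading: func(ctx context.Context) {
                log.Printf("%s became the active node", id)
            },
            OnStoppedLeading: func() {
                log.Printf("%s reverted to standby", id)
            },
        },
    })
}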
Normally, when a Deployment in Kubernetes is exposed through a Service, every address under its Endpoints object is exposed. For example:
[root@host229 yaml]# kubectl get ep,svc
NAME               ENDPOINTS                                             AGE
endpoints/tomcat   192.168.0.2:8080,192.168.1.2:8080,192.168.2.3:8080    19d
NAME             TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE
service/tomcat   ClusterIP   10.254.209.95   <none>        80/TCP    19d
[root@host229 yaml]# kubectl describe service/tomcat
Name: tomcat
Namespace: default
Labels: run=tomcat
Annotations: <none>
Selector: run=tomcat
Type: ClusterIP
IP: 10.254.209.95
Port: <unset> 80/TCP
TargetPort: 8080/TCP
Endpoints: 192.168.0.2:8080,192.168.1.2:8080,192.168.2.3:8080
Session Affinity: None
Events: <none>
The leader-elector component used here talks directly to kube-apiserver and manages the Endpoints object itself, so that only the elected leader's address is written into it.
The interaction is illustrated below:
[Figure: leader-elector.png]
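Because the elector runs as a sidecar, the application container can learn whether its own pod is the leader without implementing any election logic. The sketch below assumes the sidecar is additionally started with --http=localhost:4040 (an extra flag supported by the upstream image, but not used in the manifest below); the sidecar then serves the current leader's name as JSON on that address.

package main

import (
    "encoding/json"
    "log"
    "net/http"
    "os"
)

type leaderInfo struct {
    Name string `json:"name"` // pod name of the current leader
}

func main() {
    // The sidecar shares the pod's network namespace, so localhost works.
    resp, err := http.Get("http://localhost:4040")
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()

    var leader leaderInfo
    if err := json.NewDecoder(resp.Body).Decode(&leader); err != nil {
        log.Fatal(err)
    }

    hostname, _ := os.Hostname() // equals the pod name
    if leader.Name == hostname {
        log.Println("this pod is the active node")
    } else {
        log.Printf("standby; current leader is %s", leader.Name)
    }
}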
2. Hands-on
Write the YAML file:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: leader-elector
spec:
  replicas: 3
  template:
    metadata:
      labels:
        run: leader-elector
    spec:
      serviceAccountName: leader-elector-sa
      containers:
      - image: tomcat
        name: tomcat
      - image: gcr.io/google_containers/leader-elector:0.4
        name: leader-elector
        args:
        - --election=example   # name of the Endpoints object used as the lock
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: leader-elector-sa
  namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: leader-elector-cr
rules:
- apiGroups:
  - ""
  # resource types the elector needs to access
  resources:
  - namespaces
  - endpoints
  # verbs the elector needs on those resources
  verbs:
  - list
  - get
  - watch
  - create
  - update
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: leader-elector-crb
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: leader-elector-cr
subjects:
- kind: ServiceAccount
  name: leader-elector-sa
  namespace: default
[root@host229 yaml]# kubectl apply -f leader-elector.yaml
deployment.extensions/leader-elector created
serviceaccount/leader-elector-sa created
clusterrole.rbac.authorization.k8s.io/leader-elector-cr created
clusterrolebinding.rbac.authorization.k8s.io/leader-elector-crb created
[root@host229 yaml]# kubectl get pod -o wide
NAME                              READY     STATUS    RESTARTS   AGE       IP             NODE
leader-elector-75fdcf4f46-2bdq6   2/2       Running   0          3m        192.168.2.16   host227
leader-elector-75fdcf4f46-4l84s   2/2       Running   0          3m        192.168.1.11   host228
leader-elector-75fdcf4f46-kmr9w   2/2       Running   0          3m        192.168.3.14   host214
# check which pod is the leader
[root@host229 yaml]# kubectl logs deployment.apps/leader-elector leader-elector
Found 3 pods, using pod/leader-elector-75fdcf4f46-kmr9w
leader-elector-75fdcf4f46-kmr9w is the leader
I1127 02:32:37.867264 9 leaderelection.go:180] sucessfully acquired lease default/example
[root@host229 yaml]# kubectl get ep
NAME      ENDPOINTS      AGE
example   192.168.3.14   4m
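Besides the address shown above, the election record itself (holder identity and lease timestamps) is stored by the elector in the control-plane.alpha.kubernetes.io/leader annotation of the lock object. Below is a small out-of-cluster sketch that reads it back; the kubeconfig path is an assumption for this environment.

package main

import (
    "context"
    "fmt"
    "log"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    cfg, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/config")
    if err != nil {
        log.Fatal(err)
    }
    client := kubernetes.NewForConfigOrDie(cfg)

    ep, err := client.CoreV1().Endpoints("default").Get(
        context.Background(), "example", metav1.GetOptions{})
    if err != nil {
        log.Fatal(err)
    }

    // The elector serializes its record (holder identity, lease times)
    // into this well-known annotation on the lock object.
    fmt.Println(ep.Annotations["control-plane.alpha.kubernetes.io/leader"])
}

The same record is also visible with kubectl get ep example -o yaml under metadata.annotations, and deleting the current leader pod (e.g. kubectl delete pod leader-elector-75fdcf4f46-kmr9w) should cause both the annotation and the endpoint address to switch to a newly elected pod.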