1. 准备
1.1 需求
需要 go 1.19以上
1.2 获取配置文件
将k8s集群master节点的 ~/.kube/config 文件拷贝到代码中合适的位置
本文将
~/.kube/config
文件内容拷贝至代码:conf/kube.conf
文件中。
2. 几个包的说明
-
k8s.io/api/apps/v1
:对应 K8S apiVersion: apps/v1
接口操作的对象
如Deployment、DaemonSet、StatefulSet等
-
k8s.io/api/core/v1
:对应 K8S apiVersion: v1
接口操作的对象
如:ConfigMap、Service、NameSpace等
-
k8s.io/apimachinery/pkg/apis/meta/v1
:对对象的实际操作,如增删改查等。 -
k8s.io/client-go/kubernetes
用于连接k8s集群
3. 连接k8s集群
package main
import (
"fmt"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
)
// ConnectK8s builds a Kubernetes clientset from the kubeconfig file at
// conf/kube.conf (a copy of the cluster master's ~/.kube/config).
// It returns the connected *kubernetes.Clientset, or a wrapped error
// describing which step failed.
func ConnectK8s() (clientSet *kubernetes.Clientset, err error) {
	configPath := "conf/kube.conf"
	// Empty masterURL: take the API server address from the kubeconfig itself.
	config, err := clientcmd.BuildConfigFromFlags("", configPath)
	if err != nil {
		return nil, fmt.Errorf("loading kubeconfig %q: %w", configPath, err)
	}
	clientSet, err = kubernetes.NewForConfig(config)
	if err != nil {
		return nil, fmt.Errorf("creating clientset: %w", err)
	}
	return clientSet, nil
}
4. Node
4.1 node相关结构体
4.1.1 NodeList
所在包:"k8s.io/api/core/v1"
// NodeList is the result type of the Nodes().List call: standard list
// metadata plus the Items slice holding one Node per cluster node.
// (Excerpt from k8s.io/api/core/v1 — kept verbatim for reference.)
type NodeList struct {
v1.TypeMeta `json:",inline"`
v1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
Items []Node `json:"items" protobuf:"bytes,2,rep,name=items"` // the actual nodes
}
其中Items
中各Node结构体如下:
4.1.2 Node
所在包:"k8s.io/api/core/v1"
// Node mirrors a single cluster node object: type/object metadata,
// the desired Spec, and the observed Status.
// (Excerpt from k8s.io/api/core/v1 — kept verbatim for reference.)
type Node struct {
v1.TypeMeta `json:",inline"`
v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
Spec NodeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
Status NodeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
其中各成员详细说明如下:
4.1.3 TypeMeta
所在包 "k8s.io/apimachinery/pkg/apis/meta/v1"
// TypeMeta carries the object's Kind and APIVersion — the two fields at the
// top of every Kubernetes yml manifest.
// (Excerpt from k8s.io/apimachinery/pkg/apis/meta/v1 — kept verbatim.)
type TypeMeta struct {
Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"`
APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"`
}
其对应yml文件的
apiVersion: v1
kind: Node
4.1.4 ObjectMeta
所在包:"k8s.io/apimachinery/pkg/apis/meta/v1"
// ObjectMeta is the `metadata:` section of every Kubernetes object:
// identity (Name/Namespace/UID), versioning, lifecycle timestamps,
// and the Labels/Annotations maps.
// (Excerpt from k8s.io/apimachinery/pkg/apis/meta/v1 — kept verbatim.)
type ObjectMeta struct {
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"`
Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"`
SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,4,opt,name=selfLink"`
UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"`
ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"`
Generation int64 `json:"generation,omitempty" protobuf:"varint,7,opt,name=generation"`
CreationTimestamp Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"`
DeletionTimestamp *Time `json:"deletionTimestamp,omitempty" protobuf:"bytes,9,opt,name=deletionTimestamp"`
DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty" protobuf:"varint,10,opt,name=deletionGracePeriodSeconds"`
Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"`
Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"`
OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"`
Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"`
ManagedFields []ManagedFieldsEntry `json:"managedFields,omitempty" protobuf:"bytes,17,rep,name=managedFields"`
}
对应yml文件中的
metadata:
……
4.1.5 NodeSpec
所在包:"k8s.io/api/core/v1"
// NodeSpec is the `spec:` section of a Node: pod CIDR ranges, cloud
// provider ID, schedulability flag, and taints.
// (Excerpt from k8s.io/api/core/v1 — kept verbatim for reference.)
type NodeSpec struct {
PodCIDR string `json:"podCIDR,omitempty" protobuf:"bytes,1,opt,name=podCIDR"`
PodCIDRs []string `json:"podCIDRs,omitempty" protobuf:"bytes,7,opt,name=podCIDRs" patchStrategy:"merge"`
ProviderID string `json:"providerID,omitempty" protobuf:"bytes,3,opt,name=providerID"`
Unschedulable bool `json:"unschedulable,omitempty" protobuf:"varint,4,opt,name=unschedulable"`
Taints []Taint `json:"taints,omitempty" protobuf:"bytes,5,opt,name=taints"`
ConfigSource *NodeConfigSource `json:"configSource,omitempty" protobuf:"bytes,6,opt,name=configSource"`
// Deprecated upstream — field name warns against use; json tag is still "externalID".
DoNotUseExternalID string `json:"externalID,omitempty" protobuf:"bytes,2,opt,name=externalID"`
}
对应yml文件的
spec:
……
4.1.6 NodeStatus
所在包:"k8s.io/api/core/v1"
// NodeStatus is the `status:` section of a Node: resource capacity and
// allocatable amounts, conditions, addresses, daemon endpoints, system
// info, and cached container images.
// (Excerpt from k8s.io/api/core/v1 — kept verbatim for reference.)
type NodeStatus struct {
Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
Allocatable ResourceList `json:"allocatable,omitempty" protobuf:"bytes,2,rep,name=allocatable,casttype=ResourceList,castkey=ResourceName"`
Phase NodePhase `json:"phase,omitempty" protobuf:"bytes,3,opt,name=phase,casttype=NodePhase"`
Conditions []NodeCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"`
Addresses []NodeAddress `json:"addresses,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,5,rep,name=addresses"`
DaemonEndpoints NodeDaemonEndpoints `json:"daemonEndpoints,omitempty" protobuf:"bytes,6,opt,name=daemonEndpoints"`
NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"`
Images []ContainerImage `json:"images,omitempty" protobuf:"bytes,8,rep,name=images"`
VolumesInUse []UniqueVolumeName `json:"volumesInUse,omitempty" protobuf:"bytes,9,rep,name=volumesInUse"`
VolumesAttached []AttachedVolume `json:"volumesAttached,omitempty" protobuf:"bytes,10,rep,name=volumesAttached"`
Config *NodeConfigStatus `json:"config,omitempty" protobuf:"bytes,11,opt,name=config"`
}
对应yml文件的
status:
……
4.1.7 对照yml文件示例
附原生k8s集群上一个node节点信息,大家可以对照理解一下以上结构体
apiVersion: v1
kind: Node
metadata:
annotations:
flannel.alpha.coreos.com/backend-data: '{"VtepMAC":"3e:31:18:5c:89:6a"}'
flannel.alpha.coreos.com/backend-type: vxlan
flannel.alpha.coreos.com/kube-subnet-manager: "true"
flannel.alpha.coreos.com/public-ip: 10.10.239.204
kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock
node.alpha.kubernetes.io/ttl: "0"
volumes.kubernetes.io/controller-managed-attach-detach: "true"
creationTimestamp: "2022-07-18T09:25:53Z"
labels:
beta.kubernetes.io/arch: amd64
beta.kubernetes.io/os: linux
kubernetes.io/arch: amd64
kubernetes.io/hostname: crust-n01
kubernetes.io/os: linux
node_type: cpu
name: crust-n01
resourceVersion: "23109809"
selfLink: /api/v1/nodes/crust-n01
uid: 0a83df46-9900-4b22-b12a-8e6266b5a795
spec:
podCIDR: 10.244.5.0/24
podCIDRs:
- 10.244.5.0/24
status:
addresses:
- address: 10.10.xxx.204
type: InternalIP
- address: crust-n01
type: Hostname
…………
4.2 Get Node List
语法
- 语法
func (NodeInterface) List(ctx context.Context, opts v1.ListOptions) (*v1.NodeList, error)
- 语法示例
nodeList,err := clientSet.CoreV1().Nodes().List(context.TODO(), metaV1.ListOptions{})
完整示例
- 定义函数
package crowK8S
import (
"context"
coreV1 "k8s.io/api/core/v1"
metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
// GetNodeList lists every Node in the cluster via the CoreV1 API.
// On error it returns a nil list — never a partially-populated value.
func GetNodeList(clientSet *kubernetes.Clientset) (nodeList *coreV1.NodeList, err error) {
	// Empty ListOptions: no label/field selector, list all nodes.
	nodeList, err = clientSet.CoreV1().Nodes().List(context.TODO(), metaV1.ListOptions{})
	if err != nil {
		return nil, err
	}
	return nodeList, nil
}
- 调用示例
package main
import (
"fmt"
"go-k8s/crowK8S"
)
// main connects to the cluster and prints the name of every node.
func main() {
	clientSet, err := crowK8S.ConnectK8s()
	if err != nil {
		fmt.Println(err)
		return // cannot proceed without a client
	}
	nodeList, err := crowK8S.GetNodeList(clientSet)
	// Bug fix: the original used `for err != nil { fmt.Println(err) }`,
	// which loops forever when List fails; report once and stop instead.
	if err != nil {
		fmt.Println(err)
		return
	}
	//fmt.Printf("%+v",nodeList)
	for _, nodeInfo := range nodeList.Items {
		fmt.Printf("node 的名字为:%s\n", nodeInfo.Name)
	}
}
- 结果显示
API server listening at: 127.0.0.1:52897
node 的名字为:crust-m01
node 的名字为:crust-m02
node 的名字为:crust-m03
node 的名字为:crust-n01
node 的名字为:crust-n02
node 的名字为:crust-n03
node 的名字为:sa2
4.3 Get Node
语法
- 语法
func (NodeInterface) Get(ctx context.Context, name string, opts v1.GetOptions) (*v1.Node, error)
- 语法示例
nodeInfo,err = clientSet.CoreV1().Nodes().Get(context.TODO(),nodeName, metaV1.GetOptions{})
完整示例
- 定义函数
package crowK8S
import (
"context"
coreV1 "k8s.io/api/core/v1"
metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
// GetNode fetches a single Node by name via the CoreV1 API.
// On error it returns a nil node — never a partially-populated value.
func GetNode(clientSet *kubernetes.Clientset, nodeName string) (nodeInfo *coreV1.Node, err error) {
	nodeInfo, err = clientSet.CoreV1().Nodes().Get(context.TODO(), nodeName, metaV1.GetOptions{})
	if err != nil {
		return nil, err
	}
	return nodeInfo, nil
}
- 调用示例
package main
import (
"fmt"
"go-k8s/crowK8S"
)
// main connects to the cluster, fetches node "crust-n02", and prints
// its ObjectMeta.
func main() {
	clientSet, err := crowK8S.ConnectK8s()
	if err != nil {
		fmt.Println(err)
		return // cannot proceed without a client
	}
	nodeInfo, err := crowK8S.GetNode(clientSet, "crust-n02")
	// Bug fix: the original printed the error but fell through to
	// nodeInfo.ObjectMeta, dereferencing a nil pointer on failure.
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%+v", nodeInfo.ObjectMeta)
}
- 结果显示
以下结果是我格式化了的,实际输出是 fmt 以 %+v 打印的一行 Go 结构体文本(并非严格的 json 字串)。
{
Name: crust - n02 GenerateName: Namespace: SelfLink: /api/v
1 / nodes / crust - n02 UID: 491 b0e8b - b70e - 46 a0 - bc81 - 78080 d806030 ResourceVersion: 18804706 Generation: 0 CreationTimestamp: 2022 - 07 - 12 16: 58: 15 + 0800 CST DeletionTimestamp: < nil > DeletionGracePeriodSeconds: < nil > Labels: map[beta.kubernetes.io / arch: amd64 beta.kubernetes.io / os: linux kubernetes.io / arch: amd64 kubernetes.io / hostname: crust - n02 kubernetes.io / os: linux] Annotations: map[flannel.alpha.coreos.com / backend - data: {
"VtepMAC": "fa:8a:d8:b0:e1:76"
}
flannel.alpha.coreos.com / backend - type: vxlan flannel.alpha.coreos.com / kube - subnet - manager: true flannel.alpha.coreos.com / public - ip: 10.10 .239 .205 kubeadm.alpha.kubernetes.io / cri - socket: /var/run / dockershim.sock node.alpha.kubernetes.io / ttl: 0 volumes.kubernetes.io / controller - managed - attach - detach: true] OwnerReferences: [] Finalizers: [] ManagedFields: [{
Manager: kubeadm Operation: Update APIVersion: v1 Time: 2022 - 07 - 12 16: 58: 15 + 0800 CST FieldsType: FieldsV1 FieldsV1: {
"f:metadata": {
"f:annotations": {
"f:kubeadm.alpha.kubernetes.io/cri-socket": {}
}
}
}
Subresource:
} {
Manager: flanneld Operation: Update APIVersion: v1 Time: 2022 - 07 - 12 16: 58: 20 + 0800 CST FieldsType: FieldsV1 FieldsV1: {
"f:metadata": {
"f:annotations": {
"f:flannel.alpha.coreos.com/backend-data": {},
"f:flannel.alpha.coreos.com/backend-type": {},
"f:flannel.alpha.coreos.com/kube-subnet-manager": {},
"f:flannel.alpha.coreos.com/public-ip": {}
}
}
}
Subresource:
} {
Manager: kubelet Operation: Update APIVersion: v1 Time: 2022 - 08 - 05 23: 32: 40 + 0800 CST FieldsType: FieldsV1 FieldsV1: {
"f:metadata": {
"f:annotations": {
".": {},
"f:volumes.kubernetes.io/controller-managed-attach-detach": {}
},
"f:labels": {
".": {},
"f:beta.kubernetes.io/arch": {},
"f:beta.kubernetes.io/os": {},
"f:kubernetes.io/arch": {},
"f:kubernetes.io/hostname": {},
"f:kubernetes.io/os": {}
}
},
"f:status": {
"f:allocatable": {
"f:ephemeral-storage": {}
},
"f:capacity": {
"f:ephemeral-storage": {}
},
"f:conditions": {
"k:{\"type\":\"DiskPressure\"}": {
"f:lastHeartbeatTime": {},
"f:lastTransitionTime": {},
"f:message": {},
"f:reason": {},
"f:status": {}
},
"k:{\"type\":\"MemoryPressure\"}": {
"f:lastHeartbeatTime": {}
},
"k:{\"type\":\"PIDPressure\"}": {
"f:lastHeartbeatTime": {}
},
"k:{\"type\":\"Ready\"}": {
"f:lastHeartbeatTime": {},
"f:lastTransitionTime": {},
"f:message": {},
"f:reason": {},
"f:status": {}
}
},
"f:images": {}
}
}
Subresource:
} {
Manager: kube - controller - manager Operation: Update APIVersion: v1 Time: 2022 - 08 - 06 00: 00: 54 + 0800 CST FieldsType: FieldsV1 FieldsV1: {
"f:metadata": {
"f:annotations": {
"f:node.alpha.kubernetes.io/ttl": {}
}
},
"f:spec": {
"f:podCIDR": {},
"f:podCIDRs": {
".": {},
"v:\"10.244.2.0/24\"": {}
}
}
}
Subresource:
}]
}
网友评论