How to schedule a pod onto a specific node and have that node reject all other pods

Author: 开始懂了90 | Published 2018-12-04 15:28

Requirement: one pod needs 12 CPU cores and 12 GiB of memory, but the largest host available has 16 cores and 16 GiB. The node therefore has to reject all other pods and accept only this one, and the pod must be scheduled onto this node and nowhere else; otherwise it would disrupt the normal operation of the other nodes.

Pod scheduling concepts used: taints/tolerations and node affinity

Environment

Hostname: test-scan-110
Host IP: 10.39.0.110
Service: openvas

Taint the node. With the taint in place, the node will only accept pods that carry a matching toleration.

kubectl taint node test-scan-110 scan=openvas:NoSchedule
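
To confirm the taint took effect, kubectl describe shows it under Taints (standard kubectl, nothing assumed beyond the node name above):

kubectl describe node test-scan-110 | grep Taints
# expected output: Taints: scan=openvas:NoSchedule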

Export the existing openvas manifests (the current openvas deployment is a low-spec 2c8g one; it does not meet the requirement, but it serves as the base file to modify)

# list the deployments
[root@test-master-113 liangming]# kubectl get deployment -n hexuelin
NAME       DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
openvas    1         1         1            1           21m
openvas1   1         1         1            1           3d
scan       1         1         1            1           17h

# list the services

[root@test-master-113 liangming]# kubectl get svc -nhexuelin
NAME       TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)                                                                                            AGE
openvas    ClusterIP   10.2.99.5      11.11.1.1     443/TCP,9390/TCP                                                                                   23m
openvas1   ClusterIP   10.15.115.36   11.11.1.1     443/TCP,9390/TCP                                                                                   3d
scan       ClusterIP   10.1.190.63    11.11.1.1     9990/TCP,9991/TCP,9992/TCP,9993/TCP,9994/TCP,9995/TCP,9996/TCP,9997/TCP,9998/TCP,9999/TCP,80/TCP   17h

# export the openvas deployment and svc manifests
kubectl get deployment openvas -nhexuelin -o yaml > /tmp/openvas-deployment.yaml
kubectl get svc openvas -nhexuelin -o yaml > /tmp/openvas-svc.yaml

Configure the toleration

vi /tmp/openvas-deployment.yaml
# Add the following block at the spec.template.spec level. Its key and value are the ones
# defined when tainting: kubectl taint node test-scan-110 scan=openvas:NoSchedule
tolerations:
  - effect: NoSchedule
    key: scan
    operator: Equal
    value: openvas


# With the taint in place, the node only accepts pods that tolerate key=scan value=openvas.
# This alone does not force the pod onto this node, though: the pod could still be scheduled
# onto any other node with enough free resources. We therefore also configure affinity so the
# pod is pinned to this node. nodeSelector is not used because affinity supersedes it.
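
As an aside on the toleration just added: if the taint's value does not matter, Kubernetes also accepts operator: Exists, which tolerates any value of the key (the value field must then be omitted). A minimal variant sketch:

tolerations:
- effect: NoSchedule
  key: scan
  operator: Exists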

Label the node

kubectl label node test-scan-110 func=scan
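
To verify the label, list nodes filtered by it, or show all labels on the node:

kubectl get nodes -l func=scan
kubectl get node test-scan-110 --show-labels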

Configure node affinity

vi /tmp/openvas-deployment.yaml
# set the following values under spec.template.spec.affinity
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: func
            operator: In
            values:
            - scan

# Three fields to set: key, operator, and values
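
For comparison, the older nodeSelector form below (also placed at the spec.template.spec level) would pin the pod to the same node; nodeAffinity is used here because it is the more expressive mechanism that supersedes it:

spec:
  nodeSelector:
    func: scan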

Update the container resource limits

vi /tmp/openvas-deployment.yaml
# change the cpu and memory values under spec.template.spec.containers[].resources
resources:
  limits:
    cpu: "12"
    memory: 12Gi
  requests:
    cpu: "12"
    memory: 12Gi
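
Before applying, it is worth checking that the node can actually accommodate a 12-core/12Gi pod once system daemons are accounted for; the node's Allocatable section shows what the scheduler can actually hand out:

kubectl describe node test-scan-110 | grep -A 6 Allocatable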

Delete the existing openvas deployment and service

# The existing openvas pod already occupies most of the node's resources. Running
# kubectl apply -f /tmp/openvas-deployment.yaml directly may therefore fail: with a rolling
# update, Kubernetes creates the new pod before deleting the old one, and the old pod's
# resource usage can leave too little room for the new pod to schedule.
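
An alternative worth knowing, instead of deleting the Deployment: switching its update strategy to Recreate makes Kubernetes delete the old pod before creating the new one, which avoids the resource squeeze entirely. A sketch of the change in the Deployment spec (the rollingUpdate block must be removed when doing this):

spec:
  strategy:
    type: Recreate

Here, though, we simply delete the old objects and re-apply: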

kubectl delete deployment openvas -nhexuelin
kubectl delete svc openvas -nhexuelin

Apply the manifests (final step)

kubectl apply -f /tmp/openvas-deployment.yaml
kubectl apply -f /tmp/openvas-svc.yaml
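
After the apply, confirm the pod was scheduled onto test-scan-110 (check the NODE column of the wide output):

kubectl get pods -n hexuelin -o wide | grep openvas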

The modified /tmp/openvas-deployment.yaml

[root@test-master-113 liangming]# cat /tmp/openvas-deployment.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "1"
  creationTimestamp: 2018-12-04T02:09:09Z
  generation: 1
  labels:
    ClusterID: CID-ca4135da3326
    UserID: "52"
    name: openvas
    tenxcloud.com/appName: openvas
    tenxcloud.com/svcName: openvas
  name: openvas
  namespace: hexuelin
  resourceVersion: "8390068"
  selfLink: /apis/extensions/v1beta1/namespaces/hexuelin/deployments/openvas
  uid: 960af622-f769-11e8-8b68-5254eec04736
spec:
  minReadySeconds: 10
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 5
  selector:
    matchLabels:
      name: openvas
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        ClusterID: CID-ca4135da3326
        UserID: "52"
        diskType: ""
        name: openvas
        tenxcloud.com/appName: openvas
        tenxcloud.com/svcName: openvas
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: func
                operator: In
                values:
                - scan
      tolerations:
      - effect: NoSchedule
        key: scan
        operator: Equal
        value: openvas
      containers:
      - env:
        - name: CPU_LIMIT
          valueFrom:
            resourceFieldRef:
              containerName: openvas
              divisor: "0"
              resource: limits.cpu
        - name: MEM_LIMIT
          valueFrom:
            resourceFieldRef:
              containerName: openvas
              divisor: "0"
              resource: limits.memory
        - name: PATH
          value: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
        - name: DEBIAN_FRONTEND
          value: noninteractive
        - name: OV_PASSWORD
          value: admin
        - name: PUBLIC_HOSTNAME
          value: openvas
        - name: APP_NAME
          value: openvas
        - name: SERVICE_NAME
          value: openvas
        - name: CLUSTER_ID
          value: CID-ca4135da3326
        - name: USER_ID
          value: "52"
        image: harbor.enncloud.cn/enncloud/openvas:v0.1
        imagePullPolicy: Always
        name: openvas
        ports:
        - containerPort: 443
          protocol: TCP
        - containerPort: 9390
          protocol: TCP
        resources:
          limits:
            cpu: "8"
            memory: 10Gi
          requests:
            cpu: "8"
            memory: 10Gi
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      imagePullSecrets:
      - name: registrysecret
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30

## The fields below are added by Kubernetes at runtime and must be deleted by hand before re-applying
status:
  availableReplicas: 1
  conditions:
  - lastTransitionTime: 2018-12-04T02:19:30Z
    lastUpdateTime: 2018-12-04T02:19:30Z
    message: Deployment has minimum availability.
    reason: MinimumReplicasAvailable
    status: "True"
    type: Available
  - lastTransitionTime: 2018-12-04T02:19:18Z
    lastUpdateTime: 2018-12-04T02:19:30Z
    message: ReplicaSet "openvas-658fb8c4bc" has successfully progressed.
    reason: NewReplicaSetAvailable
    status: "True"
    type: Progressing
  observedGeneration: 1
  readyReplicas: 1
  replicas: 1
  updatedReplicas: 1
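
Besides status, the server-populated metadata fields (creationTimestamp, generation, resourceVersion, selfLink, uid, and the deployment.kubernetes.io/revision annotation) are also safe to strip before re-applying. On kubectl versions of this era, the --export flag did part of this cleanup automatically (it was deprecated in v1.14 and later removed):

kubectl get deployment openvas -nhexuelin -o yaml --export > /tmp/openvas-deployment.yaml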

The modified /tmp/openvas-svc.yaml

[root@test-master-113 liangming]# cat /tmp/openvas-svc.yaml 
apiVersion: v1
kind: Service
metadata:
  annotations:
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"system/lbgroup":"group-darso","tenxcloud.com/schemaPortname":"openvas-0/TCP/15968,openvas-1/TCP/20040"},"creationTimestamp":"2018-12-04T02:09:09Z","labels":{"ClusterID":"CID-ca4135da3326","UserID":"52","name":"openvas","tenxcloud.com/appName":"openvas","tenxcloud.com/svcName":"openvas"},"name":"openvas","namespace":"hexuelin","resourceVersion":"8390016","selfLink":"/api/v1/namespaces/hexuelin/services/openvas","uid":"960e7689-f769-11e8-8b68-5254eec04736"},"spec":{"clusterIP":"10.2.99.5","externalIPs":["11.11.1.1"],"ports":[{"name":"openvas-0","port":443,"protocol":"TCP","targetPort":443},{"name":"openvas-1","port":9390,"protocol":"TCP","targetPort":9390}],"selector":{"name":"openvas"},"sessionAffinity":"None","type":"ClusterIP"},"status":{"loadBalancer":{}}}
    system/lbgroup: group-darso
    tenxcloud.com/schemaPortname: openvas-0/TCP/15968,openvas-1/TCP/20040
  creationTimestamp: 2018-12-04T02:19:54Z
  labels:
    ClusterID: CID-ca4135da3326
    UserID: "52"
    name: openvas
    tenxcloud.com/appName: openvas
    tenxcloud.com/svcName: openvas
  name: openvas
  namespace: hexuelin
  resourceVersion: "8391430"
  selfLink: /api/v1/namespaces/hexuelin/services/openvas
  uid: 164b853f-f76b-11e8-8b68-5254eec04736
spec:
  clusterIP: 10.2.99.5
  externalIPs:
  - 11.11.1.1
  ports:
  - name: openvas-0
    port: 443
    protocol: TCP
    targetPort: 443
  - name: openvas-1
    port: 9390
    protocol: TCP
    targetPort: 9390
  selector:
    name: openvas
  sessionAffinity: None
  type: ClusterIP
status:
  loadBalancer: {}
