美文网首页
glusterfs之动态PVC

glusterfs之动态PVC

作者: xiao_b4b1 | 来源:发表于2018-11-11 15:43 被阅读0次

    动态PVC可以简化管理员创建PV的过程,提升整个分布式存储的使用效率和体验;底层存储部署完成以后,用户只需要通过storageclass就可以申请pvc使用,减少管理员干预的过程

    环境

    角色 节点
    glusterfs 集群 master-192,node-193(已部署)
    k8s集群 master-192,node-193,node-194(已部署)
    heketi容器 master-192(未部署)

    部署heketi

    heketi提供RESTful接口与kubernetes对接,提供动态创建pvc的能力

    由于glusterfs采用单独部署,故heketi工作采用ssh模式

    git clone https://github.com/xiaotech/glusterfs_pvc.git
    1.创建ssh免密钥认证key

    ssh-keygen -f /etc/heketi/heketi_key -t rsa -N ''

    [root@master-192 self]# ls /etc/heketi/ -l
    总用量 164
    -rw------- 1 root root   1679 11月  9 14:58 heketi_key
    -rw-r--r-- 1 root root    397 11月  9 14:58 heketi_key.pub
    

    2.创建serviceaccount

    [root@master-192 self]# kubectl create -f heketi-service-account.json

    apiVersion: v1
    kind: ServiceAccount
    metadata: 
        name: heketi-service-account
    

    3.创建heketi配置文件

    [root@master-192 self]# kubectl create cm heketi-config --from-file=./heketi.json

    {
      "_port_comment": "Heketi Server Port Number",
      "port": "8080",
    
      "_use_auth": "Enable JWT authorization. Please enable for deployment",
      "use_auth": true,
    
      "_jwt": "Private keys for access",
      "jwt": {
        "_admin": "Admin has access to all APIs",
        "admin": {
          "key": "xiaotech"
        },
        "_user": "User only has access to /volumes endpoint",
        "user": {
          "key": "My Secret"
        }
      },
    
      "_glusterfs_comment": "GlusterFS Configuration",
      "glusterfs": {
        "_executor_comment": "Execute plugin. Possible choices: mock, kubernetes, ssh",
        "executor": "ssh",
    
        "_db_comment": "Database file name",
        "db": "/var/lib/heketi/heketi.db",
    
        "kubeexec": {
          "rebalance_on_expansion": true
        },
    
        "sshexec": {
          "rebalance_on_expansion": true,
          "keyfile": "/etc/ssh_key/heketi_key",
          "fstab": "/etc/fstab",
          "port": "22",
          "user": "root",
          "sudo": false
        }
      },
    
      "_backup_db_to_kube_secret": "Backup the heketi database to a Kubernetes secret when running in Kubernetes. Default is off.",
      "backup_db_to_kube_secret": false
    }
    

    4.创建heketi deployment和service

    [root@master-192 self]# kubectl create -f heketi-deploy.yaml

    apiVersion: extensions/v1beta1
    kind: Deployment
    metadata:
      name: heketi
    spec:
      replicas: 1
      template:
        metadata:
          labels:
            app: heketi
        spec:
          containers:
          - env:
            - name: HEKETI_EXECUTOR
              value: ssh
            - name: HEKETI_DB_PATH
              value: /var/lib/heketi/heketi.db
            - name: HEKETI_FSTAB
              value: /var/lib/heketi/fstab
            - name: HEKETI_SNAPSHOT_LIMIT
              value: "14"
            - name: HEKETI_KUBE_GLUSTER_DAEMONSET
              value: "y"
            image: xiaotech/heketi:7
            livenessProbe:
              failureThreshold: 3
              httpGet:
                path: /hello
                port: 8080
                scheme: HTTP
              initialDelaySeconds: 30
              timeoutSeconds: 3
            name: heketi
            ports:
            - containerPort: 8080
              protocol: TCP
            readinessProbe:
              failureThreshold: 3
              httpGet:
                path: /hello
                port: 8080
                scheme: HTTP
              initialDelaySeconds: 3
              timeoutSeconds: 3
            volumeMounts:
            - mountPath: /var/lib/heketi
              name: db
            - mountPath: /etc/ssh_key
              name: key
            - mountPath: /etc/heketi/
              name: config
          nodeSelector:
            kubernetes.io/hostname: master-192
          serviceAccount: heketi-service-account
          serviceAccountName: heketi-service-account
          volumes:
          - emptyDir: {}
            name: db
          - hostPath:
              path: /etc/heketi/
            name: key
          - configMap:
              name: heketi-config
            name: config
    

    [root@master-192 self]# kubectl create -f heketi-service.yaml

    apiVersion: v1
    kind: Service
    metadata:
      labels:
        app: heketi
      name: heketi
      namespace: default
    spec:
      selector:
        app: heketi
      ports:
        - port: 8080
      type: NodePort
    

    5.验证

    [root@master-192 self]# heketi-cli --server http://172.30.81.192:31131 --user=admin --secret=xiaotech cluster list
    Clusters:
    

    6.创建topology

    [root@master-192 home]# heketi-cli --server http://172.30.81.192:31131 --user=admin --secret=xiaotech topology load --json ./heketi-topology.json 
        Found node 172.30.81.192 on cluster f9256fd3b71b47cfdaff71d605ce2766
            Adding device /dev/vdb ... OK
        Found node 172.30.81.193 on cluster f9256fd3b71b47cfdaff71d605ce2766
            Adding device /dev/vdb ... OK
    

    7.创建storageclass

    [root@master-192 self]# kubectl create -f storageclass-glusterfs.yaml 
    storageclass.storage.k8s.io/glusterfs created
    

    8.创建动态pvc
    [root@master-192 self]# kubectl create -f glusterfs-pvc.yaml

    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: test
      namespace: default
      annotations:
        volume.beta.kubernetes.io/storage-class: "glusterfs"
    spec:
      accessModes:
      - ReadWriteMany
      resources:
        requests:
          storage: 2Gi
    
    [root@master-192 self]# lsblk 
    NAME                                                                              MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
    vda                                                                               252:0    0  40G  0 disk 
    ├─vda1                                                                            252:1    0   1G  0 part /boot
    └─vda2                                                                            252:2    0  39G  0 part 
      ├─centos-root                                                                   253:0    0  35G  0 lvm  /
      └─centos-swap                                                                   253:1    0   4G  0 lvm  
    vdb                                                                               252:16   0  20G  0 disk 
    ├─vg_8f9e89da702ee20d753162ee528de5cc-tp_1dab4f6ca2aa9932a56fc9b8f1209abf_tmeta   253:2    0  12M  0 lvm  
    │ └─vg_8f9e89da702ee20d753162ee528de5cc-tp_1dab4f6ca2aa9932a56fc9b8f1209abf-tpool 253:4    0   2G  0 lvm  
    │   ├─vg_8f9e89da702ee20d753162ee528de5cc-tp_1dab4f6ca2aa9932a56fc9b8f1209abf     253:5    0   2G  0 lvm  
    │   └─vg_8f9e89da702ee20d753162ee528de5cc-brick_1dab4f6ca2aa9932a56fc9b8f1209abf  253:6    0   2G  0 lvm  /var/lib/heketi/mounts/vg_8
    └─vg_8f9e89da702ee20d753162ee528de5cc-tp_1dab4f6ca2aa9932a56fc9b8f1209abf_tdata   253:3    0   2G  0 lvm  
      └─vg_8f9e89da702ee20d753162ee528de5cc-tp_1dab4f6ca2aa9932a56fc9b8f1209abf-tpool 253:4    0   2G  0 lvm  
        ├─vg_8f9e89da702ee20d753162ee528de5cc-tp_1dab4f6ca2aa9932a56fc9b8f1209abf     253:5    0   2G  0 lvm  
        └─vg_8f9e89da702ee20d753162ee528de5cc-brick_1dab4f6ca2aa9932a56fc9b8f1209abf  253:6    0   2G  0 lvm  /var/lib/heketi/mounts/vg_8
    [root@master-192 self]# gluster volume info
     
    Volume Name: vol_8449601e8b59a3ea1ac5dc9c0e76fbe5
    Type: Replicate
    Volume ID: 0b948972-0bb2-4887-9839-7f4161f65227
    Status: Started
    Snapshot Count: 0
    Number of Bricks: 1 x 2 = 2
    Transport-type: tcp
    Bricks:
    Brick1: 172.30.81.193:/var/lib/heketi/mounts/vg_6b1b9f0c4532e4b2e2347a5861d01beb/brick_2164a574204385f2e9b03d50dd7cc91a/brick
    Brick2: 172.30.81.192:/var/lib/heketi/mounts/vg_8f9e89da702ee20d753162ee528de5cc/brick_1dab4f6ca2aa9932a56fc9b8f1209abf/brick
    Options Reconfigured:
    transport.address-family: inet
    nfs.disable: on
    performance.client-io-threads: off
    

    删除pvc
    [root@master-192 self]# kubectl delete -f glusterfs-pvc.yaml

    [root@master-192 self]# lsblk 
    NAME            MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
    vda             252:0    0  40G  0 disk 
    ├─vda1          252:1    0   1G  0 part /boot
    └─vda2          252:2    0  39G  0 part 
      ├─centos-root 253:0    0  35G  0 lvm  /
      └─centos-swap 253:1    0   4G  0 lvm  
    vdb             252:16   0  20G  0 disk 
    [root@master-192 self]# gluster volume info
    No volumes present
    

    相关文章

      网友评论

          本文标题:glusterfs之动态PVC

          本文链接:https://www.haomeiwen.com/subject/trbxfqtx.html