glusterfs and heketi (containerized)

Author: 老吕子 | Published 2017-04-26 15:45

    Prerequisites

    • Kubernetes 1.6.1
    • 3 storage nodes, each with a spare block device /dev/vdb
    • 1 master node

    Download the deployment scripts

    git clone https://github.com/gluster/gluster-kubernetes && cd gluster-kubernetes/deploy

    Adjust the configuration for your environment

    cp topology.json.sample topology.json
    # Edit the hostnames (nodes), IPs, and block devices to match your cluster
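    For reference, a minimal topology.json for this cluster could look roughly like the
    sketch below. Only one node entry is shown (repeat it for kube3 and kube4); the
    storage IPs are taken from the pod listing later in this article, and the exact
    schema may differ between heketi versions:

    {
      "clusters": [
        {
          "nodes": [
            {
              "node": {
                "hostnames": {
                  "manage": ["cloud4ourself-kube2.novalocal"],
                  "storage": ["10.9.5.90"]
                },
                "zone": 1
              },
              "devices": ["/dev/vdb"]
            }
          ]
        }
      ]
    }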

    [root@cloud4ourself-kube1 deploy]# kubectl get nodes
    NAME                            STATUS    AGE       VERSION
    cloud4ourself-kube1.novalocal   Ready     16h       v1.6.1
    cloud4ourself-kube2.novalocal   Ready     40m       v1.6.1
    cloud4ourself-kube3.novalocal   Ready     37m       v1.6.1
    cloud4ourself-kube4.novalocal   Ready     36m       v1.6.1

    Every node must also be able to resolve the hostnames of all the other nodes, via DNS or /etc/hosts (see Problem 2 below).
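    For example, /etc/hosts on every node could carry entries like these (the storage
    IPs for kube2 through kube4 are taken from the pod listing later in this article;
    substitute your own):

    10.9.5.90 cloud4ourself-kube2.novalocal
    10.9.5.71 cloud4ourself-kube3.novalocal
    10.9.5.70 cloud4ourself-kube4.novalocal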

    Run the deployment script

    wget https://github.com/heketi/heketi/releases/download/v4.0.0/heketi-client-v4.0.0.linux.amd64.tar.gz
    tar -xvf heketi-client-v4.0.0.linux.amd64.tar.gz
    cp heketi-client/bin/heketi-cli /bin/
    
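    # -g deploys the GlusterFS pods themselves (as a DaemonSet); -n selects the namespace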
    ./gk-deploy -g -n default
    
    [Y]es, [N]o? [Default: Y]: y
    Using Kubernetes CLI.
    NAME      STATUS    AGE
    default   Active    22h
    Using namespace "default".
    Checking that heketi pod is not running ... OK
    serviceaccount "heketi-service-account" created
    clusterrolebinding "heketi-sa-view" created
    node "cloud4ourself-kube2.novalocal" labeled
    node "cloud4ourself-kube3.novalocal" labeled
    node "cloud4ourself-kube4.novalocal" labeled
    daemonset "glusterfs" created
    Waiting for GlusterFS pods to start ... OK
    service "deploy-heketi" created
    deployment "deploy-heketi" created
    Waiting for deploy-heketi pod to start ... OK
    Creating cluster ... ID: 4cfe35ce3cdc64b8afb8dbc46cad0e09
    Creating node cloud4ourself-kube2.novalocal ... ID: 503d25c29fb24d8c7f0ed58eb0e2c0ab
    Adding device /dev/vdb ... OK
    Creating node cloud4ourself-kube3.novalocal ... ID: 5d4a294df68a64f224b7331ba9e12c52
    Adding device /dev/vdb ... OK
    Creating node cloud4ourself-kube4.novalocal ... ID: 1c5291d656c3c2fd1286e29e260668a1
    Adding device /dev/vdb ... OK
    heketi topology loaded.
    Saving heketi-storage.json
    secret "heketi-storage-secret" created
    endpoints "heketi-storage-endpoints" created
    service "heketi-storage-endpoints" created
    job "heketi-storage-copy-job" created
    service "deploy-heketi" deleted
    job "heketi-storage-copy-job" deleted
    deployment "deploy-heketi" deleted
    secret "heketi-storage-secret" deleted
    service "heketi" created
    deployment "heketi" created
    Waiting for heketi pod to start ... OK
    heketi is now running and accessible via http://192.168.231.68:8080/
    Ready to create and provide GlusterFS volumes.
    
    [root@cloud4ourself-kube1 deploy]# kubectl get pod
    NAME                      READY     STATUS    RESTARTS   AGE
    glusterfs-66gn0           1/1       Running   0          3m
    glusterfs-srhh8           1/1       Running   0          3m
    glusterfs-z55q5           1/1       Running   0          3m
    heketi-1125625054-pl9qc   1/1       Running   0          1m
    [root@cloud4ourself-kube1 deploy]# export HEKETI_CLI_SERVER=$(kubectl get svc/heketi --template 'http://{{.spec.clusterIP}}:{{(index .spec.ports 0).port}}')
    [root@cloud4ourself-kube1 deploy]# echo $HEKETI_CLI_SERVER
    http://10.107.39.87:8080
    [root@cloud4ourself-kube1 deploy]# curl $HEKETI_CLI_SERVER/hello
    Hello from Heketi[root@cloud4ourself-kube1 deploy]#
    
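    With HEKETI_CLI_SERVER exported, the heketi-cli binary downloaded earlier can be
    used to double-check the cluster, for example:

    heketi-cli cluster list
    heketi-cli topology info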

    Retrying after a failure

    If gk-deploy fails partway, remove everything it created before running it again:

    kubectl delete -f kube-templates/deploy-heketi-deployment.yaml
    kubectl delete -f kube-templates/heketi-deployment.yaml
    kubectl delete -f kube-templates/heketi-service-account.yaml
    kubectl delete -f kube-templates/glusterfs-daemonset.yaml
    
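    # Also run these on every storage node: both directories are hostPath mounts
    # left behind by the GlusterFS pods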
    rm -rf /var/lib/heketi
    rm -rf /var/lib/glusterd
    

    Usage

    cd ../docs/examples/hello_world
    [root@cloud4ourself-kube1 hello_world]# kubectl get svc/heketi --template 'http://{{.spec.clusterIP}}:{{(index .spec.ports 0).port}}'
    http://10.107.39.87:8080
    # Use this URL as the resturl in the StorageClass below
    [root@cloud4ourself-kube1 hello_world]# cat gluster-storage-class.yaml
    apiVersion: storage.k8s.io/v1beta1
    kind: StorageClass
    metadata:
      name: gluster-heketi
    provisioner: kubernetes.io/glusterfs
    parameters:
      resturl: "http://10.107.39.87:8080"
      restuser: ""
      restuserkey: ""
      
      
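    gluster-pvc.yaml is not reproduced in this article; reconstructed from the PVC
    output below, it looks roughly like this (in Kubernetes 1.6 the beta annotation
    was the common way to select a StorageClass):

    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: gluster1
      annotations:
        volume.beta.kubernetes.io/storage-class: gluster-heketi
    spec:
      accessModes:
        - ReadWriteOnce
      resources:
        requests:
          storage: 5Gi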
    [root@cloud4ourself-kube1 hello_world]# kubectl create -f gluster-storage-class.yaml
    storageclass "gluster-heketi" created
    [root@cloud4ourself-kube1 hello_world]# kubectl create -f gluster-pvc.yaml
    persistentvolumeclaim "gluster1" created
    [root@cloud4ourself-kube1 hello_world]# kubectl get pv
    No resources found.
    [root@cloud4ourself-kube1 hello_world]# kubectl get pvc
    NAME       STATUS    VOLUME                                     CAPACITY   ACCESSMODES   STORAGECLASS     AGE
    gluster1   Bound     pvc-45c39fbe-2a57-11e7-bdbe-fa163e72ab1f   5Gi        RWO           gluster-heketi   8s
    [root@cloud4ourself-kube1 hello_world]# kubectl get pv
    NAME                                       CAPACITY   ACCESSMODES   RECLAIMPOLICY   STATUS    CLAIM              STORAGECLASS     REASON    AGE
    pvc-45c39fbe-2a57-11e7-bdbe-fa163e72ab1f   5Gi        RWO           Delete          Bound     default/gluster1   gluster-heketi             4s
    
    
    # Change the image in nginx-pod.yaml as shown below
    [root@cloud4ourself-kube1 hello_world]# grep image nginx-pod.yaml
        image: docker.io/nginx
        
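    nginx-pod.yaml itself is along these lines (reconstructed from the pod name and
    the mount point in the df output below; the container and volume names here are
    illustrative):

    apiVersion: v1
    kind: Pod
    metadata:
      name: gluster-pod1
    spec:
      containers:
        - name: gluster-pod1            # container name is illustrative
          image: docker.io/nginx
          volumeMounts:
            - name: gluster-vol1        # volume name is illustrative
              mountPath: /usr/share/nginx/html
      volumes:
        - name: gluster-vol1
          persistentVolumeClaim:
            claimName: gluster1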
    [root@cloud4ourself-kube1 hello_world]# kubectl apply -f nginx-pod.yaml
    pod "gluster-pod1" created
    
    [root@cloud4ourself-kube1 hello_world]# kubectl exec -ti gluster-pod1 -- /bin/sh
    # df
    Filesystem                                                                                          1K-blocks    Used Available Use% Mounted on
    /dev/mapper/docker-253:1-201327648-815a5158af94af25a973d7c36b90045b6a8b2a73129b38ddf9aac18e21fdd632  10474496  149224  10325272   2% /
    tmpfs                                                                                                 1941100       0   1941100   0% /dev
    tmpfs                                                                                                 1941100       0   1941100   0% /sys/fs/cgroup
    /dev/vda1                                                                                            41930056 4021060  37908996  10% /etc/hosts
    shm                                                                                                     65536       0     65536   0% /dev/shm
    10.9.5.70:vol_f97fb046f75d9c6a9d7f5180136a348c                                                        5230592   33408   5197184   1% /usr/share/nginx/html
    tmpfs                                                                                                 1941100      12   1941088   1% /run/secrets/kubernetes.io/serviceaccount
    
    # cd  /usr/share/nginx/html
    # ls
    # echo 'Hello World from GlusterFS!!!' > index.html
    # ls
    index.html
    #
    [root@cloud4ourself-kube1 hello_world]#
    [root@cloud4ourself-kube1 hello_world]# kubectl get pod -o wide
    NAME                      READY     STATUS    RESTARTS   AGE       IP                NODE
    gluster-pod1              1/1       Running   0          11m       192.168.195.132   cloud4ourself-kube2.novalocal
    glusterfs-66gn0           1/1       Running   0          39m       10.9.5.90         cloud4ourself-kube2.novalocal
    glusterfs-srhh8           1/1       Running   0          39m       10.9.5.70         cloud4ourself-kube4.novalocal
    glusterfs-z55q5           1/1       Running   0          39m       10.9.5.71         cloud4ourself-kube3.novalocal
    heketi-1125625054-pl9qc   1/1       Running   0          37m       192.168.231.68    cloud4ourself-kube3.novalocal
    
    [root@cloud4ourself-kube1 hello_world]# curl http://192.168.195.132/
    Hello World from GlusterFS!!!
    
    [root@cloud4ourself-kube1 hello_world]# kubectl exec -it glusterfs-srhh8 -- bash
    [root@cloud4ourself-kube4 /]#  mount | grep heketi
    /dev/vda1 on /var/lib/heketi type xfs (rw,relatime,attr2,inode64,noquota)
    /dev/mapper/vg_e89a8a2c4bc72289bc0181f3956413ed-brick_9cec68c35c030dcc994fb0e3173bacc1 on /var/lib/heketi/mounts/vg_e89a8a2c4bc72289bc0181f3956413ed/brick_9cec68c35c030dcc994fb0e3173bacc1 type xfs (rw,noatime,nouuid,attr2,inode64,logbsize=256k,sunit=512,swidth=512,noquota)
    /dev/mapper/vg_e89a8a2c4bc72289bc0181f3956413ed-brick_450ab555997d00d8ef7ad6255e1b5a3d on /var/lib/heketi/mounts/vg_e89a8a2c4bc72289bc0181f3956413ed/brick_450ab555997d00d8ef7ad6255e1b5a3d type xfs (rw,noatime,nouuid,attr2,inode64,logbsize=256k,sunit=512,swidth=512,noquota)
    [root@cloud4ourself-kube4 /]# cd /var/lib/heketi/mounts/
    [root@cloud4ourself-kube4 mounts]# cd vg_e89a8a2c4bc72289bc0181f3956413ed/
    [root@cloud4ourself-kube4 vg_e89a8a2c4bc72289bc0181f3956413ed]# ls
    brick_450ab555997d00d8ef7ad6255e1b5a3d  brick_9cec68c35c030dcc994fb0e3173bacc1
    [root@cloud4ourself-kube4 vg_e89a8a2c4bc72289bc0181f3956413ed]# ls */brick/index.html
    brick_450ab555997d00d8ef7ad6255e1b5a3d/brick/index.html
    [root@cloud4ourself-kube4 vg_e89a8a2c4bc72289bc0181f3956413ed]# cat brick_450ab555997d00d8ef7ad6255e1b5a3d/brick/index.html
    Hello World from GlusterFS!!!
    [root@cloud4ourself-kube4 vg_e89a8a2c4bc72289bc0181f3956413ed]#
    
    # The same index.html is visible in both the nginx pod and the gluster pod,
    # so the volume is working as expected. Test OK.
    
    

    Problem 1

    Adding device /dev/vdb ... Unable to add device: Unable to execute command on glusterfs-xp1nx: Can't initialize physical volume "/dev/vdb" of volume group "vg_dc649bdf755667e58c5d779f9d900057" without -ff
    
    
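    The device still carries an LVM physical-volume signature from an earlier attempt,
    so heketi's pvcreate refuses to reuse it. Zeroing the start of the device and
    re-reading the partition table clears the stale metadata (this destroys any data
    on /dev/vdb):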
    #dd if=/dev/zero of=/dev/vdb bs=1k count=1
    #blockdev --rereadpt /dev/vdb
    

    Problem 2

    Do you wish to proceed with deployment?
    
    [Y]es, [N]o? [Default: Y]: y
    Using Kubernetes CLI.
    NAME      STATUS    AGE
    default   Active    19h
    Using namespace "default".
    Checking that heketi pod is not running ... OK
    serviceaccount "heketi-service-account" created
    clusterrolebinding "heketi-sa-view" created
    node "cloud4ourself-kube2.novalocal" labeled
    node "cloud4ourself-kube3.novalocal" labeled
    node "cloud4ourself-kube4.novalocal" labeled
    daemonset "glusterfs" created
    Waiting for GlusterFS pods to start ... OK
    service "deploy-heketi" created
    deployment "deploy-heketi" created
    Waiting for deploy-heketi pod to start ... OK
    Creating cluster ... ID: 7b699e445901f3efac45a19be90bd9e5
    Creating node cloud4ourself-kube2.novalocal ... ID: a44ebd7a78fe10aec57145aee37963f0
    Adding device /dev/vdb ... OK
    Creating node cloud4ourself-kube3.novalocal ... ID: d3100cfcee906bc6017ddf556d712c8d
    Adding device /dev/vdb ... OK
    Creating node cloud4ourself-kube4.novalocal ... ID: dd7eb941f973b0a023a7948ecee55898
    Adding device /dev/vdb ... OK
    heketi topology loaded.
    Error: Unable to execute command on glusterfs-pbzcj: volume create: heketidbstorage: failed: Staging failed on 10.9.5.70. Error: Host 10.9.5.90 is not in 'Peer in Cluster' state
    Staging failed on 10.9.5.71. Error: Host 10.9.5.90 is not in 'Peer in Cluster' state
    Failed on setup openshift heketi storage
    

    Logging into one of the gluster pods, gluster peer status showed the problem:

    [root@cloud4ourself-kube1 deploy]# kubectl exec -it glusterfs-q0k02 -- bash
    [root@cloud4ourself-kube4 /]# gluster
    gluster> peer status
    Number of Peers: 2
    
    Hostname: localhost
    Uuid: cf378040-4836-4f93-8311-2688efc8385e
    State: Peer in Cluster (Disconnected)
    
    Hostname: 10.9.5.71
    Uuid: f0d2e19b-d231-49cc-a239-075eb5cca9f1
    State: Peer in Cluster (Connected)
    gluster>
    

    The cause: the pods could not resolve each other's hostnames, so one peer shows up as "localhost" in the Disconnected state. The fix was to add resolution entries for all nodes to /etc/hosts on every node (see the example under Prerequisites above) and redeploy.

    References

    http://neharawat.in/glusterfs-volume-management-using-heketi/
    http://gluster.readthedocs.io/en/latest/Quick-Start-Guide/Quickstart/
    https://github.com/heketi/heketi/wiki
    https://github.com/gluster/gluster-kubernetes
