Dynamic Persistent Storage in Kubernetes with GlusterFS


Author: 647f379ea944 | Published 2018-08-12 09:38

    Introduction

    This article describes how to use GlusterFS to provide dynamic PV provisioning for Kubernetes. GlusterFS supplies the underlying storage, while Heketi exposes a RESTful API on top of GlusterFS that makes it easy to manage. All three Kubernetes PV access modes are supported: ReadWriteOnce, ReadOnlyMany, and ReadWriteMany.

    Note that access modes are capability descriptions, not enforced constraints: the storage provider is only responsible for runtime errors when a PV is used in a way its PVC did not declare. For example, if a PVC's access mode is ReadOnlyMany, a pod that mounts it can still write to it; to make the volume truly non-writable, you must specify the readOnly: true parameter when using the claim.
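    
    Concretely, the readOnly flag lives on the pod's volume reference, not on the PVC itself. A minimal sketch (the claim name gluster1 matches the PVC created later in this article; the volume name is illustrative):
    
    volumes:
    - name: data
      persistentVolumeClaim:
        claimName: gluster1
        readOnly: true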

    Installation

    The Vagrantfile used for this lab:

    # -*- mode: ruby -*-
    # vi: set ft=ruby :
    
    ENV["LC_ALL"] = "en_US.UTF-8"
    
    Vagrant.configure("2") do |config|
        (1..3).each do |i|
          config.vm.define "lab#{i}" do |node|
            node.vm.box = "centos-7.4-docker-17"
            node.ssh.insert_key = false
            node.vm.hostname = "lab#{i}"
            node.vm.network "private_network", ip: "11.11.11.11#{i}"
            node.vm.provision "shell",
              inline: "echo hello from node #{i}"
            node.vm.provider "virtualbox" do |v|
              v.cpus = 2
              v.customize ["modifyvm", :id, "--name", "lab#{i}", "--memory", "3096"]
              file_to_disk = "lab#{i}_vdb.vdi"
              unless File.exist?(file_to_disk)
            # create a 50 GB virtual disk to serve as the GlusterFS brick device
                v.customize ['createhd', '--filename', file_to_disk, '--size', 50 * 1024]
              end
              v.customize ['storageattach', :id, '--storagectl', 'IDE', '--port', 1, '--device', 0, '--type', 'hdd', '--medium', file_to_disk]
            end
          end
        end
    end
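    
    With this Vagrantfile, the three-node lab (lab1-lab3, each with an extra blank 50 GB disk that will appear as /dev/sdb) can be brought up with the standard Vagrant workflow:
    
    vagrant up
    vagrant ssh lab1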
    

    Environment configuration

    # GlusterFS requires the dm_thin_pool kernel module; load it on every node in advance
    modprobe dm_thin_pool
    
    # Load the module automatically on boot
    cat >/etc/modules-load.d/glusterfs.conf<<EOF
    dm_thin_pool
    EOF
    
    # Install glusterfs-fuse (used by kubelet to mount GlusterFS volumes)
    yum install -y glusterfs-fuse
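    
    # Optional sanity check (not in the original steps): confirm the module is loaded
    lsmod | grep dm_thin_pool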
    

    Installing GlusterFS and Heketi

    # Install the heketi client
    # Download the matching release from GitHub:
    # https://github.com/heketi/heketi/releases
    wget https://github.com/heketi/heketi/releases/download/v7.0.0/heketi-client-v7.0.0.linux.amd64.tar.gz
    tar xf heketi-client-v7.0.0.linux.amd64.tar.gz
    cp heketi-client/bin/heketi-cli /usr/local/bin
    
    # Check the version
    heketi-cli -v
    
    # All of the following deployment steps are executed from this directory
    cd heketi-client/share/heketi/kubernetes
    
    # Deploy GlusterFS in Kubernetes
    kubectl create -f glusterfs-daemonset.json
    
    # List the nodes
    kubectl get nodes
    
    # Label the nodes that will provide storage
    # (the daemonset schedules its pods onto nodes with this label)
    kubectl label node lab1 lab2 lab3 storagenode=glusterfs
    
    # Check the GlusterFS pod status
    kubectl get pods -o wide
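    
    # Optional: wait until all three GlusterFS pods are Running before continuing
    # (assumes the pod names start with "glusterfs", as created by the daemonset,
    # and that all three labeled nodes run a pod)
    until [ "$(kubectl get pods | grep glusterfs | grep -c Running)" -eq 3 ]; do sleep 5; done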
    
    # Deploy the heketi server
    # Grant the heketi server the permissions it needs
    kubectl create -f heketi-service-account.json
    kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account
    
    # Create the config secret
    kubectl create secret generic heketi-config-secret --from-file=./heketi.json
    
    # Bootstrap deployment
    kubectl create -f heketi-bootstrap.json
    
    # Check the heketi bootstrap status
    kubectl get pods -o wide
    kubectl get svc
    
    # Forward a local port to the heketi server
    HEKETI_BOOTSTRAP_POD=$(kubectl get pods | grep deploy-heketi | awk '{print $1}')
    kubectl port-forward $HEKETI_BOOTSTRAP_POD 58080:8080
    
    # Test access (run in another terminal); a greeting such as
    # "Hello from heketi" confirms the server is reachable
    curl http://localhost:58080/hello
    
    # Configure the GlusterFS topology
    # The hostnames/manage entries must match the node names shown by kubectl get nodes
    # The hostnames/storage entries specify the storage-network IPs; this lab reuses the k8s cluster IPs
    cat >topology.json<<EOF
    {
      "clusters": [
        {
          "nodes": [
            {
              "node": {
                "hostnames": {
                  "manage": [
                    "lab1"
                  ],
                  "storage": [
                    "11.11.11.111"
                  ]
                },
                "zone": 1
              },
              "devices": [
                {
                  "name": "/dev/sdb",
                  "destroydata": false
                }
              ]
            },
            {
              "node": {
                "hostnames": {
                  "manage": [
                    "lab2"
                  ],
                  "storage": [
                    "11.11.11.112"
                  ]
                },
                "zone": 1
              },
              "devices": [
                {
                  "name": "/dev/sdb",
                  "destroydata": false
                }
              ]
            },
            {
              "node": {
                "hostnames": {
                  "manage": [
                    "lab3"
                  ],
                  "storage": [
                    "11.11.11.113"
                  ]
                },
                "zone": 1
              },
              "devices": [
                {
                  "name": "/dev/sdb",
                  "destroydata": false
                }
              ]
            }
          ]
        }
      ]
    }
    EOF
    export HEKETI_CLI_SERVER=http://localhost:58080
    heketi-cli topology load --json=topology.json
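    
    # Optional: verify that all nodes and devices were registered
    heketi-cli topology info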
    
    # Use Heketi to create the volume that stores Heketi's own database;
    # this also generates heketi-storage.json in the current directory
    heketi-cli setup-openshift-heketi-storage
    kubectl create -f heketi-storage.json
    
    # Check the status
    # Wait until every job has finished (status Completed)
    # before moving on to the next step
    kubectl get pods
    kubectl get job
    
    # Remove the resources created during bootstrapping
    kubectl delete all,service,jobs,deployment,secret --selector="deploy-heketi"
    
    # Deploy the long-lived heketi server
    kubectl create -f heketi-deployment.json
    
    # Check the heketi server status
    kubectl get pods -o wide
    kubectl get svc
    
    # Inspect heketi's state
    # Forward a local port to the heketi server
    HEKETI_POD=$(kubectl get pods | grep heketi | awk '{print $1}')
    kubectl port-forward $HEKETI_POD 58080:8080
    export HEKETI_CLI_SERVER=http://localhost:58080
    heketi-cli cluster list
    heketi-cli volume list
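    
    # Optional: the registered nodes can also be listed directly
    heketi-cli node list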
    

    Testing

    # Create the StorageClass
    # Authentication is disabled, so
    # restuser and restuserkey can be set to arbitrary values
    HEKETI_SERVER=$(kubectl get svc | grep heketi | head -1 | awk '{print $3}')
    echo $HEKETI_SERVER
    cat >storageclass-glusterfs.yaml<<EOF
    kind: StorageClass
    apiVersion: storage.k8s.io/v1
    metadata:
      name: gluster-heketi
    provisioner: kubernetes.io/glusterfs
    parameters:
      resturl: "http://$HEKETI_SERVER:8080"
      restauthenabled: "false"
      restuser: "will"
      restuserkey: "will"
      gidMin: "40000"
      gidMax: "50000"
      volumetype: "replicate:3"
    EOF
    kubectl create -f storageclass-glusterfs.yaml
    
    # Verify
    kubectl get sc
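    
    # Optional (not part of the original setup): mark this class as the cluster
    # default so PVCs without an explicit class use it
    kubectl patch storageclass gluster-heketi -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'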
    
    # Create a PVC to test dynamic provisioning
    cat >gluster-pvc-test.yaml<<EOF
    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: gluster1
      annotations:
        volume.beta.kubernetes.io/storage-class: gluster-heketi
    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 5Gi
    EOF
    kubectl apply -f gluster-pvc-test.yaml
     
    # Verify
    kubectl get pvc
    kubectl get pv
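    
    # Optional: the claim should report phase "Bound" once provisioning finishes
    kubectl get pvc gluster1 -o jsonpath='{.status.phase}'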
     
    # Create an nginx pod to test mounting the volume
    cat >nginx-pod.yaml<<EOF
    apiVersion: v1
    kind: Pod
    metadata:
      name: nginx-pod1
      labels:
        name: nginx-pod1
    spec:
      containers:
      - name: nginx-pod1
        image: nginx:alpine
        ports:
        - name: web
          containerPort: 80
        volumeMounts:
        - name: gluster-vol1
          mountPath: /usr/share/nginx/html
      volumes:
      - name: gluster-vol1
        persistentVolumeClaim:
          claimName: gluster1
    EOF
    kubectl apply -f nginx-pod.yaml
     
    # Verify
    kubectl get pods -o wide
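    
    # Optional: block until the pod is Ready (requires kubectl >= 1.11)
    kubectl wait --for=condition=Ready pod/nginx-pod1 --timeout=120s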
     
    # Write some content into the mounted volume
    kubectl exec -ti nginx-pod1 -- /bin/sh -c 'echo Hello World from GlusterFS!!! > /usr/share/nginx/html/index.html'
     
    # Access test: fetch the page via the pod IP
    # ($(NF-1) extracts the IP column from the wide output; expect "Hello World from GlusterFS!!!")
    POD_IP=$(kubectl get pods -o wide | grep nginx-pod1 | awk '{print $(NF-1)}')
    curl http://$POD_IP
     
    # Inspect the file from inside a GlusterFS pod
    GLUSTERFS_POD=$(kubectl get pod | grep glusterfs | head -1 | awk '{print $1}')
    kubectl exec -ti $GLUSTERFS_POD -- /bin/sh
    mount | grep heketi
    # (the vg_*/brick_* IDs will differ in your environment)
    cat /var/lib/heketi/mounts/vg_56033aa8a9131e84faa61a6f4774d8c3/brick_1ac5f3a0730457cf3fcec6d881e132a2/brick/index.html
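    
    # Optional cleanup (back on the management host): deleting the pod and the
    # claim also removes the dynamically provisioned PV and its GlusterFS volume,
    # since dynamically provisioned PVs default to the Delete reclaim policy
    kubectl delete pod nginx-pod1
    kubectl delete pvc gluster1
    kubectl get pv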
    

