
Deploying Ceph and using Ceph RBD on OpenShift

Author: 潘晓华 (Michael) | Published 2018-07-21 10:37

    Machines used for the Ceph distributed block storage deployment

    Node              Cores  Memory (GB)  IP           Hostname          Attached disk (GB)
    admin (mgmt)      2      4            192.168.1.2  admin.ceph.com    -
    monitor (mon)     2      4            192.168.1.3  monitor.ceph.com  -
    node1 (storage)   2      4            192.168.1.4  node1.ceph.com    100
    node2 (storage)   2      4            192.168.1.5  node2.ceph.com    100

    Deploying Ceph RBD

    1. Set the hostname on each machine

    # Set the hostname
     hostnamectl --static set-hostname admin.ceph.com   #192.168.1.2
     hostnamectl --static set-hostname monitor.ceph.com #192.168.1.3
     hostnamectl --static set-hostname node1.ceph.com   #192.168.1.4
     hostnamectl --static set-hostname node2.ceph.com   #192.168.1.5
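     # These hostnames must resolve from every machine for ceph-deploy and the
     # ansible commands below to work. If no DNS is in place, one option (a minimal
     # sketch assuming the IPs from the table above) is to append entries to
     # /etc/hosts on each node:
     echo "192.168.1.2 admin.ceph.com"   | sudo tee -a /etc/hosts
     echo "192.168.1.3 monitor.ceph.com" | sudo tee -a /etc/hosts
     echo "192.168.1.4 node1.ceph.com"   | sudo tee -a /etc/hosts
     echo "192.168.1.5 node2.ceph.com"   | sudo tee -a /etc/hosts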
    

    2. Create a ceph user on every server

    # Add the ceph user on all hosts (the hosts inventory file is sketched after this block)
    ansible -i hosts all -m user -a "name=ceph"
    # Log in to each machine and set the ceph user's password with passwd
    [root@admin ~]# passwd ceph
    Changing password for user ceph.
    New password: ceph
    BAD PASSWORD: The password is shorter than 8 characters
    Retype new password: ceph
    passwd: all authentication tokens updated successfully.
    # Grant the ceph user passwordless sudo on every server (run as root)
    ansible -i hosts all -m shell -a 'echo "ceph ALL=(root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/ceph'
    ansible -i hosts all -m shell -a 'chmod 0440 /etc/sudoers.d/ceph'
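    # The ansible calls above and below use an inventory file that the article never
    # shows (it is referred to as both "hosts" and "ceph-hosts"). A minimal, assumed
    # sketch listing the four nodes:
    $ cat hosts
    [ceph]
    admin.ceph.com
    monitor.ceph.com
    node1.ceph.com
    node2.ceph.com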
    
    3. Set up passwordless SSH from the admin node to the other servers
    [ceph@admin ~]$ ssh-keygen 
    Generating public/private rsa key pair.
    Enter file in which to save the key (/home/ceph/.ssh/id_rsa): 
    Created directory '/home/ceph/.ssh'.
    Enter passphrase (empty for no passphrase): 
    Enter same passphrase again: 
    Your identification has been saved in /home/ceph/.ssh/id_rsa.
    Your public key has been saved in /home/ceph/.ssh/id_rsa.pub.
    The key fingerprint is:
    SHA256:rH/HNUPm4HPtxOXzbndOwGzpy6bwA1frhW9S5cywl2Q ceph@admin.ceph.com
    The key's randomart image is:
    +---[RSA 2048]----+
    |                 |
    |                 |
    |                 |
    |       .    .o=Eo|
    |        S  . *O%+|
    |       .  . +oX+%|
    |      .   .+ =.X+|
    |       .  .o+.+oO|
    |        .. .o+o*=|
    +----[SHA256]-----+
    [ceph@admin ~]$ ssh-copy-id ceph@monitor.ceph.com
    [ceph@admin ~]$ ssh-copy-id ceph@node1.ceph.com
    [ceph@admin ~]$ ssh-copy-id ceph@node2.ceph.com
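    # Optional, assumed addition: a ~/.ssh/config on the admin node lets ceph-deploy
    # reach the other hosts as the ceph user without passing --username each time.
    [ceph@admin ~]$ cat ~/.ssh/config
    Host monitor.ceph.com node1.ceph.com node2.ceph.com
        User ceph
    [ceph@admin ~]$ chmod 600 ~/.ssh/config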
    
    4. Create the cluster
    # On admin.ceph.com, create the my-cluster directory
    $ ssh ceph@admin
    $ mkdir my-cluster
    $ cd my-cluster
    # Install ceph-deploy
    sudo yum install ceph-deploy
    # Uninstall any previous Ceph packages from the hosts
    ceph-deploy uninstall admin.ceph.com monitor.ceph.com node1.ceph.com node2.ceph.com
    # Purge the Ceph packages and the data under /var/lib/ceph and /etc/ceph on the remote hosts
    ceph-deploy purge admin.ceph.com monitor.ceph.com node1.ceph.com node2.ceph.com
    # Purge everything left under /var/lib/ceph and /etc/ceph
    ceph-deploy purgedata admin.ceph.com monitor.ceph.com node1.ceph.com node2.ceph.com
    # Remove the authentication key files from the my-cluster directory
    ceph-deploy forgetkeys
    # Stop the firewall and disable SELinux enforcement on all nodes (machines on the QingCloud platform have these off by default, so this step can be skipped there)
    ansible -i ceph-hosts all -m service -a 'name=iptables state=stopped'
    ansible -i ceph-hosts all -m shell -a 'setenforce 0'
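    # Note: "setenforce 0" does not survive a reboot. To keep SELinux permissive
    # permanently (an assumed extra step, not in the original write-up), the config
    # file can be updated as well:
    ansible -i ceph-hosts all -m shell -a "sed -i 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config"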
    
    # Create the cluster
    $ ceph-deploy new monitor.ceph.com
    # Three files are generated under ~/my-cluster
    $ ll
    total 24
    -rw-rw-r-- 1 ceph ceph   251 Jan 12 16:34 ceph.conf
    -rw-rw-r-- 1 ceph ceph 15886 Jan 12 16:30 ceph.log
    -rw------- 1 ceph ceph    73 Jan 12 16:30 ceph.mon.keyring
    
    # The default osd pool size is 3, but this cluster only has 2 OSDs, so add
    # 'osd pool default size = 2' to the [global] section of ceph.conf
    [ceph@admin my-cluster]$ cat ceph.conf 
    [global]
    fsid = 25c13add-967e-4912-bb33-ebbc2cb9376d
    mon_initial_members = monitor
    mon_host = 192.168.1.3
    auth_cluster_required = cephx
    auth_service_required = cephx
    auth_client_required = cephx
    filestore_xattr_use_omap = true
    osd pool default size = 2
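    # Because ceph.conf is edited before "ceph-deploy install" and "ceph-deploy admin"
    # run below, the change is distributed together with the rest of the configuration.
    # If the file is modified again after deployment, it can be pushed back out to the
    # nodes (a hedged aside, assuming the same node list):
    ceph-deploy --overwrite-conf config push admin.ceph.com monitor.ceph.com node1.ceph.com node2.ceph.com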
    
    # Install Ceph on all nodes
    ceph-deploy install admin.ceph.com monitor.ceph.com node1.ceph.com node2.ceph.com
    # Create the Ceph monitor and gather the keys
     $ ceph-deploy mon create monitor.ceph.com
     $ ceph-deploy gatherkeys monitor.ceph.com
    # Create the Ceph OSDs
    # On node1.ceph.com: format the attached disk /dev/sdc and mount it for the OSD
    $ sudo mkfs.xfs -f /dev/sdc
    $ sudo mkdir -p /var/lib/ceph/osd/osd-0
    $ sudo mount /dev/sdc /var/lib/ceph/osd/osd-0
    $ sudo chown ceph:ceph /var/lib/ceph/osd/osd-0
    # On node2.ceph.com, format and mount its disk at /var/lib/ceph/osd/osd-1 in the same way
    $ sudo mkfs.xfs -f /dev/sdc
    $ sudo mkdir -p /var/lib/ceph/osd/osd-1
    $ sudo mount /dev/sdc /var/lib/ceph/osd/osd-1
    $ sudo chown ceph:ceph /var/lib/ceph/osd/osd-1
    # Back on the admin node, prepare and activate the OSDs
    $ ceph-deploy osd prepare node1.ceph.com:/var/lib/ceph/osd/osd-0 node2.ceph.com:/var/lib/ceph/osd/osd-1
    $ ceph-deploy osd activate node1.ceph.com:/var/lib/ceph/osd/osd-0 node2.ceph.com:/var/lib/ceph/osd/osd-1
    # Copy the configuration file and keys to all nodes
    $ ceph-deploy admin admin.ceph.com monitor.ceph.com node1.ceph.com node2.ceph.com
    # Make ceph.client.admin.keyring readable (on both the admin and monitor nodes)
     $ sudo chmod +r /etc/ceph/ceph.client.admin.keyring
    # Check the cluster status
    $ ceph health
    $ ceph osd tree   # list the OSD nodes
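    # Optional sanity check (not in the original article): confirm that the default
    # rbd pool accepts images before wiring the cluster into OpenShift. The image
    # name test-img is arbitrary.
    $ rbd create test-img --size 1024
    $ rbd ls
    $ rbd rm test-img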
    # Done!
    

    Creating an RBD StorageClass on OpenShift

    1. First, look at the ceph.client.admin.keyring file in the my-cluster directory
    $ cat ceph.client.admin.keyring 
    [client.admin]
        key = AQBUilha86ufLhAA2BxJn7sG8qVYndokVwtvyA==
        caps mds = "allow *"
        caps mon = "allow *"
        caps osd = "allow *"
    # Use the admin key to create a secret on OpenShift
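    # Tip (not in the original article): the key can also be printed directly, which
    # avoids copying it out of the keyring file by hand:
    $ ceph auth get-key client.admin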
    
    2. Create the secret with the admin key
    oc  create secret generic ceph-secret --type="kubernetes.io/rbd" --from-literal=key='AQBUilha86ufLhAA2BxJn7sG8qVYndokVwtvyA==' --namespace=kube-system
    
    3. The secret also has to be created in every project that will use ceph-rbd volumes
    oc  create secret generic ceph-secret --type="kubernetes.io/rbd" --from-literal=key='AQBUilha86ufLhAA2BxJn7sG8qVYndokVwtvyA==' --namespace=project
    
    4. Create the StorageClass
    # storageclass.yaml
    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      name: ceph-rbd-sc
    provisioner: kubernetes.io/rbd
    parameters:
      monitors: 192.168.1.3:6789
      adminId: admin
      adminSecretName: ceph-secret
      adminSecretNamespace: kube-system
      pool: rbd
      userId: admin
      userSecretName: ceph-secret
    # oc create -f storageclass.yaml
    

    Note: adminId defaults to admin, pool defaults to rbd, and userId defaults to the same value as adminId, so these three parameters can be omitted.
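    If you would rather not use the default rbd pool, a dedicated pool can be created on the Ceph side and referenced from the StorageClass instead; a minimal sketch (the pool name kube and PG count 64 are arbitrary example values):
    # Run on a node that has the admin keyring
    ceph osd pool create kube 64
    # then set "pool: kube" in the StorageClass parameters instead of rbd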

    5. Create a PVC (either from a YAML file or from the OpenShift web console by selecting the StorageClass)
    # ceph-rbd-pvc.yaml 
    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: myclaim
    spec:
      accessModes:
        - ReadWriteOnce
      resources:
        requests:
          storage: 8Gi
      storageClassName: ceph-rbd-sc
    # oc create -f ceph-rbd-pvc.yaml -n project
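    To verify that the claim binds and that the RBD volume can actually be mounted, a throwaway pod can reference it; a minimal sketch (the pod name rbd-test and the busybox image are arbitrary example values):
    # rbd-test-pod.yaml
    kind: Pod
    apiVersion: v1
    metadata:
      name: rbd-test
    spec:
      containers:
        - name: app
          image: busybox
          command: ["sleep", "3600"]
          volumeMounts:
            - name: data
              mountPath: /data
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: myclaim
    # oc create -f rbd-test-pod.yaml -n project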
    

    Results

    Figure: a Ceph-backed PVC bound in OpenShift (Ceph存储PVC.png)
