Deploying Ceph (Mimic) with ceph-deploy


Author: 挑战_bae7 | Published 2021-04-01 20:48
    Host      public IP    roles         cluster IP       disk
    cephadm   10.0.0.51    admin         192.168.199.51   vda 40G
    mon01     10.0.0.52    mon,osd       192.168.199.52   vda 40G
    mon02     10.0.0.53    mon,osd       192.168.199.53   vda 40G
    mon03     10.0.0.54    mon,osd,mgr   192.168.199.54   vda 40G
    stor04    10.0.0.55    osd,mgr       192.168.199.55   vda 40G

    1. Prepare the environment: disable SELinux and the firewall, set up time synchronization, create the cephadm user

    Run on cephadm:
    yum  install -y ansible
    vim /etc/ansible/ansible.cfg
    module_set_locale = False
    vim /etc/ansible/hosts
    [cephadm]
    10.0.0.51 host=cephadm ansible_connection=local
    [ceph]
    10.0.0.52 host=mon01
    10.0.0.53 host=mon02
    10.0.0.54 host=mon03
    10.0.0.55 host=stor04
    [all:vars]
    domain=local.cn
    # create an unprivileged user for installing ceph and grant it passwordless sudo as root
    useradd cephadm && echo "456" |passwd --stdin cephadm
    vim /etc/sudoers.d/cephadm 
    cephadm  ALL=(root)  NOPASSWD:  ALL
    vim /etc/hosts
    10.0.0.51 cephadm
    10.0.0.52 mon01
    10.0.0.53 mon02
    10.0.0.54 mon03
    10.0.0.55 stor04
    
    ansible all -k  -m lineinfile -a "path=/etc/selinux/config regexp='^SELINUX=' line='SELINUX=disabled'"
    ansible all -k -m shell -a "setenforce 0"
    ansible all -k -m service -a "name=firewalld state=stopped enabled=no"
    ansible all -k -m shell -a "useradd cephadm && echo "456" |passwd --stdin cephadm"
    ansible all -k  -m copy -a "src=/etc/sudoers.d/cephadm dest=/etc/sudoers.d/cephadm"
    ansible all -k  -m copy -a "src=/etc/hosts dest=/etc/hosts"
    ansible all -k -m hostname -a 'name={{ host }}.{{ domain }}'
    ansible 10.0.0.51 -m replace -a "path=/etc/chrony.conf regexp='(server 0.*)' replace='server ntp.aliyun.com iburst'"
    ansible 10.0.0.51 -m replace -a "path=/etc/chrony.conf regexp='(server [1-3].*)' replace='#\1'"
    ansible 10.0.0.51 -m replace -a "path=/etc/chrony.conf regexp='(#allow.*)' replace='allow 10.0.0.0/24'"
    ansible ceph -k -m replace -a "path=/etc/chrony.conf regexp='(server 0.*)' replace='server 10.0.0.51 iburst'"
    ansible ceph -k -m replace -a "path=/etc/chrony.conf regexp='(server [1-3].*)' replace='#\1'"
    ansible -k all -m service -a "name=chronyd state=restarted"
    # set up passwordless SSH for the cephadm user between all hosts
    su - cephadm
    mkdir -p ~/.ssh && chmod 700 ~/.ssh
    cat > ~/.ssh/config <<EOF
    StrictHostKeyChecking no
    EOF
    chmod 400 ~/.ssh/config
    # the script below uses sshpass (yum install -y sshpass)
    vim sshkey.sh
    #!/bin/bash
    [ ! -f ~/.ssh/id_rsa ] && ssh-keygen -f ~/.ssh/id_rsa -P ''
    NET=10.0.0
    export SSHPASS=456
    for IP in {51..55};do
      sshpass -e ssh-copy-id $NET.$IP
    done
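    A quick sanity check (a minimal sketch based on the inventory above) to confirm passwordless SSH and time sync before moving on:
    # as cephadm on the admin node: each host should answer without a password prompt
    for IP in 10.0.0.{52..55}; do ssh cephadm@$IP hostname; done
    # on any ceph node: 10.0.0.51 should be listed as the time source
    chronyc sources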
    

    2. Deploy Ceph Mimic

    2.1 Initialize the cluster

    On cephadm:
    rpm -ivh http://mirrors.aliyun.com/ceph/rpm-mimic/el7/noarch/ceph-release-1-1.el7.noarch.rpm
    yum install -y epel-release
     yum install ceph-deploy python-setuptools python2-subprocess32 -y
    
    [root@cephadm ~]# su - cephadm 
    [cephadm@cephadm ~]$ mkdir ceph-cluster
    [cephadm@cephadm ~]$ cd ceph-cluster
    ceph-deploy new --help   # show help
    [cephadm@cephadm ceph-cluster]$ ceph-deploy new --cluster-network 192.168.199.0/24 --public-network=10.0.0.0/24 mon01   # initialize the first mon node
    [cephadm@cephadm ceph-cluster]$ ll
    -rw-rw-r-- 1 cephadm cephadm  256 Mar 31 15:36 ceph.conf
    -rw-rw-r-- 1 cephadm cephadm 3264 Mar 31 15:36 ceph-deploy-ceph.log
    -rw------- 1 cephadm cephadm   73 Mar 31 15:36 ceph.mon.keyring
    [cephadm@cephadm ceph-cluster]$ cat ceph.conf   # if the networks were not given to 'ceph-deploy new', they can also be set by editing this file
    [global]
    fsid = f59d07d8-b127-46f7-b990-f34083581cfc
    public_network = 10.0.0.0/24
    cluster_network = 192.168.199.0/24
    mon_initial_members = mon01
    mon_host = 10.0.0.52
    auth_cluster_required = cephx
    auth_service_required = cephx
    auth_client_required = cephx
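    Only mon01 was passed to ceph-deploy new, so the cluster starts with a single monitor and the other two are added later (section 2.8). If you prefer all three monitors in the initial quorum, they can be listed up front; a possible alternative:
    ceph-deploy new --cluster-network 192.168.199.0/24 --public-network 10.0.0.0/24 mon01 mon02 mon03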
    

    2.2 Install Ceph on the cluster nodes mon01, mon02, mon03, stor04

    [cephadm@cephadm ceph-cluster]$ ceph-deploy install mon01 mon02 mon03 stor04
    This is equivalent to SSHing to each remote host and running:
    rpm -ivh http://mirrors.aliyun.com/ceph/rpm-mimic/el7/noarch/ceph-release-1-1.el7.noarch.rpm
    yum -y install epel-release
    yum install -y ceph ceph-radosgw
    ceph-deploy install --no-adjust-repos mon01 mon02 mon03 stor04   # --no-adjust-repos keeps the repo files already configured on the nodes
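    A simple way to confirm the packages landed everywhere is to query the version over the ansible inventory from section 1; mimic builds report 13.2.x:
    ansible ceph -k -m shell -a 'ceph --version'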
    

    2.3 Initialize the mon node and gather all keys

    [cephadm@cephadm ceph-cluster]$ ceph-deploy mon create-initial   # reads ceph.conf, initializes the mon(s) listed there and gathers the keys
    [cephadm@cephadm ceph-cluster]$ ll
    total 220
    -rw------- 1 cephadm cephadm    113 Mar 31 16:36 ceph.bootstrap-mds.keyring
    -rw------- 1 cephadm cephadm    113 Mar 31 16:36 ceph.bootstrap-mgr.keyring
    -rw------- 1 cephadm cephadm    113 Mar 31 16:36 ceph.bootstrap-osd.keyring
    -rw------- 1 cephadm cephadm    113 Mar 31 16:36 ceph.bootstrap-rgw.keyring
    -rw------- 1 cephadm cephadm    151 Mar 31 16:36 ceph.client.admin.keyring
    -rw-rw-r-- 1 cephadm cephadm    256 Mar 31 15:36 ceph.conf
    -rw-rw-r-- 1 cephadm cephadm 124475 Mar 31 16:36 ceph-deploy-ceph.log
    -rw------- 1 cephadm cephadm     73 Mar 31 15:36 ceph.mon.keyring
    

    2.4 Copy the config file and admin keyring to the other nodes

    If a node does not need interactive admin access, pushing only the config file is enough.
    [cephadm@cephadm ceph-cluster]$ ceph-deploy admin mon01 mon02 mon03 stor04
    [root@mon01 ~]# ll /etc/ceph/   # the cephadm user cannot read the admin keyring yet
    total 12
    -rw------- 1 root root 151 Mar 31 16:39 ceph.client.admin.keyring
    -rw-r--r-- 1 root root 256 Mar 31 16:39 ceph.conf 
    -rw-r--r-- 1 root root  92 Apr 24 2020 rbdmap
    -rw------- 1 root root   0 Mar 31 16:36 tmpjtTfFI
    
    setfacl -m u:cephadm:rw /etc/ceph/ceph.client.admin.keyring   # grant the cephadm user access
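    The same ACL can be pushed to every node from the admin host instead of logging in to each one; a small sketch using the ansible inventory from section 1:
    ansible ceph -k -m shell -a 'setfacl -m u:cephadm:rw /etc/ceph/ceph.client.admin.keyring'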
    

    2.5 Deploy a manager node (Luminous and later only)

    [cephadm@cephadm ceph-cluster]$ ceph-deploy mgr create  mon03
    

    2.6 Check the cluster status

    [cephadm@cephadm ceph-cluster]$ sudo yum install ceph-common -y
    [cephadm@cephadm ceph-cluster]$ ceph-deploy admin cephadm
    [cephadm@cephadm ceph-cluster]$ sudo setfacl -m u:cephadm:rw /etc/ceph/ceph.client.admin.keyring
    [cephadm@cephadm ceph-cluster]$ ceph -s
      cluster:
        id:     f59d07d8-b127-46f7-b990-f34083581cfc
        health: HEALTH_WARN
                OSD count 0 < osd_pool_default_size 3
     
      services:
        mon: 1 daemons, quorum mon01
        mgr: mon03(active)
        osd: 0 osds: 0 up, 0 in
     
      data:
        pools:   0 pools, 0 pgs
        objects: 0  objects, 0 B
        usage:   0 B used, 0 B / 0 B avail
        pgs:     
    

    2.7 Add OSDs to the RADOS cluster

    Zap the disks (destroys any existing data and partition tables):
    ceph-deploy disk zap mon01 /dev/vda
    ceph-deploy disk zap mon02 /dev/vda
    ceph-deploy disk zap mon03 /dev/vda
    ceph-deploy disk zap stor04 /dev/vda
    Create the OSDs:
    [cephadm@cephadm ceph-cluster]$ ceph-deploy osd --help
    usage: ceph-deploy osd [-h] {list,create} ...
        ceph-deploy osd create {node} --data /path/to/data --block-db /path/to/db-device
        ceph-deploy osd create {node} --data /path/to/data --block-wal /path/to/wal-device
        ceph-deploy osd create {node} --data /path/to/data --block-db /path/to/db-device --block-wal /path/to/wal-device   # --block-db holds the OSD's metadata DB, --block-wal its write-ahead log
    ceph-deploy osd create mon01 --data /dev/vda
    ceph-deploy osd create mon02 --data /dev/vda
    ceph-deploy osd create mon03 --data /dev/vda
    ceph-deploy osd create stor04 --data /dev/vda
    
    [cephadm@cephadm ceph-cluster]$ ceph -s
      cluster:
        id:     f59d07d8-b127-46f7-b990-f34083581cfc
        health: HEALTH_OK
      services:
        mon: 1 daemons, quorum mon01
        mgr: mon03(active)
        osd: 4 osds: 4 up, 4 in
     
      data:
        pools:   0 pools, 0 pgs
        objects: 0  objects, 0 B
        usage:   4.0 GiB used, 156 GiB / 160 GiB avail
        pgs:     
    Query commands:
    ceph osd stat
    ceph osd dump
    ceph osd ls
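    Two more views that are handy at this point, showing how the OSDs map onto hosts and how much raw space the cluster has:
    ceph osd tree   # CRUSH hierarchy: one host bucket per node, one osd.N under each
    ceph df         # global and per-pool capacity and usage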
    

    2.8 Add mon nodes for redundancy (an odd number is recommended)

    [cephadm@cephadm ceph-cluster]$ ceph-deploy mon add mon02
    [cephadm@cephadm ceph-cluster]$ ceph-deploy mon add mon03
    [cephadm@cephadm ceph-cluster]$ ceph quorum_status --format json-pretty   # show mon quorum status in a readable format
    

    2.9 Add a standby mgr node

    [cephadm@cephadm ceph-cluster]$ ceph-deploy mgr create stor04
    [cephadm@cephadm ceph-cluster]$ ceph -s
      cluster:
        id:     f59d07d8-b127-46f7-b990-f34083581cfc
        health: HEALTH_OK
     
      services:
        mon: 3 daemons, quorum mon01,mon02,mon03
        mgr: mon03(active), standbys: stor04
        osd: 4 osds: 4 up, 4 in
     
      data:
        pools:   0 pools, 0 pgs
        objects: 0  objects, 0 B
        usage:   4.0 GiB used, 156 GiB / 160 GiB avail
        pgs:     
    

    2.10 Removing an OSD from the RADOS cluster

    # mark the OSD out
    [root@cephL ceph-deploy]# ceph osd out 0
    marked out osd.0.
    # watch the data migrate off it
    [root@cephL ceph-deploy]# ceph -w
    # stop the corresponding OSD daemon
    [root@cephL ceph-deploy]# sudo systemctl stop ceph-osd@0
    # purge the OSD (removes it from the CRUSH map, the auth list and the OSD map)
    [root@cephL ceph-deploy]# ceph osd purge 0 --yes-i-really-mean-it
    purged osd.0
    # remove any osd-specific entries from ceph.conf
    [root@cephL ceph-deploy]# vi /etc/ceph/ceph.conf 
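    Before stopping the daemon it is safer to wait until the cluster no longer needs that OSD; a small sketch using osd.0 from the example above:
    while ! ceph osd safe-to-destroy osd.0; do sleep 10; done   # returns success only once the data has been re-replicated elsewhere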
    

    2.11 Configure the dashboard

    https://docs.ceph.com/en/latest/mgr/dashboard/

    ceph mgr module enable dashboard    # enable the dashboard module
    ceph config set mgr mgr/dashboard/ssl false    # disable SSL
    # restart the module so the setting takes effect (disable, then enable again)
    ceph mgr module disable dashboard
    ceph mgr module enable dashboard
    # set the bind address and ports on the active mgr (mon03)
    ceph config set mgr mgr/dashboard/mon03/server_addr 10.0.0.54
    ceph config set mgr mgr/dashboard/mon03/server_port 8080
    ceph config set mgr mgr/dashboard/mon03/ssl_server_port 8443
    # create a dashboard user (username test, password test)
    ceph dashboard set-login-credentials test test
    [root@mon03 ~]# ceph mgr services
    {
        "dashboard": "http://mon03.local.cn:8080/"
    }
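    If you would rather keep SSL on instead of disabling it above, the mimic dashboard can generate a self-signed certificate; a possible alternative (served on the ssl_server_port, 8443 above):
    ceph dashboard create-self-signed-cert
    ceph config set mgr mgr/dashboard/ssl true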
    

    3. Client usage

    3.1 Create and mount an RBD block device

    Create a pool. Before creating a pool you normally override the default pg_num; the upstream guidance is:
    fewer than 5 OSDs: set pg_num to 128
    5-10 OSDs: set pg_num to 512
    10-50 OSDs: set pg_num to 4096
    more than 50 OSDs: use the pgcalc tool
    ceph osd pool create mypool 128 128   # create the pool (3 replicas by default)
    rbd pool init mypool                  # initialize the pool for RBD
    rbd create mypool/disk01 --size 10G   # create a 10G image
    sudo rbd map mypool/disk01            # fails on older kernels: upgrade the kernel, or disable the unsupported features:
    rbd feature disable mypool/disk01 exclusive-lock object-map fast-diff deep-flatten
    sudo rbd map mypool/disk01            # map the image to a local block device
    Or:
    rbd create mypool/disk02 --size 10G --image-feature layering   # create a 10G image with only the layering feature
    rbd ls -l mypool                      # list the images in the pool
    sudo rbd map mypool/disk02            # map the image to a local block device
    
    sudo mkfs.xfs /dev/rbd0    # create an XFS filesystem
    sudo mount /dev/rbd0 /mnt  # mount it
    [cephadm@cephadm ceph-cluster]$ df -h
    /dev/rbd0                 10G   33M   10G    1% /mnt
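    To undo the mapping, or to have it re-created automatically at boot through the rbdmap service shipped with ceph-common, something like the following sketch can be used (the keyring path is the default admin keyring):
    sudo umount /mnt
    sudo rbd unmap /dev/rbd0
    # one line per image in /etc/ceph/rbdmap: pool/image followed by id= and keyring=
    echo 'mypool/disk01 id=admin,keyring=/etc/ceph/ceph.client.admin.keyring' | sudo tee -a /etc/ceph/rbdmap
    sudo systemctl enable rbdmap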
    

    Upgrading the kernel on CentOS 7

    http://elrepo.org/tiki/HomePage

    Import the signing key and install the ELRepo yum repository:
    rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
    yum install https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm
    List the kernel versions available for installation:
    yum --disablerepo="*" --enablerepo="elrepo-kernel" list available
    Install the mainline kernel:
    yum --enablerepo=elrepo-kernel install kernel-ml kernel-ml-devel
    List the installed kernel boot entries:
    awk -F\' ' $1=="menuentry " {print $2}' /etc/grub2.cfg
    CentOS Linux (5.11.11-1.el7.elrepo.x86_64) 7 (Core)
    CentOS Linux (3.10.0-1127.el7.x86_64) 7 (Core)
    CentOS Linux (0-rescue-d7999659ea104b01a700c01370e679df) 7 (Core)
    Make the 5.11.11 kernel (entry 0) the default and reboot:
    grub2-set-default 0
    reboot
    uname -r
    Remove the old kernel (optional):
    yum remove kernel
    
    # the commands below assume a pool named 'rbd' exists (e.g. ceph osd pool create rbd 128 128 && rbd pool init rbd)
    sudo rbd create rbd/disk03 --size 2G
    sudo rbd map disk03
    sudo mkfs.xfs /dev/rbd0
    sudo mount /dev/rbd0 /mnt
    

    Resizing an RBD image (partitioning inside an RBD image is not recommended)

    rbd resize rbd/disk03 --size 5G    # grow the image to 5G
    rbd -p rbd info --image disk03     # show image details
    xfs_growfs /mnt                    # grow an XFS filesystem online (takes the mount point)
    resize2fs /dev/rbd0                # grow an ext2/3/4 filesystem
    

    3.2 Mount a CephFS filesystem

    https://docs.ceph.com/en/latest/man/8/mount.ceph/
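    Note: CephFS needs at least one MDS daemon, which has not been deployed yet in this walkthrough. One can be created from the admin node before building the filesystem; the node choice here is only an example:
    [cephadm@cephadm ceph-cluster]$ ceph-deploy mds create mon01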

    Create a pool named cephfs_data:
    ceph osd pool create cephfs_data 128
    Create a pool named cephfs_metadata:
    ceph osd pool create cephfs_metadata 128
    Create the filesystem and check it:
    ceph fs new cephfs cephfs_metadata cephfs_data 
    ceph fs ls
    ceph mds stat 
    [root@stor04 mnt]# cat /etc/ceph/ceph.client.admin.keyring 
    [client.admin]
        key = AQB4NGRg1aThJxAAE1h/AKBKfnjIL6eOsUh8QA==
        caps mds = "allow *"
        caps mgr = "allow *"
        caps mon = "allow *"
        caps osd = "allow *"
    Install on the client:
    yum -y install ceph-fuse
    sudo mount -t ceph 10.0.0.52:6789:/ /mnt -o name=admin,secret=AQB4NGRg1aThJxAAE1h/AKBKfnjIL6eOsUh8QA==
    Or use a secret file:
    sudo ceph-authtool -p /etc/ceph/ceph.client.admin.keyring > admin.key 
    chmod 600 admin.key
    mount -t ceph mon01:6789:/ /mnt -o name=admin,secretfile=admin.key 
    [root@stor04 mnt]# df -h
    10.0.0.52:6789:/          100G     0   100G    0% /mnt
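    For a mount that survives reboots, an /etc/fstab entry can be added on the client; a sketch (the secret file path is an assumption, adjust it to wherever admin.key was saved):
    mon01:6789:/    /mnt    ceph    name=admin,secretfile=/etc/ceph/admin.key,_netdev,noatime    0 0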
    

    4. Deleting a Ceph pool

    [cephadm@cephadm ceph-cluster]$ vim ceph.conf   # add the option that allows pool deletion
    [global]
    mon_allow_pool_delete = true
    [cephadm@cephadm ceph-cluster]$ ceph-deploy --overwrite-conf admin cephadm mon01 mon02 mon03 stor04   # push the updated ceph.conf to all nodes
    Restart the mon (and mgr) daemons so the new option takes effect. In this cluster the mons run on mon01-mon03 and the mgrs on mon03 and stor04:
    systemctl list-units --type=service | grep ceph   # list the ceph services running on a node
    systemctl restart ceph-mon@mon01.service    # on mon01
    systemctl restart ceph-mon@mon02.service    # on mon02
    systemctl restart ceph-mon@mon03.service    # on mon03
    systemctl restart ceph-mgr@mon03.service    # on mon03
    systemctl restart ceph-mgr@stor04.service   # on stor04
    Delete the pool (the name must be written twice); a pool that is still mounted/in use cannot be deleted and reports busy:
    ceph osd pool rm mypool mypool --yes-i-really-really-mean-it 
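    If you would rather not restart the monitors, the same option can be injected into the running daemons; a possible alternative (it does not persist across mon restarts unless ceph.conf is also updated):
    ceph tell 'mon.*' injectargs '--mon-allow-pool-delete=true'
    ceph osd pool rm mypool mypool --yes-i-really-really-mean-it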
    
