Ceph Luminous: Cluster Installation Guide

Author: 圣地亚哥_SVIP | Published 2019-07-16 15:44

    Ceph Installation Guide

    Physical environment preparation

    Three virtual machines are used; their networks are built on top of the host's networking:

    Create the networks:

    This network communicates with the outside world through the host network and serves as the public network:

    pub_osd.xml:
    <network connections='10'>
      <name>lm_pub</name>
      <forward dev='br-mgmt' mode='nat'>
        <nat>
          <port start='1024' end='65535'/>
        </nat>
        <interface dev='br-mgmt'/>
      </forward>
      <bridge name='virbr1' stp='on' delay='0'/>
      <ip address='192.168.30.1' netmask='255.255.255.0'>
        <dhcp>
          <range start='192.168.30.100' end='192.168.30.254'/>
        </dhcp>
      </ip>
    </network>
    
    #virsh net-define pub_osd.xml
    #virsh net-start lm_pub
    #virsh net-autostart lm_pub
    

    OSD (cluster) network:

    <network connections='10'>
      <name>osd</name>
      <forward mode='nat'>
        <nat>
          <port start='1024' end='65535'/>
        </nat>
      </forward>
      <bridge name='virbr2' stp='on' delay='0'/>
      <ip address='192.168.130.1' netmask='255.255.255.0'>
        <dhcp>
          <range start='192.168.130.2' end='192.168.130.254'/>
        </dhcp>
      </ip>
    </network>
    
    #virsh net-define osd.xml
    #virsh net-start osd
    #virsh net-autostart osd
    

    Note:
    If a NIC comes up without an IP address after the VM is created, bring it down and back up with ifdown {interface} and ifup {interface}.
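    For example, assuming the interface shows up inside the VM as eth0 (a hypothetical name; check the actual one with ip addr):

    #ifdown eth0
    #ifup eth0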

    Create the storage disks:
    System disks, journal disks, and data disks:

    #qemu-img create -f qcow2 lm1_sys.qcow2 50G
    #qemu-img create -f qcow2 lm2_sys.qcow2 50G
    #qemu-img create -f qcow2 lm3_sys.qcow2 50G
    #qemu-img create -f qcow2 lm1_journal.qcow2 100G
    #qemu-img create -f qcow2 lm2_journal.qcow2 100G
    #qemu-img create -f qcow2 lm3_journal.qcow2 100G
    #qemu-img create -f qcow2 lm1_osd1.qcow2 100G
    #qemu-img create -f qcow2 lm1_osd2.qcow2 100G
    #qemu-img create -f qcow2 lm2_osd1.qcow2 100G
    #qemu-img create -f qcow2 lm2_osd2.qcow2 100G
    #qemu-img create -f qcow2 lm3_osd1.qcow2 100G
    #qemu-img create -f qcow2 lm3_osd2.qcow2 100G
    

    Install the VMs

    Install three virtual machines:

    #nic1, nic2: attach to networks lm_pub and osd respectively
    #attach the corresponding block devices (a virt-install sketch follows below)
    
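    One possible way to define such a VM with virt-install (a sketch only; the image directory, ISO path, and resource sizes are assumptions, adjust them to your environment):

    #virt-install --name luminous1 --memory 4096 --vcpus 2 \
      --disk path=/var/lib/libvirt/images/lm1_sys.qcow2,format=qcow2,bus=virtio \
      --disk path=/var/lib/libvirt/images/lm1_journal.qcow2,format=qcow2,bus=virtio \
      --disk path=/var/lib/libvirt/images/lm1_osd1.qcow2,format=qcow2,bus=virtio \
      --disk path=/var/lib/libvirt/images/lm1_osd2.qcow2,format=qcow2,bus=virtio \
      --network network=lm_pub,model=virtio \
      --network network=osd,model=virtio \
      --cdrom /path/to/CentOS-7-x86_64.iso \
      --os-variant centos7.0 --graphics vnc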

    Configure the package repositories:

    #An internal yum mirror is used here; a sample repo file is sketched below.
    
    #yum clean all
    #yum makecache
    
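    The repo file itself depends on the internal mirror; a minimal sketch of /etc/yum.repos.d/ceph.repo (the baseurl values are placeholders, not real mirrors):

    [ceph]
    name=Ceph packages
    baseurl=http://mirror.example.internal/ceph/rpm-luminous/el7/x86_64/
    enabled=1
    gpgcheck=0

    [ceph-noarch]
    name=Ceph noarch packages
    baseurl=http://mirror.example.internal/ceph/rpm-luminous/el7/noarch/
    enabled=1
    gpgcheck=0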

    Configure the ceph-deploy node; everything below is run on the ceph-deploy node.
    Here luminous1 serves as the deploy node.

    Configure /etc/hosts:

    192.168.30.110 luminous1 
    192.168.30.103 luminous2 
    192.168.30.202 luminous3 
    

    Set up passwordless SSH login from luminous1 to luminous{1~3}:

    #ssh-keygen 
    #ssh-copy-id luminous1
    #ssh-copy-id luminous2
    #ssh-copy-id luminous3
    

    Install Ansible for batch operations (commands can also be run node by node):

    #yum install ansible -y
    Edit /etc/ansible/hosts:
    [ceph]
    luminous1
    luminous2
    luminous3
    

    Run a quick test:

    # ansible ceph -m ping
    luminous3 | SUCCESS => {
        "changed": false, 
        "ping": "pong"
    }
    luminous1 | SUCCESS => {
        "changed": false, 
        "ping": "pong"
    }
    luminous2 | SUCCESS => {
        "changed": false, 
        "ping": "pong"
    }
    

    NTP synchronization:

    Since there is no external NTP source, lm2 and lm3 sync to lm1, and lm1 syncs to its own local clock.
    
    #ansible ceph -m command -a "yum install ntp iptables-services -y"
    

    The NTP server configuration on lm1 is as follows:
    lm1, ntp.conf:

        driftfile /var/lib/ntp/drift
        restrict default nomodify notrap nopeer noquery
        restrict 127.0.0.1 
        restrict ::1
        server 127.127.1.0     # local clock
            
        fudge  127.127.1.0 stratum 10
        includefile /etc/ntp/crypto/pw
        keys /etc/ntp/keys
        disable monitor
    

    Open the firewall:

    #iptables -I INPUT -p udp -m udp --dport 123 -j ACCEPT
    

    Copy ntp.conf to the remaining nodes, with the server set to luminous1:

    #ansible ceph -m copy -a "src=/root/ntp.conf dest=/etc/ntp.conf mode=0644" -l luminous2,luminous3
    lm2,lm3,ntp.conf:
    
        driftfile /var/lib/ntp/drift
        restrict default nomodify notrap nopeer noquery
        restrict 127.0.0.1 
        restrict ::1
        server 192.168.30.110 
            
        includefile /etc/ntp/crypto/pw
        keys /etc/ntp/keys
        disable monitor
    
    Start and enable the NTP service:
    #ansible ceph -m command -a "systemctl start ntpd"
    #ansible ceph -m command -a "systemctl enable ntpd"
    
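    To confirm that the nodes are actually syncing (luminous1 against its local clock, the others against luminous1), check the peer list; an asterisk marks the selected source:

    #ansible ceph -m command -a "ntpq -p"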

    Set SELinux to permissive:

    #ansible ceph -m command -a "setenforce 0"
    #ansible ceph -m command -a "sed -i  's/SELINUX=enforcing/SELINUX=permissive/g' /etc/selinux/config"
    

    Firewall configuration for the mon and OSD ports: monitors use 6789, OSDs use 6800-7300:

    #ansible ceph -m command -a "iptables -I INPUT -p tcp --dport 6789 -j ACCEPT"
    #ansible ceph -m command -a "iptables -I INPUT -m multiport -p tcp --dports 6800:7300 -j ACCEPT"
    #ansible ceph -m command -a "service iptables save"
    

    Deploying the Ceph cluster

    Install ceph-deploy:

    #yum install ceph-deploy -y
    

    Initialize the monitor nodes

    #mkdir ceph
    #cd ceph
    #specify the public and cluster networks
    #ceph-deploy new luminous1 luminous2 luminous3 --cluster-network 192.168.130.142/24 --public-network 192.168.30.110/24
    

    The following files are generated:

    ceph.conf  ceph-deploy-ceph.log  ceph.mon.keyring
    
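    The generated ceph.conf carries the fsid, the initial monitors, and the two networks passed above; a representative sketch (the exact fsid and addresses come from your own deployment):

    [global]
    fsid = e87cd2a8-3a98-4c60-b2f2-cb4f88c845a0
    mon_initial_members = luminous1, luminous2, luminous3
    mon_host = 192.168.30.110,192.168.30.103,192.168.30.202
    auth_cluster_required = cephx
    auth_service_required = cephx
    auth_client_required = cephx
    public_network = 192.168.30.110/24
    cluster_network = 192.168.130.142/24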

    Install the Ceph packages on all nodes:

    #ansible ceph -m command -a "yum install ceph ceph-radosgw -y"
    

    Set up the monitors:

    #ceph-deploy mon create-initial
    The required keyring files are generated:
    ceph.bootstrap-mds.keyring
    ceph.bootstrap-mgr.keyring
    ceph.bootstrap-osd.keyring
    ceph.bootstrap-rgw.keyring
    ceph.client.admin.keyring
    
    Push the admin keyring to every node:
    #ceph-deploy admin luminous1 luminous2 luminous3
    
    At this point the cluster is already healthy:
    #ceph -s
      cluster:
        id:     e87cd2a8-3a98-4c60-b2f2-cb4f88c845a0
        health: HEALTH_OK

      services:
        mon: 3 daemons, quorum luminous2,luminous1,luminous3
        mgr: no daemons active
        osd: 0 osds: 0 up, 0 in

      data:
        pools:   0 pools, 0 pgs
        objects: 0 objects, 0B
        usage:   0B used, 0B / 0B avail
        pgs:
    

    Deploy the mgr daemons and add the OSDs; the mgr daemons are colocated with the monitors:

    #ceph-deploy mgr create luminous1 luminous2 luminous3
    

    Add the OSDs:

    In recent Ceph releases, OSDs are managed with ceph-volume, and upstream recommends managing the disks with LVM.
    Each node has three extra disks: one disk holds block.db and block.wal, and the other two are used as independent OSDs.

    OSD disk setup:
    #ansible ceph -m command -a "pvcreate /dev/vdc"
    #ansible ceph -m command -a "pvcreate /dev/vdd"
    #ansible ceph -m command -a "vgcreate datavg1 /dev/vdc"
    #ansible ceph -m command -a "vgcreate datavg2 /dev/vdd"
    #ansible ceph -m command -a "lvcreate -n datalv1 -l 100%Free datavg1"
    #ansible ceph -m command -a "lvcreate -n datalv2 -l 100%Free datavg2"
    
    SSD disk setup (block.db, block.wal):
    #ansible ceph -m command -a "parted /dev/vdb mklabel gpt"
    #ansible ceph -m command -a "parted /dev/vdb mkpart primary 2048s 25%"
    #ansible ceph -m command -a "parted /dev/vdb mkpart primary 25% 50%"
    #ansible ceph -m command -a "parted /dev/vdb mkpart primary 50% 75%"
    #ansible ceph -m command -a "parted /dev/vdb mkpart primary 75% 100%"
    
    #ansible ceph -m command -a "pvcreate /dev/vdb1"
    #ansible ceph -m command -a "pvcreate /dev/vdb2"
    #ansible ceph -m command -a "pvcreate /dev/vdb3"
    #ansible ceph -m command -a "pvcreate /dev/vdb4"
    
    #ansible ceph -m command -a "vgcreate block_db_vg1 /dev/vdb1"
    #ansible ceph -m command -a "vgcreate block_db_vg2 /dev/vdb2"
    #ansible ceph -m command -a "vgcreate block_wal_vg1 /dev/vdb3"
    #ansible ceph -m command -a "vgcreate block_wal_vg2 /dev/vdb4"
    
    #ansible ceph -m command -a "lvcreate -n dblv1 -l 100%Free block_db_vg1"
    #ansible ceph -m command -a "lvcreate -n dblv2 -l 100%Free block_db_vg2"
    #ansible ceph -m command -a "lvcreate -n wallv1 -l 100%Free block_wal_vg1"
    #ansible ceph -m command -a "lvcreate -n wallv2 -l 100%Free block_wal_vg2"
    
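    Before creating the OSDs, it is worth confirming that the volume groups and logical volumes exist on every node:

    #ansible ceph -m command -a "lvs"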

    Create and activate the OSDs:

    #ceph-deploy --overwrite-conf osd create --bluestore --data datavg1/datalv1 --block-db block_db_vg1/dblv1 --block-wal block_wal_vg1/wallv1 luminous1
    #ceph-deploy --overwrite-conf osd create --bluestore --data datavg2/datalv2 --block-db block_db_vg2/dblv2 --block-wal block_wal_vg2/wallv2 luminous1
    #ceph-deploy --overwrite-conf osd create --bluestore --data datavg1/datalv1 --block-db block_db_vg1/dblv1 --block-wal block_wal_vg1/wallv1 luminous2
    #ceph-deploy --overwrite-conf osd create --bluestore --data datavg2/datalv2 --block-db block_db_vg2/dblv2 --block-wal block_wal_vg2/wallv2 luminous2
    #ceph-deploy --overwrite-conf osd create --bluestore --data datavg1/datalv1 --block-db block_db_vg1/dblv1 --block-wal block_wal_vg1/wallv1 luminous3
    #ceph-deploy --overwrite-conf osd create --bluestore --data datavg2/datalv2 --block-db block_db_vg2/dblv2 --block-wal block_wal_vg2/wallv2 luminous3
    
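    Once all six OSDs are up, the CRUSH tree should show two OSDs under each host:

    #ceph osd tree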

    The Ceph cluster deployment is now complete:

    #ceph -s
      cluster:
        id:     e87cd2a8-3a98-4c60-b2f2-cb4f88c845a0
        health: HEALTH_OK
     
      services:
        mon: 3 daemons, quorum luminous2,luminous1,luminous3
        mgr: luminous1(active), standbys: luminous2, luminous3
        osd: 6 osds: 6 up, 6 in
     
      data:
        pools:   0 pools, 0 pgs
        objects: 0 objects, 0B
        usage:   6.02GiB used, 594GiB / 600GiB avail
        pgs:     
    
    #ceph mgr module enable dashboard  #enable the mgr dashboard; it listens on port 7000 by default
    

    Configure DNAT on the host so the portal can be reached via the host's external IP; the detailed configuration is covered in another blog post.
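    A minimal DNAT sketch on the host (assuming its external interface is eth0, a hypothetical name), forwarding port 7000 to the active mgr:

    #iptables -t nat -A PREROUTING -i eth0 -p tcp --dport 7000 -j DNAT --to-destination 192.168.30.110:7000
    #iptables -I FORWARD -p tcp -d 192.168.30.110 --dport 7000 -j ACCEPT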

    The next chapter covers a highly available multi-gateway RGW deployment,
    built on HAProxy and Keepalived.

    Upgrade to Nautilus

    #ansible ceph -m command -a "yum update ceph ceph-radosgw -y"
    #ansible ceph -m command -a "iptables -I INPUT -p tcp --dport 3300 -j ACCEPT"
    #ansible ceph -m command -a "service iptables save"
    #ceph mon enable-msgr2
    
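    After enabling msgr2, each monitor should advertise both a v2 (3300) and a v1 (6789) address, and all daemons should report the new release:

    #ceph mon dump
    #ceph versions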

    mgr configuration:

    Generate an HTTPS certificate (an existing certificate can also be supplied). The restful and dashboard modules only start once a certificate is in place.
    #ceph restful create-self-signed-cert
    #ceph dashboard create-self-signed-cert
    Create an admin user:
    #ceph dashboard ac-user-create admin admin administrator
    #ansible ceph -m command -a "iptables -I INPUT -p tcp --dport 8003 -j ACCEPT"
    

    Change the listening ports:

    Restful; luminous1 identifies which mgr instance the setting applies to:
    #ceph config set mgr mgr/restful/luminous1/server_addr 0.0.0.0
    #ceph config set mgr mgr/restful/luminous1/server_port {port}
    
    Dashboard
    #ceph config set mgr mgr/dashboard/luminous1/server_addr 0.0.0.0
    #ceph config set mgr mgr/dashboard/luminous1/server_port {port} 
    
    #ceph mgr fail {mgr-instance}  #fail over between the active and standby mgr
    
    Test the endpoints:
    #curl  -k https://192.168.30.110:7000
    #curl  -k https://192.168.30.110:8003
    

    Restart all mon/osd/mgr/rgw daemons.
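    One way to do this per node is through the systemd targets (restart the monitors one node at a time so quorum is preserved):

    #systemctl restart ceph-mon.target
    #systemctl restart ceph-mgr.target
    #systemctl restart ceph-osd.target
    #systemctl restart ceph-radosgw.target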
