美文网首页
ceph jewel multisite

ceph jewel multisite

作者: akka9 | 来源:发表于2017-09-11 03:52 被阅读0次
    # --- Multisite topology parameters ---
    realm=cn
    zone_group=cn-south                 # zonegroup spanning both zones
    zone_group_endpoint=http://s3files.domain.com
    master_zone=cn-south-1              # primary (writable) zone
    master_zone_endpoint=http://s3cnsz1.domain.com
    secondary_zone=cn-south-2           # replica zone
    secondary_zone_endpoint=http://s3cnsz2.domain.com

    # Generate random S3-style credentials for the system sync user.
    # 'tr' reads /dev/urandom directly (no useless 'cat'), and 'head -c N'
    # takes exactly N characters instead of the fragile 'fold | head -n 1'.
    sync_access_key=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | head -c 20)
    sync_secret_key=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | head -c 40)
    # printf with a quoted expansion is safe for arbitrary values.
    printf 'sync_access_key=%s\n' "$sync_access_key"
    printf 'sync_secret_key=%s\n' "$sync_secret_key"
    
    # Remove the RGW pools auto-created by the default zone (run as root).
    # To wipe every pool in the cluster instead, uncomment:
    #for poolname in $(rados lspools); do 
    #ceph osd pool delete $poolname $poolname --yes-i-really-really-mean-it;done

    for pool in \
        default.rgw.control \
        default.rgw.data.root \
        default.rgw.gc \
        default.rgw.log \
        default.rgw.users.uid \
        default.rgw.meta \
        .rgw.root
    do
        rados rmpool "$pool" "$pool" --yes-i-really-really-mean-it
    done
    
    # Remove the auto-created "default" zonegroup and zone so the custom
    # multisite layout below can take their place.  Each change only takes
    # effect after a period commit, hence the repeated 'period update'.
    radosgw-admin zonegroup remove --rgw-zonegroup=default --rgw-zone=default
    radosgw-admin period update --commit
    radosgw-admin zone delete --rgw-zone=default
    radosgw-admin period update --commit
    radosgw-admin zonegroup delete --rgw-zonegroup=default
    radosgw-admin period update --commit
    
    
    # Create the realm and mark it as the cluster default.
    # All expansions are double-quoted (ShellCheck SC2086) so endpoints or
    # keys containing shell metacharacters cannot break the command line.
    radosgw-admin realm create --rgw-realm="${realm}" --default

    # Create the master zonegroup with its public (load-balanced) endpoint.
    radosgw-admin zonegroup create --rgw-zonegroup="${zone_group}" \
        --master --default --endpoints="${zone_group_endpoint}"

    # Create the master zone inside that zonegroup.
    radosgw-admin zone create --rgw-zonegroup="${zone_group}" --rgw-zone="${master_zone}" \
        --master --default --endpoints="${master_zone_endpoint}"

    # Create the system user whose keys both zones use for metadata/data sync.
    radosgw-admin user create --uid="syssync" --display-name="Sync User" --system \
        --access-key="${sync_access_key}" --secret="${sync_secret_key}"

    # Attach the sync credentials to the master zone and publish the period.
    radosgw-admin zone modify --rgw-zone="${master_zone}" --access-key="${sync_access_key}" --secret="${sync_secret_key}"
    radosgw-admin period update --commit
    
    # update ceph.conf
    # NOTE(review): the lines below are a ceph.conf fragment (INI syntax),
    # not shell.  Add one [client.rgw.<name>] section per gateway host on
    # the master-zone cluster, then push the file with ceph-deploy below.
    [client.rgw.rgw1]
    host = rgw1
    rgw frontends = "civetweb port=7480 num_threads=150"
    rgw_zone=cn-south-1
    
    [client.rgw.rgw2]
    host = rgw2
    rgw frontends = "civetweb port=7480 num_threads=150"
    rgw_zone=cn-south-1
    
    # ... repeat for each additional gateway ...
    .....
    
    # Push the updated ceph.conf and admin keyring to all cluster nodes.
    ceph-deploy --overwrite-conf admin $node1 $node2 $node3 $node4 $node5
    
    # setup loadbalance
    # setup nginx, config three endpoint vhosts.

    # ---- secondary zone cluster (run as root) ----

    # Restart the local gateway unit; $(...) replaces the deprecated
    # backtick command substitution, and quoting protects the unit name.
    systemctl restart "ceph-radosgw@rgw.$(hostname -s)"
    systemctl status "ceph-radosgw@rgw.$(hostname -s)"

    # WARNING: this deletes EVERY pool on the secondary cluster, not only
    # the RGW ones -- safe only on a cluster dedicated to this gateway.
    rados lspools
    for poolname in $(rados lspools); do
        ceph osd pool delete "$poolname" "$poolname" --yes-i-really-really-mean-it
    done
    
    
    # Drop the stock "default" zone plus every leftover RGW pool, then drop
    # the secondary zone itself so it can be recreated from the pulled realm.
    radosgw-admin zone delete --rgw-zone=default
    for p in \
        default.rgw.control default.rgw.data.root default.rgw.gc \
        default.rgw.log default.rgw.users.uid default.rgw.meta .rgw.root \
        "${secondary_zone}.rgw.control" "${secondary_zone}.rgw.data.root" \
        "${secondary_zone}.rgw.gc" "${secondary_zone}.rgw.log"
    do
        rados rmpool "$p" "$p" --yes-i-really-really-mean-it
    done
    radosgw-admin zone delete --rgw-zone=${secondary_zone}
    
    # Pull the realm and its current period from the master zone, using the
    # sync user's credentials, then make them the local defaults.
    radosgw-admin realm pull --url=${master_zone_endpoint} --access-key=${sync_access_key}  --secret=${sync_secret_key}
    radosgw-admin period pull --url=${master_zone_endpoint} --access-key=${sync_access_key}  --secret=${sync_secret_key}
    
    radosgw-admin realm default --rgw-realm=${realm}
    radosgw-admin zonegroup default --rgw-zonegroup=${zone_group}
    
    # Create the secondary (replica) zone; the same sync keys let it
    # authenticate against the master zone for replication.
    radosgw-admin zone create --rgw-zonegroup=${zone_group} \
        --rgw-zone=${secondary_zone} \
        --access-key=${sync_access_key}  --secret=${sync_secret_key} \
        --endpoints=${secondary_zone_endpoint}
    
    # Commit the period against the master endpoint so the new zone is
    # published cluster-wide.
    radosgw-admin period update --commit --url=${master_zone_endpoint} --access-key=${sync_access_key}  --secret=${sync_secret_key}
    
    # If the period commit errors out: restart the gateways, delete all
    # secondary-zone pools, and repeat the steps above.
    
    # update secondary zone ceph.conf
    # NOTE(review): ceph.conf fragment (INI), not shell -- same layout as on
    # the master cluster but with rgw_zone pointing at the secondary zone.
    [client.rgw.rgw1]
    host = rgw1
    rgw frontends = "civetweb port=7480 num_threads=150"
    rgw_zone=cn-south-2
    
    [client.rgw.rgw2]
    host = rgw2
    rgw frontends = "civetweb port=7480 num_threads=150"
    rgw_zone=cn-south-2
    
    # Push the updated ceph.conf to the secondary cluster's nodes.
    ceph-deploy --overwrite-conf admin $node1 $node2 $node3
    
    # On every secondary-zone RGW node: restart the gateway so it picks up
    # the new zone configuration.  $(...) replaces deprecated backticks.
    systemctl restart "ceph-radosgw@rgw.$(hostname -s)"
    systemctl status "ceph-radosgw@rgw.$(hostname -s)"

    # On the master: commit the period once more.
    radosgw-admin period update --commit

    # Verify metadata/data replication is running.
    radosgw-admin sync status
    
    

    # Check the resulting multisite configuration.
    # (The original stray line "check info" was not a valid command and
    # would fail with "command not found"; it is now a comment.)
    radosgw-admin realm list
    radosgw-admin realm list-periods
    radosgw-admin zonegroup list
    radosgw-admin zonegroup-map get
    radosgw-admin zone list
    
    
    

    References:
    http://ceph.com/wp-content/uploads/2017/01/Understanding-a-Multi-Site-Ceph-Gateway-Installation-170119.pdf
    http://www.jianshu.com/p/31a6f8df9a8f
    http://blog.csdn.net/for_tech/article/details/68927956

    相关文章

      网友评论

          本文标题:ceph jewel multisite

          本文链接:https://www.haomeiwen.com/subject/tabojxtx.html