美文网首页
ceph jewel multisite

ceph jewel multisite

作者: akka9 | 来源:发表于2017-09-11 03:52 被阅读0次
# Multisite topology: one realm / one zonegroup, with a master zone and a
# secondary zone, each fronted by its own HTTP endpoint.
realm=cn
zone_group=cn-south
zone_group_endpoint=http://s3files.domain.com
master_zone=cn-south-1
master_zone_endpoint=http://s3cnsz1.domain.com
secondary_zone=cn-south-2
secondary_zone_endpoint=http://s3cnsz2.domain.com

# Generate random alphanumeric credentials for the system sync user shared
# by both zones. Read /dev/urandom directly (no useless cat) and take the
# exact byte count with head -c instead of fold | head -n 1.
sync_access_key=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | head -c 20)
sync_secret_key=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | head -c 40)
printf 'sync_access_key=%s\n' "$sync_access_key"
printf 'sync_secret_key=%s\n' "$sync_secret_key"

# Wipe the pools a default RGW deployment auto-creates on the master
# cluster (run as root).
# Alternative — delete ALL pools:
#   for poolname in $(rados lspools); do
#     ceph osd pool delete $poolname $poolname --yes-i-really-really-mean-it
#   done

for pool in \
    default.rgw.control \
    default.rgw.data.root \
    default.rgw.gc \
    default.rgw.log \
    default.rgw.users.uid \
    default.rgw.meta \
    .rgw.root; do
  rados rmpool "$pool" "$pool" --yes-i-really-really-mean-it
done

# Delete the default zonegroup and zone that radosgw auto-creates on first
# start. Each metadata change is followed by `period update --commit` so the
# realm's current period reflects it before the next step.
radosgw-admin zonegroup remove --rgw-zonegroup=default --rgw-zone=default
radosgw-admin period update --commit
radosgw-admin zone delete --rgw-zone=default
radosgw-admin period update --commit
radosgw-admin zonegroup delete --rgw-zonegroup=default
radosgw-admin period update --commit


# Create the realm both sites will share.
radosgw-admin realm create --rgw-realm="${realm}" --default

# Create the master zonegroup behind the global (load-balanced) endpoint.
radosgw-admin zonegroup create --rgw-zonegroup="${zone_group}" \
    --master --default --endpoints="${zone_group_endpoint}"

# Create the master zone inside that zonegroup.
radosgw-admin zone create --rgw-zonegroup="${zone_group}" --rgw-zone="${master_zone}" \
    --master --default --endpoints="${master_zone_endpoint}"

# Create the system user whose keys the secondary zone will use to pull
# realm/period metadata and replicate data.
radosgw-admin user create --uid="syssync" --display-name="Sync User" --system \
    --access-key="${sync_access_key}" --secret="${sync_secret_key}"

# Attach the sync credentials to the master zone and publish the change.
radosgw-admin zone modify --rgw-zone="${master_zone}" --access-key="${sync_access_key}" --secret="${sync_secret_key}"
radosgw-admin period update --commit

# update ceph.conf
# NOTE: the following is NOT shell — it is a ceph.conf fragment to paste on
# each master-zone RGW host. It binds every rgw instance to zone cn-south-1
# and serves S3 over civetweb on port 7480.
[client.rgw.rgw1]
host = rgw1
rgw frontends = "civetweb port=7480 num_threads=150"
rgw_zone=cn-south-1

[client.rgw.rgw2]
host = rgw2
rgw frontends = "civetweb port=7480 num_threads=150"
rgw_zone=cn-south-1

# ... one [client.rgw.*] section per gateway host ...
.....

# Push the updated ceph.conf (and admin keyring) to every cluster node.
ceph-deploy --overwrite-conf admin $node1 $node2 $node3 $node4 $node5

# setup loadbalance
# setup nginx, config three endpoint vhosts.

# ------------- secondary zone (run on the second cluster) -------------

# Restart the local radosgw instance, then wipe every pool so the
# secondary cluster starts from a clean slate (run as root).
# Use $(...) instead of backticks and quote the unit/pool names.
systemctl restart "ceph-radosgw@rgw.$(hostname -s)"
systemctl status "ceph-radosgw@rgw.$(hostname -s)"
rados lspools
for poolname in $(rados lspools); do
  ceph osd pool delete "$poolname" "$poolname" --yes-i-really-really-mean-it
done


# Remove the auto-created default zone and its pools, plus any pools a
# previous secondary-zone attempt left behind (same rados calls as before,
# with every expansion quoted).
radosgw-admin zone delete --rgw-zone=default
for pool in \
    default.rgw.control \
    default.rgw.data.root \
    default.rgw.gc \
    default.rgw.log \
    default.rgw.users.uid \
    default.rgw.meta \
    .rgw.root \
    "${secondary_zone}.rgw.control" \
    "${secondary_zone}.rgw.data.root" \
    "${secondary_zone}.rgw.gc" \
    "${secondary_zone}.rgw.log"; do
  rados rmpool "$pool" "$pool" --yes-i-really-really-mean-it
done
radosgw-admin zone delete --rgw-zone="${secondary_zone}"

# Pull the realm and its current period from the master endpoint using the
# system sync user's credentials, then make them the local defaults.
radosgw-admin realm pull --url="${master_zone_endpoint}" --access-key="${sync_access_key}" --secret="${sync_secret_key}"
radosgw-admin period pull --url="${master_zone_endpoint}" --access-key="${sync_access_key}" --secret="${sync_secret_key}"

radosgw-admin realm default --rgw-realm="${realm}"
radosgw-admin zonegroup default --rgw-zonegroup="${zone_group}"

# Create the secondary zone with the same sync credentials.
# Note: no --master and no --default here — this zone replicates from the master.
radosgw-admin zone create --rgw-zonegroup="${zone_group}" \
    --rgw-zone="${secondary_zone}" \
    --access-key="${sync_access_key}" --secret="${sync_secret_key}" \
    --endpoints="${secondary_zone_endpoint}"

# Commit the new period through the master endpoint so both sites agree.
radosgw-admin period update --commit --url="${master_zone_endpoint}" --access-key="${sync_access_key}" --secret="${sync_secret_key}"

# If the period commit errors out: restart the rgw, delete all secondary
# pools, and retry from the realm pull.

# update secondary zone ceph.conf
# NOTE: the following is NOT shell — it is the ceph.conf fragment for the
# secondary-zone RGW hosts; identical to the master's except rgw_zone
# points at cn-south-2.
[client.rgw.rgw1]
host = rgw1
rgw frontends = "civetweb port=7480 num_threads=150"
rgw_zone=cn-south-2

[client.rgw.rgw2]
host = rgw2
rgw frontends = "civetweb port=7480 num_threads=150"
rgw_zone=cn-south-2

# Push the updated ceph.conf to every secondary-cluster node.
ceph-deploy --overwrite-conf admin $node1 $node2 $node3

# On every secondary-zone rgw node: restart radosgw so it joins the new zone.
# Use $(...) instead of backticks and quote the systemd unit name.
systemctl restart "ceph-radosgw@rgw.$(hostname -s)"
systemctl status "ceph-radosgw@rgw.$(hostname -s)"

# On the master: commit the period once more.
radosgw-admin period update --commit

# Check that metadata/data replication is running.
radosgw-admin sync status

# Check realm / zonegroup / zone info.
# (The bare line "check info" in the original would have been executed as a
# command and failed with "command not found" — it is a heading, so comment it.)
radosgw-admin realm list
radosgw-admin realm list-periods
radosgw-admin zonegroup list
radosgw-admin zonegroup-map get
radosgw-admin zone list


References:
http://ceph.com/wp-content/uploads/2017/01/Understanding-a-Multi-Site-Ceph-Gateway-Installation-170119.pdf
http://www.jianshu.com/p/31a6f8df9a8f
http://blog.csdn.net/for_tech/article/details/68927956

相关文章

  • ceph jewel multisite

    check info =======http://ceph.com/wp-content/uploads/2017...

  • Ceph Multisite

    说明:多数据中心(multisite)功能oNest v6.1新增的功能,旨在实现异地双活,提供了备份容灾的能力。...

  • ceph jewel xfs w/o bluestore roc

    系统: Ubuntu 16.04ceph: jewel 10.2.7部署方式: ceph-deploy 部署...

  • Ceph RGW multisite代码实现

    multisite代码中大量使用了Boost的协程,在了解协程的使用方法后,整体代码结构还是比较清晰的。 协程实现...

  • BlueStore 架构及原理分析

    Ceph 底层存储引擎经过了数次变迁,目前最常用的是 BlueStore,在 Jewel 版本中引入,用来取代 F...

  • 趣解 ceph rgw multisite data sync

    multisite是ceph rgw对象数据异地容灾备份的一个有效方案,笔者希望深入理解该技术,并应用于生产环...

  • Ceph Jewel版本 多节点安装

    安装准备 1.0 准备 三台虚拟机分别改hosts和hostname文件为ceph-node1、ceph-node...

  • Ceph Jewel 版本osd添加和删除

    增加/删除 OSD 如果您的集群已经在运行,你可以在运行时添加或删除 OSD 。 增加 OSD 你迟早要扩容集群,...

  • Cephfs多用户隔离

    背景 ceph 最新版本(jewel)中添加了对单机群多文件系统的支持, 官方表示虽然一直的 bug 均已得到修复...

  • Ceph RGW multisite异地容灾备份方案

    1.前言 容灾 (Disaster Recovery),即容灾备份或灾备,是业务连续性系统的一个子集,用于保障 I...

网友评论

      本文标题:ceph jewel multisite

      本文链接:https://www.haomeiwen.com/subject/tabojxtx.html