echo deb https://mirrors.aliyun.com/ceph/debian-nautilus/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list
# import the Ceph release key so apt can verify the mirror
wget -q -O- 'https://download.ceph.com/keys/release.asc' | sudo apt-key add -
sudo apt-get update
sudo apt-get install ceph-deploy
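# optional: confirm the installed ceph-deploy is the 2.x series required for Nautilus
ceph-deploy --version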
mkdir -p ceph
cd ceph
admin=host0
node1=host1
node2=host2
node3=host3
# optional extra nodes; leave empty for a three-node cluster (empty variables
# simply drop out of the commands below)
node4=
node5=
# install ceph packages on the admin and storage nodes
ceph-deploy install --no-adjust-repos $admin
ceph-deploy install --no-adjust-repos $node1 $node2 $node3
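# optional sanity check on each host: the installed release should report "nautilus"
ceph --version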
# create new cluster
ceph-deploy new $node1 $node2 $node3 $node4 $node5
# init mon
ceph-deploy --overwrite-conf mon create-initial
# deploy conf
ceph-deploy --overwrite-conf admin $admin
ceph-deploy --overwrite-conf admin $node1 $node2 $node3 $node4 $node5
# check mon
sudo ceph quorum_status --format json-pretty
sudo /usr/bin/ceph --cluster=ceph mon stat --format=json
# create mgr
ceph-deploy --overwrite-conf mgr create $node1 $node2 $node3
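# optional: the services section of the status output should show one active mgr
# and two standbys
sudo ceph -s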
# create osd
# the node:disk form below is the legacy ceph-deploy 1.5.x syntax; it does not
# work with the ceph-deploy 2.x used for Nautilus, so it is left commented out
# for reference only
#ceph-deploy --overwrite-conf osd create --zap-disk \
#    $node1:/dev/xvdb $node1:/dev/xvdc \
#    $node2:/dev/xvdb $node2:/dev/xvdc \
#    $node3:/dev/xvdb $node3:/dev/xvdc \
#    $node4:/dev/xvdb $node4:/dev/xvdc \
#    $node5:/dev/xvdb $node5:/dev/xvdc
# ceph-deploy 2.x takes one device per invocation; repeat for /dev/xvdc and any
# additional nodes as needed
ceph-deploy osd create --data /dev/xvdb $node1
ceph-deploy osd create --data /dev/xvdb $node2
ceph-deploy osd create --data /dev/xvdb $node3
# check osd
sudo /usr/bin/ceph --cluster=ceph osd stat --format=json
sudo ceph osd tree
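# optional: confirm the raw capacity of the new OSDs is reported
sudo ceph df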
# deploy rgw to provide the S3-compatible object gateway
ceph-deploy rgw create $node1 $node2 $node3 $node4 $node5
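# optional: create a test S3 account to verify rgw; uid and display-name below are
# example values, and the output includes the access/secret keys for S3 clients
sudo radosgw-admin user create --uid=s3test --display-name="s3 test user"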
# cephfs mds
# mds daemon names must not start with a digit; prefix each with "mds-" to avoid creation failures
ceph-deploy mds create $node1:mds-$node1 $node2:mds-$node2 $node3:mds-$node3
#ceph-deploy mds create $node1 $node2 $node3 $node4 $node5
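# optional: the new mds daemons stay in standby until a filesystem is created
sudo ceph mds stat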
# set pg_num for pools
sudo ceph osd pool create rbdpool 8
sudo ceph osd pool set rbdpool pg_num 32
sudo ceph osd pool set rbdpool pgp_num 32
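# tag the pool with the rbd application (avoids a health warning since Luminous)
# and create a throwaway image to verify it; "testimg" is just an example name
sudo ceph osd pool application enable rbdpool rbd
sudo rbd create rbdpool/testimg --size 1024
sudo rbd ls rbdpool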
sudo ceph osd pool create cephfs_data 32
sudo ceph osd pool create cephfs_metadata 32
sudo ceph osd lspools
# cephfs
sudo ceph fs new cephfs cephfs_metadata cephfs_data
sudo ceph fs ls
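# optional: mount the new filesystem from a client with the kernel driver;
# /mnt/cephfs is just an example mountpoint and the admin key is used for brevity
sudo mkdir -p /mnt/cephfs
sudo mount -t ceph $node1:6789:/ /mnt/cephfs -o name=admin,secret=$(sudo ceph auth get-key client.admin)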
sudo ceph osd pool set rbd pg_num 256
sudo ceph osd pool set rbd pgp_num 256
sudo ceph osd pool set .rgw.root pg_num 256
sudo ceph osd pool set .rgw.root pgp_num 256
sudo ceph osd pool set default.rgw.control pg_num 256
sudo ceph osd pool set default.rgw.control pgp_num 256
sudo ceph osd pool set default.rgw.data.root pg_num 256
sudo ceph osd pool set default.rgw.data.root pgp_num 256
sudo ceph osd pool set default.rgw.gc pg_num 256
sudo ceph osd pool set default.rgw.gc pgp_num 256
sudo ceph osd pool set default.rgw.log pg_num 256
sudo ceph osd pool set default.rgw.log pgp_num 256
# check pg_num
sudo ceph osd dump
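# alternative on Nautilus: instead of tuning pg_num by hand, enable the
# pg_autoscaler mgr module and opt pools in (sketch for rbdpool only)
sudo ceph mgr module enable pg_autoscaler
sudo ceph osd pool set rbdpool pg_autoscale_mode on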