Add the osd sections to the configuration file
[osd]
osd journal size = 1024
[osd.0]
host = zw-vm-138
[osd.1]
host = zw-vm-139
[osd.2]
host = zw-vm-140
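If ceph.conf is maintained on one node, the updated file also has to reach the other hosts. A minimal sketch, assuming the file lives at /etc/ceph/ceph.conf and passwordless SSH to the other nodes is already set up:
for h in zw-vm-139 zw-vm-140; do scp /etc/ceph/ceph.conf ${h}:/etc/ceph/ceph.conf; done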
Generate a UUID for the new OSD
# uuidgen
e33dcfb0-31d5-4953-896d-007c7c295410
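To avoid copy/paste mistakes, the UUID can also be captured in a shell variable (UUID is just an illustrative name):
UUID=$(uuidgen)
echo ${UUID}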
Create the OSD instance
ceph osd create e33dcfb0-31d5-4953-896d-007c7c295410
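ceph osd create prints the integer ID assigned to the new OSD (0 here). When scripting, the ID can be captured directly; OSD_ID below is an illustrative variable name:
OSD_ID=$(ceph osd create e33dcfb0-31d5-4953-896d-007c7c295410)
echo ${OSD_ID}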
Create the OSD data directory
mkdir -p /var/lib/ceph/osd/ceph-0
Create the OSD filesystem
ceph-osd -i 0 --mkfs --mkkey --osd-uuid e33dcfb0-31d5-4953-896d-007c7c295410
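If the OSD data is to live on a dedicated disk rather than the root filesystem, that disk would typically be formatted and mounted on the OSD directory before running --mkfs. A sketch, with /dev/sdb1 as a placeholder device:
mkfs.xfs -f /dev/sdb1
mount /dev/sdb1 /var/lib/ceph/osd/ceph-0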
Add the OSD keyring to the cluster authentication database
ceph auth add osd.0 osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/ceph-0/keyring
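The newly registered key can be checked against the keyring file on disk:
ceph auth get osd.0
cat /var/lib/ceph/osd/ceph-0/keyring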
Add the host to the CRUSH map
ceph osd crush add-bucket zw-vm-138 host
ceph osd crush move zw-vm-138 root=default
Add the OSD to the CRUSH map
ceph osd crush add osd.0 1.0 host=zw-vm-138
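The CRUSH placement can be verified at any point (the complete output after all three nodes are added is shown further below):
ceph osd tree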
Repeat the above steps on each node, changing the corresponding id and hostname (a parameterized sketch follows this list).
Values that need to change:
-i {id}
osd.{id}
ceph-{id}
host={hostname}
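The per-node steps can also be collected into a small script. This is only a sketch of the same commands, assuming it is run directly on the new node with admin credentials for the ceph CLI and that the matching [osd.{id}] entry has already been added to the configuration file:
# Run once on the node that will host the new OSD.
UUID=$(uuidgen)                 # uuid for the new OSD
ID=$(ceph osd create ${UUID})   # the cluster assigns the OSD number
HOST=$(hostname -s)             # e.g. zw-vm-139
mkdir -p /var/lib/ceph/osd/ceph-${ID}
ceph-osd -i ${ID} --mkfs --mkkey --osd-uuid ${UUID}
ceph auth add osd.${ID} osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/ceph-${ID}/keyring
ceph osd crush add-bucket ${HOST} host
ceph osd crush move ${HOST} root=default
ceph osd crush add osd.${ID} 1.0 host=${HOST}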
Status after completion
# ceph osd tree
ID WEIGHT  TYPE NAME          UP/DOWN REWEIGHT PRIMARY-AFFINITY
-1 3.00000 root default
-2 1.00000     host zw-vm-138
 0 1.00000         osd.0           up  1.00000          1.00000
-3 1.00000     host zw-vm-139
 1 1.00000         osd.1           up  1.00000          1.00000
-4 1.00000     host zw-vm-140
 2 1.00000         osd.2           up  1.00000          1.00000
# ceph -s
    cluster caefadfb-ad33-453f-be23-778fdbd0a892
     health HEALTH_OK
     monmap e1: 3 mons at {0=10.3.0.138:6789/0,1=10.3.0.139:6789/0,2=10.3.0.140:6789/0}
            election epoch 20, quorum 0,1,2 0,1,2
     osdmap e36: 3 osds: 3 up, 3 in
            flags sortbitwise
      pgmap v431: 164 pgs, 2 pools, 16 bytes data, 3 objects
            28914 MB used, 240 GB / 282 GB avail
                 164 active+clean
Start the service
ln -s /etc/init.d/ceph /etc/init.d/ceph-osd.0
rc-update add ceph-osd.0 default
/etc/init.d/ceph-osd.0 start
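Once the daemon has started, the OSD should show as up and in in the cluster maps, which can be checked with:
ceph osd stat
ceph osd tree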