Ceph cluster role definitions
Ceph OSDs: object storage daemons (three or more nodes).
Ceph Monitors (an odd number of nodes: 1, 3, 5, 7, ...), Mon for short: maintain the cluster state maps, e.g. how many pools --> PGs the cluster contains; they also handle authentication between daemons and clients.
Ceph Managers (two or more nodes): track runtime metrics and cluster state, such as storage utilization, current performance metrics, and system load.
RADOS cluster: the Ceph cluster formed by multiple host storage servers.
OSD: the storage space made up of each server's disks (Object Storage Daemon).
MDS (Ceph metadata server, ceph-mds): stores the metadata for CephFS.
Pool (storage pool) <--- PG <--- OSD
Ceph metadata is stored as key-value pairs; in RADOS this is implemented with xattrs and omap.
Commonly used Ceph storage engines: bluestore, filestore, kvstore, memstore.
omap (object map): key-value data kept for an object outside its data and xattrs.
filestore && leveldb: leveldb is a persistent key-value store; filestore writes most object data directly to disk (typically on an XFS filesystem) and keeps the omap data in leveldb.
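A quick way to see xattrs and omap on a live cluster is the rados tool. A minimal sketch, assuming a pool named mypool containing an object named test (both are created later in this document):
#list the extended attributes stored with the object
rados -p mypool listxattr test
#list the keys in the object's omap
rados -p mypool listomapkeys test
#dump the omap keys and values
rados -p mypool listomapvals test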
The relationship among filestore, leveldb, and omap is shown in the figure below.

How Ceph stores a file
1. Compute the mapping from the file to an object in the object store.
2. Hash the object name to find the PG it belongs to within the pool.
3. Use the CRUSH algorithm to map the PG to OSDs: CRUSH(pgid) --> (osd1, osd2, osd3, ...).
4. The primary OSD of the PG writes the object to its disk.
5. The primary OSD replicates the data to the replica OSDs and waits for their acknowledgements.
6. The primary OSD reports write completion back to the client.
(See the sketch below for how to observe steps 2-3 on a live cluster.)
(Figure: the Ceph file storage workflow)
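Steps 2 and 3 can be observed on a running cluster: ceph osd map reports which PG an object hashes to and which OSDs CRUSH selects for that PG. A minimal sketch, assuming a pool named mypool and an object named test (the same command appears later under object placement information); the PG id 4.15 is only an example:
#object -> PG -> OSD set (up/acting)
sudo ceph osd map mypool test
#the PG -> OSD mapping looked up directly by PG id
sudo ceph pg map 4.15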
Ceph deployment preparation
These preparation steps must be done on every node!
1. Prepare the apt repository
echo "deb https://mirrors.tuna.tsinghua.edu.cn/ceph/debian-pacific bionic main" >>/etc/apt/sources.list
wget -q -O- 'https://mirrors.tuna.tsinghua.edu.cn/ceph/keys/release.asc'|sudo apt-key add -
apt update
2. Prepare time synchronization
apt install chrony -y
echo "server ntp.aliyun.com minpoll 4 maxpoll 10 iburst" >> /etc/chrony/chrony.conf
systemctl restart chrony
hwclock -w
reboot
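A quick check that time synchronization is actually working, assuming chrony started cleanly (not part of the original transcript):
#list the configured time sources; the selected one is marked with *
chronyc sources -v
#show the current offset between the system clock and the selected source
chronyc tracking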
3. Prepare Python 2
apt install python2.7 -y
ln -sv /usr/bin/python2.7 /usr/bin/python2
4. Prepare the deployment account:
groupadd -r -g 2023 cephok && useradd -r -m -s /bin/bash -u 2023 -g 2023 cephok && echo cephok:123456 | chpasswd
echo "cephok ALL=(root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/cephok
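To confirm the account and passwordless sudo took effect, something like the following can be run as root (a sketch, not from the original transcript):
#check the new account's uid/gid
id cephok
#-n makes sudo fail instead of prompting, so this proves NOPASSWD works
su - cephok -c 'sudo -n true && echo "passwordless sudo OK"'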
5. Prepare hosts and SSH access:

cephok@ceph-ploy:~$ cat ~/.ssh/config
Host ceph-deploy
    HostName 172.32.6.100
    User cephok
    Port 22
Host ceph-mon1
    HostName 172.32.6.101
    User cephok
    Port 22
Host ceph-mon2
    HostName 172.32.6.102
    User cephok
    Port 22
Host ceph-mon3
    HostName 172.32.6.103
    User cephok
    Port 22
Host ceph-mgr1
    HostName 172.32.6.104
    User cephok
    Port 22
Host ceph-mgr2
    HostName 172.32.6.105
    User cephok
    Port 22
Host ceph-node1
    HostName 172.32.6.106
    User cephok
    Port 22
Host ceph-node2
    HostName 172.32.6.107
    User cephok
    Port 22
Host ceph-node3
    HostName 172.32.6.108
    User cephok
    Port 22
Host ceph-node4
    HostName 172.32.6.109
    User cephok
    Port 22
Host ceph-client1
    HostName 172.32.6.110
    User cephok
    Port 22
Host ceph-client2
    HostName 172.32.6.111
    User cephok
    Port 22
cephok@ceph-ploy:~$ for host in ceph-{mon1,mon2,mon3,mgr1,mgr2,node1,node2,node3,node4,client1,client2}
do
sudo scp /etc/hosts cephok@${host}:/etc
done
cephok@ceph-ploy:~$ ssh-keygen
cephok@ceph-ploy:~$for host in ceph-{mon1,mon2,mon3,mgr1,mgr2,node1,node2,node3,node4,client1,client2}
do
ssh-copy-id cephok@${host}
done
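Once the keys are distributed, passwordless SSH to every node can be verified with a loop like this (a sketch reusing the same host list; BatchMode makes ssh fail instead of prompting if key authentication is broken):
for host in ceph-{mon1,mon2,mon3,mgr1,mgr2,node1,node2,node3,node4,client1,client2}
do
ssh -o BatchMode=yes cephok@${host} hostname
done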
Ceph cluster deployment steps
cephok@ceph-ploy:~$ mkdir ceph-cluster
cephok@ceph-ploy:~$ cd ceph-cluster
#Install the ceph-deploy tool
cephok@ceph-ploy:~/ceph-cluster$ sudo apt install ceph-deploy
#Create the Ceph cluster
cephok@ceph-ploy:~/ceph-cluster$ ceph-deploy new --cluster-network 192.168.6.0/24 --public-network 172.31.6.0/24 ceph-mon1
#Verify the cluster was created
cephok@ceph-ploy:~/ceph-cluster$ll
ceph.conf ceph-deploy-ceph.log ceph.mon.keyring
#Install Ceph on every node && initialize the storage nodes
cephok@ceph-ploy:~/ceph-cluster$ ceph-deploy install --no-adjust-repos --nogpgcheck ceph-deploy ceph-mon1 ceph-mon2 ceph-mon3 ceph-mgr1 ceph-mgr2 ceph-node1 ceph-node2 ceph-node3 ceph-node4
#Install the mon package on the mon nodes
cephok@ceph-ploy:~/ceph-cluster$ for host in ceph-{mon1,mon2,mon3};do ssh cephok@${host} "sudo apt install ceph-mon -y" ;done
#Install the mgr package on the mgr nodes
cephok@ceph-ploy:~/ceph-cluster$ for host in ceph-{mgr1,mgr2};do ssh cephok@${host} "sudo apt install ceph-mgr -y" ;done
#Initialize the mon nodes
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy mon create-initial
#Verify mon initialization
cephok@ceph-ploy:~/ceph-cluster$ls
ceph.bootstrap-mds.keyring ceph.bootstrap-osd.keyring ceph.client.admin.keyring ceph-deploy-ceph.log
ceph.bootstrap-mgr.keyring ceph.bootstrap-rgw.keyring
ceph.conf ceph.mon.keyring
##Verify the mon daemon
cephok@ceph-ploy:~/ceph-cluster$ ssh ceph-mon1 "ps -aux|grep ceph"
root 747 0.0 0.2 40080 11840 ? Ss 20:22 0:00 /usr/bin/python3.6 /usr/bin/ceph-crash
ceph 756 0.8 3.8 589060 152784 ? Ssl 20:22 0:20 /usr/bin/ceph-mon -f --cluster ceph --id ceph-mon1 --setuser ceph --setgroup ceph
##Install the client packages
cephok@ceph-ploy:~/ceph-cluster$ ssh ceph-client1 'sudo apt install ceph-common'
#Distribute the admin keyring to each node
cephok@ceph-ploy:~/ceph-cluster$ ceph-deploy admin ceph-deploy ceph-mon1 ceph-mon2 ceph-mon3 ceph-mgr1 ceph-mgr2 ceph-node1 ceph-node2 ceph-node3 ceph-node4 ceph-client1
#Verify the keyring on each node
cephok@ceph-ploy:~/ceph-cluster$ for host in ceph-{mon1,mon2,mon3,mgr1,mgr2,node1,node2,node3,node4,client1};do ssh cephok@${host} "sudo ls -l /etc/ceph/" ;done
total 8
-rw-r--r-- 1 root root 263 Aug 24 23:31 ceph.conf
-rw-r--r-- 1 root root 92 Jul 8 22:17 rbdmap
-rw------- 1 root root 0 Aug 24 23:31 tmpM6AtR7
total 12
-rw------- 1 root root 151 Aug 24 23:43 ceph.client.admin.keyring
-rw-r--r-- 1 root root 263 Aug 24 23:43 ceph.conf
-rw-r--r-- 1 root root 92 Jul 8 22:17 rbdmap
-rw------- 1 root root 0 Aug 24 23:43 tmpTkuNf8
total 12
-rw------- 1 root root 151 Aug 24 23:43 ceph.client.admin.keyring
-rw-r--r-- 1 root root 263 Aug 24 23:43 ceph.conf
-rw-r--r-- 1 root root 92 Jul 8 22:17 rbdmap
-rw------- 1 root root 0 Aug 24 23:43 tmpQxMBCv
total 12
-rw------- 1 root root 151 Aug 24 23:55 ceph.client.admin.keyring
-rw-r--r-- 1 root root 263 Aug 24 23:55 ceph.conf
-rw-r--r-- 1 root root 92 Jul 8 22:17 rbdmap
-rw------- 1 root root 0 Aug 24 23:51 tmpnFT8qk
total 12
-rw------- 1 root root 151 Aug 24 23:58 ceph.client.admin.keyring
-rw-r--r-- 1 root root 263 Aug 24 23:58 ceph.conf
-rw-r--r-- 1 root root 92 Jul 8 22:17 rbdmap
-rw------- 1 root root 0 Aug 24 23:53 tmpc3RGSa
total 12
-rw------- 1 root root 151 Aug 25 00:19 ceph.client.admin.keyring
-rw-r--r-- 1 root root 263 Aug 27 20:38 ceph.conf
-rw-r--r-- 1 root root 92 Jul 8 22:17 rbdmap
-rw------- 1 root root 0 Aug 24 23:35 tmpFs1K8_
total 12
-rw------- 1 root root 151 Aug 25 00:19 ceph.client.admin.keyring
-rw-r--r-- 1 root root 263 Aug 27 20:38 ceph.conf
-rw-r--r-- 1 root root 92 Jul 8 22:17 rbdmap
-rw------- 1 root root 0 Aug 24 23:35 tmpg8uPWK
total 12
-rw------- 1 root root 151 Aug 25 00:19 ceph.client.admin.keyring
-rw-r--r-- 1 root root 263 Aug 27 20:39 ceph.conf
-rw-r--r-- 1 root root 92 Jul 8 22:17 rbdmap
-rw------- 1 root root 0 Aug 24 23:35 tmpaezDyT
total 12
-rw------- 1 root root 151 Aug 25 00:19 ceph.client.admin.keyring
-rw-r--r-- 1 root root 263 Aug 27 20:39 ceph.conf
-rw-r--r-- 1 root root 92 Jul 8 22:17 rbdmap
-rw------- 1 root root 0 Aug 24 23:35 tmpgdLp8e
total 12
-rw------- 1 root root 151 Aug 28 00:18 ceph.client.admin.keyring
-rw-r--r-- 1 root root 263 Aug 28 00:18 ceph.conf
-rw-r--r-- 1 root root 92 Jul 8 22:17 rbdmap
-rw------- 1 root root 0 Aug 28 00:05 tmpIuay98
#Grant the ceph and cephok users read/write access to the admin keyring (via ACLs)
cephok@ceph-ploy:~/ceph-cluster$ for host in ceph-{mon1,mon2,mon3,mgr1,mgr2,node1,node2,node3,node4,client1};do ssh cephok@${host} "sudo apt install acl -y && sudo setfacl -m u:ceph:rw /etc/ceph/ceph.client.admin.keyring" ;done
cephok@ceph-ploy:~/ceph-cluster$ for host in ceph-{mon1,mon2,mon3,mgr1,mgr2,node1,node2,node3,node4,client1};do ssh cephok@${host} "sudo apt install acl -y && sudo setfacl -m u:cephok:rw /etc/ceph/ceph.client.admin.keyring" ;done
#Create the mgr server
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy mgr create ceph-mgr1
#Add the remaining mon nodes
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy mon add ceph-mon2
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy mon add ceph-mon3
##Verify mon quorum
cephok@ceph-ploy:~/ceph-cluster$ sudo ceph quorum_status
cephok@ceph-ploy:~/ceph-cluster$sudo ceph quorum_status --format json-pretty
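Besides quorum_status, a couple of standard subcommands give a quick view of the monitors (a sketch):
#one-line summary: number of mons, quorum members, current epoch
sudo ceph mon stat
#full monmap, including each monitor's addresses
sudo ceph mon dump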
#Expand the mgr nodes
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy mgr create ceph-mgr1
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy mgr create ceph-mgr2
##Verify the mgr daemons
cephok@ceph-ploy:~/ceph-cluster$sudo ceph -s
##############################################################################################################################################################
##Key operations on the OSD nodes
#1. List each node's disks
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy disk list ceph-node1
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy disk list ceph-node2
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy disk list ceph-node3
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy disk list ceph-node4
#2. Zap (wipe) each node's disks
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy disk zap ceph-node1 /dev/sdb
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy disk zap ceph-node1 /dev/sdc
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy disk zap ceph-node1 /dev/sdd
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy disk zap ceph-node1 /dev/sde
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy disk zap ceph-node2 /dev/sdb
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy disk zap ceph-node2 /dev/sdc
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy disk zap ceph-node2 /dev/sdd
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy disk zap ceph-node2 /dev/sde
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy disk zap ceph-node3 /dev/sdb
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy disk zap ceph-node3 /dev/sdc
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy disk zap ceph-node3 /dev/sdd
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy disk zap ceph-node3 /dev/sde
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy disk zap ceph-node4 /dev/sdb
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy disk zap ceph-node4 /dev/sdc
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy disk zap ceph-node4 /dev/sdd
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy disk zap ceph-node4 /dev/sde
#3. Add each node's disks as OSDs
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy osd create ceph-node1 --data /dev/sdb
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy osd create ceph-node1 --data /dev/sdc
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy osd create ceph-node1 --data /dev/sdd
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy osd create ceph-node1 --data /dev/sde
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy osd create ceph-node2 --data /dev/sdb
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy osd create ceph-node2 --data /dev/sdc
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy osd create ceph-node2 --data /dev/sdd
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy osd create ceph-node2 --data /dev/sde
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy osd create ceph-node3 --data /dev/sdb
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy osd create ceph-node3 --data /dev/sdc
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy osd create ceph-node3 --data /dev/sdd
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy osd create ceph-node3 --data /dev/sde
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy osd create ceph-node4 --data /dev/sdb
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy osd create ceph-node4 --data /dev/sdc
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy osd create ceph-node4 --data /dev/sdd
cephok@ceph-ploy:~/ceph-cluster$ceph-deploy osd create ceph-node4 --data /dev/sde
#4. Verify OSD status
cephok@ceph-ploy:~/ceph-cluster$ sudo ceph -s
  cluster:
    id:     6f02893c-aeef-47b4-9638-e6c063acddd1
    health: HEALTH_OK
  services:
    mon: 3 daemons, quorum ceph-mon1,ceph-mon2,ceph-mon3 (age 66m)
    mgr: ceph-mgr1(active, since 63m), standbys: ceph-mgr2
    osd: 16 osds: 16 up (since 65m), 16 in (since 2d)
  data:
    pools:   3 pools, 97 pgs
    objects: 400 objects, 1.5 GiB
    usage:   4.8 GiB used, 70 GiB / 75 GiB avail
    pgs:     97 active+clean
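In addition to ceph -s, the CRUSH view is handy for confirming that every node contributed the expected number of OSDs (a sketch):
#show the CRUSH hierarchy: each host and the OSDs placed under it
sudo ceph osd tree
#per-OSD utilization and weight
sudo ceph osd df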
#5. Enable the ceph-osd services on the OSD nodes so they start on boot
root@ceph-node1:~#systemctl enable ceph-osd@0 ceph-osd@1 ceph-osd@2 ceph-osd@3
root@ceph-node2:~#systemctl enable ceph-osd@4 ceph-osd@5 ceph-osd@6 ceph-osd@7
root@ceph-node3:~#systemctl enable ceph-osd@8 ceph-osd@9 ceph-osd@10 ceph-osd@11
root@ceph-node4:~#systemctl enable ceph-osd@12 ceph-osd@13 ceph-osd@14 ceph-osd@15
#6. Basic verification on the OSD nodes
root@ceph-node4:~# ps auxf | grep ceph-osd | grep -v grep
ceph 1420 0.4 2.0 999364 83504 ? Ssl 20:26 0:23 /usr/bin/ceph-osd -f --cluster ceph --id 15 --setuser ceph --setgroup ceph
ceph 1976 0.5 2.6 1032136 106476 ? Ssl 20:26 0:24 /usr/bin/ceph-osd -f --cluster ceph --id 11 --setuser ceph --setgroup ceph
ceph 1984 0.4 1.8 1030092 74480 ? Ssl 20:26 0:22 /usr/bin/ceph-osd -f --cluster ceph --id 9 --setuser ceph --setgroup ceph
#7. Basic OSD removal operations (see the sketch after this list for a full removal sequence)
#Take an OSD out of service: sudo ceph osd out {osd-num}
#Delete the OSD's authentication key: ceph auth del osd.{osd-num}
#Remove the OSD: ceph osd rm {osd-num} (only after the previous two steps have been done)
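Putting these together, removing one OSD completely usually also involves stopping its daemon and deleting it from the CRUSH map. A sketch, assuming the OSD being removed is osd.15 on ceph-node4 (substitute the real id and host):
#1. take the OSD out so its data is rebalanced to other OSDs
sudo ceph osd out 15
#2. stop the daemon on the node that hosts it
ssh ceph-node4 'sudo systemctl stop ceph-osd@15'
#3. remove it from the CRUSH map
sudo ceph osd crush remove osd.15
#4. delete its authentication key
sudo ceph auth del osd.15
#5. remove the OSD entry itself
sudo ceph osd rm 15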
Basic Ceph usage
Basic storage pool operations
#Create a pool; the two 32s are pg_num and pgp_num
cephok@ceph-ploy:~/ceph-cluster$ sudo ceph osd pool create mypool 32 32
#List the storage pools
cephok@ceph-ploy:~/ceph-cluster$ sudo ceph osd pool ls
device_health_metrics
nubelpool
fz1
mypool
#or
cephok@ceph-ploy:~/ceph-cluster$ sudo rados lspools
device_health_metrics
nubelpool
fz1
mypool
Using a storage pool
1. Upload a file: the first "test" is the object ID, the second is the local file being uploaded
cephok@ceph-ploy:~/ceph-cluster$ sudo rados put test test --pool=mypool
2. List the objects in the pool
cephok@ceph-ploy:~/ceph-cluster$ sudo rados ls --pool=mypool
test
3. Show object placement information
cephok@ceph-ploy:~/ceph-cluster$ sudo ceph osd map mypool test
osdmap e582 pool 'mypool' (4) object 'test' -> pg 4.40e8aab5 (4.15) -> up ([9,14,12], p9) acting ([9,14,12], p9)
4. Download the object
cephok@ceph-ploy:~/ceph-cluster$ sudo rados get test --pool=mypool /opt/test.bak
cephok@ceph-ploy:~/ceph-cluster$ ll /opt
total 12
drwxr-xr-x 2 root root 56 Aug 30 22:05 ./
drwxr-xr-x 22 root root 326 Jan 14 2021 ../
-rw-r--r-- 1 root root 8 Aug 30 22:05 test.bak
5. Overwrite the object: the object ID stays the same, while the stored content is replaced by the new data
cephok@ceph-ploy:~/ceph-cluster$ sudo rados put test /etc/passwd --pool=mypool
cephok@ceph-ploy:~/ceph-cluster$ sudo rados get test --pool=mypool /opt/passwd.bak
##Verify
cephok@ceph-ploy:~/ceph-cluster$ cat /opt/test.bak
text 02
cephok@ceph-ploy:~/ceph-cluster$ cat /opt/passwd.bak|tail -3
ntp:x:111:115::/nonexistent:/usr/sbin/nologin
_chrony:x:112:116:Chrony daemon,,,:/var/lib/chrony:/usr/sbin/nologin
cephok:x:2023:2023::/home/cephok:/bin/bash
6. Delete the object
cephok@ceph-ploy:~/ceph-cluster$ sudo rados rm test --pool=mypool
####Verify
cephok@ceph-ploy:~/ceph-cluster$ sudo rados ls --pool=mypool
Basic RBD (block device) operations
#1. Create a pool for RBD
cephok@ceph-ploy:~/ceph-cluster$ sudo ceph osd pool create rbdtest1 64 64
pool 'rbdtest1' created
#2. Enable the rbd application on the pool
cephok@ceph-ploy:~/ceph-cluster$ sudo ceph osd pool application enable rbdtest1 rbd
enabled application 'rbd' on pool 'rbdtest1'
#3. Initialize the RBD pool
cephok@ceph-ploy:~/ceph-cluster$ sudo rbd pool init -p rbdtest1
#4. Create images in the RBD pool. A pool cannot be used as a block device directly; clients map images.
###1) The first command creates an image with the default features; the second specifies the features explicitly. --image-format <format-id> is commonly used to select the object layout.
cephok@ceph-ploy:~/ceph-cluster$ sudo rbd create rbdimg1 --size 1G --pool rbdtest1
cephok@ceph-ploy:~/ceph-cluster$ sudo rbd create rbdimg2 --size 500M --pool rbdtest1 --image-format 2 --image-feature layering
cephok@ceph-ploy:~/ceph-cluster$ sudo rbd ls --pool rbdtest1
rbdimg1
rbdimg2
###2) Inspect image properties
cephok@ceph-ploy:~/ceph-cluster$ sudo rbd --image rbdimg1 --pool rbdtest1 info
rbd image 'rbdimg1':
        size 1 GiB in 256 objects
        order 22 (4 MiB objects)
        snapshot_count: 0
        id: fb17f15f1e3d
        block_name_prefix: rbd_data.fb17f15f1e3d
        format: 2
        features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
        op_features:
        flags:
        create_timestamp: Mon Aug 30 22:54:14 2021
        access_timestamp: Mon Aug 30 22:54:14 2021
        modify_timestamp: Mon Aug 30 22:54:14 2021
cephok@ceph-ploy:~/ceph-cluster$ sudo rbd --image rbdimg2 --pool rbdtest1 info
rbd image 'rbdimg2':
        size 500 MiB in 125 objects
        order 22 (4 MiB objects)
        snapshot_count: 0
        id: fb1a99213ad7
        block_name_prefix: rbd_data.fb1a99213ad7
        format: 2
        features: layering
        op_features:
        flags:
        create_timestamp: Mon Aug 30 22:55:50 2021
        access_timestamp: Mon Aug 30 22:55:50 2021
        modify_timestamp: Mon Aug 30 22:55:50 2021
###Image feature details (a sketch for toggling features follows this list)
layering: layering/cloning support
striping: striping support (v2)
exclusive-lock: exclusive locking support
object-map: object map support (depends on exclusive-lock)
fast-diff: fast diff calculation (depends on object-map)
deep-flatten: snapshot flatten support
journaling: records all IO operations (depends on exclusive-lock)
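Features can also be toggled on an existing image with rbd feature enable/disable. A sketch, using the rbdtest1/rbdimg1 image created above (deep-flatten can only be disabled, never re-enabled after creation):
#turn off features an older kernel client cannot handle
sudo rbd feature disable rbdtest1/rbdimg1 object-map fast-diff deep-flatten
#exclusive-lock and object-map can be enabled again later (object-map depends on exclusive-lock)
sudo rbd feature enable rbdtest1/rbdimg1 exclusive-lock
sudo rbd feature enable rbdtest1/rbdimg1 object-map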
###3) Client-side setup. Note: if these steps were already done above, they can be skipped.
cephok@ceph-ploy:~/ceph-cluster$ ceph-deploy install --no-adjust-repos --nogpgcheck ceph-client1
cephok@ceph-ploy:~/ceph-cluster$ ceph-deploy admin ceph-client1
cephok@ceph-ploy:~/ceph-cluster$ ssh ceph-client1 'sudo apt install acl -y && sudo setfacl -m u:ceph:rw /etc/ceph/ceph.client.admin.keyring'
cephok@ceph-ploy:~/ceph-cluster$ ssh ceph-client1 ' sudo setfacl -m u:cephok:rw /etc/ceph/ceph.client.admin.keyring'
###4) Map RBD images on the client. !!! Requires step 3 to have been done.
cephok@ceph-client1:/rbddata$ sudo rbd -p rbdtest1 map rbdimg2
rbd: warning: image already mapped as /dev/rbd0
/dev/rbd1
cephok@ceph-client1:/rbddata$ sudo rbd -p rbdtest1 map rbdimg1
rbd: sysfs write failed
RBD image feature set mismatch. You can disable features unsupported by the kernel with "rbd feature disable rbdtest1/rbdimg1 object-map fast-diff deep-flatten".
In some cases useful info is found in syslog - try "dmesg | tail".
rbd: map failed: (6) No such device or address
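The failed map of rbdimg1 is expected here: it was created with the default feature set, and the client kernel does not support object-map/fast-diff/deep-flatten. Following the hint in the error message, the unsupported features can be disabled and the map retried (a sketch):
#disable the features the kernel RBD client cannot handle
sudo rbd feature disable rbdtest1/rbdimg1 object-map fast-diff deep-flatten
#retry the mapping; it should now return a /dev/rbdX device
sudo rbd -p rbdtest1 map rbdimg1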
cephok@ceph-client1:~$ sudo mkfs.ext4 -m0 /dev/rbd0
cephok@ceph-client1:~$ sudo mkdir /rbddata
cephok@ceph-client1:~$ sudo mount /dev/rbd0 /rbddata/
cephok@ceph-client1:~$ sudo cp /etc/passwd /rbddata
cephok@ceph-client1:~$ sudo df -h
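The mapping and mount above do not survive a reboot. One common way to make them persistent is the rbdmap service shipped with ceph-common (the /etc/ceph/rbdmap file already shows up in the directory listings earlier). A sketch, assuming the admin keyring is used on the client:
#have the rbdmap service map rbdtest1/rbdimg2 at boot
echo 'rbdtest1/rbdimg2 id=admin,keyring=/etc/ceph/ceph.client.admin.keyring' | sudo tee -a /etc/ceph/rbdmap
sudo systemctl enable rbdmap
#the mapped device appears as /dev/rbd/rbdtest1/rbdimg2 and can then be mounted from /etc/fstab
#(add _netdev to the mount options so the mount waits for the network)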
###Verify
root@ceph-client1:/rbddata#dd if=/dev/zero of=/rbddata/cephfile bs=1MB count=100
cephok@ceph-client1:/rbddata$ ll /rbddata
total 97677
drwxr-xr-x 3 root root 1024 Aug 30 23:32 ./
drwxr-xr-x 24 root root 4096 Aug 30 23:31 ../
-rw-r--r-- 1 root root 100000000 Aug 30 23:32 cephfile
drwx------ 2 root root 12288 Aug 30 23:30 lost+found/
-rw-r--r-- 1 root root 1732 Aug 30 23:31 passwd
root@ceph-client1:/rbddata# sudo ceph df
--- RAW STORAGE ---
CLASS SIZE AVAIL USED RAW USED %RAW USED
hdd 75 GiB 70 GiB 5.4 GiB 5.4 GiB 7.22
TOTAL 75 GiB 70 GiB 5.4 GiB 5.4 GiB 7.22
--- POOLS ---
POOL ID PGS STORED OBJECTS USED %USED MAX AVAIL
device_health_metrics 1 1 0 B 0 0 B 0 21 GiB
nubelpool 2 32 0 B 0 0 B 0 21 GiB
fz1 3 64 1.5 GiB 400 4.4 GiB 6.61 21 GiB
mypool 4 32 0 B 0 0 B 0 21 GiB
rbdtest1 5 64 104 MiB 46 311 MiB 0.48 21 GiB