Environment
- Ubuntu 18.04
- ansible 2.9.27
- ceph-ansible stable-5.0 branch (from git)
- ceph 15.2.17
- Python 2.7 (pulled in automatically when installing ansible via apt)
- Three test nodes:

| Node | IP | Disks (OSD count) |
|---|---|---|
| TEST-01 | 192.168.100.6 | 3 |
| TEST-02 | 192.168.100.7 | 4 |
| TEST-03 | 192.168.100.8 | 4 |
Part 1. Install ansible
- Install ansible
add-apt-repository ppa:ansible/ansible
apt update
apt install ansible
- Check the ansible version
# ansible --version
ansible 2.9.27
config file = /etc/ansible/ansible.cfg
configured module search path = [u'/root/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python2.7/dist-packages/ansible
executable location = /usr/bin/ansible
python version = 2.7.17 (default, Mar 8 2023, 18:40:28) [GCC 7.5.0]
- Download the matching version of ceph-ansible
According to the Releases documentation, use the stable-5.0 branch of ceph-ansible:
git clone https://github.com/ceph/ceph-ansible.git
cd ceph-ansible
git checkout stable-5.0
pip install -r requirements.txt
Part 2. Configuration
1. Configure /etc/hosts on all nodes
127.0.0.1 localhost
192.168.100.6 TEST-01
192.168.100.7 TEST-02
192.168.100.8 TEST-03
2. Copy the ssh key to each node
ssh-copy-id TEST-01
ssh-copy-id TEST-02
ssh-copy-id TEST-03
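To confirm passwordless login works before running any playbooks, a quick loop like the following can be used (a minimal check; it assumes the node names resolve via the /etc/hosts entries above):
for h in TEST-01 TEST-02 TEST-03; do ssh -o BatchMode=yes "$h" hostname; done   # each node should print its hostname without a password prompt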
3. Configure the ansible inventory /etc/ansible/hosts
# monitor nodes
[mons]
TEST-01
# manager nodes
[mgrs]
TEST-02
# osd nodes
[osds]
TEST-01
TEST-02
TEST-03
# client nodes
[clients]
TEST-01
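The inventory can then be sanity-checked with standard ansible commands (assuming it lives at the default /etc/ansible/hosts):
ansible all --list-hosts    # should list TEST-01, TEST-02 and TEST-03
ansible osds -m ping        # every osd node should answer "pong"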
4. Configure /ceph-ansible/group_vars/all.yml
The main settings to modify are:
- Network: public_network, cluster_network, monitor_interface
- Deployment mode: osd_scenario
- PG counts: osd_pool_default_pg_num and osd_pool_default_pgp_num (a sizing sketch for estimating the PG count follows this list)
- Ceph release to install: ceph_stable_release
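A common rule of thumb for sizing PGs (not an official formula; the 256 used below is simply this deployment's choice) is roughly 100 PGs per OSD divided by the replica count, split across the pools and rounded to a power of two:
# rule-of-thumb sketch: total_pgs ≈ osds * 100 / replica_size, then divide across pools
osds=11; size=2; pools=4
total=$(( osds * 100 / size ))   # 550
echo $(( total / pools ))        # ~137 per pool -> round to a power of two such as 128 or 256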
---
dummy:
# Directory used to store files fetched back from the remote hosts; commonly used when the
# playbook needs to pull things such as keys, logs or config files from the target nodes.
fetch_directory: fetch/
# Cluster name
cluster: ceph
# These group names must match the groups defined in /etc/ansible/hosts
mon_group_name: mons
osd_group_name: osds
mgr_group_name: mgrs
client_group_name: clients
debian_package_dependencies:
- python-pycurl
# Enable the ntp service
ntp_service_enabled: true
ntp_daemon_type: ntpd
# Where the Ceph packages come from:
# repository: install Ceph packages from the official Ceph repository or a specified external repository
# distro: install Ceph packages from the distribution's default package repositories
# local: install Ceph packages from the local filesystem or a custom local repository
ceph_origin: repository
# Which Ceph repository to use
# community: the official Ceph community repository
ceph_repository: community
ceph_mirror: https://mirrors.aliyun.com/ceph
ceph_stable_key: https://mirrors.aliyun.com/ceph/keys/release.asc
# Ceph release
ceph_stable_release: octopus
ceph_stable_repo: "{{ ceph_mirror }}/debian-{{ ceph_stable_release }}"
generate_fsid: true
cephx: true
# Ceph public network
public_network: "192.168.100.0/29"
# Ceph cluster network
cluster_network: "192.168.100.0/29"
# Network interface used by the Ceph Monitors, both for mon-to-mon traffic and for client-to-mon traffic
monitor_interface: enp7s0f0
# OSD deployment scenario
# collocated: data and journal (WAL and DB) on the same device; the simplest setup, suited to small clusters without dedicated journal devices
# non-collocated: data and journal on different devices, typically data on HDD and journal on SSD, for better performance
# lvm: use LVM to manage OSD data and journal devices; more flexible storage management
osd_scenario: lvm
journal_size: 204800
ceph_conf_overrides:
  global:
    # supported authentication, cephx is Ceph's default mechanism
    auth_supported: cephx
    # authentication required for intra-cluster communication
    auth_cluster_required: cephx
    # authentication required for service-to-service communication
    auth_service_required: cephx
    # authentication required for client access
    auth_client_required: cephx
    # OSD nearfull ratio threshold, 85%
    mon_osd_nearfull_ratio: 0.85
    # OSD full ratio threshold, 95%
    mon_osd_full_ratio: 0.95
    # seconds after an OSD is marked down before it is marked out of the cluster, 3600 s (1 hour)
    mon_osd_down_out_interval: 3600
    # allowed clock drift between monitors, 0.15 seconds
    mon_clock_drift_allowed: 0.15
    # messenger type, async
    ms_type: async
    # number of async messenger worker threads
    ms_async_op_threads: 4
    # use asynchronous I/O for the journal
    journal_aio: true
    # use direct I/O for the journal
    journal_dio: true
    # force asynchronous I/O
    journal_force_aio: true
    # force direct I/O
    journal_force_dio: true
    # maximum number of operations in the journal queue
    journal_queue_max_ops: 50000
    # maximum number of journal write entries
    journal_max_write_entries: 10000
    # maximum number of journal write bytes
    journal_max_write_bytes: 1073741824
    # debug levels set to 0 to disable debug logging
    debug_tp: 0
    debug_timer: 0
    debug_throttle: 0
    debug_rgw: 0
    debug_rbd: 0
    debug_rados: 0
    debug_perfcounter: 0
    debug_paxos: 0
    debug_osd: 0
    debug_optracker: 0
    debug_objecter: 0
    debug_objectcacher: 0
    debug_objclass: 0
    debug_ms: 0
    debug_monc: 0
    debug_mon: 0
    debug_mds_migrator: 0
    debug_mds_log_expire: 0
    debug_mds_log: 0
    debug_mds_locker: 0
    debug_mds_balancer: 0
    debug_mds: 0
    debug_lockdep: 0
    debug_journaler: 0
    debug_journal: 0
    debug_hadoop: 0
    debug_finisher: 0
    debug_filestore: 0
    debug_filer: 0
    debug_crush: 0
    debug_context: 0
    debug_client: 0
    debug_civetweb: 0
    debug_buffer: 0
    debug_auth: 0
    debug_asok: 0
    # mount options for XFS filesystems used by OSDs
    osd_mount_options_xfs: "rw,noatime,nobarrier,attr2,inode64,noquota,logbsize=256k,logbufs=8,allocsize=4M"
    # cap on the number of in-flight client messages per OSD
    osd_client_message_cap: 2000
    # number of OSD op threads
    osd_op_threads: 8
    # number of OSD disk threads
    osd_disk_threads: 1
    # maximum number of concurrent recovery operations per OSD
    osd_recovery_max_active: 1
    # number of OSD recovery threads
    osd_recovery_threads: 1
    # maximum number of backfills per OSD
    osd_max_backfills: 1
    # priority of client operations
    osd_client_op_priority: 70
    # priority of recovery operations
    osd_recovery_op_priority: 10
    # CRUSH chooseleaf type
    osd_crush_chooseleaf_type: 1
    # default replica count for pools
    osd_pool_default_size: 2
    # minimum replica count for pools
    osd_pool_default_min_size: 1
    # default PG count for pools
    osd_pool_default_pg_num: 256
    # default PGP count for pools
    osd_pool_default_pgp_num: 256
    # cap on the total size of in-flight client messages per OSD, in bytes
    osd_client_message_size_cap: 1073741824
    # OSD op thread timeout in seconds
    osd_op_thread_timeout: 360
    # maximum number of simultaneous scrubs per OSD
    osd_max_scrubs: 1
    # hour of day at which scrubbing may begin
    osd_scrub_begin_hour: 1
    # hour of day at which scrubbing must end
    osd_scrub_end_hour: 6
    # load threshold above which scrubbing is skipped
    osd_scrub_load_threshold: 0.5
    # minimum scrub interval in seconds
    osd_scrub_min_interval: 86400
    # maximum scrub interval in seconds
    osd_scrub_max_interval: 604800
    # deep scrub interval in seconds
    osd_deep_scrub_interval: 604800
    # erasure-coded pool stripe width
    osd_pool_erasure_code_stripe_width: 65536
    # default erasure code profile for pools
    osd_pool_default_erasure_code_profile: "plugin=isa technique=reed_sol_van k=4 m=2 ruleset-root=default crush-failure-domain=host"
    filestore_flusher: false
    filestore_journal_writeahead: true
    filestore_op_threads: 32
    filestore_queue_max_ops: 15000
    filestore_queue_max_bytes: 1048576000
    filestore_queue_committing_max_ops: 5000
    filestore_queue_committing_max_bytes: 1048576000
    filestore_max_sync_interval: 10
    filestore_min_sync_interval: 5
    filestore_wbthrottle_xfs_ios_start_flusher: 10000
    filestore_wbthrottle_xfs_ios_hard_limit: 10000
    filestore_wbthrottle_xfs_inodes_start_flusher: 10000
    filestore_wbthrottle_xfs_inodes_hard_limit: 10000
    filestore_wbthrottle_enable: false
    filestore_expected_throughput_ops: 20000
    filestore_fd_cache_size: 32768
    filestore_fd_cache_shards: 32
    filestore_omap_backend: leveldb
    filestore_merge_threshold: -1
    filestore_split_multiple: 16000
  client:
    # default format version for newly created RBD (RADOS Block Device) images
    # 1: original format, supports snapshots and clones
    # 2: newer format, supports more advanced features such as snapshots, clones, striping and object maps
    rbd_default_format: 2
    # default features enabled for new RBD images (3 here is 1 + 2)
    # 1: layering (snapshots and clones)
    # 2: striping v2
    # 4: exclusive-lock
    # 8: object map
    # 16: fast-diff
    # 32: deep-flatten
    rbd_default_features: 3
os_tuning_params:
# maximum process ID number allowed by the kernel
- { name: kernel.pid_max, value: 4194303 }
# seconds the kernel waits before rebooting after a panic
- { name: kernel.panic, value: 20 }
# whether a hung task triggers a kernel panic
# 0: do not panic when a hung task is detected
- { name: kernel.hung_task_panic, value: 0 }
# timeout in seconds for hung task detection
- { name: kernel.hung_task_timeout_secs, value: 120 }
# maximum number of threads allowed system-wide
- { name: kernel.threads-max, value: 1000000 }
# maximum number of open file handles system-wide
- { name: fs.file-max, value: 26234859 }
# zone reclaim mode, mainly relevant on NUMA systems
# 2: enable zone reclaim mode
- { name: vm.zone_reclaim_mode, value: 2 }
# how aggressively the kernel swaps; lower means less swapping
# 0: avoid swapping as much as possible and keep data in physical memory
- { name: vm.swappiness, value: 0 }
# minimum amount of free memory the kernel keeps reserved, in KB
- { name: vm.min_free_kbytes, value: 4194303 }
# extra free memory to keep reserved, in KB, to reduce memory fragmentation
- { name: vm.extra_free_kbytes, value: 4194303 }
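Before running the playbook it is worth verifying on each node that the interface and network referenced above actually exist (a simple check, assuming the enp7s0f0 name from this config):
ip -4 addr show enp7s0f0                # should carry an address inside public_network
sysctl kernel.pid_max vm.swappiness     # current values, to compare against os_tuning_params after the run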
5. Configure /ceph-ansible/group_vars/clients.yml
Client-side settings, including the storage pools to create and the client keys to generate.
A key can be generated with:
python -c "import os ; import struct ; import time; import base64 ; key = os.urandom(16) ; header = struct.pack('<hiih',1,int(time.time()),0,len(key)) ; print(base64.b64encode(header + key))"
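Note that this one-liner uses Python 2 syntax (print as a statement, bytes printed directly). On a Python 3 host, a rough equivalent producing the same 16-byte key format would be:
python3 -c "import os, struct, time, base64; key = os.urandom(16); header = struct.pack('<hiih', 1, int(time.time()), 0, len(key)); print(base64.b64encode(header + key).decode())"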
---
dummy:
# Whether to copy the Ceph admin key to the clients; usually needed when the clients perform higher-level admin operations.
copy_admin_key: true
# Whether to apply the user configuration below; controls the pool/key creation logic.
user_config: true
# Pools to create and their parameters
# the PG count reuses the global ceph_conf_overrides.global.osd_pool_default_pg_num setting
pools:
- { name: images, pgs: "{{ ceph_conf_overrides.global.osd_pool_default_pg_num }}" }
- { name: template, pgs: "{{ ceph_conf_overrides.global.osd_pool_default_pg_num }}" }
- { name: backup, pgs: "{{ ceph_conf_overrides.global.osd_pool_default_pg_num }}" }
- { name: ebs, pgs: "{{ ceph_conf_overrides.global.osd_pool_default_pg_num }}" }
# Client keys and their permissions. Each entry has a name, a key string, monitor caps (mon), OSD caps (osd), a file mode and optional ACLs.
keys:
- { name: client.images, key: "xxxxxx==", caps: { mon: "allow r", osd: "allow class-read object_prefix rbd_children, allow rwx pool=images" }, mode: "0600"}
- { name: client.template, key: "xxxxxx==", caps: { mon: "allow r", osd: "allow class-read object_prefix rbd_children, allow rwx pool=template" }, mode: "0600"}
- { name: client.backup, key: "xxxxxx==", caps: { mon: "allow r", osd: "allow class-read object_prefix rbd_children, allow rwx pool=backup" }, mode: "0600"}
- { name: client.ebs, key: "xxxxxx==", caps: { mon: "allow r", osd: "allow class-read object_prefix rbd_children, allow rwx pool=ebs" }, mode: "0600"}
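After the deployment, the pools and keys defined here can be verified from a mon node with standard ceph commands:
ceph osd pool ls               # images, template, backup and ebs should be listed
ceph auth get client.images    # shows the generated key and the mon/osd caps configured above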
6. Configure /ceph-ansible/group_vars/osds.yml
Configures the OSDs (object storage daemons) of the cluster: which hosts run OSDs, which disks are used, and other OSD-related settings.
---
dummy:
# OSD deployment scenario
# collocated: data and journal (WAL and DB) on the same device; the simplest setup, suited to small clusters without dedicated journal devices
# non-collocated: data and journal on different devices, typically data on HDD and journal on SSD, for better performance
# lvm: use LVM to manage OSD data and journal devices; more flexible storage management
osd_scenario: lvm
# object store backend, e.g. bluestore (recommended) or filestore
osd_objectstore: bluestore
# whether to enable disk encryption
dmcrypt: false
# disks used for data
devices:
- /dev/sdb
- /dev/sdc
- /dev/sdd
- /dev/sdf
# dedicated devices, typically used to hold the BlueStore/FileStore journal, WAL or DB
# these are usually faster devices (NVMe or SSD), separate from the data disks (HDD or SSD), to improve performance
dedicated_devices:
- /dev/sde
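The device list must match real, unused disks on each node; a quick pre-flight check (the wipe commands are only needed, and destructive, if a disk was previously used):
lsblk -o NAME,SIZE,TYPE,FSTYPE /dev/sd[b-f]         # the listed devices should carry no partitions or filesystem signatures
# wipefs -a /dev/sdX && sgdisk --zap-all /dev/sdX   # destructive: clears old signatures from a reused disk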
7. If the disk device names differ between nodes, create a per-node OSD configuration
- Create a host_vars directory in the ansible root directory
- In the host_vars directory, create a file named after the node, e.g. TEST-02, with the following content
host_vars/TEST-02
---
dummy:
osd_scenario: lvm
osd_objectstore: bluestore
dmcrypt: false
devices:
- /dev/sdb
- /dev/sdc
- /dev/sde
8. Configure /ceph-ansible/site.yml
Here only the mons, osds, clients and mgrs plays are kept; simply delete the plays for the host groups that are not needed (see the note below).
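In the ceph-ansible checkout the playbook ships as a sample file, so it is copied first and then trimmed (assuming the stable-5.0 layout):
cd ceph-ansible
cp site.yml.sample site.yml
# then delete the plays for the unused host groups (rgws, mdss, nfss, ...) so that only mons, mgrs, osds and clients remain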
---
# Defines deployment design and assigns role to server groups
- hosts:
- mons
- osds
- clients
- mgrs
gather_facts: false
any_errors_fatal: true
become: true
tags: always
vars:
delegate_facts_host: True
pre_tasks:
# If we can't get python2 installed before any module is used we will fail
# so just try what we can to get it installed
- import_tasks: raw_install_python.yml
- name: gather facts
setup:
gather_subset:
- 'all'
- '!facter'
- '!ohai'
when:
- not delegate_facts_host | bool or inventory_hostname in groups.get(client_group_name, [])
- name: gather and delegate facts
setup:
gather_subset:
- 'all'
- '!facter'
- '!ohai'
delegate_to: "{{ item }}"
delegate_facts: True
with_items: "{{ groups['all'] | difference(groups.get('clients', [])) }}"
run_once: true
when: delegate_facts_host | bool
tasks:
- import_role:
name: ceph-defaults
# dummy container setup is only supported on x86_64
# when running with containerized_deployment: true this task
# creates a group that contains only x86_64 hosts.
# when running with containerized_deployment: false this task
# will add all client hosts to the group (and not filter).
- name: create filtered clients group
add_host:
name: "{{ item }}"
groups: _filtered_clients
with_items: "{{ groups.get(client_group_name, []) | intersect(ansible_play_batch) }}"
when: (hostvars[item]['ansible_facts']['architecture'] == 'x86_64') or (not containerized_deployment | bool)
- import_role:
name: ceph-facts
- import_role:
name: ceph-handler
- import_role:
name: ceph-validate
- import_role:
name: ceph-infra
- import_role:
name: ceph-common
- hosts: mons
gather_facts: false
become: True
any_errors_fatal: true
pre_tasks:
- name: set ceph monitor install 'In Progress'
run_once: true
set_stats:
data:
installer_phase_ceph_mon:
status: "In Progress"
start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
tasks:
- import_role:
name: ceph-defaults
tags: ['ceph_update_config']
- import_role:
name: ceph-facts
tags: ['ceph_update_config']
- import_role:
name: ceph-handler
tags: ['ceph_update_config']
- import_role:
name: ceph-config
tags: ['ceph_update_config']
- import_role:
name: ceph-mon
- import_role:
name: ceph-mgr
when: groups.get(mgr_group_name, []) | length == 0
post_tasks:
- name: set ceph monitor install 'Complete'
run_once: true
set_stats:
data:
installer_phase_ceph_mon:
status: "Complete"
end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- hosts: mgrs
gather_facts: false
become: True
any_errors_fatal: true
pre_tasks:
- name: set ceph manager install 'In Progress'
run_once: true
set_stats:
data:
installer_phase_ceph_mgr:
status: "In Progress"
start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
tasks:
- import_role:
name: ceph-defaults
tags: ['ceph_update_config']
- import_role:
name: ceph-facts
tags: ['ceph_update_config']
- import_role:
name: ceph-handler
tags: ['ceph_update_config']
- import_role:
name: ceph-config
tags: ['ceph_update_config']
- import_role:
name: ceph-mgr
post_tasks:
- name: set ceph manager install 'Complete'
run_once: true
set_stats:
data:
installer_phase_ceph_mgr:
status: "Complete"
end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- hosts: osds
gather_facts: false
become: True
any_errors_fatal: true
pre_tasks:
- name: set ceph osd install 'In Progress'
run_once: true
set_stats:
data:
installer_phase_ceph_osd:
status: "In Progress"
start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
tasks:
- import_role:
name: ceph-defaults
tags: ['ceph_update_config']
- import_role:
name: ceph-facts
tags: ['ceph_update_config']
- import_role:
name: ceph-handler
tags: ['ceph_update_config']
- import_role:
name: ceph-config
tags: ['ceph_update_config']
- import_role:
name: ceph-osd
post_tasks:
- name: set ceph osd install 'Complete'
run_once: true
set_stats:
data:
installer_phase_ceph_osd:
status: "Complete"
end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- hosts: clients
gather_facts: false
become: True
any_errors_fatal: true
tags: 'ceph_client'
pre_tasks:
- name: set ceph client install 'In Progress'
run_once: true
set_stats:
data:
installer_phase_ceph_client:
status: "In Progress"
start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
tasks:
- import_role:
name: ceph-defaults
tags: ['ceph_update_config']
- import_role:
name: ceph-facts
tags: ['ceph_update_config']
- import_role:
name: ceph-handler
tags: ['ceph_update_config']
- import_role:
name: ceph-config
tags: ['ceph_update_config']
- import_role:
name: ceph-client
post_tasks:
- name: set ceph client install 'Complete'
run_once: true
set_stats:
data:
installer_phase_ceph_client:
status: "Complete"
end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- hosts:
- mons
- osds
- mgrs
gather_facts: false
become: True
any_errors_fatal: true
pre_tasks:
- name: set ceph crash install 'In Progress'
run_once: true
set_stats:
data:
installer_phase_ceph_crash:
status: "In Progress"
start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
tasks:
- import_role:
name: ceph-defaults
- import_role:
name: ceph-facts
tasks_from: container_binary.yml
- import_role:
name: ceph-handler
- import_role:
name: ceph-crash
post_tasks:
- name: set ceph crash install 'Complete'
run_once: true
set_stats:
data:
installer_phase_ceph_crash:
status: "Complete"
end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- hosts: mons
gather_facts: false
become: True
any_errors_fatal: true
tasks:
- import_role:
name: ceph-defaults
- name: get ceph status from the first monitor
command: ceph --cluster {{ cluster }} -s
register: ceph_status
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
- name: "show ceph status for cluster {{ cluster }}"
debug:
msg: "{{ ceph_status.stdout_lines }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
when:
- ceph_status is not skipped
- ceph_status is successful
Part 3. Installation
1. Check that all nodes are reachable
ansible all -m ping
2. Run the installation
ansible-playbook site.yml
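Because the configuration roles in the playbook above are tagged ceph_update_config, a later change that only touches ceph.conf can be pushed without a full re-run (a shortcut, assuming nothing else changed):
ansible-playbook site.yml --tags ceph_update_config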
3. Check the ceph status
ceph -s
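A few more standard commands help confirm that the cluster is healthy and all 11 OSDs came up:
ceph health detail    # explains any HEALTH_WARN / HEALTH_ERR states
ceph osd df           # per-OSD usage and PG distribution
ceph df               # pool-level and raw capacity usage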
Part 4. Adding an OSD
Here a new disk, /dev/sde, is added on the TEST-01 node.
1. Create host_vars/TEST-01
---
dummy:
osd_scenario: lvm
osd_objectstore: bluestore
dmcrypt: false
devices:
- /dev/sde
2. Run ansible against TEST-01 only
ansible-playbook site.yml --limit TEST-01
The output looks like this:
.......
TEST-01 : ok=152 changed=16 unreachable=0 failed=0 skipped=321 rescued=0 ignored=0
INSTALLER STATUS ****************************************************************************
Install Ceph OSD : Complete (0:02:56)
Install Ceph Crash : Complete (0:00:06)
Thursday 15 August 2024 09:34:00 +0800 (0:00:00.041) 0:03:36.503 *******
===============================================================================
ceph-handler : restart ceph osds daemon(s) ----------------------------------------- 104.33s
ceph-handler : re-enable pg autoscale on pools -------------------------------------- 11.74s
ceph-handler : disable pg autoscale on pools ---------------------------------------- 11.32s
ceph-osd : wait for all osd to be up ------------------------------------------------ 11.10s
ceph-osd : use ceph-volume lvm batch to create bluestore osds ------------------------ 7.31s
ceph-common : install dependencies for apt modules ----------------------------------- 3.35s
ceph-infra : update cache for Debian based OSs --------------------------------------- 3.28s
gather and delegate facts ------------------------------------------------------------ 3.04s
ceph-osd : systemd start osd --------------------------------------------------------- 2.64s
ceph-config : look up for ceph-volume rejected devices ------------------------------- 2.51s
ceph-osd : apply operating system tuning --------------------------------------------- 2.01s
ceph-osd : set noup flag ------------------------------------------------------------- 1.79s
ceph-osd : install dependencies ------------------------------------------------------ 1.42s
ceph-common : configure debian ceph community repository stable key ------------------ 1.42s
ceph-common : install ceph for debian ------------------------------------------------ 1.41s
ceph-infra : install ntpd ------------------------------------------------------------ 1.38s
ceph-common : install dependencies --------------------------------------------------- 1.36s
ceph-osd : collect osd ids ----------------------------------------------------------- 1.16s
ceph-common : update apt cache if cache_valid_time has expired ----------------------- 0.96s
ceph-crash : create client.crash keyring --------------------------------------------- 0.94s
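Afterwards the new OSD can be confirmed from any mon node (standard ceph CLI; assuming all 11 original disks were deployed, the OSD count should now read 12):
ceph osd tree    # the extra OSD appears under host TEST-01
ceph -s          # the osd line in the status output should show one more OSD up and in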