Ceph distributed storage: rebalancing uneven data distribution

Author: lihanglucien | Published 2018-12-26

    1. Check whether the data distribution is balanced

    # Check per-OSD usage
    $ ceph osd df tree
    ID CLASS WEIGHT    REWEIGHT SIZE   USE    AVAIL  %USE VAR  PGS TYPE NAME
    -1       196.21051        -   190T   347G   190T 0.18 1.00   - root default
    -3        65.40106        - 61390G   115G 61274G 0.19 1.06   -     host ceph-xxx-osd01
     0   hdd   5.45009        0      0      0      0    0    0   0         osd.0
     1   hdd   5.45009  1.00000  5580G  8640M  5572G 0.15 0.85 125         osd.1
     2   hdd   5.45009  1.00000  5580G  7797M  5573G 0.14 0.77 128         osd.2
     3   hdd   5.45009  1.00000  5580G 10811M  5570G 0.19 1.06 131         osd.3
     4   hdd   5.45009  1.00000  5580G 11150M  5570G 0.20 1.10 124         osd.4
     5   hdd   5.45009  0.95001  5580G 13175M  5568G 0.23 1.29 114         osd.5
     6   hdd   5.45009  1.00000  5580G  9769M  5571G 0.17 0.96 141         osd.6
     7   hdd   5.45009  0.90002  5580G 11773M  5569G 0.21 1.16 102         osd.7
     8   hdd   5.45009  1.00000  5580G 11805M  5569G 0.21 1.16 118         osd.8
     9   hdd   5.45009  1.00000  5580G 12964M  5568G 0.23 1.27 123         osd.9
    10   hdd   5.45009  0.90002  5580G 12315M  5568G 0.22 1.21 123         osd.10
    11   hdd   5.45009  1.00000  5580G  8549M  5572G 0.15 0.84 115         osd.11
    -5        65.40106        - 66971G   115G 66855G 0.17 0.97   -     host ceph-xxx-osd02
    12   hdd   5.45009  1.00000  5580G  8975M  5572G 0.16 0.88 115         osd.12
    13   hdd   5.45009  1.00000  5580G 10832M  5570G 0.19 1.06 106         osd.13
    14   hdd   5.45009  1.00000  5580G 10183M  5570G 0.18 1.00 111         osd.14
    15   hdd   5.45009  1.00000  5580G  9337M  5571G 0.16 0.92  93         osd.15
    16   hdd   5.45009  1.00000  5580G 11508M  5569G 0.20 1.13 111         osd.16
    17   hdd   5.45009  1.00000  5580G  6237M  5574G 0.11 0.61 118         osd.17
    18   hdd   5.45009  1.00000  5580G  9822M  5571G 0.17 0.97 116         osd.18
    19   hdd   5.45009  0.95001  5580G 13169M  5568G 0.23 1.29 122         osd.19
    20   hdd   5.45009  1.00000  5580G  7996M  5573G 0.14 0.79 132         osd.20
    21   hdd   5.45009  1.00000  5580G  7671M  5573G 0.13 0.75  97         osd.21
    22   hdd   5.45009  1.00000  5580G  7959M  5573G 0.14 0.78 121         osd.22
    23   hdd   5.45009  0.85004  5580G 15036M  5566G 0.26 1.48 102         osd.23
    -7        65.40839        - 66978G   115G 66862G 0.17 0.97   -     host ceph-xxx-osd03
    24   hdd   5.45070  1.00000  5581G  8716M  5572G 0.15 0.86 128         osd.24
    25   hdd   5.45070  1.00000  5581G 11036M  5570G 0.19 1.08 113         osd.25
    26   hdd   5.45070  1.00000  5581G  9507M  5572G 0.17 0.93 113         osd.26
    27   hdd   5.45070  0.85004  5581G 12854M  5568G 0.22 1.26 105         osd.27
    28   hdd   5.45070  1.00000  5581G  9550M  5572G 0.17 0.94 102         osd.28
    29   hdd   5.45070  1.00000  5581G  9189M  5572G 0.16 0.90 109         osd.29
    30   hdd   5.45070  1.00000  5581G  7804M  5573G 0.14 0.77 105         osd.30
    31   hdd   5.45070  1.00000  5581G 11366M  5570G 0.20 1.12 116         osd.31
    32   hdd   5.45070  1.00000  5581G  9286M  5572G 0.16 0.91 110         osd.32
    33   hdd   5.45070  1.00000  5581G 10233M  5571G 0.18 1.01 115         osd.33
    34   hdd   5.45070  1.00000  5581G  8223M  5573G 0.14 0.81 107         osd.34
    35   hdd   5.45070  1.00000  5581G 10989M  5570G 0.19 1.08 121         osd.35
                          TOTAL   190T   347G   190T 0.18
    MIN/MAX VAR: 0.61/1.48  STDDEV: 0.03
      
    # Print the osd name, PG count (PGS) and %USE for each OSD
    ceph osd df tree | awk '/osd\./{print $NF" "$(NF-1)" "$(NF-3) }'
    osd.1 125 0.15
    osd.2 128 0.14
    osd.3 131 0.19
    osd.4 124 0.20
    osd.5 114 0.23
    osd.6 141 0.17
    osd.7 102 0.21
    osd.8 118 0.21
    osd.9 123 0.23
    osd.10 123 0.22
    osd.11 115 0.15
    osd.12 115 0.16
    osd.13 106 0.19
    osd.14 111 0.18
    osd.15 93 0.16
    osd.16 111 0.20
    osd.17 118 0.11
    osd.18 116 0.17
    osd.19 122 0.23
    osd.20 132 0.14
    osd.21 97 0.13
    osd.22 121 0.14
    osd.23 102 0.26
    osd.24 128 0.15
    osd.25 113 0.19
    osd.26 113 0.17
    osd.27 105 0.22
    osd.28 102 0.17
    osd.29 109 0.16
    osd.30 105 0.14
    osd.31 116 0.20
    osd.32 110 0.16
    osd.33 115 0.18
    osd.34 107 0.14
    osd.35 121 0.19
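
    On a larger cluster it helps to sort this output so the extremes stand out. The pipeline
    below is a small sketch that reuses the same awk extraction as above and sorts the OSDs
    by %USE; the head/tail counts of 5 are arbitrary.

    # Sort OSDs by %USE and show the least- and most-used ones
    $ ceph osd df tree | awk '/osd\./{print $NF" "$(NF-1)" "$(NF-3)}' | sort -k3,3 -n > /tmp/osd_use.txt
    $ head -n 5 /tmp/osd_use.txt     # least-used OSDs
    $ tail -n 5 /tmp/osd_use.txt     # most-used OSDs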
    

    2. reweight-by-pg: adjust OSD weights by placement-group distribution

    $ ceph osd reweight-by-pg
    moved 35 / 4032 (0.868056%)                                         # 35 PGs will be moved
    avg 115.2                                                           # the average OSD carries 115.2 PGs
    stddev 10.378 -> 9.47418 (expected baseline 10.5787)                # after this adjustment the standard deviation drops from 10.378 to 9.47418
    min osd.15 with 93 -> 92 pgs (0.807292 -> 0.798611 * mean)          # the least-loaded OSD is osd.15 with 93 PGs; after the adjustment it will carry 92
    max osd.6 with 141 -> 132 pgs (1.22396 -> 1.14583 * mean)           # the most-loaded OSD is osd.6 with 141 PGs; after the adjustment it will carry 132
      
     
    oload 120
    max_change 0.05
    max_change_osds 4
    average_utilization 21.1365
    overload_utilization 25.3638
    osd.6 weight 1.0000 -> 0.9500                                        # the reweight values of osd.6, osd.23, osd.7 and osd.27 are adjusted as follows
    osd.23 weight 0.8500 -> 0.9000
    osd.7 weight 0.9000 -> 0.9500
    osd.27 weight 0.8500 -> 0.9000
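
    The oload, max_change and max_change_osds lines above are the knobs this command works
    with: only OSDs above oload% of the average utilization are touched, each reweight moves
    by at most max_change, and at most max_change_osds OSDs are changed per run. Recent
    releases (the output here is from a Luminous-era cluster) also provide a dry-run variant.
    The positional arguments in the second command below are illustrative values in the order
    oload, max_change, max_osds; check "ceph osd reweight-by-pg -h" on your version before
    relying on them.

    # Dry run: report what would change without modifying any weights
    $ ceph osd test-reweight-by-pg

    # Example with explicit limits: treat OSDs above 110% of the average as overloaded,
    # cap each change at 0.05, and adjust at most 8 OSDs in one run
    $ ceph osd reweight-by-pg 110 0.05 8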
    

    3. reweight-by-utilization: adjust OSD weights by disk utilization

    $ ceph osd reweight-by-utilization
    moved 35 / 4032 (0.868056%)                                         # 35 PGs will be moved
    avg 115.2                                                           # the average OSD carries 115.2 PGs
    stddev 10.378 -> 9.47418 (expected baseline 10.5787)                # after this adjustment the standard deviation drops from 10.378 to 9.47418
    min osd.15 with 93 -> 92 pgs (0.807292 -> 0.798611 * mean)          # the least-loaded OSD is osd.15 with 93 PGs; after the adjustment it will carry 92
    max osd.6 with 141 -> 132 pgs (1.22396 -> 1.14583 * mean)           # the most-loaded OSD is osd.6 with 141 PGs; after the adjustment it will carry 132
      
      
    oload 120
    max_change 0.05
    max_change_osds 4
    average_utilization 21.1365
    overload_utilization 25.3638
    osd.6 weight 1.0000 -> 0.9500                                        # the reweight values of osd.6, osd.23, osd.7 and osd.27 are adjusted as follows
    osd.23 weight 0.8500 -> 0.9000
    osd.7 weight 0.9000 -> 0.9500
    osd.27 weight 0.8500 -> 0.9000
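
    Either command starts backfill on the affected PGs as soon as the new weights are set,
    so it is worth letting recovery finish before adjusting again. A minimal way to follow
    progress and then re-check the spread (the exact output layout depends on your Ceph
    version):

    # Watch recovery/backfill triggered by the weight changes
    $ watch -n 10 ceph -s

    # Once the cluster is back to HEALTH_OK, re-check the spread; the MIN/MAX VAR and
    # STDDEV figures at the bottom of the report should have shrunk
    $ ceph osd df tree | tail -n 2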
    

    4. Restore the weights after the data has been rebalanced

    # List the osd name and current REWEIGHT of each OSD
    $ ceph osd df tree | awk '/osd\./{print $NF" "$4 }'
    osd.1 1.00000
    osd.2 1.00000
    osd.3 1.00000
    osd.4 1.00000
    osd.5 0.90002
    osd.6 0.95001
    osd.7 0.90002
    osd.8 1.00000
    osd.9 1.00000
    osd.10 0.90002
    osd.11 1.00000
    osd.12 1.00000
    osd.13 1.00000
    osd.14 1.00000
    osd.15 1.00000
    osd.16 1.00000
    osd.17 1.00000
    osd.18 1.00000
    osd.19 0.95001
    osd.20 1.00000
    osd.21 1.00000
    osd.22 1.00000
    osd.23 0.85004
    osd.24 1.00000
    osd.25 1.00000
    osd.26 1.00000
    osd.27 0.85004
    osd.28 1.00000
    osd.29 1.00000
    osd.30 1.00000
    osd.31 1.00000
    osd.32 1.00000
    osd.33 1.00000
    osd.34 1.00000
    osd.35 1.00000
      
    # Set each adjusted OSD's weight back to the default value of 1.0, one at a time
    # ceph osd reweight {id} {weight}
    # Note: this reweight (override) value ranges from 0 to 1; it is separate from the
    # CRUSH WEIGHT column, which normally reflects the disk's capacity
      
    $ ceph osd reweight 5 1.0
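
    Doing this by hand for every adjusted OSD is tedious. The loop below is a sketch that
    parses the same "ceph osd df tree" output as above and resets every OSD whose REWEIGHT
    was lowered back to 1.0; verify the column positions on your version first, and note
    that each reset triggers another round of backfill.

    # Reset every OSD with 0 < REWEIGHT < 1 back to 1.0.
    # OSDs with REWEIGHT 0 are marked out and are intentionally skipped.
    $ for id in $(ceph osd df tree | awk '/osd\./ && $4+0 > 0 && $4+0 < 1 {sub(/^osd\./,"",$NF); print $NF}'); do
          ceph osd reweight "$id" 1.0
          sleep 5    # brief pause so backfill from each change does not start all at once
      done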
    
