Using OpenStack Mitaka

Author: 挑战_bae7 | Published 2021-02-07 15:49

    1. Change the admin password

    Default credentials from the install guide: domain default / user admin / password ADMIN_PASS
    [root@controller ~]# vim /root/admin-openrc 
    export OS_PROJECT_DOMAIN_NAME=default
    export OS_USER_DOMAIN_NAME=default
    export OS_PROJECT_NAME=admin
    export OS_USERNAME=admin
    export OS_PASSWORD=123456   # change this to the new password
    export OS_AUTH_URL=http://controller:35357/v3
    export OS_IDENTITY_API_VERSION=3
    export OS_IMAGE_API_VERSION=2
    
    source /root/admin-openrc   # otherwise CLI commands on the controller cannot authenticate
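    The screenshot this section referred to showed the change in Horizon; as a sketch, the same change can be made from the CLI before updating admin-openrc (123456 is just the example value):
    
    openstack user set --password 123456 admin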
    

    2. Launch an instance on a specific host

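    To pin a new instance to a particular hypervisor, an admin can pass the zone:host form of --availability-zone. A rough sketch, where NET_ID and the image/flavor names are placeholders for your environment:
    
    nova boot --flavor m1.tiny --image cirros --nic net-id=NET_ID \
      --availability-zone nova:compute1 test-on-compute1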

    3. Create a regular user with permission to create and delete instances

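    A CLI sketch, assuming a demo project and the member role named user (as in the Mitaka install guide; some installs use _member_ instead):
    
    openstack project create --domain default demo
    openstack user create --domain default --password DEMO_PASS demo
    openstack role add --project demo --user demo user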

    4. Create a project and set quotas to limit what regular users can launch

    (screenshots: creating the project and setting its quotas; the final one shows the limit taking effect)
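    A CLI sketch of the same flow; the project name and the limits are examples:
    
    openstack project create --domain default --description "quota demo" test
    openstack quota set --instances 2 --cores 2 --ram 2048 test
    openstack quota show test   # verify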

    5. Move the glance image service to its own node

    On the controller, stop the glance services:
    [root@controller ~]# systemctl stop openstack-glance-api.service openstack-glance-registry.service 
    [root@controller ~]# systemctl disable openstack-glance-api.service openstack-glance-registry.service 
    Removed symlink /etc/systemd/system/multi-user.target.wants/openstack-glance-api.service.
    Removed symlink /etc/systemd/system/multi-user.target.wants/openstack-glance-registry.service.
    [root@controller ~]# mysqldump -B glance >glance.sql
    scp /root/glance.sql root@10.0.0.12:/root
    [root@controller ~]# ll /var/lib/glance/images/   # glance image store directory
    total 12980
    -rw-r----- 1 glance glance 13287936 Feb  3 08:52 5c95b5dc-50e2-4cad-b334-dfe3a3c9302f
    
    Update the glance endpoints in the keystone database on the controller:
    [root@controller ~]# source /root/admin-openrc
    [root@controller ~]# openstack endpoint list|grep glance
    | 6400a1d109f94fc7b0a68cf643731ba9 | RegionOne | glance       | image        | True    | internal  | http://controller:9292                    |
    | a23581fa8fc64771a0c9d580868fa19f | RegionOne | glance       | image        | True    | public    | http://controller:9292                    |
    | ae40a6fc6d4e44159950cecd32d17748 | RegionOne | glance       | image        | True    | admin     | http://controller:9292    
    [root@controller ~]# mysqldump keystone endpoint > endpoint.sql
    [root@controller ~]# cp endpoint.sql{,.bak}
    [root@controller ~]# vim endpoint.sql
    :%s#http://controller:9292#http://10.0.0.12:9292#gc    (the c flag prompts for confirmation; answer y to each of the three matches)
    [root@controller ~]# mysql keystone <endpoint.sql
    [root@controller ~]# openstack endpoint list|grep glance
    | 6400a1d109f94fc7b0a68cf643731ba9 | RegionOne | glance       | image        | True    | internal  | http://10.0.0.12:9292                     |
    | a23581fa8fc64771a0c9d580868fa19f | RegionOne | glance       | image        | True    | public    | http://10.0.0.12:9292                     |
    | ae40a6fc6d4e44159950cecd32d17748 | RegionOne | glance       | image        | True    | admin     | http://10.0.0.12:9292                     |
    
    [root@controller ~]# openstack image list   # verify
    +--------------------------------------+--------+--------+
    | ID                                   | Name   | Status |
    +--------------------------------------+--------+--------+
    | 5c95b5dc-50e2-4cad-b334-dfe3a3c9302f | cirros | active |
    +--------------------------------------+--------+--------+
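    Incidentally, the interactive vim substitution above can be done non-interactively with a single sed pass over the dump before re-importing it:
    
    sed -i 's#http://controller:9292#http://10.0.0.12:9292#g' endpoint.sql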
    
    Troubleshooting: errors usually sit at the very top or very bottom of a log. The awk one-liner below prints a `>logfile` redirection per file; piping it to bash truncates every log to zero length so only fresh messages remain:
    [root@controller ~]# cd /var/log/nova/
    [root@controller nova]# ls -1 | awk '{print ">"$0}'
    >nova-api.log
    >nova-conductor.log
    >nova-consoleauth.log
    >nova-manage.log
    >nova-novncproxy.log
    >nova-scheduler.log
    [root@controller nova]# ls -1 | awk '{print ">"$0}'|bash
    [root@controller nova]# ls
    nova-api.log  nova-conductor.log  nova-consoleauth.log  nova-manage.log  nova-novncproxy.log  nova-scheduler.log
    [root@controller nova]# grep -i error *.log 
    
    Troubleshooting an Internal Server Error:
    [root@controller ~]# openstack image list
    Internal Server Error (HTTP 500)
    [root@glance glance]# tailf api.log
    ImportError: No module named memcache
    yum install -y python-pip
    pip install --upgrade pip
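    Upgrading pip by itself does not supply the missing module; the memcache import comes from the python-memcached package, so the likely fix is:
    
    pip install python-memcached
    systemctl restart openstack-glance-api.service openstack-glance-registry.service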
    
    On the glance node:
    [root@localhost ~]# vim /etc/hosts
    10.0.0.11       controller
    yum install mariadb mariadb-server python2-PyMySQL -y
    echo '[mysqld]
    bind-address = 10.0.0.12
    default-storage-engine = innodb
    innodb_file_per_table
    max_connections = 4096
    collation-server = utf8_general_ci
    character-set-server = utf8' >/etc/my.cnf.d/openstack.cnf
    
    systemctl enable mariadb.service
    systemctl start mariadb.service
    mysql_secure_installation   # press Enter, then answer n, y, y, y, y
    
    mysql -u root -p
    CREATE DATABASE glance;
    GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' \
      IDENTIFIED BY 'GLANCE_DBPASS';
    GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' \
      IDENTIFIED BY 'GLANCE_DBPASS';
    source /root/glance.sql;
    show tables;   # verify the import
    
    yum install openstack-glance openstack-utils -y
    scp -pr root@10.0.0.11:/etc/glance/glance-registry.conf /etc/glance/glance-registry.conf
    scp -pr root@10.0.0.11:/etc/glance/glance-api.conf /etc/glance/glance-api.conf
    openstack-config --set /etc/glance/glance-registry.conf  database  connection  mysql+pymysql://glance:GLANCE_DBPASS@10.0.0.12/glance
    openstack-config --set /etc/glance/glance-api.conf  database  connection  mysql+pymysql://glance:GLANCE_DBPASS@10.0.0.12/glance
    systemctl enable openstack-glance-api.service openstack-glance-registry.service
    systemctl start openstack-glance-api.service openstack-glance-registry.service
    
    scp -pr root@10.0.0.11:/var/lib/glance/images/* /var/lib/glance/images/
    [root@glance ~]# chown -R glance.glance /var/lib/glance/images/
    [root@glance ~]# ll /var/lib/glance/images/
    total 12980
    -rw-r----- 1 glance glance 13287936 Feb  3 08:52 5c95b5dc-50e2-4cad-b334-dfe3a3c9302f
    
    Run on every node:
    sed  "s#http://controller:9292#http://10.0.0.12:9292#g" /etc/nova/nova.conf|grep 9292   # preview the change
    sed  -i "s#http://controller:9292#http://10.0.0.12:9292#g" /etc/nova/nova.conf          # apply it
    On the controller:
    systemctl restart openstack-nova-api.service
    On the compute nodes:
    systemctl restart openstack-nova-compute.service
    Launch an instance to confirm everything still works.
    
    Adding images
    Official image sources:
    https://docs.openstack.org/image-guide/obtain-images.html
    USTC mirror (faster inside China):
    http://mirrors.ustc.edu.cn/centos-cloud/centos/7/images/
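    Once downloaded, an image can be registered with the relocated glance in the usual way; the CentOS filename below is an example from the USTC mirror:
    
    wget http://mirrors.ustc.edu.cn/centos-cloud/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
    openstack image create "CentOS-7" --file CentOS-7-x86_64-GenericCloud.qcow2 \
      --disk-format qcow2 --container-format bare --public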

    6. The cinder block storage service

    mysql -u root -p
    CREATE DATABASE cinder;
    GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' \
      IDENTIFIED BY 'CINDER_DBPASS';
    GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' \
      IDENTIFIED BY 'CINDER_DBPASS';
    source /root/admin-openrc
    openstack user create --domain default --password CINDER_PASS cinder
    openstack role add --project service --user cinder admin
    openstack service create --name cinder   --description "OpenStack Block Storage" volume
    openstack service create --name cinderv2   --description "OpenStack Block Storage" volumev2
    openstack endpoint create --region RegionOne   volume public http://controller:8776/v1/%\(tenant_id\)s
    openstack endpoint create --region RegionOne   volume internal http://controller:8776/v1/%\(tenant_id\)s
    openstack endpoint create --region RegionOne   volume admin http://controller:8776/v1/%\(tenant_id\)s
    openstack endpoint create --region RegionOne   volumev2 public http://controller:8776/v2/%\(tenant_id\)s
    openstack endpoint create --region RegionOne   volumev2 internal http://controller:8776/v2/%\(tenant_id\)s
    openstack endpoint create --region RegionOne   volumev2 admin http://controller:8776/v2/%\(tenant_id\)s
    
    \cp /etc/cinder/cinder.conf{,.bak}
    egrep -v "^$|#" /etc/cinder/cinder.conf.bak >/etc/cinder/cinder.conf
    openstack-config --set /etc/cinder/cinder.conf  DEFAULT  rpc_backend  rabbit
    openstack-config --set /etc/cinder/cinder.conf  DEFAULT  auth_strategy  keystone
    openstack-config --set /etc/cinder/cinder.conf  DEFAULT  my_ip  10.0.0.11
    openstack-config --set /etc/cinder/cinder.conf  database  connection  mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder
    openstack-config --set /etc/cinder/cinder.conf  keystone_authtoken  auth_uri  http://controller:5000
    openstack-config --set /etc/cinder/cinder.conf  keystone_authtoken  auth_url  http://controller:35357
    openstack-config --set /etc/cinder/cinder.conf  keystone_authtoken  memcached_servers  controller:11211
    openstack-config --set /etc/cinder/cinder.conf  keystone_authtoken  auth_type  password
    openstack-config --set /etc/cinder/cinder.conf  keystone_authtoken  project_domain_name  default
    openstack-config --set /etc/cinder/cinder.conf  keystone_authtoken  user_domain_name  default
    openstack-config --set /etc/cinder/cinder.conf  keystone_authtoken  project_name  service
    openstack-config --set /etc/cinder/cinder.conf  keystone_authtoken  username  cinder
    openstack-config --set /etc/cinder/cinder.conf  keystone_authtoken  password  CINDER_PASS
    openstack-config --set /etc/cinder/cinder.conf  oslo_concurrency  lock_path  /var/lib/cinder/tmp
    openstack-config --set /etc/cinder/cinder.conf  oslo_messaging_rabbit  rabbit_host  controller
    openstack-config --set /etc/cinder/cinder.conf  oslo_messaging_rabbit  rabbit_userid  openstack
    openstack-config --set /etc/cinder/cinder.conf  oslo_messaging_rabbit  rabbit_password  RABBIT_PASS
    [root@controller ~]# md5sum /etc/cinder/cinder.conf
    e2119e2454022c6f13734d716afc7e1c  /etc/cinder/cinder.conf
    
     su -s /bin/sh -c "cinder-manage db sync" cinder
    
    openstack-config --set /etc/nova/nova.conf  cinder  os_region_name  RegionOne
    systemctl restart openstack-nova-api.service
    systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service
    systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service
    [root@controller ~]# cinder service-list
    +------------------+------------+------+---------+-------+------------+-----------------+
    |      Binary      |    Host    | Zone |  Status | State | Updated_at | Disabled Reason |
    +------------------+------------+------+---------+-------+------------+-----------------+
    | cinder-scheduler | controller | nova | enabled |   up  |     -      |        -        |
    +------------------+------------+------+---------+-------+------------+-----------------+
    

    Configure a storage node

    yum install lvm2
    systemctl enable lvm2-lvmetad.service
    systemctl start lvm2-lvmetad.service
    
    Add two disks, then rescan the SCSI bus so they show up:
    echo '- - -' > /sys/class/scsi_host/host0/scan
    fdisk -l
    pvcreate /dev/vdb 
    pvcreate /dev/vdc
    vgcreate cinder-ssd /dev/vdb
    vgcreate cinder-sata /dev/vdc
    vim /etc/lvm/lvm.conf +130   # adjust the filter to your disk names; mine are vd*
    filter = [ "a/vda/", "a/vdb/", "a/vdc/", "r/.*/"]
    
    \cp /etc/cinder/cinder.conf{,.bak}
    egrep -v "^$|#" /etc/cinder/cinder.conf.bak >/etc/cinder/cinder.conf
    openstack-config --set /etc/cinder/cinder.conf  database  connection  mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder
    openstack-config --set /etc/cinder/cinder.conf  DEFAULT  rpc_backend  rabbit
    openstack-config --set /etc/cinder/cinder.conf  DEFAULT  auth_strategy  keystone
    openstack-config --set /etc/cinder/cinder.conf  DEFAULT  my_ip  10.0.0.12   ## set to this node's actual IP
    openstack-config --set /etc/cinder/cinder.conf  DEFAULT  glance_api_servers  http://10.0.0.12:9292   ## glance was relocated above
    openstack-config --set /etc/cinder/cinder.conf  DEFAULT  enabled_backends  ssd,sata
    openstack-config --set /etc/cinder/cinder.conf  keystone_authtoken  auth_uri  http://controller:5000
    openstack-config --set /etc/cinder/cinder.conf  keystone_authtoken  auth_url  http://controller:35357
    openstack-config --set /etc/cinder/cinder.conf  keystone_authtoken  memcached_servers  controller:11211
    openstack-config --set /etc/cinder/cinder.conf  keystone_authtoken  auth_type  password
    openstack-config --set /etc/cinder/cinder.conf  keystone_authtoken  project_domain_name  default
    openstack-config --set /etc/cinder/cinder.conf  keystone_authtoken  user_domain_name  default
    openstack-config --set /etc/cinder/cinder.conf  keystone_authtoken  project_name  service
    openstack-config --set /etc/cinder/cinder.conf  keystone_authtoken  username  cinder
    openstack-config --set /etc/cinder/cinder.conf  keystone_authtoken  password  CINDER_PASS
    openstack-config --set /etc/cinder/cinder.conf  oslo_concurrency  lock_path  /var/lib/cinder/tmp
    openstack-config --set /etc/cinder/cinder.conf  oslo_messaging_rabbit  rabbit_host  controller
    openstack-config --set /etc/cinder/cinder.conf  oslo_messaging_rabbit  rabbit_userid  openstack
    openstack-config --set /etc/cinder/cinder.conf  oslo_messaging_rabbit  rabbit_password  RABBIT_PASS
    openstack-config --set /etc/cinder/cinder.conf  ssd  volume_driver  cinder.volume.drivers.lvm.LVMVolumeDriver
    openstack-config --set /etc/cinder/cinder.conf  ssd  volume_group  cinder-ssd
    openstack-config --set /etc/cinder/cinder.conf  ssd  iscsi_protocol  iscsi
    openstack-config --set /etc/cinder/cinder.conf  ssd  iscsi_helper  lioadm
    openstack-config --set /etc/cinder/cinder.conf  ssd  volume_backend_name  ssd
    openstack-config --set /etc/cinder/cinder.conf  sata  volume_driver  cinder.volume.drivers.lvm.LVMVolumeDriver
    openstack-config --set /etc/cinder/cinder.conf  sata  volume_group  cinder-sata
    openstack-config --set /etc/cinder/cinder.conf  sata  iscsi_protocol  iscsi
    openstack-config --set /etc/cinder/cinder.conf  sata  iscsi_helper  lioadm
    openstack-config --set /etc/cinder/cinder.conf  sata  volume_backend_name  sata
    
    systemctl enable openstack-cinder-volume.service target.service
    systemctl start openstack-cinder-volume.service target.service
    [root@controller ~]# cinder service-list
    +------------------+-------------+------+---------+-------+----------------------------+-----------------+
    |      Binary      |     Host    | Zone |  Status | State |         Updated_at         | Disabled Reason |
    +------------------+-------------+------+---------+-------+----------------------------+-----------------+
    | cinder-scheduler |  controller | nova | enabled |   up  | 2021-02-06T08:45:35.000000 |        -        |
    |  cinder-volume   | glance@sata | nova | enabled |   up  | 2021-02-06T08:45:25.000000 |        -        |
    |  cinder-volume   |  glance@ssd | nova | enabled |   up  | 2021-02-06T08:45:25.000000 |        -        |
    +------------------+-------------+------+---------+-------+----------------------------+-----------------+
    

    In the web UI


    [root@glance ~]# lvs
      LV                                          VG         Attr       LSize  Pool Origin Data%  Meta%  Move Log Cpy%Sync Convert
      root                                        centos     -wi-ao---- 46.99g                                                    
      swap                                        centos     -wi-ao----  2.00g                                                    
      volume-ad6b4c0c-2080-4922-b697-543f7b9d1b60 cinder-ssd -wi-a-----  5.00g  
    Note: logical volumes can only be extended, never shrunk.
    

    7. Create different volume types as needed

    Types are matched to backends by volume_backend_name, as sketched below.
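    The screenshots showed the Horizon steps; a CLI sketch that creates both types, binds each to a backend, and creates one typed volume (names and sizes are examples):
    
    cinder type-create ssd
    cinder type-key ssd set volume_backend_name=ssd
    cinder type-create sata
    cinder type-key sata set volume_backend_name=sata
    cinder extra-specs-list   # verify the type/backend mapping
    cinder create --volume-type sata --name test-sata 3   # 3 GB volume on the sata backend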


    [root@glance ~]# lvs   # verify
      LV                                          VG          Attr       LSize  Pool Origin Data%  Meta%  Move Log Cpy%Sync Convert
      root                                        centos      -wi-ao---- 46.99g                                                    
      swap                                        centos      -wi-ao----  2.00g                                                    
      volume-d61a0b44-d72a-4a9c-aee0-b2ab7c14b273 cinder-sata -wi-a-----  3.00g                                                    
      volume-d589e037-6f72-4910-baf3-7a7f6343d84e cinder-ssd  -wi-a-----  1.00g             
    
    Verify from inside the instance:
    ssh cirros@10.0.0.122
    sudo su -
    fdisk -l
    mkfs.ext4 /dev/vdb
    mount /dev/vdb /mnt
    cd /mnt
    echo "this is testing" >test.txt
    On the storage node:
    [root@glance ~]#  dd if=/dev/mapper/cinder--sata-volume--d61a0b44--d72a--4a9c--aee0--b2ab7c14b273 of=/opt/disk.raw
    [root@glance ~]# qemu-img info /opt/disk.raw 
    image: /opt/disk.raw
    file format: raw
    virtual size: 3.0G (3221225472 bytes)
    disk size: 3.0G
    [root@glance ~]# mount -o loop /opt/disk.raw /test
    [root@glance ~]# ll /test/
    total 20
    drwx------ 2 root root 16384 Feb  6 17:14 lost+found
    -rw------- 1 root root    16 Feb  6 17:15 test.txt
    
    On the compute nodes, nova keeps a directory of files per instance (check on each compute node):
    [root@compute1 ~]# ll /var/lib/nova/instances/5ebab777-fc9d-4ded-a354-135af1f228c3/
    total 2280
    -rw------- 1 root root   25459 Feb  6 17:27 console.log
    -rw-r--r-- 1 qemu qemu 2293760 Feb  6 17:28 disk
    -rw-r--r-- 1 nova nova      79 Feb  6 17:26 disk.info
    -rw-r--r-- 1 nova nova    2525 Feb  6 17:26 libvirt.xml
    
    (figure: from instance directory to logical volume, i.e. booting from a volume)
    [root@controller ~]# openstack-config --set /etc/cinder/cinder.conf  DEFAULT  glance_api_servers  http://10.0.0.12:9292
    [root@controller ~]# systemctl restart openstack-cinder-api.service 
    [root@compute1 ~]# ll /var/lib/nova/instances/90c08ba3-ad28-412b-8fab-4a49dac3093f/
    total 28              # no disk file: the root disk lives on the cinder volume
    -rw------- 1 root root 24141 Feb  6 17:39 console.log
    -rw-r--r-- 1 nova nova  2577 Feb  6 17:39 libvirt.xml
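    For reference, an instance that boots from a cinder volume (hence no disk file) can be launched roughly like this; VOLUME_ID and NET_ID are placeholders:
    
    nova boot --flavor m1.tiny --boot-volume VOLUME_ID \
      --nic net-id=NET_ID vm-from-volume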
    

    8. Using NFS as a cinder backend

    yum install nfs-utils -y
    [root@compute3 ~]# vim /etc/exports
    /data/ 10.0.0.0/24(rw,async,no_root_squash,no_all_squash)
    mkdir /data
    systemctl start nfs rpcbind
    systemctl enable nfs rpcbind
    [root@glance ~]# showmount -e 10.0.0.33
    Export list for 10.0.0.33:
    /data/ 10.0.0.0/24
    
    On the storage node:
    openstack-config --set /etc/cinder/cinder.conf  DEFAULT  enabled_backends  ssd,sata,nfs
    openstack-config --set /etc/cinder/cinder.conf  nfs  volume_driver  cinder.volume.drivers.nfs.NfsDriver
    openstack-config --set /etc/cinder/cinder.conf  nfs  nfs_shares_config  /etc/cinder/nfs_shares
    openstack-config --set /etc/cinder/cinder.conf  nfs  volume_backend_name  nfs
    
    cat >/etc/cinder/nfs_shares <<EOF
    10.0.0.33:/data
    EOF
    
    [root@glance ~]# systemctl restart openstack-cinder-volume.service
    [root@controller ~]# cinder service-list
    +------------------+-------------+------+---------+-------+----------------------------+-----------------+
    |      Binary      |     Host    | Zone |  Status | State |         Updated_at         | Disabled Reason |
    +------------------+-------------+------+---------+-------+----------------------------+-----------------+
    | cinder-scheduler |  controller | nova | enabled |   up  | 2021-02-07T08:19:37.000000 |        -        |
    |  cinder-volume   |  glance@nfs | nova | enabled |   up  |             -              |        -        |
    |  cinder-volume   | glance@sata | nova | enabled |   up  | 2021-02-07T08:19:40.000000 |        -        |
    |  cinder-volume   |  glance@ssd | nova | enabled |   up  | 2021-02-07T08:19:40.000000 |        -        |
    +------------------+-------------+------+---------+-------+----------------------------+-----------------+
    
    [root@compute3 data]# ls
    volume-de5974d1-5354-426e-81a5-42bb7c4d075a
    
    ssh cirros@10.0.0.127
    sudo su -
    mount /dev/vdb /tmp
    echo "this is nfs" >/tmp/testing.txt
    
    [root@compute3 data]# mount -o loop volume-de5974d1-5354-426e-81a5-42bb7c4d075a /tmp
    [root@compute3 data]# ll /tmp/
    total 20
    drwx------ 2 root root 16384 Feb  7 16:45 lost+found
    -rw-r--r-- 1 root root    12 Feb  7 16:47 testing.txt
    

    9. Using the controller as an additional compute node

    On the controller:
    yum install -y openstack-nova-compute
    openstack-config --set /etc/nova/nova.conf  vnc enabled  True
    openstack-config --set /etc/nova/nova.conf  vnc vncserver_listen  0.0.0.0
    openstack-config --set /etc/nova/nova.conf  vnc vncserver_proxyclient_address  '$my_ip'
    openstack-config --set /etc/nova/nova.conf  vnc novncproxy_base_url  http://controller:6080/vnc_auto.html
    
    systemctl start libvirtd openstack-nova-compute.service 
    systemctl enable libvirtd openstack-nova-compute.service 
    [root@controller ~]# nova service-list
    +----+------------------+-----------------------+----------+----------+-------+----------------------------+-----------------+
    | Id | Binary           | Host                  | Zone     | Status   | State | Updated_at                 | Disabled Reason |
    +----+------------------+-----------------------+----------+----------+-------+----------------------------+-----------------+
    | 1  | nova-conductor   | controller            | internal | enabled  | up    | 2021-02-07T09:17:18.000000 | -               |
    | 2  | nova-scheduler   | controller            | internal | enabled  | up    | 2021-02-07T09:17:18.000000 | -               |
    | 3  | nova-consoleauth | controller            | internal | enabled  | up    | 2021-02-07T09:17:18.000000 | -               |
    | 7  | nova-compute     | compute1              | nova     | enabled  | up    | 2021-02-07T09:17:16.000000 | -               |
    | 8  | nova-compute     | localhost.localdomain | compute2 | disabled | down  | 2021-02-05T09:57:59.000000 | 123             |
    | 9  | nova-compute     | compute2              | zhiding  | enabled  | down  | 2021-02-05T09:43:21.000000 | -               |
    | 10 | nova-compute     | compute3              | compute3 | enabled  | up    | 2021-02-07T09:17:16.000000 | -               |
    | 11 | nova-compute     | controller            | nova     | enabled  | up    | 2021-02-07T09:17:17.000000 | -               |
    +----+------------------+-----------------------+----------+----------+-------+----------------------------+-----------------+
    

    10. Cold migration of virtual machines

    1: Enable SSH trust between the nova compute nodes.
    Cold migration requires the nova user on each compute node to reach the others over SSH without a password.
    The nova user has no login shell by default; enable one on every compute node:
    
    usermod -s /bin/bash nova
    su - nova
    ssh-keygen -t rsa
    # generate the key pair
    cp -fa .ssh/id_rsa.pub .ssh/authorized_keys
    
    Copy the key pair to /var/lib/nova/.ssh on every other compute node, minding ownership and permissions:
    [nova@compute1 ~]$ scp -rp .ssh root@10.0.0.33:`pwd`
    [root@compute3 ~]# chown -R nova:nova /var/lib/nova/.ssh/
    
    2: Edit nova.conf on the controller:
    vim /etc/nova/nova.conf
    [DEFAULT]
    scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,DiskFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter
    
    Restart openstack-nova-scheduler:
    systemctl restart openstack-nova-scheduler.service
    
    3: Edit the nova config on all compute nodes:
    vi /etc/nova/nova.conf
    [DEFAULT]
    allow_resize_to_same_host = True
    
    Restart openstack-nova-compute:
    systemctl restart openstack-nova-compute.service
    
    4: In the dashboard: launch an instance, then use resize/migrate on it to perform the cold migration. A CLI equivalent is sketched below.
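    The same flow from the CLI, as a sketch (test-vm is a placeholder name): migrate, wait for VERIFY_RESIZE, then confirm:
    
    nova migrate test-vm
    nova list   # wait until the status shows VERIFY_RESIZE
    nova resize-confirm test-vm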
    
