Create the pools
# ceph osd pool create volumes 64
# ceph osd pool create images 64
# ceph osd pool create vms 64
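A PG count of 64 per pool is fine for a small test cluster; in production, size placement groups to your OSD count. To confirm the pools were created, a quick check such as the following should work:

# ceph osd lspools
# ceph osd pool get volumes pg_num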
Configure the Ceph yum repository on CentOS 7
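The tutorial does not show the repository file itself; a minimal /etc/yum.repos.d/ceph.repo along the following lines should do. The Jewel release and the official download URL are assumptions; substitute a local mirror and your cluster's release as needed, and enable gpgcheck with Ceph's release key outside of a lab:

[ceph]
name=Ceph packages for x86_64
baseurl=http://download.ceph.com/rpm-jewel/el7/x86_64/
enabled=1
gpgcheck=0

[ceph-noarch]
name=Ceph noarch packages
baseurl=http://download.ceph.com/rpm-jewel/el7/noarch/
enabled=1
gpgcheck=0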
On the glance-api node (the controller node):
yum install python-rbd -y
On the nova-compute and cinder-volume nodes (the compute node):
yum install ceph-common -y
Operations on the Ceph cluster side
From the Ceph node, push ceph.conf to the OpenStack nodes:
[root@ceph ~]# ssh controller sudo tee /etc/ceph/ceph.conf < /etc/ceph/ceph.conf
[root@ceph ~]# ssh compute sudo tee /etc/ceph/ceph.conf < /etc/ceph/ceph.conf
If cephx authentication is enabled, create new users for Nova/Cinder and Glance, as follows:
ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images'
ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images'
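To verify that the users and their capabilities came out as intended, they can be inspected with:

ceph auth get client.cinder
ceph auth get client.glance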
Distribute keyrings for client.cinder and client.glance, as follows:
ceph auth get-or-create client.glance | ssh controller sudo tee /etc/ceph/ceph.client.glance.keyring
ssh controller sudo chown glance:glance /etc/ceph/ceph.client.glance.keyring
ceph auth get-or-create client.cinder | ssh compute sudo tee /etc/ceph/ceph.client.cinder.keyring
ssh compute sudo chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
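A quick sanity check that the keyrings landed on each node with the expected ownership:

ssh controller ls -l /etc/ceph/ceph.client.glance.keyring
ssh compute ls -l /etc/ceph/ceph.client.cinder.keyring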
Create a temporary key file for the nova-compute node:
ceph auth get-key client.cinder | ssh {your-compute-node} tee client.cinder.key
In this environment that is:
ceph auth get-key client.cinder | ssh compute tee client.cinder.key
On every compute node (this example has only one), register the new key with libvirt:
uuidgen
536f43c1-d367-45e0-ae64-72d987417c91

Create secret.xml with the following content, replacing the UUID with the value just generated:

cat > secret.xml <<EOF
<secret ephemeral='no' private='no'>
  <uuid>536f43c1-d367-45e0-ae64-72d987417c91</uuid>
  <usage type='ceph'>
    <name>client.cinder secret</name>
  </usage>
</secret>
EOF
virsh secret-define --file secret.xml
The key after --base64 below is the contents of client.cinder.key in the /root directory on the compute node, i.e. the temporary key file created for the compute node earlier:
virsh secret-set-value --secret 536f43c1-d367-45e0-ae64-72d987417c91 --base64 AQCliYVYCAzsEhAAMSeU34p3XBLVcvc4r46SyA==
[root@compute ~]# rm -f client.cinder.key secret.xml
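Before moving on, it is worth confirming that libvirt stored the secret; the value printed should match the client.cinder key:

virsh secret-list
virsh secret-get-value 536f43c1-d367-45e0-ae64-72d987417c91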
On the controller node:
vim /etc/glance/glance-api.conf

[DEFAULT]
...
default_store = rbd
show_image_direct_url = True
show_multiple_locations = True
...

[glance_store]
stores = rbd
default_store = rbd
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
rbd_store_chunk_size = 8

Disable Glance cache management by removing cachemanagement from the deployment flavor:

[paste_deploy]
flavor = keystone
On the compute node:
vim /etc/cinder/cinder.conf

[DEFAULT]
(keep the existing settings and add)
enabled_backends = ceph
#glance_api_version = 2
...

[ceph]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
glance_api_version = 2
rbd_user = cinder
volume_backend_name = ceph
rbd_secret_uuid = 536f43c1-d367-45e0-ae64-72d987417c91
Note that each compute node has its own UUID; fill in the value for your environment. This example has only one compute node.
Note: if multiple Cinder backends are configured, glance_api_version = 2 must be added to [DEFAULT]. It is left commented out in this example.
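With multiple backends enabled, Cinder also needs a volume type mapped to this backend before new volumes land on Ceph; a minimal sketch (the type name ceph here is arbitrary):

cinder type-create ceph
cinder type-key ceph set volume_backend_name=ceph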
On every compute node, edit /etc/nova/nova.conf:
vim /etc/nova/nova.conf

[libvirt]
virt_type = qemu
hw_disk_discard = unmap
images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = cinder
rbd_secret_uuid = 536f43c1-d367-45e0-ae64-72d987417c91
disk_cachemodes = "network=writeback"
libvirt_inject_password = false
libvirt_inject_key = false
libvirt_inject_partition = -2
live_migration_flag = VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_TUNNELLED
Restart the service on the controller node:
systemctl restart openstack-glance-api.service
Restart the services on the compute node:
systemctl restart openstack-nova-compute.service openstack-cinder-volume.service
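With the services back up, an end-to-end check confirms each service writes to its pool. The image file and names below are assumptions; run the rbd commands wherever an admin keyring is available (e.g. the Ceph node):

openstack image create "cirros-ceph" --disk-format raw --container-format bare --file cirros-0.3.4-x86_64-disk.raw
cinder create --volume-type ceph --name vol-ceph 1
rbd ls images
rbd ls volumes

After booting an instance from the image, its ephemeral disk should likewise appear under rbd ls vms.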
Complete configuration files
Note that these reference dumps come from a different deployment than the walkthrough above: the pool names (nova, cinder, glance), ceph.conf paths, and secret UUID all differ.
1. nova
[root@controller nova]# cat nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
rpc_backend = rabbit
auth_strategy = keystone
my_ip = 192.168.8.100
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver

[api_database]
connection = mysql+pymysql://nova:Changeme_123@controller/nova_api

[barbican]
[cache]
[cells]

[cinder]
os_region_name = RegionOne

[conductor]
[cors]
[cors.subdomain]

[database]
connection = mysql+pymysql://nova:Changeme_123@controller/nova

[ephemeral_storage_encryption]

[glance]
api_servers = http://controller:9292

[guestfs]
[hyperv]
[image_file_url]
[ironic]
[keymgr]

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = Changeme_123

[libvirt]
virt_type = qemu
hw_disk_discard = unmap
images_type = rbd
images_rbd_pool = nova
images_rbd_ceph_conf = /etc/cinder/ceph.conf
rbd_user = cinder
rbd_secret_uuid = 457eb676-33da-42ec-9a8c-9293d545c337
disk_cachemodes = "network=writeback"
libvirt_inject_password = false
libvirt_inject_key = false
libvirt_inject_partition = -2
live_migration_flag = VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_TUNNELLED

[matchmaker_redis]
[metrics]

[neutron]
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = Changeme_123
service_metadata_proxy = True
metadata_proxy_shared_secret = Changeme_123

[osapi_v21]

[oslo_concurrency]
lock_path = /var/lib/nova/tmp

[oslo_messaging_amqp]
[oslo_messaging_notifications]

[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = Changeme_123

[oslo_middleware]
[oslo_policy]
[rdp]
[serial_console]
[spice]
[ssl]
[trusted_computing]
[upgrade_levels]
[vmware]

[vnc]
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = 192.168.8.100
enabled = True
novncproxy_base_url = http://192.168.8.100:6080/vnc_auto.html

[workarounds]
[xenserver]
2. cinder
[root@controller nova]# cat /etc/cinder/cinder.conf
[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
my_ip = 192.168.8.100
glance_host = controller
enabled_backends = lvm,ceph
glance_api_servers = http://controller:9292

[BACKEND]
[BRCD_FABRIC_EXAMPLE]
[CISCO_FABRIC_EXAMPLE]
[COORDINATION]
[FC-ZONE-MANAGER]
[KEYMGR]
[cors]
[cors.subdomain]

[database]
connection = mysql+pymysql://cinder:Changeme_123@controller/cinder

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = Changeme_123

[matchmaker_redis]

[oslo_concurrency]
lock_path = /var/lib/cinder/tmp

[oslo_messaging_amqp]
[oslo_messaging_notifications]

[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = Changeme_123

[oslo_middleware]
[oslo_policy]
[oslo_reports]
[oslo_versionedobjects]
[ssl]

[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
iscsi_protocol = iscsi
iscsi_helper = lioadm

[ceph]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = cinder
rbd_ceph_conf = /etc/cinder/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
glance_api_version = 2
rbd_user = cinder
rbd_secret_uuid = 457eb676-33da-42ec-9a8c-9293d545c337
volume_backend_name = ceph
3. glance
[root@controller nova]# cat /etc/glance/glance-api.conf
[DEFAULT]
#default_store = rbd
show_image_direct_url = True
#show_multiple_locations = True

[cors]
[cors.subdomain]

[database]
connection = mysql+pymysql://glance:Changeme_123@controller/glance

[glance_store]
stores = rbd
default_store = rbd
rbd_store_pool = glance
rbd_store_user = glance
rbd_store_ceph_conf = /etc/glance/ceph.conf
rbd_store_chunk_size = 8

[image_format]

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
username = glance
password = Changeme_123
project_name = service

[matchmaker_redis]
[oslo_concurrency]
[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_policy]

[paste_deploy]
flavor = keystone

[profiler]
[store_type_location_strategy]
[task]
[taskflow_executor]
4. ceph
[root@controller nova]# cat /etc/cinder/ceph.conf
[global]
heartbeat interval = 5
osd pool default size = 3
osd heartbeat grace = 10
#keyring = /etc/ceph/keyring.admin
mon osd down out interval = 90
fsid = 5e8080b0-cc54-11e6-b346-000c29976397
osd heartbeat interval = 10
max open files = 131072
auth supported = cephx

[mon]
mon osd full ratio = .90
mon data = /var/lib/ceph/mon/mon$id
mon osd nearfull ratio = .75
mon clock drift allowed = .200
mon osd allow primary affinity = true

[mon.0]
host = csm-node1
mon addr = 192.168.8.102:6789

[mon.1]
host = csm-node2
mon addr = 192.168.8.103:6789

[mon.2]
host = csm-node3
mon addr = 192.168.8.104:6789

[osd]
osd mount options xfs = rw,noatime,inode64,logbsize=256k,delaylog
osd crush update on start = false
filestore xattr use omap = true
#keyring = /etc/ceph/keyring.$name
osd mkfs type = xfs
osd data = /var/lib/ceph/osd/osd$id
osd heartbeat interval = 10
osd heartbeat grace = 10
osd mkfs options xfs = -f
osd journal size = 0

[osd.0]
osd journal = /dev/sdb1
devs = /dev/sdb2
host = csm-node1
cluster addr = 192.168.8.102
public addr = 192.168.8.102

[osd.1]
osd journal = /dev/sdb1
devs = /dev/sdb2
host = csm-node2
cluster addr = 192.168.8.103
public addr = 192.168.8.103

[osd.2]
osd journal = /dev/sdb1
devs = /dev/sdb2
host = csm-node3
cluster addr = 192.168.8.104
public addr = 192.168.8.104

[client.cinder]
keyring = /etc/ceph/ceph.client.cinder.keyring