diff --git a/.kitchen.yml b/.kitchen.yml
index e9f847b..4b10462 100644
--- a/.kitchen.yml
+++ b/.kitchen.yml
@@ -27,8 +27,6 @@ suites:
       ceph:
         config:
           fsid: ae3f1d03-bacd-4a90-b869-1a4fabb107f2
-          mon_initial_members:
-          - "127.0.0.1"
   - name: osd
     run_list:
       - "role[ceph-osd]"
diff --git a/README.md b/README.md
index dcb91c9..a0a3b9e 100644
--- a/README.md
+++ b/README.md
@@ -65,9 +65,9 @@ This cookbook can be used to implement a chosen cluster design. Most of the conf
 * `node['ceph']['config]'['global']['cluster network']` - a CIDR specification of a separate cluster replication network
 * `node['ceph']['config]'['global']['rgw dns name']` - the main domain of the radosgw daemon

-Most notably, the configuration does **NOT** need to set the `mon_initial_members`, because the cookbook does a node search based on tags to find other mons in the same environment.
+Most notably, the configuration does **NOT** need to set the `mon initial members`, because the cookbook does a node search based on TAGS to find other mons in the same environment. However, you can add them to `node['ceph']['config']['global']['mon initial members'] = `

-The other set of attributes that this recipe needs is `node['ceph']['osd_devices']`, which is an array of OSD definitions, similar to the following:
+The other set of attributes that this recipe needs is `node['ceph']['osd']['devices']`, which is an array of OSD definitions, similar to the following:

 * {'device' => '/dev/sdb'} - Use a full disk for the OSD, with a small partition for the journal
 * {'type' => 'directory', 'device' => '/src/node/sdb1/ceph'} - Use a directory, and have a small file for the journal
@@ -149,12 +149,8 @@ Ceph RGW nodes should use the ceph-radosgw role
 ### Ceph RADOS Gateway (RGW) ###
 Note: Only supports the newer 'civetweb' version of RGW (not Apache)

-* `node['ceph']['radosgw']['api_fqdn']` - what vhost to configure in the web server
-* `node['ceph']['radosgw']['admin_email']` - the admin email address to configure in the web server
 * `node['ceph']['radosgw']['port']` - Port of the rgw. Defaults to 80
-* `node['ceph']['radosgw']['webserver_companion']` - defaults to 'apache2', but it can be set to 'civetweb', or to false in order to leave it unconfigured
-* `node['ceph']['radosgw']['path']` - where to save the s3gw.fcgi file
-* `node['ceph']['config']['global']['rgw dns name']` - the main domain of the radosgw daemon, to calculate the bucket name from a subdomain
+* `node['ceph']['radosgw']['rgw_dns_name']` - the main domain of the radosgw daemon, to calculate the bucket name from a subdomain

 ## Resources/Providers
diff --git a/attributes/default.rb b/attributes/default.rb
index 58b1d2f..eb308c2 100644
--- a/attributes/default.rb
+++ b/attributes/default.rb
@@ -51,11 +51,8 @@
 # Override these in your environment file or here if you wish. Don't put them in the 'ceph''config''global' section.
 # The public and cluster network settings are critical for proper operations.
-default['ceph']['network']['public']['cidr'] = '10.0.101.0/24'
-default['ceph']['network']['cluster']['cidr'] = '192.168.101.0/24'
-
-# Will radosgw integrate with OpenStack Keystone - true/false
-default['ceph']['radosgw']['keystone_auth'] = false
+default['ceph']['network']['public']['cidr'] = ['10.121.1.0/24']
+default['ceph']['network']['cluster']['cidr'] = ['10.121.2.0/24']

 # Tags are used to identify nodes for searching.
 # IMPORTANT
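The public and cluster network attributes above are now arrays of CIDR blocks rather than single strings, and the ceph.conf template joins the entries with commas. A minimal wrapper-cookbook override sketch under that convention (the CIDR values here are hypothetical):

    # Wrapper cookbook attributes file - hypothetical CIDR blocks
    default['ceph']['network']['public']['cidr']  = ['10.121.1.0/24']
    default['ceph']['network']['cluster']['cidr'] = ['10.121.2.0/24']
    # More than one aggregate block can be listed if needed, e.g.
    # default['ceph']['network']['public']['cidr'] = ['10.121.1.0/24', '10.122.1.0/24']
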
diff --git a/attributes/mon.rb b/attributes/mon.rb
index 643be9d..0f4528e 100644
--- a/attributes/mon.rb
+++ b/attributes/mon.rb
@@ -16,8 +16,7 @@
 include_attribute 'ceph-chef'

-# The ceph mon ips attribute gets built in a wrapper recipe or chef-repo style environment like ceph-chef
-default['ceph']['mon']['ips'] = nil
+default['ceph']['mon']['port'] = 6789

 # init_style in each major section is allowed so that radosgw or osds or mons etc could be a different OS if required.
 # The default is everything on the same OS
@@ -28,9 +27,6 @@
 # MUST be set in the wrapper cookbook or chef-repo like project
 default['ceph']['mon']['role'] = 'search-ceph-mon'

-# Default of 15 seconds but change to nil for default of .050 or set it to .050
-default['ceph']['mon']['clock_drift_allowed'] = 15
-
 case node['platform_family']
 when 'debian', 'rhel', 'fedora'
   packages = ['ceph']
diff --git a/attributes/osd.rb b/attributes/osd.rb
index 3a30d29..853de0f 100644
--- a/attributes/osd.rb
+++ b/attributes/osd.rb
@@ -35,11 +35,11 @@
 # Example of how to set this up via attributes file. Change to support your naming, the correct OSD info etc. this
 # is ONLY an example.
 default['ceph']['osd']['remove']['devices'] = [
-  { 'node' => 'ceph-vm3', 'osd' => 11, 'zap' => false, 'partition' => 1, 'device' => '/dev/sdf', 'journal' => '/dev/sdf' }
+  { 'node' => 'ceph-vm3', 'osd' => 11, 'zap' => false, 'partition' => 1, 'data' => '/dev/sdf', 'journal' => '/dev/sdf' }
 ]

 default['ceph']['osd']['add']['devices'] = [
-  { 'node' => 'ceph-vm3', 'type' => 'hdd', 'device' => '/dev/sde', 'journal' => '/dev/sde' }
+  { 'node' => 'ceph-vm3', 'type' => 'hdd', 'data' => '/dev/sde', 'journal' => '/dev/sde' }
 ]

 case node['platform_family']
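The OSD device hashes now use a 'data' key where they previously used 'device'. A sketch of the matching node['ceph']['osd']['devices'] list a wrapper cookbook might set, following the commented example in recipes/osd.rb (device paths and the 'encrypted' flag are illustrative):

    # Wrapper cookbook attributes file - hypothetical devices
    default['ceph']['osd']['devices'] = [
      { 'type' => 'hdd', 'data' => '/dev/sdb', 'journal' => '/dev/sdf', 'encrypted' => false },
      { 'type' => 'hdd', 'data' => '/dev/sdc', 'journal' => '/dev/sdf', 'encrypted' => false }
    ]
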
diff --git a/attributes/pools.rb b/attributes/pools.rb
index c8e570e..c90458e 100644
--- a/attributes/pools.rb
+++ b/attributes/pools.rb
@@ -23,14 +23,38 @@
 # NOTE: The values you set in pools are critical to a well balanced system.

 # RADOSGW - Rados Gateway section
+# Update these if you want to set up federated regions and zones
+# {region name}-{zone name}-{instance} -- Naming convention used but you can change it
+default['ceph']['pools']['radosgw']['federated_regions'] = []
+# NOTE: If you use a region then you *must* have at least 1 zone defined and if you use a zone then you *must* have
+# at least 1 region defined.
+default['ceph']['pools']['radosgw']['federated_zones'] = []
+# Default for federated_instances is 1. If you would like to run multiple instances of radosgw per node then increase
+# the federated_instances count. NOTE: When you do this, make sure if you use a load balancer that you account
+# for the additional instance(s). Also, the instances count *must* never be below 1.
+default['ceph']['pools']['radosgw']['federated_instances'] = 1
+
+# These two values *must* be set in your wrapper cookbook if using federated region/zone. They will be the root pool
+# name used. For example, region - .us.rgw.root, zone - .us-east.rgw.root (these do not include instances).
+default['ceph']['pools']['radosgw']['federated_region_root_pool_name'] = nil
+default['ceph']['pools']['radosgw']['federated_zone_root_pool_name'] = nil
+
 # The cluster name will be prefixed to each name during the processing so please only include the actual name.
-# No leading cluster name or leading '.' character.
 default['ceph']['pools']['radosgw']['names'] = [
-  'rgw', 'rgw.control', 'rgw.gc', 'rgw.root', 'users.uid',
-  'users.email', 'users.swift', 'users', 'usage', 'log', 'intent-log', 'rgw.buckets', 'rgw.buckets.index',
-  'rgw.buckets.extra'
+  '.rgw', '.rgw.control', '.rgw.gc', '.rgw.root', '.users.uid',
+  '.users.email', '.users.swift', '.users', '.usage', '.log', '.intent-log', '.rgw.buckets', '.rgw.buckets.index',
+  '.rgw.buckets.extra'
 ]
+
+# NOTE: *DO NOT* modify this structure! This is an internal structure that gets dynamically updated IF the federated
+# options above are updated by the wrapper cookbook etc.
+default['ceph']['pools']['radosgw']['federated_names'] = []
+# 'rbd' federated_names is not used but present - do not remove!
+default['ceph']['pools']['rbd']['federated_names'] = []
+
+# NOTE: The radosgw names above will be appended to the federated region/zone names if they are present else just
+# the radosgw names will be used.
+
 # The 'ceph''osd''size''max' value will be used if no 'size' value is given in the pools settings!
 default['ceph']['pools']['radosgw']['settings'] = {
   'pg_num' => 128, 'pgp_num' => 128, 'options' => '', 'force' => false,
@@ -41,11 +65,11 @@
 # The cluster name will be prefixed to each name during the processing so please only include the actual name.
 # No leading cluster name or leading '.' character.
-# TODO: Address rbds later...
 default['ceph']['pools']['rbd']['names'] = []
 default['ceph']['pools']['rbd']['settings'] = {}

 # List of pools to process
 # If your given environment does not use one of these then override it in your environment.yaml file
+# NOTE: Only valid options are 'radosgw' and 'rbd' at present
 default['ceph']['pools']['active'] = ['radosgw', 'rbd']
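To make the federated naming convention concrete, a hedged example (the region and zone names are hypothetical): with the overrides below, the ceph_chef_build_federated_pool helper added in libraries/ceph_chef_helper.rb would derive one federated name per base pool name.

    # Hypothetical wrapper cookbook overrides
    default['ceph']['pools']['radosgw']['federated_regions'] = ['us']
    default['ceph']['pools']['radosgw']['federated_zones'] = ['east']
    default['ceph']['pools']['radosgw']['federated_instances'] = 1

    # With the ".{region}-{zone}-{instance}{name}" convention, the base name '.rgw' becomes
    # '.us-east-1.rgw', '.rgw.buckets' becomes '.us-east-1.rgw.buckets', and so on.
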
diff --git a/attributes/radosgw.rb b/attributes/radosgw.rb
index f73d2c2..8803403 100644
--- a/attributes/radosgw.rb
+++ b/attributes/radosgw.rb
@@ -2,7 +2,7 @@
 # Cookbook Name:: ceph
 # Attributes:: radosgw
 #
-# Copyright 2015, Bloomberg Finance L.P.
+# Copyright 2016, Bloomberg Finance L.P.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -33,17 +33,27 @@
 include_attribute 'ceph-chef'

-default['ceph']['radosgw']['api_fqdn'] = 'localhost'
-default['ceph']['radosgw']['admin_email'] = 'admin@example.com'
 default['ceph']['radosgw']['port'] = 80
-default['ceph']['radosgw']['webserver'] = 'civetweb'
+# default['ceph']['radosgw']['webserver'] = 'civetweb'
+# IMPORTANT: The civetweb user manual is a good place to look for custom config for civetweb:
+# https://github.com/civetweb/civetweb/blob/master/docs/UserManual.md
+# Add any extra civetweb options to the single 'rgw frontends' line in the ceph.conf template.
+# NOTE: Change the number of default threads that civetweb uses per node
+default['ceph']['radosgw']['civetweb_num_threads'] = 10
+default['ceph']['radosgw']['civetweb_access_log_file'] = '/var/log/radosgw/civetweb.access.log'
+default['ceph']['radosgw']['civetweb_error_log_file'] = '/var/log/radosgw/civetweb.error.log'

 # OpenStack Keystone specific
-default['ceph']['radosgw']['keystone_admin_token'] = nil
-default['ceph']['radosgw']['keystone_url'] = nil
-default['ceph']['radosgw']['keystone_url_port'] = 35358
+# Will radosgw integrate with OpenStack Keystone - true/false
+default['ceph']['radosgw']['keystone']['auth'] = false
+default['ceph']['radosgw']['keystone']['admin']['token'] = nil
+default['ceph']['radosgw']['keystone']['admin']['url'] = nil
+default['ceph']['radosgw']['keystone']['admin']['port'] = 35357
+default['ceph']['radosgw']['keystone']['accepted_roles'] = 'admin Member _member_'
+default['ceph']['radosgw']['keystone']['token_cache_size'] = 1000
+default['ceph']['radosgw']['keystone']['revocation_interval'] = 1200

-default['ceph']['radosgw']['dns_name'] = nil
+# NOTE: For radosgw pools, see the pools.rb attributes.

 # Number of RADOS handles RGW has access to - system default = 1
 default['ceph']['radosgw']['rgw_num_rados_handles'] = 5
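The Keystone settings are now nested under node['ceph']['radosgw']['keystone']. A sketch of enabling Keystone integration from a wrapper cookbook under the new layout (the URL and token are placeholders, not real values):

    # Hypothetical wrapper cookbook overrides
    default['ceph']['radosgw']['keystone']['auth'] = true
    default['ceph']['radosgw']['keystone']['admin']['url'] = 'http://keystone.example.com'
    default['ceph']['radosgw']['keystone']['admin']['port'] = 35357
    default['ceph']['radosgw']['keystone']['admin']['token'] = 'REPLACE_WITH_ADMIN_TOKEN'
    # accepted_roles, token_cache_size and revocation_interval keep the defaults above unless overridden
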
diff --git a/files/README.md b/files/README.md
index 189dbcb..6af43a8 100644
--- a/files/README.md
+++ b/files/README.md
@@ -3,3 +3,6 @@ This file represents an updated init script that resides in /etc/init.d for RPM

 ## release.asc
 This file is the new ceph repo key. This file only changes if there is a serious issue such as a breach in the repo. If the key has to be replaced then there should be a recipe that does that and updates any mirrored repos that may be impacted. This key is updated locally so as to not have to pull it each time. This is very important for those installs behind proxies etc.
+
+## ceph-rest-api.service (systemd specific)
+Systemd unit file for ceph-rest-api. It does not currently map to ceph.target since that target comes from upstream and the install may be using the Hammer release; Ceph only ships systemd units in Infernalis and above.
diff --git a/files/ceph-rest-api.service b/files/ceph-rest-api.service
index e995b26..60930f4 100644
--- a/files/ceph-rest-api.service
+++ b/files/ceph-rest-api.service
@@ -1,10 +1,13 @@
 [Unit]
 Description=Ceph RESTful API for admin purposes
+After=network-online.target local-fs.target
+Wants=network-online.target local-fs.target

 [Service]
-Type=Simple
+Type=Notify
 ExecStart=/usr/bin/python /bin/ceph-rest-api
+ExecReload=/bin/kill -HUP $MAINPID
 Restart=always

 [Install]
-WantedBy=default.target
+WantedBy=multi-user.target
diff --git a/libraries/ceph_chef_helper.rb b/libraries/ceph_chef_helper.rb
index 178fc91..183d2f5 100644
--- a/libraries/ceph_chef_helper.rb
+++ b/libraries/ceph_chef_helper.rb
@@ -19,6 +19,42 @@
 require 'json'

+# NOTE: To create radosgw federated pools we need to override the default node['ceph']['pools']['radosgw']['names']
+# by rebuilding the structure dynamically based on the federated options.
+def ceph_chef_build_federated_pool(pool)
+  node['ceph']['pools'][pool]['federated_regions'].each do |region|
+    node['ceph']['pools'][pool]['federated_zones'].each do |zone|
+      (1..node['ceph']['pools'][pool]['federated_instances']).each do |instance|
+        node['ceph']['pools'][pool]['names'].each do |name|
+          # name should already have '.' as its first character so don't add it to the formatting here
+          federated_name = ".#{region}-#{zone}-#{instance}#{name}"
+          if !node['ceph']['pools'][pool]['federated_names'].include? federated_name
+            node['ceph']['pools'][pool]['federated_names'] << federated_name
+          end
+        end
+      end
+    end
+  end
+end
+
+def ceph_chef_create_pool(pool)
+  if !node['ceph']['pools'][pool]['federated_names'].empty?
+    node_loop = node['ceph']['pools'][pool]['federated_names']
+  else
+    node_loop = node['ceph']['pools'][pool]['names']
+  end
+
+  node_loop.each do |name|
+    ceph_chef_pool name do
+      action :create
+      pg_num node['ceph']['pools'][pool]['settings']['pg_num']
+      pgp_num node['ceph']['pools'][pool]['settings']['pgp_num']
+      type node['ceph']['pools'][pool]['settings']['type']
+      options node['ceph']['pools'][pool]['settings']['options'] if node['ceph']['pools'][pool]['settings']['options']
+    end
+  end
+end
+
 def ceph_chef_is_mon_node
   val = false
   nodes = ceph_chef_mon_nodes
@@ -251,17 +287,27 @@ def ceph_chef_save_restapi_secret(secret)
 # search for a matching monitor IP in the node environment.
 # 1. For each public network specified:
 # a. We look if the network is IPv6 or IPv4
-# b. We look for a route matching the network
+# b. We look for a route matching the network. You can't assume all nodes will be part of the same subnet but they
+# MUST be part of the same aggregate subnet. For example, if you have 10.121.1.0/24 (class C) as your public IP block
+# and all of your racks/nodes are spanning the same CIDR block then all is well. However, if you have the same public IP
+# block and your racks/nodes are each routable (L3) then those racks/nodes MUST be part of the aggregate CIDR which is
+# 10.121.1.0/24 in the example here. So, you could have each rack of nodes on their own subnet like /27 which will give
+# you a max of 8 subnets under the aggregate of /24. For example, rack1 could be 10.121.1.0/27, rack2 - 10.121.1.32/27,
+# rack3 - 10.121.1.64/27 ... up to 8 racks in this example.
 # c. If we found match, we return the IP with the port
-def ceph_chef_find_node_ip_in_network(network, nodeish = nil)
+# This function is important because we TAG nodes for specific roles and then search for those tags to dynamically
+# update the node data. Of course, another way would be to create node data specific to a given role such as mon, osd ...
+def ceph_chef_find_node_ip_in_network(networks, nodeish = nil)
   require 'netaddr'
   nodeish = node unless nodeish
-  network.split(/\s*,\s*/).each do |n|
-    net = NetAddr::CIDR.create(n)
-    nodeish['network']['interfaces'].each do |_iface, addrs|
-      addresses = addrs['addresses'] || []
-      addresses.each do |ip, params|
-        return ceph_chef_ip_address_to_ceph_chef_address(ip, params) if ceph_chef_ip_address_in_network?(ip, params, net)
+  networks.each do |network|
+    network.split(/\s*,\s*/).each do |n|
+      net = NetAddr::CIDR.create(n)
+      nodeish['network']['interfaces'].each do |_iface, addrs|
+        addresses = addrs['addresses'] || []
+        addresses.each do |ip, params|
+          return ceph_chef_ip_address_to_ceph_chef_address(ip, params) if ceph_chef_ip_address_in_network?(ip, params, net)
+        end
       end
     end
   end
@@ -283,19 +329,21 @@ def ceph_chef_ip_address_in_network?(ip, params, net)
   end
 end

+# To get subcidr blocks to work within a supercidr aggregate, the prefix-length check
+# (params['prefixlen'].to_i == net.bits) has to be removed from the logic below.
 def ceph_chef_ip4_address_in_network?(ip, params, net)
-  net.contains?(ip) && params.key?('broadcast') && params['prefixlen'].to_i == net.bits
+  net.contains?(ip) && params.key?('broadcast')
 end

 def ceph_chef_ip6_address_in_network?(ip, params, net)
-  net.contains?(ip) && params['prefixlen'].to_i == net.bits
+  net.contains?(ip) # && params['prefixlen'].to_i == net.bits
 end

 def ceph_chef_ip_address_to_ceph_chef_address(ip, params)
   if params['family'].eql?('inet')
-    return "#{ip}:6789"
+    return "#{ip}:#{node['ceph']['mon']['port']}"
   elsif params['family'].eql?('inet6')
-    return "[#{ip}]:6789"
+    return "[#{ip}]:#{node['ceph']['mon']['port']}"
   end
 end
@@ -389,15 +437,7 @@ def ceph_chef_mon_nodes_host(nodes)

 # Returns a list of ip:port of ceph mon for public network
 def ceph_chef_mon_addresses
-  # if File.exist?("/var/run/ceph/#{node['ceph']['cluster']}-mon.#{node['hostname']}.asok")
-  #   mon_ips = ceph_chef_quorum_members_ips
-  # else
-  #   if node['ceph']['mon']['ips']
-  #     mon_ips = node['ceph']['mon']['ips']
-  #   else
   mon_ips = ceph_chef_mon_nodes_ip(ceph_chef_mon_nodes)
-  #   end
-  # end
   mon_ips.reject { |m| m.nil? }.uniq
 end
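Since the network attributes are arrays, ceph_chef_find_node_ip_in_network now takes a list of CIDR blocks and returns the first node address contained in one of the aggregates. A rough usage sketch from a recipe, assuming the attribute defaults shown earlier in this patch:

    # Returns "ip:port" for this node, using node['ceph']['mon']['port'] (6789 by default)
    public_addr  = ceph_chef_find_node_ip_in_network(node['ceph']['network']['public']['cidr'])
    cluster_addr = ceph_chef_find_node_ip_in_network(node['ceph']['network']['cluster']['cidr'], node)

    # Because the prefix-length equality check was dropped, an interface on 10.121.1.66/27
    # still matches a 10.121.1.0/24 aggregate listed in the public cidr array.
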
diff --git a/metadata.rb b/metadata.rb
index 472dd25..3ed0630 100644
--- a/metadata.rb
+++ b/metadata.rb
@@ -4,7 +4,7 @@
 license 'Apache v2.0'
 description 'Installs/Configures Ceph (Hammer and above)'
 long_description IO.read(File.join(File.dirname(__FILE__), 'README.md'))
-version '0.9.7'
+version '0.9.8'

 depends 'apache2', '>= 1.1.12'
 depends 'apt'
diff --git a/recipes/default.rb b/recipes/default.rb
index 61f9d58..b33058d 100644
--- a/recipes/default.rb
+++ b/recipes/default.rb
@@ -35,3 +35,7 @@
   action :install
   not_if 'test -f /tmp/netaddr-1.5.0.gem'
 end
+
+if node['ceph']['pools']['radosgw']['federated_regions']
+  ceph_chef_build_federated_pool('radosgw')
+end
diff --git a/recipes/osd.rb b/recipes/osd.rb
index 6d6c3f9..870ec72 100644
--- a/recipes/osd.rb
+++ b/recipes/osd.rb
@@ -29,7 +29,7 @@
 #      "devices": [
 #        {
 #          "type": "hdd",
-#          "device": "/dev/sdb",
+#          "data": "/dev/sdb",
 #          "journal": "/dev/sdf",
 #          "encrypted": true
 #        }
@@ -145,7 +145,7 @@
     # Only one partition by default for ceph data
     partitions = 1

-    directory osd_device['device'] do
+    directory osd_device['data'] do
       # owner 'root'
       # group 'root'
       owner node['ceph']['owner']
@@ -161,24 +161,24 @@
     # is_device - Is the device a partition or not
     # is_ceph - Does the device contain the default 'ceph data' or 'ceph journal' label
     # The -v option is added to the ceph-disk script so as to get a verbose output if debugging is needed. No other reason.
-    execute "ceph-disk-prepare on #{osd_device['device']}" do
+    execute "ceph-disk-prepare on #{osd_device['data']}" do
       command <<-EOH
-        is_device=$(echo '#{osd_device['device']}' | egrep '/dev/(([a-z]{3,4}[0-9]$)|(cciss/c[0-9]{1}d[0-9]{1}p[0-9]$))')
-        is_ceph=$(parted --script #{osd_device['device']} print | egrep -sq '^ 1.*ceph')
-        ceph-disk -v prepare --cluster #{node['ceph']['cluster']} #{dmcrypt} --fs-type #{node['ceph']['osd']['fs_type']} #{osd_device['device']} #{osd_device['journal']}
+        is_device=$(echo '#{osd_device['data']}' | egrep '/dev/(([a-z]{3,4}[0-9]$)|(cciss/c[0-9]{1}d[0-9]{1}p[0-9]$))')
+        is_ceph=$(parted --script #{osd_device['data']} print | egrep -sq '^ 1.*ceph')
+        ceph-disk -v prepare --cluster #{node['ceph']['cluster']} #{dmcrypt} --fs-type #{node['ceph']['osd']['fs_type']} #{osd_device['data']} #{osd_device['journal']}
         if [[ ! -z $is_device ]]; then
-          ceph-disk -v activate #{osd_device['device']}#{partitions}
+          ceph-disk -v activate #{osd_device['data']}#{partitions}
         else
-          ceph-disk -v activate #{osd_device['device']}
+          ceph-disk -v activate #{osd_device['data']}
         fi
         sleep 2
       EOH
       # NOTE: The meaning of the uuids used here are listed above
-      not_if "sgdisk -i1 #{osd_device['device']} | grep -i 4fbd7e29-9d25-41b8-afd0-062c0ceff05d" if !dmcrypt
-      not_if "sgdisk -i1 #{osd_device['device']} | grep -i 4fbd7e29-9d25-41b8-afd0-5ec00ceff05d" if dmcrypt
+      not_if "sgdisk -i1 #{osd_device['data']} | grep -i 4fbd7e29-9d25-41b8-afd0-062c0ceff05d" if !dmcrypt
+      not_if "sgdisk -i1 #{osd_device['data']} | grep -i 4fbd7e29-9d25-41b8-afd0-5ec00ceff05d" if dmcrypt
       # Only if there is no 'ceph *' found in the label. The recipe os_remove_zap should be called to remove/zap
      # all devices if you are wanting to add all of the devices again (if this is not the initial setup)
-      not_if "parted --script #{osd_device['device']} print | egrep -sq '^ 1.*ceph'"
+      not_if "parted --script #{osd_device['data']} print | egrep -sq '^ 1.*ceph'"
       action :run
     end

@@ -188,5 +188,5 @@
     # ceph-disk list
   end
 else
-  Log.info("node['ceph']['osd']['devices'] empty")
+  Log.info("node['ceph']['osd']['data'] empty")
 end
diff --git a/recipes/osd_add.rb b/recipes/osd_add.rb
index 38c3e6c..2072882 100644
--- a/recipes/osd_add.rb
+++ b/recipes/osd_add.rb
@@ -38,7 +38,7 @@
     #   next
     # end

-    directory osd_device['device'] do
+    directory osd_device['data'] do
      # owner 'root'
      # group 'root'
       owner node['ceph']['owner']
@@ -51,23 +51,23 @@
     # is_device - Is the device a partition or not
     # is_ceph - Does the device contain a 'ceph data' or 'ceph journal' label
-    execute "ceph-disk-prepare on #{osd_device['device']}" do
+    execute "ceph-disk-prepare on #{osd_device['data']}" do
      command <<-EOH
-        is_device=$(echo '#{osd_device['device']}' | egrep '/dev/(([a-z]{3,4}[0-9]$)|(cciss/c[0-9]{1}d[0-9]{1}p[0-9]$))')
-        is_ceph=$(parted --script #{osd_device['device']} print | egrep -sq '^ 1.*ceph')
-        ceph-disk -v prepare --cluster #{node['ceph']['cluster']} #{dmcrypt} --fs-type #{node['ceph']['osd']['fs_type']} #{osd_device['device']} #{osd_device['journal']}
+        is_device=$(echo '#{osd_device['data']}' | egrep '/dev/(([a-z]{3,4}[0-9]$)|(cciss/c[0-9]{1}d[0-9]{1}p[0-9]$))')
+        is_ceph=$(parted --script #{osd_device['data']} print | egrep -sq '^ 1.*ceph')
+        ceph-disk -v prepare --cluster #{node['ceph']['cluster']} #{dmcrypt} --fs-type #{node['ceph']['osd']['fs_type']} #{osd_device['data']} #{osd_device['journal']}
         if [[ ! -z $is_device ]]; then
-          ceph-disk -v activate #{osd_device['device']}#{partitions}
+          ceph-disk -v activate #{osd_device['data']}#{partitions}
         else
-          ceph-disk -v activate #{osd_device['device']}
+          ceph-disk -v activate #{osd_device['data']}
         fi
         sleep 2
       EOH
-      not_if "sgdisk -i1 #{osd_device['device']} | grep -i 4fbd7e29-9d25-41b8-afd0-062c0ceff05d" if !dmcrypt
-      not_if "sgdisk -i1 #{osd_device['device']} | grep -i 4fbd7e29-9d25-41b8-afd0-5ec00ceff05d" if dmcrypt
+      not_if "sgdisk -i1 #{osd_device['data']} | grep -i 4fbd7e29-9d25-41b8-afd0-062c0ceff05d" if !dmcrypt
+      not_if "sgdisk -i1 #{osd_device['data']} | grep -i 4fbd7e29-9d25-41b8-afd0-5ec00ceff05d" if dmcrypt
       # Only if there is no 'ceph *' found in the label. The recipe os_remove_zap should be called to remove/zap
      # all devices if you are wanting to add all of the devices again (if this is not the initial setup)
-      only_if "parted --script #{osd_device['device']} print | egrep -sq '^ 1.*ceph'"
+      only_if "parted --script #{osd_device['data']} print | egrep -sq '^ 1.*ceph'"
       action :run
       # notifies :create, "ruby_block[save osd_device status #{index}]", :immediately
     end

@@ -85,5 +85,5 @@
     #   end
   end
 else
-  Log.info("node['ceph']['osd']['add']['devices'] empty")
+  Log.info("node['ceph']['osd']['add']['data'] empty")
 end
diff --git a/recipes/pools_create.rb b/recipes/pools_create.rb
index fc77cfd..e728ee3 100644
--- a/recipes/pools_create.rb
+++ b/recipes/pools_create.rb
@@ -22,21 +22,14 @@
 if node['ceph']['pools']['active']
   node['ceph']['pools']['active'].each do |pool|
     # Create pool and set type (replicated or erasure - default is replicated)
-    # #{pool}
-    node['ceph']['pools'][pool]['names'].each do |name|
-      pool_name = ".#{name}"
-
-      ceph_chef_pool pool_name do
-        action :create
-        pg_num node['ceph']['pools'][pool]['settings']['pg_num']
-        pgp_num node['ceph']['pools'][pool]['settings']['pgp_num']
-        type node['ceph']['pools'][pool]['settings']['type']
-        options node['ceph']['pools'][pool]['settings']['options'] if node['ceph']['pools'][pool]['settings']['options']
-      end
-
-      # TODO: Need to add for calculated PGs options
-      # TODO: Need to add crush_rule_set
-      # Set...
+    if pool == 'radosgw' && !node['ceph']['pools']['radosgw']['federated_regions'].empty?
+      # NOTE: *Must* have federated_regions and federated_zones if doing any federated processing!
+      ceph_chef_build_federated_pool(pool)
     end
+
+    ceph_chef_create_pool(pool)
+    # TODO: Need to add for calculated PGs options
+    # TODO: Need to add crush_rule_set
+    # Set...
   end
 end
diff --git a/recipes/pools_set.rb b/recipes/pools_set.rb
index 06358ee..ee8cc12 100644
--- a/recipes/pools_set.rb
+++ b/recipes/pools_set.rb
@@ -26,8 +26,12 @@
   node['ceph']['pools']['active'].each do |pool|
     # Create pool and set type (replicated or erasure - default is replicated)
     node['ceph']['pools'][pool]['names'].each do |name|
-      cluster = ".#{node['ceph']['cluster']}" unless node['ceph']['cluster'].downcase == 'ceph'
-      pool_name = "#{cluster}.#{name}"
+      unless node['ceph']['cluster'].downcase == 'ceph'
+        cluster = ".#{node['ceph']['cluster']}"
+        pool_name = "#{cluster}.#{name}"
+      else
+        pool_name = "#{name}"
+      end

       # TODO: Need to add for calculated PGs options
       # TODO: Need to add crush_rule_set
diff --git a/recipes/radosgw.rb b/recipes/radosgw.rb
index 669254a..caf79e3 100644
--- a/recipes/radosgw.rb
+++ b/recipes/radosgw.rb
@@ -32,7 +32,9 @@
   not_if "test -d /var/log/radosgw"
 end

-file "/var/log/radosgw/#{node['ceph']['cluster']}.client.radosgw.#{node['hostname']}.log" do
+# file "/var/log/radosgw/#{node['ceph']['cluster']}.client.radosgw.#{node['hostname']}.log" do
+# No need for the hostname as part of the log file name. It makes log collection easier.
+file "/var/log/radosgw/#{node['ceph']['cluster']}.client.radosgw.log" do
   owner node['ceph']['owner']
   group node['ceph']['group']
 end
@@ -66,7 +68,7 @@
 end

 # IF you want specific recipes for civetweb then put them in the recipe referenced here.
-include_recipe "ceph-chef::radosgw_#{node['ceph']['radosgw']['webserver']}"
+include_recipe "ceph-chef::radosgw_civetweb"

 # NOTE: This base_key can also be the bootstrap-rgw key (ceph.keyring) if desired. Just change it here.
 base_key = "/etc/ceph/#{node['ceph']['cluster']}.client.admin.keyring"
diff --git a/recipes/radosgw_users.rb b/recipes/radosgw_users.rb
index 4147140..35b5ae3 100644
--- a/recipes/radosgw_users.rb
+++ b/recipes/radosgw_users.rb
@@ -26,6 +26,7 @@
     rgw_admin_cap = JSON.parse(%x[radosgw-admin caps add --uid="#{node['ceph']['radosgw']['user']['admin']['uid']}" --caps="users=*;buckets=*;metadata=*;usage=*;zone=*"])
   end
   not_if "radosgw-admin user info --uid='#{node['ceph']['radosgw']['user']['admin']['uid']}'"
+  ignore_failure true
 end

 # Create a test user unless you have overridden the attribute and removed the test user.
@@ -36,5 +37,6 @@
       rgw_tester_cap = JSON.parse(%x[radosgw-admin caps add --uid="#{node['ceph']['radosgw']['user']['test']['uid']}" --caps="#{node['ceph']['radosgw']['user']['test']['caps']}"])
     end
     not_if "radosgw-admin user info --uid='#{node['ceph']['radosgw']['user']['test']['uid']}'"
+    ignore_failure true
   end
 end
diff --git a/templates/default/ceph.conf.erb b/templates/default/ceph.conf.erb
index 9907945..e5e1539 100644
--- a/templates/default/ceph.conf.erb
+++ b/templates/default/ceph.conf.erb
@@ -20,15 +20,12 @@
   cephx sign messages = true
   # Note: mon host (required) and mon initial members (optional) should be in global section in addition
   # to the cluster and public network options since they are all critical to every node.
-  <% if node['ceph']['config']['mon_initial_members'] %>
-  mon initial members = <%= node['ceph']['config']['mon_initial_members'] %>
-  <% end -%>
   # List of all of the monitor nodes in the given cluster
   mon host = <%= @mon_addresses.sort.join(', ') %>
   # Suppress warning of too many pgs
   mon pg warn max per osd = 0
-  cluster network = <%= node['ceph']['network']['cluster']['cidr'] %>
-  public network = <%= node['ceph']['network']['public']['cidr'] %>
+  cluster network = <%= @node['ceph']['network']['cluster']['cidr'].join(',') %>
+  public network = <%= @node['ceph']['network']['public']['cidr'].join(',') %>
 <% if !node['ceph']['config']['global'].nil? -%>
   # This is very flexible section. You can add more options OR override options from above simply by
   # specifying the values in your wrapper cookbook or your "chef-repo". If you override values then
@@ -84,8 +81,6 @@
 [mon]
   host = <%= node['hostname'] %>
 <% if !node['ceph']['mon']['clock_drift_allowed'].nil? -%>
-  # Default is .050 out of box but 15 in ceph cookbook. VMs (used in development) often drift so set this in the environment file if desired to override the defaults
-  mon clock drift allowed = <%= node['ceph']['mon']['clock_drift_allowed'] %>
 <% end -%>
 <% if !node['ceph']['config']['mon'].nil? -%>
 <% node['ceph']['config']['mon'].sort.each do |k, v| %>
@@ -109,11 +104,14 @@
   log file = /var/log/qemu/qemu-guest-$pid.log
 <% end -%>
   # admin socket must be writable by QEMU and allowed by SELinux or AppArmor
-  admin socket = /var/run/ceph/$cluster-$type.$id.$pid.$cctid.asok
+  # admin socket = /var/run/ceph/$cluster-$type.$id.$pid.$cctid.asok
+  admin socket = /var/run/ceph/$cluster-$type.$id.$pid.asok

 <% if @is_admin -%>
 [client.admin]
   keyring = /etc/ceph/$cluster.client.admin.keyring
+  # Force the admin to have a socket
+  admin socket =
 <% end -%>

 <% if @is_rgw -%>
@@ -121,26 +119,24 @@
   host = <%= node['hostname'] %>
   keyring = /etc/ceph/$cluster.client.radosgw.keyring
   rgw num rados handles = <%= node['ceph']['radosgw']['rgw_num_rados_handles'] %>
-  rgw frontends = civetweb port=<%= node['ceph']['radosgw']['port'] %>
+  rgw frontends = civetweb port=<%= node['ceph']['radosgw']['port'] %> num_threads=<%= node['ceph']['radosgw']['civetweb_num_threads'] %> error_log_file=<%= node['ceph']['radosgw']['civetweb_error_log_file'] %> access_log_file=<%= node['ceph']['radosgw']['civetweb_access_log_file'] %>
   # rgw enable ops log = false
   # rgw enable usage log = false
   pid file = /var/run/ceph/$name.pid
-  log file = /var/log/radosgw/$cluster.client.radosgw.<%= node['hostname'] -%>.log
+  log file = /var/log/radosgw/$cluster.client.radosgw.log
   # Increased to 1 to log HTTP return codes - http://tracker.ceph.com/issues/12432
   debug rgw = 1/0
-  <% if node['ceph']['radosgw']['keystone_auth'] %>
-  rgw keystone url = <%= node['ceph']['radosgw']['keystone_url'] %>:<%= node['ceph']['radosgw']['keystone_url_port'] %>
-  rgw keystone admin token = <%= node['ceph']['radosgw']['keystone_admin_token'] %>
-  rgw keystone accepted roles = admin Member _member_
-  rgw keystone token cache size = 1000
-  rgw keystone revocation interval = 1200
+  rgw dns name = <%= node['fqdn'] %>
+  <% if node['ceph']['radosgw']['keystone']['auth'] %>
+  rgw keystone url = <%= node['ceph']['radosgw']['keystone']['admin']['url'] %>:<%= node['ceph']['radosgw']['keystone']['admin']['port'] %>
+  rgw keystone admin token = <%= node['ceph']['radosgw']['keystone']['admin']['token'] %>
+  rgw keystone accepted roles = <%= node['ceph']['radosgw']['keystone']['accepted_roles'] %>
+  rgw keystone token cache size = <%= node['ceph']['radosgw']['keystone']['token_cache_size'] %>
+  rgw keystone revocation interval = <%= node['ceph']['radosgw']['keystone']['revocation_interval'] %>
   rgw s3 auth use keystone = true
   <% end %>
-  <% if !node['ceph']['radosgw']['dns_name'].nil? -%>
-  rgw dns name = <%= node['ceph']['radosgw']['dns_name'] %>
-  <% end %>
-  <% if !node['ceph']['config']['rgw'].nil? -%>
-  <% node['ceph']['config']['rgw'].sort.each do |k, v| %>
+  <% if !node['ceph']['config']['radosgw'].nil? -%>
+  <% node['ceph']['config']['radosgw'].sort.each do |k, v| %>
   <%= k %> = <%= v %>
   <% end %>
 <% end -%>
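For reference, a hedged sketch of the attribute values that feed the single-line civetweb 'rgw frontends' entry rendered by the template above (the values shown are simply the cookbook defaults, and the rendered line is approximate):

    # Wrapper cookbook overrides consumed by templates/default/ceph.conf.erb
    default['ceph']['radosgw']['port'] = 80
    default['ceph']['radosgw']['civetweb_num_threads'] = 10
    default['ceph']['radosgw']['civetweb_access_log_file'] = '/var/log/radosgw/civetweb.access.log'
    default['ceph']['radosgw']['civetweb_error_log_file'] = '/var/log/radosgw/civetweb.error.log'
    # Rendered result (approximately):
    #   rgw frontends = civetweb port=80 num_threads=10 error_log_file=/var/log/radosgw/civetweb.error.log access_log_file=/var/log/radosgw/civetweb.access.log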