# Ansible role: Ceph Monitor
# Ansible role: ceph-mon
This role mainly bootstraps Ceph monitor(s) but also has several capabilities:
* Deploys Ceph monitor(s)
* Manages Ceph keys
* Can create OpenStack pools, users and keys
* Secures a cluster (protect pools)
* Bootstraps dockerized Ceph monitors
# Requirements
Nothing, it runs out of the box.
# Role variables
Have a look at: `defaults/main.yml`.
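All of these defaults can be overridden with host or group variables. A minimal sketch, assuming a `group_vars/mons.yml` file (the path and the values shown are only illustrative):
```
pool_default_pg_num: 256
secure_cluster: true
```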
## Mandatory variables
None.
# Dependencies
The role `leseb.ceph-common` must be installed.
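If you consume the roles from Ansible Galaxy, the dependency can be pulled in with something like:
```
ansible-galaxy install leseb.ceph-common
```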
# Example Playbook
```
- hosts: servers
  remote_user: ubuntu
  roles:
    - { role: leseb.ceph-mon }
```
# Contribution
**THIS REPOSITORY DOES NOT ACCEPT PULL REQUESTS**
**PULL REQUESTS MUST GO THROUGH [CEPH-ANSIBLE](https://github.com/ceph/ceph-ansible)**
# License
Apache
# Author Information
This role was created by [Sébastien Han](http://sebastien-han.fr/).
Documentation is available at http://docs.ceph.com/ceph-ansible/.
---
# You can override vars by using host or group vars
###########
# GENERAL #
###########
fetch_directory: fetch/
mon_group_name: mons
# ACTIVATE BOTH FSID AND MONITOR_SECRET VARIABLES FOR NON-VAGRANT DEPLOYMENT
fsid: "{{ cluster_uuid.stdout }}"
monitor_secret: "{{ monitor_keyring.stdout }}"
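# For a non-Vagrant deployment, replace the two lookups above with static values,
# e.g. (illustrative values only; generate the fsid with `uuidgen` and the key with
# the monitor keyring snippet used in deploy_monitors.yml):
# fsid: 9c9d9a7c-7a1c-4a6a-9e3b-1a2b3c4d5e6f
# monitor_secret: <base64 keyring secret>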
cephx: true
# CephFS
pool_default_pg_num: 128
cephfs_data: cephfs_data
cephfs_metadata: cephfs_metadata
cephfs: cephfs
# Secure your cluster
# This will set the following flags on all the pools:
# * nosizechange
# * nopgchange
# * nodelete
secure_cluster: false
secure_cluster_flags:
- nopgchange
- nodelete
- nosizechange
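# As a sketch of what this does (see secure_cluster.yml): for every existing pool,
# the role runs the equivalent of (pool name 'rbd' is only an example):
#   ceph osd pool set rbd nopgchange true
#   ceph osd pool set rbd nodelete true
#   ceph osd pool set rbd nosizechange true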
#############
# OPENSTACK #
#############
openstack_config: false
openstack_glance_pool:
  name: images
  pg_num: "{{ pool_default_pg_num }}"
openstack_cinder_pool:
  name: volumes
  pg_num: "{{ pool_default_pg_num }}"
openstack_nova_pool:
  name: vms
  pg_num: "{{ pool_default_pg_num }}"
openstack_cinder_backup_pool:
  name: backups
  pg_num: "{{ pool_default_pg_num }}"
openstack_keys:
  - { name: client.glance, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_glance_pool.name }}'" }
  - { name: client.cinder, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_pool.name }}, allow rwx pool={{ openstack_nova_pool.name }}, allow rx pool={{ openstack_glance_pool.name }}'" }
  - { name: client.cinder-backup, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_backup_pool.name }}'" }
##########
# DOCKER #
##########
mon_containerized_deployment: false
ceph_mon_docker_interface: eth0
#ceph_mon_docker_subnet: # subnet of the ceph_mon_docker_interface
ceph_mon_docker_username: ceph
ceph_mon_docker_imagename: daemon
ceph_mon_extra_envs: "MON_NAME={{ ansible_hostname }}" # comma separated variables
---
# You can override vars by using host or group vars
###########
# GENERAL #
###########
mon_group_name: mons
# ACTIVATE BOTH FSID AND MONITOR_SECRET VARIABLES FOR NON-VAGRANT DEPLOYMENT
monitor_secret: "{{ monitor_keyring.stdout }}"
admin_secret: 'admin_secret'
# Secure your cluster
# This will set the following flags on all the pools:
# * nosizechange
# * nopgchange
# * nodelete
secure_cluster: false
secure_cluster_flags:
- nopgchange
- nodelete
- nosizechange
client_admin_ceph_authtool_cap:
  mon: allow *
  osd: allow *
  mds: allow *
  mgr: allow *
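# These caps are handed to the ceph_key module when the custom client.admin keyring
# is created (see "create custom admin keyring"); roughly the same effect as a manual
# ceph-authtool invocation (illustrative only, not part of the role):
#   ceph-authtool --create-keyring ceph.client.admin.keyring --gen-key -n client.admin \
#     --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'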
###############
# CRUSH RULES #
###############
crush_rule_config: false
crush_rule_hdd:
  name: HDD
  root: HDD
  type: host
  default: false
crush_rule_ssd:
  name: SSD
  root: SSD
  type: host
  default: false
crush_rules:
  - "{{ crush_rule_hdd }}"
  - "{{ crush_rule_ssd }}"
# Caution: this will create crush roots and racks according to hostvars {{ osd_crush_location }}
# and will move hosts into them which might lead to significant data movement in the cluster!
#
# In order for the playbook to create a CRUSH hierarchy, you have to set up your Ansible inventory file like so:
#
# [osds]
# ceph-osd-01 osd_crush_location="{ 'root': 'mon-roottt', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'ceph-osd-01' }"
#
# Note that 'host' is mandatory and that you need to supply at least two bucket types (including the host)
create_crush_tree: false
##########
# DOCKER #
##########
# Resource limitation
# For the full list of limits you can apply, see: docs.docker.com/engine/admin/resource_constraints
# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_mon_docker_extra_env' variable.
ceph_mon_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
ceph_mon_docker_cpu_limit: 1
ceph_mon_container_listen_port: 3300
# Use this variable to add extra env configuration to run your mon container.
# If you want to set a custom admin keyring, you can set this variable as follows:
# ceph_mon_docker_extra_env: -e ADMIN_SECRET={{ admin_secret }}
ceph_mon_docker_extra_env:
mon_docker_privileged: false
mon_docker_net_host: true
ceph_config_keys: [] # DON'T TOUCH ME
###########
# SYSTEMD #
###########
# ceph_mon_systemd_overrides will override the systemd settings
# for the ceph-mon services.
# For example, to set "PrivateDevices=false" you can specify:
#ceph_mon_systemd_overrides:
# Service:
# PrivateDevices: False
#!/bin/bash
echo -n "Ceph state is: "
/usr/bin/ceph health
echo ""
---
galaxy_info:
company: Red Hat
author: Sébastien Han
description: Installs Ceph Monitor
license: Apache
min_ansible_version: 1.7
min_ansible_version: 2.7
platforms:
- name: Ubuntu
- name: EL
versions:
- trusty
categories:
- 7
galaxy_tags:
- system
dependencies:
- { role: ceph-common, when: not mon_containerized_deployment }
dependencies: []
---
# NOTE (leseb): wait for mon discovery and quorum resolution
# the admin key is not created instantaneously so we have to wait a bit
- name: wait for client.admin key exists
  wait_for:
    path: /etc/ceph/ceph.client.admin.keyring
- name: create ceph rest api keyring
  command: ceph auth get-or-create client.restapi osd 'allow *' mon 'allow *' -o /etc/ceph/ceph.client.restapi.keyring
  args:
    creates: /etc/ceph/ceph.client.restapi.keyring
  changed_when: false
  when:
    cephx and
    groups[restapi_group_name] is defined
- include: openstack_config.yml
  when:
    openstack_config and
    cephx
- name: find ceph keys
shell: ls -1 /etc/ceph/*.keyring
- name: waiting for the monitor(s) to form the quorum...
command: >
{{ container_exec_cmd }}
ceph
--cluster {{ cluster }}
-n mon.
-k /var/lib/ceph/mon/{{ cluster }}-{{ ansible_hostname }}/keyring
mon_status
--format json
register: ceph_health_raw
run_once: true
until: >
(ceph_health_raw.stdout != "") and (ceph_health_raw.stdout | default('{}') | from_json)['state'] in ['leader', 'peon']
retries: "{{ handler_health_mon_check_retries }}"
delay: "{{ handler_health_mon_check_delay }}"
changed_when: false
register: ceph_keys
when: cephx
- name: set keys permissions
  file:
    path: "{{ item }}"
    mode: 0600
    owner: root
    group: root
  with_items:
    - "{{ ceph_keys.stdout_lines }}"
- name: copy keys to the ansible server
  fetch:
    src: "{{ item }}"
    dest: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
    flat: yes
  with_items:
    - "{{ ceph_keys.stdout_lines }}"
    - /var/lib/ceph/bootstrap-osd/ceph.keyring
    - /var/lib/ceph/bootstrap-rgw/ceph.keyring
    - /var/lib/ceph/bootstrap-mds/ceph.keyring
- name: tasks for MONs when cephx is enabled
  when: cephx
  block:
    - name: fetch ceph initial keys
      ceph_key:
        state: fetch_initial_keys
        cluster: "{{ cluster }}"
        owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
        group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
        mode: "0400"
      environment:
        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
        CEPH_ROLLING_UPDATE: "{{ rolling_update }}"
- name: drop in a motd script to report status when logging in
  copy:
    src: precise/92-ceph
    dest: /etc/update-motd.d/92-ceph
    owner: root
    group: root
    mode: 0755
  when: ansible_distribution_release == 'precise'
- name: copy keys to the ansible server
  fetch:
    src: "{{ item }}"
    dest: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
    flat: yes
  with_items:
    - /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring
    - /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring
    - /var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring
    - /var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring
    - /var/lib/ceph/bootstrap-rbd-mirror/{{ cluster }}.keyring
    - /etc/ceph/{{ cluster }}.client.admin.keyring
  when: inventory_hostname == groups[mon_group_name] | last
---
# NOTE (leseb): in this playbook the conditional is applied to each task.
# We don't do it in main.yml because of the 'docker' variable: when it is set to true,
# the 'ceph-common' role is not inherited, so the condition cannot be evaluated
# there since those checks are performed by the ceph-common role.
- name: create filesystem pools
  command: ceph osd pool create {{ item }} {{ pool_default_pg_num }}
  with_items:
    - cephfs_data
    - cephfs_metadata
  changed_when: false
  when: not {{ ceph_version.stdout | version_compare('0.84', '<') }}
- name: create ceph filesystem
  command: ceph fs new {{ cephfs }} {{ cephfs_metadata }} {{ cephfs_data }}
  changed_when: false
  when: not {{ ceph_version.stdout | version_compare('0.84', '<') }}
---
- name: configure crush hierarchy
  ceph_crush:
    cluster: "{{ cluster }}"
    location: "{{ hostvars[item]['osd_crush_location'] }}"
    containerized: "{{ container_exec_cmd }}"
  with_items: "{{ groups[osd_group_name] }}"
  register: config_crush_hierarchy
  when:
    - inventory_hostname == groups.get(mon_group_name) | last
    - create_crush_tree
    - hostvars[item]['osd_crush_location'] is defined
- name: create configured crush rules
  command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd crush rule create-simple {{ item.name }} {{ item.root }} {{ item.type }}"
  with_items: "{{ crush_rules | unique }}"
  changed_when: false
  when: inventory_hostname == groups.get(mon_group_name) | last
- name: get id for new default crush rule
  command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd -f json crush rule dump {{ item.name }}"
  register: info_ceph_default_crush_rule
  changed_when: false
  with_items: "{{ crush_rules }}"
  when:
    - inventory_hostname == groups.get(mon_group_name) | last
    - item.default
# If multiple rules are set as default (which should not happen), the last one wins and
# becomes the actual default: each with_items iteration overwrites the fact set by the previous one.
# NOTE(leseb): we should actually fail if multiple rules are set as default
- name: set_fact info_ceph_default_crush_rule_yaml
  set_fact:
    info_ceph_default_crush_rule_yaml: "{{ item.stdout | from_json() }}"
  with_items: "{{ info_ceph_default_crush_rule.results }}"
  when:
    - inventory_hostname == groups.get(mon_group_name) | last
    - not item.get('skipped', false)
- name: insert new default crush rule into daemon to prevent restart
  command: "{{ hostvars[item]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[item]['monitor_name'] }} config set osd_pool_default_crush_rule {{ info_ceph_default_crush_rule_yaml.rule_id }}"
  changed_when: false
  delegate_to: "{{ item }}"
  with_items: "{{ groups[mon_group_name] }}"
  when:
    - not config_crush_hierarchy.get('skipped', false)
    - info_ceph_default_crush_rule_yaml | default('') | length > 0
- name: "add new default crush rule to {{ cluster }}.conf"
  ini_file:
    dest: "/etc/ceph/{{ cluster }}.conf"
    section: "global"
    option: "osd pool default crush rule"
    value: "{{ info_ceph_default_crush_rule_yaml.rule_id }}"
  delegate_to: "{{ item }}"
  with_items: "{{ groups[mon_group_name] }}"
  when:
    - not config_crush_hierarchy.get('skipped', false)
    - info_ceph_default_crush_rule_yaml | default('') | length > 0
---
- name: check if monitor initial keyring already exists
  command: >
    {{ container_exec_cmd | default('') }} ceph --cluster ceph --name mon. -k
    /var/lib/ceph/mon/{{ cluster }}-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}/keyring
    auth get-key mon.
  register: initial_mon_key
  run_once: True
  delegate_to: "{{ groups.get(mon_group_name, [])[0] }}"
  when: ceph_current_status.fsid is defined
- name: generate monitor initial keyring
local_action: shell python -c "import os ; import struct ; import time; import base64 ; key = os.urandom(16) ; header = struct.pack('<hiih',1,int(time.time()),0,len(key)) ; print base64.b64encode(header + key)" | tee {{ fetch_directory }}/monitor_keyring.conf
creates={{ fetch_directory }}/monitor_keyring.conf
shell: >
python -c "import os ; import struct ;
import time; import base64 ; key = os.urandom(16) ;
header = struct.pack('<hiih',1,int(time.time()),0,len(key)) ;
print(base64.b64encode(header + key).decode())"
register: monitor_keyring
sudo: false
when: monitor_secret != 'AQAWqilTCDh7CBAAawXt6kyTgLFCxSvJhTEmuw=='
run_once: True
delegate_to: "{{ groups.get(mon_group_name, [])[0] }}"
when:
- initial_mon_key.skipped is defined
- ceph_current_status.fsid is undefined
- name: read monitor initial keyring if it already exists
  local_action: command cat {{ fetch_directory }}/monitor_keyring.conf
    removes={{ fetch_directory }}/monitor_keyring.conf
  changed_when: false
  register: monitor_keyring
  sudo: false
  when: monitor_secret != 'AQAWqilTCDh7CBAAawXt6kyTgLFCxSvJhTEmuw=='
- name: get initial keyring when it already exists
  set_fact:
    monitor_keyring: "{{ initial_mon_key.stdout if monitor_keyring.skipped is defined else monitor_keyring.stdout if initial_mon_key.skipped is defined }}"
- name: create monitor initial keyring
command: ceph-authtool /var/lib/ceph/tmp/keyring.mon.{{ ansible_hostname }} --create-keyring --name=mon. --add-key={{ monitor_secret }} --cap mon 'allow *'
args:
creates: /var/lib/ceph/tmp/keyring.mon.{{ ansible_hostname }}
ceph_key:
name: mon.
state: present
dest: "/var/lib/ceph/tmp/"
secret: "{{ monitor_keyring }}"
cluster: "{{ cluster }}"
caps:
mon: allow *
import_key: False
owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
mode: "0400"
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
- name: set initial monitor key permissions
  file:
    path: /var/lib/ceph/tmp/keyring.mon.{{ ansible_hostname }}
    mode: 0600
    owner: root
    group: root
- name: copy the initial key in /etc/ceph (for containers)
  command: >
    cp /var/lib/ceph/tmp/{{ cluster }}.mon..keyring
    /etc/ceph/{{ cluster }}.mon.keyring
  changed_when: false
  when: containerized_deployment
- name: create monitor directory
- name: create (and fix ownership of) monitor directory
file:
path: /var/lib/ceph/mon/ceph-{{ ansible_hostname }}
path: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}
state: directory
owner: root
group: root
mode: 0755
owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
mode: "u=rwX,g=rX,o=rX"
recurse: true
- name: create custom admin keyring
  ceph_key:
    name: client.admin
    state: present
    secret: "{{ admin_secret }}"
    caps: "{{ client_admin_ceph_authtool_cap }}"
    import_key: False
    cluster: "{{ cluster }}"
    owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
    group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
    mode: "0400"
  environment:
    CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
    CEPH_CONTAINER_BINARY: "{{ container_binary }}"
  register: create_custom_admin_secret
  when:
    - cephx
    - admin_secret != 'admin_secret'
- name: set_fact ceph-authtool container command
  set_fact:
    ceph_authtool_cmd: "{{ container_binary + ' run --net=host --rm -v /var/lib/ceph:/var/lib/ceph:z -v /etc/ceph/:/etc/ceph/:z --entrypoint=ceph-authtool ' + ceph_client_docker_registry + '/' + ceph_client_docker_image + ':' + ceph_client_docker_image_tag if containerized_deployment else 'ceph-authtool' }}"
- name: import admin keyring into mon keyring
  command: >
    {{ ceph_authtool_cmd }}
    /var/lib/ceph/tmp/{{ cluster }}.mon..keyring --import-keyring /etc/ceph/{{ cluster }}.client.admin.keyring
  when:
    - not create_custom_admin_secret.get('skipped')
    - cephx
    - admin_secret != 'admin_secret'
- name: set_fact ceph-mon container command
  set_fact:
    ceph_mon_cmd: "{{ container_binary + ' run --rm --net=host -v /var/lib/ceph/:/var/lib/ceph:z -v /etc/ceph/:/etc/ceph/:z --entrypoint=ceph-mon ' + ceph_client_docker_registry + '/' + ceph_client_docker_image + ':' + ceph_client_docker_image_tag if containerized_deployment else 'ceph-mon' }}"
- name: ceph monitor mkfs with keyring
  command: >
    {{ ceph_mon_cmd }}
    --cluster {{ cluster }}
    --setuser "{{ ceph_uid if containerized_deployment else 'ceph' }}"
    --setgroup "{{ ceph_uid if containerized_deployment else 'ceph' }}"
    --mkfs
    -i {{ monitor_name }}
    --fsid {{ fsid }}
    --keyring /var/lib/ceph/tmp/{{ cluster }}.mon..keyring
  args:
    creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring
  when: cephx
- name: ceph monitor mkfs
command: ceph-mon --mkfs -i {{ ansible_hostname }} --fsid {{ fsid }} --keyring /var/lib/ceph/tmp/keyring.mon.{{ ansible_hostname }}
- name: ceph monitor mkfs without keyring
command: >
{{ ceph_mon_cmd }}
--cluster {{ cluster }}
--setuser "{{ ceph_uid if containerized_deployment else 'ceph' }}"
--setgroup "{{ ceph_uid if containerized_deployment else 'ceph' }}"
--mkfs
-i {{ monitor_name }}
--fsid {{ fsid }}
args:
creates: /var/lib/ceph/mon/ceph-{{ ansible_hostname }}/keyring
creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/store.db
when: not cephx
---
- name: set config and keys paths
  set_fact:
    ceph_config_keys:
      - /etc/ceph/ceph.client.admin.keyring
      - /etc/ceph/ceph.conf
      - /etc/ceph/monmap
      - /etc/ceph/ceph.mon.keyring
      - /var/lib/ceph/bootstrap-osd/ceph.keyring
      - /var/lib/ceph/bootstrap-rgw/ceph.keyring
      - /var/lib/ceph/bootstrap-mds/ceph.keyring
- name: stat for ceph config and keys
  stat:
    path: "{{ item }}"
  with_items: ceph_config_keys
  changed_when: false
  failed_when: false
  register: statleftover
- name: fail if we find existing cluster files
  fail:
    msg: "looks like no cluster is running but ceph files are present, please remove them"
  with_together:
    - ceph_config_keys
    - statleftover.results
  when: item.1.stat.exists == true
---
- name: push ceph files to the ansible server
  fetch:
    src: "{{ item.0 }}"
    dest: "{{ fetch_directory }}/docker_mon_files/{{ item.0 }}"
    flat: yes
  with_together:
    - ceph_config_keys
    - statconfig.results
  when: item.1.stat.exists == false
---
- name: set config and keys paths
  set_fact:
    ceph_config_keys:
      - /etc/ceph/ceph.client.admin.keyring
      - /etc/ceph/ceph.conf
      - /etc/ceph/monmap
      - /etc/ceph/ceph.mon.keyring
      - /var/lib/ceph/bootstrap-osd/ceph.keyring
      - /var/lib/ceph/bootstrap-rgw/ceph.keyring
      - /var/lib/ceph/bootstrap-mds/ceph.keyring
- name: stat for ceph config and keys
  local_action: stat path={{ item }}
  with_items: ceph_config_keys
  changed_when: false
  sudo: false
  failed_when: false
  register: statconfig
- name: try to fetch ceph config and keys
  copy:
    src: "{{ fetch_directory }}/docker_mon_files/{{ item.0 }}"
    dest: "{{ item.0 }}"
    owner: root
    group: root
    mode: 644
  changed_when: false
  with_together:
    - ceph_config_keys
    - statconfig.results
  when: item.1.stat.exists == true
---
- name: check if a cluster is already running
  shell: "docker ps | grep -sq 'ceph/daemon'"
  register: ceph_health
  changed_when: false
  failed_when: false
- include: checks.yml
  when: ceph_health.rc != 0
- include: pre_requisite.yml
- include: selinux.yml
  when: ansible_os_family == 'RedHat'
- include: fetch_configs.yml
- include: start_docker_monitor.yml
- include: copy_configs.yml
---
- name: create bootstrap directories
  file:
    path: "{{ item }}"
    state: directory
  with_items:
    - /etc/ceph/
    - /var/lib/ceph/bootstrap-osd
    - /var/lib/ceph/bootstrap-mds
    - /var/lib/ceph/bootstrap-rgw
- name: install pip on debian
  apt:
    name: pip
    state: present
  when: ansible_os_family == 'Debian'
- name: install pip on redhat
  yum:
    name: python-pip
    state: present
  when: ansible_os_family == 'RedHat'
# NOTE (leseb): pin version 1.1.0 because of https://github.com/ansible/ansible-modules-core/issues/1227
- name: install docker-py
  pip:
    name: docker-py
    version: 1.1.0
---
- name: check if selinux is enabled
  command: getenforce
  register: sestatus
  changed_when: false
- name: set selinux permissions
  shell: chcon -Rt svirt_sandbox_file_t {{ item }}
  with_items:
    - /etc/ceph
    - /var/lib/ceph
  changed_when: false
  when: sestatus.stdout != 'Disabled'
---
- name: run the ceph Monitor docker image
  docker:
    image: "{{ ceph_mon_docker_username }}/{{ ceph_mon_docker_imagename }}"
    name: "{{ ansible_hostname }}"
    net: "host"
    state: "running"
    env: "MON_IP={{ hostvars[inventory_hostname]['ansible_' + ceph_mon_docker_interface]['ipv4']['address'] }},CEPH_DAEMON=MON,CEPH_PUBLIC_NETWORK={{ ceph_mon_docker_subnet }},{{ ceph_mon_extra_envs }}"
    volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph"
---
- include: deploy_monitors.yml
when: not mon_containerized_deployment
- name: set_fact container_exec_cmd
set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
when: containerized_deployment
- include: start_monitor.yml
when: not mon_containerized_deployment
- name: include deploy_monitors.yml
include_tasks: deploy_monitors.yml
when:
# we test for both container and non-container
- (mon_socket_stat is defined and mon_socket_stat.get('rc') != 0) or (ceph_mon_container_stat is defined and ceph_mon_container_stat.get('stdout_lines', [])|length == 0)
- not switch_to_containers | default(False)
- include: ceph_keys.yml
when: not mon_containerized_deployment
- name: include start_monitor.yml
include_tasks: start_monitor.yml
- include: create_mds_filesystems.yml
when:
not ceph_containerized_deployment and
groups[mds_group_name] is defined
- name: include_tasks ceph_keys.yml
include_tasks: ceph_keys.yml
when: not switch_to_containers | default(False)
- include: secure_cluster.yml
- name: include secure_cluster.yml
include_tasks: secure_cluster.yml
when:
secure_cluster and
not mon_containerized_deployment
- secure_cluster
- inventory_hostname == groups[mon_group_name] | first
- name: crush_rules.yml
include_tasks: crush_rules.yml
when: crush_rule_config
- include: ./docker/main.yml
when: mon_containerized_deployment
---
- name: create openstack pool
  command: ceph osd pool create {{ item.name }} {{ item.pg_num }}
  with_items:
    - "{{ openstack_glance_pool }}"
    - "{{ openstack_cinder_pool }}"
    - "{{ openstack_nova_pool }}"
    - "{{ openstack_cinder_backup_pool }}"
  changed_when: false
  failed_when: false
- name: create openstack keys
  command: ceph auth get-or-create {{ item.name }} {{ item.value }} -o /etc/ceph/ceph.{{ item.name }}.keyring
  args:
    creates: /etc/ceph/ceph.{{ item.name }}.keyring
  with_items: openstack_keys
  changed_when: false
---
- name: collect all the pools
command: rados lspools
command: >
{{ container_exec_cmd }} rados --cluster {{ cluster }} lspools
changed_when: false
register: ceph_pools
when: "{{ ceph_version.stdout | version_compare('0.94', '>=') }}"
check_mode: no
- name: secure the cluster
command: ceph osd pool set {{ item[0] }} {{ item[1] }} true
command: >
{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd pool set {{ item[0] }} {{ item[1] }} true
changed_when: false
with_nested:
- ceph_pools.stdout_lines
- secure_cluster_flags
when: "{{ ceph_version.stdout | version_compare('0.94', '>=') }}"
- "{{ ceph_pools.stdout_lines|default([]) }}"
- "{{ secure_cluster_flags }}"
---
- name: activate monitor with upstart
- name: ensure systemd service override directory exists
file:
path: /var/lib/ceph/mon/ceph-{{ ansible_hostname }}/{{ item }}
state: touch
owner: root
group: root
mode: 0600
with_items:
- done
- upstart
when: ansible_distribution == "Ubuntu"
changed_when: false
state: directory
path: "/etc/systemd/system/ceph-mon@.service.d/"
when:
- not containerized_deployment
- ceph_mon_systemd_overrides is defined
- ansible_service_mgr == 'systemd'
- name: start the monitor service and add it to the init sequence (ubuntu)
  service:
    name: ceph-mon
    state: started
    enabled: yes
    args: "id={{ ansible_hostname }}"
  when: ansible_distribution == "Ubuntu"
# NOTE (leseb): somehow the service ansible module is messing things up
# as a safety measure we run the raw command
- name: start the monitor service and add it to the init sequence
  command: service ceph start mon
  changed_when: false
  when: ansible_distribution != "Ubuntu"
- name: add ceph-mon systemd service overrides
  config_template:
    src: "ceph-mon.service.d-overrides.j2"
    dest: "/etc/systemd/system/ceph-mon@.service.d/ceph-mon-systemd-overrides.conf"
    config_overrides: "{{ ceph_mon_systemd_overrides | default({}) }}"
    config_type: "ini"
  when:
    - not containerized_deployment
    - ceph_mon_systemd_overrides is defined
    - ansible_service_mgr == 'systemd'
- name: collect admin and bootstrap keys
  command: ceph-create-keys --id {{ ansible_hostname }}
  changed_when: false
  failed_when: false
- name: generate systemd unit file for mon container
  become: true
  template:
    src: "{{ role_path }}/templates/ceph-mon.service.j2"
    dest: /etc/systemd/system/ceph-mon@.service
    owner: "root"
    group: "root"
    mode: "0644"
  notify: restart ceph mons
  when: containerized_deployment
- name: get ceph monitor version
  shell: ceph daemon mon."{{ ansible_hostname }}" version | cut -d '"' -f 4 | cut -f 1,2 -d '.'
  changed_when: false
  failed_when: "'No such file or directory' in ceph_version.stderr"
  register: ceph_version
- name: start the monitor service
  systemd:
    name: ceph-mon@{{ monitor_name if not containerized_deployment else ansible_hostname }}
    state: started
    enabled: yes
    masked: no
    daemon_reload: yes
# {{ ansible_managed }}