
import files

wip
Yann Autissier committed 1 year ago · commit 44a6d37ba5
Changed files (number of lines changed):
  1. .dockerignore (3)
  2. .env.dist (11)
  3. .gitignore (2)
  4. Makefile (16)
  5. ansible/.gitignore (2)
  6. ansible/ansible.cfg (9)
  7. ansible/ec2.py (1711)
  8. ansible/inventories/.host.docker.internal (1)
  9. ansible/inventories/group_vars/all (3)
  10. ansible/inventories/host_vars/default (50)
  11. ansible/inventories/host_vars/localhost (25)
  12. ansible/playbook.yml (29)
  13. ansible/playbooks/aws-cli.yml (6)
  14. ansible/playbooks/disks.yml (6)
  15. ansible/playbooks/docker.yml (6)
  16. ansible/playbooks/hosts.yml (6)
  17. ansible/roles/aws-cli/.gitrepo (12)
  18. ansible/roles/aws-cli/README.md (35)
  19. ansible/roles/aws-cli/defaults/main.yml (7)
  20. ansible/roles/aws-cli/meta/main.yml (14)
  21. ansible/roles/aws-cli/tasks/main.yml (144)
  22. ansible/roles/aws-cli/templates/aws_cli_config.j2 (7)
  23. ansible/roles/aws-cli/templates/aws_cli_credentials.j2 (5)
  24. ansible/roles/disks/.gitrepo (12)
  25. ansible/roles/disks/LICENSE (21)
  26. ansible/roles/disks/README.md (77)
  27. ansible/roles/disks/defaults/main.yml (8)
  28. ansible/roles/disks/handlers/main.yml (21)
  29. ansible/roles/disks/library/disks_ebs_config.py (182)
  30. ansible/roles/disks/meta/main.yml (21)
  31. ansible/roles/disks/tasks/main.yml (173)
  32. ansible/roles/docker/.gitrepo (12)
  33. ansible/roles/docker/AUTHORS.md (4)
  34. ansible/roles/docker/CHANGELOG.md (9)
  35. ansible/roles/docker/LICENSE (20)
  36. ansible/roles/docker/README.md (237)
  37. ansible/roles/docker/defaults/main.yml (41)
  38. ansible/roles/docker/files/etc/sysctl.d/docker.conf (1)
  39. ansible/roles/docker/files/usr/local/bin/docker-build (133)
  40. ansible/roles/docker/files/usr/local/bin/docker-cleanup (4)
  41. ansible/roles/docker/files/usr/local/bin/docker-cleanup-images (4)
  42. ansible/roles/docker/files/usr/local/bin/docker-cleanup-volumes (144)
  43. ansible/roles/docker/files/usr/local/bin/docker-get-image (8)
  44. ansible/roles/docker/files/usr/local/bin/docker-get-status (8)
  45. ansible/roles/docker/files/usr/local/bin/docker-log-cleanup (4)
  46. ansible/roles/docker/files/usr/local/bin/docker-log-truncate (7)
  47. ansible/roles/docker/files/usr/local/bin/docker-run (157)
  48. ansible/roles/docker/handlers/main.yml (8)
  49. ansible/roles/docker/meta/main.yml (42)
  50. ansible/roles/docker/playbook.yml (6)
  51. ansible/roles/docker/tasks/build.yml (17)
  52. ansible/roles/docker/tasks/check.yml (16)
  53. ansible/roles/docker/tasks/config.yml (18)
  54. ansible/roles/docker/tasks/files.yml (23)
  55. ansible/roles/docker/tasks/group.yml (12)
  56. ansible/roles/docker/tasks/main.yml (30)
  57. ansible/roles/docker/tasks/package.yml (21)
  58. ansible/roles/docker/tasks/run.yml (53)
  59. ansible/roles/docker/tasks/service.yml (32)
  60. ansible/roles/docker/tasks/vars.yml (34)
  61. ansible/roles/docker/tests/goss.yml (43)
  62. ansible/roles/docker/tests/goss/group.yml (4)
  63. ansible/roles/docker/tests/goss/main.yml (4)
  64. ansible/roles/docker/tests/goss/main_centos-6.yml (4)
  65. ansible/roles/docker/tests/goss/main_centos-7.yml (4)
  66. ansible/roles/docker/tests/goss/main_debian.yml (4)
  67. ansible/roles/docker/tests/goss/package.yml (3)
  68. ansible/roles/docker/tests/goss/package_centos-6.yml (3)
  69. ansible/roles/docker/tests/goss/package_centos-7.yml (3)
  70. ansible/roles/docker/tests/goss/package_debian.yml (3)
  71. ansible/roles/docker/tests/goss/service.yml (5)
  72. ansible/roles/docker/tests/library/goss.py (118)
  73. ansible/roles/docker/tests/main.yml (6)
  74. ansible/roles/docker/tests/playbook.yml (6)
  75. ansible/roles/docker/vars/centos-6.yml (11)
  76. ansible/roles/docker/vars/centos-7.yml (13)
  77. ansible/roles/docker/vars/debian.yml (15)
  78. ansible/roles/hosts/.gitignore (1)
  79. ansible/roles/hosts/.gitrepo (12)
  80. ansible/roles/hosts/AUTHORS.md (4)
  81. ansible/roles/hosts/CHANGELOG.md (5)
  82. ansible/roles/hosts/LICENSE (20)
  83. ansible/roles/hosts/README.md (110)
  84. ansible/roles/hosts/defaults/main.yml (142)
  85. ansible/roles/hosts/files/etc/init.d/zram (105)
  86. ansible/roles/hosts/files/etc/init.d/zram-openrc (18)
  87. ansible/roles/hosts/files/etc/issue.net (6)
  88. ansible/roles/hosts/files/etc/local.d/ansible.start (2)
  89. ansible/roles/hosts/files/etc/profile.d/rc.sh (39)
  90. ansible/roles/hosts/files/etc/profile.d/rc_functions.sh (218)
  91. ansible/roles/hosts/files/etc/ssh/github.com.pub (1)
  92. ansible/roles/hosts/files/etc/systemd/system/zram.service (12)
  93. ansible/roles/hosts/handlers/main.yml (11)
  94. ansible/roles/hosts/meta/main.yml (46)
  95. ansible/roles/hosts/playbook.yml (7)
  96. ansible/roles/hosts/tasks/boot.yml (32)
  97. ansible/roles/hosts/tasks/cloudinit.yml (48)
  98. ansible/roles/hosts/tasks/files.yml (37)
  99. ansible/roles/hosts/tasks/git.yml (10)
  100. ansible/roles/hosts/tasks/main.yml (30)
Some files were not shown because too many files have changed in this diff.

3
.dockerignore

@@ -0,0 +1,3 @@
.git*
build/cache/*
build/iso/*

11
.env.dist

@@ -0,0 +1,11 @@
APP=yaip
APP_DOMAIN=${ENV}.${DOMAIN}
APP_HOST=${APP}.${APP_DOMAIN}
APP_NAME=${APP}
APP_PATH=/${ENV_SUFFIX}
APP_SCHEME=http
APP_URI=${APP_HOST}${APP_PATH}
APP_URL=${APP_SCHEME}://${APP_URI}
DOMAIN=localhost
ENV=dist
SSH_DIR=${HOME}/.ssh
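For reference, a sketch of how these variables resolve with the defaults shipped in this file (ENV=dist, DOMAIN=localhost); ENV_SUFFIX is not defined here and is assumed to come from the make includes, so it is treated as empty in this illustration:
``` bash
# Hypothetical resolved values, assuming ENV_SUFFIX is empty
APP_DOMAIN=dist.localhost
APP_HOST=yaip.dist.localhost
APP_PATH=/
APP_URI=yaip.dist.localhost/
APP_URL=http://yaip.dist.localhost/
```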

2
.gitignore

@@ -0,0 +1,2 @@
/.env
/build

16
Makefile

@@ -0,0 +1,16 @@
APP_TYPE := infra
include make/include.mk
##
# APP
app-build: build-rm infra-base
$(call install-parameters,,curator,build)
$(call make,docker-compose-build)
$(call make,up)
$(call make,docker-compose-exec ARGS='rm -Rf /root/.npm /log-buffer/*' SERVICE=logagent)
$(call make,docker-commit)
app-deploy: deploy-ping
app-install: base node up
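The targets above delegate to macros defined in make/include.mk, which is not shown in this excerpt. A usage sketch, assuming the include provides the referenced install-parameters, docker-compose-*, docker-commit, base, node and up targets:
``` bash
make app-build    # rebuild the infra image: compose build, clean up the logagent service, commit the container
make app-install  # bootstrap: base, node, then up
make app-deploy   # runs the deploy-ping target
```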

2
ansible/.gitignore

@@ -0,0 +1,2 @@
playbook.retry
inventories/packer-provisioner-ansible*

9
ansible/ansible.cfg

@@ -0,0 +1,9 @@
[defaults]
inventory = inventories
roles_path = roles
filter_plugins = plugins/filter
host_key_checking = False
[ssh_connection]
scp_if_ssh = smart
pipelining = True

1711
ansible/ec2.py

File diff suppressed because it is too large.

1
ansible/inventories/.host.docker.internal

@@ -0,0 +1 @@
localhost ansible_host=host.docker.internal

3
ansible/inventories/group_vars/all

@@ -0,0 +1,3 @@
---
# file: inventories/group_vars/all

50
ansible/inventories/host_vars/default

@@ -0,0 +1,50 @@
---
# file: inventories/host_vars/default
aws_access_key_id: "{{ lookup('env','ANSIBLE_AWS_ACCESS_KEY_ID') }}"
aws_output_format: "{{ lookup('env','ANSIBLE_AWS_DEFAULT_OUTPUT') or 'json' }}"
aws_region: "{{ lookup('env','ANSIBLE_AWS_DEFAULT_REGION') or 'eu-west-1' }}"
aws_secret_access_key: "{{ lookup('env','ANSIBLE_AWS_SECRET_ACCESS_KEY') }}"
disks_additional_packages:
- e2fsprogs-extra
- nfs-utils
hosts_enable_cloudinit: false
hosts_enable_local: true
hosts_enable_rc: true
hosts_enable_zram: true
hosts_git_repositories:
- { "repo": "{{ lookup('env','ANSIBLE_GIT_REPOSITORY') }}", "dest": "{{ lookup('env','ANSIBLE_GIT_DIRECTORY') }}", "key_file": "{{ lookup('env','ANSIBLE_GIT_KEY_FILE') or '~/.ssh/id_rsa' }}", "version": "{{ lookup('env','ANSIBLE_GIT_VERSION') }}" }
hosts_packages:
- { "name": "ansible", "state": "present" }
- { "name": "coreutils", "state": "present" }
- { "name": "curl", "state": "present" }
- { "name": "git", "state": "present" }
- { "name": "groff", "state": "present" }
- { "name": "htop", "state": "present" }
- { "name": "less", "state": "present" }
- { "name": "lsof", "state": "present" }
- { "name": "make", "state": "present" }
- { "name": "openssh-client", "state": "present" }
- { "name": "util-linux", "state": "present" }
- { "name": "vim", "state": "present" }
- { "name": "zsh", "state": "present" }
hosts_ssh_private_keys:
- "{{ lookup('env','ANSIBLE_SSH_PRIVATE_KEY') or '~/.ssh/id_rsa' }}"
hosts_ssh_users:
- aya
hosts_user_env:
- ANSIBLE_AWS_ACCESS_KEY_ID
- ANSIBLE_AWS_SECRET_ACCESS_KEY
- ANSIBLE_CONFIG
- ANSIBLE_DISKS_NFS_DISK
- ANSIBLE_DISKS_NFS_OPTIONS
- ANSIBLE_DISKS_NFS_PATH
- ANSIBLE_DOCKER_IMAGE_TAG
- ANSIBLE_DOCKER_REGISTRY
- ANSIBLE_EXTRA_VARS
- ANSIBLE_GIT_DIRECTORY
- ANSIBLE_GIT_KEY_FILE
- ANSIBLE_GIT_REPOSITORY
- ANSIBLE_INVENTORY
- ANSIBLE_PLAYBOOK
- ENV

25
ansible/inventories/host_vars/localhost

@@ -0,0 +1,25 @@
---
# file: inventories/host_vars/localhost
aws_access_key_id: "{{ lookup('env','ANSIBLE_AWS_ACCESS_KEY_ID') }}"
aws_output_format: "{{ lookup('env','ANSIBLE_AWS_DEFAULT_OUTPUT') or 'json' }}"
aws_region: "{{ lookup('env','ANSIBLE_AWS_DEFAULT_REGION') or 'eu-west-1' }}"
aws_secret_access_key: "{{ lookup('env','ANSIBLE_AWS_SECRET_ACCESS_KEY') }}"
disks_additional_disks:
- disk: /dev/xvdb
disable_periodic_fsck: true
fstype: ext4
mount_options: defaults
mount: /var/lib/docker
service: docker
- disk: "{{ lookup('env','ANSIBLE_DISKS_NFS_DISK') }}"
fstype: nfs
mount_options: "{{ lookup('env','ANSIBLE_DISKS_NFS_OPTIONS') }}"
mount: "{{ lookup('env','ANSIBLE_DISKS_NFS_PATH') }}"
disks_additional_services:
- rpc.statd
docker_image_tag: "{{ lookup('env','ANSIBLE_DOCKER_IMAGE_TAG') or 'latest' }}"
docker_registry: "{{ lookup('env','ANSIBLE_DOCKER_REGISTRY') }}"
hosts_enable_local: true
hosts_enable_rc: true
hosts_enable_zram: true

29
ansible/playbook.yml

@@ -0,0 +1,29 @@
---
# file: playbook.yml
# bootstrap hosts
- hosts: default
gather_facts: false
pre_tasks:
- name: raw - install ansible requirements for alpine linux
raw: "[ -f /etc/alpine-release ] && /sbin/apk update && { which python3 >/dev/null 2>&1 || /sbin/apk add python3; } && { which sudo >/dev/null 2>&1 || /sbin/apk add sudo; } && { /bin/tar --version 2>/dev/null |grep busybox >/dev/null && /sbin/apk add tar; } && { ls /usr/lib/ssh/sftp-server >/dev/null 2>&1 || /sbin/apk add openssh-sftp-server; } || true"
# install default packages and user settings
- import_playbook: playbooks/hosts.yml
tags:
- hosts
# mount additional disks
- import_playbook: playbooks/disks.yml
tags:
- disks
# install docker
- import_playbook: playbooks/docker.yml
tags:
- docker
# install aws cli
- import_playbook: playbooks/aws-cli.yml
tags:
- aws-cli
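As an illustration, typical invocations of this playbook, run from the ansible/ directory so that ansible.cfg (inventory = inventories, roles_path = roles) is picked up:
``` bash
ansible-playbook playbook.yml                      # full bootstrap: hosts, disks, docker, aws-cli
ansible-playbook playbook.yml --tags docker        # only the docker play
ansible-playbook playbook.yml -e target=localhost  # point the imported plays at a single host
```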

6
ansible/playbooks/aws-cli.yml

@@ -0,0 +1,6 @@
---
# file: playbooks/aws-cli.yml
- hosts: '{{ target | default("all") }}'
roles:
- aws-cli

6
ansible/playbooks/disks.yml

@@ -0,0 +1,6 @@
---
# file: playbooks/disks.yml
- hosts: '{{ target | default("all") }}'
roles:
- disks

6
ansible/playbooks/docker.yml

@@ -0,0 +1,6 @@
---
# file: playbooks/docker.yml
- hosts: '{{ target | default("all") }}'
roles:
- docker

6
ansible/playbooks/hosts.yml

@@ -0,0 +1,6 @@
---
# file: playbooks/hosts.yml
- hosts: '{{ target | default("all") }}'
roles:
- hosts

12
ansible/roles/aws-cli/.gitrepo

@@ -0,0 +1,12 @@
; DO NOT EDIT (unless you know what you are doing)
;
; This subdirectory is a git "subrepo", and this file is maintained by the
; git-subrepo command. See https://github.com/git-commands/git-subrepo#readme
;
[subrepo]
remote = ssh://git@github.com/1001Pharmacies/ansible-aws-cli
branch = master
commit = f10e38af3a9b36648576f9850e0d09fcc7a057df
parent = 9ee8bfab9d2f5e5591c2e8a3d6f3a03b56b36196
method = merge
cmdver = 0.4.0

35
ansible/roles/aws-cli/README.md

@@ -0,0 +1,35 @@
# DEPRECATION NOTICE
We have moved away from Ansible and are in the process of removing or transferring ownership of our Ansible repositories. If you rely on this repository directly, please make arrangements to replace this dependency with your own fork.
# AWS CLI role for Ansible
Installs and configures the AWS CLI for conveniently interacting with AWS services such as S3.
## Requirements
- Tested on Ubuntu 12.04 Server;
- Ansible 2.0+
## Role Variables
The default variables are as follows:
    aws_output_format: 'json'
    aws_region: 'ap-southeast-2'
    aws_access_key_id: 'YOUR_ACCESS_KEY_ID'
    aws_secret_access_key: 'YOUR_SECRET_ACCESS_KEY'
## Example Playbook
    - hosts: 'servers'
      roles:
        - role: 'dstil.aws-cli'
          aws_output_format: 'json'
          aws_region: 'ap-southeast-2'
          aws_access_key_id: 'SUPER_SECRET_ACCESS_KEY_ID' # Don't version this or put it on pastebin
          aws_secret_access_key: 'SUPER_SECRET_ACCESS_KEY' # Ditto
# License
This playbook is provided 'as-is' under the conditions of the BSD license. No fitness for purpose is guaranteed or implied.

7
ansible/roles/aws-cli/defaults/main.yml

@@ -0,0 +1,7 @@
---
aws_cli_user: "{{ ansible_user|default('root') }}"
aws_cli_group: "{{ ansible_user|default('root') }}"
aws_output_format: 'json'
aws_region: 'eu-west-1'
aws_access_key_id: 'YOUR_ACCESS_KEY_ID'
aws_secret_access_key: 'YOUR_SECRET_ACCESS_KEY'

14
ansible/roles/aws-cli/meta/main.yml

@@ -0,0 +1,14 @@
---
galaxy_info:
author: 'Rohan Liston'
description: 'Installs and configures the AWS CLI for conveniently interacting with AWS services such as S3.'
company: 'DSTIL'
license: 'BSD'
min_ansible_version: 2.0
platforms:
- name: 'Ubuntu'
versions:
- 'precise'
categories:
- 'development'
dependencies: []

144
ansible/roles/aws-cli/tasks/main.yml

@@ -0,0 +1,144 @@
---
- name: 'Install AWS CLI'
tags: 'aws-cli'
become: 'yes'
pip: >
executable=pip
name=awscli
state=present
extra_args=--no-cache-dir
- name: 'Install docker python'
tags: 'aws-cli'
become: 'yes'
pip: >
name=docker
state=present
extra_args=--no-cache-dir
- name: 'Install boto python'
tags: 'aws-cli'
become: 'yes'
pip: >
name=boto3
state=present
extra_args=--no-cache-dir
- name: Set home directory of the user
set_fact:
home_dir: /home/{{ aws_cli_user }}
when: "not aws_cli_user == 'root'"
- name: Set home directory for root
set_fact:
home_dir: /root
when: "aws_cli_user == 'root'"
- name: 'Create the AWS config directory'
tags: 'aws-cli'
become: 'yes'
file: >
path={{ home_dir }}/.aws
state=directory
owner={{ aws_cli_user }}
group={{ aws_cli_group }}
mode=0755
- name: 'Copy AWS CLI config'
tags: 'aws-cli'
become: 'yes'
template: >
src=aws_cli_config.j2
dest={{ home_dir }}/.aws/config
owner={{ aws_cli_user }}
group={{ aws_cli_group }}
mode=0600
force=yes
- name: 'Copy AWS CLI credentials'
tags: 'aws-cli'
become: 'yes'
template: >
src=aws_cli_credentials.j2
dest={{ home_dir }}/.aws/credentials
owner={{ aws_cli_user }}
group={{ aws_cli_group }}
mode=0600
force=yes
- name: aws - check AWS meta-data URI
uri:
url: http://169.254.169.254/latest/meta-data
timeout: 1
register: aws_uri_check
tags: 'aws'
failed_when: False
- name: aws - get instance metadata
tags: 'aws'
ec2_metadata_facts:
when: aws_uri_check.status == 200
- name: aws - get instance tags
tags: 'aws'
ec2_tag:
aws_access_key: "{{ aws_access_key_id }}"
aws_secret_key: "{{ aws_secret_access_key }}"
region: "{{ ansible_ec2_placement_region }}"
resource: "{{ ansible_ec2_instance_id }}"
state: list
register: ec2_tags
when: ansible_ec2_instance_id is defined
- name: aws - set hostname
hostname: name="{{ ec2_tags.tags.hostname }}{% if ec2_tags.tags.domainname is defined %}.{{ ec2_tags.tags.domainname }}{% endif %}"
tags: 'aws'
when: ec2_tags.tags is defined and ec2_tags.tags.hostname is defined
- name: aws - ecr login
shell: "$(aws ecr get-login --no-include-email --region {{ aws_region }})"
tags: 'aws'
when: ec2_tags.tags is defined
- name: aws - prune docker objects (including non-dangling images)
docker_prune:
containers: yes
images: yes
images_filters:
dangling: false
networks: yes
volumes: yes
builder_cache: yes
tags: 'aws'
- name: aws - launch docker containers
docker_container:
image: "{{docker_registry|default(ec2_tags.tags.user)}}/{{ec2_tags.tags.user}}/{{ec2_tags.tags.env}}/{% if ':' in item %}{{item}}{% else %}{{item}}:{{docker_image_tag|default('latest')}}{% endif %}"
name: "{{ec2_tags.tags.user}}_{{ec2_tags.tags.env}}_{{item|replace('/','_')|regex_replace(':.*','')}}"
network_mode: host
pull: yes
restart_policy: always
volumes:
- "{{ lookup('env','ANSIBLE_DISKS_NFS_PATH') }}:/shared"
- /etc/localtime:/etc/localtime:ro
- /var/run/docker.sock:/tmp/docker.sock:ro
tags: 'aws'
with_items: '{{ec2_tags.tags.services.split(" ")}}'
when: ec2_tags.tags is defined and ec2_tags.tags.env is defined and ec2_tags.tags.services is defined and ec2_tags.tags.user is defined
- name: aws - add docker containers to inventory
add_host:
name: "{{ec2_tags.tags.user}}_{{ec2_tags.tags.env}}_{{item|replace('/','_')|regex_replace(':.*','')}}"
ansible_connection: docker
changed_when: false
tags: 'aws'
with_items: '{{ec2_tags.tags.services.split(" ")}}'
when: ec2_tags.tags is defined and ec2_tags.tags.env is defined and ec2_tags.tags.services is defined and ec2_tags.tags.user is defined
- name: aws - run make deploy in docker containers
delegate_to: "{{ec2_tags.tags.user}}_{{ec2_tags.tags.env}}_{{item|replace('/','_')|regex_replace(':.*','')}}"
raw: "command -v make || exit 0 && make deploy CONTAINER={{ec2_tags.tags.user}}_{{ec2_tags.tags.env}}_{{item|replace('/','_')|regex_replace(':.*','')}} HOST={{ansible_ec2_local_ipv4}}"
tags: 'aws'
with_items: '{{ec2_tags.tags.services.split(" ")}}'
when: ec2_tags.tags is defined and ec2_tags.tags.env is defined and ec2_tags.tags.services is defined and ec2_tags.tags.user is defined
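To make the tag-driven tasks above concrete, here is a hypothetical example (tag values, registry and paths are placeholders, not part of this repository). An instance tagged user=acme, env=prod, services="nginx api:1.2", with docker_registry=registry.example.com, would get containers named acme_prod_nginx and acme_prod_api; the first one is roughly equivalent to:
``` bash
docker run -d --restart=always --network=host \
  -v "${ANSIBLE_DISKS_NFS_PATH}:/shared" \
  -v /etc/localtime:/etc/localtime:ro \
  -v /var/run/docker.sock:/tmp/docker.sock:ro \
  --name acme_prod_nginx \
  registry.example.com/acme/prod/nginx:latest
```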

7
ansible/roles/aws-cli/templates/aws_cli_config.j2

@@ -0,0 +1,7 @@
[default]
{% if aws_output_format|length %}
output = {{ aws_output_format }}
{% endif %}
{% if aws_region|length %}
region = {{ aws_region }}
{% endif %}

5
ansible/roles/aws-cli/templates/aws_cli_credentials.j2

@@ -0,0 +1,5 @@
{% if aws_access_key_id|length and aws_secret_access_key|length %}
[default]
aws_access_key_id = {{ aws_access_key_id }}
aws_secret_access_key = {{ aws_secret_access_key }}
{% endif %}

12
ansible/roles/disks/.gitrepo

@@ -0,0 +1,12 @@
; DO NOT EDIT (unless you know what you are doing)
;
; This subdirectory is a git "subrepo", and this file is maintained by the
; git-subrepo command. See https://github.com/git-commands/git-subrepo#readme
;
[subrepo]
remote = ssh://git@github.com/1001Pharmacies/ansible-disks
branch = master
commit = c0ac6978d715b461fbf20aca719cd5196bc60645
parent = d01cccd9bab3a63d60ba251e3719767635ccd5d2
method = merge
cmdver = 0.4.0

21
ansible/roles/disks/LICENSE

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Wizcorp
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

77
ansible/roles/disks/README.md

@@ -0,0 +1,77 @@
Disk
====
This role allows you to format extra disks and attach them to different mount points.
You can use it to move the data of different services to another disk.
Configuration
-------------
### Inventory
Because the configuration for additional disks must be stored using the YAML
syntax, you have to write it in a `group_vars` directory.
```yaml
# inventory/group_vars/GROUP_NAME
disks_additional_disks:
- disk: /dev/sdb
fstype: ext4
mount_options: defaults
mount: /data
user: www-data
group: www-data
disable_periodic_fsck: false
- disk: /dev/nvme0n1
part: /dev/nvme0n1p1
fstype: xfs
mount_options: defaults,noatime
mount: /data2
- device_name: /dev/sdf
fstype: ext4
mount_options: defaults
mount: /data
- disk: nfs-host:/nfs/export
fstype: nfs
mount_options: defaults,noatime
mount: /mnt/nfs
```
* `disk` is the device you want to mount.
* `part` is the first partition name. If not specified, `1` will be appended to the disk name.
* `fstype` allows you to choose the filesystem to use with the new disk.
* `mount_options` allows you to specify custom mount options.
* `mount` is the directory where the new disk should be mounted.
* `user` sets owner of the mount directory (default: `root`).
* `group` sets group of the mount directory (default: `root`).
* `disable_periodic_fsck` deactivates the periodic ext3/4 filesystem check for the new disk.
You can add:
* `disks_package_use` is the required package manager module to use (yum, apt, etc). The default 'auto' will use existing facts or try to autodetect it.
The following filesystems are currently supported:
- [btrfs](http://en.wikipedia.org/wiki/BTRFS) *
- [ext2](http://en.wikipedia.org/wiki/Ext2)
- [ext3](http://en.wikipedia.org/wiki/Ext3)
- [ext4](http://en.wikipedia.org/wiki/Ext4)
- [nfs](http://en.wikipedia.org/wiki/Network_File_System) *
- [xfs](http://en.wikipedia.org/wiki/XFS) *
*) Note: To use these filesystems you have to define and install additional software packages. Please determine the right package names for your operating system.
```yaml
# inventory/group_vars/GROUP_NAME
disks_additional_packages:
- xfsprogs # package for mkfs.xfs on RedHat / Ubuntu
- btrfs-progs # package for mkfs.btrfs on CentOS / Debian
disks_additional_services:
- rpc.statd # start rpc.statd service for nfs
```
How it works
------------
It uses `sfdisk` to partition the disk with a single primary partition spanning the entire disk.
The specified filesystem will then be created with `mkfs`.
Finally the new partition will be mounted to the specified mount path.
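For one ext4 disk, a rough manual equivalent of what the role automates (device names and mount point are illustrative; note that the tasks in this repository use parted rather than sfdisk):
``` bash
parted -a optimal --script /dev/sdb mklabel gpt mkpart primary 2048s 100%
mkfs.ext4 /dev/sdb1
uuid=$(blkid -s UUID -o value /dev/sdb1)          # UUID used as the fstab source
mkdir -p /data
echo "UUID=${uuid} /data ext4 defaults 0 0" >> /etc/fstab
mount /data
```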

8
ansible/roles/disks/defaults/main.yml

@@ -0,0 +1,8 @@
---
# Additional disks that need to be formatted and mounted.
# See README for syntax and usage.
disks_additional_disks: []
disks_additional_packages: []
disks_additional_services: []
disks_discover_aws_nvme_ebs: False
disks_package_use: auto

21
ansible/roles/disks/handlers/main.yml

@@ -0,0 +1,21 @@
---
# file: handlers/main.yml
- name: restart services
with_together:
- '{{ disks_additional_disks }}'
- '{{ disks_additional_disks_handler_notify.results }}'
service:
name: "{{item.0.service}}"
state: restarted
when: item.1.changed and item.0.service is defined
- name: restart services - nfs
with_together:
- '{{ disks_additional_disks }}'
- '{{ disks_additional_disks_nfs_handler_notify.results }}'
service:
name: "{{item.0.service}}"
state: restarted
when: item.1.changed and item.0.service is defined

182
ansible/roles/disks/library/disks_ebs_config.py

@@ -0,0 +1,182 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
from ctypes import *
from fcntl import ioctl
from pathlib import Path
import json
import os
import subprocess
from ansible.module_utils.basic import *
module = AnsibleModule(argument_spec=dict(
config=dict(required=True, type='list'),
))
NVME_ADMIN_IDENTIFY = 0x06
NVME_IOCTL_ADMIN_CMD = 0xC0484E41
AMZN_NVME_VID = 0x1D0F
AMZN_NVME_EBS_MN = "Amazon Elastic Block Store"
class nvme_admin_command(Structure):
_pack_ = 1
_fields_ = [("opcode", c_uint8), # op code
("flags", c_uint8), # fused operation
("cid", c_uint16), # command id
("nsid", c_uint32), # namespace id
("reserved0", c_uint64),
("mptr", c_uint64), # metadata pointer
("addr", c_uint64), # data pointer
("mlen", c_uint32), # metadata length
("alen", c_uint32), # data length
("cdw10", c_uint32),
("cdw11", c_uint32),
("cdw12", c_uint32),
("cdw13", c_uint32),
("cdw14", c_uint32),
("cdw15", c_uint32),
("reserved1", c_uint64)]
class nvme_identify_controller_amzn_vs(Structure):
_pack_ = 1
_fields_ = [("bdev", c_char * 32), # block device name
("reserved0", c_char * (1024 - 32))]
class nvme_identify_controller_psd(Structure):
_pack_ = 1
_fields_ = [("mp", c_uint16), # maximum power
("reserved0", c_uint16),
("enlat", c_uint32), # entry latency
("exlat", c_uint32), # exit latency
("rrt", c_uint8), # relative read throughput
("rrl", c_uint8), # relative read latency
("rwt", c_uint8), # relative write throughput
("rwl", c_uint8), # relative write latency
("reserved1", c_char * 16)]
class nvme_identify_controller(Structure):
_pack_ = 1
_fields_ = [("vid", c_uint16), # PCI Vendor ID
("ssvid", c_uint16), # PCI Subsystem Vendor ID
("sn", c_char * 20), # Serial Number
("mn", c_char * 40), # Module Number
("fr", c_char * 8), # Firmware Revision
("rab", c_uint8), # Recommend Arbitration Burst
("ieee", c_uint8 * 3), # IEEE OUI Identifier
("mic", c_uint8), # Multi-Interface Capabilities
("mdts", c_uint8), # Maximum Data Transfer Size
("reserved0", c_uint8 * (256 - 78)),
("oacs", c_uint16), # Optional Admin Command Support
("acl", c_uint8), # Abort Command Limit
("aerl", c_uint8), # Asynchronous Event Request Limit
("frmw", c_uint8), # Firmware Updates
("lpa", c_uint8), # Log Page Attributes
("elpe", c_uint8), # Error Log Page Entries
("npss", c_uint8), # Number of Power States Support
("avscc", c_uint8), # Admin Vendor Specific Command Configuration
("reserved1", c_uint8 * (512 - 265)),
("sqes", c_uint8), # Submission Queue Entry Size
("cqes", c_uint8), # Completion Queue Entry Size
("reserved2", c_uint16),
("nn", c_uint32), # Number of Namespaces
("oncs", c_uint16), # Optional NVM Command Support
("fuses", c_uint16), # Fused Operation Support
("fna", c_uint8), # Format NVM Attributes
("vwc", c_uint8), # Volatile Write Cache
("awun", c_uint16), # Atomic Write Unit Normal
("awupf", c_uint16), # Atomic Write Unit Power Fail
("nvscc", c_uint8), # NVM Vendor Specific Command Configuration
("reserved3", c_uint8 * (704 - 531)),
("reserved4", c_uint8 * (2048 - 704)),
("psd", nvme_identify_controller_psd * 32), # Power State Descriptor
("vs", nvme_identify_controller_amzn_vs)] # Vendor Specific
class ebs_nvme_device:
def __init__(self, device):
self.device = device
self.ctrl_identify()
def _nvme_ioctl(self, id_response, id_len):
admin_cmd = nvme_admin_command(opcode = NVME_ADMIN_IDENTIFY,
addr = id_response,
alen = id_len,
cdw10 = 1)
with open(self.device, "w") as nvme:
ioctl(nvme, NVME_IOCTL_ADMIN_CMD, admin_cmd)
def ctrl_identify(self):
self.id_ctrl = nvme_identify_controller()
self._nvme_ioctl(addressof(self.id_ctrl), sizeof(self.id_ctrl))
def is_ebs(self):
if self.id_ctrl.vid != AMZN_NVME_VID:
return False
if self.id_ctrl.mn.strip() != AMZN_NVME_EBS_MN:
return False
return True
def get_volume_id(self):
vol = self.id_ctrl.sn.decode('utf-8')
if vol.startswith("vol") and vol[3] != "-":
vol = "vol-" + vol[3:]
return vol.strip()
def get_block_device(self, stripped=False):
dev = self.id_ctrl.vs.bdev.decode('utf-8')
if stripped and dev.startswith("/dev/"):
dev = dev[5:]
return dev.strip()
def update_disk(disk, mapping):
if 'device_name' not in disk:
return disk
device_name = disk['device_name'][5:]
if device_name not in mapping:
return disk
volume_id = mapping[device_name]
link_path = '/dev/disk/by-id/nvme-Amazon_Elastic_Block_Store_vol%s' % volume_id[4:]
resolved = str(Path(link_path).resolve())
new_disk = dict(disk)
new_disk['disk'] = resolved
new_disk['part'] = '%sp1' % resolved
return new_disk
def main():
src_config = module.params['config']
lsblkOutput = subprocess.check_output(['lsblk', '-J'])
lsblk = json.loads(lsblkOutput.decode('utf-8'))
mapping = {}
for blockdevice in lsblk['blockdevices']:
try:
dev = ebs_nvme_device('/dev/%s' % blockdevice['name'])
except OSError:
continue
except IOError:
continue
if not dev.is_ebs():  # skip NVMe devices that are not Amazon EBS volumes
continue
mapping[dev.get_block_device()] = dev.get_volume_id()
new_config = [
update_disk(disk, mapping) for disk in src_config
]
facts = {'blockDeviceMapping': mapping, 'config': new_config, 'source_config': src_config}
result = {"changed": False, "ansible_facts": facts}
module.exit_json(**result)
main()

21
ansible/roles/disks/meta/main.yml

@@ -0,0 +1,21 @@
galaxy_info:
author: Emilien Kenler <ekenler@wizcorp.jp>
description: This role allows setting up extra disks and their mount points
company: Wizcorp K.K.
license: MIT
min_ansible_version: 2.0.0
platforms:
- name: EL
versions:
- 6
- 7
- name: Debian
versions:
- wheezy
- jessie
- name: Ubuntu
versions:
- all
categories:
- system
dependencies: []

173
ansible/roles/disks/tasks/main.yml

@@ -0,0 +1,173 @@
- name: 'Install Python PIP'
package: >
name=py3-pip
state=present
when: ansible_os_family|lower == "alpine"
- name: 'Install Python PIP'
package: >
name=python-pip
state=present
when: ansible_os_family|lower != "alpine"
- name: 'Install python-pathlib'
pip: >
name=pathlib
state=present
- name: "Discover NVMe EBS"
disks_ebs_config:
config: "{{ disks_additional_disks }}"
register: __disks_ebs_config
when: disks_discover_aws_nvme_ebs | default(True) | bool
- set_fact:
disks_additional_disks: "{{ disks_additional_disks|default([]) + __disks_ebs_config['ansible_facts']['config'] }}"
when: __disks_ebs_config is defined and 'ansible_facts' in __disks_ebs_config
- name: "Install parted"
package:
name: parted
state: present
use: '{{ disks_package_use }}'
when: disks_additional_disks
tags: ['disks', 'pkgs']
- name: "Install additional fs progs"
package:
name: "{{ item }}"
state: present
with_items: "{{ disks_additional_packages|default([]) }}"
when: disks_additional_packages is defined
tags: ['disks', 'pkgs']
- name: disks - start additional services
service:
name: "{{item}}"
enabled: yes
state: started
with_items: "{{ disks_additional_services|default([]) }}"
tags: ['disks', 'pkgs']
- name: "Get disk alignment for disks"
shell: |
if
[[ -e /sys/block/{{ item.disk | basename }}/queue/optimal_io_size && -e /sys/block/{{ item.disk | basename }}/alignment_offset && -e /sys/block/{{ item.disk | basename }}/queue/physical_block_size ]];
then
echo $[$(( ($(cat /sys/block/{{ item.disk | basename }}/queue/optimal_io_size) + $(cat /sys/block/{{ item.disk | basename }}/alignment_offset)) / $(cat /sys/block/{{ item.disk | basename }}/queue/physical_block_size) )) | 2048];
else
echo 2048;
fi
args:
creates: '{{ item.part | default(item.disk + "1") }}'
executable: '/bin/bash'
with_items: '{{ disks_additional_disks }}'
register: disks_offset
tags: ['disks']
- name: "Ensure the disk exists"
stat:
path: '{{ item.disk }}'
with_items: '{{ disks_additional_disks }}'
register: disks_stat
changed_when: False
tags: ['disks']
- name: "Partition additional disks"
shell: |
if
[ -b {{ item.disk }} ]
then
[ -b {{ item.part | default(item.disk + "1") }} ] || parted -a optimal --script "{{ item.disk }}" mklabel gpt mkpart primary {{ disks_offset.stdout|default("2048") }}s 100% && sleep 5 && partprobe {{ item.disk }}; sleep 5
fi
args:
creates: '{{ item.part | default(item.disk + "1") }}'
executable: '/bin/bash'
with_items: '{{ disks_additional_disks }}'
tags: ['disks']
- name: "Create filesystem on the first partition"
filesystem:
dev: '{{ item.0.part | default(item.0.disk + "1") }}'
force: '{{ item.0.force|d(omit) }}'
fstype: '{{ item.0.fstype }}'
opts: '{{ item.0.fsopts|d(omit) }}'
with_together:
- '{{ disks_additional_disks }}'
- '{{ disks_stat.results }}'
when: item.1.stat.exists
tags: ['disks']
- name: "Disable periodic fsck and reserved space on ext3 or ext4 formatted disks"
environment:
PATH: "{{ ansible_env.PATH }}:/usr/sbin:/sbin"
shell: tune2fs -c0 -i0 -m0 {{ item.0.part | default(item.0.disk + "1") }}
with_together:
- '{{ disks_additional_disks }}'
- '{{ disks_stat.results }}'
when: "disks_additional_disks and ( item.0.fstype == 'ext4' or item.0.fstype == 'ext3' ) and item.0.disable_periodic_fsck|default(false)|bool and item.1.stat.exists"
tags: ['disks']
- name: "Ensure the mount directory exists"
file:
path: '{{ item.mount }}'
state: directory
with_items: '{{ disks_additional_disks }}'
tags: ['disks']
- name: "Get UUID for partition"
environment:
PATH: "{{ ansible_env.PATH }}:/usr/sbin:/sbin"
command: blkid -s UUID -o value {{ item.0.part | default(item.0.disk + "1") }}
check_mode: no
register: disks_blkid
with_together:
- '{{ disks_additional_disks }}'
- '{{ disks_stat.results }}'
changed_when: False
when: item.1.stat.exists
tags: ['disks']
- name: "Mount additional disks"
mount:
name: '{{ item.0.mount }}'
fstype: '{{ item.0.fstype }}'
opts: '{{ item.0.mount_options|d(omit) }}'
passno: '0'
src: 'UUID={{ item.1.stdout }}'
state: '{{ item.0.mount_state|d("mounted") }}'
with_together:
- '{{ disks_additional_disks }}'
- '{{ disks_blkid.results }}'
- '{{ disks_stat.results }}'
when: item.2.stat.exists
tags: ['disks']
register: disks_additional_disks_handler_notify
notify:
- restart services
- name: "Mount additional disks - nfs"
mount:
name: '{{ item.mount }}'
fstype: '{{ item.fstype }}'
opts: '{{ item.mount_options|d(omit) }}'
src: '{{ item.disk }}'
state: '{{ item.mount_state|d("mounted") }}'
when: item.fstype == 'nfs'
with_items: '{{ disks_additional_disks }}'
tags: ['disks']
register: disks_additional_disks_nfs_handler_notify
notify:
- restart services - nfs
- name: "Ensure the permissions are set correctly"
file:
path: '{{ item.mount }}'
owner: '{{ item.user | default("root") }}'
group: '{{ item.group | default("root") }}'
state: directory
with_items: '{{ disks_additional_disks }}'
when: item.user is defined or item.group is defined
tags: ['disk']
- meta: flush_handlers

12
ansible/roles/docker/.gitrepo

@@ -0,0 +1,12 @@
; DO NOT EDIT (unless you know what you are doing)
;
; This subdirectory is a git "subrepo", and this file is maintained by the
; git-subrepo command. See https://github.com/git-commands/git-subrepo#readme
;
[subrepo]
remote = ssh://git@github.com/1001Pharmacies/ansible-docker
branch = master
commit = 6217a899084cba00447195d1873b211462b60d52
parent = 4745dad8cb8a826ee3ac47accda79f96957b5e13
method = merge
cmdver = 0.4.0

4
ansible/roles/docker/AUTHORS.md

@@ -0,0 +1,4 @@
# Authors
* **Yann Autissier** - *Initial work* - [aya](https://github.com/aya)

9
ansible/roles/docker/CHANGELOG.md

@@ -0,0 +1,9 @@
# Changelog
## v1.0.0 (December 20, 2016)
Initial release.
* Install docker daemon
* Start and enable docker service at boot
* Build and run docker images

20
ansible/roles/docker/LICENSE

@@ -0,0 +1,20 @@
MIT License
Copyright (c) 2016 Yann Autissier
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

237
ansible/roles/docker/README.md

@@ -0,0 +1,237 @@
# Ansible role to run dockers
An ansible role to install the [docker](https://www.docker.com/) daemon and build and run dockers.
It installs the docker daemon and ensures it is up and running.
It sets the STORAGE_DRIVER if the docker host uses systemd and it configures the MTU to 1450 if it is a VM running on OpenStack.
It builds and runs docker images on the docker host.
## Requirements
This Ansible role requires at least Ansible version 1.9.
## Role Variables
* `docker_check_kernel` - The minimum kernel version allowed on hosts to run docker.
``` yaml
# minimum kernel version
docker_check_kernel: '3.10'
```
* `docker_check_machine` - The hosts architecture needed to run docker.
``` yaml
# architecture
docker_check_machine: 'x86_64'
```
* `docker_package` - The name of the docker package.
``` yaml
# The docker package name
docker_package: docker
```
* `docker_packages` - A list of packages to install/remove before installing the docker package.
``` yaml
# A list of packages to install/remove
# docker_packages:
# - { "name": "docker", "state": "absent" }
```
* `docker_init_config_directory` - The location of the configuration file of the docker daemon init script.
``` yaml
# Location of configuration files loaded by the init script
docker_init_config_directory: "/etc/sysconfig"
```
* `docker_opts` - The name of the environment variable used to pass options to the docker daemon.
``` yaml
# docker daemon options environment variable
docker_opts: "OPTIONS"
```
* `docker_services` - A list of system services to start
``` yaml
# services
docker_services:
- docker
```
* `dockers` - A list of docker images to build and run on the docker host with the docker-build and docker-run commands
``` yaml
# dockers
# dockers:
# - nginx
```
* `docker_cluster` - An optional cluster name to pass to the docker-build and docker-run commands
``` yaml
# docker cluster
# docker_cluster: ""
```
* `docker_start` - Starts the dockers if set to true.
``` yaml
# Start docker
docker_start: true
```
* `docker_restart` - Restarts dockers when their image has been updated. It removes currently running dockers and starts new ones.
``` yaml
# Stop and remove running docker to start a new one when image has been updated
docker_restart: true
```
* `docker_force_restart` - Restarts dockers even if the image has not been updated. It removes currently running dockers and starts new ones.
``` yaml
# Stop and remove running docker to start a new one even if image has not been updated
docker_force_restart: false
```
## Helper scripts
This role comes with a few helper scripts. Here is a short description.
* `docker-build` - Build a docker image, reading options to pass to the `docker build` command from a Dockeropts file.
* `docker-cleanup` - Remove unused dockers.
* `docker-cleanup-images` - Remove unused docker images.
* `docker-cleanup-volumes` - Remove unused docker volumes.
* `docker-get-image` - Return sha256 of the image used by the docker.
* `docker-get-status` - Return the status of the docker.
* `docker-log-cleanup` - Empty the file logging the docker output on the docker host.
* `docker-log-truncate` - Truncate the file logging the docker output on the docker host.
* `docker-run` - Run a docker, reading options to pass to the `docker run` command from a Dockeropts file.
## Example
To launch this role on your `docker` hosts, run the default playbook.yml.
``` bash
$ ansible-playbook playbook.yml
```
### Build a docker image
On the docker hosts, you'll be able to build docker images and run dockers, based on Dockerfile and Dockeropts files located in the /etc/docker subdirectories.
To create an `nginx` docker image, create a directory /etc/docker/nginx with a Dockerfile and a Dockeropts file in it.
``` bash
# mkdir -p /etc/docker/nginx
# cat << EOF > /etc/docker/nginx/Dockerfile
FROM nginx:alpine
EOF
# cat << EOF > /etc/docker/nginx/Dockeropts
DOCKER_ULIMIT="nofile=65536"
DOCKER_PORT="80:80"
EOF
```
Build your `nginx` docker image, then run it! The docker-run command will read the Dockeropts file to add the --ulimit and -p options to the docker run command.
``` bash
# docker-build nginx && docker-run nginx
```
### Override your files
If you want to copy a file in your Dockerfile, say the default nginx.conf, you can use the DOCKER_BUILD_PREFIX and DOCKER_BUILD_SUFFIX variables to select different versions of this file depending on the context.
``` bash
# cat << EOF > /etc/docker/nginx/Dockerfile
FROM nginx:alpine
ARG DOCKER_BUILD_PREFIX
ARG DOCKER_BUILD_SUFFIX
COPY ./\${DOCKER_BUILD_PREFIX}nginx.conf\${DOCKER_BUILD_SUFFIX} /etc/nginx/nginx.conf
EOF
```
You can now override the nginx configuration file when you build your image.
* Without option, the docker-build command will search for the file beside your Dockerfile.
``` bash
# docker-build nginx && docker-run nginx
```
Both DOCKER_BUILD_PREFIX and DOCKER_BUILD_SUFFIX variables are empty, the Dockerfile will search for a `./nginx.conf` file, ie the /etc/docker/nginx/nginx.conf file.
* With a -c|--cluster option, the docker-build command will search for the file in a subdirectory below your Dockerfile.
``` bash
# docker-build -c custom nginx && docker-run -c custom nginx
```
The DOCKER_BUILD_PREFIX variable is populated with 'custom/' to force the Dockerfile to search for a `./custom/nginx.conf` file, ie /etc/docker/nginx/custom/nginx.conf file.
* With an image name suffixed with a dash, the docker-build command will search for a suffixed file as well.
``` bash
# docker-build -c custom nginx-develop && docker-run -c custom nginx-develop
```
The DOCKER_BUILD_PREFIX variable is populated with 'custom/' and the DOCKER_BUILD_SUFFIX variable is populated with '-develop' to force the Dockerfile to search for a `./custom/nginx.conf-develop` file, ie /etc/docker/nginx/custom/nginx.conf-develop file.
### Override your options
The same override principle can be used for the Dockerfile and the Dockeropts file when using the docker-build and docker-run commands.
You can create a /etc/docker/nginx/custom/Dockeropts file that overrides your default Dockeropts file, and a /etc/docker/nginx/custom/Dockeropts-develop file that overrides both other files.
The Dockeropts file accepts the following options.
* `SYSCTL` - values to set on the docker host via the sysctl command before running the docker
* `DOCKER_ARGS` - values to pass to the docker build command with --build-arg options
* `DOCKER_ENV` - values to pass to the docker run command with -e options
* `DOCKER_LINK` - values to pass to the docker run command with --link options
* `DOCKER_OPT` - values to pass to the docker run command with prefixed by --
* `DOCKER_PORT` - values to pass to the docker run command with -p options
* `DOCKER_ULIMIT` - values to pass to the docker run command with --ulimit options
* `DOCKER_VOLUME` - values to pass to the docker run command with -v options
* `HOST_VOLUME` - volumes to allow write access to from the docker on selinux enabled host
Overriding options is done in several passes, reading options from the more specific to the more generic file. In our example, files are read in this order:
/etc/docker/nginx/custom/Dockeropts-develop
/etc/docker/nginx/custom/Dockeropts
/etc/docker/nginx/Dockeropts
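As an illustration, a hypothetical cluster-specific override file (the helper scripts source these files, so they use shell variable syntax; the values below are examples, not defaults of this role):
``` bash
# /etc/docker/nginx/custom/Dockeropts-develop (hypothetical)
SYSCTL="net.core.somaxconn=1024"
DOCKER_ENV="ENV=develop"
DOCKER_PORT="8080:80"
DOCKER_VOLUME="/srv/www:/usr/share/nginx/html:ro"
```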
## Common configuration
The following configuration builds and runs the docker image 'nginx-develop' for the 'custom' cluster described in our example.
The Dockerfile and Dockeropts files needed in the /etc/docker/nginx directory should be present on the docker host, likely synchronised by another ansible role.
``` yaml
docker_cluster: "custom"
dockers:
- nginx-develop
```
## Tests
To test this role on your `docker` hosts, run the tests/playbook.yml playbook.
``` bash
$ ansible-playbook tests/playbook.yml
```
## Limitations
This role is known to work on Ubuntu, Debian, CentOS and Alpine Linux.

41
ansible/roles/docker/defaults/main.yml

@@ -0,0 +1,41 @@
---
# file: defaults/main.yml
# minimum kernel version
docker_check_kernel: '3.10'
# architecture
docker_check_machine: 'x86_64'
# The docker package name
docker_package: docker
# A list of packages to install/remove
# docker_packages:
# - { "name": "docker", "state": "absent" }
# Location of configuration files loaded by the init script
docker_init_config_directory: "/etc/sysconfig"
# docker daemon options environment variable
docker_opts: "OPTIONS"
# services
docker_services:
- docker
# dockers
# dockers:
# - nginx
# docker cluster
# docker_cluster: ""
# Start docker
docker_start: true
# Stop and remove running docker to start a new one when image has been updated
docker_restart: true
# Stop and remove running docker to start a new one even if image has not been updated
docker_force_restart: false

1
ansible/roles/docker/files/etc/sysctl.d/docker.conf

@@ -0,0 +1 @@
kernel.pax.softmode=1

133
ansible/roles/docker/files/usr/local/bin/docker-build

@@ -0,0 +1,133 @@
#!/bin/bash
# Author: Yann Autissier <yann.autissier@gmail.com>
DOCKER_IMAGE_REPOSITORY="centile"
DOCKER_BUILD_DIRECTORY="/etc/docker"
usage() {
echo Usage: $0 [-c cluster] [-f] [-q] [-t] image [image [...]]
echo -e "Build a docker image in the '${DOCKER_IMAGE_REPOSITORY}' repository."
echo
echo -e "image\tis a directory with a Dockerfile, default in '${DOCKER_BUILD_DIRECTORY}/image'."
echo -e "\t'image' can contains a dash. The suffixed part after the dash is taken into account"
echo -e "\tin the image name but not in the name of the directory containing the Dockerfile."
echo -e "\tsuffix will be available in your Dockerfile in the DOCKER_BUILD_SUFFIX build-arg."
echo
echo -e "Options:"
echo -e "\t-c 'cluster'\tAllow to override files in 'image' directory with existing files in"
echo -e "\t\t\tthe 'image/cluster' directory. 'cluster' will be available in your"
echo -e "\t\t\tDockerfile in the DOCKER_BUILD_PREFIX build-arg."
echo -e "\t-f\t\tforce build, do not use cache when building image."
echo -e "\t-q\t\tquiet mode, minimal output."
echo -e "\t-t\t\ttest mode, do nothing but output the command that would haev been launched."
echo
echo -e "EXAMPLES"
echo
echo -e "$0 elk"
echo -e "Build a docker image named '${DOCKER_IMAGE_REPOSITORY}/elk' with Dockerfile ${DOCKER_BUILD_DIRECTORY}/elk/Dockerfile"
echo
echo -e "$0 elk-es01"
echo -e "Build a docker image named '${DOCKER_IMAGE_REPOSITORY}/elk-es01' with Dockerfile ${DOCKER_BUILD_DIRECTORY}/elk/Dockerfile"
echo -e "and build-arg DOCKER_BUILD_SUFFIX=-es01"
echo
echo -e "$0 -c elisa-sdc elk-es01"
echo -e "Build a docker image named '${DOCKER_IMAGE_REPOSITORY}/elk-es01' with Dockerfile ${DOCKER_BUILD_DIRECTORY}/elk/Dockerfile,"
echo -e "build-arg DOCKER_BUILD_PREFIX=elisa-sdc/ and build-arg DOCKER_BUILD_SUFFIX=-es01"
echo
exit 1
}
while [ $# -gt 0 ]; do
case $1 in
-c|--cluster) shift && CLUSTER="$1"
;;
-f|--force) FORCE=1
;;
-t|--test) TEST=1
;;
-q|--quiet) QUIET=1
;;
-h|--help) usage
;;
*) args="${args:-} $1"
esac
shift
args="${args# }"
done
# check args
[ "${args:0:1}" = "-" ] && usage
# grsec/pax on alpine linux with docker < 1.12
[ -f /etc/alpine-release ] && while read major minor patch; do
if [ "${major}" -eq 1 ] && [ "${minor:-0}" -lt 12 ]; then
[ "$(sysctl -n kernel.grsecurity.chroot_deny_chmod 2>/dev/null)" = 1 ] && sysctl -w kernel.grsecurity.chroot_deny_chmod=0 2>/dev/null && grsec_disabled_chmod=1
[ "$(sysctl -n kernel.grsecurity.chroot_deny_mknod 2>/dev/null)" = 1 ] && sysctl -w kernel.grsecurity.chroot_deny_mknod=0 2>/dev/null && grsec_disabled_mknod=1
fi
done <<< $(apk version docker |awk -F '-' '/^docker/ {print $2}' |sed 's/\./ /g')
for arg in $args; do
# extract docker image name
image="$(basename ${arg})"
# keep part before the dash as the directory name
dir="$(dirname ${arg})/${image%-*}"
# keep part after the dash as an image suffix name
[ "${image##*-}" != "${image}" ] && suffix="${image##*-}"
# default to ${DOCKER_BUILD_DIRECTORY}/${dir} if ${dir} does not exists
[ ! -d "${dir}" ] && [ -d "${DOCKER_BUILD_DIRECTORY}/${dir}" ] && dir="${DOCKER_BUILD_DIRECTORY}/${dir#./}"
# directory exists && contains a Dockerfile
[ -d ${dir} ] && [ -f "${dir}/Dockerfile" ] || usage
# cluster directory exists
[ -n "${CLUSTER}" ] && { [ -d ${dir}/${CLUSTER} ] || usage; }
# search for Dockeropts files
files="${dir}/Dockeropts ${dir}/Dockeropts-${suffix}"
[ -n "${CLUSTER}" ] && files="${files} ${dir}/${CLUSTER}/Dockeropts ${dir}/${CLUSTER}/Dockeropts-${suffix}"
# source the Dockeropts files
for dockeropts in ${files}; do
[ -f "${dockeropts}" ] && . ${dockeropts}
done
# quiet build
[ ${QUIET} ] && DOCKER_BUILD_ARGS="--quiet" || DOCKER_BUILD_ARGS=""
# do not use cache
[ ${FORCE} ] && DOCKER_BUILD_ARGS="${DOCKER_BUILD_ARGS} --no-cache"
# extract DOCKER_ARGS
[ -n "${DOCKER_ARGS}" ] && for build_arg in ${DOCKER_ARGS}; do
DOCKER_BUILD_ARGS="${DOCKER_BUILD_ARGS} --build-arg ${build_arg}"
done
# add DOCKER_BUILD_PREFIX and DOCKER_BUILD_SUFFIX
[ -n "${CLUSTER}" ] && DOCKER_BUILD_ARGS="${DOCKER_BUILD_ARGS} --build-arg DOCKER_BUILD_PREFIX=${CLUSTER}/"
[ -n "${suffix}" ] && DOCKER_BUILD_ARGS="${DOCKER_BUILD_ARGS} --build-arg DOCKER_BUILD_SUFFIX=-${suffix}"
# search for Dockerfile
[ -n "${CLUSTER}" ] && files="${dir}/${CLUSTER}/Dockerfile-${suffix} ${dir}/${CLUSTER}/Dockerfile" || files=""
files="${files} ${dir}/Dockerfile-${suffix} ${dir}/Dockerfile"
# build docker image with 1st found Dockerfile
for dockerfile in ${files}; do
[ -f "${dockerfile}" ] || continue
[ ${QUIET} ] && [ ! ${TEST} ] && echo -n "${image} "
[ ! ${QUIET} ] && echo "Building image ${image}"
if [ ${TEST} ]; then
echo docker build ${DOCKER_BUILD_ARGS} -t ${DOCKER_IMAGE_REPOSITORY}/${image} -f ${dockerfile} ${dir}
else
docker build ${DOCKER_BUILD_ARGS} -t ${DOCKER_IMAGE_REPOSITORY}/${image} -f ${dockerfile} ${dir}
result=$?
fi
[ ${result:-0} -ge ${return:-0} ] && return=${result}
break
done
done
# grsec/pax
[ ${grsec_disabled_chmod} ] && sysctl -w kernel.grsecurity.chroot_deny_chmod=1 2>/dev/null
[ ${grsec_disabled_mknod} ] && sysctl -w kernel.grsecurity.chroot_deny_mknod=1 2>/dev/null
exit ${return:-1}

4
ansible/roles/docker/files/usr/local/bin/docker-cleanup

@@ -0,0 +1,4 @@
#!/bin/sh
# Author: Yann Autissier <yann.autissier@gmail.com>
docker ps -q --no-trunc --filter status=exited,status=created,status=dead |while read docker; do docker rm ${docker}; done

4
ansible/roles/docker/files/usr/local/bin/docker-cleanup-images