import files

This commit is contained in:
Yann Autissier 2021-02-09 17:05:00 +01:00
parent f5c4576411
commit 44a6d37ba5
425 changed files with 23195 additions and 0 deletions

3
.dockerignore Normal file
View File

@ -0,0 +1,3 @@
.git*
build/cache/*
build/iso/*

11
.env.dist Normal file
View File

@ -0,0 +1,11 @@
APP=yaip
APP_DOMAIN=${ENV}.${DOMAIN}
APP_HOST=${APP}.${APP_DOMAIN}
APP_NAME=${APP}
APP_PATH=/${ENV_SUFFIX}
APP_SCHEME=http
APP_URI=${APP_HOST}${APP_PATH}
APP_URL=${APP_SCHEME}://${APP_URI}
DOMAIN=localhost
ENV=dist
SSH_DIR=${HOME}/.ssh

2
.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
/.env
/build

16
Makefile Normal file
View File

@ -0,0 +1,16 @@
APP_TYPE := infra
include make/include.mk
##
# APP
app-build: build-rm infra-base
	$(call install-parameters,,curator,build)
	$(call make,docker-compose-build)
	$(call make,up)
	$(call make,docker-compose-exec ARGS='rm -Rf /root/.npm /log-buffer/*' SERVICE=logagent)
	$(call make,docker-commit)
app-deploy: deploy-ping
app-install: base node up

2
ansible/.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
playbook.retry
inventories/packer-provisioner-ansible*

9
ansible/ansible.cfg Normal file
View File

@ -0,0 +1,9 @@
[defaults]
inventory = inventories
roles_path = roles
filter_plugins = plugins/filter
host_key_checking = False
[ssh_connection]
scp_if_ssh = smart
pipelining = True

1711
ansible/ec2.py Executable file

File diff suppressed because it is too large

View File

@ -0,0 +1 @@
localhost ansible_host=host.docker.internal

View File

@ -0,0 +1,3 @@
---
# file: inventories/group_vars/all

View File

@ -0,0 +1,50 @@
---
# file: inventories/host_vars/default
aws_access_key_id: "{{ lookup('env','ANSIBLE_AWS_ACCESS_KEY_ID') }}"
aws_output_format: "{{ lookup('env','ANSIBLE_AWS_DEFAULT_OUTPUT') or 'json' }}"
aws_region: "{{ lookup('env','ANSIBLE_AWS_DEFAULT_REGION') or 'eu-west-1' }}"
aws_secret_access_key: "{{ lookup('env','ANSIBLE_AWS_SECRET_ACCESS_KEY') }}"
disks_additional_packages:
- e2fsprogs-extra
- nfs-utils
hosts_enable_cloudinit: false
hosts_enable_local: true
hosts_enable_rc: true
hosts_enable_zram: true
hosts_git_repositories:
- { "repo": "{{ lookup('env','ANSIBLE_GIT_REPOSITORY') }}", "dest": "{{ lookup('env','ANSIBLE_GIT_DIRECTORY') }}", "key_file": "{{ lookup('env','ANSIBLE_GIT_KEY_FILE') or '~/.ssh/id_rsa' }}", "version": "{{ lookup('env','ANSIBLE_GIT_VERSION') }}" }
hosts_packages:
- { "name": "ansible", "state": "present" }
- { "name": "coreutils", "state": "present" }
- { "name": "curl", "state": "present" }
- { "name": "git", "state": "present" }
- { "name": "groff", "state": "present" }
- { "name": "htop", "state": "present" }
- { "name": "less", "state": "present" }
- { "name": "lsof", "state": "present" }
- { "name": "make", "state": "present" }
- { "name": "openssh-client", "state": "present" }
- { "name": "util-linux", "state": "present" }
- { "name": "vim", "state": "present" }
- { "name": "zsh", "state": "present" }
hosts_ssh_private_keys:
- "{{ lookup('env','ANSIBLE_SSH_PRIVATE_KEY') or '~/.ssh/id_rsa' }}"
hosts_ssh_users:
- aya
hosts_user_env:
- ANSIBLE_AWS_ACCESS_KEY_ID
- ANSIBLE_AWS_SECRET_ACCESS_KEY
- ANSIBLE_CONFIG
- ANSIBLE_DISKS_NFS_DISK
- ANSIBLE_DISKS_NFS_OPTIONS
- ANSIBLE_DISKS_NFS_PATH
- ANSIBLE_DOCKER_IMAGE_TAG
- ANSIBLE_DOCKER_REGISTRY
- ANSIBLE_EXTRA_VARS
- ANSIBLE_GIT_DIRECTORY
- ANSIBLE_GIT_KEY_FILE
- ANSIBLE_GIT_REPOSITORY
- ANSIBLE_INVENTORY
- ANSIBLE_PLAYBOOK
- ENV

View File

@ -0,0 +1,25 @@
---
# file: inventories/host_vars/localhost
aws_access_key_id: "{{ lookup('env','ANSIBLE_AWS_ACCESS_KEY_ID') }}"
aws_output_format: "{{ lookup('env','ANSIBLE_AWS_DEFAULT_OUTPUT') or 'json' }}"
aws_region: "{{ lookup('env','ANSIBLE_AWS_DEFAULT_REGION') or 'eu-west-1' }}"
aws_secret_access_key: "{{ lookup('env','ANSIBLE_AWS_SECRET_ACCESS_KEY') }}"
disks_additional_disks:
  - disk: /dev/xvdb
    disable_periodic_fsck: true
    fstype: ext4
    mount_options: defaults
    mount: /var/lib/docker
    service: docker
  - disk: "{{ lookup('env','ANSIBLE_DISKS_NFS_DISK') }}"
    fstype: nfs
    mount_options: "{{ lookup('env','ANSIBLE_DISKS_NFS_OPTIONS') }}"
    mount: "{{ lookup('env','ANSIBLE_DISKS_NFS_PATH') }}"
disks_additional_services:
- rpc.statd
docker_image_tag: "{{ lookup('env','ANSIBLE_DOCKER_IMAGE_TAG') or 'latest' }}"
docker_registry: "{{ lookup('env','ANSIBLE_DOCKER_REGISTRY') }}"
hosts_enable_local: true
hosts_enable_rc: true
hosts_enable_zram: true

29
ansible/playbook.yml Normal file
View File

@ -0,0 +1,29 @@
---
# file: playbook.yml
# bootstrap hosts
- hosts: default
  gather_facts: false
  pre_tasks:
    - name: raw - install ansible requirements for alpine linux
      raw: "[ -f /etc/alpine-release ] && /sbin/apk update && { which python3 >/dev/null 2>&1 || /sbin/apk add python3; } && { which sudo >/dev/null 2>&1 || /sbin/apk add sudo; } && { /bin/tar --version 2>/dev/null |grep busybox >/dev/null && /sbin/apk add tar; } && { ls /usr/lib/ssh/sftp-server >/dev/null 2>&1 || /sbin/apk add openssh-sftp-server; } || true"
# install default packages and user settings
- import_playbook: playbooks/hosts.yml
  tags:
    - hosts
# mount additional disks
- import_playbook: playbooks/disks.yml
  tags:
    - disks
# install docker
- import_playbook: playbooks/docker.yml
  tags:
    - docker
# install aws cli
- import_playbook: playbooks/aws-cli.yml
  tags:
    - aws-cli

View File

@ -0,0 +1,6 @@
---
# file: playbooks/aws-cli.yml
- hosts: '{{ target | default("all") }}'
  roles:
    - aws-cli

View File

@ -0,0 +1,6 @@
---
# file: playbooks/disks.yml
- hosts: '{{ target | default("all") }}'
  roles:
    - disks

View File

@ -0,0 +1,6 @@
---
# file: playbooks/docker.yml
- hosts: '{{ target | default("all") }}'
  roles:
    - docker

View File

@ -0,0 +1,6 @@
---
# file: playbooks/hosts.yml
- hosts: '{{ target | default("all") }}'
  roles:
    - hosts

View File

@ -0,0 +1,12 @@
; DO NOT EDIT (unless you know what you are doing)
;
; This subdirectory is a git "subrepo", and this file is maintained by the
; git-subrepo command. See https://github.com/git-commands/git-subrepo#readme
;
[subrepo]
remote = ssh://git@github.com/1001Pharmacies/ansible-aws-cli
branch = master
commit = f10e38af3a9b36648576f9850e0d09fcc7a057df
parent = 9ee8bfab9d2f5e5591c2e8a3d6f3a03b56b36196
method = merge
cmdver = 0.4.0

View File

@ -0,0 +1,35 @@
# DEPRECATION NOTICE
We have moved away from Ansible and are in the process of removing or transferring ownership of our Ansible repositories. If you rely on this repository directly, please make arrangements to replace this dependency with your own fork.
# AWS CLI role for Ansible
Installs and configures the AWS CLI for conveniently interacting with AWS services such as S3.
## Requirements
- Tested on Ubuntu 12.04 Server;
- Ansible 2.0+
## Role Variables
The default variables are as follows:

    aws_output_format: 'json'
    aws_region: 'ap-southeast-2'
    aws_access_key_id: 'YOUR_ACCESS_KEY_ID'
    aws_secret_access_key: 'YOUR_SECRET_ACCESS_KEY'

## Example Playbook

    - hosts: 'servers'
      roles:
        - role: 'dstil.aws-cli'
          aws_output_format: 'json'
          aws_region: 'ap-southeast-2'
          aws_access_key_id: 'SUPER_SECRET_ACCESS_KEY_ID' # Don't version this or put it on pastebin
          aws_secret_access_key: 'SUPER_SECRET_ACCESS_KEY' # Ditto
# License
This playbook is provided 'as-is' under the conditions of the BSD license. No fitness for purpose is guaranteed or implied.

View File

@ -0,0 +1,7 @@
---
aws_cli_user: "{{ ansible_user|default('root') }}"
aws_cli_group: "{{ ansible_user|default('root') }}"
aws_output_format: 'json'
aws_region: 'eu-west-1'
aws_access_key_id: 'YOUR_ACCESS_KEY_ID'
aws_secret_access_key: 'YOUR_SECRET_ACCESS_KEY'

View File

@ -0,0 +1,14 @@
---
galaxy_info:
  author: 'Rohan Liston'
  description: 'Installs and configures the AWS CLI for conveniently interacting with AWS services such as S3.'
  company: 'DSTIL'
  license: 'BSD'
  min_ansible_version: 2.0
  platforms:
    - name: 'Ubuntu'
      versions:
        - 'precise'
  categories:
    - 'development'
dependencies: []

View File

@ -0,0 +1,144 @@
---
- name: 'Install AWS CLI'
tags: 'aws-cli'
become: 'yes'
pip: >
executable=pip
name=awscli
state=present
extra_args=--no-cache-dir
- name: 'Install docker python'
tags: 'aws-cli'
become: 'yes'
pip: >
name=docker
state=present
extra_args=--no-cache-dir
- name: 'Install boto python'
tags: 'aws-cli'
become: 'yes'
pip: >
name=boto3
state=present
extra_args=--no-cache-dir
- name: Set home directory of the user
set_fact:
home_dir: /home/{{ aws_cli_user }}
when: "not aws_cli_user == 'root'"
- name: Set home directory for root
set_fact:
home_dir: /root
when: "aws_cli_user == 'root'"
- name: 'Create the AWS config directory'
tags: 'aws-cli'
become: 'yes'
file: >
path={{ home_dir }}/.aws
state=directory
owner={{ aws_cli_user }}
group={{ aws_cli_group }}
mode=0755
- name: 'Copy AWS CLI config'
tags: 'aws-cli'
become: 'yes'
template: >
src=aws_cli_config.j2
dest={{ home_dir }}/.aws/config
owner={{ aws_cli_user }}
group={{ aws_cli_group }}
mode=0600
force=yes
- name: 'Copy AWS CLI credentials'
tags: 'aws-cli'
become: 'yes'
template: >
src=aws_cli_credentials.j2
dest={{ home_dir }}/.aws/credentials
owner={{ aws_cli_user }}
group={{ aws_cli_group }}
mode=0600
force=yes
- name: aws - check AWS meta-data URI
uri:
url: http://169.254.169.254/latest/meta-data
timeout: 1
register: aws_uri_check
tags: 'aws'
failed_when: False
- name: aws - get instance metadata
tags: 'aws'
ec2_metadata_facts:
when: aws_uri_check.status == 200
- name: aws - get instance tags
tags: 'aws'
ec2_tag:
aws_access_key: "{{ aws_access_key_id }}"
aws_secret_key: "{{ aws_secret_access_key }}"
region: "{{ ansible_ec2_placement_region }}"
resource: "{{ ansible_ec2_instance_id }}"
state: list
register: ec2_tags
when: ansible_ec2_instance_id is defined
- name: aws - set hostname
hostname: name="{{ ec2_tags.tags.hostname }}{% if ec2_tags.tags.domainname is defined %}.{{ ec2_tags.tags.domainname }}{% endif %}"
tags: 'aws'
when: ec2_tags.tags is defined and ec2_tags.tags.hostname is defined
- name: aws - ecr login
shell: "$(aws ecr get-login --no-include-email --region {{ aws_region }})"
tags: 'aws'
when: ec2_tags.tags is defined
- name: aws - prune docker objects (including non-dangling images)
docker_prune:
containers: yes
images: yes
images_filters:
dangling: false
networks: yes
volumes: yes
builder_cache: yes
tags: 'aws'
- name: aws - launch docker containers
docker_container:
image: "{{docker_registry|default(ec2_tags.tags.user)}}/{{ec2_tags.tags.user}}/{{ec2_tags.tags.env}}/{% if ':' in item %}{{item}}{% else %}{{item}}:{{docker_image_tag|default('latest')}}{% endif %}"
name: "{{ec2_tags.tags.user}}_{{ec2_tags.tags.env}}_{{item|replace('/','_')|regex_replace(':.*','')}}"
network_mode: host
pull: yes
restart_policy: always
volumes:
- "{{ lookup('env','ANSIBLE_DISKS_NFS_PATH') }}:/shared"
- /etc/localtime:/etc/localtime:ro
- /var/run/docker.sock:/tmp/docker.sock:ro
tags: 'aws'
with_items: '{{ec2_tags.tags.services.split(" ")}}'
when: ec2_tags.tags is defined and ec2_tags.tags.env is defined and ec2_tags.tags.services is defined and ec2_tags.tags.user is defined
- name: aws - add docker containers to inventory
add_host:
name: "{{ec2_tags.tags.user}}_{{ec2_tags.tags.env}}_{{item|replace('/','_')|regex_replace(':.*','')}}"
ansible_connection: docker
changed_when: false
tags: 'aws'
with_items: '{{ec2_tags.tags.services.split(" ")}}'
when: ec2_tags.tags is defined and ec2_tags.tags.env is defined and ec2_tags.tags.services is defined and ec2_tags.tags.user is defined
- name: aws - run make deploy in docker containers
delegate_to: "{{ec2_tags.tags.user}}_{{ec2_tags.tags.env}}_{{item|replace('/','_')|regex_replace(':.*','')}}"
raw: "command -v make || exit 0 && make deploy CONTAINER={{ec2_tags.tags.user}}_{{ec2_tags.tags.env}}_{{item|replace('/','_')|regex_replace(':.*','')}} HOST={{ansible_ec2_local_ipv4}}"
tags: 'aws'
with_items: '{{ec2_tags.tags.services.split(" ")}}'
when: ec2_tags.tags is defined and ec2_tags.tags.env is defined and ec2_tags.tags.services is defined and ec2_tags.tags.user is defined

View File

@ -0,0 +1,7 @@
[default]
{% if aws_output_format|length %}
output = {{ aws_output_format }}
{% endif %}
{% if aws_region|length %}
region = {{ aws_region }}
{% endif %}

View File

@ -0,0 +1,5 @@
{% if aws_access_key_id|length and aws_secret_access_key|length %}
[default]
aws_access_key_id = {{ aws_access_key_id }}
aws_secret_access_key = {{ aws_secret_access_key }}
{% endif %}

View File

@ -0,0 +1,12 @@
; DO NOT EDIT (unless you know what you are doing)
;
; This subdirectory is a git "subrepo", and this file is maintained by the
; git-subrepo command. See https://github.com/git-commands/git-subrepo#readme
;
[subrepo]
remote = ssh://git@github.com/1001Pharmacies/ansible-disks
branch = master
commit = c0ac6978d715b461fbf20aca719cd5196bc60645
parent = d01cccd9bab3a63d60ba251e3719767635ccd5d2
method = merge
cmdver = 0.4.0

View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Wizcorp
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -0,0 +1,77 @@
Disk
====
This role allows you to format extra disks and attach them to different mount points.
You can use it to move the data of different services to another disk.
Configuration
-------------
### Inventory
Because the configuration for additional disks must be stored using the YAML
syntax, you have to write it in a `group_vars` directory.
```yaml
# inventory/group_vars/GROUP_NAME
disks_additional_disks:
  - disk: /dev/sdb
    fstype: ext4
    mount_options: defaults
    mount: /data
    user: www-data
    group: www-data
    disable_periodic_fsck: false
  - disk: /dev/nvme0n1
    part: /dev/nvme0n1p1
    fstype: xfs
    mount_options: defaults,noatime
    mount: /data2
  - device_name: /dev/sdf
    fstype: ext4
    mount_options: defaults
    mount: /data
  - disk: nfs-host:/nfs/export
    fstype: nfs
    mount_options: defaults,noatime
    mount: /mnt/nfs
```
* `disk` is the device you want to mount.
* `part` is the first partition name. If not specified, `1` will be appended to the disk name.
* `fstype` allows you to choose the filesystem to use with the new disk.
* `mount_options` allows you to specify custom mount options.
* `mount` is the directory where the new disk should be mounted.
* `user` sets owner of the mount directory (default: `root`).
* `group` sets group of the mount directory (default: `root`).
* `disable_periodic_fsck` deactivates the periodic ext3/4 filesystem check for the new disk.
You can also set:
* `disks_package_use` - the package manager module to use (yum, apt, etc.). The default 'auto' will use existing facts or try to autodetect it.
The following filesystems are currently supported:
- [btrfs](http://en.wikipedia.org/wiki/BTRFS) *
- [ext2](http://en.wikipedia.org/wiki/Ext2)
- [ext3](http://en.wikipedia.org/wiki/Ext3)
- [ext4](http://en.wikipedia.org/wiki/Ext4)
- [nfs](http://en.wikipedia.org/wiki/Network_File_System) *
- [xfs](http://en.wikipedia.org/wiki/XFS) *
*) Note: To use these filesystems you have to define and install additional software packages. Please check the correct package names for your operating system.
```yaml
# inventory/group_vars/GROUP_NAME
disks_additional_packages:
- xfsprogs # package for mkfs.xfs on RedHat / Ubuntu
- btrfs-progs # package for mkfs.btrfs on CentOS / Debian
disks_additional_services:
- rpc.statd # start rpc.statd service for nfs
```
How it works
------------
It uses `parted` to partition the disk with a single primary partition spanning the entire disk.
The specified filesystem will then be created with `mkfs`.
Finally the new partition will be mounted to the specified mount path.
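For a single ext4 data disk, this is roughly equivalent to the following manual commands (a minimal sketch with illustrative device and mount point names, not the exact invocations used by the tasks):
```bash
# partition /dev/sdb with one primary partition spanning the whole disk
parted -a optimal --script /dev/sdb mklabel gpt mkpart primary 2048s 100%
# create the filesystem on the first partition
mkfs.ext4 /dev/sdb1
# mount it on the configured mount point (the role records the partition UUID in /etc/fstab)
mkdir -p /data
mount -o defaults /dev/sdb1 /data
```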

View File

@ -0,0 +1,8 @@
---
# Additional disks that need to be formatted and mounted.
# See README for syntax and usage.
disks_additional_disks: []
disks_additional_packages: []
disks_additional_services: []
disks_discover_aws_nvme_ebs: False
disks_package_use: auto

View File

@ -0,0 +1,21 @@
---
# file: handlers/main.yml
- name: restart services
  with_together:
    - '{{ disks_additional_disks }}'
    - '{{ disks_additional_disks_handler_notify.results }}'
  service:
    name: "{{item.0.service}}"
    state: restarted
  when: item.1.changed and item.0.service is defined
- name: restart services - nfs
  with_together:
    - '{{ disks_additional_disks }}'
    - '{{ disks_additional_disks_nfs_handler_notify.results }}'
  service:
    name: "{{item.0.service}}"
    state: restarted
  when: item.1.changed and item.0.service is defined

View File

@ -0,0 +1,182 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
from ctypes import *
from fcntl import ioctl
from pathlib import Path
import json
import os
import subprocess
from ansible.module_utils.basic import *
module = AnsibleModule(argument_spec=dict(
config=dict(required=True, type='list'),
))
NVME_ADMIN_IDENTIFY = 0x06
NVME_IOCTL_ADMIN_CMD = 0xC0484E41
AMZN_NVME_VID = 0x1D0F
AMZN_NVME_EBS_MN = "Amazon Elastic Block Store"
class nvme_admin_command(Structure):
_pack_ = 1
_fields_ = [("opcode", c_uint8), # op code
("flags", c_uint8), # fused operation
("cid", c_uint16), # command id
("nsid", c_uint32), # namespace id
("reserved0", c_uint64),
("mptr", c_uint64), # metadata pointer
("addr", c_uint64), # data pointer
("mlen", c_uint32), # metadata length
("alen", c_uint32), # data length
("cdw10", c_uint32),
("cdw11", c_uint32),
("cdw12", c_uint32),
("cdw13", c_uint32),
("cdw14", c_uint32),
("cdw15", c_uint32),
("reserved1", c_uint64)]
class nvme_identify_controller_amzn_vs(Structure):
_pack_ = 1
_fields_ = [("bdev", c_char * 32), # block device name
("reserved0", c_char * (1024 - 32))]
class nvme_identify_controller_psd(Structure):
_pack_ = 1
_fields_ = [("mp", c_uint16), # maximum power
("reserved0", c_uint16),
("enlat", c_uint32), # entry latency
("exlat", c_uint32), # exit latency
("rrt", c_uint8), # relative read throughput
("rrl", c_uint8), # relative read latency
("rwt", c_uint8), # relative write throughput
("rwl", c_uint8), # relative write latency
("reserved1", c_char * 16)]
class nvme_identify_controller(Structure):
_pack_ = 1
_fields_ = [("vid", c_uint16), # PCI Vendor ID
("ssvid", c_uint16), # PCI Subsystem Vendor ID
("sn", c_char * 20), # Serial Number
("mn", c_char * 40), # Module Number
("fr", c_char * 8), # Firmware Revision
("rab", c_uint8), # Recommend Arbitration Burst
("ieee", c_uint8 * 3), # IEEE OUI Identifier
("mic", c_uint8), # Multi-Interface Capabilities
("mdts", c_uint8), # Maximum Data Transfer Size
("reserved0", c_uint8 * (256 - 78)),
("oacs", c_uint16), # Optional Admin Command Support
("acl", c_uint8), # Abort Command Limit
("aerl", c_uint8), # Asynchronous Event Request Limit
("frmw", c_uint8), # Firmware Updates
("lpa", c_uint8), # Log Page Attributes
("elpe", c_uint8), # Error Log Page Entries
("npss", c_uint8), # Number of Power States Support
("avscc", c_uint8), # Admin Vendor Specific Command Configuration
("reserved1", c_uint8 * (512 - 265)),
("sqes", c_uint8), # Submission Queue Entry Size
("cqes", c_uint8), # Completion Queue Entry Size
("reserved2", c_uint16),
("nn", c_uint32), # Number of Namespaces
("oncs", c_uint16), # Optional NVM Command Support
("fuses", c_uint16), # Fused Operation Support
("fna", c_uint8), # Format NVM Attributes
("vwc", c_uint8), # Volatile Write Cache
("awun", c_uint16), # Atomic Write Unit Normal
("awupf", c_uint16), # Atomic Write Unit Power Fail
("nvscc", c_uint8), # NVM Vendor Specific Command Configuration
("reserved3", c_uint8 * (704 - 531)),
("reserved4", c_uint8 * (2048 - 704)),
("psd", nvme_identify_controller_psd * 32), # Power State Descriptor
("vs", nvme_identify_controller_amzn_vs)] # Vendor Specific
class ebs_nvme_device:
def __init__(self, device):
self.device = device
self.ctrl_identify()
def _nvme_ioctl(self, id_response, id_len):
admin_cmd = nvme_admin_command(opcode = NVME_ADMIN_IDENTIFY,
addr = id_response,
alen = id_len,
cdw10 = 1)
with open(self.device, "w") as nvme:
ioctl(nvme, NVME_IOCTL_ADMIN_CMD, admin_cmd)
def ctrl_identify(self):
self.id_ctrl = nvme_identify_controller()
self._nvme_ioctl(addressof(self.id_ctrl), sizeof(self.id_ctrl))
def is_ebs(self):
if self.id_ctrl.vid != AMZN_NVME_VID:
return False
if self.id_ctrl.mn.decode('utf-8').strip() != AMZN_NVME_EBS_MN:
return False
return True
def get_volume_id(self):
vol = self.id_ctrl.sn.decode('utf-8')
if vol.startswith("vol") and vol[3] != "-":
vol = "vol-" + vol[3:]
return vol.strip()
def get_block_device(self, stripped=False):
dev = self.id_ctrl.vs.bdev.decode('utf-8')
if stripped and dev.startswith("/dev/"):
dev = dev[5:]
return dev.strip()
def update_disk(disk, mapping):
if 'device_name' not in disk:
return disk
device_name = disk['device_name'][5:]
if device_name not in mapping:
return disk
volume_id = mapping[device_name]
link_path = '/dev/disk/by-id/nvme-Amazon_Elastic_Block_Store_vol%s' % volume_id[4:]
resolved = str(Path(link_path).resolve())
new_disk = dict(disk)
new_disk['disk'] = resolved
new_disk['part'] = '%sp1' % resolved
return new_disk
def main():
src_config = module.params['config']
lsblkOutput = subprocess.check_output(['lsblk', '-J'])
lsblk = json.loads(lsblkOutput.decode('utf-8'))
mapping = {}
for blockdevice in lsblk['blockdevices']:
try:
dev = ebs_nvme_device('/dev/%s' % blockdevice['name'])
except OSError:
continue
except IOError:
continue
if not dev.is_ebs():
continue
mapping[dev.get_block_device()] = dev.get_volume_id()
new_config = [
update_disk(disk, mapping) for disk in src_config
]
facts = {'blockDeviceMapping': mapping, 'config': new_config, 'source_config': src_config}
result = {"changed": False, "ansible_facts": facts}
module.exit_json(**result)
main()

View File

@ -0,0 +1,21 @@
galaxy_info:
  author: Emilien Kenler <ekenler@wizcorp.jp>
  description: This role allows setting up extra disks and their mount points
  company: Wizcorp K.K.
  license: MIT
  min_ansible_version: 2.0.0
  platforms:
    - name: EL
      versions:
        - 6
        - 7
    - name: Debian
      versions:
        - wheezy
        - jessie
    - name: Ubuntu
      versions:
        - all
  categories:
    - system
dependencies: []

View File

@ -0,0 +1,173 @@
- name: 'Install Python PIP'
package: >
name=py3-pip
state=present
when: ansible_os_family|lower == "alpine"
- name: 'Install Python PIP'
package: >
name=python-pip
state=present
when: ansible_os_family|lower != "alpine"
- name: 'Install python-pathlib'
pip: >
name=pathlib
state=present
- name: "Discover NVMe EBS"
disks_ebs_config:
config: "{{ disks_additional_disks }}"
register: __disks_ebs_config
when: disks_discover_aws_nvme_ebs | default(True) | bool
- set_fact:
disks_additional_disks: "{{ disks_additional_disks|defaut([]) + __disks_ebs_config['ansible_facts']['config'] }}"
when: __disks_ebs_config is defined and 'ansible_facts' in __disks_ebs_config
- name: "Install parted"
package:
name: parted
state: present
use: '{{ disks_package_use }}'
when: disks_additional_disks
tags: ['disks', 'pkgs']
- name: "Install additional fs progs"
package:
name: "{{ item }}"
state: present
with_items: "{{ disks_additional_packages|default([]) }}"
when: disks_additional_packages is defined
tags: ['disks', 'pkgs']
- name: disks - start additional services
service:
name: "{{item}}"
enabled: yes
state: started
with_items: "{{ disks_additional_services|default([]) }}"
tags: ['disks', 'pkgs']
- name: "Get disk alignment for disks"
shell: |
if
[[ -e /sys/block/{{ item.disk | basename }}/queue/optimal_io_size && -e /sys/block/{{ item.disk | basename }}/alignment_offset && -e /sys/block/{{ item.disk | basename }}/queue/physical_block_size ]];
then
echo $[$(( ($(cat /sys/block/{{ item.disk | basename }}/queue/optimal_io_size) + $(cat /sys/block/{{ item.disk | basename }}/alignment_offset)) / $(cat /sys/block/{{ item.disk | basename }}/queue/physical_block_size) )) | 2048];
else
echo 2048;
fi
args:
creates: '{{ item.part | default(item.disk + "1") }}'
executable: '/bin/bash'
with_items: '{{ disks_additional_disks }}'
register: disks_offset
tags: ['disks']
- name: "Ensure the disk exists"
stat:
path: '{{ item.disk }}'
with_items: '{{ disks_additional_disks }}'
register: disks_stat
changed_when: False
tags: ['disks']
- name: "Partition additional disks"
shell: |
if
[ -b {{ item.disk }} ]
then
[ -b {{ item.part | default(item.disk + "1") }} ] || parted -a optimal --script "{{ item.disk }}" mklabel gpt mkpart primary {{ disks_offset.stdout|default("2048") }}s 100% && sleep 5 && partprobe {{ item.disk }}; sleep 5
fi
args:
creates: '{{ item.part | default(item.disk + "1") }}'
executable: '/bin/bash'
with_items: '{{ disks_additional_disks }}'
tags: ['disks']
- name: "Create filesystem on the first partition"
filesystem:
dev: '{{ item.0.part | default(item.0.disk + "1") }}'
force: '{{ item.0.force|d(omit) }}'
fstype: '{{ item.0.fstype }}'
opts: '{{ item.0.fsopts|d(omit) }}'
with_together:
- '{{ disks_additional_disks }}'
- '{{ disks_stat.results }}'
when: item.1.stat.exists
tags: ['disks']
- name: "Disable periodic fsck and reserved space on ext3 or ext4 formatted disks"
environment:
PATH: "{{ ansible_env.PATH }}:/usr/sbin:/sbin"
shell: tune2fs -c0 -i0 -m0 {{ item.0.part | default(item.0.disk + "1") }}
with_together:
- '{{ disks_additional_disks }}'
- '{{ disks_stat.results }}'
when: "disks_additional_disks and ( item.0.fstype == 'ext4' or item.0.fstype == 'ext3' ) and item.0.disable_periodic_fsck|default(false)|bool and item.1.stat.exists"
tags: ['disks']
- name: "Ensure the mount directory exists"
file:
path: '{{ item.mount }}'
state: directory
with_items: '{{ disks_additional_disks }}'
tags: ['disks']
- name: "Get UUID for partition"
environment:
PATH: "{{ ansible_env.PATH }}:/usr/sbin:/sbin"
command: blkid -s UUID -o value {{ item.0.part | default(item.0.disk + "1") }}
check_mode: no
register: disks_blkid
with_together:
- '{{ disks_additional_disks }}'
- '{{ disks_stat.results }}'
changed_when: False
when: item.1.stat.exists
tags: ['disks']
- name: "Mount additional disks"
mount:
name: '{{ item.0.mount }}'
fstype: '{{ item.0.fstype }}'
opts: '{{ item.0.mount_options|d(omit) }}'
passno: '0'
src: 'UUID={{ item.1.stdout }}'
state: '{{ item.0.mount_state|d("mounted") }}'
with_together:
- '{{ disks_additional_disks }}'
- '{{ disks_blkid.results }}'
- '{{ disks_stat.results }}'
when: item.2.stat.exists
tags: ['disks']
register: disks_additional_disks_handler_notify
notify:
- restart services
- name: "Mount additional disks - nfs"
mount:
name: '{{ item.mount }}'
fstype: '{{ item.fstype }}'
opts: '{{ item.mount_options|d(omit) }}'
src: '{{ item.disk }}'
state: '{{ item.mount_state|d("mounted") }}'
when: item.fstype == 'nfs'
with_items: '{{ disks_additional_disks }}'
tags: ['disks']
register: disks_additional_disks_nfs_handler_notify
notify:
- restart services - nfs
- name: "Ensure the permissions are set correctly"
file:
path: '{{ item.mount }}'
owner: '{{ item.user | default("root") }}'
group: '{{ item.group | default("root") }}'
state: directory
with_items: '{{ disks_additional_disks }}'
when: item.user is defined or item.group is defined
tags: ['disk']
- meta: flush_handlers

View File

@ -0,0 +1,12 @@
; DO NOT EDIT (unless you know what you are doing)
;
; This subdirectory is a git "subrepo", and this file is maintained by the
; git-subrepo command. See https://github.com/git-commands/git-subrepo#readme
;
[subrepo]
remote = ssh://git@github.com/1001Pharmacies/ansible-docker
branch = master
commit = 6217a899084cba00447195d1873b211462b60d52
parent = 4745dad8cb8a826ee3ac47accda79f96957b5e13
method = merge
cmdver = 0.4.0

View File

@ -0,0 +1,4 @@
# Authors
* **Yann Autissier** - *Initial work* - [aya](https://github.com/aya)

View File

@ -0,0 +1,9 @@
# Changelog
## v1.0.0 (December 20, 2016)
Initial release.
* Install docker daemon
* Start and enable docker service at boot
* Build and run docker images

View File

@ -0,0 +1,20 @@
MIT License
Copyright (c) 2016 Yann Autissier
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@ -0,0 +1,237 @@
# Ansible role to run dockers
An ansible role to install the [docker](https://www.docker.com/) daemon and build and run dockers.
It installs the docker daemon and ensures it is up and running.
It sets the STORAGE_DRIVER if the docker host uses systemd and it configures the MTU to 1450 if it is a VM running on OpenStack.
It builds and runs docker images on the docker host.
## Requirements
This Ansible role requires at least Ansible version 1.9.
## Role Variables
* `docker_check_kernel` - The minimum kernel version allowed on hosts to run docker.
``` yaml
# minimum kernel version
docker_check_kernel: '3.10'
```
* `docker_check_machine` - The hosts architecture needed to run docker.
``` yaml
# architecture
docker_check_machine: 'x86_64'
```
* `docker_package` - The name of the docker package.
``` yaml
# The docker package name
docker_package: docker
```
* `docker_packages` - A list of packages to install/remove before installing the docker package.
``` yaml
# A list of packages to install/remove
# docker_packages:
# - { "name": "docker", "state": "absent" }
```
* `docker_init_config_directory` - The location of the configuration file of the docker daemon init script.
``` yaml
# Location of configuration files loaded by the init script
docker_init_config_directory: "/etc/sysconfig"
```
* `docker_opts` - The name of the environment variable used to pass options to the docker daemon.
``` yaml
# docker daemon options environment variable
docker_opts: "OPTIONS"
```
* `docker_services` - A list of system services to start
``` yaml
# services
docker_services:
- docker
```
* `dockers` - A list of docker images to build and run on the docker host with the docker-build and docker-run commands
``` yaml
# dockers
# dockers:
# - nginx
```
* `docker_cluster` - An optional cluster name to pass to the docker-build and docker-run commands
``` yaml
# docker cluster
# docker_cluster: ""
```
* `docker_start` - Starts the dockers if set to true.
``` yaml
# Start docker
docker_start: true
```
* `docker_restart` - Restarts dockers when their image has been updated. It removes currently running dockers and starts new ones.
``` yaml
# Stop and remove running docker to start a new one when image has been updated
docker_restart: true
```
* `docker_force_restart` - Restarts dockers even if their image has not been updated. It removes currently running dockers and starts new ones.
``` yaml
# Stop and remove running docker to start a new one even if image has not been updated
docker_force_restart: false
```
## Helper scripts
This role comes with a few helper scripts. Here is a short description.
* `docker-build` - Build a docker image, reading options to pass to the `docker build` command from a Dockeropts file.
* `docker-cleanup` - Remove unused dockers.
* `docker-cleanup-images` - Remove unused docker images.
* `docker-cleanup-volumes` - Remove unused docker volumes.
* `docker-get-image` - Return sha256 of the image used by the docker.
* `docker-get-status` - Return the status of the docker.
* `docker-log-cleanup` - Empty the file logging the docker output on the docker host.
* `docker-log-truncate` - Truncate the file logging the docker output on the docker host.
* `docker-run` - Run a docker, reading options to pass to the `docker run` command from a Dockeropts file.
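As a quick, hypothetical illustration of the inspection and cleanup helpers (the `nginx` docker name is made up, output depends on your host):
``` bash
# print "<name> <status>" and "<name> <image sha256>" for a given docker
docker-get-status nginx
docker-get-image nginx
# remove stopped dockers, dangling images and unused volumes (the volumes script must run as root)
docker-cleanup
docker-cleanup-images
sudo docker-cleanup-volumes
```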
## Example
To launch this role on your `docker` hosts, run the default playbook.yml.
``` bash
$ ansible-playbook playbook.yml
```
### Build a docker image
On the docker hosts, you'll be able to build docker images and run dockers, based on Dockerfile and Dockeropts files located in the /etc/docker subdirectories.
To create an `nginx` docker image, create a directory /etc/docker/nginx with a Dockerfile and a Dockeropts file in it.
``` bash
# mkdir -p /etc/docker/nginx
# cat << EOF > /etc/docker/nginx/Dockerfile
FROM nginx:alpine
EOF
# cat << EOF > /etc/docker/nginx/Dockeropts
DOCKER_ULIMIT="nofile=65536"
DOCKER_PORT="80:80"
EOF
```
Build your `nginx` docker image, then run it! The docker-run command will read the Dockeropts file to add the --ulimit and -p options to the docker run command.
``` bash
# docker-build nginx && docker-run nginx
```
### Override your files
If you want to copy a file in your Dockerfile, say the default nginx.conf, you can use the DOCKER_BUILD_PREFIX and DOCKER_BUILD_SUFFIX variables to select different versions of this file depending on the context.
``` bash
# cat << EOF > /etc/docker/nginx/Dockerfile
FROM nginx:alpine
ARG DOCKER_BUILD_PREFIX
ARG DOCKER_BUILD_SUFFIX
COPY ./\${DOCKER_BUILD_PREFIX}nginx.conf\${DOCKER_BUILD_SUFFIX} /etc/nginx/nginx.conf
EOF
```
You can now override the nginx configuration file when you build your image.
* Without any option, the docker-build command will search for the file beside your Dockerfile.
``` bash
# docker-build nginx && docker-run nginx
```
Both the DOCKER_BUILD_PREFIX and DOCKER_BUILD_SUFFIX variables are empty, so the Dockerfile will search for a `./nginx.conf` file, i.e. the /etc/docker/nginx/nginx.conf file.
* With a -c|--cluster option, the docker-build command will search for the file in a subdirectory below your Dockerfile.
``` bash
# docker-build -c custom nginx && docker-run -c custom nginx
```
The DOCKER_BUILD_PREFIX variable is populated with 'custom/' to force the Dockerfile to search for a `./custom/nginx.conf` file, i.e. the /etc/docker/nginx/custom/nginx.conf file.
* With an image name suffixed with a dash, the docker-build command will search for a suffixed file as well.
``` bash
# docker-build -c custom nginx-develop && docker-run -c custom nginx-develop
```
The DOCKER_BUILD_PREFIX variable is populated with 'custom/' and the DOCKER_BUILD_SUFFIX variable is populated with '-develop' to force the Dockerfile to search for a `./custom/nginx.conf-develop` file, i.e. the /etc/docker/nginx/custom/nginx.conf-develop file.
### Override your options
The same override principle can be used for the Dockerfile and the Dockeropts file when using the docker-build and docker-run commands.
You can create a /etc/docker/nginx/custom/Dockeropts file that overrides your default Dockeropts file, and a /etc/docker/nginx/custom/Dockeropts-develop file that overrides both other files.
The Dockeropts file accepts the following options.
* `SYSCTL` - values to set on the docker host via the sysctl command before running the docker
* `DOCKER_ARGS` - values to pass to the docker build command with --build-arg options
* `DOCKER_ENV` - values to pass to the docker run command with -e options
* `DOCKER_LINK` - values to pass to the docker run command with --link options
* `DOCKER_OPT` - values to pass to the docker run command, prefixed by --
* `DOCKER_PORT` - values to pass to the docker run command with -p options
* `DOCKER_ULIMIT` - values to pass to the docker run command with --ulimit options
* `DOCKER_VOLUME` - values to pass to the docker run command with -v options
* `HOST_VOLUME` - volumes to allow write access to from the docker on selinux enabled host
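For instance, a hypothetical /etc/docker/nginx/custom/Dockeropts could combine several of these options (all values below are illustrative and not part of this role):
``` bash
# /etc/docker/nginx/custom/Dockeropts (illustrative values)
SYSCTL="net.core.somaxconn=1024"                   # set on the host before the docker is run
DOCKER_ENV="NGINX_HOST=\"www.example.com\""        # passed to docker run as -e options
DOCKER_PORT="80:80 443:443"                        # passed to docker run as -p options
DOCKER_ULIMIT="nofile=65536"                       # passed to docker run as --ulimit options
DOCKER_VOLUME="/srv/www:/usr/share/nginx/html:ro"  # passed to docker run as -v options
```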
Overriding options is done several times, reading options from the more specific to the more generic file. In our example, files are read in this order:
/etc/docker/nginx/custom/Dockeropts-develop
/etc/docker/nginx/custom/Dockeropts
/etc/docker/nginx/Dockeropts
## Common configuration
The following configuration builds and runs the docker image 'nginx-develop' for the 'custom' cluster described in our example.
The Dockerfile and Dockeropts files needed in the /etc/docker/nginx directory should be present on the docker host, likely synchronised by another ansible role.
``` yaml
docker_cluster: "custom"
dockers:
- nginx-develop
```
## Tests
To test this role on your `docker` hosts, run the tests/playbook.yml playbook.
``` bash
$ ansible-playbook tests/playbook.yml
```
## Limitations
This role is known to work on Ubuntu, Debian, CentOS and Alpine Linux.

View File

@ -0,0 +1,41 @@
---
# file: defaults/main.yml
# minimum kernel version
docker_check_kernel: '3.10'
# architecture
docker_check_machine: 'x86_64'
# The docker package name
docker_package: docker
# A list of packages to install/remove
# docker_packages:
# - { "name": "docker", "state": "absent" }
# Location of configuration files loaded by the init script
docker_init_config_directory: "/etc/sysconfig"
# docker daemon options environment variable
docker_opts: "OPTIONS"
# services
docker_services:
- docker
# dockers
# dockers:
# - nginx
# docker cluster
# docker_cluster: ""
# Start docker
docker_start: true
# Stop and remove running docker to start a new one when image has been updated
docker_restart: true
# Stop and remove running docker to start a new one even if image has not been updated
docker_force_restart: false

View File

@ -0,0 +1 @@
kernel.pax.softmode=1

View File

@ -0,0 +1,133 @@
#!/bin/bash
# Author: Yann Autissier <yann.autissier@gmail.com>
DOCKER_IMAGE_REPOSITORY="centile"
DOCKER_BUILD_DIRECTORY="/etc/docker"
usage() {
echo Usage: $0 [-c cluster] [-f] [-q] [-t] image [image [...]]
echo -e "Build a docker image in the '${DOCKER_IMAGE_REPOSITORY}' repository."
echo
echo -e "image\tis a directory with a Dockerfile, default in '${DOCKER_BUILD_DIRECTORY}/image'."
echo -e "\t'image' can contains a dash. The suffixed part after the dash is taken into account"
echo -e "\tin the image name but not in the name of the directory containing the Dockerfile."
echo -e "\tsuffix will be available in your Dockerfile in the DOCKER_BUILD_SUFFIX build-arg."
echo
echo -e "Options:"
echo -e "\t-c 'cluster'\tAllow to override files in 'image' directory with existing files in"
echo -e "\t\t\tthe 'image/cluster' directory. 'cluster' will be available in your"
echo -e "\t\t\tDockerfile in the DOCKER_BUILD_PREFIX build-arg."
echo -e "\t-f\t\tforce build, do not use cache when building image."
echo -e "\t-q\t\tquiet mode, minimal output."
echo -e "\t-t\t\ttest mode, do nothing but output the command that would haev been launched."
echo
echo -e "EXAMPLES"
echo
echo -e "$0 elk"
echo -e "Build a docker image named '${DOCKER_IMAGE_REPOSITORY}/elk' with Dockerfile ${DOCKER_BUILD_DIRECTORY}/elk/Dockerfile"
echo
echo -e "$0 elk-es01"
echo -e "Build a docker image named '${DOCKER_IMAGE_REPOSITORY}/elk-es01' with Dockerfile ${DOCKER_BUILD_DIRECTORY}/elk/Dockerfile"
echo -e "and build-arg DOCKER_BUILD_SUFFIX=-es01"
echo
echo -e "$0 -c elisa-sdc elk-es01"
echo -e "Build a docker image named '${DOCKER_IMAGE_REPOSITORY}/elk-es01' with Dockerfile ${DOCKER_BUILD_DIRECTORY}/elk/Dockerfile,"
echo -e "build-arg DOCKER_BUILD_PREFIX=elisa-sdc/ and build-arg DOCKER_BUILD_SUFFIX=-es01"
echo
exit 1
}
while [ $# -gt 0 ]; do
case $1 in
-c|--cluster) shift && CLUSTER="$1"
;;
-f|--force) FORCE=1
;;
-t|--test) TEST=1
;;
-q|--quiet) QUIET=1
;;
-h|--help) usage
;;
*) args="${args:-} $1"
esac
shift
args="${args# }"
done
# check args
[ "${args:0:1}" = "-" ] && usage
# grsec/pax on alpine linux with docker < 1.12
[ -f /etc/alpine-release ] && while read major minor patch; do
if [ "${major}" -eq 1 ] && [ "${minor:-0}" -lt 12 ]; then
[ "$(sysctl -n kernel.grsecurity.chroot_deny_chmod 2>/dev/null)" = 1 ] && sysctl -w kernel.grsecurity.chroot_deny_chmod=0 2>/dev/null && grsec_disabled_chmod=1
[ "$(sysctl -n kernel.grsecurity.chroot_deny_mknod 2>/dev/null)" = 1 ] && sysctl -w kernel.grsecurity.chroot_deny_mknod=0 2>/dev/null && grsec_disabled_mknod=1
fi
done <<< $(apk version docker |awk -F '-' '/^docker/ {print $2}' |sed 's/\./ /g')
for arg in $args; do
# extract docker image name
image="$(basename ${arg})"
# keep part before the dash as the directory name
dir="$(dirname ${arg})/${image%-*}"
# keep part after the dash as an image suffix name
[ "${image##*-}" != "${image}" ] && suffix="${image##*-}"
# default to ${DOCKER_BUILD_DIRECTORY}/${dir} if ${dir} does not exists
[ ! -d "${dir}" ] && [ -d "${DOCKER_BUILD_DIRECTORY}/${dir}" ] && dir="${DOCKER_BUILD_DIRECTORY}/${dir#./}"
# directory exists && contains a Dockerfile
[ -d ${dir} ] && [ -f "${dir}/Dockerfile" ] || usage
# cluster directory exists
[ -n "${CLUSTER}" ] && { [ -d ${dir}/${CLUSTER} ] || usage; }
# search for Dockeropts files
files="${dir}/Dockeropts ${dir}/Dockeropts-${suffix}"
[ -n "${CLUSTER}" ] && files="${files} ${dir}/${CLUSTER}/Dockeropts ${dir}/${CLUSTER}/Dockeropts-${suffix}"
# source the Dockeropts files
for dockeropts in ${files}; do
[ -f "${dockeropts}" ] && . ${dockeropts}
done
# quiet build
[ ${QUIET} ] && DOCKER_BUILD_ARGS="--quiet" || DOCKER_BUILD_ARGS=""
# do not use cache
[ ${FORCE} ] && DOCKER_BUILD_ARGS="${DOCKER_BUILD_ARGS} --no-cache"
# extract DOCKER_ARGS
[ -n "${DOCKER_ARGS}" ] && for build_arg in ${DOCKER_ARGS}; do
DOCKER_BUILD_ARGS="${DOCKER_BUILD_ARGS} --build-arg ${build_arg}"
done
# add DOCKER_BUILD_PREFIX and DOCKER_BUILD_SUFFIX
[ -n "${CLUSTER}" ] && DOCKER_BUILD_ARGS="${DOCKER_BUILD_ARGS} --build-arg DOCKER_BUILD_PREFIX=${CLUSTER}/"
[ -n "${suffix}" ] && DOCKER_BUILD_ARGS="${DOCKER_BUILD_ARGS} --build-arg DOCKER_BUILD_SUFFIX=-${suffix}"
# search for Dockerfile
[ -n "${CLUSTER}" ] && files="${dir}/${CLUSTER}/Dockerfile-${suffix} ${dir}/${CLUSTER}/Dockerfile" || files=""
files="${files} ${dir}/Dockerfile-${suffix} ${dir}/Dockerfile"
# build docker image with 1st found Dockerfile
for dockerfile in ${files}; do
[ -f "${dockerfile}" ] || continue
[ ${QUIET} ] && [ ! ${TEST} ] && echo -n "${image} "
[ ! ${QUIET} ] && echo "Building image ${image}"
if [ ${TEST} ]; then
echo docker build ${DOCKER_BUILD_ARGS} -t ${DOCKER_IMAGE_REPOSITORY}/${image} -f ${dockerfile} ${dir}
else
docker build ${DOCKER_BUILD_ARGS} -t ${DOCKER_IMAGE_REPOSITORY}/${image} -f ${dockerfile} ${dir}
result=$?
fi
[ ${result:-0} -ge ${return:-0} ] && return=${result}
break
done
done
# grsec/pax
[ ${grsec_disabled_chmod} ] && sysctl -w kernel.grsecurity.chroot_deny_chmod=1 2>/dev/null
[ ${grsec_disabled_mknod} ] && sysctl -w kernel.grsecurity.chroot_deny_mknod=1 2>/dev/null
exit ${return:-1}

View File

@ -0,0 +1,4 @@
#!/bin/sh
# Author: Yann Autissier <yann.autissier@gmail.com>
docker ps -q --no-trunc --filter status=exited,status=created,status=dead |while read docker; do docker rm ${docker}; done

View File

@ -0,0 +1,4 @@
#!/bin/sh
# Author: Yann Autissier <yann.autissier@gmail.com>
docker images -q --no-trunc --filter dangling=true |while read image; do docker rmi ${image}; done

View File

@ -0,0 +1,144 @@
#! /bin/bash
set -eou pipefail
#usage: sudo ./docker-cleanup-volumes.sh [--dry-run]
docker_bin="$(which docker.io 2> /dev/null || which docker 2> /dev/null)"
# Default dir
dockerdir=/var/lib/docker
# Look for an alternate docker directory with -g/--graph option
dockerpid=$(ps ax | grep "$docker_bin" | grep -v grep | awk '{print $1; exit}') || :
if [[ -n "$dockerpid" && $dockerpid -gt 0 ]]; then
next_arg_is_dockerdir=false
while read -d $'\0' arg
do
if [[ $arg =~ ^--graph=(.+) ]]; then
dockerdir=${BASH_REMATCH[1]}
break
elif [ $arg = '-g' ]; then
next_arg_is_dockerdir=true
elif [ $next_arg_is_dockerdir = true ]; then
dockerdir=$arg
break
fi
done < /proc/$dockerpid/cmdline
fi
dockerdir=$(readlink -f "$dockerdir")
volumesdir=${dockerdir}/volumes
vfsdir=${dockerdir}/vfs/dir
allvolumes=()
dryrun=false
verbose=false
function log_verbose() {
if [ "${verbose}" = true ]; then
echo "$1"
fi;
}
function delete_volumes() {
local targetdir=$1
echo
if [[ ! -d "${targetdir}" || ! "$(ls -A "${targetdir}")" ]]; then
echo "Directory ${targetdir} does not exist or is empty, skipping."
return
fi
echo "Delete unused volume directories from $targetdir"
local dir
while read -d $'\0' dir
do
dir=$(basename "$dir")
if [[ -d "${targetdir}/${dir}/_data" || "${dir}" =~ [0-9a-f]{64} ]]; then
if [ ${#allvolumes[@]} -gt 0 ] && [[ ${allvolumes[@]} =~ "${dir}" ]]; then
echo "In use ${dir}"
else
if [ "${dryrun}" = false ]; then
echo "Deleting ${dir}"
rm -rf "${targetdir}/${dir}"
else
echo "Would have deleted ${dir}"
fi
fi
else
echo "Not a volume ${dir}"
fi
done < <(find "${targetdir}" -mindepth 1 -maxdepth 1 -type d -print0 2>/dev/null)
}
if [ $UID != 0 ]; then
echo "You need to be root to use this script."
exit 1
fi
if [ -z "$docker_bin" ] ; then
echo "Please install docker. You can install docker by running \"wget -qO- https://get.docker.io/ | sh\"."
exit 1
fi
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
-n|--dry-run)
dryrun=true
;;
-v|--verbose)
verbose=true
;;
*)
echo "Cleanup docker volumes: remove unused volumes."
echo "Usage: ${0##*/} [--dry-run] [--verbose]"
echo " -n, --dry-run: dry run: display what would get removed."
echo " -v, --verbose: verbose output."
exit 1
;;
esac
shift
done
# Make sure that we can talk to docker daemon. If we cannot, we fail here.
${docker_bin} version >/dev/null
container_ids=$(${docker_bin} ps -a -q --no-trunc)
#All volumes from all containers
SAVEIFS=$IFS
IFS=$(echo -en "\n\b")
for container in $container_ids; do
#add container id to list of volumes; don't think these
#ever exist in the volumesdir but just to be safe
allvolumes+=("${container}")
#add all volumes from this container to the list of volumes
log_verbose "Inspecting container ${container}"
for volpath in $(
${docker_bin} inspect --format='{{range $key, $val := .}}{{if eq $key "Volumes"}}{{range $vol, $path := .}}{{$path}}{{"\n"}}{{end}}{{end}}{{if eq $key "Mounts"}}{{range $mount := $val}}{{$mount.Source}}{{"\n"}}{{end}}{{end}}{{end}}' ${container} \
); do
log_verbose "Processing volumepath ${volpath}"
#try to get volume id from the volume path
vid=$(echo "${volpath}" | sed 's|.*/\(.*\)/_data$|\1|;s|.*/\([0-9a-f]\{64\}\)$|\1|')
# check for either a 64 character vid or the end of a volumepath containing _data:
if [[ "${vid}" =~ ^[0-9a-f]{64}$ || (${volpath} =~ .*/_data$ && ! "${vid}" =~ "/") ]]; then
log_verbose "Found volume ${vid}"
allvolumes+=("${vid}")
else
#check if it's a bindmount, these have a config.json file in the ${volumesdir} but no files in ${vfsdir} (docker 1.6.2 and below)
for bmv in $(find "${volumesdir}" -name config.json -print | xargs grep -l "\"IsBindMount\":true" | xargs grep -l "\"Path\":\"${volpath}\""); do
bmv="$(basename "$(dirname "${bmv}")")"
log_verbose "Found bindmount ${bmv}"
allvolumes+=("${bmv}")
#there should be only one config for the bindmount, delete any duplicate for the same bindmount.
break
done
fi
done
done
IFS=$SAVEIFS
delete_volumes "${volumesdir}"
delete_volumes "${vfsdir}"

View File

@ -0,0 +1,8 @@
#!/bin/bash
# Author: Yann Autissier <yann.autissier@gmail.com>
[ -n "$1" ] || exit
for docker in $@; do
docker inspect "${docker}" 2>/dev/null |awk '$1 == "\"Image\":" && $2 ~ /^\"sha/ {gsub(/(^\"|\",$)/, "", $2); print "'${docker}' "$2}'
done

View File

@ -0,0 +1,8 @@
#!/bin/bash
# Author: Yann Autissier <yann.autissier@gmail.com>
[ -n "$1" ] || exit
for docker in $@; do
docker inspect "${docker}" 2>/dev/null |awk '$1 == "\"Status\":" {gsub(/(^\"|\",$)/, "", $2); print "'${docker}' "$2}'
done

View File

@ -0,0 +1,4 @@
#!/bin/sh
# Author: Yann Autissier <yann.autissier@gmail.com>
[ -n "$1" ] && :> $(docker inspect $1 | grep '"LogPath": "*"' | sed -e 's/.*"LogPath": "//g' | sed -e 's/",//g')

View File

@ -0,0 +1,7 @@
#!/bin/sh
# Author: Yann Autissier <yann.autissier@gmail.com>
[ -n "$1" ] && \
docker_log=$(docker inspect $1 2>/dev/null | grep '"LogPath": "*"' | sed -e 's/.*"LogPath": "//g' | sed -e 's/",//g') && \
[ -f "${docker_log}" ] && \
tail -n 100 ${docker_log} > ${docker_log}

View File

@ -0,0 +1,157 @@
#!/bin/bash
# Author: Yann Autissier <yann.autissier@gmail.com>
DOCKER_IMAGE_REPOSITORY="centile"
DOCKER_BUILD_DIRECTORY="/etc/docker"
usage() {
echo Usage: $0 [ -c cluster] [ -i image ] [-f] [-q] [-t] name [name [...]]
echo -e "Run a docker from an image in the '${DOCKER_IMAGE_REPOSITORY}' repository."
echo
echo -e "name\t is a directory with a Dockerfile, default in '${DOCKER_BUILD_DIRECTORY}/name'."
echo -e "\t'name' can contains a dash. The directory name will be extracted for the first part"
echo -e "\tbefore a dash."
echo
echo -e "Options:"
echo -e "\t-c 'cluster'\tAllow to override files in 'image' directory with existing files in"
echo -e "\t\t\tthe 'image/cluster' directory."
echo -e "\t -i 'image'\tthe docker image to run, default in '${DOCKER_IMAGE_REPOSITORY}' repository."
echo -e "\t -f\t\tforce run, stop and remove existing docker before running a new one."
echo -e "\t -q\t\tquiet mode, minimal output."
echo -e "\t -t\t\ttest mode, do nothing but output the command that would have been launched."
echo
echo -e "EXAMPLES"
echo
echo -e "$0 elk"
echo -e "Run a docker named 'elk' from the '${DOCKER_IMAGE_REPOSITORY}/elk' image"
echo
echo -e "$0 elk-es01"
echo -e "Run a docker named 'elk-es01' from the '${DOCKER_IMAGE_REPOSITORY}/elk-es01' image"
echo
echo -e "$0 -i elk elk-es01"
echo -e "Run a docker named 'elk-es01' from the '${DOCKER_IMAGE_REPOSITORY}/elk' image"
echo
exit 1
}
while [ $# -gt 0 ]; do
case $1 in
-c|--cluster) shift && CLUSTER="$1"
;;
-i|--image) shift && IMAGE="$1"
;;
-h|--help) usage
;;
-f|--force) FORCE=1
;;
-q|--quiet) QUIET=1
;;
-t|--test) TEST=1
;;
*) args="${args:-} $1"
esac
shift
args="${args# }"
done
# check args
[ "${args:0:1}" = "-" ] && usage
for arg in ${args}; do
# reset vars
image=""; DOCKER_OPT=""
# extract docker name
name="$(basename ${arg})"
# keep part before the dash as the directory name
dir="$(dirname ${arg})/${name%-*}"
# keep part after the dash as an image suffix name
[ "${name##*-}" != "${name}" ] && suffix="${name##*-}"
# if provided, set docker image from args
if [ -n "${IMAGE}" ]; then
# if docker image does not contain a /, add our default repository
[ "${IMAGE##*/}" != "${IMAGE}" ] && image="${IMAGE}" || image="${DOCKER_IMAGE_REPOSITORY}/${IMAGE}"
# else try to find an image from the docker name
else
# try docker name, docker name without ending numbers, docker name without suffix
for image in ${name} ${name%%[0-9]*} ${name%-*}; do
# search for image in ${DOCKER_IMAGE_REPOSITORY}
[ -n "$(docker images 2>/dev/null |awk '$1 == "'${DOCKER_IMAGE_REPOSITORY}/${image}'" {print $1}')" ] && image="${DOCKER_IMAGE_REPOSITORY}/${image}" && break
image="${name}"
done
fi
tag="$(docker images |awk '$1 == "'${image}'" {print $2}')"
[ -z "${tag}" ] && echo "ERROR: Cannot find image '${image}'" >2 && exit 2
# default to ${DOCKER_BUILD_DIRECTORY}/${dir} if ${dir} does not exists
[ ! -d "${dir}" ] && [ -d "${DOCKER_BUILD_DIRECTORY}/${dir}" ] && dir="${DOCKER_BUILD_DIRECTORY}/${dir#./}"
# directory exists && contains a Dockerfile
[ -d ${dir} ] && [ -f "${dir}/Dockerfile" ] || usage
# cluster directory exists
[ -n "${CLUSTER}" ] && { [ -d ${dir}/${CLUSTER} ] || usage; }
# search for Dockeropts files
files="${dir}/Dockeropts ${dir}/Dockeropts-${suffix}"
[ -n "${CLUSTER}" ] && files="${files} ${dir}/${CLUSTER}/Dockeropts ${dir}/${CLUSTER}/Dockeropts-${suffix}"
# source the Dockeropts files
for dockeropts in ${files}; do
[ -f "${dockeropts}" ] && . ${dockeropts}
done
# extract SYSCTL
[ -n "${SYSCTL}" ] && for sysctl in ${SYSCTL}; do
sysctl -w ${sysctl} 2>/dev/null
done
# extract DOCKER_OPT
[ -n "${DOCKER_OPT}" ] && DOCKER_OPTS="--${DOCKER_OPT/ / --}" || DOCKER_OPTS=""
# extract DOCKER_ENV
[ -n "${DOCKER_ENV}" ] && DOCKER_OPTS="${DOCKER_OPTS} -e ${DOCKER_ENV//\" /\" -e }"
# extract DOCKER_LINK
[ -n "${DOCKER_LINK}" ] && DOCKER_OPTS="--link ${DOCKER_LINK/ / --link }"
# extract DOCKER_PORT
[ -n "${DOCKER_PORT}" ] && DOCKER_OPTS="${DOCKER_OPTS} -p ${DOCKER_PORT// / -p }"
# extract DOCKER_ULIMIT
[ -n "${DOCKER_ULIMIT}" ] && DOCKER_OPTS="${DOCKER_OPTS} --ulimit ${DOCKER_ULIMIT// / --ulimit }"
# extract DOCKER_VOLUME
[ -n "${DOCKER_VOLUME}" ] && DOCKER_OPTS="${DOCKER_OPTS} -v ${DOCKER_VOLUME// / -v }"
# enable access to host volumes on selinux
for volume in ${HOST_VOLUME}; do
chcon -Rt svirt_sandbox_file_t ${volume} 2>/dev/null
done
# remove current docker
if [ ${FORCE} ]; then
if [ -n "$(docker ps -q --filter status=created,status=restarting,status=running,status=paused,status=exited,status=dead,name=${name})" ]; then
[ ! ${QUIET} ] && echo -n "Removing docker ${name}... "
if [ ${TEST} ]; then
echo docker rm -f ${name}
else
eval docker rm -f ${name} >/dev/null 2>&1
result=$? && [ ${result} -ne 0 ] && echo "ERROR" && { [ ${result:-0} -ge ${return:-0} ] && return=${result}; } && break
[ ! ${QUIET} ] && echo "OK"
fi
fi
fi
# launch docker
[ ${QUIET} ] && [ ! ${TEST} ] && echo -n "${name} "
[ ! ${QUIET} ] && echo -n "Running docker ${name}... "
if [ ${TEST} ]; then
echo docker run --restart=always ${DOCKER_OPTS} -d --name ${name} ${image} ${DOCKER_RUN:-}
else
eval docker run --restart=always ${DOCKER_OPTS} -d --name ${name} ${image} ${DOCKER_RUN:-} 2>/dev/null
result=$? && [ ${result} -ne 0 ] && echo "ERROR"
fi
[ ${result:-0} -ge ${return:-0} ] && return=${result}
done
exit ${return:-1}

View File

@ -0,0 +1,8 @@
---
# file: handlers/main.yml
- name: restart docker
  service:
    name: "{{docker_service}}"
    state: "restarted"

View File

@ -0,0 +1,42 @@
# Copyright (c) 2016 Centile
#
# MIT License
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
---
galaxy_info:
  author: Yann Autissier
  description: An ansible role to install docker
  company: Centile
  license: MIT
  min_ansible_version: 1.9
  platforms:
    - name: Ubuntu
      versions:
        - xenial
    - name: Debian
      versions:
        - jessie
    - name: EL
      versions:
        - all
dependencies: []

View File

@ -0,0 +1,6 @@
---
# file: playbook.yml
- hosts: docker
  roles:
    - .

View File

@ -0,0 +1,17 @@
---
# file: tasks/build.yml
- name: build - Build docker image
  with_items: "{{dockers|default([])}}"
  command: "/usr/local/bin/docker-build -q -c {{docker_cluster|default('\"\"')}} {{item}}"
  register: docker_build_image_command
- name: build - Register docker_build_image
  with_items: "{{docker_build_image_command.results}}"
  set_fact:
    docker_build_image: "{{docker_build_image |default({}) |combine( {item.item: item.stdout} ) }}"
- name: build - Debug docker_build_image
  with_items: "{{dockers|default([])}}"
  debug: msg="{{docker_build_image[item]}}"
  when: docker_debug|default(false)

View File

@ -0,0 +1,16 @@
---
# file: tasks/check.yml
- name: check - kernel version
  fail:
    msg: >
      docker requires a minimum kernel version of {{docker_check_kernel}}
      on {{ansible_distribution}} {{ansible_distribution_version}}
  when: ansible_kernel is version(docker_check_kernel, "<")
- name: check - machine architecture
  fail:
    msg: >
      docker requires a {{docker_check_machine}} version
      of {{ansible_distribution}} {{ansible_distribution_version}}
  when: ansible_machine != docker_check_machine

View File

@ -0,0 +1,18 @@
---
# file: tasks/config.yml
- name: config - add docker storage setup
lineinfile: dest="{{docker_init_config_directory}}/{{docker_package}}-storage-setup" state="present" line="STORAGE_DRIVER=\"\""
when: docker_package|length > 0 and ansible_service_mgr == "systemd" and ansible_os_family|lower == "redhat"
become: yes
# - name: config - disable docker iptables setup
# lineinfile: dest="/lib/systemd/system/docker.service" state="present" regex="^ExecStart=" line="ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --iptables=false"
# notify: restart docker
# when: docker_package|length > 0 and ansible_service_mgr == "systemd"
# become: yes
- name: config - setup docker mtu on Openstack VMs
lineinfile: dest="{{docker_init_config_directory}}/{{docker_package}}" state="present" backrefs=true regexp='^{{docker_opts}}=(?:\'|\")?((?:\s*[\w=\/\-\.](?<!--mtu=1450)\s*)*)(?:\'|\")?$' line='{{docker_opts}}="\1 --mtu=1450"'
when: docker_package|length > 0 and ansible_product_name == "OpenStack Nova"
become: yes

View File

@ -0,0 +1,23 @@
---
# file: tasks/files.yml
- name: files - copy files
with_items:
- /usr/local/bin/docker-build
- /usr/local/bin/docker-cleanup
- /usr/local/bin/docker-cleanup-images
- /usr/local/bin/docker-cleanup-volumes
- /usr/local/bin/docker-log-cleanup
- /usr/local/bin/docker-log-truncate
- /usr/local/bin/docker-run
- /usr/local/bin/docker-get-image
- /usr/local/bin/docker-get-status
copy: src=../files/{{item}} dest={{item}} owner=root group=root mode=0755
become: yes
- name: files - copy sysctl configuration files
with_items:
- /etc/sysctl.d/docker.conf
copy: src=../files/{{item}} dest={{item}} owner=root group=root mode=0644
become: yes

View File

@ -0,0 +1,12 @@
---
# file: tasks/group.yml
- name: group - create docker group
group: name="docker" state="present" system="yes"
become: yes
when: ansible_os_family|lower != "alpine"
- name: group - add me to the docker group
user: name="{{ansible_user_id}}" groups=docker append=yes
become: yes
when: ansible_os_family|lower != "alpine" and ansible_user_id != "root"

View File

@ -0,0 +1,30 @@
---
# file: tasks/main.yml
- import_tasks: vars.yml
tags:
- vars
- import_tasks: check.yml
tags:
- check
- import_tasks: files.yml
tags:
- files
- import_tasks: package.yml
tags:
- package
- import_tasks: config.yml
tags:
- config
- import_tasks: service.yml
tags:
- service
- import_tasks: group.yml
tags:
- group
- import_tasks: build.yml
tags:
- build
- import_tasks: run.yml
tags:
- run

View File

@ -0,0 +1,21 @@
---
# file: tasks/package.yml
- name: package - packages pre installation
with_items: "{{docker_packages|default([])}}"
package: name="{{item.name}}" state="{{item.state}}"
become: yes
- name: package - add docker GPG key
  apt_key: url=https://download.docker.com/linux/debian/gpg
  become: yes
  when: ansible_os_family|lower == "debian"
- name: package - add docker APT repository
  apt_repository:
    repo: deb [arch=amd64] https://download.docker.com/linux/{{ansible_distribution|lower}} {{ansible_distribution_release}} stable
  become: yes
  when: ansible_os_family|lower == "debian"
- name: package - add docker package
package: name="{{docker_package}}" state=present
when: docker_package|length > 0
become: yes

View File

@ -0,0 +1,53 @@
---
# file: tasks/run.yml
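# Flow: read the current status and image of every container in `dockers`,
# stop and remove a container when its image differs from the freshly built
# one (or when docker_force_restart is set), then run or start it again with
# the docker-run helper.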
- name: run - Get current docker status
with_items: "{{dockers|default([])}}"
command: "/usr/local/bin/docker-get-status {{item}}"
register: docker_current_status_command
- name: run - Register docker_current_status
with_items: "{{docker_current_status_command.results}}"
set_fact:
docker_current_status: "{{docker_current_status |default({}) |combine( {item.item: item.stdout} ) }}"
- name: run - Debug docker_current_status
with_items: "{{dockers|default([])}}"
debug: msg="{{docker_current_status[item]}}"
when: docker_debug|default(false)
- name: run - Get current docker image
with_items: "{{dockers|default([])}}"
command: "/usr/local/bin/docker-get-image {{item}}"
register: docker_current_image_command
- name: run - Register docker_current_image
with_items: "{{docker_current_image_command.results}}"
set_fact:
docker_current_image: "{{docker_current_image |default({}) |combine( {item.item: item.stdout} ) }}"
- name: run - Debug docker_current_image
with_items: "{{dockers|default([])}}"
debug: msg="{{docker_current_image[item]}}"
when: docker_debug|default(false)
- name: run - Stop current docker
with_items: "{{dockers|default([])}}"
command: "docker stop {{item}}"
when: ( docker_restart|default(false) and docker_current_image[item] != docker_build_image[item] or docker_force_restart|default(false) ) and docker_current_status[item] == item ~ " running"
- name: run - Remove current docker
with_items: "{{dockers|default([])}}"
command: "docker rm {{item}}"
when: ( docker_restart|default(false) and docker_current_image[item] != docker_build_image[item] or docker_force_restart|default(false) ) and docker_current_status[item] != ""
- name: run - Run docker image
with_items: "{{dockers|default([])}}"
command: "/usr/local/bin/docker-run -q -c {{docker_cluster|default('\"\"')}} {{item}}"
when: docker_start|default(true) and docker_current_image[item] != docker_build_image[item] or docker_force_restart|default(false)
- name: run - Start docker
with_items: "{{dockers|default([])}}"
command: "docker start {{item}}"
when: docker_start|default(true) and docker_current_image[item] == docker_build_image[item] and docker_current_status[item] != item ~ " running"

View File

@ -0,0 +1,32 @@
---
# file: tasks/service.yml
- name: service - start docker daemon
with_items: "{{docker_services|default([])}}"
service:
name: "{{item}}"
state: started
enabled: yes
become: yes
when: ansible_service_mgr|lower != "openrc" and ansible_os_family|lower != "alpine"
- name: service - force openrc status
shell: "kill -0 $(cat /run/{{item}}.pid) && [ ! -h /run/openrc/started/{{item}} ] && ln -s /etc/init.d/{{item}} /run/openrc/started/{{item}} && service {{item}} restart ||:"
with_items: "{{docker_services|default([])}}"
become: yes
when: ansible_service_mgr|lower == "openrc" or ansible_os_family|lower == "alpine"
- name: service - start docker daemon (openrc)
with_items: "{{docker_services|default([])}}"
service:
name: "{{item}}"
state: started
enabled: yes
runlevel: boot
become: yes
when: ansible_service_mgr|lower == "openrc" or ansible_os_family|lower == "alpine"
- name: service - force docker restart
shell: "[ ! -d /var/lib/docker/tmp ] && service docker restart ||:"
become: yes
when: ansible_service_mgr|lower == "openrc" or ansible_os_family|lower == "alpine"

View File

@ -0,0 +1,34 @@
---
# file: tasks/vars.yml
- name: vars - load per operating system variables
include_vars: "{{item}}"
with_first_found:
- paths:
- "vars/"
- files:
- "{{ansible_distribution|lower}}-{{ansible_distribution_version|lower}}-{{ansible_machine}}.yml" # centos-6.4-i386.yml ubuntu-16.04-x86_64.yml
- "{{ansible_distribution|lower}}-{{ansible_distribution_version|lower}}.yml" # centos-6.4.yml ubuntu-16.04.yml
- "{{ansible_distribution|lower}}-{{ansible_distribution_major_version|lower}}-{{ansible_machine}}.yml" # centos-6-i386.yml ubuntu-16-x86_64.yml
- "{{ansible_distribution|lower}}-{{ansible_distribution_major_version|lower}}.yml" # centos-6.yml ubuntu-16.yml
- "{{ansible_os_family|lower}}-{{ansible_distribution_version|lower}}-{{ansible_machine}}.yml" # redhat-6.4-i386.yml debian-8.5-x86_64.yml
- "{{ansible_os_family|lower}}-{{ansible_distribution_version|lower}}.yml" # redhat-6.4.yml debian-8.5.yml
- "{{ansible_os_family|lower}}-{{ansible_distribution_major_version|lower}}-{{ansible_machine}}.yml" # redhat-6-i386.yml debian-8-x86_64.yml
- "{{ansible_os_family|lower}}-{{ansible_distribution_major_version|lower}}.yml" # redhat-6.yml debian-8.yml
- "{{ansible_distribution|lower}}-{{ansible_machine}}.yml" # centos-i386.yml ubuntu-x86_64.yml
- "{{ansible_distribution|lower}}.yml" # centos.yml ubuntu.yml
- "{{ansible_os_family|lower}}-{{ansible_machine}}.yml" # redhat-i386.yml debian-x86_64.yml
- "{{ansible_os_family|lower}}.yml" # redhat.yml debian.yml
- "{{ansible_system|lower}}-{{ansible_machine}}.yml" # linux-i386.yml linux-x86_64.yml
- "{{ansible_system|lower}}.yml" # linux.yml
- "default.yml" # default.yml
skip: true
- name: vars - override with local variables
include_vars: "{{item}}"
with_first_found:
- paths:
- "vars/"
- files:
- "local.yml"
skip: true

View File

@ -0,0 +1,43 @@
---
# file: tests/goss.yml
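# Flow: create a temporary directory, pick the most specific goss file
# available for the target OS, copy the test files over, run goss against the
# selected file, remove the temporary directory, and surface any goss failure.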
- name: tests - create temporary directory
command: mktemp -d
register: tests_mktemp
- name: tests - register goss installation
environment:
PATH: "/usr/local/bin:{{ansible_env.PATH}}"
command: which goss
register: tests_goss_installed
- name: tests - register specific OS goss files
set_fact:
goss_file:
- "goss/main_{{ansible_distribution|lower}}-{{ansible_distribution_major_version|lower}}.yml" # main_centos-6.yml main_centos-7.yml
- "goss/main_{{ansible_distribution|lower}}.yml" # main_centos.yml main_ubuntu.yml
- "goss/main_{{ansible_os_family|lower}}.yml" # main_redhat.yml main_debian.yml
- "goss/main_{{ansible_system|lower}}.yml" # main_linux.yml
- "goss/main.yml" # main.yml
- name: tests - register goss file
set_fact:
tests_goss_file: "{{lookup('first_found', goss_file)}}"
- name: tests - copy test files
copy: src=goss/ dest="{{tests_mktemp.stdout}}"
- name: tests - launch tests
environment:
PATH: "/usr/local/bin:{{ansible_env.PATH}}"
goss: path="{{tests_mktemp.stdout}}/{{tests_goss_file|basename}}" format=rspecish
register: tests_goss_results
ignore_errors: true
become: yes
- name: tests - remove temporary directory
file: path="{{tests_mktemp.stdout}}" state=absent
- name: tests - failure message
fail: msg="{{tests_goss_results.msg}}"
when: tests_goss_results is failed

View File

@ -0,0 +1,4 @@
group:
docker:
exists: true

View File

@ -0,0 +1,4 @@
gossfile:
package.yml: {}
service.yml: {}
group.yml: {}

View File

@ -0,0 +1,4 @@
gossfile:
package_centos-6.yml: {}
service.yml: {}
group.yml: {}

View File

@ -0,0 +1,4 @@
gossfile:
package_centos-7.yml: {}
service.yml: {}
group.yml: {}

View File

@ -0,0 +1,4 @@
gossfile:
package_debian.yml: {}
service.yml: {}
group.yml: {}

View File

@ -0,0 +1,3 @@
package:
docker:
installed: true

View File

@ -0,0 +1,3 @@
package:
docker-io:
installed: true

View File

@ -0,0 +1,3 @@
package:
docker-latest:
installed: true

View File

@ -0,0 +1,3 @@
package:
docker-engine:
installed: true

View File

@ -0,0 +1,5 @@
service:
docker:
enabled: true
running: true

View File

@ -0,0 +1,118 @@
#!/usr/bin/env python
import os
from ansible.module_utils.basic import *
DOCUMENTATION = '''
---
module: goss
author: Mathieu Corbin
short_description: Launch goss (https://github.com/aelsabbahy/goss) test
description:
- Launch goss tests. Always reports changed = False on success.
options:
path:
required: true
description:
- Test file to validate. Must be on the remote machine.
format:
required: false
description:
- change the output goss format.
- Goss format list: goss v --format => [documentation json junit nagios rspecish tap].
- Default: rspecish
output_file:
required: false
description:
- save the result of the goss command in a file whose path is output_file
examples:
- name: test goss file
goss:
path: "/path/to/file.yml"
- name: test goss files
goss:
path: "{{ item }}"
format: json
output_file: /my/output/file-{{ item }}
with_items: "{{ goss_files }}"
'''
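# check() builds and runs the goss CLI, e.g.:
#   goss -g /path/to/file.yml v --format rspecish
# (the --format flag is only appended when the 'format' option is set)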
# launch goss validate command on the file
def check(module, test_file_path, output_format):
cmd = ""
if output_format is not None:
cmd = "goss -g {0} v --format {1}".format(test_file_path, output_format)
else:
cmd = "goss -g {0} v".format(test_file_path)
return module.run_command(cmd)
# write goss result to output_file_path
def output_file(output_file_path, out):
if output_file_path is not None:
with open(output_file_path, 'w') as output_file:
output_file.write(out)
def main():
module = AnsibleModule(
argument_spec=dict(
path=dict(required=True, type='str'),
format=dict(required=False, type='str'),
output_file=dict(required=False, type='str'),
),
supports_check_mode=False
)
test_file_path = module.params['path'] # test file path
output_format = module.params['format'] # goss output format
output_file_path = module.params['output_file']
if test_file_path is None:
module.fail_json(msg="test file path is null")
test_file_path = os.path.expanduser(test_file_path)
# test if access to test file is ok
if not os.access(test_file_path, os.R_OK):
module.fail_json(msg="Test file %s not readable" % (test_file_path))
# test if test file is not a dir
if os.path.isdir(test_file_path):
module.fail_json(msg="Test file must be a file ! : %s" % (test_file_path))
(rc, out, err) = check(module, test_file_path, output_format)
if output_file_path is not None:
output_file_path = os.path.expanduser(output_file_path)
# check if output_file is a file
if output_file_path.endswith(os.sep):
module.fail_json(msg="output_file must be a file. Actually : %s "
% (output_file_path))
output_dirname = os.path.dirname(output_file_path)
# check if output directory exists
if not os.path.exists(output_dirname):
module.fail_json(msg="directory %s does not exists" % (output_dirname))
# check if writable
if not os.access(os.path.dirname(output_file_path), os.W_OK):
module.fail_json(msg="Destination %s not writable" % (os.path.dirname(output_file_path)))
# write goss result on the output file
output_file(output_file_path, out)
if rc is not None and rc != 0:
error_msg = "err : {0} ; out : {1}".format(err, out)
module.fail_json(msg=error_msg)
result = {}
result['stdout'] = out
result['changed'] = False
module.exit_json(**result)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,6 @@
---
# file: tests/main.yml
- include: goss.yml
tags:
- tests

View File

@ -0,0 +1,6 @@
---
# file: tests/playbook.yml
- hosts: '{{ target | default("all") }}'
tasks:
- import_tasks: main.yml

View File

@ -0,0 +1,11 @@
---
# file vars/centos-6.yml
docker_check_kernel: '2.6.32-431'
docker_opts: "other_args"
docker_package: docker-io
docker_packages:
- { "name": "docker", "state": "absent" }
- { "name": "epel-release", "state": "present" }
- { "name": "curl", "state": "present" }
- { "name": "device-mapper-libs", "state": "present" }

View File

@ -0,0 +1,13 @@
---
# file vars/centos-7.yml
docker_check_kernel: '3.10.0-327'
docker_package: docker-latest
docker_packages:
- { "name": "docker", "state": "absent" }
- { "name": "curl", "state": "present" }
- { "name": "device-mapper-libs", "state": "present" }
docker_services:
- docker-latest-storage-setup
- docker-latest

View File

@ -0,0 +1,15 @@
---
# file: vars/debian.yml
docker_check_kernel: '3.2'
docker_package: docker-ce
docker_packages:
- { "name": "apt-transport-https", "state": "present" }
- { "name": "ca-certificates", "state": "present" }
- { "name": "curl", "state": "present" }
- { "name": "gnupg2", "state": "present" }
- { "name": "software-properties-common", "state": "present" }
docker_init_config_directory: "/etc/default"
docker_opts: "DOCKER_OPTS"

1
ansible/roles/hosts/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
vars/local.yml

View File

@ -0,0 +1,12 @@
; DO NOT EDIT (unless you know what you are doing)
;
; This subdirectory is a git "subrepo", and this file is maintained by the
; git-subrepo command. See https://github.com/git-commands/git-subrepo#readme
;
[subrepo]
remote = ssh://git@github.com/1001Pharmacies/ansible-hosts
branch = master
commit = a495a6dbfae1f3c32f8e968c1ff2b3596ab42f27
parent = 85a259e1f4db43a63c58b4c8fe39b5d5e3b54053
method = merge
cmdver = 0.4.0

View File

@ -0,0 +1,4 @@
# Authors
* **Yann Autissier** - *Initial work* - [aya](https://github.com/aya)

View File

@ -0,0 +1,5 @@
# Changelog
## v1.0.0 (December 20, 2016)
* Initial release

View File

@ -0,0 +1,20 @@
MIT License
Copyright (c) 2016 Yann Autissier
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@ -0,0 +1,110 @@
# Ansible role to customize servers
An ansible role to customize your servers after a fresh install
## Role Variables
* `hosts_ssh_users` - A list of github usernames. We will fetch ssh keys from their github accounts and add them to the authorized_keys of the ansible user.
``` yaml
# a list of github usernames to get public keys
hosts_ssh_users: []
```
* `hosts_enable_zram` - Activate zram swap devices. This option creates virtual swap devices compressed in RAM. It can increase host performance, especially on hosts without physical swap.
``` yaml
# Activate zram swap devices
hosts_enable_zram: false
```
* `hosts_enable_rc` - Run user specific functions on ssh connection. This allows users to customize their session when connecting to a server, for example by automatically attaching a screen session.
``` yaml
# run user specific rc functions on ssh connection
hosts_enable_rc: false
```
* `hosts_rc_functions` - List of user specific functions to run on ssh connection. Here you can add any function to be called when you connect to the host. Default functions are available in the /etc/profile.d/rc_functions.sh file.
``` yaml
# list of rc functions to call at user connection
hosts_rc_functions:
# customize PS1 variable
- 01_custom_ps1
# customize PROMPT variable
# - 02_custom_prompt
# launch a ssh agent and load all private keys located in ~/.ssh
# - 03_ssh_agent
# create and/or attach a tmux session
# - 04_attach_tmux
# create and/or attach a screen session
- 05_attach_screen
```
* `hosts_rc_cleanup` - List of rc functions you do not want to run anymore. If you previously activated an rc function in `hosts_rc_functions`, add it to `hosts_rc_cleanup` to disable it.
``` yaml
# list of rc functions to cleanup (remove files)
# hosts_rc_cleanup:
# - 03_ssh_agent
# - 04_attach_tmux
```
* `hosts_etc_bashrc` - The location of the /etc/bashrc file on the current distro
``` yaml
# location of /etc/bashrc
hosts_etc_bashrc: /etc/bashrc
```
* `hosts_packages` - A list of packages to install on your servers. This list should be overridden per distro.
``` yaml
# packages specific to a distribution
hosts_packages: []
```
* `hosts_packages_common` - A common list of packages to install on your servers. This list should be common to all distros.
``` yaml
# packages common to all distributions
hosts_packages_common:
- { "name": "bash", "state": "present" }
- { "name": "ca-certificates", "state": "present" }
- { "name": "rsync", "state": "present" }
- { "name": "screen", "state": "present" }
- { "name": "tzdata", "state": "present" }
```
## Example
To launch this role on your `hosts` servers, run the default playbook.
``` bash
$ ansible-playbook playbook.yml
```
It will install the following packages: bash, ca-certificates, rsync, screen, tzdata and vim (plus libselinux-python on RedHat).
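Role variables can also be overridden at run time with `--extra-vars`. The invocation below is only a sketch, assuming the default playbook and inventory plus the variables documented above:
``` bash
$ ansible-playbook playbook.yml --extra-vars '{"hosts_enable_zram": true, "hosts_ssh_users": ["aya"]}'
```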
## Common configurations
This example configuration will add the [ssh keys from aya's github user](https://github.com/aya.keys) to your remote ~/.ssh/authorized_keys.
It will create a ~/.rc.d directory and touch 01_custom_ps1 and 02_attach_screen files in it, resulting in a customized PS1 and a screen session being automatically attached on (re)connection to the remote server.
``` yaml
hosts_ssh_users:
- aya
hosts_enable_rc: true
hosts_rc_functions:
- 01_custom_ps1
- 02_attach_screen
```
## Tests
To test this role on your `hosts` servers, run the tests/playbook.yml playbook.
``` bash
$ ansible-playbook tests/playbook.yml
```

View File

@ -0,0 +1,142 @@
---
# file: defaults/main.yml
# enable cloud-init
hosts_enable_cloudinit: false
# enable rc.local script
hosts_enable_local: false
# run user specific rc functions on ssh connection
hosts_enable_rc: false
# Activate zram swap devices on host
hosts_enable_zram: false
# git repositories to clone
hosts_git_repositories: []
# - { "repo": "ssh://git@github.com/aya/infra", "dest": "/src" }
# list of rc functions to call at user connection
hosts_rc_functions:
# customize PS1 variable
- 01_custom_ps1
# customize PROMPT variable
- 02_custom_prompt
# launch a ssh agent and load all private keys located in ~/.ssh
- 03_ssh_agent
# create and/or attach a tmux session
# - 04_attach_tmux
# create and/or attach a screen session
- 05_attach_screen
# display system information
- 06_pfetch
# list of rc functions to cleanup (remove files)
# hosts_rc_cleanup:
# - 03_ssh_agent
# - 04_attach_tmux
# packages to install
hosts_packages: []
# packages specific to a distribution
hosts_packages_distro: []
# packages common to all distributions
hosts_packages_common:
- { "name": "bash", "state": "present" }
- { "name": "ca-certificates", "state": "present" }
- { "name": "rsync", "state": "present" }
- { "name": "screen", "state": "present" }
- { "name": "tzdata", "state": "present" }
# a list of SSH private keys to copy
hosts_ssh_private_keys: []
# - ~/.ssh/id_rsa
# a list of public hosts keys to add to known_hosts
hosts_ssh_public_hosts_keys:
- { "name": "github.com", "key": "files/etc/ssh/github.com.pub" }
# a list of github usernames to get public keys
hosts_ssh_users: []
# - aya
# a list of environment variables to write to user ~/.env
hosts_user_env: []
# - SHELL
hosts_cloudinit_config:
users:
- default
disable_root: true
mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2']
resize_rootfs_tmp: /dev
ssh_pwauth: 0
preserve_hostname: false
datasource_list:
- Ec2
datasource:
Ec2:
metadata_urls:
- 'http://169.254.169.254'
timeout: 5
max_wait: 10
cloud_init_modules:
- migrator
- seed_random
- bootcmd
- write-files
- growpart
- resizefs
- disk_setup
- mounts
- set_hostname
- update_hostname
- update_etc_hosts
- resolv_conf
- ca-certs
- rsyslog
- users-groups
- ssh
cloud_config_modules:
- ssh-import-id
- locale
- set-passwords
- apk-configure
- ntp
- timezone
- disable-ec2-metadata
- runcmd
cloud_final_modules:
- package-update-upgrade-install
- puppet
- chef
- mcollective
- salt-minion
- rightscale_userdata
- scripts-vendor
- scripts-per-once
- scripts-per-boot
- scripts-per-instance
- scripts-user
- ssh-authkey-fingerprints
- keys-to-console
- phone-home
- final-message
- power-state-change
system_info:
distro: alpine
default_user:
name: alpine
lock_passwd: True
gecos: alpine Cloud User
groups: [adm, sudo]
sudo: ["ALL=(ALL) NOPASSWD:ALL"]
shell: /bin/ash
paths:
cloud_dir: /var/lib/cloud/
templates_dir: /etc/cloud/templates/
ssh_svcname: sshd

View File

@ -0,0 +1,105 @@
#!/bin/bash
### BEGIN INIT INFO
# Provides: zram
# Required-Start:
# Required-Stop:
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Increased Performance In Linux With zRam (Virtual Swap Compressed in RAM)
# Description: Adapted from systemd scripts at https://github.com/mystilleef/FedoraZram
### END INIT INFO
# percent of RAM assigned to zRam swap devices
RATIO=33
# priority of zRam swap devices
PRIORITY=1024
# load system specific configurations
[ -r /etc/default/zram ] && . /etc/default/zram
[ -r /etc/sysconfig/zram ] && . /etc/sysconfig/zram
start() {
# get number of CPUs
num_cpus=$(grep -c ^processor /proc/cpuinfo 2>/dev/null)
# if something goes wrong, assume we have 1
[ $? -eq 0 ] && [ "${num_cpus:-0}" != 0 ] || num_cpus=1
# load kernel module
if /sbin/modinfo zram 2>/dev/null | grep -q ' zram_num_devices:' 2>/dev/null; then
/sbin/modprobe zram zram_num_devices=$num_cpus
elif /sbin/modinfo zram 2>/dev/null | grep -q ' num_devices:' 2>/dev/null; then
/sbin/modprobe zram num_devices=$num_cpus
else
/bin/echo "Unable to load zram kernel module." && exit 1
fi
# get amount of memory
mem_total_kb=$(awk '$1 == "MemTotal:" {print $2}' /proc/meminfo)
# assign RATIO% of system memory to zram
mem_total_zram=$((mem_total_kb * ${RATIO:-33} / 100 * 1024))
# create one zram swap device per cpu
for i in $(seq 0 $((num_cpus - 1))); do
# enable lz4 if supported
/bin/grep -q lz4 /sys/block/zram$i/comp_algorithm 2>/dev/null && /bin/echo lz4 > /sys/block/zram$i/comp_algorithm 2>/dev/null
# initialize the device
/bin/echo $((mem_total_zram / num_cpus)) > /sys/block/zram$i/disksize 2>/dev/null
# create a swap filesystem
/sbin/mkswap /dev/zram$i >/dev/null 2>&1
# activate zram swap device
/bin/echo -n "Adding swap device /dev/zram$i... "
/sbin/swapon -p ${PRIORITY:-1024} /dev/zram$i >/dev/null 2>&1
result=$? && [ ${result} -eq 0 ] && /bin/echo "OK" || /bin/echo "ERROR"
done
[ ${result:-0} -ge ${return:-0} ] && return=${result}
}
stop() {
# remove swap devices
for dev in $(awk '$1 ~ /^\/dev\/zram/ {print $1}' /proc/swaps); do
/bin/echo -n "Removing swap device $dev... "
/sbin/swapoff $dev >/dev/null 2>&1
result=$? && [ ${result} -eq 0 ] && /bin/echo "OK" || /bin/echo "ERROR"
done
[ ${result:-0} -ge ${return:-0} ] && return=${result}
# remove zram kernel module
if grep -q "^zram " /proc/modules; then
sleep 1
/sbin/rmmod zram
fi
}
status() {
for block in /sys/block/zram*; do
[ -d "$block" ] && /bin/echo -n "/dev/${block/*\/}: " || continue
[ $(<$block/compr_data_size) -gt 0 ] \
&& compr_ratio=$(awk "BEGIN { printf \"%.2f\", "$(<$block/orig_data_size)/$(<$block/compr_data_size)" }") \
|| compr_ratio=0
[ -r $block/stat ] && [ -r $block/mm_stat ] \
&& /usr/bin/awk 'NF==11 {printf("read: %8d, write: %8d, wait: %8d", $1, $5, $11)}' $block/stat && /bin/echo -n ", " \
&& /usr/bin/awk 'NF==7 {printf("orig_data_size: %12d, compr_data_size: %12d", $1, $2)}' $block/mm_stat && /bin/echo -n ", " \
&& /bin/echo "compr_ratio: $compr_ratio" \
|| /bin/echo "read: $(<$block/num_reads), write: $(<$block/num_writes), orig_data_size: $(<$block/orig_data_size), compr_data_size: $(<$block/compr_data_size), compr_ratio: $compr_ratio"
done
}
case "$1" in
start)
start
;;
stop)
stop
;;
restart)
stop
start
;;
status)
status
;;
*)
/bin/echo "Usage: $0 {start|stop|restart|status}"
esac
exit ${return:-1}

View File

@ -0,0 +1,18 @@
#!/sbin/openrc-run
depend() {
need localmount swap
after bootmisc modules
}
start() {
/etc/init.d/zram start
}
stop() {
/etc/init.d/zram stop
}
status() {
/etc/init.d/zram status
}

View File

@ -0,0 +1,6 @@
UNAUTHORIZED ACCESS TO THIS DEVICE IS PROHIBITED
You must have explicit, authorized permission to access or configure this device.
Unauthorized attempts and actions to access or use this system may result in civil and/or criminal penalties.
All activities performed on this device are logged and monitored.

View File

@ -0,0 +1,2 @@
#!/bin/sh
cd /root && ( make ansible-pull > /var/log/ansible.log || reboot ) &

View File

@ -0,0 +1,39 @@
# shellcheck shell=sh
# test current shell flags
case $- in
# if we are in an interactive shell
*i*)
# load user stuff from files ~/.rc.d/*
for file in "${HOME}"/.rc.d/*; do
# read files only
if [ -f "${file}" ]; then
func_name=$(basename "${file}")
func_args=$(cat "${file}")
# at this stage, func_name can start with numbers to allow ordering function calls with file names starting with numbers
# func_name must start with a letter, remove all other characters at the beginning of func_name until a letter is found
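# e.g. a file named "01_custom_ps1" results in a call to a function named
# "custom_ps1" (if defined), with the file content passed as arguments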
while [ "${func_name}" != "" ] && [ "${func_name#[a-z]}" = "${func_name}" ]; do
# remove first char of func_name
func_name="${func_name#?}"
done
# call user function with args passed from the content of the file
command -v "${func_name}" >/dev/null 2>&1 && "${func_name}" "${func_args}"
fi
done
# load user stuff from env vars RC_*
IFS="$(printf '%b_' '\n')"; IFS="${IFS%_}"; for line in $(printenv 2>/dev/null |awk '$0 ~ /^RC_[1-9A-Z_]*=/'); do
func_name=$(printf '%s\n' "${line%%=*}" |awk '{print tolower(substr($0,4))}')
eval func_args=\$"${line%%=*}"
[ "${func_args}" = "false" ] && continue
[ "${func_args}" = "true" ] && unset func_args
# at this stage, func_name can start with numbers to allow ordering function calls with file names starting with numbers
# func_name must start with a letter, remove all other characters at the beginning of func_name until a letter is found
while [ "${func_name}" != "" ] && [ "${func_name#[a-z]}" = "${func_name}" ]; do
# remove first char of func_name
func_name="${func_name#?}"
done
# call user function with args passed from the value of the env var
command -v "${func_name}" >/dev/null 2>&1 && "${func_name}" "${func_args}"
done
unset IFS
;;
esac

View File

@ -0,0 +1,218 @@
# shellcheck shell=sh
## force() runs a command sine die
force() {
if [ $# -gt 0 ]; then
while true; do
"$@"
sleep 1
done
fi
}
## force8() runs a command sine die if not already running
force8() {
if [ $# -gt 0 ]; then
while true; do
# awk expression to match $@
[ "$(ps wwx -o args 2>/dev/null |awk -v field="${PS_X_FIELD:-1}" '
BEGIN {nargs=split("'"$*"'",args)}
# if match first field
$field == args[1] {
matched=1;
# match following fields
for (i=1;i<=NF-field;i++) {
if ($(i+field) == args[i+1]) {matched++}
}
# all fields matched
if (matched == nargs) {found++}
}
END {print found+0}'
)" = 0 ] && "$@"
sleep 1
done
fi
}
## load_average() prints the current load average
load_average() {
awk '{printf "%.1f\n", $1}' /proc/loadavg 2>/dev/null \
|| uptime 2>/dev/null |awk '{printf "%.1f\n", $(NF-2)}'
}
## process_count() prints number of "processes"/"running processes"/"D-state"
process_count() {
ps ax -o stat 2>/dev/null |awk '
$1 ~ /R/ {process_running++};
$1 ~ /D/ {process_dstate++};
END {
print NR-1"/"process_running+0"/"process_dstate+0;
}'
}
## prompt_set() exports custom PROMPT_COMMAND
prompt_set() {
case "${TERM}" in
screen*)
ESCAPE_CODE_DCS="\033k"
ESCAPE_CODE_ST="\033\\"
;;
linux*|xterm*|rxvt*)
ESCAPE_CODE_DCS="\033]0;"
ESCAPE_CODE_ST="\007"
;;
*)
;;
esac
# in a screen
if [ -n "${STY}" ]; then
export PROMPT_COMMAND='printf "${ESCAPE_CODE_DCS:-\033]0;}%s${ESCAPE_CODE_ST:-\007}" "${PWD##*/}"'
else
export PROMPT_COMMAND='printf "${ESCAPE_CODE_DCS:-\033]0;}%s@%s:%s${ESCAPE_CODE_ST:-\007}" "${USER}" "${HOSTNAME%%.*}" "${PWD##*/}"'
fi
unset ESCAPE_CODE_DCS ESCAPE_CODE_ST
}
## ps1_set() exports custom PS1
ps1_set() {
case "$0" in
*sh)
COLOR_DGRAY="\[\033[1;30m\]"
COLOR_RED="\[\033[01;31m\]"
COLOR_GREEN="\[\033[01;32m\]"
COLOR_BROWN="\[\033[0;33m\]"
COLOR_YELLOW="\[\033[01;33m\]"
COLOR_BLUE="\[\033[01;34m\]"
COLOR_CYAN="\[\033[0;36m\]"
COLOR_GRAY="\[\033[0;37m\]"
COLOR_NC="\[\033[0m\]"
;;
*)
;;
esac
PS1_COUNT="${COLOR_DGRAY}[${COLOR_BLUE}\$(process_count 2>/dev/null)${COLOR_DGRAY}|${COLOR_BLUE}\$(user_count 2>/dev/null)${COLOR_DGRAY}|${COLOR_BLUE}\$(load_average 2>/dev/null)${COLOR_DGRAY}]${COLOR_NC}"
PS1_END="${COLOR_DGRAY}\$(if [ \"\$(id -u)\" = 0 ]; then printf \"#\"; else printf \"\$\"; fi)${COLOR_NC}"
if type __git_ps1 >/dev/null 2>&1; then
PS1_GIT="\$(__git_ps1 2>/dev/null \" (%s)\")"
else
PS1_GIT="\$(BRANCH=\$(git rev-parse --abbrev-ref HEAD 2>/dev/null); [ -n \"\${BRANCH}\" ] && printf \" (\${BRANCH})\")"
fi
PS1_GIT="${COLOR_CYAN}${PS1_GIT}${COLOR_NC}"
PS1_HOSTNAME_COLOR="\`case \"\${ENV}\" in [Pp][Rr][0Oo][Dd]*) printf \"${COLOR_RED}\";; *) if [ -n \"\${ENV}\" ]; then printf \"${COLOR_YELLOW}\"; else printf \"${COLOR_GREEN}\"; fi;; esac\`"
PS1_HOSTNAME="${PS1_HOSTNAME_COLOR}\$(hostname |sed 's/\..*//')${COLOR_NC}"
PS1_USER_COLOR="\$(if [ \"\$(id -u)\" = 0 ]; then printf \"${COLOR_RED}\"; else printf \"${COLOR_BROWN}\"; fi)"
PS1_USER="${PS1_USER_COLOR}\$(id -nu)${COLOR_NC}"
PS1_WORKDIR="${COLOR_GRAY}\$(pwd |sed 's|^'\${HOME}'\(/.*\)*$|~\1|')${COLOR_NC}"
export PS1="${PS1_COUNT}${PS1_USER}${COLOR_DGRAY}@${PS1_HOSTNAME}${COLOR_DGRAY}:${PS1_WORKDIR}${PS1_GIT}${PS1_END} "
unset PS1_COUNT PS1_END PS1_GIT PS1_HOSTNAME PS1_HOSTNAME_COLOR PS1_USER PS1_USER_COLOR PS1_WORKDIR
}
## screen_attach() attaches existing screen session or creates a new one
screen_attach() {
command -v screen >/dev/null 2>&1 || return
SCREEN_SESSION="$(id -nu)@$(hostname |sed 's/\..*//')"
if [ -z "${STY}" ]; then
# attach screen in tmux window 0 only ;)
[ -n "${TMUX}" ] && [ "$(tmux list-window 2>/dev/null |awk '$NF == "(active)" {print $1}' |sed 's/:$//')" != "0" ] && return
printf 'Attaching screen.' && sleep 1 && printf '.' && sleep 1 && printf '.' && sleep 1
exec screen -xRR -S "${SCREEN_SESSION}"
fi
unset SCREEN_SESSION
}
## screen_detach() detaches current screen session
screen_detach() {
screen -d
}
## ssh_add() loads all private keys in ~/.ssh/ to ssh agent
ssh_add() {
command -v ssh-agent >/dev/null 2>&1 && command -v ssh-add >/dev/null 2>&1 || return
SSH_AGENT_DIR="/tmp/ssh-$(id -u)"
SSH_AGENT_SOCK="${SSH_AGENT_DIR}/agent@$(hostname |sed 's/\..*//')"
# launch a new agent
if [ -z "${SSH_AUTH_SOCK}" ]; then
[ ! -d "${SSH_AGENT_DIR}" ] && mkdir -p "${SSH_AGENT_DIR}" 2>/dev/null && chmod 0700 "${SSH_AGENT_DIR}"
# search for an already running agent
if ps wwx -o args |awk '$1 ~ "ssh-agent$" && $3 == "'"${SSH_AGENT_SOCK}"'"' |wc -l |grep -q 0; then
rm -f "${SSH_AGENT_SOCK}"
ssh-agent -a "${SSH_AGENT_SOCK}" >/dev/null 2>&1
fi
fi
# attach to agent
export SSH_AUTH_SOCK="${SSH_AUTH_SOCK:-${SSH_AGENT_SOCK}}"
# list private keys to add
# shellcheck disable=SC2068
for dir in ${@:-${HOME}/.ssh}; do
if [ "${SSH_ADD_RECURSIVE:-}" = true ]; then
GREP_RECURSIVE_FLAG="r"
else
GREP_RECURSIVE_CHAR="*"
fi
SSH_PRIVATE_KEYS="${SSH_PRIVATE_KEYS:-} ${dir}/id_rsa $(grep -l${GREP_RECURSIVE_FLAG:-} 'PRIVATE KEY' "${dir}"/"${GREP_RECURSIVE_CHAR:-}" 2>/dev/null |grep -vw "${dir}"/id_rsa)"
done
# shellcheck disable=SC2086
printf '%s\n' ${SSH_PRIVATE_KEYS} |while read -r file; do
[ -r "${file}" ] || continue
# add private key to agent
ssh-add -l |grep -q "$(ssh-keygen -lf "${file}" 2>/dev/null |awk '{print $2}')" 2>/dev/null || ssh-add "${file}"
done
unset GREP_RECURSIVE_CHAR GREP_RECURSIVE_FLAG SSH_AGENT_DIR SSH_AGENT_SOCK SSH_PRIVATE_KEYS
}
## ssh_del() removes all private keys in ~/.ssh/ from ssh agent
ssh_del() {
command -v ssh-add >/dev/null 2>&1 || return
# attach to agent
if [ -z "${SSH_AUTH_SOCK}" ]; then
return
fi
# list private keys to del
# shellcheck disable=SC2068
for dir in ${@:-${HOME}/.ssh}; do
if [ "${SSH_DEL_RECURSIVE:-}" = true ]; then
GREP_RECURSIVE_FLAG="r"
else
GREP_RECURSIVE_CHAR="*"
fi
SSH_PRIVATE_KEYS="${SSH_PRIVATE_KEYS:-} ${dir}/id_rsa $(grep -l${GREP_RECURSIVE_FLAG:-} 'PRIVATE KEY' "${dir}"/"${GREP_RECURSIVE_CHAR:-}" 2>/dev/null |grep -vw "${dir}"/id_rsa)"
done
# shellcheck disable=SC2086
printf '%s\n' ${SSH_PRIVATE_KEYS} |while read -r file; do
[ -r "${file}" ] || continue
# remove private key from agent
ssh-add -l |grep -q "$(ssh-keygen -lf "${file}" 2>/dev/null |awk '{print $2}')" 2>/dev/null && ssh-add -d "${file}"
done
unset GREP_RECURSIVE_CHAR GREP_RECURSIVE_FLAG SSH_PRIVATE_KEYS
}
## tmux_attach() attaches existing tmux session or creates a new one
tmux_attach() {
command -v tmux >/dev/null 2>&1 || return
TMUX_SESSION="$(id -nu)@$(hostname |sed 's/\..*//')"
if [ -z "${TMUX}" ]; then
printf 'Attaching tmux.' && sleep 1 && printf '.' && sleep 1 && printf '.' && sleep 1
exec tmux -L"${TMUX_SESSION}" new-session -A -s"${TMUX_SESSION}"
fi
unset TMUX_SESSION
}
## tmux_detach() detaches current tmux session
tmux_detach() {
tmux detach
}
## user_count() prints number of "users sessions"/"users"/"logged users"
user_count() {
ps ax -o user,tty 2>/dev/null |awk '
$2 ~ /^(pts|tty)/ {users_session++; logged[$1]++;};
{count[$1]++;}
END {
for (uc in count) {c = c" "uc;}; users_count=split(c,v," ")-1;
for (ul in logged) {l = l" "ul;}; users_logged=split(l,v," ")-1;
print users_session+0"/"users_count"/"users_logged;
}'
}

View File

@ -0,0 +1 @@
github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==

View File

@ -0,0 +1,12 @@
[Unit]
Description=Increased Performance In Linux With zRam (Virtual Swap Compressed in RAM)
[Service]
Type=oneshot
RemainAfterExit=yes
EnvironmentFile=-/etc/sysconfig/zram
ExecStart=/etc/init.d/zram start
ExecStop=/etc/init.d/zram stop
[Install]
WantedBy=sysinit.target

View File

@ -0,0 +1,11 @@
---
# file: handlers/main.yml
- name: update boot config
environment:
PATH: "{{ ansible_env.PATH }}:/usr/sbin:/sbin"
with_together:
- '{{ boot_config }}'
- '{{ boot_config_handler_notify.results }}'
command: "update-extlinux"
when: item.1.changed and item.0.dest == "/etc/update-extlinux.conf"

View File

@ -0,0 +1,46 @@
# Copyright (c) 2016 Centile
#
# MIT License
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
---
galaxy_info:
author: Yann Autissier
description: An ansible role to customize servers
company: Centile
license: MIT
platforms:
- name: EL
versions:
- all
- name: Fedora
versions:
- all
- name: Debian
versions:
- all
- name: Ubuntu
versions:
- all
- name: Alpine
versions:
- all
dependencies: []

View File

@ -0,0 +1,7 @@
---
# file: playbook.yml
- hosts: hosts
roles:
- .

View File

@ -0,0 +1,32 @@
---
# file: tasks/boot.yml
- name: boot - define config
set_fact:
boot_config:
# set clocksource at boot
- dest: /etc/update-extlinux.conf
line: 'default_kernel_opts="\1 clocksource=tsc tsc=reliable"'
regex: '^default_kernel_opts="((?!.*clocksource=tsc tsc=reliable).*)"$'
- name: boot - stat config file
changed_when: false
register: boot_config_stat
stat:
path: '{{item.dest}}'
with_items: '{{boot_config|default([])}}'
- name: boot - update config
become: yes
lineinfile:
backrefs: true
dest: '{{item.0.dest}}'
line: '{{item.0.line}}'
regex: '{{item.0.regex}}'
with_together:
- '{{boot_config|default([])}}'
- '{{boot_config_stat.results}}'
when: item.1.stat.exists
register: boot_config_handler_notify
notify:
- update boot config

View File

@ -0,0 +1,48 @@
---
# file: tasks/cloudinit.yml
- name: cloudinit - install cloud-init packages
package: name="cloud-init" state="present"
become: yes
when: hosts_enable_cloudinit|default(false) and ansible_os_family|lower != "alpine"
- name: cloudinit - install cloud-init packages (alpine)
  apk:
    name: "{{item.name}}"
    state: "{{item.state}}"
    repository:
      - http://dl-cdn.alpinelinux.org/alpine/edge/main
      - http://dl-cdn.alpinelinux.org/alpine/edge/testing
      - http://dl-cdn.alpinelinux.org/alpine/edge/community
      - http://dl-cdn.alpinelinux.org/alpine/latest-stable/main
      - http://dl-cdn.alpinelinux.org/alpine/latest-stable/community
  with_items:
    - { "name": "cloud-init", "state": "present" }
    - { "name": "cloud-init-openrc", "state": "present" }
  become: yes
  when: hosts_enable_cloudinit|default(false) and ansible_os_family|lower == "alpine"
- name: cloudinit - update /etc/cloud/cloud.cfg
template:
src: etc/cloud/cloud.cfg.j2
dest: /etc/cloud/cloud.cfg
force: yes
when: hosts_enable_cloudinit|default(false)
- name: cloudinit - activate service
service:
name: cloud-init
state: started
enabled: yes
when: hosts_enable_cloudinit|default(false) and ansible_service_mgr|lower != "openrc"
become: yes
- name: cloudinit - activate service (openrc)
service:
name: cloud-init
state: started
enabled: yes
runlevel: boot
when: hosts_enable_cloudinit|default(false) and ansible_service_mgr|lower == "openrc"
become: yes

View File

@ -0,0 +1,37 @@
---
# file: tasks/files.yml
- name: files - copy files
with_items:
- /etc/issue.net
- /etc/profile.d/rc.sh
- /etc/profile.d/rc_functions.sh
copy: src=../files/{{item}} dest={{item}} owner=root group=root mode=0644
become: yes
- name: files - copy binary files
with_items:
- /etc/init.d/zram
copy: src=../files/{{item}} dest={{item}} owner=root group=root mode=0755
become: yes
- name: files - copy systemd files
with_items:
- /etc/systemd/system/zram.service
copy: src=../files/{{item}} dest={{item}} owner=root group=root mode=0644
when: ansible_service_mgr|lower == "systemd"
become: yes
- name: files - copy openrc files
with_items:
- /etc/init.d/zram-openrc
- /etc/local.d/ansible.start
copy: src=../files/{{item}} dest={{item}} owner=root group=root mode=0755
when: ansible_service_mgr|lower == "openrc"
become: yes
- name: files - get remote binary files
with_items:
- https://raw.githubusercontent.com/dylanaraps/pfetch/master/pfetch
get_url: url={{item}} dest=/usr/local/bin owner=root group=root mode=0755
become: yes

View File

@ -0,0 +1,10 @@
---
# file: tasks/git.yml
- name: git - clone repositories
with_items: "{{ hosts_git_repositories|default([]) }}"
git:
repo: "{{ item.repo }}"
dest: "{{ item.dest|default('/src') }}"
key_file: "{{ item.key_file|default('~/.ssh/id_rsa') }}"
version: "{{ item.version|default('HEAD') }}"

View File

@ -0,0 +1,30 @@
---
# file: tasks/main.yml
- import_tasks: vars.yml
tags:
- vars
- import_tasks: boot.yml
tags:
- boot
- import_tasks: cloudinit.yml
tags:
- cloudinit
- import_tasks: packages.yml
tags:
- packages
- import_tasks: ssh.yml
tags:
- ssh
- import_tasks: files.yml
tags:
- files
- import_tasks: git.yml
tags:
- git
- import_tasks: service.yml
tags:
- service
- import_tasks: user.yml
tags:
- user

Some files were not shown because too many files have changed in this diff.