diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..c3d7a01
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,3 @@
+.git*
+build/cache/*
+build/iso/*
diff --git a/.env.dist b/.env.dist
new file mode 100644
index 0000000..a7a04e1
--- /dev/null
+++ b/.env.dist
@@ -0,0 +1,11 @@
+APP=yaip
+APP_DOMAIN=${ENV}.${DOMAIN}
+APP_HOST=${APP}.${APP_DOMAIN}
+APP_NAME=${APP}
+APP_PATH=/${ENV_SUFFIX}
+APP_SCHEME=http
+APP_URI=${APP_HOST}${APP_PATH}
+APP_URL=${APP_SCHEME}://${APP_URI}
+DOMAIN=localhost
+ENV=dist
+SSH_DIR=${HOME}/.ssh
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..b1c9b8b
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+/.env
+/build
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..e9aa6a2
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,16 @@
+APP_TYPE := infra
+include make/include.mk
+
+##
+# APP
+
+app-build: build-rm infra-base
+	$(call install-parameters,,curator,build)
+	$(call make,docker-compose-build)
+	$(call make,up)
+	$(call make,docker-compose-exec ARGS='rm -Rf /root/.npm /log-buffer/*' SERVICE=logagent)
+	$(call make,docker-commit)
+
+app-deploy: deploy-ping
+
+app-install: base node up
diff --git a/ansible/.gitignore b/ansible/.gitignore
new file mode 100644
index 0000000..b9751b5
--- /dev/null
+++ b/ansible/.gitignore
@@ -0,0 +1,2 @@
+playbook.retry
+inventories/packer-provisioner-ansible*
diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg
new file mode 100644
index 0000000..aee8654
--- /dev/null
+++ b/ansible/ansible.cfg
@@ -0,0 +1,9 @@
+[defaults]
+inventory = inventories
+roles_path = roles
+filter_plugins = plugins/filter
+host_key_checking = False
+
+[ssh_connection]
+scp_if_ssh = smart
+pipelining = True
diff --git a/ansible/ec2.py b/ansible/ec2.py
new file mode 100755
index 0000000..4d6a3f2
--- /dev/null
+++ b/ansible/ec2.py
@@ -0,0 +1,1711 @@
+#!/usr/bin/env python3
+
+'''
+EC2 external inventory script
+=================================
+
+Generates inventory that Ansible can understand by making API requests to
+AWS EC2 using the Boto library.
+
+NOTE: This script assumes Ansible is being executed where the environment
+variables needed for Boto have already been set:
+    export AWS_ACCESS_KEY_ID='AK123'
+    export AWS_SECRET_ACCESS_KEY='abc123'
+
+Optionally, set the AWS_REGION or AWS_DEFAULT_REGION environment variable
+if the configured region is 'auto'.
+
+This script also assumes that there is an ec2.ini file alongside it. To specify a
+different path to ec2.ini, define the EC2_INI_PATH environment variable:
+
+    export EC2_INI_PATH=/path/to/my_ec2.ini
+
+If you're using Eucalyptus, you need to set the above variables and also define:
+
+    export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus
+
+If you're using boto profiles (requires boto >= 2.24.0), you can choose a profile
+using the --boto-profile command line argument (e.g. ec2.py --boto-profile prod) or using
+the AWS_PROFILE variable:
+
+    AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml
+
+For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html
+
+You can filter for specific EC2 instances by creating an environment variable
+named EC2_INSTANCE_FILTERS, which has the same format as the instance_filters
+entry documented in ec2.ini.
+For example, to find all hosts whose name begins
+with 'webserver', one might use:
+
+    export EC2_INSTANCE_FILTERS='tag:Name=webserver*'
+
+When run against a specific host, this script returns the following variables:
+ - ec2_ami_launch_index
+ - ec2_architecture
+ - ec2_association
+ - ec2_attachTime
+ - ec2_attachment
+ - ec2_attachmentId
+ - ec2_block_devices
+ - ec2_client_token
+ - ec2_deleteOnTermination
+ - ec2_description
+ - ec2_deviceIndex
+ - ec2_dns_name
+ - ec2_eventsSet
+ - ec2_group_name
+ - ec2_hypervisor
+ - ec2_id
+ - ec2_image_id
+ - ec2_instanceState
+ - ec2_instance_type
+ - ec2_ipOwnerId
+ - ec2_ip_address
+ - ec2_item
+ - ec2_kernel
+ - ec2_key_name
+ - ec2_launch_time
+ - ec2_monitored
+ - ec2_monitoring
+ - ec2_networkInterfaceId
+ - ec2_ownerId
+ - ec2_persistent
+ - ec2_placement
+ - ec2_platform
+ - ec2_previous_state
+ - ec2_private_dns_name
+ - ec2_private_ip_address
+ - ec2_publicIp
+ - ec2_public_dns_name
+ - ec2_ramdisk
+ - ec2_reason
+ - ec2_region
+ - ec2_requester_id
+ - ec2_root_device_name
+ - ec2_root_device_type
+ - ec2_security_group_ids
+ - ec2_security_group_names
+ - ec2_shutdown_state
+ - ec2_sourceDestCheck
+ - ec2_spot_instance_request_id
+ - ec2_state
+ - ec2_state_code
+ - ec2_state_reason
+ - ec2_status
+ - ec2_subnet_id
+ - ec2_tenancy
+ - ec2_virtualization_type
+ - ec2_vpc_id
+
+These variables are pulled out of a boto.ec2.instance object. The variable
+spellings are inconsistent (a mix of camelCase and underscores) because the
+script simply loops through every variable the object exposes. When both
+spellings exist, prefer the underscore variants.
+
+In addition, if an instance has AWS tags associated with it, each tag is a new
+variable named:
+ - ec2_tag_[Key] = [Value]
+
+Security groups are comma-separated in 'ec2_security_group_ids' and
+'ec2_security_group_names'.
+
+When destination_format and destination_format_tags are specified, the
+destination can be built from the instance tags and attributes. The behavior
+will first check the user-defined tags, then proceed to check instance
+attributes, and finally, if neither is found, 'nil' will be used instead.
+
+'my_instance': {
+    'region': 'us-east-1',             # attribute
+    'availability_zone': 'us-east-1a', # attribute
+    'private_dns_name': '172.31.0.1',  # attribute
+    'ec2_tag_deployment': 'blue',      # tag
+    'ec2_tag_clusterid': 'ansible',    # tag
+    'ec2_tag_Name': 'webserver',       # tag
+    ...
+}
+
+Inside the ec2.ini file, the following settings are specified:
+...
+destination_format: {0}-{1}-{2}-{3}
+destination_format_tags: Name,clusterid,deployment,private_dns_name
+...
+
+These settings would produce the following destination_format:
+'webserver-ansible-blue-172.31.0.1'
+'''
+
+# (c) 2012, Peter Sankauskas
+#
+# This file is part of Ansible,
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
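+# For orientation, the JSON printed by --list is shaped roughly like the
+# sketch below. Hostnames, IDs, and group names here are purely illustrative,
+# assuming a single running instance and the default group_by settings:
+#
+#   {
+#     "_meta": {
+#       "hostvars": {
+#         "203_0_113_10": {
+#           "ansible_host": "203.0.113.10",
+#           "ec2_id": "i-0abc12345",
+#           "ec2_state": "running"
+#         }
+#       }
+#     },
+#     "ec2": ["203_0_113_10"],
+#     "us-east-1": ["203_0_113_10"],
+#     "type_t2_micro": ["203_0_113_10"]
+#   }
+#
+# (among other groups, e.g. one per instance ID, AMI, and tag)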
+ +###################################################################### + +import sys +import os +import argparse +import re +from time import time +from copy import deepcopy +import boto +from boto import ec2 +from boto import rds +from boto import elasticache +from boto import route53 +from boto import sts + +from ansible.module_utils import six +from ansible.module_utils import ec2 as ec2_utils +from ansible.module_utils.six.moves import configparser + +HAS_BOTO3 = False +try: + import boto3 # noqa + HAS_BOTO3 = True +except ImportError: + pass + +from collections import defaultdict + +import json + +DEFAULTS = { + 'all_elasticache_clusters': 'False', + 'all_elasticache_nodes': 'False', + 'all_elasticache_replication_groups': 'False', + 'all_instances': 'False', + 'all_rds_instances': 'False', + 'aws_access_key_id': '', + 'aws_secret_access_key': '', + 'aws_security_token': '', + 'boto_profile': '', + 'cache_max_age': '300', + 'cache_path': '~/.ansible/tmp', + 'destination_variable': 'public_dns_name', + 'elasticache': 'True', + 'eucalyptus': 'False', + 'eucalyptus_host': '', + 'expand_csv_tags': 'False', + 'group_by_ami_id': 'True', + 'group_by_availability_zone': 'True', + 'group_by_aws_account': 'False', + 'group_by_elasticache_cluster': 'True', + 'group_by_elasticache_engine': 'True', + 'group_by_elasticache_parameter_group': 'True', + 'group_by_elasticache_replication_group': 'True', + 'group_by_instance_id': 'True', + 'group_by_instance_state': 'False', + 'group_by_instance_type': 'True', + 'group_by_key_pair': 'True', + 'group_by_platform': 'True', + 'group_by_rds_engine': 'True', + 'group_by_rds_parameter_group': 'True', + 'group_by_region': 'True', + 'group_by_route53_names': 'True', + 'group_by_security_group': 'True', + 'group_by_tag_keys': 'True', + 'group_by_tag_none': 'True', + 'group_by_vpc_id': 'True', + 'hostname_variable': '', + 'iam_role': '', + 'include_rds_clusters': 'False', + 'nested_groups': 'False', + 'pattern_exclude': '', + 'pattern_include': '', + 'rds': 'False', + 'regions': 'all', + 'regions_exclude': 'us-gov-west-1, cn-north-1', + 'replace_dash_in_groups': 'True', + 'route53': 'False', + 'route53_excluded_zones': '', + 'route53_hostnames': '', + 'stack_filters': 'False', + 'vpc_destination_variable': 'ip_address' +} + + +class Ec2Inventory(object): + + def _empty_inventory(self): + return {"_meta": {"hostvars": {}}} + + def __init__(self): + ''' Main execution path ''' + + # Inventory grouped by instance IDs, tags, security groups, regions, + # and availability zones + self.inventory = self._empty_inventory() + + self.aws_account_id = None + + # Index of hostname (address) to instance ID + self.index = {} + + # Boto profile to use (if any) + self.boto_profile = None + + # AWS credentials. 
+        self.credentials = {}
+
+        # Parse CLI arguments and read settings
+        self.parse_cli_args()
+        self.read_settings()
+
+        # Make sure that profile_name is not passed at all if not set
+        # as pre-2.24 boto will fall over otherwise
+        if self.boto_profile:
+            if not hasattr(boto.ec2.EC2Connection, 'profile_name'):
+                self.fail_with_error("boto version must be >= 2.24 to use profile")
+
+        # Cache
+        if self.args.refresh_cache:
+            self.do_api_calls_update_cache()
+        elif not self.is_cache_valid():
+            self.do_api_calls_update_cache()
+
+        # Data to print
+        if self.args.host:
+            data_to_print = self.get_host_info()
+
+        elif self.args.list:
+            # Display list of instances for inventory
+            if self.inventory == self._empty_inventory():
+                data_to_print = self.get_inventory_from_cache()
+            else:
+                data_to_print = self.json_format_dict(self.inventory, True)
+
+        print(data_to_print)
+
+    def is_cache_valid(self):
+        ''' Determines whether the cache files have expired or are still valid '''
+
+        if os.path.isfile(self.cache_path_cache):
+            mod_time = os.path.getmtime(self.cache_path_cache)
+            current_time = time()
+            if (mod_time + self.cache_max_age) > current_time:
+                if os.path.isfile(self.cache_path_index):
+                    return True
+
+        return False
+
+    def read_settings(self):
+        ''' Reads the settings from the ec2.ini file '''
+
+        scriptbasename = __file__
+        scriptbasename = os.path.basename(scriptbasename)
+        scriptbasename = scriptbasename.replace('.py', '')
+
+        defaults = {
+            'ec2': {
+                'ini_fallback': os.path.join(os.path.dirname(__file__), 'ec2.ini'),
+                'ini_path': os.path.join(os.path.dirname(__file__), '%s.ini' % scriptbasename)
+            }
+        }
+
+        if six.PY3:
+            config = configparser.ConfigParser(DEFAULTS)
+        else:
+            config = configparser.SafeConfigParser(DEFAULTS)
+        ec2_ini_path = os.environ.get('EC2_INI_PATH', defaults['ec2']['ini_path'])
+        ec2_ini_path = os.path.expanduser(os.path.expandvars(ec2_ini_path))
+
+        if not os.path.isfile(ec2_ini_path):
+            ec2_ini_path = os.path.expanduser(defaults['ec2']['ini_fallback'])
+
+        if os.path.isfile(ec2_ini_path):
+            config.read(ec2_ini_path)
+
+        # Add empty sections if they don't exist
+        try:
+            config.add_section('ec2')
+        except configparser.DuplicateSectionError:
+            pass
+
+        try:
+            config.add_section('credentials')
+        except configparser.DuplicateSectionError:
+            pass
+
+        # Is this Eucalyptus?
+        self.eucalyptus = config.getboolean('ec2', 'eucalyptus')
+        self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')
+
+        # Regions
+        self.regions = []
+        config_regions = config.get('ec2', 'regions')
+        if (config_regions == 'all'):
+            if self.eucalyptus_host:
+                self.regions.append(boto.connect_euca(host=self.eucalyptus_host, **self.credentials).region.name)
+            else:
+                config_regions_exclude = config.get('ec2', 'regions_exclude')
+
+                for region_info in ec2.regions():
+                    if region_info.name not in config_regions_exclude:
+                        self.regions.append(region_info.name)
+        else:
+            self.regions = config_regions.split(",")
+            if 'auto' in self.regions:
+                env_region = os.environ.get('AWS_REGION')
+                if env_region is None:
+                    env_region = os.environ.get('AWS_DEFAULT_REGION')
+                self.regions = [env_region]
+
+        # Destination addresses
+        self.destination_variable = config.get('ec2', 'destination_variable')
+        self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
+        self.hostname_variable = config.get('ec2', 'hostname_variable')
+
+        if config.has_option('ec2', 'destination_format') and \
+                config.has_option('ec2', 'destination_format_tags'):
+            self.destination_format = config.get('ec2', 'destination_format')
+            self.destination_format_tags = config.get('ec2', 'destination_format_tags').split(',')
+        else:
+            self.destination_format = None
+            self.destination_format_tags = None
+
+        # Route53
+        self.route53_enabled = config.getboolean('ec2', 'route53')
+        self.route53_hostnames = config.get('ec2', 'route53_hostnames')
+
+        self.route53_excluded_zones = []
+        self.route53_excluded_zones = [a for a in config.get('ec2', 'route53_excluded_zones').split(',') if a]
+
+        # Include RDS instances?
+        self.rds_enabled = config.getboolean('ec2', 'rds')
+
+        # Include RDS cluster instances?
+        self.include_rds_clusters = config.getboolean('ec2', 'include_rds_clusters')
+
+        # Include ElastiCache instances?
+        self.elasticache_enabled = config.getboolean('ec2', 'elasticache')
+
+        # Return all EC2 instances?
+        self.all_instances = config.getboolean('ec2', 'all_instances')
+
+        # Instance states to be gathered in inventory. Default is 'running'.
+        # Setting 'all_instances' to 'yes' overrides this option.
+        ec2_valid_instance_states = [
+            'pending',
+            'running',
+            'shutting-down',
+            'terminated',
+            'stopping',
+            'stopped'
+        ]
+        self.ec2_instance_states = []
+        if self.all_instances:
+            self.ec2_instance_states = ec2_valid_instance_states
+        elif config.has_option('ec2', 'instance_states'):
+            for instance_state in config.get('ec2', 'instance_states').split(','):
+                instance_state = instance_state.strip()
+                if instance_state not in ec2_valid_instance_states:
+                    continue
+                self.ec2_instance_states.append(instance_state)
+        else:
+            self.ec2_instance_states = ['running']
+
+        # Return all RDS instances? (if RDS is enabled)
+        self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances')
+
+        # Return all ElastiCache replication groups? (if ElastiCache is enabled)
+        self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups')
+
+        # Return all ElastiCache clusters? (if ElastiCache is enabled)
+        self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters')
+
+        # Return all ElastiCache nodes?
(if ElastiCache is enabled) + self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes') + + # boto configuration profile (prefer CLI argument then environment variables then config file) + self.boto_profile = self.args.boto_profile or \ + os.environ.get('AWS_PROFILE') or \ + config.get('ec2', 'boto_profile') + + # AWS credentials (prefer environment variables) + if not (self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID') or + os.environ.get('AWS_PROFILE')): + + aws_access_key_id = config.get('credentials', 'aws_access_key_id') + aws_secret_access_key = config.get('credentials', 'aws_secret_access_key') + aws_security_token = config.get('credentials', 'aws_security_token') + + if aws_access_key_id: + self.credentials = { + 'aws_access_key_id': aws_access_key_id, + 'aws_secret_access_key': aws_secret_access_key + } + if aws_security_token: + self.credentials['security_token'] = aws_security_token + + # Cache related + cache_dir = os.path.expanduser(config.get('ec2', 'cache_path')) + if self.boto_profile: + cache_dir = os.path.join(cache_dir, 'profile_' + self.boto_profile) + if not os.path.exists(cache_dir): + os.makedirs(cache_dir) + + cache_name = 'ansible-ec2' + cache_id = self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID', self.credentials.get('aws_access_key_id')) + if cache_id: + cache_name = '%s-%s' % (cache_name, cache_id) + cache_name += '-' + str(abs(hash(__file__)))[1:7] + self.cache_path_cache = os.path.join(cache_dir, "%s.cache" % cache_name) + self.cache_path_index = os.path.join(cache_dir, "%s.index" % cache_name) + self.cache_max_age = config.getint('ec2', 'cache_max_age') + + self.expand_csv_tags = config.getboolean('ec2', 'expand_csv_tags') + + # Configure nested groups instead of flat namespace. + self.nested_groups = config.getboolean('ec2', 'nested_groups') + + # Replace dash or not in group names + self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups') + + # IAM role to assume for connection + self.iam_role = config.get('ec2', 'iam_role') + + # Configure which groups should be created. + + group_by_options = [a for a in DEFAULTS if a.startswith('group_by')] + for option in group_by_options: + setattr(self, option, config.getboolean('ec2', option)) + + # Do we need to just include hosts that match a pattern? + self.pattern_include = config.get('ec2', 'pattern_include') + if self.pattern_include: + self.pattern_include = re.compile(self.pattern_include) + + # Do we need to exclude hosts that match a pattern? + self.pattern_exclude = config.get('ec2', 'pattern_exclude') + if self.pattern_exclude: + self.pattern_exclude = re.compile(self.pattern_exclude) + + # Do we want to stack multiple filters? + self.stack_filters = config.getboolean('ec2', 'stack_filters') + + # Instance filters (see boto and EC2 API docs). Ignore invalid filters. 
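+        # As an illustration (the values here are hypothetical), a setting such as
+        #   instance_filters = tag:env=staging&tag:app=web, instance-state-name=running
+        # is parsed below into two filter sets:
+        #   [{'tag:env': 'staging', 'tag:app': 'web'}, {'instance-state-name': 'running'}]
+        # Each set becomes a separate get_all_instances() call, while stack_filters
+        # merges all sets into a single AND-ed dict (in which case '&' within a
+        # set is rejected).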
+ self.ec2_instance_filters = [] + + if config.has_option('ec2', 'instance_filters') or 'EC2_INSTANCE_FILTERS' in os.environ: + filters = os.getenv('EC2_INSTANCE_FILTERS', config.get('ec2', 'instance_filters') if config.has_option('ec2', 'instance_filters') else '') + + if self.stack_filters and '&' in filters: + self.fail_with_error("AND filters along with stack_filter enabled is not supported.\n") + + filter_sets = [f for f in filters.split(',') if f] + + for filter_set in filter_sets: + filters = {} + filter_set = filter_set.strip() + for instance_filter in filter_set.split("&"): + instance_filter = instance_filter.strip() + if not instance_filter or '=' not in instance_filter: + continue + filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)] + if not filter_key: + continue + filters[filter_key] = filter_value + self.ec2_instance_filters.append(filters.copy()) + + def parse_cli_args(self): + ''' Command line argument processing ''' + + parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2') + parser.add_argument('--list', action='store_true', default=True, + help='List instances (default: True)') + parser.add_argument('--host', action='store', + help='Get all the variables about a specific instance') + parser.add_argument('--refresh-cache', action='store_true', default=False, + help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)') + parser.add_argument('--profile', '--boto-profile', action='store', dest='boto_profile', + help='Use boto profile for connections to EC2') + self.args = parser.parse_args() + + def do_api_calls_update_cache(self): + ''' Do API calls to each region, and save data in cache files ''' + + if self.route53_enabled: + self.get_route53_records() + + for region in self.regions: + self.get_instances_by_region(region) + if self.rds_enabled: + self.get_rds_instances_by_region(region) + if self.elasticache_enabled: + self.get_elasticache_clusters_by_region(region) + self.get_elasticache_replication_groups_by_region(region) + if self.include_rds_clusters: + self.include_rds_clusters_by_region(region) + + self.write_to_cache(self.inventory, self.cache_path_cache) + self.write_to_cache(self.index, self.cache_path_index) + + def connect(self, region): + ''' create connection to api server''' + if self.eucalyptus: + conn = boto.connect_euca(host=self.eucalyptus_host, **self.credentials) + conn.APIVersion = '2010-08-31' + else: + conn = self.connect_to_aws(ec2, region) + return conn + + def boto_fix_security_token_in_profile(self, connect_args): + ''' monkey patch for boto issue boto/boto#2100 ''' + profile = 'profile ' + self.boto_profile + if boto.config.has_option(profile, 'aws_security_token'): + connect_args['security_token'] = boto.config.get(profile, 'aws_security_token') + return connect_args + + def connect_to_aws(self, module, region): + connect_args = deepcopy(self.credentials) + + # only pass the profile name if it's set (as it is not supported by older boto versions) + if self.boto_profile: + connect_args['profile_name'] = self.boto_profile + self.boto_fix_security_token_in_profile(connect_args) + elif os.environ.get('AWS_SESSION_TOKEN'): + connect_args['security_token'] = os.environ.get('AWS_SESSION_TOKEN') + + if self.iam_role: + sts_conn = sts.connect_to_region(region, **connect_args) + role = sts_conn.assume_role(self.iam_role, 'ansible_dynamic_inventory') + connect_args['aws_access_key_id'] = role.credentials.access_key + 
connect_args['aws_secret_access_key'] = role.credentials.secret_key
+            connect_args['security_token'] = role.credentials.session_token
+
+        conn = module.connect_to_region(region, **connect_args)
+        # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
+        if conn is None:
+            self.fail_with_error("region name: %s likely not supported, or AWS is down. Connection to region failed." % region)
+        return conn
+
+    def get_instances_by_region(self, region):
+        ''' Makes an AWS EC2 API call to get the list of instances in a
+        particular region '''
+
+        try:
+            conn = self.connect(region)
+            reservations = []
+            if self.ec2_instance_filters:
+                if self.stack_filters:
+                    filters_dict = {}
+                    for filters in self.ec2_instance_filters:
+                        filters_dict.update(filters)
+                    reservations.extend(conn.get_all_instances(filters=filters_dict))
+                else:
+                    for filters in self.ec2_instance_filters:
+                        reservations.extend(conn.get_all_instances(filters=filters))
+            else:
+                reservations = conn.get_all_instances()
+
+            # Pull the tags back in a second step
+            # AWS are on record as saying that the tags fetched in the first `get_all_instances` request are not
+            # reliable and may be missing, and the only way to guarantee they are there is by calling `get_all_tags`
+            instance_ids = []
+            for reservation in reservations:
+                instance_ids.extend([instance.id for instance in reservation.instances])
+
+            max_filter_value = 199
+            tags = []
+            for i in range(0, len(instance_ids), max_filter_value):
+                tags.extend(conn.get_all_tags(filters={'resource-type': 'instance', 'resource-id': instance_ids[i:i + max_filter_value]}))
+
+            tags_by_instance_id = defaultdict(dict)
+            for tag in tags:
+                tags_by_instance_id[tag.res_id][tag.name] = tag.value
+
+            if (not self.aws_account_id) and reservations:
+                self.aws_account_id = reservations[0].owner_id
+
+            for reservation in reservations:
+                for instance in reservation.instances:
+                    instance.tags = tags_by_instance_id[instance.id]
+                    self.add_instance(instance, region)
+
+        except boto.exception.BotoServerError as e:
+            if e.error_code == 'AuthFailure':
+                error = self.get_auth_error_message()
+            else:
+                backend = 'Eucalyptus' if self.eucalyptus else 'AWS'
+                error = "Error connecting to %s backend.\n%s" % (backend, e.message)
+            self.fail_with_error(error, 'getting EC2 instances')
+
+    def tags_match_filters(self, tags):
+        ''' Return True if the given tags match the configured filters '''
+        if not self.ec2_instance_filters:
+            return True
+
+        for filters in self.ec2_instance_filters:
+            for filter_name, filter_value in filters.items():
+                if filter_name[:4] != 'tag:':
+                    continue
+                filter_name = filter_name[4:]
+                if filter_name not in tags:
+                    if self.stack_filters:
+                        return False
+                    continue
+                if isinstance(filter_value, list):
+                    if self.stack_filters and tags[filter_name] not in filter_value:
+                        return False
+                    if not self.stack_filters and tags[filter_name] in filter_value:
+                        return True
+                if isinstance(filter_value, six.string_types):
+                    if self.stack_filters and tags[filter_name] != filter_value:
+                        return False
+                    if not self.stack_filters and tags[filter_name] == filter_value:
+                        return True
+
+        return self.stack_filters
+
+    def get_rds_instances_by_region(self, region):
+        ''' Makes an AWS API call to get the list of RDS instances in a
+        particular region '''
+
+        if not HAS_BOTO3:
+            self.fail_with_error("Working with RDS instances requires boto3 - please install boto3 and try again",
+                                 "getting RDS instances")
+
+        client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials)
+        db_instances = client.describe_db_instances()
+
+        try:
+            conn = self.connect_to_aws(rds, region)
+            if conn:
+                marker = None
+                while True:
+                    instances = conn.get_all_dbinstances(marker=marker)
+                    marker = instances.marker
+                    for index, instance in enumerate(instances):
+                        # Add tags to instances.
+                        instance.arn = db_instances['DBInstances'][index]['DBInstanceArn']
+                        tags = client.list_tags_for_resource(ResourceName=instance.arn)['TagList']
+                        instance.tags = {}
+                        for tag in tags:
+                            instance.tags[tag['Key']] = tag['Value']
+                        if self.tags_match_filters(instance.tags):
+                            self.add_rds_instance(instance, region)
+                    if not marker:
+                        break
+        except boto.exception.BotoServerError as e:
+            error = e.reason
+
+            if e.error_code == 'AuthFailure':
+                error = self.get_auth_error_message()
+            elif e.error_code == "OptInRequired":
+                error = "RDS hasn't been enabled for this account yet. " \
+                    "You must either log in to the RDS service through the AWS console to enable it, " \
+                    "or set 'rds = False' in ec2.ini"
+            elif not e.reason == "Forbidden":
+                error = "Looks like AWS RDS is down:\n%s" % e.message
+            self.fail_with_error(error, 'getting RDS instances')
+
+    def include_rds_clusters_by_region(self, region):
+        if not HAS_BOTO3:
+            self.fail_with_error("Working with RDS clusters requires boto3 - please install boto3 and try again",
+                                 "getting RDS clusters")
+
+        client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials)
+
+        marker, clusters = '', []
+        while marker is not None:
+            resp = client.describe_db_clusters(Marker=marker)
+            clusters.extend(resp["DBClusters"])
+            marker = resp.get('Marker', None)
+
+        account_id = boto.connect_iam().get_user().arn.split(':')[4]
+        c_dict = {}
+        for c in clusters:
+            # Remove these datetime objects as there is no JSON serialisation
+            # currently in place and we don't need the data yet
+            if 'EarliestRestorableTime' in c:
+                del c['EarliestRestorableTime']
+            if 'LatestRestorableTime' in c:
+                del c['LatestRestorableTime']
+
+            if not self.ec2_instance_filters:
+                matches_filter = True
+            else:
+                matches_filter = False
+
+            try:
+                # arn:aws:rds:<region>:<account number>:<resourcetype>:<name>
+                tags = client.list_tags_for_resource(
+                    ResourceName='arn:aws:rds:' + region + ':' + account_id + ':cluster:' + c['DBClusterIdentifier'])
+                c['Tags'] = tags['TagList']
+
+                if self.ec2_instance_filters:
+                    for filters in self.ec2_instance_filters:
+                        for filter_key, filter_values in filters.items():
+                            # Get the AWS tag key, e.g. tag:env will be 'env'
+                            tag_name = filter_key.split(":", 1)[1]
+                            # filter_values is a list (if you put multiple values for the same tag name)
+                            matches_filter = any(d['Key'] == tag_name and d['Value'] in filter_values for d in c['Tags'])
+
+                            if matches_filter:
+                                # It matches a filter, so stop looking for further matches
+                                break
+
+                        if matches_filter:
+                            break
+
+            except Exception as e:
+                if e.message.find('DBInstanceNotFound') >= 0:
+                    # An AWS RDS bug (2016-01-06) means deletion may not fully complete, leaving an 'empty' cluster.
+                    # Ignore errors when trying to find tags for these
+                    pass
+
+            # Ignore empty clusters caused by the AWS bug
+            if len(c['DBClusterMembers']) == 0:
+                continue
+            elif matches_filter:
+                c_dict[c['DBClusterIdentifier']] = c
+
+        self.inventory['db_clusters'] = c_dict
+
+    def get_elasticache_clusters_by_region(self, region):
+        ''' Makes an AWS API call to get the list of ElastiCache clusters (with
+        nodes' info) in a particular region. '''
+
+        # The ElastiCache boto module doesn't provide a get_all_instances
+        # method, so we need to call describe directly (it would be called by
+        # the shorthand method anyway...)
+        clusters = []
+        try:
+            conn = self.connect_to_aws(elasticache, region)
+            if conn:
+                # show_cache_node_info = True
+                # because we also want nodes' information
+                _marker = 1
+                while _marker:
+                    if _marker == 1:
+                        _marker = None
+                    response = conn.describe_cache_clusters(None, None, _marker, True)
+                    _marker = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['Marker']
+                    try:
+                        # Boto also doesn't provide wrapper classes for CacheClusters or
+                        # CacheNodes. Because of that we can't make use of the get_list
+                        # method in the AWSQueryConnection. Let's do the work manually
+                        clusters = clusters + response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters']
+                    except KeyError as e:
+                        error = "ElastiCache query to AWS failed (unexpected format)."
+                        self.fail_with_error(error, 'getting ElastiCache clusters')
+        except boto.exception.BotoServerError as e:
+            error = e.reason
+
+            if e.error_code == 'AuthFailure':
+                error = self.get_auth_error_message()
+            elif e.error_code == "OptInRequired":
+                error = "ElastiCache hasn't been enabled for this account yet. " \
+                    "You must either log in to the ElastiCache service through the AWS console to enable it, " \
+                    "or set 'elasticache = False' in ec2.ini"
+            elif not e.reason == "Forbidden":
+                error = "Looks like AWS ElastiCache is down:\n%s" % e.message
+            self.fail_with_error(error, 'getting ElastiCache clusters')
+
+        for cluster in clusters:
+            self.add_elasticache_cluster(cluster, region)
+
+    def get_elasticache_replication_groups_by_region(self, region):
+        ''' Makes an AWS API call to get the list of ElastiCache replication
+        groups in a particular region. '''
+
+        # The ElastiCache boto module doesn't provide a get_all_instances
+        # method, so we need to call describe directly (it would be called by
+        # the shorthand method anyway...)
+        try:
+            conn = self.connect_to_aws(elasticache, region)
+            if conn:
+                response = conn.describe_replication_groups()
+
+        except boto.exception.BotoServerError as e:
+            error = e.reason
+
+            if e.error_code == 'AuthFailure':
+                error = self.get_auth_error_message()
+            if not e.reason == "Forbidden":
+                error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message
+            self.fail_with_error(error, 'getting ElastiCache clusters')
+
+        try:
+            # Boto also doesn't provide wrapper classes for ReplicationGroups.
+            # Because of that we can't make use of the get_list method in the
+            # AWSQueryConnection. Let's do the work manually
+            replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups']
+
+        except KeyError as e:
+            error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)."
+ self.fail_with_error(error, 'getting ElastiCache clusters') + + for replication_group in replication_groups: + self.add_elasticache_replication_group(replication_group, region) + + def get_auth_error_message(self): + ''' create an informative error message if there is an issue authenticating''' + errors = ["Authentication error retrieving ec2 inventory."] + if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]: + errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found') + else: + errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct') + + boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials'] + boto_config_found = [p for p in boto_paths if os.path.isfile(os.path.expanduser(p))] + if len(boto_config_found) > 0: + errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found)) + else: + errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths)) + + return '\n'.join(errors) + + def fail_with_error(self, err_msg, err_operation=None): + '''log an error to std err for ansible-playbook to consume and exit''' + if err_operation: + err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format( + err_msg=err_msg, err_operation=err_operation) + sys.stderr.write(err_msg) + sys.exit(1) + + def get_instance(self, region, instance_id): + conn = self.connect(region) + + reservations = conn.get_all_instances([instance_id]) + for reservation in reservations: + for instance in reservation.instances: + return instance + + def add_instance(self, instance, region): + ''' Adds an instance to the inventory and index, as long as it is + addressable ''' + + # Only return instances with desired instance states + if instance.state not in self.ec2_instance_states: + return + + # Select the best destination address + # When destination_format and destination_format_tags are specified + # the following code will attempt to find the instance tags first, + # then the instance attributes next, and finally if neither are found + # assign nil for the desired destination format attribute. + if self.destination_format and self.destination_format_tags: + dest_vars = [] + inst_tags = getattr(instance, 'tags') + for tag in self.destination_format_tags: + if tag in inst_tags: + dest_vars.append(inst_tags[tag]) + elif hasattr(instance, tag): + dest_vars.append(getattr(instance, tag)) + else: + dest_vars.append('nil') + + dest = self.destination_format.format(*dest_vars) + elif instance.subnet_id: + dest = getattr(instance, self.vpc_destination_variable, None) + if dest is None: + dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None) + else: + dest = getattr(instance, self.destination_variable, None) + if dest is None: + dest = getattr(instance, 'tags').get(self.destination_variable, None) + + if not dest: + # Skip instances we cannot address (e.g. 
private VPC subnet) + return + + # Set the inventory name + hostname = None + if self.hostname_variable: + if self.hostname_variable.startswith('tag_'): + hostname = instance.tags.get(self.hostname_variable[4:], None) + else: + hostname = getattr(instance, self.hostname_variable) + + # set the hostname from route53 + if self.route53_enabled and self.route53_hostnames: + route53_names = self.get_instance_route53_names(instance) + for name in route53_names: + if name.endswith(self.route53_hostnames): + hostname = name + + # If we can't get a nice hostname, use the destination address + if not hostname: + hostname = dest + # to_safe strips hostname characters like dots, so don't strip route53 hostnames + elif self.route53_enabled and self.route53_hostnames and hostname.endswith(self.route53_hostnames): + hostname = hostname.lower() + else: + hostname = self.to_safe(hostname).lower() + + # if we only want to include hosts that match a pattern, skip those that don't + if self.pattern_include and not self.pattern_include.match(hostname): + return + + # if we need to exclude hosts that match a pattern, skip those + if self.pattern_exclude and self.pattern_exclude.match(hostname): + return + + # Add to index + self.index[hostname] = [region, instance.id] + + # Inventory: Group by instance ID (always a group of 1) + if self.group_by_instance_id: + self.inventory[instance.id] = [hostname] + if self.nested_groups: + self.push_group(self.inventory, 'instances', instance.id) + + # Inventory: Group by region + if self.group_by_region: + self.push(self.inventory, region, hostname) + if self.nested_groups: + self.push_group(self.inventory, 'regions', region) + + # Inventory: Group by availability zone + if self.group_by_availability_zone: + self.push(self.inventory, instance.placement, hostname) + if self.nested_groups: + if self.group_by_region: + self.push_group(self.inventory, region, instance.placement) + self.push_group(self.inventory, 'zones', instance.placement) + + # Inventory: Group by Amazon Machine Image (AMI) ID + if self.group_by_ami_id: + ami_id = self.to_safe(instance.image_id) + self.push(self.inventory, ami_id, hostname) + if self.nested_groups: + self.push_group(self.inventory, 'images', ami_id) + + # Inventory: Group by instance type + if self.group_by_instance_type: + type_name = self.to_safe('type_' + instance.instance_type) + self.push(self.inventory, type_name, hostname) + if self.nested_groups: + self.push_group(self.inventory, 'types', type_name) + + # Inventory: Group by instance state + if self.group_by_instance_state: + state_name = self.to_safe('instance_state_' + instance.state) + self.push(self.inventory, state_name, hostname) + if self.nested_groups: + self.push_group(self.inventory, 'instance_states', state_name) + + # Inventory: Group by platform + if self.group_by_platform: + if instance.platform: + platform = self.to_safe('platform_' + instance.platform) + else: + platform = self.to_safe('platform_undefined') + self.push(self.inventory, platform, hostname) + if self.nested_groups: + self.push_group(self.inventory, 'platforms', platform) + + # Inventory: Group by key pair + if self.group_by_key_pair and instance.key_name: + key_name = self.to_safe('key_' + instance.key_name) + self.push(self.inventory, key_name, hostname) + if self.nested_groups: + self.push_group(self.inventory, 'keys', key_name) + + # Inventory: Group by VPC + if self.group_by_vpc_id and instance.vpc_id: + vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id) + self.push(self.inventory, vpc_id_name, 
hostname) + if self.nested_groups: + self.push_group(self.inventory, 'vpcs', vpc_id_name) + + # Inventory: Group by security group + if self.group_by_security_group: + try: + for group in instance.groups: + key = self.to_safe("security_group_" + group.name) + self.push(self.inventory, key, hostname) + if self.nested_groups: + self.push_group(self.inventory, 'security_groups', key) + except AttributeError: + self.fail_with_error('\n'.join(['Package boto seems a bit older.', + 'Please upgrade boto >= 2.3.0.'])) + + # Inventory: Group by AWS account ID + if self.group_by_aws_account: + self.push(self.inventory, self.aws_account_id, hostname) + if self.nested_groups: + self.push_group(self.inventory, 'accounts', self.aws_account_id) + + # Inventory: Group by tag keys + if self.group_by_tag_keys: + for k, v in instance.tags.items(): + if self.expand_csv_tags and v and ',' in v: + values = map(lambda x: x.strip(), v.split(',')) + else: + values = [v] + + for v in values: + if v: + key = self.to_safe("tag_" + k + "=" + v) + else: + key = self.to_safe("tag_" + k) + self.push(self.inventory, key, hostname) + if self.nested_groups: + self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k)) + if v: + self.push_group(self.inventory, self.to_safe("tag_" + k), key) + + # Inventory: Group by Route53 domain names if enabled + if self.route53_enabled and self.group_by_route53_names: + route53_names = self.get_instance_route53_names(instance) + for name in route53_names: + self.push(self.inventory, name, hostname) + if self.nested_groups: + self.push_group(self.inventory, 'route53', name) + + # Global Tag: instances without tags + if self.group_by_tag_none and len(instance.tags) == 0: + self.push(self.inventory, 'tag_none', hostname) + if self.nested_groups: + self.push_group(self.inventory, 'tags', 'tag_none') + + # Global Tag: tag all EC2 instances + self.push(self.inventory, 'ec2', hostname) + + self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance) + self.inventory["_meta"]["hostvars"][hostname]['ansible_host'] = dest + + def add_rds_instance(self, instance, region): + ''' Adds an RDS instance to the inventory and index, as long as it is + addressable ''' + + # Only want available instances unless all_rds_instances is True + if not self.all_rds_instances and instance.status != 'available': + return + + # Select the best destination address + dest = instance.endpoint[0] + + if not dest: + # Skip instances we cannot address (e.g. 
private VPC subnet) + return + + # Set the inventory name + hostname = None + if self.hostname_variable: + if self.hostname_variable.startswith('tag_'): + hostname = instance.tags.get(self.hostname_variable[4:], None) + else: + hostname = getattr(instance, self.hostname_variable) + + # If we can't get a nice hostname, use the destination address + if not hostname: + hostname = dest + + hostname = self.to_safe(hostname).lower() + + # Add to index + self.index[hostname] = [region, instance.id] + + # Inventory: Group by instance ID (always a group of 1) + if self.group_by_instance_id: + self.inventory[instance.id] = [hostname] + if self.nested_groups: + self.push_group(self.inventory, 'instances', instance.id) + + # Inventory: Group by region + if self.group_by_region: + self.push(self.inventory, region, hostname) + if self.nested_groups: + self.push_group(self.inventory, 'regions', region) + + # Inventory: Group by availability zone + if self.group_by_availability_zone: + self.push(self.inventory, instance.availability_zone, hostname) + if self.nested_groups: + if self.group_by_region: + self.push_group(self.inventory, region, instance.availability_zone) + self.push_group(self.inventory, 'zones', instance.availability_zone) + + # Inventory: Group by instance type + if self.group_by_instance_type: + type_name = self.to_safe('type_' + instance.instance_class) + self.push(self.inventory, type_name, hostname) + if self.nested_groups: + self.push_group(self.inventory, 'types', type_name) + + # Inventory: Group by VPC + if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id: + vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id) + self.push(self.inventory, vpc_id_name, hostname) + if self.nested_groups: + self.push_group(self.inventory, 'vpcs', vpc_id_name) + + # Inventory: Group by security group + if self.group_by_security_group: + try: + if instance.security_group: + key = self.to_safe("security_group_" + instance.security_group.name) + self.push(self.inventory, key, hostname) + if self.nested_groups: + self.push_group(self.inventory, 'security_groups', key) + + except AttributeError: + self.fail_with_error('\n'.join(['Package boto seems a bit older.', + 'Please upgrade boto >= 2.3.0.'])) + # Inventory: Group by tag keys + if self.group_by_tag_keys: + for k, v in instance.tags.items(): + if self.expand_csv_tags and v and ',' in v: + values = map(lambda x: x.strip(), v.split(',')) + else: + values = [v] + + for v in values: + if v: + key = self.to_safe("tag_" + k + "=" + v) + else: + key = self.to_safe("tag_" + k) + self.push(self.inventory, key, hostname) + if self.nested_groups: + self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k)) + if v: + self.push_group(self.inventory, self.to_safe("tag_" + k), key) + + # Inventory: Group by engine + if self.group_by_rds_engine: + self.push(self.inventory, self.to_safe("rds_" + instance.engine), hostname) + if self.nested_groups: + self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine)) + + # Inventory: Group by parameter group + if self.group_by_rds_parameter_group: + self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), hostname) + if self.nested_groups: + self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name)) + + # Global Tag: instances without tags + if self.group_by_tag_none and len(instance.tags) == 0: + self.push(self.inventory, 'tag_none', hostname) 
+            if self.nested_groups:
+                self.push_group(self.inventory, 'tags', 'tag_none')
+
+        # Global Tag: all RDS instances
+        self.push(self.inventory, 'rds', hostname)
+
+        self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
+        self.inventory["_meta"]["hostvars"][hostname]['ansible_host'] = dest
+
+    def add_elasticache_cluster(self, cluster, region):
+        ''' Adds an ElastiCache cluster to the inventory and index, as long as
+        its nodes are addressable '''
+
+        # Only want available clusters unless all_elasticache_clusters is True
+        if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available':
+            return
+
+        # Select the best destination address
+        if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']:
+            # Memcached cluster
+            dest = cluster['ConfigurationEndpoint']['Address']
+            is_redis = False
+        else:
+            # Redis single-node cluster
+            # Because all Redis clusters are single nodes, we'll merge the
+            # info from the cluster with info about the node
+            dest = cluster['CacheNodes'][0]['Endpoint']['Address']
+            is_redis = True
+
+        if not dest:
+            # Skip clusters we cannot address (e.g. private VPC subnet)
+            return
+
+        # Add to index
+        self.index[dest] = [region, cluster['CacheClusterId']]
+
+        # Inventory: Group by instance ID (always a group of 1)
+        if self.group_by_instance_id:
+            self.inventory[cluster['CacheClusterId']] = [dest]
+            if self.nested_groups:
+                self.push_group(self.inventory, 'instances', cluster['CacheClusterId'])
+
+        # Inventory: Group by region
+        if self.group_by_region and not is_redis:
+            self.push(self.inventory, region, dest)
+            if self.nested_groups:
+                self.push_group(self.inventory, 'regions', region)
+
+        # Inventory: Group by availability zone
+        if self.group_by_availability_zone and not is_redis:
+            self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
+            if self.nested_groups:
+                if self.group_by_region:
+                    self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
+                self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
+
+        # Inventory: Group by node type
+        if self.group_by_instance_type and not is_redis:
+            type_name = self.to_safe('type_' + cluster['CacheNodeType'])
+            self.push(self.inventory, type_name, dest)
+            if self.nested_groups:
+                self.push_group(self.inventory, 'types', type_name)
+
+        # Inventory: Group by VPC (information not available in the current
+        # AWS API version for ElastiCache)
+
+        # Inventory: Group by security group
+        if self.group_by_security_group and not is_redis:
+
+            # Check for the existence of the 'SecurityGroups' key and also if
+            # this key has some value. When the cluster is not placed in a SG
+            # the query can return None here and cause an error.
+ if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: + for security_group in cluster['SecurityGroups']: + key = self.to_safe("security_group_" + security_group['SecurityGroupId']) + self.push(self.inventory, key, dest) + if self.nested_groups: + self.push_group(self.inventory, 'security_groups', key) + + # Inventory: Group by engine + if self.group_by_elasticache_engine and not is_redis: + self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest) + if self.nested_groups: + self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine'])) + + # Inventory: Group by parameter group + if self.group_by_elasticache_parameter_group: + self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest) + if self.nested_groups: + self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName'])) + + # Inventory: Group by replication group + if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']: + self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest) + if self.nested_groups: + self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId'])) + + # Global Tag: all ElastiCache clusters + self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId']) + + host_info = self.get_host_info_dict_from_describe_dict(cluster) + + self.inventory["_meta"]["hostvars"][dest] = host_info + + # Add the nodes + for node in cluster['CacheNodes']: + self.add_elasticache_node(node, cluster, region) + + def add_elasticache_node(self, node, cluster, region): + ''' Adds an ElastiCache node to the inventory and index, as long as + it is addressable ''' + + # Only want available nodes unless all_elasticache_nodes is True + if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available': + return + + # Select the best destination address + dest = node['Endpoint']['Address'] + + if not dest: + # Skip nodes we cannot address (e.g. 
private VPC subnet) + return + + node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId']) + + # Add to index + self.index[dest] = [region, node_id] + + # Inventory: Group by node ID (always a group of 1) + if self.group_by_instance_id: + self.inventory[node_id] = [dest] + if self.nested_groups: + self.push_group(self.inventory, 'instances', node_id) + + # Inventory: Group by region + if self.group_by_region: + self.push(self.inventory, region, dest) + if self.nested_groups: + self.push_group(self.inventory, 'regions', region) + + # Inventory: Group by availability zone + if self.group_by_availability_zone: + self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest) + if self.nested_groups: + if self.group_by_region: + self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone']) + self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone']) + + # Inventory: Group by node type + if self.group_by_instance_type: + type_name = self.to_safe('type_' + cluster['CacheNodeType']) + self.push(self.inventory, type_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'types', type_name) + + # Inventory: Group by VPC (information not available in the current + # AWS API version for ElastiCache) + + # Inventory: Group by security group + if self.group_by_security_group: + + # Check for the existence of the 'SecurityGroups' key and also if + # this key has some value. When the cluster is not placed in a SG + # the query can return None here and cause an error. + if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: + for security_group in cluster['SecurityGroups']: + key = self.to_safe("security_group_" + security_group['SecurityGroupId']) + self.push(self.inventory, key, dest) + if self.nested_groups: + self.push_group(self.inventory, 'security_groups', key) + + # Inventory: Group by engine + if self.group_by_elasticache_engine: + self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest) + if self.nested_groups: + self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine'])) + + # Inventory: Group by parameter group (done at cluster level) + + # Inventory: Group by replication group (done at cluster level) + + # Inventory: Group by ElastiCache Cluster + if self.group_by_elasticache_cluster: + self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest) + + # Global Tag: all ElastiCache nodes + self.push(self.inventory, 'elasticache_nodes', dest) + + host_info = self.get_host_info_dict_from_describe_dict(node) + + if dest in self.inventory["_meta"]["hostvars"]: + self.inventory["_meta"]["hostvars"][dest].update(host_info) + else: + self.inventory["_meta"]["hostvars"][dest] = host_info + + def add_elasticache_replication_group(self, replication_group, region): + ''' Adds an ElastiCache replication group to the inventory and index ''' + + # Only want available clusters unless all_elasticache_replication_groups is True + if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available': + return + + # Skip clusters we cannot address (e.g. 
private VPC subnet or clustered redis)
+        if replication_group['NodeGroups'][0]['PrimaryEndpoint'] is None or \
+                replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address'] is None:
+            return
+
+        # Select the best destination address (PrimaryEndpoint)
+        dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address']
+
+        # Add to index
+        self.index[dest] = [region, replication_group['ReplicationGroupId']]
+
+        # Inventory: Group by ID (always a group of 1)
+        if self.group_by_instance_id:
+            self.inventory[replication_group['ReplicationGroupId']] = [dest]
+            if self.nested_groups:
+                self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId'])
+
+        # Inventory: Group by region
+        if self.group_by_region:
+            self.push(self.inventory, region, dest)
+            if self.nested_groups:
+                self.push_group(self.inventory, 'regions', region)
+
+        # Inventory: Group by availability zone (doesn't apply to replication groups)
+
+        # Inventory: Group by node type (doesn't apply to replication groups)
+
+        # Inventory: Group by VPC (information not available in the current
+        # AWS API version for replication groups)
+
+        # Inventory: Group by security group (doesn't apply to replication groups;
+        # check this value at the cluster level)
+
+        # Inventory: Group by engine (replication groups are always Redis)
+        if self.group_by_elasticache_engine:
+            self.push(self.inventory, 'elasticache_redis', dest)
+            if self.nested_groups:
+                self.push_group(self.inventory, 'elasticache_engines', 'redis')
+
+        # Global Tag: all ElastiCache replication groups
+        self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId'])
+
+        host_info = self.get_host_info_dict_from_describe_dict(replication_group)
+
+        self.inventory["_meta"]["hostvars"][dest] = host_info
+
+    def get_route53_records(self):
+        ''' Get and store the map of resource records to domain names that
+        point to them. '''
+
+        if self.boto_profile:
+            r53_conn = route53.Route53Connection(profile_name=self.boto_profile)
+        else:
+            r53_conn = route53.Route53Connection()
+        all_zones = r53_conn.get_zones()
+
+        route53_zones = [zone for zone in all_zones if zone.name[:-1] not in self.route53_excluded_zones]
+
+        self.route53_records = {}
+
+        for zone in route53_zones:
+            rrsets = r53_conn.get_all_rrsets(zone.id)
+
+            for record_set in rrsets:
+                record_name = record_set.name
+
+                if record_name.endswith('.'):
+                    record_name = record_name[:-1]
+
+                for resource in record_set.resource_records:
+                    self.route53_records.setdefault(resource, set())
+                    self.route53_records[resource].add(record_name)
+
+    def get_instance_route53_names(self, instance):
+        ''' Check if an instance is referenced in the records we have from
+        Route53. If it is, return the list of domain names pointing to said
+        instance. If nothing points to it, return an empty list.
''' + + instance_attributes = ['public_dns_name', 'private_dns_name', + 'ip_address', 'private_ip_address'] + + name_list = set() + + for attrib in instance_attributes: + try: + value = getattr(instance, attrib) + except AttributeError: + continue + + if value in self.route53_records: + name_list.update(self.route53_records[value]) + + return list(name_list) + + def get_host_info_dict_from_instance(self, instance): + instance_vars = {} + for key in vars(instance): + value = getattr(instance, key) + key = self.to_safe('ec2_' + key) + + # Handle complex types + # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518 + if key == 'ec2__state': + instance_vars['ec2_state'] = instance.state or '' + instance_vars['ec2_state_code'] = instance.state_code + elif key == 'ec2__previous_state': + instance_vars['ec2_previous_state'] = instance.previous_state or '' + instance_vars['ec2_previous_state_code'] = instance.previous_state_code + elif isinstance(value, (int, bool)): + instance_vars[key] = value + elif isinstance(value, six.string_types): + instance_vars[key] = value.strip() + elif value is None: + instance_vars[key] = '' + elif key == 'ec2_region': + instance_vars[key] = value.name + elif key == 'ec2__placement': + instance_vars['ec2_placement'] = value.zone + elif key == 'ec2_tags': + for k, v in value.items(): + if self.expand_csv_tags and ',' in v: + v = list(map(lambda x: x.strip(), v.split(','))) + key = self.to_safe('ec2_tag_' + k) + instance_vars[key] = v + elif key == 'ec2_groups': + group_ids = [] + group_names = [] + for group in value: + group_ids.append(group.id) + group_names.append(group.name) + instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids]) + instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names]) + elif key == 'ec2_block_device_mapping': + instance_vars["ec2_block_devices"] = {} + for k, v in value.items(): + instance_vars["ec2_block_devices"][os.path.basename(k)] = v.volume_id + else: + pass + # TODO Product codes if someone finds them useful + # print key + # print type(value) + # print value + + instance_vars[self.to_safe('ec2_account_id')] = self.aws_account_id + + return instance_vars + + def get_host_info_dict_from_describe_dict(self, describe_dict): + ''' Parses the dictionary returned by the API call into a flat list + of parameters. This method should be used only when 'describe' is + used directly because Boto doesn't provide specific classes. ''' + + # I really don't agree with prefixing everything with 'ec2' + # because EC2, RDS and ElastiCache are different services. + # I'm just following the pattern used until now to not break any + # compatibility. 
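+        # As a hypothetical example of the flattening performed below: a
+        # describe key such as 'CacheClusterStatus' comes out of
+        # uncammelize() and to_safe() as 'ec2_cache_cluster_status'.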
+ + host_info = {} + for key in describe_dict: + value = describe_dict[key] + key = self.to_safe('ec2_' + self.uncammelize(key)) + + # Handle complex types + + # Target: Memcached Cache Clusters + if key == 'ec2_configuration_endpoint' and value: + host_info['ec2_configuration_endpoint_address'] = value['Address'] + host_info['ec2_configuration_endpoint_port'] = value['Port'] + + # Target: Cache Nodes and Redis Cache Clusters (single node) + if key == 'ec2_endpoint' and value: + host_info['ec2_endpoint_address'] = value['Address'] + host_info['ec2_endpoint_port'] = value['Port'] + + # Target: Redis Replication Groups + if key == 'ec2_node_groups' and value: + host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address'] + host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port'] + replica_count = 0 + for node in value[0]['NodeGroupMembers']: + if node['CurrentRole'] == 'primary': + host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address'] + host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port'] + host_info['ec2_primary_cluster_id'] = node['CacheClusterId'] + elif node['CurrentRole'] == 'replica': + host_info['ec2_replica_cluster_address_' + str(replica_count)] = node['ReadEndpoint']['Address'] + host_info['ec2_replica_cluster_port_' + str(replica_count)] = node['ReadEndpoint']['Port'] + host_info['ec2_replica_cluster_id_' + str(replica_count)] = node['CacheClusterId'] + replica_count += 1 + + # Target: Redis Replication Groups + if key == 'ec2_member_clusters' and value: + host_info['ec2_member_clusters'] = ','.join([str(i) for i in value]) + + # Target: All Cache Clusters + elif key == 'ec2_cache_parameter_group': + host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']]) + host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName'] + host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus'] + + # Target: Almost everything + elif key == 'ec2_security_groups': + + # Skip if SecurityGroups is None + # (it is possible to have the key defined but no value in it). 
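+ # e.g. a hypothetical value [{'SecurityGroupId': 'sg-0abc'}, {'SecurityGroupId': 'sg-0def'}]
+ # is flattened below into ec2_security_group_ids == 'sg-0abc,sg-0def'.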
+ if value is not None: + sg_ids = [] + for sg in value: + sg_ids.append(sg['SecurityGroupId']) + host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids]) + + # Target: Everything + # Preserve booleans and integers + elif isinstance(value, (int, bool)): + host_info[key] = value + + # Target: Everything + # Sanitize string values + elif isinstance(value, six.string_types): + host_info[key] = value.strip() + + # Target: Everything + # Replace None by an empty string + elif value is None: + host_info[key] = '' + + else: + # Remove non-processed complex types + pass + + return host_info + + def get_host_info(self): + ''' Get variables about a specific host ''' + + if len(self.index) == 0: + # Need to load index from cache + self.load_index_from_cache() + + if self.args.host not in self.index: + # try updating the cache + self.do_api_calls_update_cache() + if self.args.host not in self.index: + # host might not exist anymore + return self.json_format_dict({}, True) + + (region, instance_id) = self.index[self.args.host] + + instance = self.get_instance(region, instance_id) + return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True) + + def push(self, my_dict, key, element): + ''' Push an element onto an array that may not have been defined in + the dict ''' + group_info = my_dict.setdefault(key, []) + if isinstance(group_info, dict): + host_list = group_info.setdefault('hosts', []) + host_list.append(element) + else: + group_info.append(element) + + def push_group(self, my_dict, key, element): + ''' Push a group as a child of another group. ''' + parent_group = my_dict.setdefault(key, {}) + if not isinstance(parent_group, dict): + parent_group = my_dict[key] = {'hosts': parent_group} + child_groups = parent_group.setdefault('children', []) + if element not in child_groups: + child_groups.append(element) + + def get_inventory_from_cache(self): + ''' Reads the inventory from the cache file and returns it as a JSON + object ''' + + with open(self.cache_path_cache, 'r') as f: + json_inventory = f.read() + return json_inventory + + def load_index_from_cache(self): + ''' Reads the index from the cache file sets self.index ''' + + with open(self.cache_path_index, 'rb') as f: + self.index = json.load(f) + + def write_to_cache(self, data, filename): + ''' Writes data in JSON format to a file ''' + + json_data = self.json_format_dict(data, True) + with open(filename, 'w') as f: + f.write(json_data) + + def uncammelize(self, key): + temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key) + return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower() + + def to_safe(self, word): + ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' + regex = r"[^A-Za-z0-9\_" + if not self.replace_dash_in_groups: + regex += r"\-" + return re.sub(regex + "]", "_", word) + + def json_format_dict(self, data, pretty=False): + ''' Converts a dict to a JSON object and dumps it as a formatted + string ''' + + if pretty: + return json.dumps(data, sort_keys=True, indent=2) + else: + return json.dumps(data) + + +if __name__ == '__main__': + # Run the script + Ec2Inventory() diff --git a/ansible/inventories/.host.docker.internal b/ansible/inventories/.host.docker.internal new file mode 100644 index 0000000..9a67f62 --- /dev/null +++ b/ansible/inventories/.host.docker.internal @@ -0,0 +1 @@ +localhost ansible_host=host.docker.internal diff --git a/ansible/inventories/group_vars/all b/ansible/inventories/group_vars/all new file mode 100644 index 
0000000..b036f86 --- /dev/null +++ b/ansible/inventories/group_vars/all @@ -0,0 +1,3 @@ +--- +# file: inventories/group_vars/all + diff --git a/ansible/inventories/host_vars/default b/ansible/inventories/host_vars/default new file mode 100644 index 0000000..b882336 --- /dev/null +++ b/ansible/inventories/host_vars/default @@ -0,0 +1,50 @@ +--- +# file: inventories/host_vars/default + +aws_access_key_id: "{{ lookup('env','ANSIBLE_AWS_ACCESS_KEY_ID') }}" +aws_output_format: "{{ lookup('env','ANSIBLE_AWS_DEFAULT_OUTPUT') or 'json' }}" +aws_region: "{{ lookup('env','ANSIBLE_AWS_DEFAULT_REGION') or 'eu-west-1' }}" +aws_secret_access_key: "{{ lookup('env','ANSIBLE_AWS_SECRET_ACCESS_KEY') }}" +disks_additional_packages: +- e2fsprogs-extra +- nfs-utils +hosts_enable_cloudinit: false +hosts_enable_local: true +hosts_enable_rc: true +hosts_enable_zram: true +hosts_git_repositories: +- { "repo": "{{ lookup('env','ANSIBLE_GIT_REPOSITORY') }}", "dest": "{{ lookup('env','ANSIBLE_GIT_DIRECTORY') }}", "key_file": "{{ lookup('env','ANSIBLE_GIT_KEY_FILE') or '~/.ssh/id_rsa' }}", "version": "{{ lookup('env','ANSIBLE_GIT_VERSION') }}" } +hosts_packages: +- { "name": "ansible", "state": "present" } +- { "name": "coreutils", "state": "present" } +- { "name": "curl", "state": "present" } +- { "name": "git", "state": "present" } +- { "name": "groff", "state": "present" } +- { "name": "htop", "state": "present" } +- { "name": "less", "state": "present" } +- { "name": "lsof", "state": "present" } +- { "name": "make", "state": "present" } +- { "name": "openssh-client", "state": "present" } +- { "name": "util-linux", "state": "present" } +- { "name": "vim", "state": "present" } +- { "name": "zsh", "state": "present" } +hosts_ssh_private_keys: +- "{{ lookup('env','ANSIBLE_SSH_PRIVATE_KEY') or '~/.ssh/id_rsa' }}" +hosts_ssh_users: +- aya +hosts_user_env: +- ANSIBLE_AWS_ACCESS_KEY_ID +- ANSIBLE_AWS_SECRET_ACCESS_KEY +- ANSIBLE_CONFIG +- ANSIBLE_DISKS_NFS_DISK +- ANSIBLE_DISKS_NFS_OPTIONS +- ANSIBLE_DISKS_NFS_PATH +- ANSIBLE_DOCKER_IMAGE_TAG +- ANSIBLE_DOCKER_REGISTRY +- ANSIBLE_EXTRA_VARS +- ANSIBLE_GIT_DIRECTORY +- ANSIBLE_GIT_KEY_FILE +- ANSIBLE_GIT_REPOSITORY +- ANSIBLE_INVENTORY +- ANSIBLE_PLAYBOOK +- ENV diff --git a/ansible/inventories/host_vars/localhost b/ansible/inventories/host_vars/localhost new file mode 100644 index 0000000..b6f4970 --- /dev/null +++ b/ansible/inventories/host_vars/localhost @@ -0,0 +1,25 @@ +--- +# file: inventories/host_vars/localhost + +aws_access_key_id: "{{ lookup('env','ANSIBLE_AWS_ACCESS_KEY_ID') }}" +aws_output_format: "{{ lookup('env','ANSIBLE_AWS_DEFAULT_OUTPUT') or 'json' }}" +aws_region: "{{ lookup('env','ANSIBLE_AWS_DEFAULT_REGION') or 'eu-west-1' }}" +aws_secret_access_key: "{{ lookup('env','ANSIBLE_AWS_SECRET_ACCESS_KEY') }}" +disks_additional_disks: +- disk: /dev/xvdb + disable_periodic_fsck: true + fstype: ext4 + mount_options: defaults + mount: /var/lib/docker + service: docker +- disk: "{{ lookup('env','ANSIBLE_DISKS_NFS_DISK') }}" + fstype: nfs + mount_options: "{{ lookup('env','ANSIBLE_DISKS_NFS_OPTIONS') }}" + mount: "{{ lookup('env','ANSIBLE_DISKS_NFS_PATH') }}" +disks_additional_services: +- rpc.statd +docker_image_tag: "{{ lookup('env','ANSIBLE_DOCKER_IMAGE_TAG') or 'latest' }}" +docker_registry: "{{ lookup('env','ANSIBLE_DOCKER_REGISTRY') }}" +hosts_enable_local: true +hosts_enable_rc: true +hosts_enable_zram: true diff --git a/ansible/playbook.yml b/ansible/playbook.yml new file mode 100644 index 0000000..ab632e3 --- /dev/null +++ b/ansible/playbook.yml @@ -0,0 
+1,29 @@ +--- +# file: playbook.yml + +# bootstrap hosts +- hosts: default + gather_facts: false + pre_tasks: + - name: raw - install ansible requirements for alpine linux + raw: "[ -f /etc/alpine-release ] && /sbin/apk update && { which python3 >/dev/null 2>&1 || /sbin/apk add python3; } && { which sudo >/dev/null 2>&1 || /sbin/apk add sudo; } && { /bin/tar --version 2>/dev/null |grep busybox >/dev/null && /sbin/apk add tar; } && { ls /usr/lib/ssh/sftp-server >/dev/null 2>&1 || /sbin/apk add openssh-sftp-server; } || true" + +# install default packages and user settings +- import_playbook: playbooks/hosts.yml + tags: + - hosts + +# mount additional disks +- import_playbook: playbooks/disks.yml + tags: + - disks + +# install docker +- import_playbook: playbooks/docker.yml + tags: + - docker + +# install aws cli +- import_playbook: playbooks/aws-cli.yml + tags: + - aws-cli diff --git a/ansible/playbooks/aws-cli.yml b/ansible/playbooks/aws-cli.yml new file mode 100644 index 0000000..df5c217 --- /dev/null +++ b/ansible/playbooks/aws-cli.yml @@ -0,0 +1,6 @@ +--- +# file: playbooks/aws-cli.yml + +- hosts: '{{ target | default("all") }}' + roles: + - aws-cli diff --git a/ansible/playbooks/disks.yml b/ansible/playbooks/disks.yml new file mode 100644 index 0000000..7ea9505 --- /dev/null +++ b/ansible/playbooks/disks.yml @@ -0,0 +1,6 @@ +--- +# file: playbooks/disks.yml + +- hosts: '{{ target | default("all") }}' + roles: + - disks diff --git a/ansible/playbooks/docker.yml b/ansible/playbooks/docker.yml new file mode 100644 index 0000000..ce7ba97 --- /dev/null +++ b/ansible/playbooks/docker.yml @@ -0,0 +1,6 @@ +--- +# file: playbooks/docker.yml + +- hosts: '{{ target | default("all") }}' + roles: + - docker diff --git a/ansible/playbooks/hosts.yml b/ansible/playbooks/hosts.yml new file mode 100644 index 0000000..aa1a825 --- /dev/null +++ b/ansible/playbooks/hosts.yml @@ -0,0 +1,6 @@ +--- +# file: playbooks/hosts.yml + +- hosts: '{{ target | default("all") }}' + roles: + - hosts diff --git a/ansible/roles/aws-cli/.gitrepo b/ansible/roles/aws-cli/.gitrepo new file mode 100644 index 0000000..2488bff --- /dev/null +++ b/ansible/roles/aws-cli/.gitrepo @@ -0,0 +1,12 @@ +; DO NOT EDIT (unless you know what you are doing) +; +; This subdirectory is a git "subrepo", and this file is maintained by the +; git-subrepo command. See https://github.com/git-commands/git-subrepo#readme +; +[subrepo] + remote = ssh://git@github.com/1001Pharmacies/ansible-aws-cli + branch = master + commit = f10e38af3a9b36648576f9850e0d09fcc7a057df + parent = 9ee8bfab9d2f5e5591c2e8a3d6f3a03b56b36196 + method = merge + cmdver = 0.4.0 diff --git a/ansible/roles/aws-cli/README.md b/ansible/roles/aws-cli/README.md new file mode 100644 index 0000000..6742418 --- /dev/null +++ b/ansible/roles/aws-cli/README.md @@ -0,0 +1,35 @@ +# DEPRECATION NOTICE + +We have moved away from Ansible and are in the process of removing or transferring ownership of our Ansible repositories. If you rely on this repository directly, please make arrangements to replace this dependency with your own fork. + +# AWS CLI role for Ansible + +Installs and configures the AWS CLI for conveniently interacting with AWS services such as S3. 
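+
+Once the role has run, the files it writes (`~/.aws/config` and `~/.aws/credentials`)
+can be sanity-checked from Python with boto3, which the tasks below also install.
+This is a hypothetical check, not part of the role:
+
+    # Hypothetical sanity check: confirm the templated files resolve.
+    import boto3
+
+    session = boto3.session.Session()      # reads ~/.aws/config and ~/.aws/credentials
+    print("region:", session.region_name)  # expect the templated aws_region
+    credentials = session.get_credentials()
+    if credentials is not None:
+        print("key id:", credentials.access_key[:4] + "...")  # avoid printing secrets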
+ +## Requirements + +- Tested on Ubuntu 12.04 Server; +- Ansible 2.0+ + +## Role Variables + +The default variables are as follows: + + aws_output_format: 'json' + aws_region: 'ap-southeast-2' + aws_access_key_id: 'YOUR_ACCESS_KEY_ID' + aws_secret_access_key: 'YOUR_SECRET_ACCESS_KEY' + +## Example Playbook + + - hosts: 'servers' + roles: + - role: 'dstil.aws-cli' + aws_output_format: 'json' + aws_region: 'ap-southeast-2' + aws_access_key_id: 'SUPER_SECRET_ACCESS_KEY_ID' # Don't version this or put it on pastebin + aws_secret_access_key: 'SUPER_SECRET_ACCESS_KEY' # Ditto + +# License + +This playbook is provided 'as-is' under the conditions of the BSD license. No fitness for purpose is guaranteed or implied. diff --git a/ansible/roles/aws-cli/defaults/main.yml b/ansible/roles/aws-cli/defaults/main.yml new file mode 100644 index 0000000..d1101f4 --- /dev/null +++ b/ansible/roles/aws-cli/defaults/main.yml @@ -0,0 +1,7 @@ +--- +aws_cli_user: "{{ ansible_user|default('root') }}" +aws_cli_group: "{{ ansible_user|default('root') }}" +aws_output_format: 'json' +aws_region: 'eu-west-1' +aws_access_key_id: 'YOUR_ACCESS_KEY_ID' +aws_secret_access_key: 'YOUR_SECRET_ACCESS_KEY' diff --git a/ansible/roles/aws-cli/meta/main.yml b/ansible/roles/aws-cli/meta/main.yml new file mode 100644 index 0000000..9e051e6 --- /dev/null +++ b/ansible/roles/aws-cli/meta/main.yml @@ -0,0 +1,14 @@ +--- +galaxy_info: + author: 'Rohan Liston' + description: 'Installs and configures the AWS CLI for conveniently interacting with AWS services such as S3.' + company: 'DSTIL' + license: 'BSD' + min_ansible_version: 2.0 + platforms: + - name: 'Ubuntu' + versions: + - 'precise' + categories: + - 'development' +dependencies: [] diff --git a/ansible/roles/aws-cli/tasks/main.yml b/ansible/roles/aws-cli/tasks/main.yml new file mode 100644 index 0000000..4a1ffbd --- /dev/null +++ b/ansible/roles/aws-cli/tasks/main.yml @@ -0,0 +1,144 @@ +--- + +- name: 'Install AWS CLI' + tags: 'aws-cli' + become: 'yes' + pip: > + executable=pip + name=awscli + state=present + extra_args=--no-cache-dir + +- name: 'Install docker python' + tags: 'aws-cli' + become: 'yes' + pip: > + name=docker + state=present + extra_args=--no-cache-dir + +- name: 'Install boto python' + tags: 'aws-cli' + become: 'yes' + pip: > + name=boto3 + state=present + extra_args=--no-cache-dir + +- name: Set home directory of the user + set_fact: + home_dir: /home/{{ aws_cli_user }} + when: "not aws_cli_user == 'root'" + +- name: Set home directory for root + set_fact: + home_dir: /root + when: "aws_cli_user == 'root'" + +- name: 'Create the AWS config directory' + tags: 'aws-cli' + become: 'yes' + file: > + path={{ home_dir }}/.aws + state=directory + owner={{ aws_cli_user }} + group={{ aws_cli_group }} + mode=0755 + +- name: 'Copy AWS CLI config' + tags: 'aws-cli' + become: 'yes' + template: > + src=aws_cli_config.j2 + dest={{ home_dir }}/.aws/config + owner={{ aws_cli_user }} + group={{ aws_cli_group }} + mode=0600 + force=yes + +- name: 'Copy AWS CLI credentials' + tags: 'aws-cli' + become: 'yes' + template: > + src=aws_cli_credentials.j2 + dest={{ home_dir }}/.aws/credentials + owner={{ aws_cli_user }} + group={{ aws_cli_group }} + mode=0600 + force=yes + +- name: aws - check AWS meta-data URI + uri: + url: http://169.254.169.254/latest/meta-data + timeout: 1 + register: aws_uri_check + tags: 'aws' + failed_when: False + +- name: aws - get instance metadata + tags: 'aws' + ec2_metadata_facts: + when: aws_uri_check.status == 200 + +- name: aws - get instance tags + tags: 
'aws' + ec2_tag: + aws_access_key: "{{ aws_access_key_id }}" + aws_secret_key: "{{ aws_secret_access_key }}" + region: "{{ ansible_ec2_placement_region }}" + resource: "{{ ansible_ec2_instance_id }}" + state: list + register: ec2_tags + when: ansible_ec2_instance_id is defined + +- name: aws - set hostname + hostname: name="{{ ec2_tags.tags.hostname }}{% if ec2_tags.tags.domainname is defined %}.{{ ec2_tags.tags.domainname }}{% endif %}" + tags: 'aws' + when: ec2_tags.tags is defined and ec2_tags.tags.hostname is defined + +- name: aws - ecr login + shell: "$(aws ecr get-login --no-include-email --region {{ aws_region }})" + tags: 'aws' + when: ec2_tags.tags is defined + +- name: aws - prune docker objects (including non-dangling images) + docker_prune: + containers: yes + images: yes + images_filters: + dangling: false + networks: yes + volumes: yes + builder_cache: yes + tags: 'aws' + +- name: aws - launch docker containers + docker_container: + image: "{{docker_registry|default(ec2_tags.tags.user)}}/{{ec2_tags.tags.user}}/{{ec2_tags.tags.env}}/{% if ':' in item %}{{item}}{% else %}{{item}}:{{docker_image_tag|default('latest')}}{% endif %}" + name: "{{ec2_tags.tags.user}}_{{ec2_tags.tags.env}}_{{item|replace('/','_')|regex_replace(':.*','')}}" + network_mode: host + pull: yes + restart_policy: always + volumes: + - "{{ lookup('env','ANSIBLE_DISKS_NFS_PATH') }}:/shared" + - /etc/localtime:/etc/localtime:ro + - /var/run/docker.sock:/tmp/docker.sock:ro + tags: 'aws' + with_items: '{{ec2_tags.tags.services.split(" ")}}' + when: ec2_tags.tags is defined and ec2_tags.tags.env is defined and ec2_tags.tags.services is defined and ec2_tags.tags.user is defined + +- name: aws - add docker containers to inventory + add_host: + name: "{{ec2_tags.tags.user}}_{{ec2_tags.tags.env}}_{{item|replace('/','_')|regex_replace(':.*','')}}" + ansible_connection: docker + changed_when: false + tags: 'aws' + with_items: '{{ec2_tags.tags.services.split(" ")}}' + when: ec2_tags.tags is defined and ec2_tags.tags.env is defined and ec2_tags.tags.services is defined and ec2_tags.tags.user is defined + +- name: aws - run make deploy in docker containers + delegate_to: "{{ec2_tags.tags.user}}_{{ec2_tags.tags.env}}_{{item|replace('/','_')|regex_replace(':.*','')}}" + raw: "command -v make || exit 0 && make deploy CONTAINER={{ec2_tags.tags.user}}_{{ec2_tags.tags.env}}_{{item|replace('/','_')|regex_replace(':.*','')}} HOST={{ansible_ec2_local_ipv4}}" + tags: 'aws' + with_items: '{{ec2_tags.tags.services.split(" ")}}' + when: ec2_tags.tags is defined and ec2_tags.tags.env is defined and ec2_tags.tags.services is defined and ec2_tags.tags.user is defined diff --git a/ansible/roles/aws-cli/templates/aws_cli_config.j2 b/ansible/roles/aws-cli/templates/aws_cli_config.j2 new file mode 100644 index 0000000..d1346bd --- /dev/null +++ b/ansible/roles/aws-cli/templates/aws_cli_config.j2 @@ -0,0 +1,7 @@ +[default] +{% if aws_output_format|length %} +output = {{ aws_output_format }} +{% endif %} +{% if aws_region|length %} +region = {{ aws_region }} +{% endif %} diff --git a/ansible/roles/aws-cli/templates/aws_cli_credentials.j2 b/ansible/roles/aws-cli/templates/aws_cli_credentials.j2 new file mode 100644 index 0000000..0cd7034 --- /dev/null +++ b/ansible/roles/aws-cli/templates/aws_cli_credentials.j2 @@ -0,0 +1,5 @@ +{% if aws_access_key_id|length and aws_secret_access_key|length %} +[default] +aws_access_key_id = {{ aws_access_key_id }} +aws_secret_access_key = {{ aws_secret_access_key }} +{% endif %} diff --git 
a/ansible/roles/disks/.gitrepo b/ansible/roles/disks/.gitrepo new file mode 100644 index 0000000..299b0ca --- /dev/null +++ b/ansible/roles/disks/.gitrepo @@ -0,0 +1,12 @@ +; DO NOT EDIT (unless you know what you are doing) +; +; This subdirectory is a git "subrepo", and this file is maintained by the +; git-subrepo command. See https://github.com/git-commands/git-subrepo#readme +; +[subrepo] + remote = ssh://git@github.com/1001Pharmacies/ansible-disks + branch = master + commit = c0ac6978d715b461fbf20aca719cd5196bc60645 + parent = d01cccd9bab3a63d60ba251e3719767635ccd5d2 + method = merge + cmdver = 0.4.0 diff --git a/ansible/roles/disks/LICENSE b/ansible/roles/disks/LICENSE new file mode 100644 index 0000000..d77bf2a --- /dev/null +++ b/ansible/roles/disks/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Wizcorp + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/ansible/roles/disks/README.md b/ansible/roles/disks/README.md new file mode 100644 index 0000000..b580c7e --- /dev/null +++ b/ansible/roles/disks/README.md @@ -0,0 +1,77 @@ +Disk +==== + +This role allows you to format extra disks and attach them to different mount points. + +You can use it to move the data of different services to another disk. + +Configuration +------------- + +### Inventory + +Because the configuration for additional disks must be stored using the YAML +syntax, you have to write it in a `group_vars` directory. + +```yaml +# inventory/group_vars/GROUP_NAME +disks_additional_disks: + - disk: /dev/sdb + fstype: ext4 + mount_options: defaults + mount: /data + user: www-data + group: www-data + disable_periodic_fsck: false + - disk: /dev/nvme0n1 + part: /dev/nvme0n1p1 + fstype: xfs + mount_options: defaults,noatime + mount: /data2 + - device_name: /dev/sdf + fstype: ext4 + mount_options: defaults + mount: /data + - disk: nfs-host:/nfs/export + fstype: nfs + mount_options: defaults,noatime + mount: /mnt/nfs +``` + +* `disk` is the device, you want to mount. +* `part` is the first partition name. If not specified, `1` will be appended to the disk name. +* `fstype` allows you to choose the filesystem to use with the new disk. +* `mount_options` allows you to specify custom mount options. +* `mount` is the directory where the new disk should be mounted. +* `user` sets owner of the mount directory (default: `root`). +* `group` sets group of the mount directory (default: `root`). +* `disable_periodic_fsck` deactivates the periodic ext3/4 filesystem check for the new disk. 
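+
+To make the `part` defaulting above concrete, here is an illustrative Python
+sketch (not part of the role) that mirrors the `item.part | default(item.disk + "1")`
+expression used in its tasks:
+
+```python
+# Illustrative only: how the role derives the partition it formats and mounts.
+def first_partition(disk, part=None):
+    """Return the partition device for a disks_additional_disks entry."""
+    if part:              # an explicit 'part' entry wins, e.g. /dev/nvme0n1p1
+        return part
+    return disk + "1"     # default: append "1" to the disk name
+
+assert first_partition("/dev/sdb") == "/dev/sdb1"
+assert first_partition("/dev/nvme0n1", part="/dev/nvme0n1p1") == "/dev/nvme0n1p1"
+```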
+
+You can add:
+* `disks_package_use` is the required package manager module to use (yum, apt, etc). The default 'auto' will use existing facts or try to autodetect it.
+
+The following filesystems are currently supported:
+- [btrfs](http://en.wikipedia.org/wiki/BTRFS) *
+- [ext2](http://en.wikipedia.org/wiki/Ext2)
+- [ext3](http://en.wikipedia.org/wiki/Ext3)
+- [ext4](http://en.wikipedia.org/wiki/Ext4)
+- [nfs](http://en.wikipedia.org/wiki/Network_File_System) *
+- [xfs](http://en.wikipedia.org/wiki/XFS) *
+
+*) Note: To use these filesystems you have to define and install additional software packages. Please determine the right package names for your operating system.
+
+```yaml
+# inventory/group_vars/GROUP_NAME
+disks_additional_packages:
+ - xfsprogs # package for mkfs.xfs on RedHat / Ubuntu
+ - btrfs-progs # package for mkfs.btrfs on CentOS / Debian
+disks_additional_services:
+ - rpc.statd # start rpc.statd service for nfs
+```
+
+How it works
+------------
+
+It uses `parted` to partition the disk with a single primary partition spanning the entire disk.
+The specified filesystem will then be created with `mkfs`.
+Finally the new partition will be mounted to the specified mount path. diff --git a/ansible/roles/disks/defaults/main.yml b/ansible/roles/disks/defaults/main.yml new file mode 100644 index 0000000..a525dc1 --- /dev/null +++ b/ansible/roles/disks/defaults/main.yml @@ -0,0 +1,8 @@
+---
+# Additional disks that need to be formatted and mounted.
+# See README for syntax and usage.
+disks_additional_disks: []
+disks_additional_packages: []
+disks_additional_services: []
+disks_discover_aws_nvme_ebs: False
+disks_package_use: auto diff --git a/ansible/roles/disks/handlers/main.yml b/ansible/roles/disks/handlers/main.yml new file mode 100644 index 0000000..f22446d --- /dev/null +++ b/ansible/roles/disks/handlers/main.yml @@ -0,0 +1,21 @@
+---
+# file: handlers/main.yml
+
+- name: restart services
+ with_together:
+ - '{{ disks_additional_disks }}'
+ - '{{ disks_additional_disks_handler_notify.results }}'
+ service:
+ name: "{{item.0.service}}"
+ state: restarted
+ when: item.1.changed and item.0.service is defined
+
+- name: restart services - nfs
+ with_together:
+ - '{{ disks_additional_disks }}'
+ - '{{ disks_additional_disks_nfs_handler_notify.results }}'
+ service:
+ name: "{{item.0.service}}"
+ state: restarted
+ when: item.1.changed and item.0.service is defined
+ diff --git a/ansible/roles/disks/library/disks_ebs_config.py b/ansible/roles/disks/library/disks_ebs_config.py new file mode 100644 index 0000000..23b96e1 --- /dev/null +++ b/ansible/roles/disks/library/disks_ebs_config.py @@ -0,0 +1,182 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+from ctypes import *
+from fcntl import ioctl
+from pathlib import Path
+import json
+import os
+import subprocess
+
+from ansible.module_utils.basic import *
+
+module = AnsibleModule(argument_spec=dict(
+ config=dict(required=True, type='list'),
+))
+
+
+NVME_ADMIN_IDENTIFY = 0x06
+NVME_IOCTL_ADMIN_CMD = 0xC0484E41
+AMZN_NVME_VID = 0x1D0F
+AMZN_NVME_EBS_MN = "Amazon Elastic Block Store"
+
+class nvme_admin_command(Structure):
+ _pack_ = 1
+ _fields_ = [("opcode", c_uint8), # op code
+ ("flags", c_uint8), # fused operation
+ ("cid", c_uint16), # command id
+ ("nsid", c_uint32), # namespace id
+ ("reserved0", c_uint64),
+ ("mptr", c_uint64), # metadata pointer
+ ("addr", c_uint64), # data pointer
+ ("mlen", c_uint32), # metadata length
+ ("alen", c_uint32), # data length
+ ("cdw10", c_uint32),
+ ("cdw11", c_uint32),
+ ("cdw12",
c_uint32), + ("cdw13", c_uint32), + ("cdw14", c_uint32), + ("cdw15", c_uint32), + ("reserved1", c_uint64)] + +class nvme_identify_controller_amzn_vs(Structure): + _pack_ = 1 + _fields_ = [("bdev", c_char * 32), # block device name + ("reserved0", c_char * (1024 - 32))] + +class nvme_identify_controller_psd(Structure): + _pack_ = 1 + _fields_ = [("mp", c_uint16), # maximum power + ("reserved0", c_uint16), + ("enlat", c_uint32), # entry latency + ("exlat", c_uint32), # exit latency + ("rrt", c_uint8), # relative read throughput + ("rrl", c_uint8), # relative read latency + ("rwt", c_uint8), # relative write throughput + ("rwl", c_uint8), # relative write latency + ("reserved1", c_char * 16)] + +class nvme_identify_controller(Structure): + _pack_ = 1 + _fields_ = [("vid", c_uint16), # PCI Vendor ID + ("ssvid", c_uint16), # PCI Subsystem Vendor ID + ("sn", c_char * 20), # Serial Number + ("mn", c_char * 40), # Module Number + ("fr", c_char * 8), # Firmware Revision + ("rab", c_uint8), # Recommend Arbitration Burst + ("ieee", c_uint8 * 3), # IEEE OUI Identifier + ("mic", c_uint8), # Multi-Interface Capabilities + ("mdts", c_uint8), # Maximum Data Transfer Size + ("reserved0", c_uint8 * (256 - 78)), + ("oacs", c_uint16), # Optional Admin Command Support + ("acl", c_uint8), # Abort Command Limit + ("aerl", c_uint8), # Asynchronous Event Request Limit + ("frmw", c_uint8), # Firmware Updates + ("lpa", c_uint8), # Log Page Attributes + ("elpe", c_uint8), # Error Log Page Entries + ("npss", c_uint8), # Number of Power States Support + ("avscc", c_uint8), # Admin Vendor Specific Command Configuration + ("reserved1", c_uint8 * (512 - 265)), + ("sqes", c_uint8), # Submission Queue Entry Size + ("cqes", c_uint8), # Completion Queue Entry Size + ("reserved2", c_uint16), + ("nn", c_uint32), # Number of Namespaces + ("oncs", c_uint16), # Optional NVM Command Support + ("fuses", c_uint16), # Fused Operation Support + ("fna", c_uint8), # Format NVM Attributes + ("vwc", c_uint8), # Volatile Write Cache + ("awun", c_uint16), # Atomic Write Unit Normal + ("awupf", c_uint16), # Atomic Write Unit Power Fail + ("nvscc", c_uint8), # NVM Vendor Specific Command Configuration + ("reserved3", c_uint8 * (704 - 531)), + ("reserved4", c_uint8 * (2048 - 704)), + ("psd", nvme_identify_controller_psd * 32), # Power State Descriptor + ("vs", nvme_identify_controller_amzn_vs)] # Vendor Specific + +class ebs_nvme_device: + def __init__(self, device): + self.device = device + self.ctrl_identify() + + def _nvme_ioctl(self, id_response, id_len): + admin_cmd = nvme_admin_command(opcode = NVME_ADMIN_IDENTIFY, + addr = id_response, + alen = id_len, + cdw10 = 1) + + with open(self.device, "w") as nvme: + ioctl(nvme, NVME_IOCTL_ADMIN_CMD, admin_cmd) + + def ctrl_identify(self): + self.id_ctrl = nvme_identify_controller() + self._nvme_ioctl(addressof(self.id_ctrl), sizeof(self.id_ctrl)) + + def is_ebs(self): + if self.id_ctrl.vid != AMZN_NVME_VID: + return False + if self.id_ctrl.mn.strip() != AMZN_NVME_EBS_MN: + return False + return True + + def get_volume_id(self): + vol = self.id_ctrl.sn.decode('utf-8') + + if vol.startswith("vol") and vol[3] != "-": + vol = "vol-" + vol[3:] + + return vol.strip() + + def get_block_device(self, stripped=False): + dev = self.id_ctrl.vs.bdev.decode('utf-8') + + if stripped and dev.startswith("/dev/"): + dev = dev[5:] + + return dev.strip() + + +def update_disk(disk, mapping): + if 'device_name' not in disk: + return disk + + device_name = disk['device_name'][5:] + if device_name not in mapping: + 
return disk
+
+ volume_id = mapping[device_name]
+ link_path = '/dev/disk/by-id/nvme-Amazon_Elastic_Block_Store_vol%s' % volume_id[4:]
+ resolved = str(Path(link_path).resolve())
+
+ new_disk = dict(disk)
+ new_disk['disk'] = resolved
+ new_disk['part'] = '%sp1' % resolved
+ return new_disk
+
+
+def main():
+ src_config = module.params['config']
+
+ lsblkOutput = subprocess.check_output(['lsblk', '-J'])
+ lsblk = json.loads(lsblkOutput.decode('utf-8'))
+ mapping = {}
+ for blockdevice in lsblk['blockdevices']:
+ try:
+ dev = ebs_nvme_device('/dev/%s' % blockdevice['name'])
+ except OSError:
+ continue
+ except IOError:
+ continue
+ # only EBS-backed NVMe devices expose the vendor-specific block device name
+ if not dev.is_ebs():
+ continue
+ mapping[dev.get_block_device()] = dev.get_volume_id()
+
+ new_config = [
+ update_disk(disk, mapping) for disk in src_config
+ ]
+
+ facts = {'blockDeviceMapping': mapping, 'config': new_config, 'source_config': src_config}
+ result = {"changed": False, "ansible_facts": facts}
+ module.exit_json(**result)
+
+
+main() diff --git a/ansible/roles/disks/meta/main.yml b/ansible/roles/disks/meta/main.yml new file mode 100644 index 0000000..a990627 --- /dev/null +++ b/ansible/roles/disks/meta/main.yml @@ -0,0 +1,21 @@
+galaxy_info:
+ author: Emilien Kenler
+ description: This role allows setting up extra disks and their mount points
+ company: Wizcorp K.K.
+ license: MIT
+ min_ansible_version: 2.0.0
+ platforms:
+ - name: EL
+ versions:
+ - 6
+ - 7
+ - name: Debian
+ versions:
+ - wheezy
+ - jessie
+ - name: Ubuntu
+ versions:
+ - all
+ categories:
+ - system
+dependencies: [] diff --git a/ansible/roles/disks/tasks/main.yml b/ansible/roles/disks/tasks/main.yml new file mode 100644 index 0000000..8ee0133 --- /dev/null +++ b/ansible/roles/disks/tasks/main.yml @@ -0,0 +1,173 @@
+- name: 'Install Python PIP'
+ package: >
+ name=py3-pip
+ state=present
+ when: ansible_os_family|lower == "alpine"
+
+- name: 'Install Python PIP'
+ package: >
+ name=python-pip
+ state=present
+ when: ansible_os_family|lower != "alpine"
+
+- name: 'Install python-pathlib'
+ pip: >
+ name=pathlib
+ state=present
+
+- name: "Discover NVMe EBS"
+ disks_ebs_config:
+ config: "{{ disks_additional_disks }}"
+ register: __disks_ebs_config
+ when: disks_discover_aws_nvme_ebs | default(True) | bool
+
+- set_fact:
+ disks_additional_disks: "{{ disks_additional_disks|default([]) + __disks_ebs_config['ansible_facts']['config'] }}"
+ when: __disks_ebs_config is defined and 'ansible_facts' in __disks_ebs_config
+
+- name: "Install parted"
+ package:
+ name: parted
+ state: present
+ use: '{{ disks_package_use }}'
+ when: disks_additional_disks
+ tags: ['disks', 'pkgs']
+
+- name: "Install additional fs progs"
+ package:
+ name: "{{ item }}"
+ state: present
+ with_items: "{{ disks_additional_packages|default([]) }}"
+ when: disks_additional_packages is defined
+ tags: ['disks', 'pkgs']
+
+- name: disks - start additional services
+ service:
+ name: "{{item}}"
+ enabled: yes
+ state: started
+ with_items: "{{ disks_additional_services|default([]) }}"
+ tags: ['disks', 'pkgs']
+
+- name: "Get disk alignment for disks"
+ shell: |
+ if
+ [[ -e /sys/block/{{ item.disk | basename }}/queue/optimal_io_size && -e /sys/block/{{ item.disk | basename }}/alignment_offset && -e /sys/block/{{ item.disk | basename }}/queue/physical_block_size ]];
+ then
+ echo $[$(( ($(cat /sys/block/{{ item.disk | basename }}/queue/optimal_io_size) + $(cat /sys/block/{{ item.disk | basename }}/alignment_offset)) / $(cat /sys/block/{{ item.disk | basename }}/queue/physical_block_size) )) | 2048];
+ else
+ echo
2048; + fi + args: + creates: '{{ item.part | default(item.disk + "1") }}' + executable: '/bin/bash' + with_items: '{{ disks_additional_disks }}' + register: disks_offset + tags: ['disks'] + +- name: "Ensure the disk exists" + stat: + path: '{{ item.disk }}' + with_items: '{{ disks_additional_disks }}' + register: disks_stat + changed_when: False + tags: ['disks'] + +- name: "Partition additional disks" + shell: | + if + [ -b {{ item.disk }} ] + then + [ -b {{ item.part | default(item.disk + "1") }} ] || parted -a optimal --script "{{ item.disk }}" mklabel gpt mkpart primary {{ disks_offset.stdout|default("2048") }}s 100% && sleep 5 && partprobe {{ item.disk }}; sleep 5 + fi + args: + creates: '{{ item.part | default(item.disk + "1") }}' + executable: '/bin/bash' + with_items: '{{ disks_additional_disks }}' + tags: ['disks'] + +- name: "Create filesystem on the first partition" + filesystem: + dev: '{{ item.0.part | default(item.0.disk + "1") }}' + force: '{{ item.0.force|d(omit) }}' + fstype: '{{ item.0.fstype }}' + opts: '{{ item.0.fsopts|d(omit) }}' + with_together: + - '{{ disks_additional_disks }}' + - '{{ disks_stat.results }}' + when: item.1.stat.exists + tags: ['disks'] + +- name: "Disable periodic fsck and reserved space on ext3 or ext4 formatted disks" + environment: + PATH: "{{ ansible_env.PATH }}:/usr/sbin:/sbin" + shell: tune2fs -c0 -i0 -m0 {{ item.0.part | default(item.0.disk + "1") }} + with_together: + - '{{ disks_additional_disks }}' + - '{{ disks_stat.results }}' + when: "disks_additional_disks and ( item.0.fstype == 'ext4' or item.0.fstype == 'ext3' ) and item.0.disable_periodic_fsck|default(false)|bool and item.1.stat.exists" + tags: ['disks'] + +- name: "Ensure the mount directory exists" + file: + path: '{{ item.mount }}' + state: directory + with_items: '{{ disks_additional_disks }}' + tags: ['disks'] + +- name: "Get UUID for partition" + environment: + PATH: "{{ ansible_env.PATH }}:/usr/sbin:/sbin" + command: blkid -s UUID -o value {{ item.0.part | default(item.0.disk + "1") }} + check_mode: no + register: disks_blkid + with_together: + - '{{ disks_additional_disks }}' + - '{{ disks_stat.results }}' + changed_when: False + when: item.1.stat.exists + tags: ['disks'] + +- name: "Mount additional disks" + mount: + name: '{{ item.0.mount }}' + fstype: '{{ item.0.fstype }}' + opts: '{{ item.0.mount_options|d(omit) }}' + passno: '0' + src: 'UUID={{ item.1.stdout }}' + state: '{{ item.0.mount_state|d("mounted") }}' + with_together: + - '{{ disks_additional_disks }}' + - '{{ disks_blkid.results }}' + - '{{ disks_stat.results }}' + when: item.2.stat.exists + tags: ['disks'] + register: disks_additional_disks_handler_notify + notify: + - restart services + +- name: "Mount additional disks - nfs" + mount: + name: '{{ item.mount }}' + fstype: '{{ item.fstype }}' + opts: '{{ item.mount_options|d(omit) }}' + src: '{{ item.disk }}' + state: '{{ item.mount_state|d("mounted") }}' + when: item.fstype == 'nfs' + with_items: '{{ disks_additional_disks }}' + tags: ['disks'] + register: disks_additional_disks_nfs_handler_notify + notify: + - restart services - nfs + +- name: "Ensure the permissions are set correctly" + file: + path: '{{ item.mount }}' + owner: '{{ item.user | default("root") }}' + group: '{{ item.group | default("root") }}' + state: directory + with_items: '{{ disks_additional_disks }}' + when: item.user is defined or item.group is defined + tags: ['disk'] + +- meta: flush_handlers diff --git a/ansible/roles/docker/.gitrepo b/ansible/roles/docker/.gitrepo new file mode 
100644 index 0000000..3792cb5 --- /dev/null +++ b/ansible/roles/docker/.gitrepo @@ -0,0 +1,12 @@ +; DO NOT EDIT (unless you know what you are doing) +; +; This subdirectory is a git "subrepo", and this file is maintained by the +; git-subrepo command. See https://github.com/git-commands/git-subrepo#readme +; +[subrepo] + remote = ssh://git@github.com/1001Pharmacies/ansible-docker + branch = master + commit = 6217a899084cba00447195d1873b211462b60d52 + parent = 4745dad8cb8a826ee3ac47accda79f96957b5e13 + method = merge + cmdver = 0.4.0 diff --git a/ansible/roles/docker/AUTHORS.md b/ansible/roles/docker/AUTHORS.md new file mode 100644 index 0000000..2c0d726 --- /dev/null +++ b/ansible/roles/docker/AUTHORS.md @@ -0,0 +1,4 @@ +# Authors + +* **Yann Autissier** - *Initial work* - [aya](https://github.com/aya) + diff --git a/ansible/roles/docker/CHANGELOG.md b/ansible/roles/docker/CHANGELOG.md new file mode 100644 index 0000000..0cf6ebc --- /dev/null +++ b/ansible/roles/docker/CHANGELOG.md @@ -0,0 +1,9 @@ +# Changelog + +## v1.0.0 (December 20, 2016) + +Initial release. + +* Install docker daemon +* Start and active docker service at boot +* Build and run docker images diff --git a/ansible/roles/docker/LICENSE b/ansible/roles/docker/LICENSE new file mode 100644 index 0000000..ee96c4c --- /dev/null +++ b/ansible/roles/docker/LICENSE @@ -0,0 +1,20 @@ +MIT License + +Copyright (c) 2016 Yann Autissier + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/ansible/roles/docker/README.md b/ansible/roles/docker/README.md new file mode 100644 index 0000000..f4d2615 --- /dev/null +++ b/ansible/roles/docker/README.md @@ -0,0 +1,237 @@ +# Ansible role to run dockers + +An ansible role to install the [docker](https://www.docker.com/) daemon and build and run dockers. + +It installs the docker daemon and ensure it is up and running. +It sets the STORAGE_DRIVER if the docker host uses systemd and it configures the MTU to 1450 if it is a VM running on OpenStack. +It builds and runs docker images on the docker host. + +## Requirements + +This Ansible role requires at least Ansible version 1.9. + +## Role Variables + +* `docker_check_kernel` - The minimum kernel version allowed on hosts to run docker. + +``` yaml +# minimum kernel version +docker_check_kernel: '3.10' +``` + +* `docker_check_machine` - The hosts architecture needed to run docker. + +``` yaml +# architecture +docker_check_machine: 'x86_64' +``` + +* `docker_package` - The name of the docker package. 
+
+``` yaml
+# The docker package name
+docker_package: docker
+```
+
+* `docker_packages` - A list of packages to install/remove before installing the docker package.
+
+``` yaml
+# A list of packages to install/remove
+# docker_packages:
+# - { "name": "docker", "state": "absent" }
+```
+
+* `docker_init_config_directory` - The location of the configuration file of the docker daemon init script.
+
+``` yaml
+# Location of configuration files loaded by the init script
+docker_init_config_directory: "/etc/sysconfig"
+```
+
+* `docker_opts` - The name of the environment variable used to pass options to the docker daemon.
+
+``` yaml
+# docker daemon options environment variable
+docker_opts: "OPTIONS"
+```
+
+* `docker_services` - A list of system services to start.
+
+``` yaml
+# services
+docker_services:
+ - docker
+```
+
+* `dockers` - A list of docker images to build and run on the docker host with the docker-build and docker-run commands.
+
+``` yaml
+# dockers
+# dockers:
+# - nginx
+```
+
+* `docker_cluster` - An optional cluster name to pass to the docker-build and docker-run commands.
+
+``` yaml
+# docker cluster
+# docker_cluster: ""
+```
+
+* `docker_start` - Starts the dockers if set to true.
+
+``` yaml
+# Start docker
+docker_start: true
+```
+
+* `docker_restart` - Restarts dockers when their image has been updated. It removes currently running dockers and starts new ones.
+
+``` yaml
+# Stop and remove running docker to start a new one when image has been updated
+docker_restart: true
+```
+
+* `docker_force_restart` - Restarts dockers even if their image has not been updated. It removes currently running dockers and starts new ones.
+
+``` yaml
+# Stop and remove running docker to start a new one even if image has not been updated
+docker_force_restart: false
+```
+
+## Helper scripts
+
+This role comes with a few helper scripts. Here is a short description.
+
+* `docker-build` - Build a docker image, reading options to pass to the `docker build` command from a Dockeropts file.
+
+* `docker-cleanup` - Remove unused dockers.
+
+* `docker-cleanup-images` - Remove unused docker images.
+
+* `docker-cleanup-volumes` - Remove unused docker volumes.
+
+* `docker-get-image` - Return the sha256 of the image used by the docker.
+
+* `docker-get-status` - Return the status of the docker.
+
+* `docker-log-cleanup` - Empty the file logging the docker output on the docker host.
+
+* `docker-log-truncate` - Truncate the file logging the docker output on the docker host.
+
+* `docker-run` - Run a docker, reading options to pass to the `docker run` command from a Dockeropts file.
+
+## Example
+
+To launch this role on your `docker` hosts, run the default playbook.yml.
+
+``` bash
+$ ansible-playbook playbook.yml
+```
+
+### Build a docker image
+
+On the docker hosts, you'll be able to build docker images and run dockers, based on Dockerfile and Dockeropts files located in the /etc/docker subdirectories.
+
+To create an `nginx` docker image, create a directory /etc/docker/nginx with a Dockerfile and a Dockeropts file in it.
+
+``` bash
+# mkdir -p /etc/docker/nginx
+# cat << EOF > /etc/docker/nginx/Dockerfile
+FROM nginx:alpine
+EOF
+# cat << EOF > /etc/docker/nginx/Dockeropts
+DOCKER_ULIMIT="nofile=65536"
+DOCKER_PORT="80:80"
+EOF
+```
+
+Build your `nginx` docker image, then run it! The docker-run command will read the Dockeropts file to add the --ulimit and --port options to the docker run command.
+
+``` bash
+# docker-build nginx && docker-run nginx
+```
+
+### Override your files
+
+If you want to copy a file into your image from your Dockerfile, say the default nginx.conf, you can use the DOCKER_BUILD_PREFIX and DOCKER_BUILD_SUFFIX variables to select different versions of this file depending on the context.
+
+``` bash
+# cat << EOF > /etc/docker/nginx/Dockerfile
+FROM nginx:alpine
+ARG DOCKER_BUILD_PREFIX
+ARG DOCKER_BUILD_SUFFIX
+COPY ./\${DOCKER_BUILD_PREFIX}nginx.conf\${DOCKER_BUILD_SUFFIX} /etc/nginx/nginx.conf
+EOF
+```
+
+You can now override the nginx configuration file when you build your image.
+
+* Without options, the docker-build command will search for the file beside your Dockerfile.
+
+``` bash
+# docker-build nginx && docker-run nginx
+```
+
+Both the DOCKER_BUILD_PREFIX and DOCKER_BUILD_SUFFIX variables are empty, so the Dockerfile will search for a `./nginx.conf` file, i.e. the /etc/docker/nginx/nginx.conf file.
+
+* With a -c|--cluster option, the docker-build command will search for the file in a subdirectory below your Dockerfile.
+
+``` bash
+# docker-build -c custom nginx && docker-run -c custom nginx
+```
+
+The DOCKER_BUILD_PREFIX variable is populated with 'custom/' to force the Dockerfile to search for a `./custom/nginx.conf` file, i.e. the /etc/docker/nginx/custom/nginx.conf file.
+
+* With an image name suffixed with a dash, the docker-build command will search for a suffixed file as well.
+
+``` bash
+# docker-build -c custom nginx-develop && docker-run -c custom nginx-develop
+```
+
+The DOCKER_BUILD_PREFIX variable is populated with 'custom/' and the DOCKER_BUILD_SUFFIX variable is populated with '-develop' to force the Dockerfile to search for a `./custom/nginx.conf-develop` file, i.e. the /etc/docker/nginx/custom/nginx.conf-develop file.
+
+### Override your options
+
+The same override principle can be used for the Dockerfile and the Dockeropts file when using the docker-build and docker-run commands.
+You can create a /etc/docker/nginx/custom/Dockeropts file that would override your default Dockeropts file, and a /etc/docker/nginx/custom/Dockeropts-develop file overriding both other files too.
+The Dockeropts file accepts the following options.
+
+* `SYSCTL` - values to set on the docker host via the sysctl command before running the docker
+* `DOCKER_ARGS` - values to pass to the docker build command with --build-arg options
+* `DOCKER_ENV` - values to pass to the docker run command with -e options
+* `DOCKER_LINK` - values to pass to the docker run command with --link options
+* `DOCKER_OPT` - values to pass to the docker run command, prefixed by --
+* `DOCKER_PORT` - values to pass to the docker run command with -p options
+* `DOCKER_ULIMIT` - values to pass to the docker run command with --ulimit options
+* `DOCKER_VOLUME` - values to pass to the docker run command with -v options
+* `HOST_VOLUME` - volumes to allow write access to from the docker on an SELinux-enabled host
+
+Overriding options is done several times, reading options from the most specific to the most generic file. In our example, files are read in this order:
+/etc/docker/nginx/custom/Dockeropts-develop
+/etc/docker/nginx/custom/Dockeropts
+/etc/docker/nginx/Dockeropts
+
+## Common configuration
+
+The following configuration builds and runs the docker image 'nginx-develop' for the 'custom' cluster described in our example.
+The Dockerfile and Dockeropts files needed in the /etc/docker/nginx directory should be present on the docker host, likely synchronised by another Ansible role.
+
+``` yaml
+docker_cluster: "custom"
+dockers:
+ - nginx-develop
+```
+
+## Tests
+
+To test this role on your `docker` hosts, run the tests/playbook.yml playbook.
+
+``` bash
+$ ansible-playbook tests/playbook.yml
+```
+
+## Limitations
+
+This role is known to work on Ubuntu, Debian, CentOS and Alpine Linux.
+ diff --git a/ansible/roles/docker/defaults/main.yml b/ansible/roles/docker/defaults/main.yml new file mode 100644 index 0000000..e24cd12 --- /dev/null +++ b/ansible/roles/docker/defaults/main.yml @@ -0,0 +1,41 @@
+---
+# file: defaults/main.yml
+
+# minimum kernel version
+docker_check_kernel: '3.10'
+
+# architecture
+docker_check_machine: 'x86_64'
+
+# The docker package name
+docker_package: docker
+
+# A list of packages to install/remove
+# docker_packages:
+# - { "name": "docker", "state": "absent" }
+
+# Location of configuration files loaded by the init script
+docker_init_config_directory: "/etc/sysconfig"
+
+# docker daemon options environment variable
+docker_opts: "OPTIONS"
+
+# services
+docker_services:
+ - docker
+
+# dockers
+# dockers:
+# - nginx
+
+# docker cluster
+# docker_cluster: ""
+
+# Start docker
+docker_start: true
+
+# Stop and remove running docker to start a new one when image has been updated
+docker_restart: true
+
+# Stop and remove running docker to start a new one even if image has not been updated
+docker_force_restart: false diff --git a/ansible/roles/docker/files/etc/sysctl.d/docker.conf b/ansible/roles/docker/files/etc/sysctl.d/docker.conf new file mode 100755 index 0000000..cd203bc --- /dev/null +++ b/ansible/roles/docker/files/etc/sysctl.d/docker.conf @@ -0,0 +1 @@ +kernel.pax.softmode=1 diff --git a/ansible/roles/docker/files/usr/local/bin/docker-build b/ansible/roles/docker/files/usr/local/bin/docker-build new file mode 100755 index 0000000..7c8ac79 --- /dev/null +++ b/ansible/roles/docker/files/usr/local/bin/docker-build @@ -0,0 +1,133 @@
+#!/bin/bash
+# Author: Yann Autissier
+
+DOCKER_IMAGE_REPOSITORY="centile"
+DOCKER_BUILD_DIRECTORY="/etc/docker"
+
+usage() {
+ echo Usage: $0 [-c cluster] [-f] [-q] [-t] image [image [...]]
+ echo -e "Build a docker image in the '${DOCKER_IMAGE_REPOSITORY}' repository."
+ echo
+ echo -e "image\tis a directory with a Dockerfile, default in '${DOCKER_BUILD_DIRECTORY}/image'."
+ echo -e "\t'image' can contain a dash. The suffixed part after the dash is taken into account"
+ echo -e "\tin the image name but not in the name of the directory containing the Dockerfile."
+ echo -e "\tThe suffix will be available in your Dockerfile in the DOCKER_BUILD_SUFFIX build-arg."
+ echo
+ echo -e "Options:"
+ echo -e "\t-c 'cluster'\tAllows overriding files in the 'image' directory with existing files in"
+ echo -e "\t\t\tthe 'image/cluster' directory. 'cluster' will be available in your"
+ echo -e "\t\t\tDockerfile in the DOCKER_BUILD_PREFIX build-arg."
+ echo -e "\t-f\t\tforce build, do not use cache when building image."
+ echo -e "\t-q\t\tquiet mode, minimal output."
+ echo -e "\t-t\t\ttest mode, do nothing but output the command that would have been launched."
+ echo + echo -e "EXAMPLES" + echo + echo -e "$0 elk" + echo -e "Build a docker image named '${DOCKER_IMAGE_REPOSITORY}/elk' with Dockerfile ${DOCKER_BUILD_DIRECTORY}/elk/Dockerfile" + echo + echo -e "$0 elk-es01" + echo -e "Build a docker image named '${DOCKER_IMAGE_REPOSITORY}/elk-es01' with Dockerfile ${DOCKER_BUILD_DIRECTORY}/elk/Dockerfile" + echo -e "and build-arg DOCKER_BUILD_SUFFIX=-es01" + echo + echo -e "$0 -c elisa-sdc elk-es01" + echo -e "Build a docker image named '${DOCKER_IMAGE_REPOSITORY}/elk-es01' with Dockerfile ${DOCKER_BUILD_DIRECTORY}/elk/Dockerfile," + echo -e "build-arg DOCKER_BUILD_PREFIX=elisa-sdc/ and build-arg DOCKER_BUILD_SUFFIX=-es01" + echo + exit 1 +} + +while [ $# -gt 0 ]; do + case $1 in + -c|--cluster) shift && CLUSTER="$1" + ;; + -f|--force) FORCE=1 + ;; + -t|--test) TEST=1 + ;; + -q|--quiet) QUIET=1 + ;; + -h|--help) usage + ;; + *) args="${args:-} $1" + esac + shift + args="${args# }" +done + +# check args +[ "${args:0:1}" = "-" ] && usage + +# grsec/pax on alpine linux with docker < 1.12 +[ -f /etc/alpine-release ] && while read major minor patch; do + if [ "${major}" -eq 1 ] && [ "${minor:-0}" -lt 12 ]; then + [ "$(sysctl -n kernel.grsecurity.chroot_deny_chmod 2>/dev/null)" = 1 ] && sysctl -w kernel.grsecurity.chroot_deny_chmod=0 2>/dev/null && grsec_disabled_chmod=1 + [ "$(sysctl -n kernel.grsecurity.chroot_deny_mknod 2>/dev/null)" = 1 ] && sysctl -w kernel.grsecurity.chroot_deny_mknod=0 2>/dev/null && grsec_disabled_mknod=1 + fi +done <<< $(apk version docker |awk -F '-' '/^docker/ {print $2}' |sed 's/\./ /g') + +for arg in $args; do + # extract docker image name + image="$(basename ${arg})" + # keep part before the dash as the directory name + dir="$(dirname ${arg})/${image%-*}" + # keep part after the dash as an image suffix name + [ "${image##*-}" != "${image}" ] && suffix="${image##*-}" + + # default to ${DOCKER_BUILD_DIRECTORY}/${dir} if ${dir} does not exists + [ ! -d "${dir}" ] && [ -d "${DOCKER_BUILD_DIRECTORY}/${dir}" ] && dir="${DOCKER_BUILD_DIRECTORY}/${dir#./}" + + # directory exists && contains a Dockerfile + [ -d ${dir} ] && [ -f "${dir}/Dockerfile" ] || usage + # cluster directory exists + [ -n "${CLUSTER}" ] && { [ -d ${dir}/${CLUSTER} ] || usage; } + + # search for Dockeropts files + files="${dir}/Dockeropts ${dir}/Dockeropts-${suffix}" + [ -n "${CLUSTER}" ] && files="${files} ${dir}/${CLUSTER}/Dockeropts ${dir}/${CLUSTER}/Dockeropts-${suffix}" + + # source the Dockeropts files + for dockeropts in ${files}; do + [ -f "${dockeropts}" ] && . ${dockeropts} + done + + # quiet build + [ ${QUIET} ] && DOCKER_BUILD_ARGS="--quiet" || DOCKER_BUILD_ARGS="" + + # do not use cache + [ ${FORCE} ] && DOCKER_BUILD_ARGS="${DOCKER_BUILD_ARGS} --no-cache" + + # extract DOCKER_ARGS + [ -n "${DOCKER_ARGS}" ] && for build_arg in ${DOCKER_ARGS}; do + DOCKER_BUILD_ARGS="${DOCKER_BUILD_ARGS} --build-arg ${build_arg}" + done + + # add DOCKER_BUILD_PREFIX and DOCKER_BUILD_SUFFIX + [ -n "${CLUSTER}" ] && DOCKER_BUILD_ARGS="${DOCKER_BUILD_ARGS} --build-arg DOCKER_BUILD_PREFIX=${CLUSTER}/" + [ -n "${suffix}" ] && DOCKER_BUILD_ARGS="${DOCKER_BUILD_ARGS} --build-arg DOCKER_BUILD_SUFFIX=-${suffix}" + + # search for Dockerfile + [ -n "${CLUSTER}" ] && files="${dir}/${CLUSTER}/Dockerfile-${suffix} ${dir}/${CLUSTER}/Dockerfile" || files="" + files="${files} ${dir}/Dockerfile-${suffix} ${dir}/Dockerfile" + + # build docker image with 1st found Dockerfile + for dockerfile in ${files}; do + [ -f "${dockerfile}" ] || continue + [ ${QUIET} ] && [ ! 
${TEST} ] && echo -n "${image} " + [ ! ${QUIET} ] && echo "Building image ${image}" + if [ ${TEST} ]; then + echo docker build ${DOCKER_BUILD_ARGS} -t ${DOCKER_IMAGE_REPOSITORY}/${image} -f ${dockerfile} ${dir} + else + docker build ${DOCKER_BUILD_ARGS} -t ${DOCKER_IMAGE_REPOSITORY}/${image} -f ${dockerfile} ${dir} + result=$? + fi + [ ${result:-0} -ge ${return:-0} ] && return=${result} + break + done +done + +# grsec/pax +[ ${grsec_disabled_chmod} ] && sysctl -w kernel.grsecurity.chroot_deny_chmod=1 2>/dev/null +[ ${grsec_disabled_mknod} ] && sysctl -w kernel.grsecurity.chroot_deny_mknod=1 2>/dev/null + +exit ${return:-1} diff --git a/ansible/roles/docker/files/usr/local/bin/docker-cleanup b/ansible/roles/docker/files/usr/local/bin/docker-cleanup new file mode 100755 index 0000000..f509db3 --- /dev/null +++ b/ansible/roles/docker/files/usr/local/bin/docker-cleanup @@ -0,0 +1,4 @@ +#!/bin/sh +# Author: Yann Autissier + +docker ps -q --no-trunc --filter status=exited,status=created,status=dead |while read docker; do docker rm ${docker}; done diff --git a/ansible/roles/docker/files/usr/local/bin/docker-cleanup-images b/ansible/roles/docker/files/usr/local/bin/docker-cleanup-images new file mode 100755 index 0000000..4704f51 --- /dev/null +++ b/ansible/roles/docker/files/usr/local/bin/docker-cleanup-images @@ -0,0 +1,4 @@ +#!/bin/sh +# Author: Yann Autissier + +docker images -q --no-trunc --filter dangling=true |while read image; do docker rmi ${image}; done diff --git a/ansible/roles/docker/files/usr/local/bin/docker-cleanup-volumes b/ansible/roles/docker/files/usr/local/bin/docker-cleanup-volumes new file mode 100755 index 0000000..185b9ea --- /dev/null +++ b/ansible/roles/docker/files/usr/local/bin/docker-cleanup-volumes @@ -0,0 +1,144 @@ +#! /bin/bash + +set -eou pipefail + +#usage: sudo ./docker-cleanup-volumes.sh [--dry-run] + +docker_bin="$(which docker.io 2> /dev/null || which docker 2> /dev/null)" + +# Default dir +dockerdir=/var/lib/docker + +# Look for an alternate docker directory with -g/--graph option +dockerpid=$(ps ax | grep "$docker_bin" | grep -v grep | awk '{print $1; exit}') || : +if [[ -n "$dockerpid" && $dockerpid -gt 0 ]]; then + next_arg_is_dockerdir=false + while read -d $'\0' arg + do + if [[ $arg =~ ^--graph=(.+) ]]; then + dockerdir=${BASH_REMATCH[1]} + break + elif [ $arg = '-g' ]; then + next_arg_is_dockerdir=true + elif [ $next_arg_is_dockerdir = true ]; then + dockerdir=$arg + break + fi + done < /proc/$dockerpid/cmdline +fi + +dockerdir=$(readlink -f "$dockerdir") + +volumesdir=${dockerdir}/volumes +vfsdir=${dockerdir}/vfs/dir +allvolumes=() +dryrun=false +verbose=false + +function log_verbose() { + if [ "${verbose}" = true ]; then + echo "$1" + fi; +} + +function delete_volumes() { + local targetdir=$1 + echo + if [[ ! -d "${targetdir}" || ! "$(ls -A "${targetdir}")" ]]; then + echo "Directory ${targetdir} does not exist or is empty, skipping." 
+    return
+  fi
+  echo "Delete unused volume directories from $targetdir"
+  local dir
+  while read -d $'\0' dir
+  do
+    dir=$(basename "$dir")
+    if [[ -d "${targetdir}/${dir}/_data" || "${dir}" =~ [0-9a-f]{64} ]]; then
+      if [ ${#allvolumes[@]} -gt 0 ] && [[ ${allvolumes[@]} =~ "${dir}" ]]; then
+        echo "In use ${dir}"
+      else
+        if [ "${dryrun}" = false ]; then
+          echo "Deleting ${dir}"
+          rm -rf "${targetdir}/${dir}"
+        else
+          echo "Would have deleted ${dir}"
+        fi
+      fi
+    else
+      echo "Not a volume ${dir}"
+    fi
+  done < <(find "${targetdir}" -mindepth 1 -maxdepth 1 -type d -print0 2>/dev/null)
+}
+
+if [ $UID != 0 ]; then
+  echo "You need to be root to use this script."
+  exit 1
+fi
+
+if [ -z "$docker_bin" ] ; then
+  echo "Please install docker. You can install docker by running \"wget -qO- https://get.docker.io/ | sh\"."
+  exit 1
+fi
+
+while [[ $# -gt 0 ]]
+do
+  key="$1"
+
+  case $key in
+    -n|--dry-run)
+      dryrun=true
+      ;;
+    -v|--verbose)
+      verbose=true
+      ;;
+    *)
+      echo "Cleanup docker volumes: remove unused volumes."
+      echo "Usage: ${0##*/} [--dry-run] [--verbose]"
+      echo "  -n, --dry-run: dry run: display what would get removed."
+      echo "  -v, --verbose: verbose output."
+      exit 1
+      ;;
+  esac
+  shift
+done
+
+# Make sure that we can talk to docker daemon. If we cannot, we fail here.
+${docker_bin} version >/dev/null
+
+container_ids=$(${docker_bin} ps -a -q --no-trunc)
+
+#All volumes from all containers
+SAVEIFS=$IFS
+IFS=$(echo -en "\n\b")
+for container in $container_ids; do
+  #add container id to the list of volumes; container ids should never
+  #exist in the volumesdir, but just to be safe
+  allvolumes+=("${container}")
+  #add all volumes from this container to the list of volumes
+  log_verbose "Inspecting container ${container}"
+  for volpath in $(
+    ${docker_bin} inspect --format='{{range $key, $val := .}}{{if eq $key "Volumes"}}{{range $vol, $path := .}}{{$path}}{{"\n"}}{{end}}{{end}}{{if eq $key "Mounts"}}{{range $mount := $val}}{{$mount.Source}}{{"\n"}}{{end}}{{end}}{{end}}' ${container} \
+  ); do
+    log_verbose "Processing volumepath ${volpath}"
+    #try to get volume id from the volume path
+    vid=$(echo "${volpath}" | sed 's|.*/\(.*\)/_data$|\1|;s|.*/\([0-9a-f]\{64\}\)$|\1|')
+    # check for either a 64 character vid or the end of a volumepath containing _data:
+    if [[ "${vid}" =~ ^[0-9a-f]{64}$ || (${volpath} =~ .*/_data$ && ! "${vid}" =~ "/") ]]; then
+      log_verbose "Found volume ${vid}"
+      allvolumes+=("${vid}")
+    else
+      #check if it's a bindmount, these have a config.json file in the ${volumesdir} but no files in ${vfsdir} (docker 1.6.2 and below)
+      for bmv in $(find "${volumesdir}" -name config.json -print | xargs grep -l "\"IsBindMount\":true" | xargs grep -l "\"Path\":\"${volpath}\""); do
+        bmv="$(basename "$(dirname "${bmv}")")"
+        log_verbose "Found bindmount ${bmv}"
+        allvolumes+=("${bmv}")
+        #there should be only one config for the bindmount, delete any duplicate for the same bindmount.
+        break
+      done
+    fi
+  done
+done
+IFS=$SAVEIFS
+
+delete_volumes "${volumesdir}"
+delete_volumes "${vfsdir}"
diff --git a/ansible/roles/docker/files/usr/local/bin/docker-get-image b/ansible/roles/docker/files/usr/local/bin/docker-get-image
new file mode 100755
index 0000000..c0c9cbf
--- /dev/null
+++ b/ansible/roles/docker/files/usr/local/bin/docker-get-image
@@ -0,0 +1,8 @@
+#!/bin/bash
+# Author: Yann Autissier
+
+[ -n "$1" ] || exit
+
+for docker in "$@"; do
+  docker inspect "${docker}" 2>/dev/null |awk '$1 == "\"Image\":" && $2 ~ /^\"sha/ {gsub(/(^\"|\",$)/, "", $2); print "'${docker}' "$2}'
+done
diff --git a/ansible/roles/docker/files/usr/local/bin/docker-get-status b/ansible/roles/docker/files/usr/local/bin/docker-get-status
new file mode 100755
index 0000000..4c87714
--- /dev/null
+++ b/ansible/roles/docker/files/usr/local/bin/docker-get-status
@@ -0,0 +1,8 @@
+#!/bin/bash
+# Author: Yann Autissier
+
+[ -n "$1" ] || exit
+
+for docker in "$@"; do
+  docker inspect "${docker}" 2>/dev/null |awk '$1 == "\"Status\":" {gsub(/(^\"|\",$)/, "", $2); print "'${docker}' "$2}'
+done
diff --git a/ansible/roles/docker/files/usr/local/bin/docker-log-cleanup b/ansible/roles/docker/files/usr/local/bin/docker-log-cleanup
new file mode 100755
index 0000000..86b3d75
--- /dev/null
+++ b/ansible/roles/docker/files/usr/local/bin/docker-log-cleanup
@@ -0,0 +1,4 @@
+#!/bin/sh
+# Author: Yann Autissier
+
+[ -n "$1" ] && :> "$(docker inspect "$1" | grep '"LogPath":' | sed -e 's/.*"LogPath": "//g' -e 's/",//g')"
diff --git a/ansible/roles/docker/files/usr/local/bin/docker-log-truncate b/ansible/roles/docker/files/usr/local/bin/docker-log-truncate
new file mode 100755
index 0000000..56734a5
--- /dev/null
+++ b/ansible/roles/docker/files/usr/local/bin/docker-log-truncate
@@ -0,0 +1,7 @@
+#!/bin/sh
+# Author: Yann Autissier
+
+[ -n "$1" ] && \
+  docker_log=$(docker inspect "$1" 2>/dev/null | grep '"LogPath":' | sed -e 's/.*"LogPath": "//g' -e 's/",//g') && \
+  [ -f "${docker_log}" ] && \
+  tail -n 100 "${docker_log}" > "${docker_log}.tmp" && mv "${docker_log}.tmp" "${docker_log}"
diff --git a/ansible/roles/docker/files/usr/local/bin/docker-run b/ansible/roles/docker/files/usr/local/bin/docker-run
new file mode 100755
index 0000000..d49856b
--- /dev/null
+++ b/ansible/roles/docker/files/usr/local/bin/docker-run
@@ -0,0 +1,157 @@
+#!/bin/bash
+# Author: Yann Autissier
+
+DOCKER_IMAGE_REPOSITORY="centile"
+DOCKER_BUILD_DIRECTORY="/etc/docker"
+
+usage() {
+  echo Usage: $0 [ -c cluster ] [ -i image ] [-f] [-q] [-t] name [name [...]]
+  echo -e "Run a docker from an image in the '${DOCKER_IMAGE_REPOSITORY}' repository."
+  echo
+  echo -e "name\t is a directory with a Dockerfile, default in '${DOCKER_BUILD_DIRECTORY}/name'."
+  echo -e "\t'name' can contain a dash. The directory name is extracted from the part"
+  echo -e "\tbefore the dash."
+  echo
+  echo -e "Options:"
+  echo -e "\t-c 'cluster'\tAllows overriding files in the 'image' directory with existing files in"
+  echo -e "\t\t\tthe 'image/cluster' directory."
+  echo -e "\t -i 'image'\tthe docker image to run, default in '${DOCKER_IMAGE_REPOSITORY}' repository."
+  echo -e "\t -f\t\tforce run, stop and remove an existing docker before running a new one."
+  echo -e "\t -q\t\tquiet mode, minimal output."
+  echo -e "\t -t\t\ttest mode, do nothing but output the command that would have been launched."
+  echo
+  echo -e "EXAMPLES"
+  echo
+  echo -e "$0 elk"
+  echo -e "Run a docker named 'elk' from the '${DOCKER_IMAGE_REPOSITORY}/elk' image"
+  echo
+  echo -e "$0 elk-es01"
+  echo -e "Run a docker named 'elk-es01' from the '${DOCKER_IMAGE_REPOSITORY}/elk-es01' image"
+  echo
+  echo -e "$0 -i elk elk-es01"
+  echo -e "Run a docker named 'elk-es01' from the '${DOCKER_IMAGE_REPOSITORY}/elk' image"
+  echo
+  exit 1
+}
+
+while [ $# -gt 0 ]; do
+  case $1 in
+    -c|--cluster) shift && CLUSTER="$1"
+    ;;
+    -i|--image) shift && IMAGE="$1"
+    ;;
+    -h|--help) usage
+    ;;
+    -f|--force) FORCE=1
+    ;;
+    -q|--quiet) QUIET=1
+    ;;
+    -t|--test) TEST=1
+    ;;
+    *) args="${args:-} $1"
+  esac
+  shift
+  args="${args# }"
+done
+
+# check args
+[ "${args:0:1}" = "-" ] && usage
+
+for arg in ${args}; do
+  # reset vars
+  image=""; DOCKER_OPT=""
+  # extract docker name
+  name="$(basename ${arg})"
+  # keep part before the dash as the directory name
+  dir="$(dirname ${arg})/${name%-*}"
+  # keep part after the dash as an image suffix name
+  [ "${name##*-}" != "${name}" ] && suffix="${name##*-}"
+  # if provided, set docker image from args
+  if [ -n "${IMAGE}" ]; then
+    # if docker image does not contain a /, add our default repository
+    [ "${IMAGE##*/}" != "${IMAGE}" ] && image="${IMAGE}" || image="${DOCKER_IMAGE_REPOSITORY}/${IMAGE}"
+  # else try to find an image from the docker name
+  else
+    # try docker name, docker name without ending numbers, docker name without suffix
+    for image in ${name} ${name%%[0-9]*} ${name%-*}; do
+      # search for image in ${DOCKER_IMAGE_REPOSITORY}
+      [ -n "$(docker images 2>/dev/null |awk '$1 == "'${DOCKER_IMAGE_REPOSITORY}/${image}'" {print $1}')" ] && image="${DOCKER_IMAGE_REPOSITORY}/${image}" && break
+      image="${name}"
+    done
+  fi
+
+  tag="$(docker images |awk '$1 == "'${image}'" {print $2}')"
+  [ -z "${tag}" ] && echo "ERROR: Cannot find image '${image}'" >&2 && exit 2
+
+  # default to ${DOCKER_BUILD_DIRECTORY}/${dir} if ${dir} does not exist
+  [ ! -d "${dir}" ] && [ -d "${DOCKER_BUILD_DIRECTORY}/${dir}" ] && dir="${DOCKER_BUILD_DIRECTORY}/${dir#./}"
+
+  # directory exists && contains a Dockerfile
+  [ -d "${dir}" ] && [ -f "${dir}/Dockerfile" ] || usage
+  # cluster directory exists
+  [ -n "${CLUSTER}" ] && { [ -d "${dir}/${CLUSTER}" ] || usage; }
+
+  # search for Dockeropts files
+  files="${dir}/Dockeropts ${dir}/Dockeropts-${suffix}"
+  [ -n "${CLUSTER}" ] && files="${files} ${dir}/${CLUSTER}/Dockeropts ${dir}/${CLUSTER}/Dockeropts-${suffix}"
+
+  # source the Dockeropts files
+  for dockeropts in ${files}; do
+    [ -f "${dockeropts}" ] && . ${dockeropts}
+  done
+
+  # extract SYSCTL
+  [ -n "${SYSCTL}" ] && for sysctl in ${SYSCTL}; do
+    sysctl -w ${sysctl} 2>/dev/null
+  done
+
+  # extract DOCKER_OPT
+  [ -n "${DOCKER_OPT}" ] && DOCKER_OPTS="--${DOCKER_OPT// / --}" || DOCKER_OPTS=""
+
+  # extract DOCKER_ENV
+  [ -n "${DOCKER_ENV}" ] && DOCKER_OPTS="${DOCKER_OPTS} -e ${DOCKER_ENV//\" /\" -e }"
+
+  # extract DOCKER_LINK
+  [ -n "${DOCKER_LINK}" ] && DOCKER_OPTS="${DOCKER_OPTS} --link ${DOCKER_LINK// / --link }"
+
+  # extract DOCKER_PORT
+  [ -n "${DOCKER_PORT}" ] && DOCKER_OPTS="${DOCKER_OPTS} -p ${DOCKER_PORT// / -p }"
+
+  # extract DOCKER_ULIMIT
+  [ -n "${DOCKER_ULIMIT}" ] && DOCKER_OPTS="${DOCKER_OPTS} --ulimit ${DOCKER_ULIMIT// / --ulimit }"
+
+  # extract DOCKER_VOLUME
+  [ -n "${DOCKER_VOLUME}" ] && DOCKER_OPTS="${DOCKER_OPTS} -v ${DOCKER_VOLUME// / -v }"
+
+  # enable access to host volumes on selinux
+  for volume in ${HOST_VOLUME}; do
+    chcon -Rt svirt_sandbox_file_t ${volume} 2>/dev/null
+  done
+
+  # remove current docker
+  if [ ${FORCE} ]; then
+    if [ -n "$(docker ps -aq --filter name=${name})" ]; then
+      [ ! ${QUIET} ] && echo -n "Removing docker ${name}... "
+      if [ ${TEST} ]; then
+        echo docker rm -f ${name}
+      else
+        eval docker rm -f ${name} >/dev/null 2>&1
+        result=$? && [ ${result} -ne 0 ] && echo "ERROR" && { [ ${result:-0} -ge ${return:-0} ] && return=${result}; } && break
+        [ ! ${QUIET} ] && echo "OK"
+      fi
+    fi
+  fi
+
+  # launch docker
+  [ ${QUIET} ] && [ ! ${TEST} ] && echo -n "${name} "
+  [ ! ${QUIET} ] && echo -n "Running docker ${name}... "
+  if [ ${TEST} ]; then
+    echo docker run --restart=always ${DOCKER_OPTS} -d --name ${name} ${image} ${DOCKER_RUN:-}
+  else
+    eval docker run --restart=always ${DOCKER_OPTS} -d --name ${name} ${image} ${DOCKER_RUN:-} 2>/dev/null
+    result=$? && [ ${result} -ne 0 ] && echo "ERROR"
+  fi
+  [ ${result:-0} -ge ${return:-0} ] && return=${result}
+done
+
+exit ${return:-1}
diff --git a/ansible/roles/docker/handlers/main.yml b/ansible/roles/docker/handlers/main.yml
new file mode 100644
index 0000000..96777da
--- /dev/null
+++ b/ansible/roles/docker/handlers/main.yml
@@ -0,0 +1,8 @@
+---
+# file handlers/main.yml
+
+- name: restart docker
+  service:
+    name: "{{docker_service}}"
+    state: "restarted"
+
diff --git a/ansible/roles/docker/meta/main.yml b/ansible/roles/docker/meta/main.yml
new file mode 100644
index 0000000..a984441
--- /dev/null
+++ b/ansible/roles/docker/meta/main.yml
@@ -0,0 +1,42 @@
+# Copyright (c) 2016 Centile
+#
+# MIT License
+
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# +--- +galaxy_info: + author: Yann Autissier + description: An ansible role to install docker + company: Centile + license: MIT + min_ansible_version: 1.9 + platforms: + - name: Ubuntu + versions: + - xenial + - name: Debian + versions: + - jessie + - name: EL + versions: + - all + +dependencies: [] diff --git a/ansible/roles/docker/playbook.yml b/ansible/roles/docker/playbook.yml new file mode 100644 index 0000000..45ce166 --- /dev/null +++ b/ansible/roles/docker/playbook.yml @@ -0,0 +1,6 @@ +--- +# file: playbook.yml + +- hosts: docker + roles: + - . diff --git a/ansible/roles/docker/tasks/build.yml b/ansible/roles/docker/tasks/build.yml new file mode 100644 index 0000000..1115060 --- /dev/null +++ b/ansible/roles/docker/tasks/build.yml @@ -0,0 +1,17 @@ +--- +# file: tasks/build.yml + +- name: build - Build docker image + with_items: "{{dockers|default([])}}" + command: "/usr/local/bin/docker-build -q -c {{docker_cluster|default('\"\"')}} {{item}}" + register: docker_build_image_command + +- name: build - Register docker_build_image + with_items: "{{docker_build_image_command.results}}" + set_fact: + docker_build_image: "{{docker_build_image |default({}) |combine( {item.item: item.stdout} ) }}" + +- name: build - Debug docker_elk_build_image + with_items: "{{dockers|default([])}}" + debug: msg="{{docker_build_image[item]}}" + when: docker_debug|default(false) diff --git a/ansible/roles/docker/tasks/check.yml b/ansible/roles/docker/tasks/check.yml new file mode 100644 index 0000000..471cdff --- /dev/null +++ b/ansible/roles/docker/tasks/check.yml @@ -0,0 +1,16 @@ +--- +# file: tasks/check.yml + +- name: check - kernel version + fail: + msg: > + docker requires a minimum kernel version of {{docker_check_kernel}} + on {{ansible_distribution}} {{ansible_distribution_version}} + when: ansible_kernel is version(docker_check_kernel, "<") + +- name: check - machine architecture + fail: + msg: > + docker requires a {{docker_check_machine}} version + of {{ansible_distribution}} {{ansible_distribution_version}} + when: ansible_machine != docker_check_machine diff --git a/ansible/roles/docker/tasks/config.yml b/ansible/roles/docker/tasks/config.yml new file mode 100644 index 0000000..37bdd56 --- /dev/null +++ b/ansible/roles/docker/tasks/config.yml @@ -0,0 +1,18 @@ +--- +# file: tasks/config.yml + +- name: config - add docker storage setup + lineinfile: dest="{{docker_init_config_directory}}/{{docker_package}}-storage-setup" state="present" line="STORAGE_DRIVER=\"\"" + when: docker_package|length > 0 and ansible_service_mgr == "systemd" and ansible_os_family|lower == "redhat" + become: yes + +# - name: config - disable docker iptables setup +# lineinfile: dest="/lib/systemd/system/docker.service" state="present" regex="^ExecStart=" line="ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --iptables=false" +# notify: restart docker +# when: docker_package|length > 0 and ansible_service_mgr == "systemd" +# become: yes + +- name: config - setup docker mtu on Openstack VMs + lineinfile: dest="{{docker_init_config_directory}}/{{docker_package}}" state="present" backrefs=true regexp='^{{docker_opts}}=(?:\'|\")?((?:\s*[\w=\/\-\.](? 
"a", "b", "c", "d" +make-comma-list = $(subst $(space),$(comma)$(space),$(patsubst %,"%",$(strip $(1)))) + +# Needed for the foreach loops to loop over the list of hooks, so that +# each hook call is properly separated by a newline. +define sep + + +endef + +PERCENT = % +QUOTE = ' +# ' # Meh... syntax-highlighting + +# This macro properly escapes a command string, then prints it with printf: +# +# - first, backslash '\' are self-escaped, so that they do not escape +# the following char and so that printf properly outputs a backslash; +# +# - next, single quotes are escaped by closing an existing one, adding +# an escaped one, and re-openning a new one (see below for the reason); +# +# - then '%' signs are self-escaped so that the printf does not interpret +# them as a format specifier, in case the variable contains an actual +# printf with a format; +# +# - finally, $(sep) is replaced with the literal '\n' so that make does +# not break on the so-expanded variable, but so that the printf does +# correctly output an LF. +# +# Note: this must be escaped in this order to avoid over-escaping the +# previously escaped elements. +# +# Once everything has been escaped, it is passed between single quotes +# (that's why the single-quotes are escaped they way they are, above, +# and why the dollar sign is not escaped) to printf(1). A trailing +# newline is apended, too. +# +# Note: leading or trailing spaces are *not* stripped. +# +define PRINTF + printf '$(subst $(sep),\n,\ + $(subst $(PERCENT),$(PERCENT)$(PERCENT),\ + $(subst $(QUOTE),$(QUOTE)\$(QUOTE)$(QUOTE),\ + $(subst \,\\,$(1)))))\n' +endef diff --git a/openstack/.env.dist b/openstack/.env.dist new file mode 100644 index 0000000..a6499b2 --- /dev/null +++ b/openstack/.env.dist @@ -0,0 +1,9 @@ +DEBUG=false +DOCKER=true +ENV=local +OS_AUTH_URL= +OS_TENANT_ID= +OS_TENANT_NAME= +OS_USERNAME= +OS_PASSWORD= +OS_REGION_NAME= diff --git a/packer/alpine/alpine.json b/packer/alpine/alpine.json new file mode 100644 index 0000000..23fe7a1 --- /dev/null +++ b/packer/alpine/alpine.json @@ -0,0 +1,105 @@ +{ + "builders": [ + { + "accelerator": "{{user `accelerator`}}", + "boot_command": [ + "{{user `username`}}", + "passwd{{user `password`}}{{user `password`}}", + "ifconfig eth0 up \u0026\u0026 udhcpc -i eth0", + "apk add --repository http://dl-cdn.alpinelinux.org/alpine/v{{user `alpine_version`}}/main dropbear dropbear-openrc openssh-sftp-server", + "rc-update add dropbear", + "echo -e 'auto eth0\\niface eth0 inet dhcp' > /etc/network/interfaces", + "rc-service dropbear start", + "" + ], + "boot_wait": "{{user `boot_wait`}}", + "disk_interface": "virtio", + "disk_size": "{{user `iso_size`}}", + "format": "raw", + "headless": true, + "host_port_max": "{{user `ssh_port_max`}}", + "host_port_min": "{{user `ssh_port_min`}}", + "iso_checksum": "file:http://dl-cdn.alpinelinux.org/alpine/v{{user `alpine_version`}}/releases/{{user `alpine_arch`}}/alpine-virt-{{user `alpine_release`}}-{{user `alpine_arch`}}.iso.sha256", + "iso_url": "http://dl-cdn.alpinelinux.org/alpine/v{{user `alpine_version`}}/releases/{{user `alpine_arch`}}/alpine-virt-{{user `alpine_release`}}-{{user `alpine_arch`}}.iso", + "net_device": "virtio-net", + "output_directory": "{{user `output`}}", + "qemuargs": [ + [ + "-device", + "virtio-rng-pci,rng=rng0,bus=pci.0,addr=0x7" + ], + [ + "-object", + "rng-random,filename=/dev/urandom,id=rng0" + ] + ], + "shutdown_command": "/sbin/poweroff", + "ssh_file_transfer_method": "sftp", + "ssh_password": "{{user `password`}}", + "ssh_port": 22, + 
"ssh_timeout": "{{user `ssh_wait_timeout`}}", + "ssh_username": "{{user `username`}}", + "type": "qemu", + "vm_name": "{{user `iso_name`}}.iso", + "vnc_bind_address": "{{user `vnc_bind_address`}}", + "vnc_port_max": "{{user `vnc_port_max`}}", + "vnc_port_min": "{{user `vnc_port_min`}}" + } + ], + "provisioners": [ + { + "environment_vars": [ + "ALPINE_VERSION={{user `alpine_version`}}", + "HOSTNAME={{user `hostname`}}" + ], + "script": "packer/alpine/setup.sh", + "type": "shell" + }, + { + "expect_disconnect": true, + "inline": [ + "/usr/bin/eject -s", + "/sbin/reboot" + ], + "type": "shell" + }, + { + "extra_arguments": [ + "--extra-vars", + "{{user `ansible_extra_vars`}}", + "{{user `ansible_verbose`}}" + ], + "inventory_directory": "ansible/inventories", + "pause_before": "16s", + "playbook_file": "ansible/playbook.yml", + "sftp_command": "/usr/lib/ssh/sftp-server -e", + "type": "ansible", + "user": "{{user `ansible_user`}}" + } + ], + "variables": { + "accelerator": "kvm", + "alpine_arch": "x86_64", + "alpine_release": "3.12.0", + "alpine_version": "3.12", + "ansible_extra_vars": "target=default", + "ansible_user": "root", + "ansible_verbose": "-v", + "boot_wait": "8s", + "hostname": "alpine", + "iso_name": "alpine-3.12.0-x86_64", + "iso_size": "1024", + "output": "build/iso", + "password": "alpine", + "qemuargs": "", + "ssh_port_max": "2222", + "ssh_port_min": "2222", + "ssh_wait_timeout": "32s", + "template": "alpine", + "username": "root", + "vnc_bind_address": "127.0.0.1", + "vnc_port_max": "5900", + "vnc_port_min": "5900" + } +} + diff --git a/packer/alpine/setup.sh b/packer/alpine/setup.sh new file mode 100755 index 0000000..fcd8923 --- /dev/null +++ b/packer/alpine/setup.sh @@ -0,0 +1,53 @@ +#!/bin/sh +# https://github.com/alpinelinux/alpine-conf/blob/master/setup-alpine.in + +export PATH="/usr/sbin:/usr/bin:/sbin:/bin" + +ALPINE_VERSION="${ALPINE_VERSION:-3.10}" +APKREPOSOPTS="http://dl-cdn.alpinelinux.org/alpine/v${ALPINE_VERSION}/main http://dl-cdn.alpinelinux.org/alpine/v${ALPINE_VERSION}/community" +BOOT_SIZE="32" +DISKOPTS="-s 0 -m sys /dev/vda" +DNSOPTS="-n 8.8.8.8" +HOSTNAME="${HOSTNAME:-alpine}" +HOSTNAMEOPTS="-n ${HOSTNAME}" +INTERFACESOPTS="auto lo +iface lo inet loopback + +auto eth0 +iface eth0 inet dhcp + +auto eth1 +iface eth1 inet dhcp +" +KEYMAPOPTS="fr fr" +NTPOPTS="-c openntpd" +PROXYOPTS="none" +SSHDOPTS="-c none" +TIMEZONEOPTS="-z Europe/Paris" +export MIRRORS="http://dl-cdn.alpinelinux.org/alpine/ +http://dl-2.alpinelinux.org/alpine/ +http://dl-3.alpinelinux.org/alpine/ +http://dl-4.alpinelinux.org/alpine/ +http://dl-5.alpinelinux.org/alpine/ +http://dl-8.alpinelinux.org/alpine/" + +/sbin/setup-keymap ${KEYMAPOPTS} +/sbin/setup-hostname ${HOSTNAMEOPTS} +echo "${INTERFACESOPTS}" | /sbin/setup-interfaces -i +# /etc/init.d/networking --quiet start >/dev/null +# /sbin/setup-dns ${DNSOPTS} +/sbin/setup-timezone ${TIMEZONEOPTS} +/sbin/setup-proxy -q ${PROXYOPTS} +/sbin/setup-apkrepos ${APKREPOSOPTS} +/sbin/setup-ntp ${NTPOPTS} +/sbin/setup-sshd ${SSHDOPTS} +rc-update --quiet add networking boot +rc-update --quiet add urandom boot +/etc/init.d/hostname --quiet restart +killall ntpd +sed -i 's/constraints/# constraints/' /etc/ntpd.conf +sed -i 's/^#NTPD_OPTS=$/NTPD_OPTS=-s/' /etc/conf.d/openntpd +openrc boot +openrc default + +echo "y" | BOOT_SIZE="${BOOT_SIZE}" DEFAULT_DISK="none" /sbin/setup-disk -q ${DISKOPTS} || exit diff --git a/scripts/img-compressr.sh b/scripts/img-compressr.sh new file mode 100755 index 0000000..c4df8d4 --- /dev/null +++ 
b/scripts/img-compressr.sh
@@ -0,0 +1,464 @@
+#!/bin/bash
+#####################################################
+#                                                   #
+# Author: jean-christophe.iacono@1001pharmacies.com #
+#                                                   #
+# img-compressr.sh                                  #
+#                                                   #
+# Usage: img-compressr.sh [options] folder [...]    #
+#                                                   #
+# This script compresses images in target folders   #
+# and subfolders using ImageMagick.                 #
+#                                                   #
+# Version: 0.4                                      #
+#                                                   #
+#####################################################
+#
+#
+### Documentation used
+# - ImageMagick install : https://imagemagick.org/script/install-source.php
+#   OR apt install. (because manual install means manual plugin management)
+#
+# - PageSpeed Insights : https://developers.google.com/speed/docs/insights/OptimizeImages
+#
+### Notes
+#
+# The purpose of this script is to compress images in a list of folders
+# with potential subfolders.
+#
+# - The script can handle spaces in file or folder names
+#
+# - imagemagick : target and destination files can be the same, which
+#   overwrites the file with its compressed version
+#
+### TODO
+#
+# - Catch and manage error codes
+# - Make recursion depth (maxdepth) configurable
+# - Make the log path a variable
+#
+
+
+PATH=${PATH:-/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/bin}
+
+# imagemagick convert binary
+convert=convert
+
+
+lastrun_marker=compression_marker
+original_ext=uncompressedbak
+convert_args_jpg="-sampling-factor 4:2:0 -strip -quality 85 -interlace JPEG -colorspace RGB"
+convert_args_png="-strip -define png:compression-filter=0 -define png:compression-level=9 -define png:compression-strategy=1"
+
+report="/tmp/img_compression_report.log"
+current_time="$(date +"%Y-%m-%d_%H-%M-%S")"
+
+RED="\033[31m"
+GREEN="\033[32m"
+BLUE="\033[96m"
+YELLOW="\033[93m"
+BRIGHTGREEN="\033[32m\033[1m"
+BRIGHTBLUE="\033[36m\033[1m"
+COLOR_RESET="\033[0m"
+
+
+if ! [ -x "$(command -v $convert)" ] ; then
+  echo "Imagemagick convert ($convert) is required to execute this script" >> $report
+  exit 1
+fi
+
+function show_help() {
+  printf "Script usage :\n"
+  printf "\t-b : Backup, copy original images as .$original_ext ;\n"
+  printf "\t-c : Clean backups, backup copies will be removed ;\n"
+  printf "\t-f : Force, ignore the last compression timestamp. Will also overwrite backups if combined with -b ;\n"
+  printf "\t-r : Recursive, convert images recursively in the folder and its subfolders ;\n"
+  printf "\t-u : Undo, revert to original files (requires backups to have been created previously) ;\n"
+  printf "\t-v : Verbose, display commands and files being compressed ;\n"
+  printf "\t-y : Yes, do not ask for confirmation to begin compression ;\n"
+  printf "\n"
+  printf "\t-h : this screen.\n"
+  printf "\n"
+  printf " Examples : $0 -vbr /some/directory \"/directory/with some/spaces\"\n"
+  printf "            $0 -ryu /some/directory \"/directory/with some/spaces\"\n"
+  printf "            $0 -cry /some/directory \"/directory/with some/spaces\"\n"
+  printf "\n"
+}
+
+
+
+function user_confirmation() {
+  if [ "$yes" -eq 0 ] ; then
+    read -p "Are you sure you want to proceed? (y/n) " -n 1 -r
+    echo
+    if [[ ! $REPLY =~ ^[Yy]$ ]] ; then
+      echo "Operation aborted, no change on files" ; exit 0
+    fi
+  fi
+}
+
+
+
+function clean_backups() {
+  user_confirmation
+  echo ; echo -e "${BLUE}Cleaning backups...${COLOR_RESET}"
+
+  for target_folder in "$@" ; do
+    find "$target_folder" -type f -iname "*.$original_ext" -delete
+  done
+
+  echo -e "${BLUE}Cleaning backups ${BRIGHTGREEN}OK${COLOR_RESET}"
+  exit 0
+}
+
+
+function image_error() {
+  error_count=$((error_count+1))
+  echo -e "${RED}Error${COLOR_RESET} on file ${YELLOW}$file${COLOR_RESET}, file is ${BLUE}not modified${COLOR_RESET}"
+  echo "($1)"
+  log_message "error" "$1"
+  rm -f "$file.compressed"
+  if [ $force -eq 0 ] ; then rm -f "$file.$original_ext" ; fi
+}
+
+
+function log_message() {
+  echo "[$(date +"%Y/%m/%d-%H:%M:%S")] event=\"$1\" message=\"$2\"" >> $report
+}
+
+
+# Convert a byte count to a human-readable string, e.g. 1536 -> "1.50 KB"
+function bytes_to_human() {
+  b=${1:-0}; d=''; s=0; S=(Bytes {K,M,G,T,P,E,Z,Y}B)
+  while ((b > 1024)); do
+    d="$(printf ".%02d" $((b % 1024 * 100 / 1024)))"
+    b=$((b / 1024))
+    let s++
+  done
+  echo "$b$d ${S[$s]}"
+}
+
+
+
+### Catching options ###
+
+script_options="$@"  # For logs
+backup=0
+do_clean_backups=0
+force=0
+recursive=0
+undo=0
+verbose=0
+yes=0
+
+if [ $# -eq 0 ] ; then # If no option
+  echo "Some options are required" ; echo
+  show_help ; exit 1
+fi
+
+
+while getopts "bcfruvyh" optName; do
+  case "$optName" in
+    b) backup=1 ; echo "Backup enabled. A copy of files will be created" ;;
+    c) do_clean_backups=1 ; echo "Clean mode, backups in target folders will be removed" ;;
+    f) force=1 ; echo "Force enabled. The last compression timestamp will be ignored"
+       echo "Note that forcing compression will overwrite any existing backups with the current version" ;;
+    r) recursive=1 ; echo "Recursion enabled. Subfolders will be processed" ;;
+    u) undo=1 ; echo "Undo mode, original files for target folders will be restored if present" ;;
+    v) verbose=1 ; echo "Verbose mode enabled" ;;
+    y) yes=1 ; echo "Auto confirmation enabled" ;;
+    h) show_help ; exit 0 ;;
+    *) echo "Unrecognized option" ; show_help ; exit 1 ;;
+  esac
+done
+shift "$((OPTIND -1))"
+
+if [ -z "$1" ] ; then
+  echo ; echo "You must specify at least one target directory"
+  echo ; show_help ; exit 1
+fi
+
+if [ $backup -eq 1 ] && [ $undo -eq 1 ] ; then
+  echo ; echo "Backup and undo cannot be combined"
+  echo ; show_help ; exit 1
+fi
+
+if [ $do_clean_backups -eq 1 ] ; then
+  clean_backups "$@"
+fi
+
+
+
+
+#############
+### START ###
+#############
+
+
+# Report definition
+start_time="$current_time"
+
+#echo "[$(date +"%Y-%m-%d_%H-%M-%S")] Script started. Options: $script_options" >> $report
+echo >> $report
+echo >> $report
+echo >> $report
+#echo "[$current_time] event=\"info\" message=\"Script started. Options: $script_options\"" >> $report
+log_message "info" "Script started."
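+# Each log_message call appends one line to $report, for example:
+#   [2020/06/15-10:42:03] event="info" message="Script started."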
+log_message "info" "Script options : $script_options"
+[ "$verbose" -eq 1 ] && echo && echo -e "${BLUE}Script started at : $(date)${COLOR_RESET}"
+
+# Stats : init totals
+total_before=0
+total_after=0
+error_count=0
+subfolders_count=0
+
+### SUBFOLDERS DISCOVERY ###
+
+# Create the subfolder list
+echo ; echo -e "${BLUE}Listing affected subfolders...${COLOR_RESET}"
+
+if [ $recursive -eq 0 ] ; then # target folders will be the actual folder list
+  subfolders_list="$(find "$@" -maxdepth 0 -type d)"
+else # recursion enabled, get subfolders
+  subfolders_list="$(find "$@" -type d)"
+fi
+
+subfolders_total=$(echo "$subfolders_list" | wc -l)
+
+# Display and confirm $subfolders_list
+echo "$subfolders_list"
+echo -e "${BLUE}Listing $subfolders_total subfolders ${BRIGHTGREEN}OK${COLOR_RESET}"; echo
+
+user_confirmation
+
+
+
+### BACKUP RESTORATION ###
+if [ $undo -eq 1 ] ; then # Restore available backups
+  restored_count=0
+  echo ; echo -e "${BLUE}Restoring available backups in target subfolders...${COLOR_RESET}"
+
+  while IFS="" read -r folder ; do
+    subfolders_count=$(($subfolders_count+1))
+    echo
+    echo
+    echo
+    echo -e "${BRIGHTBLUE}*** Entering folder ${COLOR_RESET}${YELLOW}$folder${COLOR_RESET} ${BLUE}($subfolders_count / $subfolders_total)${COLOR_RESET}"
+    echo
+    echo -e "${BLUE}Restoring files...${COLOR_RESET}"
+
+    while IFS="" read -r file ; do
+      if [ -z "$file" ] ; then # list is empty
+        echo -e "${BLUE}No files to restore in this folder${COLOR_RESET}"
+      else
+        mv "$file" "${file%.$original_ext}"
+        [ "$verbose" -eq 1 ] && echo -ne "${BLUE}folder $subfolders_count/$subfolders_total${COLOR_RESET} "
+        [ "$verbose" -eq 1 ] && echo -e "File ${YELLOW}${file%.$original_ext}${COLOR_RESET} ${BLUE}restored${COLOR_RESET}"
+        restored_count=$(($restored_count+1))
+      fi
+    done <<< "$(find "$folder" -maxdepth 1 -type f -iname "*.$original_ext")"
+
+    if [ -f "$folder/$lastrun_marker" ] ; then
+      echo -e "${BLUE}Removing compression marker${COLOR_RESET}" ; rm "$folder/$lastrun_marker"
+    fi
+  done <<< "$subfolders_list"
+
+  echo ; echo -e "${BLUE}Restoring backups ${BRIGHTGREEN}OK${COLOR_RESET}"
+  echo -e "${BLUE}$restored_count files restored${COLOR_RESET}" ; echo
+  log_message "info" "$restored_count files restored"
+  log_message "info" "Script ended."
+  exit 0
+fi
+
+
+
+
+### COMPRESSION ###
+while IFS="" read -r folder ; do
+  subfolders_count=$(($subfolders_count+1))
+  echo
+  echo
+  echo
+  echo -e "${BRIGHTBLUE}*** Entering folder ${COLOR_RESET}${YELLOW}$folder${COLOR_RESET} ${BLUE}($subfolders_count / $subfolders_total)${COLOR_RESET}"
+
+
+
+  ### FILE LISTING ###
+  echo ; echo -e "${BLUE}Listing files...${COLOR_RESET}"
+
+  # If a marker is present, use it, unless Force is used
+  use_marker=0
+  if [ $force -eq 0 ] ; then
+    if [ -f "$folder/$lastrun_marker" ] ; then
+      use_marker=1 ; [ "$verbose" -eq 1 ] && echo -e "${BLUE}Found marker from last compression, only compressing new files${COLOR_RESET}"
+    else
+      use_marker=0 ; [ "$verbose" -eq 1 ] && echo -e "${BLUE}No marker found, all files in this folder will be compressed${COLOR_RESET}"
+    fi
+  else
+    use_marker=0 ; [ "$verbose" -eq 1 ] && echo -e "${BLUE}Force used, all files in this folder will be compressed${COLOR_RESET}"
+  fi
+
+  # Create files list
+  #
+  # (The find command is duplicated here: passing -newer and its argument
+  # through quoted variables would make find fail to locate the marker.
+  # $newer_opt and $newer_marker are only used for the verbose display.)
+  #
+  if [ $use_marker -eq 0 ] ; then
+    newer_opt=""
+    newer_marker=""
+    images_list="$(find "$folder" -maxdepth 1 -type f -iregex '.*\.\(jpe?g\|png\)')"
+  else
+    newer_opt="-newer"
+    newer_marker="\"$folder/$lastrun_marker\""
+    images_list="$(find "$folder" -maxdepth 1 -type f -newer "$folder/$lastrun_marker" -iregex '.*\.\(jpe?g\|png\)')"
+  fi
+
+  [ "$verbose" -eq 1 ] && echo -e "${BLUE}command : ${GREEN}find $folder -maxdepth 1 -type f $newer_opt $newer_marker -iregex '.*\.\(jpe?g\|png\)'${COLOR_RESET}"
+
+  images_total=$(echo "$images_list" | wc -l)
+  images_count=0
+
+
+
+
+  ### FILE COMPRESSION ###
+  [ "$verbose" -eq 1 ] && echo ; echo -e "${BLUE}Converting files...${COLOR_RESET}"
+  [ "$verbose" -eq 1 ] && echo -e "${BLUE}command : ${GREEN}$convert $convert_args ${COLOR_RESET}"
+
+  if [ -z "$images_list" ] ; then # list is empty
+    echo -e "${BLUE}No new files to compress in this folder${COLOR_RESET}"
+  else
+
+    # Initialization of folder statistics
+    folder_before=0
+    folder_after=0
+
+    while IFS="" read -r file ; do
+
+      images_count=$(($images_count+1))
+
+
+      # Stats before
+      size_before=$(stat -c%s "$file")
+      if [ "$size_before" -eq 0 ] ; then
+        image_error "File $file has a size of 0" ; continue
+      fi
+
+      # Display count as output prefix
+      [ "$verbose" -eq 1 ] && echo -ne "${BLUE}folder $subfolders_count/$subfolders_total ; image $images_count/$images_total${COLOR_RESET} "
+
+      # "file -b" output starting with "very" means a very short or empty file
+      file_type="$(file -b "$file" | awk '{print $1}')"
+      if [ "$file_type" = "very" ] ; then file_type="very small or empty" ; fi
+
+      case $file_type in
+        [jJ][pP][gG] | [jJ][pP][eE][gG])
+          convert_args="$convert_args_jpg" ;;
+        [pP][nN][gG])
+          convert_args="$convert_args_png" ;;
+        *)
+          image_error "File $file has an unexpected filetype : $file_type" ; continue ;;
+      esac
+
+      $convert $convert_args "$file" "$file.compressed" 2>>$report
+
+      if [ $? -ne 0 ] ; then
+        image_error "Compression failed for $file" ; continue
+      else
+
+        # Stats
+        size_after=$(stat -c%s "$file.compressed")
+        variation=$(awk -v after="$size_after" -v before="$size_before" 'BEGIN {print int(((after*100)/before)-100)}')
+        folder_before=$(( $folder_before + $size_before ))
+        total_before=$(( $total_before + $size_before ))
+
+        if [ "$variation" -ge 0 ] ; then
+          # No improvement, do not keep.
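+          # e.g. before=200000 bytes, after=210000 bytes:
+          #   variation = int((210000*100/200000)-100) = +5 (%)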
+          rm -f "$file.compressed"
+          if [ $force -eq 0 ] ; then rm -f "$file.$original_ext" ; fi
+
+          # Stats update
+          folder_after=$(( $folder_after + $size_before ))
+          total_after=$(( $total_after + $size_before ))
+
+          [ "$verbose" -eq 1 ] && echo -e "File ${YELLOW}$file${COLOR_RESET} would increase by $variation %, file is ${BLUE}not modified${COLOR_RESET}"
+        else
+
+          # Create / overwrite backup (unless force is used && a backup is already present)
+          if [ $backup -eq 1 ] ; then
+            if [ $force -eq 0 ] || [[ $force -eq 1 && ! -f "$file.$original_ext" ]] ; then
+              cp -a "$file" "$file.$original_ext"
+            fi
+          fi
+
+          # Replace
+          mv "$file.compressed" "$file"
+
+          # Stats update
+          folder_after=$(( $folder_after + $size_after ))
+          total_after=$(( $total_after + $size_after ))
+
+          [ "$verbose" -eq 1 ] && echo -e "File ${YELLOW}$file${COLOR_RESET} ${GREEN}decreased${COLOR_RESET} from $(bytes_to_human $size_before) to $(bytes_to_human $size_after), by ${GREEN}$variation %${COLOR_RESET}"
+        fi
+      fi
+    done <<< "$images_list"
+
+    echo -e "${BLUE}Compression ${BRIGHTGREEN}OK${COLOR_RESET}"
+
+    # Stats for this folder
+    echo
+    [ "$verbose" -eq 1 ] && echo -e "${BLUE}Folder${COLOR_RESET} ${YELLOW}$folder ${BLUE}complete.${COLOR_RESET}"
+    echo -e "${BLUE}Initial size${COLOR_RESET} : $(bytes_to_human $folder_before)"
+    echo -e "${BLUE}Compressed size${COLOR_RESET} : $(bytes_to_human $folder_after)"
+    folder_variation=$(awk -v after="$folder_after" -v before="$folder_before" 'BEGIN {print int(((after*100)/before)-100)}')
+    if [ $folder_after -gt $folder_before ] ; then
+      echo -e "${BLUE}Folder compression gain : ${RED}$folder_variation %${COLOR_RESET}"
+    else
+      echo -e "${BLUE}Folder compression gain : ${GREEN}$folder_variation %${COLOR_RESET}"
+    fi
+
+    # TODO: test success with the exit code?
+    [ "$verbose" -eq 1 ] && echo -e "${BLUE}Success, generating compression marker...${COLOR_RESET}"
+    echo "Success" > "$folder/$lastrun_marker"
+  fi
+
+done <<< "$subfolders_list"
+
+
+# Total stats
+echo ; echo ; echo ; echo -e "${BRIGHTBLUE}*** Compression complete! ***${COLOR_RESET}"
+
+if [ $total_before -eq 0 ] ; then
+  echo -e "${BLUE}No files were modified.${COLOR_RESET}"
+  log_message "info" "No files were modified."
+else
+  echo -e "${BLUE}Total initial size :${COLOR_RESET} $(bytes_to_human $total_before)"
+  log_message "info" "Total initial size : $(bytes_to_human $total_before)"
+  echo -e "${BLUE}Total compressed size :${COLOR_RESET} $(bytes_to_human $total_after)"
+  log_message "info" "Total compressed size : $(bytes_to_human $total_after)"
+  total_variation=$(awk -v after="$total_after" -v before="$total_before" 'BEGIN {print int(((after*100)/before)-100)}')
+  if [ $total_after -gt $total_before ] ; then
+    echo -e "${BLUE}Total compression gain : ${RED}$total_variation %${COLOR_RESET}"
+  else
+    echo -e "${BLUE}Total compression gain : ${GREEN}$total_variation %${COLOR_RESET}"
+  fi
+  log_message "info" "Total compression gain : $total_variation %"
+fi
+
+if [ $error_count -gt 0 ] ; then
+  echo -e "${BLUE}$error_count errors during execution, check $report for details${COLOR_RESET}"
+  log_message "info" "$error_count errors during execution, check $report for details"
+fi
+echo
+
+echo
+echo -e "${BLUE}Script ended at : $(date)${COLOR_RESET}"
+log_message "info" "Script ended."
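+# Example cron entry (hypothetical schedule and path) to compress new images nightly:
+#   0 3 * * * /usr/local/bin/img-compressr.sh -ry /var/www/images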
+echo + diff --git a/stack/1001pharmadev/.env.dist b/stack/1001pharmadev/.env.dist new file mode 100644 index 0000000..f307cc5 --- /dev/null +++ b/stack/1001pharmadev/.env.dist @@ -0,0 +1,4 @@ +MAILCATCHER_SERVICE_1080_TAGS=urlprefix-mailcatcher.${APP_DOMAIN}/ +TOGGLE_API_SERVICE_80_TAGS=urlprefix-toggle-api.${APP_DOMAIN}/ +TOGGLE_UI_SERVICE_80_TAGS=urlprefix-toggle-ui.${APP_DOMAIN}/ +TOGGLE__API_BASE_URL=http://toggle-api.${APP_DOMAIN} diff --git a/stack/1001pharmadev/mailcatcher.yml b/stack/1001pharmadev/mailcatcher.yml new file mode 100644 index 0000000..8cf7db9 --- /dev/null +++ b/stack/1001pharmadev/mailcatcher.yml @@ -0,0 +1,26 @@ +version: '3.6' + +services: + mailcatcher: + image: 1001pharmadev/mailcatcher:latest + labels: + - SERVICE_1025_CHECK_TCP=true + - SERVICE_1025_NAME=${COMPOSE_SERVICE_NAME}-mailcatcher-1025 + - SERVICE_1080_CHECK_HTTP=/ + - SERVICE_1080_NAME=${COMPOSE_SERVICE_NAME}-mailcatcher-1080 + - SERVICE_1080_TAGS=${MAILCATCHER_SERVICE_1080_TAGS} + networks: + - private + - public + ports: + - 1025 + - 1080 + restart: always + +networks: + private: + external: true + name: ${DOCKER_NETWORK_PRIVATE} + public: + external: true + name: ${DOCKER_NETWORK_PUBLIC} diff --git a/stack/1001pharmadev/toggle.yml b/stack/1001pharmadev/toggle.yml new file mode 100644 index 0000000..b86ce06 --- /dev/null +++ b/stack/1001pharmadev/toggle.yml @@ -0,0 +1,43 @@ +version: '3.6' + +services: + toggle-api: + image: 1001pharmadev/qandidate-toggle-api:latest + depends_on: + - redis + environment: + - TOGGLE__REDIS_DSN=tcp://redis:6379 + labels: + - SERVICE_80_CHECK_HTTP=/toggles + - SERVICE_80_NAME=${COMPOSE_SERVICE_NAME}-toggle-api-80 + - SERVICE_80_TAGS=${TOGGLE_API_SERVICE_80_TAGS} + networks: + - private + - public + ports: + - 80 + restart: always + toggle-ui: + image: 1001pharmadev/qandidate-toggle-ui:latest + depends_on: + - toggle-api + environment: + - TOGGLE__API_BASE_URL=${TOGGLE__API_BASE_URL} + labels: + - SERVICE_80_CHECK_HTTP=/ + - SERVICE_80_NAME=${COMPOSE_SERVICE_NAME}-toggle-ui-80 + - SERVICE_80_TAGS=${TOGGLE_UI_SERVICE_80_TAGS} + networks: + - private + - public + ports: + - 80 + restart: always + +networks: + private: + external: true + name: ${DOCKER_NETWORK_PRIVATE} + public: + external: true + name: ${DOCKER_NETWORK_PUBLIC} diff --git a/stack/alpine/.env.dist b/stack/alpine/.env.dist new file mode 100644 index 0000000..b4289aa --- /dev/null +++ b/stack/alpine/.env.dist @@ -0,0 +1 @@ +SYSCTL_CONFIG=vm.max_map_count=262144 vm.overcommit_memory=1 fs.file-max=8388608 net.core.somaxconn=1024 diff --git a/stack/alpine/sysctl.yml b/stack/alpine/sysctl.yml new file mode 100644 index 0000000..e8949f4 --- /dev/null +++ b/stack/alpine/sysctl.yml @@ -0,0 +1,14 @@ +version: '3.6' + +services: + sysctl: + command: sh -c 'sysctl -q -w ${SYSCTL_CONFIG} ||:' + image: alpine:latest + networks: + - private + privileged: true + +networks: + private: + external: true + name: ${DOCKER_NETWORK_PRIVATE} diff --git a/stack/base.mk b/stack/base.mk new file mode 100644 index 0000000..5868e64 --- /dev/null +++ b/stack/base.mk @@ -0,0 +1,19 @@ +.PHONY: base +base: docker-network-create stack-base-up base-ssh-add + +.PHONY: ssh-add +ssh-add: base-ssh-add + +.PHONY: base-ssh-add +base-ssh-add: base-ssh-key + $(eval SSH_PRIVATE_KEYS := $(foreach file,$(SSH_DIR)/id_rsa $(filter-out $(wildcard $(SSH_DIR)/id_rsa),$(wildcard $(SSH_DIR)/*)),$(if $(shell grep "PRIVATE KEY" $(file) 2>/dev/null),$(notdir $(file))))) + $(call docker-run,$(DOCKER_SSH_AUTH) $(DOCKER_IMAGE_CLI),sh -c "$(foreach file,$(patsubst 
%,$(SSH_DIR)/%,$(SSH_PRIVATE_KEYS)),ssh-add -l |grep -qw $$(ssh-keygen -lf $(file) 2>/dev/null |awk '{print $$2}') 2>/dev/null || ssh-add $(file) ||: &&) true") + +.PHONY: base-ssh-key +base-ssh-key: stack-base-up +ifneq (,$(filter true,$(DRONE))) + $(call exec,[ ! -d $(SSH_DIR) ] && mkdir -p $(SSH_DIR) && chown $(UID) $(SSH_DIR) && chmod 0700 $(SSH_DIR) ||:) +else + $(eval DOCKER_RUN_VOLUME += -v $(SSH_DIR):$(SSH_DIR)) +endif + $(if $(SSH_KEY),$(eval export SSH_KEY ?= $(SSH_KEY)) $(call docker-run,$(DOCKER_IMAGE_CLI),echo -e "$$SSH_KEY" > $(SSH_DIR)/${COMPOSE_PROJECT_NAME}_id_rsa && chmod 0400 $(SSH_DIR)/${COMPOSE_PROJECT_NAME}_id_rsa && chown $(UID) $(SSH_DIR)/${COMPOSE_PROJECT_NAME}_id_rsa ||:)) diff --git a/stack/base/base.yml b/stack/base/base.yml new file mode 100644 index 0000000..d044aab --- /dev/null +++ b/stack/base/base.yml @@ -0,0 +1,61 @@ +version: '3.6' + +services: + cli: + build: + args: + - DOCKER_BUILD_DIR=docker/cli + - GIT_AUTHOR_NAME=${GIT_AUTHOR_NAME} + - GIT_AUTHOR_EMAIL=${GIT_AUTHOR_EMAIL} + - SSH_BASTION_USERNAME=${SSH_BASTION_USERNAME} + - SSH_BASTION_HOSTNAME=${SSH_BASTION_HOSTNAME} + - SSH_PUBLIC_HOST_KEYS=${SSH_PUBLIC_HOST_KEYS} + - SSH_PRIVATE_IP_RANGE=${SSH_PRIVATE_IP_RANGE} + - UID=${UID} + - USER=${USER} + context: ../.. + dockerfile: docker/cli/Dockerfile + target: ${DOCKER_BUILD_TARGET} + command: tail -f /dev/null + container_name: ${DOCKER_NAME_CLI} + depends_on: + - ssh + environment: + - SSH_AUTH_SOCK=/tmp/ssh-agent/socket + image: ${DOCKER_IMAGE_CLI}:${DOCKER_IMAGE_TAG} + networks: + - private + restart: always + volumes: + - ssh:/tmp/ssh-agent:ro + - ${MONOREPO_DIR}:${MONOREPO_DIR}:cached + working_dir: ${MONOREPO_DIR} + ssh: + build: + args: + - DOCKER_BUILD_DIR=docker/ssh + - UID=${UID} + - USER=${USER} + context: ../.. 
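+      # the build context is the repository root (two levels up), so the
+      # dockerfile path below is resolved from the top of the monorepo,
+      # following the same convention as the cli service above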
+ dockerfile: docker/ssh/Dockerfile + target: ${DOCKER_BUILD_TARGET} + container_name: ${DOCKER_NAME_SSH} + image: ${DOCKER_IMAGE_SSH}:${DOCKER_IMAGE_TAG} + networks: + - private + restart: always + volumes: + - ssh:/tmp/ssh-agent + +volumes: + ssh: + driver: local + driver_opts: + type: tmpfs + device: tmpfs + o: uid=${UID} + +networks: + private: + external: true + name: ${DOCKER_NETWORK_PRIVATE} diff --git a/stack/develop.mk b/stack/develop.mk new file mode 100644 index 0000000..dd2236d --- /dev/null +++ b/stack/develop.mk @@ -0,0 +1 @@ +develop ?= redmine diff --git a/stack/drone.mk b/stack/drone.mk new file mode 100644 index 0000000..36229e1 --- /dev/null +++ b/stack/drone.mk @@ -0,0 +1 @@ +drone ?= drone/drone drone/drone-runner-docker drone/gc diff --git a/stack/drone/.env.dist b/stack/drone/.env.dist new file mode 100644 index 0000000..e1e740f --- /dev/null +++ b/stack/drone/.env.dist @@ -0,0 +1,9 @@ +DRONE_GITHUB_CLIENT_ID=github_client_id +DRONE_GITHUB_CLIENT_SECRET=github_client_secret +DRONE_RPC_SECRET=drone_rpc_secret +DRONE_RUNNER_CAPACITY=1 +DRONE_SERVER_HOST=drone.${APP_DOMAIN} +DRONE_SERVER_PROTO=http +DRONE_SERVER_SERVICE_80_TAGS=urlprefix-${DRONE_SERVER_HOST}/ +DRONE_USER_CREATE=username:gitaccount,admin:true +DRONE_USER_FILTER=gitaccount diff --git a/stack/drone/drone-runner-docker.1.1.yml b/stack/drone/drone-runner-docker.1.1.yml new file mode 100644 index 0000000..6755427 --- /dev/null +++ b/stack/drone/drone-runner-docker.1.1.yml @@ -0,0 +1,5 @@ +version: '3.6' + +services: + drone-runner-docker: + image: drone/drone-runner-docker:1.1 diff --git a/stack/drone/drone-runner-docker.1.2.yml b/stack/drone/drone-runner-docker.1.2.yml new file mode 100644 index 0000000..b4e86a0 --- /dev/null +++ b/stack/drone/drone-runner-docker.1.2.yml @@ -0,0 +1,5 @@ +version: '3.6' + +services: + drone-runner-docker: + image: drone/drone-runner-docker:1.2 diff --git a/stack/drone/drone-runner-docker.1.3.yml b/stack/drone/drone-runner-docker.1.3.yml new file mode 100644 index 0000000..042812a --- /dev/null +++ b/stack/drone/drone-runner-docker.1.3.yml @@ -0,0 +1,5 @@ +version: '3.6' + +services: + drone-runner-docker: + image: drone/drone-runner-docker:1.3 diff --git a/stack/drone/drone-runner-docker.debug.yml b/stack/drone/drone-runner-docker.debug.yml new file mode 100644 index 0000000..a68043e --- /dev/null +++ b/stack/drone/drone-runner-docker.debug.yml @@ -0,0 +1,7 @@ +version: '3.6' + +services: + drone-runner-docker: + environment: + - DRONE_DEBUG=true + - DRONE_TRACE=true diff --git a/stack/drone/drone-runner-docker.latest.yml b/stack/drone/drone-runner-docker.latest.yml new file mode 100644 index 0000000..0a129a9 --- /dev/null +++ b/stack/drone/drone-runner-docker.latest.yml @@ -0,0 +1,5 @@ +version: '3.6' + +services: + drone-runner-docker: + image: drone/drone-runner-docker:latest diff --git a/stack/drone/drone-runner-docker.yml b/stack/drone/drone-runner-docker.yml new file mode 100644 index 0000000..a1f8c91 --- /dev/null +++ b/stack/drone/drone-runner-docker.yml @@ -0,0 +1,24 @@ +version: '3.6' + +services: + drone-runner-docker: + depends_on: + - drone + environment: + - DRONE_RPC_SECRET=${DRONE_RPC_SECRET} + - DRONE_RPC_HOST=drone + - DRONE_RPC_PROTO=http + - DRONE_RUNNER_CAPACITY=${DRONE_RUNNER_CAPACITY} + - DRONE_RUNNER_NAME=${HOSTNAME} + labels: + - SERVICE_3000_IGNORE=true + networks: + - private + restart: always + volumes: + - /var/run/docker.sock:/var/run/docker.sock + +networks: + private: + external: true + name: ${DOCKER_NETWORK_PRIVATE} diff --git 
a/stack/drone/drone.1.6.yml b/stack/drone/drone.1.6.yml new file mode 100644 index 0000000..6a53c0e --- /dev/null +++ b/stack/drone/drone.1.6.yml @@ -0,0 +1,5 @@ +version: '3.6' + +services: + drone: + image: drone/drone:1.6 diff --git a/stack/drone/drone.1.7.yml b/stack/drone/drone.1.7.yml new file mode 100644 index 0000000..002f260 --- /dev/null +++ b/stack/drone/drone.1.7.yml @@ -0,0 +1,5 @@ +version: '3.6' + +services: + drone: + image: drone/drone:1.7 diff --git a/stack/drone/drone.debug b/stack/drone/drone.debug new file mode 100644 index 0000000..b9b7616 --- /dev/null +++ b/stack/drone/drone.debug @@ -0,0 +1,7 @@ +version: '3.6' + +services: + drone: + environment: + - DRONE_LOGS_DEBUG=true + - DRONE_LOGS_TRACE=true diff --git a/stack/drone/drone.latest.yml b/stack/drone/drone.latest.yml new file mode 100644 index 0000000..95c9dc4 --- /dev/null +++ b/stack/drone/drone.latest.yml @@ -0,0 +1,5 @@ +version: '3.6' + +services: + drone: + image: drone/drone:latest diff --git a/stack/drone/drone.yml b/stack/drone/drone.yml new file mode 100644 index 0000000..29b9202 --- /dev/null +++ b/stack/drone/drone.yml @@ -0,0 +1,44 @@ +version: '3.6' + +services: + drone: + environment: + - DRONE_GIT_ALWAYS_AUTH=false + - DRONE_GITHUB_SERVER=https://github.com + - DRONE_GITHUB_CLIENT_ID=${DRONE_GITHUB_CLIENT_ID} + - DRONE_GITHUB_CLIENT_SECRET=${DRONE_GITHUB_CLIENT_SECRET} + - DRONE_LOGS_COLOR=true + - DRONE_LOGS_PRETTY=true + - DRONE_PROMETHEUS_ANONYMOUS_ACCESS=true + - DRONE_RPC_SECRET=${DRONE_RPC_SECRET} + - DRONE_SERVER_HOST=${DRONE_SERVER_HOST} + - DRONE_SERVER_PROTO=${DRONE_SERVER_PROTO} + - DRONE_TLS_AUTOCERT=true + - DRONE_USER_CREATE=${DRONE_USER_CREATE} + - DRONE_USER_FILTER=${DRONE_USER_FILTER} + labels: + - SERVICE_80_NAME=${COMPOSE_SERVICE_NAME}-drone-80 + - SERVICE_80_CHECK_TCP=true + - SERVICE_80_CHECK_INITIAL_STATUS=passing + - SERVICE_80_TAGS=${DRONE_SERVER_SERVICE_80_TAGS} + - SERVICE_443_IGNORE=true + networks: + - private + - public + ports: + - 80 + - 443 + restart: always + volumes: + - drone:/data + +volumes: + drone: + +networks: + private: + external: true + name: ${DOCKER_NETWORK_PRIVATE} + public: + external: true + name: ${DOCKER_NETWORK_PUBLIC} diff --git a/stack/drone/gc.1.0.yml b/stack/drone/gc.1.0.yml new file mode 100644 index 0000000..b9749c9 --- /dev/null +++ b/stack/drone/gc.1.0.yml @@ -0,0 +1,5 @@ +version: '3.6' + +services: + drone-gc: + image: drone/gc:1.0 diff --git a/stack/drone/gc.debug.yml b/stack/drone/gc.debug.yml new file mode 100644 index 0000000..e156096 --- /dev/null +++ b/stack/drone/gc.debug.yml @@ -0,0 +1,8 @@ +version: '3.6' + +services: + drone-gc: + environment: + - GC_DEBUG=true + - GC_DEBUG_COLOR=true + - GC_DEBUG_PRETTY=true diff --git a/stack/drone/gc.latest.yml b/stack/drone/gc.latest.yml new file mode 100644 index 0000000..b9b6c67 --- /dev/null +++ b/stack/drone/gc.latest.yml @@ -0,0 +1,5 @@ +version: '3.6' + +services: + drone-gc: + image: drone/gc:latest diff --git a/stack/drone/gc.yml b/stack/drone/gc.yml new file mode 100644 index 0000000..2d13411 --- /dev/null +++ b/stack/drone/gc.yml @@ -0,0 +1,18 @@ +version: '3.6' + +services: + drone-gc: + image: drone/gc:latest + environment: + - GC_CACHE=20gb + - GC_INTERVAL=5m + networks: + - private + restart: always + volumes: + - /var/run/docker.sock:/var/run/docker.sock + +networks: + private: + external: true + name: ${DOCKER_NETWORK_PRIVATE} diff --git a/stack/elastic.mk b/stack/elastic.mk new file mode 100644 index 0000000..891166c --- /dev/null +++ b/stack/elastic.mk @@ -0,0 +1 @@ 
+elastic ?= elastic/curator elastic/elasticsearch elastic/kibana alpine/sysctl diff --git a/stack/elastic/.env.dist b/stack/elastic/.env.dist new file mode 100644 index 0000000..389fb72 --- /dev/null +++ b/stack/elastic/.env.dist @@ -0,0 +1,11 @@ +APM_SERVER_SERVICE_8200_TAGS=urlprefix-apm.${APP_DOMAIN}/ +CURATOR_LOGFORMAT=default +CURATOR_LOGLEVEL=INFO +CURATOR_MASTER_ONLY=False +CURATOR_TIMEOUT=30 +CURATOR_USE_SSL=False +ELASTICSEARCH_HOST=elasticsearch +ELASTICSEARCH_PORT=9200 +ELASTICSEARCH_PROTOCOL=http +ELASTICSEARCH_SERVICE_9200_TAGS=urlprefix-elasticsearch.${APP_DOMAIN}/ +KIBANA_SERVICE_5601_TAGS=urlprefix-kibana.${APP_DOMAIN}/ diff --git a/stack/elastic/apm-server-oss.yml b/stack/elastic/apm-server-oss.yml new file mode 100644 index 0000000..18b1940 --- /dev/null +++ b/stack/elastic/apm-server-oss.yml @@ -0,0 +1,29 @@ +version: '3.6' + +services: + apm-server-oss: + build: + args: + - DOCKER_BUILD_DIR=docker/apm-server-oss + context: ../.. + dockerfile: docker/apm-server-oss/Dockerfile + image: ${DOCKER_REPOSITORY}/apm-server-oss:${DOCKER_IMAGE_TAG} + command: -c apm-server.yml --strict.perms=false -e -E output.elasticsearch.hosts=["${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}"] -E output.elasticsearch.protocol=${ELASTICSEARCH_PROTOCOL} -E output.elasticsearch.username=${ELASTICSEARCH_USERNAME} -E output.elasticsearch.password=${ELASTICSEARCH_PASSWORD} -E apm-server.register.ingest.pipeline.enabled=false + labels: + - SERVICE_8200_CHECK_HTTP=/ + - SERVICE_8200_NAME=${COMPOSE_SERVICE_NAME}-apm-server-oss-8200 + - SERVICE_8200_TAGS=${APM_SERVER_SERVICE_8200_TAGS} + networks: + - private + - public + ports: + - 8200 + restart: always + +networks: + private: + external: true + name: ${DOCKER_NETWORK_PRIVATE} + public: + external: true + name: ${DOCKER_NETWORK_PUBLIC} diff --git a/stack/elastic/apm-server.yml b/stack/elastic/apm-server.yml new file mode 100644 index 0000000..0e374ac --- /dev/null +++ b/stack/elastic/apm-server.yml @@ -0,0 +1,27 @@ +version: '3.6' + +services: + apm-server: + image: docker.elastic.co/apm/apm-server:7.4.2 + command: -c apm-server.yml --strict.perms=false -e -E output.elasticsearch.hosts=["${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}"] -E output.elasticsearch.protocol=${ELASTICSEARCH_PROTOCOL} -E output.elasticsearch.username=${ELASTICSEARCH_USERNAME} -E output.elasticsearch.password=${ELASTICSEARCH_PASSWORD} + labels: + - SERVICE_8200_CHECK_HTTP=/ + - SERVICE_8200_NAME=${COMPOSE_SERVICE_NAME}-apm-server-8200 + - SERVICE_8200_TAGS=${APM_SERVER_SERVICE_8200_TAGS} + networks: + private: + aliases: + - apm.${DOCKER_NETWORK_PRIVATE} + - apm.elastic.${DOCKER_NETWORK_PRIVATE} + public: + ports: + - 8200 + restart: always + +networks: + private: + external: true + name: ${DOCKER_NETWORK_PRIVATE} + public: + external: true + name: ${DOCKER_NETWORK_PUBLIC} diff --git a/stack/elastic/curator.local.yml b/stack/elastic/curator.local.yml new file mode 100644 index 0000000..26eac5c --- /dev/null +++ b/stack/elastic/curator.local.yml @@ -0,0 +1,6 @@ +version: '3.6' + +services: + curator: + depends_on: + - elasticsearch diff --git a/stack/elastic/curator.yml b/stack/elastic/curator.yml new file mode 100644 index 0000000..9e48101 --- /dev/null +++ b/stack/elastic/curator.yml @@ -0,0 +1,27 @@ +version: '3.6' + +services: + curator: + build: + args: + - DOCKER_BUILD_DIR=docker/elastic/curator + context: ../.. 
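+      # like the cli and ssh services in the base stack, curator is built
+      # from its Dockerfile under docker/ rather than pulled from a registry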
+ dockerfile: docker/elastic/curator/Dockerfile + target: ${DOCKER_BUILD_TARGET} + environment: + - DEPLOY=${DEPLOY} + - HOSTS=${ELASTICSEARCH_PROTOCOL}://${ELASTICSEARCH_HOST} + - LOGFORMAT=${CURATOR_LOGFORMAT} + - LOGLEVEL=${CURATOR_LOGLEVEL} + - MASTER_ONLY=${CURATOR_MASTER_ONLY} + - PORT=${ELASTICSEARCH_PORT} + - TIMEOUT=${CURATOR_TIMEOUT} + - USE_SSL=${CURATOR_USE_SSL} + networks: + - private + restart: always + +networks: + private: + external: true + name: ${DOCKER_NETWORK_PRIVATE} diff --git a/stack/elastic/elasticsearch.5.3.yml b/stack/elastic/elasticsearch.5.3.yml new file mode 100644 index 0000000..c549f78 --- /dev/null +++ b/stack/elastic/elasticsearch.5.3.yml @@ -0,0 +1,7 @@ +version: '3.6' + +services: + elasticsearch: + image: docker.elastic.co/elasticsearch/elasticsearch:5.3.3 + environment: + - discovery.zen.minimum_master_nodes=1 diff --git a/stack/elastic/elasticsearch.7.4.yml b/stack/elastic/elasticsearch.7.4.yml new file mode 100644 index 0000000..d5a6455 --- /dev/null +++ b/stack/elastic/elasticsearch.7.4.yml @@ -0,0 +1,7 @@ +version: '3.6' + +services: + elasticsearch: + image: docker.elastic.co/elasticsearch/elasticsearch:7.4.2 + environment: + - node.data=true diff --git a/stack/elastic/elasticsearch.latest.yml b/stack/elastic/elasticsearch.latest.yml new file mode 100644 index 0000000..a934d65 --- /dev/null +++ b/stack/elastic/elasticsearch.latest.yml @@ -0,0 +1,5 @@ +version: '3.6' + +services: + elasticsearch: + image: docker.elastic.co/elasticsearch/elasticsearch:7.6.2 diff --git a/stack/elastic/elasticsearch.local.7.4.yml b/stack/elastic/elasticsearch.local.7.4.yml new file mode 100644 index 0000000..77f3c6b --- /dev/null +++ b/stack/elastic/elasticsearch.local.7.4.yml @@ -0,0 +1,7 @@ +version: '3.6' + +services: + elasticsearch: + environment: + - discovery.type=single-node + diff --git a/stack/elastic/elasticsearch.yml b/stack/elastic/elasticsearch.yml new file mode 100644 index 0000000..8771cb7 --- /dev/null +++ b/stack/elastic/elasticsearch.yml @@ -0,0 +1,49 @@ +version: '3.6' + +services: + elasticsearch: + depends_on: + - sysctl + environment: + - ES_JAVA_OPTS=-Xmx1024m -Xms1024m + - xpack.security.enabled=false + - xpack.monitoring.enabled=false + - xpack.graph.enabled=false + - xpack.watcher.enabled=false + - cluster.name=elasticsearch-${ENV} + - network.host=0.0.0.0 + - http.cors.enabled=true + - http.cors.allow-credentials=true + - http.cors.allow-methods=OPTIONS,HEAD,GET,POST,PUT,DELETE + - http.cors.max-age=0 + - http.cors.allow-origin=* + - http.cors.allow-headers=X-Requested-With,X-Auth-Token,Content-Type,Content-Length + labels: + - SERVICE_9200_CHECK_HTTP=/ + - SERVICE_9200_NAME=${COMPOSE_SERVICE_NAME}-elasticsearch-9200 + - SERVICE_9200_TAGS=${ELASTICSEARCH_SERVICE_9200_TAGS} + - SERVICE_9300_CHECK_TCP=true + - SERVICE_9300_NAME=${COMPOSE_SERVICE_NAME}-elasticsearch-9300 + networks: + - private + - public + ports: + - 9200 + - 9300 + ulimits: + nofile: + soft: 65536 + hard: 65536 + volumes: + - elasticsearch:/usr/share/elasticsearch/data + restart: always +volumes: + elasticsearch: + +networks: + private: + external: true + name: ${DOCKER_NETWORK_PRIVATE} + public: + external: true + name: ${DOCKER_NETWORK_PUBLIC} diff --git a/stack/elastic/kibana-oss.7.4.yml b/stack/elastic/kibana-oss.7.4.yml new file mode 100644 index 0000000..5c64b56 --- /dev/null +++ b/stack/elastic/kibana-oss.7.4.yml @@ -0,0 +1,9 @@ +version: '3.6' + +services: + kibana-oss: + image: docker.elastic.co/kibana/kibana-oss:7.4.2 + environment: + - 
ELASTICSEARCH_HOSTS="${ELASTICSEARCH_PROTOCOL}://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}" + - KIBANA_INDEX=.kibana-oss.${ENV} + - SERVER_NAME=kibana.${APP_DOMAIN} diff --git a/stack/elastic/kibana-oss.latest.yml b/stack/elastic/kibana-oss.latest.yml new file mode 100644 index 0000000..6e82429 --- /dev/null +++ b/stack/elastic/kibana-oss.latest.yml @@ -0,0 +1,9 @@ +version: '3.6' + +services: + kibana-oss: + image: docker.elastic.co/kibana/kibana-oss:7.7.1 + environment: + - ELASTICSEARCH_HOSTS="${ELASTICSEARCH_PROTOCOL}://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}" + - KIBANA_INDEX=.kibana-oss.${ENV} + - SERVER_NAME=kibana.${APP_DOMAIN} diff --git a/stack/elastic/kibana-oss.local.yml b/stack/elastic/kibana-oss.local.yml new file mode 100644 index 0000000..050cb29 --- /dev/null +++ b/stack/elastic/kibana-oss.local.yml @@ -0,0 +1,6 @@ +version: '3.6' + +services: + kibana-oss: + depends_on: + - elasticsearch diff --git a/stack/elastic/kibana-oss.yml b/stack/elastic/kibana-oss.yml new file mode 100644 index 0000000..f29f32e --- /dev/null +++ b/stack/elastic/kibana-oss.yml @@ -0,0 +1,22 @@ +version: '3.6' + +services: + kibana-oss: + labels: + - SERVICE_5601_CHECK_HTTP=/app/kibana + - SERVICE_5601_NAME=${COMPOSE_SERVICE_NAME}-kibana-oss-5601 + - SERVICE_5601_TAGS=${KIBANA_SERVICE_5601_TAGS} + networks: + - private + - public + ports: + - 5601 + restart: always + +networks: + private: + external: true + name: ${DOCKER_NETWORK_PRIVATE} + public: + external: true + name: ${DOCKER_NETWORK_PUBLIC} diff --git a/stack/elastic/kibana.5.3.yml b/stack/elastic/kibana.5.3.yml new file mode 100644 index 0000000..b502e6b --- /dev/null +++ b/stack/elastic/kibana.5.3.yml @@ -0,0 +1,7 @@ +version: '3.6' + +services: + kibana: + image: docker.elastic.co/kibana/kibana:5.3.3 + environment: + - ELASTICSEARCH_URL="${ELASTICSEARCH_PROTOCOL}://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}" diff --git a/stack/elastic/kibana.7.4.yml b/stack/elastic/kibana.7.4.yml new file mode 100644 index 0000000..7bfc310 --- /dev/null +++ b/stack/elastic/kibana.7.4.yml @@ -0,0 +1,9 @@ +version: '3.6' + +services: + kibana: + image: docker.elastic.co/kibana/kibana:7.4.2 + environment: + - ELASTICSEARCH_HOSTS="${ELASTICSEARCH_PROTOCOL}://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}" + - KIBANA_INDEX=.kibana.${ENV} + - SERVER_NAME=kibana.${APP_DOMAIN} diff --git a/stack/elastic/kibana.latest.yml b/stack/elastic/kibana.latest.yml new file mode 100644 index 0000000..a3c4a34 --- /dev/null +++ b/stack/elastic/kibana.latest.yml @@ -0,0 +1,9 @@ +version: '3.6' + +services: + kibana: + image: docker.elastic.co/kibana/kibana:7.7.1 + environment: + - ELASTICSEARCH_HOSTS="${ELASTICSEARCH_PROTOCOL}://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}" + - KIBANA_INDEX=.kibana.${ENV} + - SERVER_NAME=kibana.${APP_DOMAIN} diff --git a/stack/elastic/kibana.local.yml b/stack/elastic/kibana.local.yml new file mode 100644 index 0000000..4580959 --- /dev/null +++ b/stack/elastic/kibana.local.yml @@ -0,0 +1,6 @@ +version: '3.6' + +services: + kibana: + depends_on: + - elasticsearch diff --git a/stack/elastic/kibana.yml b/stack/elastic/kibana.yml new file mode 100644 index 0000000..a2ec2f9 --- /dev/null +++ b/stack/elastic/kibana.yml @@ -0,0 +1,22 @@ +version: '3.6' + +services: + kibana: + labels: + - SERVICE_5601_CHECK_HTTP=/app/kibana + - SERVICE_5601_NAME=${COMPOSE_SERVICE_NAME}-kibana-5601 + - SERVICE_5601_TAGS=${KIBANA_SERVICE_5601_TAGS} + networks: + - private + - public + ports: + - 5601 + restart: always + +networks: + private: + external: true + name: 
${DOCKER_NETWORK_PRIVATE} + public: + external: true + name: ${DOCKER_NETWORK_PUBLIC} diff --git a/stack/grafana/.env.dist b/stack/grafana/.env.dist new file mode 100644 index 0000000..34d9f24 --- /dev/null +++ b/stack/grafana/.env.dist @@ -0,0 +1,6 @@ +GRAFANA_AWS_ACCESS_KEY=${AWS_ACCESS_KEY_ID} +GRAFANA_AWS_SECRET_KEY=${AWS_SECRET_ACCESS_KEY} +GRAFANA_MYSQL_DB=grafana +GRAFANA_MYSQL_PASSWORD=grafana +GRAFANA_MYSQL_USER=grafana +GRAFANA_SERVICE_3000_TAGS=urlprefix-grafana.${APP_DOMAIN}/ diff --git a/stack/grafana/grafana.yml b/stack/grafana/grafana.yml new file mode 100644 index 0000000..77bf9ee --- /dev/null +++ b/stack/grafana/grafana.yml @@ -0,0 +1,41 @@ +version: '3.6' + +services: + grafana: + build: + args: + - AWS_ACCESS_KEY=${GRAFANA_AWS_ACCESS_KEY} + - AWS_SECRET_KEY=${GRAFANA_AWS_SECRET_KEY} + - DOCKER_BUILD_DIR=docker/grafana + - MYSQL_GRAFANA_DB=${GRAFANA_MYSQL_DB} + - MYSQL_GRAFANA_PASSWORD=${GRAFANA_MYSQL_PASSWORD} + - MYSQL_GRAFANA_USER=${GRAFANA_MYSQL_USER} + context: ../.. + dockerfile: docker/grafana/Dockerfile + environment: + - GF_INSTALL_PLUGINS=grafana-clock-panel,grafana-simple-json-datasource + image: ${DOCKER_REPOSITORY}/grafana:${DOCKER_IMAGE_TAG} + labels: + - SERVICE_3000_NAME=${COMPOSE_SERVICE_NAME}-grafana-3000 + - SERVICE_3000_CHECK_TCP=true + - SERVICE_3000_CHECK_INITIAL_STATUS=passing + - SERVICE_3000_TAGS=${GRAFANA_SERVICE_3000_TAGS} + networks: + - private + - public + ports: + - 3000 + restart: always + volumes: + - grafana:/var/lib/grafana + +volumes: + grafana: + +networks: + private: + external: true + name: ${DOCKER_NETWORK_PRIVATE} + public: + external: true + name: ${DOCKER_NETWORK_PUBLIC} diff --git a/stack/logs.mk b/stack/logs.mk new file mode 100644 index 0000000..46fa26d --- /dev/null +++ b/stack/logs.mk @@ -0,0 +1 @@ +logs ?= sematext/logagent diff --git a/stack/memcached/memcached.yml b/stack/memcached/memcached.yml new file mode 100644 index 0000000..850e4dc --- /dev/null +++ b/stack/memcached/memcached.yml @@ -0,0 +1,18 @@ +version: '3.6' + +services: + memcached: + image: memcached:alpine + labels: + - SERVICE_11211_CHECK_TCP=true + - SERVICE_11211_NAME=${COMPOSE_SERVICE_NAME}-memcached-11211 + networks: + - private + ports: + - 11211 + restart: always + +networks: + private: + external: true + name: ${DOCKER_NETWORK_PRIVATE} diff --git a/stack/monitoring.mk b/stack/monitoring.mk new file mode 100644 index 0000000..ddc3892 --- /dev/null +++ b/stack/monitoring.mk @@ -0,0 +1 @@ +monitoring ?= grafana prometheus/alertmanager prometheus/blackbox-exporter prometheus/es-exporter prometheus/prometheus diff --git a/stack/mysql/.env.dist b/stack/mysql/.env.dist new file mode 100644 index 0000000..4b04ea2 --- /dev/null +++ b/stack/mysql/.env.dist @@ -0,0 +1 @@ +MYSQL_ROOT_PASSWORD=root diff --git a/stack/mysql/mysql.5.6.yml b/stack/mysql/mysql.5.6.yml new file mode 100644 index 0000000..fe6bba9 --- /dev/null +++ b/stack/mysql/mysql.5.6.yml @@ -0,0 +1,10 @@ +version: '3.6' + +services: + mysql: + build: + args: + - DOCKER_BUILD_DIR=docker/mysql/5.6 + context: ../.. 
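+      # mysql.5.6.yml, mysql.latest.yml, mysql.dev.yml and mysql.local.yml are
+      # overlays over stack/mysql/mysql.yml; a hedged usage sketch of how they
+      # compose (flag order illustrative):
+      #   docker-compose -f stack/mysql/mysql.yml \
+      #                  -f stack/mysql/mysql.5.6.yml \
+      #                  -f stack/mysql/mysql.local.yml up -d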
+ dockerfile: docker/mysql/5.6/Dockerfile + image: ${DOCKER_REPOSITORY}/mysql:${DOCKER_IMAGE_TAG} diff --git a/stack/mysql/mysql.dev.yml b/stack/mysql/mysql.dev.yml new file mode 100644 index 0000000..3ec43b6 --- /dev/null +++ b/stack/mysql/mysql.dev.yml @@ -0,0 +1,6 @@ +version: '3.6' + +services: + mysql: + ports: + - 3303:3306 diff --git a/stack/mysql/mysql.latest.yml b/stack/mysql/mysql.latest.yml new file mode 100644 index 0000000..94f6a2b --- /dev/null +++ b/stack/mysql/mysql.latest.yml @@ -0,0 +1,5 @@ +version: '3.6' + +services: + mysql: + image: mysql:latest diff --git a/stack/mysql/mysql.local.yml b/stack/mysql/mysql.local.yml new file mode 100644 index 0000000..288e361 --- /dev/null +++ b/stack/mysql/mysql.local.yml @@ -0,0 +1,6 @@ +version: '3.6' + +services: + mysql: + ports: + - 3306:3306 diff --git a/stack/mysql/mysql.yml b/stack/mysql/mysql.yml new file mode 100644 index 0000000..4012777 --- /dev/null +++ b/stack/mysql/mysql.yml @@ -0,0 +1,24 @@ +version: '3.6' + +services: + mysql: + environment: + - MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD} + labels: + - SERVICE_CHECK_SCRIPT=container-check-status $$SERVICE_IP + - SERVICE_3306_NAME=${COMPOSE_SERVICE_NAME}-mysql-3306 + networks: + - private + ports: + - 3306 + volumes: + - mysql:/var/lib/mysql + restart: always + +volumes: + mysql: + +networks: + private: + external: true + name: ${DOCKER_NETWORK_PRIVATE} diff --git a/stack/newrelic.mk b/stack/newrelic.mk new file mode 100644 index 0000000..033ed4d --- /dev/null +++ b/stack/newrelic.mk @@ -0,0 +1 @@ +newrelic ?= newrelic/php-daemon diff --git a/stack/newrelic/php-daemon.yml b/stack/newrelic/php-daemon.yml new file mode 100644 index 0000000..f24dba6 --- /dev/null +++ b/stack/newrelic/php-daemon.yml @@ -0,0 +1,20 @@ +version: '3.6' + +services: + php-daemon: + image: newrelic/php-daemon:latest + # command: "/usr/bin/newrelic-daemon --loglevel debug" + labels: + - SERVICE_31339_NAME=${COMPOSE_SERVICE_NAME}-php-daemon-31339 + - SERVICE_31339_CHECK_TCP=true + - SERVICE_31339_CHECK_INITIAL_STATUS=passing + networks: + - private + ports: + - 31339 + restart: always + +networks: + private: + external: true + name: ${DOCKER_NETWORK_PRIVATE} diff --git a/stack/nginx/.env.dist b/stack/nginx/.env.dist new file mode 100644 index 0000000..5c74d96 --- /dev/null +++ b/stack/nginx/.env.dist @@ -0,0 +1 @@ +STATIC_SERVICE_80_TAGS=urlprefix-static.${APP_DOMAIN}/ diff --git a/stack/nginx/static.yml b/stack/nginx/static.yml new file mode 100644 index 0000000..ab539b8 --- /dev/null +++ b/stack/nginx/static.yml @@ -0,0 +1,30 @@ +version: '3.6' + +services: + static: + image: nginx:alpine + command: /bin/sh -c "grep autoindex /etc/nginx/conf.d/default.conf >/dev/null 2>&1 || sed -i 's|index index.html index.htm;|index index.html index.htm;\n autoindex on;|' /etc/nginx/conf.d/default.conf && nginx -g 'daemon off;'" + labels: + - SERVICE_80_NAME=${COMPOSE_SERVICE_NAME}-nginx-80 + - SERVICE_80_CHECK_TCP=true + - SERVICE_80_CHECK_INITIAL_STATUS=passing + - SERVICE_80_TAGS=${STATIC_SERVICE_80_TAGS} + networks: + - private + - public + ports: + - 80 + restart: always + volumes: + - static:/usr/share/nginx/html:ro + +volumes: + static: + +networks: + private: + external: true + name: ${DOCKER_NETWORK_PRIVATE} + public: + external: true + name: ${DOCKER_NETWORK_PUBLIC} diff --git a/stack/node.mk b/stack/node.mk new file mode 100644 index 0000000..c80c6ec --- /dev/null +++ b/stack/node.mk @@ -0,0 +1,9 @@ +.PHONY: node +node: docker-network-create-$(DOCKER_NETWORK_PUBLIC) node-openssl stack-node-up + 
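+# The node-openssl target below seeds the ssl-certs volume with a self-signed
+# certificate; an equivalent host-side sketch (file names illustrative, and
+# -addext needs OpenSSL 1.1.1+, which is why the recipe installs openssl from
+# apk rather than relying on busybox):
+#   openssl genrsa -out localhost.key.pem 2048
+#   openssl req -key localhost.key.pem -out localhost.crt.pem -x509 -days 365 \
+#     -subj "/CN=localhost" -addext subjectAltName=DNS:localhost \
+#     -addext extendedKeyUsage=serverAuth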
+.PHONY: node-openssl +node-openssl: + docker run --rm --mount source=$(COMPOSE_PROJECT_NAME_INFRA_NODE)_ssl-certs,target=/certs alpine:latest [ -f /certs/$(SSL_HOSTNAME).crt.pem -a -f /certs/$(SSL_HOSTNAME).key.pem ] \ + || docker run --rm -e SSL_HOSTNAME=$(SSL_HOSTNAME) --mount source=$(COMPOSE_PROJECT_NAME_INFRA_NODE)_ssl-certs,target=/certs alpine:latest sh -c "apk --no-cache add openssl \ + && { [ -f /certs/${SSL_HOSTNAME}.key.pem ] || openssl genrsa -out /certs/${SSL_HOSTNAME}.key.pem 2048; } \ + && openssl req -key /certs/${SSL_HOSTNAME}.key.pem -out /certs/${SSL_HOSTNAME}.crt.pem -addext extendedKeyUsage=serverAuth -addext subjectAltName=DNS:${SSL_HOSTNAME} -subj \"/C=/ST=/L=/O=/CN=${SSL_HOSTNAME}\" -x509 -days 365" diff --git a/stack/node/.env.dist b/stack/node/.env.dist new file mode 100644 index 0000000..604255a --- /dev/null +++ b/stack/node/.env.dist @@ -0,0 +1,8 @@ +CONSUL_ACL_TOKENS_MASTER=01234567-89AB-CDEF-0123-456789ABCDEF +CONSUL_CONSUL_HTTP_TOKEN=01234567-89AB-CDEF-0123-456789ABCDEF +CONSUL_SERVICE_8500_TAGS=urlprefix-consul.${APP_DOMAIN}/ +FABIO_CONSUL_HTTP_TOKEN=01234567-89AB-CDEF-0123-456789ABCDEF +FABIO_SERVICE_9998_TAGS=urlprefix-fabio.${APP_DOMAIN}/ +PORTAINER_SERVICE_9000_TAGS=urlprefix-portainer.${APP_DOMAIN}/ +REGISTRATOR_CONSUL_HTTP_TOKEN=01234567-89AB-CDEF-0123-456789ABCDEF +SSL_HOSTNAME=${APP_DOMAIN} diff --git a/stack/node/exporter/.env.dist b/stack/node/exporter/.env.dist new file mode 100644 index 0000000..dd4273c --- /dev/null +++ b/stack/node/exporter/.env.dist @@ -0,0 +1,2 @@ +CADVISOR_EXPORTER_SERVICE_8080_TAGS=urlprefix-cadvisor-exporter.${APP_DOMAIN}/ +NODE_EXPORTER_SERVICE_9100_TAGS=urlprefix-node-exporter.${APP_DOMAIN}/ diff --git a/stack/node/exporter/exporter.yml b/stack/node/exporter/exporter.yml new file mode 100644 index 0000000..8319784 --- /dev/null +++ b/stack/node/exporter/exporter.yml @@ -0,0 +1,49 @@ +version: '3.6' + +services: + cadvisor-exporter: + image: google/cadvisor:latest + hostname: ${HOSTNAME} + labels: + - SERVICE_8080_NAME=${COMPOSE_SERVICE_NAME}-cadvisor-exporter-8080 + - SERVICE_8080_CHECK_TCP=true + - SERVICE_8080_CHECK_INITIAL_STATUS=passing + - SERVICE_8080_TAGS=${CADVISOR_EXPORTER_SERVICE_8080_TAGS} + - SERVICE_9200_IGNORE=true + networks: + - public + ports: + - 8080 + restart: always + volumes: + - /:/rootfs:ro + - /sys:/sys:ro + - /var/lib/docker/:/var/lib/docker:ro + - /var/run:/var/run:rw + node-exporter: + command: + - --collector.filesystem.ignored-mount-points + - "^/(sys|proc|dev|host|etc|rootfs/var/lib/docker/containers|rootfs/var/lib/docker/overlay2|rootfs/run/docker/netns|rootfs/var/lib/docker/aufs)($$|/)" + - '--path.procfs=/host/proc' + - '--path.sysfs=/host/sys' + image: prom/node-exporter:latest + hostname: ${HOSTNAME} + labels: + - SERVICE_9100_NAME=${COMPOSE_SERVICE_NAME}-node-exporter-9100 + - SERVICE_9100_CHECK_TCP=true + - SERVICE_9100_CHECK_INITIAL_STATUS=passing + - SERVICE_9100_TAGS=${NODE_EXPORTER_SERVICE_9100_TAGS} + networks: + - public + ports: + - 9100 + restart: always + volumes: + - /:/rootfs:ro + - /proc:/host/proc:ro + - /sys:/host/sys:ro + +networks: + public: + external: true + name: ${DOCKER_NETWORK_PUBLIC} diff --git a/stack/node/node.yml b/stack/node/node.yml new file mode 100644 index 0000000..4aaff56 --- /dev/null +++ b/stack/node/node.yml @@ -0,0 +1,110 @@ +version: '3.6' + +services: + consul: + build: + args: + - DOCKER_BUILD_DIR=docker/consul + context: ../..
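+      # CONSUL_LOCAL_CONFIG below enables ACLs with default_policy=deny, so
+      # every agent call needs a token; a hedged smoke test once the node is
+      # up (token value from stack/node/.env.dist):
+      #   curl -H "X-Consul-Token: $CONSUL_CONSUL_HTTP_TOKEN" \
+      #     http://127.0.0.1:8500/v1/agent/services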
+ dockerfile: docker/consul/Dockerfile + image: ${DOCKER_REPOSITORY_INFRA_NODE}/consul:${DOCKER_IMAGE_TAG} + environment: + CONSUL_BIND_INTERFACE: '${DOCKER_HOST_IFACE}' + CONSUL_CLIENT_INTERFACE: '${DOCKER_HOST_IFACE}' + CONSUL_HTTP_TOKEN: '${CONSUL_CONSUL_HTTP_TOKEN}' + CONSUL_LOCAL_CONFIG: '{ "log_level": "warn" + , "enable_script_checks": true + , "acl": { "enabled": true + , "default_policy": "deny" + , "enable_token_persistence": true + , "tokens": { "master": "$CONSUL_ACL_TOKENS_MASTER" + , "agent": "$CONSUL_CONSUL_HTTP_TOKEN" + } + } + }' + hostname: ${HOSTNAME} + labels: + - SERVICE_8300_IGNORE=true + - SERVICE_8301_IGNORE=true + - SERVICE_8302_IGNORE=true + - SERVICE_8500_CHECK_HTTP=/ui + - SERVICE_8500_NAME=${COMPOSE_SERVICE_NAME}-consul-8500 + - SERVICE_8500_TAGS=${CONSUL_SERVICE_8500_TAGS} + - SERVICE_8600_IGNORE=true + - SERVICE_ADDRESS=${DOCKER_HOST_INET} + network_mode: host + restart: always + volumes: + - consul:/consul/data + - /var/run/docker.sock:/var/run/docker.sock + fabio: + command: -registry.backend "consul" -registry.consul.addr "consul:8500" -registry.consul.token "$FABIO_CONSUL_HTTP_TOKEN" -proxy.addr ":80,:443;cs=local" -proxy.cs "cs=local;type=file;cert=/certs/${SSL_HOSTNAME}.crt.pem;key=/certs/${SSL_HOSTNAME}.key.pem" + depends_on: + - consul + extra_hosts: + - consul:${DOCKER_HOST_INET} + hostname: ${HOSTNAME} + image: fabiolb/fabio:latest + labels: + - SERVICE_80_CHECK_TCP=true + - SERVICE_80_NAME=${COMPOSE_SERVICE_NAME}-fabio-80 + - SERVICE_443_CHECK_TCP=true + - SERVICE_443_NAME=${COMPOSE_SERVICE_NAME}-fabio-443 + - SERVICE_9998_CHECK_TCP=true + - SERVICE_9998_NAME=${COMPOSE_SERVICE_NAME}-fabio-9998 + - SERVICE_9998_TAGS=${FABIO_SERVICE_9998_TAGS} + - SERVICE_9999_IGNORE=true + ports: + - 80:80 + - 443:443 + - 9998 + networks: + - public + restart: always + volumes: + - ssl-certs:/certs + portainer: + image: portainer/portainer:latest + labels: + - SERVICE_8000_IGNORE=true + - SERVICE_9000_CHECK_HTTP=/ + - SERVICE_9000_NAME=${COMPOSE_SERVICE_NAME}-portainer-9000 + - SERVICE_9000_TAGS=${PORTAINER_SERVICE_9000_TAGS} + networks: + - public + ports: + - 8000 + - 9000 + restart: always + volumes: + - /var/run/docker.sock:/var/run/docker.sock + - portainer:/data + registrator: + build: + args: + - DOCKER_BUILD_DIR=docker/registrator + context: ../.. + dockerfile: docker/registrator/Dockerfile + command: -internal -cleanup -deregister always -resync=30 -useIpFromNetwork node -useIpFromLabel SERVICE_ADDRESS consul://consul:8500 + depends_on: + - consul + environment: + - CONSUL_HTTP_TOKEN=${REGISTRATOR_CONSUL_HTTP_TOKEN} + extra_hosts: + - consul:${DOCKER_HOST_INET} + hostname: ${HOSTNAME} + image: ${DOCKER_REPOSITORY_INFRA_NODE}/registrator:${DOCKER_IMAGE_TAG} + network_mode: host + restart: always + volumes: + - /var/run/docker.sock:/tmp/docker.sock + +volumes: + consul: + portainer: + ssl-certs: + +networks: + public: + external: true + name: ${DOCKER_NETWORK_PUBLIC} diff --git a/stack/node/pdns/pdns-recursor.yml b/stack/node/pdns/pdns-recursor.yml new file mode 100644 index 0000000..9ac8ecc --- /dev/null +++ b/stack/node/pdns/pdns-recursor.yml @@ -0,0 +1,14 @@ +version: '3.6' + +services: + pdns-recursor: + build: + args: + - DOCKER_BUILD_DIR=docker/pdns-server + context: ../../.. 
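+      # The recursor command below binds 192.168.0.1:53 and only answers the
+      # allow-from ranges; a quick smoke test from an allowed address (values
+      # are the ones hard-coded in the command):
+      #   dig @192.168.0.1 example.com +short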
+ dockerfile: docker/pdns-server/Dockerfile + command: /usr/local/sbin/pdns_recursor --local-address='192.168.0.1:53' --allow-from='127.0.0.0/8, 192.168.1.0/24, 172.16.0.0/12' + image: ${DOCKER_REPOSITORY}/pdns-recursor:${DOCKER_IMAGE_TAG} + hostname: ${HOSTNAME} + network_mode: host + restart: always diff --git a/stack/node/vsftpd/.env.dist b/stack/node/vsftpd/.env.dist new file mode 100644 index 0000000..a8d2094 --- /dev/null +++ b/stack/node/vsftpd/.env.dist @@ -0,0 +1,3 @@ +VSFTPD_S3_AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} +VSFTPD_S3_AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} +VSFTPD_S3_FTPD_USERS=ftpuser::ftppass::ftpbucket diff --git a/stack/node/vsftpd/vsftpd-s3.yml b/stack/node/vsftpd/vsftpd-s3.yml new file mode 100644 index 0000000..a8afc5f --- /dev/null +++ b/stack/node/vsftpd/vsftpd-s3.yml @@ -0,0 +1,39 @@ +version: '3.6' + +services: + vsftpd-s3: + build: + args: + - DOCKER_BUILD_DIR=docker/vsftpd-s3 + context: ../../.. + dockerfile: docker/vsftpd-s3/Dockerfile + cap_add: + - sys_admin + devices: + - /dev/fuse + environment: + - AWS_ACCESS_KEY_ID=${VSFTPD_S3_AWS_ACCESS_KEY_ID} + - AWS_SECRET_ACCESS_KEY=${VSFTPD_S3_AWS_SECRET_ACCESS_KEY} + - DIR_REMOTE=${VSFTPD_S3_DIR_REMOTE} + - FTP_HOST=${VSFTPD_S3_FTP_HOST} + - FTP_PASS=${VSFTPD_S3_FTP_PASS} + - FTP_SYNC=${VSFTPD_S3_FTP_SYNC} + - FTP_USER=${VSFTPD_S3_FTP_USER} + - FTPD_USER=${VSFTPD_S3_FTPD_USER} + - FTPD_USERS=${VSFTPD_S3_FTPD_USERS} + - PASV_MAX_PORT=${VSFTPD_S3_PASV_MAX_PORT} + - PASV_MIN_PORT=${VSFTPD_S3_PASV_MIN_PORT} + image: ${DOCKER_REPOSITORY}/vsftpd-s3:${DOCKER_IMAGE_TAG} + labels: + - SERVICE_21_NAME=${COMPOSE_SERVICE_NAME}-vsftpd-s3-21 + - SERVICE_21_CHECK_TCP=true + - SERVICE_21_CHECK_INITIAL_STATUS=passing + - SERVICE_22_NAME=${COMPOSE_SERVICE_NAME}-vsftpd-s3-22 + - SERVICE_22_CHECK_TCP=true + - SERVICE_22_CHECK_INITIAL_STATUS=passing + - SERVICE_65000_IGNORE=true + hostname: ${HOSTNAME} + security_opt: + - apparmor:unconfined + network_mode: host + restart: always diff --git a/stack/postgres/.env.dist b/stack/postgres/.env.dist new file mode 100644 index 0000000..0636623 --- /dev/null +++ b/stack/postgres/.env.dist @@ -0,0 +1,3 @@ +POSTGRES_DB=postgres +POSTGRES_PASSWORD=postgres +POSTGRES_USER=postgres diff --git a/stack/postgres/postgres.9.6.yml b/stack/postgres/postgres.9.6.yml new file mode 100644 index 0000000..1e6c7e0 --- /dev/null +++ b/stack/postgres/postgres.9.6.yml @@ -0,0 +1,5 @@ +version: '3.6' + +services: + postgres: + image: postgres:9.6-alpine diff --git a/stack/postgres/postgres.dev.yml b/stack/postgres/postgres.dev.yml new file mode 100644 index 0000000..26a04f1 --- /dev/null +++ b/stack/postgres/postgres.dev.yml @@ -0,0 +1,6 @@ +version: '3.6' + +services: + postgres: + ports: + - 5433:5432 diff --git a/stack/postgres/postgres.latest.yml b/stack/postgres/postgres.latest.yml new file mode 100644 index 0000000..b0a4759 --- /dev/null +++ b/stack/postgres/postgres.latest.yml @@ -0,0 +1,5 @@ +version: '3.6' + +services: + postgres: + image: postgres:latest diff --git a/stack/postgres/postgres.local.yml b/stack/postgres/postgres.local.yml new file mode 100644 index 0000000..0a964bb --- /dev/null +++ b/stack/postgres/postgres.local.yml @@ -0,0 +1,24 @@ +version: '3.6' + +services: + postgres: + command: -c logging_collector=on -c log_destination='stderr' -c log_directory='/shared/logs/postgres' -c log_filename='postgresql.log' -c log_file_mode='0644' -c log_rotation_age=0 -c log_checkpoints=on -c log_hostname=on -c log_line_prefix='%t [%p] [%l-1] db=%d,user=%u ' + depends_on: + - shared-logs + 
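+      # logging_collector writes postgresql.log into /shared/logs/postgres,
+      # which the shared-logs one-shot container below pre-creates with uid 70
+      # (the postgres user in the alpine image); a hedged way to follow it
+      # from the host, given the bind-mounted shared volume:
+      #   tail -f ${MONOREPO_DIR}/shared/logs/postgres/postgresql.log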
ports: + - 5432:5432 + volumes: + - shared:/shared + shared-logs: + command: sh -c 'mkdir -p /shared/logs/postgres && chown 70:70 /shared/logs/postgres' + image: alpine:latest + volumes: + - shared:/shared + +volumes: + shared: + driver: local + driver_opts: + type: none + device: ${MONOREPO_DIR}/shared + o: bind diff --git a/stack/postgres/postgres.yml b/stack/postgres/postgres.yml new file mode 100644 index 0000000..e3928d5 --- /dev/null +++ b/stack/postgres/postgres.yml @@ -0,0 +1,25 @@ +version: '3.6' + +services: + postgres: + environment: + - POSTGRES_DB=${POSTGRES_DB} + - POSTGRES_PASSWORD=${POSTGRES_PASSWORD} + - POSTGRES_USER=${POSTGRES_USER} + labels: + - SERVICE_5432_NAME=${COMPOSE_SERVICE_NAME}-postgres-5432 + networks: + - private + ports: + - 5432 + volumes: + - postgres:/var/lib/postgresql/data + restart: always + +volumes: + postgres: + +networks: + private: + external: true + name: ${DOCKER_NETWORK_PRIVATE} diff --git a/stack/prometheus/.env.dist b/stack/prometheus/.env.dist new file mode 100644 index 0000000..d895593 --- /dev/null +++ b/stack/prometheus/.env.dist @@ -0,0 +1,8 @@ +ALERTMANAGER_SERVICE_9093_TAGS=urlprefix-alertmanager.${APP_DOMAIN}/ +ALERTMANAGER_SLACK_WEBHOOK_ID=https://hooks.slack.com/services/123456789/123456789/ABCDEFGHIJKLMNOPQRSTUVWX +BLACKBOX_SERVICE_9115_TAGS=urlprefix-blackbox.${APP_DOMAIN}/ +ES_EXPORTER_ELASTICSEARCH_URL=elasticsearch:9200 +ES_EXPORTER_SERVICE_9206_TAGS=urlprefix-es-exporter.${APP_DOMAIN}/ +PROMETHEUS_MONITORING_PRIMARY_TARGETS_BLACKBOX=https://www.google.com +PROMETHEUS_MONITORING_SECONDARY_TARGETS_BLACKBOX= +PROMETHEUS_SERVICE_9090_TAGS=urlprefix-prometheus.${APP_DOMAIN}/ diff --git a/stack/prometheus/alertmanager.yml b/stack/prometheus/alertmanager.yml new file mode 100644 index 0000000..0171e54 --- /dev/null +++ b/stack/prometheus/alertmanager.yml @@ -0,0 +1,30 @@ +version: '3.6' + +services: + alertmanager: + build: + args: + - DOCKER_BUILD_DIR=docker/alertmanager + - SLACK_WEBHOOK_ID=${ALERTMANAGER_SLACK_WEBHOOK_ID} + context: ../.. + dockerfile: docker/alertmanager/Dockerfile + image: ${DOCKER_REPOSITORY}/alertmanager:${DOCKER_IMAGE_TAG} + labels: + - SERVICE_9093_NAME=${COMPOSE_SERVICE_NAME}-alertmanager-9093 + - SERVICE_9093_CHECK_TCP=true + - SERVICE_9093_CHECK_INITIAL_STATUS=passing + - SERVICE_9093_TAGS=${ALERTMANAGER_SERVICE_9093_TAGS} + networks: + - private + - public + ports: + - 9093 + restart: always + +networks: + private: + external: true + name: ${DOCKER_NETWORK_PRIVATE} + public: + external: true + name: ${DOCKER_NETWORK_PUBLIC} diff --git a/stack/prometheus/blackbox-exporter.yml b/stack/prometheus/blackbox-exporter.yml new file mode 100644 index 0000000..9355977 --- /dev/null +++ b/stack/prometheus/blackbox-exporter.yml @@ -0,0 +1,29 @@ +version: '3.6' + +services: + blackbox: + build: + args: + - DOCKER_BUILD_DIR=docker/blackbox + context: ../.. 
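+      # The blackbox exporter probes the PROMETHEUS_MONITORING_*_TARGETS_BLACKBOX
+      # URLs on Prometheus's behalf; a hedged manual probe once the service is
+      # up (http_2xx is the stock module name, assumed here):
+      #   curl 'http://blackbox:9115/probe?module=http_2xx&target=https://www.google.com'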
+ dockerfile: docker/blackbox/Dockerfile + image: ${DOCKER_REPOSITORY}/blackbox:${DOCKER_IMAGE_TAG} + labels: + - SERVICE_9115_NAME=${COMPOSE_SERVICE_NAME}-blackbox-9115 + - SERVICE_9115_CHECK_TCP=true + - SERVICE_9115_CHECK_INITIAL_STATUS=passing + - SERVICE_9115_TAGS=${BLACKBOX_SERVICE_9115_TAGS} + networks: + - private + - public + ports: + - 9115 + restart: always + +networks: + private: + external: true + name: ${DOCKER_NETWORK_PRIVATE} + public: + external: true + name: ${DOCKER_NETWORK_PUBLIC} diff --git a/stack/prometheus/es-exporter.yml b/stack/prometheus/es-exporter.yml new file mode 100644 index 0000000..63a6285 --- /dev/null +++ b/stack/prometheus/es-exporter.yml @@ -0,0 +1,30 @@ +version: '3.6' + +services: + es-exporter: + build: + args: + - DOCKER_BUILD_DIR=docker/es-exporter + context: ../.. + dockerfile: docker/es-exporter/Dockerfile + command: -e ${ES_EXPORTER_ELASTICSEARCH_URL} + image: ${DOCKER_REPOSITORY}/es-exporter:${DOCKER_IMAGE_TAG} + labels: + - SERVICE_9206_NAME=${COMPOSE_SERVICE_NAME}-es-exporter-9206 + - SERVICE_9206_CHECK_TCP=true + - SERVICE_9206_CHECK_INITIAL_STATUS=passing + - SERVICE_9206_TAGS=${ES_EXPORTER_SERVICE_9206_TAGS} + networks: + - private + - public + ports: + - 9206 + restart: always + +networks: + private: + external: true + name: ${DOCKER_NETWORK_PRIVATE} + public: + external: true + name: ${DOCKER_NETWORK_PUBLIC} diff --git a/stack/prometheus/prometheus.yml b/stack/prometheus/prometheus.yml new file mode 100644 index 0000000..e427cd4 --- /dev/null +++ b/stack/prometheus/prometheus.yml @@ -0,0 +1,36 @@ +version: '3.6' + +services: + prometheus: + build: + args: + - DOCKER_BUILD_DIR=docker/prometheus + - MONITORING_PRIMARY_TARGETS_BLACKBOX=${PROMETHEUS_MONITORING_PRIMARY_TARGETS_BLACKBOX} + - MONITORING_SECONDARY_TARGETS_BLACKBOX=${PROMETHEUS_MONITORING_SECONDARY_TARGETS_BLACKBOX} + context: ../.. 
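+      # The MONITORING_*_TARGETS_BLACKBOX build args are baked into the image's
+      # Prometheus config; a minimal sketch of the scrape job they would
+      # populate, using the standard blackbox relabeling pattern (not copied
+      # from docker/prometheus):
+      #   - job_name: blackbox
+      #     metrics_path: /probe
+      #     params: { module: [http_2xx] }
+      #     static_configs:
+      #       - targets: [ 'https://www.google.com' ]
+      #     relabel_configs:
+      #       - source_labels: [__address__]
+      #         target_label: __param_target
+      #       - target_label: __address__
+      #         replacement: blackbox:9115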
+ dockerfile: docker/prometheus/Dockerfile + image: ${DOCKER_REPOSITORY}/prometheus:${DOCKER_IMAGE_TAG} + labels: + - SERVICE_9090_NAME=${COMPOSE_SERVICE_NAME}-prometheus-9090 + - SERVICE_9090_CHECK_TCP=true + - SERVICE_9090_CHECK_INITIAL_STATUS=passing + - SERVICE_9090_TAGS=${PROMETHEUS_SERVICE_9090_TAGS} + networks: + - private + - public + ports: + - 9090 + restart: always + volumes: + - prometheus:/prometheus + +volumes: + prometheus: + +networks: + private: + external: true + name: ${DOCKER_NETWORK_PRIVATE} + public: + external: true + name: ${DOCKER_NETWORK_PUBLIC} diff --git a/stack/rabbitmq/.env.dist b/stack/rabbitmq/.env.dist new file mode 100644 index 0000000..927e85a --- /dev/null +++ b/stack/rabbitmq/.env.dist @@ -0,0 +1 @@ +RABBITMQ_SERVICE_15672_TAGS=urlprefix-rabbitmq.${APP_DOMAIN}/ diff --git a/stack/rabbitmq/rabbitmq.yml b/stack/rabbitmq/rabbitmq.yml new file mode 100644 index 0000000..4562a53 --- /dev/null +++ b/stack/rabbitmq/rabbitmq.yml @@ -0,0 +1,35 @@ +version: '3.6' + +services: + rabbitmq: + image: rabbitmq:management-alpine + labels: + - SERVICE_4369_IGNORE=true + - SERVICE_5671_IGNORE=true + - SERVICE_5672_CHECK_TCP=true + - SERVICE_5672_NAME=${COMPOSE_SERVICE_NAME}-rabbitmq-5672 + - SERVICE_15671_IGNORE=true + - SERVICE_15672_CHECK_HTTP=/ + - SERVICE_15672_NAME=${COMPOSE_SERVICE_NAME}-rabbitmq-15672 + - SERVICE_15672_TAGS=${RABBITMQ_SERVICE_15672_TAGS} + - SERVICE_25672_IGNORE=true + networks: + - private + - public + ports: + - 5672 + - 15672 + volumes: + - rabbitmq:/var/lib/rabbitmq + restart: always + +volumes: + rabbitmq: + +networks: + private: + external: true + name: ${DOCKER_NETWORK_PRIVATE} + public: + external: true + name: ${DOCKER_NETWORK_PUBLIC} diff --git a/stack/redis/redis.yml b/stack/redis/redis.yml new file mode 100644 index 0000000..fb11b92 --- /dev/null +++ b/stack/redis/redis.yml @@ -0,0 +1,29 @@ +version: '3.6' + +services: + redis: + depends_on: + - sysctl + image: redis:alpine + command: redis-server --appendonly yes + labels: + - SERVICE_6379_CHECK_TCP=true + - SERVICE_6379_NAME=${COMPOSE_SERVICE_NAME}-redis-6379 + networks: + - private + ports: + - 6379 + volumes: + - redis:/data + restart: always + +volumes: + redis: + +networks: + private: + external: true + name: ${DOCKER_NETWORK_PRIVATE} + public: + external: true + name: ${DOCKER_NETWORK_PUBLIC} diff --git a/stack/redmine/.env.dist b/stack/redmine/.env.dist new file mode 100644 index 0000000..9833da8 --- /dev/null +++ b/stack/redmine/.env.dist @@ -0,0 +1,33 @@ +REDMINE_DB_HOST=mysql +REDMINE_DB_NAME=redmine +REDMINE_DB_PASS=redmine +REDMINE_DB_USER=redmine +REDMINE_IMAP_ENABLED=false +REDMINE_IMAP_HOST=imap.gmail.com +REDMINE_IMAP_INTERVAL=30 +REDMINE_IMAP_USER=imap_user +REDMINE_IMAP_PASS=imap_pass +REDMINE_INCOMING_EMAIL_ALLOW_OVERRIDE=project,tracker,category,priority,status +REDMINE_INCOMING_EMAIL_PROJECT=incoming_email_project +REDMINE_FETCH_COMMITS=hourly +REDMINE_SECRET_TOKEN=redmine_secret_token +REDMINE_SERVICE_80_TAGS=urlprefix-redmine.${APP_DOMAIN}/ +REDMINE_SMTP_DOMAIN=redmine_smtp_domain +REDMINE_SMTP_USER=redmine_smtp_user +REDMINE_SMTP_PASS=redmine_smtp_pass +REDMINE3_DB_HOST=mysql +REDMINE3_DB_NAME=redmine3 +REDMINE3_DB_PASS=redmine +REDMINE3_DB_USER=redmine +REDMINE3_IMAP_ENABLED=false +REDMINE3_IMAP_HOST=imap.gmail.com +REDMINE3_IMAP_INTERVAL=30 +REDMINE3_IMAP_USER=imap_user +REDMINE3_IMAP_PASS=imap_pass +REDMINE3_INCOMING_EMAIL_ALLOW_OVERRIDE=project,tracker,category,priority,status +REDMINE3_INCOMING_EMAIL_PROJECT=incoming_email_project 
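+# The *_SECRET_TOKEN values in this file are placeholders; a hedged way to
+# generate a real one before deploying:
+#   openssl rand -hex 32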
+REDMINE3_REDMINE_SECRET_TOKEN=redmine_secret_token +REDMINE3_SERVICE_80_TAGS=urlprefix-redmine3.${APP_DOMAIN}/ +REDMINE3_SMTP_DOMAIN=redmine_smtp_domain +REDMINE3_SMTP_USER=redmine_smtp_user +REDMINE3_SMTP_PASS=redmine_smtp_pass diff --git a/stack/redmine/redmine.3.4.yml b/stack/redmine/redmine.3.4.yml new file mode 100644 index 0000000..d37e35f --- /dev/null +++ b/stack/redmine/redmine.3.4.yml @@ -0,0 +1,5 @@ +version: '3.6' + +services: + redmine3: + image: sameersbn/redmine:3.4.12 diff --git a/stack/redmine/redmine.4.0.yml b/stack/redmine/redmine.4.0.yml new file mode 100644 index 0000000..84d20e8 --- /dev/null +++ b/stack/redmine/redmine.4.0.yml @@ -0,0 +1,5 @@ +version: '3.6' + +services: + redmine: + image: sameersbn/redmine:4.0.5 diff --git a/stack/redmine/redmine.latest.yml b/stack/redmine/redmine.latest.yml new file mode 100644 index 0000000..2c3d4d8 --- /dev/null +++ b/stack/redmine/redmine.latest.yml @@ -0,0 +1,5 @@ +version: '3.6' + +services: + redmine: + image: sameersbn/redmine:latest diff --git a/stack/redmine/redmine.yml b/stack/redmine/redmine.yml new file mode 100644 index 0000000..c8ec424 --- /dev/null +++ b/stack/redmine/redmine.yml @@ -0,0 +1,48 @@ +version: '3.6' + +services: + redmine: + environment: + - DB_ADAPTER=mysql2 + - DB_HOST=${REDMINE_DB_HOST} + - DB_NAME=${REDMINE_DB_NAME} + - DB_USER=${REDMINE_DB_USER} + - DB_PASS=${REDMINE_DB_PASS} + - IMAP_ENABLED=${REDMINE_IMAP_ENABLED} + - IMAP_HOST=${REDMINE_IMAP_HOST} + - IMAP_INTERVAL=${REDMINE_IMAP_INTERVAL} + - IMAP_USER=${REDMINE_IMAP_USER} + - IMAP_PASS=${REDMINE_IMAP_PASS} + - INCOMING_EMAIL_PROJECT=${REDMINE_INCOMING_EMAIL_PROJECT} + - INCOMING_EMAIL_ALLOW_OVERRIDE=${REDMINE_INCOMING_EMAIL_ALLOW_OVERRIDE} + - REDMINE_FETCH_COMMITS=${REDMINE_FETCH_COMMITS} + - REDMINE_SECRET_TOKEN=${REDMINE_SECRET_TOKEN} + - SMTP_DOMAIN=${REDMINE_SMTP_DOMAIN} + - SMTP_USER=${REDMINE_SMTP_USER} + - SMTP_PASS=${REDMINE_SMTP_PASS} + - TZ=Europe/Paris + labels: + - SERVICE_80_NAME=${COMPOSE_SERVICE_NAME}-redmine-80 + - SERVICE_80_CHECK_TCP=true + - SERVICE_80_CHECK_INITIAL_STATUS=passing + - SERVICE_80_TAGS=${REDMINE_SERVICE_80_TAGS} + - SERVICE_443_IGNORE=true + networks: + - private + - public + ports: + - 80 + restart: always + volumes: + - redmine:/home/redmine/data + +volumes: + redmine: + +networks: + private: + external: true + name: ${DOCKER_NETWORK_PRIVATE} + public: + external: true + name: ${DOCKER_NETWORK_PUBLIC} diff --git a/stack/redmine/redmine3.yml b/stack/redmine/redmine3.yml new file mode 100644 index 0000000..53d5b7f --- /dev/null +++ b/stack/redmine/redmine3.yml @@ -0,0 +1,41 @@ +version: '3.6' + +services: + redmine3: + image: sameersbn/redmine:3.4.12 + environment: + - DB_ADAPTER=mysql2 + - DB_HOST=${REDMINE3_DB_HOST} + - DB_NAME=${REDMINE3_DB_NAME} + - DB_USER=${REDMINE3_DB_USER} + - DB_PASS=${REDMINE3_DB_PASS} + - REDMINE_SECRET_TOKEN=${REDMINE3_REDMINE_SECRET_TOKEN} + - SMTP_DOMAIN=${REDMINE3_SMTP_DOMAIN} + - SMTP_USER=${REDMINE3_SMTP_USER} + - SMTP_PASS=${REDMINE3_SMTP_PASS} + - TZ=Europe/Paris + labels: + - SERVICE_80_NAME=${COMPOSE_SERVICE_NAME}-redmine3-80 + - SERVICE_80_CHECK_TCP=true + - SERVICE_80_CHECK_INITIAL_STATUS=passing + - SERVICE_80_TAGS=${REDMINE3_SERVICE_80_TAGS} + - SERVICE_443_IGNORE=true + networks: + - private + - public + ports: + - 80 + restart: always + volumes: + - redmine3:/home/redmine/data + +volumes: + redmine3: + +networks: + private: + external: true + name: ${DOCKER_NETWORK_PRIVATE} + public: + external: true + name: ${DOCKER_NETWORK_PUBLIC} diff --git 
a/stack/sematext/logagent.yml b/stack/sematext/logagent.yml new file mode 100644 index 0000000..550aa87 --- /dev/null +++ b/stack/sematext/logagent.yml @@ -0,0 +1,31 @@ +version: '3.6' + +services: + logagent: + build: + args: + - DOCKER_BUILD_DIR=docker/sematext/logagent + - GIT_AUTHOR_NAME=${GIT_AUTHOR_NAME} + - GIT_AUTHOR_EMAIL=${GIT_AUTHOR_EMAIL} + - UID=${UID} + - USER=${USER} + context: ../.. + dockerfile: docker/sematext/logagent/Dockerfile + target: ${DOCKER_BUILD_TARGET} + image: ${DOCKER_REPOSITORY}/logagent:${DOCKER_IMAGE_TAG} + environment: + - LOGAGENT_ARGS=-u 514 --docker /tmp/docker.sock --dockerEvents + - LOGS_RECEIVER_URL=${ELASTICSEARCH_PROTOCOL}://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT} + - LOGS_TOKEN=logs-YYYY.MM.DD + networks: + - private + ports: + - 514 + restart: always + volumes: + - /var/run/docker.sock:/tmp/docker.sock + +networks: + private: + external: true + name: ${DOCKER_NETWORK_PRIVATE} diff --git a/stack/services.mk b/stack/services.mk new file mode 100644 index 0000000..ef2a1ec --- /dev/null +++ b/stack/services.mk @@ -0,0 +1,2 @@ +services ?= elastic memcached mysql rabbitmq redis +services: stack-services-up; diff --git a/stack/testing.mk b/stack/testing.mk new file mode 100644 index 0000000..4995e37 --- /dev/null +++ b/stack/testing.mk @@ -0,0 +1 @@ +testing ?= drone/drone drone/drone-runner-docker drone/gc diff --git a/stack/theia/.env.dist b/stack/theia/.env.dist new file mode 100644 index 0000000..219a98f --- /dev/null +++ b/stack/theia/.env.dist @@ -0,0 +1,6 @@ +RC_ATTACH_SCREEN=false +RC_ATTACH_TMUX=false +RC_CUSTOM_PROMPT=false +RC_CUSTOM_PS1=true +RC_SSH_AGENT=false +THEIA_SERVICE_3000_TAGS=urlprefix-theia.${USER}.${APP_DOMAIN}/ diff --git a/stack/theia/theia.yml b/stack/theia/theia.yml new file mode 100644 index 0000000..f5b7ae0 --- /dev/null +++ b/stack/theia/theia.yml @@ -0,0 +1,63 @@ +version: '3.6' + +services: + theia: + build: + args: + - DOCKER_BUILD_DIR=docker/theia + - DOCKER_GID=${DOCKER_GID} + - GIT_AUTHOR_NAME=${GIT_AUTHOR_NAME} + - GIT_AUTHOR_EMAIL=${GIT_AUTHOR_EMAIL} + - SSH_BASTION_USERNAME=${SSH_BASTION_USERNAME} + - SSH_BASTION_HOSTNAME=${SSH_BASTION_HOSTNAME} + - SSH_PUBLIC_HOST_KEYS=${SSH_PUBLIC_HOST_KEYS} + - SSH_PRIVATE_IP_RANGE=${SSH_PRIVATE_IP_RANGE} + - UID=${UID} + - USER=${USER} + context: ../.. + dockerfile: docker/theia/Dockerfile + target: ${DOCKER_BUILD_TARGET} + environment: + - ENV=${ENV} + - MONOREPO_DIR=${MONOREPO_DIR} + - RC_01_CUSTOM_PS1=${RC_CUSTOM_PS1} + - RC_02_CUSTOM_PROMPT=${RC_CUSTOM_PROMPT} + - RC_03_SSH_AGENT=${RC_SSH_AGENT} + - RC_04_ATTACH_TMUX=${RC_ATTACH_TMUX} + - RC_05_ATTACH_SCREEN=${RC_ATTACH_SCREEN} + - SHELL=${DOCKER_SHELL} + - SSH_AUTH_SOCK=/tmp/ssh-agent/socket + - WORKSPACE_DIR=/Sources/${MONOREPO} + image: ${DOCKER_REPOSITORY}/theia:${DOCKER_IMAGE_TAG} + labels: + - SERVICE_3000_NAME=${COMPOSE_SERVICE_NAME}-theia-3000 + - SERVICE_3000_CHECK_TCP=true + - SERVICE_3000_CHECK_INITIAL_STATUS=passing + - SERVICE_3000_TAGS=${THEIA_SERVICE_3000_TAGS} + networks: + - private + - public + restart: always + volumes: + - monorepo:/Sources/${MONOREPO}:cached + - ssh-agent:/tmp/ssh-agent:ro + - /var/run/docker.sock:/var/run/docker.sock + +volumes: + monorepo: + driver: local + driver_opts: + type: none + device: ${MONOREPO_DIR} + o: bind + ssh-agent: + external: + name: ${DOCKER_VOLUME_SSH} + +networks: + private: + external: true + name: ${DOCKER_NETWORK_PRIVATE} + public: + external: true + name: ${DOCKER_NETWORK_PUBLIC}
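+# A hedged sketch of bringing a workstation up from this diff's Makefile
+# targets (names taken from stack/node.mk and stack/services.mk):
+#   make node       # consul + fabio + registrator + self-signed certs
+#   make services   # elastic memcached mysql rabbitmq redis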