Compare commits

...

2 Commits

Author SHA1 Message Date
Yann Autissier 093b99b0e3 update docker images 2022-03-30 01:12:27 +00:00
Yann Autissier 5594d58732 add support of runit service manager 2022-01-09 21:28:05 +01:00
93 changed files with 369 additions and 1673 deletions

View File

@ -2,11 +2,10 @@
inventory = inventories
roles_path = roles
filter_plugins = plugins/filter
host_key_checking = False
# host_key_checking = False
[ssh_connection]
scp_if_ssh = smart
pipelining =True
[colors]
changed = purple

View File

@ -24,7 +24,7 @@ ansible-run: ansible-run-localhost
# target ansible-run-%: Fire docker-build-ansible, Call ansible-playbook ANSIBLE_PLAYBOOK
.PHONY: ansible-run-%
ansible-run-%: $(if $(DOCKER_RUN),docker-build-ansible,install-ansible) debug-ANSIBLE_PLAYBOOK
ansible-run-%: $(if $(DOCKER_RUN),docker-build-ansible,install-ansible)
$(call ansible-playbook,$(if $(ANSIBLE_TAGS),--tags $(ANSIBLE_TAGS)) $(if $(ANSIBLE_EXTRA_VARS),--extra-vars '$(patsubst target=localhost,target=$*,$(ANSIBLE_EXTRA_VARS))') $(if $(findstring true,$(DRYRUN)),--check) $(if $(ANSIBLE_INVENTORY),--inventory $(ANSIBLE_INVENTORY)) $(ANSIBLE_PLAYBOOK))
# target ansible-tests: Fire ssh-add ansible-tests-localhost

View File

@ -1,4 +1,4 @@
ANSIBLE_ARGS ?= $(if $(filter-out 0,$(UID)),$(if $(shell sudo -l 2>/dev/null |grep 'NOPASSWD: ALL'),,-K))$(if $(DOCKER_RUN),$(if $(shell ssh-add -l >/dev/null 2>&1 || echo false), -k))
ANSIBLE_ARGS ?= $(if $(filter-out 0,$(UID)),$(if $(shell sudo -l 2>/dev/null |grep 'NOPASSWD: ALL'),,--ask-become-pass))$(if $(DOCKER_RUN),$(if $(shell ssh-add -l >/dev/null 2>&1 || echo false), --ask-pass))
ANSIBLE_AWS_ACCESS_KEY_ID ?= $(AWS_ACCESS_KEY_ID)
ANSIBLE_AWS_DEFAULT_OUTPUT ?= $(AWS_DEFAULT_OUTPUT)
ANSIBLE_AWS_DEFAULT_REGION ?= $(AWS_DEFAULT_REGION)

View File

@ -10,7 +10,6 @@ hosts_git_repositories:
- { "repo": "{{ lookup('env','ANSIBLE_GIT_REPOSITORY') }}", "dest": "{{ lookup('env','ANSIBLE_GIT_DIRECTORY') }}", "key_file": "{{ lookup('env','ANSIBLE_GIT_KEY_FILE') or '~/.ssh/id_rsa' }}", "version": "{{ lookup('env','ANSIBLE_GIT_VERSION') }}" }
hosts_services:
- { "name": "myos", "state": "stopped", "enabled": "yes" }
- { "name": "zram", "state": "started", "enabled": "yes" }
hosts_user_env:
- ANSIBLE_CONFIG
- ANSIBLE_DOCKER_IMAGE_TAG

View File

@ -25,7 +25,7 @@
- name: filesystems - btrfs
when: "disks_to_mount and ansible_cmdline.fstype == 'btrfs'"
import_tasks: filesystems-btrfs.yml
import_tasks: filesystems_btrfs.yml
tags:
- btrfs

View File

@ -2,7 +2,7 @@
# file: tasks/services.yml
- name: services - enable/disable disks services
when: disks_services is defined and ansible_service_mgr|lower != "openrc"
when: disks_services is defined and ansible_service_mgr|lower != "openrc" and ansible_service_mgr|lower != "runit"
with_items: "{{ disks_services|default([]) }}"
service:
name: "{{item.name}}"
@ -10,7 +10,7 @@
enabled: "{{item.enabled}}"
become: yes
- name: services - openrc - enable/disable disks services
- name: services - enable/disable disks services - openrc
when: disks_services is defined and ansible_service_mgr|lower == "openrc"
with_items: "{{ disks_services|default([]) }}"
service:
@ -20,3 +20,12 @@
runlevel: boot
become: yes
- name: services - enable/disable disks services - runit
when: disks_services is defined and ansible_service_mgr|lower == "runit"
with_items: "{{ disks_services|default([]) }}"
sysvinit:
name: "{{item.name}}"
state: "{{item.state}}"
enabled: "{{item.enabled}}"
become: yes

View File

@ -36,6 +36,24 @@ docker_daemon_config_storage: 'overlay2'
docker_daemon_config: { "storage-driver": "devicemapper" }
```
* `docker_distribution` - Docker package distribution
``` yaml
docker_distribution: debian
```
* `docker_distribution_release` - Docker package distribution release
``` yaml
docker_distribution_release: bullseye
```
* `docker_machine` - Docker package architecture
``` yaml
docker_machine: amd64
```
* `docker_package` - Name of the docker package
``` yaml
@ -199,7 +217,7 @@ Both DOCKER_BUILD_PREFIX and DOCKER_BUILD_SUFFIX variables are empty, the Docker
The DOCKER_BUILD_PREFIX variable is populated with 'custom/' to force the Dockerfile to search for a `./custom/nginx.conf` file, ie /etc/docker/nginx/custom/nginx.conf file.
* Whith an image name suffixed with a dash, the docker-build command will search for a suffixed file as well.
* With an image name suffixed with a dash, the docker-build command will search for a suffixed file as well.
``` bash
# docker-build -c custom nginx-develop && docker-run -c custom nginx-develop
@ -241,4 +259,4 @@ docker:
## Limitations
This role is known to work on Ubuntu, Debian, CentOS and Alpine Linux.
This role is known to work on Alpine Linux, Armbian, Centos, Debian, Devuan, Fedora, Raspbian, RedHat and Ubuntu.

View File

@ -1,7 +1,7 @@
#!/bin/bash
# Author: Yann Autissier <yann.autissier@gmail.com>
DOCKER_IMAGE_REPOSITORY="centile"
DOCKER_IMAGE_REPOSITORY="local"
DOCKER_BUILD_DIRECTORY="/etc/docker"
usage() {
@ -30,9 +30,9 @@ usage() {
echo -e "Build a docker image named '${DOCKER_IMAGE_REPOSITORY}/elk-es01' with Dockerfile ${DOCKER_BUILD_DIRECTORY}/elk/Dockerfile"
echo -e "and build-arg DOCKER_BUILD_SUFFIX=-es01"
echo
echo -e "$0 -c elisa-sdc elk-es01"
echo -e "$0 -c custom elk-es01"
echo -e "Build a docker image named '${DOCKER_IMAGE_REPOSITORY}/elk-es01' with Dockerfile ${DOCKER_BUILD_DIRECTORY}/elk/Dockerfile,"
echo -e "build-arg DOCKER_BUILD_PREFIX=elisa-sdc/ and build-arg DOCKER_BUILD_SUFFIX=-es01"
echo -e "build-arg DOCKER_BUILD_PREFIX=custom/ and build-arg DOCKER_BUILD_SUFFIX=-es01"
echo
exit 1
}

View File

@ -1,7 +1,7 @@
#!/bin/bash
# Author: Yann Autissier <yann.autissier@gmail.com>
DOCKER_IMAGE_REPOSITORY="centile"
DOCKER_IMAGE_REPOSITORY="local"
DOCKER_BUILD_DIRECTORY="/etc/docker"
usage() {

View File

@ -12,16 +12,50 @@ galaxy_info:
- name: Alpine
versions:
- all
- name: Debian
- name: Centos
versions:
- all
- name: Debian
versions:
- bookworm
- bullseye
- buster
- stretch
- jessie
- wheezy
- name: Devuan
versions:
- daedalus
- chimaera
- beowulf
- ascii
- jessie
- name: EL
versions:
- all
- name: Fedora
versions:
- all
- name: Raspbian
versions:
- bookworm
- bullseye
- buster
- stretch
- jessie
- name: Ubuntu
versions:
- all
- impish
- hirsute
- groovy
- focal
- eoan
- disco
- cosmic
- bionic
- artful
- zesty
- yakkety
- xenial
- trusty

View File

@ -7,21 +7,10 @@
package: name="{{item.name}}" state="{{item.state}}"
become: yes
- name: packages - add docker GPG key
- import_tasks: packages_debian.yml
when: ansible_os_family|lower == "debian"
apt_key: url=https://download.docker.com/linux/debian/gpg
ignore_errors: true
become: yes
- name: packages - define arch
set_fact:
docker_apt_arch: "{% if ansible_machine == 'aarch64' %}arm64{% endif %}{% if ansible_machine == 'x86_64' %}amd64{% endif %}"
- name: packages - add docker APT repository
when: ansible_os_family|lower == "debian"
apt_repository:
repo: deb [arch={{docker_apt_arch}}] https://download.docker.com/linux/{{ansible_distribution|lower}} {{ansible_distribution_release}} stable
become: yes
tags:
- debian
- name: packages - add docker package
when: docker_package|length > 0

View File

@ -0,0 +1,53 @@
---
# file: tasks/packages_debian.yml
- name: packages - add docker GPG key
apt_key: url=https://download.docker.com/linux/debian/gpg
ignore_errors: true
become: yes
- name: packages - define docker_machine
set_fact:
docker_machine: "{% if ansible_machine == 'aarch64' %}arm64{% endif %}{% if ansible_machine == 'x86_64' %}amd64{% endif %}"
when: docker_machine is undefined
- name: packages - define docker_distribution
set_fact:
docker_distribution: "{% if ansible_distribution|lower == 'devuan' %}debian{% else %}{{ansible_distribution|lower}}{% endif %}"
when: docker_distribution is undefined
- name: packages - define docker_distribution_release - debian bookworm (not yet available)
set_fact:
docker_distribution_release: "bullseye"
when: docker_distribution_release is undefined and ansible_distribution_release|lower == 'bookworm'
- name: packages - define docker_distribution_release - devuan daedalus
set_fact:
docker_distribution_release: "bullseye"
when: docker_distribution_release is undefined and ansible_distribution_release|lower == 'daedalus/ceres'
- name: packages - define docker_distribution_release - devuan chimaera
set_fact:
docker_distribution_release: "bullseye"
when: docker_distribution_release is undefined and ansible_distribution_release|lower == 'chimaera'
- name: packages - define docker_distribution_release - devuan beowulf
set_fact:
docker_distribution_release: "buster"
when: docker_distribution_release is undefined and ansible_distribution_release|lower == 'beowulf'
- name: packages - define docker_distribution_release - devuan ascii
set_fact:
docker_distribution_release: "stretch"
when: docker_distribution_release is undefined and ansible_distribution_release|lower == 'ascii'
- name: packages - define docker_distribution_release
set_fact:
docker_distribution_release: "{{ansible_distribution_release|lower}}"
when: docker_distribution_release is undefined
- name: packages - add docker APT repository
apt_repository:
repo: deb [arch={{docker_machine}}] https://download.docker.com/linux/{{docker_distribution}} {{docker_distribution_release}} stable
become: yes

View File

@ -2,7 +2,7 @@
# file: tasks/services.yml
- name: services - enable/disable docker services
when: docker_services is defined and ansible_service_mgr|lower != "openrc"
when: docker_services is defined and ansible_service_mgr|lower != "openrc" and ansible_service_mgr|lower != "runit"
with_items: "{{ docker_services|default([]) }}"
service:
name: "{{item.name}}"
@ -10,13 +10,13 @@
enabled: "{{item.enabled}}"
become: yes
- name: services - openrc - force service status
- name: services - force service status - openrc
when: docker_services is defined and ansible_service_mgr|lower == "openrc"
shell: "kill -0 $(cat /run/{{item.name}}.pid) && [ ! -h /run/openrc/started/{{item.name}} ] && ln -s /etc/init.d/{{item.name}} /run/openrc/started/{{item.name}} && service {{item.name}} restart ||:"
with_items: "{{ docker_services|default([]) }}"
become: yes
- name: services - openrc - enable/disable docker services
- name: services - enable/disable docker services - openrc
when: docker_services is defined and ansible_service_mgr|lower == "openrc"
with_items: "{{ docker_services|default([]) }}"
service:
@ -26,7 +26,16 @@
runlevel: boot
become: yes
- name: services - openrc - force service restart
- name: services - enable/disable docker services - runit
when: docker_services is defined and ansible_service_mgr|lower == "runit"
with_items: "{{ docker_services|default([]) }}"
sysvinit:
name: "{{item.name}}"
state: "{{item.state}}"
enabled: "{{item.enabled}}"
become: yes
- name: services - force service restart - openrc
when: ansible_service_mgr|lower == "openrc"
shell: "[ ! -d /var/lib/docker/tmp ] && service docker restart ||:"
become: yes

View File

@ -1,11 +1,11 @@
#!/bin/bash
### BEGIN INIT INFO
# Provides: myos
# Required-Start:
# Required-Start: docker
# Required-Stop:
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Description: MYOS system call
# Description: MYOS system calls
### END INIT INFO
set -e

View File

@ -1,5 +1,5 @@
[Unit]
Description=System calls from https://github.com/aynicos/myos
Description=MYOS system calls
[Service]
Type=oneshot

View File

@ -6,7 +6,7 @@
package: name="cloud-init" state="present"
become: yes
- name: cloudinit - alpine - install cloud-init packages
- name: cloudinit - install cloud-init packages - alpine
when: hosts_cloudinit_enable|default(false) and ansible_os_family|lower == "alpine"
with_items:
- { "name": "cloud-init", "state": "present" }
@ -37,7 +37,7 @@
enabled: yes
become: yes
- name: cloudinit - activate service (openrc)
- name: cloudinit - activate service - openrc
when: hosts_cloudinit_enable|default(false) and ansible_service_mgr|lower == "openrc"
service:
name: cloud-init

View File

@ -1,14 +1,24 @@
---
# file: tasks/config.yml
- name: config - arch != x86 - do not run docker-compose in docker
- name: config - do not run docker-compose in docker - arch != x86
when: ansible_machine|lower != "x86_64"
lineinfile: dest="/etc/default/myos" state="present" line="DOCKER=false"
with_items:
- DOCKER=false
lineinfile:
path: /etc/default/myos
create: yes
line: '{{ item }}'
mode: '0644'
become: yes
- name: config - set MYOS config in /etc/default/myos
with_items:
- MYOS={{ lookup('env','ANSIBLE_MYOS') }}
lineinfile: dest="/etc/default/myos" create="yes" line="{{ item }}" mode="0644"
lineinfile:
path: /etc/default/myos
create: yes
line: '{{ item }}'
mode: '0644'
become: yes

View File

@ -23,7 +23,7 @@
copy: src=../files/{{item}} dest={{item}} owner=root group=root mode=0755
become: yes
- name: files - systemd - copy files
- name: files - copy files - systemd
when: ansible_service_mgr|lower == "systemd"
with_items:
- /etc/systemd/system/ansible.service
@ -33,7 +33,7 @@
copy: src=../files/{{item}} dest={{item}} owner=root group=root mode=0644
become: yes
- name: files - openrc - copy files
- name: files - copy files - openrc
when: ansible_service_mgr|lower == "openrc"
with_items:
- /etc/init.d/myos-openrc

View File

@ -2,7 +2,7 @@
# file: tasks/services.yml
- name: services - enable/disable hosts services
when: hosts_services is defined and ansible_service_mgr|lower != "openrc"
when: hosts_services is defined and ansible_service_mgr|lower != "openrc" and ansible_service_mgr|lower != "runit"
with_items: "{{ hosts_services|default([]) }}"
service:
name: "{{item.name}}"
@ -10,7 +10,7 @@
enabled: "{{item.enabled}}"
become: yes
- name: services - openrc - enable/disable hosts services
- name: services - enable/disable hosts services - openrc
when: hosts_services is defined and ansible_service_mgr|lower == "openrc"
with_items: "{{ hosts_services|default([]) }}"
service:
@ -20,3 +20,12 @@
runlevel: boot
become: yes
- name: services - enable/disable hosts services - runit
when: hosts_services is defined and ansible_service_mgr|lower == "runit"
with_items: "{{ hosts_services|default([]) }}"
sysvinit:
name: "{{item.name}}"
state: "{{item.state}}"
enabled: "{{item.enabled}}"
become: yes

View File

@ -70,7 +70,7 @@
- include myos/config
lineinfile: dest=~/.ssh/config create=yes line='{{item}}'
- name: ssh - update ~/.ssh/myos/config
- name: user - update ~/.ssh/myos/config
template:
src: ssh_config.j2
dest: ~/.ssh/myos/config

View File

@ -6,7 +6,7 @@ hosts_packages_distro:
- { "name": "groff", "state": "present" }
- { "name": "openssh-client", "state": "present" }
- { "name": "pass", "state": "present" }
- { "name": "python-pip", "state": "present" }
- { "name": "python3-pip", "state": "present" }
- { "name": "rclone", "state": "present" }
- { "name": "util-linux", "state": "present" }
- { "name": "vim-nox", "state": "present" }

View File

@ -2,7 +2,7 @@
# file: tasks/services.yml
- name: services - enable/disable remotes services
when: remotes_services is defined and ansible_service_mgr|lower != "openrc"
when: remotes_services is defined and ansible_service_mgr|lower != "openrc" and ansible_service_mgr|lower != "runit"
with_items: "{{ remotes_services|default([]) }}"
service:
name: "{{item.name}}"
@ -10,7 +10,7 @@
enabled: "{{item.enabled}}"
become: yes
- name: services - openrc - enable/disable remotes services
- name: services - enable/disable remotes services - openrc
when: remotes_services is defined and ansible_service_mgr|lower == "openrc"
with_items: "{{ remotes_services|default([]) }}"
service:
@ -20,3 +20,12 @@
runlevel: boot
become: yes
- name: services - enable/disable remotes services - runit
when: remotes_services is defined and ansible_service_mgr|lower == "runit"
with_items: "{{ remotes_services|default([]) }}"
sysvinit:
name: "{{item.name}}"
state: "{{item.state}}"
enabled: "{{item.enabled}}"
become: yes

View File

@ -1,13 +1,12 @@
FROM alpine:latest as dist
ARG DOCKER_BUILD_DIR
LABEL maintainer aynic.os <support+docker@asycn.io>
RUN apk --no-cache add \
ansible \
py3-pip
py3-pip \
&& pip3 install boto
RUN pip3 install boto
RUN apk --no-cache upgrade
ENTRYPOINT ["/usr/bin/ansible"]
CMD ["--help"]

View File

@ -1,8 +0,0 @@
FROM docker.elastic.co/apm/apm-server-oss:7.4.2 as dist
ARG DOCKER_BUILD_DIR
# config
COPY ${DOCKER_BUILD_DIR}/apm-server.yml /usr/share/apm-server/
FROM dist as master
ARG DOCKER_BUILD_DIR

View File

@ -1,931 +0,0 @@
######################### APM Server Configuration #########################
################################ APM Server ################################
apm-server:
# Defines the host and port the server is listening on. Use "unix:/path/to.sock" to listen on a unix domain socket.
host: "0.0.0.0:8200"
# Maximum permitted size in bytes of a request's header accepted by the server to be processed.
#max_header_size: 1048576
# Maximum amount of time to wait for the next incoming request before underlying connection is closed.
#idle_timeout: 45s
# Maximum permitted duration for reading an entire request.
#read_timeout: 30s
# Maximum permitted duration for writing a response.
#write_timeout: 30s
# Maximum duration before releasing resources when shutting down the server.
#shutdown_timeout: 5s
# Maximum permitted size in bytes of an event accepted by the server to be processed.
#max_event_size: 307200
# Maximum number of new connections to accept simultaneously (0 means unlimited).
#max_connections: 0
# Authorization token for sending data to the APM server. If a token is set, the
# agents must send it in the following format: Authorization: Bearer <secret-token>.
# It is recommended to use an authorization token in combination with SSL enabled,
# and save the token in the apm-server keystore. The token is not used for the RUM endpoint.
#secret_token:
# Enable secure communication between APM agents and the server. By default ssl is disabled.
#ssl:
#enabled: false
# Configure a list of root certificate authorities for verifying client certificates.
#certificate_authorities: []
# Path to file containing the certificate for server authentication.
# Needs to be configured when ssl is enabled.
#certificate: ''
# Path to file containing server certificate key.
# Needs to be configured when ssl is enabled.
#key: ''
# Optional configuration options for ssl communication.
# Passphrase for decrypting the Certificate Key.
# It is recommended to use the provided keystore instead of entering the passphrase in plain text.
#key_passphrase: ''
# List of supported/valid protocol versions. By default TLS versions 1.1 up to 1.2 are enabled.
#supported_protocols: [TLSv1.1, TLSv1.2]
# Configure cipher suites to be used for SSL connections.
#cipher_suites: []
# Configure curve types for ECDHE based cipher suites.
#curve_types: []
# Configure which type of client authentication is supported.
# Options are `none`, `optional`, and `required`. Default is `optional`.
#client_authentication: "optional"
# Configure SSL verification mode. If `none` is configured, all hosts and
# certificates will be accepted. In this mode, SSL-based connections are
# susceptible to man-in-the-middle attacks. Use only for testing. Default is `full`.
#ssl.verification_mode: full
# Enable Real User Monitoring (RUM) Support. By default RUM is disabled.
#rum:
#enabled: false
#event_rate:
# Defines the maximum amount of events allowed to be sent to the APM Server RUM
# endpoint per IP per second. Defaults to 300.
#limit: 300
# An LRU cache is used to keep a rate limit per IP for the most recently seen IPs.
# This setting defines the number of unique IPs that can be tracked in the cache.
# Sites with many concurrent clients should consider increasing this limit. Defaults to 1000.
#lru_size: 1000
#-- General RUM settings
# Comma separated list of permitted origins for real user monitoring.
# User-agents will send an origin header that will be validated against this list.
# An origin is made of a protocol scheme, host and port, without the url path.
# Allowed origins in this setting can have * to match anything (eg.: http://*.example.com)
# If an item in the list is a single '*', everything will be allowed.
#allow_origins : ['*']
# Regexp to be matched against a stacktrace frame's `file_name` and `abs_path` attributes.
# If the regexp matches, the stacktrace frame is considered to be a library frame.
#library_pattern: "node_modules|bower_components|~"
# Regexp to be matched against a stacktrace frame's `file_name`.
# If the regexp matches, the stacktrace frame is not used for calculating error groups.
# The default pattern excludes stacktrace frames that have a filename starting with '/webpack'
#exclude_from_grouping: "^/webpack"
# If a source map has previously been uploaded, source mapping is automatically applied.
# to all error and transaction documents sent to the RUM endpoint.
#source_mapping:
# Sourcemapping is enabled by default.
#enabled: true
# Source maps are always fetched from Elasticsearch, by default using the output.elasticsearch configuration.
# A different instance must be configured when using any other output.
# This setting only affects sourcemap reads - the output determines where sourcemaps are written.
#elasticsearch:
# Array of hosts to connect to.
# Scheme and port can be left out and will be set to the default (`http` and `9200`).
# In case you specify and additional path, the scheme is required: `http://localhost:9200/path`.
# IPv6 addresses should always be defined as: `https://[2001:db8::1]:9200`.
# hosts: ["localhost:9200"]
# Optional protocol and basic auth credentials.
#protocol: "https"
#username: "elastic"
#password: "changeme"
# The `cache.expiration` determines how long a source map should be cached before fetching it again from Elasticsearch.
# Note that values configured without a time unit will be interpreted as seconds.
#cache:
#expiration: 5m
# Source maps are stored in a separate index.
# If the default index pattern for source maps at 'outputs.elasticsearch.indices'
# is changed, a matching index pattern needs to be specified here.
#index_pattern: "apm-*-sourcemap*"
# If true (default), APM Server captures the IP of the instrumented service
# or the IP and User Agent of the real user (RUM requests).
#capture_personal_data: true
# Enable APM Server Golang expvar support (https://golang.org/pkg/expvar/).
#expvar:
#enabled: false
# Url to expose expvar.
#url: "/debug/vars"
# Instrumentation support for the server's HTTP endpoints and event publisher.
#instrumentation:
# Set to true to enable instrumentation of the APM Server itself.
#enabled: false
# Environment in which the APM Server is running on (eg: staging, production, etc.)
#environment: ""
# Remote hosts to report instrumentation results to.
#hosts:
# - http://remote-apm-server:8200
# secret_token for the remote apm-servers.
#secret_token:
# A pipeline is a definition of processors applied to documents when ingesting them to Elasticsearch.
# Using pipelines involves two steps:
# (1) registering a pipeline
# (2) applying a pipeline during data ingestion (see `output.elasticsearch.pipeline`)
#
# You can manually register a pipeline, or use this configuration option to ensure
# the pipeline is loaded and registered at the configured Elasticsearch instances.
# Find the default pipeline configuration at `ingest/pipeline/definition.json`.
# Automatic pipeline registration requires the `output.elasticsearch` to be enabled and configured.
#register.ingest.pipeline:
# Registers APM pipeline definition in Elasticsearch on APM Server startup. Defaults to true.
#enabled: true
# Overwrites existing APM pipeline definition in Elasticsearch. Defaults to false.
#overwrite: false
# When ilm is set to `auto`, the APM Server checks a couple of preconditions:
# If a different output than Elasticsearch is configured, ILM will be disabled.
# If Elasticsearch output is configured, but specific `index` or `indices` settings are configured, ILM will be
# disabled, as it only works with default index settings.
# If the configured Elasticsearch instance is not eligible for ILM, ILM will also be disabled.
# If all preconditions are met, ILM will be enabled.
#
# When ILM is set to `true`, the APM Server ignores any configured index settings.
# For ILM to be applied, The configured output must be set to Elasticsearch and the instance
# needs to support ILM. Otherwise APM Server falls back to ordinary index management without ILM.
#
# Defaults to "auto". Disable ILM by setting it to `false`.
#ilm.enabled: "auto"
# When using APM agent configuration, information fetched from Kibana will be cached in memory for some time.
# Specify cache key expiration via this setting. Default is 30 seconds.
#agent.config.cache.expiration: 30s
#kibana:
# For APM Agent configuration in Kibana, enabled must be true.
#enabled: false
# Scheme and port can be left out and will be set to the default (`http` and `5601`).
# In case you specify an additional path, the scheme is required: `http://localhost:5601/path`.
# IPv6 addresses should always be defined as: `https://[2001:db8::1]:5601`.
#host: "localhost:5601"
# Optional protocol and basic auth credentials.
#protocol: "https"
#username: "elastic"
#password: "changeme"
# Optional HTTP path.
#path: ""
# Enable custom SSL settings. Set to false to ignore custom SSL settings for secure communication.
#ssl.enabled: true
# Optional SSL configuration options. SSL is off by default, change the `protocol` option if you want to enable `https`.
# Configure SSL verification mode. If `none` is configured, all server hosts
# and certificates will be accepted. In this mode, SSL based connections are
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
# `full`.
#ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
# 1.2 are enabled.
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
# List of root certificates for HTTPS server verifications.
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication.
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#ssl.key: "/etc/pki/client/cert.key"
# Optional passphrase for decrypting the Certificate Key.
# It is recommended to use the provided keystore instead of entering the passphrase in plain text.
#ssl.key_passphrase: ''
# Configure cipher suites to be used for SSL connections.
#ssl.cipher_suites: []
# Configure curve types for ECDHE based cipher suites.
#ssl.curve_types: []
#================================= General =================================
# Data is buffered in a memory queue before it is published to the configured output.
# The memory queue will present all available events (up to the outputs
# bulk_max_size) to the output, the moment the output is ready to serve
# another batch of events.
#queue:
# Queue type by name (default 'mem').
#mem:
# Max number of events the queue can buffer.
#events: 4096
# Hints the minimum number of events stored in the queue,
# before providing a batch of events to the outputs.
# The default value is set to 2048.
# A value of 0 ensures events are immediately available
# to be sent to the outputs.
#flush.min_events: 2048
# Maximum duration after which events are available to the outputs,
# if the number of events stored in the queue is < `flush.min_events`.
#flush.timeout: 1s
# Sets the maximum number of CPUs that can be executing simultaneously. The
# default is the number of logical CPUs available in the system.
#max_procs:
#================================= Template =================================
# A template is used to set the mapping in Elasticsearch.
# By default template loading is enabled and the template is loaded.
# These settings can be adjusted to load your own template or overwrite existing ones.
# Set to false to disable template loading.
#setup.template.enabled: true
# Template name. By default the template name is "apm-%{[observer.version]}"
# The template name and pattern has to be set in case the elasticsearch index pattern is modified.
#setup.template.name: "apm-%{[observer.version]}"
# Template pattern. By default the template pattern is "apm-%{[observer.version]}-*" to apply to the default index settings.
# The first part is the version of apm-server and then -* is used to match all daily indices.
# The template name and pattern has to be set in case the elasticsearch index pattern is modified.
#setup.template.pattern: "apm-%{[observer.version]}-*"
# Path to fields.yml file to generate the template.
#setup.template.fields: "${path.config}/fields.yml"
# Overwrite existing template.
#setup.template.overwrite: false
# Elasticsearch template settings.
#setup.template.settings:
# A dictionary of settings to place into the settings.index dictionary
# of the Elasticsearch template. For more details, please check
# https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html
#index:
#number_of_shards: 1
#codec: best_compression
#number_of_routing_shards: 30
#mapping.total_fields.limit: 2000
#============================= Elastic Cloud =============================
# These settings simplify using APM Server with the Elastic Cloud (https://cloud.elastic.co/).
# The cloud.id setting overwrites the `output.elasticsearch.hosts` option.
# You can find the `cloud.id` in the Elastic Cloud web UI.
#cloud.id:
# The cloud.auth setting overwrites the `output.elasticsearch.username` and
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
#cloud.auth:
#================================ Outputs =================================
# Configure the output to use when sending the data collected by apm-server.
#-------------------------- Elasticsearch output --------------------------
output.elasticsearch:
# Array of hosts to connect to.
# Scheme and port can be left out and will be set to the default (`http` and `9200`).
# In case you specify and additional path, the scheme is required: `http://localhost:9200/path`.
# IPv6 addresses should always be defined as: `https://[2001:db8::1]:9200`.
hosts: ["elasticsearch:9200"]
# Boolean flag to enable or disable the output module.
#enabled: true
# Set gzip compression level.
#compression_level: 0
# Optional protocol and basic auth credentials.
#protocol: "https"
#username: "elastic"
#password: "changeme"
# Dictionary of HTTP parameters to pass within the url with index operations.
#parameters:
#param1: value1
#param2: value2
# Number of workers per Elasticsearch host.
#worker: 1
# By using the configuration below, APM documents are stored to separate indices,
# depending on their `processor.event`:
# - error
# - transaction
# - span
# - sourcemap
#
# The indices are all prefixed with `apm-%{[observer.version]}`.
# To allow managing indices based on their age, all indices (except for sourcemaps)
# end with the information of the day they got indexed.
# e.g. "apm-7.3.0-transaction-2019.07.20"
#
# Be aware that you can only specify one Elasticsearch template.
# If you modify the index patterns you must also update these configurations accordingly,
# as they need to be aligned:
# * `setup.template.name`
# * `setup.template.pattern`
#index: "apm-%{[observer.version]}-%{+yyyy.MM.dd}"
#indices:
# - index: "apm-%{[observer.version]}-sourcemap"
# when.contains:
# processor.event: "sourcemap"
#
# - index: "apm-%{[observer.version]}-error-%{+yyyy.MM.dd}"
# when.contains:
# processor.event: "error"
#
# - index: "apm-%{[observer.version]}-transaction-%{+yyyy.MM.dd}"
# when.contains:
# processor.event: "transaction"
#
# - index: "apm-%{[observer.version]}-span-%{+yyyy.MM.dd}"
# when.contains:
# processor.event: "span"
#
# - index: "apm-%{[observer.version]}-metric-%{+yyyy.MM.dd}"
# when.contains:
# processor.event: "metric"
#
# - index: "apm-%{[observer.version]}-onboarding-%{+yyyy.MM.dd}"
# when.contains:
# processor.event: "onboarding"
# A pipeline is a definition of processors applied to documents when ingesting them to Elasticsearch.
# APM Server comes with a default pipeline definition, located at `ingest/pipeline/definition.json`, which is
# loaded to Elasticsearch by default (see `apm-server.register.ingest.pipeline`).
# APM pipeline is enabled by default. To disable it, set `pipeline: _none`.
#pipeline: "apm"
# Optional HTTP Path.
#path: "/elasticsearch"
# Custom HTTP headers to add to each request.
#headers:
# X-My-Header: Contents of the header
# Proxy server url.
#proxy_url: http://proxy:3128
# The number of times a particular Elasticsearch index operation is attempted. If
# the indexing operation doesn't succeed after this many retries, the events are
# dropped. The default is 3.
#max_retries: 3
# The maximum number of events to bulk in a single Elasticsearch bulk API index request.
# The default is 50.
#bulk_max_size: 50
# The number of seconds to wait before trying to reconnect to Elasticsearch
# after a network error. After waiting backoff.init seconds, apm-server
# tries to reconnect. If the attempt fails, the backoff timer is increased
# exponentially up to backoff.max. After a successful connection, the backoff
# timer is reset. The default is 1s.
#backoff.init: 1s
# The maximum number of seconds to wait before attempting to connect to
# Elasticsearch after a network error. The default is 60s.
#backoff.max: 60s
# Configure HTTP request timeout before failing a request to Elasticsearch.
#timeout: 90
# Enable custom SSL settings. Set to false to ignore custom SSL settings for secure communication.
#ssl.enabled: true
# Optional SSL configuration options. SSL is off by default, change the `protocol` option if you want to enable `https`.
# Configure SSL verification mode. If `none` is configured, all server hosts
# and certificates will be accepted. In this mode, SSL based connections are
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
# `full`.
#ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
# 1.2 are enabled.
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
# List of root certificates for HTTPS server verifications.
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication.
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#ssl.key: "/etc/pki/client/cert.key"
# Optional passphrase for decrypting the Certificate Key.
# It is recommended to use the provided keystore instead of entering the passphrase in plain text.
#ssl.key_passphrase: ''
# Configure cipher suites to be used for SSL connections.
#ssl.cipher_suites: []
# Configure curve types for ECDHE based cipher suites.
#ssl.curve_types: []
# Configure what types of renegotiation are supported. Valid options are
# never, once, and freely. Default is never.
#ssl.renegotiation: never
#----------------------------- Console output -----------------------------
#output.console:
# Boolean flag to enable or disable the output module.
#enabled: false
# Configure JSON encoding.
#codec.json:
# Pretty-print JSON event.
#pretty: false
# Configure escaping HTML symbols in strings.
#escape_html: false
#---------------------------- Logstash output -----------------------------
#output.logstash:
# Boolean flag to enable or disable the output module.
#enabled: false
# The Logstash hosts.
#hosts: ["localhost:5044"]
# Number of workers per Logstash host.
#worker: 1
# Set gzip compression level.
#compression_level: 3
# Configure escaping html symbols in strings.
#escape_html: true
# Optional maximum time to live for a connection to Logstash, after which the
# connection will be re-established. A value of `0s` (the default) will
# disable this feature.
#
# Not yet supported for async connections (i.e. with the "pipelining" option set).
#ttl: 30s
# Optional load balance the events between the Logstash hosts. Default is false.
#loadbalance: false
# Number of batches to be sent asynchronously to Logstash while processing
# new batches.
#pipelining: 2
# If enabled only a subset of events in a batch of events is transferred per
# group. The number of events to be sent increases up to `bulk_max_size`
# if no error is encountered.
#slow_start: false
# The number of seconds to wait before trying to reconnect to Logstash
# after a network error. After waiting backoff.init seconds, apm-server
# tries to reconnect. If the attempt fails, the backoff timer is increased
# exponentially up to backoff.max. After a successful connection, the backoff
# timer is reset. The default is 1s.
#backoff.init: 1s
# The maximum number of seconds to wait before attempting to connect to
# Logstash after a network error. The default is 60s.
#backoff.max: 60s
# Optional index name. The default index name is set to apm
# in all lowercase.
#index: 'apm'
# SOCKS5 proxy server URL
#proxy_url: socks5://user:password@socks5-server:2233
# Resolve names locally when using a proxy server. Defaults to false.
#proxy_use_local_resolver: false
# Enable SSL support. SSL is automatically enabled if any SSL setting is set.
#ssl.enabled: false
# Optional SSL configuration options. SSL is off by default.
# Configure SSL verification mode. If `none` is configured, all server hosts
# and certificates will be accepted. In this mode, SSL based connections are
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
# `full`.
#ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
# 1.2 are enabled.
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
# List of root certificates for HTTPS server verifications.
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication.
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#ssl.key: "/etc/pki/client/cert.key"
# Optional passphrase for decrypting the Certificate Key.
# It is recommended to use the provided keystore instead of entering the passphrase in plain text.
#ssl.key_passphrase: ''
# Configure cipher suites to be used for SSL connections.
#ssl.cipher_suites: []
# Configure curve types for ECDHE based cipher suites.
#ssl.curve_types: []
# Configure what types of renegotiation are supported. Valid options are
# never, once, and freely. Default is never.
#ssl.renegotiation: never
#------------------------------ Kafka output ------------------------------
#output.kafka:
# Boolean flag to enable or disable the output module.
#enabled: false
# The list of Kafka broker addresses from where to fetch the cluster metadata.
# The cluster metadata contain the actual Kafka brokers events are published
# to.
#hosts: ["localhost:9092"]
# The Kafka topic used for produced events. The setting can be a format string
# using any event field. To set the topic from document type use `%{[type]}`.
#topic: beats
# The Kafka event key setting. Use format string to create unique event key.
# By default no event key will be generated.
#key: ''
# The Kafka event partitioning strategy. Default hashing strategy is `hash`
# using the `output.kafka.key` setting or randomly distributes events if
# `output.kafka.key` is not configured.
#partition.hash:
# If enabled, events will only be published to partitions with reachable
# leaders. Default is false.
#reachable_only: false
# Configure alternative event field names used to compute the hash value.
# If empty `output.kafka.key` setting will be used.
# Default value is empty list.
#hash: []
# Authentication details. Password is required if username is set.
#username: ''
#password: ''
# Kafka version libbeat is assumed to run against. Defaults to the "1.0.0".
#version: '1.0.0'
# Configure JSON encoding.
#codec.json:
# Pretty print json event
#pretty: false
# Configure escaping html symbols in strings.
#escape_html: true
# Metadata update configuration. Metadata do contain leader information
# deciding which broker to use when publishing.
#metadata:
# Max metadata request retry attempts when cluster is in middle of leader
# election. Defaults to 3 retries.
#retry.max: 3
# Waiting time between retries during leader elections. Default is 250ms.
#retry.backoff: 250ms
# Refresh metadata interval. Defaults to every 10 minutes.
#refresh_frequency: 10m
# The number of concurrent load-balanced Kafka output workers.
#worker: 1
# The number of times to retry publishing an event after a publishing failure.
# After the specified number of retries, the events are typically dropped.
# Set max_retries to a value less than 0 to retry
# until all events are published. The default is 3.
#max_retries: 3
# The maximum number of events to bulk in a single Kafka request. The default
# is 2048.
#bulk_max_size: 2048
# The number of seconds to wait for responses from the Kafka brokers before
# timing out. The default is 30s.
#timeout: 30s
# The maximum duration a broker will wait for number of required ACKs. The
# default is 10s.
#broker_timeout: 10s
# The number of messages buffered for each Kafka broker. The default is 256.
#channel_buffer_size: 256
# The keep-alive period for an active network connection. If 0s, keep-alives
# are disabled. The default is 0 seconds.
#keep_alive: 0
# Sets the output compression codec. Must be one of none, snappy and gzip. The
# default is gzip.
#compression: gzip
# Set the compression level. Currently only gzip provides a compression level
# between 0 and 9. The default value is chosen by the compression algorithm.
#compression_level: 4
# The maximum permitted size of JSON-encoded messages. Bigger messages will be
# dropped. The default value is 1000000 (bytes). This value should be equal to
# or less than the broker's message.max.bytes.
#max_message_bytes: 1000000
# The ACK reliability level required from broker. 0=no response, 1=wait for
# local commit, -1=wait for all replicas to commit. The default is 1. Note:
# If set to 0, no ACKs are returned by Kafka. Messages might be lost silently
# on error.
#required_acks: 1
# The configurable ClientID used for logging, debugging, and auditing
# purposes. The default is "beats".
#client_id: beats
# Enable SSL support. SSL is automatically enabled if any SSL setting is set.
#ssl.enabled: false
# Optional SSL configuration options. SSL is off by default.
# Configure SSL verification mode. If `none` is configured, all server hosts
# and certificates will be accepted. In this mode, SSL based connections are
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
# `full`.
#ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
# 1.2 are enabled.
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
# List of root certificates for HTTPS server verifications.
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication.
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#ssl.key: "/etc/pki/client/cert.key"
# Optional passphrase for decrypting the Certificate Key.
# It is recommended to use the provided keystore instead of entering the passphrase in plain text.
#ssl.key_passphrase: ''
# Configure cipher suites to be used for SSL connections.
#ssl.cipher_suites: []
# Configure curve types for ECDHE based cipher suites.
#ssl.curve_types: []
# Configure what types of renegotiation are supported. Valid options are
# never, once, and freely. Default is never.
#ssl.renegotiation: never
#================================= Paths ==================================
# The home path for the apm-server installation. This is the default base path
# for all other path settings and for miscellaneous files that come with the
# distribution.
# If not set by a CLI flag or in the configuration file, the default for the
# home path is the location of the binary.
#path.home:
# The configuration path for the apm-server installation. This is the default
# base path for configuration files, including the main YAML configuration file
# and the Elasticsearch template file. If not set by a CLI flag or in the
# configuration file, the default for the configuration path is the home path.
#path.config: ${path.home}
# The data path for the apm-server installation. This is the default base path
# for all the files in which apm-server needs to store its data. If not set by a
# CLI flag or in the configuration file, the default for the data path is a data
# subdirectory inside the home path.
#path.data: ${path.home}/data
# The logs path for an apm-server installation. If not set by a CLI flag or in the
# configuration file, the default is a logs subdirectory inside the home path.
#path.logs: ${path.home}/logs
#================================= Logging =================================
# There are three options for the log output: syslog, file, and stderr.
# Windows systems default to file output. All other systems default to syslog.
# Sets the minimum log level. The default log level is info.
# Available log levels are: error, warning, info, or debug.
#logging.level: info
# Enable debug output for selected components. To enable all selectors use ["*"].
# Other available selectors are "beat", "publish", or "service".
# Multiple selectors can be chained.
#logging.selectors: [ ]
# Send all logging output to syslog. The default is false.
#logging.to_syslog: true
# If enabled, apm-server periodically logs its internal metrics that have changed
# in the last period. For each metric that changed, the delta from the value at
# the beginning of the period is logged. Also, the total values for
# all non-zero internal metrics are logged on shutdown. The default is false.
#logging.metrics.enabled: false
# The period after which to log the internal metrics. The default is 30s.
#logging.metrics.period: 30s
# Logging to rotating files. When true, writes all logging output to files.
# The log files are automatically rotated when the log file size limit is reached.
#logging.to_files: true
#logging.files:
# Configure the path where the logs are written. The default is the logs directory
# under the home path (the binary location).
#path: /var/log/apm-server
# The name of the files where the logs are written to.
#name: apm-server
# Configure log file size limit. If limit is reached, log file will be
# automatically rotated.
#rotateeverybytes: 10485760 # = 10MB
# Number of rotated log files to keep. Oldest files will be deleted first.
#keepfiles: 7
# The permissions mask to apply when rotating log files. The default value is 0600.
# Must be a valid Unix-style file permissions mask expressed in octal notation.
#permissions: 0600
# Enable log file rotation on time intervals in addition to size-based rotation.
# Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h
# are boundary-aligned with minutes, hours, days, weeks, months, and years as
# reported by the local system clock. All other intervals are calculated from the
# Unix epoch. Defaults to disabled.
#interval: 0
# Set to true to log messages in json format.
#logging.json: false
#=============================== HTTP Endpoint ===============================
# apm-server can expose internal metrics through a HTTP endpoint. For security
# reasons the endpoint is disabled by default. This feature is currently experimental.
# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output
# append ?pretty to the URL.
# Defines if the HTTP endpoint is enabled.
#http.enabled: false
# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost.
#http.host: localhost
# Port on which the HTTP endpoint will bind. Default is 5066.
#http.port: 5066
#============================= X-pack Monitoring =============================
# APM server can export internal metrics to a central Elasticsearch monitoring
# cluster. This requires x-pack monitoring to be enabled in Elasticsearch. The
# reporting is disabled by default.
# Set to true to enable the monitoring reporter.
#monitoring.enabled: false
# Most settings from the Elasticsearch output are accepted here as well.
# Note that these settings should be configured to point to your Elasticsearch *monitoring* cluster.
# Any setting that is not set is automatically inherited from the Elasticsearch
# output configuration. This means that if you have the Elasticsearch output configured,
# you can simply uncomment the following line.
#monitoring.elasticsearch:
# Optional protocol and basic auth credentials.
#protocol: "https"
#username: "apm_system"
#password: ""
# Array of hosts to connect to.
# Scheme and port can be left out and will be set to the default (`http` and `9200`).
# In case you specify an additional path, the scheme is required: `http://localhost:9200/path`.
# IPv6 addresses should always be defined as: `https://[2001:db8::1]:9200`.
#hosts: ["localhost:9200"]
# Set gzip compression level.
#compression_level: 0
# Dictionary of HTTP parameters to pass within the URL with index operations.
#parameters:
#param1: value1
#param2: value2
# Custom HTTP headers to add to each request.
#headers:
# X-My-Header: Contents of the header
# Proxy server url.
#proxy_url: http://proxy:3128
# The number of times a particular Elasticsearch index operation is attempted. If
# the indexing operation doesn't succeed after this many retries, the events are
# dropped. The default is 3.
#max_retries: 3
# The maximum number of events to bulk in a single Elasticsearch bulk API index request.
# The default is 50.
#bulk_max_size: 50
# The number of seconds to wait before trying to reconnect to Elasticsearch
# after a network error. After waiting backoff.init seconds, apm-server
# tries to reconnect. If the attempt fails, the backoff timer is increased
# exponentially up to backoff.max. After a successful connection, the backoff
# timer is reset. The default is 1s.
#backoff.init: 1s
# The maximum number of seconds to wait before attempting to connect to
# Elasticsearch after a network error. The default is 60s.
#backoff.max: 60s
# Configure HTTP request timeout before failing a request to Elasticsearch.
#timeout: 90
# Enable custom SSL settings. Set to false to ignore custom SSL settings for secure communication.
#ssl.enabled: true
# Optional SSL configuration options. SSL is off by default, change the `protocol` option if you want to enable `https`.
# Configure SSL verification mode. If `none` is configured, all server hosts
# and certificates will be accepted. In this mode, SSL based connections are
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
# `full`.
#ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
# 1.2 are enabled.
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
# List of root certificates for HTTPS server verifications.
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication.
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#ssl.key: "/etc/pki/client/cert.key"
# Optional passphrase for decrypting the Certificate Key.
# It is recommended to use the provided keystore instead of entering the passphrase in plain text.
#ssl.key_passphrase: ''
# Configure cipher suites to be used for SSL connections.
#ssl.cipher_suites: []
# Configure curve types for ECDHE based cipher suites.
#ssl.curve_types: []
# Configure what types of renegotiation are supported. Valid options are
# never, once, and freely. Default is never.
#ssl.renegotiation: never
#metrics.period: 10s
#state.period: 1m

View File

@ -1,30 +0,0 @@
apm-server:
host: "0.0.0.0:8200"
output.elasticsearch:
pipeline: _none
#index: "apm-%{[observer.version]}-%{+yyyy.MM.dd}"
#indices:
# - index: "apm-%{[observer.version]}-sourcemap"
# when.contains:
# processor.event: "sourcemap"
#
# - index: "apm-%{[observer.version]}-error-%{+yyyy.MM.dd}"
# when.contains:
# processor.event: "error"
#
# - index: "apm-%{[observer.version]}-transaction-%{+yyyy.MM.dd}"
# when.contains:
# processor.event: "transaction"
#
# - index: "apm-%{[observer.version]}-span-%{+yyyy.MM.dd}"
# when.contains:
# processor.event: "span"
#
# - index: "apm-%{[observer.version]}-metric-%{+yyyy.MM.dd}"
# when.contains:
# processor.event: "metric"
#
# - index: "apm-%{[observer.version]}-onboarding-%{+yyyy.MM.dd}"
# when.contains:
# processor.event: "onboarding"

View File

@ -1,98 +0,0 @@
[
{
"_id": "5f08a870-7c6a-11e7-aa55-3b0d52c71c60",
"_migrationVersion": {
"dashboard": "7.0.0"
},
"_references": [
{
"id": "c618e4e0-7c69-11e7-aa55-3b0d52c71c60",
"name": "panel_0",
"type": "visualization"
},
{
"id": "ceefd050-7c6a-11e7-aa55-3b0d52c71c60",
"name": "panel_1",
"type": "search"
}
],
"_source": {
"description": "",
"hits": 0,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"version\":true,\"highlightAll\":true}"
},
"optionsJSON": "{\"darkTheme\": false}",
"panelsJSON": "[{\"size_x\":12,\"size_y\":3,\"panelIndex\":1,\"col\":1,\"row\":1,\"panelRefName\":\"panel_0\"},{\"sort\":[\"@timestamp\",\"desc\"],\"size_x\":12,\"size_y\":21,\"panelIndex\":2,\"col\":1,\"columns\":[\"error.culprit\",\"error.exception.type\",\"error.exception.message\",\"error.log.message\",\"error.exception.handled\",\"service.name\"],\"row\":4,\"panelRefName\":\"panel_1\"}]",
"timeRestore": false,
"title": "[APM] Error Details",
"uiStateJSON": "{}",
"version": 1
},
"_type": "dashboard"
},
{
"_id": "c618e4e0-7c69-11e7-aa55-3b0d52c71c60",
"_migrationVersion": {
"visualization": "7.0.0"
},
"_references": [
{
"id": "apm-*",
"name": "kibanaSavedObjectMeta.searchSourceJSON.index",
"type": "index-pattern"
}
],
"_source": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.index\"}"
},
"title": "Error Occurrences [APM]",
"uiStateJSON": "{}",
"version": 1,
"visState": "{\"params\":{\"time_field\":\"@timestamp\",\"series\":[{\"line_width\":\"0\",\"terms_field\":\"error.grouping_key\",\"point_size\":1,\"color\":\"rgba(0,156,224,1)\",\"label\":\"Occurrences\",\"metrics\":[{\"type\":\"count\",\"id\":\"61ca57f2-469d-11e7-af02-69e470af7417\"}],\"seperate_axis\":0,\"split_mode\":\"terms\",\"chart_type\":\"bar\",\"stacked\":\"none\",\"axis_position\":\"right\",\"formatter\":\"number\",\"id\":\"61ca57f1-469d-11e7-af02-69e470af7417\",\"fill\":\"1\"}],\"axis_formatter\":\"number\",\"interval\":\">=1m\",\"filter\":\"processor.event:error\",\"show_legend\":0,\"show_grid\":1,\"axis_position\":\"left\",\"type\":\"timeseries\",\"id\":\"61ca57f0-469d-11e7-af02-69e470af7417\"},\"type\":\"metrics\",\"aggs\":[],\"title\":\"Error Occurrences [APM]\"}"
},
"_type": "visualization"
},
{
"_id": "ceefd050-7c6a-11e7-aa55-3b0d52c71c60",
"_migrationVersion": {
"search": "7.0.0"
},
"_references": [
{
"id": "apm-*",
"name": "kibanaSavedObjectMeta.searchSourceJSON.index",
"type": "index-pattern"
},
{
"id": "apm-*",
"name": "kibanaSavedObjectMeta.searchSourceJSON.filter[0].meta.index",
"type": "index-pattern"
}
],
"_source": {
"columns": [
"error.culprit",
"error.exception.type",
"error.exception.message",
"error.log.message",
"error.exception.handled",
"service.name"
],
"description": "",
"hits": 0,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[{\"query\":{\"match\":{\"processor.event\":{\"query\":\"error\",\"type\":\"phrase\"}}},\"meta\":{\"value\":\"error\",\"disabled\":false,\"alias\":null,\"params\":{\"query\":\"error\",\"type\":\"phrase\"},\"key\":\"processor.event\",\"negate\":false,\"type\":\"phrase\",\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.filter[0].meta.index\"},\"$state\":{\"store\":\"appState\"}}],\"version\":true,\"highlightAll\":true,\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.index\"}"
},
"sort": [
"@timestamp",
"desc"
],
"title": "Error Details [APM]",
"version": 1
},
"_type": "search"
}
]

View File

@ -1,82 +0,0 @@
[
{
"_id": "37f6fac0-7c6a-11e7-aa55-3b0d52c71c60",
"_migrationVersion": {
"dashboard": "7.0.0"
},
"_references": [
{
"id": "22518e70-7c69-11e7-aa55-3b0d52c71c60",
"name": "panel_0",
"type": "visualization"
},
{
"id": "c618e4e0-7c69-11e7-aa55-3b0d52c71c60",
"name": "panel_1",
"type": "visualization"
}
],
"_source": {
"description": "",
"hits": 0,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"version\":true,\"highlightAll\":true}"
},
"optionsJSON": "{\"darkTheme\": false}",
"panelsJSON": "[{\"size_x\":12,\"size_y\":10,\"panelIndex\":1,\"col\":1,\"row\":4,\"panelRefName\":\"panel_0\"},{\"size_x\":12,\"size_y\":3,\"panelIndex\":2,\"col\":1,\"row\":1,\"panelRefName\":\"panel_1\"}]",
"timeRestore": false,
"title": "[APM] Errors",
"uiStateJSON": "{\"P-1\": {\"vis\": {\"params\": {\"sort\": {\"columnIndex\": null, \"direction\": null}}}}}",
"version": 1
},
"_type": "dashboard"
},
{
"_id": "22518e70-7c69-11e7-aa55-3b0d52c71c60",
"_migrationVersion": {
"visualization": "7.0.0"
},
"_references": [
{
"id": "apm-*",
"name": "kibanaSavedObjectMeta.searchSourceJSON.index",
"type": "index-pattern"
}
],
"_source": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.index\"}"
},
"title": "Top Errors for Time Period [APM]",
"uiStateJSON": "{\"vis\": {\"params\": {\"sort\": {\"columnIndex\": null, \"direction\": null}}}}",
"version": 1,
"visState": "{\"title\":\"Top Errors for Time Period [APM]\",\"type\":\"table\",\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null},\"perPage\":25,\"showPartialRows\":false,\"totalFunc\":\"sum\",\"showTotal\":false,\"showMetricsAtAllLevels\":false,\"dimensions\":{\"metrics\":[{\"accessor\":1,\"format\":{\"id\":\"string\"},\"params\":{},\"aggType\":\"top_hits\"},{\"accessor\":2,\"format\":{\"id\":\"number\"},\"params\":{},\"aggType\":\"count\"},{\"accessor\":3,\"format\":{\"id\":\"string\"},\"params\":{},\"aggType\":\"top_hits\"},{\"accessor\":4,\"format\":{\"id\":\"string\"},\"params\":{},\"aggType\":\"top_hits\"},{\"accessor\":5,\"format\":{\"id\":\"string\"},\"params\":{},\"aggType\":\"top_hits\"}],\"buckets\":[{\"accessor\":0,\"format\":{\"id\":\"terms\",\"params\":{\"id\":\"string\",\"otherBucketLabel\":\"Other\",\"missingBucketLabel\":\"Missing\"}},\"params\":{},\"aggType\":\"terms\"}]}},\"aggs\":[{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"error.culprit\",\"size\":100,\"order\":\"desc\",\"orderBy\":\"1\",\"otherBucket\":false,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"customLabel\":\"Error Culprit\"}},{\"id\":\"5\",\"enabled\":true,\"type\":\"top_hits\",\"schema\":\"metric\",\"params\":{\"field\":\"error.exception.message\",\"aggregate\":\"concat\",\"size\":1,\"sortField\":\"@timestamp\",\"sortOrder\":\"desc\",\"customLabel\":\"Message\"}},{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{\"customLabel\":\"Number of 
Errors\"}},{\"id\":\"4\",\"enabled\":true,\"type\":\"top_hits\",\"schema\":\"metric\",\"params\":{\"field\":\"error.exception.type\",\"aggregate\":\"concat\",\"size\":1,\"sortField\":\"@timestamp\",\"sortOrder\":\"desc\",\"customLabel\":\"Type\"}},{\"id\":\"6\",\"enabled\":true,\"type\":\"top_hits\",\"schema\":\"metric\",\"params\":{\"field\":\"service.name\",\"aggregate\":\"concat\",\"size\":1,\"sortField\":\"@timestamp\",\"sortOrder\":\"desc\",\"customLabel\":\"App Name\"}},{\"id\":\"7\",\"enabled\":true,\"type\":\"top_hits\",\"schema\":\"metric\",\"params\":{\"field\":\"error.grouping_key\",\"aggregate\":\"concat\",\"size\":1,\"sortField\":\"@timestamp\",\"sortOrder\":\"desc\",\"customLabel\":\"Error Grouping Key\"}}]}"
},
"_type": "visualization"
},
{
"_id": "c618e4e0-7c69-11e7-aa55-3b0d52c71c60",
"_migrationVersion": {
"visualization": "7.0.0"
},
"_references": [
{
"id": "apm-*",
"name": "kibanaSavedObjectMeta.searchSourceJSON.index",
"type": "index-pattern"
}
],
"_source": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.index\"}"
},
"title": "Error Occurrences [APM]",
"uiStateJSON": "{}",
"version": 1,
"visState": "{\"params\":{\"time_field\":\"@timestamp\",\"series\":[{\"line_width\":\"0\",\"terms_field\":\"error.grouping_key\",\"point_size\":1,\"color\":\"rgba(0,156,224,1)\",\"label\":\"Occurrences\",\"metrics\":[{\"type\":\"count\",\"id\":\"61ca57f2-469d-11e7-af02-69e470af7417\"}],\"seperate_axis\":0,\"split_mode\":\"terms\",\"chart_type\":\"bar\",\"stacked\":\"none\",\"axis_position\":\"right\",\"formatter\":\"number\",\"id\":\"61ca57f1-469d-11e7-af02-69e470af7417\",\"fill\":\"1\"}],\"axis_formatter\":\"number\",\"interval\":\">=1m\",\"filter\":\"processor.event:error\",\"show_legend\":0,\"show_grid\":1,\"axis_position\":\"left\",\"type\":\"timeseries\",\"id\":\"61ca57f0-469d-11e7-af02-69e470af7417\"},\"type\":\"metrics\",\"aggs\":[],\"title\":\"Error Occurrences [APM]\"}"
},
"_type": "visualization"
}
]

View File

@ -1,111 +0,0 @@
[
{
"_id": "8d3ed660-7828-11e7-8c47-65b845b5cfb3",
"_migrationVersion": {
"dashboard": "7.0.0"
},
"_references": [
{
"id": "1ffc5e20-7827-11e7-8c47-65b845b5cfb3",
"name": "panel_0",
"type": "visualization"
},
{
"id": "1bdca740-7828-11e7-8c47-65b845b5cfb3",
"name": "panel_1",
"type": "visualization"
},
{
"id": "804ffc40-7828-11e7-8c47-65b845b5cfb3",
"name": "panel_2",
"type": "visualization"
}
],
"_source": {
"description": "",
"hits": 0,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"version\":true,\"highlightAll\":true}"
},
"optionsJSON": "{\"darkTheme\": false}",
"panelsJSON": "[{\"size_x\":12,\"size_y\":5,\"panelIndex\":1,\"col\":1,\"row\":4,\"panelRefName\":\"panel_0\"},{\"size_x\":6,\"size_y\":3,\"panelIndex\":2,\"col\":1,\"row\":1,\"panelRefName\":\"panel_1\"},{\"size_x\":6,\"size_y\":3,\"panelIndex\":3,\"col\":7,\"row\":1,\"panelRefName\":\"panel_2\"}]",
"timeRestore": false,
"title": "[APM] Services",
"uiStateJSON": "{\"P-1\": {\"vis\": {\"params\": {\"sort\": {\"columnIndex\": null, \"direction\": null}}}}}",
"version": 1
},
"_type": "dashboard"
},
{
"_id": "1ffc5e20-7827-11e7-8c47-65b845b5cfb3",
"_migrationVersion": {
"visualization": "7.0.0"
},
"_references": [
{
"id": "apm-*",
"name": "kibanaSavedObjectMeta.searchSourceJSON.index",
"type": "index-pattern"
}
],
"_source": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.index\"}"
},
"title": "Services [APM]",
"uiStateJSON": "{\"vis\": {\"params\": {\"sort\": {\"columnIndex\": null, \"direction\": null}}}}",
"version": 1,
"visState": "{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null},\"perPage\":10,\"showPartialRows\":false,\"totalFunc\":\"sum\",\"showTotal\":false,\"showMeticsAtAllLevels\":false},\"type\":\"table\",\"aggs\":[{\"type\":\"avg\",\"enabled\":true,\"id\":\"1\",\"params\":{\"field\":\"transaction.duration.us\",\"customLabel\":\"Avg. Trans. Time\"},\"schema\":\"metric\"},{\"type\":\"percentiles\",\"enabled\":true,\"id\":\"3\",\"params\":{\"field\":\"transaction.duration.us\",\"percents\":[95],\"customLabel\":\"Trans. Time\"},\"schema\":\"metric\"},{\"type\":\"cardinality\",\"enabled\":true,\"id\":\"4\",\"params\":{\"field\":\"transaction.id\",\"customLabel\":\"Total Transactions\"},\"schema\":\"metric\"},{\"type\":\"cardinality\",\"enabled\":true,\"id\":\"6\",\"params\":{\"field\":\"error.id\",\"customLabel\":\"Errors\"},\"schema\":\"metric\"},{\"type\":\"terms\",\"enabled\":true,\"id\":\"2\",\"params\":{\"orderBy\":\"1\",\"field\":\"service.name\",\"order\":\"desc\",\"size\":1000},\"schema\":\"bucket\"}],\"title\":\"Services [APM]\"}"
},
"_type": "visualization"
},
{
"_id": "1bdca740-7828-11e7-8c47-65b845b5cfb3",
"_migrationVersion": {
"visualization": "7.0.0"
},
"_references": [
{
"id": "apm-*",
"name": "kibanaSavedObjectMeta.searchSourceJSON.index",
"type": "index-pattern"
}
],
"_source": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.index\"}"
},
"title": "Top Services by Transaction Time [APM]",
"uiStateJSON": "{}",
"version": 1,
"visState": "{\"params\":{\"time_field\":\"@timestamp\",\"series\":[{\"line_width\":\"2\",\"terms_field\":\"service.name\",\"point_size\":1,\"color\":\"rgba(0,156,224,1)\",\"value_template\":\"{{value}} ms\",\"metrics\":[{\"field\":\"transaction.duration.us\",\"type\":\"avg\",\"id\":\"61ca57f2-469d-11e7-af02-69e470af7417\"}],\"seperate_axis\":0,\"split_mode\":\"terms\",\"chart_type\":\"line\",\"terms_order_by\":\"61ca57f2-469d-11e7-af02-69e470af7417\",\"stacked\":\"none\",\"axis_position\":\"right\",\"formatter\":\"us,ms,0\",\"id\":\"61ca57f1-469d-11e7-af02-69e470af7417\",\"split_color_mode\":\"gradient\",\"fill\":\"0\"}],\"axis_formatter\":\"number\",\"interval\":\">=1m\",\"show_legend\":1,\"show_grid\":1,\"axis_position\":\"left\",\"type\":\"timeseries\",\"id\":\"61ca57f0-469d-11e7-af02-69e470af7417\"},\"type\":\"metrics\",\"aggs\":[],\"title\":\"Top Services by Transaction Time [APM]\"}"
},
"_type": "visualization"
},
{
"_id": "804ffc40-7828-11e7-8c47-65b845b5cfb3",
"_migrationVersion": {
"visualization": "7.0.0"
},
"_references": [
{
"id": "apm-*",
"name": "kibanaSavedObjectMeta.searchSourceJSON.index",
"type": "index-pattern"
}
],
"_source": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.index\"}"
},
"title": "Top Services by Transaction Per Minute [APM]",
"uiStateJSON": "{}",
"version": 1,
"visState": "{\"params\":{\"time_field\":\"@timestamp\",\"series\":[{\"line_width\":\"2\",\"terms_field\":\"service.name\",\"point_size\":1,\"color\":\"rgba(0,156,224,1)\",\"value_template\":\"{{value}} tpm\",\"metrics\":[{\"type\":\"count\",\"id\":\"61ca57f2-469d-11e7-af02-69e470af7417\"},{\"field\":\"61ca57f2-469d-11e7-af02-69e470af7417\",\"type\":\"cumulative_sum\",\"id\":\"3fcaa6c0-7828-11e7-bb25-2ff6dee07a1b\"},{\"field\":\"3fcaa6c0-7828-11e7-bb25-2ff6dee07a1b\",\"type\":\"derivative\",\"id\":\"467f1cd0-7828-11e7-bb25-2ff6dee07a1b\",\"unit\":\"1m\"},{\"field\":\"467f1cd0-7828-11e7-bb25-2ff6dee07a1b\",\"type\":\"positive_only\",\"id\":\"4bd1b8f0-7828-11e7-bb25-2ff6dee07a1b\",\"unit\":\"\"}],\"seperate_axis\":0,\"split_mode\":\"terms\",\"chart_type\":\"line\",\"terms_order_by\":\"_count\",\"stacked\":\"none\",\"axis_position\":\"right\",\"formatter\":\"number\",\"id\":\"61ca57f1-469d-11e7-af02-69e470af7417\",\"fill\":\"0\"}],\"axis_formatter\":\"number\",\"interval\":\">=1m\",\"show_legend\":1,\"show_grid\":1,\"axis_position\":\"left\",\"type\":\"timeseries\",\"id\":\"61ca57f0-469d-11e7-af02-69e470af7417\"},\"type\":\"metrics\",\"aggs\":[],\"title\":\"Top Apps by Transaction Per Minute [APM]\"}"
},
"_type": "visualization"
}
]

View File

@ -1,67 +0,0 @@
[
{
"_id": "3e3de700-7de0-11e7-b115-df9c90da2df1",
"_migrationVersion": {
"dashboard": "7.0.0"
},
"_references": [
{
"id": "d7735b90-7ddf-11e7-b115-df9c90da2df1",
"name": "panel_0",
"type": "search"
}
],
"_source": {
"description": "",
"hits": 0,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"version\":true,\"highlightAll\":true}"
},
"optionsJSON": "{\"darkTheme\": false}",
"panelsJSON": "[{\"sort\":[\"span.start.us\",\"asc\"],\"col\":1,\"size_x\":12,\"size_y\":23,\"panelIndex\":1,\"columns\":[\"span.type\",\"span.name\",\"span.duration.us\",\"span.start.us\"],\"row\":1,\"panelRefName\":\"panel_0\"}]",
"timeRestore": false,
"title": "[APM] Span Details",
"uiStateJSON": "{}",
"version": 1
},
"_type": "dashboard"
},
{
"_id": "d7735b90-7ddf-11e7-b115-df9c90da2df1",
"_migrationVersion": {
"search": "7.0.0"
},
"_references": [
{
"id": "apm-*",
"name": "kibanaSavedObjectMeta.searchSourceJSON.index",
"type": "index-pattern"
},
{
"id": "apm-*",
"name": "kibanaSavedObjectMeta.searchSourceJSON.filter[0].meta.index",
"type": "index-pattern"
}
],
"_source": {
"columns": [
"span.type",
"span.name",
"span.duration.us",
"span.start.us"
],
"description": "",
"hits": 0,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[{\"query\":{\"match\":{\"processor.event\":{\"query\":\"span\",\"type\":\"phrase\"}}},\"meta\":{\"value\":\"span\",\"disabled\":false,\"alias\":null,\"params\":{\"query\":\"span\",\"type\":\"phrase\"},\"key\":\"processor.event\",\"negate\":false,\"type\":\"phrase\",\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.filter[0].meta.index\"},\"$state\":{\"store\":\"appState\"}}],\"version\":true,\"highlightAll\":true,\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.index\"}"
},
"sort": [
"span.start.us",
"asc"
],
"title": "Spans [APM]",
"version": 1
},
"_type": "search"
}
]

View File

@ -1,111 +0,0 @@
[
{
"_id": "41b5d920-7821-11e7-8c47-65b845b5cfb3",
"_migrationVersion": {
"dashboard": "7.0.0"
},
"_references": [
{
"id": "a2e199b0-7820-11e7-8c47-65b845b5cfb3",
"name": "panel_0",
"type": "visualization"
},
{
"id": "09bcf890-7822-11e7-8c47-65b845b5cfb3",
"name": "panel_1",
"type": "visualization"
},
{
"id": "55606a60-7823-11e7-8c47-65b845b5cfb3",
"name": "panel_2",
"type": "visualization"
}
],
"_source": {
"description": "",
"hits": 0,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"version\":true,\"highlightAll\":true}"
},
"optionsJSON": "{\"darkTheme\": false}",
"panelsJSON": "[{\"size_x\":12,\"size_y\":10,\"panelIndex\":1,\"col\":1,\"row\":4,\"panelRefName\":\"panel_0\"},{\"size_x\":6,\"size_y\":3,\"panelIndex\":2,\"col\":1,\"row\":1,\"panelRefName\":\"panel_1\"},{\"size_x\":6,\"size_y\":3,\"panelIndex\":3,\"col\":7,\"row\":1,\"panelRefName\":\"panel_2\"}]",
"timeRestore": false,
"title": "[APM] Transactions",
"uiStateJSON": "{\"P-1\": {\"vis\": {\"params\": {\"sort\": {\"columnIndex\": null, \"direction\": null}}}}}",
"version": 1
},
"_type": "dashboard"
},
{
"_id": "a2e199b0-7820-11e7-8c47-65b845b5cfb3",
"_migrationVersion": {
"visualization": "7.0.0"
},
"_references": [
{
"id": "apm-*",
"name": "kibanaSavedObjectMeta.searchSourceJSON.index",
"type": "index-pattern"
}
],
"_source": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.index\"}"
},
"title": "Top Transactions for Time Period [APM]",
"uiStateJSON": "{\"vis\": {\"params\": {\"sort\": {\"columnIndex\": null, \"direction\": null}}}}",
"version": 1,
"visState": "{\"type\":\"table\",\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null},\"perPage\":25,\"showPartialRows\":false,\"totalFunc\":\"sum\",\"showTotal\":false,\"showMeticsAtAllLevels\":false},\"aggs\":[{\"params\":{\"orderBy\":\"1\",\"field\":\"transaction.name\",\"customLabel\":\"Transaction\",\"order\":\"desc\",\"size\":1000},\"type\":\"terms\",\"enabled\":true,\"id\":\"2\",\"schema\":\"bucket\"},{\"params\":{\"sortField\":\"@timestamp\",\"customLabel\":\"Type\",\"field\":\"transaction.type\",\"sortOrder\":\"desc\",\"aggregate\":\"concat\",\"size\":1},\"type\":\"top_hits\",\"enabled\":true,\"id\":\"5\",\"schema\":\"metric\"},{\"params\":{\"field\":\"transaction.duration.us\",\"customLabel\":\"Avg. Resp Time (ms)\"},\"type\":\"avg\",\"enabled\":true,\"id\":\"1\",\"schema\":\"metric\"},{\"params\":{\"field\":\"transaction.duration.us\",\"customLabel\":\"Resp Time (ms)\",\"percents\":[95]},\"type\":\"percentiles\",\"enabled\":true,\"id\":\"3\",\"schema\":\"metric\"},{\"params\":{\"sortField\":\"@timestamp\",\"customLabel\":\"View Spans\",\"field\":\"transaction.id\",\"sortOrder\":\"desc\",\"aggregate\":\"concat\",\"size\":1},\"type\":\"top_hits\",\"enabled\":true,\"id\":\"4\",\"schema\":\"metric\"}],\"title\":\"Top Transactions for Time Period [APM]\"}"
},
"_type": "visualization"
},
{
"_id": "09bcf890-7822-11e7-8c47-65b845b5cfb3",
"_migrationVersion": {
"visualization": "7.0.0"
},
"_references": [
{
"id": "apm-*",
"name": "kibanaSavedObjectMeta.searchSourceJSON.index",
"type": "index-pattern"
}
],
"_source": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.index\"}"
},
"title": "Transaction Times [APM]",
"uiStateJSON": "{}",
"version": 1,
"visState": "{\"params\":{\"time_field\":\"@timestamp\",\"series\":[{\"line_width\":\"2\",\"point_size\":1,\"color\":\"rgba(0,156,224,1)\",\"value_template\":\"{{value}} ms\",\"label\":\"Average\",\"metrics\":[{\"field\":\"transaction.duration.us\",\"type\":\"avg\",\"id\":\"61ca57f2-469d-11e7-af02-69e470af7417\"}],\"seperate_axis\":0,\"split_mode\":\"everything\",\"chart_type\":\"line\",\"stacked\":\"none\",\"axis_position\":\"right\",\"formatter\":\"us,ms,0\",\"id\":\"61ca57f1-469d-11e7-af02-69e470af7417\",\"fill\":\"0\"},{\"line_width\":1,\"point_size\":1,\"color\":\"rgba(115,216,255,1)\",\"value_template\":\"{{value}} ms\",\"label\":\"95th Percentile\",\"metrics\":[{\"field\":\"transaction.duration.us\",\"percentiles\":[{\"mode\":\"line\",\"percentile\":\"\",\"shade\":0.2,\"value\":\"95\",\"id\":\"858ec670-7821-11e7-8745-07eaffcb65e5\"}],\"type\":\"percentile\",\"id\":\"79921481-7821-11e7-8745-07eaffcb65e5\"}],\"seperate_axis\":0,\"split_mode\":\"everything\",\"chart_type\":\"line\",\"stacked\":\"none\",\"axis_position\":\"right\",\"formatter\":\"us,ms,0\",\"id\":\"79921480-7821-11e7-8745-07eaffcb65e5\",\"fill\":0.5},{\"line_width\":\"2\",\"point_size\":1,\"color\":\"rgba(254,146,0,1)\",\"value_template\":\"{{value}} ms\",\"label\":\"99th 
Percentile\",\"metrics\":[{\"field\":\"transaction.duration.us\",\"percentiles\":[{\"mode\":\"line\",\"percentile\":\"\",\"shade\":0.2,\"value\":\"99\",\"id\":\"858ec670-7821-11e7-8745-07eaffcb65e5\"}],\"type\":\"percentile\",\"id\":\"c1e42de1-7821-11e7-8745-07eaffcb65e5\"}],\"seperate_axis\":0,\"split_mode\":\"everything\",\"chart_type\":\"line\",\"stacked\":\"none\",\"axis_position\":\"right\",\"formatter\":\"us,ms,0\",\"id\":\"c1e42de0-7821-11e7-8745-07eaffcb65e5\",\"fill\":\"0\"}],\"axis_formatter\":\"number\",\"interval\":\">=1m\",\"show_legend\":1,\"show_grid\":1,\"legend_position\":\"right\",\"axis_position\":\"left\",\"type\":\"timeseries\",\"id\":\"61ca57f0-469d-11e7-af02-69e470af7417\"},\"type\":\"metrics\",\"aggs\":[],\"title\":\"Transaction Times [APM]\"}"
},
"_type": "visualization"
},
{
"_id": "55606a60-7823-11e7-8c47-65b845b5cfb3",
"_migrationVersion": {
"visualization": "7.0.0"
},
"_references": [
{
"id": "apm-*",
"name": "kibanaSavedObjectMeta.searchSourceJSON.index",
"type": "index-pattern"
}
],
"_source": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.index\"}"
},
"title": "Transaction Per Minute [APM]",
"uiStateJSON": "{}",
"version": 1,
"visState": "{\"params\":{\"time_field\":\"@timestamp\",\"series\":[{\"line_width\":\"2\",\"terms_field\":\"transaction.result\",\"point_size\":\"0\",\"color\":\"rgba(115,216,255,1)\",\"value_template\":\"{{value}} tpm\",\"label\":\"\",\"metrics\":[{\"field\":\"transaction.id\",\"type\":\"cardinality\",\"id\":\"61ca57f2-469d-11e7-af02-69e470af7417\"},{\"field\":\"61ca57f2-469d-11e7-af02-69e470af7417\",\"type\":\"cumulative_sum\",\"id\":\"3fcaa6c0-7828-11e7-bb25-2ff6dee07a1b\"},{\"field\":\"3fcaa6c0-7828-11e7-bb25-2ff6dee07a1b\",\"type\":\"derivative\",\"id\":\"467f1cd0-7828-11e7-bb25-2ff6dee07a1b\",\"unit\":\"1m\"},{\"field\":\"467f1cd0-7828-11e7-bb25-2ff6dee07a1b\",\"type\":\"positive_only\",\"id\":\"4bd1b8f0-7828-11e7-bb25-2ff6dee07a1b\",\"unit\":\"\"}],\"seperate_axis\":0,\"split_mode\":\"everything\",\"chart_type\":\"line\",\"stacked\":\"none\",\"axis_position\":\"right\",\"formatter\":\"number\",\"id\":\"61ca57f1-469d-11e7-af02-69e470af7417\",\"fill\":\"0\"}],\"axis_formatter\":\"number\",\"interval\":\">=1m\",\"show_legend\":0,\"show_grid\":1,\"axis_position\":\"left\",\"type\":\"timeseries\",\"id\":\"61ca57f0-469d-11e7-af02-69e470af7417\"},\"type\":\"metrics\",\"aggs\":[],\"title\":\"Transaction Per Minute [APM]\"}"
},
"_type": "visualization"
}
]

View File

@ -1,13 +1,11 @@
FROM alpine:latest as dist
ARG DOCKER_BUILD_DIR
LABEL maintainer aynic.os <support+docker@asycn.io>
RUN apk --no-cache add \
groff \
less \
py-pip \
&& pip install awscli
py-pip3 \
&& pip3 install awscli
RUN apk --no-cache upgrade

View File

@ -1,10 +1,9 @@
FROM alpine:latest as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
ARG GIT_AUTHOR_NAME
ARG GIT_AUTHOR_EMAIL
LABEL maintainer aynic.os <support+docker@asycn.io>
ENV GIT_AUTHOR_NAME=${GIT_AUTHOR_NAME}
ENV GIT_AUTHOR_EMAIL=${GIT_AUTHOR_EMAIL}
ENV GIT_COMMITTER_NAME=${GIT_AUTHOR_NAME}

View File

@ -1,21 +1,22 @@
FROM consul:1.6.1 as dist
ARG DOCKER_BUILD_DIR
FROM consul:1.11.1 as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
ARG DOCKER_GID=999
# install docker
RUN apk add --no-cache bash docker gawk sudo \
&& echo "consul ALL=(root) NOPASSWD: /usr/local/bin/container-list-status" >> /etc/sudoers
# add user consul in group docker
RUN DOCKER_GROUP=$(awk -F: '$3 == '${DOCKER_GID}' {print $1}' < /etc/group) \
&& if [ -n "${DOCKER_GROUP}" ]; then adduser consul ${DOCKER_GROUP}; \
else addgroup -g ${DOCKER_GID} docker && adduser consul docker; \
fi
# install goss
ADD https://github.com/aelsabbahy/goss/releases/latest/download/goss-linux-amd64 /usr/bin/goss
RUN chmod +rx /usr/bin/goss
COPY ${DOCKER_BUILD_DIR}/goss.yml /tests/goss.yml
COPY ${DOCKER_BUILD_DIR}/docker-healthcheck /usr/local/bin/
RUN chmod +rx /usr/local/bin/docker-healthcheck
COPY ${DOCKER_BUILD_DIR}/container-check-status ${DOCKER_BUILD_DIR}/container-list-status /usr/local/bin/
RUN chmod +rx /usr/local/bin/container-check-status /usr/local/bin/container-list-status
HEALTHCHECK CMD goss -g /tests/goss.yml validate --format tap
HEALTHCHECK CMD ((((echo -e 'GET /v1/health/service/consul HTTP/1.0\n' \
|nc -w 1 localhost:8500; echo $? >&3) \
|sed -n '/^\[/,$p' \
|jq '.[].Checks[0].Output' >&4) 3>&1) \
| (read err; exit $err)) 4>&1
FROM dist as master
ARG DOCKER_BUILD_DIR

View File

@ -1,24 +0,0 @@
#!/bin/bash
# https://github.com/hashicorp/consul/issues/3182
if [ $# -ne 1 ]; then
>&2 echo "Invalid parameters: '$@'"
echo "USAGE: $0 <container-id|container-name|container-hostname|container-ip>"
exit 2
fi
CONTAINER_ID="$1"
# CONTAINER_STATUS=$(sudo container-list-status |awk '/\<'${CONTAINER_ID}'\>/ {print $NF}')
read -d "\n" CONTAINER_STATUS CONTAINER_OUTPUT <<< $(sudo container-list-status |awk 'BEGIN {FS="\t"; RS="\0"} /\<'${CONTAINER_ID}'\>/ {print $1,$NF}')
echo ${CONTAINER_STATUS:-undefined}: "${CONTAINER_OUTPUT}"
case "${CONTAINER_STATUS}" in
healthy)
exit 0
;;
starting)
exit 1
;;
esac
exit 2

View File

@ -1,5 +0,0 @@
#!/bin/sh
# https://github.com/hashicorp/consul/issues/3182
# docker inspect -f '{{.Id}} {{.Config.Hostname}} {{.Name}} {{range .NetworkSettings.Networks}}{{.IPAddress}} {{end}} {{.State.Health.Status}}' $(docker ps -q) 2>/dev/null
docker container inspect --format '{{.State.Health.Status}}{{printf "\t"}}{{.Id}}{{printf "\t"}}{{.Name}}{{printf "\t"}}{{.Config.Hostname}}{{printf "\t"}}{{range .NetworkSettings.Networks}}{{.IPAddress}}{{printf "\t"}}{{end}}{{printf "\t"}}{{$output := ""}}{{range .State.Health.Log}}{{$output = .Output}}{{end}}{{$output}}{{printf "%c" 0}}' $(docker ps -q) 2>/dev/null

View File

@ -0,0 +1,43 @@
#!/bin/sh
# link: https://github.com/hashicorp/consul/issues/3182
# author: Yann "aya" Autissier
# license: GPL
set -eu
DOCKER_SOCK=${DOCKER_SOCK:-/var/run/docker.sock}
if ! which curl > /dev/null || ! which jq >/dev/null; then
>&2 echo "ERROR: curl or jq not found"
exit 2
fi
if [ $# -ne 1 ]; then
>&2 echo "ERROR: invalid parameter '$*'"
echo "USAGE: $0 container-id|container-name|container-ip"
exit 2
fi
{
{
{
# list all dockers
for docker in $(curl --disable --fail --show-error --silent --unix-socket "${DOCKER_SOCK}" http://localhost/containers/json |jq -r '.[].Id'); do
# print "health_status id name ip_address health_output" for each docker
curl --disable --fail --show-error --silent --unix-socket "${DOCKER_SOCK}" "http://localhost/containers/${docker}/json" \
|jq -r '[.State.Health.Status, .Id, .Name, .NetworkSettings.IPAddress, .State.Health.Log[0].Output] |@tsv'
# shorten id: .Id |capture("(?<id>.{12})").id
# print "health_status" and "health_output" for line matching $1
done |awk -F '\t' '/\<'"$1"'\>/ {print $1 | "cat >&3; exec 3>&-"; print $NF | "cat >&4";}'
} 3>&1
} | {
read -r status ||:
case "$status" in
healthy) exit=0;;
starting) exit=1;;
*) exit=2;;
esac
# exit according to "health_status"
exit $exit
}
# print "health_output"
} 4>&1

View File

@ -1,18 +0,0 @@
file:
/bin/consul:
exists: true
filetype: file
mode: "0755"
owner: root
sha256: 99bacb9dc1c6b7eaff75326e4ae0396ac2f29eb8ab95bc2124c718d926c3aef4
port:
tcp6:8500:
listening: true
ip:
process:
consul:
running: true
user:
consul:
exists: true
uid: 100

View File

@ -1,4 +1,5 @@
FROM docker.elastic.co/apm/apm-server-oss:7.4.2 as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
# config

View File

@ -1,4 +1,5 @@
FROM docker.elastic.co/apm/apm-server-oss:7.4.2 as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
# config

View File

@ -1,9 +1,8 @@
FROM alpine:latest as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
ARG CURATOR_VERSION=5.8.3
LABEL maintainer aynic.os <support+docker@asycn.io>
RUN apk --no-cache add \
bash \
py-pip \

View File

@ -1,9 +0,0 @@
cluster.name: "meup-cluster"
network.host: 0.0.0.0
discovery.zen.minimum_master_nodes: 1
http.cors.enabled: true
http.cors.allow-credentials: true
http.cors.allow-methods: OPTIONS,HEAD,GET,POST,PUT,DELETE
http.cors.max-age: 0
http.cors.allow-origin: "*"
http.cors.allow-headers : X-Requested-With,X-Auth-Token,Content-Type,Content-Length

24
docker/fabio/Dockerfile Normal file
View File

@ -0,0 +1,24 @@
FROM golang:1.15-alpine AS build
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
ARG GIT_AUTHOR_NAME
ARG GIT_AUTHOR_EMAIL
ENV GIT_AUTHOR_NAME=${GIT_AUTHOR_NAME}
ENV GIT_AUTHOR_EMAIL=${GIT_AUTHOR_EMAIL}
ENV GIT_COMMITTER_NAME=${GIT_AUTHOR_NAME}
ENV GIT_COMMITTER_EMAIL=${GIT_AUTHOR_EMAIL}
WORKDIR /go/src/github.com/fabiolb/fabio
COPY . .
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go test -mod=vendor -trimpath -ldflags "-s -w" ./...
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -mod=vendor -trimpath -ldflags "-s -w"
FROM alpine:3.12 as master
RUN apk update && apk add --no-cache ca-certificates
COPY --from=build /src/fabio /usr/bin
ADD fabio.properties /etc/fabio/fabio.properties
EXPOSE 9998 9999
ENTRYPOINT ["/usr/bin/fabio"]
CMD ["-cfg", "/etc/fabio/fabio.properties"]

View File

@ -1,10 +1,8 @@
FROM golang:1.12-alpine AS builder
ARG DOCKER_BUILD_DIR
ARG GOOFYS_VERSION=v0.20.0
FROM golang:1.15-alpine AS build
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG GOOFYS_VERSION=v0.24.0
WORKDIR /go/src/github.com/kahing/goofys/
WORKDIR /src/github.com/kahing/goofys/
RUN apk --no-cache upgrade \
&& apk --no-cache add git make \
@ -14,10 +12,9 @@ RUN apk --no-cache upgrade \
&& make install
FROM alpine:latest as dist
ARG DOCKER_BUILD_DIR
RUN apk add --no-cache ca-certificates
COPY --from=builder /go/bin/goofys /bin/goofys
COPY --from=build /go/bin/goofys /bin/goofys
ENTRYPOINT ["/bin/goofys"]

View File

@ -1,4 +1,5 @@
FROM grafana/grafana:latest as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
ARG AWS_ACCESS_KEY
ARG AWS_SECRET_KEY
@ -6,8 +7,6 @@ ARG MYSQL_GRAFANA_USER
ARG MYSQL_GRAFANA_PASSWORD
ARG MYSQL_GRAFANA_DB
LABEL maintainer aynic.os <support+docker@asycn.io>
COPY ${DOCKER_BUILD_DIR}/config.ini /etc/grafana/config.ini
COPY ${DOCKER_BUILD_DIR}/dashboards /etc/grafana/dashboards
COPY ${DOCKER_BUILD_DIR}/provisioning /etc/grafana/provisioning

View File

@ -1,10 +1,9 @@
FROM httpd:alpine as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
ARG HTTPD_LOAD_MODULE="env expires headers lbmethod_bybusyness lbmethod_byrequests proxy proxy_balancer proxy_fcgi proxy_http setenvif slotmem_shm reqtimeout rewrite"
ARG HTTPD_CONF_EXTRA="default info mpm vhosts"
LABEL maintainer aynic.os <support+docker@asycn.io>
RUN sed -E -i \
-e 's!^#?\s*(LoadModule ('${HTTPD_LOAD_MODULE// /|}')_module modules/mod_('${HTTPD_LOAD_MODULE// /|}').so)\s*!\1!g' \
-e 's!^#?\s*(Include conf/extra/httpd-('${HTTPD_CONF_EXTRA// /|}').conf)\s*!\1!g' \

View File

@ -1,7 +1,6 @@
FROM mysql:5.6.44 as dist
ARG DOCKER_BUILD_DIR
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
# config
COPY ${DOCKER_BUILD_DIR}/conf.d/all.cnf /etc/mysql/conf.d/

View File

@ -1,7 +1,6 @@
FROM alpine:latest as dist
ARG DOCKER_BUILD_DIR
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
RUN apk --no-cache add libffi openssl python3 py3-pip py3-netifaces

View File

@ -1,7 +1,6 @@
FROM alpine:latest as dist
ARG DOCKER_BUILD_DIR
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
ENV PACKER_VERSION=1.6.6

View File

@ -1,11 +1,10 @@
FROM alpine:3.11 as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
ARG VERSION_PDNS_REC=4.2.1
ARG VERSION_PDNS_AUTH=4.2.1
ARG VERSION_PDNS_DNSDIST=1.4.0
LABEL maintainer aynic.os <support+docker@asycn.io>
RUN apk upgrade --no-cache \
&& apk add --no-cache --virtual .build-deps \
autoconf \

View File

@ -1,4 +1,5 @@
FROM phabricator/daemon:latest as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
RUN apt-get update \

View File

@ -1,4 +1,5 @@
FROM phabricator/phabricator:latest as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
RUN { \

View File

@ -1,4 +1,5 @@
FROM php:5.6-fpm-alpine as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
ARG AMQP_VERSION=stable
ARG APCU_VERSION=4.0.11
@ -18,8 +19,6 @@ ARG TWIG_VERSION=1.35.3
ARG XCACHE_VERSION=3.2.0
ARG XDEBUG_VERSION=2.5.5
LABEL maintainer aynic.os <support+docker@asycn.io>
RUN apk --no-cache upgrade \
&& apk add --no-cache --virtual .build-deps \
$PHPIZE_DEPS \

View File

@ -1,4 +1,5 @@
FROM php:7.0-fpm-alpine as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
ARG AMQP_VERSION=stable
ARG AST_VERSION=stable
@ -25,8 +26,6 @@ ARG XDEBUG_VERSION=2.7.2
ARG XHPROF_VERSION=2.2.0
ARG YAML_VERSION=stable
LABEL maintainer aynic.os <support+docker@asycn.io>
RUN apk --no-cache upgrade \
&& apk add --no-cache --virtual .build-deps \
$PHPIZE_DEPS \

View File

@ -1,4 +1,5 @@
FROM php:7.1-fpm-alpine as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
ARG AMQP_VERSION=stable
ARG AST_VERSION=stable
@ -25,8 +26,6 @@ ARG XDEBUG_VERSION=stable
ARG XHPROF_VERSION=2.2.0
ARG YAML_VERSION=stable
LABEL maintainer aynic.os <support+docker@asycn.io>
RUN apk --no-cache upgrade \
&& apk add --no-cache --virtual .build-deps \
$PHPIZE_DEPS \

View File

@ -1,4 +1,5 @@
FROM php:7.2-fpm-alpine as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
ARG AMQP_VERSION=stable
ARG AST_VERSION=stable
@ -25,8 +26,6 @@ ARG XDEBUG_VERSION=stable
ARG XHPROF_VERSION=2.2.0
ARG YAML_VERSION=stable
LABEL maintainer aynic.os <support+docker@asycn.io>
RUN apk --no-cache upgrade \
&& apk add --no-cache --virtual .build-deps \
$PHPIZE_DEPS \

View File

@ -1,4 +1,5 @@
FROM php:7.3-fpm-alpine as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
ARG AMQP_VERSION=stable
ARG AST_VERSION=stable
@ -24,8 +25,6 @@ ARG XDEBUG_VERSION=stable
ARG XHPROF_VERSION=2.2.0
ARG YAML_VERSION=stable
LABEL maintainer aynic.os <support+docker@asycn.io>
RUN apk --no-cache upgrade \
&& apk add --no-cache --virtual .build-deps \
$PHPIZE_DEPS \

View File

@ -1,9 +1,9 @@
FROM quay.io/prometheus/alertmanager:latest as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
FROM dist as master
ARG DOCKER_BUILD_DIR
ARG SLACK_WEBHOOK_ID
COPY ${DOCKER_BUILD_DIR}/config.tmpl /etc/alertmanager/config.tmpl
RUN sed 's@SLACK_WEBHOOK_ID@'"${SLACK_WEBHOOK_ID:-UNDEFINED}"'@g' /etc/alertmanager/config.tmpl > /etc/alertmanager/alertmanager.yml
FROM dist as master
ARG DOCKER_BUILD_DIR

View File

@ -1,7 +1,7 @@
FROM quay.io/prometheus/blackbox-exporter:latest as dist
ARG DOCKER_BUILD_DIR
COPY ${DOCKER_BUILD_DIR}/config.yml /etc/blackbox_exporter/config.yml
LABEL maintainer aynic.os <support+docker@asycn.io>
FROM dist as master
ARG DOCKER_BUILD_DIR
COPY ${DOCKER_BUILD_DIR}/config.yml /etc/blackbox_exporter/config.yml

View File

@ -1,10 +1,16 @@
FROM quay.io/prometheus/prometheus:latest as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
COPY ${DOCKER_BUILD_DIR}/docker-entrypoint.sh /
ENTRYPOINT ["/docker-entrypoint.sh"]
CMD []
FROM dist as master
ARG DOCKER_BUILD_DIR
ARG MONITORING_PRIMARY_TARGETS_BLACKBOX
ARG MONITORING_SECONDARY_TARGETS_BLACKBOX
LABEL maintainer aynic.os <support+docker@asycn.io>
COPY ${DOCKER_BUILD_DIR}/prometheus.tmpl /etc/prometheus/prometheus.tmpl
COPY ${DOCKER_BUILD_DIR}/alert-rules.yml /etc/prometheus/alert-rules.yml
@ -13,10 +19,3 @@ COPY ${DOCKER_BUILD_DIR}/alert-rules.yml /etc/prometheus/alert-rules.yml
RUN sed \
-e 's|MONITORING_PRIMARY_TARGETS_BLACKBOX|'" - ${MONITORING_PRIMARY_TARGETS_BLACKBOX// /\\n - }"'|; s|MONITORING_SECONDARY_TARGETS_BLACKBOX|'" - ${MONITORING_SECONDARY_TARGETS_BLACKBOX// /\\n - }"'|' \
/etc/prometheus/prometheus.tmpl > /etc/prometheus/prometheus.yml
COPY ${DOCKER_BUILD_DIR}/docker-entrypoint.sh /
ENTRYPOINT ["/docker-entrypoint.sh"]
CMD []
FROM dist as master
ARG DOCKER_BUILD_DIR

View File

@ -1,10 +1,9 @@
FROM golang:1.9.4-alpine3.7 AS builder
FROM golang:1-alpine AS build
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
ARG GIT_AUTHOR_NAME
ARG GIT_AUTHOR_EMAIL
LABEL maintainer aynic.os <support+docker@asycn.io>
ENV GIT_AUTHOR_NAME=${GIT_AUTHOR_NAME}
ENV GIT_AUTHOR_EMAIL=${GIT_AUTHOR_EMAIL}
ENV GIT_COMMITTER_NAME=${GIT_AUTHOR_NAME}
@ -14,29 +13,37 @@ WORKDIR /go/src/github.com/gliderlabs/registrator/
RUN \
apk add --no-cache curl git \
&& git clone https://github.com/gliderlabs/registrator/ . \
# TEMPORARY DISABLED - TODO: check with new upstream master
# && git reset --hard da90d170da9dd7e1a8d9a13429d44686dc3d118f \
# # -useIpFromNetwork command line option \
# && git fetch origin pull/596/head \
# && git merge --no-edit 8d904c60949e310893a25c8af3636b0151334dd4 \
# # convert check.Script to check.Args \
# && git fetch origin pull/627/head \
# && git merge --no-edit ed053c364e3ba941aeca9ab0d8791b051ff4dede \
# # skip tls verification \
&& git reset --hard 4322fe00304d6de661865721b073dc5c7e750bd2 \
# -useIpFromNetwork \
&& git fetch origin pull/596/head \
&& git merge --no-edit 8d904c60949e310893a25c8af3636b0151334dd4 \
# fix SERVICE_CHECK_SCRIPT
&& git fetch origin pull/686/head \
&& git merge --no-edit 097305157a6a2c0c236fa430c17498c895536782 \
# # skip tls verification
# && git fetch origin pull/661/head \
# && git merge --no-edit 38fc83ac07b4a070be71079cb810429d94a60205 \
# # prevent publishing ip twice \
# && git fetch origin pull/703/head \
# && git merge --no-edit b628dcd0edacfb2d3e5f0a6f486b23339f35e82a \
# # -useIpFromEnv
# && git fetch origin pull/674/head \
# && git merge --no-edit 4fe9e216d9747e25ae5aa9d40f2246861c032dd1 \
&& curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh \
&& dep ensure -vendor-only \
&& go mod init \
&& go mod vendor \
&& CGO_ENABLED=0 GOOS=linux go build \
-a -installsuffix cgo \
-ldflags "-X main.Version=$(cat VERSION)" \
-o bin/registrator \
-o /go/bin/registrator \
.
FROM alpine:3.7 as dist
FROM alpine:latest as dist
ARG DOCKER_BUILD_DIR
RUN apk add --no-cache ca-certificates
COPY --from=builder /go/src/github.com/gliderlabs/registrator/bin/registrator /bin/registrator
COPY --from=build /go/bin/registrator /bin/registrator
ENTRYPOINT ["/bin/registrator"]

View File

@ -1,7 +1,6 @@
FROM alpine:latest as dist
ARG DOCKER_BUILD_DIR
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
WORKDIR /usr/src

View File

@ -1,9 +1,8 @@
FROM alpine:latest as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
ARG S3FS_VERSION=v1.85
LABEL maintainer aynic.os <support+docker@asycn.io>
# Install s3fs-fuse
RUN apk --no-cache upgrade \
&& apk --no-cache add --virtual .build-deps \

View File

@ -1,10 +1,9 @@
FROM sematext/logagent:latest as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
ARG GIT_AUTHOR_NAME
ARG GIT_AUTHOR_EMAIL
LABEL maintainer aynic.os <support+docker@asycn.io>
ENV GIT_AUTHOR_NAME=${GIT_AUTHOR_NAME}
ENV GIT_AUTHOR_EMAIL=${GIT_AUTHOR_EMAIL}
ENV GIT_COMMITTER_NAME=${GIT_AUTHOR_NAME}

View File

@ -1,7 +1,6 @@
FROM alpine:latest as dist
ARG DOCKER_BUILD_DIR
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
# Install dependencies
RUN apk add --no-cache \

View File

@ -1,7 +1,6 @@
FROM hashicorp/terraform:light as dist
ARG DOCKER_BUILD_DIR
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
RUN apk --no-cache upgrade

View File

@ -1,9 +1,8 @@
FROM ubuntu:18.04 as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
# https://github.com/theia-ide/theia-apps/blob/master/theia-full-docker/Dockerfile
LABEL maintainer aynic.os <support+docker@asycn.io>
ENV DEBIAN_FRONTEND noninteractive
#Common deps

View File

@ -1,9 +1,8 @@
FROM alpine:3.11 as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
ARG S3FS_VERSION=v1.85
LABEL maintainer aynic.os <support+docker@asycn.io>
# Install s3fs-fuse and sftpserver
RUN apk --no-cache upgrade \
&& apk --no-cache add --virtual .build-deps \

View File

@ -8,7 +8,7 @@ bootstrap: bootstrap-git bootstrap-docker app-bootstrap ## Update application fi
# target bootstrap-docker: Build and start application dockers
# on local host
.PHONY: boostrap-docker
.PHONY: bootstrap-docker
bootstrap-docker: install-bin-docker setup-docker-group
# target bootstrap-git: Fire update-app

View File

@ -7,7 +7,7 @@ BUILD_LABEL_VARS ?= org.label-schema.% org.opencontainers.% os.my
BUILD_LABEL_ARGS ?= $(foreach var,$(filter $(BUILD_LABEL_VARS),$(MAKE_FILE_VARS)),$(if $($(var)),$(var)='$($(var))'))
BUILD_LICENSE ?= GPL-3.0
BUILD_NAME ?= $(COMPOSE_SERVICE_NAME)-$(BUILD_SERVICE)
BUILD_SERVICE ?= undef
BUILD_SERVICE ?= $(or $(service),undefined)
BUILD_STATUS ?= $(shell git status -uno --porcelain 2>/dev/null)
org.label-schema.build-date ?= $(BUILD_DATE)

View File

@ -12,6 +12,7 @@ endif
COMPOSE_IGNORE_ORPHANS ?= false
COMPOSE_PROJECT_NAME ?= $(APP_ENV)$(subst /,,$(subst -,,$(APP_PATH)))
COMPOSE_SERVICE_NAME ?= $(subst _,-,$(COMPOSE_PROJECT_NAME))
COMPOSE_VERSION ?= 1.29.2
CONTEXT += COMPOSE_FILE DOCKER_REPOSITORY
CONTEXT_DEBUG += DOCKER_BUILD_TARGET DOCKER_IMAGE_TAG DOCKER_REGISTRY DOCKER_SERVICE DOCKER_SERVICES
DOCKER_AUTHOR ?= $(DOCKER_AUTHOR_NAME) <$(DOCKER_AUTHOR_EMAIL)>
@ -25,6 +26,7 @@ DOCKER_BUILD_TARGET ?= $(if $(filter $(ENV),$(DOCKER_BUILD_TARGETS))
DOCKER_BUILD_TARGET_DEFAULT ?= master
DOCKER_BUILD_TARGETS ?= $(ENV_DEPLOY)
DOCKER_BUILD_VARS ?= APP BRANCH DOCKER_GID DOCKER_REPOSITORY GID GIT_AUTHOR_EMAIL GIT_AUTHOR_NAME SSH_BASTION_HOSTNAME SSH_BASTION_USERNAME SSH_PRIVATE_IP_RANGE SSH_PUBLIC_HOST_KEYS SSH_REMOTE_HOSTS UID USER VERSION
DOCKER_COMPOSE ?= $(if $(DOCKER_RUN),docker/compose:$(COMPOSE_VERSION),$(or $(shell docker compose >/dev/null 2>&1 && printf 'docker compose\n'),docker-compose))
DOCKER_COMPOSE_DOWN_OPTIONS ?=
DOCKER_COMPOSE_UP_OPTIONS ?= -d
DOCKER_IMAGE_TAG ?= $(if $(filter $(ENV),$(ENV_DEPLOY)),$(VERSION),$(if $(DRONE_BUILD_NUMBER),$(DRONE_BUILD_NUMBER),latest))
@ -46,12 +48,6 @@ DOCKER_SERVICES ?= $(eval IGNORE_DRYRUN := true)$(shell $(call d
DOCKER_SHELL ?= $(SHELL)
ENV_VARS += COMPOSE_PROJECT_NAME COMPOSE_SERVICE_NAME DOCKER_BUILD_TARGET DOCKER_IMAGE_TAG DOCKER_REGISTRY DOCKER_REPOSITORY DOCKER_SHELL
ifneq ($(DOCKER_RUN),)
DOCKER_COMPOSE ?= docker/compose:$(COMPOSE_VERSION)
else
DOCKER_COMPOSE ?= $(or $(shell docker compose >/dev/null 2>&1 && printf 'docker compose\n'),docker-compose)
endif
ifeq ($(DRONE), true)
APP_PATH_PREFIX := $(DRONE_BUILD_NUMBER)
DOCKER_BUILD_CACHE := false
@ -87,7 +83,8 @@ endef
define docker-build
$(call INFO,docker-build,$(1)$(comma) $(2)$(comma) $(3))
$(eval path := $(patsubst %/,%,$(1)))
$(eval tag := $(or $(2),$(DOCKER_REPOSITORY)/$(lastword $(subst /, ,$(path))):$(DOCKER_IMAGE_TAG)))
$(eval service := $(lastword $(subst /, ,$(path))))
$(eval tag := $(or $(2),$(DOCKER_REPOSITORY)/$(service):$(DOCKER_IMAGE_TAG)))
$(eval target := $(subst ",,$(subst ',,$(or $(3),$(DOCKER_BUILD_TARGET)))))
$(eval image_id := $(shell docker images -q $(tag) 2>/dev/null))
$(eval build_image := $(or $(filter false,$(DOCKER_BUILD_CACHE)),$(if $(image_id),,true)))

View File

@ -4,7 +4,7 @@
# target debug: Print more informations
.PHONY: debug
debug:
$(MAKE) doc help profile DEBUG=true
$(MAKE) help profile DEBUG=true
# target debug-%: Print value of %
.PHONY: debug-%

View File

@ -1,6 +1,5 @@
COMPOSE_PROJECT_NAME_MYOS ?= $(USER_ENV)_myos
COMPOSE_PROJECT_NAME_NODE ?= node
COMPOSE_VERSION ?= 1.29.2
DOCKER_ENV_ARGS ?= $(docker_env_args)
DOCKER_EXEC_OPTIONS ?=
DOCKER_GID ?= $(call gid,docker)
@ -15,6 +14,8 @@ DOCKER_NETWORK_PRIVATE ?= $(USER_ENV)
DOCKER_NETWORK_PUBLIC ?= node
DOCKER_REPOSITORY_MYOS ?= $(subst _,/,$(COMPOSE_PROJECT_NAME_MYOS))
DOCKER_REPOSITORY_NODE ?= $(subst _,/,$(COMPOSE_PROJECT_NAME_NODE))
# DOCKER_RUN: if empty, run system command, else run it in a docker
DOCKER_RUN ?= $(if $(filter-out false False FALSE,$(DOCKER)),$(DOCKER))
# DOCKER_RUN_OPTIONS: default options of `docker run` command
DOCKER_RUN_OPTIONS += --rm -it
# DOCKER_RUN_VOLUME: options -v of `docker run` command to mount additionnal volumes

View File

@ -43,7 +43,6 @@ CONTEXT ?= ENV $(shell awk 'BEGIN {FS="="}; $$1 !~ /^(\#
CONTEXT_DEBUG ?= MAKEFILE_LIST DOCKER_ENV_ARGS ENV_ARGS APPS GIT_AUTHOR_EMAIL GIT_AUTHOR_NAME MAKE_DIR MAKE_SUBDIRS MAKE_CMD_ARGS MAKE_ENV_ARGS UID USER
DEBUG ?=
DOCKER ?= $(shell type -p docker)
DOCKER_RUN ?= $(if $(filter-out false False FALSE,$(DOCKER)),$(DOCKER))
DOMAIN ?= localhost
DRONE ?= false
DRYRUN ?= false

View File

@ -6,6 +6,8 @@ services:
args:
- DOCKER_BUILD_DIR=docker/cli
- GID=${GID}
- GIT_AUTHOR_NAME=${GIT_AUTHOR_NAME}
- GIT_AUTHOR_EMAIL=${GIT_AUTHOR_EMAIL}
- UID=${UID}
- USER=${USER}
- SSH_BASTION_HOSTNAME=${SSH_BASTION_HOSTNAME}
@ -14,7 +16,6 @@ services:
- SSH_PRIVATE_IP_RANGE=${SSH_RIVATE_IP_RANGE}
context: ../..
dockerfile: docker/cli/Dockerfile
target: ${DOCKER_BUILD_TARGET}
command: tail -f /dev/null
container_name: ${DOCKER_NAME_CLI}
depends_on:
@ -38,7 +39,6 @@ services:
- USER=${USER}
context: ../..
dockerfile: docker/ssh/Dockerfile
target: ${DOCKER_BUILD_TARGET}
container_name: ${DOCKER_NAME_SSH}
image: ${DOCKER_IMAGE_SSH}:${DOCKER_IMAGE_TAG}
networks:

View File

@ -4,9 +4,9 @@ services:
apm-server-oss:
build:
args:
- DOCKER_BUILD_DIR=docker/apm-server-oss
- DOCKER_BUILD_DIR=docker/elastic/apm-server-oss
context: ../..
dockerfile: docker/apm-server-oss/Dockerfile
dockerfile: docker/elastic/apm-server-oss/Dockerfile
image: ${DOCKER_REPOSITORY}/apm-server-oss:${DOCKER_IMAGE_TAG}
command: -c apm-server.yml --strict.perms=false -e -E output.elasticsearch.hosts=["${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}"] -E output.elasticsearch.protocol=${ELASTICSEARCH_PROTOCOL} -E output.elasticsearch.username=${ELASTICSEARCH_USERNAME} -E output.elasticsearch.password=${ELASTICSEARCH_PASSWORD} -E apm-server.register.ingest.pipeline.enabled=false
labels:

View File

@ -7,7 +7,6 @@ services:
- DOCKER_BUILD_DIR=docker/elastic/curator
context: ../..
dockerfile: docker/elastic/curator/Dockerfile
target: ${DOCKER_BUILD_TARGET}
environment:
- DEPLOY=${DEPLOY}
- HOSTS=${ELASTICSEARCH_PROTOCOL}://${ELASTICSEARCH_HOST}

View File

@ -5,7 +5,7 @@ services:
environment:
- MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}
labels:
- SERVICE_CHECK_SCRIPT=container-check-status $$SERVICE_IP
- SERVICE_CHECK_SCRIPT=docker-healthcheck $$SERVICE_IP
- SERVICE_3306_NAME=${COMPOSE_SERVICE_NAME}-mysql-3306
networks:
- private

View File

@ -38,13 +38,13 @@ services:
- consul:/consul/data
- /var/run/docker.sock:/var/run/docker.sock
fabio:
image: fabiolb/fabio:latest
command: -registry.backend "consul" -registry.consul.addr "consul:8500" -registry.consul.token "$FABIO_CONSUL_HTTP_TOKEN" -proxy.addr ":80,:443;cs=local" -proxy.cs "cs=local;type=file;cert=/certs/${DOMAIN}.crt.pem;key=/certs/${DOMAIN}.key.pem"
depends_on:
- consul
extra_hosts:
- consul:${DOCKER_HOST_INET}
hostname: ${HOSTNAME}
image: fabiolb/fabio:latest
labels:
- SERVICE_80_CHECK_TCP=true
- SERVICE_80_NAME=${COMPOSE_SERVICE_NAME}-fabio-80
@ -83,8 +83,11 @@ services:
build:
args:
- DOCKER_BUILD_DIR=docker/registrator
- GIT_AUTHOR_NAME=${GIT_AUTHOR_NAME}
- GIT_AUTHOR_EMAIL=${GIT_AUTHOR_EMAIL}
context: ../..
dockerfile: docker/registrator/Dockerfile
image: ${DOCKER_REPOSITORY_NODE}/registrator:${DOCKER_IMAGE_TAG}
command: -internal -cleanup -deregister always -resync=30 -useIpFromNetwork node -useIpFromLabel SERVICE_ADDRESS consul://consul:8500
depends_on:
- consul
@ -93,7 +96,6 @@ services:
extra_hosts:
- consul:${DOCKER_HOST_INET}
hostname: ${HOSTNAME}
image: ${DOCKER_REPOSITORY_NODE}/registrator:${DOCKER_IMAGE_TAG}
network_mode: host
restart: always
volumes:

View File

@ -4,10 +4,10 @@ services:
alertmanager:
build:
args:
- DOCKER_BUILD_DIR=docker/alertmanager
- DOCKER_BUILD_DIR=docker/prometheus/alertmanager
- SLACK_WEBHOOK_ID=${ALERTMANAGER_SLACK_WEBHOOK_ID}
context: ../..
dockerfile: docker/alertmanager/Dockerfile
dockerfile: docker/prometheus/alertmanager/Dockerfile
image: ${DOCKER_REPOSITORY}/alertmanager:${DOCKER_IMAGE_TAG}
labels:
- SERVICE_9093_NAME=${COMPOSE_SERVICE_NAME}-alertmanager-9093

View File

@ -4,9 +4,9 @@ services:
blackbox:
build:
args:
- DOCKER_BUILD_DIR=docker/blackbox
- DOCKER_BUILD_DIR=docker/prometheus/blackbox-exporter
context: ../..
dockerfile: docker/blackbox/Dockerfile
dockerfile: docker/prometheus/blackbox-exporter/Dockerfile
image: ${DOCKER_REPOSITORY}/blackbox:${DOCKER_IMAGE_TAG}
labels:
- SERVICE_9115_NAME=${COMPOSE_SERVICE_NAME}-blackbox-9115

View File

@ -4,9 +4,9 @@ services:
es-exporter:
build:
args:
- DOCKER_BUILD_DIR=docker/es-exporter
- DOCKER_BUILD_DIR=docker/prometheus/es-exporter
context: ../..
dockerfile: docker/es-exporter/Dockerfile
dockerfile: docker/prometheus/es-exporter/Dockerfile
command: -e ${ES_EXPORTER_ELASTICSEARCH_URL}
image: ${DOCKER_REPOSITORY}/es-exporter:${DOCKER_IMAGE_TAG}
labels:

View File

@ -4,11 +4,11 @@ services:
prometheus:
build:
args:
- DOCKER_BUILD_DIR=docker/prometheus
- DOCKER_BUILD_DIR=docker/prometheus/prometheus
- MONITORING_PRIMARY_TARGETS_BLACKBOX=${PROMETHEUS_MONITORING_PRIMARY_TARGETS_BLACKBOX}
- MONITORING_SECONDARY_TARGETS_BLACKBOX=${PROMETHEUS_MONITORING_SECONDARY_TARGETS_BLACKBOX}
context: ../..
dockerfile: docker/prometheus/Dockerfile
dockerfile: docker/prometheus/prometheus/Dockerfile
image: ${DOCKER_REPOSITORY}/prometheus:${DOCKER_IMAGE_TAG}
labels:
- SERVICE_9090_NAME=${COMPOSE_SERVICE_NAME}-prometheus-9090

View File

@ -5,9 +5,10 @@ services:
build:
args:
- DOCKER_BUILD_DIR=docker/sematext/logagent
- GIT_AUTHOR_NAME=${GIT_AUTHOR_NAME}
- GIT_AUTHOR_EMAIL=${GIT_AUTHOR_EMAIL}
context: ../..
dockerfile: docker/sematext/logagent/Dockerfile
target: ${DOCKER_BUILD_TARGET}
image: ${DOCKER_REPOSITORY}/logagent:${DOCKER_IMAGE_TAG}
environment:
- LOGAGENT_ARGS=-u 514 --docker /tmp/docker.sock --dockerEvents

View File

@ -5,9 +5,10 @@ services:
build:
args:
- DOCKER_BUILD_DIR=docker/theia
- GIT_AUTHOR_NAME=${GIT_AUTHOR_NAME}
- GIT_AUTHOR_EMAIL=${GIT_AUTHOR_EMAIL}
context: ../..
dockerfile: docker/theia/Dockerfile
target: ${DOCKER_BUILD_TARGET}
environment:
- ENV=${ENV}
- MONOREPO_DIR=${MONOREPO_DIR}