split make files in `myos` project and install files in `yaip` project

Yann Autissier 2022-11-11 23:37:28 +01:00
parent 99b9e5ecbe
commit f847f6f5b9
302 changed files with 10 additions and 16952 deletions

View File

@ -1,5 +1,3 @@
APP_NAME=myos
APP_TYPE=myos
DOMAIN=localhost
ENV=local
STACK=

View File

@ -1,5 +1,9 @@
# CHANGELOG
## v0.9 - 2022-11-11
* split make files in `myos` project and install files in `yaip` project
## v0.1-beta - 2022-06-30
Beta release, welcome ipfs

View File

@ -1,10 +1,12 @@
include make/include.mk
MYOS ?= ../myos
MYOS_REPOSITORY ?= https://github.com/aynicos/myos
$(MYOS):
-@git clone $(MYOS_REPOSITORY) $(MYOS)
-include $(MYOS)/make/include.mk
##
# APP
app-bootstrap: bootstrap-docker
app-build: user install-build-config
$(call make,docker-compose-build docker-compose-up)
$(foreach service,$(or $(SERVICE),$(SERVICES)),$(call make,app-build-$(service)))
@ -21,14 +23,3 @@ app-update-default: ENV_FILE := /etc/default/myos
app-update-default: .env-update;
app-tests: ansible-tests
##
# BOOTSTRAP
# target bootstrap-docker: Install and configure docker
.PHONY: bootstrap-docker
bootstrap-docker: install-bin-docker setup-docker-group setup-binfmt setup-nfsd setup-sysctl
# target bootstrap-stack: Call bootstrap target of each stack
.PHONY: bootstrap-stack
bootstrap-stack: docker-network-create $(foreach stack,$(STACK),bootstrap-stack-$(stack))
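After this split, only the APP targets stay in this Makefile: the BOOTSTRAP section above moves out to the `myos` repository, which the `$(MYOS)` rule clones on demand and whose make files `-include` then picks up without failing while the clone has not happened yet. A first run could look roughly like this (a sketch assuming GNU make and the default `MYOS`/`MYOS_REPOSITORY` values; override them on the command line to reuse an existing checkout):

```sh
# Fetch the shared make files once (the $(MYOS) rule runs git clone),
# after which targets such as app-bootstrap resolve their dependencies:
make ../myos
make app-bootstrap
# Reuse an existing checkout instead of cloning:
MYOS=../checkouts/myos make app-build
```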

View File

@ -1,4 +1,4 @@
# myos - Make Your Own Stack
# yaip - Yet Another IPFS Project
Docker paas based on docker compose files.

View File

@ -1,37 +0,0 @@
# target ansible: Fire docker-build-ansible, Call ansible ANSIBLE_ARGS ARGS or ansible-run target
.PHONY: ansible
ansible: $(if $(ANSIBLE_DOCKER_RUN),docker-build-ansible,install-ansible)
$(call ansible,$(ANSIBLE_ARGS) $(ARGS))
# target ansible-playbook: Call ansible-playbook ANSIBLE_ARGS ARGS
.PHONY: ansible-playbook
ansible-playbook: $(if $(ANSIBLE_DOCKER_RUN),docker-build-ansible,install-ansible)
$(call ansible-playbook,$(ANSIBLE_ARGS) $(ARGS))
# target ansible-pull: Call ansible-pull ANSIBLE_GIT_REPOSITORY ANSIBLE_PLAYBOOK
.PHONY: ansible-pull
ansible-pull: install-ansible
$(call ansible-pull,--url $(ANSIBLE_GIT_REPOSITORY) $(if $(ANSIBLE_GIT_KEY_FILE),--key-file $(ANSIBLE_GIT_KEY_FILE)) $(if $(ANSIBLE_GIT_VERSION),--checkout $(ANSIBLE_GIT_VERSION)) $(if $(ANSIBLE_GIT_DIRECTORY),--directory $(ANSIBLE_GIT_DIRECTORY)) $(if $(ANSIBLE_TAGS),--tags $(ANSIBLE_TAGS)) $(if $(ANSIBLE_EXTRA_VARS),--extra-vars '$(ANSIBLE_EXTRA_VARS)') $(if $(findstring true,$(FORCE)),--force) $(if $(findstring true,$(DRYRUN)),--check) --full $(if $(ANSIBLE_INVENTORY),--inventory $(ANSIBLE_INVENTORY)) $(ANSIBLE_PLAYBOOK))
# target ansible-pull@%: Fire ssh-get-PrivateIpAddress-% for SERVER_NAME, Call ssh-exec make ansible-pull DOCKER_IMAGE_TAG
.PHONY: ansible-pull@%
ansible-pull@%: ssh-get-PrivateIpAddress-$(SERVER_NAME)
$(call ssh-exec,$(AWS_INSTANCE_IP),make ansible-pull ANSIBLE_DOCKER_IMAGE_TAG=$(ANSIBLE_DOCKER_IMAGE_TAG) ANSIBLE_TAGS=$(ANSIBLE_TAGS) FORCE=$(FORCE))
# target ansible-run: Fire ssh-add ansible-run-localhost
.PHONY: ansible-run
ansible-run: ansible-run-localhost
# target ansible-run-%: Fire docker-build-ansible, Call ansible-playbook ANSIBLE_PLAYBOOK
.PHONY: ansible-run-%
ansible-run-%: $(if $(ANSIBLE_DOCKER_RUN),docker-build-ansible,install-ansible)
$(call ansible-playbook,$(if $(ANSIBLE_TAGS),--tags $(ANSIBLE_TAGS)) $(if $(ANSIBLE_EXTRA_VARS),--extra-vars '$(patsubst target=localhost,target=$*,$(ANSIBLE_EXTRA_VARS))') $(if $(findstring true,$(DRYRUN)),--check) $(if $(ANSIBLE_INVENTORY),--inventory $(ANSIBLE_INVENTORY)) $(ANSIBLE_PLAYBOOK))
# target ansible-tests: Fire ssh-add ansible-tests-localhost
.PHONY: ansible-tests
ansible-tests: ansible-tests-localhost
# target ansible-tests-%: Fire docker-run-% with ANSIBLE_PLAYBOOK ansible/roles/*/tests/playbook.yml
.PHONY: ansible-tests-%
ansible-tests-%: ANSIBLE_PLAYBOOK := $(wildcard ansible/roles/*/tests/playbook.yml)
ansible-tests-%: ansible-run-%;
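For orientation, a few illustrative invocations of the targets above (the `ARGS` values are examples; `ANSIBLE_DOCKER` and friends are defined in the variables file that follows):

```sh
make ansible ARGS="-m ping localhost"   # ad-hoc run through the ansible function
make ansible-playbook ARGS="--check"    # dry-run of ANSIBLE_PLAYBOOK
make ansible-tests                      # runs every ansible/roles/*/tests/playbook.yml
ANSIBLE_DOCKER=true make ansible        # same, but from the ansible docker image
```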

View File

@ -1,61 +0,0 @@
ANSIBLE_APP_NAME ?= myos
ANSIBLE_APP_TYPE ?= myos
ANSIBLE_ARGS ?= $(if $(filter-out 0,$(UID)),$(if $(shell sudo -l 2>/dev/null |grep 'NOPASSWD: ALL'),,--ask-become-pass))$(if $(DOCKER_RUN),$(if $(shell ssh-add -l >/dev/null 2>&1 || echo false), --ask-pass))
ANSIBLE_AWS_ACCESS_KEY_ID ?= $(AWS_ACCESS_KEY_ID)
ANSIBLE_AWS_DEFAULT_OUTPUT ?= $(AWS_DEFAULT_OUTPUT)
ANSIBLE_AWS_DEFAULT_REGION ?= $(AWS_DEFAULT_REGION)
ANSIBLE_AWS_SECRET_ACCESS_KEY ?= $(AWS_SECRET_ACCESS_KEY)
ANSIBLE_CONFIG ?= ansible/ansible.cfg
ANSIBLE_DISKS_NFS_DISK ?= $(NFS_DISK)
ANSIBLE_DISKS_NFS_OPTIONS ?= $(NFS_OPTIONS)
ANSIBLE_DISKS_NFS_PATH ?= $(NFS_PATH)
# running ansible in docker requires sshd running on localhost,
# to allow ansible to escape docker and apply changes to localhost
ANSIBLE_DOCKER ?= false
ANSIBLE_DOCKER_RUN ?= $(if $(filter-out false False FALSE,$(ANSIBLE_DOCKER)),$(ANSIBLE_DOCKER))
ANSIBLE_DOCKER_IMAGE_TAG ?= $(DOCKER_IMAGE_TAG)
ANSIBLE_DOCKER_REGISTRY ?= $(DOCKER_REGISTRY)
ANSIBLE_EXTRA_VARS ?= target=localhost
ANSIBLE_GIT_DIRECTORY ?= /dns/$(subst $(space),/,$(strip $(call reverse,$(subst ., ,$(APP_REPOSITORY_HOST)))))/$(APP_REPOSITORY_PATH)
ANSIBLE_GIT_KEY_FILE ?= $(if $(ANSIBLE_SSH_PRIVATE_KEYS),~$(ANSIBLE_USERNAME)/.ssh/$(notdir $(firstword $(ANSIBLE_SSH_PRIVATE_KEYS))))
ANSIBLE_GIT_REPOSITORY ?= $(GIT_REPOSITORY)
ANSIBLE_GIT_VERSION ?= $(BRANCH)
ANSIBLE_INVENTORY ?= ansible/inventories
ANSIBLE_MYOS ?= $(ANSIBLE_GIT_DIRECTORY)
ANSIBLE_PLAYBOOK ?= ansible/playbook.yml
ANSIBLE_SSH_AUTHORIZED_KEYS ?= $(strip $(SSH_AUTHORIZED_KEYS))
ANSIBLE_SSH_BASTION_HOSTNAME ?= $(firstword $(SSH_BASTION_HOSTNAME))
ANSIBLE_SSH_BASTION_USERNAME ?= $(firstword $(SSH_BASTION_USERNAME))
ANSIBLE_SSH_PRIVATE_IP_RANGE ?= $(strip $(SSH_PRIVATE_IP_RANGE))
ANSIBLE_SSH_PRIVATE_KEYS ?= $(if $(ANSIBLE_SSH_PRIVATE_KEYS_ENABLE),$(strip $(SSH_PRIVATE_KEYS)))
ANSIBLE_SSH_PRIVATE_KEYS_ENABLE ?=
ANSIBLE_SSH_PUBLIC_HOSTS ?= $(strip $(SSH_PUBLIC_HOSTS))
ANSIBLE_SSH_USERNAME ?= $(firstword $(SSH_USER))
ANSIBLE_SERVER_NAME ?= $(SERVER_NAME)
ANSIBLE_USERNAME ?= $(USER)
ANSIBLE_VERBOSE ?= $(if $(DEBUG),-vvvv,$(if $(VERBOSE),-v))
CMDS += ansible ansible-playbook
DOCKER_RUN_OPTIONS_ANSIBLE ?= -it $(if $(DOCKER_INTERNAL_DOCKER_HOST),--add-host=host.docker.internal:$(DOCKER_INTERNAL_DOCKER_HOST))
ENV_VARS += ANSIBLE_APP_NAME ANSIBLE_APP_TYPE ANSIBLE_AWS_ACCESS_KEY_ID ANSIBLE_AWS_DEFAULT_OUTPUT ANSIBLE_AWS_DEFAULT_REGION ANSIBLE_AWS_SECRET_ACCESS_KEY ANSIBLE_CONFIG ANSIBLE_DISKS_NFS_DISK ANSIBLE_DISKS_NFS_OPTIONS ANSIBLE_DISKS_NFS_PATH ANSIBLE_DOCKER_IMAGE_TAG ANSIBLE_DOCKER_REGISTRY ANSIBLE_EXTRA_VARS ANSIBLE_GIT_DIRECTORY ANSIBLE_GIT_KEY_FILE ANSIBLE_GIT_REPOSITORY ANSIBLE_GIT_VERSION ANSIBLE_INVENTORY ANSIBLE_MYOS ANSIBLE_PLAYBOOK ANSIBLE_SSH_AUTHORIZED_KEYS ANSIBLE_SSH_BASTION_HOSTNAME ANSIBLE_SSH_BASTION_USERNAME ANSIBLE_SSH_PRIVATE_IP_RANGE ANSIBLE_SSH_PRIVATE_KEYS ANSIBLE_SSH_PUBLIC_HOSTS ANSIBLE_SSH_USERNAME ANSIBLE_USERNAME ANSIBLE_VERBOSE
# function ansible: Call run ansible ANSIBLE_ARGS with arg 1
define ansible
$(call INFO,ansible,$(1))
$(RUN) $(call $(if $(ANSIBLE_DOCKER_RUN),run,env-run),$(if $(ANSIBLE_DOCKER_RUN),,$(RUN) )ansible $(ANSIBLE_ARGS) $(ANSIBLE_VERBOSE) $(if $(ANSIBLE_DOCKER_RUN),-i $(ANSIBLE_INVENTORY)/.host.docker.internal) $(1),$(DOCKER_RUN_OPTIONS_ANSIBLE) $(DOCKER_REPOSITORY)/)
endef
# function ansible-playbook: Call run ansible-playbook ANSIBLE_ARGS with arg 1
define ansible-playbook
$(call INFO,ansible-playbook,$(1))
$(RUN) $(call $(if $(ANSIBLE_DOCKER_RUN),run,env-run),$(if $(ANSIBLE_DOCKER_RUN),,$(RUN) )ansible$(if $(ANSIBLE_DOCKER_RUN),,-playbook) $(ANSIBLE_ARGS) $(ANSIBLE_VERBOSE) $(if $(ANSIBLE_DOCKER_RUN),-i $(ANSIBLE_INVENTORY)/.host.docker.internal) $(1),$(DOCKER_RUN_OPTIONS_ANSIBLE) --entrypoint=ansible-playbook $(DOCKER_REPOSITORY)/)
endef
# function ansible-pull: Call run ansible-pull ANSIBLE_ARGS with arg 1
define ansible-pull
$(call INFO,ansible-pull,$(1))
$(call $(if $(ANSIBLE_DOCKER_RUN),run,env-run),$(if $(ANSIBLE_DOCKER_RUN),,$(RUN) )ansible-pull $(ANSIBLE_ARGS) $(ANSIBLE_VERBOSE) $(1))
endef
# function ansible-user-add-groups: Call ansible to add user 1 in groups 2
define ansible-user-add-groups
$(call INFO,ansible-user-add-groups,$(1)$(comma) $(2))
$(if $(ANSIBLE_DOCKER_RUN),$(call make,docker-build-ansible),$(call make,install-ansible))
$(call ansible,-b -m user -a 'name=$(1) groups=$(2) append=yes' localhost)
endef
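To illustrate the define blocks above: a call such as `$(call ansible-user-add-groups,alice,docker)` (user and group are placeholders) first makes sure ansible is available, then runs an ad-hoc command roughly equivalent to:

```sh
# Expansion sketch of ansible-user-add-groups for user "alice" and group "docker"
# (ANSIBLE_ARGS and verbosity flags omitted):
ansible -b -m user -a 'name=alice groups=docker append=yes' localhost
```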

View File

@ -1,8 +0,0 @@
##
# INSTALL
# target install-ansible: Install ansible on local host
.PHONY: install-ansible
install-ansible:
$(if $(shell type -p ansible),,$(RUN) $(INSTALL) ansible)
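The conditional above installs ansible only when the binary is missing; its shell equivalent is roughly (`$INSTALL` standing in for the package-manager command resolved elsewhere in the make files):

```sh
# Install ansible only if it is not already on PATH:
type -p ansible >/dev/null 2>&1 || $INSTALL ansible
```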

View File

@ -1,203 +0,0 @@
# target aws: Fire docker-build-aws, Call aws ARGS
.PHONY: aws
aws: $(if $(DOCKER_RUN),docker-build-aws)
$(call aws,$(ARGS))
# target aws-deploy: Call aws deploy create-deployment with application-name CODEDEPLOY_APP_NAME
.PHONY: aws-deploy
aws-deploy:
$(call aws,deploy create-deployment \
--application-name $(CODEDEPLOY_APP_NAME) \
--deployment-config-name $(CODEDEPLOY_DEPLOYMENT_CONFIG) \
--deployment-group-name $(CODEDEPLOY_DEPLOYMENT_GROUP) \
--description "$(CODEDEPLOY_DESCRIPTION)" \
--github-location repository=$(CODEDEPLOY_GITHUB_REPO)$(comma)commitId=$(CODEDEPLOY_GITHUB_COMMIT_ID))
# target aws-docker-login: Fire aws-ecr-get-login
.PHONY: aws-docker-login
aws-docker-login: aws-ecr-get-login
# target aws-ecr-get-login: Call aws ecr get-login
.PHONY: aws-ecr-get-login
aws-ecr-get-login:
$(eval IGNORE_DRYRUN := true)
$(eval docker_login := $(shell $(call aws,ecr get-login --no-include-email --region $(AWS_DEFAULT_REGION))))
$(eval IGNORE_DRYRUN := false)
$(RUN) $(docker_login)
# target aws-iam-create-role-%: Call aws iam create-role with role-name % and role-policy file aws/policies/%-trust.json
.PHONY: aws-iam-create-role-%
aws-iam-create-role-%: user docker-build-aws
$(eval IGNORE_DRYRUN := true)
$(eval json := $(shell $(call exec,sh -c 'envsubst < aws/policies/$*-trust.json')))
$(eval IGNORE_DRYRUN := false)
$(call aws,iam create-role --role-name $* --assume-role-policy-document '$(json)')
# target aws-iam-put-role-policy-%: Call aws iam put-role-policy with policy-name % and policy-document file aws/policies/%.json
.PHONY: aws-iam-put-role-policy-%
aws-iam-put-role-policy-%: user docker-build-aws
$(eval IGNORE_DRYRUN := true)
$(eval json := $(shell $(call exec,sh -c 'envsubst < aws/policies/$*.json')))
$(eval IGNORE_DRYRUN := false)
$(call aws,iam put-role-policy --role-name $* --policy-name $* --policy-document '$(json)')
# target aws-role-create-import-image: Fire aws-iam-create-role-% aws-iam-put-role-policy-% for AWS_VM_IMPORT_ROLE_NAME
.PHONY: aws-role-create-import-image
aws-role-create-import-image: aws-iam-create-role-$(AWS_VM_IMPORT_ROLE_NAME) aws-iam-put-role-policy-$(AWS_VM_IMPORT_ROLE_NAME)
# target aws-s3-check-upload: Fire aws-s3api-get-head-object-etag, Eval upload=false if remote s3 file already exists
.PHONY: aws-s3-check-upload
aws-s3-check-upload: docker-build-aws aws-s3api-get-head-object-etag
$(eval upload := true)
$(eval IGNORE_DRYRUN := true)
$(if $(AWS_S3_KEY_ETAG),$(if $(filter $(AWS_S3_KEY_ETAG),"$(shell cat $(PACKER_ISO_INFO) |awk '$$1 == "etag:" {print $$2}' 2>/dev/null)"),$(eval upload := false)))
$(eval IGNORE_DRYRUN := false)
# target aws-s3-cp: Fire aws-s3-check-upload, Call aws s3 cp PACKER_ISO_FILE s3://AWS_S3_BUCKET/AWS_S3_KEY, Call aws-s3-etag-save target
.PHONY: aws-s3-cp
aws-s3-cp: docker-build-aws $(PACKER_ISO_FILE) aws-s3-check-upload
$(if $(filter $(upload),true),$(call aws,s3 cp $(PACKER_ISO_FILE) s3://$(AWS_S3_BUCKET)/$(AWS_S3_KEY)) $(call make,aws-s3-etag-save))
# target aws-s3-etag-save: Fire aws-s3api-get-head-object-etag, Add line 'etag: AWS_S3_KEY_TAG' to file PACKER_ISO_INFO
.PHONY: aws-s3-etag-save
aws-s3-etag-save: docker-build-aws aws-s3api-get-head-object-etag
echo "etag: $(AWS_S3_KEY_ETAG)" >> $(PACKER_ISO_INFO)
# target aws-s3api-get-head-object-etag: Eval AWS_S3_KEY_ETAG, Echo 'ETag: AWS_S3_KEY_ETAG'
.PHONY: aws-s3api-get-head-object-etag
aws-s3api-get-head-object-etag: docker-build-aws
$(eval IGNORE_DRYRUN := true)
$(eval AWS_S3_KEY_ETAG := $(shell $(call aws,s3api head-object --bucket $(AWS_S3_BUCKET) --key $(AWS_S3_KEY) --output text --query ETag) |grep -v 'operation: Not Found' 2>/dev/null))
$(eval IGNORE_DRYRUN := false)
echo ETag: $(AWS_S3_KEY_ETAG)
# target aws-s3api-get-head-object-lastmodified: Eval AWS_S3_KEY_DATE, Echo 'LastModified: AWS_S3_KEY_DATE'
.PHONY: aws-s3api-get-head-object-lastmodified
aws-s3api-get-head-object-lastmodified: docker-build-aws
$(eval IGNORE_DRYRUN := true)
$(eval AWS_S3_KEY_DATE := $(shell $(call aws,s3api head-object --bucket $(AWS_S3_BUCKET) --key $(AWS_S3_KEY) --output text --query LastModified) |grep -v 'operation: Not Found' 2>/dev/null))
$(eval IGNORE_DRYRUN := false)
echo LastModified: $(AWS_S3_KEY_DATE)
# target aws-ec2-import-snapshot: Call aws ec2 import-snapshot with S3Bucket AWS_S3_BUCKET and S3Key AWS_S3_KEY
.PHONY: aws-ec2-import-snapshot
aws-ec2-import-snapshot: user docker-build-aws aws-s3api-get-head-object-etag aws-s3api-get-head-object-lastmodified
$(eval IGNORE_DRYRUN := true)
$(eval json := $(shell $(call exec,sh -c 'envsubst < aws/import-snapshot.json')))
$(eval IGNORE_DRYRUN := false)
$(eval AWS_TASK_ID := $(shell $(call aws,ec2 import-snapshot --description '$(AWS_SNAP_DESCRIPTION)' --output text --query ImportTaskId --disk-container '$(json)')))
echo ImportTaskId: $(AWS_TASK_ID)
# target aws-ec2-describe-import-snapshot-tasks-%: Call aws ec2 describe-import-snapshot-tasks with import-task-id %
.PHONY: aws-ec2-describe-import-snapshot-tasks-%
aws-ec2-describe-import-snapshot-tasks-%: docker-build-aws
$(call aws,ec2 describe-import-snapshot-tasks --import-task-ids $*)
# target aws-ec2-describe-import-snapshot-tasks: Call aws ec2 describe-import-snapshot-tasks
.PHONY: aws-ec2-describe-import-snapshot-tasks
aws-ec2-describe-import-snapshot-tasks: docker-build-aws
$(call aws,ec2 describe-import-snapshot-tasks)
# target aws-ec2-describe-instances-PrivateIpAddress: Call aws ec2 describe-instances, Print list of PrivateIpAddress
.PHONY: aws-ec2-describe-instances-PrivateIpAddress
aws-ec2-describe-instances-PrivateIpAddress: docker-build-aws
$(call aws,ec2 describe-instances --no-paginate --query 'Reservations[*].Instances[*].[Tags[?Key==`Name`].Value$(comma)PrivateIpAddress]' --output text) |sed '$$!N;s/\r\n/ /' |awk 'BEGIN {printf "%-24s%s\r\n"$(comma)"PrivateIpAddress"$(comma)"Name"}; $$1 != "None" {printf "%-24s%s\n"$(comma)$$1$(comma)$$2}'
# target aws-ec2-describe-instances-PrivateIpAddress-%: Call aws ec2 describe-instances, Print list of PrivateIpAddress for Name matching %
.PHONY: aws-ec2-describe-instances-PrivateIpAddress-%
aws-ec2-describe-instances-PrivateIpAddress-%: docker-build-aws
$(call aws,ec2 describe-instances --no-paginate --query 'Reservations[*].Instances[*].[Tags[?Key==`Name`].Value$(comma)PrivateIpAddress]' --output text) |sed '$$!N;s/\r\n/ /' |awk 'BEGIN {printf "%-24s%s\r\n"$(comma)"PrivateIpAddress"$(comma)"Name"}; $$1 != "None" && $$2 ~ /$*/ {printf "%-24s%s\n"$(comma)$$1$(comma)$$2}'
# target aws-ec2-get-instances-PrivateIpAddress: Eval AWS_INSTANCE_IP, Echo 'PrivateIpAddress: AWS_INSTANCE_IP'
.PHONY: aws-ec2-get-instances-PrivateIpAddress
aws-ec2-get-instances-PrivateIpAddress: docker-build-aws
$(eval IGNORE_DRYRUN := true)
$(eval AWS_INSTANCE_IP := $(shell $(call aws,ec2 describe-instances --no-paginate --query 'Reservations[*].Instances[*].PrivateIpAddress' --output text) 2>/dev/null))
$(eval IGNORE_DRYRUN := false)
echo PrivateIpAddress: $(AWS_INSTANCE_IP)
# target aws-ec2-get-instances-PrivateIpAddress-%: Eval AWS_INSTANCE_IP with Name matching %, Echo 'PrivateIpAddress: AWS_INSTANCE_IP'
.PHONY: aws-ec2-get-instances-PrivateIpAddress-%
aws-ec2-get-instances-PrivateIpAddress-%:
$(eval IGNORE_DRYRUN := true)
$(eval AWS_INSTANCE_IP := $(shell $(call aws,ec2 describe-instances --no-paginate --filter 'Name=tag:Name$(comma)Values=$**' --query 'Reservations[*].Instances[*].PrivateIpAddress' --output text) 2>/dev/null))
$(eval IGNORE_DRYRUN := false)
echo PrivateIpAddress: $(AWS_INSTANCE_IP)
# target aws-ec2-get-import-snapshot-tasks-id: Fire aws-ec2-get-import-snapshot-tasks-id-% for AWS_TASK_ID
.PHONY: aws-ec2-get-import-snapshot-tasks-id
aws-ec2-get-import-snapshot-tasks-id: aws-ec2-get-import-snapshot-tasks-id-$(AWS_TASK_ID)
# target aws-ec2-get-import-snapshot-tasks-id-%: Eval AWS_SNAP_ID with import-task-ids %, Echo 'SnapshotId: AWS_SNAP_ID'
.PHONY: aws-ec2-get-import-snapshot-tasks-id-%
aws-ec2-get-import-snapshot-tasks-id-%: docker-build-aws
$(eval IGNORE_DRYRUN := true)
$(eval AWS_SNAP_ID := $(shell $(call aws,ec2 describe-import-snapshot-tasks --import-task-ids $* --output text --query ImportSnapshotTasks[0].SnapshotTaskDetail.SnapshotId) 2>/dev/null))
$(eval IGNORE_DRYRUN := false)
echo SnapshotId: $(AWS_SNAP_ID)
# target aws-ec2-get-import-snapshot-tasks-message-%: Eval AWS_SNAP_MESSAGE with import-task-ids %, Echo 'StatusMessage: AWS_SNAP_MESSAGE'
.PHONY: aws-ec2-get-import-snapshot-tasks-message-%
aws-ec2-get-import-snapshot-tasks-message-%: docker-build-aws
$(eval IGNORE_DRYRUN := true)
$(eval AWS_SNAP_MESSAGE := $(shell $(call aws,ec2 describe-import-snapshot-tasks --import-task-ids $* --output text --query ImportSnapshotTasks[0].SnapshotTaskDetail.StatusMessage) 2>/dev/null))
$(eval IGNORE_DRYRUN := false)
echo StatusMessage: $(AWS_SNAP_MESSAGE)
# target aws-ec2-get-import-snapshot-tasks-progress-%: Eval AWS_SNAP_PROGRESS with import-task-ids %, Echo 'Progress: AWS_SNAP_PROGRESS'
.PHONY: aws-ec2-get-import-snapshot-tasks-progress-%
aws-ec2-get-import-snapshot-tasks-progress-%: docker-build-aws
$(eval IGNORE_DRYRUN := true)
$(eval AWS_SNAP_PROGRESS := $(shell $(call aws,ec2 describe-import-snapshot-tasks --import-task-ids $* --output text --query ImportSnapshotTasks[0].SnapshotTaskDetail.Progress) 2>/dev/null))
$(eval IGNORE_DRYRUN := false)
echo Progress: $(AWS_SNAP_PROGRESS)
# target aws-ec2-get-import-snapshot-tasks-size-%: Eval AWS_SNAP_SIZE with import-task-ids %, Echo 'DiskImageSize: AWS_SNAP_SIZE'
.PHONY: aws-ec2-get-import-snapshot-tasks-size-%
aws-ec2-get-import-snapshot-tasks-size-%: docker-build-aws
$(eval IGNORE_DRYRUN := true)
$(eval AWS_SNAP_SIZE := $(shell $(call aws,ec2 describe-import-snapshot-tasks --import-task-ids $* --output text --query ImportSnapshotTasks[0].SnapshotTaskDetail.DiskImageSize) 2>/dev/null))
$(eval IGNORE_DRYRUN := false)
echo DiskImageSize: $(AWS_SNAP_SIZE)
# target aws-ec2-get-import-snapshot-tasks-status-%: Eval AWS_SNAP_STATUS with import-task-ids %, Echo 'Status: AWS_SNAP_STATUS'
.PHONY: aws-ec2-get-import-snapshot-tasks-status-%
aws-ec2-get-import-snapshot-tasks-status-%: docker-build-aws
$(eval IGNORE_DRYRUN := true)
$(eval AWS_SNAP_STATUS := $(shell $(call aws,ec2 describe-import-snapshot-tasks --import-task-ids $* --output text --query ImportSnapshotTasks[0].SnapshotTaskDetail.Status) 2>/dev/null))
$(eval IGNORE_DRYRUN := false)
echo Status: $(AWS_SNAP_STATUS)
# target aws-ec2-wait-import-snapshot-tasks-status-completed: Fire aws-ec2-wait-import-snapshot-tasks-status-completed-% for AWS_TASK_ID
.PHONY: aws-ec2-wait-import-snapshot-tasks-status-completed
aws-ec2-wait-import-snapshot-tasks-status-completed: aws-ec2-wait-import-snapshot-tasks-status-completed-$(AWS_TASK_ID)
# target aws-ec2-wait-import-snapshot-tasks-status-completed-%: Wait SnapshotTaskDetail.Status=completed for import-task-ids %
.PHONY: aws-ec2-wait-import-snapshot-tasks-status-completed-%
aws-ec2-wait-import-snapshot-tasks-status-completed-%: docker-build-aws
while [ `$(call aws,ec2 describe-import-snapshot-tasks --import-task-ids $* --output text --query ImportSnapshotTasks[0].SnapshotTaskDetail.Status)` != "completed$$(printf '\r')" ]; \
do \
count=$$(( $${count:-0}+1 )); \
[ "$${count}" -eq 99 ] && exit 1; \
sleep 10; \
done
# target aws-ec2-wait-snapshot-completed-%: Call aws ec2 wait snapshot-completed with snapshot-ids %
.PHONY: aws-ec2-wait-snapshot-completed-%
aws-ec2-wait-snapshot-completed-%: docker-build-aws
$(call aws,ec2 wait snapshot-completed --snapshot-ids $* --output text)
# target aws-ec2-register-image: Fire aws-ec2-get-import-snapshot-tasks-id, Eval AWS_AMI_ID with Name AWS_AMI_NAME, Echo 'ImageId: AWS_AMI_ID'
.PHONY: aws-ec2-register-image
aws-ec2-register-image: user docker-build-aws aws-ec2-get-import-snapshot-tasks-id
$(eval IGNORE_DRYRUN := true)
$(eval json := $(shell $(call exec,sh -c 'envsubst < aws/register-image-device-mappings.json')))
$(eval IGNORE_DRYRUN := false)
$(eval AWS_AMI_ID := $(shell $(call aws,ec2 register-image --name '$(AWS_AMI_NAME)' --description '$(AWS_AMI_DESCRIPTION)' --architecture x86_64 --root-device-name /dev/sda1 --virtualization-type hvm --block-device-mappings '$(json)') 2>/dev/null))
echo ImageId: $(AWS_AMI_ID)
# target aws-ami: Fire aws-s3-cp aws-ec2-import-snapshot, Call aws-ec2-wait-import-snapshot-tasks-status-completed target, Call aws-ec2-register-image target
.PHONY: aws-ami
aws-ami: aws-s3-cp aws-ec2-import-snapshot
$(call make,aws-ec2-wait-import-snapshot-tasks-status-completed,,AWS_TASK_ID)
$(call make,aws-ec2-register-image,,AWS_TASK_ID)
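Run step by step, the image-import pipeline that `aws-ami` chains together looks roughly like this (bucket, key and task ids all come from the AWS_* variables and the eval'd results above):

```sh
make aws-s3-cp                # upload PACKER_ISO_FILE, skipped when the ETag matches
make aws-ec2-import-snapshot  # start the snapshot import, prints ImportTaskId
make aws-ami                  # or: do both, wait for completion, register the AMI
```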

View File

@ -1,23 +0,0 @@
AWS_ACCESS_KEY_ID := $(if $(AWS_CREDENTIALS),$(shell $(call conf,$(AWS_CREDENTIALS),$(or $(AWS_PROFILE),default),aws_access_key_id)))
AWS_AMI_DESCRIPTION ?= $(AWS_SERVICE_VERSION)
AWS_AMI_NAME ?= $(AWS_SERVICE_NAME)-$(AWS_S3_FILENAME)
AWS_CREDENTIALS ?= $(wildcard $(HOME)/.aws/credentials)
AWS_DEFAULT_REGION ?= eu-west-1
AWS_DEFAULT_OUTPUT ?= text
AWS_INSTANCE_ID ?= $(shell timeout 0.1 curl -s http://169.254.169.254/latest/meta-data/instance-id 2>/dev/null)
AWS_VM_IMPORT_ROLE_NAME ?= vmimport
AWS_S3_BUCKET ?= $(AWS_SERVICE_NAME)
AWS_S3_FILENAME ?= $(PACKER_ISO_NAME)
AWS_S3_KEY ?= $(PACKER_ISO_FILE)
AWS_SECRET_ACCESS_KEY := $(if $(AWS_CREDENTIALS),$(shell $(call conf,$(AWS_CREDENTIALS),$(or $(AWS_PROFILE),default),aws_secret_access_key)))
AWS_SERVICE_NAME ?= $(COMPOSE_SERVICE_NAME)
AWS_SERVICE_VERSION ?= $(BUILD_DATE)-$(VERSION)
AWS_SNAP_DESCRIPTION ?= $(AWS_SERVICE_NAME)-$(AWS_SERVICE_VERSION)-$(AWS_S3_FILENAME)
CMDS += aws
DOCKER_RUN_VOLUME += -v $(HOME)/.aws:/home/$(USER)/.aws
ENV_VARS += AWS_ACCESS_KEY_ID AWS_AMI_DESCRIPTION AWS_AMI_NAME AWS_DEFAULT_OUTPUT AWS_DEFAULT_REGION AWS_INSTANCE_ID AWS_PROFILE AWS_S3_BUCKET AWS_S3_KEY AWS_SECRET_ACCESS_KEY AWS_SESSION_TOKEN AWS_SNAP_DESCRIPTION AWS_SNAP_ID
# function aws: Call run aws with arg 1
define aws
$(RUN) $(call run,aws $(1),$(DOCKER_REPOSITORY)/)
endef
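The `aws` function above wraps every CLI call in a container run, so `$(call aws,s3 ls)` resolves to something along the lines of (flags abbreviated; image name and credentials mount taken from the variables above):

```sh
docker run --rm -v "$HOME/.aws:/home/$USER/.aws" "$DOCKER_REPOSITORY/aws" s3 ls
```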

View File

@ -1,42 +0,0 @@
FROM alpine:latest as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
RUN apk --no-cache add \
ansible \
py3-pip \
&& pip3 install boto
RUN apk --no-cache upgrade
ENTRYPOINT ["/usr/bin/ansible"]
CMD ["--help"]
FROM dist as master
ARG DOCKER_BUILD_DIR
ARG UID
ARG USER
ENV UID=${UID}
ENV GID=${UID}
ENV USER=${USER}
# If we provide a numeric UID
RUN [ "$UID" -eq "$UID" ] 2>/dev/null \
# Remove user with $UID if it is not our $USER
&& if [ "$(getent passwd $UID |awk -F: '{print $1}')" != "$USER" ]; then \
sed -i '/^'$(getent passwd $UID |awk -F: '{print $1}')':x:'$UID':/d' /etc/passwd; \
sed -i '/^'$(getent group $GID |awk -F: '{print $1}')':x:'$GID':/d' /etc/group; \
fi \
# Force $UID if our $USER already exists
&& sed -i 's/^'$USER':x:[0-9]\+:[0-9]\+:/'$USER':x:'$UID':'$GID':/' /etc/passwd \
&& sed -i 's/^'$USER':x:[0-9]\+:/'$USER':x:'$GID':/' /etc/group \
# Create $USER if it does not exist
&& if [ "$(getent passwd $UID)" = "" ]; then \
echo "$USER:x:$UID:$GID::/home/$USER:$SHELL" >> /etc/passwd; \
echo "$USER:\!:$(($(date +%s) / 60 / 60 / 24)):0:99999:7:::" >> /etc/shadow; \
echo "$USER:x:$GID:" >> /etc/group; \
fi \
&& mkdir -p /home/$USER \
&& chown $UID:$GID /home/$USER \
|| true
USER $USER
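A hypothetical build of this image, pinning the in-container account to the caller through the declared ARGs (the build directory path and tag are illustrative):

```sh
docker build --target master \
  --build-arg UID="$(id -u)" \
  --build-arg USER="$USER" \
  --build-arg DOCKER_BUILD_DIR=docker/ansible \
  -t ansible:local docker/ansible
```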

View File

@ -1,43 +0,0 @@
FROM alpine:latest as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
RUN apk --no-cache add \
groff \
less \
py3-pip \
&& pip3 install awscli
RUN apk --no-cache upgrade
ENTRYPOINT ["/usr/bin/aws"]
CMD ["help"]
FROM dist as master
ARG DOCKER_BUILD_DIR
ARG UID
ARG USER
ENV UID=${UID}
ENV GID=${UID}
ENV USER=${USER}
# If we provide a numeric UID
RUN [ "$UID" -eq "$UID" ] 2>/dev/null \
# Remove user with $UID if it is not our $USER
&& if [ "$(getent passwd $UID |awk -F: '{print $1}')" != "$USER" ]; then \
sed -i '/^'$(getent passwd $UID |awk -F: '{print $1}')':x:'$UID':/d' /etc/passwd; \
sed -i '/^'$(getent group $GID |awk -F: '{print $1}')':x:'$GID':/d' /etc/group; \
fi \
# Force $UID if our $USER already exists
&& sed -i 's/^'$USER':x:[0-9]\+:[0-9]\+:/'$USER':x:'$UID':'$GID':/' /etc/passwd \
&& sed -i 's/^'$USER':x:[0-9]\+:/'$USER':x:'$GID':/' /etc/group \
# Create $USER if it does not exist
&& if [ "$(getent passwd $UID)" = "" ]; then \
echo "$USER:x:$UID:$GID::/home/$USER:$SHELL" >> /etc/passwd; \
echo "$USER:\!:$(($(date +%s) / 60 / 60 / 24)):0:99999:7:::" >> /etc/shadow; \
echo "$USER:x:$GID:" >> /etc/group; \
fi \
&& mkdir -p /home/$USER \
&& chown $UID:$GID /home/$USER \
|| true
USER $USER

View File

@ -1,51 +0,0 @@
# FROM certbot/certbot:latest as dist
FROM python:3.8-alpine as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
# RUN pip install \
RUN apk --no-cache add --virtual .build-deps \
build-base \
libffi-dev \
&& pip install \
certbot \
certbot-dns-azure \
# certbot-dns-bunny \
# certbot-dns-clouddns \
certbot-dns-cloudflare \
# certbot-dns-cloudxns \
certbot-dns-digitalocean \
# certbot-dns-dnsmadeeasy \
# certbot-dns-dnsimple \
# certbot-dns-gehirn \
# certbot-dns-godaddy \
certbot-dns-google \
certbot-dns-infomaniak \
# certbot-dns-inwx \
certbot-dns-ispconfig \
# certbot-dns-lightsail \
certbot-dns-linode \
# certbot-dns-luadns \
# certbot-dns-njalla \
# certbot-dns-nsone \
certbot-dns-ovh \
certbot-dns-rfc2136 \
certbot-dns-route53 \
# certbot-dns-sakuracloud \
certbot-dns-standalone \
# certbot-dns-yandexcloud \
# certbot-ext-auth future \
certbot-plugin-gandi \
certbot-s3front \
# certbot_dns_duckdns \
# certbot_dns_porkbun \
# letsencrypt-pritunl \
# letsencrypt-proxmox \
&& apk del .build-deps
COPY ${DOCKER_BUILD_DIR}/certbot-renew /etc/periodic/daily/
COPY ${DOCKER_BUILD_DIR}/docker-entrypoint.sh /docker-entrypoint.sh
ENTRYPOINT ["/docker-entrypoint.sh"]
CMD ["--help"]
FROM dist as master

View File

@ -1,4 +0,0 @@
#!/bin/sh
mkdir -p /etc/letsencrypt/renewal
/usr/local/bin/certbot renew > /etc/letsencrypt/renewal/letsencrypt.log

View File

@ -1,16 +0,0 @@
#!/usr/bin/env sh
set -euo errexit
[ -n "${DEBUG:-}" -a "${DEBUG:-}" != "false" ] && set -x
case "${1:-start}" in
start)
exec /usr/sbin/crond -f -L/dev/stdout
;;
*)
exec /usr/local/bin/certbot "$@"
;;
esac
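With this dispatch, the default CMD (`--help`) and any other argument list go straight to certbot, while `start` runs the cron daemon that fires the daily renew script above; for instance (image name illustrative):

```sh
docker run --rm certbot:local start    # crond in the foreground, daily renewals
docker run --rm certbot:local certonly --standalone -d example.com
```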

View File

@ -1,20 +0,0 @@
FROM alpine:latest as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
ARG COMPOSE_REMOTE=https://github.com/docker/compose
ARG COMPOSE_VERSION=2.5.0
ARG SYSTEM=Linux
ARG MACHINE=x86_64
RUN apk update \
&& apk add --no-cache ca-certificates \
&& OS="$(echo ${SYSTEM} |awk '{print tolower($0)}')"; \
ARCH="$(echo ${MACHINE})"; \
wget -qO /usr/bin/docker-compose ${COMPOSE_REMOTE}/releases/download/v${COMPOSE_VERSION}/docker-compose-${OS}-${ARCH} \
&& chmod +x /usr/bin/docker-compose
ENTRYPOINT ["/usr/bin/docker-compose"]
FROM dist as master
ARG DOCKER_BUILD_DIR
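Because release, system and architecture are all build arguments, the pinned binary can be swapped at build time; for example (tag illustrative):

```sh
docker build --build-arg COMPOSE_VERSION=2.5.0 \
  --build-arg MACHINE=aarch64 \
  -t docker-compose:local .
```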

View File

@ -1,23 +0,0 @@
ARG CONSUL_VERSION=1.11.1
FROM consul:${CONSUL_VERSION} as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
ARG DOCKER_GID=999
# add user consul in group docker
RUN DOCKER_GROUP=$(awk -F: '$3 == '${DOCKER_GID}' {print $1}' < /etc/group) \
&& if [ -n "${DOCKER_GROUP}" ]; then adduser consul ${DOCKER_GROUP}; \
else addgroup -g ${DOCKER_GID} docker && adduser consul docker; \
fi
COPY ${DOCKER_BUILD_DIR}/docker-healthcheck /usr/local/bin/
RUN chmod +rx /usr/local/bin/docker-healthcheck
HEALTHCHECK CMD ((((echo -e 'GET /v1/health/service/consul HTTP/1.0\n' \
|nc -w 1 localhost:8500; echo $? >&3) \
|sed -n '/^\[/,$p' \
|jq '.[].Checks[0].Output' >&4) 3>&1) \
| (read err; exit $err)) 4>&1
FROM dist as master
ARG DOCKER_BUILD_DIR
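The HEALTHCHECK above keeps nc's exit status as the health verdict (via fd 3) while still printing the first check's output (via fd 4); stripped of the descriptor plumbing, it is roughly:

```sh
# Simplified equivalent of the consul HEALTHCHECK, losing the trick that
# preserves nc's exit code as the verdict:
echo -e 'GET /v1/health/service/consul HTTP/1.0\n' \
  | nc -w 1 localhost:8500 \
  | sed -n '/^\[/,$p' \
  | jq '.[].Checks[0].Output'
```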

View File

@ -1,43 +0,0 @@
#!/bin/sh
# link: https://github.com/hashicorp/consul/issues/3182
# author: Yann "aya" Autissier
# license: GPL
set -eu
DOCKER_SOCK=${DOCKER_SOCK:-/var/run/docker.sock}
if ! which curl > /dev/null || ! which jq >/dev/null; then
>&2 echo "ERROR: curl or jq not found"
exit 2
fi
if [ $# -ne 1 ]; then
>&2 echo "ERROR: invalid parameter '$*'"
echo "USAGE: $0 container-id|container-name|container-ip"
exit 2
fi
{
{
{
# list all dockers
for docker in $(curl --disable --fail --show-error --silent --unix-socket "${DOCKER_SOCK}" http://localhost/containers/json |jq -r '.[].Id'); do
# print "health_status id name ip_address health_output" for each docker
curl --disable --fail --show-error --silent --unix-socket "${DOCKER_SOCK}" "http://localhost/containers/${docker}/json" \
|jq -r '[.State.Health.Status, .Id, .Name, .NetworkSettings.IPAddress, .State.Health.Log[0].Output] |@tsv'
# shorten id: .Id |capture("(?<id>.{12})").id
# print "health_status" and "health_output" for line matching $1
done |awk -F '\t' '/\<'"$1"'\>/ {print $1 | "cat >&3; exec 3>&-"; print $NF | "cat >&4";}'
} 3>&1
} | {
read -r status ||:
case "$status" in
healthy) exit=0;;
starting) exit=1;;
*) exit=2;;
esac
# exit according to "health_status"
exit $exit
}
# print "health_output"
} 4>&1
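A usage sketch for the script above (the container name is a placeholder); the exit code follows the docker HEALTHCHECK convention:

```sh
./docker-healthcheck my-container; rc=$?
# rc: 0=healthy, 1=starting, 2=unhealthy (or bad arguments / missing tools)
echo "health exit code: $rc"
```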

View File

@ -1,9 +0,0 @@
FROM docker.elastic.co/apm/apm-server-oss:7.4.2 as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
# config
COPY ${DOCKER_BUILD_DIR}/apm-server.yml /usr/share/apm-server/
FROM dist as master
ARG DOCKER_BUILD_DIR
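A minimal smoke test of the resulting image; a sketch only, since the build-context layout and tag are assumptions (run from the repository root so the COPY of apm-server.yml resolves):

```sh
docker build --build-arg DOCKER_BUILD_DIR=docker/apm-server \
  -t apm-server:local .
docker run --rm -p 8200:8200 apm-server:local
curl -s http://localhost:8200/   # responds with server info once up
```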

View File

@ -1,931 +0,0 @@
######################### APM Server Configuration #########################
################################ APM Server ################################
apm-server:
# Defines the host and port the server is listening on. Use "unix:/path/to.sock" to listen on a unix domain socket.
host: "0.0.0.0:8200"
# Maximum permitted size in bytes of a request's header accepted by the server to be processed.
#max_header_size: 1048576
# Maximum amount of time to wait for the next incoming request before underlying connection is closed.
#idle_timeout: 45s
# Maximum permitted duration for reading an entire request.
#read_timeout: 30s
# Maximum permitted duration for writing a response.
#write_timeout: 30s
# Maximum duration before releasing resources when shutting down the server.
#shutdown_timeout: 5s
# Maximum permitted size in bytes of an event accepted by the server to be processed.
#max_event_size: 307200
# Maximum number of new connections to accept simultaneously (0 means unlimited).
#max_connections: 0
# Authorization token for sending data to the APM server. If a token is set, the
# agents must send it in the following format: Authorization: Bearer <secret-token>.
# It is recommended to use an authorization token in combination with SSL enabled,
# and save the token in the apm-server keystore. The token is not used for the RUM endpoint.
#secret_token:
# Enable secure communication between APM agents and the server. By default ssl is disabled.
#ssl:
#enabled: false
# Configure a list of root certificate authorities for verifying client certificates.
#certificate_authorities: []
# Path to file containing the certificate for server authentication.
# Needs to be configured when ssl is enabled.
#certificate: ''
# Path to file containing server certificate key.
# Needs to be configured when ssl is enabled.
#key: ''
# Optional configuration options for ssl communication.
# Passphrase for decrypting the Certificate Key.
# It is recommended to use the provided keystore instead of entering the passphrase in plain text.
#key_passphrase: ''
# List of supported/valid protocol versions. By default TLS versions 1.1 up to 1.2 are enabled.
#supported_protocols: [TLSv1.1, TLSv1.2]
# Configure cipher suites to be used for SSL connections.
#cipher_suites: []
# Configure curve types for ECDHE based cipher suites.
#curve_types: []
# Configure which type of client authentication is supported.
# Options are `none`, `optional`, and `required`. Default is `optional`.
#client_authentication: "optional"
# Configure SSL verification mode. If `none` is configured, all hosts and
# certificates will be accepted. In this mode, SSL-based connections are
# susceptible to man-in-the-middle attacks. Use only for testing. Default is `full`.
#ssl.verification_mode: full
# Enable Real User Monitoring (RUM) Support. By default RUM is disabled.
#rum:
#enabled: false
#event_rate:
# Defines the maximum amount of events allowed to be sent to the APM Server RUM
# endpoint per IP per second. Defaults to 300.
#limit: 300
# An LRU cache is used to keep a rate limit per IP for the most recently seen IPs.
# This setting defines the number of unique IPs that can be tracked in the cache.
# Sites with many concurrent clients should consider increasing this limit. Defaults to 1000.
#lru_size: 1000
#-- General RUM settings
# Comma separated list of permitted origins for real user monitoring.
# User-agents will send an origin header that will be validated against this list.
# An origin is made of a protocol scheme, host and port, without the url path.
# Allowed origins in this setting can have * to match anything (eg.: http://*.example.com)
# If an item in the list is a single '*', everything will be allowed.
#allow_origins : ['*']
# Regexp to be matched against a stacktrace frame's `file_name` and `abs_path` attributes.
# If the regexp matches, the stacktrace frame is considered to be a library frame.
#library_pattern: "node_modules|bower_components|~"
# Regexp to be matched against a stacktrace frame's `file_name`.
# If the regexp matches, the stacktrace frame is not used for calculating error groups.
# The default pattern excludes stacktrace frames that have a filename starting with '/webpack'
#exclude_from_grouping: "^/webpack"
# If a source map has previously been uploaded, source mapping is automatically applied.
# to all error and transaction documents sent to the RUM endpoint.
#source_mapping:
# Sourcemapping is enabled by default.
#enabled: true
# Source maps are always fetched from Elasticsearch, by default using the output.elasticsearch configuration.
# A different instance must be configured when using any other output.
# This setting only affects sourcemap reads - the output determines where sourcemaps are written.
#elasticsearch:
# Array of hosts to connect to.
# Scheme and port can be left out and will be set to the default (`http` and `9200`).
# In case you specify an additional path, the scheme is required: `http://localhost:9200/path`.
# IPv6 addresses should always be defined as: `https://[2001:db8::1]:9200`.
# hosts: ["localhost:9200"]
# Optional protocol and basic auth credentials.
#protocol: "https"
#username: "elastic"
#password: "changeme"
# The `cache.expiration` determines how long a source map should be cached before fetching it again from Elasticsearch.
# Note that values configured without a time unit will be interpreted as seconds.
#cache:
#expiration: 5m
# Source maps are stored in a separate index.
# If the default index pattern for source maps at 'outputs.elasticsearch.indices'
# is changed, a matching index pattern needs to be specified here.
#index_pattern: "apm-*-sourcemap*"
# If true (default), APM Server captures the IP of the instrumented service
# or the IP and User Agent of the real user (RUM requests).
#capture_personal_data: true
# Enable APM Server Golang expvar support (https://golang.org/pkg/expvar/).
#expvar:
#enabled: false
# Url to expose expvar.
#url: "/debug/vars"
# Instrumentation support for the server's HTTP endpoints and event publisher.
#instrumentation:
# Set to true to enable instrumentation of the APM Server itself.
#enabled: false
# Environment in which the APM Server is running on (eg: staging, production, etc.)
#environment: ""
# Remote hosts to report instrumentation results to.
#hosts:
# - http://remote-apm-server:8200
# secret_token for the remote apm-servers.
#secret_token:
# A pipeline is a definition of processors applied to documents when ingesting them to Elasticsearch.
# Using pipelines involves two steps:
# (1) registering a pipeline
# (2) applying a pipeline during data ingestion (see `output.elasticsearch.pipeline`)
#
# You can manually register a pipeline, or use this configuration option to ensure
# the pipeline is loaded and registered at the configured Elasticsearch instances.
# Find the default pipeline configuration at `ingest/pipeline/definition.json`.
# Automatic pipeline registration requires the `output.elasticsearch` to be enabled and configured.
#register.ingest.pipeline:
# Registers APM pipeline definition in Elasticsearch on APM Server startup. Defaults to true.
#enabled: true
# Overwrites existing APM pipeline definition in Elasticsearch. Defaults to false.
#overwrite: false
# When ilm is set to `auto`, the APM Server checks a couple of preconditions:
# If a different output than Elasticsearch is configured, ILM will be disabled.
# If Elasticsearch output is configured, but specific `index` or `indices` settings are configured, ILM will be
# disabled, as it only works with default index settings.
# If the configured Elasticsearch instance is not eligible for ILM, ILM will also be disabled.
# If all preconditions are met, ILM will be enabled.
#
# When ILM is set to `true`, the APM Server ignores any configured index settings.
# For ILM to be applied, The configured output must be set to Elasticsearch and the instance
# needs to support ILM. Otherwise APM Server falls back to ordinary index management without ILM.
#
# Defaults to "auto". Disable ILM by setting it to `false`.
#ilm.enabled: "auto"
# When using APM agent configuration, information fetched from Kibana will be cached in memory for some time.
# Specify cache key expiration via this setting. Default is 30 seconds.
#agent.config.cache.expiration: 30s
#kibana:
# For APM Agent configuration in Kibana, enabled must be true.
#enabled: false
# Scheme and port can be left out and will be set to the default (`http` and `5601`).
# In case you specify an additional path, the scheme is required: `http://localhost:5601/path`.
# IPv6 addresses should always be defined as: `https://[2001:db8::1]:5601`.
#host: "localhost:5601"
# Optional protocol and basic auth credentials.
#protocol: "https"
#username: "elastic"
#password: "changeme"
# Optional HTTP path.
#path: ""
# Enable custom SSL settings. Set to false to ignore custom SSL settings for secure communication.
#ssl.enabled: true
# Optional SSL configuration options. SSL is off by default, change the `protocol` option if you want to enable `https`.
# Configure SSL verification mode. If `none` is configured, all server hosts
# and certificates will be accepted. In this mode, SSL based connections are
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
# `full`.
#ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
# 1.2 are enabled.
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
# List of root certificates for HTTPS server verifications.
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication.
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#ssl.key: "/etc/pki/client/cert.key"
# Optional passphrase for decrypting the Certificate Key.
# It is recommended to use the provided keystore instead of entering the passphrase in plain text.
#ssl.key_passphrase: ''
# Configure cipher suites to be used for SSL connections.
#ssl.cipher_suites: []
# Configure curve types for ECDHE based cipher suites.
#ssl.curve_types: []
#================================= General =================================
# Data is buffered in a memory queue before it is published to the configured output.
# The memory queue will present all available events (up to the outputs
# bulk_max_size) to the output, the moment the output is ready to serve
# another batch of events.
#queue:
# Queue type by name (default 'mem').
#mem:
# Max number of events the queue can buffer.
#events: 4096
# Hints the minimum number of events stored in the queue,
# before providing a batch of events to the outputs.
# The default value is set to 2048.
# A value of 0 ensures events are immediately available
# to be sent to the outputs.
#flush.min_events: 2048
# Maximum duration after which events are available to the outputs,
# if the number of events stored in the queue is < `flush.min_events`.
#flush.timeout: 1s
# Sets the maximum number of CPUs that can be executing simultaneously. The
# default is the number of logical CPUs available in the system.
#max_procs:
#================================= Template =================================
# A template is used to set the mapping in Elasticsearch.
# By default template loading is enabled and the template is loaded.
# These settings can be adjusted to load your own template or overwrite existing ones.
# Set to false to disable template loading.
#setup.template.enabled: true
# Template name. By default the template name is "apm-%{[observer.version]}"
# The template name and pattern has to be set in case the elasticsearch index pattern is modified.
#setup.template.name: "apm-%{[observer.version]}"
# Template pattern. By default the template pattern is "apm-%{[observer.version]}-*" to apply to the default index settings.
# The first part is the version of apm-server and then -* is used to match all daily indices.
# The template name and pattern has to be set in case the elasticsearch index pattern is modified.
#setup.template.pattern: "apm-%{[observer.version]}-*"
# Path to fields.yml file to generate the template.
#setup.template.fields: "${path.config}/fields.yml"
# Overwrite existing template.
#setup.template.overwrite: false
# Elasticsearch template settings.
#setup.template.settings:
# A dictionary of settings to place into the settings.index dictionary
# of the Elasticsearch template. For more details, please check
# https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html
#index:
#number_of_shards: 1
#codec: best_compression
#number_of_routing_shards: 30
#mapping.total_fields.limit: 2000
#============================= Elastic Cloud =============================
# These settings simplify using APM Server with the Elastic Cloud (https://cloud.elastic.co/).
# The cloud.id setting overwrites the `output.elasticsearch.hosts` option.
# You can find the `cloud.id` in the Elastic Cloud web UI.
#cloud.id:
# The cloud.auth setting overwrites the `output.elasticsearch.username` and
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
#cloud.auth:
#================================ Outputs =================================
# Configure the output to use when sending the data collected by apm-server.
#-------------------------- Elasticsearch output --------------------------
output.elasticsearch:
# Array of hosts to connect to.
# Scheme and port can be left out and will be set to the default (`http` and `9200`).
# In case you specify an additional path, the scheme is required: `http://localhost:9200/path`.
# IPv6 addresses should always be defined as: `https://[2001:db8::1]:9200`.
hosts: ["elasticsearch:9200"]
# Boolean flag to enable or disable the output module.
#enabled: true
# Set gzip compression level.
#compression_level: 0
# Optional protocol and basic auth credentials.
#protocol: "https"
#username: "elastic"
#password: "changeme"
# Dictionary of HTTP parameters to pass within the url with index operations.
#parameters:
#param1: value1
#param2: value2
# Number of workers per Elasticsearch host.
#worker: 1
# By using the configuration below, APM documents are stored to separate indices,
# depending on their `processor.event`:
# - error
# - transaction
# - span
# - sourcemap
#
# The indices are all prefixed with `apm-%{[observer.version]}`.
# To allow managing indices based on their age, all indices (except for sourcemaps)
# end with the information of the day they got indexed.
# e.g. "apm-7.3.0-transaction-2019.07.20"
#
# Be aware that you can only specify one Elasticsearch template.
# If you modify the index patterns you must also update these configurations accordingly,
# as they need to be aligned:
# * `setup.template.name`
# * `setup.template.pattern`
#index: "apm-%{[observer.version]}-%{+yyyy.MM.dd}"
#indices:
# - index: "apm-%{[observer.version]}-sourcemap"
# when.contains:
# processor.event: "sourcemap"
#
# - index: "apm-%{[observer.version]}-error-%{+yyyy.MM.dd}"
# when.contains:
# processor.event: "error"
#
# - index: "apm-%{[observer.version]}-transaction-%{+yyyy.MM.dd}"
# when.contains:
# processor.event: "transaction"
#
# - index: "apm-%{[observer.version]}-span-%{+yyyy.MM.dd}"
# when.contains:
# processor.event: "span"
#
# - index: "apm-%{[observer.version]}-metric-%{+yyyy.MM.dd}"
# when.contains:
# processor.event: "metric"
#
# - index: "apm-%{[observer.version]}-onboarding-%{+yyyy.MM.dd}"
# when.contains:
# processor.event: "onboarding"
# A pipeline is a definition of processors applied to documents when ingesting them to Elasticsearch.
# APM Server comes with a default pipeline definition, located at `ingest/pipeline/definition.json`, which is
# loaded to Elasticsearch by default (see `apm-server.register.ingest.pipeline`).
# APM pipeline is enabled by default. To disable it, set `pipeline: _none`.
#pipeline: "apm"
# Optional HTTP Path.
#path: "/elasticsearch"
# Custom HTTP headers to add to each request.
#headers:
# X-My-Header: Contents of the header
# Proxy server url.
#proxy_url: http://proxy:3128
# The number of times a particular Elasticsearch index operation is attempted. If
# the indexing operation doesn't succeed after this many retries, the events are
# dropped. The default is 3.
#max_retries: 3
# The maximum number of events to bulk in a single Elasticsearch bulk API index request.
# The default is 50.
#bulk_max_size: 50
# The number of seconds to wait before trying to reconnect to Elasticsearch
# after a network error. After waiting backoff.init seconds, apm-server
# tries to reconnect. If the attempt fails, the backoff timer is increased
# exponentially up to backoff.max. After a successful connection, the backoff
# timer is reset. The default is 1s.
#backoff.init: 1s
# The maximum number of seconds to wait before attempting to connect to
# Elasticsearch after a network error. The default is 60s.
#backoff.max: 60s
# Configure http request timeout before failing an request to Elasticsearch.
#timeout: 90
# Enable custom SSL settings. Set to false to ignore custom SSL settings for secure communication.
#ssl.enabled: true
# Optional SSL configuration options. SSL is off by default, change the `protocol` option if you want to enable `https`.
# Configure SSL verification mode. If `none` is configured, all server hosts
# and certificates will be accepted. In this mode, SSL based connections are
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
# `full`.
#ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
# 1.2 are enabled.
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
# List of root certificates for HTTPS server verifications.
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication.
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#ssl.key: "/etc/pki/client/cert.key"
# Optional passphrase for decrypting the Certificate Key.
# It is recommended to use the provided keystore instead of entering the passphrase in plain text.
#ssl.key_passphrase: ''
# Configure cipher suites to be used for SSL connections.
#ssl.cipher_suites: []
# Configure curve types for ECDHE based cipher suites.
#ssl.curve_types: []
# Configure what types of renegotiation are supported. Valid options are
# never, once, and freely. Default is never.
#ssl.renegotiation: never
#----------------------------- Console output -----------------------------
#output.console:
# Boolean flag to enable or disable the output module.
#enabled: false
# Configure JSON encoding.
#codec.json:
# Pretty-print JSON event.
#pretty: false
# Configure escaping HTML symbols in strings.
#escape_html: false
#---------------------------- Logstash output -----------------------------
#output.logstash:
# Boolean flag to enable or disable the output module.
#enabled: false
# The Logstash hosts.
#hosts: ["localhost:5044"]
# Number of workers per Logstash host.
#worker: 1
# Set gzip compression level.
#compression_level: 3
# Configure escaping html symbols in strings.
#escape_html: true
# Optional maximum time to live for a connection to Logstash, after which the
# connection will be re-established. A value of `0s` (the default) will
# disable this feature.
#
# Not yet supported for async connections (i.e. with the "pipelining" option set).
#ttl: 30s
# Optional load balance the events between the Logstash hosts. Default is false.
#loadbalance: false
# Number of batches to be sent asynchronously to Logstash while processing
# new batches.
#pipelining: 2
# If enabled only a subset of events in a batch of events is transferred per
# group. The number of events to be sent increases up to `bulk_max_size`
# if no error is encountered.
#slow_start: false
# The number of seconds to wait before trying to reconnect to Logstash
# after a network error. After waiting backoff.init seconds, apm-server
# tries to reconnect. If the attempt fails, the backoff timer is increased
# exponentially up to backoff.max. After a successful connection, the backoff
# timer is reset. The default is 1s.
#backoff.init: 1s
# The maximum number of seconds to wait before attempting to connect to
# Logstash after a network error. The default is 60s.
#backoff.max: 60s
# Optional index name. The default index name is set to apm
# in all lowercase.
#index: 'apm'
# SOCKS5 proxy server URL
#proxy_url: socks5://user:password@socks5-server:2233
# Resolve names locally when using a proxy server. Defaults to false.
#proxy_use_local_resolver: false
# Enable SSL support. SSL is automatically enabled if any SSL setting is set.
#ssl.enabled: false
# Optional SSL configuration options. SSL is off by default.
# Configure SSL verification mode. If `none` is configured, all server hosts
# and certificates will be accepted. In this mode, SSL based connections are
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
# `full`.
#ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
# 1.2 are enabled.
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
# List of root certificates for HTTPS server verifications.
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication.
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#ssl.key: "/etc/pki/client/cert.key"
# Optional passphrase for decrypting the Certificate Key.
# It is recommended to use the provided keystore instead of entering the passphrase in plain text.
#ssl.key_passphrase: ''
# Configure cipher suites to be used for SSL connections.
#ssl.cipher_suites: []
# Configure curve types for ECDHE based cipher suites.
#ssl.curve_types: []
# Configure what types of renegotiation are supported. Valid options are
# never, once, and freely. Default is never.
#ssl.renegotiation: never
#------------------------------ Kafka output ------------------------------
#output.kafka:
# Boolean flag to enable or disable the output module.
#enabled: false
# The list of Kafka broker addresses from where to fetch the cluster metadata.
# The cluster metadata contain the actual Kafka brokers events are published
# to.
#hosts: ["localhost:9092"]
# The Kafka topic used for produced events. The setting can be a format string
# using any event field. To set the topic from document type use `%{[type]}`.
#topic: beats
# The Kafka event key setting. Use format string to create unique event key.
# By default no event key will be generated.
#key: ''
# The Kafka event partitioning strategy. Default hashing strategy is `hash`
# using the `output.kafka.key` setting or randomly distributes events if
# `output.kafka.key` is not configured.
#partition.hash:
# If enabled, events will only be published to partitions with reachable
# leaders. Default is false.
#reachable_only: false
# Configure alternative event field names used to compute the hash value.
# If empty `output.kafka.key` setting will be used.
# Default value is empty list.
#hash: []
# Authentication details. Password is required if username is set.
#username: ''
#password: ''
# Kafka version libbeat is assumed to run against. Defaults to the "1.0.0".
#version: '1.0.0'
# Configure JSON encoding.
#codec.json:
# Pretty print json event
#pretty: false
# Configure escaping html symbols in strings.
#escape_html: true
# Metadata update configuration. The metadata contain the leader information
# that decides which broker to use when publishing.
#metadata:
# Max metadata request retry attempts when cluster is in middle of leader
# election. Defaults to 3 retries.
#retry.max: 3
# Waiting time between retries during leader elections. Default is 250ms.
#retry.backoff: 250ms
# Refresh metadata interval. Defaults to every 10 minutes.
#refresh_frequency: 10m
# The number of concurrent load-balanced Kafka output workers.
#worker: 1
# The number of times to retry publishing an event after a publishing failure.
# After the specified number of retries, the events are typically dropped.
# Set max_retries to a value less than 0 to retry
# until all events are published. The default is 3.
#max_retries: 3
# The maximum number of events to bulk in a single Kafka request. The default
# is 2048.
#bulk_max_size: 2048
# The number of seconds to wait for responses from the Kafka brokers before
# timing out. The default is 30s.
#timeout: 30s
# The maximum duration a broker will wait for number of required ACKs. The
# default is 10s.
#broker_timeout: 10s
# The number of messages buffered for each Kafka broker. The default is 256.
#channel_buffer_size: 256
# The keep-alive period for an active network connection. If 0s, keep-alives
# are disabled. The default is 0 seconds.
#keep_alive: 0
# Sets the output compression codec. Must be one of none, snappy and gzip. The
# default is gzip.
#compression: gzip
# Set the compression level. Currently only gzip provides a compression level
# between 0 and 9. The default value is chosen by the compression algorithm.
#compression_level: 4
# The maximum permitted size of JSON-encoded messages. Bigger messages will be
# dropped. The default value is 1000000 (bytes). This value should be equal to
# or less than the broker's message.max.bytes.
#max_message_bytes: 1000000
# The ACK reliability level required from broker. 0=no response, 1=wait for
# local commit, -1=wait for all replicas to commit. The default is 1. Note:
# If set to 0, no ACKs are returned by Kafka. Messages might be lost silently
# on error.
#required_acks: 1
# The configurable ClientID used for logging, debugging, and auditing
# purposes. The default is "beats".
#client_id: beats
# Enable SSL support. SSL is automatically enabled if any SSL setting is set.
#ssl.enabled: false
# Optional SSL configuration options. SSL is off by default.
# Configure SSL verification mode. If `none` is configured, all server hosts
# and certificates will be accepted. In this mode, SSL based connections are
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
# `full`.
#ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
# 1.2 are enabled.
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
# List of root certificates for HTTPS server verifications.
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication.
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#ssl.key: "/etc/pki/client/cert.key"
# Optional passphrase for decrypting the Certificate Key.
# It is recommended to use the provided keystore instead of entering the passphrase in plain text.
#ssl.key_passphrase: ''
# Configure cipher suites to be used for SSL connections.
#ssl.cipher_suites: []
# Configure curve types for ECDHE based cipher suites.
#ssl.curve_types: []
# Configure what types of renegotiation are supported. Valid options are
# never, once, and freely. Default is never.
#ssl.renegotiation: never
#================================= Paths ==================================
# The home path for the apm-server installation. This is the default base path
# for all other path settings and for miscellaneous files that come with the
# distribution.
# If not set by a CLI flag or in the configuration file, the default for the
# home path is the location of the binary.
#path.home:
# The configuration path for the apm-server installation. This is the default
# base path for configuration files, including the main YAML configuration file
# and the Elasticsearch template file. If not set by a CLI flag or in the
# configuration file, the default for the configuration path is the home path.
#path.config: ${path.home}
# The data path for the apm-server installation. This is the default base path
# for all the files in which apm-server needs to store its data. If not set by a
# CLI flag or in the configuration file, the default for the data path is a data
# subdirectory inside the home path.
#path.data: ${path.home}/data
# The logs path for an apm-server installation. If not set by a CLI flag or in the
# configuration file, the default is a logs subdirectory inside the home path.
#path.logs: ${path.home}/logs
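# Example (illustrative layout): keeping mutable state out of the install dir.
#path.home: /usr/share/apm-server
#path.data: /var/lib/apm-server
#path.logs: /var/log/apm-server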
#================================= Logging =================================
# There are three options for the log output: syslog, file, and stderr.
# Windows systems default to file output. All other systems default to syslog.
# Sets the minimum log level. The default log level is info.
# Available log levels are: error, warning, info, or debug.
#logging.level: info
# Enable debug output for selected components. To enable all selectors use ["*"].
# Other available selectors are "beat", "publish", or "service".
# Multiple selectors can be chained.
#logging.selectors: [ ]
# Send all logging output to syslog. The default is false.
#logging.to_syslog: true
# If enabled, apm-server periodically logs its internal metrics that have changed
# in the last period. For each metric that changed, the delta from the value at
# the beginning of the period is logged. Also, the total values for
# all non-zero internal metrics are logged on shutdown. The default is false.
#logging.metrics.enabled: false
# The period after which to log the internal metrics. The default is 30s.
#logging.metrics.period: 30s
# Logging to rotating files. When true, writes all logging output to files.
# The log files are automatically rotated when the log file size limit is reached.
#logging.to_files: true
#logging.files:
# Configure the path where the logs are written. The default is the logs directory
# under the home path (the binary location).
#path: /var/log/apm-server
# The name of the files where the logs are written to.
#name: apm-server
# Configure log file size limit. If limit is reached, log file will be
# automatically rotated.
#rotateeverybytes: 10485760 # = 10MB
# Number of rotated log files to keep. Oldest files will be deleted first.
#keepfiles: 7
# The permissions mask to apply when rotating log files. The default value is 0600.
# Must be a valid Unix-style file permissions mask expressed in octal notation.
#permissions: 0600
# Enable log file rotation on time intervals in addition to size-based rotation.
# Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h
# are boundary-aligned with minutes, hours, days, weeks, months, and years as
# reported by the local system clock. All other intervals are calculated from the
# Unix epoch. Defaults to disabled.
#interval: 0
# Set to true to log messages in json format.
#logging.json: false
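# Example (illustrative): JSON logs written to rotated files, seven 10MB files
# kept, using only the options documented above.
#logging.level: info
#logging.json: true
#logging.to_files: true
#logging.files:
#  path: /var/log/apm-server
#  name: apm-server
#  rotateeverybytes: 10485760
#  keepfiles: 7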
#=============================== HTTP Endpoint ===============================
# apm-server can expose internal metrics through a HTTP endpoint. For security
# reasons the endpoint is disabled by default. This feature is currently experimental.
# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output
# append ?pretty to the URL.
# Defines if the HTTP endpoint is enabled.
#http.enabled: false
# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost.
#http.host: localhost
# Port on which the HTTP endpoint will bind. Default is 5066.
#http.port: 5066
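# Example (illustrative): enable the endpoint on the local interface, then
# query it with: curl "http://localhost:5066/stats?pretty"
#http.enabled: true
#http.host: localhost
#http.port: 5066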
#============================= X-pack Monitoring =============================
# APM server can export internal metrics to a central Elasticsearch monitoring
# cluster. This requires x-pack monitoring to be enabled in Elasticsearch. The
# reporting is disabled by default.
# Set to true to enable the monitoring reporter.
#monitoring.enabled: false
# Most settings from the Elasticsearch output are accepted here as well.
# Note that these settings should be configured to point to your Elasticsearch *monitoring* cluster.
# Any setting that is not set is automatically inherited from the Elasticsearch
# output configuration. This means that if you have the Elasticsearch output configured,
# you can simply uncomment the following line.
#monitoring.elasticsearch:
# Optional protocol and basic auth credentials.
#protocol: "https"
#username: "apm_system"
#password: ""
# Array of hosts to connect to.
# Scheme and port can be left out and will be set to the default (`http` and `9200`).
# If you specify an additional path, the scheme is required: `http://localhost:9200/path`.
# IPv6 addresses should always be defined as: `https://[2001:db8::1]:9200`.
#hosts: ["localhost:9200"]
# Set gzip compression level.
#compression_level: 0
# Dictionary of HTTP parameters to pass within the URL with index operations.
#parameters:
#param1: value1
#param2: value2
# Custom HTTP headers to add to each request.
#headers:
# X-My-Header: Contents of the header
# Proxy server url.
#proxy_url: http://proxy:3128
# The number of times a particular Elasticsearch index operation is attempted. If
# the indexing operation doesn't succeed after this many retries, the events are
# dropped. The default is 3.
#max_retries: 3
# The maximum number of events to bulk in a single Elasticsearch bulk API index request.
# The default is 50.
#bulk_max_size: 50
# The number of seconds to wait before trying to reconnect to Elasticsearch
# after a network error. After waiting backoff.init seconds, apm-server
# tries to reconnect. If the attempt fails, the backoff timer is increased
# exponentially up to backoff.max. After a successful connection, the backoff
# timer is reset. The default is 1s.
#backoff.init: 1s
# The maximum number of seconds to wait before attempting to connect to
# Elasticsearch after a network error. The default is 60s.
#backoff.max: 60s
# Configure the HTTP request timeout before failing a request to Elasticsearch.
#timeout: 90
# Enable custom SSL settings. Set to false to ignore custom SSL settings for secure communication.
#ssl.enabled: true
# Optional SSL configuration options. SSL is off by default; change the `protocol` option to `https` to enable it.
# Configure SSL verification mode. If `none` is configured, all server hosts
# and certificates will be accepted. In this mode, SSL based connections are
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
# `full`.
#ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
# 1.2 are enabled.
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
# List of root certificates for HTTPS server verifications.
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication.
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#ssl.key: "/etc/pki/client/cert.key"
# Optional passphrase for decrypting the Certificate Key.
# It is recommended to use the provided keystore instead of entering the passphrase in plain text.
#ssl.key_passphrase: ''
# Configure cipher suites to be used for SSL connections.
#ssl.cipher_suites: []
# Configure curve types for ECDHE based cipher suites.
#ssl.curve_types: []
# Configure what types of renegotiation are supported. Valid options are
# never, once, and freely. Default is never.
#ssl.renegotiation: never
#metrics.period: 10s
#state.period: 1m
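# Example (illustrative hosts and credentials): ship internal metrics to a
# dedicated monitoring cluster over HTTPS, using only the options documented
# above.
#monitoring.enabled: true
#monitoring.elasticsearch:
#  protocol: "https"
#  hosts: ["monitoring-es:9200"]
#  username: "apm_system"
#  password: "changeme"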

View File

@ -1,30 +0,0 @@
apm-server:
host: "0.0.0.0:8200"
output.elasticsearch:
pipeline: _none
#index: "apm-%{[observer.version]}-%{+yyyy.MM.dd}"
#indices:
# - index: "apm-%{[observer.version]}-sourcemap"
# when.contains:
# processor.event: "sourcemap"
#
# - index: "apm-%{[observer.version]}-error-%{+yyyy.MM.dd}"
# when.contains:
# processor.event: "error"
#
# - index: "apm-%{[observer.version]}-transaction-%{+yyyy.MM.dd}"
# when.contains:
# processor.event: "transaction"
#
# - index: "apm-%{[observer.version]}-span-%{+yyyy.MM.dd}"
# when.contains:
# processor.event: "span"
#
# - index: "apm-%{[observer.version]}-metric-%{+yyyy.MM.dd}"
# when.contains:
# processor.event: "metric"
#
# - index: "apm-%{[observer.version]}-onboarding-%{+yyyy.MM.dd}"
# when.contains:
# processor.event: "onboarding"

View File

@ -1,98 +0,0 @@
[
{
"_id": "5f08a870-7c6a-11e7-aa55-3b0d52c71c60",
"_migrationVersion": {
"dashboard": "7.0.0"
},
"_references": [
{
"id": "c618e4e0-7c69-11e7-aa55-3b0d52c71c60",
"name": "panel_0",
"type": "visualization"
},
{
"id": "ceefd050-7c6a-11e7-aa55-3b0d52c71c60",
"name": "panel_1",
"type": "search"
}
],
"_source": {
"description": "",
"hits": 0,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"version\":true,\"highlightAll\":true}"
},
"optionsJSON": "{\"darkTheme\": false}",
"panelsJSON": "[{\"size_x\":12,\"size_y\":3,\"panelIndex\":1,\"col\":1,\"row\":1,\"panelRefName\":\"panel_0\"},{\"sort\":[\"@timestamp\",\"desc\"],\"size_x\":12,\"size_y\":21,\"panelIndex\":2,\"col\":1,\"columns\":[\"error.culprit\",\"error.exception.type\",\"error.exception.message\",\"error.log.message\",\"error.exception.handled\",\"service.name\"],\"row\":4,\"panelRefName\":\"panel_1\"}]",
"timeRestore": false,
"title": "[APM] Error Details",
"uiStateJSON": "{}",
"version": 1
},
"_type": "dashboard"
},
{
"_id": "c618e4e0-7c69-11e7-aa55-3b0d52c71c60",
"_migrationVersion": {
"visualization": "7.0.0"
},
"_references": [
{
"id": "apm-*",
"name": "kibanaSavedObjectMeta.searchSourceJSON.index",
"type": "index-pattern"
}
],
"_source": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.index\"}"
},
"title": "Error Occurrences [APM]",
"uiStateJSON": "{}",
"version": 1,
"visState": "{\"params\":{\"time_field\":\"@timestamp\",\"series\":[{\"line_width\":\"0\",\"terms_field\":\"error.grouping_key\",\"point_size\":1,\"color\":\"rgba(0,156,224,1)\",\"label\":\"Occurrences\",\"metrics\":[{\"type\":\"count\",\"id\":\"61ca57f2-469d-11e7-af02-69e470af7417\"}],\"seperate_axis\":0,\"split_mode\":\"terms\",\"chart_type\":\"bar\",\"stacked\":\"none\",\"axis_position\":\"right\",\"formatter\":\"number\",\"id\":\"61ca57f1-469d-11e7-af02-69e470af7417\",\"fill\":\"1\"}],\"axis_formatter\":\"number\",\"interval\":\">=1m\",\"filter\":\"processor.event:error\",\"show_legend\":0,\"show_grid\":1,\"axis_position\":\"left\",\"type\":\"timeseries\",\"id\":\"61ca57f0-469d-11e7-af02-69e470af7417\"},\"type\":\"metrics\",\"aggs\":[],\"title\":\"Error Occurrences [APM]\"}"
},
"_type": "visualization"
},
{
"_id": "ceefd050-7c6a-11e7-aa55-3b0d52c71c60",
"_migrationVersion": {
"search": "7.0.0"
},
"_references": [
{
"id": "apm-*",
"name": "kibanaSavedObjectMeta.searchSourceJSON.index",
"type": "index-pattern"
},
{
"id": "apm-*",
"name": "kibanaSavedObjectMeta.searchSourceJSON.filter[0].meta.index",
"type": "index-pattern"
}
],
"_source": {
"columns": [
"error.culprit",
"error.exception.type",
"error.exception.message",
"error.log.message",
"error.exception.handled",
"service.name"
],
"description": "",
"hits": 0,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[{\"query\":{\"match\":{\"processor.event\":{\"query\":\"error\",\"type\":\"phrase\"}}},\"meta\":{\"value\":\"error\",\"disabled\":false,\"alias\":null,\"params\":{\"query\":\"error\",\"type\":\"phrase\"},\"key\":\"processor.event\",\"negate\":false,\"type\":\"phrase\",\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.filter[0].meta.index\"},\"$state\":{\"store\":\"appState\"}}],\"version\":true,\"highlightAll\":true,\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.index\"}"
},
"sort": [
"@timestamp",
"desc"
],
"title": "Error Details [APM]",
"version": 1
},
"_type": "search"
}
]

View File

@ -1,82 +0,0 @@
[
{
"_id": "37f6fac0-7c6a-11e7-aa55-3b0d52c71c60",
"_migrationVersion": {
"dashboard": "7.0.0"
},
"_references": [
{
"id": "22518e70-7c69-11e7-aa55-3b0d52c71c60",
"name": "panel_0",
"type": "visualization"
},
{
"id": "c618e4e0-7c69-11e7-aa55-3b0d52c71c60",
"name": "panel_1",
"type": "visualization"
}
],
"_source": {
"description": "",
"hits": 0,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"version\":true,\"highlightAll\":true}"
},
"optionsJSON": "{\"darkTheme\": false}",
"panelsJSON": "[{\"size_x\":12,\"size_y\":10,\"panelIndex\":1,\"col\":1,\"row\":4,\"panelRefName\":\"panel_0\"},{\"size_x\":12,\"size_y\":3,\"panelIndex\":2,\"col\":1,\"row\":1,\"panelRefName\":\"panel_1\"}]",
"timeRestore": false,
"title": "[APM] Errors",
"uiStateJSON": "{\"P-1\": {\"vis\": {\"params\": {\"sort\": {\"columnIndex\": null, \"direction\": null}}}}}",
"version": 1
},
"_type": "dashboard"
},
{
"_id": "22518e70-7c69-11e7-aa55-3b0d52c71c60",
"_migrationVersion": {
"visualization": "7.0.0"
},
"_references": [
{
"id": "apm-*",
"name": "kibanaSavedObjectMeta.searchSourceJSON.index",
"type": "index-pattern"
}
],
"_source": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.index\"}"
},
"title": "Top Errors for Time Period [APM]",
"uiStateJSON": "{\"vis\": {\"params\": {\"sort\": {\"columnIndex\": null, \"direction\": null}}}}",
"version": 1,
"visState": "{\"title\":\"Top Errors for Time Period [APM]\",\"type\":\"table\",\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null},\"perPage\":25,\"showPartialRows\":false,\"totalFunc\":\"sum\",\"showTotal\":false,\"showMetricsAtAllLevels\":false,\"dimensions\":{\"metrics\":[{\"accessor\":1,\"format\":{\"id\":\"string\"},\"params\":{},\"aggType\":\"top_hits\"},{\"accessor\":2,\"format\":{\"id\":\"number\"},\"params\":{},\"aggType\":\"count\"},{\"accessor\":3,\"format\":{\"id\":\"string\"},\"params\":{},\"aggType\":\"top_hits\"},{\"accessor\":4,\"format\":{\"id\":\"string\"},\"params\":{},\"aggType\":\"top_hits\"},{\"accessor\":5,\"format\":{\"id\":\"string\"},\"params\":{},\"aggType\":\"top_hits\"}],\"buckets\":[{\"accessor\":0,\"format\":{\"id\":\"terms\",\"params\":{\"id\":\"string\",\"otherBucketLabel\":\"Other\",\"missingBucketLabel\":\"Missing\"}},\"params\":{},\"aggType\":\"terms\"}]}},\"aggs\":[{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"bucket\",\"params\":{\"field\":\"error.culprit\",\"size\":100,\"order\":\"desc\",\"orderBy\":\"1\",\"otherBucket\":false,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"customLabel\":\"Error Culprit\"}},{\"id\":\"5\",\"enabled\":true,\"type\":\"top_hits\",\"schema\":\"metric\",\"params\":{\"field\":\"error.exception.message\",\"aggregate\":\"concat\",\"size\":1,\"sortField\":\"@timestamp\",\"sortOrder\":\"desc\",\"customLabel\":\"Message\"}},{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{\"customLabel\":\"Number of Errors\"}},{\"id\":\"4\",\"enabled\":true,\"type\":\"top_hits\",\"schema\":\"metric\",\"params\":{\"field\":\"error.exception.type\",\"aggregate\":\"concat\",\"size\":1,\"sortField\":\"@timestamp\",\"sortOrder\":\"desc\",\"customLabel\":\"Type\"}},{\"id\":\"6\",\"enabled\":true,\"type\":\"top_hits\",\"schema\":\"metric\",\"params\":{\"field\":\"service.name\",\"aggregate\":\"concat\",\"size\":1,\"sortField\":\"@timestamp\",\"sortOrder\":\"desc\",\"customLabel\":\"App Name\"}},{\"id\":\"7\",\"enabled\":true,\"type\":\"top_hits\",\"schema\":\"metric\",\"params\":{\"field\":\"error.grouping_key\",\"aggregate\":\"concat\",\"size\":1,\"sortField\":\"@timestamp\",\"sortOrder\":\"desc\",\"customLabel\":\"Error Grouping Key\"}}]}"
},
"_type": "visualization"
},
{
"_id": "c618e4e0-7c69-11e7-aa55-3b0d52c71c60",
"_migrationVersion": {
"visualization": "7.0.0"
},
"_references": [
{
"id": "apm-*",
"name": "kibanaSavedObjectMeta.searchSourceJSON.index",
"type": "index-pattern"
}
],
"_source": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.index\"}"
},
"title": "Error Occurrences [APM]",
"uiStateJSON": "{}",
"version": 1,
"visState": "{\"params\":{\"time_field\":\"@timestamp\",\"series\":[{\"line_width\":\"0\",\"terms_field\":\"error.grouping_key\",\"point_size\":1,\"color\":\"rgba(0,156,224,1)\",\"label\":\"Occurrences\",\"metrics\":[{\"type\":\"count\",\"id\":\"61ca57f2-469d-11e7-af02-69e470af7417\"}],\"seperate_axis\":0,\"split_mode\":\"terms\",\"chart_type\":\"bar\",\"stacked\":\"none\",\"axis_position\":\"right\",\"formatter\":\"number\",\"id\":\"61ca57f1-469d-11e7-af02-69e470af7417\",\"fill\":\"1\"}],\"axis_formatter\":\"number\",\"interval\":\">=1m\",\"filter\":\"processor.event:error\",\"show_legend\":0,\"show_grid\":1,\"axis_position\":\"left\",\"type\":\"timeseries\",\"id\":\"61ca57f0-469d-11e7-af02-69e470af7417\"},\"type\":\"metrics\",\"aggs\":[],\"title\":\"Error Occurrences [APM]\"}"
},
"_type": "visualization"
}
]

View File

@ -1,111 +0,0 @@
[
{
"_id": "8d3ed660-7828-11e7-8c47-65b845b5cfb3",
"_migrationVersion": {
"dashboard": "7.0.0"
},
"_references": [
{
"id": "1ffc5e20-7827-11e7-8c47-65b845b5cfb3",
"name": "panel_0",
"type": "visualization"
},
{
"id": "1bdca740-7828-11e7-8c47-65b845b5cfb3",
"name": "panel_1",
"type": "visualization"
},
{
"id": "804ffc40-7828-11e7-8c47-65b845b5cfb3",
"name": "panel_2",
"type": "visualization"
}
],
"_source": {
"description": "",
"hits": 0,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"version\":true,\"highlightAll\":true}"
},
"optionsJSON": "{\"darkTheme\": false}",
"panelsJSON": "[{\"size_x\":12,\"size_y\":5,\"panelIndex\":1,\"col\":1,\"row\":4,\"panelRefName\":\"panel_0\"},{\"size_x\":6,\"size_y\":3,\"panelIndex\":2,\"col\":1,\"row\":1,\"panelRefName\":\"panel_1\"},{\"size_x\":6,\"size_y\":3,\"panelIndex\":3,\"col\":7,\"row\":1,\"panelRefName\":\"panel_2\"}]",
"timeRestore": false,
"title": "[APM] Services",
"uiStateJSON": "{\"P-1\": {\"vis\": {\"params\": {\"sort\": {\"columnIndex\": null, \"direction\": null}}}}}",
"version": 1
},
"_type": "dashboard"
},
{
"_id": "1ffc5e20-7827-11e7-8c47-65b845b5cfb3",
"_migrationVersion": {
"visualization": "7.0.0"
},
"_references": [
{
"id": "apm-*",
"name": "kibanaSavedObjectMeta.searchSourceJSON.index",
"type": "index-pattern"
}
],
"_source": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.index\"}"
},
"title": "Services [APM]",
"uiStateJSON": "{\"vis\": {\"params\": {\"sort\": {\"columnIndex\": null, \"direction\": null}}}}",
"version": 1,
"visState": "{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null},\"perPage\":10,\"showPartialRows\":false,\"totalFunc\":\"sum\",\"showTotal\":false,\"showMeticsAtAllLevels\":false},\"type\":\"table\",\"aggs\":[{\"type\":\"avg\",\"enabled\":true,\"id\":\"1\",\"params\":{\"field\":\"transaction.duration.us\",\"customLabel\":\"Avg. Trans. Time\"},\"schema\":\"metric\"},{\"type\":\"percentiles\",\"enabled\":true,\"id\":\"3\",\"params\":{\"field\":\"transaction.duration.us\",\"percents\":[95],\"customLabel\":\"Trans. Time\"},\"schema\":\"metric\"},{\"type\":\"cardinality\",\"enabled\":true,\"id\":\"4\",\"params\":{\"field\":\"transaction.id\",\"customLabel\":\"Total Transactions\"},\"schema\":\"metric\"},{\"type\":\"cardinality\",\"enabled\":true,\"id\":\"6\",\"params\":{\"field\":\"error.id\",\"customLabel\":\"Errors\"},\"schema\":\"metric\"},{\"type\":\"terms\",\"enabled\":true,\"id\":\"2\",\"params\":{\"orderBy\":\"1\",\"field\":\"service.name\",\"order\":\"desc\",\"size\":1000},\"schema\":\"bucket\"}],\"title\":\"Services [APM]\"}"
},
"_type": "visualization"
},
{
"_id": "1bdca740-7828-11e7-8c47-65b845b5cfb3",
"_migrationVersion": {
"visualization": "7.0.0"
},
"_references": [
{
"id": "apm-*",
"name": "kibanaSavedObjectMeta.searchSourceJSON.index",
"type": "index-pattern"
}
],
"_source": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.index\"}"
},
"title": "Top Services by Transaction Time [APM]",
"uiStateJSON": "{}",
"version": 1,
"visState": "{\"params\":{\"time_field\":\"@timestamp\",\"series\":[{\"line_width\":\"2\",\"terms_field\":\"service.name\",\"point_size\":1,\"color\":\"rgba(0,156,224,1)\",\"value_template\":\"{{value}} ms\",\"metrics\":[{\"field\":\"transaction.duration.us\",\"type\":\"avg\",\"id\":\"61ca57f2-469d-11e7-af02-69e470af7417\"}],\"seperate_axis\":0,\"split_mode\":\"terms\",\"chart_type\":\"line\",\"terms_order_by\":\"61ca57f2-469d-11e7-af02-69e470af7417\",\"stacked\":\"none\",\"axis_position\":\"right\",\"formatter\":\"us,ms,0\",\"id\":\"61ca57f1-469d-11e7-af02-69e470af7417\",\"split_color_mode\":\"gradient\",\"fill\":\"0\"}],\"axis_formatter\":\"number\",\"interval\":\">=1m\",\"show_legend\":1,\"show_grid\":1,\"axis_position\":\"left\",\"type\":\"timeseries\",\"id\":\"61ca57f0-469d-11e7-af02-69e470af7417\"},\"type\":\"metrics\",\"aggs\":[],\"title\":\"Top Services by Transaction Time [APM]\"}"
},
"_type": "visualization"
},
{
"_id": "804ffc40-7828-11e7-8c47-65b845b5cfb3",
"_migrationVersion": {
"visualization": "7.0.0"
},
"_references": [
{
"id": "apm-*",
"name": "kibanaSavedObjectMeta.searchSourceJSON.index",
"type": "index-pattern"
}
],
"_source": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.index\"}"
},
"title": "Top Services by Transaction Per Minute [APM]",
"uiStateJSON": "{}",
"version": 1,
"visState": "{\"params\":{\"time_field\":\"@timestamp\",\"series\":[{\"line_width\":\"2\",\"terms_field\":\"service.name\",\"point_size\":1,\"color\":\"rgba(0,156,224,1)\",\"value_template\":\"{{value}} tpm\",\"metrics\":[{\"type\":\"count\",\"id\":\"61ca57f2-469d-11e7-af02-69e470af7417\"},{\"field\":\"61ca57f2-469d-11e7-af02-69e470af7417\",\"type\":\"cumulative_sum\",\"id\":\"3fcaa6c0-7828-11e7-bb25-2ff6dee07a1b\"},{\"field\":\"3fcaa6c0-7828-11e7-bb25-2ff6dee07a1b\",\"type\":\"derivative\",\"id\":\"467f1cd0-7828-11e7-bb25-2ff6dee07a1b\",\"unit\":\"1m\"},{\"field\":\"467f1cd0-7828-11e7-bb25-2ff6dee07a1b\",\"type\":\"positive_only\",\"id\":\"4bd1b8f0-7828-11e7-bb25-2ff6dee07a1b\",\"unit\":\"\"}],\"seperate_axis\":0,\"split_mode\":\"terms\",\"chart_type\":\"line\",\"terms_order_by\":\"_count\",\"stacked\":\"none\",\"axis_position\":\"right\",\"formatter\":\"number\",\"id\":\"61ca57f1-469d-11e7-af02-69e470af7417\",\"fill\":\"0\"}],\"axis_formatter\":\"number\",\"interval\":\">=1m\",\"show_legend\":1,\"show_grid\":1,\"axis_position\":\"left\",\"type\":\"timeseries\",\"id\":\"61ca57f0-469d-11e7-af02-69e470af7417\"},\"type\":\"metrics\",\"aggs\":[],\"title\":\"Top Apps by Transaction Per Minute [APM]\"}"
},
"_type": "visualization"
}
]

View File

@ -1,67 +0,0 @@
[
{
"_id": "3e3de700-7de0-11e7-b115-df9c90da2df1",
"_migrationVersion": {
"dashboard": "7.0.0"
},
"_references": [
{
"id": "d7735b90-7ddf-11e7-b115-df9c90da2df1",
"name": "panel_0",
"type": "search"
}
],
"_source": {
"description": "",
"hits": 0,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"version\":true,\"highlightAll\":true}"
},
"optionsJSON": "{\"darkTheme\": false}",
"panelsJSON": "[{\"sort\":[\"span.start.us\",\"asc\"],\"col\":1,\"size_x\":12,\"size_y\":23,\"panelIndex\":1,\"columns\":[\"span.type\",\"span.name\",\"span.duration.us\",\"span.start.us\"],\"row\":1,\"panelRefName\":\"panel_0\"}]",
"timeRestore": false,
"title": "[APM] Span Details",
"uiStateJSON": "{}",
"version": 1
},
"_type": "dashboard"
},
{
"_id": "d7735b90-7ddf-11e7-b115-df9c90da2df1",
"_migrationVersion": {
"search": "7.0.0"
},
"_references": [
{
"id": "apm-*",
"name": "kibanaSavedObjectMeta.searchSourceJSON.index",
"type": "index-pattern"
},
{
"id": "apm-*",
"name": "kibanaSavedObjectMeta.searchSourceJSON.filter[0].meta.index",
"type": "index-pattern"
}
],
"_source": {
"columns": [
"span.type",
"span.name",
"span.duration.us",
"span.start.us"
],
"description": "",
"hits": 0,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[{\"query\":{\"match\":{\"processor.event\":{\"query\":\"span\",\"type\":\"phrase\"}}},\"meta\":{\"value\":\"span\",\"disabled\":false,\"alias\":null,\"params\":{\"query\":\"span\",\"type\":\"phrase\"},\"key\":\"processor.event\",\"negate\":false,\"type\":\"phrase\",\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.filter[0].meta.index\"},\"$state\":{\"store\":\"appState\"}}],\"version\":true,\"highlightAll\":true,\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.index\"}"
},
"sort": [
"span.start.us",
"asc"
],
"title": "Spans [APM]",
"version": 1
},
"_type": "search"
}
]

View File

@ -1,111 +0,0 @@
[
{
"_id": "41b5d920-7821-11e7-8c47-65b845b5cfb3",
"_migrationVersion": {
"dashboard": "7.0.0"
},
"_references": [
{
"id": "a2e199b0-7820-11e7-8c47-65b845b5cfb3",
"name": "panel_0",
"type": "visualization"
},
{
"id": "09bcf890-7822-11e7-8c47-65b845b5cfb3",
"name": "panel_1",
"type": "visualization"
},
{
"id": "55606a60-7823-11e7-8c47-65b845b5cfb3",
"name": "panel_2",
"type": "visualization"
}
],
"_source": {
"description": "",
"hits": 0,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"version\":true,\"highlightAll\":true}"
},
"optionsJSON": "{\"darkTheme\": false}",
"panelsJSON": "[{\"size_x\":12,\"size_y\":10,\"panelIndex\":1,\"col\":1,\"row\":4,\"panelRefName\":\"panel_0\"},{\"size_x\":6,\"size_y\":3,\"panelIndex\":2,\"col\":1,\"row\":1,\"panelRefName\":\"panel_1\"},{\"size_x\":6,\"size_y\":3,\"panelIndex\":3,\"col\":7,\"row\":1,\"panelRefName\":\"panel_2\"}]",
"timeRestore": false,
"title": "[APM] Transactions",
"uiStateJSON": "{\"P-1\": {\"vis\": {\"params\": {\"sort\": {\"columnIndex\": null, \"direction\": null}}}}}",
"version": 1
},
"_type": "dashboard"
},
{
"_id": "a2e199b0-7820-11e7-8c47-65b845b5cfb3",
"_migrationVersion": {
"visualization": "7.0.0"
},
"_references": [
{
"id": "apm-*",
"name": "kibanaSavedObjectMeta.searchSourceJSON.index",
"type": "index-pattern"
}
],
"_source": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.index\"}"
},
"title": "Top Transactions for Time Period [APM]",
"uiStateJSON": "{\"vis\": {\"params\": {\"sort\": {\"columnIndex\": null, \"direction\": null}}}}",
"version": 1,
"visState": "{\"type\":\"table\",\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null},\"perPage\":25,\"showPartialRows\":false,\"totalFunc\":\"sum\",\"showTotal\":false,\"showMeticsAtAllLevels\":false},\"aggs\":[{\"params\":{\"orderBy\":\"1\",\"field\":\"transaction.name\",\"customLabel\":\"Transaction\",\"order\":\"desc\",\"size\":1000},\"type\":\"terms\",\"enabled\":true,\"id\":\"2\",\"schema\":\"bucket\"},{\"params\":{\"sortField\":\"@timestamp\",\"customLabel\":\"Type\",\"field\":\"transaction.type\",\"sortOrder\":\"desc\",\"aggregate\":\"concat\",\"size\":1},\"type\":\"top_hits\",\"enabled\":true,\"id\":\"5\",\"schema\":\"metric\"},{\"params\":{\"field\":\"transaction.duration.us\",\"customLabel\":\"Avg. Resp Time (ms)\"},\"type\":\"avg\",\"enabled\":true,\"id\":\"1\",\"schema\":\"metric\"},{\"params\":{\"field\":\"transaction.duration.us\",\"customLabel\":\"Resp Time (ms)\",\"percents\":[95]},\"type\":\"percentiles\",\"enabled\":true,\"id\":\"3\",\"schema\":\"metric\"},{\"params\":{\"sortField\":\"@timestamp\",\"customLabel\":\"View Spans\",\"field\":\"transaction.id\",\"sortOrder\":\"desc\",\"aggregate\":\"concat\",\"size\":1},\"type\":\"top_hits\",\"enabled\":true,\"id\":\"4\",\"schema\":\"metric\"}],\"title\":\"Top Transactions for Time Period [APM]\"}"
},
"_type": "visualization"
},
{
"_id": "09bcf890-7822-11e7-8c47-65b845b5cfb3",
"_migrationVersion": {
"visualization": "7.0.0"
},
"_references": [
{
"id": "apm-*",
"name": "kibanaSavedObjectMeta.searchSourceJSON.index",
"type": "index-pattern"
}
],
"_source": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.index\"}"
},
"title": "Transaction Times [APM]",
"uiStateJSON": "{}",
"version": 1,
"visState": "{\"params\":{\"time_field\":\"@timestamp\",\"series\":[{\"line_width\":\"2\",\"point_size\":1,\"color\":\"rgba(0,156,224,1)\",\"value_template\":\"{{value}} ms\",\"label\":\"Average\",\"metrics\":[{\"field\":\"transaction.duration.us\",\"type\":\"avg\",\"id\":\"61ca57f2-469d-11e7-af02-69e470af7417\"}],\"seperate_axis\":0,\"split_mode\":\"everything\",\"chart_type\":\"line\",\"stacked\":\"none\",\"axis_position\":\"right\",\"formatter\":\"us,ms,0\",\"id\":\"61ca57f1-469d-11e7-af02-69e470af7417\",\"fill\":\"0\"},{\"line_width\":1,\"point_size\":1,\"color\":\"rgba(115,216,255,1)\",\"value_template\":\"{{value}} ms\",\"label\":\"95th Percentile\",\"metrics\":[{\"field\":\"transaction.duration.us\",\"percentiles\":[{\"mode\":\"line\",\"percentile\":\"\",\"shade\":0.2,\"value\":\"95\",\"id\":\"858ec670-7821-11e7-8745-07eaffcb65e5\"}],\"type\":\"percentile\",\"id\":\"79921481-7821-11e7-8745-07eaffcb65e5\"}],\"seperate_axis\":0,\"split_mode\":\"everything\",\"chart_type\":\"line\",\"stacked\":\"none\",\"axis_position\":\"right\",\"formatter\":\"us,ms,0\",\"id\":\"79921480-7821-11e7-8745-07eaffcb65e5\",\"fill\":0.5},{\"line_width\":\"2\",\"point_size\":1,\"color\":\"rgba(254,146,0,1)\",\"value_template\":\"{{value}} ms\",\"label\":\"99th Percentile\",\"metrics\":[{\"field\":\"transaction.duration.us\",\"percentiles\":[{\"mode\":\"line\",\"percentile\":\"\",\"shade\":0.2,\"value\":\"99\",\"id\":\"858ec670-7821-11e7-8745-07eaffcb65e5\"}],\"type\":\"percentile\",\"id\":\"c1e42de1-7821-11e7-8745-07eaffcb65e5\"}],\"seperate_axis\":0,\"split_mode\":\"everything\",\"chart_type\":\"line\",\"stacked\":\"none\",\"axis_position\":\"right\",\"formatter\":\"us,ms,0\",\"id\":\"c1e42de0-7821-11e7-8745-07eaffcb65e5\",\"fill\":\"0\"}],\"axis_formatter\":\"number\",\"interval\":\">=1m\",\"show_legend\":1,\"show_grid\":1,\"legend_position\":\"right\",\"axis_position\":\"left\",\"type\":\"timeseries\",\"id\":\"61ca57f0-469d-11e7-af02-69e470af7417\"},\"type\":\"metrics\",\"aggs\":[],\"title\":\"Transaction Times [APM]\"}"
},
"_type": "visualization"
},
{
"_id": "55606a60-7823-11e7-8c47-65b845b5cfb3",
"_migrationVersion": {
"visualization": "7.0.0"
},
"_references": [
{
"id": "apm-*",
"name": "kibanaSavedObjectMeta.searchSourceJSON.index",
"type": "index-pattern"
}
],
"_source": {
"description": "",
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.index\"}"
},
"title": "Transaction Per Minute [APM]",
"uiStateJSON": "{}",
"version": 1,
"visState": "{\"params\":{\"time_field\":\"@timestamp\",\"series\":[{\"line_width\":\"2\",\"terms_field\":\"transaction.result\",\"point_size\":\"0\",\"color\":\"rgba(115,216,255,1)\",\"value_template\":\"{{value}} tpm\",\"label\":\"\",\"metrics\":[{\"field\":\"transaction.id\",\"type\":\"cardinality\",\"id\":\"61ca57f2-469d-11e7-af02-69e470af7417\"},{\"field\":\"61ca57f2-469d-11e7-af02-69e470af7417\",\"type\":\"cumulative_sum\",\"id\":\"3fcaa6c0-7828-11e7-bb25-2ff6dee07a1b\"},{\"field\":\"3fcaa6c0-7828-11e7-bb25-2ff6dee07a1b\",\"type\":\"derivative\",\"id\":\"467f1cd0-7828-11e7-bb25-2ff6dee07a1b\",\"unit\":\"1m\"},{\"field\":\"467f1cd0-7828-11e7-bb25-2ff6dee07a1b\",\"type\":\"positive_only\",\"id\":\"4bd1b8f0-7828-11e7-bb25-2ff6dee07a1b\",\"unit\":\"\"}],\"seperate_axis\":0,\"split_mode\":\"everything\",\"chart_type\":\"line\",\"stacked\":\"none\",\"axis_position\":\"right\",\"formatter\":\"number\",\"id\":\"61ca57f1-469d-11e7-af02-69e470af7417\",\"fill\":\"0\"}],\"axis_formatter\":\"number\",\"interval\":\">=1m\",\"show_legend\":0,\"show_grid\":1,\"axis_position\":\"left\",\"type\":\"timeseries\",\"id\":\"61ca57f0-469d-11e7-af02-69e470af7417\"},\"type\":\"metrics\",\"aggs\":[],\"title\":\"Transaction Per Minute [APM]\"}"
},
"_type": "visualization"
}
]

View File

@ -1,9 +0,0 @@
FROM docker.elastic.co/apm/apm-server-oss:7.4.2 as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
# config
# COPY ${DOCKER_BUILD_DIR}/apm-server.yml /usr/share/apm-server/
FROM dist as master
ARG DOCKER_BUILD_DIR

View File

@ -1,26 +0,0 @@
FROM alpine:latest as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
ARG CURATOR_VERSION=5.8.3
RUN apk --no-cache add \
bash \
py-pip \
&& pip install elasticsearch-curator==${CURATOR_VERSION}
COPY ${DOCKER_BUILD_DIR}/docker-entrypoint.sh /
COPY ${DOCKER_BUILD_DIR}/config.yml /etc/curator/
COPY ${DOCKER_BUILD_DIR}/action.yml /etc/curator/
ENTRYPOINT ["/docker-entrypoint.sh"]
FROM dist as master
ARG DOCKER_BUILD_DIR
# install cronlock
ADD https://raw.github.com/kvz/cronlock/master/cronlock /usr/bin/cronlock
RUN chmod +rx /usr/bin/cronlock
# install ssmtp
RUN apk --no-cache add ssmtp && \
echo "FromLineOverride=YES" >> /etc/ssmtp/ssmtp.conf

View File

@ -1,23 +0,0 @@
---
actions:
1:
action: delete_indices
description: >-
Delete indices older than ${UNIT_COUNT:1} ${UNIT:months} based on index name, for apm-*
and logs-* prefixed indices. Ignore the error if the filter does not result in an
actionable list of indices (ignore_empty_list) and exit cleanly.
options:
ignore_empty_list: True
timeout_override:
continue_if_exception: True
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(apm|logs)-.*$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: ${UNIT:months}
unit_count: ${UNIT_COUNT:1}
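# Example (assumed values): delete apm-*/logs-* indices older than 30 days by
# overriding the defaults at run time:
#   UNIT=days UNIT_COUNT=30 curator --config /etc/curator/config.yml /etc/curator/action.yml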

View File

@ -1,18 +0,0 @@
---
client:
hosts: ${HOSTS:elasticsearch}
port: ${PORT:9200}
url_prefix:
use_ssl: ${USE_SSL:False}
certificate:
client_cert:
client_key:
ssl_no_validate: False
http_auth:
timeout: ${TIMEOUT:30}
master_only: ${MASTER_ONLY:False}
logging:
loglevel: ${LOGLEVEL:INFO}
logfile:
logformat: ${LOGFORMAT:default}
blacklist: ['elasticsearch', 'urllib3']

View File

@ -1,16 +0,0 @@
#!/bin/sh
set -eu
trap 'kill -SIGQUIT $PID' INT
CRON_DAILY_COMMAND="/usr/bin/curator --config /etc/curator/config.yml /etc/curator/action.yml"
[ "${DEPLOY:-}" = "true" ] && CRON_DAILY_COMMAND="cronlock ${CRON_DAILY_COMMAND}"
cat > /etc/periodic/daily/curator <<EOF
#!/bin/sh
${CRON_DAILY_COMMAND}
EOF
chmod +x /etc/periodic/daily/curator
[ $# -eq 0 ] && exec crond -f -L/dev/stdout || exec "$@" &
PID=$! && wait
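# Example (illustrative): the generated cron job can also be run by hand inside
# the container to test the action, e.g.:
#   docker exec <curator-container> /etc/periodic/daily/curator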

View File

@ -1,25 +0,0 @@
FROM alpine:latest as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
ARG FABIO_REMOTE=https://github.com/fabiolb/fabio
ARG FABIO_VERSION=1.6.0
ARG SYSTEM=Linux
ARG MACHINE=x86_64
RUN apk update \
&& apk add --no-cache ca-certificates \
&& OS="$(echo ${SYSTEM} |awk '{print tolower($0)}')"; \
ARCH="$(echo ${MACHINE} |awk '/x86_64/ {print "amd64"}; /aarch64/ {print "arm64"}')"; \
wget -qO /usr/bin/fabio ${FABIO_REMOTE}/releases/download/v${FABIO_VERSION}/fabio-${FABIO_VERSION}-${OS}_${ARCH} \
&& chmod +x /usr/bin/fabio
EXPOSE 9998 9999
ENTRYPOINT ["/usr/bin/fabio"]
CMD ["-cfg", "/etc/fabio/fabio.properties"]
HEALTHCHECK CMD status=$(echo -e 'GET /health HTTP/1.0\n' |nc -w 1 localhost 9998 | sed -n '$p') \
&& echo "$status" && [ "$status" = "OK" ] || exit 1
FROM dist as master
ARG DOCKER_BUILD_DIR
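# Example (assumed tag and mount): build, then run with a custom config at the
# path expected by the default CMD:
#   docker build -t fabio .
#   docker run -p 9998:9998 -p 9999:9999 -v $PWD/fabio.properties:/etc/fabio/fabio.properties fabio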

View File

@ -1,24 +0,0 @@
FROM golang:1.15-alpine AS build
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG GOOFYS_VERSION=v0.24.0
WORKDIR /src/github.com/kahing/goofys/
RUN apk --no-cache upgrade \
&& apk --no-cache add git make \
&& git clone https://github.com/kahing/goofys/ . \
&& git checkout tags/${GOOFYS_VERSION} -b ${GOOFYS_VERSION} \
&& go get . \
&& make install
FROM alpine:latest as dist
RUN apk add --no-cache ca-certificates
COPY --from=build /go/bin/goofys /bin/goofys
ENTRYPOINT ["/bin/goofys"]
# goofys -f --region $REGION --stat-cache-ttl $STAT_CACHE_TTL --type-cache-ttl $TYPE_CACHE_TTL --dir-mode $DIR_MODE --file-mode $FILE_MODE -o nonempty $BUCKET $MOUNT_DIR
FROM dist as master
ARG DOCKER_BUILD_DIR

View File

@ -1,26 +0,0 @@
FROM grafana/grafana:latest as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
ARG AWS_ACCESS_KEY
ARG AWS_SECRET_KEY
ARG MYSQL_GRAFANA_USER
ARG MYSQL_GRAFANA_PASSWORD
ARG MYSQL_GRAFANA_DB
COPY ${DOCKER_BUILD_DIR}/config.ini /etc/grafana/config.ini
COPY ${DOCKER_BUILD_DIR}/dashboards /etc/grafana/dashboards
COPY ${DOCKER_BUILD_DIR}/provisioning /etc/grafana/provisioning
USER root
RUN sed 's@AWS_ACCESS_KEY@'"${AWS_ACCESS_KEY:-UNDEFINED}"'@g; s@AWS_SECRET_KEY@'"${AWS_SECRET_KEY:-UNDEFINED}"'@g; s@MYSQL_GRAFANA_USER@'"${MYSQL_GRAFANA_USER:-UNDEFINED}"'@g; s@MYSQL_GRAFANA_PASSWORD@'"${MYSQL_GRAFANA_PASSWORD:-UNDEFINED}"'@g; s@MYSQL_GRAFANA_DB@'"${MYSQL_GRAFANA_DB:-UNDEFINED}"'@g' /etc/grafana/provisioning/datasources/datasources.tmpl > /etc/grafana/provisioning/datasources/datasources.yml
COPY ${DOCKER_BUILD_DIR}/docker-entrypoint.sh /
RUN chmod +x /docker-entrypoint.sh
USER grafana
ENTRYPOINT ["/docker-entrypoint.sh"]
FROM dist as master
ARG DOCKER_BUILD_DIR

View File

@ -1,2 +0,0 @@
[paths]
provisioning = /etc/grafana/provisioning

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,5 +0,0 @@
#!/bin/sh
mkdir -p /var/lib/grafana/dashboards
cp -a /etc/grafana/dashboards/* /var/lib/grafana/dashboards/
exec /run.sh

View File

@ -1,6 +0,0 @@
- name: 'default'
orgId: 1
folder: ''
type: 'file'
options:
path: '/var/lib/grafana/dashboards' ## Contains dashboards host.json and containers.json

View File

@ -1,30 +0,0 @@
apiVersion: 1
datasources:
- name: 'Prometheus'
type: 'prometheus'
access: 'proxy'
orgId: 1
url: 'http://prometheus:9090'
isDefault: true
version: 1
editable: true
- name: "AWS_Cloudwatch"
type: cloudwatch
jsonData:
authType: keys
defaultRegion: eu-west-1
secureJsonData:
accessKey: "AWS_ACCESS_KEY"
secretKey: "AWS_SECRET_KEY"
- name: MySQL
type: mysql
url: mysql:3306
database: MYSQL_GRAFANA_DB
user: MYSQL_GRAFANA_USER
password: MYSQL_GRAFANA_PASSWORD
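# Note: the AWS_* and MYSQL_GRAFANA_* placeholders above are substituted at
# image build time by the sed step in the grafana Dockerfile, which renders
# this template into the final datasources.yml.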

View File

@ -1,24 +0,0 @@
FROM httpd:alpine as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
ARG HTTPD_LOAD_MODULE="env expires headers lbmethod_bybusyness lbmethod_byrequests proxy proxy_balancer proxy_fcgi proxy_http setenvif slotmem_shm reqtimeout rewrite"
ARG HTTPD_CONF_EXTRA="default info mpm vhosts"
RUN sed -E -i \
-e 's!^#?\s*(LoadModule ('${HTTPD_LOAD_MODULE// /|}')_module modules/mod_('${HTTPD_LOAD_MODULE// /|}').so)\s*!\1!g' \
-e 's!^#?\s*(Include conf/extra/httpd-('${HTTPD_CONF_EXTRA// /|}').conf)\s*!\1!g' \
"$HTTPD_PREFIX/conf/httpd.conf";
COPY ${DOCKER_BUILD_DIR}/docker-entrypoint.sh /
COPY ${DOCKER_BUILD_DIR}/httpd-vhosts.ctmpl /usr/local/apache2/conf/extra/
COPY --from=hashicorp/consul-template:alpine /bin/consul-template /usr/local/bin/
WORKDIR /var/www
ENTRYPOINT ["/docker-entrypoint.sh"]
CMD /usr/local/bin/consul-template -log-level info -consul-addr consul:8500 -template "/usr/local/apache2/conf/extra/httpd-vhosts.ctmpl:/usr/local/apache2/conf/extra/httpd-vhosts.conf" -exec "/usr/local/bin/httpd-foreground"
EXPOSE 80/tcp
FROM dist as master
ARG DOCKER_BUILD_DIR

View File

@ -1,9 +0,0 @@
#!/bin/sh
set -euo pipefail
trap 'kill -SIGQUIT $PID' INT
# Launch httpd
[ $# -eq 0 ] && httpd-foreground || exec "$@" &
PID=$! && wait

View File

@ -1,21 +0,0 @@
{{ $serverName := printf "%s.%s.%s" (env "APP") (env "ENV") (env "USER") }}
{{ $serviceName := printf "%s-%s-%s-php-9000" (env "USER") (env "ENV") (env "APP") }}
<VirtualHost *:80>
ServerAdmin support+apache@asycn.io
DocumentRoot "/var/www/web"
ServerName {{ $serverName }}
ServerAlias *
<FilesMatch "\.php$">
SetHandler "proxy:balancer://php/"
</FilesMatch>
<Proxy "balancer://php/">
{{ range service $serviceName }} BalancerMember "fcgi://{{ .Address }}:{{ .Port }}/" disablereuse=On timeout=900
{{ end }} ProxySet lbmethod=bybusyness
</Proxy>
<Directory /var/www/web>
AllowOverride All
CGIPassAuth On
Options +FollowSymLinks
Require all granted
</Directory>
</VirtualHost>
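# Rendered sketch (assuming a single healthy php service instance registered in
# Consul at 172.17.0.5:9000), the <Proxy> block above would expand to:
#   BalancerMember "fcgi://172.17.0.5:9000/" disablereuse=On timeout=900
#   ProxySet lbmethod=bybusyness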

View File

@ -1,45 +0,0 @@
ARG IPFS_VERSION=0.14.0
FROM ipfs/kubo:v${IPFS_VERSION} as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
COPY ${DOCKER_BUILD_DIR}/*.sh /container-init.d/
RUN chmod +rx /container-init.d/*.sh
FROM dist as master
ARG DOCKER_BUILD_DIR
ARG UID
ENV UID=${UID}
ENV USER=ipfs
# GID and GROUP are referenced by the blocks below but were never defined;
# default them to UID and ipfs
ENV GID=${UID}
ENV GROUP=ipfs
# If we provide a numeric UID
RUN if [ "${UID}" -eq "${UID}" ] 2>/dev/null; then \
# Force $UID of $USER if it exists
if [ "$(awk -F: '$1 == "'"${USER}"'" {print $3}' /etc/passwd)" != "${UID}" ]; then \
sed -i 's/^\('"${USER}"':x\):[0-9]\+:/\1:'"${UID}"':/' /etc/passwd; \
fi; \
# Create $USER if $UID does not exist
if [ "$(awk -F: '$3 == "'"${UID}"'" {print $1}' /etc/passwd)" = "" ]; then \
echo "${USER}:x:${UID}:${GID:-${UID}}::/home/${USER}:${SHELL:-/bin/sh}" >> /etc/passwd; \
echo "${USER}:\!:$(($(date +%s) / 60 / 60 / 24)):0:99999:7:::" >> /etc/shadow; \
mkdir -p /home/"${USER}"; \
fi; \
chown "${UID}" $(awk -F: '$1 == "'"${USER}"'" {print $(NF-1)}' /etc/passwd); \
fi
# If we provide a numeric GID
RUN if [ "${GID}" -eq "${GID}" ] 2>/dev/null; then \
# Force $GID of $GROUP if it already exists
if [ "$(awk -F: '$1 == "'"${GROUP}"'" {print $3}' /etc/group)" != "${GID}" ]; then \
sed -i 's/^\('"${GROUP}"':x\):[0-9]\+:/\1:'"${GID}"':/' /etc/group; \
fi; \
# Create $GROUP if $GID does not exist
if [ "$(awk -F: '$3 == "'"${GID}"'" {print $1}' /etc/group)" = "" ]; then \
echo "${GROUP}:x:${GID}:" >> /etc/group; \
fi; \
# Force $GID of $USER if it exists
if [ "$(awk -F: '$1 == "'"${USER}"'" {print $4}' /etc/passwd)" != "${GID}" ]; then \
sed -i 's/^\('"${USER}"':x:[0-9]\+\):[0-9]\+:/\1:'"${GID}"':/' /etc/passwd; \
fi; \
chgrp "${GID}" $(awk -F: '$1 == "'"${USER}"'" {print $(NF-1)}' /etc/passwd); \
fi

View File

@ -1,102 +0,0 @@
#!/bin/sh
[ -n "${DEBUG:-}" -a "${DEBUG:-}" != "false" ] && set -x
set -e
## fix resource manager fatal error on arm64/linux with 2Gb RAM
# ipfs config --json Swarm.ResourceMgr.Enabled false
# ERROR p2pnode libp2p/rcmgr_defaults.go:107 ===> OOF! go-libp2p changed DefaultServiceLimits
# => changes ('test' represents the old value):
# {"op":"test","path":"/SystemLimits/Memory","value":1073741824}
# {"op":"replace","path":"/SystemLimits/Memory","value":256560128}
# => go-libp2p SetDefaultServiceLimits update needs a review:
# Please inspect if changes impact go-ipfs users, and update expectedDefaultServiceLimits in rcmgr_defaults.go to remove this message
# FATAL p2pnode libp2p/rcmgr_defaults.go:115 daemon will refuse to run with the resource manager until this is resolved
## ipfs client needs API address
# search for ip address of $(hostname).${IPFS_ADDRESSES_API_DOMAIN}
[ -n "${IPFS_ADDRESSES_API_DOMAIN}" ] && [ -z "${IPFS_ADDRESSES_API_INET4}" ] \
&& IPFS_ADDRESSES_API_INET4=$(nslookup -type=A "$(hostname).${IPFS_ADDRESSES_API_DOMAIN}" |awk 'found && /^Address:/ {print $2; found=0}; /^Name:\t'"$(hostname).${IPFS_ADDRESSES_API_DOMAIN}"'/ {found=1};')
# check ${IPFS_ADDRESSES_API_INET4} format
echo "${IPFS_ADDRESSES_API_INET4}" |awk -F. '{ for ( i=1; i<=4; i++ ) if ($i >= 0 && $i <= 255); else exit 1;}; NF != 4 {exit 1;}' || unset IPFS_ADDRESSES_API_INET4
# check ${IPFS_ADDRESSES_API_PORT} format
[ "${IPFS_ADDRESSES_API_PORT}" -eq "${IPFS_ADDRESSES_API_PORT}" ] 2>/dev/null && [ "${IPFS_ADDRESSES_API_PORT}" -ge 1 ] && [ "${IPFS_ADDRESSES_API_PORT}" -le 65535 ] \
|| unset IPFS_ADDRESSES_API_PORT
ipfs config Addresses.API "${IPFS_ADDRESSES_API:-/ip4/${IPFS_ADDRESSES_API_INET4:-127.0.0.1}/tcp/${IPFS_ADDRESSES_API_PORT:-5001}}"
## gateway address
# search for ip address of $(hostname).${IPFS_ADDRESSES_GATEWAY_DOMAIN}
[ -n "${IPFS_ADDRESSES_GATEWAY_DOMAIN}" ] && [ -z "${IPFS_ADDRESSES_GATEWAY_INET4}" ] \
&& IPFS_ADDRESSES_GATEWAY_INET4=$(nslookup -type=A "$(hostname).${IPFS_ADDRESSES_GATEWAY_DOMAIN}" |awk 'found && /^Address:/ {print $2; found=0}; /^Name:\t'"$(hostname).${IPFS_ADDRESSES_GATEWAY_DOMAIN}"'/ {found=1};')
# check ${IPFS_ADDRESSES_GATEWAY_INET4} format
echo "${IPFS_ADDRESSES_GATEWAY_INET4}" |awk -F. '{ for ( i=1; i<=4; i++ ) if ($i >= 0 && $i <= 255); else exit 1;}; NF != 4 {exit 1;}' || unset IPFS_ADDRESSES_GATEWAY_INET4
# check ${IPFS_ADDRESSES_GATEWAY_PORT} format
[ "${IPFS_ADDRESSES_GATEWAY_PORT}" -eq "${IPFS_ADDRESSES_GATEWAY_PORT}" ] 2>/dev/null && [ "${IPFS_ADDRESSES_GATEWAY_PORT}" -ge 1 ] && [ "${IPFS_ADDRESSES_GATEWAY_PORT}" -le 65535 ] \
|| unset IPFS_ADDRESSES_GATEWAY_PORT
ipfs config Addresses.Gateway "${IPFS_ADDRESSES_GATEWAY:-/ip4/${IPFS_ADDRESSES_GATEWAY_INET4:-127.0.0.1}/tcp/${IPFS_ADDRESSES_GATEWAY_PORT:-8080}}"
[ -n "${IPFS_ADDRESSES_NOANNOUNCE}" ] && ipfs config --json Addresses.NoAnnounce "${IPFS_ADDRESSES_NOANNOUNCE}"
## api http headers
ipfs config --json API.HTTPHeaders "${IPFS_API_HTTPHEADERS:-{
\"Access-Control-Allow-Credentials\": ${IPFS_API_HTTPHEADERS_ACA_CREDENTIALS:-null},
\"Access-Control-Allow-Headers\": ${IPFS_API_HTTPHEADERS_ACA_HEADERS:-null},
\"Access-Control-Allow-Methods\": ${IPFS_API_HTTPSHEADERS_ACA_METHODS:-null},
\"Access-Control-Allow-Origin\": ${IPFS_API_HTTPHEADERS_ACA_ORIGIN:-null}
}}"
## bootstrap
[ -n "${IPFS_BOOTSTRAP}" ] && ipfs config --json Bootstrap "${IPFS_BOOTSTRAP}"
## storage
# limit disk usage to IPFS_DISK_USAGE_PERCENT (default 50) percent of disk size
diskSize=$(df -P ${IPFS_PATH:-~/.ipfs} | awk 'NR>1{size+=$2}END{print size}')
ipfs config Datastore.StorageMax "$((diskSize * ${IPFS_DISK_USAGE_PERCENT:-50} / 100))"
# garbage collector
[ -n "${IPFS_DATASTORE_GCPERIOD}" ] && ipfs config Datastore.GCPeriod "${IPFS_DATASTORE_GCPERIOD}"
## experimental features
[ -n "${IPFS_EXPERIMENTAL_ACCELERATEDDHTCLIENT}" ] && ipfs config --json Experimental.AcceleratedDHTClient "${IPFS_EXPERIMENTAL_ACCELERATEDDHTCLIENT}"
[ -n "${IPFS_EXPERIMENTAL_FILESTOREENABLED}" ] && ipfs config --json Experimental.FilestoreEnabled "${IPFS_EXPERIMENTAL_FILESTOREENABLED}"
[ -n "${IPFS_EXPERIMENTAL_GRAPHSYNCENABLED}" ] && ipfs config --json Experimental.GraphsyncEnabled "${IPFS_EXPERIMENTAL_GRAPHSYNCENABLED}"
[ -n "${IPFS_EXPERIMENTAL_LIBP2PSTREAMMOUNTING}" ] && ipfs config --json Experimental.Libp2pStreamMounting "${IPFS_EXPERIMENTAL_LIBP2PSTREAMMOUNTING}"
[ -n "${IPFS_EXPERIMENTAL_P2PHTTPPROXY}" ] && ipfs config --json Experimental.P2pHttpProxy "${IPFS_EXPERIMENTAL_P2PHTTPPROXY}"
[ -n "${IPFS_EXPERIMENTAL_STRATEGICPROVIDING}" ] && ipfs config --json Experimental.StrategicProviding "${IPFS_EXPERIMENTAL_STRATEGICPROVIDING}"
[ -n "${IPFS_EXPERIMENTAL_URLSTOREENABLED}" ] && ipfs config --json Experimental.UrlstoreEnabled "${IPFS_EXPERIMENTAL_URLSTOREENABLED}"
## api http headers
ipfs config --json Gateway.HTTPHeaders "${IPFS_GATEWAY_HTTPHEADERS:-{
\"Access-Control-Allow-Credentials\": ${IPFS_GATEWAY_HTTPHEADERS_ACA_CREDENTIALS:-null},
\"Access-Control-Allow-Headers\": ${IPFS_GATEWAY_HTTPHEADERS_ACA_HEADERS:-[ \"X-Requested-With\", \"Range\", \"User-Agent\" ]},
\"Access-Control-Allow-Methods\": ${IPFS_GATEWAY_HTTPSHEADERS_ACA_METHODS:-[ \"GET\" ]},
\"Access-Control-Allow-Origin\": ${IPFS_GATEWAY_HTTPHEADERS_ACA_ORIGIN:-[ \"*\" ]}
}}"
## ipns
[ -n "${IPFS_IPNS_REPUBLISHPERIOD}" ] && ipfs config Ipns.RepublishPeriod "${IPFS_IPNS_REPUBLISHPERIOD}"
[ -n "${IPFS_IPNS_RECORDLIFETIME}" ] && ipfs config Ipns.RecordLifetime "${IPFS_IPNS_RECORDLIFETIME}"
[ -n "${IPFS_IPNS_USEPUBSUB}" ] && ipfs config --json Ipns.UsePubsub "${IPFS_IPNS_USEPUBSUB}"
## dht pubsub mode
[ -n "${IPFS_PUBSUB_ENABLE}" ] && ipfs config --json Pubsub.Enabled "${IPFS_PUBSUB_ENABLE}"
[ -n "${IPFS_PUBSUB_ROUTER}" ] && ipfs config Pubsub.Router "${IPFS_PUBSUB_ROUTER}"
## routing
[ -n "${IPFS_ROUTING_TYPE}" ] && ipfs config Routing.Type "${IPFS_ROUTING_TYPE}"
## reproviding local content to routing system
[ -n "${IPFS_REPROVIDER_INTERVAL}" ] && ipfs config Reprovider.Interval "${IPFS_REPROVIDER_INTERVAL}"
[ -n "${IPFS_REPROVIDER_STRATEGY}" ] && ipfs config Reprovider.Strategy "${IPFS_REPROVIDER_STRATEGY}"
## swarm config
[ -n "${IPFS_SWARM_CONNMGR_HIGHWATER}" ] && ipfs config --json Swarm.ConnMgr.HighWater "${IPFS_SWARM_CONNMGR_HIGHWATER}"
[ -n "${IPFS_SWARM_CONNMGR_LOWWATER}" ] && ipfs config --json Swarm.ConnMgr.LowWater "${IPFS_SWARM_CONNMGR_LOWWATER}"
[ -n "${IPFS_SWARM_CONNMGR_TYPE}" ] && ipfs config --json Swarm.ConnMgr.Type "${IPFS_SWARM_CONNMGR_TYPE}"
[ -n "${IPFS_SWARM_DISABLENATPORTMAP}" ] && ipfs config --bool Swarm.DisableNatPortMap "${SWARM_DISABLENATPORTMAP}"
[ -n "${IPFS_SWARM_ENABLEHOLEPUNCHING}" ] && ipfs config --bool Swarm.EnableHolePunching "${SWARM_ENABLEHOLEPUNCHING}"
[ -n "${IPFS_SWARM_RELAYCLIENT_ENABLED}" ] && ipfs config --bool Swarm.RelayClient.Enabled "${SWARM_RELAYCLIENT_ENABLED}"
[ -n "${IPFS_SWARM_RELAYSERVICE_ENABLED}" ] && ipfs config --bool Swarm.RelayService.Enabled "${SWARM_RELAYSERVICE_ENABLED}"
[ -n "${IPFS_SWARM_TRANSPORTS_NETWORK_RELAY}" ] && ipfs config --bool Swarm.Transports.Network.Relay "${SWARM_TRANSPORTS_NETWORK_RELAY}"
## REMOVE IPFS BOOTSTRAP for private usage
[ ${IPFS_NETWORK:-public} = "public" ] || ipfs bootstrap rm --all
[ ${IPFS_NETWORK:-public} = "private" ] && export LIBP2P_FORCE_PNET=1 ||:

View File

@ -1,3 +0,0 @@
hardstatus alwayslastline "%{= kw}[%{G}%H%{-}] \# %?%-Lw%?[%{G}%n%f %t%{-}]%?%+Lw%?%?%=%-17< [%{B}%l%{-}]"
defscrollback 1048576
shell -$SHELL

View File

@ -1,32 +0,0 @@
# this is aya's tmux configuration!
bind r source-file ~/.tmux.conf\; display-message "tmux.conf reloaded!"
bind R move-window -r
bind C-n switch-client -n\; refresh-client -S
bind C-p switch-client -p\; refresh-client -S
bind Escape copy-mode
bind Y run "tmux save-buffer - |xsel -i"
bind P run "xsel -o |tmux load-buffer -; tmux paste-buffer"
bind C-c run "tmux save-buffer - | xclip -i -sel clipboard >/dev/null"
bind C-v run "tmux set-buffer \"$(xclip -o -sel clipboard)\"; tmux paste-buffer"
bind -n S-down new-window
bind -n S-left prev
bind -n S-right next
bind -n C-left swap-window -t -1
bind -n C-right swap-window -t +1
set -g aggressive-resize on
set -g status-keys vi
setw -g mode-keys vi
setw -g window-status-current-bg blue
setw -g window-status-current-fg white
setw -g monitor-activity on
set -g visual-activity on
setw -g automatic-rename on
set -g default-terminal "screen"
set -g history-limit 4242
set -g status-bg black
set -g status-fg white
set -g status-interval 60
set -g status-left-length 30
set -g status-left '<#[fg=green]#S#[default]> '
set -g status-right '#[fg=yellow] %d/%m %H:%M#[default]'
set -g update-environment "SSH_ASKPASS SSH_AUTH_SOCK SSH_AGENT_PID SSH_CONNECTION"

View File

@ -1,173 +0,0 @@
FROM alpine:latest as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
ARG GIT_AUTHOR_NAME
ARG GIT_AUTHOR_EMAIL
ARG SYSTEM=Linux
ARG MACHINE=x86_64
ENV GIT_AUTHOR_NAME=${GIT_AUTHOR_NAME}
ENV GIT_AUTHOR_EMAIL=${GIT_AUTHOR_EMAIL}
ENV GIT_COMMITTER_NAME=${GIT_AUTHOR_NAME}
ENV GIT_COMMITTER_EMAIL=${GIT_AUTHOR_EMAIL}
RUN apk upgrade --no-cache \
&& apk add --no-cache \
bash \
curl \
gettext \
git \
gpg \
gpg-agent \
make \
nano \
netcat-openbsd \
openssh \
screen \
socat \
tmux \
wget \
xz
RUN git clone https://github.com/ingydotnet/git-subrepo \
&& cd git-subrepo \
&& git fetch origin +refs/heads/release/0.4.0: \
&& git checkout release/0.4.0 \
&& git fetch origin pull/314/head \
&& git rebase 9cbe7ba2f61552ce97fb312c8133813f970ab4a5 \
&& sed -i 's/install -C/install/' Makefile \
&& make install \
&& cd .. \
&& rm -rf git-subrepo
ARG IPFS_VERSION=0.14.0
RUN { OS="$(echo ${SYSTEM} |awk '{print tolower($0)}')"; \
ARCH="$(echo ${MACHINE})"; \
wget -qO - https://github.com/koalaman/shellcheck/releases/download/stable/shellcheck-stable.${OS}.${ARCH}.tar.xz \
|tar --strip-components 1 -C /usr/local/bin -xJf - shellcheck-stable/shellcheck; } \
&& { ARCH="$(echo ${MACHINE} |awk '/x86_64/ {print "amd64"}; /aarch64/ {print "arm64"}')"; \
wget -qO - https://github.com/ipfs/kubo/releases/download/v${IPFS_VERSION}/kubo_v${IPFS_VERSION}_${OS}-${ARCH}.tar.gz \
|tar --strip-components 1 -C /usr/local/bin -xzf - kubo/ipfs; } \
&& mkdir -p /usr/local/lib/shellspec \
&& wget -qO - https://github.com/shellspec/shellspec/archive/refs/heads/master.tar.gz \
|tar --strip-components 1 -C /usr/local/lib/shellspec -xzf - \
&& ln -s /usr/local/lib/shellspec/shellspec /usr/local/bin/shellspec
ADD https://raw.github.com/kvz/cronlock/master/cronlock /usr/local/bin/cronlock
RUN chmod +rx /usr/local/bin/cronlock
# Setup environment variables; export SSH_AUTH_SOCK from socket directory
ENV SOCKET_DIR /tmp/ssh-agent
ENV SSH_AUTH_SOCK ${SOCKET_DIR}/socket
ENV SSH_AUTH_PROXY_SOCK ${SOCKET_DIR}/proxy-socket
COPY ${DOCKER_BUILD_DIR}/docker-entrypoint.sh /docker-entrypoint.sh
ENTRYPOINT ["/docker-entrypoint.sh"]
CMD ["start"]
FROM dist as master
ARG DOCKER_BUILD_DIR
ARG DOCKER_GID
ARG SHELL=/bin/bash
ARG SSH_BASTION_HOSTNAME
ARG SSH_BASTION_USERNAME
ARG SSH_PRIVATE_IP_RANGE
ARG SSH_PUBLIC_HOSTS
ARG UID
ARG USER
ENV UID=${UID}
ENV GID=${UID}
ENV USER=${USER}
RUN apk add --no-cache \
# docker \
# docker-compose \
# mysql-client \
# postgresql-client \
sudo
# vim \
# zsh
# If we provide a numeric UID
RUN [ "$UID" -eq "$UID" ] 2>/dev/null \
# Remove user with $UID if it is not our $USER
&& if [ "$(getent passwd $UID |awk -F: '{print $1}')" != "$USER" ]; then \
sed -i '/^'$(getent passwd $UID |awk -F: '{print $1}')':x:'$UID':/d' /etc/passwd; \
sed -i '/^'$(getent group $GID |awk -F: '{print $1}')':x:'$GID':/d' /etc/group; \
fi \
# Force $UID if our $USER already exists
&& sed -i 's/^'$USER':x:[0-9]\+:[0-9]\+:/'$USER':x:'$UID':'$GID':/' /etc/passwd \
&& sed -i 's/^'$USER':x:[0-9]\+:/'$USER':x:'$GID':/' /etc/group \
# Create $USER if it does not exist
&& if [ "$(getent passwd $UID)" = "" ]; then \
echo "$USER:x:$UID:$GID::/home/$USER:$SHELL" >> /etc/passwd; \
echo "$USER:\!:$(($(date +%s) / 60 / 60 / 24)):0:99999:7:::" >> /etc/shadow; \
echo "$USER:x:$GID:" >> /etc/group; \
fi \
&& mkdir -p /home/$USER \
&& chown $UID:$GID /home/$USER \
|| true
# If we provide a numeric DOCKER_GID
RUN [ "$DOCKER_GID" -eq "$DOCKER_GID" ] 2>/dev/null \
&& if [ "$(getent group docker |awk -F: '{print $3}')" != "$DOCKER_GID" ]; then \
sed -i 's/^docker:x:[0-9]\+:/docker:x:'$DOCKER_GID':/' /etc/group; \
fi \
|| true
## User groups
RUN adduser $USER wheel \
# && adduser $USER docker \
&& echo '%wheel ALL=(ALL:ALL) NOPASSWD: ALL' >> /etc/sudoers
RUN echo -e "\n\
Host *\n\
LogLevel quiet\n\
Compression yes\n\
" >> /etc/ssh/ssh_config \
&& if [ -n "${SSH_PRIVATE_IP_RANGE}" ] && [ -n "${SSH_BASTION_HOSTNAME}" ]; then \
echo -e "\
Host ${SSH_PRIVATE_IP_RANGE}\n\
ProxyCommand ssh -q ssh-bastion nc -q0 %h 22\n\
HostName %h\n\
StrictHostKeyChecking no\n\
UserKnownHostsFile /dev/null\n\
Host ssh-bastion\n\
HostName ${SSH_BASTION_HOSTNAME}\
" >> /etc/ssh/ssh_config; \
if [ -n "${SSH_BASTION_USERNAME}" ]; then \
echo -e "\
User ${SSH_BASTION_USERNAME}\n\
" >> /etc/ssh/ssh_config; \
fi \
fi
# Custom rc functions
COPY ansible/roles/hosts/files/etc/profile.d/rc*.sh /etc/profile.d/
RUN mkdir -p $SOCKET_DIR && chown $USER $SOCKET_DIR
VOLUME ${SOCKET_DIR}
USER $USER
ENV SHELL=${SHELL}
WORKDIR /home/$USER
# git config
RUN mkdir -p ~/.ssh ~/.config/git \
&& ssh-keyscan -t rsa -H ${SSH_PUBLIC_HOSTS} >> ~/.ssh/known_hosts \
&& echo -e "\
.DS_Store\n\
.idea/\n\
.nfs*\n\
.theia/settings.json\n\
*~\n\
*.log\n\
*.swp\n\
Thumbs.db\n\
" > ~/.config/git/ignore
# dot files
COPY ${DOCKER_BUILD_DIR}/.* /home/$USER/

View File

@ -1,31 +0,0 @@
#!/usr/bin/env sh
set -eu
# Print a debug message if debug mode is on ($DEBUG is not empty)
# @param message
debug_msg ()
{
if [ -n "${DEBUG:-}" -a "${DEBUG:-}" != "false" ]; then
echo "$@"
fi
}
case "${1:-start}" in
start)
debug_msg "Starting..."
# Create proxy-socket for ssh-agent (to give everyone access to the ssh-agent socket)
debug_msg "Create proxy socket..."
rm -f ${SSH_AUTH_SOCK} ${SSH_AUTH_PROXY_SOCK} > /dev/null 2>&1
socat UNIX-LISTEN:${SSH_AUTH_PROXY_SOCK},perm=0666,fork UNIX-CONNECT:${SSH_AUTH_SOCK} &
debug_msg "Launch ssh-agent..."
exec /usr/bin/ssh-agent -a ${SSH_AUTH_SOCK} -D >/dev/null
;;
*)
debug_msg "Exec: $@"
exec "$@"
;;
esac
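The proxy socket is the point of this entrypoint: ssh-agent restricts its own socket to the user that started it, so socat re-exposes it with 0666 permissions for anyone sharing the volume. A client-side sketch; the container name `ssh-agent` is an assumption:
```shell
# Use the world-readable proxy socket from another container
# (container name "ssh-agent" is hypothetical).
docker run --rm --volumes-from ssh-agent \
  -e SSH_AUTH_SOCK=/tmp/ssh-agent/proxy-socket \
  alpine sh -c 'apk add --no-cache openssh-client >/dev/null && ssh-add -l'
```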

View File

@ -1,19 +0,0 @@
FROM mysql:5.6.44 as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
# config
COPY ${DOCKER_BUILD_DIR}/conf.d/all.cnf /etc/mysql/conf.d/
# install goss
ADD https://github.com/aelsabbahy/goss/releases/latest/download/goss-linux-amd64 /usr/bin/goss
RUN chmod +rx /usr/bin/goss
COPY ${DOCKER_BUILD_DIR}/goss.yml /tests/goss.yml
HEALTHCHECK CMD goss -g /tests/goss.yml validate --format tap
FROM dist as master
ARG DOCKER_BUILD_DIR
# config
COPY ${DOCKER_BUILD_DIR}/conf.d/master.cnf /etc/mysql/conf.d/

View File

@ -1,22 +0,0 @@
[mysqld]
binlog_cache_size = 32768
character_set_server = utf8mb4
collation_server = utf8mb4_unicode_ci
innodb_file_per_table = 1
innodb_flush_method = O_DIRECT
innodb_log_buffer_size = 8388608
innodb_log_file_size = 134217728
key_buffer_size = 16777216
# local_infile = 1
log_bin_trust_function_creators = 1
log_output = FILE
max_allowed_packet = 1G
max_binlog_size = 128M
query_cache_limit = 16M
query_cache_size = 128M
query_cache_type = 1
read_buffer_size = 262144
read_rnd_buffer_size = 524288
slow_query_log = 1
table_open_cache_instances = 16
wait_timeout = 31536000
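Several sizes above are raw byte counts: innodb_log_file_size = 134217728 is 128M and key_buffer_size = 16777216 is 16M. One way to check what a running server actually loaded; the container name `mysql` and the password placeholder are assumptions:
```shell
# Confirm the effective value inside a running container
# (replace <password> with the root password used at startup).
docker exec mysql mysql -uroot -p'<password>' \
  -e "SHOW VARIABLES LIKE 'innodb_log_file_size'"
```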

View File

@ -1,8 +0,0 @@
[mysqld]
innodb_buffer_pool_size = 1G
max_connections = 128
sort_buffer_size = 1M
innodb_strict_mode = 1
local_infile = 0
; sql-mode = "TRADITIONAL"

View File

@ -1,23 +0,0 @@
file:
/usr/sbin/mysqld:
exists: true
filetype: file
mode: "0755"
owner: root
sha256: bf5ba9081d08792048ef8459e1ea6ddd155b82bc1a22cd5c8666e47050cd6208
package:
mysql-server:
installed: true
mysql-common:
installed: true
port:
tcp6:3306:
listening: true
ip:
process:
mysqld:
running: true
user:
mysql:
exists: true
uid: 999
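This is the file the HEALTHCHECK above evaluates on a schedule; the same suite can be run by hand while debugging. The container name `mysql` is an assumption:
```shell
# Run the goss suite once, then read Docker's own health verdict.
docker exec mysql goss -g /tests/goss.yml validate --format tap
docker inspect -f '{{.State.Health.Status}}' mysql
```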

View File

@ -1,49 +0,0 @@
FROM alpine:latest as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
RUN apk --no-cache add libffi openssl python3 py3-pip py3-netifaces
RUN apk --no-cache add --virtual .build-deps \
build-base \
libffi-dev \
openssl-dev \
python3-dev \
&& pip install --upgrade pip \
&& pip install python-openstackclient \
&& apk del .build-deps
RUN apk --no-cache upgrade
ENTRYPOINT ["/usr/bin/openstack"]
CMD ["help"]
FROM dist as master
ARG DOCKER_BUILD_DIR
ARG UID
ARG USER
ENV UID=${UID}
ENV GID=${UID}
ENV USER=${USER}
# If we provide a numeric UID
RUN [ "$UID" -eq "$UID" ] 2>/dev/null \
# Remove user with $UID if it is not our $USER
&& if [ "$(getent passwd $UID |awk -F: '{print $1}')" != "$USER" ]; then \
sed -i '/^'$(getent passwd $UID |awk -F: '{print $1}')':x:'$UID':/d' /etc/passwd; \
sed -i '/^'$(getent group $GID |awk -F: '{print $1}')':x:'$GID':/d' /etc/group; \
fi \
# Force $UID if our $USER already exists
&& sed -i 's/^'$USER':x:[0-9]\+:[0-9]\+:/'$USER':x:'$UID':'$GID':/' /etc/passwd \
&& sed -i 's/^'$USER':x:[0-9]\+:/'$USER':x:'$GID':/' /etc/group \
# Create $USER if it does not exist
&& if [ "$(getent passwd $UID)" = "" ]; then \
echo "$USER:x:$UID:$GID::/home/$USER:$SHELL" >> /etc/passwd; \
echo "$USER:\!:$(($(date +%s) / 60 / 60 / 24)):0:99999:7:::" >> /etc/shadow; \
echo "$USER:x:$GID:" >> /etc/group; \
fi \
&& mkdir -p /home/$USER \
&& chown $UID:$GID /home/$USER \
|| true
USER $USER
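Since the entrypoint is the openstack binary itself, credentials must come in as the standard OS_* environment variables read by python-openstackclient. A sketch; the image tag and all credential values are assumptions:
```shell
# One-off CLI call against an OpenStack API
# (image tag "openstack" and the values shown are hypothetical).
docker run --rm \
  -e OS_AUTH_URL=https://keystone.example.com:5000/v3 \
  -e OS_PROJECT_NAME=demo \
  -e OS_USERNAME=demo \
  -e OS_PASSWORD=secret \
  -e OS_USER_DOMAIN_NAME=Default \
  -e OS_PROJECT_DOMAIN_NAME=Default \
  openstack server list
```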

View File

@ -1,67 +0,0 @@
FROM alpine:latest as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
ARG MACHINE="x86_64"
ARG SYSTEM="Linux"
RUN apk --no-cache add \
ansible \
bash \
dirmngr \
git \
gpg \
gpg-agent \
qemu \
qemu-system-x86_64 \
qemu-img \
openssh-client \
openssl \
socat \
wget
ARG PACKER_VERSION=1.8.3
RUN { OS="$(echo ${SYSTEM} |awk '{print tolower($0)}')"; \
ARCH="$(echo ${MACHINE} |awk '/x86_64/ {print "amd64"}; /aarch64/ {print "arm64"}')"; \
wget https://releases.hashicorp.com/packer/${PACKER_VERSION}/packer_${PACKER_VERSION}_${OS}_${ARCH}.zip \
&& wget https://releases.hashicorp.com/packer/${PACKER_VERSION}/packer_${PACKER_VERSION}_SHA256SUMS \
&& wget https://releases.hashicorp.com/packer/${PACKER_VERSION}/packer_${PACKER_VERSION}_SHA256SUMS.sig \
&& GNUPGHOME="./.gnupg" gpg --batch --recv-keys C874011F0AB405110D02105534365D9472D7468F \
&& GNUPGHOME="./.gnupg" gpg --batch --verify packer_${PACKER_VERSION}_SHA256SUMS.sig packer_${PACKER_VERSION}_SHA256SUMS \
&& grep packer_${PACKER_VERSION}_${OS}_${ARCH}.zip packer_${PACKER_VERSION}_SHA256SUMS |sha256sum -c - \
&& unzip "packer_${PACKER_VERSION}_${OS}_${ARCH}.zip" -d /usr/local/bin \
&& rm -f "packer_${PACKER_VERSION}_${OS}_${ARCH}.zip" packer_${PACKER_VERSION}_SHA256SUMS.sig packer_${PACKER_VERSION}_SHA256SUMS ./.gnupg \
; }
ENTRYPOINT ["/usr/local/bin/packer"]
FROM dist as master
ARG DOCKER_BUILD_DIR
ARG UID
ARG USER
ENV UID=${UID}
ENV GID=${UID}
ENV USER=${USER}
# If we provide a specific UID
RUN let $UID >/dev/null 2>&1 \
# Remove user with $UID if it is not our $USER
&& if [ "$(getent passwd $UID |awk 'BEGIN {FS=":"} {print $1}')" != "$USER" ]; then \
sed -i '/^'$(getent passwd $UID |awk 'BEGIN {FS=":"} {print $1}')':x:'$UID':/d' /etc/passwd; \
sed -i '/^'$(getent group $GID |awk 'BEGIN {FS=":"} {print $1}')':x:'$GID':/d' /etc/group; \
fi \
# Force $UID if our $USER already exists
&& sed -i 's/^'$USER':x:[0-9]\+:[0-9]\+:/'$USER':x:'$UID':'$GID':/' /etc/passwd \
&& sed -i 's/^'$USER':x:[0-9]\+:/'$USER':x:'$GID':/' /etc/group \
# Create $USER if it does not exist
&& if [ "$(getent passwd $UID)" = "" ]; then \
echo "$USER:x:$UID:$GID::/home/$USER:/bin/false" >> /etc/passwd; \
echo "$USER:!:$(($(date +%s) / 60 / 60 / 24)):0:99999:7:::" >> /etc/shadow; \
echo "$USER:x:$GID:" >> /etc/group; \
fi \
&& mkdir -p /home/$USER \
&& chown $UID:$GID /home/$USER \
|| true
USER $USER
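The download block verifies both the HashiCorp GPG signature and the SHA256 sum before unzipping, so a failed check aborts the build. A quick smoke test of the result; the image tag `packer` is an assumption:
```shell
# Confirm the pinned packer release made it into the image.
docker run --rm packer version
```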

View File

@ -1,12 +0,0 @@
; DO NOT EDIT (unless you know what you are doing)
;
; This subdirectory is a git "subrepo", and this file is maintained by the
; git-subrepo command. See https://github.com/git-commands/git-subrepo#readme
;
[subrepo]
remote = ssh://git@github.com/aynicos/docker-pdns-server
branch = master
commit = 6d3c4ce70cbb38d237be757206bfbf082ce2ba3e
parent = 9de6b77b0d944fe6efa7b020ba7c2dbbba5df4f8
method = merge
cmdver = 0.4.0

View File

@ -1,67 +0,0 @@
FROM alpine:3.11 as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
ARG VERSION_PDNS_REC=4.2.1
ARG VERSION_PDNS_AUTH=4.2.1
ARG VERSION_PDNS_DNSDIST=1.4.0
RUN apk upgrade --no-cache \
&& apk add --no-cache --virtual .build-deps \
autoconf \
automake \
bison \
boost-dev \
boost-program_options \
boost-serialization \
build-base \
curl \
curl-dev \
file \
flex \
g++ \
git \
py-virtualenv \
libedit-dev \
libressl-dev \
libstdc++ \
libtool \
lua-dev \
make \
musl \
ragel \
&& git clone https://github.com/PowerDNS/pdns \
&& cd pdns \
&& git checkout tags/auth-${VERSION_PDNS_AUTH} -b auth-${VERSION_PDNS_AUTH} \
&& autoreconf -vi \
&& ./configure --enable-static --disable-systemd --without-systemd --with-modules="" \
&& make install clean \
&& git checkout tags/rec-${VERSION_PDNS_REC} -b rec-${VERSION_PDNS_REC} \
&& cd pdns/recursordist \
&& autoreconf -vi \
&& ./configure --enable-static --disable-systemd --without-systemd \
&& make install clean \
&& cd ../.. \
&& git checkout tags/dnsdist-${VERSION_PDNS_DNSDIST} -b dnsdist-${VERSION_PDNS_DNSDIST} \
&& cd pdns/dnsdistdist \
&& autoreconf -vi \
&& ./configure --enable-static --disable-systemd --without-systemd \
&& make install clean \
&& cd ../../.. \
&& rm -fr pdns \
&& runDeps="$( \
scanelf --needed --nobanner --recursive /usr/local \
| awk '{ gsub(/,/, "\nso:", $2); print "so:" $2 }' \
| xargs -r apk info --installed \
| sort -u \
)" \
&& apk del .build-deps \
&& apk add --no-cache --virtual .run-deps $runDeps \
lua
COPY ${DOCKER_BUILD_DIR}/docker-entrypoint.sh /
ENTRYPOINT ["/docker-entrypoint.sh"]
EXPOSE 53/udp 53/tcp
FROM dist as master
ARG DOCKER_BUILD_DIR

View File

@ -1,22 +0,0 @@
# docker-pdns
Alpine-based Dockerfile running a PowerDNS authoritative and/or recursive DNS server.
## Usage
The following environment variables can be customized.
## Example
Build a docker image named "pdns".
```shell
$ docker build -t pdns .
```
Start a container from this image.
```shell
$ docker run --net host pdns
```

View File

@ -1,9 +0,0 @@
#!/bin/ash
set -euo pipefail
trap 'kill -SIGQUIT $PID' INT
# Launch pdns_recursor by default
[ $# -eq 0 ] && /usr/local/sbin/pdns_recursor || exec "$@" &
PID=$! && wait

View File

@ -1,11 +0,0 @@
FROM phabricator/daemon:latest as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
openssh-client \
&& rm -rf /var/lib/apt/lists/*
FROM dist as master
ARG DOCKER_BUILD_DIR

View File

@ -1,10 +0,0 @@
FROM phabricator/phabricator:latest as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
RUN { \
echo 'mysqli.allow_local_infile = 0'; \
} > /usr/local/etc/php/conf.d/mysql.ini
FROM dist as master
ARG DOCKER_BUILD_DIR

View File

@ -1,239 +0,0 @@
FROM php:5.6-fpm-alpine as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
ARG AMQP_VERSION=stable
ARG APCU_VERSION=4.0.11
ARG BLACKFIRE_VERSION=1.31.0
ARG CACHETOOL_VERSION=3.2.1
ARG IMAGICK_VERSION=stable
ARG GEOIP_VERSION=stable
ARG MEMCACHE_VERSION=3.0.8
ARG MEMCACHED_VERSION=2.2.0
ARG MONGODB_VERSION=1.6.1
ARG NEWRELIC_VERSION=9.6.1.256
ARG RAR_VERSION=stable
ARG REDIS_VERSION=4.3.0
ARG UUID_VERSION=1.0.4
ARG SUHOSIN_VERSION=0.9.38
ARG TWIG_VERSION=1.35.3
ARG XCACHE_VERSION=3.2.0
ARG XDEBUG_VERSION=2.5.5
RUN apk --no-cache upgrade \
&& apk add --no-cache --virtual .build-deps \
$PHPIZE_DEPS \
aspell-dev \
bison \
bzip2-dev \
curl-dev \
flex \
freetype-dev \
gawk \
geoip-dev \
gettext-dev \
gmp-dev \
icu-dev \
imagemagick-dev \
imap-dev \
libjpeg-turbo-dev \
libmcrypt-dev \
libmemcached-dev \
libpng-dev \
libressl-dev \
libxml2-dev \
libxslt-dev \
make \
net-snmp-dev \
openldap-dev \
postgresql-dev \
python \
rabbitmq-c-dev \
zlib-dev \
&& wget https://packages.blackfire.io/binaries/blackfire-php/${BLACKFIRE_VERSION}/blackfire-php-alpine_amd64-php-$(php -r "echo PHP_MAJOR_VERSION.PHP_MINOR_VERSION;").so -O $(php -r "echo ini_get('extension_dir');")/blackfire.so \
&& wget https://download.newrelic.com/php_agent/archive/${NEWRELIC_VERSION}/newrelic-php5-${NEWRELIC_VERSION}-linux-musl.tar.gz -O /tmp/newrelic-${NEWRELIC_VERSION}.tar.gz \
&& mkdir -p /tmp/newrelic-${NEWRELIC_VERSION} \
&& tar xzf /tmp/newrelic-${NEWRELIC_VERSION}.tar.gz -C /tmp/newrelic-${NEWRELIC_VERSION} --strip-components=1 \
&& rm /tmp/newrelic-${NEWRELIC_VERSION}.tar.gz \
&& mv /tmp/newrelic-${NEWRELIC_VERSION}/agent/x64/newrelic-20131226.so $(php -r "echo ini_get('extension_dir');")/newrelic.so \
&& wget --no-check-certificate https://download.suhosin.org/suhosin-${SUHOSIN_VERSION}.tar.gz -O /tmp/suhosin-${SUHOSIN_VERSION}.tar.gz \
&& mkdir -p /tmp/suhosin-${SUHOSIN_VERSION} \
&& tar xzf /tmp/suhosin-${SUHOSIN_VERSION}.tar.gz -C /tmp/suhosin-${SUHOSIN_VERSION} --strip-components=1 \
&& rm /tmp/suhosin-${SUHOSIN_VERSION}.tar.gz \
&& cd /tmp/suhosin-${SUHOSIN_VERSION} \
&& sed -i '1i#include <sys/file.h>' log.c \
&& wget https://github.com/twigphp/Twig/archive/v${TWIG_VERSION}.tar.gz -O /tmp/twig-${TWIG_VERSION}.tar.gz \
&& mkdir -p /tmp/twig-${TWIG_VERSION} \
&& tar xzf /tmp/twig-${TWIG_VERSION}.tar.gz -C /tmp/twig-${TWIG_VERSION} --strip-components=1 \
&& rm /tmp/twig-${TWIG_VERSION}.tar.gz \
&& wget https://web.archive.org/web/20181118151821if_/http://xcache.lighttpd.net/pub/Releases/${XCACHE_VERSION}/xcache-${XCACHE_VERSION}.tar.gz -O /tmp/xcache-${XCACHE_VERSION}.tar.gz \
&& mkdir -p /tmp/xcache-${XCACHE_VERSION} \
&& tar xzf /tmp/xcache-${XCACHE_VERSION}.tar.gz -C /tmp/xcache-${XCACHE_VERSION} --strip-components=1 \
&& rm /tmp/xcache-${XCACHE_VERSION}.tar.gz \
&& docker-php-ext-configure /tmp/xcache-${XCACHE_VERSION} --enable-xcache --enable-xcache-constant --enable-xcache-optimizer --enable-xcache-coverager \
&& docker-php-ext-configure gd --with-freetype-dir=/usr/include/ --with-jpeg-dir=/usr/include/ --with-png-dir=/usr/include/ \
&& docker-php-ext-install -j$(nproc) \
/tmp/suhosin-${SUHOSIN_VERSION} \
/tmp/twig-${TWIG_VERSION}/ext/twig \
/tmp/xcache-${XCACHE_VERSION} \
bcmath \
bz2 \
calendar \
dba \
exif \
gd \
gettext \
gmp \
imap \
intl \
ldap \
mcrypt \
mysql \
mysqli \
opcache \
pcntl \
pdo_mysql \
pdo_pgsql \
pgsql \
pspell \
shmop \
snmp \
soap \
sockets \
sysvmsg \
sysvsem \
sysvshm \
xmlrpc \
xsl \
zip \
&& rm /usr/local/etc/php/conf.d/docker-php-ext-* \
&& rm -rf /tmp/newrelic-* \
&& rm -rf /tmp/suhosin-* \
&& rm -rf /tmp/twig-* \
&& rm -rf /tmp/xcache-* \
&& pecl install amqp-${AMQP_VERSION} \
&& pecl install apcu-${APCU_VERSION} \
&& pecl install geoip-${GEOIP_VERSION} \
&& pecl install imagick-${IMAGICK_VERSION} \
&& pecl install memcache-${MEMCACHE_VERSION} \
&& pecl install memcached-${MEMCACHED_VERSION} \
&& pecl install mongodb-${MONGODB_VERSION} \
&& pecl install rar-${RAR_VERSION} \
&& pecl install redis-${REDIS_VERSION} \
&& echo |pecl install uuid-${UUID_VERSION} \
&& pecl install xdebug-${XDEBUG_VERSION} \
&& pecl clear-cache \
&& runDeps="$( scanelf --needed --nobanner --format '%n#p' --recursive /usr/local/lib/php/extensions \
| tr ',' '\n' \
| sort -u \
| awk 'system("[ -e /usr/local/lib/" $1 " ]") == 0 { next } { print "so:" $1 }' \
)" \
&& apk del .build-deps \
&& apk add --no-cache --virtual .run-deps $runDeps
RUN wget http://gordalina.github.io/cachetool/downloads/cachetool-${CACHETOOL_VERSION}.phar -O /usr/local/bin/cachetool \
&& chmod +x /usr/local/bin/cachetool \
&& echo -e "\
adapter: fastcgi \n\
fastcgi: 127.0.0.1:9000 \n\
" > /etc/cachetool.yml
RUN mkdir -p /etc/ssh && echo -e "\
Host * \n\
Compression yes \n\
" >> /etc/ssh/ssh_config
RUN apk add --no-cache \
bash \
bzip2 \
coreutils \
gettext \
git \
imagemagick \
lftp \
mailx \
make \
mysql-client \
nano \
openssh-client \
ssmtp \
vim
# Iconv fix: https://github.com/docker-library/php/issues/240#issuecomment-305038173
RUN apk add --no-cache --repository http://dl-cdn.alpinelinux.org/alpine/edge/community/ gnu-libiconv
ENV LD_PRELOAD=/usr/lib/preloadable_libiconv.so
# enable php modules
# available modules : amqp apcu bcmath blackfire bz2 calendar curl dba exif gettext gd geoip gmp imap imagick intl json ldap mbstring mcrypt memcache memcached mongodb mysql mysqli newrelic opcache pcntl pdo_mysql pdo_pgsql pgsql pspell rar redis shmop simplexml snmp soap sockets suhosin sysvmsg sysvsem sysvshm tokenizer twig uuid xcache xdebug xmlrpc xsl zip
# fix: disabled memcache to avoid relocation errors
ARG PHP_EXT_ENABLE="amqp apcu bcmath blackfire bz2 calendar gd geoip imagick intl mcrypt memcached mysql mysqli opcache pdo_mysql redis soap sockets twig uuid zip"
RUN docker-php-ext-enable ${PHP_EXT_ENABLE}
# copy *.ini
COPY ${DOCKER_BUILD_DIR}/*.ini /usr/local/etc/php/conf.d/
COPY ${DOCKER_BUILD_DIR}/php-fpm-*.conf /usr/local/etc/php-fpm.d/
# custom php config
ARG PHP_INI_CONFIG
RUN echo -e ${PHP_INI_CONFIG// /\\n} >> /usr/local/etc/php/conf.d/config.ini
# custom php cli
ARG PHP_CLI_CONFIG="apc.enable_cli=0 max_execution_time=-1 memory_limit=-1 opcache.enable_cli=0 xdebug.default_enable=0"
RUN echo '#!/usr/bin/env sh' > /usr/local/bin/php-cli \
&& chmod +x /usr/local/bin/php-cli \
&& echo -e "\
/usr/local/bin/php -d ${PHP_CLI_CONFIG// / -d } \"\$@\"\
" >> /usr/local/bin/php-cli
# install cronlock
ADD https://raw.github.com/kvz/cronlock/master/cronlock /usr/bin/cronlock
RUN chmod +rx /usr/bin/cronlock
# config ssmtp
RUN echo "FromLineOverride=YES" >> /etc/ssmtp/ssmtp.conf
# https://bugs.php.net/bug.php?id=71880
ENV LOG_STREAM="/tmp/stdout"
RUN mkfifo $LOG_STREAM && chmod 777 $LOG_STREAM
# default www-data homedir to /var/www for crontabs
RUN sed -i 's|/home/www-data|/var/www|' /etc/passwd
WORKDIR /var/www
# redirect LOG_STREAM to stdout and start php-fpm with environment variables from .env
CMD [ "sh", "-c", "(exec 3<>$LOG_STREAM; cat <&3 >&1 & IFS=$'\n'; exec env $(cat .env 2>/dev/null) php-fpm)" ]
FROM dist as master
ARG UID
ARG USER
ENV UID=${UID}
ENV GID=${UID}
ENV USER=${USER}
# If we provide a specific UID
RUN let $UID >/dev/null 2>&1 \
# Remove user with $UID if it is not our $USER
&& if [ "$(getent passwd $UID |awk 'BEGIN {FS=":"} {print $1}')" != "$USER" ]; then \
sed -i '/^'$(getent passwd $UID |awk 'BEGIN {FS=":"} {print $1}')':x:'$UID':/d' /etc/passwd; \
sed -i '/^'$(getent group $GID |awk 'BEGIN {FS=":"} {print $1}')':x:'$GID':/d' /etc/group; \
fi \
# Force $UID if our $USER already exists
&& sed -i 's/^'$USER':x:[0-9]\+:[0-9]\+:/'$USER':x:'$UID':'$GID':/' /etc/passwd \
&& sed -i 's/^'$USER':x:[0-9]\+:/'$USER':x:'$GID':/' /etc/group \
# Create $USER if it does not exist
&& if [ "$(getent passwd $UID)" = "" ]; then \
echo "$USER:x:$UID:$GID::/home/$USER:/bin/false" >> /etc/passwd; \
echo "$USER:!:$(($(date +%s) / 60 / 60 / 24)):0:99999:7:::" >> /etc/shadow; \
echo "$USER:x:$GID:" >> /etc/group; \
fi \
&& mkdir -p /home/$USER \
&& chown $UID:$GID /home/$USER \
|| true
RUN chown -R $USER /usr/local/etc/php/conf.d/
USER $USER
ARG SSH_REMOTE_HOSTS
RUN mkdir -p ~/.ssh \
&& ssh-keyscan -t rsa -H $SSH_REMOTE_HOSTS >> ~/.ssh/known_hosts

View File

@ -1,5 +0,0 @@
apc.enable_cli = 0
apc.enabled = 1
apc.shm_segments = 1
apc.shm_size = 32M

View File

@ -1 +0,0 @@
blackfire.agent_socket=tcp://blackfire:8707

View File

@ -1,4 +0,0 @@
[newrelic]
newrelic.logfile = /proc/self/fd/2
newrelic.daemon.logfile = /proc/self/fd/2

View File

@ -1,8 +0,0 @@
opcache.enable = 1
opcache.enable_cli = 0
opcache.error_log = /proc/self/fd/2
opcache.interned_strings_buffer = 16
opcache.log_verbosity_level = 2
opcache.max_accelerated_files = 20000
opcache.memory_consumption = 256
opcache.validate_timestamps = 1

View File

@ -1,412 +0,0 @@
; Start a new pool named 'www'.
; the variable $pool can be used in any directive and will be replaced by the
; pool name ('www' here)
[www]
; Per pool prefix
; It only applies on the following directives:
; - 'access.log'
; - 'slowlog'
; - 'listen' (unixsocket)
; - 'chroot'
; - 'chdir'
; - 'php_values'
; - 'php_admin_values'
; When not set, the global prefix (or /usr) applies instead.
; Note: This directive can also be relative to the global prefix.
; Default Value: none
;prefix = /path/to/pools/$pool
; Unix user/group of processes
; Note: The user is mandatory. If the group is not set, the default user's group
; will be used.
user = www-data
group = www-data
; The address on which to accept FastCGI requests.
; Valid syntaxes are:
; 'ip.add.re.ss:port' - to listen on a TCP socket to a specific IPv4 address on
; a specific port;
; '[ip:6:addr:ess]:port' - to listen on a TCP socket to a specific IPv6 address on
; a specific port;
; 'port' - to listen on a TCP socket to all IPv4 addresses on a
; specific port;
; '[::]:port' - to listen on a TCP socket to all addresses
; (IPv6 and IPv4-mapped) on a specific port;
; '/path/to/unix/socket' - to listen on a unix socket.
; Note: This value is mandatory.
;listen = /var/run/php5-fpm.sock
listen = 0.0.0.0:9000
; Set listen(2) backlog.
; Default Value: 65535 (-1 on FreeBSD and OpenBSD)
listen.backlog = 1023
; Set permissions for unix socket, if one is used. In Linux, read/write
; permissions must be set in order to allow connections from a web server. Many
; BSD-derived systems allow connections regardless of permissions.
; Default Values: user and group are set as the running user
; mode is set to 0660
;listen.owner = www-data
;listen.group = www-data
;listen.mode = 0660
; When POSIX Access Control Lists are supported you can set them using
; these options, value is a comma separated list of user/group names.
; When set, listen.owner and listen.group are ignored
;listen.acl_users =
;listen.acl_groups =
; List of addresses (IPv4/IPv6) of FastCGI clients which are allowed to connect.
; Equivalent to the FCGI_WEB_SERVER_ADDRS environment variable in the original
; PHP FCGI (5.2.2+). Makes sense only with a tcp listening socket. Each address
; must be separated by a comma. If this value is left blank, connections will be
; accepted from any ip address.
; Default Value: any
;listen.allowed_clients = 0.0.0.0
; Specify the nice(2) priority to apply to the pool processes (only if set)
; The value can vary from -19 (highest priority) to 20 (lowest priority)
; Note: - It will only work if the FPM master process is launched as root
; - The pool processes will inherit the master process priority
; unless specified otherwise
; Default Value: not set
; process.priority = -19
; Choose how the process manager will control the number of child processes.
; Possible Values:
; static - a fixed number (pm.max_children) of child processes;
; dynamic - the number of child processes is set dynamically based on the
; following directives. With this process management, there will
; always be at least one child.
; pm.max_children - the maximum number of children that can
; be alive at the same time.
; pm.start_servers - the number of children created on startup.
; pm.min_spare_servers - the minimum number of children in 'idle'
; state (waiting to process). If the number
; of 'idle' processes is less than this
; number then some children will be created.
; pm.max_spare_servers - the maximum number of children in 'idle'
; state (waiting to process). If the number
; of 'idle' processes is greater than this
; number then some children will be killed.
; ondemand - no children are created at startup. Children will be forked when
; new requests connect. The following parameters are used:
; pm.max_children - the maximum number of children that
; can be alive at the same time.
; pm.process_idle_timeout - The number of seconds after which
; an idle process will be killed.
; Note: This value is mandatory.
pm = dynamic
; The number of child processes to be created when pm is set to 'static' and the
; maximum number of child processes when pm is set to 'dynamic' or 'ondemand'.
; This value sets the limit on the number of simultaneous requests that will be
; served. Equivalent to the ApacheMaxClients directive with mpm_prefork.
; Equivalent to the PHP_FCGI_CHILDREN environment variable in the original PHP
; CGI. The below defaults are based on a server without much resources. Don't
; forget to tweak pm.* to fit your needs.
; Note: Used when pm is set to 'static', 'dynamic' or 'ondemand'
; Note: This value is mandatory.
pm.max_children = 8
; The number of child processes created on startup.
; Note: Used only when pm is set to 'dynamic'
; Default Value: min_spare_servers + (max_spare_servers - min_spare_servers) / 2
pm.start_servers = 2
; The desired minimum number of idle server processes.
; Note: Used only when pm is set to 'dynamic'
; Note: Mandatory when pm is set to 'dynamic'
pm.min_spare_servers = 1
; The desired maximum number of idle server processes.
; Note: Used only when pm is set to 'dynamic'
; Note: Mandatory when pm is set to 'dynamic'
pm.max_spare_servers = 3
; The number of seconds after which an idle process will be killed.
; Note: Used only when pm is set to 'ondemand'
; Default Value: 10s
;pm.process_idle_timeout = 10s;
; The number of requests each child process should execute before respawning.
; This can be useful to work around memory leaks in 3rd party libraries. For
; endless request processing specify '0'. Equivalent to PHP_FCGI_MAX_REQUESTS.
; Default Value: 0
;pm.max_requests = 500
; The URI to view the FPM status page. If this value is not set, no URI will be
; recognized as a status page. It shows the following information:
; pool - the name of the pool;
; process manager - static, dynamic or ondemand;
; start time - the date and time FPM has started;
; start since - number of seconds since FPM has started;
; accepted conn - the number of requests accepted by the pool;
; listen queue - the number of requests in the queue of pending
; connections (see backlog in listen(2));
; max listen queue - the maximum number of requests in the queue
; of pending connections since FPM has started;
; listen queue len - the size of the socket queue of pending connections;
; idle processes - the number of idle processes;
; active processes - the number of active processes;
; total processes - the number of idle + active processes;
; max active processes - the maximum number of active processes since FPM
; has started;
; max children reached - number of times the process limit has been reached,
; when pm tries to start more children (works only for
; pm 'dynamic' and 'ondemand');
; Values are updated in real time.
; Example output:
; pool: www
; process manager: static
; start time: 01/Jul/2011:17:53:49 +0200
; start since: 62636
; accepted conn: 190460
; listen queue: 0
; max listen queue: 1
; listen queue len: 42
; idle processes: 4
; active processes: 11
; total processes: 15
; max active processes: 12
; max children reached: 0
;
; By default the status page output is formatted as text/plain. Passing either
; 'html', 'xml' or 'json' in the query string will return the corresponding
; output syntax. Example:
; http://www.foo.bar/status
; http://www.foo.bar/status?json
; http://www.foo.bar/status?html
; http://www.foo.bar/status?xml
;
; By default the status page only outputs short status. Passing 'full' in the
; query string will also return status for each pool process.
; Example:
; http://www.foo.bar/status?full
; http://www.foo.bar/status?json&full
; http://www.foo.bar/status?html&full
; http://www.foo.bar/status?xml&full
; The Full status returns for each process:
; pid - the PID of the process;
; state - the state of the process (Idle, Running, ...);
; start time - the date and time the process has started;
; start since - the number of seconds since the process has started;
; requests - the number of requests the process has served;
; request duration - the duration in µs of the requests;
; request method - the request method (GET, POST, ...);
; request URI - the request URI with the query string;
; content length - the content length of the request (only with POST);
; user - the user (PHP_AUTH_USER) (or '-' if not set);
; script - the main script called (or '-' if not set);
; last request cpu - the %cpu the last request consumed
; it's always 0 if the process is not in Idle state
; because CPU calculation is done when the request
; processing has terminated;
; last request memory - the max amount of memory the last request consumed
; it's always 0 if the process is not in Idle state
; because memory calculation is done when the request
; processing has terminated;
; If the process is in Idle state, then the information relates to the
; last request the process has served. Otherwise it relates to
; the current request being served.
; Example output:
; ************************
; pid: 31330
; state: Running
; start time: 01/Jul/2011:17:53:49 +0200
; start since: 63087
; requests: 12808
; request duration: 1250261
; request method: GET
; request URI: /test_mem.php?N=10000
; content length: 0
; user: -
; script: /home/fat/web/docs/php/test_mem.php
; last request cpu: 0.00
; last request memory: 0
;
; Note: There is a real-time FPM status monitoring sample web page available
; It's available in: /usr/share/php5/fpm/status.html
;
; Note: The value must start with a leading slash (/). The value can be
; anything, but it may not be a good idea to use the .php extension or it
; may conflict with a real PHP file.
; Default Value: not set
pm.status_path = /php-fpm-status
; The ping URI to call the monitoring page of FPM. If this value is not set, no
; URI will be recognized as a ping page. This could be used to test from outside
; that FPM is alive and responding, or to
; - create a graph of FPM availability (rrd or such);
; - remove a server from a group if it is not responding (load balancing);
; - trigger alerts for the operating team (24/7).
; Note: The value must start with a leading slash (/). The value can be
; anything, but it may not be a good idea to use the .php extension or it
; may conflict with a real PHP file.
; Default Value: not set
ping.path = /php-fpm-ping
; This directive may be used to customize the response of a ping request. The
; response is formatted as text/plain with a 200 response code.
; Default Value: pong
;ping.response = pong
; The access log file
; Default: not set
;access.log = log/$pool.access.log
; The access log format.
; The following syntax is allowed
; %%: the '%' character
; %C: %CPU used by the request
; it can accept the following format:
; - %{user}C for user CPU only
; - %{system}C for system CPU only
; - %{total}C for user + system CPU (default)
; %d: time taken to serve the request
; it can accept the following format:
; - %{seconds}d (default)
; - %{miliseconds}d
; - %{mili}d
; - %{microseconds}d
; - %{micro}d
; %e: an environment variable (same as $_ENV or $_SERVER)
; it must be enclosed in braces to specify the name of the env
; variable. Some examples:
; - server specifics like: %{REQUEST_METHOD}e or %{SERVER_PROTOCOL}e
; - HTTP headers like: %{HTTP_HOST}e or %{HTTP_USER_AGENT}e
; %f: script filename
; %l: content-length of the request (for POST request only)
; %m: request method
; %M: peak of memory allocated by PHP
; it can accept the following format:
; - %{bytes}M (default)
; - %{kilobytes}M
; - %{kilo}M
; - %{megabytes}M
; - %{mega}M
; %n: pool name
; %o: output header
; it must be enclosed in braces to specify the name of the header:
; - %{Content-Type}o
; - %{X-Powered-By}o
; - %{Transfer-Encoding}o
; - ....
; %p: PID of the child that serviced the request
; %P: PID of the parent of the child that serviced the request
; %q: the query string
; %Q: the '?' character if query string exists
; %r: the request URI (without the query string, see %q and %Q)
; %R: remote IP address
; %s: status (response code)
; %t: server time the request was received
; it can accept a strftime(3) format:
; %d/%b/%Y:%H:%M:%S %z (default)
; %T: time the log has been written (the request has finished)
; it can accept a strftime(3) format:
; %d/%b/%Y:%H:%M:%S %z (default)
; %u: remote user
;
; Default: "%R - %u %t \"%m %r\" %s"
;access.format = "%R - %u %t \"%m %r%Q%q\" %s %f %{mili}d %{kilo}M %C%%"
; The log file for slow requests
; Default Value: not set
; Note: slowlog is mandatory if request_slowlog_timeout is set
;slowlog = log/$pool.log.slow
; The timeout for serving a single request after which a PHP backtrace will be
; dumped to the 'slowlog' file. A value of '0s' means 'off'.
; Available units: s(econds)(default), m(inutes), h(ours), or d(ays)
; Default Value: 0
;request_slowlog_timeout = 0
; The timeout for serving a single request after which the worker process will
; be killed. This option should be used when the 'max_execution_time' ini option
; does not stop script execution for some reason. A value of '0' means 'off'.
; Available units: s(econds)(default), m(inutes), h(ours), or d(ays)
; Default Value: 0
;request_terminate_timeout = 5m
; Set open file descriptor rlimit.
; Default Value: system defined value
;rlimit_files = 1024
; Set max core size rlimit.
; Possible Values: 'unlimited' or an integer greater or equal to 0
; Default Value: system defined value
;rlimit_core = 0
; Chroot to this directory at the start. This value must be defined as an
; absolute path. When this value is not set, chroot is not used.
; Note: you can prefix with '$prefix' to chroot to the pool prefix or one
; of its subdirectories. If the pool prefix is not set, the global prefix
; will be used instead.
; Note: chrooting is a great security feature and should be used whenever
; possible. However, all PHP paths will be relative to the chroot
; (error_log, sessions.save_path, ...).
; Default Value: not set
;chroot =
; Chdir to this directory at the start.
; Note: relative path can be used.
; Default Value: current directory or / when chroot
chdir = /
; Redirect worker stdout and stderr into main error log. If not set, stdout and
; stderr will be redirected to /dev/null according to FastCGI specs.
; Note: on a heavily loaded environment, this can add some delay to the page
; processing time (several ms).
; Default Value: no
catch_workers_output = yes
; Clear environment in FPM workers
; Prevents arbitrary environment variables from reaching FPM worker processes
; by clearing the environment in workers before env vars specified in this
; pool configuration are added.
; Setting to "no" will make all environment variables available to PHP code
; via getenv(), $_ENV and $_SERVER.
; Default Value: yes
;clear_env = no
; Limits the extensions of the main script FPM will allow to parse. This can
; prevent configuration mistakes on the web server side. You should only limit
; FPM to .php extensions to prevent malicious users from using other extensions to
; execute PHP code.
; Note: set an empty value to allow all extensions.
; Default Value: .php
;security.limit_extensions = .php .php3 .php4 .php5
; Pass environment variables like LD_LIBRARY_PATH. All $VARIABLEs are taken from
; the current environment.
; Default Value: clean env
;env[HOSTNAME] = $HOSTNAME
;env[PATH] = /usr/local/bin:/usr/bin:/bin
;env[TMP] = /tmp
;env[TMPDIR] = /tmp
;env[TEMP] = /tmp
; Additional php.ini defines, specific to this pool of workers. These settings
; overwrite the values previously defined in the php.ini. The directives are the
; same as the PHP SAPI:
; php_value/php_flag - you can set classic ini defines which can
; be overwritten from PHP call 'ini_set'.
; php_admin_value/php_admin_flag - these directives won't be overwritten by
; PHP call 'ini_set'
; For php_*flag, valid values are on, off, 1, 0, true, false, yes or no.
; Defining 'extension' will load the corresponding shared extension from
; extension_dir. Defining 'disable_functions' or 'disable_classes' will not
; overwrite previously defined php.ini values, but will append the new value
; instead.
; Note: path INI options can be relative and will be expanded with the prefix
; (pool, global or /usr)
; Default Value: nothing is defined by default except the values in php.ini and
; specified at startup with the -d argument
;php_admin_value[sendmail_path] = /usr/sbin/sendmail -t -i -f www@my.domain.com
;php_flag[display_errors] = off
;php_admin_value[error_log] = /var/log/fpm-php.www.log
;php_admin_flag[log_errors] = on
;php_admin_value[memory_limit] = 32M
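pm.status_path and ping.path are answered by FPM itself on the 0.0.0.0:9000 listener, so they can be probed without a web server in front. A sketch using cgi-fcgi from the fcgi package, which is not installed in the image (an assumption of this example):
```shell
# Query the FPM status page directly over FastCGI.
SCRIPT_NAME=/php-fpm-status \
SCRIPT_FILENAME=/php-fpm-status \
REQUEST_METHOD=GET \
cgi-fcgi -bind -connect 127.0.0.1:9000
```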

View File

@ -1,56 +0,0 @@
[PHP]
expose_php = Off
disable_functions = exec,system,popen,passthru,apache_child_terminate,apache_get_modules,apache_get_version,apache_getenv,apache_note,apache_setenv,virtual,pcntl_alarm,pcntl_fork,pcntl_waitpid,pcntl_wait,pcntl_wifexited,pcntl_wifstopped,pcntl_wifsignaled,pcntl_wexitstatus,pcntl_wtermsig,pcntl_wstopsig,pcntl_signal,pcntl_signal_dispatch,pcntl_get_last_error,pcntl_strerror,pcntl_sigprocmask,pcntl_sigwaitinfo,pcntl_sigtimedwait,pcntl_exec,pcntl_getpriority,pcntl_setpriority
enable_dl = Off
allow_url_fopen = On
allow_url_include = Off
engine = On
short_open_tag = On
output_buffering = 4096
realpath_cache_size = 4096k
realpath_cache_ttl = 600
include_path = .:/usr/share/php
date.timezone = Europe/Paris
default_socket_timeout = 10
max_execution_time = 30
max_input_time = 60
max_input_vars = 1000
memory_limit = 512M
post_max_size = 32M
file_uploads = On
upload_tmp_dir = /tmp
upload_max_filesize = 32M
max_file_uploads = 20
error_reporting = E_ALL & ~E_DEPRECATED & ~E_STRICT
display_errors = Off
display_startup_errors = Off
log_errors = On
html_errors = On
SMTP = mailcatcher
smtp_port = 1025
sendmail_path = /usr/sbin/sendmail -t -i
mail_log = syslog
session.save_handler = memcached
session.save_path = memcached:11211
session.use_cookies = 1
session.cookie_secure =
session.use_only_cookies = 1
session.name = PHPSESSID
session.auto_start = 0
session.cookie_lifetime = 0
session.cookie_path = /
session.cookie_domain =
session.cookie_httponly =
session.serialize_handler = php
session.gc_probability = 0
session.gc_divisor = 1000
session.gc_maxlifetime = 2592000
session.bug_compat_42 = Off
session.bug_compat_warn = Off
session.referer_check =
session.entropy_length = 512
session.entropy_file = /dev/urandom
session.cache_limiter = nocache
session.cache_expire = 180
session.use_trans_sid = 0
session.hash_function = 0

View File

@ -1 +0,0 @@
suhosin.executor.disable_eval = On

View File

@ -1,13 +0,0 @@
xdebug.collect_params = 1
xdebug.collect_return = 1
xdebug.default_enable = 1
xdebug.force_display_errors = 1
xdebug.force_error_reporting = E_ALL & ~E_NOTICE & ~E_DEPRECATED
xdebug.halt_level = E_WARNING
xdebug.idekey = PHPSTORM
xdebug.max_nesting_level = 1024
xdebug.remote_enable = 1
xdebug.remote_connect_back = 1
xdebug.scream = 0
xdebug.show_error_trace = 1
xdebug.show_exception_trace = 1

View File

@ -1,270 +0,0 @@
FROM php:7.0-fpm-alpine as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
ARG AMQP_VERSION=stable
ARG AST_VERSION=stable
ARG APCU_VERSION=stable
ARG BLACKFIRE_VERSION=1.34.3
ARG CACHETOOL_VERSION=4.0.1
ARG DS_VERSION=stable
ARG EVENT_VERSION=stable
ARG IGBINARY_VERSION=stable
ARG IMAGICK_VERSION=stable
ARG GEOIP_VERSION=beta
ARG GRPC_VERSION=stable
ARG MCRYPT_VERSION=stable
ARG MEMCACHE_VERSION=4.0.1-php73
ARG MEMCACHED_VERSION=stable
ARG MONGODB_VERSION=1.6.1
ARG NEWRELIC_VERSION=9.11.0.267
ARG OAUTH_VERSION=stable
ARG RAR_VERSION=stable
ARG REDIS_VERSION=stable
ARG SNUFFLEUPAGUS_VERSION=0.5.1
ARG UUID_VERSION=stable
ARG XDEBUG_VERSION=2.7.2
ARG XHPROF_VERSION=2.2.0
ARG YAML_VERSION=stable
RUN apk --no-cache upgrade \
&& apk add --no-cache --virtual .build-deps \
$PHPIZE_DEPS \
aspell-dev \
bison \
bzip2-dev \
curl-dev \
enchant-dev \
flex \
freetype-dev \
gawk \
geoip-dev \
gettext-dev \
gmp-dev \
icu-dev \
imagemagick-dev \
imap-dev \
libevent-dev \
libjpeg-turbo-dev \
libmcrypt-dev \
libmemcached-dev \
libpng-dev \
libressl-dev \
libxml2-dev \
libxslt-dev \
linux-headers \
make \
net-snmp-dev \
openldap-dev \
postgresql-dev \
pcre-dev \
rabbitmq-c-dev \
tidyhtml-dev \
yaml-dev \
zlib-dev \
# blackfire \
&& wget https://packages.blackfire.io/binaries/blackfire-php/${BLACKFIRE_VERSION}/blackfire-php-alpine_amd64-php-$(php -r "echo PHP_MAJOR_VERSION.PHP_MINOR_VERSION;").so -O $(php -r "echo ini_get('extension_dir');")/blackfire.so \
# gd \
&& docker-php-ext-configure gd --with-freetype-dir=/usr/include/ --with-jpeg-dir=/usr/include/ --with-png-dir=/usr/include/ \
# memcache \
&& wget https://github.com/websupport-sk/pecl-memcache/archive/v${MEMCACHE_VERSION}.tar.gz -O /tmp/memcache-${MEMCACHE_VERSION}.tar.gz \
&& mkdir -p /tmp/memcache-${MEMCACHE_VERSION} \
&& tar xzf /tmp/memcache-${MEMCACHE_VERSION}.tar.gz -C /tmp/memcache-${MEMCACHE_VERSION} --strip-components=1 \
# https://github.com/websupport-sk/pecl-memcache/pull/39 \
&& sed -i '399s/);/, char *);/' /tmp/memcache-${MEMCACHE_VERSION}/php7/memcache_pool.h \
# https://github.com/websupport-sk/pecl-memcache/pull/40 \
&& sed -i '47i#if PHP_VERSION_ID < 70200\n register size_t newlen;\n#endif' /tmp/memcache-${MEMCACHE_VERSION}/php7/memcache_pool.c \
# newrelic \
&& wget https://download.newrelic.com/php_agent/archive/${NEWRELIC_VERSION}/newrelic-php5-${NEWRELIC_VERSION}-linux-musl.tar.gz -O /tmp/newrelic-${NEWRELIC_VERSION}.tar.gz \
&& mkdir -p /tmp/newrelic-${NEWRELIC_VERSION} \
&& tar xzf /tmp/newrelic-${NEWRELIC_VERSION}.tar.gz -C /tmp/newrelic-${NEWRELIC_VERSION} --strip-components=1 \
&& rm /tmp/newrelic-${NEWRELIC_VERSION}.tar.gz \
&& mv /tmp/newrelic-${NEWRELIC_VERSION}/agent/x64/newrelic-20151012.so $(php -r "echo ini_get('extension_dir');")/newrelic.so \
# snuffleupagus \
&& wget https://github.com/jvoisin/snuffleupagus/archive/v${SNUFFLEUPAGUS_VERSION}.tar.gz -O /tmp/snuffleupagus-${SNUFFLEUPAGUS_VERSION}.tar.gz \
&& mkdir -p /tmp/snuffleupagus-${SNUFFLEUPAGUS_VERSION} \
&& tar xzf /tmp/snuffleupagus-${SNUFFLEUPAGUS_VERSION}.tar.gz -C /tmp/snuffleupagus-${SNUFFLEUPAGUS_VERSION} --strip-components=1 \
&& docker-php-ext-configure /tmp/snuffleupagus-${SNUFFLEUPAGUS_VERSION}/src --prefix=/usr --enable-snuffleupagus \
# xhprof \
&& wget https://github.com/longxinH/xhprof/archive/v${XHPROF_VERSION}.tar.gz -O /tmp/xhprof-${XHPROF_VERSION}.tar.gz \
&& mkdir -p /tmp/xhprof-${XHPROF_VERSION} \
&& tar xzf /tmp/xhprof-${XHPROF_VERSION}.tar.gz -C /tmp/xhprof-${XHPROF_VERSION} --strip-components=1 \
&& docker-php-ext-configure /tmp/xhprof-${XHPROF_VERSION}/extension --with-php-config=/usr/local/bin/php-config \
# tidy \
&& docker-php-source extract \
&& cd /usr/src/php \
&& sed -i 's/buffio.h/tidybuffio.h/' ext/tidy/*.c \
&& docker-php-ext-install -j$(nproc) \
bcmath \
bz2 \
calendar \
dba \
enchant \
exif \
gd \
gettext \
gmp \
imap \
intl \
ldap \
mcrypt \
/tmp/memcache-${MEMCACHE_VERSION} \
mysqli \
opcache \
pcntl \
pdo_mysql \
pdo_pgsql \
pgsql \
pspell \
shmop \
snmp \
soap \
sockets \
sysvmsg \
sysvsem \
sysvshm \
tidy \
/tmp/xhprof-${XHPROF_VERSION}/extension \
xmlrpc \
xsl \
zip \
# docker-php-ext-install fails after snuffleupagus is enabled
/tmp/snuffleupagus-${SNUFFLEUPAGUS_VERSION}/src \
&& docker-php-source delete \
&& rm /usr/local/etc/php/conf.d/docker-php-ext-* \
&& rm -rf /tmp/memcache-* \
&& rm -rf /tmp/newrelic-* \
&& rm -rf /tmp/snuffleupagus-* \
&& rm -rf /tmp/xhprof-* \
&& pecl install amqp-${AMQP_VERSION} \
&& pecl install apcu-${APCU_VERSION} \
&& pecl install ast-${AST_VERSION} \
&& pecl install ds-${DS_VERSION} \
&& pecl install event-${EVENT_VERSION} \
&& pecl install geoip-${GEOIP_VERSION} \
&& pecl install grpc-${GRPC_VERSION} \
&& pecl install igbinary-${IGBINARY_VERSION} \
&& pecl install imagick-${IMAGICK_VERSION} \
&& pecl install memcached-${MEMCACHED_VERSION} \
&& pecl install mongodb-${MONGODB_VERSION} \
&& pecl install oauth-${OAUTH_VERSION} \
&& pecl install rar-${RAR_VERSION} \
&& pecl install redis-${REDIS_VERSION} \
&& echo |pecl install uuid-${UUID_VERSION} \
&& pecl install xdebug-${XDEBUG_VERSION} \
&& pecl install yaml-${YAML_VERSION} \
&& pecl clear-cache \
&& runDeps="$( \
scanelf --needed --nobanner --recursive /usr/local \
| awk '{ gsub(/,/, "\nso:", $2); print "so:" $2 }' \
| xargs -r apk info --installed \
| sort -u \
)" \
&& apk del .build-deps \
&& apk add --no-cache --virtual .run-deps $runDeps
RUN wget http://gordalina.github.io/cachetool/downloads/cachetool-${CACHETOOL_VERSION}.phar -O /usr/local/bin/cachetool \
&& chmod +x /usr/local/bin/cachetool \
&& echo -e "\
adapter: fastcgi \n\
fastcgi: 127.0.0.1:9000 \n\
" > /etc/cachetool.yml
RUN mkdir -p /etc/ssh && echo -e "\
Host * \n\
Compression yes \n\
" >> /etc/ssh/ssh_config
RUN apk add --no-cache \
bash \
bzip2 \
coreutils \
gettext \
git \
imagemagick \
lftp \
mailx \
make \
mysql-client \
nano \
openssh-client \
ssmtp \
vim
# Iconv fix: https://github.com/docker-library/php/issues/240#issuecomment-305038173
RUN apk add --no-cache --repository http://dl-cdn.alpinelinux.org/alpine/edge/community/ gnu-libiconv
ENV LD_PRELOAD=/usr/lib/preloadable_libiconv.so
# builtin modules : Core ctype curl date dom fileinfo filter ftp hash iconv json libxml mbstring mysqlnd openssl pcre PDO pdo_sqlite Phar posix readline Reflection session SimpleXML SPL sqlite3 standard tokenizer xml xmlreader xmlwriter zlib
# available modules : amqp apcu ast bcmath blackfire bz2 calendar dba ds enchant event exif gd geoip gmp grpc igbinary imap imagick intl ldap mcrypt memcache memcached mongodb mysqli newrelic oauth opcache pcntl pdo_mysql pdo_pgsql pgsql pspell rar redis shmop snmp snuffleupagus soap sockets sysvmsg sysvsem sysvshm tidy uuid wddx xdebug xhprof xmlrpc xsl yaml zip
ARG PHP_EXT_ENABLE="amqp apcu bcmath bz2 calendar gd geoip imagick intl mcrypt memcached mysqli oauth opcache pdo_mysql redis soap sockets uuid yaml zip"
RUN docker-php-ext-enable ${PHP_EXT_ENABLE}
# copy *.ini
COPY ${DOCKER_BUILD_DIR}/*.ini /usr/local/etc/php/conf.d/
COPY ${DOCKER_BUILD_DIR}/php-fpm-*.conf /usr/local/etc/php-fpm.d/
# custom php config
ARG PHP_INI_CONFIG
RUN echo -e ${PHP_INI_CONFIG// /\\n} >> /usr/local/etc/php/conf.d/config.ini
# custom php cli
ARG PHP_CLI_CONFIG="apc.enable_cli=0 max_execution_time=-1 memory_limit=-1 opcache.enable_cli=0 xdebug.default_enable=0"
RUN echo '#!/usr/bin/env sh' > /usr/local/bin/php-cli \
&& chmod +x /usr/local/bin/php-cli \
&& echo -e "\
/usr/local/bin/php -d ${PHP_CLI_CONFIG// / -d } \"\$@\"\
" >> /usr/local/bin/php-cli
# install cronlock
ADD https://raw.github.com/kvz/cronlock/master/cronlock /usr/bin/cronlock
RUN chmod +rx /usr/bin/cronlock
# config ssmtp
RUN echo "FromLineOverride=YES" >> /etc/ssmtp/ssmtp.conf
# https://bugs.php.net/bug.php?id=71880
ENV LOG_STREAM="/tmp/stdout"
RUN mkfifo $LOG_STREAM && chmod 777 $LOG_STREAM
# default www-data homedir to /var/www for crontabs
RUN sed -i 's|/home/www-data|/var/www|' /etc/passwd
WORKDIR /var/www
# redirect LOG_STREAM to stdout and start php-fpm with environment variables from .env
CMD [ "sh", "-c", "(exec 3<>$LOG_STREAM; cat <&3 >&1 & IFS=$'\n'; exec env $(cat .env 2>/dev/null) php-fpm)" ]
FROM dist as master
ARG UID
ARG USER
ENV UID=${UID}
ENV GID=${UID}
ENV USER=${USER}
# If we provide a specific UID
RUN let $UID >/dev/null 2>&1 \
# Remove user with $UID if it is not our $USER
&& if [ "$(getent passwd $UID |awk 'BEGIN {FS=":"} {print $1}')" != "$USER" ]; then \
sed -i '/^'$(getent passwd $UID |awk 'BEGIN {FS=":"} {print $1}')':x:'$UID':/d' /etc/passwd; \
sed -i '/^'$(getent group $GID |awk 'BEGIN {FS=":"} {print $1}')':x:'$GID':/d' /etc/group; \
fi \
# Force $UID if our $USER already exists
&& sed -i 's/^'$USER':x:[0-9]\+:[0-9]\+:/'$USER':x:'$UID':'$GID':/' /etc/passwd \
&& sed -i 's/^'$USER':x:[0-9]\+:/'$USER':x:'$GID':/' /etc/group \
# Create $USER if it does not exist
&& if [ "$(getent passwd $UID)" = "" ]; then \
echo "$USER:x:$UID:$GID::/home/$USER:/bin/false" >> /etc/passwd; \
echo "$USER:!:$(($(date +%s) / 60 / 60 / 24)):0:99999:7:::" >> /etc/shadow; \
echo "$USER:x:$GID:" >> /etc/group; \
fi \
&& mkdir -p /home/$USER \
&& chown $UID:$GID /home/$USER \
|| true
RUN chown -R $USER /usr/local/etc/php/conf.d/
USER $USER
ARG SSH_REMOTE_HOSTS
RUN mkdir -p ~/.ssh \
&& ssh-keyscan -t rsa -H $SSH_REMOTE_HOSTS >> ~/.ssh/known_hosts

View File

@ -1,5 +0,0 @@
apc.enable_cli = 0
apc.enabled = 1
apc.shm_segments = 1
apc.shm_size = 32M

View File

@ -1 +0,0 @@
blackfire.agent_socket=tcp://blackfire:8707

View File

@ -1 +0,0 @@
memcached.sess_locking = Off

View File

@ -1,4 +0,0 @@
[newrelic]
newrelic.logfile = /proc/self/fd/2
newrelic.daemon.logfile = /proc/self/fd/2

View File

@ -1,8 +0,0 @@
opcache.enable = 1
opcache.enable_cli = 0
opcache.error_log = /proc/self/fd/2
opcache.interned_strings_buffer = 16
opcache.log_verbosity_level = 2
opcache.max_accelerated_files = 20000
opcache.memory_consumption = 256
opcache.validate_timestamps = 1

View File

@ -1,412 +0,0 @@
; Start a new pool named 'www'.
; the variable $pool can be used in any directive and will be replaced by the
; pool name ('www' here)
[www]
; Per pool prefix
; It only applies on the following directives:
; - 'access.log'
; - 'slowlog'
; - 'listen' (unixsocket)
; - 'chroot'
; - 'chdir'
; - 'php_values'
; - 'php_admin_values'
; When not set, the global prefix (or /usr) applies instead.
; Note: This directive can also be relative to the global prefix.
; Default Value: none
;prefix = /path/to/pools/$pool
; Unix user/group of processes
; Note: The user is mandatory. If the group is not set, the default user's group
; will be used.
user = www-data
group = www-data
; The address on which to accept FastCGI requests.
; Valid syntaxes are:
; 'ip.add.re.ss:port' - to listen on a TCP socket to a specific IPv4 address on
; a specific port;
; '[ip:6:addr:ess]:port' - to listen on a TCP socket to a specific IPv6 address on
; a specific port;
; 'port' - to listen on a TCP socket to all IPv4 addresses on a
; specific port;
; '[::]:port' - to listen on a TCP socket to all addresses
; (IPv6 and IPv4-mapped) on a specific port;
; '/path/to/unix/socket' - to listen on a unix socket.
; Note: This value is mandatory.
;listen = /var/run/php5-fpm.sock
listen = 0.0.0.0:9000
; Set listen(2) backlog.
; Default Value: 65535 (-1 on FreeBSD and OpenBSD)
listen.backlog = 1023
; Set permissions for unix socket, if one is used. In Linux, read/write
; permissions must be set in order to allow connections from a web server. Many
; BSD-derived systems allow connections regardless of permissions.
; Default Values: user and group are set as the running user
; mode is set to 0660
;listen.owner = www-data
;listen.group = www-data
;listen.mode = 0660
; When POSIX Access Control Lists are supported you can set them using
; these options, value is a comma separated list of user/group names.
; When set, listen.owner and listen.group are ignored
;listen.acl_users =
;listen.acl_groups =
; List of addresses (IPv4/IPv6) of FastCGI clients which are allowed to connect.
; Equivalent to the FCGI_WEB_SERVER_ADDRS environment variable in the original
; PHP FCGI (5.2.2+). Makes sense only with a tcp listening socket. Each address
; must be separated by a comma. If this value is left blank, connections will be
; accepted from any ip address.
; Default Value: any
;listen.allowed_clients = 0.0.0.0
; Specify the nice(2) priority to apply to the pool processes (only if set)
; The value can vary from -19 (highest priority) to 20 (lowest priority)
; Note: - It will only work if the FPM master process is launched as root
; - The pool processes will inherit the master process priority
; unless specified otherwise
; Default Value: not set
; process.priority = -19
; Choose how the process manager will control the number of child processes.
; Possible Values:
; static - a fixed number (pm.max_children) of child processes;
; dynamic - the number of child processes is set dynamically based on the
; following directives. With this process management, there will
; always be at least one child.
; pm.max_children - the maximum number of children that can
; be alive at the same time.
; pm.start_servers - the number of children created on startup.
; pm.min_spare_servers - the minimum number of children in 'idle'
; state (waiting to process). If the number
; of 'idle' processes is less than this
; number then some children will be created.
; pm.max_spare_servers - the maximum number of children in 'idle'
; state (waiting to process). If the number
; of 'idle' processes is greater than this
; number then some children will be killed.
; ondemand - no children are created at startup. Children will be forked when
; new requests connect. The following parameters are used:
; pm.max_children - the maximum number of children that
; can be alive at the same time.
; pm.process_idle_timeout - The number of seconds after which
; an idle process will be killed.
; Note: This value is mandatory.
pm = dynamic
; The number of child processes to be created when pm is set to 'static' and the
; maximum number of child processes when pm is set to 'dynamic' or 'ondemand'.
; This value sets the limit on the number of simultaneous requests that will be
; served. Equivalent to the ApacheMaxClients directive with mpm_prefork.
; Equivalent to the PHP_FCGI_CHILDREN environment variable in the original PHP
; CGI. The below defaults are based on a server without much resources. Don't
; forget to tweak pm.* to fit your needs.
; Note: Used when pm is set to 'static', 'dynamic' or 'ondemand'
; Note: This value is mandatory.
pm.max_children = 16
; The number of child processes created on startup.
; Note: Used only when pm is set to 'dynamic'
; Default Value: min_spare_servers + (max_spare_servers - min_spare_servers) / 2
pm.start_servers = 2
; The desired minimum number of idle server processes.
; Note: Used only when pm is set to 'dynamic'
; Note: Mandatory when pm is set to 'dynamic'
pm.min_spare_servers = 1
; The desired maximum number of idle server processes.
; Note: Used only when pm is set to 'dynamic'
; Note: Mandatory when pm is set to 'dynamic'
pm.max_spare_servers = 3
; The number of seconds after which an idle process will be killed.
; Note: Used only when pm is set to 'ondemand'
; Default Value: 10s
;pm.process_idle_timeout = 10s;
; The number of requests each child process should execute before respawning.
; This can be useful to work around memory leaks in 3rd party libraries. For
; endless request processing specify '0'. Equivalent to PHP_FCGI_MAX_REQUESTS.
; Default Value: 0
;pm.max_requests = 500
; The URI to view the FPM status page. If this value is not set, no URI will be
; recognized as a status page. It shows the following information:
; pool - the name of the pool;
; process manager - static, dynamic or ondemand;
; start time - the date and time FPM has started;
; start since - number of seconds since FPM has started;
; accepted conn - the number of requests accepted by the pool;
; listen queue - the number of requests in the queue of pending
; connections (see backlog in listen(2));
; max listen queue - the maximum number of requests in the queue
; of pending connections since FPM has started;
; listen queue len - the size of the socket queue of pending connections;
; idle processes - the number of idle processes;
; active processes - the number of active processes;
; total processes - the number of idle + active processes;
; max active processes - the maximum number of active processes since FPM
; has started;
; max children reached - number of times the process limit has been reached,
; when pm tries to start more children (works only for
; pm 'dynamic' and 'ondemand');
; Values are updated in real time.
; Example output:
; pool: www
; process manager: static
; start time: 01/Jul/2011:17:53:49 +0200
; start since: 62636
; accepted conn: 190460
; listen queue: 0
; max listen queue: 1
; listen queue len: 42
; idle processes: 4
; active processes: 11
; total processes: 15
; max active processes: 12
; max children reached: 0
;
; By default the status page output is formatted as text/plain. Passing either
; 'html', 'xml' or 'json' in the query string will return the corresponding
; output syntax. Example:
; http://www.foo.bar/status
; http://www.foo.bar/status?json
; http://www.foo.bar/status?html
; http://www.foo.bar/status?xml
;
; By default the status page only outputs short status. Passing 'full' in the
; query string will also return status for each pool process.
; Example:
; http://www.foo.bar/status?full
; http://www.foo.bar/status?json&full
; http://www.foo.bar/status?html&full
; http://www.foo.bar/status?xml&full
; The Full status returns for each process:
; pid - the PID of the process;
; state - the state of the process (Idle, Running, ...);
; start time - the date and time the process has started;
; start since - the number of seconds since the process has started;
; requests - the number of requests the process has served;
; request duration - the duration in µs of the requests;
; request method - the request method (GET, POST, ...);
; request URI - the request URI with the query string;
; content length - the content length of the request (only with POST);
; user - the user (PHP_AUTH_USER) (or '-' if not set);
; script - the main script called (or '-' if not set);
; last request cpu - the %cpu the last request consumed
; it's always 0 if the process is not in Idle state
; because CPU calculation is done when the request
; processing has terminated;
; last request memory - the max amount of memory the last request consumed
; it's always 0 if the process is not in Idle state
; because memory calculation is done when the request
; processing has terminated;
; If the process is in Idle state, then the information relates to the
; last request the process has served. Otherwise it relates to
; the current request being served.
; Example output:
; ************************
; pid: 31330
; state: Running
; start time: 01/Jul/2011:17:53:49 +0200
; start since: 63087
; requests: 12808
; request duration: 1250261
; request method: GET
; request URI: /test_mem.php?N=10000
; content length: 0
; user: -
; script: /home/fat/web/docs/php/test_mem.php
; last request cpu: 0.00
; last request memory: 0
;
; Note: There is a real-time FPM status monitoring sample web page available
; It's available in: /usr/share/php5/fpm/status.html
;
; Note: The value must start with a leading slash (/). The value can be
; anything, but it may not be a good idea to use the .php extension or it
; may conflict with a real PHP file.
; Default Value: not set
pm.status_path = /php-fpm-status
; The ping URI to call the monitoring page of FPM. If this value is not set, no
; URI will be recognized as a ping page. This could be used to test from outside
; that FPM is alive and responding, or to
; - create a graph of FPM availability (rrd or such);
; - remove a server from a group if it is not responding (load balancing);
; - trigger alerts for the operating team (24/7).
; Note: The value must start with a leading slash (/). The value can be
; anything, but it may not be a good idea to use the .php extension or it
; may conflict with a real PHP file.
; Default Value: not set
ping.path = /php-fpm-ping
; This directive may be used to customize the response of a ping request. The
; response is formatted as text/plain with a 200 response code.
; Default Value: pong
;ping.response = pong
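; A minimal liveness probe along these lines could back a container health
; check (a sketch; again assumes the cgi-fcgi tool is installed):
;   SCRIPT_NAME=/php-fpm-ping SCRIPT_FILENAME=/php-fpm-ping REQUEST_METHOD=GET \
;       cgi-fcgi -bind -connect 127.0.0.1:9000 | grep -q pong || exit 1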
; The access log file
; Default: not set
;access.log = log/$pool.access.log
; The access log format.
; The following syntax is allowed
; %%: the '%' character
; %C: %CPU used by the request
; it can accept the following format:
; - %{user}C for user CPU only
; - %{system}C for system CPU only
; - %{total}C for user + system CPU (default)
; %d: time taken to serve the request
; it can accept the following format:
; - %{seconds}d (default)
; - %{miliseconds}d
; - %{mili}d
; - %{microseconds}d
; - %{micro}d
; %e: an environment variable (same as $_ENV or $_SERVER)
; it must be wrapped in braces to specify the name of the env
; variable. Some examples:
; - server specifics like: %{REQUEST_METHOD}e or %{SERVER_PROTOCOL}e
; - HTTP headers like: %{HTTP_HOST}e or %{HTTP_USER_AGENT}e
; %f: script filename
; %l: content-length of the request (for POST request only)
; %m: request method
; %M: peak of memory allocated by PHP
; it can accept the following format:
; - %{bytes}M (default)
; - %{kilobytes}M
; - %{kilo}M
; - %{megabytes}M
; - %{mega}M
; %n: pool name
; %o: output header
; it must be wrapped in braces to specify the name of the header:
; - %{Content-Type}o
; - %{X-Powered-By}o
; - %{Transfer-Encoding}o
; - ....
; %p: PID of the child that serviced the request
; %P: PID of the parent of the child that serviced the request
; %q: the query string
; %Q: the '?' character if query string exists
; %r: the request URI (without the query string, see %q and %Q)
; %R: remote IP address
; %s: status (response code)
; %t: server time the request was received
; it can accept a strftime(3) format:
; %d/%b/%Y:%H:%M:%S %z (default)
; %T: time the log has been written (the request has finished)
; it can accept a strftime(3) format:
; %d/%b/%Y:%H:%M:%S %z (default)
; %u: remote user
;
; Default: "%R - %u %t \"%m %r\" %s"
;access.format = "%R - %u %t \"%m %r%Q%q\" %s %f %{mili}d %{kilo}M %C%%"
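; With the default format above, a request might be logged as (hypothetical
; values):
;   172.17.0.1 - - 01/Jul/2011:17:53:49 +0200 "GET /index.php" 200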
; The log file for slow requests
; Default Value: not set
; Note: slowlog is mandatory if request_slowlog_timeout is set
;slowlog = log/$pool.log.slow
; The timeout for serving a single request after which a PHP backtrace will be
; dumped to the 'slowlog' file. A value of '0s' means 'off'.
; Available units: s(econds)(default), m(inutes), h(ours), or d(ays)
; Default Value: 0
;request_slowlog_timeout = 0
; The timeout for serving a single request after which the worker process will
; be killed. This option should be used when the 'max_execution_time' ini option
; does not stop script execution for some reason. A value of '0' means 'off'.
; Available units: s(econds)(default), m(inutes), h(ours), or d(ays)
; Default Value: 0
;request_terminate_timeout = 5m
; Set open file descriptor rlimit.
; Default Value: system defined value
;rlimit_files = 1024
; Set max core size rlimit.
; Possible Values: 'unlimited' or an integer greater or equal to 0
; Default Value: system defined value
;rlimit_core = 0
; Chroot to this directory at the start. This value must be defined as an
; absolute path. When this value is not set, chroot is not used.
; Note: you can prefix with '$prefix' to chroot to the pool prefix or one
; of its subdirectories. If the pool prefix is not set, the global prefix
; will be used instead.
; Note: chrooting is a great security feature and should be used whenever
; possible. However, all PHP paths will be relative to the chroot
; (error_log, sessions.save_path, ...).
; Default Value: not set
;chroot =
; Chdir to this directory at the start.
; Note: relative path can be used.
; Default Value: current directory or / when chroot
chdir = /
; Redirect worker stdout and stderr into main error log. If not set, stdout and
; stderr will be redirected to /dev/null according to FastCGI specs.
; Note: in high-load environments, this can add some delay to the page
; processing time (several ms).
; Default Value: no
catch_workers_output = yes
; Clear environment in FPM workers
; Prevents arbitrary environment variables from reaching FPM worker processes
; by clearing the environment in workers before env vars specified in this
; pool configuration are added.
; Setting to "no" will make all environment variables available to PHP code
; via getenv(), $_ENV and $_SERVER.
; Default Value: yes
;clear_env = no
; Limits the extensions of the main script FPM will allow to parse. This can
; prevent configuration mistakes on the web server side. You should only limit
; FPM to .php extensions to prevent malicious users from using other
; extensions to execute PHP code.
; Note: set an empty value to allow all extensions.
; Default Value: .php
;security.limit_extensions = .php .php3 .php4 .php5
; Pass environment variables like LD_LIBRARY_PATH. All $VARIABLEs are taken from
; the current environment.
; Default Value: clean env
;env[HOSTNAME] = $HOSTNAME
;env[PATH] = /usr/local/bin:/usr/bin:/bin
;env[TMP] = /tmp
;env[TMPDIR] = /tmp
;env[TEMP] = /tmp
; Additional php.ini defines, specific to this pool of workers. These settings
; overwrite the values previously defined in the php.ini. The directives are the
; same as the PHP SAPI:
; php_value/php_flag - you can set classic ini defines which can
; be overwritten from PHP call 'ini_set'.
; php_admin_value/php_admin_flag - these directives won't be overwritten by
; PHP call 'ini_set'
; For php_*flag, valid values are on, off, 1, 0, true, false, yes or no.
; Defining 'extension' will load the corresponding shared extension from
; extension_dir. Defining 'disable_functions' or 'disable_classes' will not
; overwrite previously defined php.ini values, but will append the new value
; instead.
; Note: path INI options can be relative and will be expanded with the prefix
; (pool, global or /usr)
; Default Value: nothing is defined by default except the values in php.ini and
; specified at startup with the -d argument
;php_admin_value[sendmail_path] = /usr/sbin/sendmail -t -i -f www@my.domain.com
;php_flag[display_errors] = off
;php_admin_value[error_log] = /var/log/fpm-php.www.log
;php_admin_flag[log_errors] = on
;php_admin_value[memory_limit] = 32M

View File

@ -1,56 +0,0 @@
[PHP]
expose_php = Off
disable_functions = exec,system,popen,passthru,apache_child_terminate,apache_get_modules,apache_get_version,apache_getenv,apache_note,apache_setenv,virtual,pcntl_alarm,pcntl_fork,pcntl_waitpid,pcntl_wait,pcntl_wifexited,pcntl_wifstopped,pcntl_wifsignaled,pcntl_wexitstatus,pcntl_wtermsig,pcntl_wstopsig,pcntl_signal,pcntl_signal_dispatch,pcntl_get_last_error,pcntl_strerror,pcntl_sigprocmask,pcntl_sigwaitinfo,pcntl_sigtimedwait,pcntl_exec,pcntl_getpriority,pcntl_setpriority
enable_dl = Off
allow_url_fopen = On
allow_url_include = Off
engine = On
short_open_tag = On
output_buffering = 4096
realpath_cache_size = 4096k
realpath_cache_ttl = 600
include_path = .:/usr/share/php
date.timezone = Europe/Paris
default_socket_timeout = 10
max_execution_time = 30
max_input_time = 60
max_input_vars = 1000
memory_limit = 512M
post_max_size = 32M
file_uploads = On
upload_tmp_dir = /tmp
upload_max_filesize = 32M
max_file_uploads = 20
error_reporting = E_ALL & ~E_DEPRECATED & ~E_STRICT
display_errors = Off
display_startup_errors = Off
log_errors = On
html_errors = On
SMTP = mailcatcher
smtp_port = 1025
sendmail_path = /usr/sbin/sendmail -t -i
mail_log = syslog
session.save_handler = memcached
session.save_path = memcached:11211
session.use_cookies = 1
session.cookie_secure =
session.use_only_cookies = 1
session.name = PHPSESSID
session.auto_start = 0
session.cookie_lifetime = 0
session.cookie_path = /
session.cookie_domain =
session.cookie_httponly =
session.serialize_handler = php
session.gc_probability = 0
session.gc_divisor = 1000
session.gc_maxlifetime = 2592000
session.bug_compat_42 = Off
session.bug_compat_warn = Off
session.referer_check =
session.entropy_length = 512
session.entropy_file = /dev/urandom
session.cache_limiter = nocache
session.cache_expire = 180
session.use_trans_sid = 0
session.hash_function = 0

View File

@ -1,13 +0,0 @@
xdebug.collect_params = 1
xdebug.collect_return = 1
xdebug.default_enable = 1
xdebug.force_display_errors = 1
xdebug.force_error_reporting = E_ALL & ~E_NOTICE & ~E_DEPRECATED
xdebug.halt_level = E_WARNING
xdebug.idekey = PHPSTORM
xdebug.max_nesting_level = 1024
xdebug.remote_enable = 1
xdebug.remote_connect_back = 1
xdebug.scream = 0
xdebug.show_error_trace = 1
xdebug.show_exception_trace = 1

View File

@ -1,262 +0,0 @@
FROM php:7.1-fpm-alpine as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
ARG AMQP_VERSION=stable
ARG AST_VERSION=stable
ARG APCU_VERSION=stable
ARG BLACKFIRE_VERSION=1.34.3
ARG CACHETOOL_VERSION=4.0.1
ARG DS_VERSION=stable
ARG EVENT_VERSION=stable
ARG IGBINARY_VERSION=stable
ARG IMAGICK_VERSION=stable
ARG GEOIP_VERSION=beta
ARG GRPC_VERSION=stable
ARG MCRYPT_VERSION=stable
ARG MEMCACHE_VERSION=4.0.1-php73
ARG MEMCACHED_VERSION=stable
ARG MONGODB_VERSION=stable
ARG NEWRELIC_VERSION=9.11.0.267
ARG OAUTH_VERSION=stable
ARG RAR_VERSION=stable
ARG REDIS_VERSION=stable
ARG SNUFFLEUPAGUS_VERSION=0.5.1
ARG UUID_VERSION=stable
ARG XDEBUG_VERSION=stable
ARG XHPROF_VERSION=2.2.0
ARG YAML_VERSION=stable
RUN apk --no-cache upgrade \
&& apk add --no-cache --virtual .build-deps \
$PHPIZE_DEPS \
aspell-dev \
bison \
bzip2-dev \
curl-dev \
enchant-dev \
flex \
freetype-dev \
gawk \
geoip-dev \
gettext-dev \
gmp-dev \
icu-dev \
imagemagick-dev \
imap-dev \
libevent-dev \
libjpeg-turbo-dev \
libmcrypt-dev \
libmemcached-dev \
libpng-dev \
libressl-dev \
libxml2-dev \
libxslt-dev \
make \
net-snmp-dev \
openldap-dev \
postgresql-dev \
pcre-dev \
rabbitmq-c-dev \
yaml-dev \
zlib-dev \
# blackfire \
&& wget https://packages.blackfire.io/binaries/blackfire-php/${BLACKFIRE_VERSION}/blackfire-php-alpine_amd64-php-$(php -r "echo PHP_MAJOR_VERSION.PHP_MINOR_VERSION;").so -O $(php -r "echo ini_get('extension_dir');")/blackfire.so \
# gd \
&& docker-php-ext-configure gd --with-freetype-dir=/usr/include/ --with-jpeg-dir=/usr/include/ --with-png-dir=/usr/include/ \
# memcache \
&& wget https://github.com/websupport-sk/pecl-memcache/archive/v${MEMCACHE_VERSION}.tar.gz -O /tmp/memcache-${MEMCACHE_VERSION}.tar.gz \
&& mkdir -p /tmp/memcache-${MEMCACHE_VERSION} \
&& tar xzf /tmp/memcache-${MEMCACHE_VERSION}.tar.gz -C /tmp/memcache-${MEMCACHE_VERSION} --strip-components=1 \
# https://github.com/websupport-sk/pecl-memcache/pull/39 \
&& sed -i '399s/);/, char *);/' /tmp/memcache-${MEMCACHE_VERSION}/php7/memcache_pool.h \
# https://github.com/websupport-sk/pecl-memcache/pull/40 \
&& sed -i '47i#if PHP_VERSION_ID < 70200\n register size_t newlen;\n#endif' /tmp/memcache-${MEMCACHE_VERSION}/php7/memcache_pool.c \
# newrelic \
&& wget https://download.newrelic.com/php_agent/archive/${NEWRELIC_VERSION}/newrelic-php5-${NEWRELIC_VERSION}-linux-musl.tar.gz -O /tmp/newrelic-${NEWRELIC_VERSION}.tar.gz \
&& mkdir -p /tmp/newrelic-${NEWRELIC_VERSION} \
&& tar xzf /tmp/newrelic-${NEWRELIC_VERSION}.tar.gz -C /tmp/newrelic-${NEWRELIC_VERSION} --strip-components=1 \
&& mv /tmp/newrelic-${NEWRELIC_VERSION}/agent/x64/newrelic-20160303.so $(php -r "echo ini_get('extension_dir');")/newrelic.so \
# snuffleupagus \
&& wget https://github.com/jvoisin/snuffleupagus/archive/v${SNUFFLEUPAGUS_VERSION}.tar.gz -O /tmp/snuffleupagus-${SNUFFLEUPAGUS_VERSION}.tar.gz \
&& mkdir -p /tmp/snuffleupagus-${SNUFFLEUPAGUS_VERSION} \
&& tar xzf /tmp/snuffleupagus-${SNUFFLEUPAGUS_VERSION}.tar.gz -C /tmp/snuffleupagus-${SNUFFLEUPAGUS_VERSION} --strip-components=1 \
&& docker-php-ext-configure /tmp/snuffleupagus-${SNUFFLEUPAGUS_VERSION}/src --prefix=/usr --enable-snuffleupagus \
# xhprof \
&& wget https://github.com/longxinH/xhprof/archive/v${XHPROF_VERSION}.tar.gz -O /tmp/xhprof-${XHPROF_VERSION}.tar.gz \
&& mkdir -p /tmp/xhprof-${XHPROF_VERSION} \
&& tar xzf /tmp/xhprof-${XHPROF_VERSION}.tar.gz -C /tmp/xhprof-${XHPROF_VERSION} --strip-components=1 \
&& docker-php-ext-configure /tmp/xhprof-${XHPROF_VERSION}/extension --with-php-config=/usr/local/bin/php-config \
&& docker-php-ext-install -j$(nproc) \
bcmath \
bz2 \
calendar \
dba \
enchant \
exif \
gd \
gettext \
gmp \
imap \
intl \
ldap \
mcrypt \
/tmp/memcache-${MEMCACHE_VERSION} \
mysqli \
opcache \
pcntl \
pdo_mysql \
pdo_pgsql \
pgsql \
pspell \
shmop \
snmp \
soap \
sockets \
sysvmsg \
sysvsem \
sysvshm \
/tmp/xhprof-${XHPROF_VERSION}/extension \
xmlrpc \
xsl \
zip \
# docker-php-ext-install fails after snuffleupagus is enabled
/tmp/snuffleupagus-${SNUFFLEUPAGUS_VERSION}/src \
&& docker-php-source delete \
&& rm /usr/local/etc/php/conf.d/docker-php-ext-* \
&& rm -rf /tmp/memcache-* \
&& rm -rf /tmp/newrelic-* \
&& rm -rf /tmp/snuffleupagus-* \
&& rm -rf /tmp/xhprof-* \
&& pecl install amqp-${AMQP_VERSION} \
&& pecl install apcu-${APCU_VERSION} \
&& pecl install ast-${AST_VERSION} \
&& pecl install ds-${DS_VERSION} \
&& pecl install event-${EVENT_VERSION} \
&& pecl install geoip-${GEOIP_VERSION} \
&& pecl install grpc-${GRPC_VERSION} \
&& pecl install igbinary-${IGBINARY_VERSION} \
&& pecl install imagick-${IMAGICK_VERSION} \
&& pecl install memcached-${MEMCACHED_VERSION} \
&& pecl install mongodb-${MONGODB_VERSION} \
&& pecl install oauth-${OAUTH_VERSION} \
&& pecl install rar-${RAR_VERSION} \
&& pecl install redis-${REDIS_VERSION} \
&& echo |pecl install uuid-${UUID_VERSION} \
&& pecl install xdebug-${XDEBUG_VERSION} \
&& pecl install yaml-${YAML_VERSION} \
&& pecl clear-cache \
&& runDeps="$( \
scanelf --needed --nobanner --recursive /usr/local \
| awk '{ gsub(/,/, "\nso:", $2); print "so:" $2 }' \
| xargs -r apk info --installed \
| sort -u \
)" \
&& apk del .build-deps \
&& apk add --no-cache --virtual .run-deps $runDeps
RUN wget http://gordalina.github.io/cachetool/downloads/cachetool-${CACHETOOL_VERSION}.phar -O /usr/local/bin/cachetool \
&& chmod +x /usr/local/bin/cachetool \
&& echo -e "\
adapter: fastcgi \n\
fastcgi: 127.0.0.1:9000 \n\
" > /etc/cachetool.yml
RUN mkdir -p /etc/ssh && echo -e "\
Host * \n\
Compression yes \n\
" >> /etc/ssh/ssh_config
RUN apk add --no-cache \
bash \
bzip2 \
coreutils \
gettext \
git \
imagemagick \
lftp \
mailx \
make \
mysql-client \
nano \
openssh-client \
ssmtp \
vim
# Iconv fix: https://github.com/docker-library/php/issues/240#issuecomment-305038173
RUN apk add --no-cache --repository http://dl-cdn.alpinelinux.org/alpine/edge/community/ gnu-libiconv
ENV LD_PRELOAD=/usr/lib/preloadable_libiconv.so
# builtin modules : Core ctype curl date dom fileinfo filter ftp hash iconv json libxml mbstring mysqlnd openssl pcre PDO pdo_sqlite Phar posix readline Reflection session SimpleXML SPL sqlite3 standard tokenizer xml xmlreader xmlwriter zlib
# available modules : amqp apcu ast bcmath blackfire bz2 calendar dba ds enchant event exif gd geoip gmp grpc igbinary imap imagick intl ldap mcrypt memcache memcached mongodb mysqli newrelic oauth opcache pcntl pdo_mysql pdo_pgsql pgsql pspell rar redis shmop snmp snuffleupagus soap sockets sysvmsg sysvsem sysvshm xhprof uuid wddx xdebug xhprof xmlrpc xsl yaml zip
ARG PHP_EXT_ENABLE="amqp apcu bcmath bz2 calendar gd geoip imagick intl mcrypt memcached mysqli oauth opcache pdo_mysql redis soap sockets uuid yaml zip"
RUN docker-php-ext-enable ${PHP_EXT_ENABLE}
# copy *.ini
COPY ${DOCKER_BUILD_DIR}/*.ini /usr/local/etc/php/conf.d/
COPY ${DOCKER_BUILD_DIR}/php-fpm-*.conf /usr/local/etc/php-fpm.d/
# custom php config
ARG PHP_INI_CONFIG
RUN echo -e ${PHP_INI_CONFIG// /\\n} >> /usr/local/etc/php/conf.d/config.ini
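# The parameter expansion turns the space-separated build argument into one
# ini directive per line; for example (hypothetical values):
#   docker build --build-arg PHP_INI_CONFIG="memory_limit=256M date.timezone=UTC" .
# appends to config.ini:
#   memory_limit=256M
#   date.timezone=UTC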
# custom php cli
ARG PHP_CLI_CONFIG="apc.enable_cli=0 max_execution_time=-1 memory_limit=-1 opcache.enable_cli=0 xdebug.default_enable=0"
RUN echo '#!/usr/bin/env sh' > /usr/local/bin/php-cli \
&& chmod +x /usr/local/bin/php-cli \
&& echo -e "\
/usr/local/bin/php -d ${PHP_CLI_CONFIG// / -d } \"\$@\"\
" >> /usr/local/bin/php-cli
# install cronlock
ADD https://raw.github.com/kvz/cronlock/master/cronlock /usr/bin/cronlock
RUN chmod +rx /usr/bin/cronlock
# config ssmtp
RUN echo "FromLineOverride=YES" >> /etc/ssmtp/ssmtp.conf
# https://bugs.php.net/bug.php?id=71880
ENV LOG_STREAM="/tmp/stdout"
RUN mkfifo $LOG_STREAM && chmod 777 $LOG_STREAM
# default www-data homedir to /var/www for crontabs
RUN sed -i 's|/home/www-data|/var/www|' /etc/passwd
WORKDIR /var/www
# redirect LOG_STREAM to stdout and start php-fpm with environment variables from .env
CMD [ "sh", "-c", "(exec 3<>$LOG_STREAM; cat <&3 >&1 & IFS=$'\n'; exec env $(cat .env 2>/dev/null) php-fpm)" ]
FROM dist as master
ARG UID
ARG USER
ENV UID=${UID}
ENV GID=${UID}
ENV USER=${USER}
# If we provide a specific UID
RUN let $UID >/dev/null 2>&1 \
# Remove user with $UID if it is not our $USER
&& if [ "$(getent passwd $UID |awk 'BEGIN {FS=":"} {print $1}')" != "$USER" ]; then \
sed -i '/^'$(getent passwd $UID |awk 'BEGIN {FS=":"} {print $1}')':x:'$UID':/d' /etc/passwd; \
sed -i '/^'$(getent group $GID |awk 'BEGIN {FS=":"} {print $1}')':x:'$GID':/d' /etc/group; \
fi \
# Force $UID if our $USER already exists
&& sed -i 's/^'$USER':x:[0-9]\+:[0-9]\+:/'$USER':x:'$UID':'$GID':/' /etc/passwd \
&& sed -i 's/^'$USER':x:[0-9]\+:/'$USER':x:'$GID':/' /etc/group \
# Create $USER if it does not exist
&& if [ "$(getent passwd $UID)" = "" ]; then \
echo "$USER:x:$UID:$GID::/home/$USER:/bin/false" >> /etc/passwd; \
echo "$USER:!:$(($(date +%s) / 60 / 60 / 24)):0:99999:7:::" >> /etc/shadow; \
echo "$USER:x:$GID:" >> /etc/group; \
fi \
&& mkdir -p /home/$USER \
&& chown $UID:$GID /home/$USER \
|| true
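# The intent is that the image can be built to match the host account, e.g.
# (a sketch; UID and USER are the build args declared above):
#   docker build --target master \
#       --build-arg UID=$(id -u) --build-arg USER=$(id -un) .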
RUN chown -R $USER /usr/local/etc/php/conf.d/
USER $USER
ARG SSH_REMOTE_HOSTS
RUN mkdir -p ~/.ssh \
&& ssh-keyscan -t rsa -H $SSH_REMOTE_HOSTS >> ~/.ssh/known_hosts

View File

@ -1,5 +0,0 @@
apc.enable_cli = 0
apc.enabled = 1
apc.shm_segments = 1
apc.shm_size = 32M

View File

@ -1 +0,0 @@
blackfire.agent_socket=tcp://blackfire:8707

View File

@ -1 +0,0 @@
memcached.sess_locking = Off

View File

@ -1,4 +0,0 @@
[newrelic]
newrelic.logfile = /proc/self/fd/2
newrelic.daemon.logfile = /proc/self/fd/2

View File

@ -1,8 +0,0 @@
opcache.enable = 1
opcache.enable_cli = 0
opcache.error_log = /proc/self/fd/2
opcache.interned_strings_buffer = 16
opcache.log_verbosity_level = 2
opcache.max_accelerated_files = 20000
opcache.memory_consumption = 256
opcache.validate_timestamps = 1

View File

@ -1,412 +0,0 @@
; Start a new pool named 'www'.
; the variable $pool can be used in any directive and will be replaced by the
; pool name ('www' here)
[www]
; Per pool prefix
; It only applies on the following directives:
; - 'access.log'
; - 'slowlog'
; - 'listen' (unixsocket)
; - 'chroot'
; - 'chdir'
; - 'php_values'
; - 'php_admin_values'
; When not set, the global prefix (or /usr) applies instead.
; Note: This directive can also be relative to the global prefix.
; Default Value: none
;prefix = /path/to/pools/$pool
; Unix user/group of processes
; Note: The user is mandatory. If the group is not set, the default user's group
; will be used.
user = www-data
group = www-data
; The address on which to accept FastCGI requests.
; Valid syntaxes are:
; 'ip.add.re.ss:port' - to listen on a TCP socket to a specific IPv4 address on
; a specific port;
; '[ip:6:addr:ess]:port' - to listen on a TCP socket to a specific IPv6 address on
; a specific port;
; 'port' - to listen on a TCP socket to all IPv4 addresses on a
; specific port;
; '[::]:port' - to listen on a TCP socket to all addresses
; (IPv6 and IPv4-mapped) on a specific port;
; '/path/to/unix/socket' - to listen on a unix socket.
; Note: This value is mandatory.
;listen = /var/run/php5-fpm.sock
listen = 0.0.0.0:9000
; Set listen(2) backlog.
; Default Value: 65535 (-1 on FreeBSD and OpenBSD)
listen.backlog = 1023
; Set permissions for unix socket, if one is used. In Linux, read/write
; permissions must be set in order to allow connections from a web server. Many
; BSD-derived systems allow connections regardless of permissions.
; Default Values: user and group are set as the running user
; mode is set to 0660
;listen.owner = www-data
;listen.group = www-data
;listen.mode = 0660
; When POSIX Access Control Lists are supported, you can set them using
; these options; the value is a comma-separated list of user/group names.
; When set, listen.owner and listen.group are ignored.
;listen.acl_users =
;listen.acl_groups =
; List of addresses (IPv4/IPv6) of FastCGI clients which are allowed to connect.
; Equivalent to the FCGI_WEB_SERVER_ADDRS environment variable in the original
; PHP FCGI (5.2.2+). Makes sense only with a tcp listening socket. Each address
; must be separated by a comma. If this value is left blank, connections will be
; accepted from any ip address.
; Default Value: any
;listen.allowed_clients = 0.0.0.0
; Specify the nice(2) priority to apply to the pool processes (only if set)
; The value can vary from -19 (highest priority) to 20 (lowest priority)
; Note: - It will only work if the FPM master process is launched as root
; - The pool processes will inherit the master process priority
; unless specified otherwise
; Default Value: not set
; process.priority = -19
; Choose how the process manager will control the number of child processes.
; Possible Values:
; static - a fixed number (pm.max_children) of child processes;
; dynamic - the number of child processes is set dynamically based on the
; following directives. With this process management, there will
; always be at least 1 child.
; pm.max_children - the maximum number of children that can
; be alive at the same time.
; pm.start_servers - the number of children created on startup.
; pm.min_spare_servers - the minimum number of children in 'idle'
; state (waiting to process). If the number
; of 'idle' processes is less than this
; number then some children will be created.
; pm.max_spare_servers - the maximum number of children in 'idle'
; state (waiting to process). If the number
; of 'idle' processes is greater than this
; number then some children will be killed.
; ondemand - no children are created at startup. Children are forked when
; new requests connect. The following parameters are used:
; pm.max_children - the maximum number of children that
; can be alive at the same time.
; pm.process_idle_timeout - The number of seconds after which
; an idle process will be killed.
; Note: This value is mandatory.
pm = dynamic
; The number of child processes to be created when pm is set to 'static' and the
; maximum number of child processes when pm is set to 'dynamic' or 'ondemand'.
; This value sets the limit on the number of simultaneous requests that will be
; served. Equivalent to the Apache MaxClients directive with mpm_prefork.
; Equivalent to the PHP_FCGI_CHILDREN environment variable in the original PHP
; CGI. The defaults below are based on a server with limited resources. Don't
; forget to tweak pm.* to fit your needs.
; Note: Used when pm is set to 'static', 'dynamic' or 'ondemand'
; Note: This value is mandatory.
pm.max_children = 16
; The number of child processes created on startup.
; Note: Used only when pm is set to 'dynamic'
; Default Value: min_spare_servers + (max_spare_servers - min_spare_servers) / 2
pm.start_servers = 2
; The desired minimum number of idle server processes.
; Note: Used only when pm is set to 'dynamic'
; Note: Mandatory when pm is set to 'dynamic'
pm.min_spare_servers = 1
; The desired maximum number of idle server processes.
; Note: Used only when pm is set to 'dynamic'
; Note: Mandatory when pm is set to 'dynamic'
pm.max_spare_servers = 3
; The number of seconds after which an idle process will be killed.
; Note: Used only when pm is set to 'ondemand'
; Default Value: 10s
;pm.process_idle_timeout = 10s;
; The number of requests each child process should execute before respawning.
; This can be useful to work around memory leaks in 3rd party libraries. For
; endless request processing specify '0'. Equivalent to PHP_FCGI_MAX_REQUESTS.
; Default Value: 0
;pm.max_requests = 500
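; A common way to size the pm.* values above is to divide the memory budget
; for PHP-FPM by the average worker footprint (assumed numbers):
;   2048 MB reserved / ~128 MB per worker = 16, the pm.max_children used above.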
; The URI to view the FPM status page. If this value is not set, no URI will be
; recognized as a status page. It shows the following information:
; pool - the name of the pool;
; process manager - static, dynamic or ondemand;
; start time - the date and time FPM has started;
; start since - number of seconds since FPM has started;
; accepted conn - the number of requests accepted by the pool;
; listen queue - the number of requests in the queue of pending
; connections (see backlog in listen(2));
; max listen queue - the maximum number of requests in the queue
; of pending connections since FPM has started;
; listen queue len - the size of the socket queue of pending connections;
; idle processes - the number of idle processes;
; active processes - the number of active processes;
; total processes - the number of idle + active processes;
; max active processes - the maximum number of active processes since FPM
; has started;
; max children reached - the number of times the process limit has been
; reached when pm tries to start more children (works
; only for pm 'dynamic' and 'ondemand');
; Values are updated in real time.
; Example output:
; pool: www
; process manager: static
; start time: 01/Jul/2011:17:53:49 +0200
; start since: 62636
; accepted conn: 190460
; listen queue: 0
; max listen queue: 1
; listen queue len: 42
; idle processes: 4
; active processes: 11
; total processes: 15
; max active processes: 12
; max children reached: 0
;
; By default the status page output is formatted as text/plain. Passing either
; 'html', 'xml' or 'json' in the query string will return the corresponding
; output syntax. Example:
; http://www.foo.bar/status
; http://www.foo.bar/status?json
; http://www.foo.bar/status?html
; http://www.foo.bar/status?xml
;
; By default the status page only outputs short status. Passing 'full' in the
; query string will also return status for each pool process.
; Example:
; http://www.foo.bar/status?full
; http://www.foo.bar/status?json&full
; http://www.foo.bar/status?html&full
; http://www.foo.bar/status?xml&full
; The Full status returns for each process:
; pid - the PID of the process;
; state - the state of the process (Idle, Running, ...);
; start time - the date and time the process has started;
; start since - the number of seconds since the process has started;
; requests - the number of requests the process has served;
; request duration - the duration in µs of the requests;
; request method - the request method (GET, POST, ...);
; request URI - the request URI with the query string;
; content length - the content length of the request (only with POST);
; user - the user (PHP_AUTH_USER) (or '-' if not set);
; script - the main script called (or '-' if not set);
; last request cpu - the %cpu the last request consumed
; it's always 0 if the process is not in Idle state
; because CPU calculation is done when the request
; processing has terminated;
; last request memory - the max amount of memory the last request consumed
; it's always 0 if the process is not in Idle state
; because memory calculation is done when the request
; processing has terminated;
; If the process is in Idle state, the information above relates to the last
; request served by the process. Otherwise it relates to the request currently
; being served.
; Example output:
; ************************
; pid: 31330
; state: Running
; start time: 01/Jul/2011:17:53:49 +0200
; start since: 63087
; requests: 12808
; request duration: 1250261
; request method: GET
; request URI: /test_mem.php?N=10000
; content length: 0
; user: -
; script: /home/fat/web/docs/php/test_mem.php
; last request cpu: 0.00
; last request memory: 0
;
; Note: A real-time FPM status monitoring sample web page is available at:
; /usr/share/php5/fpm/status.html
;
; Note: The value must start with a leading slash (/). The value can be
; anything, but using the .php extension is not a good idea, as it may
; conflict with a real PHP file.
; Default Value: not set
pm.status_path = /php-fpm-status
; The ping URI to call the monitoring page of FPM. If this value is not set, no
; URI will be recognized as a ping page. This could be used to test from outside
; that FPM is alive and responding, or to
; - create a graph of FPM availability (rrd or such);
; - remove a server from a group if it is not responding (load balancing);
; - trigger alerts for the operating team (24/7).
; Note: The value must start with a leading slash (/). The value can be
; anything, but using the .php extension is not a good idea, as it may
; conflict with a real PHP file.
; Default Value: not set
ping.path = /php-fpm-ping
; This directive may be used to customize the response of a ping request. The
; response is formatted as text/plain with a 200 response code.
; Default Value: pong
;ping.response = pong
; The access log file
; Default: not set
;access.log = log/$pool.access.log
; The access log format.
; The following syntax is allowed
; %%: the '%' character
; %C: %CPU used by the request
; it can accept the following format:
; - %{user}C for user CPU only
; - %{system}C for system CPU only
; - %{total}C for user + system CPU (default)
; %d: time taken to serve the request
; it can accept the following format:
; - %{seconds}d (default)
; - %{miliseconds}d
; - %{mili}d
; - %{microseconds}d
; - %{micro}d
; %e: an environment variable (same as $_ENV or $_SERVER)
; it must be wrapped in braces to specify the name of the env
; variable. Some examples:
; - server specifics like: %{REQUEST_METHOD}e or %{SERVER_PROTOCOL}e
; - HTTP headers like: %{HTTP_HOST}e or %{HTTP_USER_AGENT}e
; %f: script filename
; %l: content-length of the request (for POST request only)
; %m: request method
; %M: peak of memory allocated by PHP
; it can accept the following format:
; - %{bytes}M (default)
; - %{kilobytes}M
; - %{kilo}M
; - %{megabytes}M
; - %{mega}M
; %n: pool name
; %o: output header
; it must be wrapped in braces to specify the name of the header:
; - %{Content-Type}o
; - %{X-Powered-By}o
; - %{Transfer-Encoding}o
; - ....
; %p: PID of the child that serviced the request
; %P: PID of the parent of the child that serviced the request
; %q: the query string
; %Q: the '?' character if query string exists
; %r: the request URI (without the query string, see %q and %Q)
; %R: remote IP address
; %s: status (response code)
; %t: server time the request was received
; it can accept a strftime(3) format:
; %d/%b/%Y:%H:%M:%S %z (default)
; %T: time the log has been written (the request has finished)
; it can accept a strftime(3) format:
; %d/%b/%Y:%H:%M:%S %z (default)
; %u: remote user
;
; Default: "%R - %u %t \"%m %r\" %s"
;access.format = "%R - %u %t \"%m %r%Q%q\" %s %f %{mili}d %{kilo}M %C%%"
; The log file for slow requests
; Default Value: not set
; Note: slowlog is mandatory if request_slowlog_timeout is set
;slowlog = log/$pool.log.slow
; The timeout for serving a single request after which a PHP backtrace will be
; dumped to the 'slowlog' file. A value of '0s' means 'off'.
; Available units: s(econds)(default), m(inutes), h(ours), or d(ays)
; Default Value: 0
;request_slowlog_timeout = 0
; The timeout for serving a single request after which the worker process will
; be killed. This option should be used when the 'max_execution_time' ini option
; does not stop script execution for some reason. A value of '0' means 'off'.
; Available units: s(econds)(default), m(inutes), h(ours), or d(ays)
; Default Value: 0
;request_terminate_timeout = 5m
; Set open file descriptor rlimit.
; Default Value: system defined value
;rlimit_files = 1024
; Set max core size rlimit.
; Possible Values: 'unlimited' or an integer greater or equal to 0
; Default Value: system defined value
;rlimit_core = 0
; Chroot to this directory at the start. This value must be defined as an
; absolute path. When this value is not set, chroot is not used.
; Note: you can prefix with '$prefix' to chroot to the pool prefix or one
; of its subdirectories. If the pool prefix is not set, the global prefix
; will be used instead.
; Note: chrooting is a great security feature and should be used whenever
; possible. However, all PHP paths will be relative to the chroot
; (error_log, sessions.save_path, ...).
; Default Value: not set
;chroot =
; Chdir to this directory at the start.
; Note: relative path can be used.
; Default Value: current directory or / when chroot
chdir = /
; Redirect worker stdout and stderr into main error log. If not set, stdout and
; stderr will be redirected to /dev/null according to FastCGI specs.
; Note: in high-load environments, this can add some delay to the page
; processing time (several ms).
; Default Value: no
catch_workers_output = yes
; Clear environment in FPM workers
; Prevents arbitrary environment variables from reaching FPM worker processes
; by clearing the environment in workers before env vars specified in this
; pool configuration are added.
; Setting to "no" will make all environment variables available to PHP code
; via getenv(), $_ENV and $_SERVER.
; Default Value: yes
;clear_env = no
; Limits the extensions of the main script FPM will allow to parse. This can
; prevent configuration mistakes on the web server side. You should only limit
; FPM to .php extensions to prevent malicious users from using other
; extensions to execute PHP code.
; Note: set an empty value to allow all extensions.
; Default Value: .php
;security.limit_extensions = .php .php3 .php4 .php5
; Pass environment variables like LD_LIBRARY_PATH. All $VARIABLEs are taken from
; the current environment.
; Default Value: clean env
;env[HOSTNAME] = $HOSTNAME
;env[PATH] = /usr/local/bin:/usr/bin:/bin
;env[TMP] = /tmp
;env[TMPDIR] = /tmp
;env[TEMP] = /tmp
; Additional php.ini defines, specific to this pool of workers. These settings
; overwrite the values previously defined in the php.ini. The directives are the
; same as the PHP SAPI:
; php_value/php_flag - you can set classic ini defines which can
; be overwritten from PHP call 'ini_set'.
; php_admin_value/php_admin_flag - these directives won't be overwritten by
; PHP call 'ini_set'
; For php_*flag, valid values are on, off, 1, 0, true, false, yes or no.
; Defining 'extension' will load the corresponding shared extension from
; extension_dir. Defining 'disable_functions' or 'disable_classes' will not
; overwrite previously defined php.ini values, but will append the new value
; instead.
; Note: path INI options can be relative and will be expanded with the prefix
; (pool, global or /usr)
; Default Value: nothing is defined by default except the values in php.ini and
; specified at startup with the -d argument
;php_admin_value[sendmail_path] = /usr/sbin/sendmail -t -i -f www@my.domain.com
;php_flag[display_errors] = off
;php_admin_value[error_log] = /var/log/fpm-php.www.log
;php_admin_flag[log_errors] = on
;php_admin_value[memory_limit] = 32M

View File

@ -1,56 +0,0 @@
[PHP]
expose_php = Off
disable_functions = exec,system,popen,passthru,apache_child_terminate,apache_get_modules,apache_get_version,apache_getenv,apache_note,apache_setenv,virtual,pcntl_alarm,pcntl_fork,pcntl_waitpid,pcntl_wait,pcntl_wifexited,pcntl_wifstopped,pcntl_wifsignaled,pcntl_wexitstatus,pcntl_wtermsig,pcntl_wstopsig,pcntl_signal,pcntl_signal_dispatch,pcntl_get_last_error,pcntl_strerror,pcntl_sigprocmask,pcntl_sigwaitinfo,pcntl_sigtimedwait,pcntl_exec,pcntl_getpriority,pcntl_setpriority
enable_dl = Off
allow_url_fopen = On
allow_url_include = Off
engine = On
short_open_tag = On
output_buffering = 4096
realpath_cache_size = 4096k
realpath_cache_ttl = 600
include_path = .:/usr/share/php
date.timezone = Europe/Paris
default_socket_timeout = 10
max_execution_time = 30
max_input_time = 60
max_input_vars = 1000
memory_limit = 512M
post_max_size = 32M
file_uploads = On
upload_tmp_dir = /tmp
upload_max_filesize = 32M
max_file_uploads = 20
error_reporting = E_ALL & ~E_DEPRECATED & ~E_STRICT
display_errors = Off
display_startup_errors = Off
log_errors = On
html_errors = On
SMTP = mailcatcher
smtp_port = 1025
sendmail_path = /usr/sbin/sendmail -t -i
mail_log = syslog
session.save_handler = memcached
session.save_path = memcached:11211
session.use_cookies = 1
session.cookie_secure =
session.use_only_cookies = 1
session.name = PHPSESSID
session.auto_start = 0
session.cookie_lifetime = 0
session.cookie_path = /
session.cookie_domain =
session.cookie_httponly =
session.serialize_handler = php
session.gc_probability = 0
session.gc_divisor = 1000
session.gc_maxlifetime = 2592000
session.bug_compat_42 = Off
session.bug_compat_warn = Off
session.referer_check =
session.entropy_length = 512
session.entropy_file = /dev/urandom
session.cache_limiter = nocache
session.cache_expire = 180
session.use_trans_sid = 0
session.hash_function = 0

View File

@ -1,13 +0,0 @@
xdebug.collect_params = 1
xdebug.collect_return = 1
xdebug.default_enable = 1
xdebug.force_display_errors = 1
xdebug.force_error_reporting = E_ALL & ~E_NOTICE & ~E_DEPRECATED
xdebug.halt_level = E_WARNING
xdebug.idekey = PHPSTORM
xdebug.max_nesting_level = 1024
xdebug.remote_enable = 1
xdebug.remote_connect_back = 1
xdebug.scream = 0
xdebug.show_error_trace = 1
xdebug.show_exception_trace = 1

View File

@ -1,267 +0,0 @@
FROM php:7.2-fpm-alpine as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
ARG AMQP_VERSION=stable
ARG AST_VERSION=stable
ARG APCU_VERSION=stable
ARG BLACKFIRE_VERSION=1.34.3
ARG CACHETOOL_VERSION=4.0.1
ARG DS_VERSION=stable
ARG EVENT_VERSION=stable
ARG IGBINARY_VERSION=stable
ARG IMAGICK_VERSION=stable
ARG GEOIP_VERSION=beta
ARG GRPC_VERSION=stable
ARG MCRYPT_VERSION=stable
ARG MEMCACHE_VERSION=4.0.1-php73
ARG MEMCACHED_VERSION=stable
ARG MONGODB_VERSION=stable
ARG NEWRELIC_VERSION=9.11.0.267
ARG OAUTH_VERSION=stable
ARG RAR_VERSION=stable
ARG REDIS_VERSION=stable
ARG SNUFFLEUPAGUS_VERSION=0.5.1
ARG UUID_VERSION=stable
ARG XDEBUG_VERSION=stable
ARG XHPROF_VERSION=2.2.0
ARG YAML_VERSION=stable
RUN apk --no-cache upgrade \
&& apk add --no-cache --virtual .build-deps \
$PHPIZE_DEPS \
aspell-dev \
bison \
bzip2-dev \
curl-dev \
enchant2-dev \
flex \
freetype-dev \
gawk \
geoip-dev \
gettext-dev \
gmp-dev \
icu-dev \
imagemagick-dev \
imap-dev \
libevent-dev \
libjpeg-turbo-dev \
libmcrypt-dev \
libmemcached-dev \
libpng-dev \
libressl-dev \
libxml2-dev \
libxslt-dev \
make \
net-snmp-dev \
openldap-dev \
patch \
postgresql-dev \
pcre-dev \
rabbitmq-c-dev \
yaml-dev \
zlib-dev \
# blackfire \
&& wget https://packages.blackfire.io/binaries/blackfire-php/${BLACKFIRE_VERSION}/blackfire-php-alpine_amd64-php-$(php -r "echo PHP_MAJOR_VERSION.PHP_MINOR_VERSION;").so -O $(php -r "echo ini_get('extension_dir');")/blackfire.so \
# enchant \
&& docker-php-source extract \
&& wget "https://git.alpinelinux.org/aports/plain/community/php7/enchant-2.patch?id=3f8d7d2e5e558a975f79b6470423b32e01c0bfbc" -O /usr/src/php-enchant-2.patch \
&& cd /usr/src/php && patch -p1 < ../php-enchant-2.patch \
# gd \
&& docker-php-ext-configure gd --with-freetype-dir=/usr/include/ --with-jpeg-dir=/usr/include/ --with-png-dir=/usr/include/ \
# memcache \
&& wget https://github.com/websupport-sk/pecl-memcache/archive/v${MEMCACHE_VERSION}.tar.gz -O /tmp/memcache-${MEMCACHE_VERSION}.tar.gz \
&& mkdir -p /tmp/memcache-${MEMCACHE_VERSION} \
&& tar xzf /tmp/memcache-${MEMCACHE_VERSION}.tar.gz -C /tmp/memcache-${MEMCACHE_VERSION} --strip-components=1 \
# https://github.com/websupport-sk/pecl-memcache/pull/39 \
&& sed -i '399s/);/, char *);/' /tmp/memcache-${MEMCACHE_VERSION}/php7/memcache_pool.h \
# https://github.com/websupport-sk/pecl-memcache/pull/40 \
&& sed -i '47i#if PHP_VERSION_ID < 70200\n register size_t newlen;\n#endif' /tmp/memcache-${MEMCACHE_VERSION}/php7/memcache_pool.c \
# newrelic \
&& wget https://download.newrelic.com/php_agent/archive/${NEWRELIC_VERSION}/newrelic-php5-${NEWRELIC_VERSION}-linux-musl.tar.gz -O /tmp/newrelic-${NEWRELIC_VERSION}.tar.gz \
&& mkdir -p /tmp/newrelic-${NEWRELIC_VERSION} \
&& tar xzf /tmp/newrelic-${NEWRELIC_VERSION}.tar.gz -C /tmp/newrelic-${NEWRELIC_VERSION} --strip-components=1 \
&& mv /tmp/newrelic-${NEWRELIC_VERSION}/agent/x64/newrelic-20170718.so $(php -r "echo ini_get('extension_dir');")/newrelic.so \
# snuffleupagus \
&& wget https://github.com/jvoisin/snuffleupagus/archive/v${SNUFFLEUPAGUS_VERSION}.tar.gz -O /tmp/snuffleupagus-${SNUFFLEUPAGUS_VERSION}.tar.gz \
&& mkdir -p /tmp/snuffleupagus-${SNUFFLEUPAGUS_VERSION} \
&& tar xzf /tmp/snuffleupagus-${SNUFFLEUPAGUS_VERSION}.tar.gz -C /tmp/snuffleupagus-${SNUFFLEUPAGUS_VERSION} --strip-components=1 \
&& docker-php-ext-configure /tmp/snuffleupagus-${SNUFFLEUPAGUS_VERSION}/src --prefix=/usr --enable-snuffleupagus \
# xhprof \
&& wget https://github.com/longxinH/xhprof/archive/v${XHPROF_VERSION}.tar.gz -O /tmp/xhprof-${XHPROF_VERSION}.tar.gz \
&& mkdir -p /tmp/xhprof-${XHPROF_VERSION} \
&& tar xzf /tmp/xhprof-${XHPROF_VERSION}.tar.gz -C /tmp/xhprof-${XHPROF_VERSION} --strip-components=1 \
&& docker-php-ext-configure /tmp/xhprof-${XHPROF_VERSION}/extension --with-php-config=/usr/local/bin/php-config \
&& docker-php-ext-install -j$(nproc) \
bcmath \
bz2 \
calendar \
dba \
enchant \
exif \
gd \
gettext \
gmp \
imap \
intl \
ldap \
/tmp/memcache-${MEMCACHE_VERSION} \
mysqli \
opcache \
pcntl \
pdo_mysql \
pdo_pgsql \
pgsql \
pspell \
shmop \
snmp \
soap \
sockets \
sysvmsg \
sysvsem \
sysvshm \
/tmp/xhprof-${XHPROF_VERSION}/extension \
xmlrpc \
xsl \
zip \
# docker-php-ext-install fails after snuffleupagus is enabled
/tmp/snuffleupagus-${SNUFFLEUPAGUS_VERSION}/src \
&& docker-php-source delete \
&& rm /usr/local/etc/php/conf.d/docker-php-ext-* \
&& rm -rf /tmp/memcache-* \
&& rm -rf /tmp/newrelic-* \
&& rm -rf /tmp/snuffleupagus-* \
&& rm -rf /tmp/xhprof-* \
&& pecl install amqp-${AMQP_VERSION} \
&& pecl install apcu-${APCU_VERSION} \
&& pecl install ast-${AST_VERSION} \
&& pecl install ds-${DS_VERSION} \
&& pecl install event-${EVENT_VERSION} \
&& pecl install geoip-${GEOIP_VERSION} \
&& pecl install grpc-${GRPC_VERSION} \
&& pecl install igbinary-${IGBINARY_VERSION} \
&& pecl install imagick-${IMAGICK_VERSION} \
&& pecl install memcached-${MEMCACHED_VERSION} \
&& pecl install mongodb-${MONGODB_VERSION} \
&& pecl install oauth-${OAUTH_VERSION} \
&& pecl install rar-${RAR_VERSION} \
&& pecl install redis-${REDIS_VERSION} \
&& echo |pecl install uuid-${UUID_VERSION} \
&& echo |pecl install mcrypt-${MCRYPT_VERSION} \
&& pecl install xdebug-${XDEBUG_VERSION} \
&& pecl install yaml-${YAML_VERSION} \
&& pecl clear-cache \
&& runDeps="$( \
scanelf --needed --nobanner --recursive /usr/local \
| awk '{ gsub(/,/, "\nso:", $2); print "so:" $2 }' \
| xargs -r apk info --installed \
| sort -u \
)" \
&& apk del .build-deps \
&& apk add --no-cache --virtual .run-deps $runDeps
RUN wget http://gordalina.github.io/cachetool/downloads/cachetool-${CACHETOOL_VERSION}.phar -O /usr/local/bin/cachetool \
&& chmod +x /usr/local/bin/cachetool \
&& echo -e "\
adapter: fastcgi \n\
fastcgi: 127.0.0.1:9000 \n\
" > /etc/cachetool.yml
RUN mkdir -p /etc/ssh && echo -e "\
Host * \n\
Compression yes \n\
" >> /etc/ssh/ssh_config
RUN apk add --no-cache \
bash \
bzip2 \
coreutils \
gettext \
git \
imagemagick \
lftp \
mailx \
make \
mysql-client \
nano \
openssh-client \
ssmtp \
vim
# Iconv fix: https://github.com/docker-library/php/issues/240#issuecomment-305038173
RUN apk add --no-cache --repository http://dl-cdn.alpinelinux.org/alpine/edge/community/ gnu-libiconv
ENV LD_PRELOAD=/usr/lib/preloadable_libiconv.so
# builtin modules : Core ctype curl date dom fileinfo filter ftp hash iconv json libxml mbstring mysqlnd openssl pcre PDO pdo_sqlite Phar posix readline Reflection session SimpleXML SPL sqlite3 standard tokenizer xml xmlreader xmlwriter zlib
# available modules : amqp apcu ast bcmath blackfire bz2 calendar dba ds enchant event exif gd geoip gmp grpc igbinary imap imagick intl ldap mcrypt memcache memcached mongodb mysqli newrelic oauth opcache pcntl pdo_mysql pdo_pgsql pgsql pspell rar redis shmop snmp snuffleupagus soap sockets sysvmsg sysvsem sysvshm xhprof uuid wddx xdebug xhprof xmlrpc xsl yaml zip
ARG PHP_EXT_ENABLE="amqp apcu bcmath bz2 calendar gd geoip imagick intl mcrypt memcached mysqli oauth opcache pdo_mysql redis soap sockets uuid yaml zip"
RUN docker-php-ext-enable ${PHP_EXT_ENABLE}
# copy *.ini
COPY ${DOCKER_BUILD_DIR}/*.ini /usr/local/etc/php/conf.d/
COPY ${DOCKER_BUILD_DIR}/php-fpm-*.conf /usr/local/etc/php-fpm.d/
# custom php config
ARG PHP_INI_CONFIG
RUN echo -e ${PHP_INI_CONFIG// /\\n} >> /usr/local/etc/php/conf.d/config.ini
# custom php cli
ARG PHP_CLI_CONFIG="apc.enable_cli=0 max_execution_time=-1 memory_limit=-1 opcache.enable_cli=0 xdebug.default_enable=0"
RUN echo '#!/usr/bin/env sh' > /usr/local/bin/php-cli \
&& chmod +x /usr/local/bin/php-cli \
&& echo -e "\
/usr/local/bin/php -d ${PHP_CLI_CONFIG// / -d } \"\$@\"\
" >> /usr/local/bin/php-cli
# install cronlock
ADD https://raw.github.com/kvz/cronlock/master/cronlock /usr/bin/cronlock
RUN chmod +rx /usr/bin/cronlock
# config ssmtp
RUN echo "FromLineOverride=YES" >> /etc/ssmtp/ssmtp.conf
# https://bugs.php.net/bug.php?id=71880
ENV LOG_STREAM="/tmp/stdout"
RUN mkfifo $LOG_STREAM && chmod 777 $LOG_STREAM
# default www-data homedir to /var/www for crontabs
RUN sed -i 's|/home/www-data|/var/www|' /etc/passwd
WORKDIR /var/www
# redirect LOG_STREAM to stdout and start php-fpm with environment variables from .env
CMD [ "sh", "-c", "(exec 3<>$LOG_STREAM; cat <&3 >&1 & IFS=$'\n'; exec env $(cat .env 2>/dev/null) php-fpm)" ]
FROM dist as master
ARG UID
ARG USER
ENV UID=${UID}
ENV GID=${UID}
ENV USER=${USER}
# If we provide a specific UID
RUN let $UID >/dev/null 2>&1 \
# Remove user with $UID if it is not our $USER
&& if [ "$(getent passwd $UID |awk 'BEGIN {FS=":"} {print $1}')" != "$USER" ]; then \
sed -i '/^'$(getent passwd $UID |awk 'BEGIN {FS=":"} {print $1}')':x:'$UID':/d' /etc/passwd; \
sed -i '/^'$(getent group $GID |awk 'BEGIN {FS=":"} {print $1}')':x:'$GID':/d' /etc/group; \
fi \
# Force $UID if our $USER already exists
&& sed -i 's/^'$USER':x:[0-9]\+:[0-9]\+:/'$USER':x:'$UID':'$GID':/' /etc/passwd \
&& sed -i 's/^'$USER':x:[0-9]\+:/'$USER':x:'$GID':/' /etc/group \
# Create $USER if it does not exist
&& if [ "$(getent passwd $UID)" = "" ]; then \
echo "$USER:x:$UID:$GID::/home/$USER:/bin/false" >> /etc/passwd; \
echo "$USER:!:$(($(date +%s) / 60 / 60 / 24)):0:99999:7:::" >> /etc/shadow; \
echo "$USER:x:$GID:" >> /etc/group; \
fi \
&& mkdir -p /home/$USER \
&& chown $UID:$GID /home/$USER \
|| true
RUN chown -R $USER /usr/local/etc/php/conf.d/
USER $USER
ARG SSH_REMOTE_HOSTS
RUN mkdir -p ~/.ssh \
&& ssh-keyscan -t rsa -H $SSH_REMOTE_HOSTS >> ~/.ssh/known_hosts

View File

@ -1,5 +0,0 @@
apc.enable_cli = 0
apc.enabled = 1
apc.shm_segments = 1
apc.shm_size = 32M

View File

@ -1 +0,0 @@
blackfire.agent_socket=tcp://blackfire:8707

View File

@ -1 +0,0 @@
memcached.sess_locking = Off

View File

@ -1,4 +0,0 @@
[newrelic]
newrelic.logfile = /proc/self/fd/2
newrelic.daemon.logfile = /proc/self/fd/2

View File

@ -1,8 +0,0 @@
opcache.enable = 1
opcache.enable_cli = 0
opcache.error_log = /proc/self/fd/2
opcache.interned_strings_buffer = 16
opcache.log_verbosity_level = 2
opcache.max_accelerated_files = 20000
opcache.memory_consumption = 256
opcache.validate_timestamps = 1

View File

@ -1,412 +0,0 @@
; Start a new pool named 'www'.
; the variable $pool can be used in any directive and will be replaced by the
; pool name ('www' here)
[www]
; Per pool prefix
; It only applies on the following directives:
; - 'access.log'
; - 'slowlog'
; - 'listen' (unixsocket)
; - 'chroot'
; - 'chdir'
; - 'php_values'
; - 'php_admin_values'
; When not set, the global prefix (or /usr) applies instead.
; Note: This directive can also be relative to the global prefix.
; Default Value: none
;prefix = /path/to/pools/$pool
; Unix user/group of processes
; Note: The user is mandatory. If the group is not set, the default user's group
; will be used.
user = www-data
group = www-data
; The address on which to accept FastCGI requests.
; Valid syntaxes are:
; 'ip.add.re.ss:port' - to listen on a TCP socket to a specific IPv4 address on
; a specific port;
; '[ip:6:addr:ess]:port' - to listen on a TCP socket to a specific IPv6 address on
; a specific port;
; 'port' - to listen on a TCP socket to all IPv4 addresses on a
; specific port;
; '[::]:port' - to listen on a TCP socket to all addresses
; (IPv6 and IPv4-mapped) on a specific port;
; '/path/to/unix/socket' - to listen on a unix socket.
; Note: This value is mandatory.
;listen = /var/run/php5-fpm.sock
listen = 0.0.0.0:9000
; Set listen(2) backlog.
; Default Value: 65535 (-1 on FreeBSD and OpenBSD)
listen.backlog = 1023
; Set permissions for unix socket, if one is used. In Linux, read/write
; permissions must be set in order to allow connections from a web server. Many
; BSD-derived systems allow connections regardless of permissions.
; Default Values: user and group are set as the running user
; mode is set to 0660
;listen.owner = www-data
;listen.group = www-data
;listen.mode = 0660
; When POSIX Access Control Lists are supported, you can set them using
; these options; the value is a comma-separated list of user/group names.
; When set, listen.owner and listen.group are ignored.
;listen.acl_users =
;listen.acl_groups =
; List of addresses (IPv4/IPv6) of FastCGI clients which are allowed to connect.
; Equivalent to the FCGI_WEB_SERVER_ADDRS environment variable in the original
; PHP FCGI (5.2.2+). Makes sense only with a tcp listening socket. Each address
; must be separated by a comma. If this value is left blank, connections will be
; accepted from any ip address.
; Default Value: any
;listen.allowed_clients = 0.0.0.0
; Specify the nice(2) priority to apply to the pool processes (only if set)
; The value can vary from -19 (highest priority) to 20 (lowest priority)
; Note: - It will only work if the FPM master process is launched as root
; - The pool processes will inherit the master process priority
; unless specified otherwise
; Default Value: not set
; process.priority = -19
; Choose how the process manager will control the number of child processes.
; Possible Values:
; static - a fixed number (pm.max_children) of child processes;
; dynamic - the number of child processes is set dynamically based on the
; following directives. With this process management, there will
; always be at least 1 child.
; pm.max_children - the maximum number of children that can
; be alive at the same time.
; pm.start_servers - the number of children created on startup.
; pm.min_spare_servers - the minimum number of children in 'idle'
; state (waiting to process). If the number
; of 'idle' processes is less than this
; number then some children will be created.
; pm.max_spare_servers - the maximum number of children in 'idle'
; state (waiting to process). If the number
; of 'idle' processes is greater than this
; number then some children will be killed.
; ondemand - no children are created at startup. Children are forked when
; new requests connect. The following parameters are used:
; pm.max_children - the maximum number of children that
; can be alive at the same time.
; pm.process_idle_timeout - The number of seconds after which
; an idle process will be killed.
; Note: This value is mandatory.
pm = dynamic
; The number of child processes to be created when pm is set to 'static' and the
; maximum number of child processes when pm is set to 'dynamic' or 'ondemand'.
; This value sets the limit on the number of simultaneous requests that will be
; served. Equivalent to the Apache MaxClients directive with mpm_prefork.
; Equivalent to the PHP_FCGI_CHILDREN environment variable in the original PHP
; CGI. The defaults below are based on a server with limited resources. Don't
; forget to tweak pm.* to fit your needs.
; Note: Used when pm is set to 'static', 'dynamic' or 'ondemand'
; Note: This value is mandatory.
pm.max_children = 16
; The number of child processes created on startup.
; Note: Used only when pm is set to 'dynamic'
; Default Value: min_spare_servers + (max_spare_servers - min_spare_servers) / 2
pm.start_servers = 2
; The desired minimum number of idle server processes.
; Note: Used only when pm is set to 'dynamic'
; Note: Mandatory when pm is set to 'dynamic'
pm.min_spare_servers = 1
; The desired maximum number of idle server processes.
; Note: Used only when pm is set to 'dynamic'
; Note: Mandatory when pm is set to 'dynamic'
pm.max_spare_servers = 3
; The number of seconds after which an idle process will be killed.
; Note: Used only when pm is set to 'ondemand'
; Default Value: 10s
;pm.process_idle_timeout = 10s;
; The number of requests each child process should execute before respawning.
; This can be useful to work around memory leaks in 3rd party libraries. For
; endless request processing specify '0'. Equivalent to PHP_FCGI_MAX_REQUESTS.
; Default Value: 0
;pm.max_requests = 500
; The URI to view the FPM status page. If this value is not set, no URI will be
; recognized as a status page. It shows the following information:
; pool - the name of the pool;
; process manager - static, dynamic or ondemand;
; start time - the date and time FPM has started;
; start since - number of seconds since FPM has started;
; accepted conn - the number of requests accepted by the pool;
; listen queue - the number of requests in the queue of pending
; connections (see backlog in listen(2));
; max listen queue - the maximum number of requests in the queue
; of pending connections since FPM has started;
; listen queue len - the size of the socket queue of pending connections;
; idle processes - the number of idle processes;
; active processes - the number of active processes;
; total processes - the number of idle + active processes;
; max active processes - the maximum number of active processes since FPM
; has started;
; max children reached - the number of times the process limit has been
; reached when pm tries to start more children (works
; only for pm 'dynamic' and 'ondemand');
; Values are updated in real time.
; Example output:
; pool: www
; process manager: static
; start time: 01/Jul/2011:17:53:49 +0200
; start since: 62636
; accepted conn: 190460
; listen queue: 0
; max listen queue: 1
; listen queue len: 42
; idle processes: 4
; active processes: 11
; total processes: 15
; max active processes: 12
; max children reached: 0
;
; By default the status page output is formatted as text/plain. Passing either
; 'html', 'xml' or 'json' in the query string will return the corresponding
; output syntax. Example:
; http://www.foo.bar/status
; http://www.foo.bar/status?json
; http://www.foo.bar/status?html
; http://www.foo.bar/status?xml
;
; By default the status page only outputs short status. Passing 'full' in the
; query string will also return status for each pool process.
; Example:
; http://www.foo.bar/status?full
; http://www.foo.bar/status?json&full
; http://www.foo.bar/status?html&full
; http://www.foo.bar/status?xml&full
; The Full status returns for each process:
; pid - the PID of the process;
; state - the state of the process (Idle, Running, ...);
; start time - the date and time the process has started;
; start since - the number of seconds since the process has started;
; requests - the number of requests the process has served;
; request duration - the duration in µs of the requests;
; request method - the request method (GET, POST, ...);
; request URI - the request URI with the query string;
; content length - the content length of the request (only with POST);
; user - the user (PHP_AUTH_USER) (or '-' if not set);
; script - the main script called (or '-' if not set);
; last request cpu - the %cpu the last request consumed
; it's always 0 if the process is not in Idle state
; because CPU calculation is done when the request
; processing has terminated;
; last request memory - the max amount of memory the last request consumed
; it's always 0 if the process is not in Idle state
; because memory calculation is done when the request
; processing has terminated;
; If the process is in Idle state, the information above relates to the last
; request served by the process. Otherwise it relates to the request currently
; being served.
; Example output:
; ************************
; pid: 31330
; state: Running
; start time: 01/Jul/2011:17:53:49 +0200
; start since: 63087
; requests: 12808
; request duration: 1250261
; request method: GET
; request URI: /test_mem.php?N=10000
; content length: 0
; user: -
; script: /home/fat/web/docs/php/test_mem.php
; last request cpu: 0.00
; last request memory: 0
;
; Note: A real-time FPM status monitoring sample web page is available at:
; /usr/share/php5/fpm/status.html
;
; Note: The value must start with a leading slash (/). The value can be
; anything, but using the .php extension is not a good idea, as it may
; conflict with a real PHP file.
; Default Value: not set
pm.status_path = /php-fpm-status
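; As a quick sanity check, the status page can be fetched from the command
; line (a sketch, not part of the original config: the hostname is
; hypothetical and assumes the web server routes the path above to this pool):
;   curl -s 'http://localhost/php-fpm-status?json&full'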
; The ping URI to call the monitoring page of FPM. If this value is not set, no
; URI will be recognized as a ping page. This could be used to test from outside
; that FPM is alive and responding, or to
; - create a graph of FPM availability (rrd or such);
; - remove a server from a group if it is not responding (load balancing);
; - trigger alerts for the operating team (24/7).
; Note: The value must start with a leading slash (/). The value can be
; anything, but it is best not to use the .php extension, as it may
; conflict with a real PHP file.
; Default Value: not set
ping.path = /php-fpm-ping
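; A minimal liveness probe against the ping page (hostname is hypothetical;
; expects the ping.response value below, 'pong' by default):
;   curl -fs 'http://localhost/php-fpm-ping'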
; This directive may be used to customize the response of a ping request. The
; response is formatted as text/plain with a 200 response code.
; Default Value: pong
;ping.response = pong
; The access log file
; Default: not set
;access.log = log/$pool.access.log
; The access log format.
; The following syntax is allowed
; %%: the '%' character
; %C: %CPU used by the request
; it can accept the following formats:
; - %{user}C for user CPU only
; - %{system}C for system CPU only
; - %{total}C for user + system CPU (default)
; %d: time taken to serve the request
; it can accept the following formats:
; - %{seconds}d (default)
; - %{miliseconds}d
; - %{mili}d
; - %{microseconds}d
; - %{micro}d
; %e: an environment variable (same as $_ENV or $_SERVER)
; it must be associated with curly braces to specify the name of the env
; variable. Some examples:
; - server specifics like: %{REQUEST_METHOD}e or %{SERVER_PROTOCOL}e
; - HTTP headers like: %{HTTP_HOST}e or %{HTTP_USER_AGENT}e
; %f: script filename
; %l: content-length of the request (for POST request only)
; %m: request method
; %M: peak of memory allocated by PHP
; it can accept the following formats:
; - %{bytes}M (default)
; - %{kilobytes}M
; - %{kilo}M
; - %{megabytes}M
; - %{mega}M
; %n: pool name
; %o: output header
; it must be associated with curly braces to specify the name of the header:
; - %{Content-Type}o
; - %{X-Powered-By}o
; - %{Transfer-Encoding}o
; - ....
; %p: PID of the child that serviced the request
; %P: PID of the parent of the child that serviced the request
; %q: the query string
; %Q: the '?' character if query string exists
; %r: the request URI (without the query string, see %q and %Q)
; %R: remote IP address
; %s: status (response code)
; %t: server time the request was received
; it can accept a strftime(3) format:
; %d/%b/%Y:%H:%M:%S %z (default)
; %T: time the log has been written (the request has finished)
; it can accept a strftime(3) format:
; %d/%b/%Y:%H:%M:%S %z (default)
; %u: remote user
;
; Default: "%R - %u %t \"%m %r\" %s"
;access.format = "%R - %u %t \"%m %r%Q%q\" %s %f %{mili}d %{kilo}M %C%%"
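; For illustration only, a line produced by the default format above might
; look roughly like this (all values invented):
;   192.0.2.10 - - 01/Jul/2011:17:53:49 +0200 "GET /index.php" 200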
; The log file for slow requests
; Default Value: not set
; Note: slowlog is mandatory if request_slowlog_timeout is set
;slowlog = log/$pool.log.slow
; The timeout for serving a single request after which a PHP backtrace will be
; dumped to the 'slowlog' file. A value of '0s' means 'off'.
; Available units: s(econds)(default), m(inutes), h(ours), or d(ays)
; Default Value: 0
;request_slowlog_timeout = 0
; The timeout for serving a single request after which the worker process will
; be killed. This option should be used when the 'max_execution_time' ini option
; does not stop script execution for some reason. A value of '0' means 'off'.
; Available units: s(econds)(default), m(inutes), h(ours), or d(ays)
; Default Value: 0
;request_terminate_timeout = 5m
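; A sketch combining the two timeouts above (values are illustrative, not
; recommendations): dump a backtrace for requests slower than 5s and kill
; any worker still busy after 30s:
;   slowlog = log/$pool.log.slow
;   request_slowlog_timeout = 5s
;   request_terminate_timeout = 30s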
; Set open file descriptor rlimit.
; Default Value: system defined value
;rlimit_files = 1024
; Set max core size rlimit.
; Possible Values: 'unlimited' or an integer greater or equal to 0
; Default Value: system defined value
;rlimit_core = 0
; Chroot to this directory at the start. This value must be defined as an
; absolute path. When this value is not set, chroot is not used.
; Note: you can prefix with '$prefix' to chroot to the pool prefix or one
; of its subdirectories. If the pool prefix is not set, the global prefix
; will be used instead.
; Note: chrooting is a great security feature and should be used whenever
; possible. However, all PHP paths will be relative to the chroot
; (error_log, sessions.save_path, ...).
; Default Value: not set
;chroot =
; Chdir to this directory at the start.
; Note: a relative path can be used.
; Default Value: current directory or / when chroot
chdir = /
; Redirect worker stdout and stderr into main error log. If not set, stdout and
; stderr will be redirected to /dev/null according to FastCGI specs.
; Note: in high-load environments, this can add some delay to the page
; processing time (several ms).
; Default Value: no
catch_workers_output = yes
; Clear environment in FPM workers
; Prevents arbitrary environment variables from reaching FPM worker processes
; by clearing the environment in workers before env vars specified in this
; pool configuration are added.
; Setting to "no" will make all environment variables available to PHP code
; via getenv(), $_ENV and $_SERVER.
; Default Value: yes
;clear_env = no
; Limits the extensions of the main script FPM will allow to parse. This can
; prevent configuration mistakes on the web server side. You should only limit
; FPM to .php extensions to prevent malicious users from using other
; extensions to execute php code.
; Note: set an empty value to allow all extensions.
; Default Value: .php
;security.limit_extensions = .php .php3 .php4 .php5
; Pass environment variables like LD_LIBRARY_PATH. All $VARIABLEs are taken from
; the current environment.
; Default Value: clean env
;env[HOSTNAME] = $HOSTNAME
;env[PATH] = /usr/local/bin:/usr/bin:/bin
;env[TMP] = /tmp
;env[TMPDIR] = /tmp
;env[TEMP] = /tmp
; Additional php.ini defines, specific to this pool of workers. These settings
; overwrite the values previously defined in the php.ini. The directives are the
; same as the PHP SAPI:
; php_value/php_flag - you can set classic ini defines which can
; be overwritten from PHP call 'ini_set'.
; php_admin_value/php_admin_flag - these directives won't be overwritten by
; PHP call 'ini_set'
; For php_*flag, valid values are on, off, 1, 0, true, false, yes or no.
; Defining 'extension' will load the corresponding shared extension from
; extension_dir. Defining 'disable_functions' or 'disable_classes' will not
; overwrite previously defined php.ini values, but will append the new value
; instead.
; Note: path INI options can be relative and will be expanded with the prefix
; (pool, global or /usr)
; Default Value: nothing is defined by default except the values in php.ini and
; specified at startup with the -d argument
;php_admin_value[sendmail_path] = /usr/sbin/sendmail -t -i -f www@my.domain.com
;php_flag[display_errors] = off
;php_admin_value[error_log] = /var/log/fpm-php.www.log
;php_admin_flag[log_errors] = on
;php_admin_value[memory_limit] = 32M

View File

@ -1,56 +0,0 @@
[PHP]
expose_php = Off
disable_functions = exec,system,popen,passthru,apache_child_terminate,apache_get_modules,apache_get_version,apache_getenv,apache_note,apache_setenv,virtual,pcntl_alarm,pcntl_fork,pcntl_waitpid,pcntl_wait,pcntl_wifexited,pcntl_wifstopped,pcntl_wifsignaled,pcntl_wexitstatus,pcntl_wtermsig,pcntl_wstopsig,pcntl_signal,pcntl_signal_dispatch,pcntl_get_last_error,pcntl_strerror,pcntl_sigprocmask,pcntl_sigwaitinfo,pcntl_sigtimedwait,pcntl_exec,pcntl_getpriority,pcntl_setpriority
enable_dl = Off
allow_url_fopen = On
allow_url_include = Off
engine = On
short_open_tag = On
output_buffering = 4096
realpath_cache_size = 4096k
realpath_cache_ttl = 600
include_path = .:/usr/share/php
date.timezone = Europe/Paris
default_socket_timeout = 10
max_execution_time = 30
max_input_time = 60
max_input_vars = 1000
memory_limit = 512M
post_max_size = 32M
file_uploads = On
upload_tmp_dir = /tmp
upload_max_filesize = 32M
max_file_uploads = 20
error_reporting = E_ALL & ~E_DEPRECATED & ~E_STRICT
display_errors = Off
display_startup_errors = Off
log_errors = On
html_errors = On
SMTP = mailcatcher
smtp_port = 1025
sendmail_path = /usr/sbin/sendmail -t -i
mail_log = syslog
session.save_handler = memcached
session.save_path = memcached:11211
session.use_cookies = 1
session.cookie_secure =
session.use_only_cookies = 1
session.name = PHPSESSID
session.auto_start = 0
session.cookie_lifetime = 0
session.cookie_path = /
session.cookie_domain =
session.cookie_httponly =
session.serialize_handler = php
session.gc_probability = 0
session.gc_divisor = 1000
session.gc_maxlifetime = 2592000
session.bug_compat_42 = Off
session.bug_compat_warn = Off
session.referer_check =
session.entropy_length = 512
session.entropy_file = /dev/urandom
session.cache_limiter = nocache
session.cache_expire = 180
session.use_trans_sid = 0
session.hash_function = 0

View File

@ -1,13 +0,0 @@
xdebug.collect_params = 1
xdebug.collect_return = 1
xdebug.default_enable = 1
xdebug.force_display_errors = 1
xdebug.force_error_reporting = E_ALL & ~E_NOTICE & ~E_DEPRECATED
xdebug.halt_level = E_WARNING
xdebug.idekey = PHPSTORM
xdebug.max_nesting_level = 1024
xdebug.remote_enable = 1
xdebug.remote_connect_back = 1
xdebug.scream = 0
xdebug.show_error_trace = 1
xdebug.show_exception_trace = 1

View File

@ -1,264 +0,0 @@
FROM php:7.3-fpm-alpine as dist
LABEL maintainer aynic.os <support+docker@asycn.io>
ARG DOCKER_BUILD_DIR
ARG AMQP_VERSION=stable
ARG AST_VERSION=stable
ARG APCU_VERSION=stable
ARG BLACKFIRE_VERSION=1.34.3
ARG CACHETOOL_VERSION=4.0.1
ARG DS_VERSION=stable
ARG EVENT_VERSION=stable
ARG IGBINARY_VERSION=stable
ARG IMAGICK_VERSION=stable
ARG GEOIP_VERSION=beta
ARG GRPC_VERSION=stable
ARG MCRYPT_VERSION=stable
ARG MEMCACHE_VERSION=4.0.1-php73
ARG MEMCACHED_VERSION=stable
ARG MONGODB_VERSION=stable
ARG NEWRELIC_VERSION=9.11.0.267
ARG OAUTH_VERSION=stable
ARG REDIS_VERSION=stable
ARG SNUFFLEUPAGUS_VERSION=0.5.1
ARG UUID_VERSION=stable
ARG XDEBUG_VERSION=stable
ARG XHPROF_VERSION=2.2.0
ARG YAML_VERSION=stable
RUN apk --no-cache upgrade \
&& apk add --no-cache --virtual .build-deps \
$PHPIZE_DEPS \
aspell-dev \
bison \
bzip2-dev \
curl-dev \
enchant2-dev \
flex \
freetype-dev \
gawk \
geoip-dev \
gettext-dev \
gmp-dev \
icu-dev \
imagemagick-dev \
imap-dev \
libevent-dev \
libjpeg-turbo-dev \
libmcrypt-dev \
libmemcached-dev \
libpng-dev \
libressl-dev \
libxml2-dev \
libxslt-dev \
libzip-dev \
make \
net-snmp-dev \
openldap-dev \
patch \
postgresql-dev \
pcre-dev \
rabbitmq-c-dev \
yaml-dev \
# blackfire \
&& wget https://packages.blackfire.io/binaries/blackfire-php/${BLACKFIRE_VERSION}/blackfire-php-alpine_amd64-php-$(php -r "echo PHP_MAJOR_VERSION.PHP_MINOR_VERSION;").so -O $(php -r "echo ini_get('extension_dir');")/blackfire.so \
# enchant \
&& docker-php-source extract \
&& wget "https://git.alpinelinux.org/aports/plain/community/php7/enchant-2.patch?id=3f8d7d2e5e558a975f79b6470423b32e01c0bfbc" -O /usr/src/php-enchant-2.patch \
&& cd /usr/src/php && patch -p1 < ../php-enchant-2.patch \
# gd \
&& docker-php-ext-configure gd --with-freetype-dir=/usr/include/ --with-jpeg-dir=/usr/include/ --with-png-dir=/usr/include/ \
# memcache \
&& wget https://github.com/websupport-sk/pecl-memcache/archive/v${MEMCACHE_VERSION}.tar.gz -O /tmp/memcache-${MEMCACHE_VERSION}.tar.gz \
&& mkdir -p /tmp/memcache-${MEMCACHE_VERSION} \
&& tar xzf /tmp/memcache-${MEMCACHE_VERSION}.tar.gz -C /tmp/memcache-${MEMCACHE_VERSION} --strip-components=1 \
# https://github.com/websupport-sk/pecl-memcache/pull/39 \
&& sed -i '399s/);/, char *);/' /tmp/memcache-${MEMCACHE_VERSION}/php7/memcache_pool.h \
# https://github.com/websupport-sk/pecl-memcache/pull/40 \
&& sed -i '47i#if PHP_VERSION_ID < 70200\n register size_t newlen;\n#endif' /tmp/memcache-${MEMCACHE_VERSION}/php7/memcache_pool.c \
# newrelic \
&& wget https://download.newrelic.com/php_agent/archive/${NEWRELIC_VERSION}/newrelic-php5-${NEWRELIC_VERSION}-linux-musl.tar.gz -O /tmp/newrelic-${NEWRELIC_VERSION}.tar.gz \
&& mkdir -p /tmp/newrelic-${NEWRELIC_VERSION} \
&& tar xzf /tmp/newrelic-${NEWRELIC_VERSION}.tar.gz -C /tmp/newrelic-${NEWRELIC_VERSION} --strip-components=1 \
&& mv /tmp/newrelic-${NEWRELIC_VERSION}/agent/x64/newrelic-20180731.so $(php -r "echo ini_get('extension_dir');")/newrelic.so \
# snuffleupagus \
&& wget https://github.com/jvoisin/snuffleupagus/archive/v${SNUFFLEUPAGUS_VERSION}.tar.gz -O /tmp/snuffleupagus-${SNUFFLEUPAGUS_VERSION}.tar.gz \
&& mkdir -p /tmp/snuffleupagus-${SNUFFLEUPAGUS_VERSION} \
&& tar xzf /tmp/snuffleupagus-${SNUFFLEUPAGUS_VERSION}.tar.gz -C /tmp/snuffleupagus-${SNUFFLEUPAGUS_VERSION} --strip-components=1 \
&& docker-php-ext-configure /tmp/snuffleupagus-${SNUFFLEUPAGUS_VERSION}/src --prefix=/usr --enable-snuffleupagus \
# xhprof \
&& wget https://github.com/longxinH/xhprof/archive/v${XHPROF_VERSION}.tar.gz -O /tmp/xhprof-${XHPROF_VERSION}.tar.gz \
&& mkdir -p /tmp/xhprof-${XHPROF_VERSION} \
&& tar xzf /tmp/xhprof-${XHPROF_VERSION}.tar.gz -C /tmp/xhprof-${XHPROF_VERSION} --strip-components=1 \
&& docker-php-ext-configure /tmp/xhprof-${XHPROF_VERSION}/extension --with-php-config=/usr/local/bin/php-config \
&& docker-php-ext-install -j$(nproc) \
bcmath \
bz2 \
calendar \
dba \
enchant \
exif \
gd \
gettext \
gmp \
imap \
intl \
ldap \
/tmp/memcache-${MEMCACHE_VERSION} \
mysqli \
opcache \
pcntl \
pdo_mysql \
pdo_pgsql \
pgsql \
pspell \
shmop \
snmp \
soap \
sockets \
sysvmsg \
sysvsem \
sysvshm \
/tmp/xhprof-${XHPROF_VERSION}/extension \
xmlrpc \
xsl \
zip \
# docker-php-ext-install fails after snuffleupagus is enabled
/tmp/snuffleupagus-${SNUFFLEUPAGUS_VERSION}/src \
&& docker-php-source delete \
&& rm /usr/local/etc/php/conf.d/docker-php-ext-* \
&& rm -rf /tmp/memcache-* \
&& rm -rf /tmp/newrelic-* \
&& rm -rf /tmp/snuffleupagus-* \
&& rm -rf /tmp/xhprof-* \
&& pecl install amqp-${AMQP_VERSION} \
&& pecl install apcu-${APCU_VERSION} \
&& pecl install ast-${AST_VERSION} \
&& pecl install ds-${DS_VERSION} \
&& pecl install event-${EVENT_VERSION} \
&& pecl install geoip-${GEOIP_VERSION} \
&& pecl install grpc-${GRPC_VERSION} \
&& pecl install igbinary-${IGBINARY_VERSION} \
&& pecl install imagick-${IMAGICK_VERSION} \
&& pecl install memcached-${MEMCACHED_VERSION} \
&& pecl install mongodb-${MONGODB_VERSION} \
&& pecl install oauth-${OAUTH_VERSION} \
&& pecl install redis-${REDIS_VERSION} \
&& echo |pecl install uuid-${UUID_VERSION} \
&& echo |pecl install mcrypt-${MCRYPT_VERSION} \
&& pecl install xdebug-${XDEBUG_VERSION} \
&& pecl install yaml-${YAML_VERSION} \
&& pecl clear-cache \
&& runDeps="$( \
scanelf --needed --nobanner --recursive /usr/local \
| awk '{ gsub(/,/, "\nso:", $2); print "so:" $2 }' \
| xargs -r apk info --installed \
| sort -u \
)" \
&& apk del .build-deps \
&& apk add --no-cache --virtual .run-deps $runDeps
RUN wget http://gordalina.github.io/cachetool/downloads/cachetool-${CACHETOOL_VERSION}.phar -O /usr/local/bin/cachetool \
&& chmod +x /usr/local/bin/cachetool \
&& echo -e "\
adapter: fastcgi \n\
fastcgi: 127.0.0.1:9000 \n\
" > /etc/cachetool.yml
RUN mkdir -p /etc/ssh && echo -e "\
Host * \n\
Compression yes \n\
" >> /etc/ssh/ssh_config
RUN apk add --no-cache \
bash \
bzip2 \
coreutils \
gettext \
git \
imagemagick \
lftp \
mailx \
make \
mysql-client \
nano \
openssh-client \
ssmtp \
vim
# Iconv fix: https://github.com/docker-library/php/issues/240#issuecomment-305038173
RUN apk add --no-cache --repository http://dl-cdn.alpinelinux.org/alpine/edge/community/ gnu-libiconv
ENV LD_PRELOAD=/usr/lib/preloadable_libiconv.so
# builtin modules : Core ctype curl date dom fileinfo filter ftp hash iconv json libxml mbstring mysqlnd openssl pcre PDO pdo_sqlite Phar posix readline Reflection session SimpleXML SPL sqlite3 standard tokenizer xml xmlreader xmlwriter zlib
# available modules : amqp apcu ast bcmath blackfire bz2 calendar dba ds enchant event exif gd geoip gmp grpc igbinary imap imagick intl ldap mcrypt memcache memcached mongodb mysqli newrelic oauth opcache pcntl pdo_mysql pdo_pgsql pgsql pspell redis shmop snmp snuffleupagus soap sockets sysvmsg sysvsem sysvshm xhprof uuid wddx xdebug xhprof xmlrpc xsl yaml zip
ARG PHP_EXT_ENABLE="amqp apcu bcmath bz2 calendar gd geoip imagick intl mcrypt memcached mysqli oauth opcache pdo_mysql redis soap sockets uuid yaml zip"
RUN docker-php-ext-enable ${PHP_EXT_ENABLE}
# copy *.ini
COPY ${DOCKER_BUILD_DIR}/*.ini /usr/local/etc/php/conf.d/
COPY ${DOCKER_BUILD_DIR}/php-fpm-*.conf /usr/local/etc/php-fpm.d/
RUN rm /usr/local/etc/php-fpm.d/www.conf
# custom php config
ARG PHP_INI_CONFIG
RUN echo -e ${PHP_INI_CONFIG// /\\n} >> /usr/local/etc/php/conf.d/config.ini
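# e.g. a hypothetical build passing space-separated ini defines, each written
# on its own line of config.ini by the ${PHP_INI_CONFIG// /\n} expansion:
#   docker build --build-arg PHP_INI_CONFIG="memory_limit=1G date.timezone=UTC" .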
# custom php cli
ARG PHP_CLI_CONFIG="apc.enable_cli=0 max_execution_time=-1 memory_limit=-1 opcache.enable_cli=0 xdebug.default_enable=0"
RUN echo '#!/usr/bin/env sh' > /usr/local/bin/php-cli \
&& chmod +x /usr/local/bin/php-cli \
&& echo -e "\
/usr/local/bin/php -d ${PHP_CLI_CONFIG// / -d } \"\$@\"\
" >> /usr/local/bin/php-cli
# install cronlock
ADD https://raw.github.com/kvz/cronlock/master/cronlock /usr/bin/cronlock
RUN chmod +rx /usr/bin/cronlock
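# cronlock wraps a command in a lock (backed by Redis) so overlapping cron
# runs are skipped; a hypothetical crontab entry:
#   * * * * * cronlock php-cli /var/www/bin/job.php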
# config ssmtp
RUN echo "FromLineOverride=YES" >> /etc/ssmtp/ssmtp.conf
# default www-data homedir to /var/www for crontabs
RUN sed -i 's|/home/www-data|/var/www|' /etc/passwd
# link shared folder
RUN ln -s /shared /var/www/shared
WORKDIR /var/www
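# at runtime: export the variables from a .env file in /var/www (if present),
# then start php-fpm with that environment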
CMD [ "sh", "-c", "(IFS=$'\n'; exec env $(cat .env 2>/dev/null) php-fpm)" ]
FROM dist as master
ARG UID
ARG USER
ENV UID=${UID}
ENV GID=${UID}
ENV USER=${USER}
# If we provide a specific UID
RUN let $UID >/dev/null 2>&1 \
# Remove user with $UID if it is not our $USER
&& if [ "$(getent passwd $UID |awk 'BEGIN {FS=":"} {print $1}')" != "$USER" ]; then \
sed -i '/^'$(getent passwd $UID |awk 'BEGIN {FS=":"} {print $1}')':x:'$UID':/d' /etc/passwd; \
sed -i '/^'$(getent group $GID |awk 'BEGIN {FS=":"} {print $1}')':x:'$GID':/d' /etc/group; \
fi \
# Force $UID if our $USER already exists
&& sed -i 's/^'$USER':x:[0-9]\+:[0-9]\+:/'$USER':x:'$UID':'$GID':/' /etc/passwd \
&& sed -i 's/^'$USER':x:[0-9]\+:/'$USER':x:'$GID':/' /etc/group \
# Create $USER if it does not exist
&& if [ "$(getent passwd $UID)" = "" ]; then \
echo "$USER:x:$UID:$GID::/home/$USER:/bin/false" >> /etc/passwd; \
echo "$USER:!:$(($(date +%s) / 60 / 60 / 24)):0:99999:7:::" >> /etc/shadow; \
echo "$USER:x:$GID:" >> /etc/group; \
fi \
&& mkdir -p /home/$USER \
&& chown $UID:$GID /home/$USER \
|| true
RUN chown -R $USER /usr/local/etc/php/conf.d/
USER $USER
ARG SSH_REMOTE_HOSTS
RUN mkdir -p ~/.ssh \
&& ssh-keyscan -t rsa -H $SSH_REMOTE_HOSTS >> ~/.ssh/known_hosts

View File

@ -1,5 +0,0 @@
apc.enable_cli = 0
apc.enabled = 1
apc.shm_segments = 1
apc.shm_size = 32M

View File

@ -1 +0,0 @@
blackfire.agent_socket=tcp://blackfire:8707

View File

@ -1 +0,0 @@
memcached.sess_locking = Off

Some files were not shown because too many files have changed in this diff