Compare commits

No commits in common. "b33b10d0f4199f75bebb08409b5a6e9ac279fafb" and "5dd178ac1adc04fce7d7ee327c545a336100cf27" have entirely different histories.

19 changed files with 234 additions and 67 deletions

.env

@@ -1,4 +1,4 @@
ELASTIC_VERSION=8.13.4
ELASTIC_VERSION=8.12.1
## Passwords for stack users
#

@@ -30,6 +30,7 @@ jobs:
run: >-
docker compose
-f docker-compose.yml
-f extensions/logspout/logspout-compose.yml
-f extensions/fleet/fleet-compose.yml
-f extensions/fleet/agent-apmserver-compose.yml
-f extensions/metricbeat/metricbeat-compose.yml
@@ -45,40 +46,67 @@ jobs:
########################################################
- name: Set password of every built-in user to 'testpasswd'
run: >-
sed -i
-e 's/\(ELASTIC_PASSWORD=\)'\''changeme'\''/\1testpasswd/g'
-e 's/\(LOGSTASH_INTERNAL_PASSWORD=\)'\''changeme'\''/\1testpasswd/g'
-e 's/\(KIBANA_SYSTEM_PASSWORD=\)'\''changeme'\''/\1testpasswd/g'
-e 's/\(METRICBEAT_INTERNAL_PASSWORD=\)'\'\''/\1testpasswd/g'
-e 's/\(FILEBEAT_INTERNAL_PASSWORD=\)'\'\''/\1testpasswd/g'
-e 's/\(HEARTBEAT_INTERNAL_PASSWORD=\)'\'\''/\1testpasswd/g'
-e 's/\(MONITORING_INTERNAL_PASSWORD=\)'\'\''/\1testpasswd/g'
-e 's/\(BEATS_SYSTEM_PASSWORD=\)'\'\''/\1testpasswd/g'
.env
run: |
- name: Set up users and roles
run: docker compose up setup
sed -i -e 's/\(ELASTIC_PASSWORD=\)'\''changeme'\''/\1testpasswd/g' \
-e 's/\(LOGSTASH_INTERNAL_PASSWORD=\)'\''changeme'\''/\1testpasswd/g' \
-e 's/\(KIBANA_SYSTEM_PASSWORD=\)'\''changeme'\''/\1testpasswd/g' \
-e 's/\(METRICBEAT_INTERNAL_PASSWORD=\)'\'\''/\1testpasswd/g' \
-e 's/\(FILEBEAT_INTERNAL_PASSWORD=\)'\'\''/\1testpasswd/g' \
-e 's/\(HEARTBEAT_INTERNAL_PASSWORD=\)'\'\''/\1testpasswd/g' \
-e 's/\(MONITORING_INTERNAL_PASSWORD=\)'\'\''/\1testpasswd/g' \
-e 's/\(BEATS_SYSTEM_PASSWORD=\)'\'\''/\1testpasswd/g' .env
#############################
##########################################################
# #
# Test core and extensions. #
# Test core components: Elasticsearch, Logstash, Kibana. #
# #
#############################
##########################################################
- name: Run the stack
run: |
docker compose up setup
docker compose up -d
# Elasticsearch's high disk watermark gets regularly exceeded on GitHub Actions runners.
# https://www.elastic.co/guide/en/elasticsearch/reference/8.10/fix-watermark-errors.html
- name: Disable Elasticsearch disk allocation decider
run: .github/workflows/scripts/disable-disk-alloc-decider.sh
- name: Execute core test suite
run: .github/workflows/scripts/run-tests-core.sh
##############################
# #
# Test supported extensions. #
# #
##############################
#
# Core components: Elasticsearch, Logstash, Kibana
# Logspout
#
- name: Execute core test suite
- name: Execute Logspout test suite
run: |
docker compose up -d
.github/workflows/scripts/run-tests-core.sh
# Set mandatory Logstash settings
sed -i '$ a input { udp { port => 50000 codec => json } }' logstash/pipeline/logstash.conf
# Restart Logstash for changes to take effect
docker compose restart logstash
# Run Logspout and execute tests.
docker compose -f docker-compose.yml -f extensions/logspout/logspout-compose.yml up --remove-orphans -d logspout
.github/workflows/scripts/run-tests-logspout.sh
# Revert changes to Logstash configuration
sed -i '/input { udp { port => 50000 codec => json } }/d' logstash/pipeline/logstash.conf
docker compose restart logstash
# next steps don't need Logstash
docker compose stop logstash
@@ -153,6 +181,7 @@ jobs:
docker compose \
-f docker-compose.yml \
-f extensions/logspout/logspout-compose.yml \
-f extensions/fleet/fleet-compose.yml \
-f extensions/fleet/agent-apmserver-compose.yml \
-f extensions/metricbeat/metricbeat-compose.yml \
@@ -163,6 +192,7 @@ jobs:
docker compose \
-f docker-compose.yml \
-f extensions/logspout/logspout-compose.yml \
-f extensions/fleet/fleet-compose.yml \
-f extensions/fleet/agent-apmserver-compose.yml \
-f extensions/metricbeat/metricbeat-compose.yml \
@@ -191,6 +221,7 @@ jobs:
run: >-
docker compose
-f docker-compose.yml
-f extensions/logspout/logspout-compose.yml
-f extensions/fleet/fleet-compose.yml
-f extensions/fleet/agent-apmserver-compose.yml
-f extensions/metricbeat/metricbeat-compose.yml

@@ -11,15 +11,14 @@ cid_es="$(container_id elasticsearch)"
ip_es="$(service_ip elasticsearch)"
grouplog 'Wait for readiness of Elasticsearch'
poll_ready "$cid_es" 'http://elasticsearch:9200/' --resolve "elasticsearch:9200:${ip_es}" -u 'elastic:testpasswd'
poll_ready "$cid_es" "http://${ip_es}:9200/" -u 'elastic:testpasswd'
endgroup
log 'Disabling disk allocation decider'
declare -a put_args=( '-X' 'PUT' '--fail-with-body' '-s' '-u' 'elastic:testpasswd'
'-H' 'Content-Type: application/json'
'http://elasticsearch:9200/_cluster/settings?pretty'
'--resolve' "elasticsearch:9200:${ip_es}"
"http://${ip_es}:9200/_cluster/settings?pretty"
'-d' '{"persistent":{"cluster.routing.allocation.disk.threshold_enabled":false}}'
)
declare response
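The hunk shown here only switches the decider off for the duration of the CI run. For local experimentation, a minimal sketch of the reverse call (assuming the same `elastic:testpasswd` credentials and `${ip_es}` address used above; not part of this diff) could be:

```bash
# Hypothetical revert: setting the key to null clears the persistent override
# and restores Elasticsearch's default disk-watermark behaviour.
curl -X PUT --fail-with-body -s -u 'elastic:testpasswd' \
  -H 'Content-Type: application/json' \
  "http://${ip_es}:9200/_cluster/settings?pretty" \
  -d '{"persistent":{"cluster.routing.allocation.disk.threshold_enabled":null}}'
```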

@@ -16,15 +16,15 @@ ip_ls="$(service_ip logstash)"
ip_kb="$(service_ip kibana)"
grouplog 'Wait for readiness of Elasticsearch'
poll_ready "$cid_es" 'http://elasticsearch:9200/' --resolve "elasticsearch:9200:${ip_es}" -u 'elastic:testpasswd'
poll_ready "$cid_es" "http://${ip_es}:9200/" -u 'elastic:testpasswd'
endgroup
grouplog 'Wait for readiness of Logstash'
poll_ready "$cid_ls" 'http://logstash:9600/_node/pipelines/main?pretty' --resolve "logstash:9600:${ip_ls}"
poll_ready "$cid_ls" "http://${ip_ls}:9600/_node/pipelines/main?pretty"
endgroup
grouplog 'Wait for readiness of Kibana'
poll_ready "$cid_kb" 'http://kibana:5601/api/status' --resolve "kibana:5601:${ip_kb}" -u 'kibana_system:testpasswd'
poll_ready "$cid_kb" "http://${ip_kb}:5601/api/status" -u 'kibana_system:testpasswd'
endgroup
log 'Sending message to Logstash TCP input'
@@ -50,8 +50,7 @@ fi
# need to be resilient here.
was_retried=0
declare -a refresh_args=( '-X' 'POST' '-s' '-w' '%{http_code}' '-u' 'elastic:testpasswd'
'http://elasticsearch:9200/logs-generic-default/_refresh'
'--resolve' "elasticsearch:9200:${ip_es}"
"http://${ip_es}:9200/logs-generic-default/_refresh"
)
# retry for max 10s (10*1s)
@@ -76,8 +75,7 @@ log 'Searching message in Elasticsearch'
# we need to be resilient here too.
was_retried=0
declare -a search_args=( '-s' '-u' 'elastic:testpasswd'
'http://elasticsearch:9200/logs-generic-default/_search?q=message:dockerelk&pretty'
'--resolve' "elasticsearch:9200:${ip_es}"
"http://${ip_es}:9200/logs-generic-default/_search?q=message:dockerelk&pretty"
)
declare -i count
declare response

@@ -14,15 +14,15 @@ ip_es="$(service_ip elasticsearch)"
ip_en="$(service_ip enterprise-search)"
grouplog 'Wait for readiness of Elasticsearch'
poll_ready "$cid_es" 'http://elasticsearch:9200/' --resolve "elasticsearch:9200:${ip_es}" -u 'elastic:testpasswd'
poll_ready "$cid_es" "http://${ip_es}:9200/" -u 'elastic:testpasswd'
endgroup
grouplog 'Wait for readiness of Enterprise Search'
poll_ready "$cid_en" 'http://enterprise-search:3002/api/ent/v1/internal/health' --resolve "enterprise-search:3002:${ip_en}" -u 'elastic:testpasswd'
poll_ready "$cid_en" "http://${ip_en}:3002/api/ent/v1/internal/health" -u 'elastic:testpasswd'
endgroup
log 'Ensuring that App Search API keys were created in Elasticsearch'
response="$(curl 'http://elasticsearch:9200/.ent-search-actastic-app_search_api_tokens_v3/_search?q=*:*&pretty' -s --resolve "elasticsearch:9200:${ip_es}" -u elastic:testpasswd)"
response="$(curl "http://${ip_es}:9200/.ent-search-actastic-app_search_api_tokens_v3/_search?q=*:*&pretty" -s -u elastic:testpasswd)"
echo "$response"
declare -i count
count="$(jq -rn --argjson data "${response}" '$data.hits.total.value')"

@@ -8,17 +8,17 @@ source "${BASH_SOURCE[0]%/*}"/lib/testing.sh
cid_es="$(container_id elasticsearch)"
cid_fb="$(container_id filebeat)"
cid_mb="$(container_id filebeat)"
ip_es="$(service_ip elasticsearch)"
ip_fb="$(service_ip filebeat)"
ip_mb="$(service_ip filebeat)"
grouplog 'Wait for readiness of Elasticsearch'
poll_ready "$cid_es" 'http://elasticsearch:9200/' --resolve "elasticsearch:9200:${ip_es}" -u 'elastic:testpasswd'
poll_ready "$cid_es" "http://${ip_es}:9200/" -u 'elastic:testpasswd'
endgroup
grouplog 'Wait for readiness of Filebeat'
poll_ready "$cid_fb" 'http://filebeat:5066/?pretty' --resolve "filebeat:5066:${ip_fb}"
poll_ready "$cid_mb" "http://${ip_mb}:5066/?pretty"
endgroup
# We expect to find log entries for the 'elasticsearch' Compose service using
@@ -37,7 +37,7 @@ declare -i was_retried=0
# retry for max 60s (30*2s)
for _ in $(seq 1 30); do
response="$(curl 'http://elasticsearch:9200/filebeat-*/_search?q=agent.type:%22filebeat%22%20AND%20input.type:%22container%22%20AND%20container.name:%22docker-elk-elasticsearch-1%22&pretty' -s --resolve "elasticsearch:9200:${ip_es}" -u elastic:testpasswd)"
response="$(curl "http://${ip_es}:9200/filebeat-*/_search?q=agent.type:%22filebeat%22%20AND%20input.type:%22container%22%20AND%20container.name:%22docker-elk-elasticsearch-1%22&pretty" -s -u elastic:testpasswd)"
set +u # prevent "unbound variable" if assigned value is not an integer
count="$(jq -rn --argjson data "${response}" '$data.hits.total.value')"

@@ -16,15 +16,15 @@ ip_fl="$(service_ip fleet-server)"
ip_apm="$(service_ip apm-server)"
grouplog 'Wait for readiness of Elasticsearch'
poll_ready "$cid_es" 'http://elasticsearch:9200/' --resolve "elasticsearch:9200:${ip_es}" -u 'elastic:testpasswd'
poll_ready "$cid_es" "http://${ip_es}:9200/" -u 'elastic:testpasswd'
endgroup
grouplog 'Wait for readiness of Fleet Server'
poll_ready "$cid_fl" 'http://fleet-server:8220/api/status' --resolve "fleet-server:8220:${ip_fl}"
poll_ready "$cid_fl" "http://${ip_fl}:8220/api/status"
endgroup
grouplog 'Wait for readiness of APM Server'
poll_ready "$cid_apm" 'http://apm-server:8200/' --resolve "apm-server:8200:${ip_apm}"
poll_ready "$cid_apm" "http://${ip_apm}:8200/"
endgroup
# We expect to find metrics entries using the following query:
@@ -44,7 +44,7 @@ declare -i was_retried=0
# retry for max 60s (30*2s)
for _ in $(seq 1 30); do
response="$(curl 'http://elasticsearch:9200/metrics-system.cpu-default/_search?q=agent.name:%22fleet-server%22%20AND%20agent.type:%22metricbeat%22%20AND%20event.module:%22system%22%20AND%20event.dataset:%22system.cpu%22%20AND%20metricset.name:%22cpu%22&pretty' -s --resolve "elasticsearch:9200:${ip_es}" -u elastic:testpasswd)"
response="$(curl "http://${ip_es}:9200/metrics-system.cpu-default/_search?q=agent.name:%22fleet-server%22%20AND%20agent.type:%22metricbeat%22%20AND%20event.module:%22system%22%20AND%20event.dataset:%22system.cpu%22%20AND%20metricset.name:%22cpu%22&pretty" -s -u elastic:testpasswd)"
set +u # prevent "unbound variable" if assigned value is not an integer
count="$(jq -rn --argjson data "${response}" '$data.hits.total.value')"
@@ -87,7 +87,7 @@ was_retried=0
# retry for max 60s (30*2s)
for _ in $(seq 1 30); do
response="$(curl 'http://elasticsearch:9200/logs-docker.container_logs-default/_search?q=agent.name:%22fleet-server%22%20AND%20agent.type:%22filebeat%22%20AND%20container.name:%22docker-elk-elasticsearch-1%22&pretty' -s --resolve "elasticsearch:9200:${ip_es}" -u elastic:testpasswd)"
response="$(curl "http://${ip_es}:9200/logs-docker.container_logs-default/_search?q=agent.name:%22fleet-server%22%20AND%20agent.type:%22filebeat%22%20AND%20container.name:%22docker-elk-elasticsearch-1%22&pretty" -s -u elastic:testpasswd)"
set +u # prevent "unbound variable" if assigned value is not an integer
count="$(jq -rn --argjson data "${response}" '$data.hits.total.value')"

@@ -8,17 +8,17 @@ source "${BASH_SOURCE[0]%/*}"/lib/testing.sh
cid_es="$(container_id elasticsearch)"
cid_hb="$(container_id heartbeat)"
cid_mb="$(container_id heartbeat)"
ip_es="$(service_ip elasticsearch)"
ip_hb="$(service_ip heartbeat)"
ip_mb="$(service_ip heartbeat)"
grouplog 'Wait for readiness of Elasticsearch'
poll_ready "$cid_es" 'http://elasticsearch:9200/' --resolve "elasticsearch:9200:${ip_es}" -u 'elastic:testpasswd'
poll_ready "$cid_es" "http://${ip_es}:9200/" -u 'elastic:testpasswd'
endgroup
grouplog 'Wait for readiness of Heartbeat'
poll_ready "$cid_hb" 'http://heartbeat:5066/?pretty' --resolve "heartbeat:5066:${ip_hb}"
poll_ready "$cid_mb" "http://${ip_mb}:5066/?pretty"
endgroup
# We expect to find heartbeat entries for the 'elasticsearch' HTTP service
@@ -37,7 +37,7 @@ declare -i was_retried=0
# retry for max 60s (30*2s)
for _ in $(seq 1 30); do
response="$(curl 'http://elasticsearch:9200/heartbeat-*/_search?q=agent.type:%22heartbeat%22%20AND%20monitor.type:%22http%22%20AND%20url.domain:%22elasticsearch%22&pretty' -s --resolve "elasticsearch:9200:${ip_es}" -u elastic:testpasswd)"
response="$(curl "http://${ip_es}:9200/heartbeat-*/_search?q=agent.type:%22heartbeat%22%20AND%20monitor.type:%22http%22%20AND%20url.domain:%22elasticsearch%22&pretty" -s -u elastic:testpasswd)"
set +u # prevent "unbound variable" if assigned value is not an integer
count="$(jq -rn --argjson data "${response}" '$data.hits.total.value')"

@@ -0,0 +1,70 @@
#!/usr/bin/env bash
set -eu
set -o pipefail
source "${BASH_SOURCE[0]%/*}"/lib/testing.sh
cid_es="$(container_id elasticsearch)"
cid_ls="$(container_id logstash)"
cid_lsp="$(container_id logspout)"
ip_es="$(service_ip elasticsearch)"
ip_ls="$(service_ip logstash)"
ip_lsp="$(service_ip logspout)"
grouplog 'Wait for readiness of Elasticsearch'
poll_ready "$cid_es" "http://${ip_es}:9200/" -u 'elastic:testpasswd'
endgroup
grouplog 'Wait for readiness of Logstash'
poll_ready "$cid_ls" "http://${ip_ls}:9600/_node/pipelines/main?pretty"
endgroup
grouplog 'Wait for readiness of Logspout'
poll_ready "$cid_lsp" "http://${ip_lsp}/health"
endgroup
# When Logspout starts, it prints the following log line:
# 2021/01/07 16:14:52 # logspout v3.2.13-custom by gliderlabs
#
# which we expect to find by querying:
# docker.image:"docker-elk-logspout" AND message:"logspout gliderlabs"~3
#
log 'Searching a log entry forwarded by Logspout'
declare response
declare -i count
declare -i was_retried=0
# retry for max 60s (30*2s)
for _ in $(seq 1 30); do
response="$(curl "http://${ip_es}:9200/logs-generic-default/_search?q=docker.image:%22docker-elk-logspout%22%20AND%20message:%22logspout%20gliderlabs%22~3&pretty" -s -u elastic:testpasswd)"
set +u # prevent "unbound variable" if assigned value is not an integer
count="$(jq -rn --argjson data "${response}" '$data.hits.total.value')"
set -u
if (( count > 0 )); then
break
fi
was_retried=1
echo -n 'x' >&2
sleep 2
done
if ((was_retried)); then
# flush stderr, important in non-interactive environments (CI)
echo >&2
fi
echo "$response"
# Logspout may restart if Logstash isn't ready yet, so we tolerate multiple
# results
if (( count == 0 )); then
echo 'Expected at least 1 document'
exit 1
fi

@@ -14,11 +14,11 @@ ip_es="$(service_ip elasticsearch)"
ip_mb="$(service_ip metricbeat)"
grouplog 'Wait for readiness of Elasticsearch'
poll_ready "$cid_es" 'http://elasticsearch:9200/' --resolve "elasticsearch:9200:${ip_es}" -u 'elastic:testpasswd'
poll_ready "$cid_es" "http://${ip_es}:9200/" -u 'elastic:testpasswd'
endgroup
grouplog 'Wait for readiness of Metricbeat'
poll_ready "$cid_mb" 'http://metricbeat:5066/?pretty' --resolve "metricbeat:5066:${ip_mb}"
poll_ready "$cid_mb" "http://${ip_mb}:5066/?pretty"
endgroup
# We expect to find monitoring entries for the 'elasticsearch' Compose service
@@ -38,7 +38,7 @@ declare -i was_retried=0
# retry for max 60s (30*2s)
for _ in $(seq 1 30); do
response="$(curl 'http://elasticsearch:9200/metricbeat-*/_search?q=agent.type:%22metricbeat%22%20AND%20event.module:%22docker%22%20AND%20event.dataset:%22docker.container%22%20AND%20container.name:%22docker-elk-elasticsearch-1%22&pretty' -s --resolve "elasticsearch:9200:${ip_es}" -u elastic:testpasswd)"
response="$(curl "http://${ip_es}:9200/metricbeat-*/_search?q=agent.type:%22metricbeat%22%20AND%20event.module:%22docker%22%20AND%20event.dataset:%22docker.container%22%20AND%20container.name:%22docker-elk-elasticsearch-1%22&pretty" -s -u elastic:testpasswd)"
set +u # prevent "unbound variable" if assigned value is not an integer
count="$(jq -rn --argjson data "${response}" '$data.hits.total.value')"

@@ -1,6 +1,6 @@
# Elastic stack (ELK) on Docker
[![Elastic Stack version](https://img.shields.io/badge/Elastic%20Stack-8.13.4-00bfb3?style=flat&logo=elastic-stack)](https://www.elastic.co/blog/category/releases)
[![Elastic Stack version](https://img.shields.io/badge/Elastic%20Stack-8.12.1-00bfb3?style=flat&logo=elastic-stack)](https://www.elastic.co/blog/category/releases)
[![Build Status](https://github.com/deviantony/docker-elk/workflows/CI/badge.svg?branch=main)](https://github.com/deviantony/docker-elk/actions?query=workflow%3ACI+branch%3Amain)
[![Join the chat](https://badges.gitter.im/Join%20Chat.svg)](https://app.gitter.im/#/room/#deviantony_docker-elk:gitter.im)

@@ -19,17 +19,6 @@ filebeat.autodiscover:
type: container
paths:
- /var/lib/docker/containers/${data.container.id}/*-json.log
templates:
- condition:
contains:
docker.container.image: elasticsearch
config:
- module: elasticsearch
server:
input:
type: container
paths:
- /var/lib/docker/containers/${data.container.id}/*-json.log
processors:
- add_cloud_metadata: ~

@@ -0,0 +1,6 @@
# Ignore Docker build files
Dockerfile
.dockerignore
# Ignore OS artifacts
**/.DS_Store

@@ -0,0 +1,5 @@
# uses ONBUILD instructions described here:
# https://github.com/gliderlabs/logspout/tree/master/custom
FROM gliderlabs/logspout:master
ENV SYSLOG_FORMAT rfc3164

@@ -0,0 +1,28 @@
# Logspout extension
Logspout collects all Docker logs using the Docker logs API, and forwards them to Logstash without any additional
configuration.
## Usage
If you want to include the Logspout extension, run Docker Compose from the root of the repository with an additional
command line argument referencing the `logspout-compose.yml` file:
```bash
$ docker-compose -f docker-compose.yml -f extensions/logspout/logspout-compose.yml up
```
In your Logstash pipeline configuration, enable the `udp` input and set the input codec to `json`:
```logstash
input {
udp {
port => 50000
codec => json
}
}
```
## Documentation
<https://github.com/looplab/logspout-logstash>
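Beyond the upstream README above, a quick end-to-end sanity check is possible. The sketch below is only an illustration and rests on assumptions: the stack's default `elastic`/`changeme` credentials, Elasticsearch published on `localhost:9200`, and forwarded logs landing in the `logs-generic-default` data stream (the same one queried by the CI test earlier in this diff):

```bash
# Hypothetical verification: emit a log line from any container on the host,
# wait briefly for Logspout -> Logstash -> Elasticsearch indexing, then search for it.
docker run --rm alpine echo 'hello from logspout'
sleep 10
curl -s -u 'elastic:changeme' \
  'http://localhost:9200/logs-generic-default/_search?q=message:%22hello%20from%20logspout%22&pretty'
```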

extensions/logspout/build.sh (executable file)

@@ -0,0 +1,13 @@
#!/bin/sh
# source: https://github.com/gliderlabs/logspout/blob/621524e/custom/build.sh
set -e
apk add --update go build-base git mercurial ca-certificates
cd /src
go build -ldflags "-X main.Version=$1" -o /bin/logspout
apk del go git mercurial build-base
rm -rf /root/go /var/cache/apk/*
# backwards compatibility
ln -fs /tmp/docker.sock /var/run/docker.sock

@@ -0,0 +1,19 @@
version: '3.7'
services:
logspout:
build:
context: extensions/logspout
volumes:
- type: bind
source: /var/run/docker.sock
target: /var/run/docker.sock
read_only: true
environment:
ROUTE_URIS: logstash://logstash:50000
LOGSTASH_TAGS: docker-elk
networks:
- elk
depends_on:
- logstash
restart: on-failure

@@ -0,0 +1,10 @@
package main
// installs the Logstash adapter for Logspout, and required dependencies
// https://github.com/looplab/logspout-logstash
import (
_ "github.com/gliderlabs/logspout/healthcheck"
_ "github.com/gliderlabs/logspout/transports/tcp"
_ "github.com/gliderlabs/logspout/transports/udp"
_ "github.com/looplab/logspout-logstash"
)

@@ -2,7 +2,6 @@
"cluster": [
"manage_ilm",
"manage_index_templates",
"manage_ingest_pipelines",
"monitor",
"read_pipeline"
],