Automate management of required roles and user passwords (#671)

This commit is contained in:
Antoine Cotten 2022-02-21 11:19:43 +01:00 committed by GitHub
parent 641290c20a
commit 9877b39900
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
34 changed files with 503 additions and 327 deletions

View File

@ -1,2 +0,0 @@
# Ignore OS artifacts
**/.DS_Store

21
.env
View File

@ -1 +1,22 @@
ELK_VERSION=8.0.0 ELK_VERSION=8.0.0
## Passwords for stack users
#
# User 'elastic' (built-in)
#
# Superuser role, full access to cluster management and data indices.
# https://www.elastic.co/guide/en/elasticsearch/reference/current/built-in-users.html
ELASTIC_PASSWORD='changeme'
# User 'logstash_internal' (custom)
#
# The user Logstash uses to connect and send data to Elasticsearch.
# https://www.elastic.co/guide/en/logstash/current/ls-security.html
LOGSTASH_INTERNAL_PASSWORD='changeme'
# User 'kibana_system' (built-in)
#
# The user Kibana uses to connect and communicate with Elasticsearch.
# https://www.elastic.co/guide/en/elasticsearch/reference/current/built-in-users.html
KIBANA_SYSTEM_PASSWORD='changeme'

View File

@ -8,8 +8,8 @@ on:
jobs: jobs:
test-compose: test:
name: 'Test suite: Compose' name: Test suite
# List of supported runners: # List of supported runners:
# https://docs.github.com/en/free-pro-team@latest/actions/reference/specifications-for-github-hosted-runners#supported-runners-and-hardware-resources # https://docs.github.com/en/free-pro-team@latest/actions/reference/specifications-for-github-hosted-runners#supported-runners-and-hardware-resources
runs-on: ubuntu-latest runs-on: ubuntu-latest
@ -26,13 +26,6 @@ jobs:
- name: Prepare environment - name: Prepare environment
run: | run: |
# Install Linux packages
#
# List of packages pre-installed in the runner:
# https://docs.github.com/en/free-pro-team@latest/actions/reference/specifications-for-github-hosted-runners#supported-software
sudo apt install -y expect
# Enable support for Compose V2 # Enable support for Compose V2
# #
# Instructions: # Instructions:
@ -63,24 +56,9 @@ jobs:
- name: Set password of every built-in user to 'testpasswd' - name: Set password of every built-in user to 'testpasswd'
run: | run: |
# Change password of 'elastic' user from 'changeme' to 'testpasswd' in config files sed -i -e 's/\(ELASTIC_PASSWORD=\)'\''changeme'\''/\1testpasswd/g' \
-e 's/\(LOGSTASH_INTERNAL_PASSWORD=\)'\''changeme'\''/\1testpasswd/g' \
sed -i 's/\(password =>\) "changeme"/\1 "testpasswd"/g' logstash/pipeline/logstash.conf -e 's/\(KIBANA_SYSTEM_PASSWORD=\)'\''changeme'\''/\1testpasswd/g' .env
sed -i 's/\(elasticsearch.password:\) '\''changeme'\''/\1 testpasswd/g' kibana/config/kibana.yml
sed -i -e 's/\(elasticsearch.password:\) '\''changeme'\''/\1 testpasswd/g' -e 's/\(secret_management.encryption_keys:\)/\1 [test-encrypt]/g' extensions/enterprise-search/config/enterprise-search.yml
sed -i 's/\(password:\) '\''changeme'\''/\1 testpasswd/g' extensions/apm-server/config/apm-server.yml
sed -i 's/\(password:\) '\''changeme'\''/\1 testpasswd/g' extensions/metricbeat/config/metricbeat.yml
sed -i 's/\(password:\) '\''changeme'\''/\1 testpasswd/g' extensions/filebeat/config/filebeat.yml
# Run Elasticsearch and wait for its availability
docker compose up -d elasticsearch
source .github/workflows/scripts/lib/testing.sh
poll_ready "$(container_id elasticsearch)" "http://$(service_ip elasticsearch):9200/" -u 'elastic:changeme'
# Set passwords
.github/workflows/scripts/elasticsearch-setup-passwords.exp
########################################################## ##########################################################
# # # #
@ -100,6 +78,7 @@ jobs:
if: always() if: always()
run: | run: |
docker compose ps docker compose ps
docker compose logs setup
docker compose logs elasticsearch docker compose logs elasticsearch
docker compose logs logstash docker compose logs logstash
docker compose logs kibana docker compose logs kibana
@ -162,6 +141,8 @@ jobs:
# Run Enterprise Search and execute tests # Run Enterprise Search and execute tests
sed -i 's/\(secret_management.encryption_keys:\)/\1 [test-encrypt]/g' extensions/enterprise-search/config/enterprise-search.yml
docker compose -f docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml up -d enterprise-search docker compose -f docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml up -d enterprise-search
.github/workflows/scripts/run-tests-enterprise-search.sh .github/workflows/scripts/run-tests-enterprise-search.sh
@ -246,85 +227,3 @@ jobs:
-f extensions/metricbeat/metricbeat-compose.yml -f extensions/metricbeat/metricbeat-compose.yml
-f extensions/filebeat/filebeat-compose.yml -f extensions/filebeat/filebeat-compose.yml
down -v down -v
test-swarm:
name: 'Test suite: Swarm'
runs-on: ubuntu-latest
env:
MODE: swarm
steps:
- uses: actions/checkout@v2
#####################################################
# #
# Install all dependencies required by test suites. #
# #
#####################################################
- name: Prepare environment
run: |
# Install Linux packages
sudo apt install -y expect
# Enable Swarm mode
docker swarm init
########################################################
# #
# Ensure §"Initial setup" of the README remains valid. #
# #
########################################################
- name: Set password of every built-in user to 'testpasswd'
run: |
# Change password of 'elastic' user from 'changeme' to 'testpasswd' in config files
sed -i 's/\(password =>\) "changeme"/\1 "testpasswd"/g' logstash/pipeline/logstash.conf
sed -i 's/\(elasticsearch.password:\) '\''changeme'\''/\1 testpasswd/g' kibana/config/kibana.yml
# Run Elasticsearch and wait for its availability
docker stack deploy -c ./docker-stack.yml elk
docker service scale elk_logstash=0 elk_kibana=0
source .github/workflows/scripts/lib/testing.sh
poll_ready "$(container_id elasticsearch)" "http://$(service_ip elasticsearch):9200/" -u 'elastic:changeme'
# Set passwords
.github/workflows/scripts/elasticsearch-setup-passwords.exp swarm
##########################################################
# #
# Test core components: Elasticsearch, Logstash, Kibana. #
# #
##########################################################
- name: Run the stack
run: docker service scale elk_logstash=1 elk_kibana=1
- name: Execute core test suite
run: .github/workflows/scripts/run-tests-core.sh swarm
- name: 'debug: Display state and logs (core)'
if: always()
run: |
docker stack services elk
docker service logs elk_elasticsearch
docker service logs elk_kibana
docker service logs elk_logstash
##############
# #
# Tear down. #
# #
##############
- name: Terminate all components
if: always()
run: docker stack rm elk

View File

@ -1,38 +0,0 @@
#!/usr/bin/expect -f
# List of expected users with dummy password
set users {"elastic" "kibana_system" "logstash_system" "beats_system" "apm_system" "remote_monitoring_user"}
set password "testpasswd"
# Find elasticsearch container id
set MODE [lindex $argv 0]
if { [string match "swarm" $MODE] } {
set cid [exec docker ps -q -f label=com.docker.swarm.service.name=elk_elasticsearch]
} else {
set cid [exec docker ps -q -f label=com.docker.compose.service=elasticsearch]
}
foreach user $users {
set cmd "docker exec -it $cid bin/elasticsearch-reset-password --batch --user $user -i"
spawn {*}$cmd
expect {
-re "(E|Re-e)nter password for \\\[$user\\\]: " {
send "$password\r"
exp_continue
}
timeout {
puts "\ntimed out waiting for input"
exit 4
}
eof
}
lassign [wait] pid spawnid os_error_flag value
if {$value != 0} {
if {$os_error_flag == 0} { puts "exit status: $value" } else { puts "errno: $value" }
exit $value
}
}

View File

@ -14,12 +14,7 @@ function err {
function container_id { function container_id {
local svc=$1 local svc=$1
local label local label="com.docker.compose.service=${svc}"
if [[ "${MODE:-}" == "swarm" ]]; then
label="com.docker.swarm.service.name=elk_${svc}"
else
label="com.docker.compose.service=${svc}"
fi
local cid local cid
@ -51,26 +46,11 @@ function container_id {
# Return the IP address at which a service can be reached. # Return the IP address at which a service can be reached.
# In Compose mode, returns the container's IP. # In Compose mode, returns the container's IP.
# In Swarm mode, returns the IP of the node to ensure traffic enters the routing mesh (ingress).
function service_ip { function service_ip {
local svc=$1 local svc=$1
local ip local ip
if [[ "${MODE:-}" == "swarm" ]]; then
#ingress_net="$(docker network inspect ingress --format '{{ .Id }}')"
#ip="$(docker service inspect elk_"$svc" --format "{{ range .Endpoint.VirtualIPs }}{{ if eq .NetworkID \"${ingress_net}\" }}{{ .Addr }}{{ end }}{{ end }}" | cut -d/ -f1)"
node="$(docker node ls --format '{{ .ID }}')"
ip="$(docker node inspect "$node" --format '{{ .Status.Addr }}')"
if [ -z "${ip:-}" ]; then
err "Node ${node} has no IP address"
return 1
fi
echo "$ip"
return
fi
local cid local cid
cid="$(container_id "$svc")" cid="$(container_id "$svc")"

View File

@ -71,7 +71,7 @@ jobs:
# Escape dot characters so sed interprets them as literal dots # Escape dot characters so sed interprets them as literal dots
cur_ver="$(echo $cur_ver | sed 's/\./\\./g')" cur_ver="$(echo $cur_ver | sed 's/\./\\./g')"
for f in .env docker-stack.yml README.md; do for f in .env README.md; do
sed -i "s/${cur_ver}/${new_ver}/g" "$f" sed -i "s/${cur_ver}/${new_ver}/g" "$f"
done done

123
README.md
View File

@ -48,6 +48,7 @@ own_. [sherifabdlnaby/elastdocker][elastdocker] is one example among others of p
* [Windows](#windows) * [Windows](#windows)
* [macOS](#macos) * [macOS](#macos)
1. [Usage](#usage) 1. [Usage](#usage)
* [Bringing up the stack](#bringing-up-the-stack)
* [Initial setup](#initial-setup) * [Initial setup](#initial-setup)
* [Setting up user authentication](#setting-up-user-authentication) * [Setting up user authentication](#setting-up-user-authentication)
* [Injecting data](#injecting-data) * [Injecting data](#injecting-data)
@ -68,7 +69,6 @@ own_. [sherifabdlnaby/elastdocker][elastdocker] is one example among others of p
* [How to enable a remote JMX connection to a service](#how-to-enable-a-remote-jmx-connection-to-a-service) * [How to enable a remote JMX connection to a service](#how-to-enable-a-remote-jmx-connection-to-a-service)
1. [Going further](#going-further) 1. [Going further](#going-further)
* [Plugins and integrations](#plugins-and-integrations) * [Plugins and integrations](#plugins-and-integrations)
* [Swarm mode](#swarm-mode)
## Requirements ## Requirements
@ -112,44 +112,57 @@ instructions from the [documentation][mac-filesharing] to add more locations.
**:warning: You must rebuild the stack images with `docker-compose build` whenever you switch branch or update the **:warning: You must rebuild the stack images with `docker-compose build` whenever you switch branch or update the
[version](#version-selection) of an already existing stack.** [version](#version-selection) of an already existing stack.**
### Initial setup ### Bringing up the stack
Clone this repository onto the Docker host that will run the stack, then start the Elasticsearch service locally using Clone this repository onto the Docker host that will run the stack, then start the stack's services locally using Docker
Docker Compose: Compose:
```console ```console
$ docker-compose up -d elasticsearch $ docker-compose up
``` ```
We will start the rest of the Elastic components _after_ completing the initial setup described in this section. These *:information_source: You can also run all services in the background (detached mode) by appending the `-d` flag to the
steps only need to be performed _once_. above command.*
**:warning: Starting with Elastic v8.0.0, it is no longer possible to run Kibana using the bootstrapped privileged Give Kibana about a minute to initialize, then access the Kibana web UI by opening <http://localhost:5601> in a web
`elastic` user. If you are starting the stack for the very first time, you MUST initialize a password for the [built-in browser and use the following (default) credentials to log in:
`kibana_system` user][builtin-users] to be able to start and access Kibana. Please read the section below attentively.**
* user: *elastic*
* password: *changeme*
*:information_source: Upon the initial startup, the `elastic`, `logstash_internal` and `kibana_system` Elasticsearch
users are initialized with the values of the passwords defined in the [`.env`](.env) file (_"changeme"_ by default). The
first one is the [built-in superuser][builtin-users], the other two are used by Kibana and Logstash respectively to
communicate with Elasticsearch. This task is only performed during the _initial_ startup of the stack. To change users'
passwords _after_ they have been initialized, please refer to the instructions in the next section.*
### Initial setup
#### Setting up user authentication #### Setting up user authentication
*:information_source: Refer to [Security settings in Elasticsearch][es-security] to disable authentication.* *:information_source: Refer to [Security settings in Elasticsearch][es-security] to disable authentication.*
The stack is pre-configured with the following **privileged** bootstrap user: **:warning: Starting with Elastic v8.0.0, it is no longer possible to run Kibana using the bootstrapped privileged
`elastic` user.**
* user: *elastic* The _"changeme"_ password set by default for all aforementioned users is **insecure**. For increased security, we will
* password: *changeme* reset the passwords of all aforementioned Elasticsearch users to random secrets.
For increased security, we will reset this bootstrap password, and generate a set of passwords to be used by 1. Reset passwords for default users
unprivileged [built-in users][builtin-users] within components of the Elastic stack.
1. Initialize passwords for built-in users The commands below reset the passwords of the `elastic`, `logstash_internal` and `kibana_system` users. Take note
of them.
The commands below generate random passwords for the `elastic` and `kibana_system` users. Take note of them.
```console ```console
$ docker-compose exec -T elasticsearch bin/elasticsearch-reset-password --batch --user elastic $ docker-compose exec elasticsearch bin/elasticsearch-reset-password --batch --user elastic
``` ```
```console ```console
$ docker-compose exec -T elasticsearch bin/elasticsearch-reset-password --batch --user kibana_system $ docker-compose exec elasticsearch bin/elasticsearch-reset-password --batch --user logstash_internal
```
```console
$ docker-compose exec elasticsearch bin/elasticsearch-reset-password --batch --user kibana_system
``` ```
If the need for it arises (e.g. if you want to [collect monitoring information][ls-monitoring] through Beats and If the need for it arises (e.g. if you want to [collect monitoring information][ls-monitoring] through Beats and
@ -158,48 +171,40 @@ unprivileged [built-in users][builtin-users] within components of the Elastic st
1. Replace usernames and passwords in configuration files 1. Replace usernames and passwords in configuration files
Replace the password of the `kibana_system` user inside the Kibana configuration file (`kibana/config/kibana.yml`) Replace the password of the `elastic` user inside the `.env` file with the password generated in the previous step.
with the password generated in the previous step. Its value isn't used by any core component, but [extensions](#how-to-enable-the-provided-extensions) use it to
connect to Elasticsearch.
Replace the password of the `elastic` user inside the Logstash pipeline file (`logstash/pipeline/logstash.conf`) *:information_source: In case you don't plan on using any of the provided
with the password generated in the previous step. [extensions](#how-to-enable-the-provided-extensions), or prefer to create your own roles and users to authenticate
these services, it is safe to remove the `ELASTIC_PASSWORD` entry from the `.env` file altogether after the stack
has been initialized.*
*:information_source: Do not use the `logstash_system` user inside the Logstash **pipeline** file, it does not have Replace the password of the `logstash_internal` user inside the `.env` file with the password generated in the
sufficient permissions to create indices. Follow the instructions at [Configuring Security in Logstash][ls-security] previous step. Its value is referenced inside the Logstash pipeline file (`logstash/pipeline/logstash.conf`).
to create a user with suitable roles.*
See also the [Configuration](#configuration) section below. Replace the password of the `kibana_system` user inside the `.env` file with the password generated in the previous
step. Its value is referenced inside the Kibana configuration file (`kibana/config/kibana.yml`).
1. Unset the bootstrap password (_optional_) See the [Configuration](#configuration) section below for more information about these configuration files.
Remove the `ELASTIC_PASSWORD` environment variable from the `elasticsearch` service inside the Compose file 1. Restart Logstash and Kibana to re-connect to Elasticsearch using the new passwords
(`docker-compose.yml`). It is only used to initialize the keystore during the initial startup of Elasticsearch, and
is ignored on subsequent runs.
1. Start Kibana and Logstash
```console ```console
$ docker-compose up -d $ docker-compose up -d logstash kibana
``` ```
The `-d` flag runs all services in the background (detached mode). *:information_source: Learn more about the security of the Elastic stack at [Secure the Elastic Stack][sec-cluster].*
On subsequent runs of the Elastic stack, it is sufficient to execute the above command in order to start all
components.
*:information_source: Learn more about the security of the Elastic stack at [Secure the Elastic
Stack][sec-cluster].*
#### Injecting data #### Injecting data
Give Kibana about a minute to initialize, then access the Kibana web UI by opening <http://localhost:5601> in a web Open the Kibana web UI by opening <http://localhost:5601> in a web browser and use the following credentials to log in:
browser and use the following credentials to log in:
* user: *elastic* * user: *elastic*
* password: *\<your generated elastic password>* * password: *\<your generated elastic password>*
Now that the stack is running, you can go ahead and inject some log entries. The shipped Logstash configuration allows Now that the stack is fully configured, you can go ahead and inject some log entries. The shipped Logstash configuration
you to send content via TCP: allows you to send content via TCP:
```console ```console
# Using BSD netcat (Debian, Ubuntu, MacOS system, ...) # Using BSD netcat (Debian, Ubuntu, MacOS system, ...)
@ -228,8 +233,9 @@ $ docker-compose down -v
This repository stays aligned with the latest version of the Elastic stack. The `main` branch tracks the current major This repository stays aligned with the latest version of the Elastic stack. The `main` branch tracks the current major
version (8.x). version (8.x).
To use a different version of the core Elastic components, simply change the version number inside the `.env` file. If To use a different version of the core Elastic components, simply change the version number inside the [`.env`](.env)
you are upgrading an existing stack, please carefully read the note in the next section. file. If you are upgrading an existing stack, remember to rebuild all container images using the `docker-compose build`
command.
**:warning: Always pay attention to the [official upgrade instructions][upgrade] for each individual component before **:warning: Always pay attention to the [official upgrade instructions][upgrade] for each individual component before
performing a stack upgrade.** performing a stack upgrade.**
@ -392,24 +398,6 @@ See the following Wiki pages:
* [External applications](https://github.com/deviantony/docker-elk/wiki/External-applications) * [External applications](https://github.com/deviantony/docker-elk/wiki/External-applications)
* [Popular integrations](https://github.com/deviantony/docker-elk/wiki/Popular-integrations) * [Popular integrations](https://github.com/deviantony/docker-elk/wiki/Popular-integrations)
### Swarm mode
Experimental support for Docker [Swarm mode][swarm-mode] is provided in the form of a `docker-stack.yml` file, which can
be deployed in an existing Swarm cluster using the following command:
```console
$ docker stack deploy -c docker-stack.yml elk
```
If all components get deployed without any error, the following command will show 3 running services:
```console
$ docker stack services elk
```
*:information_source: To scale Elasticsearch in Swarm mode, configure seed hosts with the DNS name `tasks.elasticsearch`
instead of `elasticsearch`.*
[elk-stack]: https://www.elastic.co/what-is/elk-stack [elk-stack]: https://www.elastic.co/what-is/elk-stack
[xpack]: https://www.elastic.co/what-is/open-x-pack [xpack]: https://www.elastic.co/what-is/open-x-pack
[paid-features]: https://www.elastic.co/subscriptions [paid-features]: https://www.elastic.co/subscriptions
@ -429,7 +417,6 @@ instead of `elasticsearch`.*
[mac-filesharing]: https://docs.docker.com/desktop/mac/#file-sharing [mac-filesharing]: https://docs.docker.com/desktop/mac/#file-sharing
[builtin-users]: https://www.elastic.co/guide/en/elasticsearch/reference/current/built-in-users.html [builtin-users]: https://www.elastic.co/guide/en/elasticsearch/reference/current/built-in-users.html
[ls-security]: https://www.elastic.co/guide/en/logstash/current/ls-security.html
[ls-monitoring]: https://www.elastic.co/guide/en/logstash/current/monitoring-with-metricbeat.html [ls-monitoring]: https://www.elastic.co/guide/en/logstash/current/monitoring-with-metricbeat.html
[sec-cluster]: https://www.elastic.co/guide/en/elasticsearch/reference/current/secure-cluster.html [sec-cluster]: https://www.elastic.co/guide/en/elasticsearch/reference/current/secure-cluster.html
@ -445,5 +432,3 @@ instead of `elasticsearch`.*
[ls-docker]: https://www.elastic.co/guide/en/logstash/current/docker-config.html [ls-docker]: https://www.elastic.co/guide/en/logstash/current/docker-config.html
[upgrade]: https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html [upgrade]: https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html
[swarm-mode]: https://docs.docker.com/engine/swarm/

View File

@ -1,11 +1,33 @@
version: '3.2' version: '3.2'
services: services:
# The 'setup' service runs a one-off script which initializes the
# 'logstash_internal' and 'kibana_system' users inside Elasticsearch with the
# values of the passwords defined in the '.env' file.
#
# This task is only performed during the *initial* startup of the stack. On all
# subsequent runs, the service simply returns immediately, without performing
# any modification to existing users.
setup:
build:
context: setup/
args:
ELK_VERSION: ${ELK_VERSION}
volumes:
- setup:/state:Z
environment:
ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
LOGSTASH_INTERNAL_PASSWORD: ${LOGSTASH_INTERNAL_PASSWORD:-}
KIBANA_SYSTEM_PASSWORD: ${KIBANA_SYSTEM_PASSWORD:-}
networks:
- elk
elasticsearch: elasticsearch:
build: build:
context: elasticsearch/ context: elasticsearch/
args: args:
ELK_VERSION: $ELK_VERSION ELK_VERSION: ${ELK_VERSION}
volumes: volumes:
- ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro,z - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro,z
- elasticsearch:/usr/share/elasticsearch/data:z - elasticsearch:/usr/share/elasticsearch/data:z
@ -14,7 +36,10 @@ services:
- "9300:9300" - "9300:9300"
environment: environment:
ES_JAVA_OPTS: -Xmx256m -Xms256m ES_JAVA_OPTS: -Xmx256m -Xms256m
ELASTIC_PASSWORD: 'changeme' # Bootstrap password.
# Used to initialize the keystore during the initial startup of
# Elasticsearch. Ignored on subsequent runs.
ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
# Use single node discovery in order to disable production mode and avoid bootstrap checks. # Use single node discovery in order to disable production mode and avoid bootstrap checks.
# see: https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html # see: https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
discovery.type: single-node discovery.type: single-node
@ -25,7 +50,7 @@ services:
build: build:
context: logstash/ context: logstash/
args: args:
ELK_VERSION: $ELK_VERSION ELK_VERSION: ${ELK_VERSION}
volumes: volumes:
- ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro,Z - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro,Z
- ./logstash/pipeline:/usr/share/logstash/pipeline:ro,Z - ./logstash/pipeline:/usr/share/logstash/pipeline:ro,Z
@ -36,6 +61,7 @@ services:
- "9600:9600" - "9600:9600"
environment: environment:
LS_JAVA_OPTS: -Xmx256m -Xms256m LS_JAVA_OPTS: -Xmx256m -Xms256m
LOGSTASH_INTERNAL_PASSWORD: ${LOGSTASH_INTERNAL_PASSWORD:-}
networks: networks:
- elk - elk
depends_on: depends_on:
@ -45,11 +71,13 @@ services:
build: build:
context: kibana/ context: kibana/
args: args:
ELK_VERSION: $ELK_VERSION ELK_VERSION: ${ELK_VERSION}
volumes: volumes:
- ./kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml:ro,Z - ./kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml:ro,Z
ports: ports:
- "5601:5601" - "5601:5601"
environment:
KIBANA_SYSTEM_PASSWORD: ${KIBANA_SYSTEM_PASSWORD:-}
networks: networks:
- elk - elk
depends_on: depends_on:
@ -60,4 +88,5 @@ networks:
driver: bridge driver: bridge
volumes: volumes:
setup:
elasticsearch: elasticsearch:

View File

@ -1,72 +0,0 @@
version: '3.3'
services:
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch:8.0.0
ports:
- "9200:9200"
- "9300:9300"
configs:
- source: elastic_config
target: /usr/share/elasticsearch/config/elasticsearch.yml
environment:
ES_JAVA_OPTS: "-Xmx256m -Xms256m"
ELASTIC_PASSWORD: changeme
# Use single node discovery in order to disable production mode and avoid bootstrap checks.
# see: https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
discovery.type: single-node
# Force publishing on the 'elk' overlay.
network.publish_host: _eth0_
networks:
- elk
deploy:
mode: replicated
replicas: 1
logstash:
image: docker.elastic.co/logstash/logstash:8.0.0
ports:
- "5044:5044"
- "5000:5000"
- "9600:9600"
configs:
- source: logstash_config
target: /usr/share/logstash/config/logstash.yml
- source: logstash_pipeline
target: /usr/share/logstash/pipeline/logstash.conf
environment:
LS_JAVA_OPTS: "-Xmx256m -Xms256m"
networks:
- elk
deploy:
mode: replicated
replicas: 1
kibana:
image: docker.elastic.co/kibana/kibana:8.0.0
ports:
- "5601:5601"
configs:
- source: kibana_config
target: /usr/share/kibana/config/kibana.yml
networks:
- elk
deploy:
mode: replicated
replicas: 1
configs:
elastic_config:
file: ./elasticsearch/config/elasticsearch.yml
logstash_config:
file: ./logstash/config/logstash.yml
logstash_pipeline:
file: ./logstash/pipeline/logstash.conf
kibana_config:
file: ./kibana/config/kibana.yml
networks:
elk:
driver: overlay

View File

@ -0,0 +1,6 @@
# Ignore Docker build files
Dockerfile
.dockerignore
# Ignore OS artifacts
**/.DS_Store

View File

@ -0,0 +1,6 @@
# Ignore Docker build files
Dockerfile
.dockerignore
# Ignore OS artifacts
**/.DS_Store

View File

@ -5,7 +5,7 @@ services:
build: build:
context: extensions/apm-server/ context: extensions/apm-server/
args: args:
ELK_VERSION: $ELK_VERSION ELK_VERSION: ${ELK_VERSION}
command: command:
# Disable strict permission checking on 'apm-server.yml' configuration file # Disable strict permission checking on 'apm-server.yml' configuration file
# https://www.elastic.co/guide/en/beats/libbeat/current/config-file-permissions.html # https://www.elastic.co/guide/en/beats/libbeat/current/config-file-permissions.html
@ -14,6 +14,8 @@ services:
- ./extensions/apm-server/config/apm-server.yml:/usr/share/apm-server/apm-server.yml:ro,Z - ./extensions/apm-server/config/apm-server.yml:/usr/share/apm-server/apm-server.yml:ro,Z
ports: ports:
- '8200:8200' - '8200:8200'
environment:
ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
networks: networks:
- elk - elk
depends_on: depends_on:

View File

@ -5,4 +5,4 @@ output:
elasticsearch: elasticsearch:
hosts: ['http://elasticsearch:9200'] hosts: ['http://elasticsearch:9200']
username: elastic username: elastic
password: 'changeme' password: ${ELASTIC_PASSWORD}

View File

@ -0,0 +1,6 @@
# Ignore Docker build files
Dockerfile
.dockerignore
# Ignore OS artifacts
**/.DS_Store

View File

@ -0,0 +1,6 @@
# Ignore Docker build files
Dockerfile
.dockerignore
# Ignore OS artifacts
**/.DS_Store

View File

@ -22,7 +22,7 @@ kibana.host: http://localhost:5601
# Elasticsearch URL and credentials # Elasticsearch URL and credentials
elasticsearch.host: http://elasticsearch:9200 elasticsearch.host: http://elasticsearch:9200
elasticsearch.username: elastic elasticsearch.username: elastic
elasticsearch.password: 'changeme' elasticsearch.password: ${ELASTIC_PASSWORD}
# Allow Enterprise Search to modify Elasticsearch settings. Used to enable auto-creation of Elasticsearch indexes. # Allow Enterprise Search to modify Elasticsearch settings. Used to enable auto-creation of Elasticsearch indexes.
allow_es_settings_modification: true allow_es_settings_modification: true

View File

@ -5,12 +5,13 @@ services:
build: build:
context: extensions/enterprise-search/ context: extensions/enterprise-search/
args: args:
ELK_VERSION: $ELK_VERSION ELK_VERSION: ${ELK_VERSION}
volumes: volumes:
- ./extensions/enterprise-search/config/enterprise-search.yml:/usr/share/enterprise-search/config/enterprise-search.yml:ro,Z - ./extensions/enterprise-search/config/enterprise-search.yml:/usr/share/enterprise-search/config/enterprise-search.yml:ro,Z
environment: environment:
JAVA_OPTS: -Xmx2g -Xms2g JAVA_OPTS: -Xmx2g -Xms2g
ENT_SEARCH_DEFAULT_PASSWORD: 'changeme' ENT_SEARCH_DEFAULT_PASSWORD: 'changeme'
ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
ports: ports:
- '3002:3002' - '3002:3002'
networks: networks:

View File

@ -0,0 +1,6 @@
# Ignore Docker build files
Dockerfile
.dockerignore
# Ignore OS artifacts
**/.DS_Store

View File

@ -20,7 +20,7 @@ processors:
output.elasticsearch: output.elasticsearch:
hosts: ['http://elasticsearch:9200'] hosts: ['http://elasticsearch:9200']
username: elastic username: elastic
password: 'changeme' password: ${ELASTIC_PASSWORD}
## HTTP endpoint for health checking ## HTTP endpoint for health checking
## https://www.elastic.co/guide/en/beats/filebeat/current/http-endpoint.html ## https://www.elastic.co/guide/en/beats/filebeat/current/http-endpoint.html

View File

@ -5,7 +5,7 @@ services:
build: build:
context: extensions/filebeat/ context: extensions/filebeat/
args: args:
ELK_VERSION: $ELK_VERSION ELK_VERSION: ${ELK_VERSION}
# Run as 'root' instead of 'filebeat' (uid 1000) to allow reading # Run as 'root' instead of 'filebeat' (uid 1000) to allow reading
# 'docker.sock' and the host's filesystem. # 'docker.sock' and the host's filesystem.
user: root user: root
@ -26,6 +26,8 @@ services:
source: /var/run/docker.sock source: /var/run/docker.sock
target: /var/run/docker.sock target: /var/run/docker.sock
read_only: true read_only: true
environment:
ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
networks: networks:
- elk - elk
depends_on: depends_on:

View File

@ -0,0 +1,6 @@
# Ignore Docker build files
Dockerfile
.dockerignore
# Ignore OS artifacts
**/.DS_Store

View File

@ -0,0 +1,6 @@
# Ignore Docker build files
Dockerfile
.dockerignore
# Ignore OS artifacts
**/.DS_Store

View File

@ -34,7 +34,7 @@ processors:
output.elasticsearch: output.elasticsearch:
hosts: ['http://elasticsearch:9200'] hosts: ['http://elasticsearch:9200']
username: elastic username: elastic
password: 'changeme' password: ${ELASTIC_PASSWORD}
## HTTP endpoint for health checking ## HTTP endpoint for health checking
## https://www.elastic.co/guide/en/beats/metricbeat/current/http-endpoint.html ## https://www.elastic.co/guide/en/beats/metricbeat/current/http-endpoint.html

View File

@ -5,7 +5,7 @@ services:
build: build:
context: extensions/metricbeat/ context: extensions/metricbeat/
args: args:
ELK_VERSION: $ELK_VERSION ELK_VERSION: ${ELK_VERSION}
# Run as 'root' instead of 'metricbeat' (uid 1000) to allow reading # Run as 'root' instead of 'metricbeat' (uid 1000) to allow reading
# 'docker.sock' and the host's filesystem. # 'docker.sock' and the host's filesystem.
user: root user: root
@ -37,6 +37,8 @@ services:
source: /var/run/docker.sock source: /var/run/docker.sock
target: /var/run/docker.sock target: /var/run/docker.sock
read_only: true read_only: true
environment:
ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
networks: networks:
- elk - elk
depends_on: depends_on:

6
kibana/.dockerignore Normal file
View File

@ -0,0 +1,6 @@
# Ignore Docker build files
Dockerfile
.dockerignore
# Ignore OS artifacts
**/.DS_Store

View File

@ -10,4 +10,4 @@ monitoring.ui.container.elasticsearch.enabled: true
## X-Pack security credentials ## X-Pack security credentials
# #
elasticsearch.username: kibana_system elasticsearch.username: kibana_system
elasticsearch.password: 'changeme' elasticsearch.password: ${KIBANA_SYSTEM_PASSWORD}

6
logstash/.dockerignore Normal file
View File

@ -0,0 +1,6 @@
# Ignore Docker build files
Dockerfile
.dockerignore
# Ignore OS artifacts
**/.DS_Store

View File

@ -13,7 +13,7 @@ input {
output { output {
elasticsearch { elasticsearch {
hosts => "elasticsearch:9200" hosts => "elasticsearch:9200"
user => "elastic" user => "logstash_internal"
password => "changeme" password => "${LOGSTASH_INTERNAL_PASSWORD}"
} }
} }

12
setup/.dockerignore Normal file
View File

@ -0,0 +1,12 @@
# Ignore Docker build files
Dockerfile
.dockerignore
# Ignore OS artifacts
**/.DS_Store
# Ignore Git files
.gitignore
# Ignore setup state
state/

1
setup/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
/state/

11
setup/Dockerfile Normal file
View File

@ -0,0 +1,11 @@
# Version of the Elastic stack, injected by Compose from the .env file.
ARG ELK_VERSION

# Reuse the Elasticsearch image so the setup container ships with the same
# tooling (bash, curl) at a matching version.
# https://www.docker.elastic.co/
FROM docker.elastic.co/elasticsearch/elasticsearch:${ELK_VERSION}

USER root

# Writable directory for the setup state marker (a '.done' file is written
# there by entrypoint.sh once setup has completed).
RUN mkdir /state && chown elasticsearch /state

# Drop privileges back; keep group 'root' as is conventional for Elastic images.
USER elasticsearch:root

# Copy the setup scripts and role definitions into the image root.
COPY . /

ENTRYPOINT ["/entrypoint.sh"]

85
setup/entrypoint.sh Executable file
View File

@ -0,0 +1,85 @@
#!/usr/bin/env bash

# One-shot initialization of the Elastic stack: waits for Elasticsearch,
# creates/updates the required roles, then creates the stack users or resets
# their passwords. A state file makes the whole script idempotent across
# container restarts.

set -eu
set -o pipefail

source "$(dirname "${BASH_SOURCE[0]}")/helpers.sh"

# --------------------------------------------------------
# Users declarations

# Map of user name -> password, taken from the environment.
declare -A users_passwords
users_passwords=(
	[logstash_internal]="${LOGSTASH_INTERNAL_PASSWORD:-}"
	[kibana_system]="${KIBANA_SYSTEM_PASSWORD:-}"
)

# Map of user name -> role assigned at creation time. Only users that need
# to be created (i.e. non built-in users) require an entry here.
declare -A users_roles
users_roles=(
	[logstash_internal]='logstash_writer'
)

# --------------------------------------------------------
# Roles declarations

# Map of role name -> JSON body file, looked up under ./roles/.
declare -A roles_files
roles_files=(
	[logstash_writer]='logstash_writer.json'
)

# --------------------------------------------------------

echo "-------- $(date) --------"

# Note: ${BASH_SOURCE[0]} is quoted inside the command substitution so paths
# containing spaces or glob characters are preserved, consistently with the
# other expansions in this script.
state_file="$(dirname "${BASH_SOURCE[0]}")/state/.done"

if [[ -e "$state_file" ]]; then
	log "State file exists at '${state_file}', skipping setup"
	exit 0
fi

log 'Waiting for availability of Elasticsearch'
wait_for_elasticsearch
sublog 'Elasticsearch is running'

# Roles first, so that they exist before any user referencing them is created.
for role in "${!roles_files[@]}"; do
	log "Role '$role'"

	declare body_file
	body_file="$(dirname "${BASH_SOURCE[0]}")/roles/${roles_files[$role]:-}"
	if [[ ! -f "${body_file:-}" ]]; then
		sublog "No role body found at '${body_file}', skipping"
		continue
	fi

	sublog 'Creating/updating'
	ensure_role "$role" "$(<"${body_file}")"
done

# Create users, or merely reset the password of users that already exist
# (built-in users always exist and only need a password).
for user in "${!users_passwords[@]}"; do
	log "User '$user'"

	if [[ -z "${users_passwords[$user]:-}" ]]; then
		sublog 'No password defined, skipping'
		continue
	fi

	declare -i user_exists=0
	user_exists="$(check_user_exists "$user")"
	if ((user_exists)); then
		sublog 'User exists, setting password'
		set_user_password "$user" "${users_passwords[$user]}"
	else
		if [[ -z "${users_roles[$user]:-}" ]]; then
			err ' No role defined, skipping creation'
			continue
		fi

		sublog 'User does not exist, creating'
		create_user "$user" "${users_passwords[$user]}" "${users_roles[$user]}"
	fi
done

# Record completion so subsequent container starts skip the whole setup.
mkdir -p "$(dirname "${state_file}")"
touch "$state_file"

182
setup/helpers.sh Executable file
View File

@ -0,0 +1,182 @@
#!/usr/bin/env bash
# Print an informational message, prefixed with '[+]'.
function log {
	printf '[+] %s\n' "$1"
}
# Print a sub-level message (no prefix).
function sublog {
	printf '%s\n' "$1"
}
# Print an error message, prefixed with '[x]', on standard error.
function err {
	printf '[x] %s\n' "$1" >&2
}
# Poll the 'elasticsearch' service until it responds with HTTP code 200.
#
# Globals read:
#   ELASTICSEARCH_HOST  host to contact (default: 'elasticsearch')
#   ELASTIC_PASSWORD    when non-empty, authenticate as the 'elastic' user
#
# Retries every 5 seconds, up to 60 times (~300s total). Returns 0 as soon as
# a 200 is received; otherwise prints the last response (headers included,
# because of curl's '-D-') and returns 1.
function wait_for_elasticsearch {
	local elasticsearch_host="${ELASTICSEARCH_HOST:-elasticsearch}"

	# -s: silent; -D-: dump response headers to stdout; -m15: 15s timeout per
	# attempt; -w '%{http_code}': append the 3-digit status code to the output.
	local -a args=( '-s' '-D-' '-m15' '-w' '%{http_code}' "http://${elasticsearch_host}:9200/" )

	if [[ -n "${ELASTIC_PASSWORD:-}" ]]; then
		args+=( '-u' "elastic:${ELASTIC_PASSWORD}" )
	fi

	local -i result=1
	local output

	# retry for max 300s (60*5s)
	for _ in $(seq 1 60); do
		# '|| true' keeps callers running under 'set -e' alive when curl
		# itself fails (e.g. connection refused while the node boots).
		output="$(curl "${args[@]}" || true)"
		# The last 3 characters of the output are the HTTP status code.
		if [[ "${output: -3}" -eq 200 ]]; then
			result=0
			break
		fi

		sleep 5
	done

	if ((result)); then
		# Strip the trailing status code and show the response for debugging.
		echo -e "\n${output::-3}"
	fi

	return $result
}
# Verify that the given Elasticsearch user exists.
#
# $1 - name of the user to look up.
#
# Globals read:
#   ELASTICSEARCH_HOST  host to contact (default: 'elasticsearch')
#   ELASTIC_PASSWORD    when non-empty, authenticate as the 'elastic' user
#
# On success (HTTP 200 or 404), prints '1' if the user exists and '0' if it
# does not, and returns 0. On any other outcome, prints the raw response and
# returns 1.
function check_user_exists {
	local username=$1

	local elasticsearch_host="${ELASTICSEARCH_HOST:-elasticsearch}"

	# -w '%{http_code}' appends the 3-digit status code to the output.
	local -a args=( '-s' '-D-' '-m15' '-w' '%{http_code}'
		"http://${elasticsearch_host}:9200/_security/user/${username}"
		)

	if [[ -n "${ELASTIC_PASSWORD:-}" ]]; then
		args+=( '-u' "elastic:${ELASTIC_PASSWORD}" )
	fi

	local -i result=1
	local -i exists=0
	local output

	# '|| true' so a transient curl failure is reported via the error path
	# below instead of aborting 'set -e' callers outright, consistently with
	# wait_for_elasticsearch.
	output="$(curl "${args[@]}" || true)"
	if [[ "${output: -3}" -eq 200 || "${output: -3}" -eq 404 ]]; then
		result=0
	fi
	if [[ "${output: -3}" -eq 200 ]]; then
		exists=1
	fi

	if ((result)); then
		echo -e "\n${output::-3}"
	else
		echo "$exists"
	fi

	return $result
}
# Set password of a given Elasticsearch user.
#
# $1 - name of the user.
# $2 - new password.
#
# Globals read:
#   ELASTICSEARCH_HOST  host to contact (default: 'elasticsearch')
#   ELASTIC_PASSWORD    when non-empty, authenticate as the 'elastic' user
#
# Returns 0 on HTTP 200; otherwise prints the raw response and returns 1.
function set_user_password {
	local username=$1
	local password=$2

	local elasticsearch_host="${ELASTICSEARCH_HOST:-elasticsearch}"

	# -w '%{http_code}' appends the 3-digit status code to the output.
	local -a args=( '-s' '-D-' '-m15' '-w' '%{http_code}'
		"http://${elasticsearch_host}:9200/_security/user/${username}/_password"
		'-X' 'POST'
		'-H' 'Content-Type: application/json'
		'-d' "{\"password\" : \"${password}\"}"
		)

	if [[ -n "${ELASTIC_PASSWORD:-}" ]]; then
		args+=( '-u' "elastic:${ELASTIC_PASSWORD}" )
	fi

	local -i result=1
	local output

	# '|| true' so a transient curl failure is reported via the error path
	# below instead of aborting 'set -e' callers outright, consistently with
	# wait_for_elasticsearch.
	output="$(curl "${args[@]}" || true)"
	if [[ "${output: -3}" -eq 200 ]]; then
		result=0
	fi

	if ((result)); then
		echo -e "\n${output::-3}\n"
	fi

	return $result
}
# Create the given Elasticsearch user.
#
# $1 - name of the new user.
# $2 - password of the new user.
# $3 - name of the (single) role to assign to the user.
#
# Globals read:
#   ELASTICSEARCH_HOST  host to contact (default: 'elasticsearch')
#   ELASTIC_PASSWORD    when non-empty, authenticate as the 'elastic' user
#
# Returns 0 on HTTP 200; otherwise prints the raw response and returns 1.
function create_user {
	local username=$1
	local password=$2
	local role=$3

	local elasticsearch_host="${ELASTICSEARCH_HOST:-elasticsearch}"

	# -w '%{http_code}' appends the 3-digit status code to the output.
	local -a args=( '-s' '-D-' '-m15' '-w' '%{http_code}'
		"http://${elasticsearch_host}:9200/_security/user/${username}"
		'-X' 'POST'
		'-H' 'Content-Type: application/json'
		'-d' "{\"password\":\"${password}\",\"roles\":[\"${role}\"]}"
		)

	if [[ -n "${ELASTIC_PASSWORD:-}" ]]; then
		args+=( '-u' "elastic:${ELASTIC_PASSWORD}" )
	fi

	local -i result=1
	local output

	# '|| true' so a transient curl failure is reported via the error path
	# below instead of aborting 'set -e' callers outright, consistently with
	# wait_for_elasticsearch.
	output="$(curl "${args[@]}" || true)"
	if [[ "${output: -3}" -eq 200 ]]; then
		result=0
	fi

	if ((result)); then
		echo -e "\n${output::-3}\n"
	fi

	return $result
}
# Ensure that the given Elasticsearch role is up-to-date, create it if required.
#
# $1 - name of the role.
# $2 - JSON role definition, sent verbatim as the request body.
#
# Globals read:
#   ELASTICSEARCH_HOST  host to contact (default: 'elasticsearch')
#   ELASTIC_PASSWORD    when non-empty, authenticate as the 'elastic' user
#
# Returns 0 on HTTP 200; otherwise prints the raw response and returns 1.
function ensure_role {
	local name=$1
	local body=$2

	local elasticsearch_host="${ELASTICSEARCH_HOST:-elasticsearch}"

	# -w '%{http_code}' appends the 3-digit status code to the output.
	local -a args=( '-s' '-D-' '-m15' '-w' '%{http_code}'
		"http://${elasticsearch_host}:9200/_security/role/${name}"
		'-X' 'POST'
		'-H' 'Content-Type: application/json'
		'-d' "$body"
		)

	if [[ -n "${ELASTIC_PASSWORD:-}" ]]; then
		args+=( '-u' "elastic:${ELASTIC_PASSWORD}" )
	fi

	local -i result=1
	local output

	# '|| true' so a transient curl failure is reported via the error path
	# below instead of aborting 'set -e' callers outright, consistently with
	# wait_for_elasticsearch.
	output="$(curl "${args[@]}" || true)"
	if [[ "${output: -3}" -eq 200 ]]; then
		result=0
	fi

	if ((result)); then
		echo -e "\n${output::-3}\n"
	fi

	return $result
}

View File

@ -0,0 +1,22 @@
{
"cluster": [
"manage_index_templates",
"monitor",
"manage_ilm"
],
"indices": [
{
"names": [
"logs-generic-default",
"logstash-*"
],
"privileges": [
"write",
"create",
"create_index",
"manage",
"manage_ilm"
]
}
]
}