[DOCS] Update Docker Compose installation with simplified configuration (#81835)

* [DOCS] Update Docker Compose installation with simplified configuration

* Change heading to fix link issue
Adam Locke 2021-12-16 19:42:41 -05:00 committed by GitHub
parent 350fe2d21f
commit bf60ce68c6
5 changed files with 283 additions and 254 deletions

.env

@@ -1,4 +1,29 @@
# Password for the 'elastic' user (at least 6 characters)
ELASTIC_PASSWORD=
# Password for the 'kibana_system' user (at least 6 characters)
KIBANA_PASSWORD=
# Version of Elastic products
STACK_VERSION={version}
# Set the cluster name
CLUSTER_NAME=docker-cluster
# Set to 'basic' or 'trial' to automatically start the 30-day trial
LICENSE=basic
#LICENSE=trial
# Port to expose Elasticsearch HTTP API to the host
ES_PORT=9200
#ES_PORT=127.0.0.1:9200
# Port to expose Kibana to the host
KIBANA_PORT=5601
#KIBANA_PORT=80
# Increase or decrease based on the available host memory (in bytes)
MEM_LIMIT=1073741824
# Project namespace (defaults to the current folder name if not set)
#COMPOSE_PROJECT_NAME=myproject

create-certs.yml (deleted)

@@ -1,19 +0,0 @@
version: "2.2"
services:
create_certs:
container_name: create_certs
image: docker.elastic.co/elasticsearch/elasticsearch:{version}
command: >
bash -c '
if [[ ! -f /certs/bundle.zip ]]; then
bin/elasticsearch-certutil cert --silent --pem --in config/certificates/instances.yml -out /certs/bundle.zip;
unzip /certs/bundle.zip -d /certs;
fi;
chown -R 1000:0 /certs
'
user: "0"
working_dir: /usr/share/elasticsearch
volumes: ["certs:/certs", ".:/usr/share/elasticsearch/config/certificates"]
volumes: { "certs" }

docker-compose.yml

@@ -1,81 +1,230 @@
version: "2.2"
services:
setup:
image: docker.elastic.co/elasticsearch/elasticsearch:{version}
volumes:
- certs:/usr/share/elasticsearch/config/certs
user: "0"
command: >
bash -c '
if [ x${ELASTIC_PASSWORD} == x ]; then
echo "Set the ELASTIC_PASSWORD environment variable in the .env file";
exit 1;
elif [ x${KIBANA_PASSWORD} == x ]; then
echo "Set the KIBANA_PASSWORD environment variable in the .env file";
exit 1;
fi;
if [ ! -f certs/ca.zip ]; then
echo "Creating CA";
bin/elasticsearch-certutil ca --silent --pem -out config/certs/ca.zip;
unzip config/certs/ca.zip -d config/certs;
fi;
if [ ! -f certs/certs.zip ]; then
echo "Creating certs";
echo -ne \
"instances:\n"\
" - name: es01\n"\
" dns:\n"\
" - es01\n"\
" - localhost\n"\
" ip:\n"\
" - 127.0.0.1\n"\
" - name: es02\n"\
" dns:\n"\
" - es02\n"\
" - localhost\n"\
" ip:\n"\
" - 127.0.0.1\n"\
" - name: es03\n"\
" dns:\n"\
" - es03\n"\
" - localhost\n"\
" ip:\n"\
" - 127.0.0.1\n"\
> config/certs/instances.yml;
bin/elasticsearch-certutil cert --silent --pem -out config/certs/certs.zip --in config/certs/instances.yml --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key;
unzip config/certs/certs.zip -d config/certs;
fi;
echo "Setting file permissions"
chown -R root:root config/certs;
find . -type d -exec chmod 750 \{\} \;;
find . -type f -exec chmod 640 \{\} \;;
echo "Waiting for Elasticsearch availability";
until curl -s --cacert config/certs/ca/ca.crt https://es01:9200 | grep -q "missing authentication credentials"; do sleep 30; done;
echo "Setting kibana_system password";
until curl -s -X POST --cacert config/certs/ca/ca.crt -u elastic:${ELASTIC_PASSWORD} -H "Content-Type: application/json" https://es01:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 10; done;
echo "All done!";
'
healthcheck:
test: ["CMD-SHELL", "[ -f config/certs/es01/es01.crt ]"]
interval: 1s
timeout: 5s
retries: 120
es01:
container_name: es01
depends_on:
setup:
condition: service_healthy
image: {docker-repo}:{version}
volumes:
- certs:/usr/share/elasticsearch/config/certs
- esdata01:/usr/share/elasticsearch/data
ports:
- ${ES_PORT}:9200
environment:
- node.name=es01
- cluster.name=${CLUSTER_NAME}
- cluster.initial_master_nodes=es01,es02,es03
- discovery.seed_hosts=es02,es03
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
- bootstrap.memory_lock=true
- xpack.security.enabled=true
- xpack.security.http.ssl.enabled=true
- xpack.security.http.ssl.key=certs/es01/es01.key
- xpack.security.http.ssl.certificate=certs/es01/es01.crt
- xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.security.http.ssl.verification_mode=certificate
- xpack.security.transport.ssl.enabled=true
- xpack.security.transport.ssl.key=certs/es01/es01.key
- xpack.security.transport.ssl.certificate=certs/es01/es01.crt
- xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.security.transport.ssl.verification_mode=certificate
- xpack.license.self_generated.type=${LICENSE}
mem_limit: ${MEM_LIMIT}
ulimits:
memlock:
soft: -1
hard: -1
healthcheck:
test:
[
"CMD-SHELL",
"curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'",
]
interval: 10s
timeout: 10s
retries: 120
es02:
container_name: es02
depends_on:
- es01
image: {docker-repo}:{version}
volumes:
- certs:/usr/share/elasticsearch/config/certs
- esdata02:/usr/share/elasticsearch/data
environment:
- node.name=es02
- cluster.name=${CLUSTER_NAME}
- cluster.initial_master_nodes=es01,es02,es03
- discovery.seed_hosts=es01,es03
- bootstrap.memory_lock=true
- xpack.security.enabled=true
- xpack.security.http.ssl.enabled=true
- xpack.security.http.ssl.key=certs/es02/es02.key
- xpack.security.http.ssl.certificate=certs/es02/es02.crt
- xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.security.http.ssl.verification_mode=certificate
- xpack.security.transport.ssl.enabled=true
- xpack.security.transport.ssl.key=certs/es02/es02.key
- xpack.security.transport.ssl.certificate=certs/es02/es02.crt
- xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.security.transport.ssl.verification_mode=certificate
- xpack.license.self_generated.type=${LICENSE}
mem_limit: ${MEM_LIMIT}
ulimits:
memlock:
soft: -1
hard: -1
healthcheck:
test:
[
"CMD-SHELL",
"curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'",
]
interval: 10s
timeout: 10s
retries: 120
es03:
container_name: es03
depends_on:
- es02
image: {docker-repo}:{version}
volumes:
- certs:/usr/share/elasticsearch/config/certs
- esdata03:/usr/share/elasticsearch/data
environment:
- node.name=es03
- cluster.name=${CLUSTER_NAME}
- cluster.initial_master_nodes=es01,es02,es03
- discovery.seed_hosts=es01,es02
- bootstrap.memory_lock=true
- xpack.security.enabled=true
- xpack.security.http.ssl.enabled=true
- xpack.security.http.ssl.key=certs/es03/es03.key
- xpack.security.http.ssl.certificate=certs/es03/es03.crt
- xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.security.http.ssl.verification_mode=certificate
- xpack.security.transport.ssl.enabled=true
- xpack.security.transport.ssl.key=certs/es03/es03.key
- xpack.security.transport.ssl.certificate=certs/es03/es03.crt
- xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.security.transport.ssl.verification_mode=certificate
- xpack.license.self_generated.type=${LICENSE}
mem_limit: ${MEM_LIMIT}
ulimits:
memlock:
soft: -1
hard: -1
healthcheck:
test:
[
"CMD-SHELL",
"curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'",
]
interval: 10s
timeout: 10s
retries: 120
kibana:
depends_on:
es01:
condition: service_healthy
es02:
condition: service_healthy
es03:
condition: service_healthy
image: docker.elastic.co/kibana/kibana:{version}
volumes:
- certs:/usr/share/kibana/config/certs
- kibanadata:/usr/share/kibana/data
ports:
- ${KIBANA_PORT}:5601
environment:
- SERVERNAME=kibana
- ELASTICSEARCH_HOSTS=https://es01:9200
- ELASTICSEARCH_USERNAME=kibana_system
- ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD}
- ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=config/certs/ca/ca.crt
mem_limit: ${MEM_LIMIT}
healthcheck:
test:
[
"CMD-SHELL",
"curl -s -I http://localhost:5601 | grep -q 'HTTP/1.1 302 Found'",
]
interval: 10s
timeout: 10s
retries: 120
volumes: { "data01", "data02", "data03", "certs" }
volumes:
certs:
driver: local
esdata01:
driver: local
esdata02:
driver: local
esdata03:
driver: local
kibanadata:
driver: local

docker.asciidoc

@@ -204,205 +204,105 @@ the nodes in your cluster.
[[docker-compose-file]]
==== Start a multi-node cluster with Docker Compose
To get a multi-node {es} cluster and {kib} up and running in Docker with
security enabled, you can use Docker Compose.

This configuration provides a simple method of starting a secured cluster that
you can use for development before building a distributed deployment with
multiple hosts.
===== Prerequisites
Install the appropriate https://docs.docker.com/get-docker/[Docker application]
for your operating system.
If you're running on Linux, install https://docs.docker.com/compose/install/[Docker Compose].
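As a quick, optional sanity check (assuming both tools are on your `PATH`), you
can confirm the installed versions before continuing:

["source","sh"]
----
docker --version
docker-compose --version
----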
[NOTE]
====
Make sure that Docker is allotted at least 4GB of memory. In Docker Desktop,
you configure resource usage on the Advanced tab in Preferences (macOS) or
Settings (Windows).
====
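If you want to check how much memory is currently available to the Docker
daemon from the command line, one option is `docker info` (the value is
reported in bytes):

["source","sh"]
----
docker info --format '{{.MemTotal}}'
----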
===== Prepare the environment
Create the following configuration files in a new, empty directory. These files
are also available from the
https://github.com/elastic/elasticsearch/tree/master/docs/reference/setup/install[elasticsearch]
repository on GitHub.
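For example, assuming a working directory named `elastic-docker` (the name is
arbitrary):

["source","sh"]
----
mkdir elastic-docker && cd elastic-docker
touch .env docker-compose.yml
----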
ifeval::["{release-state}"=="unreleased"]

NOTE: Version {version} of {es} has not been released,
so the sample Docker Compose and configuration files are not yet available for
this version. See the {stack-gs-current}/get-started-docker.html[current version]
for the latest sample files.

endif::[]
[discrete]
[[docker-env-file]]
===== `.env`
The `.env` file sets environment variables that are used when you run the
`docker-compose.yml` configuration file. Ensure that you specify a strong
password for the `elastic` and `kibana_system` users with the
`ELASTIC_PASSWORD` and `KIBANA_PASSWORD` variables. These variables are
referenced by the `docker-compose.yml` file.
ifeval::["{release-state}"!="unreleased"]

["source","txt",subs="attributes"]
----
include::.env[]
----

endif::[]
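If you want a quick way to generate strong values for these variables, one
option (an example, not a requirement) is `openssl`:

["source","sh"]
----
openssl rand -base64 16
----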
[discrete]
[[docker-file]]
===== `docker-compose.yml`
This `docker-compose.yml` file creates a three-node secure {es} cluster with
authentication and network encryption enabled, and a {kib} instance securely
connected to it.
.Exposing ports
****
This configuration exposes port `9200` on all network interfaces. Because
of how Docker handles ports, a port that isn't bound to `localhost` leaves your
{es} cluster publicly accessible, potentially ignoring any firewall settings.
If you don't want to expose port `9200` to external hosts, set the value for
`ES_PORT` in the `.env` file to something like `127.0.0.1:9200`. {es} will
then only be accessible from the host machine itself.
****
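Once the cluster is running, `docker port` shows which host interface and port
the container's port `9200` is published on (using the `es01` container name
from this file):

["source","sh"]
----
docker port es01 9200
----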
ifeval::["{release-state}"!="unreleased"]

[source,yaml,subs="attributes"]
----
include::docker-compose.yml[]
----

endif::[]
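To check that the file parses and that the variables from `.env` are
substituted the way you expect, you can optionally render the effective
configuration:

["source","sh"]
----
docker-compose config
----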
===== Start your cluster with security enabled and configured
. Modify the `.env` file and enter strong password values for both the
`ELASTIC_PASSWORD` and `KIBANA_PASSWORD` variables.
+
NOTE: You must use the `ELASTIC_PASSWORD` value for further interactions with
the cluster. The `KIBANA_PASSWORD` value is only used internally when
configuring {kib}.
. Create and start the three-node {es} cluster and {kib} instance:
+
["source","sh"]
----
docker-compose up -d
----
. When the deployment has started, open a browser and navigate to http://localhost:5601[http://localhost:5601] to
access {kib}, where you can load sample data and interact with your cluster.
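+
If you prefer a command-line check that {kib} is responding, the following
mirrors the container's own healthcheck (optional):
+
["source","sh"]
----
curl -s -I http://localhost:5601
----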
. Submit a `_cat/nodes` request to check that the nodes are up and running:
+
[source,sh]
----
curl -X GET "https://localhost:9200/_cat/nodes?v=true&pretty"
----
// NOTCONSOLE
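+
Because the cluster has TLS and authentication enabled, the request needs the
CA certificate and the `elastic` credentials. One way to run the same check
without copying certificates to the host is to execute `curl` inside the
`es01` container (a sketch, not the only approach):
+
["source","sh"]
----
docker exec es01 /bin/bash -c 'curl -s --cacert config/certs/ca/ca.crt -u "elastic:$ELASTIC_PASSWORD" "https://localhost:9200/_cat/nodes?v=true"'
----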
Log messages go to the console and are handled by the configured Docker logging
driver. By default, you can access logs with `docker logs`. If you prefer that
the {es} container write logs to disk, set the `ES_LOG_STYLE` environment
variable to `file`. This causes {es} to use the same logging configuration as
other {es} distribution formats.
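For example, using the `es01` container and service names from the compose
file:

["source","sh"]
----
docker logs es01
docker-compose logs -f es01
----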
If you need to generate a new password for the `elastic` user or any of the
built-in users, use the `elasticsearch-reset-password` tool:
WARNING: Windows users not running PowerShell must remove all backslashes (`\`)
and join lines in the following command.
["source","sh"]
----
docker exec es01 /bin/bash -c "bin/elasticsearch-reset-password \
auto --batch \
--url https://localhost:9200"
----
===== Stop and remove the deployment
To stop the cluster, run `docker-compose down`. The data in the Docker volumes
is preserved and loaded when you restart the cluster with `docker-compose up`.
@@ -413,19 +313,14 @@ docker-compose down
----
--
To **delete** the network, containers, and volumes when you stop the cluster,
specify the `-v` option:
["source","sh"]
----
docker-compose down -v
----
===== Next steps
You now have a test {es} environment set up. Before you start

instances.yml (deleted)

@@ -1,21 +0,0 @@
instances:
- name: es01
dns:
- es01
- localhost
ip:
- 127.0.0.1
- name: es02
dns:
- es02
- localhost
ip:
- 127.0.0.1
- name: es03
dns:
- es03
- localhost
ip:
- 127.0.0.1