Compare commits


1 Commit

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| Geoff Bourne | 205c57c3e3 | [es] upgrade 2.4.x to 2.4.3 | 2016-12-28 13:38:44 -06:00 |
14 changed files with 123 additions and 326 deletions

View File

@@ -2,9 +2,9 @@ FROM java:openjdk-8u72-jdk
MAINTAINER itzg
ENV CASSANDRA_VERSION 2.2.8
ENV CASSANDRA_VERSION 2.2.7
ADD http://apache.mirrors.pair.com/cassandra/$CASSANDRA_VERSION/apache-cassandra-$CASSANDRA_VERSION-bin.tar.gz /tmp/apache-cassandra.tgz
RUN wget -qO /tmp/apache-cassandra.tgz http://mirrors.ibiblio.org/apache/cassandra/$CASSANDRA_VERSION/apache-cassandra-$CASSANDRA_VERSION-bin.tar.gz
RUN tar -C /opt -zxf /tmp/apache-cassandra.tgz && \
rm /tmp/apache-cassandra.tgz

View File

@@ -1,21 +1,21 @@
FROM openjdk:8u111-jre-alpine
FROM java:8u111-jre-alpine
MAINTAINER itzg
RUN apk -U add bash
ENV ES_VERSION=2.4.3
ENV ES_VERSION=5.2.0
ADD https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-$ES_VERSION.tar.gz /tmp/es.tgz
ADD https://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/distribution/tar/elasticsearch/$ES_VERSION/elasticsearch-$ES_VERSION.tar.gz /tmp/es.tgz
RUN cd /usr/share && \
tar xf /tmp/es.tgz && \
rm /tmp/es.tgz
COPY start /start
EXPOSE 9200 9300
ENV ES_HOME=/usr/share/elasticsearch-$ES_VERSION \
DEFAULT_ES_USER=elasticsearch \
ES_JAVA_OPTS="-Xms1g -Xmx1g"
OPTS=-Dnetwork.host=_non_loopback_ \
DEFAULT_ES_USER=elasticsearch
RUN adduser -S -s /bin/sh $DEFAULT_ES_USER
@@ -23,8 +23,4 @@ VOLUME ["/data","/conf"]
WORKDIR $ES_HOME
COPY java.policy /usr/lib/jvm/java-1.8-openjdk/jre/lib/security/
COPY start /start
COPY log4j2.properties $ES_HOME/config/
CMD ["/start"]

View File

@@ -1,10 +1,5 @@
This Docker image provides an easily configurable Elasticsearch node. Via port mappings, it is easy to create an arbitrarily sized cluster of nodes. As long as the versions match, you can mix-and-match "real" Elasticsearch nodes with containerized ones.
# NOTE for use on Linux hosts
Elasticsearch 5.x requires that the virtual memory mmap count is set sufficiently for stable,
production use. [Refer to this guide for more information](https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html).
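For reference, the kernel setting mentioned above is typically raised on the Docker host along these lines (the persistence file may vary by distribution):

```
# apply immediately, until the next reboot
sudo sysctl -w vm.max_map_count=262144

# persist across reboots
echo 'vm.max_map_count=262144' | sudo tee -a /etc/sysctl.conf
```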
# Basic Usage
To start an Elasticsearch data node that listens on the standard ports on your host's network interface:
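The exact command sits outside this hunk; a typical invocation, using the image name that appears elsewhere in this README, would look like:

```
docker run -d -p 9200:9200 -p 9300:9300 itzg/elasticsearch
```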
@@ -34,9 +29,9 @@ Where `DOCKERHOST` would be the actual hostname of your host running Docker.
To run a multi-node cluster (3-node in this example) on a single Docker machine use:
docker run -d --name es0 -p 9200:9200 itzg/elasticsearch
docker run -d --name es1 --link es0 -e UNICAST_HOSTS=es0 itzg/elasticsearch
docker run -d --name es2 --link es0 -e UNICAST_HOSTS=es0 itzg/elasticsearch
docker run -d --name es0 -p 9200:9200 es
docker run -d --name es1 --link es0 -e UNICAST_HOSTS=es0 es
docker run -d --name es2 --link es0 -e UNICAST_HOSTS=es0 es
and then check the cluster health, such as http://192.168.99.100:9200/_cluster/health?pretty
@@ -54,24 +49,6 @@ and then check the cluster health, such as http://192.168.99.100:9200/_cluster/h
"unassigned_shards" : 0
}
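The same check can be scripted from a shell, assuming the port mapping shown above and `DOCKERHOST` as your Docker host's address:

```
curl -s "http://DOCKERHOST:9200/_cluster/health?pretty"
```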
If you have a Docker Swarm cluster already initialized you can download this
[docker-compose.yml](https://raw.githubusercontent.com/itzg/dockerfiles/master/elasticsearch/docker-compose.yml)
and deploy a cluster using:
docker stack deploy -c docker-compose.yml es
With a `docker service ls` you can confirm 1 master, 2 data, and 1 gateway nodes are running:
```
ID NAME MODE REPLICAS IMAGE
9nwnno8hbqgk es_kibana replicated 1/1 kibana:latest
f5x7nipwmvkr es_gateway replicated 1/1 es
om8rly2yxylw es_data replicated 2/2 es
tdvfilj370yn es_master replicated 1/1 es
```
As you can see, there is also a Kibana instance included and available at port 5601.
# Configuration Summary
## Ports
@@ -205,28 +182,18 @@ Using the Docker Compose file above, a value of `2` is appropriate when scaling
docker-compose scale master=3
## Multiple Network Binding, such as Swarm Mode
## Auto transport/http discovery with Swarm Mode
When using Docker Swarm mode the container is presented with multiple ethernet
devices. By default, all global, routable IP addresses are configured for
Elasticsearch to use as `network.host`.
When using Docker Swarm mode (starting with 1.12), the overlay and ingress network interfaces are assigned
multiple IP addresses. As a result, this confuses the transport publish logic even when using
the special value `_eth0_`.
That discovery can be overridden by providing a specific ethernet device name
to `DISCOVER_TRANSPORT_IP` and/or `DISCOVER_HTTP_IP`, such as
To resolve this, add
-e DISCOVER_TRANSPORT_IP=eth0
-e DISCOVER_HTTP_IP=eth2
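As a sketch, a container that pins both interfaces explicitly (the interface names are illustrative and depend on which networks are attached) might be started with:

```
docker run -d \
  -e DISCOVER_TRANSPORT_IP=eth0 \
  -e DISCOVER_HTTP_IP=eth2 \
  itzg/elasticsearch
```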
## Heap size and other JVM options
replacing `eth0` with another interface within the container, if needed.
By default this image will run Elasticsearch with a Java heap size of 1 GB. If that value
or any other JVM options need to be adjusted, then replace the `ES_JAVA_OPTS`
environment variable.
The same can be done for publish/binding of the http module by adding:
For example, this would allow for the use of 16 GB of heap:
-e ES_JAVA_OPTS="-Xms16g -Xmx16g"
Refer to [this page](https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html)
for more information about why both the minimum and maximum sizes were set to
the same value.
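For instance, a single node with the larger heap might be started like this (image name as used earlier in this README):

```
docker run -d -p 9200:9200 -e ES_JAVA_OPTS="-Xms16g -Xmx16g" itzg/elasticsearch
```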
-e DISCOVERY_HTTP_IP=eth2

View File

@@ -1,37 +0,0 @@
version: '3'
services:
master:
image: itzg/elasticsearch
environment:
TYPE: MASTER
UNICAST_HOSTS: master
MIN_MASTERS: 1
deploy:
replicas: 1
update_config:
parallelism: 1
data:
image: itzg/elasticsearch
environment:
TYPE: DATA
UNICAST_HOSTS: master
deploy:
replicas: 2
update_config:
parallelism: 1
delay: 60s
gateway:
image: itzg/elasticsearch
ports:
- "9200:9200"
- "9300:9300"
environment:
TYPE: GATEWAY
UNICAST_HOSTS: master
kibana:
image: kibana
ports:
- "5601:5601"
environment:
ELASTICSEARCH_URL: http://gateway:9200

View File

@@ -1,6 +0,0 @@
grant {
// JMX Java Management eXtensions
permission javax.management.MBeanTrustPermission "register";
permission javax.management.MBeanServerPermission "createMBeanServer";
permission javax.management.MBeanPermission "-#-[-]", "queryNames";
};

View File

@@ -1,74 +0,0 @@
status = error
# log action execution errors for easier debugging
logger.action.name = org.elasticsearch.action
logger.action.level = debug
appender.console.type = Console
appender.console.name = console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n
appender.rolling.type = RollingFile
appender.rolling.name = rolling
appender.rolling.fileName = ${sys:es.logs}.log
appender.rolling.layout.type = PatternLayout
appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.10000m%n
appender.rolling.filePattern = ${sys:es.logs}-%d{yyyy-MM-dd}.log
appender.rolling.policies.type = Policies
appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.rolling.policies.time.interval = 1
appender.rolling.policies.time.modulate = true
rootLogger.level = info
rootLogger.appenderRef.console.ref = console
#rootLogger.appenderRef.rolling.ref = rolling
appender.deprecation_rolling.type = RollingFile
appender.deprecation_rolling.name = deprecation_rolling
appender.deprecation_rolling.fileName = ${sys:es.logs}_deprecation.log
appender.deprecation_rolling.layout.type = PatternLayout
appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.10000m%n
appender.deprecation_rolling.filePattern = ${sys:es.logs}_deprecation-%i.log.gz
appender.deprecation_rolling.policies.type = Policies
appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.deprecation_rolling.policies.size.size = 1GB
appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
appender.deprecation_rolling.strategy.max = 4
logger.deprecation.name = org.elasticsearch.deprecation
logger.deprecation.level = warn
#logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
logger.deprecation.additivity = false
appender.index_search_slowlog_rolling.type = RollingFile
appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
appender.index_search_slowlog_rolling.fileName = ${sys:es.logs}_index_search_slowlog.log
appender.index_search_slowlog_rolling.layout.type = PatternLayout
appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.10000m%n
appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs}_index_search_slowlog-%d{yyyy-MM-dd}.log
appender.index_search_slowlog_rolling.policies.type = Policies
appender.index_search_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.index_search_slowlog_rolling.policies.time.interval = 1
appender.index_search_slowlog_rolling.policies.time.modulate = true
logger.index_search_slowlog_rolling.name = index.search.slowlog
logger.index_search_slowlog_rolling.level = trace
logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = console
logger.index_search_slowlog_rolling.additivity = false
appender.index_indexing_slowlog_rolling.type = RollingFile
appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs}_index_indexing_slowlog.log
appender.index_indexing_slowlog_rolling.layout.type = PatternLayout
appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.10000m%n
appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs}_index_indexing_slowlog-%d{yyyy-MM-dd}.log
appender.index_indexing_slowlog_rolling.policies.type = Policies
appender.index_indexing_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.index_indexing_slowlog_rolling.policies.time.interval = 1
appender.index_indexing_slowlog_rolling.policies.time.modulate = true
logger.index_indexing_slowlog.name = index.indexing.slowlog.index
logger.index_indexing_slowlog.level = trace
logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = console
logger.index_indexing_slowlog.additivity = false

View File

@@ -1,103 +1,5 @@
#!/bin/sh
pre_checks() {
mmc=$(sysctl vm.max_map_count|sed 's/.*= //')
if [[ $mmc -lt 262144 ]]; then
echo "
ERROR: As of 5.0.0 Elasticsearch requires increasing mmap counts.
Refer to https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html
"
exit 1
fi
}
discoverIpFromLink() {
dev=$1
mode=$2
ip=`ipaddr show dev $dev scope global|awk '$1 == "inet" { if (!match($2,"/32")) { gsub("/.*","",$2) ; print $2 } }'`
echo "Discovered $mode address $ip for $dev"
OPTS="$OPTS -E $mode.host=$ip"
}
discoverAllGlobalIps() {
ips=`ipaddr show scope global|awk '$1 == "inet" { if (!match($2,"/32")) { gsub("/.*","",$2) ; addrs[length(addrs)] = $2 } } END { for (i in addrs) { if (i>0) printf "," ; printf addrs[i] } }'`
OPTS="$OPTS -E network.host=$ips"
}
setup_clustering() {
if [ -n "$CLUSTER" ]; then
OPTS="$OPTS -E cluster.name=$CLUSTER"
if [ -n "$CLUSTER_FROM" ]; then
if [ -d /data/$CLUSTER_FROM -a ! -d /data/$CLUSTER ]; then
echo "Performing cluster data migration from $CLUSTER_FROM to $CLUSTER"
mv /data/$CLUSTER_FROM /data/$CLUSTER
fi
fi
fi
if [ -n "$NODE_NAME" ]; then
OPTS="$OPTS -E node.name=$NODE_NAME"
fi
if [ -n "$MULTICAST" ]; then
OPTS="$OPTS -E discovery.zen.ping.multicast.enabled=$MULTICAST"
fi
if [ -n "$UNICAST_HOSTS" ]; then
OPTS="$OPTS -E discovery.zen.ping.unicast.hosts=$UNICAST_HOSTS"
fi
if [ -n "$PUBLISH_AS" ]; then
OPTS="$OPTS -E transport.publish_host=$(echo $PUBLISH_AS | awk -F: '{print $1}')"
OPTS="$OPTS -E transport.publish_port=$(echo $PUBLISH_AS | awk -F: '{if ($2) print $2; else print 9300}')"
fi
if [ -n "$MIN_MASTERS" ]; then
OPTS="$OPTS -E discovery.zen.minimum_master_nodes=$MIN_MASTERS"
fi
}
install_plugins() {
if [ -n "$PLUGINS" ]; then
for p in $(echo $PLUGINS | awk -v RS=, '{print}')
do
echo "Installing the plugin $p"
$ES_HOME/bin/elasticsearch-plugin install $p
done
else
mkdir -p $ES_HOME/plugins
fi
}
setup_personality() {
if [ -n "$TYPE" ]; then
case $TYPE in
MASTER)
OPTS="$OPTS -E node.master=true -E node.data=false"
;;
GATEWAY)
OPTS="$OPTS -E node.master=false -E node.data=false"
;;
DATA|NON_MASTER)
OPTS="$OPTS -E node.master=false -E node.data=true"
;;
*)
echo "Unknown node type. Please use MASTER|GATEWAY|DATA|NON_MASTER"
exit 1
esac
fi
}
pre_checks
if [ -f /conf/env ]; then
. /conf/env
fi
@@ -106,28 +8,95 @@ if [ ! -e /conf/elasticsearch.* ]; then
cp $ES_HOME/config/elasticsearch.yml /conf
fi
if [ ! -e /conf/log4j2.properties ]; then
cp $ES_HOME/config/log4j2.properties /conf
if [ ! -e /conf/logging.* ]; then
cp $ES_HOME/config/logging.yml /conf
fi
OPTS="$OPTS \
-E path.conf=/conf \
-E path.data=/data \
-E path.logs=/data \
-E transport.tcp.port=9300 \
-E http.port=9200"
OPTS="$OPTS -Des.path.conf=/conf \
-Des.path.data=/data \
-Des.path.logs=/data \
-Des.transport.tcp.port=9300 \
-Des.http.port=9200"
discoverAllGlobalIps
if [ "${DISCOVER_TRANSPORT_IP}" != "" ]; then
discoverIpFromLink() {
dev=$1
mode=$2
ip=`ipaddr show dev $dev scope global|awk '$1 == "inet" { if (!match($2,"/32")) { gsub("/.*","",$2) ; print $2 } }'`
echo "Discovered $mode address $ip for $dev"
OPTS="$OPTS -Des.$mode.host=$ip"
}
if [ "$DISCOVER_TRANSPORT_IP" != "" ]; then
discoverIpFromLink $DISCOVER_TRANSPORT_IP transport
fi
if [ "${DISCOVER_HTTP_IP}" != "" ]; then
if [ "$DISCOVER_HTTP_IP" != "" ]; then
discoverIpFromLink $DISCOVER_HTTP_IP http
fi
setup_personality
setup_clustering
install_plugins
if [ -n "$CLUSTER" ]; then
OPTS="$OPTS -Des.cluster.name=$CLUSTER"
if [ -n "$CLUSTER_FROM" ]; then
if [ -d /data/$CLUSTER_FROM -a ! -d /data/$CLUSTER ]; then
echo "Performing cluster data migration from $CLUSTER_FROM to $CLUSTER"
mv /data/$CLUSTER_FROM /data/$CLUSTER
fi
fi
fi
if [ -n "$NODE_NAME" ]; then
OPTS="$OPTS -Des.node.name=$NODE_NAME"
fi
if [ -n "$MULTICAST" ]; then
OPTS="$OPTS -Des.discovery.zen.ping.multicast.enabled=$MULTICAST"
fi
if [ -n "$UNICAST_HOSTS" ]; then
OPTS="$OPTS -Des.discovery.zen.ping.unicast.hosts=$UNICAST_HOSTS"
fi
if [ -n "$PUBLISH_AS" ]; then
OPTS="$OPTS -Des.transport.publish_host=$(echo $PUBLISH_AS | awk -F: '{print $1}')"
OPTS="$OPTS -Des.transport.publish_port=$(echo $PUBLISH_AS | awk -F: '{if ($2) print $2; else print 9300}')"
fi
if [ -n "$TYPE" ]; then
case $TYPE in
MASTER)
OPTS="$OPTS --node.master=true --node.data=false"
;;
GATEWAY)
OPTS="$OPTS --node.master=false --node.data=false"
;;
DATA|NON_MASTER)
OPTS="$OPTS --node.master=false --node.data=true"
;;
*)
echo "Unknown node type. Please use MASTER|GATEWAY|DATA|NON_MASTER"
exit 1
esac
fi
if [ -n "$MIN_MASTERS" ]; then
OPTS="$OPTS --discovery.zen.minimum_master_nodes=$MIN_MASTERS"
fi
mkdir -p /conf/plugins
OPTS="$OPTS --path.plugins=/conf/plugins"
if [ -n "$PLUGINS" ]; then
PLUGIN_OPTS="-Des.path.conf=/conf -Des.path.plugins=/conf/plugins"
for p in $(echo $PLUGINS | awk -v RS=, '{print}')
do
echo "Installing the plugin $p"
$ES_HOME/bin/plugin $PLUGIN_OPTS install $p -t 1m -b
done
else
mkdir -p /conf/plugins
fi
mkdir -p /conf/scripts
@@ -135,7 +104,8 @@ echo "Starting Elasticsearch with the options $OPTS"
CMD="$ES_HOME/bin/elasticsearch $OPTS"
if [ `id -u` = 0 ]; then
echo "Running as non-root..."
chown -R $DEFAULT_ES_USER /data /conf
chown -R $DEFAULT_ES_USER /data
set -x
su -c "$CMD" $DEFAULT_ES_USER
else
$CMD

View File

@@ -1,17 +1,12 @@
FROM java:openjdk-8u102-jdk
FROM java:openjdk-8u72-jdk
MAINTAINER itzg
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y \
graphviz \
&& apt-get clean
ADD download-and-start.sh /download-and-start
ENV JENKINS_HOME=/data
VOLUME ["/data", "/root", "/opt/jenkins"]
EXPOSE 8080 38252
COPY download-and-start.sh /opt/download-and-start
CMD ["/opt/download-and-start"]
CMD ["/download-and-start"]

View File

@@ -1,14 +1,13 @@
FROM openjdk:8u111-jre
FROM itzg/ubuntu-openjdk-7
MAINTAINER itzg
ENV KIBANA_VERSION 5.1.2
ENV KIBANA_VERSION 4.1.1
ADD https://artifacts.elastic.co/downloads/kibana/kibana-${KIBANA_VERSION}-linux-x86_64.tar.gz /tmp/kibana.tgz
RUN wget -q -O /tmp/kibana.tgz https://download.elasticsearch.org/kibana/kibana/kibana-${KIBANA_VERSION}-linux-x64.tar.gz
RUN tar -C /opt -xzf /tmp/kibana.tgz && rm /tmp/kibana.tgz
ENV KIBANA_HOME /opt/kibana-$KIBANA_VERSION-linux-x86_64
ENV KIBANA_HOME /opt/kibana-$KIBANA_VERSION-linux-x64
# Simplify for cross-container
ENV ES_URL http://es:9200

View File

@@ -1,12 +0,0 @@
version: '2'
services:
es:
build: ../elasticsearch
ports:
- "9200:9200"
kibana:
build: .
ports:
- "5601:5601"

View File

@@ -1,5 +1,6 @@
#!/bin/sh
#!/bin/bash
OPTS="-e $ES_URL -H $HOSTNAME"
OPTS="-e $ES_URL"
exec bin/kibana $OPTS

View File

@@ -1,4 +1,4 @@
FROM openjdk:8-jre
FROM java:8
MAINTAINER itzg

View File

@@ -377,20 +377,18 @@ Enables command blocks
### Force Gamemode
Force players to join in the default game mode.
- false - Players will join in the gamemode they left in.
- true - Players will always join in the default gamemode.
* false - Players will join in the gamemode they left in.
* true - Players will always join in the default gamemode.
`docker run -d -e FORCE_GAMEMODE=false`
docker run -d -e FORCE_GAMEMODE=false
### Generate Structures
Defines whether structures (such as villages) will be generated.
- false - Structures will not be generated in new chunks.
- true - Structures will be generated in new chunks.
* false - Structures will not be generated in new chunks.
* true - Structures will be generated in new chunks.
`docker run -d -e GENERATE_STRUCTURES=true`
docker run -d -e GENERATE_STRUCTURES=true
### Hardcore

View File

@@ -197,7 +197,7 @@ case "$TYPE" in
# normalize on Spigot for operations below
TYPE=SPIGOT
;;
FORGE|forge)
TYPE=FORGE
installForge
@@ -362,7 +362,7 @@ if [ ! -e server.properties ]; then
case ${MODE,,?} in
0|1|2|3)
;;
su*)
s*)
MODE=0
;;
c*)
@@ -371,7 +371,7 @@ if [ ! -e server.properties ]; then
a*)
MODE=2
;;
sp*)
s*)
MODE=3
;;
*)