Compare commits


1 commit

Author: Geoff Bourne
SHA1: 205c57c3e3
Message: [es] upgrade 2.4.x to 2.4.3
Date: 2016-12-28 13:38:44 -06:00
34 changed files with 331 additions and 1045 deletions


@@ -1,10 +1,6 @@
dockerfiles
===========
This repository contains the various Dockerfile definitions I'm maintaining.
Contains the various Dockerfile definitions I'm maintaining.
[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/itzg/dockerfiles?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
### Discontinued
##### Cassandra
I have found the [official image](https://hub.docker.com/_/cassandra/) to be quite sufficient

build

@@ -5,8 +5,10 @@ pkgs="$pkgs minecraft-server"
pkgs="$pkgs elasticsearch"
pkgs="$pkgs kibana"
pkgs="$pkgs titan-gremlin"
pkgs="$pkgs cassandra"
for p in $pkgs
do
docker build -t itzg/$p $p
done
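
The loop builds and tags one image per directory as `itzg/<name>`. A single image can also be built by hand following the same convention (a minimal sketch):

```
docker build -t itzg/elasticsearch elasticsearch
```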

cassandra/Dockerfile Executable file

@@ -0,0 +1,26 @@
FROM java:openjdk-8u72-jdk
MAINTAINER itzg
ENV CASSANDRA_VERSION 2.2.7
RUN wget -qO /tmp/apache-cassandra.tgz http://mirrors.ibiblio.org/apache/cassandra/$CASSANDRA_VERSION/apache-cassandra-$CASSANDRA_VERSION-bin.tar.gz
RUN tar -C /opt -zxf /tmp/apache-cassandra.tgz && \
rm /tmp/apache-cassandra.tgz
RUN mv /opt/apache-cassandra-$CASSANDRA_VERSION /opt/cassandra
ENV CASSANDRA_HOME /opt/cassandra
ENV CASSANDRA_CONF /conf
ENV CASSANDRA_DATA /data
WORKDIR $CASSANDRA_HOME
RUN ln -s $CASSANDRA_HOME/bin/* /usr/local/bin
VOLUME ["/data","/conf"]
EXPOSE 9042 9160 7000 7001
ADD cassandra.in.sh $CASSANDRA_HOME/cassandra.in.sh
RUN mv $CASSANDRA_HOME/bin/cassandra.in.sh $CASSANDRA_HOME/bin/orig.cassandra.in.sh
CMD ["/opt/cassandra/bin/cassandra", "-f"]

cassandra/README.md Normal file

@@ -0,0 +1,9 @@
Yet another Cassandra image, but this one got container and non-container access right.
# Basic Usage
To support access from both Docker containers and external, non-Docker clients:
docker run -d --name cassandra -e PUBLISH_AS=192.168.59.103 -p 9160:9160 itzg/cassandra
replacing `192.168.59.103` with your Docker host's LAN IP address.
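
Because the Dockerfile symlinks Cassandra's `bin` scripts onto the `PATH`, a quick sanity check of the running node can be done with the bundled `nodetool` (a hedged example, assuming the container name `cassandra` from above):

```
docker exec cassandra nodetool status
```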

cassandra/cassandra.in.sh Executable file

@@ -0,0 +1,24 @@
#!/bin/bash
cassYml=$CASSANDRA_HOME/conf/cassandra.yaml
privateAddr=$(hostname -i)
seeds=${SEEDS:-${PUBLISH_AS:-$privateAddr}}
sed -i -e "s/- seeds:.*/- seeds: \"$seeds\"/" \
-e "s/listen_address:.*/listen_address: $privateAddr/" \
-e "s/rpc_address:.*/rpc_address: $privateAddr/" \
-e "s/start_rpc:.*/start_rpc: true/" \
-e "s#- /var/lib/cassandra/data#- $CASSANDRA_DATA#" \
$cassYml
if [ -n "$PUBLISH_AS" ]; then
sed -i -e "s/\(\s*#\)\?\s*broadcast_address:.*/broadcast_address: $PUBLISH_AS/" $cassYml
fi
# Copy over our tweaked files, but non-clobbering to let user have ultimate control
cp -rn $CASSANDRA_HOME/conf/* $CASSANDRA_CONF
# source the original
. $CASSANDRA_HOME/bin/orig.cassandra.in.sh
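
The seed selection above falls back from `SEEDS` to `PUBLISH_AS` to the container's own address, so joining an existing ring only requires pointing `SEEDS` at a running node. A minimal sketch, assuming a reachable node at `192.168.59.103`:

```
docker run -d --name cassandra2 -e SEEDS=192.168.59.103 itzg/cassandra
```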


@@ -1,39 +1,26 @@
FROM openjdk:8u121-jre-alpine
FROM java:8u111-jre-alpine
LABEL maintainer "itzg"
MAINTAINER itzg
RUN apk -U add bash
ENV ES_VERSION=2.4.3
ARG ES_VERSION=5.5.1
# avoid conflicts with debian host systems when mounting to host volume
ARG DEFAULT_ES_USER_UID=1100
ADD https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-$ES_VERSION.tar.gz /tmp
# need to adapt to both Docker's new remote-unpack-ADD behavior and the old behavior
ADD https://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/distribution/tar/elasticsearch/$ES_VERSION/elasticsearch-$ES_VERSION.tar.gz /tmp/es.tgz
RUN cd /usr/share && \
if [ -f /tmp/elasticsearch-$ES_VERSION.tar.gz ]; then \
tar xf /tmp/elasticsearch-$ES_VERSION.tar.gz; \
else mv /tmp/elasticsearch-${ES_VERSION} /usr/share; \
fi && \
rm -f /tmp/elasticsearch-$ES_VERSION.tar.gz
tar xf /tmp/es.tgz && \
rm /tmp/es.tgz
COPY start /start
EXPOSE 9200 9300
HEALTHCHECK --timeout=5s CMD wget -q -O - http://$HOSTNAME:9200/_cat/health
ENV ES_HOME=/usr/share/elasticsearch-$ES_VERSION \
DEFAULT_ES_USER=elasticsearch \
DEFAULT_ES_USER_UID=$DEFAULT_ES_USER_UID \
ES_JAVA_OPTS="-Xms1g -Xmx1g"
OPTS=-Dnetwork.host=_non_loopback_ \
DEFAULT_ES_USER=elasticsearch
RUN adduser -S -s /bin/sh -u $DEFAULT_ES_USER_UID $DEFAULT_ES_USER
RUN adduser -S -s /bin/sh $DEFAULT_ES_USER
VOLUME ["/data","/conf"]
WORKDIR $ES_HOME
COPY java.policy /usr/lib/jvm/java-1.8-openjdk/jre/lib/security/
COPY start /start
COPY log4j2.properties $ES_HOME/config/
CMD ["/start"]


@@ -1,10 +1,5 @@
This Docker image provides an easily configurable Elasticsearch node. Via port mappings, it is easy to create an arbitrarily sized cluster of nodes. As long as the versions match, you can mix-and-match "real" Elasticsearch nodes with container-ized ones.
# NOTE for use on Linux hosts
Elasticsearch 5.x requires that the virtual memory mmap count is set sufficiently for stable,
production use. [Refer to this guide for more information](https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html).
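
The linked guide amounts to raising a kernel setting on the Docker host (the same threshold the image's start script checks for); for example, run on the host itself:

```
sysctl -w vm.max_map_count=262144
```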
# Basic Usage
To start an Elasticsearch data node that listens on the standard ports on your host's network interface:
@@ -34,9 +29,9 @@ Where `DOCKERHOST` would be the actual hostname of your host running Docker.
To run a multi-node cluster (3-node in this example) on a single Docker machine use:
docker run -d --name es0 -p 9200:9200 itzg/elasticsearch
docker run -d --name es1 --link es0 -e UNICAST_HOSTS=es0 itzg/elasticsearch
docker run -d --name es2 --link es0 -e UNICAST_HOSTS=es0 itzg/elasticsearch
docker run -d --name es0 -p 9200:9200 es
docker run -d --name es1 --link es0 -e UNICAST_HOSTS=es0 es
docker run -d --name es2 --link es0 -e UNICAST_HOSTS=es0 es
and then check the cluster health, such as http://192.168.99.100:9200/_cluster/health?pretty
@@ -54,40 +49,6 @@ and then check the cluster health, such as http://192.168.99.100:9200/_cluster/h
"unassigned_shards" : 0
}
If you have a Docker Swarm cluster already initialized you can download this
[docker-compose.yml](https://raw.githubusercontent.com/itzg/dockerfiles/master/elasticsearch/docker-compose.yml) and deploy a cluster using:
docker stack deploy -c docker-compose.yml es
With a `docker service ls` you can confirm 1 master, 2 data, and 1 gateway nodes are running:
```
ID NAME MODE REPLICAS IMAGE
9nwnno8hbqgk es_kibana replicated 1/1 kibana:latest
f5x7nipwmvkr es_gateway replicated 1/1 es
om8rly2yxylw es_data replicated 2/2 es
tdvfilj370yn es_master replicated 1/1 es
```
As you can see, there is also a Kibana instance included and available at port 5601.
# Health Checks
This container declares a [HEALTHCHECK](https://docs.docker.com/engine/reference/builder/#/healthcheck) that queries the `_cat/health`
endpoint for a quick, one-line gauge of health every 30 seconds.
The current health of the container is shown in the `STATUS` column of `docker ps`, such as
Up 14 minutes (healthy)
You can also check the history of health checks from `inspect`, such as:
```
> docker inspect -f "{{json .State.Health}}" es
{"Status":"healthy","FailingStreak":0,"Log":[...
```
# Configuration Summary
## Ports
@@ -181,14 +142,12 @@ To simplify all that, this image provides a `TYPE` variable to let you amongst t
* `MASTER` : master-eligible, but holds no data. It is good to have three or more of these in a
large cluster
* `DATA` (or `NON_MASTER`) : holds data and serves search/index requests. Scale these out for elastic-y goodness.
* `NON_DATA` : performs all duties except holding data
* `GATEWAY` (or `COORDINATING`) : only operates as a client node or a "smart router". These are the ones whose HTTP port 9200 will need to be exposed
* `INGEST` : operates only as an ingest node and is not master or data eligible
* `GATEWAY` : only operates as a client node or a "smart router". These are the ones whose HTTP port 9200 will need to be exposed
A [Docker Compose](https://docs.docker.com/compose/overview/) file will serve as a good example of these three node types:
```
version: '3'
version: '2'
services:
gateway:
@@ -211,14 +170,6 @@ services:
environment:
UNICAST_HOSTS: master,gateway
TYPE: DATA
kibana:
image: kibana
ports:
- "5601:5601"
environment:
ELASTICSEARCH_URL: http://gateway:9200
```
## Minimum Master Nodes
@@ -231,28 +182,18 @@ Using the Docker Compose file above, a value of `2` is appropriate when scaling
docker-compose scale master=3
## Multiple Network Binding, such as Swarm Mode
## Auto transport/http discovery with Swarm Mode
When using Docker Swarm mode the container is presented with multiple ethernet
devices. By default, all global, routable IP addresses are configured for
Elasticsearch to use as `network.host`.
When using Docker Swarm mode (starting with 1.12), the overlay and ingress network interfaces are assigned
multiple IP addresses. As a result, it creates confusion for the transport publish logic even when using
the special value `_eth0_`.
That discovery can be overridden by providing a specific ethernet device name
to `DISCOVER_TRANSPORT_IP` and/or `DISCOVER_HTTP_IP`, such as
To resolve this, add
-e DISCOVER_TRANSPORT_IP=eth0
-e DISCOVER_HTTP_IP=eth2
## Heap size and other JVM options
replacing `eth0` with another interface within the container, if needed.
By default this image will run Elasticsearch with a Java heap size of 1 GB. If that value
or any other JVM options need to be adjusted, then replace the `ES_JAVA_OPTS`
environment variable.
The same can be done for publish/binding of the http module by adding:
For example, this would allow for the use of 16 GB of heap:
-e ES_JAVA_OPTS="-Xms16g -Xmx16g"
Refer to [this page](https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html)
for more information about why both the minimum and maximum sizes were set to
the same value.
-e DISCOVERY_HTTP_IP=eth2


@@ -1,35 +0,0 @@
# This composition is known to work on a Swarm cluster consisting of
# 3 VM nodes with 1GB allocated to each.
version: '3'
services:
master:
image: itzg/elasticsearch
environment:
UNICAST_HOSTS: master
MIN_MASTERS: 1
ES_JAVA_OPTS: -Xms756m -Xmx756m
TYPE: NON_DATA
ports:
- "9200:9200"
- "9300:9300"
deploy:
replicas: 1
update_config:
parallelism: 1
data:
image: itzg/elasticsearch
deploy:
mode: global
update_config:
parallelism: 1
environment:
TYPE: DATA
UNICAST_HOSTS: master
ES_JAVA_OPTS: -Xms512m -Xmx512m
kibana:
image: kibana
ports:
- "5601:5601"
environment:
ELASTICSEARCH_URL: http://master:9200


@@ -1,35 +0,0 @@
version: '3'
services:
master:
build: .
environment:
TYPE: MASTER
UNICAST_HOSTS: master
MIN_MASTERS: 1
data:
build: .
environment:
TYPE: DATA
UNICAST_HOSTS: master
gateway:
build: .
ports:
- "9200:9200"
- "9300:9300"
environment:
TYPE: GATEWAY
UNICAST_HOSTS: master
ingest:
build: .
ports:
- "9222:9200"
environment:
TYPE: INGEST
UNICAST_HOSTS: master
kibana:
image: kibana:5.5.1
ports:
- "5601:5601"
environment:
ELASTICSEARCH_URL: http://gateway:9200


@@ -1,21 +0,0 @@
version: '3'
services:
master:
image: itzg/elasticsearch
environment:
UNICAST_HOSTS: master
MIN_MASTERS: 1
ports:
- "9200:9200"
- "9300:9300"
deploy:
replicas: 1
update_config:
parallelism: 1
kibana:
image: kibana
ports:
- "5601:5601"
environment:
ELASTICSEARCH_URL: http://master:9200


@@ -1,44 +0,0 @@
version: '3'
services:
master:
image: itzg/elasticsearch
environment:
TYPE: MASTER
UNICAST_HOSTS: master
MIN_MASTERS: 1
deploy:
replicas: 1
update_config:
parallelism: 1
data:
image: itzg/elasticsearch
environment:
TYPE: DATA
UNICAST_HOSTS: master
deploy:
replicas: 2
update_config:
parallelism: 1
delay: 60s
gateway:
image: itzg/elasticsearch
ports:
- "9200:9200"
- "9300:9300"
environment:
TYPE: GATEWAY
UNICAST_HOSTS: master
ingest:
image: itzg/elasticsearch
ports:
- "9222:9200"
environment:
TYPE: INGEST
UNICAST_HOSTS: master
kibana:
image: kibana
ports:
- "5601:5601"
environment:
ELASTICSEARCH_URL: http://gateway:9200


@@ -1,6 +0,0 @@
grant {
// JMX Java Management eXtensions
permission javax.management.MBeanTrustPermission "register";
permission javax.management.MBeanServerPermission "createMBeanServer";
permission javax.management.MBeanPermission "-#-[-]", "queryNames";
};


@@ -1,74 +0,0 @@
status = error
# log action execution errors for easier debugging
logger.action.name = org.elasticsearch.action
logger.action.level = debug
appender.console.type = Console
appender.console.name = console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n
appender.rolling.type = RollingFile
appender.rolling.name = rolling
appender.rolling.fileName = ${sys:es.logs}.log
appender.rolling.layout.type = PatternLayout
appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.10000m%n
appender.rolling.filePattern = ${sys:es.logs}-%d{yyyy-MM-dd}.log
appender.rolling.policies.type = Policies
appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.rolling.policies.time.interval = 1
appender.rolling.policies.time.modulate = true
rootLogger.level = info
rootLogger.appenderRef.console.ref = console
#rootLogger.appenderRef.rolling.ref = rolling
appender.deprecation_rolling.type = RollingFile
appender.deprecation_rolling.name = deprecation_rolling
appender.deprecation_rolling.fileName = ${sys:es.logs}_deprecation.log
appender.deprecation_rolling.layout.type = PatternLayout
appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.10000m%n
appender.deprecation_rolling.filePattern = ${sys:es.logs}_deprecation-%i.log.gz
appender.deprecation_rolling.policies.type = Policies
appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.deprecation_rolling.policies.size.size = 1GB
appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
appender.deprecation_rolling.strategy.max = 4
logger.deprecation.name = org.elasticsearch.deprecation
logger.deprecation.level = warn
#logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
logger.deprecation.additivity = false
appender.index_search_slowlog_rolling.type = RollingFile
appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
appender.index_search_slowlog_rolling.fileName = ${sys:es.logs}_index_search_slowlog.log
appender.index_search_slowlog_rolling.layout.type = PatternLayout
appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.10000m%n
appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs}_index_search_slowlog-%d{yyyy-MM-dd}.log
appender.index_search_slowlog_rolling.policies.type = Policies
appender.index_search_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.index_search_slowlog_rolling.policies.time.interval = 1
appender.index_search_slowlog_rolling.policies.time.modulate = true
logger.index_search_slowlog_rolling.name = index.search.slowlog
logger.index_search_slowlog_rolling.level = trace
logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = console
logger.index_search_slowlog_rolling.additivity = false
appender.index_indexing_slowlog_rolling.type = RollingFile
appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs}_index_indexing_slowlog.log
appender.index_indexing_slowlog_rolling.layout.type = PatternLayout
appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.10000m%n
appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs}_index_indexing_slowlog-%d{yyyy-MM-dd}.log
appender.index_indexing_slowlog_rolling.policies.type = Policies
appender.index_indexing_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.index_indexing_slowlog_rolling.policies.time.interval = 1
appender.index_indexing_slowlog_rolling.policies.time.modulate = true
logger.index_indexing_slowlog.name = index.indexing.slowlog.index
logger.index_indexing_slowlog.level = trace
logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = console
logger.index_indexing_slowlog.additivity = false


@@ -1,126 +1,5 @@
#!/bin/sh
pre_checks() {
mmc=$(sysctl vm.max_map_count|sed 's/.*= //')
if [[ $mmc -lt 262144 ]]; then
echo "
ERROR: As of 5.0.0 Elasticsearch requires increasing mmap counts.
Refer to https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html
"
exit 1
fi
}
discoverIpFromLink() {
dev=$1
mode=$2
ip=`ipaddr show dev $dev scope global|awk '$1 == "inet" { if (!match($2,"/32")) { gsub("/.*","",$2) ; print $2 } }'`
echo "Discovered $mode address $ip for $dev"
OPTS="$OPTS -E $mode.host=$ip"
}
discoverAllGlobalIps() {
if [ ${#IGNORE_NETWORK} -eq 0 ]
then
IGNORE_NETWORK='999.999.999.999'
fi
printf "Finding IPs"
while [ ${#ips} -eq 0 ]
do
printf "."
ips=`ipaddr show scope global| grep -v "inet ${IGNORE_NETWORK}" | awk '$1 == "inet" { if (!match($2,"/32")) { gsub("/.*","",$2) ; addrs[length(addrs)] = $2 } } END { for (i in addrs) { if (i>0) printf "," ; printf addrs[i] } }'`
sleep 1
done
echo " found! $ips"
OPTS="$OPTS -E network.host=$ips"
}
setup_clustering() {
if [ -n "$CLUSTER" ]; then
OPTS="$OPTS -E cluster.name=$CLUSTER"
if [ -n "$CLUSTER_FROM" ]; then
if [ -d /data/$CLUSTER_FROM -a ! -d /data/$CLUSTER ]; then
echo "Performing cluster data migration from $CLUSTER_FROM to $CLUSTER"
mv /data/$CLUSTER_FROM /data/$CLUSTER
fi
fi
fi
if [ -n "$NODE_NAME" ]; then
OPTS="$OPTS -E node.name=$NODE_NAME"
fi
if [ -n "$MULTICAST" ]; then
OPTS="$OPTS -E discovery.zen.ping.multicast.enabled=$MULTICAST"
fi
if [ -n "$UNICAST_HOSTS" ]; then
OPTS="$OPTS -E discovery.zen.ping.unicast.hosts=$UNICAST_HOSTS"
fi
if [ -n "$PUBLISH_AS" ]; then
OPTS="$OPTS -E transport.publish_host=$(echo $PUBLISH_AS | awk -F: '{print $1}')"
OPTS="$OPTS -E transport.publish_port=$(echo $PUBLISH_AS | awk -F: '{if ($2) print $2; else print 9300}')"
fi
if [ -n "$MIN_MASTERS" ]; then
OPTS="$OPTS -E discovery.zen.minimum_master_nodes=$MIN_MASTERS"
fi
}
install_plugins() {
if [ -n "$PLUGINS" ]; then
for p in $(echo $PLUGINS | awk -v RS=, '{print}')
do
echo "Installing the plugin $p"
$ES_HOME/bin/elasticsearch-plugin install $p
done
else
mkdir -p $ES_HOME/plugins
fi
}
setup_personality() {
if [ -n "$TYPE" ]; then
case $TYPE in
MASTER)
OPTS="$OPTS -E node.master=true -E node.data=false -E node.ingest=false"
;;
GATEWAY|COORDINATING)
OPTS="$OPTS -E node.master=false -E node.data=false -E node.ingest=false"
;;
INGEST)
OPTS="$OPTS -E node.master=false -E node.data=false -E node.ingest=true"
;;
DATA)
OPTS="$OPTS -E node.master=false -E node.data=true -E node.ingest=false"
;;
NON_MASTER)
OPTS="$OPTS -E node.master=false -E node.data=true -E node.ingest=true"
;;
NON_DATA)
OPTS="$OPTS -E node.master=true -E node.data=false -E node.ingest=true"
;;
*)
echo "Unknown node type. Please use MASTER|GATEWAY|DATA|NON_MASTER"
exit 1
esac
fi
}
pre_checks
if [ -f /conf/env ]; then
. /conf/env
fi
@@ -129,28 +8,95 @@ if [ ! -e /conf/elasticsearch.* ]; then
cp $ES_HOME/config/elasticsearch.yml /conf
fi
if [ ! -e /conf/log4j2.properties ]; then
cp $ES_HOME/config/log4j2.properties /conf
if [ ! -e /conf/logging.* ]; then
cp $ES_HOME/config/logging.yml /conf
fi
OPTS="$OPTS \
-E path.conf=/conf \
-E path.data=/data \
-E path.logs=/data \
-E transport.tcp.port=9300 \
-E http.port=9200"
OPTS="$OPTS -Des.path.conf=/conf \
-Des.path.data=/data \
-Des.path.logs=/data \
-Des.transport.tcp.port=9300 \
-Des.http.port=9200"
discoverAllGlobalIps
if [ "${DISCOVER_TRANSPORT_IP}" != "" ]; then
discoverIpFromLink() {
dev=$1
mode=$2
ip=`ipaddr show dev $dev scope global|awk '$1 == "inet" { if (!match($2,"/32")) { gsub("/.*","",$2) ; print $2 } }'`
echo "Discovered $mode address $ip for $dev"
OPTS="$OPTS -Des.$mode.host=$ip"
}
if [ "$DISCOVER_TRANSPORT_IP" != "" ]; then
discoverIpFromLink $DISCOVER_TRANSPORT_IP transport
fi
if [ "${DISCOVER_HTTP_IP}" != "" ]; then
if [ "$DISCOVER_HTTP_IP" != "" ]; then
discoverIpFromLink $DISCOVER_HTTP_IP http
fi
setup_personality
setup_clustering
install_plugins
if [ -n "$CLUSTER" ]; then
OPTS="$OPTS -Des.cluster.name=$CLUSTER"
if [ -n "$CLUSTER_FROM" ]; then
if [ -d /data/$CLUSTER_FROM -a ! -d /data/$CLUSTER ]; then
echo "Performing cluster data migration from $CLUSTER_FROM to $CLUSTER"
mv /data/$CLUSTER_FROM /data/$CLUSTER
fi
fi
fi
if [ -n "$NODE_NAME" ]; then
OPTS="$OPTS -Des.node.name=$NODE_NAME"
fi
if [ -n "$MULTICAST" ]; then
OPTS="$OPTS -Des.discovery.zen.ping.multicast.enabled=$MULTICAST"
fi
if [ -n "$UNICAST_HOSTS" ]; then
OPTS="$OPTS -Des.discovery.zen.ping.unicast.hosts=$UNICAST_HOSTS"
fi
if [ -n "$PUBLISH_AS" ]; then
OPTS="$OPTS -Des.transport.publish_host=$(echo $PUBLISH_AS | awk -F: '{print $1}')"
OPTS="$OPTS -Des.transport.publish_port=$(echo $PUBLISH_AS | awk -F: '{if ($2) print $2; else print 9300}')"
fi
if [ -n "$TYPE" ]; then
case $TYPE in
MASTER)
OPTS="$OPTS --node.master=true --node.data=false"
;;
GATEWAY)
OPTS="$OPTS --node.master=false --node.data=false"
;;
DATA|NON_MASTER)
OPTS="$OPTS --node.master=false --node.data=true"
;;
*)
echo "Unknown node type. Please use MASTER|GATEWAY|DATA|NON_MASTER"
exit 1
esac
fi
if [ -n "$MIN_MASTERS" ]; then
OPTS="$OPTS --discovery.zen.minimum_master_nodes=$MIN_MASTERS"
fi
mkdir -p /conf/plugins
OPTS="$OPTS --path.plugins=/conf/plugins"
if [ -n "$PLUGINS" ]; then
PLUGIN_OPTS="-Des.path.conf=/conf -Des.path.plugins=/conf/plugins"
for p in $(echo $PLUGINS | awk -v RS=, '{print}')
do
echo "Installing the plugin $p"
$ES_HOME/bin/plugin $PLUGIN_OPTS install $p -t 1m -b
done
else
mkdir -p /conf/plugins
fi
mkdir -p /conf/scripts
@@ -158,7 +104,8 @@ echo "Starting Elasticsearch with the options $OPTS"
CMD="$ES_HOME/bin/elasticsearch $OPTS"
if [ `id -u` = 0 ]; then
echo "Running as non-root..."
chown -R $DEFAULT_ES_USER /data /conf
chown -R $DEFAULT_ES_USER /data
set -x
su -c "$CMD" $DEFAULT_ES_USER
else
$CMD


@@ -1,18 +1,18 @@
FROM java:8
LABEL maintainer "itzg"
MAINTAINER itzg
ENV GITBLIT_VERSION 1.7.1
RUN wget -qO /tmp/gitblit.tgz http://dl.bintray.com/gitblit/releases/gitblit-$GITBLIT_VERSION.tar.gz
RUN tar -C /opt -xvf /tmp/gitblit.tgz && \
rm /tmp/gitblit.tgz
VOLUME ["/data"]
ADD start.sh /start
ENV GITBLIT_PATH=/opt/gitblit-${GITBLIT_VERSION} \
GITBLIT_HTTPS_PORT=443 \
GITBLIT_HTTP_PORT=80 \
@@ -20,7 +20,7 @@ ENV GITBLIT_PATH=/opt/gitblit-${GITBLIT_VERSION} \
GITBLIT_ADMIN_USER=admin \
GITBLIT_INITIAL_REPO=
WORKDIR $GITBLIT_PATH
EXPOSE 80 443
ENTRYPOINT ["/start"]


@@ -1,6 +1,6 @@
FROM itzg/ubuntu-openjdk-7
LABEL maintainer "itzg"
MAINTAINER itzg
RUN apt-get install -y curl unzip


@@ -1,6 +1,6 @@
FROM ubuntu:trusty
LABEL maintainer "itzg"
MAINTAINER itzg
ENV APT_GET_UPDATE 2014-09-18
@@ -21,3 +21,4 @@ EXPOSE 4000
ADD start.sh /start
CMD ["/start"]


@@ -1,17 +1,12 @@
FROM java:openjdk-8u102-jdk
FROM java:openjdk-8u72-jdk
LABEL maintainer "itzg"
MAINTAINER itzg
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y \
graphviz \
&& apt-get clean
ADD download-and-start.sh /download-and-start
ENV JENKINS_HOME=/data
VOLUME ["/data", "/root", "/opt/jenkins"]
EXPOSE 8080 38252
COPY download-and-start.sh /opt/download-and-start
CMD ["/opt/download-and-start"]
CMD ["/download-and-start"]


@@ -1,14 +1,13 @@
FROM openjdk:8u111-jre
FROM itzg/ubuntu-openjdk-7
LABEL maintainer "itzg"
MAINTAINER itzg
ENV KIBANA_VERSION 5.1.2
ENV KIBANA_VERSION 4.1.1
ADD https://artifacts.elastic.co/downloads/kibana/kibana-${KIBANA_VERSION}-linux-x86_64.tar.gz /tmp/kibana.tgz
RUN wget -q -O /tmp/kibana.tgz https://download.elasticsearch.org/kibana/kibana/kibana-${KIBANA_VERSION}-linux-x64.tar.gz
RUN tar -C /opt -xzf /tmp/kibana.tgz && rm /tmp/kibana.tgz
ENV KIBANA_HOME /opt/kibana-$KIBANA_VERSION-linux-x86_64
ENV KIBANA_HOME /opt/kibana-$KIBANA_VERSION-linux-x64
# Simplify for cross-container
ENV ES_URL http://es:9200


@@ -1,12 +0,0 @@
version: '2'
services:
es:
build: ../elasticsearch
ports:
- "9200:9200"
kibana:
build: .
ports:
- "5601:5601"


@@ -1,5 +1,6 @@
#!/bin/sh
#!/bin/bash
OPTS="-e $ES_URL -H $HOSTNAME"
OPTS="-e $ES_URL"
exec bin/kibana $OPTS


@@ -1,6 +1,6 @@
FROM itzg/ubuntu-openjdk-7
LABEL maintainer "itzg"
MAINTAINER itzg
ENV LOGSTASH_VERSION 1.5.0-1


@@ -1,36 +1,30 @@
FROM openjdk:8u131-jre-alpine
FROM java:8
LABEL maintainer "itzg"
MAINTAINER itzg
RUN apk add -U \
openssl \
imagemagick \
lsof \
su-exec \
bash \
curl \
git \
jq \
mysql-client \
python python-dev py2-pip && \
rm -rf /var/cache/apk/*
ENV APT_GET_UPDATE 2016-04-23
RUN apt-get update
RUN pip install mcstatus
RUN DEBIAN_FRONTEND=noninteractive apt-get install -y \
imagemagick \
lsof \
nano \
sudo \
vim \
jq \
&& apt-get clean
HEALTHCHECK CMD mcstatus localhost ping
RUN addgroup -g 1000 minecraft \
&& adduser -Ss /bin/false -u 1000 -G minecraft -h /home/minecraft minecraft \
RUN useradd -s /bin/false --uid 1000 minecraft \
&& mkdir /data \
&& mkdir /config \
&& mkdir /mods \
&& mkdir /plugins \
&& mkdir /home/minecraft \
&& chown minecraft:minecraft /data /config /mods /plugins /home/minecraft
EXPOSE 25565 25575
ADD https://github.com/itzg/restify/releases/download/1.0.4/restify_linux_amd64 /usr/local/bin/restify
ADD https://github.com/itzg/rcon-cli/releases/download/1.3/rcon-cli_linux_amd64 /usr/local/bin/rcon-cli
ADD https://github.com/itzg/restify/releases/download/1.0.3/restify_linux_amd64 /usr/local/bin/restify
COPY start.sh /start
COPY start-minecraft.sh /start-minecraft
COPY mcadmin.jq /usr/share
@@ -44,7 +38,6 @@ ENTRYPOINT [ "/start" ]
ENV UID=1000 GID=1000 \
MOTD="A Minecraft Server Powered by Docker" \
JVM_XX_OPTS="-XX:+UseG1GC" MEMORY="1G" \
JVM_OPTS="-Xmx1024M -Xms1024M" \
TYPE=VANILLA VERSION=LATEST FORGEVERSION=RECOMMENDED LEVEL=world PVP=true DIFFICULTY=easy \
ENABLE_RCON=true RCON_PORT=25575 RCON_PASSWORD=minecraft \
LEVEL_TYPE=DEFAULT GENERATOR_SETTINGS= WORLD= MODPACK= ONLINE_MODE=TRUE CONSOLE=true


@@ -1 +0,0 @@
<mxfile userAgent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.110 Safari/537.36" version="6.4.4" editor="www.draw.io" type="github"><diagram name="Page-1">1VVNc5swEP01HDMDUiDOMbGd5pK2Ux96VkGAxgJRIRvcX9+VWD40ODOZ1j0Uz9jS2w+t3ts1Ad1W/SfNmvJNZVwGJMz6gO4CQjZRCN8WuAxA8vgwAIUW2QBFM3AQvziCGFecRMZbz9EoJY1ofDBVdc1T42FMa9X5brmS/qkNK/gKOKRMrtHvIjMlolEYzoZXLooSj97EaPjB0mOh1anG8wJCc/cM5oqNudC/LVmmugVE9wHdaqXMsKr6LZeW2pG2Ie7lHetUt+a1+UgAGQLOTJ74WHEiIfQ5E2dbn7kgJ8nPky3q2fDe3DEpijqgT+AheW5mK6wK/HVZ2obVI7ZT6ZFrCHlVrRkdoLSlzwJ2BYwo8WohXSkMPzQstfsOWhCcSlNJ2EX21JFVu8mFlFsllXahdJ/YD+Bnro0AyZ+Gu+yMslnwZjt3LVd/KurCpgptLlWbA5aBXNs0vH+X/mgSFWaFq4obfQEXDKDYBhd/2809RzeIlYt2mxwZ9nkxZZ61hgXKfV16upK+EjBMmuXmruX67KSCATMMYL2SwDU5z5DkvxAE52NNvdHqyBeOoXs8UWIbplP8A3ES3UCVja/K41oVEl5RJYpuoMr9SpU34B+Qr0rD0ITD0JE4TuI/nopwLcKLexYi6OFet6CT3Pt8TjwtCI3IFUJv0eXxis9v2y+fr/D58P/yGf87PmE7v4ycbfHCp/vf</diagram></mxfile>


@@ -1,7 +1,6 @@
[![Docker Pulls](https://img.shields.io/docker/pulls/itzg/minecraft-server.svg)](https://hub.docker.com/r/itzg/minecraft-server/)
[![Docker Stars](https://img.shields.io/docker/stars/itzg/minecraft-server.svg?maxAge=2592000)](https://hub.docker.com/r/itzg/minecraft-server/)
[![GitHub Issues](https://img.shields.io/github/issues-raw/itzg/dockerfiles.svg)](https://github.com/itzg/dockerfiles/issues)
This docker image provides a Minecraft Server that will automatically download the latest stable
version at startup. You can also run/upgrade to any specific version or the
@@ -36,24 +35,6 @@ With that you can easily view the logs, stop, or re-start the container:
## Interacting with the server
[RCON](http://wiki.vg/RCON) is enabled by default, so you can `exec` into the container to
access the Minecraft server console:
```
docker exec -i mc rcon-cli
```
Note: The `-i` is required for interactive use of rcon-cli.
To run a simple, one-shot command, such as stopping a Minecraft server, pass the command as
arguments to `rcon-cli`, such as:
```
docker exec mc rcon-cli stop
```
_The `-i` is not needed in this case._
In order to attach and interact with the Minecraft server, add `-it` when starting the container, such as
docker run -d -it -p 25565:25565 --name mc itzg/minecraft-server
@@ -118,34 +99,6 @@ or a specific version:
docker run -d -e VERSION=1.7.9 ...
## Healthcheck
This image contains [Dinnerbone's mcstatus](https://github.com/Dinnerbone/mcstatus) and uses
its `ping` command to continually check on the container's health. That can be observed
from the `STATUS` column of `docker ps`
```
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
b418af073764 mc "/start" 43 seconds ago Up 41 seconds (healthy) 0.0.0.0:25565->25565/tcp, 25575/tcp mc
```
You can also query the container's health in a script friendly way:
```
> docker container inspect -f "{{.State.Health.Status}}" mc
healthy
```
Finally, since `mcstatus` is on the `PATH` you can exec into the container
and use mcstatus directly and invoke any of its other commands:
```
> docker exec mc mcstatus localhost status
version: v1.12 (protocol 335)
description: "{u'text': u'A Minecraft Server Powered by Docker'}"
players: 0/20 No players online
```
## Running a Forge Server
Enable Forge server mode by adding a `-e TYPE=FORGE` to your command-line.
@@ -156,20 +109,6 @@ but you can also choose to run a specific version with `-e FORGEVERSION=10.13.4.
-e TYPE=FORGE -e FORGEVERSION=10.13.4.1448 \
-p 25565:25565 -e EULA=TRUE --name mc itzg/minecraft-server
To use a pre-downloaded Forge installer, place it in the attached `/data` directory and
specify the name of the installer file with `FORGE_INSTALLER`, such as:
$ docker run -d -v /path/on/host:/data ... \
-e FORGE_INSTALLER=forge-1.11.2-13.20.0.2228-installer.jar ...
To download a Forge installer from a custom location, such as your own file repository, specify
the URL with `FORGE_INSTALLER_URL`, such as:
$ docker run -d -v /path/on/host:/data ... \
-e FORGE_INSTALLER_URL=http://HOST/forge-1.11.2-13.20.0.2228-installer.jar ...
In both of the cases above, there is no need for the `VERSION` or `FORGEVERSION` variables.
In order to add mods, you have two options.
### Using the /data volume
@@ -220,10 +159,6 @@ Enable Bukkit/Spigot server mode by adding a `-e TYPE=BUKKIT -e VERSION=1.8` or
-e TYPE=SPIGOT -e VERSION=1.8 \
-p 25565:25565 -e EULA=TRUE --name mc itzg/minecraft-server
If you are hosting your own copy of Bukkit/Spigot you can override the download URLs with:
* -e BUKKIT_DOWNLOAD_URL=<url>
* -e SPIGOT_DOWNLOAD_URL=<url>
You can build spigot from source by adding `-e BUILD_FROM_SOURCE=true`
__NOTE: to avoid pegging the CPU when running Spigot,__ you will need to
@@ -289,9 +224,6 @@ pass `--noconsole` at the very end of the command line and not use `-it`. For ex
-e TYPE=PAPER -e VERSION=1.9.4 \
-p 25565:25565 -e EULA=TRUE --name mc itzg/minecraft-server --noconsole
If you are hosting your own copy of PaperSpigot you can override the download URL with:
* -e PAPER_DOWNLOAD_URL=<url>
You can install Bukkit plugins in two ways...
### Using the /data volume
@@ -332,72 +264,6 @@ This works well if you want to have a common set of plugins in a separate
location, but still have multiple worlds with different server requirements
in either persistent volumes or a downloadable archive.
## Running a Server with a Feed-The-Beast (FTB) modpack
Enable this server mode by adding a `-e TYPE=FTB` to your command-line,
but note the following additional steps needed...
You need to specify a modpack to run, using the `FTB_SERVER_MOD` environment
variable. An FTB server modpack is available together with its respective
client modpack on https://www.feed-the-beast.com under "Additional Files."
Because of the interactive delayed download mechanism on that web site, you
must manually download the server modpack. Copy the modpack to the `/data`
directory (see "Attaching data directory to host filesystem").
Now you can add a `-e FTB_SERVER_MOD=name_of_modpack.zip` to your command-line.
$ docker run -d -v /path/on/host:/data -e TYPE=FTB \
-e FTB_SERVER_MOD=FTBPresentsSkyfactory3Server_3.0.6.zip \
-p 25565:25565 -e EULA=TRUE --name mc itzg/minecraft-server
Instead of explicitly downloading a modpack from the Feed the Beast site, you
can you set `FTB_SERVER_MOD` to the **server** URL of a modpack, such as
$ docker run ... \
-e TYPE=FTB \
-e FTB_SERVER_MOD=https://www.feed-the-beast.com/projects/ftb-infinity-lite-1-10/files/2402889
### Using the /data volume
You must use a persistent `/data` mount for this type of server.
To do this, you will need to attach the container's `/data` directory
(see "Attaching data directory to host filesystem”).
If the modpack is updated and you want to run the new version on your
server, you stop and remove the container:
docker stop mc
docker rm mc
Do not erase anything from your /data directory (unless you know of
specific mods that have been removed from the modpack). Download the
updated FTB server modpack and copy it to `/data`. Start a new container
with `FTB_SERVER_MOD` specifying the updated modpack file.
$ docker run -d -v /path/on/host:/data -e TYPE=FTB \
-e FTB_SERVER_MOD=FTBPresentsSkyfactory3Server_3.0.7.zip \
-p 25565:25565 -e EULA=TRUE --name mc itzg/minecraft-server
### FTB server JVM options
An FTB server modpack contains its own startup script that launches the
JVM and it does not use the `JVM_OPTS` environment variable. Instead
you can use `MIN_RAM` and `MAX_RAM` variables. These are appended to
the JVM `-Xms` and `-Xmx` options. For example, `-e MIN_RAM=2G` results
in `-Xms2G` passed to the JVM.
Additionally, `PERMGEN_SIZE` is passed on to `-XX:PermSize`. Here is an
example:
$ docker run -d -v /path/on/host:/data -e TYPE=FTB \
-e MIN_RAM=1G -e MAX_RAM=2G -e PERMGEN_SIZE=512M \
-e FTB_SERVER_MOD=FTBPresentsSkyfactory3Server_3.0.6.zip \
-p 25565:25565 -e EULA=TRUE --name mc itzg/minecraft-server
Note: The FTB server start script will also override other options,
like `MOTD`.
## Using Docker Compose
Rather than type the server options below, the port mappings above, etc
@@ -424,7 +290,7 @@ minecraft-server:
and in the same directory as that file run
docker-compose up -d
docker-compose -d up
Now, go play...or adjust the `environment` section to configure
this server instance.
@@ -511,20 +377,18 @@ Enables command blocks
### Force Gamemode
Force players to join in the default game mode.
- false - Players will join in the gamemode they left in.
- true - Players will always join in the default gamemode.
* false - Players will join in the gamemode they left in.
* true - Players will always join in the default gamemode.
`docker run -d -e FORCE_GAMEMODE=false`
docker run -d -e FORCE_GAMEMODE=false
### Generate Structures
Defines whether structures (such as villages) will be generated.
- false - Structures will not be generated in new chunks.
- true - Structures will be generated in new chunks.
* false - Structures will not be generated in new chunks.
* true - Structures will be generated in new chunks.
`docker run -d -e GENERATE_STRUCTURES=true`
docker run -d -e GENERATE_STRUCTURES=true
### Hardcore
@@ -673,20 +537,6 @@ To use this option pass the environment variable `MODPACK`, such as
top level of the zip archive. Make sure the jars are compatible with the
particular `TYPE` of server you are running.
### Remove old mods/plugins
When the option above is specified (`MODPACK`) you can also instruct script to
delete old mods/plugins prior to installing new ones. This behaviour is desirable
in case you want to upgrade mods/plugins from downloaded zip file.
To use this option pass the environment variable `REMOVE_OLD_MODS="TRUE"`, such as
docker run -d -e REMOVE_OLD_MODS="TRUE" -e MODPACK=http://www.example.com/mods/modpack.zip ...
**NOTE:** This option will be taken into account only when option `MODPACK` is also used.
**WARNING:** All content of the `mods` or `plugins` directory will be deleted
before unpacking new content from the zip file.
### Online mode
By default, server checks connecting players against Minecraft's account database. If you want to create an offline server or your server is not connected to the internet, you can disable the server to try connecting to minecraft.net to authenticate players with environment variable `ONLINE_MODE`, like this
@@ -697,23 +547,11 @@ By default, server checks connecting players against Minecraft's account databas
### Memory Limit
By default, the image declares a Java initial and maximum memory limit of 1 GB. There are several
ways to adjust the memory settings:
The Java memory limit can be adjusted using the `JVM_OPTS` environment variable, where the default is
the setting shown in the example (max and min at 1024 MB):
* `MEMORY`, "1G" by default, can be used to adjust both initial (`Xms`) and max (`Xmx`)
memory settings of the JVM
* `INIT_MEMORY`, independently sets the initial heap size
* `MAX_MEMORY`, independently sets the max heap size
The values of all three are passed directly to the JVM and support format/units as
`<size>[g|G|m|M|k|K]`.
docker run -e 'JVM_OPTS=-Xmx1024M -Xms1024M' ...
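
A sketch using the newer `INIT_MEMORY`/`MAX_MEMORY` variables described above (values are illustrative):

```
docker run -d -e INIT_MEMORY=1G -e MAX_MEMORY=4G ... itzg/minecraft-server
```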
### /data ownership
In order to adapt to differences in `UID` and `GID` settings the entry script will attempt to correct ownership and writability of the `/data` directory. This logic can be disabled by setting `-e SKIP_OWNERSHIP_FIX=TRUE`.
### JVM Options
General JVM options can be passed to the Minecraft Server invocation by passing a `JVM_OPTS`
environment variable. Options like `-X` that need to proceed general JVM options can be passed
via a `JVM_XX_OPTS` environment variable.
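
For instance, both variables can be combined in one run (a hedged sketch; the `-XX` flags are placed ahead of the general options on the java command line):

```
docker run -d -e JVM_XX_OPTS="-XX:+UseG1GC" -e JVM_OPTS="-Dfile.encoding=UTF-8" ... itzg/minecraft-server
```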


@@ -1,29 +0,0 @@
version: '3'
services:
minecraft:
ports:
- "25565:25565"
volumes:
- "mcbig:/data"
environment:
EULA: "TRUE"
MAX_MEMORY: 32G
MAX_BUILD_HEIGHT: 256
VIEW_DISTANCE: 15
LEVEL_TYPE: LARGEBIOMES
MAX_PLAYERS: 100
CONSOLE: "false"
image: itzg/minecraft-server
restart: always
rcon:
image: itzg/rcon
ports:
- "4326:4326"
- "4327:4327"
volumes:
- "rcon:/opt/rcon-web-admin/db"
volumes:
mcbig:
rcon:


@@ -1,27 +1,14 @@
version: '3'
minecraft-server:
ports:
- "25565:25565"
services:
minecraft:
image: itzg/minecraft-server
ports:
- "25565:25565"
volumes:
- "mc:/data"
environment:
EULA: "TRUE"
CONSOLE: "false"
ENABLE_RCON: "true"
RCON_PASSWORD: "testing"
RCON_PORT: 28016
restart: always
rcon:
image: itzg/rcon
ports:
- "4326:4326"
- "4327:4327"
volumes:
- "rcon:/opt/rcon-web-admin/db"
environment:
EULA: "TRUE"
volumes:
mc:
rcon:
image: itzg/minecraft-server
container_name: minecraft-server
tty: true
stdin_open: true
restart: always


@@ -1,7 +1,7 @@
.[] |
select(.elements | length > 1) |
select(.elements[].elements[] | select(.class == "version" and .text == $version)) |
.elements[].elements[] |
.elements[].elements[] |
select(.class|contains("server-jar")) |
.elements[] | select(.name="a") |
.href


@@ -1,7 +1,5 @@
#!/bin/bash
shopt -s nullglob
#umask 002
export HOME=/data
@@ -9,10 +7,6 @@ if [ ! -e /data/eula.txt ]; then
if [ "$EULA" != "" ]; then
echo "# Generated via Docker on $(date)" > eula.txt
echo "eula=$EULA" >> eula.txt
if [ $? != 0 ]; then
echo "ERROR: unable to write eula to /data. Please make sure attached directory is writable by uid=${UID}"
exit 2
fi
else
echo ""
echo "Please accept the Minecraft EULA at"
@@ -24,28 +18,21 @@ if [ ! -e /data/eula.txt ]; then
fi
fi
if ! touch /data/.verify_access; then
echo "ERROR: /data doesn't seem to be writable. Please make sure attached directory is writable by uid=${UID} "
exit 2
fi
SERVER_PROPERTIES=/data/server.properties
FTB_DIR=/data/FeedTheBeast
VERSIONS_JSON=https://launchermeta.mojang.com/mc/game/version_manifest.json
echo "Checking version information."
case "X$VERSION" in
X|XLATEST|Xlatest)
VANILLA_VERSION=`curl -fsSL $VERSIONS_JSON | jq -r '.latest.release'`
VANILLA_VERSION=`curl -sSL $VERSIONS_JSON | jq -r '.latest.release'`
;;
XSNAPSHOT|Xsnapshot)
VANILLA_VERSION=`curl -fsSL $VERSIONS_JSON | jq -r '.latest.snapshot'`
VANILLA_VERSION=`curl -sSL $VERSIONS_JSON | jq -r '.latest.snapshot'`
;;
X[1-9]*)
VANILLA_VERSION=$VERSION
;;
*)
VANILLA_VERSION=`curl -fsSL $VERSIONS_JSON | jq -r '.latest.release'`
VANILLA_VERSION=`curl -sSL $VERSIONS_JSON | jq -r '.latest.release'`
;;
esac
@@ -69,32 +56,28 @@ function downloadSpigot {
case "$TYPE" in
*BUKKIT|*bukkit)
match="Craftbukkit"
downloadUrl=${BUKKIT_DOWNLOAD_URL}
;;
*)
match="Spigot"
downloadUrl=${SPIGOT_DOWNLOAD_URL}
;;
esac
if [[ -z $downloadUrl ]]; then
downloadUrl=$(restify --class=jar-div https://mcadmin.net/ | \
jq --arg version "$match $VANILLA_VERSION" -r -f /usr/share/mcadmin.jq)
if [[ -z $downloadUrl ]]; then
echo "ERROR: Version $VANILLA_VERSION is not supported for $TYPE"
echo " Refer to https://mcadmin.net/ for supported versions"
exit 2
downloadUrl=$(restify --class=jar-div https://mcadmin.net/ | \
jq --arg version "$match $VANILLA_VERSION" -r -f /usr/share/mcadmin.jq)
if [[ -n $downloadUrl ]]; then
echo "Downloading $match"
wget -q -O $SERVER "$downloadUrl"
status=$?
if [ $status != 0 ]; then
echo "ERROR: failed to download from $downloadUrl due to (error code was $status)"
exit 3
fi
else
echo "ERROR: Version $VANILLA_VERSION is not supported for $TYPE"
echo " Refer to https://mcadmin.net/ for supported versions"
exit 2
fi
echo "Downloading $match"
curl -kfsSL -o $SERVER "$downloadUrl"
status=$?
if [ ! -f $SERVER ]; then
echo "ERROR: failed to download from $downloadUrl (status=$status)"
exit 3
fi
}
function downloadPaper {
@@ -115,11 +98,11 @@ function downloadPaper {
esac
if [ $build != "nosupp" ]; then
rm $SERVER
downloadUrl=${PAPER_DOWNLOAD_URL:-https://ci.destroystokyo.com/job/PaperSpigot/$build/artifact/paperclip.jar}
curl -fsSL -o $SERVER "$downloadUrl"
if [ ! -f $SERVER ]; then
echo "ERROR: failed to download from $downloadUrl (status=$?)"
downloadUrl="https://ci.destroystokyo.com/job/PaperSpigot/$build/artifact/paperclip.jar"
wget -q -O $SERVER "$downloadUrl"
status=$?
if [ $status != 0 ]; then
echo "ERROR: failed to download from $downloadUrl due to (error code was $status)"
exit 3
fi
else
@@ -132,181 +115,46 @@ function downloadPaper {
function installForge {
TYPE=FORGE
norm=$VANILLA_VERSION
if [[ -z $FORGE_INSTALLER && -z $FORGE_INSTALLER_URL ]]; then
norm=$VANILLA_VERSION
case $VANILLA_VERSION in
*.*.*)
norm=$VANILLA_VERSION ;;
*.*)
norm=${VANILLA_VERSION}.0 ;;
esac
echo "Checking Forge version information."
case $FORGEVERSION in
RECOMMENDED)
curl -fsSL -o /tmp/forge.json http://files.minecraftforge.net/maven/net/minecraftforge/forge/promotions_slim.json
FORGE_VERSION=$(cat /tmp/forge.json | jq -r ".promos[\"$VANILLA_VERSION-recommended\"]")
echo "Checking Forge version information."
case $FORGEVERSION in
RECOMMENDED)
curl -o /tmp/forge.json -sSL http://files.minecraftforge.net/maven/net/minecraftforge/forge/promotions_slim.json
FORGE_VERSION=$(cat /tmp/forge.json | jq -r ".promos[\"$norm-recommended\"]")
if [ $FORGE_VERSION = null ]; then
FORGE_VERSION=$(cat /tmp/forge.json | jq -r ".promos[\"$norm-latest\"]")
if [ $FORGE_VERSION = null ]; then
FORGE_VERSION=$(cat /tmp/forge.json | jq -r ".promos[\"$VANILLA_VERSION-latest\"]")
if [ $FORGE_VERSION = null ]; then
echo "ERROR: Version $VANILLA_VERSION is not supported by Forge"
echo " Refer to http://files.minecraftforge.net/ for supported versions"
exit 2
fi
fi
;;
*)
FORGE_VERSION=$FORGEVERSION
;;
esac
normForgeVersion=$VANILLA_VERSION-$FORGE_VERSION-$norm
shortForgeVersion=$VANILLA_VERSION-$FORGE_VERSION
FORGE_INSTALLER="/tmp/forge-$shortForgeVersion-installer.jar"
elif [[ -z $FORGE_INSTALLER ]]; then
FORGE_INSTALLER="/tmp/forge-installer.jar"
elif [[ ! -e $FORGE_INSTALLER ]]; then
echo "ERROR: the given Forge installer doesn't exist : $FORGE_INSTALLER"
exit 2
fi
installMarker=".forge-installed-$shortForgeVersion"
if [ ! -e $installMarker ]; then
if [ ! -e $FORGE_INSTALLER ]; then
if [[ -z $FORGE_INSTALLER_URL ]]; then
echo "Downloading $normForgeVersion"
forgeFileNames="
$normForgeVersion/forge-$normForgeVersion-installer.jar
$shortForgeVersion/forge-$shortForgeVersion-installer.jar
END
"
for fn in $forgeFileNames; do
if [ $fn == END ]; then
echo "Unable to compute URL for $normForgeVersion"
exit 2
fi
downloadUrl=http://files.minecraftforge.net/maven/net/minecraftforge/forge/$fn
echo "...trying $downloadUrl"
if curl -o $FORGE_INSTALLER -fsSL $downloadUrl; then
break
fi
done
else
echo "Downloading $FORGE_INSTALLER_URL ..."
if ! curl -o $FORGE_INSTALLER -fsSL $FORGE_INSTALLER_URL; then
echo "Failed to download from given location $FORGE_INSTALLER_URL"
echo "ERROR: Version $FORGE_VERSION is not supported by Forge"
echo " Refer to http://files.minecraftforge.net/ for supported versions"
exit 2
fi
fi
fi
;;
echo "Installing Forge $shortForgeVersion using $FORGE_INSTALLER"
mkdir -p mods
tries=3
while ((--tries >= 0)); do
java -jar $FORGE_INSTALLER --installServer
if [ $? == 0 ]; then
break
fi
done
if (($tries < 0)); then
echo "Forge failed to install after several tries." >&2
exit 10
fi
# NOTE $shortForgeVersion will be empty if installer location was given to us
echo "Finding installed server jar..."
for j in *forge*.jar; do
echo "...$j"
case $j in
*installer*)
;;
*)
SERVER=$j
break
;;
esac
done
if [[ -z $SERVER ]]; then
echo "Unable to derive server jar for Forge"
exit 2
fi
echo "Using server $SERVER"
echo $SERVER > $installMarker
*)
FORGE_VERSION=$FORGEVERSION
;;
esac
# URL format changed for 1.7.10 from 10.13.2.1300
sorted=$((echo $FORGE_VERSION; echo 10.13.2.1300) | sort -V | head -1)
if [[ $norm == '1.7.10' && $sorted == '10.13.2.1300' ]]; then
# if $FORGEVERSION >= 10.13.2.1300
normForgeVersion="$norm-$FORGE_VERSION-$norm"
else
SERVER=$(cat $installMarker)
fi
}
function isURL {
local value=$1
if [[ ${value:0:8} == "https://" || ${value:0:7} = "http://" ]]; then
return 0
else
return 1
fi
}
function installFTB {
TYPE=FEED-THE-BEAST
echo "Looking for Feed-The-Beast server modpack."
if [[ -z $FTB_SERVER_MOD ]]; then
echo "Environment variable FTB_SERVER_MOD not set."
echo "Set FTB_SERVER_MOD to the file name of the FTB server modpack."
echo "(And place the modpack in the /data directory.)"
exit 2
fi
local srv_modpack=${FTB_SERVER_MOD}
if isURL ${srv_modpack}; then
case $srv_modpack in
*/download)
break;;
*)
srv_modpack=${srv_modpack}/download;;
esac
local file=$(basename $(dirname $srv_modpack))
local downloaded=/data/${file}.zip
echo "Downloading FTB modpack...
$srv_modpack -> $downloaded"
curl -sSL -o $downloaded $srv_modpack
srv_modpack=$downloaded
fi
if [[ ${srv_modpack:0:5} == "data/" ]]; then
# Prepend with "/"
srv_modpack=/${srv_modpack}
fi
if [[ ! ${srv_modpack:0:1} == "/" ]]; then
# If not an absolute path, assume file is in "/data"
srv_modpack=/data/${srv_modpack}
fi
if [[ ! -f ${srv_modpack} ]]; then
echo "FTB server modpack ${srv_modpack} not found."
exit 2
fi
if [[ ! ${srv_modpack: -4} == ".zip" ]]; then
echo "FTB server modpack ${srv_modpack} is not a zip archive."
echo "Please set FTB_SERVER_MOD to a file with a .zip extension."
exit 2
normForgeVersion="$norm-$FORGE_VERSION"
fi
echo "Unpacking FTB server modpack ${srv_modpack} ..."
mkdir -p ${FTB_DIR}
unzip -o ${srv_modpack} -d ${FTB_DIR}
cp -f /data/eula.txt ${FTB_DIR}/eula.txt
FTB_SERVER_START=${FTB_DIR}/ServerStart.sh
chmod a+x ${FTB_SERVER_START}
sed -i "s/-jar/-Dfml.queryResult=confirm -jar/" ${FTB_SERVER_START}
FORGE_INSTALLER="forge-$normForgeVersion-installer.jar"
SERVER="forge-$normForgeVersion-universal.jar"
if [ ! -e "$SERVER" ]; then
echo "Downloading $FORGE_INSTALLER ..."
wget -q http://files.minecraftforge.net/maven/net/minecraftforge/forge/$normForgeVersion/$FORGE_INSTALLER
echo "Installing $SERVER"
java -jar $FORGE_INSTALLER --installServer
fi
}
function installVanilla {
@@ -349,17 +197,12 @@ case "$TYPE" in
# normalize on Spigot for operations below
TYPE=SPIGOT
;;
FORGE|forge)
TYPE=FORGE
installForge
;;
FTB|ftb)
TYPE=FEED-THE-BEAST
installFTB
;;
VANILLA|vanilla)
installVanilla
;;
@@ -411,18 +254,12 @@ if [[ "$MODPACK" ]]; then
case "X$MODPACK" in
X[Hh][Tt][Tt][Pp]*[Zz][iI][pP])
echo "Downloading mod/plugin pack via HTTP"
echo " from $MODPACK ..."
curl -sSL -o /tmp/modpack.zip "$MODPACK"
echo "$MODPACK"
wget -q -O /tmp/modpack.zip "$MODPACK"
if [ "$TYPE" = "SPIGOT" ]; then
if [ "$REMOVE_OLD_MODS" = "TRUE" ]; then
rm -rf /data/plugins/*
fi
mkdir -p /data/plugins
unzip -o -d /data/plugins /tmp/modpack.zip
else
if [ "$REMOVE_OLD_MODS" = "TRUE" ]; then
rm -rf /data/mods/*
fi
mkdir -p /data/mods
unzip -o -d /data/mods /tmp/modpack.zip
fi
@@ -434,28 +271,6 @@ case "X$MODPACK" in
esac
fi
# If supplied with a URL for a config (simple zip of configurations), download it and unpack
if [[ "$MODCONFIG" ]]; then
case "X$MODCONFIG" in
X[Hh][Tt][Tt][Pp]*[Zz][iI][pP])
echo "Downloading mod/plugin configs via HTTP"
echo " from $MODCONFIG ..."
curl -sSL -o /tmp/modconfig.zip "$MODCONFIG"
if [ "$TYPE" = "SPIGOT" ]; then
mkdir -p /data/plugins
unzip -o -d /data/plugins /tmp/modconfig.zip
else
mkdir -p /data/config
unzip -o -d /data/config /tmp/modconfig.zip
fi
rm -f /tmp/modconfig.zip
;;
*)
echo "Invalid URL given for modconfig: Must be HTTP or HTTPS and a ZIP file"
;;
esac
fi
function setServerProp {
local prop=$1
local var=$2
@@ -480,10 +295,11 @@ if [ ! -e server.properties ]; then
setServerProp "allow-nether" "$ALLOW_NETHER"
setServerProp "announce-player-achievements" "$ANNOUNCE_PLAYER_ACHIEVEMENTS"
setServerProp "enable-command-block" "$ENABLE_COMMAND_BLOCK"
setServerProp "spawn-animals" "$SPAWN_ANIMALS"
setServerProp "spawn-animals" "$SPAWN_ANIMAILS"
setServerProp "spawn-monsters" "$SPAWN_MONSTERS"
setServerProp "spawn-npcs" "$SPAWN_NPCS"
setServerProp "generate-structures" "$GENERATE_STRUCTURES"
setServerProp "spawn-npcs" "$SPAWN_NPCS"
setServerProp "view-distance" "$VIEW_DISTANCE"
setServerProp "hardcore" "$HARDCORE"
setServerProp "max-build-height" "$MAX_BUILD_HEIGHT"
@@ -504,11 +320,11 @@ if [ ! -e server.properties ]; then
if [ -n "$LEVEL_TYPE" ]; then
# normalize to uppercase
LEVEL_TYPE=$( echo ${LEVEL_TYPE} | tr '[:lower:]' '[:upper:]' )
LEVEL_TYPE=${LEVEL_TYPE^^}
echo "Setting level type to $LEVEL_TYPE"
# check for valid values and only then set
case $LEVEL_TYPE in
DEFAULT|FLAT|LARGEBIOMES|AMPLIFIED|CUSTOMIZED|BIOMESOP)
DEFAULT|FLAT|LARGEBIOMES|AMPLIFIED|CUSTOMIZED)
sed -i "/level-type\s*=/ c level-type=$LEVEL_TYPE" /data/server.properties
;;
*)
@@ -543,11 +359,10 @@ if [ ! -e server.properties ]; then
if [ -n "$MODE" ]; then
echo "Setting mode"
MODE_LC=$( echo $MODE | tr '[:upper:]' '[:lower:]' )
case $MODE_LC in
case ${MODE,,?} in
0|1|2|3)
;;
su*)
s*)
MODE=0
;;
c*)
@@ -556,7 +371,7 @@ if [ ! -e server.properties ]; then
a*)
MODE=2
;;
sp*)
s*)
MODE=3
;;
*)
@@ -565,7 +380,7 @@ if [ ! -e server.properties ]; then
;;
esac
sed -i "/^gamemode\s*=/ c gamemode=$MODE" $SERVER_PROPERTIES
sed -i "/gamemode\s*=/ c gamemode=$MODE" /data/server.properties
fi
fi
@@ -593,21 +408,21 @@ if [ -n "$ICON" -a ! -e server-icon.png ]; then
fi
fi
# Make sure files exist and are valid JSON (for pre-1.12 to 1.12 upgrades)
for j in *.json; do
if [[ $(python -c "print open('$j').read().strip()==''") = True ]]; then
echo "Fixing JSON $j"
echo '[]' > $j
fi
done
# Make sure files exist to avoid errors
if [ ! -e banned-players.json ]; then
echo '' > banned-players.json
fi
if [ ! -e banned-ips.json ]; then
echo '' > banned-ips.json
fi
# If any modules have been provided, copy them over
mkdir -p /data/mods
for m in /mods/*.{jar,zip}
[ -d /data/mods ] || mkdir /data/mods
for m in /mods/*.jar
do
if [ -f "$m" -a ! -f "/data/mods/$m" ]; then
if [ -f "$m" ]; then
echo Copying mod `basename "$m"`
cp "$m" /data/mods
cp -f "$m" /data/mods
fi
done
[ -d /data/config ] || mkdir /data/config
@@ -626,33 +441,16 @@ if [ "$TYPE" = "SPIGOT" ]; then
fi
fi
EXTRA_ARGS=""
# Optional disable console
if [[ ${CONSOLE} = false || ${CONSOLE} = FALSE ]]; then
EXTRA_ARGS+="--noconsole"
fi
# Optional disable GUI for headless servers
if [[ ${GUI} = false || ${GUI} = FALSE ]]; then
EXTRA_ARGS="${EXTRA_ARGS} nogui"
fi
# put these prior JVM_OPTS at the end to give any memory settings there higher precedence
echo "Setting initial memory to ${INIT_MEMORY:-${MEMORY}} and max to ${MAX_MEMORY:-${MEMORY}}"
JVM_OPTS="-Xms${INIT_MEMORY:-${MEMORY}} -Xmx${MAX_MEMORY:-${MEMORY}} ${JVM_OPTS}"
if [[ ${TYPE} == "FEED-THE-BEAST" ]]; then
cp -f $SERVER_PROPERTIES ${FTB_DIR}/server.properties
cp -f /data/{eula,ops,white-list}.txt ${FTB_DIR}/
cd ${FTB_DIR}
echo "Running FTB server modpack start ..."
exec sh ${FTB_SERVER_START}
if [[ $CONSOLE = false ]]; then
EXTRA_ARGS=--noconsole
else
# If we have a bootstrap.txt file... feed that in to the server stdin
if [ -f /data/bootstrap.txt ];
then
exec java $JVM_XX_OPTS $JVM_OPTS -jar $SERVER "$@" $EXTRA_ARGS < /data/bootstrap.txt
else
exec java $JVM_XX_OPTS $JVM_OPTS -jar $SERVER "$@" $EXTRA_ARGS
fi
EXTRA_ARGS=""
fi
# If we have a bootstrap.txt file... feed that in to the server stdin
if [ -f /data/bootstrap.txt ];
then
exec java $JVM_OPTS -jar $SERVER "$@" $EXTRA_ARGS < /data/bootstrap.txt
else
exec java $JVM_OPTS -jar $SERVER "$@" $EXTRA_ARGS
fi


@@ -1,13 +1,13 @@
#!/bin/sh
set -e
sed -i "/^minecraft/s/:1000:1000:/:${UID}:${GID}:/g" /etc/passwd
sed -i "/^minecraft/s/:1000:/:${GID}:/g" /etc/group
usermod --uid $UID minecraft
groupmod --gid $GID minecraft
if [ "$SKIP_OWNERSHIP_FIX" != "TRUE" ]; then
fix_ownership() {
dir=$1
if ! su-exec minecraft test -w $dir; then
if ! sudo -u minecraft test -w $dir; then
echo "Correcting writability of $dir ..."
chown -R minecraft:minecraft $dir
chmod -R u+w $dir
@@ -19,4 +19,4 @@ if [ "$SKIP_OWNERSHIP_FIX" != "TRUE" ]; then
fi
echo "Switching to user 'minecraft'"
su-exec minecraft /start-minecraft $@
exec sudo -E -u minecraft /start-minecraft "$@"


@@ -1,6 +1,6 @@
FROM itzg/gvm
LABEL maintainer "itzg"
MAINTAINER itzg
RUN ["/run", "install", "springboot"]


@@ -1,12 +1,11 @@
FROM openjdk:8-jre
FROM itzg/ubuntu-openjdk-7
LABEL maintainer "itzg"
MAINTAINER itzg
ENV TITAN_VERSION 0.5.4
ADD http://s3.thinkaurelius.com/downloads/titan/titan-$TITAN_VERSION-hadoop2.zip /tmp/titan.zip
RUN unzip -q /tmp/titan.zip -d /opt && \
rm /tmp/titan.zip
RUN wget -q -O /tmp/titan.zip http://s3.thinkaurelius.com/downloads/titan/titan-$TITAN_VERSION-hadoop2.zip
RUN unzip -q /tmp/titan.zip -d /opt && rm /tmp/titan.zip
ENV TITAN_HOME /opt/titan-$TITAN_VERSION-hadoop2
WORKDIR $TITAN_HOME


@@ -1,6 +1,6 @@
FROM itzg/ubuntu-openjdk-7
LABEL maintainer "itzg"
MAINTAINER itzg
ENV APT_GET_UPDATE 2014-07-19


@@ -1,6 +1,6 @@
FROM ubuntu:trusty
LABEL maintainer "itzg"
MAINTAINER itzg
ENV APT_GET_UPDATE 2015-10-29
RUN apt-get update