Compare commits

..

123 Commits

Author SHA1 Message Date
Geoff Bourne
9ee907783d [mc] Added issues link 2017-09-02 12:29:50 -05:00
Geoff Bourne
463cc62b75 [mc] Check writability of /data
Document JVM_OPTS
For #176 and #177
2017-09-02 12:24:09 -05:00
Geoff Bourne
5afd98edd8 [es] Upgrade to 5.5.1 2017-08-12 13:58:39 -05:00
Geoff Bourne
8b9051d72e Merge pull request #174 from Rob5Underscores/master
Add support for a zip file of mod configs to be supplied!
2017-07-17 06:56:44 -05:00
Rob Weddell
d6392f3aa9 Add support for a zip file of mod configs to be supplied!
This was added such that Sponge servers are supported.
(For Sponge: TYPE=FORGE, MODPACK=[zip including sponge plugins and jar], MODCONFIG=[sponge configuration])
2017-07-17 11:48:23 +01:00
Geoff Bourne
fe808be91b Merge pull request #173 from opHASnoNAME/master
Set UID for ES_DEFAULT_USER to 1100, avoid conflicts with debian host…
2017-07-16 14:16:24 -05:00
Arne Riemann
2cab10e906 Forgot -u for UID 2017-07-16 16:09:28 +02:00
Geoff Bourne
7f39d1f22e [mc] Properly check empty json files to fix
For #162
2017-07-15 20:02:40 -05:00
Arne Riemann
a9184368c1 Set UID for ES_DEFAULT_USER to 1100, avoid conflicts with debian host systems 2017-07-15 06:38:36 +02:00
Geoff Bourne
caf2fc2ab6 [es] Correcting old-ADD tar file path 2017-07-13 09:09:10 -05:00
Geoff Bourne
5dd8141e5e Merge pull request #169 from manojsb/patch-1
Upgrade to ES 5.5.0
2017-07-13 08:53:42 -05:00
Geoff Bourne
2ff28675d4 [es] Adapt to new Dockerfile ADD behavior 2017-07-13 08:52:02 -05:00
Manoj Kumar
519bd986b5 Update Dockerfile
Updated to v5.5.0
2017-07-12 11:41:13 +05:30
Geoff Bourne
e41efba176 [es] Using NON_DATA in 3x1GB stack
For #165
2017-07-04 18:08:10 -05:00
Geoff Bourne
0502813496 [es] Add NON_DATA type
For #165
2017-07-04 18:06:08 -05:00
Geoff Bourne
932a6828f9 [mc] Auto fix pre 1.12 empty json
For #162
2017-07-03 14:20:12 -05:00
Geoff Bourne
d176ef3f6a Merge pull request #167 from lukascernydis/master
Added option REMOVE_OLD_MODS
2017-07-01 20:09:12 -05:00
Lukáš Černý
deb98268f6 Added option REMOVE_OLD_MODS 2017-07-02 01:32:24 +02:00
Geoff Bourne
e1531c3237 [mc] Switch to openjdk alpine base image 2017-06-29 19:45:51 -05:00
Geoff Bourne
47af45a430 Merge pull request #166 from macman31/patch-1
Fix typos in config file generation
2017-06-29 19:19:11 -05:00
ATMD
0195b42eea Fix regex to change gamemode value
The sed command `sed -i "/gamemode\s*=/ c gamemode=$MODE" $SERVER_PROPERTIES` matches all lines containing `gamemode=`.
This includes the `force-gamemode` line, which is set with `setServerProp "force-gamemode" "$FORCE_GAMEMODE"`.
As a result, the `force-gamemode` line is erased and the `gamemode=value` line is duplicated; the server then cleans this up at startup, deduplicating the `gamemode` line and creating a new `force-gamemode` line with a default value.
This fix ensures that only the `gamemode=` line is modified when changing the gamemode value.
2017-06-29 21:53:26 +02:00
ATMD
f875af5cdb Delete repetition of spawn-npcs 2017-06-29 20:25:24 +02:00
ATMD
32a918b902 Fix typo SPAWN_ANIMIALS 2017-06-29 19:56:14 +02:00
Geoff Bourne
d09a56075f [es] Add an example minimal composition 2017-06-28 23:17:39 -05:00
Geoff Bourne
e261fae348 [es] Upgrade to 5.4.2 2017-06-21 19:06:13 -05:00
Geoff Bourne
5b8668d73f [mc] Add dinnerbone's mcstatus and use it for HEALTHCHECK
Part of #159
2017-06-11 11:03:00 -05:00
Geoff Bourne
123292b56b Merge pull request #156 from dirkcjelli/mc_fixes
minecraft-server: Fixed FORGEVERSION case, add support for BIOMESOP
Fixes #139
2017-06-07 21:00:39 -05:00
Sascha Askani
0e0828f47f Revert "Fix FORGEVERSION -> FORGE_VERSION, remove unneeded statement in default"
This reverts commit cb6643a345.
2017-06-07 20:55:39 -05:00
Geoff Bourne
532c9fa69a Merge branch 'master' into mc_fixes 2017-06-07 20:54:35 -05:00
Geoff Bourne
e509563b10 [mc] Correctly pre-populate json config files
For #158
2017-06-07 20:43:35 -05:00
Geoff Bourne
899f31917c [mc] Add some FORGE_INSTALLER lookup messages 2017-06-07 08:07:44 -05:00
Geoff Bourne
e6ca9a1c6d [mc] glob for forge jar, but exclude installer 2017-06-06 23:09:45 -05:00
Geoff Bourne
24c68b9c2c [mc] Avoid trying to run forge installer as server 2017-06-06 22:36:39 -05:00
Geoff Bourne
2891e1ac3e [mc] Check for valid $FORGE_INSTALLER 2017-06-05 21:34:15 -05:00
Sascha Askani
da9618c08b add support for BIOMESOP level type 2017-06-05 16:09:26 +02:00
Sascha Askani
cb6643a345 Fix FORGEVERSION -> FORGE_VERSION, remove unneeded statement in default
case
2017-06-05 16:08:26 +02:00
Geoff Bourne
71527b87c1 [mc] Add support for custom Forge installer sources
For #154
2017-06-03 20:51:54 -05:00
Geoff Bourne
df25a22634 [es] Upgrade to 5.4.1 2017-06-02 12:02:11 -05:00
Geoff Bourne
5c6a2cf44f [mc] Use a probing approach to find Forge URL and launcher name
For #91 and #153
2017-05-28 20:55:09 -05:00
Geoff Bourne
536a7f6095 [mc] Adapt Forge install run for 1.6-1.10
For #153
2017-05-28 11:37:08 -05:00
Geoff Bourne
df9e725baf [es] Add kibana to the compose example 2017-05-27 10:21:59 -05:00
Geoff Bourne
156716a7d8 Merge branch 'feat/ftb-download-config'
Fixes #151
2017-05-26 20:09:03 -05:00
Geoff Bourne
0876ef7704 Copy additional meta files into FTB directory 2017-05-26 20:07:26 -05:00
Geoff Bourne
5c3ea25ff3 Merge pull request #152 from dschaper/patch-3
Add optional `nogui`
2017-05-20 11:20:20 -05:00
Geoff Bourne
bd8e0cf5d2 Fix FTB download and copy adjusted properties into FTB dir 2017-05-20 10:56:32 -05:00
Dan Schaper
1d5c4e3b0b Check both upper and lower arguments 2017-05-19 21:32:52 -07:00
Dan Schaper
6a565692a0 Add optional nogui
If set via ENV, disable GUI on server.
2017-05-19 19:53:45 -07:00
Geoff Bourne
cf68446a00 Add option to download FTB server zip 2017-05-19 20:44:59 -05:00
Geoff Bourne
c4a92f6706 Merge pull request #150 from jackwilsdon/curl-insecure
Use cURL insecurely when downloading
2017-05-12 17:14:50 -05:00
Jack Wilsdon
7bb397a031 Use cURL insecurely when downloading
This fixes an error caused by the expired certificate on ci.mcadmin.net.
2017-05-12 21:10:57 +01:00
Geoff Bourne
d86f034e96 [mc] Add python package and improve Forge install robustness
For #148
2017-05-08 22:35:37 -05:00
Geoff Bourne
20385507d8 [mc] Add mysql-client package for mods that need it
For #145
2017-05-05 20:46:35 -05:00
Geoff Bourne
91def1176d [mc] Run FTB server in its directory to support legacy
For #144
2017-05-05 20:18:31 -05:00
Geoff Bourne
2e03ee4197 [es] Upgrade to 5.4.0 2017-05-04 21:43:01 -05:00
Geoff Bourne
e8dd60a831 [cass] Remove deprecated image from build script 2017-05-02 19:49:40 -05:00
Geoff Bourne
dc15094b8a [gremlin] Switch to openjdk:8 base 2017-05-02 19:42:47 -05:00
Geoff Bourne
fc807429e4 Merge pull request #143 from ben-st/label
change deprecated maintainer to label
2017-05-02 19:24:12 -05:00
Benjamin
5ec21fc0ba change deprecated maintainer to label 2017-05-02 21:29:17 +02:00
Geoff Bourne
3a0a8a9ced [es] avoid auto-test for now 2017-04-29 14:16:08 -05:00
Geoff Bourne
b9b05dbdfc [es] Upgrade to 5.3.2 2017-04-29 08:17:59 -05:00
Geoff Bourne
179e72cda0 [cass] discontinue cassandra image 2017-04-29 07:52:59 -05:00
Geoff Bourne
2b9514ab0b [es] Upgrade to 5.3.1 2017-04-21 20:08:15 -05:00
Geoff Bourne
8a04a9f72e Merge pull request #141 from gkawamoto/master
Subnet ignoring
2017-04-19 22:10:23 -05:00
Gustavo Kawamoto
3a7af8e8d6 Subnet ignoring
Needed for Cattle support
2017-04-19 09:59:32 -03:00
Geoff Bourne
615d12bce3 [mc] Upgrade to rcon-cli 1.3 2017-04-09 08:30:20 -05:00
Geoff Bourne
72d055ac19 [mc] Add support for DOWNLOAD_*_URL overrides
Also switched from wget to curl
2017-04-08 07:20:04 -05:00
Geoff Bourne
80c18004c1 [mc] Integrate rcon-cli 2017-04-07 19:35:15 -05:00
Geoff Bourne
97e9b2901c [mc] Fixed docker-compose example 2017-04-06 12:41:21 -05:00
Geoff Bourne
027d94cc77 Initial content of minecraft-server container diagram 2017-04-05 10:36:03 -05:00
Geoff Bourne
66261af03d Added Minecraft server containment diagram 2017-04-05 10:31:21 -05:00
Geoff Bourne
290e2c734a [mc] Improved UID/GID handling in passwd/group files
For #136
2017-04-04 22:11:05 -05:00
Geoff Bourne
54d19715c7 [es] Upgrade to 5.3.0 2017-03-28 18:39:21 -05:00
Geoff Bourne
bbe1533f91 [mc] Adjust mem with MEMORY and INIT_/MAX_MEMORY
For #126
2017-03-19 09:35:58 -05:00
Geoff Bourne
97040f61ed [mc] Remove -u from unzip in FTB for Alpine compatibility
For #132
2017-03-09 18:56:19 -06:00
Geoff Bourne
55801ac11c [es] Add ingest node support
For #131
2017-03-03 23:35:12 -06:00
Geoff Bourne
07c32d8ee4 [mc] Fixed TYPE=FORGE for alpine base image
For #128
2017-02-28 18:32:15 -06:00
Geoff Bourne
2e631bcbd9 [es] Upgrade to 5.2.2 2017-02-28 18:12:29 -06:00
Geoff Bourne
c96c630fe5 Merged branch master into master 2017-02-22 16:49:21 -06:00
Geoff Bourne
f69e75cfc1 [mc] Add rcon to big composition 2017-02-22 16:49:08 -06:00
Geoff Bourne
6157a693f1 Merge pull request #115 from ikke-t/alpine
Switch to Alpine linux for container base
2017-02-22 13:23:06 -06:00
Geoff Bourne
854a158d3d Merge branch 'master' into alpine 2017-02-22 13:16:38 -06:00
Geoff Bourne
18919ef33c [mc] Upgrade to Alpine compatible restify 2017-02-22 13:14:48 -06:00
Geoff Bourne
32b0737e70 [mc] Add rcon to docker-compose.yml example 2017-02-18 11:53:44 -06:00
Geoff Bourne
79eb164e90 [mc] Adding example compose file for large minecraft server 2017-02-18 11:00:55 -06:00
Geoff Bourne
e3296c3cc0 [es] Upgrade to 5.2.1 2017-02-15 19:30:33 -06:00
Geoff Bourne
f5dbbcc2c6 [mc] Use G1 GC by default and simplify max memory setting
For #126
2017-02-14 22:15:36 -06:00
Geoff Bourne
134eaedf23 Merge pull request #125 from HenryGessau/FTB
Add Feed-The-Beast (FTB) server modpack support
2017-02-13 22:23:33 -06:00
Henry Gessau
4a89f3c579 Add Feed-The-Beast (FTB) server modpack support
The popular mod site https://www.feed-the-beast.com provides server
modpacks to go with their client modpacks. These server modpacks
include a custom start script that must be used to launch the server.
2017-02-13 22:13:53 -05:00
Geoff Bourne
17420ed590 [es] Added HEALTHCHECK 2017-02-04 14:37:45 -06:00
Geoff Bourne
2125fa0855 [es] Tweak spacing on README 2017-02-02 20:53:47 -06:00
Geoff Bourne
5c14c30a78 [es] Include docker stack deploy instructions 2017-02-02 20:48:17 -06:00
Geoff Bourne
788d06c086 [es] Upgrade to 5.2.0
For #124
2017-02-02 20:28:12 -06:00
Geoff Bourne
1a88c96beb Merged branch master into master 2017-02-02 19:31:11 -06:00
Geoff Bourne
b8d69278e4 [es] Fix address binding for Swarm Mode 2017-01-23 10:25:50 -06:00
Geoff Bourne
4a7ecffcbb [es][kibana] Upgrade to 5.1.2 2017-01-15 07:04:27 -06:00
Geoff Bourne
9fe0021a7a [mc] Upgrade base image to openjdk
Fixes #122
2017-01-12 19:40:58 -06:00
Geoff Bourne
262816bd2d [es] Add note about increasing mmap count 2017-01-04 20:43:23 -06:00
Geoff Bourne
e44d27c00a [es] Fine java security grants
Fixes #119
* also switch/upgrade base to openjdk 8u111
2017-01-04 20:37:20 -06:00
Geoff Bourne
5062bc91ab [es] Fixed DISCOVER_HTTP_IP typo in README
Fixes #120
2017-01-04 19:56:33 -06:00
Geoff Bourne
dde4132d2c [cass] Switch mirror URL to pair.com 2017-01-01 13:29:52 -06:00
Geoff Bourne
9bb3628792 [kibana] Upgrade to 5.1.1 2016-12-09 15:23:06 -06:00
Geoff Bourne
03a5bb1ab0 [es] Upgrade to 5.1.1 2016-12-09 15:15:38 -06:00
Geoff Bourne
3a9a1dc043 [es] small command line fix (#117)
small command line fix
2016-12-09 10:55:08 -06:00
Eric Pugh
36ced1c630 small command line fix 2016-12-09 10:45:34 -05:00
Ilkka Tengvall
2d8b3d7275 Remove commented lines 2016-12-07 19:10:18 +02:00
Ilkka Tengvall
f48eedee78 restored the java heap size
I reduced it for RasPi, but forgot that it was left low here too. Restored.
2016-12-07 19:06:01 +02:00
Ilkka Tengvall
08d459c373 changed to work on alpine container 2016-12-07 00:31:06 +02:00
Geoff Bourne
0cb0755739 [kibana] Upgrade to 5.0.2 2016-11-30 19:21:16 -06:00
Geoff Bourne
3e8eca6e28 [es] Upgrade to 5.0.2
Fixes #113
2016-11-29 22:05:13 -06:00
Geoff Bourne
a179f5f7ba [cass] Upgrade to 2.2.8 2016-11-29 19:14:05 -06:00
Geoff Bourne
f2955bcc2f [es] Bump to 5.0.1
* explicitly configure default JVM heap size
* /conf needed ownership fixed
For #112
2016-11-28 20:37:44 -07:00
Geoff Bourne
a0c1ed88d1 [kibana] Adjust binding hosting from old default 2016-11-13 15:18:37 -06:00
Geoff Bourne
de6d7a64ac Merge branch 'master' of github.com:itzg/dockerfiles 2016-11-13 14:59:47 -06:00
Geoff Bourne
3bf560bfbe [kibana] Upgrade to 5.0.0
* also switched to openjdk 8 base image
2016-11-13 14:59:22 -06:00
Geoff Bourne
f1b58323d3 [es] Fixed TYPE usage
Removed last of the --'s parameters
2016-11-05 10:46:32 -05:00
Geoff Bourne
d4a888073b [es] Upgrade to 5.0.0 2016-11-04 20:18:43 -05:00
Geoff Bourne
54844930bb [jenkins] Working around hub build issue 2016-10-23 09:52:25 -05:00
Geoff Bourne
b26714c9c1 [jenkins] Move COPY later in build 2016-10-23 09:36:16 -05:00
Geoff Bourne
f03a8f0edc [jenkins] Switch local ADD to COPY 2016-10-23 09:29:46 -05:00
Geoff Bourne
048beefadc [jenkins] Pre-installing graphviz (for dependency graphs, etc) 2016-10-23 00:35:02 -05:00
Geoff Bourne
de3545e8d9 Merge pull request #109 from manuelgu/patch-1
Fix formatting in minecraft README
2016-10-18 13:54:57 -05:00
manuelgu
e766301d1e Fix formatting 2016-10-18 13:59:43 +02:00
Geoff Bourne
3348083424 [mc] Fix game mode shorthand handling of s*
For #107
2016-10-02 07:50:19 -05:00
34 changed files with 1045 additions and 331 deletions


@@ -1,6 +1,10 @@
dockerfiles
===========
Contains the various Dockerfile definitions I'm maintaining.
This repository contains the various Dockerfile definitions I'm maintaining.
[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/itzg/dockerfiles?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
### Discontinued
##### Cassandra
I have found the [official image](https://hub.docker.com/_/cassandra/) to be quite sufficient

build

@@ -5,10 +5,8 @@ pkgs="$pkgs minecraft-server"
pkgs="$pkgs elasticsearch"
pkgs="$pkgs kibana"
pkgs="$pkgs titan-gremlin"
pkgs="$pkgs cassandra"
for p in $pkgs
do
docker build -t itzg/$p $p
done


@@ -1,26 +0,0 @@
FROM java:openjdk-8u72-jdk
MAINTAINER itzg
ENV CASSANDRA_VERSION 2.2.7
RUN wget -qO /tmp/apache-cassandra.tgz http://mirrors.ibiblio.org/apache/cassandra/$CASSANDRA_VERSION/apache-cassandra-$CASSANDRA_VERSION-bin.tar.gz
RUN tar -C /opt -zxf /tmp/apache-cassandra.tgz && \
rm /tmp/apache-cassandra.tgz
RUN mv /opt/apache-cassandra-$CASSANDRA_VERSION /opt/cassandra
ENV CASSANDRA_HOME /opt/cassandra
ENV CASSANDRA_CONF /conf
ENV CASSANDRA_DATA /data
WORKDIR $CASSANDRA_HOME
RUN ln -s $CASSANDRA_HOME/bin/* /usr/local/bin
VOLUME ["/data","/conf"]
EXPOSE 9042 9160 7000 7001
ADD cassandra.in.sh $CASSANDRA_HOME/cassandra.in.sh
RUN mv $CASSANDRA_HOME/bin/cassandra.in.sh $CASSANDRA_HOME/bin/orig.cassandra.in.sh
CMD ["/opt/cassandra/bin/cassandra", "-f"]


@@ -1,9 +0,0 @@
Yet another Cassandra image, but this one got container and non-container access right.
# Basic Usage
To support access from both Docker containers and external, non-Docker clients:
docker run -d --name cassandra -e PUBLISH_AS=192.168.59.103 -p 9160:9160 itzg/cassandra
replacing `192.168.59.103` with your Docker host's LAN IP address.


@@ -1,24 +0,0 @@
#!/bin/bash
cassYml=$CASSANDRA_HOME/conf/cassandra.yaml
privateAddr=$(hostname -i)
seeds=${SEEDS:-${PUBLISH_AS:-$privateAddr}}
sed -i -e "s/- seeds:.*/- seeds: \"$seeds\"/" \
-e "s/listen_address:.*/listen_address: $privateAddr/" \
-e "s/rpc_address:.*/rpc_address: $privateAddr/" \
-e "s/start_rpc:.*/start_rpc: true/" \
-e "s#- /var/lib/cassandra/data#- $CASSANDRA_DATA#" \
$cassYml
if [ -n "$PUBLISH_AS" ]; then
sed -i -e "s/\(\s*#\)\?\s*broadcast_address:.*/broadcast_address: $PUBLISH_AS/" $cassYml
fi
# Copy over our tweaked files, but non-clobbering to let user have ultimate control
cp -rn $CASSANDRA_HOME/conf/* $CASSANDRA_CONF
# source the original
. $CASSANDRA_HOME/bin/orig.cassandra.in.sh


@@ -1,26 +1,39 @@
FROM java:8u111-jre-alpine
FROM openjdk:8u121-jre-alpine
MAINTAINER itzg
LABEL maintainer "itzg"
ENV ES_VERSION=2.4.3
RUN apk -U add bash
ADD https://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/distribution/tar/elasticsearch/$ES_VERSION/elasticsearch-$ES_VERSION.tar.gz /tmp/es.tgz
ARG ES_VERSION=5.5.1
# avoid conflicts with debian host systems when mounting to host volume
ARG DEFAULT_ES_USER_UID=1100
ADD https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-$ES_VERSION.tar.gz /tmp
# need to adapt to both Docker's new remote-unpack-ADD behavior and the old behavior
RUN cd /usr/share && \
tar xf /tmp/es.tgz && \
rm /tmp/es.tgz
COPY start /start
if [ -f /tmp/elasticsearch-$ES_VERSION.tar.gz ]; then \
tar xf /tmp/elasticsearch-$ES_VERSION.tar.gz; \
else mv /tmp/elasticsearch-${ES_VERSION} /usr/share; \
fi && \
rm -f /tmp/elasticsearch-$ES_VERSION.tar.gz
EXPOSE 9200 9300
ENV ES_HOME=/usr/share/elasticsearch-$ES_VERSION \
OPTS=-Dnetwork.host=_non_loopback_ \
DEFAULT_ES_USER=elasticsearch
HEALTHCHECK --timeout=5s CMD wget -q -O - http://$HOSTNAME:9200/_cat/health
RUN adduser -S -s /bin/sh $DEFAULT_ES_USER
ENV ES_HOME=/usr/share/elasticsearch-$ES_VERSION \
DEFAULT_ES_USER=elasticsearch \
DEFAULT_ES_USER_UID=$DEFAULT_ES_USER_UID \
ES_JAVA_OPTS="-Xms1g -Xmx1g"
RUN adduser -S -s /bin/sh -u $DEFAULT_ES_USER_UID $DEFAULT_ES_USER
VOLUME ["/data","/conf"]
WORKDIR $ES_HOME
COPY java.policy /usr/lib/jvm/java-1.8-openjdk/jre/lib/security/
COPY start /start
COPY log4j2.properties $ES_HOME/config/
CMD ["/start"]


@@ -1,5 +1,10 @@
This Docker image provides an easily configurable Elasticsearch node. Via port mappings, it is easy to create an arbitrarily sized cluster of nodes. As long as the versions match, you can mix-and-match "real" Elasticsearch nodes with container-ized ones.
# NOTE for use on Linux hosts
Elasticsearch 5.x requires that the virtual memory mmap count is set sufficiently for stable,
production use. [Refer to this guide for more information](https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html).
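For example, this image's startup script checks for a minimum `vm.max_map_count` of 262144, so on a typical Linux Docker host the setting can be raised with something like:
```
sysctl -w vm.max_map_count=262144
```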
# Basic Usage
To start an Elasticsearch data node that listens on the standard ports on your host's network interface:
@@ -29,9 +34,9 @@ Where `DOCKERHOST` would be the actual hostname of your host running Docker.
To run a multi-node cluster (3-node in this example) on a single Docker machine use:
docker run -d --name es0 -p 9200:9200 es
docker run -d --name es1 --link es0 -e UNICAST_HOSTS=es0 es
docker run -d --name es2 --link es0 -e UNICAST_HOSTS=es0 es
docker run -d --name es0 -p 9200:9200 itzg/elasticsearch
docker run -d --name es1 --link es0 -e UNICAST_HOSTS=es0 itzg/elasticsearch
docker run -d --name es2 --link es0 -e UNICAST_HOSTS=es0 itzg/elasticsearch
and then check the cluster health, such as http://192.168.99.100:9200/_cluster/health?pretty
@@ -49,6 +54,40 @@ and then check the cluster health, such as http://192.168.99.100:9200/_cluster/h
"unassigned_shards" : 0
}
If you have a Docker Swarm cluster already initialized you can download this
[docker-compose.yml](https://raw.githubusercontent.com/itzg/dockerfiles/master/elasticsearch/docker-compose.yml) and deploy a cluster using:
docker stack deploy -c docker-compose.yml es
With a `docker service ls` you can confirm 1 master, 2 data, and 1 gateway nodes are running:
```
ID NAME MODE REPLICAS IMAGE
9nwnno8hbqgk es_kibana replicated 1/1 kibana:latest
f5x7nipwmvkr es_gateway replicated 1/1 es
om8rly2yxylw es_data replicated 2/2 es
tdvfilj370yn es_master replicated 1/1 es
```
As you can see, there is also a Kibana instance included and available at port 5601.
# Health Checks
This container declares a [HEALTHCHECK](https://docs.docker.com/engine/reference/builder/#/healthcheck) that queries the `_cat/health`
endpoint for a quick, one-line gauge of health every 30 seconds.
The current health of the container is shown in the `STATUS` column of `docker ps`, such as
Up 14 minutes (healthy)
You can also check the history of health checks from `inspect`, such as:
```
> docker inspect -f "{{json .State.Health}}" es
{"Status":"healthy","FailingStreak":0,"Log":[...
```
# Configuration Summary
## Ports
@@ -142,12 +181,14 @@ To simplify all that, this image provides a `TYPE` variable to let you amongst t
* `MASTER` : master-eligible, but holds no data. It is good to have three or more of these in a
large cluster
* `DATA` (or `NON_MASTER`) : holds data and serves search/index requests. Scale these out for elastic-y goodness.
* `GATEWAY` : only operates as a client node or a "smart router". These are the ones whose HTTP port 9200 will need to be exposed
* `NON_DATA` : performs all duties except holding data
* `GATEWAY` (or `COORDINATING`) : only operates as a client node or a "smart router". These are the ones whose HTTP port 9200 will need to be exposed
* `INGEST` : operates only as an ingest node and is not master or data eligible
A [Docker Compose](https://docs.docker.com/compose/overview/) file will serve as a good example of these three node types:
```
version: '2'
version: '3'
services:
gateway:
@@ -170,6 +211,14 @@ services:
environment:
UNICAST_HOSTS: master,gateway
TYPE: DATA
kibana:
image: kibana
ports:
- "5601:5601"
environment:
ELASTICSEARCH_URL: http://gateway:9200
```
## Minimum Master Nodes
@@ -182,18 +231,28 @@ Using the Docker Compose file above, a value of `2` is appropriate when scaling
docker-compose scale master=3
## Auto transport/http discovery with Swarm Mode
## Multiple Network Binding, such as Swarm Mode
When using Docker Swarm mode (starting with 1.12), the overlay and ingress network interfaces are assigned
multiple IP addresses. As a result, it creates confusion for the transport publish logic even when using
the special value `_eth0_`.
When using Docker Swarm mode the container is presented with multiple ethernet
devices. By default, all global, routable IP addresses are configured for
Elasticsearch to use as `network.host`.
To resolve this, add
That discovery can be overridden by providing a specific ethernet device name
to `DISCOVER_TRANSPORT_IP` and/or `DISCOVER_HTTP_IP`, such as
-e DISCOVER_TRANSPORT_IP=eth0
-e DISCOVER_HTTP_IP=eth2
replacing `eth0` with another interface within the container, if needed.
## Heap size and other JVM options
The same can be done for publish/binding of the http module by adding:
By default this image will run Elasticsearch with a Java heap size of 1 GB. If that value
or any other JVM options need to be adjusted, then replace the `ES_JAVA_OPTS`
environment variable.
-e DISCOVERY_HTTP_IP=eth2
For example, this would allow for the use of 16 GB of heap:
-e ES_JAVA_OPTS="-Xms16g -Xmx16g"
Refer to [this page](https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html)
for more information about why both the minimum and maximum sizes were set to
the same value.


@@ -0,0 +1,35 @@
# This composition is known to work on a Swarm cluster consisting of
# 3 VM nodes with 1GB allocated to each.
version: '3'
services:
master:
image: itzg/elasticsearch
environment:
UNICAST_HOSTS: master
MIN_MASTERS: 1
ES_JAVA_OPTS: -Xms756m -Xmx756m
TYPE: NON_DATA
ports:
- "9200:9200"
- "9300:9300"
deploy:
replicas: 1
update_config:
parallelism: 1
data:
image: itzg/elasticsearch
deploy:
mode: global
update_config:
parallelism: 1
environment:
TYPE: DATA
UNICAST_HOSTS: master
ES_JAVA_OPTS: -Xms512m -Xmx512m
kibana:
image: kibana
ports:
- "5601:5601"
environment:
ELASTICSEARCH_URL: http://master:9200


@@ -0,0 +1,35 @@
version: '3'
services:
master:
build: .
environment:
TYPE: MASTER
UNICAST_HOSTS: master
MIN_MASTERS: 1
data:
build: .
environment:
TYPE: DATA
UNICAST_HOSTS: master
gateway:
build: .
ports:
- "9200:9200"
- "9300:9300"
environment:
TYPE: GATEWAY
UNICAST_HOSTS: master
ingest:
build: .
ports:
- "9222:9200"
environment:
TYPE: INGEST
UNICAST_HOSTS: master
kibana:
image: kibana:5.5.1
ports:
- "5601:5601"
environment:
ELASTICSEARCH_URL: http://gateway:9200


@@ -0,0 +1,21 @@
version: '3'
services:
master:
image: itzg/elasticsearch
environment:
UNICAST_HOSTS: master
MIN_MASTERS: 1
ports:
- "9200:9200"
- "9300:9300"
deploy:
replicas: 1
update_config:
parallelism: 1
kibana:
image: kibana
ports:
- "5601:5601"
environment:
ELASTICSEARCH_URL: http://master:9200


@@ -0,0 +1,44 @@
version: '3'
services:
master:
image: itzg/elasticsearch
environment:
TYPE: MASTER
UNICAST_HOSTS: master
MIN_MASTERS: 1
deploy:
replicas: 1
update_config:
parallelism: 1
data:
image: itzg/elasticsearch
environment:
TYPE: DATA
UNICAST_HOSTS: master
deploy:
replicas: 2
update_config:
parallelism: 1
delay: 60s
gateway:
image: itzg/elasticsearch
ports:
- "9200:9200"
- "9300:9300"
environment:
TYPE: GATEWAY
UNICAST_HOSTS: master
ingest:
image: itzg/elasticsearch
ports:
- "9222:9200"
environment:
TYPE: INGEST
UNICAST_HOSTS: master
kibana:
image: kibana
ports:
- "5601:5601"
environment:
ELASTICSEARCH_URL: http://gateway:9200


@@ -0,0 +1,6 @@
grant {
// JMX Java Management eXtensions
permission javax.management.MBeanTrustPermission "register";
permission javax.management.MBeanServerPermission "createMBeanServer";
permission javax.management.MBeanPermission "-#-[-]", "queryNames";
};


@@ -0,0 +1,74 @@
status = error
# log action execution errors for easier debugging
logger.action.name = org.elasticsearch.action
logger.action.level = debug
appender.console.type = Console
appender.console.name = console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n
appender.rolling.type = RollingFile
appender.rolling.name = rolling
appender.rolling.fileName = ${sys:es.logs}.log
appender.rolling.layout.type = PatternLayout
appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.10000m%n
appender.rolling.filePattern = ${sys:es.logs}-%d{yyyy-MM-dd}.log
appender.rolling.policies.type = Policies
appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.rolling.policies.time.interval = 1
appender.rolling.policies.time.modulate = true
rootLogger.level = info
rootLogger.appenderRef.console.ref = console
#rootLogger.appenderRef.rolling.ref = rolling
appender.deprecation_rolling.type = RollingFile
appender.deprecation_rolling.name = deprecation_rolling
appender.deprecation_rolling.fileName = ${sys:es.logs}_deprecation.log
appender.deprecation_rolling.layout.type = PatternLayout
appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.10000m%n
appender.deprecation_rolling.filePattern = ${sys:es.logs}_deprecation-%i.log.gz
appender.deprecation_rolling.policies.type = Policies
appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.deprecation_rolling.policies.size.size = 1GB
appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
appender.deprecation_rolling.strategy.max = 4
logger.deprecation.name = org.elasticsearch.deprecation
logger.deprecation.level = warn
#logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
logger.deprecation.additivity = false
appender.index_search_slowlog_rolling.type = RollingFile
appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
appender.index_search_slowlog_rolling.fileName = ${sys:es.logs}_index_search_slowlog.log
appender.index_search_slowlog_rolling.layout.type = PatternLayout
appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.10000m%n
appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs}_index_search_slowlog-%d{yyyy-MM-dd}.log
appender.index_search_slowlog_rolling.policies.type = Policies
appender.index_search_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.index_search_slowlog_rolling.policies.time.interval = 1
appender.index_search_slowlog_rolling.policies.time.modulate = true
logger.index_search_slowlog_rolling.name = index.search.slowlog
logger.index_search_slowlog_rolling.level = trace
logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = console
logger.index_search_slowlog_rolling.additivity = false
appender.index_indexing_slowlog_rolling.type = RollingFile
appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs}_index_indexing_slowlog.log
appender.index_indexing_slowlog_rolling.layout.type = PatternLayout
appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.10000m%n
appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs}_index_indexing_slowlog-%d{yyyy-MM-dd}.log
appender.index_indexing_slowlog_rolling.policies.type = Policies
appender.index_indexing_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.index_indexing_slowlog_rolling.policies.time.interval = 1
appender.index_indexing_slowlog_rolling.policies.time.modulate = true
logger.index_indexing_slowlog.name = index.indexing.slowlog.index
logger.index_indexing_slowlog.level = trace
logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = console
logger.index_indexing_slowlog.additivity = false


@@ -1,5 +1,126 @@
#!/bin/sh
pre_checks() {
mmc=$(sysctl vm.max_map_count|sed 's/.*= //')
if [[ $mmc -lt 262144 ]]; then
echo "
ERROR: As of 5.0.0 Elasticsearch requires increasing mmap counts.
Refer to https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html
"
exit 1
fi
}
discoverIpFromLink() {
dev=$1
mode=$2
ip=`ipaddr show dev $dev scope global|awk '$1 == "inet" { if (!match($2,"/32")) { gsub("/.*","",$2) ; print $2 } }'`
echo "Discovered $mode address $ip for $dev"
OPTS="$OPTS -E $mode.host=$ip"
}
discoverAllGlobalIps() {
if [ ${#IGNORE_NETWORK} -eq 0 ]
then
IGNORE_NETWORK='999.999.999.999'
fi
printf "Finding IPs"
while [ ${#ips} -eq 0 ]
do
printf "."
ips=`ipaddr show scope global| grep -v "inet ${IGNORE_NETWORK}" | awk '$1 == "inet" { if (!match($2,"/32")) { gsub("/.*","",$2) ; addrs[length(addrs)] = $2 } } END { for (i in addrs) { if (i>0) printf "," ; printf addrs[i] } }'`
sleep 1
done
echo " found! $ips"
OPTS="$OPTS -E network.host=$ips"
}
setup_clustering() {
if [ -n "$CLUSTER" ]; then
OPTS="$OPTS -E cluster.name=$CLUSTER"
if [ -n "$CLUSTER_FROM" ]; then
if [ -d /data/$CLUSTER_FROM -a ! -d /data/$CLUSTER ]; then
echo "Performing cluster data migration from $CLUSTER_FROM to $CLUSTER"
mv /data/$CLUSTER_FROM /data/$CLUSTER
fi
fi
fi
if [ -n "$NODE_NAME" ]; then
OPTS="$OPTS -E node.name=$NODE_NAME"
fi
if [ -n "$MULTICAST" ]; then
OPTS="$OPTS -E discovery.zen.ping.multicast.enabled=$MULTICAST"
fi
if [ -n "$UNICAST_HOSTS" ]; then
OPTS="$OPTS -E discovery.zen.ping.unicast.hosts=$UNICAST_HOSTS"
fi
if [ -n "$PUBLISH_AS" ]; then
OPTS="$OPTS -E transport.publish_host=$(echo $PUBLISH_AS | awk -F: '{print $1}')"
OPTS="$OPTS -E transport.publish_port=$(echo $PUBLISH_AS | awk -F: '{if ($2) print $2; else print 9300}')"
fi
if [ -n "$MIN_MASTERS" ]; then
OPTS="$OPTS -E discovery.zen.minimum_master_nodes=$MIN_MASTERS"
fi
}
install_plugins() {
if [ -n "$PLUGINS" ]; then
for p in $(echo $PLUGINS | awk -v RS=, '{print}')
do
echo "Installing the plugin $p"
$ES_HOME/bin/elasticsearch-plugin install $p
done
else
mkdir -p $ES_HOME/plugins
fi
}
setup_personality() {
if [ -n "$TYPE" ]; then
case $TYPE in
MASTER)
OPTS="$OPTS -E node.master=true -E node.data=false -E node.ingest=false"
;;
GATEWAY|COORDINATING)
OPTS="$OPTS -E node.master=false -E node.data=false -E node.ingest=false"
;;
INGEST)
OPTS="$OPTS -E node.master=false -E node.data=false -E node.ingest=true"
;;
DATA)
OPTS="$OPTS -E node.master=false -E node.data=true -E node.ingest=false"
;;
NON_MASTER)
OPTS="$OPTS -E node.master=false -E node.data=true -E node.ingest=true"
;;
NON_DATA)
OPTS="$OPTS -E node.master=true -E node.data=false -E node.ingest=true"
;;
*)
echo "Unknown node type. Please use MASTER|GATEWAY|DATA|NON_MASTER"
exit 1
esac
fi
}
pre_checks
if [ -f /conf/env ]; then
. /conf/env
fi
@@ -8,95 +129,28 @@ if [ ! -e /conf/elasticsearch.* ]; then
cp $ES_HOME/config/elasticsearch.yml /conf
fi
if [ ! -e /conf/logging.* ]; then
cp $ES_HOME/config/logging.yml /conf
if [ ! -e /conf/log4j2.properties ]; then
cp $ES_HOME/config/log4j2.properties /conf
fi
OPTS="$OPTS -Des.path.conf=/conf \
-Des.path.data=/data \
-Des.path.logs=/data \
-Des.transport.tcp.port=9300 \
-Des.http.port=9200"
OPTS="$OPTS \
-E path.conf=/conf \
-E path.data=/data \
-E path.logs=/data \
-E transport.tcp.port=9300 \
-E http.port=9200"
discoverIpFromLink() {
dev=$1
mode=$2
ip=`ipaddr show dev $dev scope global|awk '$1 == "inet" { if (!match($2,"/32")) { gsub("/.*","",$2) ; print $2 } }'`
echo "Discovered $mode address $ip for $dev"
OPTS="$OPTS -Des.$mode.host=$ip"
}
if [ "$DISCOVER_TRANSPORT_IP" != "" ]; then
discoverAllGlobalIps
if [ "${DISCOVER_TRANSPORT_IP}" != "" ]; then
discoverIpFromLink $DISCOVER_TRANSPORT_IP transport
fi
if [ "$DISCOVER_HTTP_IP" != "" ]; then
if [ "${DISCOVER_HTTP_IP}" != "" ]; then
discoverIpFromLink $DISCOVER_HTTP_IP http
fi
if [ -n "$CLUSTER" ]; then
OPTS="$OPTS -Des.cluster.name=$CLUSTER"
if [ -n "$CLUSTER_FROM" ]; then
if [ -d /data/$CLUSTER_FROM -a ! -d /data/$CLUSTER ]; then
echo "Performing cluster data migration from $CLUSTER_FROM to $CLUSTER"
mv /data/$CLUSTER_FROM /data/$CLUSTER
fi
fi
fi
if [ -n "$NODE_NAME" ]; then
OPTS="$OPTS -Des.node.name=$NODE_NAME"
fi
if [ -n "$MULTICAST" ]; then
OPTS="$OPTS -Des.discovery.zen.ping.multicast.enabled=$MULTICAST"
fi
if [ -n "$UNICAST_HOSTS" ]; then
OPTS="$OPTS -Des.discovery.zen.ping.unicast.hosts=$UNICAST_HOSTS"
fi
if [ -n "$PUBLISH_AS" ]; then
OPTS="$OPTS -Des.transport.publish_host=$(echo $PUBLISH_AS | awk -F: '{print $1}')"
OPTS="$OPTS -Des.transport.publish_port=$(echo $PUBLISH_AS | awk -F: '{if ($2) print $2; else print 9300}')"
fi
if [ -n "$TYPE" ]; then
case $TYPE in
MASTER)
OPTS="$OPTS --node.master=true --node.data=false"
;;
GATEWAY)
OPTS="$OPTS --node.master=false --node.data=false"
;;
DATA|NON_MASTER)
OPTS="$OPTS --node.master=false --node.data=true"
;;
*)
echo "Unknown node type. Please use MASTER|GATEWAY|DATA|NON_MASTER"
exit 1
esac
fi
if [ -n "$MIN_MASTERS" ]; then
OPTS="$OPTS --discovery.zen.minimum_master_nodes=$MIN_MASTERS"
fi
mkdir -p /conf/plugins
OPTS="$OPTS --path.plugins=/conf/plugins"
if [ -n "$PLUGINS" ]; then
PLUGIN_OPTS="-Des.path.conf=/conf -Des.path.plugins=/conf/plugins"
for p in $(echo $PLUGINS | awk -v RS=, '{print}')
do
echo "Installing the plugin $p"
$ES_HOME/bin/plugin $PLUGIN_OPTS install $p -t 1m -b
done
else
mkdir -p /conf/plugins
fi
setup_personality
setup_clustering
install_plugins
mkdir -p /conf/scripts
@@ -104,8 +158,7 @@ echo "Starting Elasticsearch with the options $OPTS"
CMD="$ES_HOME/bin/elasticsearch $OPTS"
if [ `id -u` = 0 ]; then
echo "Running as non-root..."
chown -R $DEFAULT_ES_USER /data
set -x
chown -R $DEFAULT_ES_USER /data /conf
su -c "$CMD" $DEFAULT_ES_USER
else
$CMD


@@ -1,18 +1,18 @@
FROM java:8
MAINTAINER itzg
LABEL maintainer "itzg"
ENV GITBLIT_VERSION 1.7.1
RUN wget -qO /tmp/gitblit.tgz http://dl.bintray.com/gitblit/releases/gitblit-$GITBLIT_VERSION.tar.gz
RUN tar -C /opt -xvf /tmp/gitblit.tgz && \
rm /tmp/gitblit.tgz
VOLUME ["/data"]
ADD start.sh /start
ENV GITBLIT_PATH=/opt/gitblit-${GITBLIT_VERSION} \
GITBLIT_HTTPS_PORT=443 \
GITBLIT_HTTP_PORT=80 \
@@ -20,7 +20,7 @@ ENV GITBLIT_PATH=/opt/gitblit-${GITBLIT_VERSION} \
GITBLIT_ADMIN_USER=admin \
GITBLIT_INITIAL_REPO=
WORKDIR $GITBLIT_PATH
EXPOSE 80 443
ENTRYPOINT ["/start"]


@@ -1,6 +1,6 @@
FROM itzg/ubuntu-openjdk-7
MAINTAINER itzg
LABEL maintainer "itzg"
RUN apt-get install -y curl unzip


@@ -1,6 +1,6 @@
FROM ubuntu:trusty
MAINTAINER itzg
LABEL maintainer "itzg"
ENV APT_GET_UPDATE 2014-09-18
@@ -21,4 +21,3 @@ EXPOSE 4000
ADD start.sh /start
CMD ["/start"]


@@ -1,12 +1,17 @@
FROM java:openjdk-8u72-jdk
FROM java:openjdk-8u102-jdk
MAINTAINER itzg
LABEL maintainer "itzg"
ADD download-and-start.sh /download-and-start
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y \
graphviz \
&& apt-get clean
ENV JENKINS_HOME=/data
VOLUME ["/data", "/root", "/opt/jenkins"]
EXPOSE 8080 38252
CMD ["/download-and-start"]
COPY download-and-start.sh /opt/download-and-start
CMD ["/opt/download-and-start"]


@@ -1,13 +1,14 @@
FROM itzg/ubuntu-openjdk-7
FROM openjdk:8u111-jre
MAINTAINER itzg
LABEL maintainer "itzg"
ENV KIBANA_VERSION 4.1.1
ENV KIBANA_VERSION 5.1.2
RUN wget -q -O /tmp/kibana.tgz https://download.elasticsearch.org/kibana/kibana/kibana-${KIBANA_VERSION}-linux-x64.tar.gz
ADD https://artifacts.elastic.co/downloads/kibana/kibana-${KIBANA_VERSION}-linux-x86_64.tar.gz /tmp/kibana.tgz
RUN tar -C /opt -xzf /tmp/kibana.tgz && rm /tmp/kibana.tgz
ENV KIBANA_HOME /opt/kibana-$KIBANA_VERSION-linux-x64
ENV KIBANA_HOME /opt/kibana-$KIBANA_VERSION-linux-x86_64
# Simplify for cross-container
ENV ES_URL http://es:9200


@@ -0,0 +1,12 @@
version: '2'
services:
es:
build: ../elasticsearch
ports:
- "9200:9200"
kibana:
build: .
ports:
- "5601:5601"


@@ -1,6 +1,5 @@
#!/bin/bash
#!/bin/sh
OPTS="-e $ES_URL"
OPTS="-e $ES_URL -H $HOSTNAME"
exec bin/kibana $OPTS


@@ -1,6 +1,6 @@
FROM itzg/ubuntu-openjdk-7
MAINTAINER itzg
LABEL maintainer "itzg"
ENV LOGSTASH_VERSION 1.5.0-1


@@ -1,30 +1,36 @@
FROM java:8
FROM openjdk:8u131-jre-alpine
MAINTAINER itzg
LABEL maintainer "itzg"
ENV APT_GET_UPDATE 2016-04-23
RUN apt-get update
RUN apk add -U \
openssl \
imagemagick \
lsof \
su-exec \
bash \
curl \
git \
jq \
mysql-client \
python python-dev py2-pip && \
rm -rf /var/cache/apk/*
RUN DEBIAN_FRONTEND=noninteractive apt-get install -y \
imagemagick \
lsof \
nano \
sudo \
vim \
jq \
&& apt-get clean
RUN pip install mcstatus
RUN useradd -s /bin/false --uid 1000 minecraft \
HEALTHCHECK CMD mcstatus localhost ping
RUN addgroup -g 1000 minecraft \
&& adduser -Ss /bin/false -u 1000 -G minecraft -h /home/minecraft minecraft \
&& mkdir /data \
&& mkdir /config \
&& mkdir /mods \
&& mkdir /plugins \
&& mkdir /home/minecraft \
&& chown minecraft:minecraft /data /config /mods /plugins /home/minecraft
EXPOSE 25565 25575
ADD https://github.com/itzg/restify/releases/download/1.0.3/restify_linux_amd64 /usr/local/bin/restify
ADD https://github.com/itzg/restify/releases/download/1.0.4/restify_linux_amd64 /usr/local/bin/restify
ADD https://github.com/itzg/rcon-cli/releases/download/1.3/rcon-cli_linux_amd64 /usr/local/bin/rcon-cli
COPY start.sh /start
COPY start-minecraft.sh /start-minecraft
COPY mcadmin.jq /usr/share
@@ -38,6 +44,7 @@ ENTRYPOINT [ "/start" ]
ENV UID=1000 GID=1000 \
MOTD="A Minecraft Server Powered by Docker" \
JVM_OPTS="-Xmx1024M -Xms1024M" \
JVM_XX_OPTS="-XX:+UseG1GC" MEMORY="1G" \
TYPE=VANILLA VERSION=LATEST FORGEVERSION=RECOMMENDED LEVEL=world PVP=true DIFFICULTY=easy \
ENABLE_RCON=true RCON_PORT=25575 RCON_PASSWORD=minecraft \
LEVEL_TYPE=DEFAULT GENERATOR_SETTINGS= WORLD= MODPACK= ONLINE_MODE=TRUE CONSOLE=true


@@ -0,0 +1 @@
<mxfile userAgent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.110 Safari/537.36" version="6.4.4" editor="www.draw.io" type="github"><diagram name="Page-1">1VVNc5swEP01HDMDUiDOMbGd5pK2Ux96VkGAxgJRIRvcX9+VWD40ODOZ1j0Uz9jS2w+t3ts1Ad1W/SfNmvJNZVwGJMz6gO4CQjZRCN8WuAxA8vgwAIUW2QBFM3AQvziCGFecRMZbz9EoJY1ofDBVdc1T42FMa9X5brmS/qkNK/gKOKRMrtHvIjMlolEYzoZXLooSj97EaPjB0mOh1anG8wJCc/cM5oqNudC/LVmmugVE9wHdaqXMsKr6LZeW2pG2Ie7lHetUt+a1+UgAGQLOTJ74WHEiIfQ5E2dbn7kgJ8nPky3q2fDe3DEpijqgT+AheW5mK6wK/HVZ2obVI7ZT6ZFrCHlVrRkdoLSlzwJ2BYwo8WohXSkMPzQstfsOWhCcSlNJ2EX21JFVu8mFlFsllXahdJ/YD+Bnro0AyZ+Gu+yMslnwZjt3LVd/KurCpgptLlWbA5aBXNs0vH+X/mgSFWaFq4obfQEXDKDYBhd/2809RzeIlYt2mxwZ9nkxZZ61hgXKfV16upK+EjBMmuXmruX67KSCATMMYL2SwDU5z5DkvxAE52NNvdHqyBeOoXs8UWIbplP8A3ES3UCVja/K41oVEl5RJYpuoMr9SpU34B+Qr0rD0ITD0JE4TuI/nopwLcKLexYi6OFet6CT3Pt8TjwtCI3IFUJv0eXxis9v2y+fr/D58P/yGf87PmE7v4ycbfHCp/vf</diagram></mxfile>


@@ -1,6 +1,7 @@
[![Docker Pulls](https://img.shields.io/docker/pulls/itzg/minecraft-server.svg)](https://hub.docker.com/r/itzg/minecraft-server/)
[![Docker Stars](https://img.shields.io/docker/stars/itzg/minecraft-server.svg?maxAge=2592000)](https://hub.docker.com/r/itzg/minecraft-server/)
[![GitHub Issues](https://img.shields.io/github/issues-raw/itzg/dockerfiles.svg)](https://github.com/itzg/dockerfiles/issues)
This docker image provides a Minecraft Server that will automatically download the latest stable
version at startup. You can also run/upgrade to any specific version or the
@@ -35,6 +36,24 @@ With that you can easily view the logs, stop, or re-start the container:
## Interacting with the server
[RCON](http://wiki.vg/RCON) is enabled by default, so you can `exec` into the container to
access the Minecraft server console:
```
docker exec -i mc rcon-cli
```
Note: The `-i` is required for interactive use of rcon-cli.
To run a simple, one-shot command, such as stopping a Minecraft server, pass the command as
arguments to `rcon-cli`, such as:
```
docker exec mc rcon-cli stop
```
_The `-i` is not needed in this case._
In order to attach and interact with the Minecraft server, add `-it` when starting the container, such as
docker run -d -it -p 25565:25565 --name mc itzg/minecraft-server
@@ -99,6 +118,34 @@ or a specific version:
docker run -d -e VERSION=1.7.9 ...
## Healthcheck
This image contains [Dinnerbone's mcstatus](https://github.com/Dinnerbone/mcstatus) and uses
its `ping` command to continually check on the container's health. That can be observed
from the `STATUS` column of `docker ps`
```
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
b418af073764 mc "/start" 43 seconds ago Up 41 seconds (healthy) 0.0.0.0:25565->25565/tcp, 25575/tcp mc
```
You can also query the container's health in a script friendly way:
```
> docker container inspect -f "{{.State.Health.Status}}" mc
healthy
```
Finally, since `mcstatus` is on the `PATH` you can exec into the container
and use mcstatus directly and invoke any of its other commands:
```
> docker exec mc mcstatus localhost status
version: v1.12 (protocol 335)
description: "{u'text': u'A Minecraft Server Powered by Docker'}"
players: 0/20 No players online
```
## Running a Forge Server
Enable Forge server mode by adding a `-e TYPE=FORGE` to your command-line.
@@ -109,6 +156,20 @@ but you can also choose to run a specific version with `-e FORGEVERSION=10.13.4.
-e TYPE=FORGE -e FORGEVERSION=10.13.4.1448 \
-p 25565:25565 -e EULA=TRUE --name mc itzg/minecraft-server
To use a pre-downloaded Forge installer, place it in the attached `/data` directory and
specify the name of the installer file with `FORGE_INSTALLER`, such as:
$ docker run -d -v /path/on/host:/data ... \
-e FORGE_INSTALLER=forge-1.11.2-13.20.0.2228-installer.jar ...
To download a Forge installer from a custom location, such as your own file repository, specify
the URL with `FORGE_INSTALLER_URL`, such as:
$ docker run -d -v /path/on/host:/data ... \
-e FORGE_INSTALLER_URL=http://HOST/forge-1.11.2-13.20.0.2228-installer.jar ...
In both of the cases above, there is no need for the `VERSION` or `FORGEVERSION` variables.
In order to add mods, you have two options.
### Using the /data volume
@@ -159,6 +220,10 @@ Enable Bukkit/Spigot server mode by adding a `-e TYPE=BUKKIT -e VERSION=1.8` or
-e TYPE=SPIGOT -e VERSION=1.8 \
-p 25565:25565 -e EULA=TRUE --name mc itzg/minecraft-server
If you are hosting your own copy of Bukkit/Spigot you can override the download URLs with:
* -e BUKKIT_DOWNLOAD_URL=<url>
* -e SPIGOT_DOWNLOAD_URL=<url>
You can build spigot from source by adding `-e BUILD_FROM_SOURCE=true`
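For example (the host and jar name below are placeholders), a self-hosted Spigot build could be supplied like this:
```
docker run -d -e TYPE=SPIGOT -e VERSION=1.8 \
    -e SPIGOT_DOWNLOAD_URL=http://HOST/spigot-1.8.jar \
    -p 25565:25565 -e EULA=TRUE --name mc itzg/minecraft-server
```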
__NOTE: to avoid pegging the CPU when running Spigot,__ you will need to
@@ -224,6 +289,9 @@ pass `--noconsole` at the very end of the command line and not use `-it`. For ex
-e TYPE=PAPER -e VERSION=1.9.4 \
-p 25565:25565 -e EULA=TRUE --name mc itzg/minecraft-server --noconsole
If you are hosting your own copy of PaperSpigot you can override the download URL with:
* -e PAPER_DOWNLOAD_URL=<url>
You can install Bukkit plugins in two ways...
### Using the /data volume
@@ -264,6 +332,72 @@ This works well if you want to have a common set of plugins in a separate
location, but still have multiple worlds with different server requirements
in either persistent volumes or a downloadable archive.
## Running a Server with a Feed-The-Beast (FTB) modpack
Enable this server mode by adding a `-e TYPE=FTB` to your command-line,
but note the following additional steps needed...
You need to specify a modpack to run, using the `FTB_SERVER_MOD` environment
variable. An FTB server modpack is available together with its respective
client modpack on https://www.feed-the-beast.com under "Additional Files."
Because of the interactive delayed download mechanism on that web site, you
must manually download the server modpack. Copy the modpack to the `/data`
directory (see "Attaching data directory to host filesystem").
Now you can add a `-e FTB_SERVER_MOD=name_of_modpack.zip` to your command-line.
$ docker run -d -v /path/on/host:/data -e TYPE=FTB \
-e FTB_SERVER_MOD=FTBPresentsSkyfactory3Server_3.0.6.zip \
-p 25565:25565 -e EULA=TRUE --name mc itzg/minecraft-server
Instead of explicitly downloading a modpack from the Feed the Beast site, you
can set `FTB_SERVER_MOD` to the **server** URL of a modpack, such as
$ docker run ... \
-e TYPE=FTB \
-e FTB_SERVER_MOD=https://www.feed-the-beast.com/projects/ftb-infinity-lite-1-10/files/2402889
### Using the /data volume
You must use a persistent `/data` mount for this type of server.
To do this, you will need to attach the container's `/data` directory
(see "Attaching data directory to host filesystem").
If the modpack is updated and you want to run the new version on your
server, you stop and remove the container:
docker stop mc
docker rm mc
Do not erase anything from your /data directory (unless you know of
specific mods that have been removed from the modpack). Download the
updated FTB server modpack and copy it to `/data`. Start a new container
with `FTB_SERVER_MOD` specifying the updated modpack file.
$ docker run -d -v /path/on/host:/data -e TYPE=FTB \
-e FTB_SERVER_MOD=FTBPresentsSkyfactory3Server_3.0.7.zip \
-p 25565:25565 -e EULA=TRUE --name mc itzg/minecraft-server
### FTB server JVM options
An FTB server modpack contains its own startup script that launches the
JVM and it does not use the `JVM_OPTS` environment variable. Instead
you can use `MIN_RAM` and `MAX_RAM` variables. These are appended to
the JVM `-Xms` and `-Xmx` options. For example, `-e MIN_RAM=2G` results
in `-Xms2G` passed to the JVM.
Additionally, `PERMGEN_SIZE` is passed on to `-XX:PermSize`. Here is an
example:
$ docker run -d -v /path/on/host:/data -e TYPE=FTB \
-e MIN_RAM=1G -e MAX_RAM=2G -e PERMGEN_SIZE=512M \
-e FTB_SERVER_MOD=FTBPresentsSkyfactory3Server_3.0.6.zip \
-p 25565:25565 -e EULA=TRUE --name mc itzg/minecraft-server
Note: The FTB server start script will also override other options,
like `MOTD`.
## Using Docker Compose
Rather than type the server options below, the port mappings above, etc
@@ -290,7 +424,7 @@ minecraft-server:
and in the same directory as that file run
docker-compose -d up
docker-compose up -d
Now, go play...or adjust the `environment` section to configure
this server instance.
@@ -377,18 +511,20 @@ Enables command blocks
### Force Gamemode
Force players to join in the default game mode.
- false - Players will join in the gamemode they left in.
- true - Players will always join in the default gamemode.
docker run -d -e FORCE_GAMEMODE=false
* false - Players will join in the gamemode they left in.
* true - Players will always join in the default gamemode.
`docker run -d -e FORCE_GAMEMODE=false`
### Generate Structures
Defines whether structures (such as villages) will be generated.
- false - Structures will not be generated in new chunks.
- true - Structures will be generated in new chunks.
docker run -d -e GENERATE_STRUCTURES=true
* false - Structures will not be generated in new chunks.
* true - Structures will be generated in new chunks.
`docker run -d -e GENERATE_STRUCTURES=true`
### Hardcore
@@ -537,6 +673,20 @@ To use this option pass the environment variable `MODPACK`, such as
top level of the zip archive. Make sure the jars are compatible with the
particular `TYPE` of server you are running.
### Remove old mods/plugins
When the option above is specified (`MODPACK`) you can also instruct the script to
delete old mods/plugins prior to installing new ones. This behaviour is desirable
when you want to upgrade mods/plugins from a downloaded zip file.
To use this option pass the environment variable `REMOVE_OLD_MODS="TRUE"`, such as
docker run -d -e REMOVE_OLD_MODS="TRUE" -e MODPACK=http://www.example.com/mods/modpack.zip ...
**NOTE:** This option will be taken into account only when option `MODPACK` is also used.
**WARNING:** All content of the `mods` or `plugins` directory will be deleted
before unpacking new content from the zip file.
### Online mode
By default, the server checks connecting players against Minecraft's account database. If you want to create an offline server or your server is not connected to the internet, you can stop the server from trying to connect to minecraft.net to authenticate players with the environment variable `ONLINE_MODE`, like this
@@ -547,11 +697,23 @@ By default, server checks connecting players against Minecraft's account databas
### Memory Limit
The Java memory limit can be adjusted using the `JVM_OPTS` environment variable, where the default is
the setting shown in the example (max and min at 1024 MB):
By default, the image declares a Java initial and maximum memory limit of 1 GB. There are several
ways to adjust the memory settings:
docker run -e 'JVM_OPTS=-Xmx1024M -Xms1024M' ...
* `MEMORY`, "1G" by default, can be used to adjust both initial (`Xms`) and max (`Xmx`)
memory settings of the JVM
* `INIT_MEMORY`, independently sets the initial heap size
* `MAX_MEMORY`, independently sets the max heap size
The values of all three are passed directly to the JVM and support format/units as
`<size>[g|G|m|M|k|K]`.
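For example, a minimal variation on the run command used throughout this README starts the server with a 2 GB initial heap that may grow to 4 GB:
```
docker run -d -e INIT_MEMORY=2G -e MAX_MEMORY=4G \
    -p 25565:25565 -e EULA=TRUE --name mc itzg/minecraft-server
```
Setting only `MEMORY=4G` would instead apply 4 GB to both the initial and maximum heap sizes.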
### /data ownership
In order to adapt to differences in `UID` and `GID` settings the entry script will attempt to correct ownership and writability of the `/data` directory. This logic can be disabled by setting `-e SKIP_OWNERSHIP_FIX=TRUE`.
### JVM Options
General JVM options can be passed to the Minecraft Server invocation by passing a `JVM_OPTS`
environment variable. Options like `-X` that need to precede general JVM options can be passed
via a `JVM_XX_OPTS` environment variable.
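As a sketch (the `-D` system property shown here is purely illustrative), the two variables can be combined in a single run command:
```
docker run -d -e JVM_XX_OPTS="-XX:+UseG1GC" \
    -e JVM_OPTS="-Dfile.encoding=UTF-8" \
    -p 25565:25565 -e EULA=TRUE --name mc itzg/minecraft-server
```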


@@ -0,0 +1,29 @@
version: '3'
services:
minecraft:
ports:
- "25565:25565"
volumes:
- "mcbig:/data"
environment:
EULA: "TRUE"
MAX_MEMORY: 32G
MAX_BUILD_HEIGHT: 256
VIEW_DISTANCE: 15
LEVEL_TYPE: LARGEBIOMES
MAX_PLAYERS: 100
CONSOLE: "false"
image: itzg/minecraft-server
restart: always
rcon:
image: itzg/rcon
ports:
- "4326:4326"
- "4327:4327"
volumes:
- "rcon:/opt/rcon-web-admin/db"
volumes:
mcbig:
rcon:


@@ -1,14 +1,27 @@
minecraft-server:
ports:
- "25565:25565"
version: '3'
environment:
EULA: "TRUE"
services:
minecraft:
image: itzg/minecraft-server
ports:
- "25565:25565"
volumes:
- "mc:/data"
environment:
EULA: "TRUE"
CONSOLE: "false"
ENABLE_RCON: "true"
RCON_PASSWORD: "testing"
RCON_PORT: 28016
restart: always
rcon:
image: itzg/rcon
ports:
- "4326:4326"
- "4327:4327"
volumes:
- "rcon:/opt/rcon-web-admin/db"
image: itzg/minecraft-server
container_name: minecraft-server
tty: true
stdin_open: true
restart: always
volumes:
mc:
rcon:


@@ -1,7 +1,7 @@
.[] |
select(.elements | length > 1) |
select(.elements[].elements[] | select(.class == "version" and .text == $version)) |
.elements[].elements[] |
.elements[].elements[] |
select(.class|contains("server-jar")) |
.elements[] | select(.name="a") |
.href


@@ -1,5 +1,7 @@
#!/bin/bash
shopt -s nullglob
#umask 002
export HOME=/data
@@ -7,6 +9,10 @@ if [ ! -e /data/eula.txt ]; then
if [ "$EULA" != "" ]; then
echo "# Generated via Docker on $(date)" > eula.txt
echo "eula=$EULA" >> eula.txt
if [ $? != 0 ]; then
echo "ERROR: unable to write eula to /data. Please make sure attached directory is writable by uid=${UID}"
exit 2
fi
else
echo ""
echo "Please accept the Minecraft EULA at"
@@ -18,21 +24,28 @@ if [ ! -e /data/eula.txt ]; then
fi
fi
if ! touch /data/.verify_access; then
echo "ERROR: /data doesn't seem to be writable. Please make sure attached directory is writable by uid=${UID} "
exit 2
fi
SERVER_PROPERTIES=/data/server.properties
FTB_DIR=/data/FeedTheBeast
VERSIONS_JSON=https://launchermeta.mojang.com/mc/game/version_manifest.json
echo "Checking version information."
case "X$VERSION" in
X|XLATEST|Xlatest)
VANILLA_VERSION=`curl -sSL $VERSIONS_JSON | jq -r '.latest.release'`
VANILLA_VERSION=`curl -fsSL $VERSIONS_JSON | jq -r '.latest.release'`
;;
XSNAPSHOT|Xsnapshot)
VANILLA_VERSION=`curl -sSL $VERSIONS_JSON | jq -r '.latest.snapshot'`
VANILLA_VERSION=`curl -fsSL $VERSIONS_JSON | jq -r '.latest.snapshot'`
;;
X[1-9]*)
VANILLA_VERSION=$VERSION
;;
*)
VANILLA_VERSION=`curl -sSL $VERSIONS_JSON | jq -r '.latest.release'`
VANILLA_VERSION=`curl -fsSL $VERSIONS_JSON | jq -r '.latest.release'`
;;
esac
@@ -56,28 +69,32 @@ function downloadSpigot {
case "$TYPE" in
*BUKKIT|*bukkit)
match="Craftbukkit"
downloadUrl=${BUKKIT_DOWNLOAD_URL}
;;
*)
match="Spigot"
downloadUrl=${SPIGOT_DOWNLOAD_URL}
;;
esac
downloadUrl=$(restify --class=jar-div https://mcadmin.net/ | \
jq --arg version "$match $VANILLA_VERSION" -r -f /usr/share/mcadmin.jq)
if [[ -n $downloadUrl ]]; then
echo "Downloading $match"
wget -q -O $SERVER "$downloadUrl"
status=$?
if [ $status != 0 ]; then
echo "ERROR: failed to download from $downloadUrl due to (error code was $status)"
exit 3
if [[ -z $downloadUrl ]]; then
downloadUrl=$(restify --class=jar-div https://mcadmin.net/ | \
jq --arg version "$match $VANILLA_VERSION" -r -f /usr/share/mcadmin.jq)
if [[ -z $downloadUrl ]]; then
echo "ERROR: Version $VANILLA_VERSION is not supported for $TYPE"
echo " Refer to https://mcadmin.net/ for supported versions"
exit 2
fi
else
echo "ERROR: Version $VANILLA_VERSION is not supported for $TYPE"
echo " Refer to https://mcadmin.net/ for supported versions"
exit 2
fi
echo "Downloading $match"
curl -kfsSL -o $SERVER "$downloadUrl"
status=$?
if [ ! -f $SERVER ]; then
echo "ERROR: failed to download from $downloadUrl (status=$status)"
exit 3
fi
}
function downloadPaper {
@@ -98,11 +115,11 @@ function downloadPaper {
esac
if [ $build != "nosupp" ]; then
downloadUrl="https://ci.destroystokyo.com/job/PaperSpigot/$build/artifact/paperclip.jar"
wget -q -O $SERVER "$downloadUrl"
status=$?
if [ $status != 0 ]; then
echo "ERROR: failed to download from $downloadUrl due to (error code was $status)"
rm $SERVER
downloadUrl=${PAPER_DOWNLOAD_URL:-https://ci.destroystokyo.com/job/PaperSpigot/$build/artifact/paperclip.jar}
curl -fsSL -o $SERVER "$downloadUrl"
if [ ! -f $SERVER ]; then
echo "ERROR: failed to download from $downloadUrl (status=$?)"
exit 3
fi
else
@@ -115,46 +132,181 @@ function downloadPaper {
function installForge {
TYPE=FORGE
  if [[ -z $FORGE_INSTALLER && -z $FORGE_INSTALLER_URL ]]; then
    norm=$VANILLA_VERSION

    case $VANILLA_VERSION in
      *.*.*)
        norm=$VANILLA_VERSION ;;
      *.*)
        norm=${VANILLA_VERSION}.0 ;;
    esac

    echo "Checking Forge version information."
    case $FORGEVERSION in
      RECOMMENDED)
        curl -fsSL -o /tmp/forge.json http://files.minecraftforge.net/maven/net/minecraftforge/forge/promotions_slim.json
        FORGE_VERSION=$(cat /tmp/forge.json | jq -r ".promos[\"$VANILLA_VERSION-recommended\"]")
        if [ $FORGE_VERSION = null ]; then
          FORGE_VERSION=$(cat /tmp/forge.json | jq -r ".promos[\"$VANILLA_VERSION-latest\"]")
          if [ $FORGE_VERSION = null ]; then
            echo "ERROR: Version $VANILLA_VERSION is not supported by Forge"
            echo "       Refer to http://files.minecraftforge.net/ for supported versions"
            exit 2
          fi
        fi
        ;;
      *)
        FORGE_VERSION=$FORGEVERSION
        ;;
    esac

    normForgeVersion=$VANILLA_VERSION-$FORGE_VERSION-$norm
    shortForgeVersion=$VANILLA_VERSION-$FORGE_VERSION

    FORGE_INSTALLER="/tmp/forge-$shortForgeVersion-installer.jar"
  elif [[ -z $FORGE_INSTALLER ]]; then
    FORGE_INSTALLER="/tmp/forge-installer.jar"
  elif [[ ! -e $FORGE_INSTALLER ]]; then
    echo "ERROR: the given Forge installer doesn't exist : $FORGE_INSTALLER"
    exit 2
  fi

  installMarker=".forge-installed-$shortForgeVersion"

  if [ ! -e $installMarker ]; then

    if [ ! -e $FORGE_INSTALLER ]; then
      if [[ -z $FORGE_INSTALLER_URL ]]; then
        echo "Downloading $normForgeVersion"

        forgeFileNames="
        $normForgeVersion/forge-$normForgeVersion-installer.jar
        $shortForgeVersion/forge-$shortForgeVersion-installer.jar
        END
        "

        for fn in $forgeFileNames; do
          if [ $fn == END ]; then
            echo "Unable to compute URL for $normForgeVersion"
            exit 2
          fi

          downloadUrl=http://files.minecraftforge.net/maven/net/minecraftforge/forge/$fn
          echo "...trying $downloadUrl"
          if curl -o $FORGE_INSTALLER -fsSL $downloadUrl; then
            break
          fi
        done
      else
        echo "Downloading $FORGE_INSTALLER_URL ..."
        if ! curl -o $FORGE_INSTALLER -fsSL $FORGE_INSTALLER_URL; then
          echo "Failed to download from given location $FORGE_INSTALLER_URL"
          exit 2
        fi
      fi
    fi

    echo "Installing Forge $shortForgeVersion using $FORGE_INSTALLER"
    mkdir -p mods
    tries=3
    while ((--tries >= 0)); do
      java -jar $FORGE_INSTALLER --installServer
      if [ $? == 0 ]; then
        break
      fi
    done
    if (($tries < 0)); then
      echo "Forge failed to install after several tries." >&2
      exit 10
    fi

    # NOTE $shortForgeVersion will be empty if installer location was given to us
    echo "Finding installed server jar..."
    for j in *forge*.jar; do
      echo "...$j"
      case $j in
        *installer*)
          ;;
        *)
          SERVER=$j
          break
          ;;
      esac
    done

    if [[ -z $SERVER ]]; then
      echo "Unable to derive server jar for Forge"
      exit 2
    fi

    echo "Using server $SERVER"
    echo $SERVER > $installMarker
  else
    SERVER=$(cat $installMarker)
  fi
}
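A sketch of exercising the two Forge branches above, under the same assumptions about the image name and EULA variable. Leaving FORGEVERSION at RECOMMENDED takes the promotions_slim.json lookup; FORGE_INSTALLER instead points at a jar already present in the data volume.

    docker run -d -e EULA=TRUE -e TYPE=FORGE -e FORGEVERSION=RECOMMENDED \
        -v /path/on/host:/data itzg/minecraft-server

    docker run -d -e EULA=TRUE -e TYPE=FORGE \
        -e FORGE_INSTALLER=/data/forge-installer.jar \
        -v /path/on/host:/data itzg/minecraft-server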
function isURL {
local value=$1
if [[ ${value:0:8} == "https://" || ${value:0:7} = "http://" ]]; then
return 0
else
return 1
fi
}
function installFTB {
TYPE=FEED-THE-BEAST
echo "Looking for Feed-The-Beast server modpack."
if [[ -z $FTB_SERVER_MOD ]]; then
echo "Environment variable FTB_SERVER_MOD not set."
echo "Set FTB_SERVER_MOD to the file name of the FTB server modpack."
echo "(And place the modpack in the /data directory.)"
exit 2
fi
local srv_modpack=${FTB_SERVER_MOD}
if isURL ${srv_modpack}; then
case $srv_modpack in
*/download)
;;  # URL already ends in /download, nothing to append
*)
srv_modpack=${srv_modpack}/download;;
esac
local file=$(basename $(dirname $srv_modpack))
local downloaded=/data/${file}.zip
echo "Downloading FTB modpack...
$srv_modpack -> $downloaded"
curl -sSL -o $downloaded $srv_modpack
srv_modpack=$downloaded
fi
if [[ ${srv_modpack:0:5} == "data/" ]]; then
# Prepend with "/"
srv_modpack=/${srv_modpack}
fi
if [[ ! ${srv_modpack:0:1} == "/" ]]; then
# If not an absolute path, assume file is in "/data"
srv_modpack=/data/${srv_modpack}
fi
if [[ ! -f ${srv_modpack} ]]; then
echo "FTB server modpack ${srv_modpack} not found."
exit 2
fi
if [[ ! ${srv_modpack: -4} == ".zip" ]]; then
echo "FTB server modpack ${srv_modpack} is not a zip archive."
echo "Please set FTB_SERVER_MOD to a file with a .zip extension."
exit 2
fi
echo "Unpacking FTB server modpack ${srv_modpack} ..."
mkdir -p ${FTB_DIR}
unzip -o ${srv_modpack} -d ${FTB_DIR}
cp -f /data/eula.txt ${FTB_DIR}/eula.txt
FTB_SERVER_START=${FTB_DIR}/ServerStart.sh
chmod a+x ${FTB_SERVER_START}
sed -i "s/-jar/-Dfml.queryResult=confirm -jar/" ${FTB_SERVER_START}
}
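A hypothetical FTB invocation under the same image-name assumption; the modpack file name is a placeholder for a server-modpack zip dropped into the data volume (an HTTP/HTTPS link also works, per the isURL branch above).

    docker run -d -e EULA=TRUE -e TYPE=FTB \
        -e FTB_SERVER_MOD=MyModpackServer.zip \
        -v /path/on/host:/data itzg/minecraft-server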
function installVanilla {
@@ -197,12 +349,17 @@ case "$TYPE" in
# normalize on Spigot for operations below
TYPE=SPIGOT
;;
FORGE|forge)
TYPE=FORGE
installForge
;;
FTB|ftb)
TYPE=FEED-THE-BEAST
installFTB
;;
VANILLA|vanilla)
installVanilla
;;
@@ -254,12 +411,18 @@ if [[ "$MODPACK" ]]; then
case "X$MODPACK" in
X[Hh][Tt][Tt][Pp]*[Zz][iI][pP])
echo "Downloading mod/plugin pack via HTTP"
echo "  from $MODPACK ..."
curl -sSL -o /tmp/modpack.zip "$MODPACK"
if [ "$TYPE" = "SPIGOT" ]; then
if [ "$REMOVE_OLD_MODS" = "TRUE" ]; then
rm -rf /data/plugins/*
fi
mkdir -p /data/plugins
unzip -o -d /data/plugins /tmp/modpack.zip
else
if [ "$REMOVE_OLD_MODS" = "TRUE" ]; then
rm -rf /data/mods/*
fi
mkdir -p /data/mods
unzip -o -d /data/mods /tmp/modpack.zip
fi
@@ -271,6 +434,28 @@ case "X$MODPACK" in
esac
fi
# If supplied with a URL for a config (simple zip of configurations), download it and unpack
if [[ "$MODCONFIG" ]]; then
case "X$MODCONFIG" in
X[Hh][Tt][Tt][Pp]*[Zz][iI][pP])
echo "Downloading mod/plugin configs via HTTP"
echo " from $MODCONFIG ..."
curl -sSL -o /tmp/modconfig.zip "$MODCONFIG"
if [ "$TYPE" = "SPIGOT" ]; then
mkdir -p /data/plugins
unzip -o -d /data/plugins /tmp/modconfig.zip
else
mkdir -p /data/config
unzip -o -d /data/config /tmp/modconfig.zip
fi
rm -f /tmp/modconfig.zip
;;
*)
echo "Invalid URL given for modconfig: Must be HTTP or HTTPS and a ZIP file"
;;
esac
fi
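Both MODPACK and MODCONFIG must be HTTP or HTTPS URLs ending in .zip. A hedged pairing of the two (placeholder URLs, image name assumed as before), with REMOVE_OLD_MODS clearing any previous mods first:

    docker run -d -e EULA=TRUE -e TYPE=FORGE \
        -e MODPACK=https://example.com/mods.zip \
        -e MODCONFIG=https://example.com/mod-configs.zip \
        -e REMOVE_OLD_MODS=TRUE \
        -v /path/on/host:/data itzg/minecraft-server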
function setServerProp {
local prop=$1
local var=$2
@@ -295,11 +480,10 @@ if [ ! -e server.properties ]; then
setServerProp "allow-nether" "$ALLOW_NETHER"
setServerProp "announce-player-achievements" "$ANNOUNCE_PLAYER_ACHIEVEMENTS"
setServerProp "enable-command-block" "$ENABLE_COMMAND_BLOCK"
setServerProp "spawn-animals" "$SPAWN_ANIMALS"
setServerProp "spawn-monsters" "$SPAWN_MONSTERS"
setServerProp "spawn-npcs" "$SPAWN_NPCS"
setServerProp "generate-structures" "$GENERATE_STRUCTURES"
setServerProp "view-distance" "$VIEW_DISTANCE"
setServerProp "hardcore" "$HARDCORE"
setServerProp "max-build-height" "$MAX_BUILD_HEIGHT"
@@ -320,11 +504,11 @@ if [ ! -e server.properties ]; then
if [ -n "$LEVEL_TYPE" ]; then
# normalize to uppercase
LEVEL_TYPE=$( echo ${LEVEL_TYPE} | tr '[:lower:]' '[:upper:]' )
echo "Setting level type to $LEVEL_TYPE"
# check for valid values and only then set
case $LEVEL_TYPE in
DEFAULT|FLAT|LARGEBIOMES|AMPLIFIED|CUSTOMIZED|BIOMESOP)
sed -i "/level-type\s*=/ c level-type=$LEVEL_TYPE" /data/server.properties
;;
*)
@@ -359,10 +543,11 @@ if [ ! -e server.properties ]; then
if [ -n "$MODE" ]; then
echo "Setting mode"
MODE_LC=$( echo $MODE | tr '[:upper:]' '[:lower:]' )
case $MODE_LC in
0|1|2|3)
;;
su*)
MODE=0
;;
c*)
@@ -371,7 +556,7 @@ if [ ! -e server.properties ]; then
a*)
MODE=2
;;
sp*)
MODE=3
;;
*)
@@ -380,7 +565,7 @@ if [ ! -e server.properties ]; then
;;
esac
sed -i "/^gamemode\s*=/ c gamemode=$MODE" $SERVER_PROPERTIES
fi
fi
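The case above accepts either the numeric game modes or word prefixes, so the following (illustrative values only) all resolve to a gamemode line in server.properties:

    -e MODE=survival     # matches su*, written as gamemode=0
    -e MODE=adventure    # matches a*, written as gamemode=2
    -e MODE=2            # numeric values 0-3 pass through unchanged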
@@ -408,21 +593,21 @@ if [ -n "$ICON" -a ! -e server-icon.png ]; then
fi
fi
# Make sure files exist and are valid JSON (for pre-1.12 to 1.12 upgrades)
for j in *.json; do
  if [[ $(python -c "print open('$j').read().strip()==''") = True ]]; then
    echo "Fixing JSON $j"
    echo '[]' > $j
  fi
done
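The python expression prints True only for a file that is empty or whitespace, which is what a pre-1.12 server tends to leave behind; a quick way to check the behavior by hand, using banned-players.json as the example file:

    printf '' > banned-players.json
    python -c "print open('banned-players.json').read().strip()==''"   # True -> rewritten to []
    echo '[]' > banned-players.json
    python -c "print open('banned-players.json').read().strip()==''"   # False -> left alone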
# If any modules have been provided, copy them over
mkdir -p /data/mods
for m in /mods/*.{jar,zip}
do
  # only copy mods that are not already present under /data/mods
  if [ -f "$m" -a ! -f "/data/mods/$(basename "$m")" ]; then
    echo Copying mod `basename "$m"`
    cp "$m" /data/mods
fi
done
[ -d /data/config ] || mkdir /data/config
@@ -441,16 +626,33 @@ if [ "$TYPE" = "SPIGOT" ]; then
fi
fi
EXTRA_ARGS=""
# Optional disable console
if [[ ${CONSOLE} = false || ${CONSOLE} = FALSE ]]; then
  EXTRA_ARGS+="--noconsole"
fi
# Optional disable GUI for headless servers
if [[ ${GUI} = false || ${GUI} = FALSE ]]; then
EXTRA_ARGS="${EXTRA_ARGS} nogui"
fi
# put these prior JVM_OPTS at the end to give any memory settings there higher precedence
echo "Setting initial memory to ${INIT_MEMORY:-${MEMORY}} and max to ${MAX_MEMORY:-${MEMORY}}"
JVM_OPTS="-Xms${INIT_MEMORY:-${MEMORY}} -Xmx${MAX_MEMORY:-${MEMORY}} ${JVM_OPTS}"
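So, for example, INIT_MEMORY=1G with MAX_MEMORY=2G (or a single MEMORY value for both bounds) ends up as:

    # JVM_OPTS becomes: -Xms1G -Xmx2G <any user-supplied JVM_OPTS>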
if [[ ${TYPE} == "FEED-THE-BEAST" ]]; then
cp -f $SERVER_PROPERTIES ${FTB_DIR}/server.properties
cp -f /data/{eula,ops,white-list}.txt ${FTB_DIR}/
cd ${FTB_DIR}
echo "Running FTB server modpack start ..."
exec sh ${FTB_SERVER_START}
else
# If we have a bootstrap.txt file... feed that in to the server stdin
if [ -f /data/bootstrap.txt ];
then
exec java $JVM_XX_OPTS $JVM_OPTS -jar $SERVER "$@" $EXTRA_ARGS < /data/bootstrap.txt
else
exec java $JVM_XX_OPTS $JVM_OPTS -jar $SERVER "$@" $EXTRA_ARGS
fi
fi

View File

@@ -1,13 +1,13 @@
#!/bin/sh
set -e
sed -i "/^minecraft/s/:1000:1000:/:${UID}:${GID}:/g" /etc/passwd
sed -i "/^minecraft/s/:1000:/:${GID}:/g" /etc/group
if [ "$SKIP_OWNERSHIP_FIX" != "TRUE" ]; then
fix_ownership() {
dir=$1
if ! su-exec minecraft test -w $dir; then
echo "Correcting writability of $dir ..."
chown -R minecraft:minecraft $dir
chmod -R u+w $dir
@@ -19,4 +19,4 @@ if [ "$SKIP_OWNERSHIP_FIX" != "TRUE" ]; then
fi
echo "Switching to user 'minecraft'"
su-exec minecraft /start-minecraft $@
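Because the entrypoint rewrites the minecraft entries in /etc/passwd and /etc/group, the container user can be matched to the host owner of the bind-mounted volume; a hedged example, again assuming the itzg/minecraft-server image name and EULA variable:

    docker run -d -e EULA=TRUE -e UID=$(id -u) -e GID=$(id -g) \
        -v /path/on/host:/data itzg/minecraft-server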

View File

@@ -1,6 +1,6 @@
FROM itzg/gvm
LABEL maintainer "itzg"
RUN ["/run", "install", "springboot"]

View File

@@ -1,11 +1,12 @@
FROM openjdk:8-jre
LABEL maintainer "itzg"
ENV TITAN_VERSION 0.5.4
ADD http://s3.thinkaurelius.com/downloads/titan/titan-$TITAN_VERSION-hadoop2.zip /tmp/titan.zip
RUN unzip -q /tmp/titan.zip -d /opt && \
    rm /tmp/titan.zip
ENV TITAN_HOME /opt/titan-$TITAN_VERSION-hadoop2
WORKDIR $TITAN_HOME

View File

@@ -1,6 +1,6 @@
FROM itzg/ubuntu-openjdk-7
LABEL maintainer "itzg"
ENV APT_GET_UPDATE 2014-07-19

View File

@@ -1,6 +1,6 @@
FROM ubuntu:trusty
LABEL maintainer "itzg"
ENV APT_GET_UPDATE 2015-10-29
RUN apt-get update