Mirror of https://github.com/itzg/docker-minecraft-server.git (synced 2026-02-17 15:13:55 +00:00)
Compare commits (minecraft-...es-5.1): 263 commits
17 .gitattributes vendored Normal file
@@ -0,0 +1,17 @@
# Auto detect text files and perform LF normalization
* text=auto

# Custom for Visual Studio
*.cs diff=csharp

# Standard to msysgit
*.doc diff=astextplain
*.DOC diff=astextplain
*.docx diff=astextplain
*.DOCX diff=astextplain
*.dot diff=astextplain
*.DOT diff=astextplain
*.pdf diff=astextplain
*.PDF diff=astextplain
*.rtf diff=astextplain
*.RTF diff=astextplain
2 .gitignore vendored Normal file
@@ -0,0 +1,2 @@
*.iml
.idea
@@ -2,3 +2,5 @@ dockerfiles
===========

Contains the various Dockerfile definitions I'm maintaining.

[](https://gitter.im/itzg/dockerfiles?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
9 build
@@ -1,4 +1,11 @@
pkgs="ubuntu-openjdk-7 gvm spring-boot"
#!/bin/bash

pkgs=ubuntu-openjdk-7
pkgs="$pkgs minecraft-server"
pkgs="$pkgs elasticsearch"
pkgs="$pkgs kibana"
pkgs="$pkgs titan-gremlin"
pkgs="$pkgs cassandra"

for p in $pkgs
do
26 cassandra/Dockerfile Executable file
@@ -0,0 +1,26 @@
FROM java:openjdk-8u72-jdk

MAINTAINER itzg

ENV CASSANDRA_VERSION 2.2.8

ADD http://apache.mirrors.pair.com/cassandra/$CASSANDRA_VERSION/apache-cassandra-$CASSANDRA_VERSION-bin.tar.gz /tmp/apache-cassandra.tgz
RUN tar -C /opt -zxf /tmp/apache-cassandra.tgz && \
    rm /tmp/apache-cassandra.tgz

RUN mv /opt/apache-cassandra-$CASSANDRA_VERSION /opt/cassandra
ENV CASSANDRA_HOME /opt/cassandra
ENV CASSANDRA_CONF /conf
ENV CASSANDRA_DATA /data
WORKDIR $CASSANDRA_HOME

RUN ln -s $CASSANDRA_HOME/bin/* /usr/local/bin
VOLUME ["/data","/conf"]

EXPOSE 9042 9160 7000 7001

ADD cassandra.in.sh $CASSANDRA_HOME/cassandra.in.sh
RUN mv $CASSANDRA_HOME/bin/cassandra.in.sh $CASSANDRA_HOME/bin/orig.cassandra.in.sh

CMD ["/opt/cassandra/bin/cassandra", "-f"]
9 cassandra/README.md Normal file
@@ -0,0 +1,9 @@
Yet another Cassandra image, but this one gets container and non-container access right.

# Basic Usage

To support access from both Docker containers and external, non-Docker clients:

    docker run -d --name cassandra -e PUBLISH_AS=192.168.59.103 -p 9160:9160 itzg/cassandra

replacing `192.168.59.103` with your Docker host's LAN IP address.
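The `cassandra.in.sh` wrapper shown below also honors a `SEEDS` variable, so additional nodes can be pointed at an existing one. A minimal sketch, assuming the first node is reachable at 192.168.59.103:

```bash
# Hypothetical second node joining the cluster seeded by the first node
docker run -d --name cassandra2 \
  -e SEEDS=192.168.59.103 \
  itzg/cassandra
```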
24 cassandra/cassandra.in.sh Executable file
@@ -0,0 +1,24 @@
#!/bin/bash

cassYml=$CASSANDRA_HOME/conf/cassandra.yaml

privateAddr=$(hostname -i)

seeds=${SEEDS:-${PUBLISH_AS:-$privateAddr}}

sed -i -e "s/- seeds:.*/- seeds: \"$seeds\"/" \
    -e "s/listen_address:.*/listen_address: $privateAddr/" \
    -e "s/rpc_address:.*/rpc_address: $privateAddr/" \
    -e "s/start_rpc:.*/start_rpc: true/" \
    -e "s#- /var/lib/cassandra/data#- $CASSANDRA_DATA#" \
    $cassYml

if [ -n "$PUBLISH_AS" ]; then
  sed -i -e "s/\(\s*#\)\?\s*broadcast_address:.*/broadcast_address: $PUBLISH_AS/" $cassYml
fi

# Copy over our tweaked files, but non-clobbering to let user have ultimate control
cp -rn $CASSANDRA_HOME/conf/* $CASSANDRA_CONF

# source the original
. $CASSANDRA_HOME/bin/orig.cassandra.in.sh
11 circle.yml Normal file
@@ -0,0 +1,11 @@
machine:
  services:
    - docker

test:
  override:
    - ./build

notify:
  webhooks:
    - url: https://webhooks.gitter.im/e/4726bb683d8aed018486
30 devbox/Dockerfile Normal file
@@ -0,0 +1,30 @@
FROM itzg/ubuntu-openjdk-7

RUN apt-get update
RUN DEBIAN_FRONTEND=noninteractive \
    apt-get install -yq git curl wget unzip openjdk-7-jdk && \
    apt-get clean

ENV MAVEN_VER 3.3.3
ENV NODEJS_VER 0.12.7

RUN wget -O /tmp/maven.tgz http://apache.mirrors.pair.com/maven/maven-3/$MAVEN_VER/binaries/apache-maven-$MAVEN_VER-bin.tar.gz
RUN tar xvf /tmp/maven.tgz && rm /tmp/maven.tgz
ENV M2_HOME /opt/apache-maven-$MAVEN_VER
ENV PATH $PATH:$M2_HOME/bin

RUN curl -s https://raw.githubusercontent.com/isaacs/nave/master/nave.sh > /usr/local/bin/nave
RUN chmod +x /usr/local/bin/nave
RUN nave usemain latest

RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

VOLUME ["/shared"]

RUN useradd -m -d /home/developer developer

USER developer
RUN curl -s get.gvmtool.net | bash

WORKDIR /home/developer
CMD bash
25 devbox/README.md Normal file
@@ -0,0 +1,25 @@
Provides a development/build environment for Java, Groovy, and NodeJS.

* Provides 'gvm' for Groovy (and more) installation management
* Provides 'nave' for NodeJS installation management
* Pre-installs the latest NodeJS via nave

## Using the persistent/shared workarea

Since devbox containers are intended to be disposable, the image is configured
with a "volume" at `/shared`.

There are a couple of ways you can leverage that volume. Either attach it to
a host-local directory:

    docker run -it -v $(pwd)/workarea:/shared --rm itzg/devbox

or run a "base" container and mount the `/shared` from that onto any
subsequent containers:

    docker run --name devbox-base itzg/devbox touch /shared/READY
    ...later...
    docker run -it --volumes-from devbox-base --rm itzg/devbox

**NOTE** I am using the `--rm` option so the devbox containers will be truly
"burn after use".
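Because the Dockerfile above puts Maven on the `PATH`, a disposable build against the shared workarea can be a one-liner; `myproject` is a hypothetical directory under `workarea`:

```bash
# Hypothetical one-shot Maven build inside a throwaway devbox container
docker run -it -v $(pwd)/workarea:/shared --rm itzg/devbox \
  bash -c 'cd /shared/myproject && mvn package'
```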
@@ -1,5 +0,0 @@
FROM dockerfile/elasticsearch

MAINTAINER itzg

RUN /elasticsearch/bin/plugin -i elasticsearch/marvel/latest
@@ -1,7 +1,5 @@
This image extends the official elasticsearch image by adding the Marvel Sense plugin. This will get you a system ready to use with the [ElasticSearch Getting Started Guide](http://www.elasticsearch.org/guide/en/elasticsearch/guide/current).
**This image is now deprecated.**

With Marvel Sense installed you can access it using a URL like:

    http://192.168.1.216:9200/_plugin/marvel/sense/index.html

(replacing the IP address shown here with your Docker host's)
Please use [itzg/elasticsearch](https://registry.hub.docker.com/u/itzg/elasticsearch/)
instead. See the plugins configuration section for that image to see how
to install Marvel.
32 elasticsearch/Dockerfile Executable file
@@ -0,0 +1,32 @@
FROM openjdk:8u111-jre-alpine

MAINTAINER itzg

RUN apk -U add bash

ENV ES_VERSION=5.1.1

ADD https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-$ES_VERSION.tar.gz /tmp/es.tgz
RUN cd /usr/share && \
    tar xf /tmp/es.tgz && \
    rm /tmp/es.tgz

EXPOSE 9200 9300

ENV ES_HOME=/usr/share/elasticsearch-$ES_VERSION \
    DEFAULT_ES_USER=elasticsearch \
    DISCOVER_TRANSPORT_IP=eth0 \
    DISCOVER_HTTP_IP=eth0 \
    ES_JAVA_OPTS="-Xms1g -Xmx1g"

RUN adduser -S -s /bin/sh $DEFAULT_ES_USER

VOLUME ["/data","/conf"]

WORKDIR $ES_HOME

COPY java.policy /usr/lib/jvm/java-1.8-openjdk/jre/lib/security/
COPY start /start
COPY log4j2.properties $ES_HOME/config/

CMD ["/start"]
218 elasticsearch/README.md Executable file
@@ -0,0 +1,218 @@
This Docker image provides an easily configurable Elasticsearch node. Via port mappings, it is easy to create an arbitrarily sized cluster of nodes. As long as the versions match, you can mix-and-match "real" Elasticsearch nodes with container-ized ones.

# NOTE for use on Linux hosts

Elasticsearch 5.x requires that the virtual memory mmap count is set sufficiently for stable,
production use. [Refer to this guide for more information](https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html).
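As a sketch (262144 is the minimum that this image's `start` script, shown later in this diff, checks for), you would raise the setting on the Docker host itself:

```bash
# Run on the Docker host, not inside the container (requires root)
sysctl -w vm.max_map_count=262144

# Persist across reboots
echo 'vm.max_map_count=262144' >> /etc/sysctl.conf
```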
# Basic Usage

To start an Elasticsearch data node that listens on the standard ports on your host's network interface:

    docker run -d -p 9200:9200 -p 9300:9300 itzg/elasticsearch

You'll then be able to connect to the Elasticsearch HTTP interface to confirm it's alive:

    http://DOCKERHOST:9200/

    {
      "status" : 200,
      "name" : "Charon",
      "version" : {
        "number" : "1.3.5",
        "build_hash" : "4a50e7df768fddd572f48830ae9c35e4ded86ac1",
        "build_timestamp" : "2014-11-05T15:21:28Z",
        "build_snapshot" : false,
        "lucene_version" : "4.9"
      },
      "tagline" : "You Know, for Search"
    }

Where `DOCKERHOST` would be the actual hostname of your host running Docker.

# Simple, multi-node cluster

To run a multi-node cluster (3-node in this example) on a single Docker machine use:

    docker run -d --name es0 -p 9200:9200 itzg/elasticsearch
    docker run -d --name es1 --link es0 -e UNICAST_HOSTS=es0 itzg/elasticsearch
    docker run -d --name es2 --link es0 -e UNICAST_HOSTS=es0 itzg/elasticsearch

and then check the cluster health, such as http://192.168.99.100:9200/_cluster/health?pretty

    {
      "cluster_name" : "elasticsearch",
      "status" : "green",
      "timed_out" : false,
      "number_of_nodes" : 3,
      "number_of_data_nodes" : 3,
      "active_primary_shards" : 0,
      "active_shards" : 0,
      "relocating_shards" : 0,
      "initializing_shards" : 0,
      "unassigned_shards" : 0
    }

# Configuration Summary

## Ports

* `9200` - HTTP REST
* `9300` - Native transport

## Volumes

* `/data` - location of `path.data`
* `/conf` - location of `path.conf`

# Configuration Details

The following configuration options are specified using `docker run` environment variables (`-e`) like

    docker run ... -e NAME=VALUE ... itzg/elasticsearch

Since Docker's `-e` settings are baked into the container definition, this image provides an extra feature to change any of the settings below for an existing container. Either create/edit the file `env` in the `/conf` volume mapping or edit within the running container's context using:

    docker exec -it CONTAINER_ID vi /conf/env

replacing `CONTAINER_ID` with the container's ID or name.

The contents of the `/conf/env` file are standard shell

    NAME=VALUE

entries where `NAME` is one of the variables described below.

Configuration options not explicitly supported below can be specified via the `OPTS` environment variable. For example, by default `OPTS` is set with

    OPTS=-Dnetwork.bind_host=_non_loopback_

_NOTE: That option is a default since `bind_host` defaults to `localhost` as of 2.0, which isn't helpful for
port mapping out from the container_.

## Cluster Name

If joining a pre-existing cluster, then you may need to specify a cluster name different than the default "elasticsearch":

    -e CLUSTER=dockers

## Zen Unicast Hosts

When joining a multi-physical-host cluster, multicast may not be supported on the physical network. In that case, your node can reference one or more specific hosts in the cluster via the [Zen Unicast Hosts](http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-zen.html#unicast) capability as a comma-separated list of `HOST:PORT` pairs:

    -e UNICAST_HOSTS=HOST:PORT[,HOST:PORT]

such as

    -e UNICAST_HOSTS=192.168.0.100:9300

## Plugins

You can install one or more plugins before startup by passing a comma-separated list of plugins.

    -e PLUGINS=ID[,ID]

In this example, it will install the Marvel plugin

    -e PLUGINS=elasticsearch/marvel/latest

Many more plugins [are available here](http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/modules-plugins.html#known-plugins).

## Publish As

Since the container gives the Elasticsearch software an isolated perspective of its networking, it will most likely advertise its published address with a container-internal IP address. This can be overridden with a physical networking name and port using:

    -e PUBLISH_AS=DOCKERHOST:9301

_Author Note: I have yet to hit a case where this was actually necessary. Other
than the cosmetic weirdness in the logs, Elasticsearch seems to be quite tolerant._

## Node Name

Rather than use the randomly assigned node name, you can indicate a specific one using:

    -e NODE_NAME=Docker

## Node Type

If you refer to [the Node section](https://www.elastic.co/guide/en/elasticsearch/reference/2.3/modules-node.html)
of the Elasticsearch reference guide, you'll find that there are three main types of nodes: master-eligible, data, and client.

In larger clusters it is important to dedicate a small number (>= 3) of master nodes. There are also cases where a large cluster may need dedicated gateway nodes that are neither master nor data nodes and purely operate as "smart routers" and have large amounts of CPU and memory to handle client requests and search-reduce.

To simplify all that, this image provides a `TYPE` variable to let you choose amongst these combinations. The choices are:

* (not set, the default) : the default node type which is both master-eligible and a data node
* `MASTER` : master-eligible, but holds no data. It is good to have three or more of these in a
large cluster
* `DATA` (or `NON_MASTER`) : holds data and serves search/index requests. Scale these out for elastic-y goodness.
* `GATEWAY` : only operates as a client node or a "smart router". These are the ones whose HTTP port 9200 will need to be exposed

A [Docker Compose](https://docs.docker.com/compose/overview/) file will serve as a good example of these three node types:

```
version: '2'

services:
  gateway:
    image: itzg/elasticsearch
    environment:
      UNICAST_HOSTS: master
      TYPE: GATEWAY
    ports:
      - "9200:9200"

  master:
    image: itzg/elasticsearch
    environment:
      UNICAST_HOSTS: gateway
      TYPE: MASTER
      MIN_MASTERS: 2

  data:
    image: itzg/elasticsearch
    environment:
      UNICAST_HOSTS: master,gateway
      TYPE: DATA
```

## Minimum Master Nodes

In combination with the `TYPE` variable above, you will also want to configure the minimum master nodes to [avoid split-brain](https://www.elastic.co/guide/en/elasticsearch/reference/2.3/modules-node.html#split-brain) during network outages.

The minimum, which can be calculated as `(master_eligible_nodes / 2) + 1`, can be set with the `MIN_MASTERS` variable.

Using the Docker Compose file above, a value of `2` is appropriate when scaling the cluster to 3 master nodes:

    docker-compose scale master=3

## Auto transport/http discovery with Swarm Mode

When using Docker Swarm mode (starting with 1.12), the overlay and ingress network interfaces are assigned
multiple IP addresses. As a result, it creates confusion for the transport publish logic even when using
the special value `_eth0_`.

To resolve this, add

    -e DISCOVER_TRANSPORT_IP=eth0

replacing `eth0` with another interface within the container, if needed.

The same can be done for publish/binding of the http module by adding:

    -e DISCOVER_HTTP_IP=eth2

## Heap size and other JVM options

By default this image will run Elasticsearch with a Java heap size of 1 GB. If that value
or any other JVM options need to be adjusted, then replace the `ES_JAVA_OPTS`
environment variable.

For example, this would allow for the use of 16 GB of heap:

    -e ES_JAVA_OPTS="-Xms16g -Xmx16g"

Refer to [this page](https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html)
for more information about why both the minimum and maximum sizes were set to
the same value.
6 elasticsearch/java.policy Normal file
@@ -0,0 +1,6 @@
grant {
    // JMX Java Management eXtensions
    permission javax.management.MBeanTrustPermission "register";
    permission javax.management.MBeanServerPermission "createMBeanServer";
    permission javax.management.MBeanPermission "-#-[-]", "queryNames";
};
74 elasticsearch/log4j2.properties Normal file
@@ -0,0 +1,74 @@
status = error

# log action execution errors for easier debugging
logger.action.name = org.elasticsearch.action
logger.action.level = debug

appender.console.type = Console
appender.console.name = console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n

appender.rolling.type = RollingFile
appender.rolling.name = rolling
appender.rolling.fileName = ${sys:es.logs}.log
appender.rolling.layout.type = PatternLayout
appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.10000m%n
appender.rolling.filePattern = ${sys:es.logs}-%d{yyyy-MM-dd}.log
appender.rolling.policies.type = Policies
appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.rolling.policies.time.interval = 1
appender.rolling.policies.time.modulate = true

rootLogger.level = info
rootLogger.appenderRef.console.ref = console
#rootLogger.appenderRef.rolling.ref = rolling

appender.deprecation_rolling.type = RollingFile
appender.deprecation_rolling.name = deprecation_rolling
appender.deprecation_rolling.fileName = ${sys:es.logs}_deprecation.log
appender.deprecation_rolling.layout.type = PatternLayout
appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.10000m%n
appender.deprecation_rolling.filePattern = ${sys:es.logs}_deprecation-%i.log.gz
appender.deprecation_rolling.policies.type = Policies
appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.deprecation_rolling.policies.size.size = 1GB
appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
appender.deprecation_rolling.strategy.max = 4

logger.deprecation.name = org.elasticsearch.deprecation
logger.deprecation.level = warn
#logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
logger.deprecation.additivity = false

appender.index_search_slowlog_rolling.type = RollingFile
appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
appender.index_search_slowlog_rolling.fileName = ${sys:es.logs}_index_search_slowlog.log
appender.index_search_slowlog_rolling.layout.type = PatternLayout
appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.10000m%n
appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs}_index_search_slowlog-%d{yyyy-MM-dd}.log
appender.index_search_slowlog_rolling.policies.type = Policies
appender.index_search_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.index_search_slowlog_rolling.policies.time.interval = 1
appender.index_search_slowlog_rolling.policies.time.modulate = true

logger.index_search_slowlog_rolling.name = index.search.slowlog
logger.index_search_slowlog_rolling.level = trace
logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = console
logger.index_search_slowlog_rolling.additivity = false

appender.index_indexing_slowlog_rolling.type = RollingFile
appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs}_index_indexing_slowlog.log
appender.index_indexing_slowlog_rolling.layout.type = PatternLayout
appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.10000m%n
appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs}_index_indexing_slowlog-%d{yyyy-MM-dd}.log
appender.index_indexing_slowlog_rolling.policies.type = Policies
appender.index_indexing_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.index_indexing_slowlog_rolling.policies.time.interval = 1
appender.index_indexing_slowlog_rolling.policies.time.modulate = true

logger.index_indexing_slowlog.name = index.indexing.slowlog.index
logger.index_indexing_slowlog.level = trace
logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = console
logger.index_indexing_slowlog.additivity = false
132 elasticsearch/start Executable file
@@ -0,0 +1,132 @@
#!/bin/sh

pre_checks() {
  mmc=$(sysctl vm.max_map_count|sed 's/.*= //')
  if [[ $mmc -lt 262144 ]]; then
    echo "
ERROR: As of 5.0.0 Elasticsearch requires increasing mmap counts.
Refer to https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html
"
    exit 1
  fi
}

discoverIpFromLink() {
  dev=$1
  mode=$2
  ip=`ipaddr show dev $dev scope global|awk '$1 == "inet" { if (!match($2,"/32")) { gsub("/.*","",$2) ; print $2 } }'`
  echo "Discovered $mode address $ip for $dev"
  OPTS="$OPTS -E $mode.host=$ip"
}

setup_clustering() {

  if [ -n "$CLUSTER" ]; then
    OPTS="$OPTS -E cluster.name=$CLUSTER"
    if [ -n "$CLUSTER_FROM" ]; then
      if [ -d /data/$CLUSTER_FROM -a ! -d /data/$CLUSTER ]; then
        echo "Performing cluster data migration from $CLUSTER_FROM to $CLUSTER"
        mv /data/$CLUSTER_FROM /data/$CLUSTER
      fi
    fi
  fi

  if [ -n "$NODE_NAME" ]; then
    OPTS="$OPTS -E node.name=$NODE_NAME"
  fi

  if [ -n "$MULTICAST" ]; then
    OPTS="$OPTS -E discovery.zen.ping.multicast.enabled=$MULTICAST"
  fi

  if [ -n "$UNICAST_HOSTS" ]; then
    OPTS="$OPTS -E discovery.zen.ping.unicast.hosts=$UNICAST_HOSTS"
  fi

  if [ -n "$PUBLISH_AS" ]; then
    OPTS="$OPTS -E transport.publish_host=$(echo $PUBLISH_AS | awk -F: '{print $1}')"
    OPTS="$OPTS -E transport.publish_port=$(echo $PUBLISH_AS | awk -F: '{if ($2) print $2; else print 9300}')"
  fi

  if [ -n "$MIN_MASTERS" ]; then
    OPTS="$OPTS -E discovery.zen.minimum_master_nodes=$MIN_MASTERS"
  fi

}

install_plugins() {

  if [ -n "$PLUGINS" ]; then
    for p in $(echo $PLUGINS | awk -v RS=, '{print}')
    do
      echo "Installing the plugin $p"
      $ES_HOME/bin/elasticsearch-plugin install $p
    done
  else
    mkdir -p $ES_HOME/plugins
  fi
}

setup_personality() {

  if [ -n "$TYPE" ]; then
    case $TYPE in
      MASTER)
        OPTS="$OPTS -E node.master=true -E node.data=false"
        ;;

      GATEWAY)
        OPTS="$OPTS -E node.master=false -E node.data=false"
        ;;

      DATA|NON_MASTER)
        OPTS="$OPTS -E node.master=false -E node.data=true"
        ;;

      *)
        echo "Unknown node type. Please use MASTER|GATEWAY|DATA|NON_MASTER"
        exit 1
    esac
  fi

}

pre_checks

if [ -f /conf/env ]; then
  . /conf/env
fi

if [ ! -e /conf/elasticsearch.* ]; then
  cp $ES_HOME/config/elasticsearch.yml /conf
fi

if [ ! -e /conf/log4j2.properties ]; then
  cp $ES_HOME/config/log4j2.properties /conf
fi

OPTS="$OPTS \
  -E path.conf=/conf \
  -E path.data=/data \
  -E path.logs=/data \
  -E transport.tcp.port=9300 \
  -E http.port=9200"

discoverIpFromLink $DISCOVER_TRANSPORT_IP transport
discoverIpFromLink $DISCOVER_HTTP_IP http

setup_personality
setup_clustering
install_plugins

mkdir -p /conf/scripts

echo "Starting Elasticsearch with the options $OPTS"
CMD="$ES_HOME/bin/elasticsearch $OPTS"
if [ `id -u` = 0 ]; then
  echo "Running as non-root..."
  chown -R $DEFAULT_ES_USER /data /conf
  su -c "$CMD" $DEFAULT_ES_USER
else
  $CMD
fi
26 gitblit/Dockerfile Normal file
@@ -0,0 +1,26 @@
FROM java:8

MAINTAINER itzg

ENV GITBLIT_VERSION 1.7.1

RUN wget -qO /tmp/gitblit.tgz http://dl.bintray.com/gitblit/releases/gitblit-$GITBLIT_VERSION.tar.gz

RUN tar -C /opt -xvf /tmp/gitblit.tgz && \
    rm /tmp/gitblit.tgz

VOLUME ["/data"]

ADD start.sh /start

ENV GITBLIT_PATH=/opt/gitblit-${GITBLIT_VERSION} \
    GITBLIT_HTTPS_PORT=443 \
    GITBLIT_HTTP_PORT=80 \
    GITBLIT_BASE_FOLDER=/data \
    GITBLIT_ADMIN_USER=admin \
    GITBLIT_INITIAL_REPO=
WORKDIR $GITBLIT_PATH

EXPOSE 80 443

ENTRYPOINT ["/start"]
46 gitblit/README.md Normal file
@@ -0,0 +1,46 @@
Provides a ready-to-use instance of [GitBlit](http://gitblit.com/).

## Basic usage

Start the GitBlit container using

    docker run -d -p 80:80 -p 443:443 --name gitblit itzg/gitblit

Access its web interface at the mapped HTTP (80) or HTTPS (443) port of the
Docker host. Login with the default credentials __admin__ / __admin__ .

## Data volume

In order to allow for future upgrades, run the container with a volume mount of `/data`, such as:

    -v /tmp/gitblit-data:/data

## Initial repository creation

As a convenience for cluster configuration management with git
(such as with [Spring Cloud Config](https://cloud.spring.io/spring-cloud-config/)),
you may specify the name of an initial repository to be owned by the 'admin' user.
This can be enabled by passing the name of that repository via the environment
variable `GITBLIT_INITIAL_REPO`, such as

    -e GITBLIT_INITIAL_REPO=default

## Create repositories with content

In addition to the approach above, you can push repositories with existing
content by attaching them to sub-directories of `/repos`, such as

    docker run -v $HOME/git/example:/repos/example ...

## Custom configuration

You can add or override any of the `*.properties` files for configuring GitBlit,
typically `gitblit.properties`, by placing those files in a volume attached at
`/config`, such as

    -v $(pwd)/extra-config:/config

The property files in that configuration directory will be renamed with the
suffix `.applied` to avoid overwriting manually modified configuration on
the next container startup.
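Putting the pieces above together, a full invocation might look like the following sketch; the host paths are placeholders:

```bash
# Hypothetical run combining persistent data, custom config, and an initial repo
docker run -d --name gitblit \
  -p 80:80 -p 443:443 \
  -v /srv/gitblit-data:/data \
  -v $(pwd)/extra-config:/config \
  -e GITBLIT_INITIAL_REPO=default \
  itzg/gitblit
```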
103 gitblit/start.sh Executable file
@@ -0,0 +1,103 @@
#!/bin/bash

apply_base_data() {
  contents=`ls $GITBLIT_BASE_FOLDER|wc -l`

  if [ $contents = "0" ]; then
    cp -r $GITBLIT_PATH/data/* $GITBLIT_BASE_FOLDER
  fi
}

apply_config() {
  cp -rf /config/* $GITBLIT_BASE_FOLDER
}

create_repo() {
  local repo_dir=$GITBLIT_BASE_FOLDER/git/$1.git
  mkdir -p $repo_dir
  cd $repo_dir

  git init --bare

  echo "
[gitblit]
description =
originRepository =
owner = $GITBLIT_ADMIN_USER
acceptNewPatchsets = true
acceptNewTickets = true
mergeTo = master
useIncrementalPushTags = false
allowForks = true
accessRestriction = PUSH
authorizationControl = AUTHENTICATED
verifyCommitter = false
showRemoteBranches = false
isFrozen = false
skipSizeCalculation = false
skipSummaryMetrics = false
federationStrategy = FEDERATE_THIS
isFederated = false
gcThreshold =
gcPeriod = 0
" >> config

  git config --replace-all core.logallrefupdates false

  echo "
CREATING repository '$1' with:
  * read/clone access for all
  * push access for authenticated users"

  RET="file://$repo_dir"
}

apply_repos() {
  for rdir in /repos/*; do
    if [ -e $rdir/.git ]; then
      r=$(basename $rdir)
      create_repo $r
      local url=$RET
      cd $rdir
      echo "* pushed existing content"
      git push --all $url
    fi

  done
}

create_initial_repo() {
  if [ -d $GITBLIT_INITIAL_REPO ]; then
    return
  fi

  create_repo $GITBLIT_INITIAL_REPO
}

shopt -s nullglob
if [ ! -f /var/local/gitblit_firststart ]; then
  FIRSTSTART=1
else
  FIRSTSTART=0
fi

if [ $FIRSTSTART = 1 ]; then
  apply_base_data

  echo "
Applying configuration from /config
"
  apply_config
  touch /var/local/gitblit_firststart
fi


if [[ -n $GITBLIT_INITIAL_REPO ]]; then
  create_initial_repo
fi
apply_repos

cd $GITBLIT_PATH
$JAVA_HOME/bin/java -jar $GITBLIT_PATH/gitblit.jar \
  --httpsPort $GITBLIT_HTTPS_PORT --httpPort $GITBLIT_HTTP_PORT \
  --baseFolder $GITBLIT_BASE_FOLDER
1 jekyll-github-pages/.gitignore vendored Normal file
@@ -0,0 +1 @@
/site
24 jekyll-github-pages/Dockerfile Normal file
@@ -0,0 +1,24 @@
FROM ubuntu:trusty

MAINTAINER itzg

ENV APT_GET_UPDATE 2014-09-18

RUN apt-get update
RUN apt-get -y upgrade

RUN apt-get -y install ruby ruby-dev make patch nodejs
RUN gem install bundler

ADD Gemfile /tmp/Gemfile
WORKDIR /tmp
RUN bundle install

ADD template /site-template

VOLUME ["/site"]
EXPOSE 4000

ADD start.sh /start
CMD ["/start"]
2 jekyll-github-pages/Gemfile Normal file
@@ -0,0 +1,2 @@
source 'https://rubygems.org'
gem 'github-pages'
14 jekyll-github-pages/README.md Normal file
@@ -0,0 +1,14 @@
This container is pre-configured according to the
[GitHub Pages use of Jekyll](https://help.github.com/articles/using-jekyll-with-pages).

It serves up the generated content on port 4000 and the site is generated from
the container's `/site` volume. You can either bring your own site content or
let it generate some VERY simple content along with the standard Jekyll directory
layout.

A typical way to run this:

    docker run -it -p 4000:4000 -v $(pwd)/site:/site itzg/jekyll-github-pages

where either it will load your content or initialize the content under
`site` in your current working directory.
13 jekyll-github-pages/start.sh Executable file
@@ -0,0 +1,13 @@
#!/bin/sh

if [ `ls /site/index.* 2> /dev/null | wc -l` = 0 ]; then
  echo "Preparing /site with default content..."
  cp -r /site-template/* /site
fi

if [ ! -e /site/Gemfile ]; then
  cp /tmp/Gemfile /site/Gemfile
fi

cd /site
bundle exec jekyll serve
1 jekyll-github-pages/template/_config.yml Normal file
@@ -0,0 +1 @@
highlighter: pygments
3 jekyll-github-pages/template/_includes/footer.html Normal file
@@ -0,0 +1,3 @@
<footer>
  <i>Goodbye</i>
</footer>
3 jekyll-github-pages/template/_includes/header.html Normal file
@@ -0,0 +1,3 @@
<header>
  <h1>{{ page.title }}</h1>
</header>
18 jekyll-github-pages/template/_includes/top.html Normal file
@@ -0,0 +1,18 @@
<!doctype html>
<html lang="en-US">
<head>
  <meta charset="UTF-8">
  <meta http-equiv="X-UA-Compatible" content="IE=edge">
  <meta name="viewport" content="width=device-width, initial-scale=1">

  <title>{{ page.title }}</title>

  <!-- Latest compiled and minified CSS -->
  <link rel="stylesheet" href="//maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap.min.css">

  <!-- Optional theme -->
  <link rel="stylesheet" href="//maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap-theme.min.css">

  <!-- Latest compiled and minified JavaScript -->
  <script src="//maxcdn.bootstrapcdn.com/bootstrap/3.2.0/js/bootstrap.min.js"></script>
</head>
12 jekyll-github-pages/template/_layouts/default.html Normal file
@@ -0,0 +1,12 @@
{% include top.html %}

<body>

{% include header.html %}

{{ content }}

{% include footer.html %}

</body>
</html>
6 jekyll-github-pages/template/index.html Normal file
@@ -0,0 +1,6 @@
---
layout: default
title: Powered by Jekyll
overview: true
---
This is where the content goes.
17 jenkins/Dockerfile Normal file
@@ -0,0 +1,17 @@
FROM java:openjdk-8u102-jdk

MAINTAINER itzg

RUN apt-get update && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y \
      graphviz \
    && apt-get clean

ENV JENKINS_HOME=/data

VOLUME ["/data", "/root", "/opt/jenkins"]
EXPOSE 8080 38252

COPY download-and-start.sh /opt/download-and-start

CMD ["/opt/download-and-start"]
51 jenkins/README.md Normal file
@@ -0,0 +1,51 @@
A self-upgrading [Jenkins CI](http://jenkins-ci.org/) server

# Basic Usage

To start Jenkins with the latest version:

    ID=$(docker run -d -p 8080:8080 itzg/jenkins)

At a later time, you can upgrade by restarting the container:

    docker stop $ID
    docker start $ID

# Attaching host directory to Jenkins home directory

The Jenkins home directory is attachable at `/data`, so attaching to a host volume
would be:

    ID=$(docker run -d -p 8080:8080 -v /SOME_HOST_DIR:/data itzg/jenkins)

# Enabling Jenkins slave agents

By default, Jenkins will pick a random port to allow slave nodes launched
by JNLP. Since Docker networking is basically a firewall, a random port
won't work for us. Instead the fixed port **38252** was chosen (arbitrarily)
to be exposed by the container.

Launch your Jenkins container using

    ID=$(docker run -d -p 8080:8080 -p 38252:38252 itzg/jenkins)

and configure the port in the Global Security settings:

# Image Parameters

## Volumes

* `/data` - a majority of the Jenkins content is maintained here, such as workspaces
* `/root` - some tools, such as Maven, utilize the home directory for default repository storage
* `/opt/jenkins` - the installed distribution is expanded here

## Ports

* `8080` - for the web UI
* `38252` - for incoming slave JNLP connections

## Environment Variables

* `JENKINS_OPTS` - passed to the initial Java invocation of Jenkins
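Judging from `download-and-start.sh` below, `JENKINS_OPTS` is placed before `-jar jenkins.war` on the `java` command line, so JVM flags are the natural use; an illustrative (not prescribed) example:

```bash
# Hypothetical: give the Jenkins JVM a larger heap
docker run -d -p 8080:8080 -e JENKINS_OPTS="-Xmx1024m" itzg/jenkins
```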
29 jenkins/download-and-start.sh Executable file
@@ -0,0 +1,29 @@
#!/bin/bash

mirrorUrl=http://mirrors.jenkins-ci.org/war/latest/jenkins.war
url=$(curl -s --head $mirrorUrl|awk -F': ' '$1 == "Location" { print $2 }' | sed 's/[[:space:]]*$//')
version=$(echo $url | sed 's#.*/war/\(.*\)/jenkins.war#\1#')

mkdir -p /opt/jenkins
trackingFile=/opt/jenkins/INSTALLED

installed=
if [ -f $trackingFile ]; then
  installed=$(cat $trackingFile)
  echo "Version installed is $installed"
fi

if [ $version != "$installed" ]; then
  echo "Downloading $version from '$url'"
  while ! curl -s -o /opt/jenkins/jenkins.war "$url"
  do
    echo "Trying again in 5 seconds"
    sleep 5
  done

  echo $version > $trackingFile
fi


cd /opt/jenkins
exec java $JENKINS_OPTS -jar jenkins.war
22 kibana/Dockerfile Executable file
@@ -0,0 +1,22 @@
FROM openjdk:8u111-jre

MAINTAINER itzg

ENV KIBANA_VERSION 5.1.1

ADD https://artifacts.elastic.co/downloads/kibana/kibana-${KIBANA_VERSION}-linux-x86_64.tar.gz /tmp/kibana.tgz

RUN tar -C /opt -xzf /tmp/kibana.tgz && rm /tmp/kibana.tgz

ENV KIBANA_HOME /opt/kibana-$KIBANA_VERSION-linux-x86_64

# Simplify for cross-container
ENV ES_URL http://es:9200

WORKDIR $KIBANA_HOME

ADD start.sh /start

EXPOSE 5601

CMD ["/start"]
26 kibana/README.md Normal file
@@ -0,0 +1,26 @@
Provides a ready-to-run [Kibana](http://www.elasticsearch.org/overview/kibana/) server that can
easily hook into your [Elasticsearch containers](https://registry.hub.docker.com/u/itzg/elasticsearch/).

## Usage with Docker elasticsearch container

This is by far the easiest and most Docker'ish way to run Kibana.

Assuming you started one or more containers using something like

    docker run -d --name your-es -p 9200:9200 itzg/elasticsearch

Start Kibana using

    docker run -d -p 5601:5601 --link your-es:es itzg/kibana

Proceed to use Kibana starting from
[this point in the documentation](http://www.elasticsearch.org/guide/en/kibana/current/access.html)

## Usage with non-Docker elasticsearch

Start Kibana using

    docker run -d -p 5601:5601 -e ES_URL=http://YOUR_ES:9200 itzg/kibana

Replacing `http://YOUR_ES:9200` with the appropriate URL for your system.
5 kibana/start.sh Executable file
@@ -0,0 +1,5 @@
#!/bin/sh

OPTS="-e $ES_URL -H $HOSTNAME"

exec bin/kibana $OPTS
25 logstash/Dockerfile Executable file
@@ -0,0 +1,25 @@
FROM itzg/ubuntu-openjdk-7

MAINTAINER itzg

ENV LOGSTASH_VERSION 1.5.0-1

RUN wget -qO /tmp/logstash.deb http://download.elastic.co/logstash/logstash/packages/debian/logstash_${LOGSTASH_VERSION}_all.deb

RUN dpkg -i /tmp/logstash.deb && rm /tmp/logstash.deb

WORKDIR /opt/logstash

# For collectd reception
EXPOSE 25826

# /conf is the default directory where our logstash will read pipeline config files
# /logs is an optional attach point to reference something like /var/log on the host
VOLUME ["/conf","/logs"]

ENV PLUGIN_UPDATES 2015-06-10

RUN bin/plugin install logstash-input-heartbeat
RUN bin/plugin install logstash-output-elasticsearch_groom

CMD ["bin/logstash","agent","-f","/conf"]
44 logstash/README.md Normal file
@@ -0,0 +1,44 @@
This image bundles the latest (1.5.x) version of Logstash with the ability to
groom its own Elasticsearch indices.

# Basic Usage

To start a Logstash container, set up a directory on your host with one or more Logstash
pipeline configuration files, called `$HOST_CONF` here, and run

    docker run -d -v $HOST_CONF:/conf itzg/logstash

# Accessing host logs

Logstash is much more useful when it is actually processing...logs. Logs inside the container
are non-existent, but you can attach the host machine's `/var/log` directory via the container's
`/logs` volume:

    docker run ... -v /var/log:/logs ...

Keep in mind you will need to configure `file` inputs with a base path of `/logs`, such as

```
file {
  path => ['/logs/syslog']
  type => 'syslog'
}
```

# Receiving input from collectd

To allow for incoming [collectd](https://collectd.org/) content, **UDP** port 25826 is exposed and
can be mapped onto the host using:

    docker run ... -p 25826:25826/udp

Regardless of the host port, be sure to configure the logstash input to bind at port `25826`, such
as

```
udp {
  port => 25826
  codec => collectd { }
  buffer_size => 1452
}
```
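Combining the options above, a single command that reads pipeline files from the host, watches the host's logs, and accepts collectd metrics could look like this sketch; `$HOST_CONF` is your own directory:

```bash
# Hypothetical all-in-one invocation
docker run -d --name logstash \
  -v $HOST_CONF:/conf \
  -v /var/log:/logs \
  -p 25826:25826/udp \
  itzg/logstash
```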
25 logstash/conf/example.conf Normal file
@@ -0,0 +1,25 @@
input {
  heartbeat {
    type => 'groom'
    interval => 11
    add_field => {
      scope => 'open'
      cutoff => '4w'
      action => 'close'
    }
  }
}

output {

  if [type] == 'groom' {
    elasticsearch_groom {
      host => 'es:9200'
      index => 'logstash-%{+YYYY.MM.dd}'
      scope => '%{scope}'
      age_cutoff => '%{cutoff}'
      action => '%{action}'
    }
  }
}
1 minecraft-server/.dockerignore Normal file
@@ -0,0 +1 @@
data
1 minecraft-server/.gitignore vendored Normal file
@@ -0,0 +1 @@
/data/
@@ -1,25 +1,43 @@
FROM itzg/ubuntu-openjdk-7
FROM java:8

MAINTAINER itzg

ENV APT_GET_UPDATE 2016-04-23
RUN apt-get update
RUN apt-get install -y wget libmozjs-24-bin
RUN update-alternatives --install /usr/bin/js js /usr/bin/js24 100

RUN wget -O /usr/bin/jsawk https://github.com/micha/jsawk/raw/master/jsawk
RUN chmod +x /usr/bin/jsawk
RUN DEBIAN_FRONTEND=noninteractive apt-get install -y \
    imagemagick \
    lsof \
    nano \
    sudo \
    vim \
    jq \
    && apt-get clean

EXPOSE 25565
RUN useradd -s /bin/false --uid 1000 minecraft \
    && mkdir /data \
    && mkdir /config \
    && mkdir /mods \
    && mkdir /plugins \
    && mkdir /home/minecraft \
    && chown minecraft:minecraft /data /config /mods /plugins /home/minecraft

ADD run.sh /run
EXPOSE 25565 25575

VOLUME ['/data']
ADD server.properties /tmp/server.properties
ADD https://github.com/itzg/restify/releases/download/1.0.3/restify_linux_amd64 /usr/local/bin/restify
COPY start.sh /start
COPY start-minecraft.sh /start-minecraft
COPY mcadmin.jq /usr/share
RUN chmod +x /usr/local/bin/*

VOLUME ["/data","/mods","/config","/plugins","/home/minecraft"]
COPY server.properties /tmp/server.properties
WORKDIR /data

ENTRYPOINT /run
ENTRYPOINT [ "/start" ]

ENV MOTD A Minecraft Server Powered by Docker
ENV LEVEL world
ENV JVM_OPTS -Xmx512M -Xms512M
ENV VERSION 1.7.9
ENV UID=1000 GID=1000 \
    MOTD="A Minecraft Server Powered by Docker" \
    JVM_OPTS="-Xmx1024M -Xms1024M" \
    TYPE=VANILLA VERSION=LATEST FORGEVERSION=RECOMMENDED LEVEL=world PVP=true DIFFICULTY=easy \
    LEVEL_TYPE=DEFAULT GENERATOR_SETTINGS= WORLD= MODPACK= ONLINE_MODE=TRUE CONSOLE=true
@@ -1,29 +1,559 @@
|
||||
This docker image provides a Minecraft Server that will automatically download the latest stable, latest snapshot, or any specific server version.
|
||||
|
||||
[](https://hub.docker.com/r/itzg/minecraft-server/)
|
||||
[](https://hub.docker.com/r/itzg/minecraft-server/)
|
||||
|
||||
This docker image provides a Minecraft Server that will automatically download the latest stable
|
||||
version at startup. You can also run/upgrade to any specific version or the
|
||||
latest snapshot. See the *Versions* section below for more information.
|
||||
|
||||
To simply use the latest stable version, run
|
||||
|
||||
docker run -d -p 25565:25565 minecraft-server
|
||||
|
||||
where the default server port, 25565, will be exposed on your host machine.
|
||||
docker run -d -p 25565:25565 --name mc itzg/minecraft-server
|
||||
|
||||
In order to persist the Minecraft data, which you *probably want to do for a real server setup*, use the `-v` argument to map a local path to the `/data' path in the container, such as
|
||||
where the standard server port, 25565, will be exposed on your host machine.
|
||||
|
||||
docker run -d -v /path/on/host:/data -p 25565:25565 minecraft-server
|
||||
If you want to serve up multiple Minecraft servers or just use an alternate port,
|
||||
change the host-side port mapping such as
|
||||
|
||||
docker run -p 25566:25565 ...
|
||||
|
||||
will serve your Minecraft server on your host's port 25566 since the `-p` syntax is
|
||||
`host-port`:`container-port`.
|
||||
|
||||
Speaking of multiple servers, it's handy to give your containers explicit names using `--name`, such as
|
||||
|
||||
docker run -d -p 25565:25565 --name mc itzg/minecraft-server
|
||||
|
||||
With that you can easily view the logs, stop, or re-start the container:
|
||||
|
||||
docker logs -f mc
|
||||
( Ctrl-C to exit logs action )
|
||||
|
||||
docker stop mc
|
||||
|
||||
docker start mc
|
||||
|
||||
## Interacting with the server
|
||||
|
||||
In order to attach and interact with the Minecraft server, add `-it` when starting the container, such as
|
||||
|
||||
docker run -d -it -p 25565:25565 --name mc itzg/minecraft-server
|
||||
|
||||
With that you can attach and interact at any time using
|
||||
|
||||
docker attach mc
|
||||
|
||||
and then Control-p Control-q to **detach**.
|
||||
|
||||
For remote access, configure your Docker daemon to use a `tcp` socket (such as `-H tcp://0.0.0.0:2375`)
|
||||
and attach from another machine:
|
||||
|
||||
docker -H $HOST:2375 attach mc
|
||||
|
||||
Unless you're on a home/private LAN, you should [enable TLS access](https://docs.docker.com/articles/https/).
|
||||
|
||||
## EULA Support
|
||||
|
||||
Mojang now requires accepting the [Minecraft EULA](https://account.mojang.com/documents/minecraft_eula). To accept add
|
||||
|
||||
-e EULA=TRUE
|
||||
|
||||
such as
|
||||
|
||||
docker run -d -it -e EULA=TRUE -p 25565:25565 --name mc itzg/minecraft-server
|
||||
|
||||
## Attaching data directory to host filesystem
|
||||
|
||||
In order to readily access the Minecraft data, use the `-v` argument
|
||||
to map a directory on your host machine to the container's `/data` directory, such as:
|
||||
|
||||
docker run -d -v /path/on/host:/data ...
|
||||
|
||||
When attached in this way you can stop the server, edit the configuration under your attached `/path/on/host`
|
||||
and start the server again with `docker start CONTAINERID` to pick up the new configuration.
|
||||
|
||||
**NOTE**: By default, the files in the attached directory will be owned by the host user with UID of 1000 and host group with GID of 1000.
|
||||
You can use an different UID and GID by passing the options:
|
||||
|
||||
-e UID=1000 -e GID=1000
|
||||
|
||||
replacing 1000 with a UID and GID that is present on the host.
|
||||
Here is one way to find the UID and GID:
|
||||
|
||||
id some_host_user
|
||||
getent group some_host_group
|
||||
|
||||
## Versions
|
||||
|
||||
To use a different Minecraft version, pass the `VERSION` environment variable, which can have the value
|
||||
|
||||
* LATEST
|
||||
* SNAPSHOT
|
||||
* (or a specific version, such as "1.7.9")
|
||||
|
||||
For example, to use the latest snapshot:
|
||||
|
||||
docker run -d -e VERSION=SNAPSHOT -p 25565:25565 minecraft-server
|
||||
|
||||
docker run -d -e VERSION=SNAPSHOT ...
|
||||
|
||||
or a specific version:
|
||||
|
||||
docker run -d -e VERSION=1.7.9 -p 25565:25565 minecraft-server
|
||||
|
||||
docker run -d -e VERSION=1.7.9 ...
|
||||
|
||||
## Running a Forge Server
|
||||
|
||||
Enable Forge server mode by adding a `-e TYPE=FORGE` to your command-line.
|
||||
By default the container will run the `RECOMMENDED` version of [Forge server](http://www.minecraftforge.net/wiki/)
|
||||
but you can also choose to run a specific version with `-e FORGEVERSION=10.13.4.1448`.
|
||||
|
||||
$ docker run -d -v /path/on/host:/data -e VERSION=1.7.10 \
|
||||
-e TYPE=FORGE -e FORGEVERSION=10.13.4.1448 \
|
||||
-p 25565:25565 -e EULA=TRUE --name mc itzg/minecraft-server
|
||||
|
||||
In order to add mods, you have two options.
|
||||
|
||||
### Using the /data volume
|
||||
|
||||
This is the easiest way if you are using a persistent `/data` mount.
|
||||
|
||||
To do this, you will need to attach the container's `/data` directory
|
||||
(see "Attaching data directory to host filesystem”).
|
||||
Then, you can add mods to the `/path/on/host/mods` folder you chose. From the example above,
|
||||
the `/path/on/host` folder contents look like:
|
||||
|
||||
```
|
||||
/path/on/host
|
||||
├── mods
|
||||
│ └── ... INSTALL MODS HERE ...
|
||||
├── config
|
||||
│ └── ... CONFIGURE MODS HERE ...
|
||||
├── ops.json
|
||||
├── server.properties
|
||||
├── whitelist.json
|
||||
└── ...
|
||||
```
|
||||
|
||||
If you add mods while the container is running, you'll need to restart it to pick those
|
||||
up:
|
||||
|
||||
docker stop mc
|
||||
docker start mc
|
||||
|
||||
### Using separate mounts
|
||||
|
||||
This is the easiest way if you are using an ephemeral `/data` filesystem,
|
||||
or downloading a world with the `WORLD` option.
|
||||
|
||||
There are two additional volumes that can be mounted: `/mods` and `/config`.
|
||||
Any files in either of these filesystems will be copied over to the main
|
||||
`/data` filesystem before starting Minecraft.
|
||||
|
||||
This works well if you want to have a common set of modules in a separate
|
||||
location, but still have multiple worlds with different server requirements
|
||||
in either persistent volumes or a downloadable archive.
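
For example, a sketch of a Forge server that picks up mods and configs from host directories (the host paths are placeholders):

    docker run -d -e TYPE=FORGE -e EULA=TRUE \
        -v /path/on/host/mods:/mods \
        -v /path/on/host/config:/config \
        -p 25565:25565 --name mc itzg/minecraft-server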
|
||||
|
||||
## Running a Bukkit/Spigot server
|
||||
|
||||
Enable Bukkit/Spigot server mode by adding `-e TYPE=BUKKIT -e VERSION=1.8` or `-e TYPE=SPIGOT -e VERSION=1.8` to your command line.
|
||||
|
||||
docker run -d -v /path/on/host:/data \
|
||||
-e TYPE=SPIGOT -e VERSION=1.8 \
|
||||
-p 25565:25565 -e EULA=TRUE --name mc itzg/minecraft-server
|
||||
|
||||
You can build Spigot from source by adding `-e BUILD_FROM_SOURCE=true`.
|
||||
|
||||
__NOTE: to avoid pegging the CPU when running Spigot,__ you will need to
|
||||
pass `--noconsole` at the very end of the command line and not use `-it`. For example,
|
||||
|
||||
docker run -d -v /path/on/host:/data \
|
||||
-e TYPE=SPIGOT -e VERSION=1.8 \
|
||||
-p 25565:25565 -e EULA=TRUE --name mc itzg/minecraft-server --noconsole
|
||||
|
||||
|
||||
You can install Bukkit plugins in two ways...
|
||||
|
||||
### Using the /data volume
|
||||
|
||||
This is the easiest way if you are using a persistent `/data` mount.
|
||||
|
||||
To do this, you will need to attach the container's `/data` directory
|
||||
(see "Attaching data directory to host filesystem”).
|
||||
Then, you can add plugins to the `/path/on/host/plugins` folder you chose. From the example above,
|
||||
the `/path/on/host` folder contents look like:
|
||||
|
||||
```
|
||||
/path/on/host
|
||||
├── plugins
|
||||
│ └── ... INSTALL PLUGINS HERE ...
|
||||
├── ops.json
|
||||
├── server.properties
|
||||
├── whitelist.json
|
||||
└── ...
|
||||
```
|
||||
|
||||
If you add plugins while the container is running, you'll need to restart it to pick those
|
||||
up:
|
||||
|
||||
docker stop mc
|
||||
docker start mc
|
||||
|
||||
### Using separate mounts
|
||||
|
||||
This is the easiest way if you are using an ephemeral `/data` filesystem,
|
||||
or downloading a world with the `WORLD` option.
|
||||
|
||||
There is one additional volume that can be mounted: `/plugins`.
|
||||
Any files in this filesystem will be copied over to the main
|
||||
`/data/plugins` filesystem before starting Minecraft.
|
||||
|
||||
This works well if you want to have a common set of plugins in a separate
|
||||
location, but still have multiple worlds with different server requirements
|
||||
in either persistent volumes or a downloadable archive.
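
For example, a sketch of a Spigot server that copies plugins in from a host directory (the host path is a placeholder):

    docker run -d -e TYPE=SPIGOT -e VERSION=1.8 -e EULA=TRUE \
        -v /path/on/host/plugins:/plugins \
        -p 25565:25565 --name mc itzg/minecraft-server --noconsole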
|
||||
|
||||
## Running a PaperSpigot server
|
||||
|
||||
Enable PaperSpigot server mode by adding `-e TYPE=PAPER -e VERSION=1.9.4` to your command line.
|
||||
|
||||
docker run -d -v /path/on/host:/data \
|
||||
-e TYPE=PAPER -e VERSION=1.9.4 \
|
||||
-p 25565:25565 -e EULA=TRUE --name mc itzg/minecraft-server
|
||||
|
||||
__NOTE: to avoid pegging the CPU when running PaperSpigot,__ you will need to
|
||||
pass `--noconsole` at the very end of the command line and not use `-it`. For example,
|
||||
|
||||
docker run -d -v /path/on/host:/data \
|
||||
-e TYPE=PAPER -e VERSION=1.9.4 \
|
||||
-p 25565:25565 -e EULA=TRUE --name mc itzg/minecraft-server --noconsole
|
||||
|
||||
You can install Bukkit plugins in two ways...
|
||||
|
||||
### Using the /data volume
|
||||
|
||||
This is the easiest way if you are using a persistent `/data` mount.
|
||||
|
||||
To do this, you will need to attach the container's `/data` directory
|
||||
(see "Attaching data directory to host filesystem”).
|
||||
Then, you can add plugins to the `/path/on/host/plugins` folder you chose. From the example above,
|
||||
the `/path/on/host` folder contents look like:
|
||||
|
||||
```
|
||||
/path/on/host
|
||||
├── plugins
|
||||
│ └── ... INSTALL PLUGINS HERE ...
|
||||
├── ops.json
|
||||
├── server.properties
|
||||
├── whitelist.json
|
||||
└── ...
|
||||
```
|
||||
|
||||
If you add plugins while the container is running, you'll need to restart it to pick those
|
||||
up:
|
||||
|
||||
docker stop mc
|
||||
docker start mc
|
||||
|
||||
### Using separate mounts
|
||||
|
||||
This is the easiest way if you are using an ephemeral `/data` filesystem,
|
||||
or downloading a world with the `WORLD` option.
|
||||
|
||||
There is one additional volume that can be mounted: `/plugins`.
|
||||
Any files in this filesystem will be copied over to the main
|
||||
`/data/plugins` filesystem before starting Minecraft.
|
||||
|
||||
This works well if you want to have a common set of plugins in a separate
|
||||
location, but still have multiple worlds with different server requirements
|
||||
in either persistent volumes or a downloadable archive.
|
||||
|
||||
## Using Docker Compose
|
||||
|
||||
Rather than typing the server options below, the port mappings above, etc. every time
you want to create a new Minecraft server, you can use
|
||||
[Docker Compose](https://docs.docker.com/compose/). Start with a
|
||||
`docker-compose.yml` file like the following:
|
||||
|
||||
```
|
||||
minecraft-server:
|
||||
ports:
|
||||
- "25565:25565"
|
||||
|
||||
environment:
|
||||
EULA: "TRUE"
|
||||
|
||||
image: itzg/minecraft-server
|
||||
|
||||
container_name: mc
|
||||
|
||||
tty: true
|
||||
stdin_open: true
|
||||
restart: always
|
||||
```
|
||||
|
||||
and in the same directory as that file run
|
||||
|
||||
docker-compose up -d
|
||||
|
||||
Now, go play...or adjust the `environment` section to configure
|
||||
this server instance.
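
For instance, a sketch of an expanded `environment` section; the values are only illustrative, and any of the variables described under "Server configuration" below can be used:

```
environment:
  EULA: "TRUE"
  DIFFICULTY: hard
  MAX_PLAYERS: 50
  MOTD: "My Compose Server"
```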
|
||||
|
||||
## Server configuration
|
||||
|
||||
### Difficulty
|
||||
|
||||
The difficulty level (default: `easy`) can be set like:
|
||||
|
||||
docker run -d -e DIFFICULTY=hard ...
|
||||
|
||||
Valid values are `peaceful`, `easy`, `normal`, and `hard`; an error message
will be written to the logs if the value is not one of these.
|
||||
|
||||
### Whitelist Players
|
||||
|
||||
To whitelist players for your Minecraft server, pass the Minecraft usernames separated by commas via the `WHITELIST` environment variable, such as
|
||||
|
||||
docker run -d -e WHITELIST=user1,user2 ...
|
||||
|
||||
If the `WHITELIST` environment variable is not used, any user can join your Minecraft server if it's publicly accessible.
|
||||
|
||||
### Op/Administrator Players
|
||||
|
||||
To add more "op" (aka adminstrator) users to your Minecraft server, pass the Minecraft usernames separated by commas via the `OPS` environment variable, such as
|
||||
|
||||
docker run -d -e OPS=user1,user2 ...
|
||||
|
||||
### Server icon
|
||||
|
||||
A server icon can be configured using the `ICON` variable. The image will be automatically
|
||||
downloaded, scaled, and converted from any other image format:
|
||||
|
||||
docker run -d -e ICON=http://..../some/image.png ...
|
||||
|
||||
### Rcon
|
||||
|
||||
To use rcon, set the `ENABLE_RCON` and `RCON_PASSWORD` variables.
By default the rcon port is `25575`, but it can easily be changed with the `RCON_PORT` variable.
|
||||
|
||||
docker run -d -e ENABLE_RCON=true -e RCON_PASSWORD=testing
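
For instance, to also move rcon to a different port (`28016` is just an example):

    docker run -d -e ENABLE_RCON=true -e RCON_PASSWORD=testing -e RCON_PORT=28016 ...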
|
||||
|
||||
### Query
|
||||
|
||||
Setting `ENABLE_QUERY=true` enables the GameSpy query protocol.
By default the query port is `25565` (UDP), but it can easily be changed with the `QUERY_PORT` variable.
|
||||
|
||||
docker run -d -e ENABLE_QUERY=true
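
For instance, to also change the query port (`25566` is just an example):

    docker run -d -e ENABLE_QUERY=true -e QUERY_PORT=25566 ...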
|
||||
|
||||
|
||||
### Max players
|
||||
|
||||
By default the maximum number of players is 20; you can increase it with the `MAX_PLAYERS` variable.
|
||||
|
||||
docker run -d -e MAX_PLAYERS=50
|
||||
|
||||
|
||||
### Max world size
|
||||
|
||||
This sets the maximum possible size in blocks, expressed as a radius, that the world border can attain.
|
||||
|
||||
docker run -d -e MAX_WORLD_SIZE=10000
|
||||
|
||||
### Allow Nether
|
||||
|
||||
Allows players to travel to the Nether.
|
||||
|
||||
docker run -d -e ALLOW_NETHER=true
|
||||
|
||||
### Announce Player Achievements
|
||||
|
||||
Allows server to announce when a player gets an achievement.
|
||||
|
||||
docker run -d -e ANNOUNCE_PLAYER_ACHIEVEMENTS=true
|
||||
|
||||
### Enable Command Block
|
||||
|
||||
Enables command blocks
|
||||
|
||||
docker run -d -e ENABLE_COMMAND_BLOCK=true
|
||||
|
||||
### Force Gamemode
|
||||
|
||||
Force players to join in the default game mode.
|
||||
|
||||
* false - Players will join in the gamemode they left in.
|
||||
* true - Players will always join in the default gamemode.
|
||||
|
||||
`docker run -d -e FORCE_GAMEMODE=false`
|
||||
|
||||
### Generate Structures
|
||||
|
||||
Defines whether structures (such as villages) will be generated.
|
||||
|
||||
* false - Structures will not be generated in new chunks.
|
||||
* true - Structures will be generated in new chunks.
|
||||
|
||||
`docker run -d -e GENERATE_STRUCTURES=true`
|
||||
|
||||
### Hardcore
|
||||
|
||||
If set to true, players will be set to spectator mode if they die.
|
||||
|
||||
docker run -d -e HARDCORE=false
|
||||
|
||||
### Max Build Height
|
||||
|
||||
The maximum height in which building is allowed.
|
||||
Terrain may still naturally generate above a low height limit.
|
||||
|
||||
docker run -d -e MAX_BUILD_HEIGHT=256
|
||||
|
||||
### Max Tick Time
|
||||
|
||||
The maximum number of milliseconds a single tick may take before the server watchdog stops the server with the message "A single server tick took 60.00 seconds (should be max 0.05); Considering it to be crashed, server will forcibly shutdown." Once this criterion is met, the server calls System.exit(1).
Setting this to -1 disables the watchdog entirely.
|
||||
|
||||
docker run -d -e MAX_TICK_TIME=60000
|
||||
|
||||
### Spawn Animals
|
||||
|
||||
Determines if animals will be able to spawn.
|
||||
|
||||
docker run -d -e SPAWN_ANIMALS=true
|
||||
|
||||
### Spawn Monsters
|
||||
|
||||
Determines if monsters will be spawned.
|
||||
|
||||
docker run -d -e SPAWN_MONSTERS=true
|
||||
|
||||
### Spawn NPCs
|
||||
|
||||
Determines if villagers will be spawned.
|
||||
|
||||
docker run -d -e SPAWN_NPCS=true
|
||||
|
||||
### View Distance
|
||||
Sets the amount of world data the server sends the client, measured in chunks in each direction of the player (radius, not diameter).
|
||||
It determines the server-side viewing distance.
|
||||
|
||||
docker run -d -e VIEW_DISTANCE=10
|
||||
|
||||
### Level Seed
|
||||
|
||||
If you want to create the Minecraft level with a specific seed, use `SEED`, such as
|
||||
|
||||
docker run -d -e SEED=1785852800490497919 ...
|
||||
|
||||
### Game Mode
|
||||
|
||||
By default, Minecraft servers are configured to run in Survival mode. You can
|
||||
change the mode using `MODE` where you can either provide the [standard
|
||||
numerical values](http://minecraft.gamepedia.com/Game_mode#Game_modes) or the
|
||||
shortcut values:
|
||||
|
||||
* creative
|
||||
* survival
|
||||
* adventure
|
||||
* spectator (only for Minecraft 1.8 or later)
|
||||
|
||||
For example:
|
||||
|
||||
docker run -d -e MODE=creative ...
|
||||
|
||||
### Message of the Day
|
||||
|
||||
The message of the day, shown below each server entry in the UI, can be changed with the `MOTD` environment variable, such as
|
||||
|
||||
docker run -d -e 'MOTD=My Server' ...
|
||||
|
||||
If you leave it off, the last used or default message will be used. _The example shows how to specify a server
|
||||
message of the day that contains spaces by putting quotes around the whole thing._
|
||||
|
||||
### PVP Mode
|
||||
|
||||
By default, servers are created with player-vs-player (PVP) mode enabled. You can disable this with the `PVP`
|
||||
environment variable set to `false`, such as
|
||||
|
||||
docker run -d -e PVP=false ...
|
||||
|
||||
### Level Type and Generator Settings
|
||||
|
||||
By default, a standard world is generated with hills, valleys, water, etc. A different level type can
|
||||
be configured by setting `LEVEL_TYPE` to
|
||||
|
||||
* DEFAULT
|
||||
* FLAT
|
||||
* LARGEBIOMES
|
||||
* AMPLIFIED
|
||||
* CUSTOMIZED
|
||||
|
||||
Descriptions are available at the [gamepedia](http://minecraft.gamepedia.com/Server.properties).
|
||||
|
||||
When using a level type of `FLAT` or `CUSTOMIZED`, you can further configure the world generator
by passing [custom generator settings](http://minecraft.gamepedia.com/Superflat).
**Since generator settings usually contain semicolons, surround the `-e` value with single quotes, as shown below.**
|
||||
|
||||
For example (just the `-e` bits):
|
||||
|
||||
-e LEVEL_TYPE=flat -e 'GENERATOR_SETTINGS=3;minecraft:bedrock,3*minecraft:stone,52*minecraft:sandstone;2;'
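
Putting it together, a sketch of a full command for a superflat world (other options omitted):

    docker run -d -e EULA=TRUE -e LEVEL_TYPE=flat \
        -e 'GENERATOR_SETTINGS=3;minecraft:bedrock,3*minecraft:stone,52*minecraft:sandstone;2;' ...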
|
||||
|
||||
### World Save Name
|
||||
|
||||
You can either switch between world saves or run multiple containers with different saves by using the `LEVEL` option,
|
||||
where the default is "world":
|
||||
|
||||
docker run -d -e LEVEL=bonus ...
|
||||
|
||||
**NOTE:** if running multiple containers be sure to either specify a different `-v` host directory for each
|
||||
`LEVEL` in use or don't use `-v` and the container's filesystem will keep things encapsulated.
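
For example, a sketch of two containers with separate saves, host directories, and host ports (the names, paths, and ports are placeholders):

    docker run -d -e EULA=TRUE -e LEVEL=alpha -v /srv/mc-alpha:/data -p 25565:25565 --name mc-alpha itzg/minecraft-server
    docker run -d -e EULA=TRUE -e LEVEL=bonus -v /srv/mc-bonus:/data -p 25566:25565 --name mc-bonus itzg/minecraft-server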
|
||||
|
||||
### Downloadable world
|
||||
|
||||
Instead of mounting the `/data` volume, you can instead specify the URL of
|
||||
a ZIP file containing an archived world. This will be downloaded, and
|
||||
unpacked in the `/data` directory; if it does not contain a subdirectory
|
||||
called `world/` then it will be searched for a file `level.dat` and the
|
||||
containing subdirectory renamed to `world`. This means that most of the
|
||||
archived Minecraft worlds downloadable from the Internet will already be in
|
||||
the correct format.
|
||||
|
||||
The ZIP file may also contain a `server.properties` file and `modules`
|
||||
directory, if required.
|
||||
|
||||
docker run -d -e WORLD=http://www.example.com/worlds/MySave.zip ...
|
||||
|
||||
**NOTE:** Unless you also mount `/data` as an external volume, this world
|
||||
will be deleted when the container is deleted.
|
||||
|
||||
**NOTE:** This URL must be accessible from inside the container. Therefore,
|
||||
you should use an IP address or a globally resolvable FQDN, or else the
|
||||
name of a linked container.
|
||||
|
||||
### Downloadable mod/plugin pack for Forge, Bukkit, and Spigot Servers
|
||||
|
||||
Like the `WORLD` option above, you can specify the URL of a "mod pack"
|
||||
to download and install into `mods` for Forge or `plugins` for Bukkit/Spigot.
|
||||
To use this option pass the environment variable `MODPACK`, such as
|
||||
|
||||
docker run -d -e MODPACK=http://www.example.com/mods/modpack.zip ...
|
||||
|
||||
**NOTE:** The referenced URL must be a zip file with one or more jar files at the
|
||||
top level of the zip archive. Make sure the jars are compatible with the
|
||||
particular `TYPE` of server you are running.
|
||||
|
||||
### Online mode
|
||||
|
||||
By default, the server checks connecting players against Minecraft's account database. If you want to create an offline server, or your server is not connected to the internet, you can stop the server from trying to contact minecraft.net to authenticate players by setting the environment variable `ONLINE_MODE`, like this
|
||||
|
||||
docker run -d -e ONLINE_MODE=FALSE ...
|
||||
|
||||
## Miscellaneous Options
|
||||
|
||||
### Memory Limit
|
||||
|
||||
The Java memory limit can be adjusted using the `JVM_OPTS` environment variable, where the default is
|
||||
the setting shown in the example (max and min at 1024 MB):
|
||||
|
||||
docker run -e 'JVM_OPTS=-Xmx1024M -Xms1024M' ...
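
For example, to give the server 2 GB instead (adjust to the memory available on your host):

    docker run -e 'JVM_OPTS=-Xmx2048M -Xms2048M' ...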
|
||||
|
||||
### /data ownership
|
||||
|
||||
In order to adapt to differences in `UID` and `GID` settings the entry script will attempt to correct ownership and writability of the `/data` directory. This logic can be disabled by setting `-e SKIP_OWNERSHIP_FIX=TRUE`.
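
For example, to skip the automatic ownership fix-up:

    docker run -d -e SKIP_OWNERSHIP_FIX=TRUE ...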
|
||||
|
||||
minecraft-server/docker-compose.yml (new file, 14 lines)
@@ -0,0 +1,14 @@
|
||||
minecraft-server:
|
||||
ports:
|
||||
- "25565:25565"
|
||||
|
||||
environment:
|
||||
EULA: "TRUE"
|
||||
|
||||
image: itzg/minecraft-server
|
||||
|
||||
container_name: minecraft-server
|
||||
|
||||
tty: true
|
||||
stdin_open: true
|
||||
restart: always
|
||||
minecraft-server/mcadmin.jq (new file, 7 lines)
@@ -0,0 +1,7 @@
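# Given the element tree scraped from mcadmin.net, pick out the download link (.href) of the server jar matching the requested version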
|
||||
.[] |
|
||||
select(.elements | length > 1) |
|
||||
select(.elements[].elements[] | select(.class == "version" and .text == $version)) |
|
||||
.elements[].elements[] |
|
||||
select(.class|contains("server-jar")) |
|
||||
.elements[] | select(.name == "a") |
|
||||
.href
|
||||
@@ -1,26 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
case $VERSION in
|
||||
LATEST)
|
||||
export VERSION=`wget -O - https://s3.amazonaws.com/Minecraft.Download/versions/versions.json | jsawk -n 'out(this.latest.release)'`
|
||||
;;
|
||||
|
||||
SNAPSHOT)
|
||||
export VERSION=`wget -O - https://s3.amazonaws.com/Minecraft.Download/versions/versions.json | jsawk -n 'out(this.latest.snapshot)'`
|
||||
;;
|
||||
esac
|
||||
|
||||
cd /data
|
||||
|
||||
if [ ! -e minecraft_server.$VERSION.jar ]; then
|
||||
wget https://s3.amazonaws.com/Minecraft.Download/versions/$VERSION/minecraft_server.$VERSION.jar
|
||||
fi
|
||||
|
||||
if [ ! -e server.properties ]; then
|
||||
cp /tmp/server.properties .
|
||||
fi
|
||||
|
||||
sed -i "/motd\s*=/ c motd=$MOTD" /data/server.properties
|
||||
sed -i "/level-name\s*=/ c level-name=$LEVEL" /data/server.properties
|
||||
|
||||
java $JVM_OPTS -jar minecraft_server.$VERSION.jar
|
||||
@@ -5,11 +5,14 @@ enable-query=false
|
||||
allow-flight=false
|
||||
announce-player-achievements=true
|
||||
server-port=25565
|
||||
rcon.port=25575
|
||||
query.port=25565
|
||||
level-type=DEFAULT
|
||||
enable-rcon=false
|
||||
force-gamemode=false
|
||||
level-seed=
|
||||
server-ip=
|
||||
max-tick-time=60000
|
||||
max-build-height=256
|
||||
spawn-npcs=true
|
||||
white-list=false
|
||||
@@ -21,7 +24,7 @@ online-mode=true
|
||||
resource-pack=
|
||||
pvp=true
|
||||
difficulty=1
|
||||
enable-command-block=false
|
||||
enable-command-block=true
|
||||
player-idle-timeout=0
|
||||
gamemode=0
|
||||
max-players=20
|
||||
@@ -29,5 +32,7 @@ spawn-monsters=true
|
||||
generate-structures=true
|
||||
view-distance=10
|
||||
spawn-protection=16
|
||||
motd=A Minecraft Server
|
||||
|
||||
motd=A Minecraft Server powered by Docker
|
||||
generator-settings=
|
||||
rcon.password=
|
||||
max-world-size=29999984
|
||||
|
||||
minecraft-server/start-minecraft.sh (new executable file, 456 lines)
@@ -0,0 +1,456 @@
|
||||
#!/bin/bash
|
||||
|
||||
#umask 002
|
||||
export HOME=/data
|
||||
|
||||
if [ ! -e /data/eula.txt ]; then
|
||||
if [ "$EULA" != "" ]; then
|
||||
echo "# Generated via Docker on $(date)" > eula.txt
|
||||
echo "eula=$EULA" >> eula.txt
|
||||
else
|
||||
echo ""
|
||||
echo "Please accept the Minecraft EULA at"
|
||||
echo " https://account.mojang.com/documents/minecraft_eula"
|
||||
echo "by adding the following immediately after 'docker run':"
|
||||
echo " -e EULA=TRUE"
|
||||
echo ""
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
VERSIONS_JSON=https://launchermeta.mojang.com/mc/game/version_manifest.json
|
||||
|
||||
echo "Checking version information."
|
||||
case "X$VERSION" in
|
||||
X|XLATEST|Xlatest)
|
||||
VANILLA_VERSION=`curl -sSL $VERSIONS_JSON | jq -r '.latest.release'`
|
||||
;;
|
||||
XSNAPSHOT|Xsnapshot)
|
||||
VANILLA_VERSION=`curl -sSL $VERSIONS_JSON | jq -r '.latest.snapshot'`
|
||||
;;
|
||||
X[1-9]*)
|
||||
VANILLA_VERSION=$VERSION
|
||||
;;
|
||||
*)
|
||||
VANILLA_VERSION=`curl -sSL $VERSIONS_JSON | jq -r '.latest.release'`
|
||||
;;
|
||||
esac
|
||||
|
||||
cd /data
|
||||
|
||||
function buildSpigotFromSource {
|
||||
echo "Building Spigot $VANILLA_VERSION from source, might take a while, get some coffee"
|
||||
mkdir /data/temp
|
||||
cd /data/temp
|
||||
wget -q -P /data/temp https://hub.spigotmc.org/jenkins/job/BuildTools/lastSuccessfulBuild/artifact/target/BuildTools.jar && \
|
||||
java -jar /data/temp/BuildTools.jar --rev $VANILLA_VERSION 2>&1 |tee /data/spigot_build.log| while read l; do echo -n .; done; echo "done"
|
||||
mv spigot-*.jar /data/spigot_server.jar
|
||||
mv craftbukkit-*.jar /data/craftbukkit_server.jar
|
||||
echo "Cleaning up"
|
||||
rm -rf /data/temp
|
||||
cd /data
|
||||
}
|
||||
|
||||
function downloadSpigot {
|
||||
local match
|
||||
case "$TYPE" in
|
||||
*BUKKIT|*bukkit)
|
||||
match="Craftbukkit"
|
||||
|
||||
;;
|
||||
*)
|
||||
match="Spigot"
|
||||
;;
|
||||
esac
|
||||
|
||||
downloadUrl=$(restify --class=jar-div https://mcadmin.net/ | \
|
||||
jq --arg version "$match $VANILLA_VERSION" -r -f /usr/share/mcadmin.jq)
|
||||
if [[ -n $downloadUrl ]]; then
|
||||
echo "Downloading $match"
|
||||
wget -q -O $SERVER "$downloadUrl"
|
||||
status=$?
|
||||
if [ $status != 0 ]; then
|
||||
echo "ERROR: failed to download from $downloadUrl due to (error code was $status)"
|
||||
exit 3
|
||||
fi
|
||||
else
|
||||
echo "ERROR: Version $VANILLA_VERSION is not supported for $TYPE"
|
||||
echo " Refer to https://mcadmin.net/ for supported versions"
|
||||
exit 2
|
||||
fi
|
||||
}
|
||||
|
||||
function downloadPaper {
|
||||
local build
|
||||
case "$VERSION" in
|
||||
latest|LATEST|1.10)
|
||||
build="lastSuccessfulBuild";;
|
||||
1.9.4)
|
||||
build="773";;
|
||||
1.9.2)
|
||||
build="727";;
|
||||
1.9)
|
||||
build="612";;
|
||||
1.8.8)
|
||||
build="443";;
|
||||
*)
|
||||
build="nosupp";;
|
||||
esac
|
||||
|
||||
if [ $build != "nosupp" ]; then
|
||||
downloadUrl="https://ci.destroystokyo.com/job/PaperSpigot/$build/artifact/paperclip.jar"
|
||||
wget -q -O $SERVER "$downloadUrl"
|
||||
status=$?
|
||||
if [ $status != 0 ]; then
|
||||
echo "ERROR: failed to download from $downloadUrl due to (error code was $status)"
|
||||
exit 3
|
||||
fi
|
||||
else
|
||||
echo "ERROR: Version $VERSION is not supported for $TYPE"
|
||||
echo " Refer to https://ci.destroystokyo.com/job/PaperSpigot/"
|
||||
echo " for supported versions"
|
||||
exit 2
|
||||
fi
|
||||
}
|
||||
|
||||
function installForge {
|
||||
TYPE=FORGE
|
||||
norm=$VANILLA_VERSION
|
||||
|
||||
echo "Checking Forge version information."
|
||||
case $FORGEVERSION in
|
||||
RECOMMENDED)
|
||||
curl -o /tmp/forge.json -sSL http://files.minecraftforge.net/maven/net/minecraftforge/forge/promotions_slim.json
|
||||
FORGE_VERSION=$(cat /tmp/forge.json | jq -r ".promos[\"$norm-recommended\"]")
|
||||
if [ $FORGE_VERSION = null ]; then
|
||||
FORGE_VERSION=$(cat /tmp/forge.json | jq -r ".promos[\"$norm-latest\"]")
|
||||
if [ $FORGE_VERSION = null ]; then
|
||||
echo "ERROR: Version $FORGE_VERSION is not supported by Forge"
|
||||
echo " Refer to http://files.minecraftforge.net/ for supported versions"
|
||||
exit 2
|
||||
fi
|
||||
fi
|
||||
;;
|
||||
|
||||
*)
|
||||
FORGE_VERSION=$FORGEVERSION
|
||||
;;
|
||||
esac
|
||||
|
||||
# URL format changed for 1.7.10 from 10.13.2.1300
|
||||
sorted=$((echo $FORGE_VERSION; echo 10.13.2.1300) | sort -V | head -1)
|
||||
if [[ $norm == '1.7.10' && $sorted == '10.13.2.1300' ]]; then
|
||||
# if $FORGEVERSION >= 10.13.2.1300
|
||||
normForgeVersion="$norm-$FORGE_VERSION-$norm"
|
||||
else
|
||||
normForgeVersion="$norm-$FORGE_VERSION"
|
||||
fi
|
||||
|
||||
FORGE_INSTALLER="forge-$normForgeVersion-installer.jar"
|
||||
SERVER="forge-$normForgeVersion-universal.jar"
|
||||
|
||||
if [ ! -e "$SERVER" ]; then
|
||||
echo "Downloading $FORGE_INSTALLER ..."
|
||||
wget -q http://files.minecraftforge.net/maven/net/minecraftforge/forge/$normForgeVersion/$FORGE_INSTALLER
|
||||
echo "Installing $SERVER"
|
||||
java -jar $FORGE_INSTALLER --installServer
|
||||
fi
|
||||
}
|
||||
|
||||
function installVanilla {
|
||||
SERVER="minecraft_server.$VANILLA_VERSION.jar"
|
||||
|
||||
if [ ! -e $SERVER ]; then
|
||||
echo "Downloading $SERVER ..."
|
||||
wget -q https://s3.amazonaws.com/Minecraft.Download/versions/$VANILLA_VERSION/$SERVER
|
||||
fi
|
||||
}
|
||||
|
||||
echo "Checking type information."
|
||||
case "$TYPE" in
|
||||
*BUKKIT|*bukkit|SPIGOT|spigot)
|
||||
case "$TYPE" in
|
||||
*BUKKIT|*bukkit)
|
||||
SERVER=craftbukkit_server.jar
|
||||
;;
|
||||
*)
|
||||
SERVER=spigot_server.jar
|
||||
;;
|
||||
esac
|
||||
|
||||
if [ ! -f $SERVER ]; then
|
||||
if [[ "$BUILD_SPIGOT_FROM_SOURCE" = TRUE || "$BUILD_SPIGOT_FROM_SOURCE" = true || "$BUILD_FROM_SOURCE" = TRUE || "$BUILD_FROM_SOURCE" = true ]]; then
|
||||
buildSpigotFromSource
|
||||
else
|
||||
downloadSpigot
|
||||
fi
|
||||
fi
|
||||
# normalize on Spigot for operations below
|
||||
TYPE=SPIGOT
|
||||
;;
|
||||
|
||||
PAPER|paper)
|
||||
SERVER=paper_server.jar
|
||||
if [ ! -f $SERVER ]; then
|
||||
downloadPaper
|
||||
fi
|
||||
# normalize on Spigot for operations below
|
||||
TYPE=SPIGOT
|
||||
;;
|
||||
|
||||
FORGE|forge)
|
||||
TYPE=FORGE
|
||||
installForge
|
||||
;;
|
||||
|
||||
VANILLA|vanilla)
|
||||
installVanilla
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "Invalid type: '$TYPE'"
|
||||
echo "Must be: VANILLA, FORGE, SPIGOT"
|
||||
exit 1
|
||||
;;
|
||||
|
||||
esac
|
||||
|
||||
|
||||
# If supplied with a URL for a world, download it and unpack
|
||||
if [[ "$WORLD" ]]; then
|
||||
case "X$WORLD" in
|
||||
X[Hh][Tt][Tt][Pp]*)
|
||||
echo "Downloading world via HTTP"
|
||||
echo "$WORLD"
|
||||
wget -q -O - "$WORLD" > /data/world.zip
|
||||
echo "Unzipping word"
|
||||
unzip -q /data/world.zip
|
||||
rm -f /data/world.zip
|
||||
if [ ! -d /data/world ]; then
|
||||
echo World directory not found
|
||||
for i in /data/*/level.dat; do
|
||||
if [ -f "$i" ]; then
|
||||
d=`dirname "$i"`
|
||||
echo Renaming world directory from $d
|
||||
mv -f "$d" /data/world
|
||||
fi
|
||||
done
|
||||
fi
|
||||
if [ "$TYPE" = "SPIGOT" ]; then
|
||||
# Reorganise if a Spigot server
|
||||
echo "Moving End and Nether maps to Spigot location"
|
||||
[ -d "/data/world/DIM1" ] && mv -f "/data/world/DIM1" "/data/world_the_end"
|
||||
[ -d "/data/world/DIM-1" ] && mv -f "/data/world/DIM-1" "/data/world_nether"
|
||||
fi
|
||||
;;
|
||||
*)
|
||||
echo "Invalid URL given for world: Must be HTTP or HTTPS and a ZIP file"
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
# If supplied with a URL for a modpack (simple zip of jars), download it and unpack
|
||||
if [[ "$MODPACK" ]]; then
|
||||
case "X$MODPACK" in
|
||||
X[Hh][Tt][Tt][Pp]*[Zz][iI][pP])
|
||||
echo "Downloading mod/plugin pack via HTTP"
|
||||
echo "$MODPACK"
|
||||
wget -q -O /tmp/modpack.zip "$MODPACK"
|
||||
if [ "$TYPE" = "SPIGOT" ]; then
|
||||
mkdir -p /data/plugins
|
||||
unzip -o -d /data/plugins /tmp/modpack.zip
|
||||
else
|
||||
mkdir -p /data/mods
|
||||
unzip -o -d /data/mods /tmp/modpack.zip
|
||||
fi
|
||||
rm -f /tmp/modpack.zip
|
||||
;;
|
||||
*)
|
||||
echo "Invalid URL given for modpack: Must be HTTP or HTTPS and a ZIP file"
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
function setServerProp {
|
||||
local prop=$1
|
||||
local var=$2
|
||||
if [ -n "$var" ]; then
|
||||
echo "Setting $prop to $var"
|
||||
sed -i "/$prop\s*=/ c $prop=$var" /data/server.properties
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
if [ ! -e server.properties ]; then
|
||||
echo "Creating server.properties"
|
||||
cp /tmp/server.properties .
|
||||
|
||||
if [ -n "$WHITELIST" ]; then
|
||||
echo "Creating whitelist"
|
||||
sed -i "/whitelist\s*=/ c whitelist=true" /data/server.properties
|
||||
sed -i "/white-list\s*=/ c white-list=true" /data/server.properties
|
||||
fi
|
||||
|
||||
setServerProp "motd" "$MOTD"
|
||||
setServerProp "allow-nether" "$ALLOW_NETHER"
|
||||
setServerProp "announce-player-achievements" "$ANNOUNCE_PLAYER_ACHIEVEMENTS"
|
||||
setServerProp "enable-command-block" "$ENABLE_COMMAND_BLOCK"
|
||||
setServerProp "spawn-animals" "$SPAWN_ANIMAILS"
|
||||
setServerProp "spawn-monsters" "$SPAWN_MONSTERS"
|
||||
setServerProp "spawn-npcs" "$SPAWN_NPCS"
|
||||
setServerProp "generate-structures" "$GENERATE_STRUCTURES"
|
||||
setServerProp "spawn-npcs" "$SPAWN_NPCS"
|
||||
setServerProp "view-distance" "$VIEW_DISTANCE"
|
||||
setServerProp "hardcore" "$HARDCORE"
|
||||
setServerProp "max-build-height" "$MAX_BUILD_HEIGHT"
|
||||
setServerProp "force-gamemode" "$FORCE_GAMEMODE"
|
||||
setServerProp "hardmax-tick-timecore" "$MAX_TICK_TIME"
|
||||
setServerProp "enable-query" "$ENABLE_QUERY"
|
||||
setServerProp "query.port" "$QUERY_PORT"
|
||||
setServerProp "enable-rcon" "$ENABLE_RCON"
|
||||
setServerProp "rcon.password" "$RCON_PASSWORD"
|
||||
setServerProp "rcon.port" "$RCON_PORT"
|
||||
setServerProp "max-players" "$MAX_PLAYERS"
|
||||
setServerProp "max-world-size" "$MAX_WORLD_SIZE"
|
||||
setServerProp "level-name" "$LEVEL"
|
||||
setServerProp "level-seed" "$SEED"
|
||||
setServerProp "pvp" "$PVP"
|
||||
setServerProp "generator-settings" "$GENERATOR_SETTINGS"
|
||||
setServerProp "online-mode" "$ONLINE_MODE"
|
||||
|
||||
if [ -n "$LEVEL_TYPE" ]; then
|
||||
# normalize to uppercase
|
||||
LEVEL_TYPE=${LEVEL_TYPE^^}
|
||||
echo "Setting level type to $LEVEL_TYPE"
|
||||
# check for valid values and only then set
|
||||
case $LEVEL_TYPE in
|
||||
DEFAULT|FLAT|LARGEBIOMES|AMPLIFIED|CUSTOMIZED)
|
||||
sed -i "/level-type\s*=/ c level-type=$LEVEL_TYPE" /data/server.properties
|
||||
;;
|
||||
*)
|
||||
echo "Invalid LEVEL_TYPE: $LEVEL_TYPE"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
if [ -n "$DIFFICULTY" ]; then
|
||||
case $DIFFICULTY in
|
||||
peaceful|0)
|
||||
DIFFICULTY=0
|
||||
;;
|
||||
easy|1)
|
||||
DIFFICULTY=1
|
||||
;;
|
||||
normal|2)
|
||||
DIFFICULTY=2
|
||||
;;
|
||||
hard|3)
|
||||
DIFFICULTY=3
|
||||
;;
|
||||
*)
|
||||
echo "DIFFICULTY must be peaceful, easy, normal, or hard."
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
echo "Setting difficulty to $DIFFICULTY"
|
||||
sed -i "/difficulty\s*=/ c difficulty=$DIFFICULTY" /data/server.properties
|
||||
fi
|
||||
|
||||
if [ -n "$MODE" ]; then
|
||||
echo "Setting mode"
|
||||
case ${MODE,,?} in
|
||||
0|1|2|3)
|
||||
;;
|
||||
su*)
|
||||
MODE=0
|
||||
;;
|
||||
c*)
|
||||
MODE=1
|
||||
;;
|
||||
a*)
|
||||
MODE=2
|
||||
;;
|
||||
sp*)
|
||||
MODE=3
|
||||
;;
|
||||
*)
|
||||
echo "ERROR: Invalid game mode: $MODE"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
sed -i "/gamemode\s*=/ c gamemode=$MODE" /data/server.properties
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
if [ -n "$OPS" -a ! -e ops.txt.converted ]; then
|
||||
echo "Setting ops"
|
||||
echo $OPS | awk -v RS=, '{print}' >> ops.txt
|
||||
fi
|
||||
|
||||
if [ -n "$WHITELIST" -a ! -e white-list.txt.converted ]; then
|
||||
echo "Setting whitelist"
|
||||
echo $WHITELIST | awk -v RS=, '{print}' >> white-list.txt
|
||||
fi
|
||||
|
||||
if [ -n "$ICON" -a ! -e server-icon.png ]; then
|
||||
echo "Using server icon from $ICON..."
|
||||
# Not sure what it is yet...call it "img"
|
||||
wget -q -O /tmp/icon.img $ICON
|
||||
specs=$(identify /tmp/icon.img | awk '{print $2,$3}')
|
||||
if [ "$specs" = "PNG 64x64" ]; then
|
||||
mv /tmp/icon.img /data/server-icon.png
|
||||
else
|
||||
echo "Converting image to 64x64 PNG..."
|
||||
convert /tmp/icon.img -resize 64x64! /data/server-icon.png
|
||||
fi
|
||||
fi
|
||||
|
||||
# Make sure files exist to avoid errors
|
||||
if [ ! -e banned-players.json ]; then
|
||||
echo '' > banned-players.json
|
||||
fi
|
||||
if [ ! -e banned-ips.json ]; then
|
||||
echo '' > banned-ips.json
|
||||
fi
|
||||
|
||||
# If any modules have been provided, copy them over
|
||||
[ -d /data/mods ] || mkdir /data/mods
|
||||
for m in /mods/*.jar
|
||||
do
|
||||
if [ -f "$m" ]; then
|
||||
echo Copying mod `basename "$m"`
|
||||
cp -f "$m" /data/mods
|
||||
fi
|
||||
done
|
||||
[ -d /data/config ] || mkdir /data/config
|
||||
for c in /config/*
|
||||
do
|
||||
if [ -f "$c" ]; then
|
||||
echo Copying configuration `basename "$c"`
|
||||
cp -rf "$c" /data/config
|
||||
fi
|
||||
done
|
||||
|
||||
if [ "$TYPE" = "SPIGOT" ]; then
|
||||
if [ -d /plugins ]; then
|
||||
echo Copying any Bukkit plugins over
|
||||
cp -r /plugins /data
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ $CONSOLE = false ]]; then
|
||||
EXTRA_ARGS=--noconsole
|
||||
else
|
||||
EXTRA_ARGS=""
|
||||
fi
|
||||
|
||||
# If we have a bootstrap.txt file... feed that in to the server stdin
|
||||
if [ -f /data/bootstrap.txt ];
|
||||
then
|
||||
exec java $JVM_OPTS -jar $SERVER "$@" $EXTRA_ARGS < /data/bootstrap.txt
|
||||
else
|
||||
exec java $JVM_OPTS -jar $SERVER "$@" $EXTRA_ARGS
|
||||
fi
|
||||
minecraft-server/start.sh (new executable file, 22 lines)
@@ -0,0 +1,22 @@
|
||||
#!/bin/sh
|
||||
|
||||
set -e
|
||||
usermod --uid $UID minecraft
|
||||
groupmod --gid $GID minecraft
|
||||
|
||||
if [ "$SKIP_OWNERSHIP_FIX" != "TRUE" ]; then
|
||||
fix_ownership() {
|
||||
dir=$1
|
||||
if ! sudo -u minecraft test -w $dir; then
|
||||
echo "Correcting writability of $dir ..."
|
||||
chown -R minecraft:minecraft $dir
|
||||
chmod -R u+w $dir
|
||||
fi
|
||||
}
|
||||
|
||||
fix_ownership /data
|
||||
fix_ownership /home/minecraft
|
||||
fi
|
||||
|
||||
echo "Switching to user 'minecraft'"
|
||||
exec sudo -E -u minecraft /start-minecraft "$@"
|
||||
titan-gremlin/Dockerfile (new executable file, 16 lines)
@@ -0,0 +1,16 @@
|
||||
FROM itzg/ubuntu-openjdk-7
|
||||
|
||||
MAINTAINER itzg
|
||||
|
||||
ENV TITAN_VERSION 0.5.4
|
||||
|
||||
RUN wget -q -O /tmp/titan.zip http://s3.thinkaurelius.com/downloads/titan/titan-$TITAN_VERSION-hadoop2.zip
|
||||
RUN unzip -q /tmp/titan.zip -d /opt && rm /tmp/titan.zip
|
||||
|
||||
ENV TITAN_HOME /opt/titan-$TITAN_VERSION-hadoop2
|
||||
WORKDIR $TITAN_HOME
|
||||
|
||||
VOLUME ["/conf","/data"]
|
||||
ADD start-gremlin.sh /opt/start-gremlin.sh
|
||||
|
||||
CMD ["/opt/start-gremlin.sh"]
|
||||
titan-gremlin/README.md (new file, 63 lines)
@@ -0,0 +1,63 @@
|
||||
Runs the Gremlin console from the Titan Graph Database's "all" distribution.
|
||||
|
||||
# Basic Usage
|
||||
|
||||
To start the Gremlin console with the default configuration files available:
|
||||
|
||||
docker run -it itzg/titan-gremlin
|
||||
|
||||
In order to adjust or further define property files to use within Gremlin,
|
||||
attach a host directory to the container's `/conf` such as
|
||||
|
||||
docker run -it -v $(pwd)/conf:/conf itzg/titan-gremlin
|
||||
|
||||
After running once, your host directory will be populated with the distribution-default
configuration files. Modify those or add to them, and they will be available
the next time you (re)start your container.
|
||||
|
||||
# Connecting to Cassandra and Elasticsearch Containers
|
||||
|
||||
First start containers for Cassandra and Elasticsearch (pre-2.x),
|
||||
where the `--name` you choose can be arbitrary or left off to use a generated name.
|
||||
|
||||
docker run -d --name gremlin-cass itzg/cassandra
|
||||
docker run -d --name gremlin-es itzg/elasticsearch:1.x
|
||||
|
||||
Now start Gremlin with networking links to those containers with the aliases
|
||||
|
||||
* `--link <container>:cass`
|
||||
* `--link <container>:es`
|
||||
|
||||
such as
|
||||
|
||||
docker run -it --rm --link gremlin-cass:cass --link gremlin-es:es itzg/titan-gremlin
|
||||
|
||||
and with that you can follow the
|
||||
[Graph of the Gods example](http://s3.thinkaurelius.com/docs/titan/current/getting-started.html), such as
|
||||
|
||||
gremlin> GraphOfTheGodsFactory.load(g)
|
||||
gremlin> saturn = g.V.has('name','saturn').next()
|
||||
==>v[256]
|
||||
gremlin> saturn.map()
|
||||
==>name=saturn
|
||||
==>age=10000
|
||||
gremlin> saturn.in('father').in('father').name
|
||||
==>hercules
|
||||
|
||||
# Running and Connecting with Docker Compose
|
||||
|
||||
Create the following Compose content as the file `docker-compose.yml` and in
|
||||
that directory invoke `docker-compose run titan` to run the Gremlin shell
|
||||
with the supporting Elasticsearch and Cassandra containers.
|
||||
|
||||
```
|
||||
titan:
|
||||
image: itzg/titan-gremlin
|
||||
links:
|
||||
- cass
|
||||
- es
|
||||
cass:
|
||||
image: itzg/cassandra
|
||||
es:
|
||||
image: itzg/elasticsearch:1.x
|
||||
```
|
||||
titan-gremlin/start-gremlin.sh (new executable file, 64 lines)
@@ -0,0 +1,64 @@
|
||||
#!/bin/bash
|
||||
|
||||
args=
|
||||
|
||||
if [ $(ls /conf|wc -l) = 0 ]; then
|
||||
cp -r $TITAN_HOME/conf/* /conf
|
||||
fi
|
||||
|
||||
rm -f /tmp/titan.properties
|
||||
|
||||
if [ -n "$CASS_PORT_9160_TCP_ADDR" ]; then
|
||||
|
||||
shortcut=/tmp/titan.properties
|
||||
cat >> /tmp/titan.properties <<END
|
||||
storage.backend=cassandra
|
||||
storage.hostname=$CASS_PORT_9160_TCP_ADDR
|
||||
END
|
||||
|
||||
elif [ -n "$CASS_ADDR" ]; then
|
||||
|
||||
shortcut=/tmp/titan.properties
|
||||
cat >> /tmp/titan.properties <<END
|
||||
storage.backend=cassandra
|
||||
storage.hostname=$CASS_ADDR
|
||||
END
|
||||
|
||||
fi
|
||||
|
||||
|
||||
esAddr=${ES_ENV_PUBLISH_AS:-${ES_PORT_9300_TCP_ADDR}}
|
||||
|
||||
if [ -n "$ES_CLUSTER" -o -n "$esAddr" ]; then
|
||||
shortcut=/tmp/titan.properties
|
||||
cat >> /tmp/titan.properties <<END
|
||||
index.search.backend=elasticsearch
|
||||
index.search.elasticsearch.client-only=true
|
||||
END
|
||||
|
||||
if [ -n "$ES_CLUSTER" ]; then
|
||||
cat >> /tmp/titan.properties <<END
|
||||
index.search.elasticsearch.ext.cluster.name=$ES_CLUSTER
|
||||
END
|
||||
fi
|
||||
if [ -n "$esAddr" ]; then
|
||||
# strip off the port spec, if present
|
||||
esAddr=$(echo $esAddr | cut -d: -f1)
|
||||
cat >> /tmp/titan.properties <<END
|
||||
index.search.hostname=$esAddr
|
||||
END
|
||||
fi
|
||||
|
||||
fi
|
||||
|
||||
if [ -n "$shortcut" ]; then
|
||||
cat > /tmp/init.groovy <<END
|
||||
g = TitanFactory.open('$shortcut')
|
||||
println 'The graph \'g\' was opened using $shortcut'
|
||||
END
|
||||
args="$args /tmp/init.groovy"
|
||||
fi
|
||||
|
||||
# Allow a little extra time for Cassandra to be ready
|
||||
sleep 1
|
||||
exec $TITAN_HOME/bin/gremlin.sh $args
|
||||
titandb/Dockerfile (new file, 54 lines)
@@ -0,0 +1,54 @@
|
||||
FROM itzg/ubuntu-openjdk-7
|
||||
|
||||
MAINTAINER itzg
|
||||
|
||||
ENV APT_GET_UPDATE 2014-07-19
|
||||
|
||||
RUN apt-get update
|
||||
RUN apt-get install -y wget unzip
|
||||
|
||||
ENV TITAN_VER 0.4.4
|
||||
ENV TITAN_STORAGE all
|
||||
ENV REXSTER_VER 2.4.0
|
||||
|
||||
RUN wget -O /tmp/titan.zip http://s3.thinkaurelius.com/downloads/titan/titan-$TITAN_STORAGE-$TITAN_VER.zip
|
||||
RUN wget -O /tmp/rexster.zip http://tinkerpop.com/downloads/rexster/rexster-server-$REXSTER_VER.zip
|
||||
|
||||
WORKDIR /opt
|
||||
|
||||
RUN unzip /tmp/titan.zip
|
||||
RUN rm /tmp/titan.zip
|
||||
|
||||
RUN unzip /tmp/rexster.zip
|
||||
RUN rm /tmp/rexster.zip
|
||||
|
||||
RUN ln -s titan-$TITAN_STORAGE-$TITAN_VER titan
|
||||
RUN ln -s rexster-server-$REXSTER_VER rexster-server
|
||||
|
||||
RUN mkdir /opt/rexster-server/ext/titan
|
||||
RUN ln -s /opt/titan-$TITAN_STORAGE-$TITAN_VER/lib/* /opt/rexster-server/ext/titan
|
||||
RUN wget -O /opt/rexster-server/ext/titan/titan-rexster-$TITAN_VER.jar \
|
||||
http://central.maven.org/maven2/com/thinkaurelius/titan/titan-rexster/$TITAN_VER/titan-rexster-$TITAN_VER.jar
|
||||
|
||||
# Clean up distro area
|
||||
RUN rm /opt/rexster-server/ext/titan/log4j* /opt/rexster-server/ext/titan/slf4j*
|
||||
RUN rm /opt/rexster-server/lib/lucene-core*
|
||||
|
||||
ADD gremlin /usr/local/bin/gremlin
|
||||
ADD rexster /usr/local/bin/rexster
|
||||
|
||||
WORKDIR titan
|
||||
|
||||
ADD titan.properties /opt/titan/conf/titan.properties
|
||||
ADD init-graph-storage.groovy /tmp/init-graph-storage.groovy
|
||||
|
||||
VOLUME ["/data", "/config", "/scripts"]
|
||||
RUN ln -s /data /opt/titan/db
|
||||
|
||||
RUN gremlin -e /tmp/init-graph-storage.groovy && rm /tmp/init-graph-storage.groovy
|
||||
|
||||
EXPOSE 8182 8184
|
||||
|
||||
ADD rexster.xml /config/rexster.xml
|
||||
|
||||
CMD ["/usr/local/bin/rexster", "-s", "-c", "/config/rexster.xml"]
|
||||
titandb/gremlin (new executable file, 3 lines)
@@ -0,0 +1,3 @@
|
||||
#!/bin/sh
|
||||
|
||||
/opt/titan/bin/gremlin.sh $*
|
||||
titandb/init-graph-storage.groovy (new file, 12 lines)
@@ -0,0 +1,12 @@
|
||||
import com.thinkaurelius.titan.core.TitanFactory
|
||||
import com.thinkaurelius.titan.core.TitanGraph
|
||||
import com.thinkaurelius.titan.graphdb.configuration.GraphDatabaseConfiguration
|
||||
import org.apache.commons.configuration.BaseConfiguration
|
||||
import org.apache.commons.configuration.Configuration
|
||||
|
||||
// Setup a blank one
|
||||
|
||||
TitanGraph g = TitanFactory.open('conf/titan.properties')
|
||||
g.shutdown()
|
||||
|
||||
|
||||
titandb/rexster (new executable file, 14 lines)
@@ -0,0 +1,14 @@
|
||||
#!/bin/sh
|
||||
|
||||
if [ -n "$PUBLISH_ADDR" ]; then
|
||||
echo "Enabling Doghouse access at http://$PUBLISH_ADDR:8182/doghouse"
|
||||
sed -i "s#<base-uri>.*</base-uri>#<base-uri>http://$PUBLISH_ADDR</base-uri>#" /config/rexster.xml
|
||||
fi
|
||||
|
||||
if [ -n "$SCRIPT" ]; then
|
||||
echo "Running Gremlin script $SCRIPT from /scripts..."
|
||||
gremlin -e /scripts/$SCRIPT
|
||||
fi
|
||||
|
||||
echo "Starting Rexster..."
|
||||
/opt/rexster-server/bin/rexster.sh $*
|
||||
titandb/rexster.xml (new file, 28 lines)
@@ -0,0 +1,28 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<rexster>
|
||||
<http>
|
||||
<base-uri>http://localhost</base-uri>
|
||||
</http>
|
||||
<graphs>
|
||||
<graph>
|
||||
<graph-name>titan</graph-name>
|
||||
<graph-type>com.thinkaurelius.titan.tinkerpop.rexster.TitanGraphConfiguration</graph-type>
|
||||
<graph-location>/data/persistit</graph-location>
|
||||
<graph-read-only>false</graph-read-only>
|
||||
<properties>
|
||||
<storage.backend>persistit</storage.backend>
|
||||
<storage.directory>/data/persistit</storage.directory>
|
||||
<storage.buffercount>5000</storage.buffercount>
|
||||
<storage.index.search.backend>elasticsearch</storage.index.search.backend>
|
||||
<storage.index.search.local-mode>true</storage.index.search.local-mode>
|
||||
<storage.index.search.client-only>false</storage.index.search.client-only>
|
||||
<storage.index.search.directory>/data/es</storage.index.search.directory>
|
||||
</properties>
|
||||
<extensions>
|
||||
<allows>
|
||||
<allow>tp:gremlin</allow>
|
||||
</allows>
|
||||
</extensions>
|
||||
</graph>
|
||||
</graphs>
|
||||
</rexster>
|
||||
titandb/scripts/GraphOfTheGods.groovy (new file, 7 lines)
@@ -0,0 +1,7 @@
|
||||
import com.thinkaurelius.titan.core.TitanFactory
|
||||
import com.thinkaurelius.titan.example.GraphOfTheGodsFactory
|
||||
|
||||
def g = TitanFactory.open('conf/titan.properties')
|
||||
GraphOfTheGodsFactory.load(g)
|
||||
|
||||
g.shutdown()
|
||||
titandb/titan.properties (new file, 8 lines)
@@ -0,0 +1,8 @@
|
||||
storage.backend=persistit
|
||||
storage.directory=/data/persistit
|
||||
storage.buffercount=5000
|
||||
|
||||
storage.index.search.backend=elasticsearch
|
||||
storage.index.search.local-mode=true
|
||||
storage.index.search.client-only=false
|
||||
storage.index.search.directory=/data/es
|
||||
ubuntu-openjdk-7/Dockerfile (7 changed lines; normal file → executable file)
@@ -2,9 +2,10 @@ FROM ubuntu:trusty
|
||||
|
||||
MAINTAINER itzg
|
||||
|
||||
ENV BUILT_ON 20140517
|
||||
|
||||
ENV APT_GET_UPDATE 2015-10-29
|
||||
RUN apt-get update
|
||||
RUN apt-get install -y openjdk-7-jre-headless
|
||||
RUN DEBIAN_FRONTEND=noninteractive \
|
||||
apt-get -q -y install openjdk-7-jre-headless wget unzip \
|
||||
&& apt-get clean
|
||||
|
||||
ENV JAVA_HOME /usr/lib/jvm/java-7-openjdk-amd64