mirror of
https://github.com/mailcow/mailcow-dockerized.git
synced 2026-02-17 22:56:23 +00:00
Compare commits
31 Commits
feat/php8.
...
feat/pytho
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
8d211ea767 | ||
|
|
4cc463a728 | ||
|
|
909eb8a63a | ||
|
|
4c4a440cdf | ||
|
|
743bfcec60 | ||
|
|
744aa5d137 | ||
|
|
e8d155d7e0 | ||
|
|
6382e3128d | ||
|
|
bc11aed753 | ||
|
|
a6e13daa8c | ||
|
|
eb7d2628ac | ||
|
|
5f93ff04a9 | ||
|
|
f35def48cb | ||
|
|
dba9675a9b | ||
|
|
13b4f86d29 | ||
|
|
3eb17a5f78 | ||
|
|
c38a4c203e | ||
|
|
f329549c2e | ||
|
|
767d746419 | ||
|
|
9174a05af3 | ||
|
|
faf8fa8c2c | ||
|
|
55d90afee4 | ||
|
|
669f75182d | ||
|
|
5a39ae45cb | ||
|
|
8baa3c9fb5 | ||
|
|
b8888521f1 | ||
|
|
2efea9c832 | ||
|
|
5a097ed5f7 | ||
|
|
cde2ba4851 | ||
|
|
1d482ed425 | ||
|
|
d3185c3c68 |
69
.github/ISSUE_TEMPLATE/Bug_report.yml
vendored
69
.github/ISSUE_TEMPLATE/Bug_report.yml
vendored
@@ -11,35 +11,22 @@ body:
|
|||||||
required: true
|
required: true
|
||||||
- type: checkboxes
|
- type: checkboxes
|
||||||
attributes:
|
attributes:
|
||||||
label: Checklist prior issue creation
|
label: I've found a bug and checked that ...
|
||||||
description: Prior to creating the issue...
|
description: Prior to placing the issue, please check following:** *(fill out each checkbox with an `X` once done)*
|
||||||
options:
|
options:
|
||||||
- label: I understand that failure to follow below instructions may cause this issue to be closed.
|
- label: ... I understand that not following the below instructions will result in immediate closure and/or deletion of my issue.
|
||||||
required: true
|
required: true
|
||||||
- label: I understand that vague, incomplete or inaccurate information may cause this issue to be closed.
|
- label: ... I have understood that this bug report is dedicated for bugs, and not for support-related inquiries.
|
||||||
required: true
|
required: true
|
||||||
- label: I understand that this form is intended solely for reporting software bugs and not for support-related inquiries.
|
- label: ... I have understood that answers are voluntary and community-driven, and not commercial support.
|
||||||
required: true
|
required: true
|
||||||
- label: I understand that all responses are voluntary and community-driven, and do not constitute commercial support.
|
- label: ... I have verified that my issue has not been already answered in the past. I also checked previous [issues](https://github.com/mailcow/mailcow-dockerized/issues).
|
||||||
required: true
|
|
||||||
- label: I confirm that I have reviewed previous [issues](https://github.com/mailcow/mailcow-dockerized/issues) to ensure this matter has not already been addressed.
|
|
||||||
required: true
|
|
||||||
- label: I confirm that my environment meets all [prerequisite requirements](https://docs.mailcow.email/getstarted/prerequisite-system/) as specified in the official documentation.
|
|
||||||
required: true
|
required: true
|
||||||
- type: textarea
|
- type: textarea
|
||||||
attributes:
|
attributes:
|
||||||
label: Description
|
label: Description
|
||||||
description: Please provide a brief description of the bug. If applicable, add screenshots to help explain your problem. (Very useful for bugs in mailcow UI.)
|
description: Please provide a brief description of the bug in 1-2 sentences. If applicable, add screenshots to help explain your problem. Very useful for bugs in mailcow UI.
|
||||||
validations:
|
render: plain text
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
attributes:
|
|
||||||
label: "Steps to reproduce:"
|
|
||||||
description: "Please describe the steps to reproduce the bug. Screenshots can be added, if helpful."
|
|
||||||
placeholder: |-
|
|
||||||
1. ...
|
|
||||||
2. ...
|
|
||||||
3. ...
|
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
- type: textarea
|
- type: textarea
|
||||||
@@ -49,36 +36,45 @@ body:
|
|||||||
render: plain text
|
render: plain text
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
attributes:
|
||||||
|
label: "Steps to reproduce:"
|
||||||
|
description: "Please describe the steps to reproduce the bug. Screenshots can be added, if helpful."
|
||||||
|
render: plain text
|
||||||
|
placeholder: |-
|
||||||
|
1. ...
|
||||||
|
2. ...
|
||||||
|
3. ...
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
- type: markdown
|
- type: markdown
|
||||||
attributes:
|
attributes:
|
||||||
value: |
|
value: |
|
||||||
## System information
|
## System information
|
||||||
In this stage we would kindly ask you to attach general system information about your setup.
|
### In this stage we would kindly ask you to attach general system information about your setup.
|
||||||
- type: dropdown
|
- type: dropdown
|
||||||
attributes:
|
attributes:
|
||||||
label: "Which branch are you using?"
|
label: "Which branch are you using?"
|
||||||
description: "#### Run: `git rev-parse --abbrev-ref HEAD`"
|
description: "#### `git rev-parse --abbrev-ref HEAD`"
|
||||||
multiple: false
|
multiple: false
|
||||||
options:
|
options:
|
||||||
- master (stable)
|
- master
|
||||||
- staging
|
|
||||||
- nightly
|
- nightly
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
- type: dropdown
|
- type: dropdown
|
||||||
attributes:
|
attributes:
|
||||||
label: "Which architecture are you using?"
|
label: "Which architecture are you using?"
|
||||||
description: "#### Run: `uname -m`"
|
description: "#### `uname -m`"
|
||||||
multiple: false
|
multiple: false
|
||||||
options:
|
options:
|
||||||
- x86_64
|
- x86
|
||||||
- ARM64 (aarch64)
|
- ARM64 (aarch64)
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
- type: input
|
- type: input
|
||||||
attributes:
|
attributes:
|
||||||
label: "Operating System:"
|
label: "Operating System:"
|
||||||
description: "#### Run: `lsb_release -ds`"
|
|
||||||
placeholder: "e.g. Ubuntu 22.04 LTS"
|
placeholder: "e.g. Ubuntu 22.04 LTS"
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
@@ -97,44 +93,43 @@ body:
|
|||||||
- type: input
|
- type: input
|
||||||
attributes:
|
attributes:
|
||||||
label: "Virtualization technology:"
|
label: "Virtualization technology:"
|
||||||
description: "LXC and OpenVZ are not supported!"
|
placeholder: "KVM, VMware, Xen, etc - **LXC and OpenVZ are not supported**"
|
||||||
placeholder: "KVM, VMware ESXi, Xen, etc"
|
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
- type: input
|
- type: input
|
||||||
attributes:
|
attributes:
|
||||||
label: "Docker version:"
|
label: "Docker version:"
|
||||||
description: "#### Run: `docker version`"
|
description: "#### `docker version`"
|
||||||
placeholder: "20.10.21"
|
placeholder: "20.10.21"
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
- type: input
|
- type: input
|
||||||
attributes:
|
attributes:
|
||||||
label: "docker-compose version or docker compose version:"
|
label: "docker-compose version or docker compose version:"
|
||||||
description: "#### Run: `docker-compose version` or `docker compose version`"
|
description: "#### `docker-compose version` or `docker compose version`"
|
||||||
placeholder: "v2.12.2"
|
placeholder: "v2.12.2"
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
- type: input
|
- type: input
|
||||||
attributes:
|
attributes:
|
||||||
label: "mailcow version:"
|
label: "mailcow version:"
|
||||||
description: "#### Run: ```git describe --tags `git rev-list --tags --max-count=1` ```"
|
description: "#### ```git describe --tags `git rev-list --tags --max-count=1` ```"
|
||||||
placeholder: "2022-08x"
|
placeholder: "2022-08"
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
- type: input
|
- type: input
|
||||||
attributes:
|
attributes:
|
||||||
label: "Reverse proxy:"
|
label: "Reverse proxy:"
|
||||||
placeholder: "e.g. nginx/Traefik, or none"
|
placeholder: "e.g. Nginx/Traefik"
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
- type: textarea
|
- type: textarea
|
||||||
attributes:
|
attributes:
|
||||||
label: "Logs of git diff:"
|
label: "Logs of git diff:"
|
||||||
description: "#### Output of `git diff origin/master`, any other changes to the code? Sanitize if needed. If so, **please post them**:"
|
description: "#### Output of `git diff origin/master`, any other changes to the code? If so, **please post them**:"
|
||||||
render: plain text
|
render: plain text
|
||||||
validations:
|
validations:
|
||||||
required: false
|
required: true
|
||||||
- type: textarea
|
- type: textarea
|
||||||
attributes:
|
attributes:
|
||||||
label: "Logs of iptables -L -vn:"
|
label: "Logs of iptables -L -vn:"
|
||||||
|
|||||||
@@ -14,7 +14,7 @@ jobs:
|
|||||||
pull-requests: write
|
pull-requests: write
|
||||||
steps:
|
steps:
|
||||||
- name: Mark/Close Stale Issues and Pull Requests 🗑️
|
- name: Mark/Close Stale Issues and Pull Requests 🗑️
|
||||||
uses: actions/stale@v10.1.0
|
uses: actions/stale@v9.1.0
|
||||||
with:
|
with:
|
||||||
repo-token: ${{ secrets.STALE_ACTION_PAT }}
|
repo-token: ${{ secrets.STALE_ACTION_PAT }}
|
||||||
days-before-stale: 60
|
days-before-stale: 60
|
||||||
|
|||||||
2
.github/workflows/image_builds.yml
vendored
2
.github/workflows/image_builds.yml
vendored
@@ -27,7 +27,7 @@ jobs:
|
|||||||
- "watchdog-mailcow"
|
- "watchdog-mailcow"
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v4
|
||||||
- name: Setup Docker
|
- name: Setup Docker
|
||||||
run: |
|
run: |
|
||||||
curl -sSL https://get.docker.com/ | CHANNEL=stable sudo sh
|
curl -sSL https://get.docker.com/ | CHANNEL=stable sudo sh
|
||||||
|
|||||||
4
.github/workflows/pr_to_nightly.yml
vendored
4
.github/workflows/pr_to_nightly.yml
vendored
@@ -8,11 +8,11 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repository
|
- name: Checkout repository
|
||||||
uses: actions/checkout@v5
|
uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
- name: Run the Action
|
- name: Run the Action
|
||||||
uses: devops-infra/action-pull-request@v1.0.2
|
uses: devops-infra/action-pull-request@v0.6.0
|
||||||
with:
|
with:
|
||||||
github_token: ${{ secrets.PRTONIGHTLY_ACTION_PAT }}
|
github_token: ${{ secrets.PRTONIGHTLY_ACTION_PAT }}
|
||||||
title: Automatic PR to nightly from ${{ github.event.repository.updated_at}}
|
title: Automatic PR to nightly from ${{ github.event.repository.updated_at}}
|
||||||
|
|||||||
2
.github/workflows/rebuild_backup_image.yml
vendored
2
.github/workflows/rebuild_backup_image.yml
vendored
@@ -13,7 +13,7 @@ jobs:
|
|||||||
packages: write
|
packages: write
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v5
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: Set up QEMU
|
- name: Set up QEMU
|
||||||
uses: docker/setup-qemu-action@v3
|
uses: docker/setup-qemu-action@v3
|
||||||
|
|||||||
@@ -15,7 +15,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v5
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: Generate postscreen_access.cidr
|
- name: Generate postscreen_access.cidr
|
||||||
run: |
|
run: |
|
||||||
|
|||||||
45
.gitignore
vendored
45
.gitignore
vendored
@@ -1,6 +1,3 @@
|
|||||||
!data/conf/nginx/dynmaps.conf
|
|
||||||
!data/conf/nginx/meta_exporter.conf
|
|
||||||
!data/conf/nginx/site.conf
|
|
||||||
!/**/.gitkeep
|
!/**/.gitkeep
|
||||||
*.iml
|
*.iml
|
||||||
.idea
|
.idea
|
||||||
@@ -8,52 +5,33 @@
|
|||||||
data/assets/ssl-example/*
|
data/assets/ssl-example/*
|
||||||
data/assets/ssl/*
|
data/assets/ssl/*
|
||||||
data/conf/borgmatic/
|
data/conf/borgmatic/
|
||||||
data/conf/clamav/whitelist.ign2
|
data/conf/clamav/rendered_configs
|
||||||
data/conf/dovecot/acl_anyone
|
|
||||||
data/conf/dovecot/dovecot-master.passwd
|
|
||||||
data/conf/dovecot/dovecot-master.userdb
|
|
||||||
data/conf/dovecot/extra.conf
|
data/conf/dovecot/extra.conf
|
||||||
data/conf/dovecot/mail_replica.conf
|
data/conf/dovecot/rendered_configs
|
||||||
data/conf/dovecot/global_sieve_*
|
data/conf/nginx/rendered_configs
|
||||||
data/conf/dovecot/last_login
|
|
||||||
data/conf/dovecot/lua
|
|
||||||
data/conf/dovecot/mail_plugins*
|
|
||||||
data/conf/dovecot/shared_namespace.conf
|
|
||||||
data/conf/dovecot/sni.conf
|
|
||||||
data/conf/dovecot/sogo-sso.conf
|
|
||||||
data/conf/dovecot/sogo_trusted_ip.conf
|
|
||||||
data/conf/dovecot/sql
|
|
||||||
data/conf/dovecot/conf.d/fts.conf
|
|
||||||
data/conf/nextcloud-*.bak
|
|
||||||
data/conf/nginx/*.active
|
|
||||||
data/conf/nginx/*.bak
|
|
||||||
data/conf/nginx/*.conf
|
|
||||||
data/conf/nginx/*.custom
|
|
||||||
data/conf/phpfpm/sogo-sso/sogo-sso.pass
|
data/conf/phpfpm/sogo-sso/sogo-sso.pass
|
||||||
|
data/conf/phpfpm/rendered_configs
|
||||||
data/conf/portainer/
|
data/conf/portainer/
|
||||||
data/conf/postfix/allow_mailcow_local.regexp
|
|
||||||
data/conf/postfix/custom_postscreen_whitelist.cidr
|
|
||||||
data/conf/postfix/custom_transport.pcre
|
|
||||||
data/conf/postfix/extra.cf
|
data/conf/postfix/extra.cf
|
||||||
data/conf/postfix/sni.map
|
data/conf/postfix/rendered_configs
|
||||||
data/conf/postfix/sni.map.db
|
|
||||||
data/conf/postfix/sql
|
|
||||||
data/conf/postfix/dns_blocklists.cf
|
|
||||||
data/conf/postfix/dnsbl_reply.map
|
|
||||||
data/conf/rspamd/custom/*
|
data/conf/rspamd/custom/*
|
||||||
data/conf/rspamd/local.d/*
|
data/conf/rspamd/local.d/*
|
||||||
data/conf/rspamd/override.d/*
|
data/conf/rspamd/override.d/*
|
||||||
|
data/conf/rspamd/rendered_configs
|
||||||
data/conf/sogo/custom-theme.js
|
data/conf/sogo/custom-theme.js
|
||||||
data/conf/sogo/plist_ldap
|
|
||||||
data/conf/sogo/plist_ldap.sh
|
|
||||||
data/conf/sogo/sieve.creds
|
data/conf/sogo/sieve.creds
|
||||||
data/conf/sogo/cron.creds
|
data/conf/sogo/cron.creds
|
||||||
data/conf/sogo/custom-fulllogo.svg
|
data/conf/sogo/custom-fulllogo.svg
|
||||||
data/conf/sogo/custom-shortlogo.svg
|
data/conf/sogo/custom-shortlogo.svg
|
||||||
data/conf/sogo/custom-fulllogo.png
|
data/conf/sogo/custom-fulllogo.png
|
||||||
|
data/conf/sogo/rendered_configs
|
||||||
|
data/conf/mysql/rendered_configs
|
||||||
data/gitea/
|
data/gitea/
|
||||||
data/gogs/
|
data/gogs/
|
||||||
|
data/hooks/clamd/*
|
||||||
data/hooks/dovecot/*
|
data/hooks/dovecot/*
|
||||||
|
data/hooks/mariadb/*
|
||||||
|
data/hooks/nginx/*
|
||||||
data/hooks/phpfpm/*
|
data/hooks/phpfpm/*
|
||||||
data/hooks/postfix/*
|
data/hooks/postfix/*
|
||||||
data/hooks/rspamd/*
|
data/hooks/rspamd/*
|
||||||
@@ -75,4 +53,3 @@ refresh_images.sh
|
|||||||
update_diffs/
|
update_diffs/
|
||||||
create_cold_standby.sh
|
create_cold_standby.sh
|
||||||
!data/conf/nginx/mailcow_auth.conf
|
!data/conf/nginx/mailcow_auth.conf
|
||||||
data/conf/postfix/postfix-tlspol
|
|
||||||
@@ -1,230 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
# _modules/scripts/core.sh
|
|
||||||
# THIS SCRIPT IS DESIGNED TO BE RUNNING BY MAILCOW SCRIPTS ONLY!
|
|
||||||
# DO NOT, AGAIN, NOT TRY TO RUN THIS SCRIPT STANDALONE!!!!!!
|
|
||||||
|
|
||||||
# ANSI color for red errors
|
|
||||||
RED='\e[31m'
|
|
||||||
GREEN='\e[32m'
|
|
||||||
YELLOW='\e[33m'
|
|
||||||
BLUE='\e[34m'
|
|
||||||
MAGENTA='\e[35m'
|
|
||||||
LIGHT_RED='\e[91m'
|
|
||||||
LIGHT_GREEN='\e[92m'
|
|
||||||
NC='\e[0m'
|
|
||||||
|
|
||||||
caller="${BASH_SOURCE[1]##*/}"
|
|
||||||
|
|
||||||
get_installed_tools(){
|
|
||||||
for bin in openssl curl docker git awk sha1sum grep cut jq; do
|
|
||||||
if [[ -z $(command -v ${bin}) ]]; then
|
|
||||||
echo "Error: Cannot find command '${bin}'. Cannot proceed."
|
|
||||||
echo "Solution: Please review system requirements and install requirements. Then, re-run the script."
|
|
||||||
echo "See System Requirements: https://docs.mailcow.email/getstarted/install/"
|
|
||||||
echo "Exiting..."
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
if grep --help 2>&1 | head -n 1 | grep -q -i "busybox"; then echo -e "${LIGHT_RED}BusyBox grep detected, please install gnu grep, \"apk add --no-cache --upgrade grep\"${NC}"; exit 1; fi
|
|
||||||
# This will also cover sort
|
|
||||||
if cp --help 2>&1 | head -n 1 | grep -q -i "busybox"; then echo -e "${LIGHT_RED}BusyBox cp detected, please install coreutils, \"apk add --no-cache --upgrade coreutils\"${NC}"; exit 1; fi
|
|
||||||
if sed --help 2>&1 | head -n 1 | grep -q -i "busybox"; then echo -e "${LIGHT_RED}BusyBox sed detected, please install gnu sed, \"apk add --no-cache --upgrade sed\"${NC}"; exit 1; fi
|
|
||||||
}
|
|
||||||
|
|
||||||
get_docker_version(){
|
|
||||||
# Check Docker Version (need at least 24.X)
|
|
||||||
docker_version=$(docker version --format '{{.Server.Version}}' | cut -d '.' -f 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
get_compose_type(){
|
|
||||||
if docker compose > /dev/null 2>&1; then
|
|
||||||
if docker compose version --short | grep -e "^2." -e "^v2." > /dev/null 2>&1; then
|
|
||||||
COMPOSE_VERSION=native
|
|
||||||
COMPOSE_COMMAND="docker compose"
|
|
||||||
if [[ "$caller" == "update.sh" ]]; then
|
|
||||||
sed -i 's/^DOCKER_COMPOSE_VERSION=.*/DOCKER_COMPOSE_VERSION=native/' "$SCRIPT_DIR/mailcow.conf"
|
|
||||||
fi
|
|
||||||
echo -e "\e[33mFound Docker Compose Plugin (native).\e[0m"
|
|
||||||
echo -e "\e[33mSetting the DOCKER_COMPOSE_VERSION Variable to native\e[0m"
|
|
||||||
sleep 2
|
|
||||||
echo -e "\e[33mNotice: You'll have to update this Compose Version via your Package Manager manually!\e[0m"
|
|
||||||
else
|
|
||||||
echo -e "\e[31mCannot find Docker Compose with a Version Higher than 2.X.X.\e[0m"
|
|
||||||
echo -e "\e[31mPlease update/install it manually regarding to this doc site: https://docs.mailcow.email/install/\e[0m"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
elif docker-compose > /dev/null 2>&1; then
|
|
||||||
if ! [[ $(alias docker-compose 2> /dev/null) ]] ; then
|
|
||||||
if docker-compose version --short | grep "^2." > /dev/null 2>&1; then
|
|
||||||
COMPOSE_VERSION=standalone
|
|
||||||
COMPOSE_COMMAND="docker-compose"
|
|
||||||
if [[ "$caller" == "update.sh" ]]; then
|
|
||||||
sed -i 's/^DOCKER_COMPOSE_VERSION=.*/DOCKER_COMPOSE_VERSION=standalone/' "$SCRIPT_DIR/mailcow.conf"
|
|
||||||
fi
|
|
||||||
echo -e "\e[33mFound Docker Compose Standalone.\e[0m"
|
|
||||||
echo -e "\e[33mSetting the DOCKER_COMPOSE_VERSION Variable to standalone\e[0m"
|
|
||||||
sleep 2
|
|
||||||
echo -e "\e[33mNotice: For an automatic update of docker-compose please use the update_compose.sh scripts located at the helper-scripts folder.\e[0m"
|
|
||||||
else
|
|
||||||
echo -e "\e[31mCannot find Docker Compose with a Version Higher than 2.X.X.\e[0m"
|
|
||||||
echo -e "\e[31mPlease update/install manually regarding to this doc site: https://docs.mailcow.email/install/\e[0m"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
echo -e "\e[31mCannot find Docker Compose.\e[0m"
|
|
||||||
echo -e "\e[31mPlease install it regarding to this doc site: https://docs.mailcow.email/install/\e[0m"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
detect_bad_asn() {
|
|
||||||
echo -e "\e[33mDetecting if your IP is listed on Spamhaus Bad ASN List...\e[0m"
|
|
||||||
response=$(curl --connect-timeout 15 --max-time 30 -s -o /dev/null -w "%{http_code}" "https://asn-check.mailcow.email")
|
|
||||||
if [ "$response" -eq 503 ]; then
|
|
||||||
if [ -z "$SPAMHAUS_DQS_KEY" ]; then
|
|
||||||
echo -e "\e[33mYour server's public IP uses an AS that is blocked by Spamhaus to use their DNS public blocklists for Postfix.\e[0m"
|
|
||||||
echo -e "\e[33mmailcow did not detected a value for the variable SPAMHAUS_DQS_KEY inside mailcow.conf!\e[0m"
|
|
||||||
sleep 2
|
|
||||||
echo ""
|
|
||||||
echo -e "\e[33mTo use the Spamhaus DNS Blocklists again, you will need to create a FREE account for their Data Query Service (DQS) at: https://www.spamhaus.com/free-trial/sign-up-for-a-free-data-query-service-account\e[0m"
|
|
||||||
echo -e "\e[33mOnce done, enter your DQS API key in mailcow.conf and mailcow will do the rest for you!\e[0m"
|
|
||||||
echo ""
|
|
||||||
sleep 2
|
|
||||||
else
|
|
||||||
echo -e "\e[33mYour server's public IP uses an AS that is blocked by Spamhaus to use their DNS public blocklists for Postfix.\e[0m"
|
|
||||||
echo -e "\e[32mmailcow detected a Value for the variable SPAMHAUS_DQS_KEY inside mailcow.conf. Postfix will use DQS with the given API key...\e[0m"
|
|
||||||
fi
|
|
||||||
elif [ "$response" -eq 200 ]; then
|
|
||||||
echo -e "\e[33mCheck completed! Your IP is \e[32mclean\e[0m"
|
|
||||||
elif [ "$response" -eq 429 ]; then
|
|
||||||
echo -e "\e[33mCheck completed! \e[31mYour IP seems to be rate limited on the ASN Check service... please try again later!\e[0m"
|
|
||||||
else
|
|
||||||
echo -e "\e[31mCheck failed! \e[0mMaybe a DNS or Network problem?\e[0m"
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
check_online_status() {
|
|
||||||
CHECK_ONLINE_DOMAINS=('https://github.com' 'https://hub.docker.com')
|
|
||||||
for domain in "${CHECK_ONLINE_DOMAINS[@]}"; do
|
|
||||||
if timeout 6 curl --head --silent --output /dev/null ${domain}; then
|
|
||||||
return 0
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
prefetch_images() {
|
|
||||||
[[ -z ${BRANCH} ]] && { echo -e "\e[33m\nUnknown branch...\e[0m"; exit 1; }
|
|
||||||
git fetch origin #${BRANCH}
|
|
||||||
while read image; do
|
|
||||||
RET_C=0
|
|
||||||
until docker pull "${image}"; do
|
|
||||||
RET_C=$((RET_C + 1))
|
|
||||||
echo -e "\e[33m\nError pulling $image, retrying...\e[0m"
|
|
||||||
[ ${RET_C} -gt 3 ] && { echo -e "\e[31m\nToo many failed retries, exiting\e[0m"; exit 1; }
|
|
||||||
sleep 1
|
|
||||||
done
|
|
||||||
done < <(git show "origin/${BRANCH}:docker-compose.yml" | grep "image:" | awk '{ gsub("image:","", $3); print $2 }')
|
|
||||||
}
|
|
||||||
|
|
||||||
docker_garbage() {
|
|
||||||
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../.." && pwd )"
|
|
||||||
IMGS_TO_DELETE=()
|
|
||||||
|
|
||||||
declare -A IMAGES_INFO
|
|
||||||
COMPOSE_IMAGES=($(grep -oP "image: \K(ghcr\.io/)?mailcow.+" "${SCRIPT_DIR}/docker-compose.yml"))
|
|
||||||
|
|
||||||
for existing_image in $(docker images --format "{{.ID}}:{{.Repository}}:{{.Tag}}" | grep -E '(mailcow/|ghcr\.io/mailcow/)'); do
|
|
||||||
ID=$(echo "$existing_image" | cut -d ':' -f 1)
|
|
||||||
REPOSITORY=$(echo "$existing_image" | cut -d ':' -f 2)
|
|
||||||
TAG=$(echo "$existing_image" | cut -d ':' -f 3)
|
|
||||||
|
|
||||||
if [[ "$REPOSITORY" == "mailcow/backup" || "$REPOSITORY" == "ghcr.io/mailcow/backup" ]]; then
|
|
||||||
if [[ "$TAG" != "<none>" ]]; then
|
|
||||||
continue
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ " ${COMPOSE_IMAGES[@]} " =~ " ${REPOSITORY}:${TAG} " ]]; then
|
|
||||||
continue
|
|
||||||
else
|
|
||||||
IMGS_TO_DELETE+=("$ID")
|
|
||||||
IMAGES_INFO["$ID"]="$REPOSITORY:$TAG"
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
if [[ ! -z ${IMGS_TO_DELETE[*]} ]]; then
|
|
||||||
echo "The following unused mailcow images were found:"
|
|
||||||
for id in "${IMGS_TO_DELETE[@]}"; do
|
|
||||||
echo " ${IMAGES_INFO[$id]} ($id)"
|
|
||||||
done
|
|
||||||
|
|
||||||
if [ -z "$FORCE" ]; then
|
|
||||||
read -r -p "Do you want to delete them to free up some space? [y/N] " response
|
|
||||||
if [[ "$response" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
|
|
||||||
docker rmi ${IMGS_TO_DELETE[*]}
|
|
||||||
else
|
|
||||||
echo "OK, skipped."
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
echo "Running in forced mode! Force removing old mailcow images..."
|
|
||||||
docker rmi ${IMGS_TO_DELETE[*]}
|
|
||||||
fi
|
|
||||||
echo -e "\e[32mFurther cleanup...\e[0m"
|
|
||||||
echo "If you want to cleanup further garbage collected by Docker, please make sure all containers are up and running before cleaning your system by executing \"docker system prune\""
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
in_array() {
|
|
||||||
local e match="$1"
|
|
||||||
shift
|
|
||||||
for e; do [[ "$e" == "$match" ]] && return 0; done
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
detect_major_update() {
|
|
||||||
if [ ${BRANCH} == "master" ]; then
|
|
||||||
# Array with major versions
|
|
||||||
# Add major versions here
|
|
||||||
MAJOR_VERSIONS=(
|
|
||||||
"2025-02"
|
|
||||||
"2025-03"
|
|
||||||
"2025-09"
|
|
||||||
)
|
|
||||||
|
|
||||||
current_version=""
|
|
||||||
if [[ -f "${SCRIPT_DIR}/data/web/inc/app_info.inc.php" ]]; then
|
|
||||||
current_version=$(grep 'MAILCOW_GIT_VERSION' ${SCRIPT_DIR}/data/web/inc/app_info.inc.php | sed -E 's/.*MAILCOW_GIT_VERSION="([^"]+)".*/\1/')
|
|
||||||
fi
|
|
||||||
if [[ -z "$current_version" ]]; then
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
release_url="https://github.com/mailcow/mailcow-dockerized/releases/tag"
|
|
||||||
|
|
||||||
updates_to_apply=()
|
|
||||||
|
|
||||||
for version in "${MAJOR_VERSIONS[@]}"; do
|
|
||||||
if [[ "$current_version" < "$version" ]]; then
|
|
||||||
updates_to_apply+=("$version")
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
if [[ ${#updates_to_apply[@]} -gt 0 ]]; then
|
|
||||||
echo -e "\e[33m\nMAJOR UPDATES to be applied:\e[0m"
|
|
||||||
for update in "${updates_to_apply[@]}"; do
|
|
||||||
echo "$update - $release_url/$update"
|
|
||||||
done
|
|
||||||
|
|
||||||
echo -e "\nPlease read the release notes before proceeding."
|
|
||||||
read -p "Do you want to proceed with the update? [y/n] " response
|
|
||||||
if [[ "${response}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
|
|
||||||
echo "Proceeding with the update..."
|
|
||||||
else
|
|
||||||
echo "Update canceled. Exiting."
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
@@ -1,239 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
# _modules/scripts/ipv6_controller.sh
|
|
||||||
# THIS SCRIPT IS DESIGNED TO BE RUNNING BY MAILCOW SCRIPTS ONLY!
|
|
||||||
# DO NOT, AGAIN, NOT TRY TO RUN THIS SCRIPT STANDALONE!!!!!!
|
|
||||||
|
|
||||||
# 1) Check if the host supports IPv6
|
|
||||||
get_ipv6_support() {
|
|
||||||
# ---- helper: probe external IPv6 connectivity without DNS ----
|
|
||||||
_probe_ipv6_connectivity() {
|
|
||||||
# Use literal, always-on IPv6 echo responders (no DNS required)
|
|
||||||
local PROBE_IPS=("2001:4860:4860::8888" "2606:4700:4700::1111")
|
|
||||||
local ip rc=1
|
|
||||||
|
|
||||||
for ip in "${PROBE_IPS[@]}"; do
|
|
||||||
if command -v ping6 &>/dev/null; then
|
|
||||||
ping6 -c1 -W2 "$ip" &>/dev/null || ping6 -c1 -w2 "$ip" &>/dev/null
|
|
||||||
rc=$?
|
|
||||||
elif command -v ping &>/dev/null; then
|
|
||||||
ping -6 -c1 -W2 "$ip" &>/dev/null || ping -6 -c1 -w2 "$ip" &>/dev/null
|
|
||||||
rc=$?
|
|
||||||
else
|
|
||||||
rc=1
|
|
||||||
fi
|
|
||||||
[[ $rc -eq 0 ]] && return 0
|
|
||||||
done
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
if [[ ! -f /proc/net/if_inet6 ]] || grep -qs '^1' /proc/sys/net/ipv6/conf/all/disable_ipv6 2>/dev/null; then
|
|
||||||
DETECTED_IPV6=false
|
|
||||||
echo -e "${YELLOW}IPv6 not detected on host – ${LIGHT_RED}IPv6 is administratively disabled${YELLOW}.${NC}"
|
|
||||||
return
|
|
||||||
fi
|
|
||||||
|
|
||||||
if ip -6 route show default 2>/dev/null | grep -qE '^default'; then
|
|
||||||
echo -e "${YELLOW}Default IPv6 route found – testing external IPv6 connectivity...${NC}"
|
|
||||||
if _probe_ipv6_connectivity; then
|
|
||||||
DETECTED_IPV6=true
|
|
||||||
echo -e "IPv6 detected on host – ${LIGHT_GREEN}leaving IPv6 support enabled${YELLOW}.${NC}"
|
|
||||||
else
|
|
||||||
DETECTED_IPV6=false
|
|
||||||
echo -e "${YELLOW}Default IPv6 route present but external IPv6 connectivity failed – ${LIGHT_RED}disabling IPv6 support${YELLOW}.${NC}"
|
|
||||||
fi
|
|
||||||
return
|
|
||||||
fi
|
|
||||||
|
|
||||||
if ip -6 addr show scope global 2>/dev/null | grep -q 'inet6'; then
|
|
||||||
DETECTED_IPV6=false
|
|
||||||
echo -e "${YELLOW}Global IPv6 address present but no default route – ${LIGHT_RED}disabling IPv6 support${YELLOW}.${NC}"
|
|
||||||
return
|
|
||||||
fi
|
|
||||||
|
|
||||||
if ip -6 addr show scope link 2>/dev/null | grep -q 'inet6'; then
|
|
||||||
echo -e "${YELLOW}Only link-local IPv6 addresses found – testing external IPv6 connectivity...${NC}"
|
|
||||||
if _probe_ipv6_connectivity; then
|
|
||||||
DETECTED_IPV6=true
|
|
||||||
echo -e "External IPv6 connectivity available – ${LIGHT_GREEN}leaving IPv6 support enabled${YELLOW}.${NC}"
|
|
||||||
else
|
|
||||||
DETECTED_IPV6=false
|
|
||||||
echo -e "${YELLOW}Only link-local IPv6 present and no external connectivity – ${LIGHT_RED}disabling IPv6 support${YELLOW}.${NC}"
|
|
||||||
fi
|
|
||||||
return
|
|
||||||
fi
|
|
||||||
|
|
||||||
DETECTED_IPV6=false
|
|
||||||
echo -e "${YELLOW}IPv6 not detected on host – ${LIGHT_RED}disabling IPv6 support${YELLOW}.${NC}"
|
|
||||||
}
|
|
||||||
|
|
||||||
# 2) Ensure Docker daemon.json has (or create) the required IPv6 settings
|
|
||||||
docker_daemon_edit(){
|
|
||||||
DOCKER_DAEMON_CONFIG="/etc/docker/daemon.json"
|
|
||||||
DOCKER_MAJOR=$(docker version --format '{{.Server.Version}}' 2>/dev/null | cut -d. -f1)
|
|
||||||
MISSING=()
|
|
||||||
|
|
||||||
_has_kv() { grep -Eq "\"$1\"[[:space:]]*:[[:space:]]*$2" "$DOCKER_DAEMON_CONFIG" 2>/dev/null; }
|
|
||||||
|
|
||||||
if [[ -f "$DOCKER_DAEMON_CONFIG" ]]; then
|
|
||||||
|
|
||||||
# reject empty or whitespace-only file immediately
|
|
||||||
if [[ ! -s "$DOCKER_DAEMON_CONFIG" ]] || ! grep -Eq '[{}]' "$DOCKER_DAEMON_CONFIG"; then
|
|
||||||
echo -e "${RED}ERROR: $DOCKER_DAEMON_CONFIG exists but is empty or contains no JSON braces – please initialize it with valid JSON (e.g. {}).${NC}"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Validate JSON if jq is present
|
|
||||||
if command -v jq &>/dev/null && ! jq empty "$DOCKER_DAEMON_CONFIG" &>/dev/null; then
|
|
||||||
echo -e "${RED}ERROR: Invalid JSON in $DOCKER_DAEMON_CONFIG – please correct manually.${NC}"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Gather missing keys
|
|
||||||
! _has_kv ipv6 true && MISSING+=("ipv6: true")
|
|
||||||
|
|
||||||
# For Docker < 28, keep requiring fixed-cidr-v6 (default bridge needs it on old engines)
|
|
||||||
if [[ -n "$DOCKER_MAJOR" && "$DOCKER_MAJOR" -lt 28 ]]; then
|
|
||||||
! grep -Eq '"fixed-cidr-v6"[[:space:]]*:[[:space:]]*".+"' "$DOCKER_DAEMON_CONFIG" \
|
|
||||||
&& MISSING+=('fixed-cidr-v6: "fd00:dead:beef:c0::/80"')
|
|
||||||
fi
|
|
||||||
|
|
||||||
# For Docker < 27, ip6tables needed and was tied to experimental in older releases
|
|
||||||
if [[ -n "$DOCKER_MAJOR" && "$DOCKER_MAJOR" -lt 27 ]]; then
|
|
||||||
_has_kv ipv6 true && ! _has_kv ip6tables true && MISSING+=("ip6tables: true")
|
|
||||||
! _has_kv experimental true && MISSING+=("experimental: true")
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Fix if needed
|
|
||||||
if ((${#MISSING[@]}>0)); then
|
|
||||||
echo -e "${MAGENTA}Your daemon.json is missing: ${YELLOW}${MISSING[*]}${NC}"
|
|
||||||
if [[ -n "$FORCE" ]]; then
|
|
||||||
ans=Y
|
|
||||||
else
|
|
||||||
read -p "Would you like to update $DOCKER_DAEMON_CONFIG now? [Y/n] " ans
|
|
||||||
ans=${ans:-Y}
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ $ans =~ ^[Yy]$ ]]; then
|
|
||||||
cp "$DOCKER_DAEMON_CONFIG" "${DOCKER_DAEMON_CONFIG}.bak"
|
|
||||||
if command -v jq &>/dev/null; then
|
|
||||||
TMP=$(mktemp)
|
|
||||||
# Base filter: ensure ipv6 = true
|
|
||||||
JQ_FILTER='.ipv6 = true'
|
|
||||||
|
|
||||||
# Add fixed-cidr-v6 only for Docker < 28
|
|
||||||
if [[ -n "$DOCKER_MAJOR" && "$DOCKER_MAJOR" -lt 28 ]]; then
|
|
||||||
JQ_FILTER+=' | .["fixed-cidr-v6"] = (.["fixed-cidr-v6"] // "fd00:dead:beef:c0::/80")'
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Add ip6tables/experimental only for Docker < 27
|
|
||||||
if [[ -n "$DOCKER_MAJOR" && "$DOCKER_MAJOR" -lt 27 ]]; then
|
|
||||||
JQ_FILTER+=' | .ip6tables = true | .experimental = true'
|
|
||||||
fi
|
|
||||||
|
|
||||||
jq "$JQ_FILTER" "$DOCKER_DAEMON_CONFIG" >"$TMP" && mv "$TMP" "$DOCKER_DAEMON_CONFIG"
|
|
||||||
echo -e "${LIGHT_GREEN}daemon.json updated. Restarting Docker...${NC}"
|
|
||||||
(command -v systemctl &>/dev/null && systemctl restart docker) || service docker restart
|
|
||||||
echo -e "${YELLOW}Docker restarted.${NC}"
|
|
||||||
else
|
|
||||||
echo -e "${RED}Please install jq or manually update daemon.json and restart Docker.${NC}"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
echo -e "${YELLOW}User declined Docker update – please insert these changes manually:${NC}"
|
|
||||||
echo "${MISSING[*]}"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
else
|
|
||||||
# Create new daemon.json if missing
|
|
||||||
if [[ -n "$FORCE" ]]; then
|
|
||||||
ans=Y
|
|
||||||
else
|
|
||||||
read -p "$DOCKER_DAEMON_CONFIG not found. Create it with IPv6 settings? [Y/n] " ans
|
|
||||||
ans=${ans:-Y}
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ $ans =~ ^[Yy]$ ]]; then
|
|
||||||
mkdir -p "$(dirname "$DOCKER_DAEMON_CONFIG")"
|
|
||||||
if [[ -n "$DOCKER_MAJOR" && "$DOCKER_MAJOR" -lt 27 ]]; then
|
|
||||||
cat > "$DOCKER_DAEMON_CONFIG" <<EOF
|
|
||||||
{
|
|
||||||
"ipv6": true,
|
|
||||||
"fixed-cidr-v6": "fd00:dead:beef:c0::/80",
|
|
||||||
"ip6tables": true,
|
|
||||||
"experimental": true
|
|
||||||
}
|
|
||||||
EOF
|
|
||||||
elif [[ -n "$DOCKER_MAJOR" && "$DOCKER_MAJOR" -lt 28 ]]; then
|
|
||||||
cat > "$DOCKER_DAEMON_CONFIG" <<EOF
|
|
||||||
{
|
|
||||||
"ipv6": true,
|
|
||||||
"fixed-cidr-v6": "fd00:dead:beef:c0::/80"
|
|
||||||
}
|
|
||||||
EOF
|
|
||||||
else
|
|
||||||
# Docker 28+: ipv6 works without fixed-cidr-v6
|
|
||||||
cat > "$DOCKER_DAEMON_CONFIG" <<EOF
|
|
||||||
{
|
|
||||||
"ipv6": true
|
|
||||||
}
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
echo -e "${GREEN}Created $DOCKER_DAEMON_CONFIG with IPv6 settings.${NC}"
|
|
||||||
echo "Restarting Docker..."
|
|
||||||
(command -v systemctl &>/dev/null && systemctl restart docker) || service docker restart
|
|
||||||
echo "Docker restarted."
|
|
||||||
else
|
|
||||||
echo "User declined to create daemon.json – please manually merge the docker daemon with these configs:"
|
|
||||||
echo "${MISSING[*]}"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# 3) Main wrapper for generate_config.sh and update.sh
|
|
||||||
configure_ipv6() {
|
|
||||||
# detect manual override if mailcow.conf is present
|
|
||||||
if [[ -n "$MAILCOW_CONF" && -f "$MAILCOW_CONF" ]] && grep -q '^ENABLE_IPV6=' "$MAILCOW_CONF"; then
|
|
||||||
MANUAL_SETTING=$(grep '^ENABLE_IPV6=' "$MAILCOW_CONF" | cut -d= -f2)
|
|
||||||
elif [[ -z "$MAILCOW_CONF" ]] && [[ -n "${ENABLE_IPV6:-}" ]]; then
|
|
||||||
MANUAL_SETTING="$ENABLE_IPV6"
|
|
||||||
else
|
|
||||||
MANUAL_SETTING=""
|
|
||||||
fi
|
|
||||||
|
|
||||||
get_ipv6_support
|
|
||||||
|
|
||||||
# if user manually set it, check for mismatch
|
|
||||||
if [[ "$DETECTED_IPV6" != "true" ]]; then
|
|
||||||
if [[ -n "$MAILCOW_CONF" && -f "$MAILCOW_CONF" ]]; then
|
|
||||||
if grep -q '^ENABLE_IPV6=' "$MAILCOW_CONF"; then
|
|
||||||
sed -i 's/^ENABLE_IPV6=.*/ENABLE_IPV6=false/' "$MAILCOW_CONF"
|
|
||||||
else
|
|
||||||
echo "ENABLE_IPV6=false" >> "$MAILCOW_CONF"
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
export IPV6_BOOL=false
|
|
||||||
fi
|
|
||||||
echo "Skipping Docker IPv6 configuration because host does not support IPv6."
|
|
||||||
echo "Make sure to check if your docker daemon.json does not include \"enable_ipv6\": true if you do not want IPv6."
|
|
||||||
echo "IPv6 configuration complete: ENABLE_IPV6=false"
|
|
||||||
sleep 2
|
|
||||||
return
|
|
||||||
fi
|
|
||||||
|
|
||||||
docker_daemon_edit
|
|
||||||
|
|
||||||
if [[ -n "$MAILCOW_CONF" && -f "$MAILCOW_CONF" ]]; then
|
|
||||||
if grep -q '^ENABLE_IPV6=' "$MAILCOW_CONF"; then
|
|
||||||
sed -i 's/^ENABLE_IPV6=.*/ENABLE_IPV6=true/' "$MAILCOW_CONF"
|
|
||||||
else
|
|
||||||
echo "ENABLE_IPV6=true" >> "$MAILCOW_CONF"
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
export IPV6_BOOL=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "IPv6 configuration complete: ENABLE_IPV6=true"
|
|
||||||
}
|
|
||||||
@@ -1,96 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
# _modules/scripts/migrate_options.sh
|
|
||||||
# THIS SCRIPT IS DESIGNED TO BE RUNNING BY MAILCOW SCRIPTS ONLY!
|
|
||||||
# DO NOT, AGAIN, NOT TRY TO RUN THIS SCRIPT STANDALONE!!!!!!
|
|
||||||
|
|
||||||
migrate_config_options() {
|
|
||||||
|
|
||||||
sed -i --follow-symlinks '$a\' mailcow.conf
|
|
||||||
|
|
||||||
KEYS=(
|
|
||||||
SOLR_HEAP
|
|
||||||
SKIP_SOLR
|
|
||||||
SOLR_PORT
|
|
||||||
FLATCURVE_EXPERIMENTAL
|
|
||||||
DISABLE_IPv6
|
|
||||||
ACME_CONTACT
|
|
||||||
)
|
|
||||||
|
|
||||||
for key in "${KEYS[@]}"; do
|
|
||||||
if grep -q "${key}" mailcow.conf; then
|
|
||||||
case "${key}" in
|
|
||||||
SOLR_HEAP)
|
|
||||||
echo "Removing ${key} in mailcow.conf"
|
|
||||||
sed -i '/# Solr heap size in MB\b/d' mailcow.conf
|
|
||||||
sed -i '/# Solr is a prone to run\b/d' mailcow.conf
|
|
||||||
sed -i '/SOLR_HEAP\b/d' mailcow.conf
|
|
||||||
;;
|
|
||||||
SKIP_SOLR)
|
|
||||||
echo "Removing ${key} in mailcow.conf"
|
|
||||||
sed -i '/\bSkip Solr on low-memory\b/d' mailcow.conf
|
|
||||||
sed -i '/\bSolr is disabled by default\b/d' mailcow.conf
|
|
||||||
sed -i '/\bDisable Solr or\b/d' mailcow.conf
|
|
||||||
sed -i '/\bSKIP_SOLR\b/d' mailcow.conf
|
|
||||||
;;
|
|
||||||
SOLR_PORT)
|
|
||||||
echo "Removing ${key} in mailcow.conf"
|
|
||||||
sed -i '/\bSOLR_PORT\b/d' mailcow.conf
|
|
||||||
;;
|
|
||||||
FLATCURVE_EXPERIMENTAL)
|
|
||||||
echo "Removing ${key} in mailcow.conf"
|
|
||||||
sed -i '/\bFLATCURVE_EXPERIMENTAL\b/d' mailcow.conf
|
|
||||||
;;
|
|
||||||
DISABLE_IPv6)
|
|
||||||
echo "Migrating ${key} to ENABLE_IPv6 in mailcow.conf"
|
|
||||||
local old=$(grep '^DISABLE_IPv6=' "mailcow.conf" | cut -d'=' -f2)
|
|
||||||
local new
|
|
||||||
if [[ "$old" == "y" ]]; then
|
|
||||||
new="false"
|
|
||||||
else
|
|
||||||
new="true"
|
|
||||||
fi
|
|
||||||
sed -i '/^DISABLE_IPv6=/d' "mailcow.conf"
|
|
||||||
echo "ENABLE_IPV6=$new" >> "mailcow.conf"
|
|
||||||
;;
|
|
||||||
ACME_CONTACT)
|
|
||||||
echo "Deleting obsoleted ${key} in mailcow.conf"
|
|
||||||
sed -i '/^# Lets Encrypt registration contact information/d' mailcow.conf
|
|
||||||
sed -i '/^# Optional: Leave empty for none/d' mailcow.conf
|
|
||||||
sed -i '/^# This value is only used on first order!/d' mailcow.conf
|
|
||||||
sed -i '/^# Setting it at a later point will require the following steps:/d' mailcow.conf
|
|
||||||
sed -i '/^# https:\/\/docs.mailcow.email\/troubleshooting\/debug-reset_tls\//d' mailcow.conf
|
|
||||||
sed -i '/^ACME_CONTACT=.*/d' mailcow.conf
|
|
||||||
sed -i '/^#ACME_CONTACT=.*/d' mailcow.conf
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
solr_volume=$(docker volume ls -qf name=^${COMPOSE_PROJECT_NAME}_solr-vol-1)
|
|
||||||
if [[ -n $solr_volume ]]; then
|
|
||||||
echo -e "\e[34mSolr has been replaced within mailcow since 2025-01.\nThe volume $solr_volume is unused.\e[0m"
|
|
||||||
sleep 1
|
|
||||||
if [ ! "$FORCE" ]; then
|
|
||||||
read -r -p "Remove $solr_volume? [y/N] " response
|
|
||||||
if [[ "$response" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
|
|
||||||
echo -e "\e[33mRemoving $solr_volume...\e[0m"
|
|
||||||
docker volume rm $solr_volume || echo -e "\e[31mFailed to remove. Remove it manually!\e[0m"
|
|
||||||
echo -e "\e[32mSuccessfully removed $solr_volume!\e[0m"
|
|
||||||
else
|
|
||||||
echo -e "Not removing $solr_volume. Run \`docker volume rm $solr_volume\` manually if needed."
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
echo -e "\e[33mForce removing $solr_volume...\e[0m"
|
|
||||||
docker volume rm $solr_volume || echo -e "\e[31mFailed to remove. Remove it manually!\e[0m"
|
|
||||||
echo -e "\e[32mSuccessfully removed $solr_volume!\e[0m"
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Delete old fts.conf before forced switch to flatcurve to ensure update is working properly
|
|
||||||
FTS_CONF_PATH="${SCRIPT_DIR}/data/conf/dovecot/conf.d/fts.conf"
|
|
||||||
if [[ -f "$FTS_CONF_PATH" ]]; then
|
|
||||||
if grep -q "Autogenerated by mailcow" "$FTS_CONF_PATH"; then
|
|
||||||
rm -rf $FTS_CONF_PATH
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
@@ -1,300 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
# _modules/scripts/new_options.sh
|
|
||||||
# THIS SCRIPT IS DESIGNED TO BE RUNNING BY MAILCOW SCRIPTS ONLY!
|
|
||||||
# DO NOT, AGAIN, NOT TRY TO RUN THIS SCRIPT STANDALONE!!!!!!
|
|
||||||
|
|
||||||
adapt_new_options() {
|
|
||||||
|
|
||||||
CONFIG_ARRAY=(
|
|
||||||
"AUTODISCOVER_SAN"
|
|
||||||
"SKIP_LETS_ENCRYPT"
|
|
||||||
"SKIP_SOGO"
|
|
||||||
"USE_WATCHDOG"
|
|
||||||
"WATCHDOG_NOTIFY_EMAIL"
|
|
||||||
"WATCHDOG_NOTIFY_WEBHOOK"
|
|
||||||
"WATCHDOG_NOTIFY_WEBHOOK_BODY"
|
|
||||||
"WATCHDOG_NOTIFY_BAN"
|
|
||||||
"WATCHDOG_NOTIFY_START"
|
|
||||||
"WATCHDOG_EXTERNAL_CHECKS"
|
|
||||||
"WATCHDOG_SUBJECT"
|
|
||||||
"SKIP_CLAMD"
|
|
||||||
"SKIP_OLEFY"
|
|
||||||
"SKIP_IP_CHECK"
|
|
||||||
"ADDITIONAL_SAN"
|
|
||||||
"DOVEADM_PORT"
|
|
||||||
"IPV4_NETWORK"
|
|
||||||
"IPV6_NETWORK"
|
|
||||||
"LOG_LINES"
|
|
||||||
"SNAT_TO_SOURCE"
|
|
||||||
"SNAT6_TO_SOURCE"
|
|
||||||
"COMPOSE_PROJECT_NAME"
|
|
||||||
"DOCKER_COMPOSE_VERSION"
|
|
||||||
"SQL_PORT"
|
|
||||||
"API_KEY"
|
|
||||||
"API_KEY_READ_ONLY"
|
|
||||||
"API_ALLOW_FROM"
|
|
||||||
"MAILDIR_GC_TIME"
|
|
||||||
"MAILDIR_SUB"
|
|
||||||
"ACL_ANYONE"
|
|
||||||
"FTS_HEAP"
|
|
||||||
"FTS_PROCS"
|
|
||||||
"SKIP_FTS"
|
|
||||||
"ENABLE_SSL_SNI"
|
|
||||||
"ALLOW_ADMIN_EMAIL_LOGIN"
|
|
||||||
"SKIP_HTTP_VERIFICATION"
|
|
||||||
"SOGO_EXPIRE_SESSION"
|
|
||||||
"SOGO_URL_ENCRYPTION_KEY"
|
|
||||||
"REDIS_PORT"
|
|
||||||
"REDISPASS"
|
|
||||||
"DOVECOT_MASTER_USER"
|
|
||||||
"DOVECOT_MASTER_PASS"
|
|
||||||
"MAILCOW_PASS_SCHEME"
|
|
||||||
"ADDITIONAL_SERVER_NAMES"
|
|
||||||
"WATCHDOG_VERBOSE"
|
|
||||||
"WEBAUTHN_ONLY_TRUSTED_VENDORS"
|
|
||||||
"SPAMHAUS_DQS_KEY"
|
|
||||||
"SKIP_UNBOUND_HEALTHCHECK"
|
|
||||||
"DISABLE_NETFILTER_ISOLATION_RULE"
|
|
||||||
"HTTP_REDIRECT"
|
|
||||||
"ENABLE_IPV6"
|
|
||||||
)
|
|
||||||
|
|
||||||
sed -i --follow-symlinks '$a\' mailcow.conf
|
|
||||||
for option in ${CONFIG_ARRAY[@]}; do
|
|
||||||
if grep -q "${option}" mailcow.conf; then
|
|
||||||
continue
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "Adding new option \"${option}\" to mailcow.conf"
|
|
||||||
|
|
||||||
case "${option}" in
|
|
||||||
AUTODISCOVER_SAN)
|
|
||||||
echo '# Obtain certificates for autodiscover.* and autoconfig.* domains.' >> mailcow.conf
|
|
||||||
echo '# This can be useful to switch off in case you are in a scenario where a reverse proxy already handles those.' >> mailcow.conf
|
|
||||||
echo '# There are mixed scenarios where ports 80,443 are occupied and you do not want to share certs' >> mailcow.conf
|
|
||||||
echo '# between services. So acme-mailcow obtains for maildomains and all web-things get handled' >> mailcow.conf
|
|
||||||
echo '# in the reverse proxy.' >> mailcow.conf
|
|
||||||
echo 'AUTODISCOVER_SAN=y' >> mailcow.conf
|
|
||||||
;;
|
|
||||||
|
|
||||||
DOCKER_COMPOSE_VERSION)
|
|
||||||
echo "# Used Docker Compose version" >> mailcow.conf
|
|
||||||
echo "# Switch here between native (compose plugin) and standalone" >> mailcow.conf
|
|
||||||
echo "# For more informations take a look at the mailcow docs regarding the configuration options." >> mailcow.conf
|
|
||||||
echo "# Normally this should be untouched but if you decided to use either of those you can switch it manually here." >> mailcow.conf
|
|
||||||
echo "# Please be aware that at least one of those variants should be installed on your machine or mailcow will fail." >> mailcow.conf
|
|
||||||
echo "" >> mailcow.conf
|
|
||||||
echo "DOCKER_COMPOSE_VERSION=${DOCKER_COMPOSE_VERSION}" >> mailcow.conf
|
|
||||||
;;
|
|
||||||
|
|
||||||
DOVEADM_PORT)
|
|
||||||
echo "DOVEADM_PORT=127.0.0.1:19991" >> mailcow.conf
|
|
||||||
;;
|
|
||||||
|
|
||||||
LOG_LINES)
|
|
||||||
echo '# Max log lines per service to keep in Redis logs' >> mailcow.conf
|
|
||||||
echo "LOG_LINES=9999" >> mailcow.conf
|
|
||||||
;;
|
|
||||||
IPV4_NETWORK)
|
|
||||||
echo '# Internal IPv4 /24 subnet, format n.n.n. (expands to n.n.n.0/24)' >> mailcow.conf
|
|
||||||
echo "IPV4_NETWORK=172.22.1" >> mailcow.conf
|
|
||||||
;;
|
|
||||||
IPV6_NETWORK)
|
|
||||||
echo '# Internal IPv6 subnet in fc00::/7' >> mailcow.conf
|
|
||||||
echo "IPV6_NETWORK=fd4d:6169:6c63:6f77::/64" >> mailcow.conf
|
|
||||||
;;
|
|
||||||
SQL_PORT)
|
|
||||||
echo '# Bind SQL to 127.0.0.1 on port 13306' >> mailcow.conf
|
|
||||||
echo "SQL_PORT=127.0.0.1:13306" >> mailcow.conf
|
|
||||||
;;
|
|
||||||
API_KEY)
|
|
||||||
echo '# Create or override API key for web UI' >> mailcow.conf
|
|
||||||
echo "#API_KEY=" >> mailcow.conf
|
|
||||||
;;
|
|
||||||
API_KEY_READ_ONLY)
|
|
||||||
echo '# Create or override read-only API key for web UI' >> mailcow.conf
|
|
||||||
echo "#API_KEY_READ_ONLY=" >> mailcow.conf
|
|
||||||
;;
|
|
||||||
API_ALLOW_FROM)
|
|
||||||
echo '# Must be set for API_KEY to be active' >> mailcow.conf
|
|
||||||
echo '# IPs only, no networks (networks can be set via UI)' >> mailcow.conf
|
|
||||||
echo "#API_ALLOW_FROM=" >> mailcow.conf
|
|
||||||
;;
|
|
||||||
SNAT_TO_SOURCE)
|
|
||||||
echo '# Use this IPv4 for outgoing connections (SNAT)' >> mailcow.conf
|
|
||||||
echo "#SNAT_TO_SOURCE=" >> mailcow.conf
|
|
||||||
;;
|
|
||||||
SNAT6_TO_SOURCE)
|
|
||||||
echo '# Use this IPv6 for outgoing connections (SNAT)' >> mailcow.conf
|
|
||||||
echo "#SNAT6_TO_SOURCE=" >> mailcow.conf
|
|
||||||
;;
|
|
||||||
MAILDIR_GC_TIME)
|
|
||||||
echo '# Garbage collector cleanup' >> mailcow.conf
|
|
||||||
echo '# Deleted domains and mailboxes are moved to /var/vmail/_garbage/timestamp_sanitizedstring' >> mailcow.conf
|
|
||||||
echo '# How long should objects remain in the garbage until they are being deleted? (value in minutes)' >> mailcow.conf
|
|
||||||
echo '# Check interval is hourly' >> mailcow.conf
|
|
||||||
echo 'MAILDIR_GC_TIME=1440' >> mailcow.conf
|
|
||||||
;;
|
|
||||||
ACL_ANYONE)
|
|
||||||
echo '# Set this to "allow" to enable the anyone pseudo user. Disabled by default.' >> mailcow.conf
|
|
||||||
echo '# When enabled, ACL can be created, that apply to "All authenticated users"' >> mailcow.conf
|
|
||||||
echo '# This should probably only be activated on mail hosts, that are used exclusively by one organisation.' >> mailcow.conf
|
|
||||||
echo '# Otherwise a user might share data with too many other users.' >> mailcow.conf
|
|
||||||
echo 'ACL_ANYONE=disallow' >> mailcow.conf
|
|
||||||
;;
|
|
||||||
FTS_HEAP)
|
|
||||||
echo '# Dovecot Indexing (FTS) Process maximum heap size in MB, there is no recommendation, please see Dovecot docs.' >> mailcow.conf
|
|
||||||
echo '# Flatcurve is used as FTS Engine. It is supposed to be pretty efficient in CPU and RAM consumption.' >> mailcow.conf
|
|
||||||
echo '# Please always monitor your Resource consumption!' >> mailcow.conf
|
|
||||||
echo "FTS_HEAP=128" >> mailcow.conf
|
|
||||||
;;
|
|
||||||
SKIP_FTS)
|
|
||||||
echo '# Skip FTS (Fulltext Search) for Dovecot on low-memory, low-threaded systems or if you simply want to disable it.' >> mailcow.conf
|
|
||||||
echo "# Dovecot inside mailcow use Flatcurve as FTS Backend." >> mailcow.conf
|
|
||||||
echo "SKIP_FTS=y" >> mailcow.conf
|
|
||||||
;;
|
|
||||||
FTS_PROCS)
|
|
||||||
echo '# Controls how many processes the Dovecot indexing process can spawn at max.' >> mailcow.conf
|
|
||||||
echo '# Too many indexing processes can use a lot of CPU and Disk I/O' >> mailcow.conf
|
|
||||||
echo '# Please visit: https://doc.dovecot.org/configuration_manual/service_configuration/#indexer-worker for more informations' >> mailcow.conf
|
|
||||||
echo "FTS_PROCS=1" >> mailcow.conf
|
|
||||||
;;
|
|
||||||
ENABLE_SSL_SNI)
|
|
||||||
echo '# Create seperate certificates for all domains - y/n' >> mailcow.conf
|
|
||||||
echo '# this will allow adding more than 100 domains, but some email clients will not be able to connect with alternative hostnames' >> mailcow.conf
|
|
||||||
echo '# see https://wiki.dovecot.org/SSL/SNIClientSupport' >> mailcow.conf
|
|
||||||
echo "ENABLE_SSL_SNI=n" >> mailcow.conf
|
|
||||||
;;
|
|
||||||
SKIP_SOGO)
|
|
||||||
echo '# Skip SOGo: Will disable SOGo integration and therefore webmail, DAV protocols and ActiveSync support (experimental, unsupported, not fully implemented) - y/n' >> mailcow.conf
|
|
||||||
echo "SKIP_SOGO=n" >> mailcow.conf
|
|
||||||
;;
|
|
||||||
MAILDIR_SUB)
|
|
||||||
echo '# MAILDIR_SUB defines a path in a users virtual home to keep the maildir in. Leave empty for updated setups.' >> mailcow.conf
|
|
||||||
echo "#MAILDIR_SUB=Maildir" >> mailcow.conf
|
|
||||||
echo "MAILDIR_SUB=" >> mailcow.conf
|
|
||||||
;;
|
|
||||||
WATCHDOG_NOTIFY_WEBHOOK)
|
|
||||||
echo '# Send notifications to a webhook URL that receives a POST request with the content type "application/json".' >> mailcow.conf
|
|
||||||
echo '# You can use this to send notifications to services like Discord, Slack and others.' >> mailcow.conf
|
|
||||||
echo '#WATCHDOG_NOTIFY_WEBHOOK=https://discord.com/api/webhooks/XXXXXXXXXXXXXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX' >> mailcow.conf
|
|
||||||
;;
|
|
||||||
WATCHDOG_NOTIFY_WEBHOOK_BODY)
|
|
||||||
echo '# JSON body included in the webhook POST request. Needs to be in single quotes.' >> mailcow.conf
|
|
||||||
echo '# Following variables are available: SUBJECT, BODY' >> mailcow.conf
|
|
||||||
WEBHOOK_BODY='{"username": "mailcow Watchdog", "content": "**${SUBJECT}**\n${BODY}"}'
|
|
||||||
echo "#WATCHDOG_NOTIFY_WEBHOOK_BODY='${WEBHOOK_BODY}'" >> mailcow.conf
|
|
||||||
;;
|
|
||||||
WATCHDOG_NOTIFY_BAN)
|
|
||||||
echo '# Notify about banned IP. Includes whois lookup.' >> mailcow.conf
|
|
||||||
echo "WATCHDOG_NOTIFY_BAN=y" >> mailcow.conf
|
|
||||||
;;
|
|
||||||
WATCHDOG_NOTIFY_START)
|
|
||||||
echo '# Send a notification when the watchdog is started.' >> mailcow.conf
|
|
||||||
echo "WATCHDOG_NOTIFY_START=y" >> mailcow.conf
|
|
||||||
;;
|
|
||||||
WATCHDOG_SUBJECT)
|
|
||||||
echo '# Subject for watchdog mails. Defaults to "Watchdog ALERT" followed by the error message.' >> mailcow.conf
|
|
||||||
echo "#WATCHDOG_SUBJECT=" >> mailcow.conf
|
|
||||||
;;
|
|
||||||
WATCHDOG_EXTERNAL_CHECKS)
|
|
||||||
echo '# Checks if mailcow is an open relay. Requires a SAL. More checks will follow.' >> mailcow.conf
|
|
||||||
echo '# No data is collected. Opt-in and anonymous.' >> mailcow.conf
|
|
||||||
echo '# Will only work with unmodified mailcow setups.' >> mailcow.conf
|
|
||||||
echo "WATCHDOG_EXTERNAL_CHECKS=n" >> mailcow.conf
|
|
||||||
;;
|
|
||||||
SOGO_EXPIRE_SESSION)
|
|
||||||
echo '# SOGo session timeout in minutes' >> mailcow.conf
|
|
||||||
echo "SOGO_EXPIRE_SESSION=480" >> mailcow.conf
|
|
||||||
;;
|
|
||||||
REDIS_PORT)
|
|
||||||
echo "REDIS_PORT=127.0.0.1:7654" >> mailcow.conf
|
|
||||||
;;
|
|
||||||
DOVECOT_MASTER_USER)
|
|
||||||
echo '# DOVECOT_MASTER_USER and _PASS must _both_ be provided. No special chars.' >> mailcow.conf
|
|
||||||
echo '# Empty by default to auto-generate master user and password on start.' >> mailcow.conf
|
|
||||||
echo '# User expands to DOVECOT_MASTER_USER@mailcow.local' >> mailcow.conf
|
|
||||||
echo '# LEAVE EMPTY IF UNSURE' >> mailcow.conf
|
|
||||||
echo "DOVECOT_MASTER_USER=" >> mailcow.conf
|
|
||||||
;;
|
|
||||||
DOVECOT_MASTER_PASS)
|
|
||||||
echo '# LEAVE EMPTY IF UNSURE' >> mailcow.conf
|
|
||||||
echo "DOVECOT_MASTER_PASS=" >> mailcow.conf
|
|
||||||
;;
|
|
||||||
MAILCOW_PASS_SCHEME)
|
|
||||||
echo '# Password hash algorithm' >> mailcow.conf
|
|
||||||
echo '# Only certain password hash algorithm are supported. For a fully list of supported schemes,' >> mailcow.conf
|
|
||||||
echo '# see https://docs.mailcow.email/models/model-passwd/' >> mailcow.conf
|
|
||||||
echo "MAILCOW_PASS_SCHEME=BLF-CRYPT" >> mailcow.conf
|
|
||||||
;;
|
|
||||||
ADDITIONAL_SERVER_NAMES)
|
|
||||||
echo '# Additional server names for mailcow UI' >> mailcow.conf
|
|
||||||
echo '#' >> mailcow.conf
|
|
||||||
echo '# Specify alternative addresses for the mailcow UI to respond to' >> mailcow.conf
|
|
||||||
echo '# This is useful when you set mail.* as ADDITIONAL_SAN and want to make sure mail.maildomain.com will always point to the mailcow UI.' >> mailcow.conf
|
|
||||||
echo '# If the server name does not match a known site, Nginx decides by best-guess and may redirect users to the wrong web root.' >> mailcow.conf
|
|
||||||
echo '# You can understand this as server_name directive in Nginx.' >> mailcow.conf
|
|
||||||
echo '# Comma separated list without spaces! Example: ADDITIONAL_SERVER_NAMES=a.b.c,d.e.f' >> mailcow.conf
|
|
||||||
echo 'ADDITIONAL_SERVER_NAMES=' >> mailcow.conf
|
|
||||||
;;
|
|
||||||
WEBAUTHN_ONLY_TRUSTED_VENDORS)
|
|
||||||
echo "# WebAuthn device manufacturer verification" >> mailcow.conf
|
|
||||||
echo '# After setting WEBAUTHN_ONLY_TRUSTED_VENDORS=y only devices from trusted manufacturers are allowed' >> mailcow.conf
|
|
||||||
echo '# root certificates can be placed for validation under mailcow-dockerized/data/web/inc/lib/WebAuthn/rootCertificates' >> mailcow.conf
|
|
||||||
echo 'WEBAUTHN_ONLY_TRUSTED_VENDORS=n' >> mailcow.conf
|
|
||||||
;;
|
|
||||||
SPAMHAUS_DQS_KEY)
|
|
||||||
echo "# Spamhaus Data Query Service Key" >> mailcow.conf
|
|
||||||
echo '# Optional: Leave empty for none' >> mailcow.conf
|
|
||||||
echo '# Enter your key here if you are using a blocked ASN (OVH, AWS, Cloudflare e.g) for the unregistered Spamhaus Blocklist.' >> mailcow.conf
|
|
||||||
echo '# If empty, it will completely disable Spamhaus blocklists if it detects that you are running on a server using a blocked AS.' >> mailcow.conf
|
|
||||||
echo '# Otherwise it will work as usual.' >> mailcow.conf
|
|
||||||
echo 'SPAMHAUS_DQS_KEY=' >> mailcow.conf
|
|
||||||
;;
|
|
||||||
WATCHDOG_VERBOSE)
|
|
||||||
echo '# Enable watchdog verbose logging' >> mailcow.conf
|
|
||||||
echo 'WATCHDOG_VERBOSE=n' >> mailcow.conf
|
|
||||||
;;
|
|
||||||
SKIP_UNBOUND_HEALTHCHECK)
|
|
||||||
echo '# Skip Unbound (DNS Resolver) Healthchecks (NOT Recommended!) - y/n' >> mailcow.conf
|
|
||||||
echo 'SKIP_UNBOUND_HEALTHCHECK=n' >> mailcow.conf
|
|
||||||
;;
|
|
||||||
DISABLE_NETFILTER_ISOLATION_RULE)
|
|
||||||
echo '# Prevent netfilter from setting an iptables/nftables rule to isolate the mailcow docker network - y/n' >> mailcow.conf
|
|
||||||
echo '# CAUTION: Disabling this may expose container ports to other neighbors on the same subnet, even if the ports are bound to localhost' >> mailcow.conf
|
|
||||||
echo 'DISABLE_NETFILTER_ISOLATION_RULE=n' >> mailcow.conf
|
|
||||||
;;
|
|
||||||
HTTP_REDIRECT)
|
|
||||||
echo '# Redirect HTTP connections to HTTPS - y/n' >> mailcow.conf
|
|
||||||
echo 'HTTP_REDIRECT=n' >> mailcow.conf
|
|
||||||
;;
|
|
||||||
ENABLE_IPV6)
|
|
||||||
echo '# IPv6 Controller Section' >> mailcow.conf
|
|
||||||
echo '# This variable controls the usage of IPv6 within mailcow.' >> mailcow.conf
|
|
||||||
echo '# Can either be true or false | Defaults to true' >> mailcow.conf
|
|
||||||
echo '# WARNING: MAKE SURE TO PROPERLY CONFIGURE IPv6 ON YOUR HOST FIRST BEFORE ENABLING THIS AS FAULTY CONFIGURATIONS CAN LEAD TO OPEN RELAYS!' >> mailcow.conf
|
|
||||||
echo '# A COMPLETE DOCKER STACK REBUILD (compose down && compose up -d) IS NEEDED TO APPLY THIS.' >> mailcow.conf
|
|
||||||
echo ENABLE_IPV6=${IPV6_BOOL} >> mailcow.conf
|
|
||||||
;;
|
|
||||||
SKIP_CLAMD)
|
|
||||||
echo '# Skip ClamAV (clamd-mailcow) anti-virus (Rspamd will auto-detect a missing ClamAV container) - y/n' >> mailcow.conf
|
|
||||||
echo 'SKIP_CLAMD=n' >> mailcow.conf
|
|
||||||
;;
|
|
||||||
SKIP_OLEFY)
|
|
||||||
echo '# Skip Olefy (olefy-mailcow) anti-virus for Office documents (Rspamd will auto-detect a missing Olefy container) - y/n' >> mailcow.conf
|
|
||||||
echo 'SKIP_OLEFY=n' >> mailcow.conf
|
|
||||||
;;
|
|
||||||
REDISPASS)
|
|
||||||
echo "REDISPASS=$(LC_ALL=C </dev/urandom tr -dc A-Za-z0-9 2>/dev/null | head -c 28)" >> mailcow.conf
|
|
||||||
;;
|
|
||||||
SOGO_URL_ENCRYPTION_KEY)
|
|
||||||
echo '# SOGo URL encryption key (exactly 16 characters, limited to A–Z, a–z, 0–9)' >> mailcow.conf
|
|
||||||
echo '# This key is used to encrypt email addresses within SOGo URLs' >> mailcow.conf
|
|
||||||
echo "SOGO_URL_ENCRYPTION_KEY=$(LC_ALL=C </dev/urandom tr -dc A-Za-z0-9 2>/dev/null | head -c 16)" >> mailcow.conf
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
echo "${option}=" >> mailcow.conf
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
done
|
|
||||||
}
|
|
||||||
@@ -159,6 +159,18 @@ while true; do
|
|||||||
fi
|
fi
|
||||||
if [[ ! -f ${ACME_BASE}/acme/account.pem ]]; then
|
if [[ ! -f ${ACME_BASE}/acme/account.pem ]]; then
|
||||||
log_f "Generating missing Lets Encrypt account key..."
|
log_f "Generating missing Lets Encrypt account key..."
|
||||||
|
if [[ ! -z ${ACME_CONTACT} ]]; then
|
||||||
|
if ! verify_email "${ACME_CONTACT}"; then
|
||||||
|
log_f "Invalid email address, will not start registration!"
|
||||||
|
sleep 365d
|
||||||
|
exec $(readlink -f "$0")
|
||||||
|
else
|
||||||
|
ACME_CONTACT_PARAMETER="--contact mailto:${ACME_CONTACT}"
|
||||||
|
log_f "Valid email address, using ${ACME_CONTACT} for registration"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
ACME_CONTACT_PARAMETER=""
|
||||||
|
fi
|
||||||
openssl genrsa 4096 > ${ACME_BASE}/acme/account.pem
|
openssl genrsa 4096 > ${ACME_BASE}/acme/account.pem
|
||||||
else
|
else
|
||||||
log_f "Using existing Lets Encrypt account key ${ACME_BASE}/acme/account.pem"
|
log_f "Using existing Lets Encrypt account key ${ACME_BASE}/acme/account.pem"
|
||||||
@@ -206,7 +218,7 @@ while true; do
|
|||||||
|
|
||||||
if [[ ${AUTODISCOVER_SAN} == "y" ]]; then
|
if [[ ${AUTODISCOVER_SAN} == "y" ]]; then
|
||||||
# Fetch certs for autoconfig and autodiscover subdomains
|
# Fetch certs for autoconfig and autodiscover subdomains
|
||||||
ADDITIONAL_WC_ARR+=('autodiscover' 'autoconfig' 'mta-sts')
|
ADDITIONAL_WC_ARR+=('autodiscover' 'autoconfig')
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [[ ${SKIP_IP_CHECK} != "y" ]]; then
|
if [[ ${SKIP_IP_CHECK} != "y" ]]; then
|
||||||
@@ -287,7 +299,7 @@ while true; do
|
|||||||
VALIDATED_CERTIFICATES+=("${CERT_NAME}")
|
VALIDATED_CERTIFICATES+=("${CERT_NAME}")
|
||||||
|
|
||||||
# obtain server certificate if required
|
# obtain server certificate if required
|
||||||
DOMAINS=${SERVER_SAN_VALIDATED[@]} /srv/obtain-certificate.sh rsa
|
ACME_CONTACT_PARAMETER=${ACME_CONTACT_PARAMETER} DOMAINS=${SERVER_SAN_VALIDATED[@]} /srv/obtain-certificate.sh rsa
|
||||||
RETURN="$?"
|
RETURN="$?"
|
||||||
if [[ "$RETURN" == "0" ]]; then # 0 = cert created successfully
|
if [[ "$RETURN" == "0" ]]; then # 0 = cert created successfully
|
||||||
CERT_AMOUNT_CHANGED=1
|
CERT_AMOUNT_CHANGED=1
|
||||||
|
|||||||
@@ -93,8 +93,8 @@ until dig letsencrypt.org +time=3 +tries=1 @unbound > /dev/null; do
|
|||||||
sleep 2
|
sleep 2
|
||||||
done
|
done
|
||||||
log_f "Resolver OK"
|
log_f "Resolver OK"
|
||||||
log_f "Using command acme-tiny ${DIRECTORY_URL} --account-key ${ACME_BASE}/acme/account.pem --disable-check --csr ${CSR} --acme-dir /var/www/acme/"
|
log_f "Using command acme-tiny ${DIRECTORY_URL} ${ACME_CONTACT_PARAMETER} --account-key ${ACME_BASE}/acme/account.pem --disable-check --csr ${CSR} --acme-dir /var/www/acme/"
|
||||||
ACME_RESPONSE=$(acme-tiny ${DIRECTORY_URL} \
|
ACME_RESPONSE=$(acme-tiny ${DIRECTORY_URL} ${ACME_CONTACT_PARAMETER} \
|
||||||
--account-key ${ACME_BASE}/acme/account.pem \
|
--account-key ${ACME_BASE}/acme/account.pem \
|
||||||
--disable-check \
|
--disable-check \
|
||||||
--csr ${CSR} \
|
--csr ${CSR} \
|
||||||
|
|||||||
66
data/Dockerfiles/bootstrap/main.py
Normal file
66
data/Dockerfiles/bootstrap/main.py
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import signal
|
||||||
|
import ipaddress
|
||||||
|
|
||||||
|
def handle_sigterm(signum, frame):
|
||||||
|
print("Received SIGTERM, exiting gracefully...")
|
||||||
|
sys.exit(0)
|
||||||
|
|
||||||
|
def get_mysql_config(service_name):
|
||||||
|
db_config = {
|
||||||
|
"user": os.getenv("DBUSER") or os.getenv("MYSQL_USER"),
|
||||||
|
"password": os.getenv("DBPASS") or os.getenv("MYSQL_PASSWORD"),
|
||||||
|
"database": os.getenv("DBNAME") or os.getenv("MYSQL_DATABASE"),
|
||||||
|
"connection_timeout": 2,
|
||||||
|
"service_table": "service_settings",
|
||||||
|
"service_types": [service_name]
|
||||||
|
}
|
||||||
|
|
||||||
|
db_host = os.getenv("DB_HOST")
|
||||||
|
if db_host.startswith("/"):
|
||||||
|
db_config["host"] = "localhost"
|
||||||
|
db_config["unix_socket"] = db_host
|
||||||
|
else:
|
||||||
|
db_config["host"] = db_host
|
||||||
|
|
||||||
|
return db_config
|
||||||
|
|
||||||
|
def get_redis_config():
|
||||||
|
redis_config = {
|
||||||
|
"read_host": os.getenv("REDIS_HOST"),
|
||||||
|
"read_port": 6379,
|
||||||
|
"write_host": os.getenv("REDIS_SLAVEOF_IP") or os.getenv("REDIS_HOST"),
|
||||||
|
"write_port": int(os.getenv("REDIS_SLAVEOF_PORT") or 6379),
|
||||||
|
"password": os.getenv("REDISPASS"),
|
||||||
|
"db": 0
|
||||||
|
}
|
||||||
|
|
||||||
|
return redis_config
|
||||||
|
|
||||||
|
def main():
|
||||||
|
signal.signal(signal.SIGTERM, handle_sigterm)
|
||||||
|
|
||||||
|
container_name = os.getenv("CONTAINER_NAME")
|
||||||
|
service_name = container_name.replace("-mailcow", "").replace("-", "")
|
||||||
|
module_name = f"Bootstrap{service_name.capitalize()}"
|
||||||
|
|
||||||
|
try:
|
||||||
|
mod = __import__(f"modules.{module_name}", fromlist=[module_name])
|
||||||
|
Bootstrap = getattr(mod, module_name)
|
||||||
|
except (ImportError, AttributeError) as e:
|
||||||
|
print(f"Failed to load bootstrap module for: {container_name} → {module_name}")
|
||||||
|
print(str(e))
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
b = Bootstrap(
|
||||||
|
container=container_name,
|
||||||
|
service=service_name,
|
||||||
|
db_config=get_mysql_config(service_name),
|
||||||
|
redis_config=get_redis_config()
|
||||||
|
)
|
||||||
|
|
||||||
|
b.bootstrap()
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
||||||
827
data/Dockerfiles/bootstrap/modules/BootstrapBase.py
Normal file
827
data/Dockerfiles/bootstrap/modules/BootstrapBase.py
Normal file
@@ -0,0 +1,827 @@
|
|||||||
|
import os
|
||||||
|
import pwd
|
||||||
|
import grp
|
||||||
|
import shutil
|
||||||
|
import secrets
|
||||||
|
import string
|
||||||
|
import subprocess
|
||||||
|
import time
|
||||||
|
import socket
|
||||||
|
import re
|
||||||
|
import redis
|
||||||
|
import hashlib
|
||||||
|
import json
|
||||||
|
import psutil
|
||||||
|
import signal
|
||||||
|
from urllib.parse import quote
|
||||||
|
from pathlib import Path
|
||||||
|
import dns.resolver
|
||||||
|
import mysql.connector
|
||||||
|
|
||||||
|
class BootstrapBase:
|
||||||
|
def __init__(self, container, service, db_config, redis_config):
|
||||||
|
self.container = container
|
||||||
|
self.service = service
|
||||||
|
self.db_config = db_config
|
||||||
|
self.redis_config = redis_config
|
||||||
|
|
||||||
|
self.env = None
|
||||||
|
self.env_vars = None
|
||||||
|
self.mysql_conn = None
|
||||||
|
self.redis_connr = None
|
||||||
|
self.redis_connw = None
|
||||||
|
|
||||||
|
def render_config(self, config_dir):
|
||||||
|
"""
|
||||||
|
Renders multiple Jinja2 templates from a config.json file in a given directory.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
config_dir (str or Path): Path to the directory containing config.json
|
||||||
|
|
||||||
|
Behavior:
|
||||||
|
- Renders each template defined in config.json
|
||||||
|
- Writes the result to the specified output path
|
||||||
|
- Also copies the rendered file to: <config_dir>/rendered_configs/<relative_output_path>
|
||||||
|
"""
|
||||||
|
|
||||||
|
config_dir = Path(config_dir)
|
||||||
|
config_path = config_dir / "config.json"
|
||||||
|
|
||||||
|
if not config_path.exists():
|
||||||
|
print(f"config.json not found in: {config_dir}")
|
||||||
|
return
|
||||||
|
|
||||||
|
with config_path.open("r") as f:
|
||||||
|
entries = json.load(f)
|
||||||
|
|
||||||
|
for entry in entries:
|
||||||
|
template_name = entry["template"]
|
||||||
|
output_path = Path(entry["output"])
|
||||||
|
clean_blank_lines = entry.get("clean_blank_lines", False)
|
||||||
|
if_not_exists = entry.get("if_not_exists", False)
|
||||||
|
|
||||||
|
if if_not_exists and output_path.exists():
|
||||||
|
print(f"Skipping {output_path} (already exists)")
|
||||||
|
continue
|
||||||
|
|
||||||
|
output_path.parent.mkdir(parents=True, exist_ok=True)
|
||||||
|
|
||||||
|
try:
|
||||||
|
template = self.env.get_template(template_name)
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Template not found: {template_name} ({e})")
|
||||||
|
continue
|
||||||
|
|
||||||
|
rendered = template.render(self.env_vars)
|
||||||
|
|
||||||
|
if clean_blank_lines:
|
||||||
|
rendered = "\n".join(line for line in rendered.splitlines() if line.strip())
|
||||||
|
|
||||||
|
rendered = rendered.replace('\r\n', '\n').replace('\r', '\n')
|
||||||
|
|
||||||
|
with output_path.open("w") as f:
|
||||||
|
f.write(rendered)
|
||||||
|
|
||||||
|
rendered_copy_path = config_dir / "rendered_configs" / output_path.name
|
||||||
|
rendered_copy_path.parent.mkdir(parents=True, exist_ok=True)
|
||||||
|
self.copy_file(output_path, rendered_copy_path)
|
||||||
|
|
||||||
|
print(f"Rendered {template_name} → {output_path}")
|
||||||
|
|
||||||
|
def prepare_template_vars(self, overwrite_path, extra_vars = None):
|
||||||
|
"""
|
||||||
|
Loads and merges environment variables for Jinja2 templates from multiple sources, and registers custom template filters.
|
||||||
|
|
||||||
|
This method combines variables from:
|
||||||
|
1. System environment variables
|
||||||
|
2. The MySQL `service_settings` table (filtered by service type if defined)
|
||||||
|
3. An optional `extra_vars` dictionary
|
||||||
|
4. A JSON overwrite file (if it exists at the given path)
|
||||||
|
|
||||||
|
Also registers custom Jinja2 filters.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
overwrite_path (str or Path): Path to a JSON file containing key-value overrides.
|
||||||
|
extra_vars (dict, optional): Additional variables to merge into the environment.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
dict: A dictionary containing all resolved template variables.
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
Prints errors if database fetch or JSON parsing fails, but does not raise exceptions.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# 1. setup filters
|
||||||
|
self.env.filters['sha1'] = self.sha1_filter
|
||||||
|
self.env.filters['urlencode'] = self.urlencode_filter
|
||||||
|
self.env.filters['escape_quotes'] = self.escape_quotes_filter
|
||||||
|
|
||||||
|
# 2. Load env vars
|
||||||
|
env_vars = dict(os.environ)
|
||||||
|
|
||||||
|
# 3. Load from MySQL
|
||||||
|
try:
|
||||||
|
cursor = self.mysql_conn.cursor()
|
||||||
|
|
||||||
|
if self.db_config['service_types']:
|
||||||
|
placeholders = ','.join(['%s'] * len(self.db_config['service_types']))
|
||||||
|
sql = f"SELECT `key`, `value` FROM {self.db_config['service_table']} WHERE `type` IN ({placeholders})"
|
||||||
|
cursor.execute(sql, self.db_config['service_types'])
|
||||||
|
else:
|
||||||
|
cursor.execute(f"SELECT `key`, `value` FROM {self.db_config['service_table']}")
|
||||||
|
|
||||||
|
for key, value in cursor.fetchall():
|
||||||
|
env_vars[key] = value
|
||||||
|
|
||||||
|
cursor.close()
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Failed to fetch DB service settings: {e}")
|
||||||
|
|
||||||
|
# 4. Load extra vars
|
||||||
|
if extra_vars:
|
||||||
|
env_vars.update(extra_vars)
|
||||||
|
|
||||||
|
# 5. Load overwrites
|
||||||
|
overwrite_path = Path(overwrite_path)
|
||||||
|
if overwrite_path.exists():
|
||||||
|
try:
|
||||||
|
with overwrite_path.open("r") as f:
|
||||||
|
overwrite_data = json.load(f)
|
||||||
|
env_vars.update(overwrite_data)
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Failed to parse overwrites: {e}")
|
||||||
|
|
||||||
|
return env_vars
|
||||||
|
|
||||||
|
def set_timezone(self):
    """
    Persists the container timezone from the TZ environment variable.

    When TZ is set and non-empty, its value plus a trailing newline is
    written to /etc/timezone. When TZ is unset, this is a no-op.
    """
    tz_value = os.getenv("TZ")
    if not tz_value:
        return
    with open("/etc/timezone", "w") as tz_file:
        tz_file.write(f"{tz_value}\n")
|
||||||
|
|
||||||
|
def set_syslog_redis(self):
    """
    Switches syslog-ng to the Redis-slave configuration when applicable.

    When the REDIS_SLAVEOF_IP environment variable is set, the slave-specific
    config file replaces the main syslog-ng configuration. Otherwise nothing
    is changed.
    """
    if os.getenv("REDIS_SLAVEOF_IP"):
        shutil.copy("/etc/syslog-ng/syslog-ng-redis_slave.conf", "/etc/syslog-ng/syslog-ng.conf")
|
||||||
|
|
||||||
|
def rsync_file(self, src, dst, recursive=False, owner=None, mode=None):
    """
    Copies a file or directory into `dst` with rsync -a, then optionally
    fixes ownership and permissions on the destination.

    Args:
        src (str or Path): Source file or directory.
        dst (str or Path): Destination directory (created if missing).
        recursive (bool): If True, copy the directory *contents* (rsync
            trailing-slash semantics) instead of the directory itself.
        owner (tuple, optional): (user, group) applied recursively to dst.
        mode (int, optional): Permission bits (e.g. 0o644) applied to dst.
    """
    source = Path(src)
    target = Path(dst)
    target.mkdir(parents=True, exist_ok=True)

    # A trailing slash tells rsync to copy what's *inside* the directory.
    source_arg = f"{source}/" if recursive else str(source)
    try:
        subprocess.run(["rsync", "-a", source_arg, str(target)], check=True)
    except Exception as exc:
        # Best-effort: log and continue, matching the caller's expectations.
        print(f"Rsync failed: {exc}")

    if owner:
        self.set_owner(target, *owner, recursive=True)
    if mode:
        self.set_permissions(target, mode)
|
||||||
|
|
||||||
|
def set_permissions(self, path, mode):
    """
    Applies the given permission bits to a file or directory.

    Args:
        path (str or Path): Target path.
        mode (int): Permission bits to apply, e.g. 0o644.

    Raises:
        FileNotFoundError: If the target path does not exist.
    """
    target = Path(path)
    if not target.exists():
        raise FileNotFoundError(f"Cannot chmod: {target} does not exist")
    os.chmod(target, mode)
|
||||||
|
|
||||||
|
def set_owner(self, path, user, group=None, recursive=False):
    """
    Changes ownership of a file or directory.

    Args:
        path (str or Path): Target path.
        user (str or int): User name or UID.
        group (str or int, optional): Group name or GID. When omitted, a
            numeric `user` reuses its UID as the GID; a named user falls
            back to grp.getgrnam(user).
        recursive (bool): If True and the target is a directory, ownership
            is applied to everything beneath it as well.

    Raises:
        FileNotFoundError: If the target path does not exist.
    """
    user_is_numeric = str(user).isdigit()
    uid = int(user) if user_is_numeric else pwd.getpwnam(user).pw_uid

    if group is not None:
        gid = int(group) if str(group).isdigit() else grp.getgrnam(group).gr_gid
    elif isinstance(user, int) or user_is_numeric:
        # NOTE(review): numeric users fall back to GID == UID — confirm intended.
        gid = uid
    else:
        gid = grp.getgrnam(user).gr_gid

    target = Path(path)
    if not target.exists():
        raise FileNotFoundError(f"{path} does not exist")

    if recursive and target.is_dir():
        for entry in target.rglob("*"):
            os.chown(entry, uid, gid)
    os.chown(target, uid, gid)
|
||||||
|
|
||||||
|
def fix_permissions(self, path, user=None, group=None, mode=None, recursive=False):
    """
    Convenience wrapper: applies ownership and/or permission bits in one call.

    Args:
        path (str or Path): Target path.
        user (str|int, optional): User name or UID.
        group (str|int, optional): Group name or GID.
        mode (int, optional): Permission bits, e.g. 0o644.
        recursive (bool): Apply ownership recursively when path is a directory.
    """
    wants_ownership = bool(user) or bool(group)
    if wants_ownership:
        self.set_owner(path, user, group, recursive)
    if mode:
        self.set_permissions(path, mode)
|
||||||
|
|
||||||
|
def move_file(self, src, dst, overwrite=True):
    """
    Moves a file from src to dst, creating parent directories as needed.

    Args:
        src (str or Path): Source file path.
        dst (str or Path): Destination path.
        overwrite (bool): If False, an existing destination is an error.

    Raises:
        FileNotFoundError: If the source does not exist.
        FileExistsError: If the destination exists and overwrite is False.
    """
    source = Path(src)
    target = Path(dst)

    if not source.exists():
        raise FileNotFoundError(f"Source file does not exist: {src}")

    target.parent.mkdir(parents=True, exist_ok=True)

    if target.exists() and not overwrite:
        raise FileExistsError(f"Destination already exists: {dst} (set overwrite=True to overwrite)")

    shutil.move(str(source), str(target))
|
||||||
|
|
||||||
|
def copy_file(self, src, dst, overwrite=True):
    """
    Copies a file (with metadata, via shutil.copy2) from src to dst.

    Args:
        src (str or Path): Source file path.
        dst (str or Path): Destination file path; parents are created.
        overwrite (bool): If False, an existing destination is an error.

    Raises:
        FileNotFoundError: If the source is not an existing regular file.
        FileExistsError: If the destination exists and overwrite is False.
    """
    source = Path(src)
    target = Path(dst)

    if not source.is_file():
        raise FileNotFoundError(f"Source file not found: {source}")

    if target.exists() and not overwrite:
        raise FileExistsError(f"Destination exists: {target}")

    target.parent.mkdir(parents=True, exist_ok=True)

    shutil.copy2(source, target)
|
||||||
|
|
||||||
|
def remove(self, path, recursive=False, wipe_contents=False, exclude=None):
    """
    Removes a file or directory, with optional name-based exclusions.

    Args:
        path (str or Path): Target to remove.
        recursive (bool): Allow removing a directory and its contents.
        wipe_contents (bool): For a directory, remove only the children
            and keep the directory itself.
        exclude (list[str], optional): File/directory names skipped during
            deletion.

    Raises:
        FileNotFoundError: If the target does not exist.
        ValueError: If the target is a directory and neither `recursive`
            nor `wipe_contents` was requested.
    """
    target = Path(path)
    skip_names = set(exclude or [])

    if not target.exists():
        raise FileNotFoundError(f"Cannot remove: {target} does not exist")

    if wipe_contents and target.is_dir():
        # Empty the directory but leave it in place.
        for entry in target.iterdir():
            if entry.name in skip_names:
                continue
            if entry.is_dir():
                shutil.rmtree(entry)
            else:
                entry.unlink()
        return

    if target.is_file():
        if target.name not in skip_names:
            target.unlink()
        return

    if target.is_dir():
        if not recursive:
            raise ValueError(f"{target} is a directory. Use recursive=True or wipe_contents=True to remove it.")
        shutil.rmtree(target)
|
||||||
|
|
||||||
|
def create_dir(self, path):
    """
    Ensures a directory exists, creating missing parents as needed.

    Args:
        path (str or Path): Directory path to create. Existing directories
            are left untouched (idempotent).
    """
    target = Path(path)
    if target.exists():
        return
    print(f"Creating directory: {target}")
    target.mkdir(parents=True, exist_ok=True)
|
||||||
|
|
||||||
|
def patch_exists(self, target_file, patch_file, reverse=False):
    """
    Dry-runs `patch` to check whether a diff applies (or reverses) cleanly.

    Args:
        target_file (str): File the patch would modify.
        patch_file (str): Patch/diff file.
        reverse (bool): If True, test whether the patch can be reversed.

    Returns:
        bool: True when the dry-run exits 0, False otherwise (including
        when the subprocess itself fails to launch).
    """
    parts = ["patch", "-sfN", "--dry-run", target_file, "<", patch_file]
    if reverse:
        parts.insert(1, "-R")
    try:
        # Shell is required for the "<" stdin redirection.
        probe = subprocess.run(
            " ".join(parts),
            shell=True,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL
        )
    except Exception as exc:
        print(f"Patch dry-run failed: {exc}")
        return False
    return probe.returncode == 0
|
||||||
|
|
||||||
|
def apply_patch(self, target_file, patch_file, reverse=False):
    """
    Applies a patch file to a target file using the `patch` utility.

    Args:
        target_file (str): File to be patched.
        patch_file (str): Patch file containing the diff (fed via stdin).
        reverse (bool): If True, applies the patch in reverse (rollback).

    Logs:
        Success or failure of the patching operation; a failed patch is
        reported but not raised.
    """
    cmd = ["patch", target_file, "<", patch_file]
    if reverse:
        # Bug fix: "-R" was previously inserted at index 0, i.e. *before*
        # the "patch" executable, yielding the invalid shell command
        # "-R patch ...". Insert after the executable instead, matching
        # patch_exists().
        cmd.insert(1, "-R")
    try:
        # Shell is required for the "<" stdin redirection.
        subprocess.run(" ".join(cmd), shell=True, check=True)
        print(f"Applied patch {'(reverse)' if reverse else ''} to {target_file}")
    except subprocess.CalledProcessError as e:
        print(f"Patch failed: {e}")
|
||||||
|
|
||||||
|
def isYes(self, value):
    """
    Returns True when `value` is a yes-like string.

    Args:
        value (str): Input to evaluate.

    Returns:
        bool: True for "yes" or "y" in any letter case, False otherwise.
    """
    return value.lower() in ("yes", "y")
|
||||||
|
|
||||||
|
def is_port_open(self, host, port):
    """
    Checks whether a TCP port accepts connections on the given host.

    Args:
        host (str): Hostname or IP address.
        port (int): TCP port number.

    Returns:
        bool: True when a connection succeeds within a 1-second timeout.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as probe:
        probe.settimeout(1)
        return probe.connect_ex((host, port)) == 0
|
||||||
|
|
||||||
|
def resolve_docker_dns_record(self, hostname, record_type="A"):
    """
    Resolves A or AAAA records via Docker's embedded DNS server.

    Args:
        hostname (str): Domain to query.
        record_type (str): "A" (IPv4) or "AAAA" (IPv6); defaults to "A".

    Returns:
        list[str]: Resolved addresses as text.

    Raises:
        Exception: Wrapping any resolver failure with context.
    """
    try:
        resolver = dns.resolver.Resolver()
        # 127.0.0.11 is Docker's built-in DNS endpoint inside containers.
        resolver.nameservers = ["127.0.0.11"]
        records = resolver.resolve(hostname, record_type)
        return [record.to_text() for record in records]
    except Exception as e:
        raise Exception(f"Failed to resolve {record_type} record for {hostname}: {e}")
|
||||||
|
|
||||||
|
def kill_proc(self, process_name):
    """
    Sends SIGTERM to every running process whose name matches.

    Args:
        process_name (str): Exact process name to terminate.

    Returns:
        int: Number of processes that were successfully signaled; processes
        that vanish or deny access mid-iteration are skipped.
    """
    terminated = 0
    for candidate in psutil.process_iter(['name']):
        try:
            if candidate.info['name'] != process_name:
                continue
            candidate.send_signal(signal.SIGTERM)
            terminated += 1
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            continue
    return terminated
|
||||||
|
|
||||||
|
def connect_mysql(self, socket=None):
    """
    Establishes a connection to the MySQL database using `self.db_config`.

    Retries indefinitely (2-second backoff) until the server is reachable,
    then stores the live connection in `self.mysql_conn`.

    Args:
        socket (str, optional): UNIX socket path overriding
            `db_config['unix_socket']`. NOTE(review): this parameter shadows
            the stdlib `socket` module inside this method — harmless here
            since the module is not used, but worth renaming.

    Logs:
        Connection status and per-retry errors to stdout.
    """

    print("Connecting to MySQL...")
    # Base connection parameters, all sourced from self.db_config.
    config = {
        "host": self.db_config['host'],
        "user": self.db_config['user'],
        "password": self.db_config['password'],
        "database": self.db_config['database'],
        'connection_timeout': self.db_config['connection_timeout']
    }
    # A configured unix_socket switches the connector to socket transport;
    # the `socket` argument (when given) takes precedence.
    if self.db_config['unix_socket']:
        config["unix_socket"] = socket or self.db_config['unix_socket']

    # Block until the database accepts a connection; each failed attempt
    # logs the connector error and sleeps 2 seconds.
    while True:
        try:
            self.mysql_conn = mysql.connector.connect(**config)
            if self.mysql_conn.is_connected():
                print("MySQL is up and ready!")
                break
        except mysql.connector.Error as e:
            print(f"Waiting for MySQL... ({e})")
            time.sleep(2)
|
||||||
|
|
||||||
|
def close_mysql(self):
    """
    Closes the MySQL connection when one is open and connected.

    Safe to call repeatedly or when no connection was ever made.
    """
    conn = self.mysql_conn
    if conn and conn.is_connected():
        conn.close()
|
||||||
|
|
||||||
|
def connect_redis(self, max_retries=10, delay=2):
    """
    Connects to the Redis read and write servers and stores the clients.

    Read server: retried indefinitely until reachable.
    Write server: retried up to `max_retries` times before giving up.

    Bug fix: `use_rw` was previously computed as "read endpoint equals
    write endpoint", which inverted both branches — with split read/write
    servers the read client was stored as the write client and the write
    server was never contacted, while a single server was connected twice.
    `use_rw` now means "separate read/write servers are configured".

    Args:
        max_retries (int): Connection attempts for the write server.
        delay (int): Seconds between attempts.

    Sets:
        self.redis_connr: Redis client used for reads.
        self.redis_connw: Redis client used for writes (same object as
            redis_connr when only one server is configured).
    """

    # True when distinct read/write endpoints are configured.
    use_rw = (
        self.redis_config['read_host'] != self.redis_config['write_host']
        or self.redis_config['read_port'] != self.redis_config['write_port']
    )

    if use_rw:
        print("Connecting to Redis read server...")
    else:
        print("Connecting to Redis server...")

    while True:
        try:
            clientr = redis.Redis(
                host=self.redis_config['read_host'],
                port=self.redis_config['read_port'],
                password=self.redis_config['password'],
                db=self.redis_config['db'],
                decode_responses=True
            )
            if clientr.ping():
                self.redis_connr = clientr
                print("Redis read server is up and ready!")
                if use_rw:
                    break  # dedicated write server: connect it below
                else:
                    # Single server: reuse the same client for writes.
                    self.redis_connw = clientr
                    return
        except redis.RedisError as e:
            print(f"Waiting for Redis read... ({e})")
            time.sleep(delay)

    print("Connecting to Redis write server...")
    for attempt in range(max_retries):
        try:
            clientw = redis.Redis(
                host=self.redis_config['write_host'],
                port=self.redis_config['write_port'],
                password=self.redis_config['password'],
                db=self.redis_config['db'],
                decode_responses=True
            )
            if clientw.ping():
                self.redis_connw = clientw
                print("Redis write server is up and ready!")
                return
        except redis.RedisError as e:
            print(f"Waiting for Redis write... (attempt {attempt + 1}/{max_retries}) ({e})")
            time.sleep(delay)
    print("Redis write server is unreachable.")
|
||||||
|
|
||||||
|
def close_redis(self):
    """
    Closes the Redis read and write connections, if any.

    Errors during close are logged and swallowed; each attribute is reset
    to None regardless of the outcome.
    """
    for label, attr in (("read", "redis_connr"), ("write", "redis_connw")):
        conn = getattr(self, attr)
        if not conn:
            continue
        try:
            conn.close()
        except Exception as exc:
            print(f"Error while closing Redis {label} connection: {exc}")
        finally:
            setattr(self, attr, None)
|
||||||
|
|
||||||
|
def wait_for_schema_update(self, init_file_path="init_db.inc.php", check_interval=5):
    """
    Blocks until the schema version stored in the database matches the
    $db_version string declared in the given PHP init file.

    Compares the `versions` table entry for application 'db_schema' (via
    self._get_current_db_version) against the version parsed from the file
    (via self._get_expected_schema_version), polling until they agree.

    Args:
        init_file_path (str): PHP file containing the expected version.
        check_interval (int): Seconds to sleep between checks.

    Logs:
        Current vs. expected schema versions until they match.
    """
    print("Checking database schema version...")

    while True:
        db_version = self._get_current_db_version()
        target_version = self._get_expected_schema_version(init_file_path)

        if db_version == target_version:
            print(f"DB schema is up to date: {db_version}")
            return

        print(f"Waiting for schema update... (DB: {db_version}, Expected: {target_version})")
        time.sleep(check_interval)
|
||||||
|
|
||||||
|
def wait_for_host(self, host, retry_interval=1.0, count=1):
    """
    Blocks until `host` answers an ICMP ping.

    Args:
        host (str): Hostname or IP to ping.
        retry_interval (float): Seconds between ping attempts.
        count (int): Ping packets sent per attempt (default 1).
    """
    while True:
        reachable = False
        try:
            ping = subprocess.run(
                ["ping", "-c", str(count), host],
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL
            )
            reachable = ping.returncode == 0
        except Exception:
            # Treat a failed ping invocation the same as an unreachable host.
            pass
        if reachable:
            print(f"{host} is reachable via ping.")
            return
        print(f"Waiting for {host}...")
        time.sleep(retry_interval)
|
||||||
|
|
||||||
|
def wait_for_dns(self, domain, retry_interval=1, timeout=30):
    """
    Blocks until the domain resolves via the system resolver (socket).

    Args:
        domain (str): Domain name to resolve.
        retry_interval (int): Seconds between attempts.
        timeout (int): Maximum total wait in seconds.

    Returns:
        bool: True once resolution succeeds, False when the timeout elapses.
    """
    deadline = time.time() + timeout
    while True:
        try:
            socket.gethostbyname(domain)
        except socket.gaierror:
            if time.time() > deadline:
                print(f"DNS resolution for {domain} timed out.")
                return False
            print(f"Waiting for DNS for {domain}...")
            time.sleep(retry_interval)
        else:
            print(f"{domain} is resolving via DNS.")
            return True
|
||||||
|
|
||||||
|
def _get_current_db_version(self):
|
||||||
|
"""
|
||||||
|
Fetches the current schema version from the database.
|
||||||
|
|
||||||
|
Executes a SELECT query on the `versions` table where `application = 'db_schema'`.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str or None: The current schema version as a string, or None if not found or on error.
|
||||||
|
|
||||||
|
Logs:
|
||||||
|
Error message if the query fails.
|
||||||
|
"""
|
||||||
|
|
||||||
|
try:
|
||||||
|
cursor = self.mysql_conn.cursor()
|
||||||
|
cursor.execute("SELECT version FROM versions WHERE application = 'db_schema'")
|
||||||
|
result = cursor.fetchone()
|
||||||
|
cursor.close()
|
||||||
|
return result[0] if result else None
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Error fetching current DB schema version: {e}")
|
||||||
|
return None
|
||||||
|
|
||||||
|
def _get_expected_schema_version(self, filepath):
|
||||||
|
"""
|
||||||
|
Extracts the expected database schema version from a PHP initialization file.
|
||||||
|
|
||||||
|
Looks for a line in the form of: `$db_version = "..."` and extracts the version string.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
filepath (str): Path to the PHP file containing the `$db_version` definition.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str or None: The extracted version string, or None if not found or on error.
|
||||||
|
|
||||||
|
Logs:
|
||||||
|
Error message if the file cannot be read or parsed.
|
||||||
|
"""
|
||||||
|
|
||||||
|
try:
|
||||||
|
with open(filepath, "r") as f:
|
||||||
|
content = f.read()
|
||||||
|
match = re.search(r'\$db_version\s*=\s*"([^"]+)"', content)
|
||||||
|
if match:
|
||||||
|
return match.group(1)
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Error reading expected schema version from {filepath}: {e}")
|
||||||
|
return None
|
||||||
|
|
||||||
|
def rand_pass(self, length=22):
    """
    Generates a cryptographically secure random password.

    The alphabet is upper/lowercase letters, digits, underscore and hyphen.

    Args:
        length (int): Number of characters to generate (default 22).

    Returns:
        str: The generated password.
    """
    alphabet = f"{string.ascii_letters}{string.digits}_-"
    return "".join(secrets.choice(alphabet) for _ in range(length))
|
||||||
|
|
||||||
|
def run_command(self, command, check=True, shell=False, input_stream=None, log_output=True):
    """
    Executes an external command, capturing stdout/stderr as text.

    Args:
        command (str or list): Command to execute.
        check (bool): Re-raise CalledProcessError on non-zero exit.
        shell (bool): Run through the shell.
        input_stream: Stream connected to the child's stdin.
        log_output (bool): Print captured stdout/stderr on success.

    Returns:
        subprocess.CompletedProcess: On success (or non-zero exit with
        check=False the CalledProcessError instance is returned instead).
    """
    try:
        completed = subprocess.run(
            command,
            shell=shell,
            check=check,
            stdin=input_stream,
            capture_output=True,
            text=True
        )
    except subprocess.CalledProcessError as exc:
        print(f"Command failed with exit code {exc.returncode}: {exc.cmd}")
        print(exc.stderr.strip())
        if check:
            raise
        return exc
    if log_output:
        for stream in (completed.stdout, completed.stderr):
            if stream:
                print(stream.strip())
    return completed
|
||||||
|
|
||||||
|
def sha1_filter(self, value):
    """Jinja2 filter: hex-encoded SHA-1 digest of a UTF-8 encoded string."""
    digest = hashlib.sha1(value.encode())
    return digest.hexdigest()
|
||||||
|
|
||||||
|
def urlencode_filter(self, value):
    """Jinja2 filter: percent-encodes a string with no safe characters."""
    return quote(value, safe='')
|
||||||
|
|
||||||
|
def escape_quotes_filter(self, value):
    """Jinja2 filter: backslash-escapes every double quote in a string."""
    return value.replace('"', r'\"')
|
||||||
60
data/Dockerfiles/bootstrap/modules/BootstrapClamd.py
Normal file
60
data/Dockerfiles/bootstrap/modules/BootstrapClamd.py
Normal file
@@ -0,0 +1,60 @@
|
|||||||
|
from jinja2 import Environment, FileSystemLoader
|
||||||
|
from modules.BootstrapBase import BootstrapBase
|
||||||
|
from pathlib import Path
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
|
||||||
|
class BootstrapClamd(BootstrapBase):
    """Container bootstrap for the ClamAV (clamd) service.

    Prepares runtime directories, renders config templates and fixes
    permissions before clamd starts.
    """

    def bootstrap(self):
        """Runs the full ClamAV bootstrap sequence.

        Steps: optional skip via SKIP_CLAMD, MySQL connect, tmp-file
        cleanup, directory creation, template rendering, permission fixes
        and exposing the whitelist to /etc/clamav.
        """
        # Skip Clamd if set: sleep ~1 year to keep the container "running"
        # without clamd, then exit non-zero if the sleep ever returns.
        if self.isYes(os.getenv("SKIP_CLAMD", "")):
            print("SKIP_CLAMD is set, skipping ClamAV startup...")
            time.sleep(365 * 24 * 60 * 60)
            sys.exit(1)

        # Connect to MySQL (blocks until the database is reachable)
        self.connect_mysql()

        # Remove leftover clamav-*.tmp files from interrupted updates.
        print("Cleaning up tmp files...")
        tmp_files = Path("/var/lib/clamav").glob("clamav-*.tmp")
        for tmp_file in tmp_files:
            try:
                self.remove(tmp_file)
                print(f"Removed: {tmp_file}")
            except Exception as e:
                # Best-effort cleanup: log and continue.
                print(f"Failed to remove {tmp_file}: {e}")

        self.create_dir("/run/clamav")
        self.create_dir("/var/lib/clamav")

        # Setup Jinja2 Environment and load vars; custom templates take
        # precedence over the stock config templates.
        self.env = Environment(
            loader=FileSystemLoader([
                '/service_config/custom_templates',
                '/service_config/config_templates'
            ]),
            keep_trailing_newline=True,
            lstrip_blocks=True,
            trim_blocks=True
        )
        # No service-specific extra variables for clamd.
        extra_vars = {
        }
        self.env_vars = self.prepare_template_vars('/service_config/overwrites.json', extra_vars)

        print("Set Timezone")
        self.set_timezone()

        print("Render config")
        self.render_config("/service_config")

        # Fix permissions: clamd runs as clamav:clamav.
        self.set_owner("/var/lib/clamav", "clamav", "clamav", recursive=True)
        self.set_owner("/run/clamav", "clamav", "clamav", recursive=True)
        self.set_permissions("/var/lib/clamav", 0o755)
        for item in Path("/var/lib/clamav").glob("*"):
            self.set_permissions(item, 0o644)
        self.set_permissions("/run/clamav", 0o750)

        # Copying to /etc/clamav to expose file as-is to administrator
        self.copy_file("/var/lib/clamav/whitelist.ign2", "/etc/clamav/whitelist.ign2")
|
||||||
289
data/Dockerfiles/bootstrap/modules/BootstrapDovecot.py
Normal file
289
data/Dockerfiles/bootstrap/modules/BootstrapDovecot.py
Normal file
@@ -0,0 +1,289 @@
|
|||||||
|
from jinja2 import Environment, FileSystemLoader
|
||||||
|
from modules.BootstrapBase import BootstrapBase
|
||||||
|
from pathlib import Path
|
||||||
|
import os
|
||||||
|
import pwd
|
||||||
|
import hashlib
|
||||||
|
|
||||||
|
class BootstrapDovecot(BootstrapBase):
|
||||||
|
def bootstrap(self):
    """Runs the full Dovecot bootstrap sequence.

    Steps: MySQL/Redis connections, DNS wait, directory creation, template
    rendering, legacy-file migration, ownership/permission fixes, sieve
    compilation, SA rules collection, mail_crypt key generation and
    imapsync/GUID housekeeping.
    """
    # Connect to MySQL and block until the schema matches init_db.inc.php.
    self.connect_mysql()
    self.wait_for_schema_update()

    # Connect to Redis; mark replication as healthy on the write server.
    self.connect_redis()
    if self.redis_connw:
        self.redis_connw.set("DOVECOT_REPL_HEALTH", 1)

    # Wait for DNS
    self.wait_for_dns("mailcow.email")

    # Create missing directories
    self.create_dir("/etc/dovecot/sql/")
    self.create_dir("/etc/dovecot/auth/")
    self.create_dir("/var/vmail/_garbage")
    self.create_dir("/var/vmail/sieve")
    self.create_dir("/etc/sogo")
    self.create_dir("/var/volatile")

    # Setup Jinja2 Environment and load vars; custom templates take
    # precedence over the stock config templates.
    self.env = Environment(
        loader=FileSystemLoader([
            '/service_config/custom_templates',
            '/service_config/config_templates'
        ]),
        keep_trailing_newline=True,
        lstrip_blocks=True,
        trim_blocks=True
    )
    # Per-render extras: SNI cert map, three fresh random credentials and
    # a snapshot of the process environment.
    extra_vars = {
        "VALID_CERT_DIRS": self.get_valid_cert_dirs(),
        "RAND_USER": self.rand_pass(),
        "RAND_PASS": self.rand_pass(),
        "RAND_PASS2": self.rand_pass(),
        "ENV_VARS": dict(os.environ)
    }
    self.env_vars = self.prepare_template_vars('/service_config/overwrites.json', extra_vars)

    print("Set Timezone")
    self.set_timezone()

    print("Render config")
    self.render_config("/service_config")

    # World-readable plugin lists and quarantine template.
    files = [
        "/etc/dovecot/mail_plugins",
        "/etc/dovecot/mail_plugins_imap",
        "/etc/dovecot/mail_plugins_lmtp",
        "/templates/quarantine.tpl"
    ]
    for file in files:
        self.set_permissions(file, 0o644)

    try:
        # Migrate old sieve_after file
        self.move_file("/etc/dovecot/sieve_after", "/var/vmail/sieve/global_sieve_after.sieve")
    except Exception as e:
        # Best-effort: file may simply not exist on fresh installs.
        pass
    try:
        # Cleanup random user maildirs
        self.remove("/var/vmail/mailcow.local", wipe_contents=True)
    except Exception as e:
        pass
    try:
        # Cleanup PIDs
        self.remove("/tmp/quarantine_notify.pid")
    except Exception as e:
        pass
    try:
        self.remove("/var/run/dovecot/master.pid")
    except Exception as e:
        pass

    # Check permissions of vmail/index/garbage directories.
    # Do not do this every start-up, it may take a very long time. So we use a stat check here.
    files = [
        "/var/vmail",
        "/var/vmail/_garbage",
        "/var/vmail_index"
    ]
    for file in files:
        path = Path(file)
        try:
            stat_info = path.stat()
            current_user = pwd.getpwuid(stat_info.st_uid).pw_name

            if current_user != "vmail":
                print(f"Ownership of {path} is {current_user}, fixing to vmail:vmail...")
                self.set_owner(path, user="vmail", group="vmail", recursive=True)
            else:
                print(f"Ownership of {path} is already correct (vmail)")
        except Exception as e:
            print(f"Error checking ownership of {path}: {e}")

    # Compile sieve scripts (check=False: a missing script is non-fatal).
    files = [
        "/var/vmail/sieve/global_sieve_before.sieve",
        "/var/vmail/sieve/global_sieve_after.sieve",
        "/usr/lib/dovecot/sieve/report-spam.sieve",
        "/usr/lib/dovecot/sieve/report-ham.sieve",
    ]
    for file in files:
        self.run_command(["sievec", file], check=False)

    # Fix permissions: SQL credential files readable by root/dovecot only.
    for path in Path("/etc/dovecot/sql").glob("*.conf"):
        self.set_owner(path, "root", "root")
        self.set_permissions(path, 0o640)

    files = [
        "/etc/dovecot/auth/passwd-verify.lua",
        *Path("/etc/dovecot/sql").glob("dovecot-dict-sql-sieve*"),
        *Path("/etc/dovecot/sql").glob("dovecot-dict-sql-quota*")
    ]
    for file in files:
        self.set_owner(file, "root", "dovecot")

    self.set_permissions("/etc/dovecot/auth/passwd-verify.lua", 0o640)

    for file in ["/var/vmail/sieve", "/var/volatile", "/var/vmail_index"]:
        self.set_owner(file, "vmail", "vmail", recursive=True)

    # Give vmail console access (tty group) for logging purposes.
    self.run_command(["adduser", "vmail", "tty"])
    self.run_command(["chmod", "g+rw", "/dev/console"])
    self.set_owner("/dev/console", "root", "tty")
    # Helper scripts must be executable.
    files = [
        "/usr/lib/dovecot/sieve/rspamd-pipe-ham",
        "/usr/lib/dovecot/sieve/rspamd-pipe-spam",
        "/usr/local/bin/imapsync_runner.pl",
        "/usr/local/bin/imapsync",
        "/usr/local/bin/trim_logs.sh",
        "/usr/local/bin/sa-rules.sh",
        "/usr/local/bin/clean_q_aged.sh",
        "/usr/local/bin/maildir_gc.sh",
        "/usr/local/sbin/stop-supervisor.sh",
        "/usr/local/bin/quota_notify.py",
        "/usr/local/bin/repl_health.sh",
        "/usr/local/bin/optimize-fts.sh"
    ]
    for file in files:
        self.set_permissions(file, 0o755)

    # Collect SA rules once now
    self.run_command(["/usr/local/bin/sa-rules.sh"], check=False)

    self.generate_mail_crypt_keys()
    self.cleanup_imapsync_jobs()
    self.generate_guid_version()
|
||||||
|
|
||||||
|
def get_valid_cert_dirs(self, base_path="/etc/ssl/mail"):
    """
    Build a mapping of domain names to the certificate directory serving them.

    A certificate directory is valid when it contains all three of
    ``domains``, ``cert.pem`` and ``key.pem``.  Each non-empty line of the
    ``domains`` file is treated as one domain name.

    Args:
        base_path (str): Root directory scanned for certificate folders.
            Defaults to "/etc/ssl/mail" (generalized so callers/tests can
            point it at another tree).

    Returns:
        dict: Maps each domain (str) to its certificate directory path
            (str, no trailing slash), e.g.
            {"example.com": "/etc/ssl/mail/example.com"}.
    """
    sni_map = {}
    root = Path(base_path)
    # A missing root directory yields an empty map instead of raising.
    if not root.exists():
        return sni_map

    for cert_dir in root.iterdir():
        if not cert_dir.is_dir():
            continue

        domains_file = cert_dir / "domains"
        cert_file = cert_dir / "cert.pem"
        key_file = cert_dir / "key.pem"

        # All three files must be present for the directory to count.
        if not (domains_file.exists() and cert_file.exists() and key_file.exists()):
            continue

        with open(domains_file, "r") as f:
            domains = [line.strip() for line in f if line.strip()]
        for domain in domains:
            sni_map[domain] = str(cert_dir)

    return sni_map
|
||||||
|
def generate_mail_crypt_keys(self):
    """
    Ensure the mail_crypt EC keypair exists under /mail_crypt.

    Generates the prime256v1 private/public key pair when either file is
    missing or empty, then hands ownership of both files to UID 401
    (dovecot).
    """
    priv_key = Path("/mail_crypt/ecprivkey.pem")
    pub_key = Path("/mail_crypt/ecpubkey.pem")

    def _needs_keygen(path):
        # An absent or zero-byte file means the key was never generated.
        return not path.exists() or path.stat().st_size == 0

    if _needs_keygen(priv_key) or _needs_keygen(pub_key):
        self.run_command(
            "openssl ecparam -name prime256v1 -genkey | openssl pkey -out /mail_crypt/ecprivkey.pem",
            shell=True
        )
        self.run_command(
            "openssl pkey -in /mail_crypt/ecprivkey.pem -pubout -out /mail_crypt/ecpubkey.pem",
            shell=True
        )

    # UID 401 is the dovecot user; it must be able to read the key material.
    for key_file in (priv_key, pub_key):
        self.set_owner(key_file, user='401')
||||||
|
def cleanup_imapsync_jobs(self):
    """
    Clear stale imapsync state left over from a previous run.

    Removes /tmp/imapsync_busy.lock if present and, when the `imapsync`
    table exists, resets its `is_running` flag to 0.  Failures of either
    step are printed rather than raised.
    """
    stale_lock = Path("/tmp/imapsync_busy.lock")
    if stale_lock.exists():
        try:
            stale_lock.unlink()
        except Exception as e:
            print(f"Failed to remove lock file: {e}")

    try:
        cursor = self.mysql_conn.cursor()
        # Only touch the table if it actually exists yet.
        cursor.execute("SHOW TABLES LIKE 'imapsync'")
        if cursor.fetchone():
            cursor.execute("UPDATE imapsync SET is_running='0'")
            self.mysql_conn.commit()
        cursor.close()
    except Exception as e:
        print(f"Error updating imapsync table: {e}")
|
||||||
|
def generate_guid_version(self):
    """
    Derive an installation GUID and store it in the `versions` table.

    The GUID is the SHA-256 hex digest of MAILCOW_HOSTNAME concatenated
    with Dovecot's mail_crypt public key, whose path is read from
    `doveconf -P` output.  A malformed digest is stored as "INVALID";
    a missing key only logs a message.  All errors are printed, never
    raised.
    """
    try:
        doveconf = self.run_command(["doveconf", "-P"], check=True, log_output=False)

        # doveconf renders file-backed settings as "key = </path/to/file";
        # take the path that follows the '<' marker.
        pubkey_path = None
        for config_line in doveconf.stdout.splitlines():
            if "mail_crypt_global_public_key" in config_line:
                pieces = config_line.split('<')
                if len(pieces) > 1:
                    pubkey_path = pieces[1].strip()
                    break

        if pubkey_path and Path(pubkey_path).exists():
            pubkey_data = Path(pubkey_path).read_bytes()

            hostname = self.env_vars.get("MAILCOW_HOSTNAME", "mailcow.local").encode("utf-8")
            digest = hashlib.sha256(hostname + pubkey_data).hexdigest()
            # A SHA-256 hex digest is always 64 chars; anything else is
            # flagged so the stored version is never silently wrong.
            version_value = digest if len(digest) == 64 else "INVALID"

            cursor = self.mysql_conn.cursor()
            cursor.execute(
                "REPLACE INTO versions (application, version) VALUES (%s, %s)",
                ("GUID", version_value)
            )
            self.mysql_conn.commit()
            cursor.close()
        else:
            print("Public key not found or unreadable. GUID not generated.")
    except Exception as e:
        print(f"Failed to generate or store GUID: {e}")
||||||
163
data/Dockerfiles/bootstrap/modules/BootstrapMysql.py
Normal file
163
data/Dockerfiles/bootstrap/modules/BootstrapMysql.py
Normal file
@@ -0,0 +1,163 @@
|
|||||||
|
from jinja2 import Environment, FileSystemLoader
|
||||||
|
from modules.BootstrapBase import BootstrapBase
|
||||||
|
import os
|
||||||
|
import time
|
||||||
|
import subprocess
|
||||||
|
|
||||||
|
class BootstrapMysql(BootstrapBase):
    def bootstrap(self):
        """
        Entry point: upgrade an existing MySQL/MariaDB data directory if one
        is present, then render the service configuration from templates.
        """
        dbuser = "root"
        dbpass = os.getenv("MYSQL_ROOT_PASSWORD", "")
        socket = "/tmp/mysql-temp.sock"

        # db.frm only exists for an already-provisioned datadir, so a fresh
        # install skips the temporary-server/upgrade dance entirely.
        if os.path.exists("/var/lib/mysql/mysql/db.frm"):
            print("Starting temporary mysqld for upgrade...")
            self.start_temporary(socket)

            self.connect_mysql(socket)

            print("Running mysql_upgrade...")
            self.upgrade_mysql(dbuser, dbpass, socket)
            print("Checking timezone support with CONVERT_TZ...")
            self.check_and_import_timezone_support(dbuser, dbpass, socket)

            print("Shutting down temporary mysqld...")
            self.close_mysql()
            self.stop_temporary(dbuser, dbpass, socket)

        # Setup Jinja2 Environment and load vars; custom templates take
        # precedence over the stock config templates.
        self.env = Environment(
            loader=FileSystemLoader([
                '/service_config/custom_templates',
                '/service_config/config_templates'
            ]),
            keep_trailing_newline=True,
            lstrip_blocks=True,
            trim_blocks=True
        )
        extra_vars = {
        }
        self.env_vars = self.prepare_template_vars('/service_config/overwrites.json', extra_vars)

        print("Set Timezone")
        self.set_timezone()

        print("Render config")
        self.render_config("/service_config")

    def start_temporary(self, socket):
        """
        Starts a temporary mysqld process in the background using the given UNIX socket.

        The server is started with networking disabled (--skip-networking).

        Args:
            socket (str): Path to the UNIX socket file for MySQL to listen on.

        Returns:
            subprocess.Popen: The running mysqld process object.
        """
        return subprocess.Popen([
            "mysqld",
            "--user=mysql",
            "--skip-networking",
            f"--socket={socket}"
        ])

    def stop_temporary(self, dbuser, dbpass, socket):
        """
        Shuts down the temporary mysqld instance gracefully.

        Uses mariadb-admin to issue a shutdown command to the running server.

        Args:
            dbuser (str): The MySQL username with shutdown privileges (typically 'root').
            dbpass (str): The password for the MySQL user.
            socket (str): Path to the UNIX socket the server is listening on.
        """
        self.run_command([
            "mariadb-admin",
            "shutdown",
            f"--socket={socket}",
            "-u", dbuser,
            f"-p{dbpass}"
        ])

    def upgrade_mysql(self, dbuser, dbpass, socket, max_retries=5, wait_interval=3):
        """
        Executes mysql_upgrade to check and fix any schema or table incompatibilities.

        Retries the upgrade command if it fails, up to a maximum number of attempts.

        Args:
            dbuser (str): MySQL username with privilege to perform the upgrade.
            dbpass (str): Password for the MySQL user.
            socket (str): Path to the MySQL UNIX socket for local communication.
            max_retries (int): Maximum number of attempts before giving up. Default is 5.
            wait_interval (int): Number of seconds to wait between retries. Default is 3.

        Returns:
            bool: True if upgrade succeeded, False if all attempts failed.
        """
        retries = 0
        while retries < max_retries:
            result = self.run_command([
                "mysql_upgrade",
                "-u", dbuser,
                f"-p{dbpass}",
                f"--socket={socket}"
            ], check=False)

            if result.returncode == 0:
                print("mysql_upgrade completed successfully.")
                # Fix: the original broke out of the loop without a value
                # (implicitly None) although the documented contract is bool.
                return True

            print(f"mysql_upgrade failed (try {retries+1}/{max_retries})")
            retries += 1
            time.sleep(wait_interval)

        print("mysql_upgrade failed after all retries.")
        return False

    def check_and_import_timezone_support(self, dbuser, dbpass, socket):
        """
        Checks if MySQL supports timezone conversion (CONVERT_TZ).
        If not, it imports timezone info using mysql_tzinfo_to_sql piped into mariadb.

        Args:
            dbuser (str): MySQL username used for the import.
            dbpass (str): Password for the MySQL user.
            socket (str): Path to the MySQL UNIX socket.
        """
        try:
            cursor = self.mysql_conn.cursor()
            # CONVERT_TZ returns NULL when the mysql.time_zone* tables are
            # not populated — that's the signal to import zoneinfo.
            cursor.execute("SELECT CONVERT_TZ('2019-11-02 23:33:00','Europe/Berlin','UTC')")
            result = cursor.fetchone()
            cursor.close()

            if not result or result[0] is None:
                print("Timezone conversion failed or returned NULL. Importing timezone info...")

                # Use mysql_tzinfo_to_sql piped into mariadb
                tz_dump = subprocess.Popen(
                    ["mysql_tzinfo_to_sql", "/usr/share/zoneinfo"],
                    stdout=subprocess.PIPE
                )

                self.run_command([
                    "mariadb",
                    "--socket", socket,
                    "-u", dbuser,
                    f"-p{dbpass}",
                    "mysql"
                ], input_stream=tz_dump.stdout)

                tz_dump.stdout.close()
                tz_dump.wait()

                print("Timezone info successfully imported.")
            else:
                print(f"Timezone support is working. Sample result: {result[0]}")
        except Exception as e:
            print(f"Failed to verify or import timezone info: {e}")
||||||
65
data/Dockerfiles/bootstrap/modules/BootstrapNginx.py
Normal file
65
data/Dockerfiles/bootstrap/modules/BootstrapNginx.py
Normal file
@@ -0,0 +1,65 @@
|
|||||||
|
from jinja2 import Environment, FileSystemLoader
|
||||||
|
from modules.BootstrapBase import BootstrapBase
|
||||||
|
import os
|
||||||
|
|
||||||
|
class BootstrapNginx(BootstrapBase):
    def bootstrap(self):
        """
        Entry point: wait for upstream services to be reachable, then render
        the nginx configuration from templates.
        """
        # Connect to MySQL
        self.connect_mysql()

        # Wait for upstream hosts. Defaults mirror the standard mailcow
        # service names when the *_HOST variables are unset or empty.
        php_service = os.getenv("PHPFPM_HOST") or "php-fpm-mailcow"
        rspamd_service = os.getenv("RSPAMD_HOST") or "rspamd-mailcow"
        # Fix: SOGO_HOST previously had no fallback, so an unset variable
        # made wait_for_host() receive None unless SKIP_SOGO was set.
        sogo_service = os.getenv("SOGO_HOST") or "sogo-mailcow"
        self.wait_for_host(php_service)
        if not self.isYes(os.getenv("SKIP_RSPAMD", False)):
            self.wait_for_host(rspamd_service)
        if not self.isYes(os.getenv("SKIP_SOGO", False)):
            self.wait_for_host(sogo_service)

        # Setup Jinja2 Environment and load vars; custom templates take
        # precedence over the stock config templates.
        self.env = Environment(
            loader=FileSystemLoader([
                '/service_config/custom_templates',
                '/service_config/config_templates'
            ]),
            keep_trailing_newline=True,
            lstrip_blocks=True,
            trim_blocks=True
        )
        extra_vars = {
            "VALID_CERT_DIRS": self.get_valid_cert_dirs(),
            'TRUSTED_PROXIES': [item.strip() for item in os.getenv("TRUSTED_PROXIES", "").split(",") if item.strip()],
            'ADDITIONAL_SERVER_NAMES': [item.strip() for item in os.getenv("ADDITIONAL_SERVER_NAMES", "").split(",") if item.strip()],
        }
        self.env_vars = self.prepare_template_vars('/service_config/overwrites.json', extra_vars)

        print("Set Timezone")
        self.set_timezone()

        print("Render config")
        self.render_config("/service_config")

    def get_valid_cert_dirs(self):
        """
        Collect SNI certificate directories under /etc/ssl/mail/.

        A directory qualifies when it contains cert.pem, key.pem and a
        non-empty `domains` file that does NOT include MAILCOW_HOSTNAME
        (presumably the primary hostname's certificate is configured
        separately — confirm against the nginx templates).

        Returns:
            list[dict]: Each entry has 'cert_path' (directory path with a
                trailing slash) and 'domains' (whitespace-separated string).
        """
        ssl_dir = '/etc/ssl/mail/'
        valid_cert_dirs = []
        # Robustness fix: a missing SSL directory now yields an empty list
        # instead of os.listdir() raising FileNotFoundError (the sibling
        # bootstrap modules already guard this case).
        if not os.path.isdir(ssl_dir):
            return valid_cert_dirs

        for d in os.listdir(ssl_dir):
            full_path = os.path.join(ssl_dir, d)
            if not os.path.isdir(full_path):
                continue

            cert_path = os.path.join(full_path, 'cert.pem')
            key_path = os.path.join(full_path, 'key.pem')
            domains_path = os.path.join(full_path, 'domains')

            if os.path.isfile(cert_path) and os.path.isfile(key_path) and os.path.isfile(domains_path):
                with open(domains_path, 'r') as file:
                    domains = file.read().strip()
                domains_list = domains.split()
                if domains_list and os.getenv("MAILCOW_HOSTNAME", "") not in domains_list:
                    valid_cert_dirs.append({
                        'cert_path': full_path + '/',
                        'domains': domains
                    })

        return valid_cert_dirs
||||||
202
data/Dockerfiles/bootstrap/modules/BootstrapPhpfpm.py
Normal file
202
data/Dockerfiles/bootstrap/modules/BootstrapPhpfpm.py
Normal file
@@ -0,0 +1,202 @@
|
|||||||
|
from jinja2 import Environment, FileSystemLoader
|
||||||
|
from modules.BootstrapBase import BootstrapBase
|
||||||
|
import os
|
||||||
|
import ipaddress
|
||||||
|
|
||||||
|
class BootstrapPhpfpm(BootstrapBase):
    def bootstrap(self):
        """
        Entry point: prepare Redis/MySQL defaults (on the master node),
        render the PHP-FPM configuration, export PHP config files to the
        shared volume and run the web DB initializer.
        """
        self.connect_mysql()
        self.connect_redis()

        # Setup Jinja2 Environment and load vars; custom templates take
        # precedence over the stock config templates.
        self.env = Environment(
            loader=FileSystemLoader([
                '/service_config/custom_templates',
                '/service_config/config_templates'
            ]),
            keep_trailing_newline=True,
            lstrip_blocks=True,
            trim_blocks=True
        )
        extra_vars = {
        }
        self.env_vars = self.prepare_template_vars('/service_config/overwrites.json', extra_vars)

        print("Set Timezone")
        self.set_timezone()

        # Prepare Redis and MySQL Database
        # TODO: move to dockerapi
        if self.isYes(os.getenv("MASTER", "")):
            print("We are master, preparing...")
            self.prepare_redis()
            self.setup_apikeys(
                os.getenv("API_ALLOW_FROM", "").strip(),
                os.getenv("API_KEY", "").strip(),
                os.getenv("API_KEY_READ_ONLY", "").strip()
            )
            self.setup_mysql_events()

        print("Render config")
        self.render_config("/service_config")

        # Expose the container's PHP configuration on the shared volume.
        self.copy_file("/usr/local/etc/php/conf.d/opcache-recommended.ini", "/php-conf/opcache-recommended.ini")
        self.copy_file("/usr/local/etc/php-fpm.d/z-pools.conf", "/php-conf/pools.conf")
        self.copy_file("/usr/local/etc/php/conf.d/zzz-other.ini", "/php-conf/other.ini")
        self.copy_file("/usr/local/etc/php/conf.d/upload.ini", "/php-conf/upload.ini")
        self.copy_file("/usr/local/etc/php/conf.d/session_store.ini", "/php-conf/session_store.ini")

        # 82/82 is presumably www-data on Alpine-based images — confirm.
        self.set_owner("/global_sieve", 82, 82, recursive=True)
        self.set_owner("/web/templates/cache", 82, 82, recursive=True)
        self.remove("/web/templates/cache", wipe_contents=True, exclude=[".gitkeep"])

        print("Running DB init...")
        self.run_command(["php", "-c", "/usr/local/etc/php", "-f", "/web/inc/init_db.inc.php"], check=False)

    def prepare_redis(self):
        """
        Seed Redis with default keys when missing and rebuild the
        DOMAIN_MAP hash from the MySQL domain/alias_domain tables.

        Reads go through `redis_connr`, writes through `redis_connw`.
        """
        print("Setting default Redis keys if missing...")

        # Q_RELEASE_FORMAT
        if self.redis_connw and self.redis_connr.get("Q_RELEASE_FORMAT") is None:
            self.redis_connw.set("Q_RELEASE_FORMAT", "raw")

        # Q_MAX_AGE
        if self.redis_connw and self.redis_connr.get("Q_MAX_AGE") is None:
            self.redis_connw.set("Q_MAX_AGE", 365)

        # PASSWD_POLICY hash defaults
        if self.redis_connw and self.redis_connr.hget("PASSWD_POLICY", "length") is None:
            self.redis_connw.hset("PASSWD_POLICY", mapping={
                "length": 6,
                "chars": 0,
                "special_chars": 0,
                "lowerupper": 0,
                "numbers": 0
            })

        # DOMAIN_MAP
        print("Rebuilding DOMAIN_MAP from MySQL...")
        if self.redis_connw:
            self.redis_connw.delete("DOMAIN_MAP")
            domains = set()
            try:
                cursor = self.mysql_conn.cursor()

                cursor.execute("SELECT domain FROM domain")
                domains.update(row[0] for row in cursor.fetchall())
                cursor.execute("SELECT alias_domain FROM alias_domain")
                domains.update(row[0] for row in cursor.fetchall())

                cursor.close()

                if domains:
                    for domain in domains:
                        # Fix: was `self.redis_conn.hset(...)` — an attribute
                        # that is never defined here; the write handle used
                        # everywhere else in this class is `redis_connw`.
                        self.redis_connw.hset("DOMAIN_MAP", domain, 1)
                    print(f"{len(domains)} domains added to DOMAIN_MAP.")
                else:
                    print("No domains found to insert into DOMAIN_MAP.")
            except Exception as e:
                print(f"Failed to rebuild DOMAIN_MAP: {e}")

    def setup_apikeys(self, api_allow_from, api_key_rw, api_key_ro):
        """
        Store the RW/RO API keys and their allowed source IPs in the `api`
        table.

        Args:
            api_allow_from (str): Comma-separated IPs/networks allowed to
                use the API; invalid entries are dropped.
            api_key_rw (str): Read-write API key ("" or "invalid" skips it).
            api_key_ro (str): Read-only API key ("" or "invalid" skips it).
        """
        if not api_allow_from or api_allow_from == "invalid":
            return

        print("Validating API_ALLOW_FROM IPs...")
        ip_list = [ip.strip() for ip in api_allow_from.split(",")]
        validated_ips = []

        for ip in ip_list:
            try:
                # strict=False accepts host addresses with a prefix, e.g.
                # 192.168.1.1/24.
                ipaddress.ip_network(ip, strict=False)
                validated_ips.append(ip)
            except ValueError:
                continue
        if not validated_ips:
            print("No valid IPs found in API_ALLOW_FROM")
            return

        allow_from_str = ",".join(validated_ips)
        cursor = self.mysql_conn.cursor()
        try:
            if api_key_rw and api_key_rw != "invalid":
                print("Setting RW API key...")
                cursor.execute("DELETE FROM api WHERE access = 'rw'")
                cursor.execute(
                    "INSERT INTO api (api_key, active, allow_from, access) VALUES (%s, %s, %s, %s)",
                    (api_key_rw, 1, allow_from_str, "rw")
                )

            if api_key_ro and api_key_ro != "invalid":
                print("Setting RO API key...")
                cursor.execute("DELETE FROM api WHERE access = 'ro'")
                cursor.execute(
                    "INSERT INTO api (api_key, active, allow_from, access) VALUES (%s, %s, %s, %s)",
                    (api_key_ro, 1, allow_from_str, "ro")
                )

            self.mysql_conn.commit()
            print("API key(s) set successfully.")
        except Exception as e:
            print(f"Failed to configure API keys: {e}")
            self.mysql_conn.rollback()
        finally:
            cursor.close()

    def setup_mysql_events(self):
        """
        (Re)create the daily housekeeping MySQL EVENTs: spamalias expiry,
        OAuth token cleanup and SASL log pruning.
        """
        print("Creating scheduled MySQL EVENTS...")

        queries = [
            "DROP EVENT IF EXISTS clean_spamalias;",
            """
            CREATE EVENT clean_spamalias
            ON SCHEDULE EVERY 1 DAY
            DO
            DELETE FROM spamalias WHERE validity < UNIX_TIMESTAMP();
            """,
            "DROP EVENT IF EXISTS clean_oauth2;",
            """
            CREATE EVENT clean_oauth2
            ON SCHEDULE EVERY 1 DAY
            DO
            BEGIN
            DELETE FROM oauth_refresh_tokens WHERE expires < NOW();
            DELETE FROM oauth_access_tokens WHERE expires < NOW();
            DELETE FROM oauth_authorization_codes WHERE expires < NOW();
            END;
            """,
            "DROP EVENT IF EXISTS clean_sasl_log;",
            """
            CREATE EVENT clean_sasl_log
            ON SCHEDULE EVERY 1 DAY
            DO
            BEGIN
            DELETE sasl_log.* FROM sasl_log
            LEFT JOIN (
            SELECT username, service, MAX(datetime) AS lastdate
            FROM sasl_log
            GROUP BY username, service
            ) AS last
            ON sasl_log.username = last.username AND sasl_log.service = last.service
            WHERE datetime < DATE_SUB(NOW(), INTERVAL 31 DAY)
            AND datetime < lastdate;

            DELETE FROM sasl_log
            WHERE username NOT IN (SELECT username FROM mailbox)
            AND datetime < DATE_SUB(NOW(), INTERVAL 31 DAY);
            END;
            """
        ]

        try:
            cursor = self.mysql_conn.cursor()
            for query in queries:
                cursor.execute(query)
            self.mysql_conn.commit()
            cursor.close()
            print("MySQL EVENTS created successfully.")
        except Exception as e:
            print(f"Failed to create MySQL EVENTS: {e}")
            self.mysql_conn.rollback()
||||||
83
data/Dockerfiles/bootstrap/modules/BootstrapPostfix.py
Normal file
83
data/Dockerfiles/bootstrap/modules/BootstrapPostfix.py
Normal file
@@ -0,0 +1,83 @@
|
|||||||
|
from jinja2 import Environment, FileSystemLoader
|
||||||
|
from modules.BootstrapBase import BootstrapBase
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
class BootstrapPostfix(BootstrapBase):
    """Bootstrap routine for the Postfix container."""

    def bootstrap(self):
        """
        Prepare the Postfix container: wait for prerequisites, render the
        configuration, rebuild lookup tables and normalize permissions.
        """
        # Connect to MySQL
        self.connect_mysql()

        # Wait for DNS
        self.wait_for_dns("mailcow.email")

        self.create_dir("/opt/postfix/conf/sql/")

        # Setup Jinja2 Environment and load vars; custom templates take
        # precedence over the stock config templates.
        self.env = Environment(
            loader=FileSystemLoader([
                '/service_config/custom_templates',
                '/service_config/config_templates'
            ]),
            keep_trailing_newline=True,
            lstrip_blocks=True,
            trim_blocks=True
        )
        self.env_vars = self.prepare_template_vars(
            '/service_config/overwrites.json',
            {"VALID_CERT_DIRS": self.get_valid_cert_dirs()}
        )

        print("Set Timezone")
        self.set_timezone()

        print("Set Syslog redis")
        self.set_syslog_redis()

        print("Render config")
        self.render_config("/service_config")

        # Create aliases DB
        self.run_command(["newaliases"])

        # Create SNI Config
        self.run_command(["postmap", "-F", "hash:/opt/postfix/conf/sni.map"])

        # Fix Postfix permissions
        self.set_owner("/opt/postfix/conf/sql", user="root", group="postfix", recursive=True)
        self.set_owner("/opt/postfix/conf/custom_transport.pcre", user="root", group="postfix")
        for sql_map in Path("/opt/postfix/conf/sql").glob("*.cf"):
            self.set_permissions(sql_map, 0o640)
        self.set_permissions("/opt/postfix/conf/custom_transport.pcre", 0o640)
        for spool_dir in ("/var/spool/postfix/public", "/var/spool/postfix/maildrop"):
            self.set_owner(spool_dir, user="root", group="postdrop", recursive=True)
        self.run_command(["postfix", "set-permissions"], check=False)

        # Checking if there is a leftover of a crashed postfix container before starting a new one
        pid_file = Path("/var/spool/postfix/pid/master.pid")
        if pid_file.exists():
            print(f"Removing stale Postfix PID file: {pid_file}")
            pid_file.unlink()

    def get_valid_cert_dirs(self):
        """
        Map each complete certificate directory under /etc/ssl/mail to the
        list of domains it covers.

        A directory is complete when it contains `domains`, `cert.pem` and
        `key.pem`; each non-empty line of `domains` is one domain.

        Returns:
            dict: directory path (str) -> non-empty list of domain names.
        """
        certs = {}
        root = Path("/etc/ssl/mail")
        if not root.exists():
            return certs

        for candidate in root.iterdir():
            if not candidate.is_dir():
                continue

            domains_file = candidate / "domains"
            # All three files must exist for the directory to qualify.
            if not all(p.exists() for p in (domains_file, candidate / "cert.pem", candidate / "key.pem")):
                continue

            with open(domains_file, "r") as handle:
                entries = [line.strip() for line in handle if line.strip()]
            if entries:
                certs[str(candidate)] = entries

        return certs
||||||
132
data/Dockerfiles/bootstrap/modules/BootstrapRspamd.py
Normal file
132
data/Dockerfiles/bootstrap/modules/BootstrapRspamd.py
Normal file
@@ -0,0 +1,132 @@
|
|||||||
|
from jinja2 import Environment, FileSystemLoader
|
||||||
|
from modules.BootstrapBase import BootstrapBase
|
||||||
|
from pathlib import Path
|
||||||
|
import time
|
||||||
|
import platform
|
||||||
|
|
||||||
|
class BootstrapRspamd(BootstrapBase):
    def bootstrap(self):
        """
        Entry point: resolve peer container IPs, wait for dependent
        services, render the configuration and fix maps/permissions.
        """
        # Connect to MySQL
        self.connect_mysql()

        # Connect to Redis
        self.connect_redis()

        # get dovecot ips — loop until at least one address family resolves
        dovecot_v4 = []
        dovecot_v6 = []
        while not dovecot_v4 and not dovecot_v6:
            try:
                dovecot_v4 = self.resolve_docker_dns_record("dovecot-mailcow", "A")
                dovecot_v6 = self.resolve_docker_dns_record("dovecot-mailcow", "AAAA")
            except Exception as e:
                print(e)
            if not dovecot_v4 and not dovecot_v6:
                print("Waiting for Dovecot IPs...")
                time.sleep(3)

        # get rspamd ips
        rspamd_v4 = []
        rspamd_v6 = []
        while not rspamd_v4 and not rspamd_v6:
            try:
                rspamd_v4 = self.resolve_docker_dns_record("rspamd-mailcow", "A")
                rspamd_v6 = self.resolve_docker_dns_record("rspamd-mailcow", "AAAA")
            # Fix: was `except Exception:` without binding, so the
            # following print(e) raised NameError whenever resolution failed.
            except Exception as e:
                print(e)
            if not rspamd_v4 and not rspamd_v6:
                print("Waiting for Rspamd IPs...")
                time.sleep(3)

        # wait for Services
        services = [
            ["php-fpm-mailcow", 9001],
            ["php-fpm-mailcow", 9002]
        ]
        for service in services:
            while not self.is_port_open(service[0], service[1]):
                print(f"Waiting for {service[0]} on port {service[1]}...")
                time.sleep(1)
            print(f"Service {service[0]} on port {service[1]} is ready!")

        for dir_path in ["/etc/rspamd/plugins.d", "/etc/rspamd/custom"]:
            Path(dir_path).mkdir(parents=True, exist_ok=True)
        for file_path in ["/etc/rspamd/rspamd.conf.local", "/etc/rspamd/rspamd.conf.override"]:
            Path(file_path).touch(exist_ok=True)
        self.set_permissions("/var/lib/rspamd", 0o755)

        # Setup Jinja2 Environment and load vars; custom templates take
        # precedence over the stock config templates.
        self.env = Environment(
            loader=FileSystemLoader([
                '/service_config/custom_templates',
                '/service_config/config_templates'
            ]),
            keep_trailing_newline=True,
            lstrip_blocks=True,
            trim_blocks=True
        )
        # Fix: only one address family may have resolved; indexing [0]
        # unconditionally crashed with IndexError on IPv4-only or
        # IPv6-only networks. Missing families are passed as None.
        extra_vars = {
            "DOVECOT_V4": dovecot_v4[0] if dovecot_v4 else None,
            "DOVECOT_V6": dovecot_v6[0] if dovecot_v6 else None,
            "RSPAMD_V4": rspamd_v4[0] if rspamd_v4 else None,
            "RSPAMD_V6": rspamd_v6[0] if rspamd_v6 else None,
        }
        self.env_vars = self.prepare_template_vars('/service_config/overwrites.json', extra_vars)

        print("Set Timezone")
        self.set_timezone()

        print("Render config")
        self.render_config("/service_config")

        # Fix missing default global maps, if any
        # These exist in mailcow UI and should not be removed
        files = [
            "/etc/rspamd/custom/global_mime_from_blacklist.map",
            "/etc/rspamd/custom/global_rcpt_blacklist.map",
            "/etc/rspamd/custom/global_smtp_from_blacklist.map",
            "/etc/rspamd/custom/global_mime_from_whitelist.map",
            "/etc/rspamd/custom/global_rcpt_whitelist.map",
            "/etc/rspamd/custom/global_smtp_from_whitelist.map",
            "/etc/rspamd/custom/bad_languages.map",
            "/etc/rspamd/custom/sa-rules",
            "/etc/rspamd/custom/dovecot_trusted.map",
            "/etc/rspamd/custom/rspamd_trusted.map",
            "/etc/rspamd/custom/mailcow_networks.map",
            "/etc/rspamd/custom/ip_wl.map",
            "/etc/rspamd/custom/fishy_tlds.map",
            "/etc/rspamd/custom/bad_words.map",
            "/etc/rspamd/custom/bad_asn.map",
            "/etc/rspamd/custom/bad_words_de.map",
            "/etc/rspamd/custom/bulk_header.map",
            "/etc/rspamd/custom/bad_header.map"
        ]
        for file in files:
            path = Path(file)
            path.parent.mkdir(parents=True, exist_ok=True)
            path.touch(exist_ok=True)

        # Fix permissions
        paths_rspamd = [
            "/var/lib/rspamd",
            "/etc/rspamd/local.d",
            "/etc/rspamd/override.d",
            "/etc/rspamd/rspamd.conf.local",
            "/etc/rspamd/rspamd.conf.override",
            "/etc/rspamd/plugins.d"
        ]
        for path in paths_rspamd:
            self.set_owner(path, "_rspamd", "_rspamd", recursive=True)
        self.set_owner("/etc/rspamd/custom", "_rspamd", "_rspamd")
        self.set_permissions("/etc/rspamd/custom", 0o755)

        # Custom map files are edited by the web UI (UID/GID 82).
        custom_path = Path("/etc/rspamd/custom")
        for child in custom_path.iterdir():
            if child.is_file():
                self.set_owner(child, 82, 82)
                self.set_permissions(child, 0o644)

        # Provide additional lua modules
        arch = platform.machine()
        self.run_command(["ln", "-s", f"/usr/lib/{arch}-linux-gnu/liblua5.1-cjson.so.0.0.0", "/usr/lib/rspamd/cjson.so"], check=False)
||||||
138
data/Dockerfiles/bootstrap/modules/BootstrapSogo.py
Normal file
138
data/Dockerfiles/bootstrap/modules/BootstrapSogo.py
Normal file
@@ -0,0 +1,138 @@
|
|||||||
|
from jinja2 import Environment, FileSystemLoader
|
||||||
|
from modules.BootstrapBase import BootstrapBase
|
||||||
|
from pathlib import Path
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
|
||||||
|
class BootstrapSogo(BootstrapBase):
|
||||||
|
def bootstrap(self):
|
||||||
|
# Skip SOGo if set
|
||||||
|
if self.isYes(os.getenv("SKIP_SOGO", "")):
|
||||||
|
print("SKIP_SOGO is set, skipping SOGo startup...")
|
||||||
|
time.sleep(365 * 24 * 60 * 60)
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
# Connect to MySQL
|
||||||
|
self.connect_mysql()
|
||||||
|
|
||||||
|
# Wait until port is free
|
||||||
|
while self.is_port_open(os.getenv("SOGO_HOST"), 20000):
|
||||||
|
print("Port 20000 still in use — terminating sogod...")
|
||||||
|
self.kill_proc("sogod")
|
||||||
|
time.sleep(3)
|
||||||
|
|
||||||
|
# Wait for schema to update to expected version
|
||||||
|
self.wait_for_schema_update(init_file_path="init_db.inc.php")
|
||||||
|
|
||||||
|
# Setup Jinja2 Environment and load vars
|
||||||
|
self.env = Environment(
|
||||||
|
loader=FileSystemLoader([
|
||||||
|
'/service_config/custom_templates',
|
||||||
|
'/service_config/config_templates'
|
||||||
|
]),
|
||||||
|
keep_trailing_newline=True,
|
||||||
|
lstrip_blocks=True,
|
||||||
|
trim_blocks=True
|
||||||
|
)
|
||||||
|
extra_vars = {
|
||||||
|
"SQL_DOMAINS": self.get_domains(),
|
||||||
|
"IAM_SETTINGS": self.get_identity_provider_settings()
|
||||||
|
}
|
||||||
|
self.env_vars = self.prepare_template_vars('/service_config/overwrites.json', extra_vars)
|
||||||
|
|
||||||
|
print("Set Timezone")
|
||||||
|
self.set_timezone()
|
||||||
|
|
||||||
|
print("Set Syslog redis")
|
||||||
|
self.set_syslog_redis()
|
||||||
|
|
||||||
|
print("Render config")
|
||||||
|
self.render_config("/service_config")
|
||||||
|
|
||||||
|
print("Fix permissions")
|
||||||
|
self.set_owner("/var/lib/sogo", "sogo", "sogo", recursive=True)
|
||||||
|
self.set_permissions("/var/lib/sogo/GNUstep/Defaults/sogod.plist", 0o600)
|
||||||
|
|
||||||
|
# Rename custom logo
|
||||||
|
logo_src = Path("/etc/sogo/sogo-full.svg")
|
||||||
|
if logo_src.exists():
|
||||||
|
print("Set Logo")
|
||||||
|
self.move_file(logo_src, "/etc/sogo/custom-fulllogo.svg")
|
||||||
|
|
||||||
|
# Rsync web content
|
||||||
|
print("Syncing web content")
|
||||||
|
self.rsync_file("/usr/lib/GNUstep/SOGo/", "/sogo_web/", recursive=True)
|
||||||
|
|
||||||
|
# Chown backup path
|
||||||
|
self.set_owner("/sogo_backup", "sogo", "sogo", recursive=True)
|
||||||
|
|
||||||
|
def get_domains(self):
|
||||||
|
"""
|
||||||
|
Retrieves a list of domains and their GAL (Global Address List) status.
|
||||||
|
|
||||||
|
Executes a SQL query to select:
|
||||||
|
- `domain`
|
||||||
|
- a human-readable GAL status ("YES" or "NO")
|
||||||
|
- `ldap_gal` as a boolean (True/False)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
list[dict]: A list of dicts with keys: domain, gal_status, ldap_gal.
|
||||||
|
Example: [{"domain": "example.com", "gal_status": "YES", "ldap_gal": True}]
|
||||||
|
|
||||||
|
Logs:
|
||||||
|
Error messages if the query fails.
|
||||||
|
"""
|
||||||
|
|
||||||
|
query = """
|
||||||
|
SELECT domain,
|
||||||
|
CASE gal WHEN '1' THEN 'YES' ELSE 'NO' END AS gal_status,
|
||||||
|
ldap_gal = 1 AS ldap_gal
|
||||||
|
FROM domain;
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
cursor = self.mysql_conn.cursor()
|
||||||
|
cursor.execute(query)
|
||||||
|
result = cursor.fetchall()
|
||||||
|
cursor.close()
|
||||||
|
|
||||||
|
return [
|
||||||
|
{
|
||||||
|
"domain": row[0],
|
||||||
|
"gal_status": row[1],
|
||||||
|
"ldap_gal": bool(row[2])
|
||||||
|
}
|
||||||
|
for row in result
|
||||||
|
]
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Error fetching domains: {e}")
|
||||||
|
return []
|
||||||
|
|
||||||
|
def get_identity_provider_settings(self):
|
||||||
|
"""
|
||||||
|
Retrieves all key-value identity provider settings.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
dict: Settings in the format { key: value }
|
||||||
|
|
||||||
|
Logs:
|
||||||
|
Error messages if the query fails.
|
||||||
|
"""
|
||||||
|
query = "SELECT `key`, `value` FROM identity_provider;"
|
||||||
|
try:
|
||||||
|
cursor = self.mysql_conn.cursor()
|
||||||
|
cursor.execute(query)
|
||||||
|
result = cursor.fetchall()
|
||||||
|
cursor.close()
|
||||||
|
|
||||||
|
iam_settings = {row[0]: row[1] for row in result}
|
||||||
|
|
||||||
|
if iam_settings['authsource'] == "ldap":
|
||||||
|
protocol = "ldaps" if iam_settings.get("use_ssl") else "ldap"
|
||||||
|
starttls = "/????!StartTLS" if iam_settings.get("use_tls") else ""
|
||||||
|
iam_settings['ldap_url'] = f"{protocol}://{iam_settings['host']}:{iam_settings['port']}{starttls}"
|
||||||
|
|
||||||
|
return iam_settings
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Error fetching identity provider settings: {e}")
|
||||||
|
return {}
|
||||||
0
data/Dockerfiles/bootstrap/modules/__init__.py
Normal file
0
data/Dockerfiles/bootstrap/modules/__init__.py
Normal file
@@ -41,7 +41,7 @@ RUN wget -P /src https://www.clamav.net/downloads/production/clamav-${CLAMD_VERS
|
|||||||
-D ENABLE_MILTER=ON \
|
-D ENABLE_MILTER=ON \
|
||||||
-D ENABLE_MAN_PAGES=OFF \
|
-D ENABLE_MAN_PAGES=OFF \
|
||||||
-D ENABLE_STATIC_LIB=OFF \
|
-D ENABLE_STATIC_LIB=OFF \
|
||||||
-D ENABLE_JSON_SHARED=ON \
|
-D ENABLE_JSON_SHARED=ON \
|
||||||
&& cmake --build . \
|
&& cmake --build . \
|
||||||
&& make DESTDIR="/clamav" -j$(($(nproc) - 1)) install \
|
&& make DESTDIR="/clamav" -j$(($(nproc) - 1)) install \
|
||||||
&& rm -r "/clamav/usr/lib/pkgconfig/" \
|
&& rm -r "/clamav/usr/lib/pkgconfig/" \
|
||||||
@@ -88,23 +88,42 @@ RUN apk upgrade --no-cache \
|
|||||||
pcre2 \
|
pcre2 \
|
||||||
zlib \
|
zlib \
|
||||||
libgcc \
|
libgcc \
|
||||||
|
py3-pip \
|
||||||
&& addgroup -S "clamav" && \
|
&& addgroup -S "clamav" && \
|
||||||
adduser -D -G "clamav" -h "/var/lib/clamav" -s "/bin/false" -S "clamav" && \
|
adduser -D -G "clamav" -h "/var/lib/clamav" -s "/bin/false" -S "clamav" && \
|
||||||
install -d -m 755 -g "clamav" -o "clamav" "/var/log/clamav" && \
|
install -d -m 755 -g "clamav" -o "clamav" "/var/log/clamav" && \
|
||||||
chown -R clamav:clamav /var/lib/clamav
|
chown -R clamav:clamav /var/lib/clamav
|
||||||
|
|
||||||
|
RUN apk add --no-cache --virtual .build-deps \
|
||||||
|
gcc \
|
||||||
|
musl-dev \
|
||||||
|
python3-dev \
|
||||||
|
linux-headers \
|
||||||
|
&& pip install --break-system-packages psutil \
|
||||||
|
&& apk del .build-deps
|
||||||
|
|
||||||
|
RUN pip install --break-system-packages \
|
||||||
|
mysql-connector-python \
|
||||||
|
jinja2 \
|
||||||
|
redis \
|
||||||
|
dnspython
|
||||||
|
|
||||||
|
|
||||||
COPY --from=builder "/clamav" "/"
|
COPY --from=builder "/clamav" "/"
|
||||||
|
|
||||||
# init
|
|
||||||
COPY clamd.sh /clamd.sh
|
|
||||||
RUN chmod +x /sbin/tini
|
|
||||||
|
|
||||||
# healthcheck
|
COPY data/Dockerfiles/bootstrap /bootstrap
|
||||||
COPY healthcheck.sh /healthcheck.sh
|
COPY data/Dockerfiles/clamd/docker-entrypoint.sh /docker-entrypoint.sh
|
||||||
COPY clamdcheck.sh /usr/local/bin
|
COPY data/Dockerfiles/clamd/clamd.sh /clamd.sh
|
||||||
RUN chmod +x /healthcheck.sh
|
COPY data/Dockerfiles/clamd/healthcheck.sh /healthcheck.sh
|
||||||
RUN chmod +x /usr/local/bin/clamdcheck.sh
|
COPY data/Dockerfiles/clamd/clamdcheck.sh /usr/local/bin
|
||||||
HEALTHCHECK --start-period=6m CMD "/healthcheck.sh"
|
HEALTHCHECK --start-period=6m CMD "/healthcheck.sh"
|
||||||
|
|
||||||
ENTRYPOINT []
|
RUN chmod +x /docker-entrypoint.sh \
|
||||||
|
/clamd.sh \
|
||||||
|
/healthcheck.sh \
|
||||||
|
/usr/local/bin/clamdcheck.sh \
|
||||||
|
/sbin/tini
|
||||||
|
|
||||||
|
ENTRYPOINT ["/docker-entrypoint.sh"]
|
||||||
CMD ["/sbin/tini", "-g", "--", "/clamd.sh"]
|
CMD ["/sbin/tini", "-g", "--", "/clamd.sh"]
|
||||||
@@ -1,48 +1,5 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
if [[ "${SKIP_CLAMD}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
|
|
||||||
echo "SKIP_CLAMD=y, skipping ClamAV..."
|
|
||||||
sleep 365d
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Cleaning up garbage
|
|
||||||
echo "Cleaning up tmp files..."
|
|
||||||
rm -rf /var/lib/clamav/tmp.*
|
|
||||||
|
|
||||||
# Prepare whitelist
|
|
||||||
|
|
||||||
mkdir -p /run/clamav /var/lib/clamav
|
|
||||||
|
|
||||||
if [[ -s /etc/clamav/whitelist.ign2 ]]; then
|
|
||||||
echo "Copying non-empty whitelist.ign2 to /var/lib/clamav/whitelist.ign2"
|
|
||||||
cp /etc/clamav/whitelist.ign2 /var/lib/clamav/whitelist.ign2
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ ! -f /var/lib/clamav/whitelist.ign2 ]]; then
|
|
||||||
echo "Creating /var/lib/clamav/whitelist.ign2"
|
|
||||||
cat <<EOF > /var/lib/clamav/whitelist.ign2
|
|
||||||
# Please restart ClamAV after changing signatures
|
|
||||||
Example-Signature.Ignore-1
|
|
||||||
PUA.Win.Trojan.EmbeddedPDF-1
|
|
||||||
PUA.Pdf.Trojan.EmbeddedJavaScript-1
|
|
||||||
PUA.Pdf.Trojan.OpenActionObjectwithJavascript-1
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
|
|
||||||
chown clamav:clamav -R /var/lib/clamav /run/clamav
|
|
||||||
|
|
||||||
chmod 755 /var/lib/clamav
|
|
||||||
chmod 644 -R /var/lib/clamav/*
|
|
||||||
chmod 750 /run/clamav
|
|
||||||
|
|
||||||
stat /var/lib/clamav/whitelist.ign2
|
|
||||||
dos2unix /var/lib/clamav/whitelist.ign2
|
|
||||||
sed -i '/^\s*$/d' /var/lib/clamav/whitelist.ign2
|
|
||||||
# Copying to /etc/clamav to expose file as-is to administrator
|
|
||||||
cp -p /var/lib/clamav/whitelist.ign2 /etc/clamav/whitelist.ign2
|
|
||||||
|
|
||||||
|
|
||||||
BACKGROUND_TASKS=()
|
BACKGROUND_TASKS=()
|
||||||
|
|
||||||
echo "Running freshclam..."
|
echo "Running freshclam..."
|
||||||
|
|||||||
20
data/Dockerfiles/clamd/docker-entrypoint.sh
Normal file
20
data/Dockerfiles/clamd/docker-entrypoint.sh
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Run hooks
|
||||||
|
for file in /hooks/*; do
|
||||||
|
if [ -x "${file}" ]; then
|
||||||
|
echo "Running hook ${file}"
|
||||||
|
"${file}"
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
python3 -u /bootstrap/main.py
|
||||||
|
BOOTSTRAP_EXIT_CODE=$?
|
||||||
|
|
||||||
|
if [ $BOOTSTRAP_EXIT_CODE -ne 0 ]; then
|
||||||
|
echo "Bootstrap failed with exit code $BOOTSTRAP_EXIT_CODE. Not starting Clamd."
|
||||||
|
exit $BOOTSTRAP_EXIT_CODE
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Bootstrap succeeded. Starting Clamd..."
|
||||||
|
exec "$@"
|
||||||
@@ -19,9 +19,9 @@ RUN apk add --update --no-cache python3 \
|
|||||||
docker
|
docker
|
||||||
RUN mkdir /app/modules
|
RUN mkdir /app/modules
|
||||||
|
|
||||||
COPY docker-entrypoint.sh /app/
|
COPY data/Dockerfiles/dockerapi/docker-entrypoint.sh /app/
|
||||||
COPY main.py /app/main.py
|
COPY data/Dockerfiles/dockerapi/main.py /app/main.py
|
||||||
COPY modules/ /app/modules/
|
COPY data/Dockerfiles/dockerapi/modules/ /app/modules/
|
||||||
|
|
||||||
ENTRYPOINT ["/bin/sh", "/app/docker-entrypoint.sh"]
|
ENTRYPOINT ["/bin/sh", "/app/docker-entrypoint.sh"]
|
||||||
CMD ["python", "main.py"]
|
CMD ["python", "main.py"]
|
||||||
@@ -36,7 +36,7 @@ async def lifespan(app: FastAPI):
|
|||||||
if os.environ['REDIS_SLAVEOF_IP'] != "":
|
if os.environ['REDIS_SLAVEOF_IP'] != "":
|
||||||
redis_client = redis = await aioredis.from_url(f"redis://{os.environ['REDIS_SLAVEOF_IP']}:{os.environ['REDIS_SLAVEOF_PORT']}/0", password=os.environ['REDISPASS'])
|
redis_client = redis = await aioredis.from_url(f"redis://{os.environ['REDIS_SLAVEOF_IP']}:{os.environ['REDIS_SLAVEOF_PORT']}/0", password=os.environ['REDISPASS'])
|
||||||
else:
|
else:
|
||||||
redis_client = redis = await aioredis.from_url("redis://redis-mailcow:6379/0", password=os.environ['REDISPASS'])
|
redis_client = redis = await aioredis.from_url(f"redis://{os.environ['REDIS_HOST']}:6379/0", password=os.environ['REDISPASS'])
|
||||||
|
|
||||||
# Init docker clients
|
# Init docker clients
|
||||||
sync_docker_client = docker.DockerClient(base_url='unix://var/run/docker.sock', version='auto')
|
sync_docker_client = docker.DockerClient(base_url='unix://var/run/docker.sock', version='auto')
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ FROM alpine:3.21
|
|||||||
LABEL maintainer="The Infrastructure Company GmbH <info@servercow.de>"
|
LABEL maintainer="The Infrastructure Company GmbH <info@servercow.de>"
|
||||||
|
|
||||||
# renovate: datasource=github-releases depName=tianon/gosu versioning=semver-coerced extractVersion=^(?<version>.*)$
|
# renovate: datasource=github-releases depName=tianon/gosu versioning=semver-coerced extractVersion=^(?<version>.*)$
|
||||||
ARG GOSU_VERSION=1.17
|
ARG GOSU_VERSION=1.16
|
||||||
|
|
||||||
ENV LANG=C.UTF-8
|
ENV LANG=C.UTF-8
|
||||||
ENV LC_ALL=C.UTF-8
|
ENV LC_ALL=C.UTF-8
|
||||||
@@ -87,11 +87,11 @@ RUN addgroup -g 5000 vmail \
|
|||||||
perl-proc-processtable \
|
perl-proc-processtable \
|
||||||
perl-app-cpanminus \
|
perl-app-cpanminus \
|
||||||
procps \
|
procps \
|
||||||
python3 \
|
python3 py3-pip python3-dev \
|
||||||
py3-mysqlclient \
|
|
||||||
py3-html2text \
|
py3-html2text \
|
||||||
py3-jinja2 \
|
linux-headers \
|
||||||
py3-redis \
|
musl-dev \
|
||||||
|
gcc \
|
||||||
redis \
|
redis \
|
||||||
syslog-ng \
|
syslog-ng \
|
||||||
syslog-ng-redis \
|
syslog-ng-redis \
|
||||||
@@ -115,25 +115,36 @@ RUN addgroup -g 5000 vmail \
|
|||||||
&& chmod +x /usr/local/bin/gosu \
|
&& chmod +x /usr/local/bin/gosu \
|
||||||
&& gosu nobody true
|
&& gosu nobody true
|
||||||
|
|
||||||
COPY trim_logs.sh /usr/local/bin/trim_logs.sh
|
RUN pip install --break-system-packages \
|
||||||
COPY clean_q_aged.sh /usr/local/bin/clean_q_aged.sh
|
mysql-connector-python \
|
||||||
COPY syslog-ng.conf /etc/syslog-ng/syslog-ng.conf
|
jinja2 \
|
||||||
COPY syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng-redis_slave.conf
|
redis \
|
||||||
COPY imapsync /usr/local/bin/imapsync
|
dnspython \
|
||||||
COPY imapsync_runner.pl /usr/local/bin/imapsync_runner.pl
|
psutil
|
||||||
COPY report-spam.sieve /usr/lib/dovecot/sieve/report-spam.sieve
|
|
||||||
COPY report-ham.sieve /usr/lib/dovecot/sieve/report-ham.sieve
|
|
||||||
COPY rspamd-pipe-ham /usr/lib/dovecot/sieve/rspamd-pipe-ham
|
COPY data/Dockerfiles/bootstrap /bootstrap
|
||||||
COPY rspamd-pipe-spam /usr/lib/dovecot/sieve/rspamd-pipe-spam
|
COPY data/Dockerfiles/dovecot/trim_logs.sh /usr/local/bin/trim_logs.sh
|
||||||
COPY sa-rules.sh /usr/local/bin/sa-rules.sh
|
COPY data/Dockerfiles/dovecot/clean_q_aged.sh /usr/local/bin/clean_q_aged.sh
|
||||||
COPY maildir_gc.sh /usr/local/bin/maildir_gc.sh
|
COPY data/Dockerfiles/dovecot/syslog-ng.conf /etc/syslog-ng/syslog-ng.conf
|
||||||
COPY docker-entrypoint.sh /
|
COPY data/Dockerfiles/dovecot/syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng-redis_slave.conf
|
||||||
COPY supervisord.conf /etc/supervisor/supervisord.conf
|
COPY data/Dockerfiles/dovecot/imapsync /usr/local/bin/imapsync
|
||||||
COPY stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh
|
COPY data/Dockerfiles/dovecot/imapsync_runner.pl /usr/local/bin/imapsync_runner.pl
|
||||||
COPY quarantine_notify.py /usr/local/bin/quarantine_notify.py
|
COPY data/Dockerfiles/dovecot/report-spam.sieve /usr/lib/dovecot/sieve/report-spam.sieve
|
||||||
COPY quota_notify.py /usr/local/bin/quota_notify.py
|
COPY data/Dockerfiles/dovecot/report-ham.sieve /usr/lib/dovecot/sieve/report-ham.sieve
|
||||||
COPY repl_health.sh /usr/local/bin/repl_health.sh
|
COPY data/Dockerfiles/dovecot/rspamd-pipe-ham /usr/lib/dovecot/sieve/rspamd-pipe-ham
|
||||||
COPY optimize-fts.sh /usr/local/bin/optimize-fts.sh
|
COPY data/Dockerfiles/dovecot/rspamd-pipe-spam /usr/lib/dovecot/sieve/rspamd-pipe-spam
|
||||||
|
COPY data/Dockerfiles/dovecot/sa-rules.sh /usr/local/bin/sa-rules.sh
|
||||||
|
COPY data/Dockerfiles/dovecot/docker-entrypoint.sh /
|
||||||
|
COPY data/Dockerfiles/dovecot/supervisord.conf /etc/supervisor/supervisord.conf
|
||||||
|
COPY data/Dockerfiles/dovecot/stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh
|
||||||
|
COPY data/Dockerfiles/dovecot/quarantine_notify.py /usr/local/bin/quarantine_notify.py
|
||||||
|
COPY data/Dockerfiles/dovecot/quota_notify.py /usr/local/bin/quota_notify.py
|
||||||
|
COPY data/Dockerfiles/dovecot/repl_health.sh /usr/local/bin/repl_health.sh
|
||||||
|
COPY data/Dockerfiles/dovecot/optimize-fts.sh /usr/local/bin/optimize-fts.sh
|
||||||
|
|
||||||
|
RUN chmod +x /docker-entrypoint.sh \
|
||||||
|
/usr/local/sbin/stop-supervisor.sh
|
||||||
|
|
||||||
|
|
||||||
ENTRYPOINT ["/docker-entrypoint.sh"]
|
|
||||||
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]
|
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]
|
||||||
|
|||||||
@@ -1,253 +1,15 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
set -e
|
|
||||||
|
|
||||||
# Wait for MySQL to warm-up
|
# Run hooks
|
||||||
while ! mariadb-admin status --ssl=false --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
|
for file in /hooks/*; do
|
||||||
echo "Waiting for database to come up..."
|
if [ -x "${file}" ]; then
|
||||||
sleep 2
|
echo "Running hook ${file}"
|
||||||
done
|
"${file}"
|
||||||
|
|
||||||
until dig +short mailcow.email > /dev/null; do
|
|
||||||
echo "Waiting for DNS..."
|
|
||||||
sleep 1
|
|
||||||
done
|
|
||||||
|
|
||||||
# Do not attempt to write to slave
|
|
||||||
if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
|
|
||||||
REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT} -a ${REDISPASS} --no-auth-warning"
|
|
||||||
else
|
|
||||||
REDIS_CMDLINE="redis-cli -h redis -p 6379 -a ${REDISPASS} --no-auth-warning"
|
|
||||||
fi
|
|
||||||
|
|
||||||
until [[ $(${REDIS_CMDLINE} PING) == "PONG" ]]; do
|
|
||||||
echo "Waiting for Redis..."
|
|
||||||
sleep 2
|
|
||||||
done
|
|
||||||
|
|
||||||
${REDIS_CMDLINE} SET DOVECOT_REPL_HEALTH 1 > /dev/null
|
|
||||||
|
|
||||||
# Create missing directories
|
|
||||||
[[ ! -d /etc/dovecot/sql/ ]] && mkdir -p /etc/dovecot/sql/
|
|
||||||
[[ ! -d /etc/dovecot/auth/ ]] && mkdir -p /etc/dovecot/auth/
|
|
||||||
[[ ! -d /etc/dovecot/conf.d/ ]] && mkdir -p /etc/dovecot/conf.d/
|
|
||||||
[[ ! -d /var/vmail/_garbage ]] && mkdir -p /var/vmail/_garbage
|
|
||||||
[[ ! -d /var/vmail/sieve ]] && mkdir -p /var/vmail/sieve
|
|
||||||
[[ ! -d /etc/sogo ]] && mkdir -p /etc/sogo
|
|
||||||
[[ ! -d /var/volatile ]] && mkdir -p /var/volatile
|
|
||||||
|
|
||||||
# Set Dovecot sql config parameters, escape " in db password
|
|
||||||
DBPASS=$(echo ${DBPASS} | sed 's/"/\\"/g')
|
|
||||||
|
|
||||||
# Create quota dict for Dovecot
|
|
||||||
if [[ "${MASTER}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
|
|
||||||
QUOTA_TABLE=quota2
|
|
||||||
else
|
|
||||||
QUOTA_TABLE=quota2replica
|
|
||||||
fi
|
|
||||||
cat <<EOF > /etc/dovecot/sql/dovecot-dict-sql-quota.conf
|
|
||||||
# Autogenerated by mailcow
|
|
||||||
connect = "host=/var/run/mysqld/mysqld.sock dbname=${DBNAME} user=${DBUSER} password=${DBPASS}"
|
|
||||||
map {
|
|
||||||
pattern = priv/quota/storage
|
|
||||||
table = ${QUOTA_TABLE}
|
|
||||||
username_field = username
|
|
||||||
value_field = bytes
|
|
||||||
}
|
|
||||||
map {
|
|
||||||
pattern = priv/quota/messages
|
|
||||||
table = ${QUOTA_TABLE}
|
|
||||||
username_field = username
|
|
||||||
value_field = messages
|
|
||||||
}
|
|
||||||
EOF
|
|
||||||
|
|
||||||
# Create dict used for sieve pre and postfilters
|
|
||||||
cat <<EOF > /etc/dovecot/sql/dovecot-dict-sql-sieve_before.conf
|
|
||||||
# Autogenerated by mailcow
|
|
||||||
connect = "host=/var/run/mysqld/mysqld.sock dbname=${DBNAME} user=${DBUSER} password=${DBPASS}"
|
|
||||||
map {
|
|
||||||
pattern = priv/sieve/name/\$script_name
|
|
||||||
table = sieve_before
|
|
||||||
username_field = username
|
|
||||||
value_field = id
|
|
||||||
fields {
|
|
||||||
script_name = \$script_name
|
|
||||||
}
|
|
||||||
}
|
|
||||||
map {
|
|
||||||
pattern = priv/sieve/data/\$id
|
|
||||||
table = sieve_before
|
|
||||||
username_field = username
|
|
||||||
value_field = script_data
|
|
||||||
fields {
|
|
||||||
id = \$id
|
|
||||||
}
|
|
||||||
}
|
|
||||||
EOF
|
|
||||||
|
|
||||||
cat <<EOF > /etc/dovecot/sql/dovecot-dict-sql-sieve_after.conf
|
|
||||||
# Autogenerated by mailcow
|
|
||||||
connect = "host=/var/run/mysqld/mysqld.sock dbname=${DBNAME} user=${DBUSER} password=${DBPASS}"
|
|
||||||
map {
|
|
||||||
pattern = priv/sieve/name/\$script_name
|
|
||||||
table = sieve_after
|
|
||||||
username_field = username
|
|
||||||
value_field = id
|
|
||||||
fields {
|
|
||||||
script_name = \$script_name
|
|
||||||
}
|
|
||||||
}
|
|
||||||
map {
|
|
||||||
pattern = priv/sieve/data/\$id
|
|
||||||
table = sieve_after
|
|
||||||
username_field = username
|
|
||||||
value_field = script_data
|
|
||||||
fields {
|
|
||||||
id = \$id
|
|
||||||
}
|
|
||||||
}
|
|
||||||
EOF
|
|
||||||
|
|
||||||
echo -n ${ACL_ANYONE} > /etc/dovecot/acl_anyone
|
|
||||||
|
|
||||||
if [[ "${SKIP_FTS}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
|
|
||||||
echo -e "\e[33mDetecting SKIP_FTS=y... not enabling Flatcurve (FTS) then...\e[0m"
|
|
||||||
echo -n 'quota acl zlib mail_crypt mail_crypt_acl mail_log notify listescape replication lazy_expunge' > /etc/dovecot/mail_plugins
|
|
||||||
echo -n 'quota imap_quota imap_acl acl zlib imap_zlib imap_sieve mail_crypt mail_crypt_acl notify listescape replication mail_log' > /etc/dovecot/mail_plugins_imap
|
|
||||||
echo -n 'quota sieve acl zlib mail_crypt mail_crypt_acl notify listescape replication' > /etc/dovecot/mail_plugins_lmtp
|
|
||||||
else
|
|
||||||
echo -e "\e[32mDetecting SKIP_FTS=n... enabling Flatcurve (FTS)\e[0m"
|
|
||||||
echo -n 'quota acl zlib mail_crypt mail_crypt_acl mail_log notify fts fts_flatcurve listescape replication lazy_expunge' > /etc/dovecot/mail_plugins
|
|
||||||
echo -n 'quota imap_quota imap_acl acl zlib imap_zlib imap_sieve mail_crypt mail_crypt_acl notify mail_log fts fts_flatcurve listescape replication' > /etc/dovecot/mail_plugins_imap
|
|
||||||
echo -n 'quota sieve acl zlib mail_crypt mail_crypt_acl fts fts_flatcurve notify listescape replication' > /etc/dovecot/mail_plugins_lmtp
|
|
||||||
fi
|
|
||||||
chmod 644 /etc/dovecot/mail_plugins /etc/dovecot/mail_plugins_imap /etc/dovecot/mail_plugins_lmtp /templates/quarantine.tpl
|
|
||||||
|
|
||||||
cat <<EOF > /etc/dovecot/sql/dovecot-dict-sql-userdb.conf
|
|
||||||
# Autogenerated by mailcow
|
|
||||||
driver = mysql
|
|
||||||
connect = "host=/var/run/mysqld/mysqld.sock dbname=${DBNAME} user=${DBUSER} password=${DBPASS}"
|
|
||||||
user_query = SELECT CONCAT(JSON_UNQUOTE(JSON_VALUE(attributes, '$.mailbox_format')), mailbox_path_prefix, '%d/%n/${MAILDIR_SUB}:VOLATILEDIR=/var/volatile/%u:INDEX=/var/vmail_index/%u') AS mail, '%s' AS protocol, 5000 AS uid, 5000 AS gid, concat('*:bytes=', quota) AS quota_rule FROM mailbox WHERE username = '%u' AND (active = '1' OR active = '2')
|
|
||||||
iterate_query = SELECT username FROM mailbox WHERE active = '1' OR active = '2';
|
|
||||||
EOF
|
|
||||||
|
|
||||||
|
|
||||||
# Migrate old sieve_after file
|
|
||||||
[[ -f /etc/dovecot/sieve_after ]] && mv /etc/dovecot/sieve_after /etc/dovecot/global_sieve_after
|
|
||||||
# Create global sieve scripts
|
|
||||||
cat /etc/dovecot/global_sieve_after > /var/vmail/sieve/global_sieve_after.sieve
|
|
||||||
cat /etc/dovecot/global_sieve_before > /var/vmail/sieve/global_sieve_before.sieve
|
|
||||||
|
|
||||||
# Check permissions of vmail/index/garbage directories.
|
|
||||||
# Do not do this every start-up, it may take a very long time. So we use a stat check here.
|
|
||||||
if [[ $(stat -c %U /var/vmail/) != "vmail" ]] ; then chown -R vmail:vmail /var/vmail ; fi
|
|
||||||
if [[ $(stat -c %U /var/vmail/_garbage) != "vmail" ]] ; then chown -R vmail:vmail /var/vmail/_garbage ; fi
|
|
||||||
if [[ $(stat -c %U /var/vmail_index) != "vmail" ]] ; then chown -R vmail:vmail /var/vmail_index ; fi
|
|
||||||
|
|
||||||
# Cleanup random user maildirs
|
|
||||||
rm -rf /var/vmail/mailcow.local/*
|
|
||||||
# Cleanup PIDs
|
|
||||||
[[ -f /tmp/quarantine_notify.pid ]] && rm /tmp/quarantine_notify.pid
|
|
||||||
|
|
||||||
# create sni configuration
|
|
||||||
echo "" > /etc/dovecot/sni.conf
|
|
||||||
for cert_dir in /etc/ssl/mail/*/ ; do
|
|
||||||
if [[ ! -f ${cert_dir}domains ]] || [[ ! -f ${cert_dir}cert.pem ]] || [[ ! -f ${cert_dir}key.pem ]]; then
|
|
||||||
continue
|
|
||||||
fi
|
fi
|
||||||
domains=($(cat ${cert_dir}domains))
|
|
||||||
for domain in ${domains[@]}; do
|
|
||||||
echo 'local_name '${domain}' {' >> /etc/dovecot/sni.conf;
|
|
||||||
echo ' ssl_cert = <'${cert_dir}'cert.pem' >> /etc/dovecot/sni.conf;
|
|
||||||
echo ' ssl_key = <'${cert_dir}'key.pem' >> /etc/dovecot/sni.conf;
|
|
||||||
echo '}' >> /etc/dovecot/sni.conf;
|
|
||||||
done
|
|
||||||
done
|
done
|
||||||
|
|
||||||
# Create random master for SOGo sieve features
|
python3 -u /bootstrap/main.py
|
||||||
RAND_USER=$(cat /dev/urandom | tr -dc 'a-z0-9' | fold -w 16 | head -n 1)
|
BOOTSTRAP_EXIT_CODE=$?
|
||||||
RAND_PASS=$(cat /dev/urandom | tr -dc 'a-z0-9' | fold -w 24 | head -n 1)
|
|
||||||
|
|
||||||
if [[ ! -z ${DOVECOT_MASTER_USER} ]] && [[ ! -z ${DOVECOT_MASTER_PASS} ]]; then
|
|
||||||
RAND_USER=${DOVECOT_MASTER_USER}
|
|
||||||
RAND_PASS=${DOVECOT_MASTER_PASS}
|
|
||||||
fi
|
|
||||||
echo ${RAND_USER}@mailcow.local:{SHA1}$(echo -n ${RAND_PASS} | sha1sum | awk '{print $1}'):::::: > /etc/dovecot/dovecot-master.passwd
|
|
||||||
echo ${RAND_USER}@mailcow.local::5000:5000:::: > /etc/dovecot/dovecot-master.userdb
|
|
||||||
echo ${RAND_USER}@mailcow.local:${RAND_PASS} > /etc/sogo/sieve.creds
|
|
||||||
|
|
||||||
if [[ -z ${MAILDIR_SUB} ]]; then
|
|
||||||
MAILDIR_SUB_SHARED=
|
|
||||||
else
|
|
||||||
MAILDIR_SUB_SHARED=/${MAILDIR_SUB}
|
|
||||||
fi
|
|
||||||
cat <<EOF > /etc/dovecot/shared_namespace.conf
|
|
||||||
# Autogenerated by mailcow
|
|
||||||
namespace {
|
|
||||||
type = shared
|
|
||||||
separator = /
|
|
||||||
prefix = Shared/%%u/
|
|
||||||
location = maildir:%%h${MAILDIR_SUB_SHARED}:INDEX=~${MAILDIR_SUB_SHARED}/Shared/%%u
|
|
||||||
subscriptions = no
|
|
||||||
list = children
|
|
||||||
}
|
|
||||||
EOF
|
|
||||||
|
|
||||||
|
|
||||||
cat <<EOF > /etc/dovecot/sogo_trusted_ip.conf
|
|
||||||
# Autogenerated by mailcow
|
|
||||||
remote ${IPV4_NETWORK}.248 {
|
|
||||||
disable_plaintext_auth = no
|
|
||||||
}
|
|
||||||
EOF
|
|
||||||
|
|
||||||
# Create random master Password for SOGo SSO
|
|
||||||
RAND_PASS=$(cat /dev/urandom | tr -dc 'a-z0-9' | fold -w 32 | head -n 1)
|
|
||||||
echo -n ${RAND_PASS} > /etc/phpfpm/sogo-sso.pass
|
|
||||||
# Creating additional creds file for SOGo notify crons (calendars, etc)
|
|
||||||
echo -n ${RAND_USER}@mailcow.local:${RAND_PASS} > /etc/sogo/cron.creds
|
|
||||||
cat <<EOF > /etc/dovecot/sogo-sso.conf
|
|
||||||
# Autogenerated by mailcow
|
|
||||||
passdb {
|
|
||||||
driver = static
|
|
||||||
args = allow_real_nets=${IPV4_NETWORK}.248/32 password={plain}${RAND_PASS}
|
|
||||||
}
|
|
||||||
EOF
|
|
||||||
|
|
||||||
if [[ "${MASTER}" =~ ^([nN][oO]|[nN])+$ ]]; then
|
|
||||||
# Toggling MASTER will result in a rebuild of containers, so the quota script will be recreated
|
|
||||||
cat <<'EOF' > /usr/local/bin/quota_notify.py
|
|
||||||
#!/usr/bin/python3
|
|
||||||
import sys
|
|
||||||
sys.exit()
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Set mail_replica for HA setups
|
|
||||||
if [[ -n ${MAILCOW_REPLICA_IP} && -n ${DOVEADM_REPLICA_PORT} ]]; then
|
|
||||||
cat <<EOF > /etc/dovecot/mail_replica.conf
|
|
||||||
# Autogenerated by mailcow
|
|
||||||
mail_replica = tcp:${MAILCOW_REPLICA_IP}:${DOVEADM_REPLICA_PORT}
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Setting variables for indexer-worker inside fts.conf automatically according to mailcow.conf settings
|
|
||||||
if [[ "${SKIP_FTS}" =~ ^([nN][oO]|[nN])+$ ]]; then
|
|
||||||
echo -e "\e[94mConfiguring FTS Settings...\e[0m"
|
|
||||||
echo -e "\e[94mSetting FTS Memory Limit (per process) to ${FTS_HEAP} MB\e[0m"
|
|
||||||
sed -i "s/vsz_limit\s*=\s*[0-9]*\s*MB*/vsz_limit=${FTS_HEAP} MB/" /etc/dovecot/conf.d/fts.conf
|
|
||||||
echo -e "\e[94mSetting FTS Process Limit to ${FTS_PROCS}\e[0m"
|
|
||||||
sed -i "s/process_limit\s*=\s*[0-9]*/process_limit=${FTS_PROCS}/" /etc/dovecot/conf.d/fts.conf
|
|
||||||
fi
|
|
||||||
|
|
||||||
# 401 is user dovecot
|
|
||||||
if [[ ! -s /mail_crypt/ecprivkey.pem || ! -s /mail_crypt/ecpubkey.pem ]]; then
|
|
||||||
openssl ecparam -name prime256v1 -genkey | openssl pkey -out /mail_crypt/ecprivkey.pem
|
|
||||||
openssl pkey -in /mail_crypt/ecprivkey.pem -pubout -out /mail_crypt/ecpubkey.pem
|
|
||||||
chown 401 /mail_crypt/ecprivkey.pem /mail_crypt/ecpubkey.pem
|
|
||||||
else
|
|
||||||
chown 401 /mail_crypt/ecprivkey.pem /mail_crypt/ecpubkey.pem
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Fix OpenSSL 3.X TLS1.0, 1.1 support (https://community.mailcow.email/d/4062-hi-all/20)
|
# Fix OpenSSL 3.X TLS1.0, 1.1 support (https://community.mailcow.email/d/4062-hi-all/20)
|
||||||
if grep -qE 'ssl_min_protocol\s*=\s*(TLSv1|TLSv1\.1)\s*$' /etc/dovecot/dovecot.conf /etc/dovecot/extra.conf; then
|
if grep -qE 'ssl_min_protocol\s*=\s*(TLSv1|TLSv1\.1)\s*$' /etc/dovecot/dovecot.conf /etc/dovecot/extra.conf; then
|
||||||
@@ -260,89 +22,10 @@ if grep -qE 'ssl_min_protocol\s*=\s*(TLSv1|TLSv1\.1)\s*$' /etc/dovecot/dovecot.c
|
|||||||
echo "CipherString = DEFAULT@SECLEVEL=0" >> /etc/ssl/openssl.cnf
|
echo "CipherString = DEFAULT@SECLEVEL=0" >> /etc/ssl/openssl.cnf
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Compile sieve scripts
|
if [ $BOOTSTRAP_EXIT_CODE -ne 0 ]; then
|
||||||
sievec /var/vmail/sieve/global_sieve_before.sieve
|
echo "Bootstrap failed with exit code $BOOTSTRAP_EXIT_CODE. Not starting Dovecot."
|
||||||
sievec /var/vmail/sieve/global_sieve_after.sieve
|
exit $BOOTSTRAP_EXIT_CODE
|
||||||
sievec /usr/lib/dovecot/sieve/report-spam.sieve
|
|
||||||
sievec /usr/lib/dovecot/sieve/report-ham.sieve
|
|
||||||
|
|
||||||
# Fix permissions
|
|
||||||
chown root:root /etc/dovecot/sql/*.conf
|
|
||||||
chown root:dovecot /etc/dovecot/sql/dovecot-dict-sql-sieve* /etc/dovecot/sql/dovecot-dict-sql-quota* /etc/dovecot/auth/passwd-verify.lua
|
|
||||||
chmod 640 /etc/dovecot/sql/*.conf /etc/dovecot/auth/passwd-verify.lua
|
|
||||||
chown -R vmail:vmail /var/vmail/sieve
|
|
||||||
chown -R vmail:vmail /var/volatile
|
|
||||||
chown -R vmail:vmail /var/vmail_index
|
|
||||||
adduser vmail tty
|
|
||||||
chmod g+rw /dev/console
|
|
||||||
chown root:tty /dev/console
|
|
||||||
chmod +x /usr/lib/dovecot/sieve/rspamd-pipe-ham \
|
|
||||||
/usr/lib/dovecot/sieve/rspamd-pipe-spam \
|
|
||||||
/usr/local/bin/imapsync_runner.pl \
|
|
||||||
/usr/local/bin/imapsync \
|
|
||||||
/usr/local/bin/trim_logs.sh \
|
|
||||||
/usr/local/bin/sa-rules.sh \
|
|
||||||
/usr/local/bin/clean_q_aged.sh \
|
|
||||||
/usr/local/bin/maildir_gc.sh \
|
|
||||||
/usr/local/sbin/stop-supervisor.sh \
|
|
||||||
/usr/local/bin/quota_notify.py \
|
|
||||||
/usr/local/bin/repl_health.sh \
|
|
||||||
/usr/local/bin/optimize-fts.sh
|
|
||||||
|
|
||||||
# Prepare environment file for cronjobs
|
|
||||||
printenv | sed 's/^\(.*\)$/export \1/g' > /source_env.sh
|
|
||||||
|
|
||||||
# Clean old PID if any
|
|
||||||
[[ -f /var/run/dovecot/master.pid ]] && rm /var/run/dovecot/master.pid
|
|
||||||
|
|
||||||
# Clean stopped imapsync jobs
|
|
||||||
rm -f /tmp/imapsync_busy.lock
|
|
||||||
IMAPSYNC_TABLE=$(mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SHOW TABLES LIKE 'imapsync'" -Bs)
|
|
||||||
[[ ! -z ${IMAPSYNC_TABLE} ]] && mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "UPDATE imapsync SET is_running='0'"
|
|
||||||
|
|
||||||
# Envsubst maildir_gc
|
|
||||||
echo "$(envsubst < /usr/local/bin/maildir_gc.sh)" > /usr/local/bin/maildir_gc.sh
|
|
||||||
|
|
||||||
# GUID generation
|
|
||||||
while [[ ${VERSIONS_OK} != 'OK' ]]; do
|
|
||||||
if [[ ! -z $(mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -B -e "SELECT 'OK' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = \"${DBNAME}\" AND TABLE_NAME = 'versions'") ]]; then
|
|
||||||
VERSIONS_OK=OK
|
|
||||||
else
|
|
||||||
echo "Waiting for versions table to be created..."
|
|
||||||
sleep 3
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
PUBKEY_MCRYPT=$(doveconf -P 2> /dev/null | grep -i mail_crypt_global_public_key | cut -d '<' -f2)
|
|
||||||
if [ -f ${PUBKEY_MCRYPT} ]; then
|
|
||||||
GUID=$(cat <(echo ${MAILCOW_HOSTNAME}) /mail_crypt/ecpubkey.pem | sha256sum | cut -d ' ' -f1 | tr -cd "[a-fA-F0-9.:/] ")
|
|
||||||
if [ ${#GUID} -eq 64 ]; then
|
|
||||||
mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
|
|
||||||
REPLACE INTO versions (application, version) VALUES ("GUID", "${GUID}");
|
|
||||||
EOF
|
|
||||||
else
|
|
||||||
mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
|
|
||||||
REPLACE INTO versions (application, version) VALUES ("GUID", "INVALID");
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Collect SA rules once now
|
echo "Bootstrap succeeded. Starting Dovecot..."
|
||||||
/usr/local/bin/sa-rules.sh
|
/usr/sbin/dovecot -F
|
||||||
|
|
||||||
# Run hooks
|
|
||||||
for file in /hooks/*; do
|
|
||||||
if [ -x "${file}" ]; then
|
|
||||||
echo "Running hook ${file}"
|
|
||||||
"${file}"
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
# For some strange, unknown and stupid reason, Dovecot may run into a race condition, when this file is not touched before it is read by dovecot/auth
|
|
||||||
# May be related to something inside Docker, I seriously don't know
|
|
||||||
touch /etc/dovecot/auth/passwd-verify.lua
|
|
||||||
|
|
||||||
if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
|
|
||||||
cp /etc/syslog-ng/syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng.conf
|
|
||||||
fi
|
|
||||||
|
|
||||||
exec "$@"
|
|
||||||
|
|||||||
@@ -132,8 +132,8 @@ while ($row = $sth->fetchrow_arrayref()) {
|
|||||||
"--tmpdir", "/tmp",
|
"--tmpdir", "/tmp",
|
||||||
"--nofoldersizes",
|
"--nofoldersizes",
|
||||||
"--addheader",
|
"--addheader",
|
||||||
($timeout1 le "0" ? () : ('--timeout1', $timeout1)),
|
($timeout1 gt "0" ? () : ('--timeout1', $timeout1)),
|
||||||
($timeout2 le "0" ? () : ('--timeout2', $timeout2)),
|
($timeout2 gt "0" ? () : ('--timeout2', $timeout2)),
|
||||||
($exclude eq "" ? () : ("--exclude", $exclude)),
|
($exclude eq "" ? () : ("--exclude", $exclude)),
|
||||||
($subfolder2 eq "" ? () : ('--subfolder2', $subfolder2)),
|
($subfolder2 eq "" ? () : ('--subfolder2', $subfolder2)),
|
||||||
($maxage eq "0" ? () : ('--maxage', $maxage)),
|
($maxage eq "0" ? () : ('--maxage', $maxage)),
|
||||||
|
|||||||
@@ -1,2 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
[ -d /var/vmail/_garbage/ ] && /usr/bin/find /var/vmail/_garbage/ -mindepth 1 -maxdepth 1 -type d -cmin +${MAILDIR_GC_TIME} -exec rm -r {} \;
|
|
||||||
@@ -8,8 +8,7 @@ from email.mime.multipart import MIMEMultipart
|
|||||||
from email.mime.text import MIMEText
|
from email.mime.text import MIMEText
|
||||||
from email.utils import COMMASPACE, formatdate
|
from email.utils import COMMASPACE, formatdate
|
||||||
import jinja2
|
import jinja2
|
||||||
from jinja2 import TemplateError
|
from jinja2 import Template
|
||||||
from jinja2.sandbox import SandboxedEnvironment
|
|
||||||
import json
|
import json
|
||||||
import redis
|
import redis
|
||||||
import time
|
import time
|
||||||
@@ -76,27 +75,22 @@ try:
|
|||||||
|
|
||||||
def notify_rcpt(rcpt, msg_count, quarantine_acl, category):
|
def notify_rcpt(rcpt, msg_count, quarantine_acl, category):
|
||||||
if category == "add_header": category = "add header"
|
if category == "add_header": category = "add header"
|
||||||
meta_query = query_mysql('SELECT `qhash`, id, subject, score, sender, created, action FROM quarantine WHERE notified = 0 AND rcpt = "%s" AND score < %f AND (action = "%s" OR "all" = "%s")' % (rcpt, max_score, category, category))
|
meta_query = query_mysql('SELECT SHA2(CONCAT(id, qid), 256) AS qhash, id, subject, score, sender, created, action FROM quarantine WHERE notified = 0 AND rcpt = "%s" AND score < %f AND (action = "%s" OR "all" = "%s")' % (rcpt, max_score, category, category))
|
||||||
print("%s: %d of %d messages qualify for notification" % (rcpt, len(meta_query), msg_count))
|
print("%s: %d of %d messages qualify for notification" % (rcpt, len(meta_query), msg_count))
|
||||||
if len(meta_query) == 0:
|
if len(meta_query) == 0:
|
||||||
return
|
return
|
||||||
msg_count = len(meta_query)
|
msg_count = len(meta_query)
|
||||||
env = SandboxedEnvironment()
|
|
||||||
if r.get('Q_HTML'):
|
if r.get('Q_HTML'):
|
||||||
try:
|
try:
|
||||||
template = env.from_string(r.get('Q_HTML'))
|
template = Template(r.get('Q_HTML'))
|
||||||
except Exception:
|
except:
|
||||||
print("Error: Cannot parse quarantine template, falling back to default template.")
|
print("Error: Cannot parse quarantine template, falling back to default template.")
|
||||||
with open('/templates/quarantine.tpl') as file_:
|
|
||||||
template = env.from_string(file_.read())
|
|
||||||
else:
|
|
||||||
with open('/templates/quarantine.tpl') as file_:
|
with open('/templates/quarantine.tpl') as file_:
|
||||||
template = env.from_string(file_.read())
|
template = Template(file_.read())
|
||||||
try:
|
else:
|
||||||
html = template.render(meta=meta_query, username=rcpt, counter=msg_count, hostname=mailcow_hostname, quarantine_acl=quarantine_acl)
|
with open('/templates/quarantine.tpl') as file_:
|
||||||
except (jinja2.exceptions.SecurityError, TemplateError) as ex:
|
template = Template(file_.read())
|
||||||
print(f"SecurityError or TemplateError in template rendering: {ex}")
|
html = template.render(meta=meta_query, username=rcpt, counter=msg_count, hostname=mailcow_hostname, quarantine_acl=quarantine_acl)
|
||||||
return
|
|
||||||
text = html2text.html2text(html)
|
text = html2text.html2text(html)
|
||||||
count = 0
|
count = 0
|
||||||
while count < 15:
|
while count < 15:
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ from email.mime.multipart import MIMEMultipart
|
|||||||
from email.mime.text import MIMEText
|
from email.mime.text import MIMEText
|
||||||
from email.utils import COMMASPACE, formatdate
|
from email.utils import COMMASPACE, formatdate
|
||||||
import jinja2
|
import jinja2
|
||||||
from jinja2.sandbox import SandboxedEnvironment
|
from jinja2 import Template
|
||||||
import redis
|
import redis
|
||||||
import time
|
import time
|
||||||
import json
|
import json
|
||||||
@@ -14,6 +14,11 @@ import sys
|
|||||||
import html2text
|
import html2text
|
||||||
from subprocess import Popen, PIPE, STDOUT
|
from subprocess import Popen, PIPE, STDOUT
|
||||||
|
|
||||||
|
|
||||||
|
# Don't run if role is not master
|
||||||
|
if os.getenv("MASTER").lower() in ["n", "no"]:
|
||||||
|
sys.exit()
|
||||||
|
|
||||||
if len(sys.argv) > 2:
|
if len(sys.argv) > 2:
|
||||||
percent = int(sys.argv[1])
|
percent = int(sys.argv[1])
|
||||||
username = str(sys.argv[2])
|
username = str(sys.argv[2])
|
||||||
@@ -33,24 +38,16 @@ while True:
|
|||||||
|
|
||||||
if r.get('QW_HTML'):
|
if r.get('QW_HTML'):
|
||||||
try:
|
try:
|
||||||
env = SandboxedEnvironment()
|
template = Template(r.get('QW_HTML'))
|
||||||
template = env.from_string(r.get('QW_HTML'))
|
except:
|
||||||
except Exception:
|
print("Error: Cannot parse quarantine template, falling back to default template.")
|
||||||
print("Error: Cannot parse quota template, falling back to default template.")
|
|
||||||
with open('/templates/quota.tpl') as file_:
|
with open('/templates/quota.tpl') as file_:
|
||||||
env = SandboxedEnvironment()
|
template = Template(file_.read())
|
||||||
template = env.from_string(file_.read())
|
|
||||||
else:
|
else:
|
||||||
with open('/templates/quota.tpl') as file_:
|
with open('/templates/quota.tpl') as file_:
|
||||||
env = SandboxedEnvironment()
|
template = Template(file_.read())
|
||||||
template = env.from_string(file_.read())
|
|
||||||
|
|
||||||
try:
|
|
||||||
html = template.render(username=username, percent=percent)
|
|
||||||
except (jinja2.exceptions.SecurityError, jinja2.TemplateError) as ex:
|
|
||||||
print(f"SecurityError or TemplateError in template rendering: {ex}")
|
|
||||||
sys.exit(1)
|
|
||||||
|
|
||||||
|
html = template.render(username=username, percent=percent)
|
||||||
text = html2text.html2text(html)
|
text = html2text.html2text(html)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
|||||||
@@ -11,8 +11,8 @@ stderr_logfile=/dev/stderr
|
|||||||
stderr_logfile_maxbytes=0
|
stderr_logfile_maxbytes=0
|
||||||
autostart=true
|
autostart=true
|
||||||
|
|
||||||
[program:dovecot]
|
[program:bootstrap]
|
||||||
command=/usr/sbin/dovecot -F
|
command=/docker-entrypoint.sh
|
||||||
stdout_logfile=/dev/stdout
|
stdout_logfile=/dev/stdout
|
||||||
stdout_logfile_maxbytes=0
|
stdout_logfile_maxbytes=0
|
||||||
stderr_logfile=/dev/stderr
|
stderr_logfile=/dev/stderr
|
||||||
|
|||||||
28
data/Dockerfiles/mariadb/Dockerfile
Normal file
28
data/Dockerfiles/mariadb/Dockerfile
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
FROM mariadb:10.11
|
||||||
|
|
||||||
|
LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"
|
||||||
|
|
||||||
|
|
||||||
|
RUN apt-get update && \
|
||||||
|
apt-get install -y --no-install-recommends \
|
||||||
|
python3 \
|
||||||
|
python3-pip \
|
||||||
|
&& apt-get clean \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
RUN pip install \
|
||||||
|
mysql-connector-python \
|
||||||
|
jinja2 \
|
||||||
|
redis \
|
||||||
|
dnspython \
|
||||||
|
psutil
|
||||||
|
|
||||||
|
|
||||||
|
COPY data/Dockerfiles/bootstrap /bootstrap
|
||||||
|
COPY data/Dockerfiles/mariadb/docker-entrypoint.sh /docker-entrypoint.sh
|
||||||
|
|
||||||
|
RUN chmod +x /docker-entrypoint.sh
|
||||||
|
|
||||||
|
|
||||||
|
ENTRYPOINT ["/docker-entrypoint.sh"]
|
||||||
|
CMD ["mysqld"]
|
||||||
20
data/Dockerfiles/mariadb/docker-entrypoint.sh
Normal file
20
data/Dockerfiles/mariadb/docker-entrypoint.sh
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Run hooks
|
||||||
|
for file in /hooks/*; do
|
||||||
|
if [ -x "${file}" ]; then
|
||||||
|
echo "Running hook ${file}"
|
||||||
|
"${file}"
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
python3 -u /bootstrap/main.py
|
||||||
|
BOOTSTRAP_EXIT_CODE=$?
|
||||||
|
|
||||||
|
if [ $BOOTSTRAP_EXIT_CODE -ne 0 ]; then
|
||||||
|
echo "Bootstrap failed with exit code $BOOTSTRAP_EXIT_CODE. Not starting MariaDB."
|
||||||
|
exit $BOOTSTRAP_EXIT_CODE
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Bootstrap succeeded. Starting MariaDB..."
|
||||||
|
exec /usr/local/bin/docker-entrypoint.sh "$@"
|
||||||
@@ -1,6 +1,6 @@
|
|||||||
#!/bin/sh
|
#!/bin/sh
|
||||||
|
|
||||||
backend=nftables
|
backend=iptables
|
||||||
|
|
||||||
nft list table ip filter &>/dev/null
|
nft list table ip filter &>/dev/null
|
||||||
nftables_found=$?
|
nftables_found=$?
|
||||||
|
|||||||
@@ -1,7 +1,5 @@
|
|||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
DEBUG = False
|
|
||||||
|
|
||||||
import re
|
import re
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
@@ -22,13 +20,10 @@ from modules.Logger import Logger
|
|||||||
from modules.IPTables import IPTables
|
from modules.IPTables import IPTables
|
||||||
from modules.NFTables import NFTables
|
from modules.NFTables import NFTables
|
||||||
|
|
||||||
def logdebug(msg):
|
|
||||||
if DEBUG:
|
|
||||||
logger.logInfo("DEBUG: %s" % msg)
|
|
||||||
|
|
||||||
# Globals
|
# globals
|
||||||
WHITELIST = []
|
WHITELIST = []
|
||||||
BLACKLIST = []
|
BLACKLIST= []
|
||||||
bans = {}
|
bans = {}
|
||||||
quit_now = False
|
quit_now = False
|
||||||
exit_code = 0
|
exit_code = 0
|
||||||
@@ -38,10 +33,12 @@ r = None
|
|||||||
pubsub = None
|
pubsub = None
|
||||||
clear_before_quit = False
|
clear_before_quit = False
|
||||||
|
|
||||||
|
|
||||||
def refreshF2boptions():
|
def refreshF2boptions():
|
||||||
global f2boptions
|
global f2boptions
|
||||||
global quit_now
|
global quit_now
|
||||||
global exit_code
|
global exit_code
|
||||||
|
|
||||||
f2boptions = {}
|
f2boptions = {}
|
||||||
|
|
||||||
if not r.get('F2B_OPTIONS'):
|
if not r.get('F2B_OPTIONS'):
|
||||||
@@ -55,9 +52,8 @@ def refreshF2boptions():
|
|||||||
else:
|
else:
|
||||||
try:
|
try:
|
||||||
f2boptions = json.loads(r.get('F2B_OPTIONS'))
|
f2boptions = json.loads(r.get('F2B_OPTIONS'))
|
||||||
except ValueError as e:
|
except ValueError:
|
||||||
logger.logCrit(
|
logger.logCrit('Error loading F2B options: F2B_OPTIONS is not json')
|
||||||
'Error loading F2B options: F2B_OPTIONS is not json. Exception: %s' % e)
|
|
||||||
quit_now = True
|
quit_now = True
|
||||||
exit_code = 2
|
exit_code = 2
|
||||||
|
|
||||||
@@ -65,15 +61,15 @@ def refreshF2boptions():
|
|||||||
r.set('F2B_OPTIONS', json.dumps(f2boptions, ensure_ascii=False))
|
r.set('F2B_OPTIONS', json.dumps(f2boptions, ensure_ascii=False))
|
||||||
|
|
||||||
def verifyF2boptions(f2boptions):
|
def verifyF2boptions(f2boptions):
|
||||||
verifyF2boption(f2boptions, 'ban_time', 1800)
|
verifyF2boption(f2boptions,'ban_time', 1800)
|
||||||
verifyF2boption(f2boptions, 'max_ban_time', 10000)
|
verifyF2boption(f2boptions,'max_ban_time', 10000)
|
||||||
verifyF2boption(f2boptions, 'ban_time_increment', True)
|
verifyF2boption(f2boptions,'ban_time_increment', True)
|
||||||
verifyF2boption(f2boptions, 'max_attempts', 10)
|
verifyF2boption(f2boptions,'max_attempts', 10)
|
||||||
verifyF2boption(f2boptions, 'retry_window', 600)
|
verifyF2boption(f2boptions,'retry_window', 600)
|
||||||
verifyF2boption(f2boptions, 'netban_ipv4', 32)
|
verifyF2boption(f2boptions,'netban_ipv4', 32)
|
||||||
verifyF2boption(f2boptions, 'netban_ipv6', 128)
|
verifyF2boption(f2boptions,'netban_ipv6', 128)
|
||||||
verifyF2boption(f2boptions, 'banlist_id', str(uuid.uuid4()))
|
verifyF2boption(f2boptions,'banlist_id', str(uuid.uuid4()))
|
||||||
verifyF2boption(f2boptions, 'manage_external', 0)
|
verifyF2boption(f2boptions,'manage_external', 0)
|
||||||
|
|
||||||
def verifyF2boption(f2boptions, f2boption, f2bdefault):
|
def verifyF2boption(f2boptions, f2boption, f2bdefault):
|
||||||
f2boptions[f2boption] = f2boptions[f2boption] if f2boption in f2boptions and f2boptions[f2boption] is not None else f2bdefault
|
f2boptions[f2boption] = f2boptions[f2boption] if f2boption in f2boptions and f2boptions[f2boption] is not None else f2bdefault
|
||||||
@@ -115,7 +111,7 @@ def get_ip(address):
|
|||||||
def ban(address):
|
def ban(address):
|
||||||
global f2boptions
|
global f2boptions
|
||||||
global lock
|
global lock
|
||||||
logdebug("ban() called with address=%s" % address)
|
|
||||||
refreshF2boptions()
|
refreshF2boptions()
|
||||||
MAX_ATTEMPTS = int(f2boptions['max_attempts'])
|
MAX_ATTEMPTS = int(f2boptions['max_attempts'])
|
||||||
RETRY_WINDOW = int(f2boptions['retry_window'])
|
RETRY_WINDOW = int(f2boptions['retry_window'])
|
||||||
@@ -123,43 +119,31 @@ def ban(address):
|
|||||||
NETBAN_IPV6 = '/' + str(f2boptions['netban_ipv6'])
|
NETBAN_IPV6 = '/' + str(f2boptions['netban_ipv6'])
|
||||||
|
|
||||||
ip = get_ip(address)
|
ip = get_ip(address)
|
||||||
if not ip:
|
if not ip: return
|
||||||
logdebug("No valid IP -- skipping ban()")
|
|
||||||
return
|
|
||||||
address = str(ip)
|
address = str(ip)
|
||||||
self_network = ipaddress.ip_network(address)
|
self_network = ipaddress.ip_network(address)
|
||||||
|
|
||||||
with lock:
|
with lock:
|
||||||
temp_whitelist = set(WHITELIST)
|
temp_whitelist = set(WHITELIST)
|
||||||
logdebug("Checking if %s overlaps with any WHITELIST entries" % self_network)
|
if temp_whitelist:
|
||||||
if temp_whitelist:
|
for wl_key in temp_whitelist:
|
||||||
for wl_key in temp_whitelist:
|
wl_net = ipaddress.ip_network(wl_key, False)
|
||||||
wl_net = ipaddress.ip_network(wl_key, False)
|
if wl_net.overlaps(self_network):
|
||||||
logdebug("Checking overlap between %s and %s" % (self_network, wl_net))
|
logger.logInfo('Address %s is whitelisted by rule %s' % (self_network, wl_net))
|
||||||
if wl_net.overlaps(self_network):
|
return
|
||||||
logger.logInfo(
|
|
||||||
'Address %s is allowlisted by rule %s' % (self_network, wl_net))
|
|
||||||
return
|
|
||||||
|
|
||||||
net = ipaddress.ip_network(
|
net = ipaddress.ip_network((address + (NETBAN_IPV4 if type(ip) is ipaddress.IPv4Address else NETBAN_IPV6)), strict=False)
|
||||||
(address + (NETBAN_IPV4 if type(ip) is ipaddress.IPv4Address else NETBAN_IPV6)), strict=False)
|
|
||||||
net = str(net)
|
net = str(net)
|
||||||
logdebug("Ban net: %s" % net)
|
|
||||||
|
|
||||||
if not net in bans:
|
if not net in bans:
|
||||||
bans[net] = {'attempts': 0, 'last_attempt': 0, 'ban_counter': 0}
|
bans[net] = {'attempts': 0, 'last_attempt': 0, 'ban_counter': 0}
|
||||||
logdebug("Initing new ban counter for %s" % net)
|
|
||||||
|
|
||||||
current_attempt = time.time()
|
current_attempt = time.time()
|
||||||
logdebug("Current attempt ts=%s, previous: %s, retry_window: %s" %
|
|
||||||
(current_attempt, bans[net]['last_attempt'], RETRY_WINDOW))
|
|
||||||
if current_attempt - bans[net]['last_attempt'] > RETRY_WINDOW:
|
if current_attempt - bans[net]['last_attempt'] > RETRY_WINDOW:
|
||||||
bans[net]['attempts'] = 0
|
bans[net]['attempts'] = 0
|
||||||
logdebug("Ban counter for %s reset as window expired" % net)
|
|
||||||
|
|
||||||
bans[net]['attempts'] += 1
|
bans[net]['attempts'] += 1
|
||||||
bans[net]['last_attempt'] = current_attempt
|
bans[net]['last_attempt'] = current_attempt
|
||||||
logdebug("%s attempts now %d" % (net, bans[net]['attempts']))
|
|
||||||
|
|
||||||
if bans[net]['attempts'] >= MAX_ATTEMPTS:
|
if bans[net]['attempts'] >= MAX_ATTEMPTS:
|
||||||
cur_time = int(round(time.time()))
|
cur_time = int(round(time.time()))
|
||||||
@@ -167,41 +151,34 @@ def ban(address):
|
|||||||
logger.logCrit('Banning %s for %d minutes' % (net, NET_BAN_TIME / 60 ))
|
logger.logCrit('Banning %s for %d minutes' % (net, NET_BAN_TIME / 60 ))
|
||||||
if type(ip) is ipaddress.IPv4Address and int(f2boptions['manage_external']) != 1:
|
if type(ip) is ipaddress.IPv4Address and int(f2boptions['manage_external']) != 1:
|
||||||
with lock:
|
with lock:
|
||||||
logdebug("Calling tables.banIPv4(%s)" % net)
|
|
||||||
tables.banIPv4(net)
|
tables.banIPv4(net)
|
||||||
elif int(f2boptions['manage_external']) != 1:
|
elif int(f2boptions['manage_external']) != 1:
|
||||||
with lock:
|
with lock:
|
||||||
logdebug("Calling tables.banIPv6(%s)" % net)
|
|
||||||
tables.banIPv6(net)
|
tables.banIPv6(net)
|
||||||
|
|
||||||
logdebug("Updating F2B_ACTIVE_BANS[%s]=%d" %
|
|
||||||
(net, cur_time + NET_BAN_TIME))
|
|
||||||
r.hset('F2B_ACTIVE_BANS', '%s' % net, cur_time + NET_BAN_TIME)
|
r.hset('F2B_ACTIVE_BANS', '%s' % net, cur_time + NET_BAN_TIME)
|
||||||
else:
|
else:
|
||||||
logger.logWarn('%d more attempts in the next %d seconds until %s is banned' % (
|
logger.logWarn('%d more attempts in the next %d seconds until %s is banned' % (MAX_ATTEMPTS - bans[net]['attempts'], RETRY_WINDOW, net))
|
||||||
MAX_ATTEMPTS - bans[net]['attempts'], RETRY_WINDOW, net))
|
|
||||||
|
|
||||||
def unban(net):
|
def unban(net):
|
||||||
global lock
|
global lock
|
||||||
logdebug("Calling unban() with net=%s" % net)
|
|
||||||
if not net in bans:
|
if not net in bans:
|
||||||
logger.logInfo(
|
logger.logInfo('%s is not banned, skipping unban and deleting from queue (if any)' % net)
|
||||||
'%s is not banned, skipping unban and deleting from queue (if any)' % net)
|
r.hdel('F2B_QUEUE_UNBAN', '%s' % net)
|
||||||
r.hdel('F2B_QUEUE_UNBAN', '%s' % net)
|
return
|
||||||
return
|
|
||||||
logger.logInfo('Unbanning %s' % net)
|
logger.logInfo('Unbanning %s' % net)
|
||||||
if type(ipaddress.ip_network(net)) is ipaddress.IPv4Network:
|
if type(ipaddress.ip_network(net)) is ipaddress.IPv4Network:
|
||||||
with lock:
|
with lock:
|
||||||
logdebug("Calling tables.unbanIPv4(%s)" % net)
|
|
||||||
tables.unbanIPv4(net)
|
tables.unbanIPv4(net)
|
||||||
else:
|
else:
|
||||||
with lock:
|
with lock:
|
||||||
logdebug("Calling tables.unbanIPv6(%s)" % net)
|
|
||||||
tables.unbanIPv6(net)
|
tables.unbanIPv6(net)
|
||||||
|
|
||||||
r.hdel('F2B_ACTIVE_BANS', '%s' % net)
|
r.hdel('F2B_ACTIVE_BANS', '%s' % net)
|
||||||
r.hdel('F2B_QUEUE_UNBAN', '%s' % net)
|
r.hdel('F2B_QUEUE_UNBAN', '%s' % net)
|
||||||
if net in bans:
|
if net in bans:
|
||||||
logdebug("Unban for %s, setting attempts=0, ban_counter+=1" % net)
|
|
||||||
bans[net]['attempts'] = 0
|
bans[net]['attempts'] = 0
|
||||||
bans[net]['ban_counter'] += 1
|
bans[net]['ban_counter'] += 1
|
||||||
|
|
||||||
@@ -227,19 +204,17 @@ def permBan(net, unban=False):
|
|||||||
|
|
||||||
if is_unbanned:
|
if is_unbanned:
|
||||||
r.hdel('F2B_PERM_BANS', '%s' % net)
|
r.hdel('F2B_PERM_BANS', '%s' % net)
|
||||||
logger.logCrit('Removed host/network %s from denylist' % net)
|
logger.logCrit('Removed host/network %s from blacklist' % net)
|
||||||
elif is_banned:
|
elif is_banned:
|
||||||
r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time())))
|
r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time())))
|
||||||
logger.logCrit('Added host/network %s to denylist' % net)
|
logger.logCrit('Added host/network %s to blacklist' % net)
|
||||||
|
|
||||||
def clear():
|
def clear():
|
||||||
global lock
|
global lock
|
||||||
logger.logInfo('Clearing all bans')
|
logger.logInfo('Clearing all bans')
|
||||||
for net in bans.copy():
|
for net in bans.copy():
|
||||||
logdebug("Unbanning net: %s" % net)
|
|
||||||
unban(net)
|
unban(net)
|
||||||
with lock:
|
with lock:
|
||||||
logdebug("Clearing IPv4/IPv6 table")
|
|
||||||
tables.clearIPv4Table()
|
tables.clearIPv4Table()
|
||||||
tables.clearIPv6Table()
|
tables.clearIPv6Table()
|
||||||
try:
|
try:
|
||||||
@@ -300,35 +275,21 @@ def snat6(snat_target):
|
|||||||
|
|
||||||
def autopurge():
|
def autopurge():
|
||||||
global f2boptions
|
global f2boptions
|
||||||
logdebug("autopurge thread started")
|
|
||||||
while not quit_now:
|
while not quit_now:
|
||||||
logdebug("autopurge tick")
|
|
||||||
time.sleep(10)
|
time.sleep(10)
|
||||||
refreshF2boptions()
|
refreshF2boptions()
|
||||||
MAX_ATTEMPTS = int(f2boptions['max_attempts'])
|
MAX_ATTEMPTS = int(f2boptions['max_attempts'])
|
||||||
QUEUE_UNBAN = r.hgetall('F2B_QUEUE_UNBAN')
|
QUEUE_UNBAN = r.hgetall('F2B_QUEUE_UNBAN')
|
||||||
logdebug("QUEUE_UNBAN: %s" % QUEUE_UNBAN)
|
|
||||||
if QUEUE_UNBAN:
|
if QUEUE_UNBAN:
|
||||||
for net in QUEUE_UNBAN:
|
for net in QUEUE_UNBAN:
|
||||||
logdebug("Autopurge: unbanning queued net: %s" % net)
|
|
||||||
unban(str(net))
|
unban(str(net))
|
||||||
# Only check expiry for actively banned IPs:
|
for net in bans.copy():
|
||||||
active_bans = r.hgetall('F2B_ACTIVE_BANS')
|
if bans[net]['attempts'] >= MAX_ATTEMPTS:
|
||||||
now = time.time()
|
NET_BAN_TIME = calcNetBanTime(bans[net]['ban_counter'])
|
||||||
for net_str, expire_str in active_bans.items():
|
TIME_SINCE_LAST_ATTEMPT = time.time() - bans[net]['last_attempt']
|
||||||
logdebug("Checking ban expiry for (actively banned): %s" % net_str)
|
if TIME_SINCE_LAST_ATTEMPT > NET_BAN_TIME:
|
||||||
# Defensive: always process if timer missing or expired
|
unban(net)
|
||||||
try:
|
|
||||||
expire = float(expire_str)
|
|
||||||
except Exception:
|
|
||||||
logdebug("Invalid expire time for %s; unbanning" % net_str)
|
|
||||||
unban(net_str)
|
|
||||||
continue
|
|
||||||
time_left = expire - now
|
|
||||||
logdebug("Time left for %s: %.1f seconds" % (net_str, time_left))
|
|
||||||
if time_left <= 0:
|
|
||||||
logdebug("Ban expired for %s" % net_str)
|
|
||||||
unban(net_str)
|
|
||||||
|
|
||||||
def mailcowChainOrder():
|
def mailcowChainOrder():
|
||||||
global lock
|
global lock
|
||||||
@@ -398,7 +359,7 @@ def whitelistUpdate():
|
|||||||
with lock:
|
with lock:
|
||||||
if Counter(new_whitelist) != Counter(WHITELIST):
|
if Counter(new_whitelist) != Counter(WHITELIST):
|
||||||
WHITELIST = new_whitelist
|
WHITELIST = new_whitelist
|
||||||
logger.logInfo('Allowlist was changed, it has %s entries' % len(WHITELIST))
|
logger.logInfo('Whitelist was changed, it has %s entries' % len(WHITELIST))
|
||||||
time.sleep(60.0 - ((time.time() - start_time) % 60.0))
|
time.sleep(60.0 - ((time.time() - start_time) % 60.0))
|
||||||
|
|
||||||
def blacklistUpdate():
|
def blacklistUpdate():
|
||||||
@@ -414,7 +375,7 @@ def blacklistUpdate():
|
|||||||
addban = set(new_blacklist).difference(BLACKLIST)
|
addban = set(new_blacklist).difference(BLACKLIST)
|
||||||
delban = set(BLACKLIST).difference(new_blacklist)
|
delban = set(BLACKLIST).difference(new_blacklist)
|
||||||
BLACKLIST = new_blacklist
|
BLACKLIST = new_blacklist
|
||||||
logger.logInfo('Denylist was changed, it has %s entries' % len(BLACKLIST))
|
logger.logInfo('Blacklist was changed, it has %s entries' % len(BLACKLIST))
|
||||||
if addban:
|
if addban:
|
||||||
for net in addban:
|
for net in addban:
|
||||||
permBan(net=net)
|
permBan(net=net)
|
||||||
@@ -425,43 +386,42 @@ def blacklistUpdate():
|
|||||||
|
|
||||||
def sigterm_quit(signum, frame):
|
def sigterm_quit(signum, frame):
|
||||||
global clear_before_quit
|
global clear_before_quit
|
||||||
logdebug("SIGTERM received, setting clear_before_quit to True and exiting")
|
|
||||||
clear_before_quit = True
|
clear_before_quit = True
|
||||||
sys.exit(exit_code)
|
sys.exit(exit_code)
|
||||||
|
|
||||||
def before_quit():
|
def berfore_quit():
|
||||||
logdebug("before_quit called, clear_before_quit=%s" % clear_before_quit)
|
|
||||||
if clear_before_quit:
|
if clear_before_quit:
|
||||||
clear()
|
clear()
|
||||||
if pubsub is not None:
|
if pubsub is not None:
|
||||||
pubsub.unsubscribe()
|
pubsub.unsubscribe()
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
logger = Logger()
|
atexit.register(berfore_quit)
|
||||||
logdebug("Sys.argv: %s" % sys.argv)
|
|
||||||
atexit.register(before_quit)
|
|
||||||
signal.signal(signal.SIGTERM, sigterm_quit)
|
signal.signal(signal.SIGTERM, sigterm_quit)
|
||||||
|
|
||||||
|
# init Logger
|
||||||
|
logger = Logger()
|
||||||
|
|
||||||
|
# init backend
|
||||||
backend = sys.argv[1]
|
backend = sys.argv[1]
|
||||||
logdebug("Backend: %s" % backend)
|
|
||||||
if backend == "nftables":
|
if backend == "nftables":
|
||||||
logger.logInfo('Using NFTables backend')
|
logger.logInfo('Using NFTables backend')
|
||||||
tables = NFTables(chain_name, logger)
|
tables = NFTables(chain_name, logger)
|
||||||
else:
|
else:
|
||||||
logger.logInfo('Using IPTables backend')
|
logger.logInfo('Using IPTables backend')
|
||||||
logger.logWarn(
|
|
||||||
"DEPRECATION: iptables-legacy is deprecated and will be removed in future releases. "
|
|
||||||
"Please switch to nftables on your host to ensure complete compatibility."
|
|
||||||
)
|
|
||||||
time.sleep(5)
|
|
||||||
tables = IPTables(chain_name, logger)
|
tables = IPTables(chain_name, logger)
|
||||||
|
|
||||||
|
# In case a previous session was killed without cleanup
|
||||||
clear()
|
clear()
|
||||||
|
|
||||||
|
# Reinit MAILCOW chain
|
||||||
|
# Is called before threads start, no locking
|
||||||
logger.logInfo("Initializing mailcow netfilter chain")
|
logger.logInfo("Initializing mailcow netfilter chain")
|
||||||
tables.initChainIPv4()
|
tables.initChainIPv4()
|
||||||
tables.initChainIPv6()
|
tables.initChainIPv6()
|
||||||
|
|
||||||
if os.getenv("DISABLE_NETFILTER_ISOLATION_RULE", "").lower() in ("y", "yes"):
|
if os.getenv("DISABLE_NETFILTER_ISOLATION_RULE").lower() in ("y", "yes"):
|
||||||
logger.logInfo(f"Skipping {chain_name} isolation")
|
logger.logInfo(f"Skipping {chain_name} isolation")
|
||||||
else:
|
else:
|
||||||
logger.logInfo(f"Setting {chain_name} isolation")
|
logger.logInfo(f"Setting {chain_name} isolation")
|
||||||
@@ -472,28 +432,23 @@ if __name__ == '__main__':
|
|||||||
try:
|
try:
|
||||||
redis_slaveof_ip = os.getenv('REDIS_SLAVEOF_IP', '')
|
redis_slaveof_ip = os.getenv('REDIS_SLAVEOF_IP', '')
|
||||||
redis_slaveof_port = os.getenv('REDIS_SLAVEOF_PORT', '')
|
redis_slaveof_port = os.getenv('REDIS_SLAVEOF_PORT', '')
|
||||||
logdebug(
|
|
||||||
"Connecting redis (SLAVEOF_IP:%s, PORT:%s)" % (redis_slaveof_ip, redis_slaveof_port))
|
|
||||||
if "".__eq__(redis_slaveof_ip):
|
if "".__eq__(redis_slaveof_ip):
|
||||||
r = redis.StrictRedis(
|
r = redis.StrictRedis(host=os.getenv('IPV4_NETWORK', '172.22.1') + '.249', decode_responses=True, port=6379, db=0, password=os.environ['REDISPASS'])
|
||||||
host=os.getenv('IPV4_NETWORK', '172.22.1') + '.249', decode_responses=True, port=6379, db=0, password=os.environ['REDISPASS'])
|
|
||||||
else:
|
else:
|
||||||
r = redis.StrictRedis(
|
r = redis.StrictRedis(host=redis_slaveof_ip, decode_responses=True, port=redis_slaveof_port, db=0, password=os.environ['REDISPASS'])
|
||||||
host=redis_slaveof_ip, decode_responses=True, port=redis_slaveof_port, db=0, password=os.environ['REDISPASS'])
|
|
||||||
r.ping()
|
r.ping()
|
||||||
pubsub = r.pubsub()
|
pubsub = r.pubsub()
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
logdebug(
|
print('%s - trying again in 3 seconds' % (ex))
|
||||||
'Redis connection failed: %s - trying again in 3 seconds' % (ex))
|
|
||||||
time.sleep(3)
|
time.sleep(3)
|
||||||
else:
|
else:
|
||||||
break
|
break
|
||||||
logger.set_redis(r)
|
logger.set_redis(r)
|
||||||
logdebug("Redis connection established, setting up F2B keys")
|
|
||||||
|
|
||||||
|
# rename fail2ban to netfilter
|
||||||
if r.exists('F2B_LOG'):
|
if r.exists('F2B_LOG'):
|
||||||
logdebug("Renaming F2B_LOG to NETFILTER_LOG")
|
|
||||||
r.rename('F2B_LOG', 'NETFILTER_LOG')
|
r.rename('F2B_LOG', 'NETFILTER_LOG')
|
||||||
|
# clear bans in redis
|
||||||
r.delete('F2B_ACTIVE_BANS')
|
r.delete('F2B_ACTIVE_BANS')
|
||||||
r.delete('F2B_PERM_BANS')
|
r.delete('F2B_PERM_BANS')
|
||||||
|
|
||||||
@@ -508,7 +463,7 @@ if __name__ == '__main__':
|
|||||||
snat_ip = os.getenv('SNAT_TO_SOURCE')
|
snat_ip = os.getenv('SNAT_TO_SOURCE')
|
||||||
snat_ipo = ipaddress.ip_address(snat_ip)
|
snat_ipo = ipaddress.ip_address(snat_ip)
|
||||||
if type(snat_ipo) is ipaddress.IPv4Address:
|
if type(snat_ipo) is ipaddress.IPv4Address:
|
||||||
snat4_thread = Thread(target=snat4, args=(snat_ip,))
|
snat4_thread = Thread(target=snat4,args=(snat_ip,))
|
||||||
snat4_thread.daemon = True
|
snat4_thread.daemon = True
|
||||||
snat4_thread.start()
|
snat4_thread.start()
|
||||||
except ValueError:
|
except ValueError:
|
||||||
@@ -544,5 +499,4 @@ if __name__ == '__main__':
|
|||||||
while not quit_now:
|
while not quit_now:
|
||||||
time.sleep(0.5)
|
time.sleep(0.5)
|
||||||
|
|
||||||
logdebug("Exiting with code %s" % exit_code)
|
sys.exit(exit_code)
|
||||||
sys.exit(exit_code)
|
|
||||||
|
|||||||
@@ -1,6 +1,5 @@
|
|||||||
import time
|
import time
|
||||||
import json
|
import json
|
||||||
import datetime
|
|
||||||
|
|
||||||
class Logger:
|
class Logger:
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
@@ -9,28 +8,17 @@ class Logger:
|
|||||||
def set_redis(self, redis):
|
def set_redis(self, redis):
|
||||||
self.r = redis
|
self.r = redis
|
||||||
|
|
||||||
def _format_timestamp(self):
|
|
||||||
# Local time with milliseconds
|
|
||||||
return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
|
||||||
|
|
||||||
def log(self, priority, message):
|
def log(self, priority, message):
|
||||||
# build redis-friendly dict
|
tolog = {}
|
||||||
tolog = {
|
tolog['time'] = int(round(time.time()))
|
||||||
'time': int(round(time.time())), # keep raw timestamp for Redis
|
tolog['priority'] = priority
|
||||||
'priority': priority,
|
tolog['message'] = message
|
||||||
'message': message
|
print(message)
|
||||||
}
|
|
||||||
|
|
||||||
# print human-readable message with timestamp
|
|
||||||
ts = self._format_timestamp()
|
|
||||||
print(f"{ts} {priority.upper()}: {message}", flush=True)
|
|
||||||
|
|
||||||
# also push JSON to Redis if connected
|
|
||||||
if self.r is not None:
|
if self.r is not None:
|
||||||
try:
|
try:
|
||||||
self.r.lpush('NETFILTER_LOG', json.dumps(tolog, ensure_ascii=False))
|
self.r.lpush('NETFILTER_LOG', json.dumps(tolog, ensure_ascii=False))
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
print(f'{ts} WARN: Failed logging to redis: {ex}', flush=True)
|
print('Failed logging to redis: %s' % (ex))
|
||||||
|
|
||||||
def logWarn(self, message):
|
def logWarn(self, message):
|
||||||
self.log('warn', message)
|
self.log('warn', message)
|
||||||
@@ -39,4 +27,4 @@ class Logger:
|
|||||||
self.log('crit', message)
|
self.log('crit', message)
|
||||||
|
|
||||||
def logInfo(self, message):
|
def logInfo(self, message):
|
||||||
self.log('info', message)
|
self.log('info', message)
|
||||||
|
|||||||
@@ -4,15 +4,33 @@ LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
|
|||||||
ENV PIP_BREAK_SYSTEM_PACKAGES=1
|
ENV PIP_BREAK_SYSTEM_PACKAGES=1
|
||||||
|
|
||||||
RUN apk add --no-cache nginx \
|
RUN apk add --no-cache nginx \
|
||||||
python3 \
|
python3 py3-pip \
|
||||||
py3-pip && \
|
supervisor
|
||||||
pip install --upgrade pip && \
|
|
||||||
pip install Jinja2
|
RUN apk add --no-cache --virtual .build-deps \
|
||||||
|
gcc \
|
||||||
|
musl-dev \
|
||||||
|
python3-dev \
|
||||||
|
linux-headers \
|
||||||
|
&& pip install --break-system-packages psutil \
|
||||||
|
&& apk del .build-deps
|
||||||
|
|
||||||
|
RUN pip install --break-system-packages \
|
||||||
|
mysql-connector-python \
|
||||||
|
jinja2 \
|
||||||
|
redis \
|
||||||
|
dnspython
|
||||||
|
|
||||||
RUN mkdir -p /etc/nginx/includes
|
RUN mkdir -p /etc/nginx/includes
|
||||||
|
|
||||||
COPY ./bootstrap.py /
|
|
||||||
COPY ./docker-entrypoint.sh /
|
|
||||||
|
|
||||||
ENTRYPOINT ["/docker-entrypoint.sh"]
|
COPY data/Dockerfiles/bootstrap /bootstrap
|
||||||
CMD ["nginx", "-g", "daemon off;"]
|
COPY data/Dockerfiles/nginx/docker-entrypoint.sh /
|
||||||
|
COPY data/Dockerfiles/nginx/supervisord.conf /etc/supervisor/supervisord.conf
|
||||||
|
COPY data/Dockerfiles/nginx/stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh
|
||||||
|
|
||||||
|
RUN chmod +x /docker-entrypoint.sh
|
||||||
|
RUN chmod +x /usr/local/sbin/stop-supervisor.sh
|
||||||
|
|
||||||
|
|
||||||
|
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]
|
||||||
|
|||||||
@@ -1,100 +0,0 @@
|
|||||||
import os
|
|
||||||
import subprocess
|
|
||||||
from jinja2 import Environment, FileSystemLoader
|
|
||||||
|
|
||||||
def includes_conf(env, template_vars):
|
|
||||||
server_name = "server_name.active"
|
|
||||||
listen_plain = "listen_plain.active"
|
|
||||||
listen_ssl = "listen_ssl.active"
|
|
||||||
|
|
||||||
server_name_config = f"server_name {template_vars['MAILCOW_HOSTNAME']} autodiscover.* autoconfig.* {' '.join(template_vars['ADDITIONAL_SERVER_NAMES'])};"
|
|
||||||
listen_plain_config = f"listen {template_vars['HTTP_PORT']};"
|
|
||||||
listen_ssl_config = f"listen {template_vars['HTTPS_PORT']};"
|
|
||||||
if template_vars['ENABLE_IPV6']:
|
|
||||||
listen_plain_config += f"\nlisten [::]:{template_vars['HTTP_PORT']};"
|
|
||||||
listen_ssl_config += f"\nlisten [::]:{template_vars['HTTPS_PORT']} ssl;"
|
|
||||||
listen_ssl_config += "\nhttp2 on;"
|
|
||||||
|
|
||||||
with open(f"/etc/nginx/conf.d/{server_name}", "w") as f:
|
|
||||||
f.write(server_name_config)
|
|
||||||
|
|
||||||
with open(f"/etc/nginx/conf.d/{listen_plain}", "w") as f:
|
|
||||||
f.write(listen_plain_config)
|
|
||||||
|
|
||||||
with open(f"/etc/nginx/conf.d/{listen_ssl}", "w") as f:
|
|
||||||
f.write(listen_ssl_config)
|
|
||||||
|
|
||||||
def sites_default_conf(env, template_vars):
|
|
||||||
config_name = "sites-default.conf"
|
|
||||||
template = env.get_template(f"{config_name}.j2")
|
|
||||||
config = template.render(template_vars)
|
|
||||||
|
|
||||||
with open(f"/etc/nginx/includes/{config_name}", "w") as f:
|
|
||||||
f.write(config)
|
|
||||||
|
|
||||||
def nginx_conf(env, template_vars):
|
|
||||||
config_name = "nginx.conf"
|
|
||||||
template = env.get_template(f"{config_name}.j2")
|
|
||||||
config = template.render(template_vars)
|
|
||||||
|
|
||||||
with open(f"/etc/nginx/{config_name}", "w") as f:
|
|
||||||
f.write(config)
|
|
||||||
|
|
||||||
def prepare_template_vars():
|
|
||||||
ipv4_network = os.getenv("IPV4_NETWORK", "172.22.1")
|
|
||||||
additional_server_names = os.getenv("ADDITIONAL_SERVER_NAMES", "")
|
|
||||||
trusted_proxies = os.getenv("TRUSTED_PROXIES", "")
|
|
||||||
|
|
||||||
template_vars = {
|
|
||||||
'IPV4_NETWORK': ipv4_network,
|
|
||||||
'TRUSTED_PROXIES': [item.strip() for item in trusted_proxies.split(",") if item.strip()],
|
|
||||||
'SKIP_RSPAMD': os.getenv("SKIP_RSPAMD", "n").lower() in ("y", "yes"),
|
|
||||||
'SKIP_SOGO': os.getenv("SKIP_SOGO", "n").lower() in ("y", "yes"),
|
|
||||||
'NGINX_USE_PROXY_PROTOCOL': os.getenv("NGINX_USE_PROXY_PROTOCOL", "n").lower() in ("y", "yes"),
|
|
||||||
'MAILCOW_HOSTNAME': os.getenv("MAILCOW_HOSTNAME", ""),
|
|
||||||
'ADDITIONAL_SERVER_NAMES': [item.strip() for item in additional_server_names.split(",") if item.strip()],
|
|
||||||
'HTTP_PORT': os.getenv("HTTP_PORT", "80"),
|
|
||||||
'HTTPS_PORT': os.getenv("HTTPS_PORT", "443"),
|
|
||||||
'SOGOHOST': os.getenv("SOGOHOST", ipv4_network + ".248"),
|
|
||||||
'RSPAMDHOST': os.getenv("RSPAMDHOST", "rspamd-mailcow"),
|
|
||||||
'PHPFPMHOST': os.getenv("PHPFPMHOST", "php-fpm-mailcow"),
|
|
||||||
'ENABLE_IPV6': os.getenv("ENABLE_IPV6", "true").lower() != "false",
|
|
||||||
'HTTP_REDIRECT': os.getenv("HTTP_REDIRECT", "n").lower() in ("y", "yes"),
|
|
||||||
}
|
|
||||||
|
|
||||||
ssl_dir = '/etc/ssl/mail/'
|
|
||||||
template_vars['valid_cert_dirs'] = []
|
|
||||||
for d in os.listdir(ssl_dir):
|
|
||||||
full_path = os.path.join(ssl_dir, d)
|
|
||||||
if not os.path.isdir(full_path):
|
|
||||||
continue
|
|
||||||
|
|
||||||
cert_path = os.path.join(full_path, 'cert.pem')
|
|
||||||
key_path = os.path.join(full_path, 'key.pem')
|
|
||||||
domains_path = os.path.join(full_path, 'domains')
|
|
||||||
|
|
||||||
if os.path.isfile(cert_path) and os.path.isfile(key_path) and os.path.isfile(domains_path):
|
|
||||||
with open(domains_path, 'r') as file:
|
|
||||||
domains = file.read().strip()
|
|
||||||
domains_list = domains.split()
|
|
||||||
if domains_list and template_vars["MAILCOW_HOSTNAME"] not in domains_list:
|
|
||||||
template_vars['valid_cert_dirs'].append({
|
|
||||||
'cert_path': full_path + '/',
|
|
||||||
'domains': domains
|
|
||||||
})
|
|
||||||
|
|
||||||
return template_vars
|
|
||||||
|
|
||||||
def main():
|
|
||||||
env = Environment(loader=FileSystemLoader('./etc/nginx/conf.d/templates'))
|
|
||||||
|
|
||||||
# Render config
|
|
||||||
print("Render config")
|
|
||||||
template_vars = prepare_template_vars()
|
|
||||||
sites_default_conf(env, template_vars)
|
|
||||||
nginx_conf(env, template_vars)
|
|
||||||
includes_conf(env, template_vars)
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
main()
|
|
||||||
@@ -1,26 +1,20 @@
|
|||||||
#!/bin/sh
|
#!/bin/sh
|
||||||
|
|
||||||
PHPFPMHOST=${PHPFPMHOST:-"php-fpm-mailcow"}
|
# Run hooks
|
||||||
SOGOHOST=${SOGOHOST:-"$IPV4_NETWORK.248"}
|
for file in /hooks/*; do
|
||||||
RSPAMDHOST=${RSPAMDHOST:-"rspamd-mailcow"}
|
if [ -x "${file}" ]; then
|
||||||
|
echo "Running hook ${file}"
|
||||||
until ping ${PHPFPMHOST} -c1 > /dev/null; do
|
"${file}"
|
||||||
echo "Waiting for PHP..."
|
fi
|
||||||
sleep 1
|
|
||||||
done
|
done
|
||||||
if ! printf "%s\n" "${SKIP_SOGO}" | grep -E '^([yY][eE][sS]|[yY])+$' >/dev/null; then
|
|
||||||
until ping ${SOGOHOST} -c1 > /dev/null; do
|
python3 -u /bootstrap/main.py
|
||||||
echo "Waiting for SOGo..."
|
BOOTSTRAP_EXIT_CODE=$?
|
||||||
sleep 1
|
|
||||||
done
|
if [ $BOOTSTRAP_EXIT_CODE -ne 0 ]; then
|
||||||
fi
|
echo "Bootstrap failed with exit code $BOOTSTRAP_EXIT_CODE. Not starting Nginx."
|
||||||
if ! printf "%s\n" "${SKIP_RSPAMD}" | grep -E '^([yY][eE][sS]|[yY])+$' >/dev/null; then
|
exit $BOOTSTRAP_EXIT_CODE
|
||||||
until ping ${RSPAMDHOST} -c1 > /dev/null; do
|
|
||||||
echo "Waiting for Rspamd..."
|
|
||||||
sleep 1
|
|
||||||
done
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
python3 /bootstrap.py
|
echo "Bootstrap succeeded. Starting Nginx..."
|
||||||
|
nginx -g "daemon off;"
|
||||||
exec "$@"
|
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
[supervisord]
|
[supervisord]
|
||||||
pidfile=/var/run/supervisord.pid
|
|
||||||
nodaemon=true
|
nodaemon=true
|
||||||
user=root
|
user=root
|
||||||
|
|
||||||
@@ -10,16 +9,19 @@ stdout_logfile_maxbytes=0
|
|||||||
stderr_logfile=/dev/stderr
|
stderr_logfile=/dev/stderr
|
||||||
stderr_logfile_maxbytes=0
|
stderr_logfile_maxbytes=0
|
||||||
autostart=true
|
autostart=true
|
||||||
|
priority=1
|
||||||
|
|
||||||
[program:postfix-tlspol]
|
[program:bootstrap]
|
||||||
startsecs=10
|
command=/docker-entrypoint.sh
|
||||||
autorestart=true
|
|
||||||
command=/opt/postfix-tlspol.sh
|
|
||||||
stdout_logfile=/dev/stdout
|
stdout_logfile=/dev/stdout
|
||||||
stdout_logfile_maxbytes=0
|
stdout_logfile_maxbytes=0
|
||||||
stderr_logfile=/dev/stderr
|
stderr_logfile=/dev/stderr
|
||||||
stderr_logfile_maxbytes=0
|
stderr_logfile_maxbytes=0
|
||||||
|
priority=2
|
||||||
|
startretries=10
|
||||||
|
autorestart=true
|
||||||
|
stopwaitsecs=120
|
||||||
|
|
||||||
[eventlistener:processes]
|
[eventlistener:processes]
|
||||||
command=/usr/local/sbin/stop-supervisor.sh
|
command=/usr/local/sbin/stop-supervisor.sh
|
||||||
events=PROCESS_STATE_STOPPED, PROCESS_STATE_EXITED, PROCESS_STATE_FATAL
|
events=PROCESS_STATE_STOPPED, PROCESS_STATE_EXITED, PROCESS_STATE_FATAL
|
||||||
@@ -1,25 +1,21 @@
|
|||||||
FROM php:8.4-fpm-alpine3.22
|
FROM php:8.2-fpm-alpine3.21
|
||||||
|
|
||||||
LABEL maintainer="The Infrastructure Company GmbH <info@servercow.de>"
|
LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"
|
||||||
|
|
||||||
# renovate: datasource=github-tags depName=krakjoe/apcu versioning=semver-coerced extractVersion=^v(?<version>.*)$
|
# renovate: datasource=github-tags depName=krakjoe/apcu versioning=semver-coerced extractVersion=^v(?<version>.*)$
|
||||||
ARG APCU_PECL_VERSION=5.1.27
|
ARG APCU_PECL_VERSION=5.1.24
|
||||||
# renovate: datasource=github-tags depName=Imagick/imagick versioning=semver-coerced extractVersion=(?<version>.*)$
|
# renovate: datasource=github-tags depName=Imagick/imagick versioning=semver-coerced extractVersion=(?<version>.*)$
|
||||||
ARG IMAGICK_PECL_VERSION=3.8.0
|
ARG IMAGICK_PECL_VERSION=3.8.0
|
||||||
# renovate: datasource=github-tags depName=php/pecl-mail-mailparse versioning=semver-coerced extractVersion=^v(?<version>.*)$
|
# renovate: datasource=github-tags depName=php/pecl-mail-mailparse versioning=semver-coerced extractVersion=^v(?<version>.*)$
|
||||||
ARG MAILPARSE_PECL_VERSION=3.1.9
|
ARG MAILPARSE_PECL_VERSION=3.1.8
|
||||||
# renovate: datasource=github-tags depName=php-memcached-dev/php-memcached versioning=semver-coerced extractVersion=^v(?<version>.*)$
|
# renovate: datasource=github-tags depName=php-memcached-dev/php-memcached versioning=semver-coerced extractVersion=^v(?<version>.*)$
|
||||||
ARG MEMCACHED_PECL_VERSION=3.3.0
|
ARG MEMCACHED_PECL_VERSION=3.2.0
|
||||||
# renovate: datasource=github-tags depName=phpredis/phpredis versioning=semver-coerced extractVersion=(?<version>.*)$
|
# renovate: datasource=github-tags depName=phpredis/phpredis versioning=semver-coerced extractVersion=(?<version>.*)$
|
||||||
ARG REDIS_PECL_VERSION=6.2.0
|
ARG REDIS_PECL_VERSION=6.1.0
|
||||||
# renovate: datasource=github-tags depName=composer/composer versioning=semver-coerced extractVersion=(?<version>.*)$
|
# renovate: datasource=github-tags depName=composer/composer versioning=semver-coerced extractVersion=(?<version>.*)$
|
||||||
ARG COMPOSER_VERSION=2.8.6
|
ARG COMPOSER_VERSION=2.8.6
|
||||||
# renovate: datasource=github-tags depName=php/pecl-text-pspell versioning=semver-coerced extractVersion=^v(?<version>.*)$
|
|
||||||
ARG PSPELL_PECL_VERSION=1.0.1
|
|
||||||
# renovate: datasource=github-tags depName=php/pecl-mail-imap versioning=semver-coerced extractVersion=^v(?<version>.*)$
|
|
||||||
ARG IMAP_PECL_VERSION=1.0.3
|
|
||||||
|
|
||||||
RUN apk update && apk add -U --no-cache autoconf \
|
RUN apk add -U --no-cache autoconf \
|
||||||
aspell-dev \
|
aspell-dev \
|
||||||
aspell-libs \
|
aspell-libs \
|
||||||
bash \
|
bash \
|
||||||
@@ -67,14 +63,13 @@ RUN apk update && apk add -U --no-cache autoconf \
|
|||||||
samba-client \
|
samba-client \
|
||||||
zlib-dev \
|
zlib-dev \
|
||||||
tzdata \
|
tzdata \
|
||||||
|
python3 py3-pip \
|
||||||
&& pecl install APCu-${APCU_PECL_VERSION} \
|
&& pecl install APCu-${APCU_PECL_VERSION} \
|
||||||
&& pecl install imagick-${IMAGICK_PECL_VERSION} \
|
&& pecl install imagick-${IMAGICK_PECL_VERSION} \
|
||||||
&& pecl install mailparse-${MAILPARSE_PECL_VERSION} \
|
&& pecl install mailparse-${MAILPARSE_PECL_VERSION} \
|
||||||
&& pecl install memcached-${MEMCACHED_PECL_VERSION} \
|
&& pecl install memcached-${MEMCACHED_PECL_VERSION} \
|
||||||
&& pecl install redis-${REDIS_PECL_VERSION} \
|
&& pecl install redis-${REDIS_PECL_VERSION} \
|
||||||
&& pecl install pspell-${PSPELL_PECL_VERSION} \
|
&& docker-php-ext-enable apcu imagick memcached mailparse redis \
|
||||||
&& pecl install --configureoptions='with-kerberos="no" with-imap="yes" with-imap-ssl="yes"' imap-${IMAP_PECL_VERSION} \
|
|
||||||
&& docker-php-ext-enable apcu imagick memcached mailparse redis pspell imap \
|
|
||||||
&& pecl clear-cache \
|
&& pecl clear-cache \
|
||||||
&& docker-php-ext-configure intl \
|
&& docker-php-ext-configure intl \
|
||||||
&& docker-php-ext-configure exif \
|
&& docker-php-ext-configure exif \
|
||||||
@@ -83,7 +78,9 @@ RUN apk update && apk add -U --no-cache autoconf \
|
|||||||
--with-webp \
|
--with-webp \
|
||||||
--with-xpm \
|
--with-xpm \
|
||||||
--with-avif \
|
--with-avif \
|
||||||
&& docker-php-ext-install -j 4 exif gd gettext intl ldap opcache pcntl pdo pdo_mysql soap sockets zip bcmath gmp \
|
&& docker-php-ext-install -j 4 exif gd gettext intl ldap opcache pcntl pdo pdo_mysql pspell soap sockets zip bcmath gmp \
|
||||||
|
&& docker-php-ext-configure imap --with-imap --with-imap-ssl \
|
||||||
|
&& docker-php-ext-install -j 4 imap \
|
||||||
&& curl --silent --show-error https://getcomposer.org/installer | php -- --version=${COMPOSER_VERSION} \
|
&& curl --silent --show-error https://getcomposer.org/installer | php -- --version=${COMPOSER_VERSION} \
|
||||||
&& mv composer.phar /usr/local/bin/composer \
|
&& mv composer.phar /usr/local/bin/composer \
|
||||||
&& chmod +x /usr/local/bin/composer \
|
&& chmod +x /usr/local/bin/composer \
|
||||||
@@ -111,8 +108,26 @@ RUN apk update && apk add -U --no-cache autoconf \
|
|||||||
pcre-dev \
|
pcre-dev \
|
||||||
zlib-dev
|
zlib-dev
|
||||||
|
|
||||||
COPY ./docker-entrypoint.sh /
|
RUN apk add --no-cache --virtual .build-deps \
|
||||||
|
gcc \
|
||||||
|
musl-dev \
|
||||||
|
python3-dev \
|
||||||
|
linux-headers \
|
||||||
|
&& pip install --break-system-packages psutil \
|
||||||
|
&& apk del .build-deps
|
||||||
|
|
||||||
|
RUN pip install --break-system-packages \
|
||||||
|
mysql-connector-python \
|
||||||
|
jinja2 \
|
||||||
|
redis \
|
||||||
|
dnspython
|
||||||
|
|
||||||
|
|
||||||
|
COPY data/Dockerfiles/bootstrap /bootstrap
|
||||||
|
COPY data/Dockerfiles/phpfpm/docker-entrypoint.sh /
|
||||||
|
|
||||||
|
RUN chmod +x /docker-entrypoint.sh
|
||||||
|
|
||||||
|
|
||||||
ENTRYPOINT ["/docker-entrypoint.sh"]
|
ENTRYPOINT ["/docker-entrypoint.sh"]
|
||||||
|
|
||||||
CMD ["php-fpm"]
|
CMD ["php-fpm"]
|
||||||
|
|||||||
@@ -1,219 +1,5 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
function array_by_comma { local IFS=","; echo "$*"; }
|
|
||||||
|
|
||||||
# Wait for containers
|
|
||||||
while ! mariadb-admin status --ssl=false --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
|
|
||||||
echo "Waiting for SQL..."
|
|
||||||
sleep 2
|
|
||||||
done
|
|
||||||
|
|
||||||
# Do not attempt to write to slave
|
|
||||||
if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
|
|
||||||
REDIS_HOST=$REDIS_SLAVEOF_IP
|
|
||||||
REDIS_PORT=$REDIS_SLAVEOF_PORT
|
|
||||||
else
|
|
||||||
REDIS_HOST="redis"
|
|
||||||
REDIS_PORT="6379"
|
|
||||||
fi
|
|
||||||
REDIS_CMDLINE="redis-cli -h ${REDIS_HOST} -p ${REDIS_PORT} -a ${REDISPASS} --no-auth-warning"
|
|
||||||
|
|
||||||
until [[ $(${REDIS_CMDLINE} PING) == "PONG" ]]; do
|
|
||||||
echo "Waiting for Redis..."
|
|
||||||
sleep 2
|
|
||||||
done
|
|
||||||
|
|
||||||
# Set redis session store
|
|
||||||
echo -n '
|
|
||||||
session.save_handler = redis
|
|
||||||
session.save_path = "tcp://'${REDIS_HOST}':'${REDIS_PORT}'?auth='${REDISPASS}'"
|
|
||||||
' > /usr/local/etc/php/conf.d/session_store.ini
|
|
||||||
|
|
||||||
# Check mysql_upgrade (master and slave)
|
|
||||||
CONTAINER_ID=
|
|
||||||
until [[ ! -z "${CONTAINER_ID}" ]] && [[ "${CONTAINER_ID}" =~ ^[[:alnum:]]*$ ]]; do
|
|
||||||
CONTAINER_ID=$(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" 2> /dev/null | jq -rc "select( .name | tostring | contains(\"mysql-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" 2> /dev/null)
|
|
||||||
echo "Could not get mysql-mailcow container id... trying again"
|
|
||||||
sleep 2
|
|
||||||
done
|
|
||||||
echo "MySQL @ ${CONTAINER_ID}"
|
|
||||||
SQL_LOOP_C=0
|
|
||||||
SQL_CHANGED=0
|
|
||||||
until [[ ${SQL_UPGRADE_STATUS} == 'success' ]]; do
|
|
||||||
if [ ${SQL_LOOP_C} -gt 4 ]; then
|
|
||||||
echo "Tried to upgrade MySQL and failed, giving up after ${SQL_LOOP_C} retries and starting container (oops, not good)"
|
|
||||||
break
|
|
||||||
fi
|
|
||||||
SQL_FULL_UPGRADE_RETURN=$(curl --silent --insecure -XPOST https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${CONTAINER_ID}/exec -d '{"cmd":"system", "task":"mysql_upgrade"}' --silent -H 'Content-type: application/json')
|
|
||||||
SQL_UPGRADE_STATUS=$(echo ${SQL_FULL_UPGRADE_RETURN} | jq -r .type)
|
|
||||||
SQL_LOOP_C=$((SQL_LOOP_C+1))
|
|
||||||
echo "SQL upgrade iteration #${SQL_LOOP_C}"
|
|
||||||
if [[ ${SQL_UPGRADE_STATUS} == 'warning' ]]; then
|
|
||||||
SQL_CHANGED=1
|
|
||||||
echo "MySQL applied an upgrade, debug output:"
|
|
||||||
echo ${SQL_FULL_UPGRADE_RETURN}
|
|
||||||
sleep 3
|
|
||||||
while ! mariadb-admin status --ssl=false --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
|
|
||||||
echo "Waiting for SQL to return, please wait"
|
|
||||||
sleep 2
|
|
||||||
done
|
|
||||||
continue
|
|
||||||
elif [[ ${SQL_UPGRADE_STATUS} == 'success' ]]; then
|
|
||||||
echo "MySQL is up-to-date - debug output:"
|
|
||||||
echo ${SQL_FULL_UPGRADE_RETURN}
|
|
||||||
else
|
|
||||||
echo "No valid reponse for mysql_upgrade was received, debug output:"
|
|
||||||
echo ${SQL_FULL_UPGRADE_RETURN}
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
# doing post-installation stuff, if SQL was upgraded (master and slave)
|
|
||||||
if [ ${SQL_CHANGED} -eq 1 ]; then
|
|
||||||
POSTFIX=$(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" 2> /dev/null | jq -rc "select( .name | tostring | contains(\"postfix-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" 2> /dev/null)
|
|
||||||
if [[ -z "${POSTFIX}" ]] || ! [[ "${POSTFIX}" =~ ^[[:alnum:]]*$ ]]; then
|
|
||||||
echo "Could not determine Postfix container ID, skipping Postfix restart."
|
|
||||||
else
|
|
||||||
echo "Restarting Postfix"
|
|
||||||
curl -X POST --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${POSTFIX}/restart | jq -r '.msg'
|
|
||||||
echo "Sleeping 5 seconds..."
|
|
||||||
sleep 5
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Check mysql tz import (master and slave)
|
|
||||||
TZ_CHECK=$(mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT CONVERT_TZ('2019-11-02 23:33:00','Europe/Berlin','UTC') AS time;" -BN 2> /dev/null)
|
|
||||||
if [[ -z ${TZ_CHECK} ]] || [[ "${TZ_CHECK}" == "NULL" ]]; then
|
|
||||||
SQL_FULL_TZINFO_IMPORT_RETURN=$(curl --silent --insecure -XPOST https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${CONTAINER_ID}/exec -d '{"cmd":"system", "task":"mysql_tzinfo_to_sql"}' --silent -H 'Content-type: application/json')
|
|
||||||
echo "MySQL mysql_tzinfo_to_sql - debug output:"
|
|
||||||
echo ${SQL_FULL_TZINFO_IMPORT_RETURN}
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ "${MASTER}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
|
|
||||||
echo "We are master, preparing..."
|
|
||||||
# Set a default release format
|
|
||||||
if [[ -z $(${REDIS_CMDLINE} --raw GET Q_RELEASE_FORMAT) ]]; then
|
|
||||||
${REDIS_CMDLINE} --raw SET Q_RELEASE_FORMAT raw
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Set max age of q items - if unset
|
|
||||||
if [[ -z $(${REDIS_CMDLINE} --raw GET Q_MAX_AGE) ]]; then
|
|
||||||
${REDIS_CMDLINE} --raw SET Q_MAX_AGE 365
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Set default password policy - if unset
|
|
||||||
if [[ -z $(${REDIS_CMDLINE} --raw HGET PASSWD_POLICY length) ]]; then
|
|
||||||
${REDIS_CMDLINE} --raw HSET PASSWD_POLICY length 6
|
|
||||||
${REDIS_CMDLINE} --raw HSET PASSWD_POLICY chars 0
|
|
||||||
${REDIS_CMDLINE} --raw HSET PASSWD_POLICY special_chars 0
|
|
||||||
${REDIS_CMDLINE} --raw HSET PASSWD_POLICY lowerupper 0
|
|
||||||
${REDIS_CMDLINE} --raw HSET PASSWD_POLICY numbers 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Trigger db init
|
|
||||||
echo "Running DB init..."
|
|
||||||
php -c /usr/local/etc/php -f /web/inc/init_db.inc.php
|
|
||||||
|
|
||||||
# Recreating domain map
|
|
||||||
echo "Rebuilding domain map in Redis..."
|
|
||||||
declare -a DOMAIN_ARR
|
|
||||||
${REDIS_CMDLINE} DEL DOMAIN_MAP > /dev/null
|
|
||||||
while read line
|
|
||||||
do
|
|
||||||
DOMAIN_ARR+=("$line")
|
|
||||||
done < <(mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT domain FROM domain" -Bs)
|
|
||||||
while read line
|
|
||||||
do
|
|
||||||
DOMAIN_ARR+=("$line")
|
|
||||||
done < <(mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT alias_domain FROM alias_domain" -Bs)
|
|
||||||
|
|
||||||
if [[ ! -z ${DOMAIN_ARR} ]]; then
|
|
||||||
for domain in "${DOMAIN_ARR[@]}"; do
|
|
||||||
${REDIS_CMDLINE} HSET DOMAIN_MAP ${domain} 1 > /dev/null
|
|
||||||
done
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Set API options if env vars are not empty
|
|
||||||
if [[ ${API_ALLOW_FROM} != "invalid" ]] && [[ ! -z ${API_ALLOW_FROM} ]]; then
|
|
||||||
IFS=',' read -r -a API_ALLOW_FROM_ARR <<< "${API_ALLOW_FROM}"
|
|
||||||
declare -a VALIDATED_API_ALLOW_FROM_ARR
|
|
||||||
REGEX_IP6='^([0-9a-fA-F]{0,4}:){1,7}[0-9a-fA-F]{0,4}(/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8]))?$'
|
|
||||||
REGEX_IP4='^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+(/([0-9]|[1-2][0-9]|3[0-2]))?$'
|
|
||||||
for IP in "${API_ALLOW_FROM_ARR[@]}"; do
|
|
||||||
if [[ ${IP} =~ ${REGEX_IP6} ]] || [[ ${IP} =~ ${REGEX_IP4} ]]; then
|
|
||||||
VALIDATED_API_ALLOW_FROM_ARR+=("${IP}")
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
VALIDATED_IPS=$(array_by_comma ${VALIDATED_API_ALLOW_FROM_ARR[*]})
|
|
||||||
if [[ ! -z ${VALIDATED_IPS} ]]; then
|
|
||||||
if [[ ${API_KEY} != "invalid" ]] && [[ ! -z ${API_KEY} ]]; then
|
|
||||||
mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
|
|
||||||
DELETE FROM api WHERE access = 'rw';
|
|
||||||
INSERT INTO api (api_key, active, allow_from, access) VALUES ("${API_KEY}", "1", "${VALIDATED_IPS}", "rw");
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [[ ${API_KEY_READ_ONLY} != "invalid" ]] && [[ ! -z ${API_KEY_READ_ONLY} ]]; then
|
|
||||||
mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
|
|
||||||
DELETE FROM api WHERE access = 'ro';
|
|
||||||
INSERT INTO api (api_key, active, allow_from, access) VALUES ("${API_KEY_READ_ONLY}", "1", "${VALIDATED_IPS}", "ro");
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Create events (master only, STATUS for event on slave will be SLAVESIDE_DISABLED)
|
|
||||||
mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
|
|
||||||
DROP EVENT IF EXISTS clean_spamalias;
|
|
||||||
DELIMITER //
|
|
||||||
CREATE EVENT clean_spamalias
|
|
||||||
ON SCHEDULE EVERY 1 DAY DO
|
|
||||||
BEGIN
|
|
||||||
DELETE FROM spamalias WHERE validity < UNIX_TIMESTAMP();
|
|
||||||
END;
|
|
||||||
//
|
|
||||||
DELIMITER ;
|
|
||||||
DROP EVENT IF EXISTS clean_oauth2;
|
|
||||||
DELIMITER //
|
|
||||||
CREATE EVENT clean_oauth2
|
|
||||||
ON SCHEDULE EVERY 1 DAY DO
|
|
||||||
BEGIN
|
|
||||||
DELETE FROM oauth_refresh_tokens WHERE expires < NOW();
|
|
||||||
DELETE FROM oauth_access_tokens WHERE expires < NOW();
|
|
||||||
DELETE FROM oauth_authorization_codes WHERE expires < NOW();
|
|
||||||
END;
|
|
||||||
//
|
|
||||||
DELIMITER ;
|
|
||||||
DROP EVENT IF EXISTS clean_sasl_log;
|
|
||||||
DELIMITER //
|
|
||||||
CREATE EVENT clean_sasl_log
|
|
||||||
ON SCHEDULE EVERY 1 DAY DO
|
|
||||||
BEGIN
|
|
||||||
DELETE sasl_log.* FROM sasl_log
|
|
||||||
LEFT JOIN (
|
|
||||||
SELECT username, service, MAX(datetime) AS lastdate
|
|
||||||
FROM sasl_log
|
|
||||||
GROUP BY username, service
|
|
||||||
) AS last ON sasl_log.username = last.username AND sasl_log.service = last.service
|
|
||||||
WHERE datetime < DATE_SUB(NOW(), INTERVAL 31 DAY) AND datetime < lastdate;
|
|
||||||
DELETE FROM sasl_log
|
|
||||||
WHERE username NOT IN (SELECT username FROM mailbox) AND
|
|
||||||
datetime < DATE_SUB(NOW(), INTERVAL 31 DAY);
|
|
||||||
END;
|
|
||||||
//
|
|
||||||
DELIMITER ;
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Create dummy for custom overrides of mailcow style
|
|
||||||
[[ ! -f /web/css/build/0081-custom-mailcow.css ]] && echo '/* Autogenerated by mailcow */' > /web/css/build/0081-custom-mailcow.css
|
|
||||||
|
|
||||||
# Fix permissions for global filters
|
|
||||||
chown -R 82:82 /global_sieve/*
|
|
||||||
|
|
||||||
# Fix permissions on twig cache folder
|
|
||||||
chown -R 82:82 /web/templates/cache
|
|
||||||
# Clear cache
|
|
||||||
find /web/templates/cache/* -not -name '.gitkeep' -delete
|
|
||||||
|
|
||||||
# Run hooks
|
# Run hooks
|
||||||
for file in /hooks/*; do
|
for file in /hooks/*; do
|
||||||
if [ -x "${file}" ]; then
|
if [ -x "${file}" ]; then
|
||||||
@@ -222,4 +8,13 @@ for file in /hooks/*; do
|
|||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
|
|
||||||
|
python3 -u /bootstrap/main.py
|
||||||
|
BOOTSTRAP_EXIT_CODE=$?
|
||||||
|
|
||||||
|
if [ $BOOTSTRAP_EXIT_CODE -ne 0 ]; then
|
||||||
|
echo "Bootstrap failed with exit code $BOOTSTRAP_EXIT_CODE. Not starting PHP-FPM."
|
||||||
|
exit $BOOTSTRAP_EXIT_CODE
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Bootstrap succeeded. Starting PHP-FPM..."
|
||||||
exec "$@"
|
exec "$@"
|
||||||
|
|||||||
@@ -1,50 +0,0 @@
|
|||||||
FROM golang:1.25-bookworm AS builder
|
|
||||||
WORKDIR /src
|
|
||||||
|
|
||||||
ENV CGO_ENABLED=0 \
|
|
||||||
GO111MODULE=on \
|
|
||||||
NOOPT=1 \
|
|
||||||
VERSION=1.8.14
|
|
||||||
|
|
||||||
RUN git clone --branch v${VERSION} https://github.com/Zuplu/postfix-tlspol && \
|
|
||||||
cd /src/postfix-tlspol && \
|
|
||||||
scripts/build.sh build-only
|
|
||||||
|
|
||||||
|
|
||||||
FROM debian:bookworm-slim
|
|
||||||
LABEL maintainer="The Infrastructure Company GmbH <info@servercow.de>"
|
|
||||||
|
|
||||||
ARG DEBIAN_FRONTEND=noninteractive
|
|
||||||
ENV LC_ALL=C
|
|
||||||
|
|
||||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
|
||||||
ca-certificates \
|
|
||||||
dirmngr \
|
|
||||||
dnsutils \
|
|
||||||
iputils-ping \
|
|
||||||
sudo \
|
|
||||||
supervisor \
|
|
||||||
redis-tools \
|
|
||||||
syslog-ng \
|
|
||||||
syslog-ng-core \
|
|
||||||
syslog-ng-mod-redis \
|
|
||||||
tzdata \
|
|
||||||
&& rm -rf /var/lib/apt/lists/* \
|
|
||||||
&& touch /etc/default/locale
|
|
||||||
|
|
||||||
COPY supervisord.conf /etc/supervisor/supervisord.conf
|
|
||||||
COPY syslog-ng.conf /etc/syslog-ng/syslog-ng.conf
|
|
||||||
COPY syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng-redis_slave.conf
|
|
||||||
COPY postfix-tlspol.sh /opt/postfix-tlspol.sh
|
|
||||||
COPY stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh
|
|
||||||
COPY docker-entrypoint.sh /docker-entrypoint.sh
|
|
||||||
COPY --from=builder /src/postfix-tlspol/build/postfix-tlspol /usr/local/bin/postfix-tlspol
|
|
||||||
|
|
||||||
RUN chmod +x /opt/postfix-tlspol.sh \
|
|
||||||
/usr/local/sbin/stop-supervisor.sh \
|
|
||||||
/docker-entrypoint.sh
|
|
||||||
RUN rm -rf /tmp/* /var/tmp/*
|
|
||||||
|
|
||||||
ENTRYPOINT ["/docker-entrypoint.sh"]
|
|
||||||
|
|
||||||
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
|
|
||||||
cp /etc/syslog-ng/syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng.conf
|
|
||||||
fi
|
|
||||||
|
|
||||||
exec "$@"
|
|
||||||
@@ -1,52 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
LOGLVL=info
|
|
||||||
|
|
||||||
if [ ${DEV_MODE} != "n" ]; then
|
|
||||||
echo -e "\e[31mEnabling debug mode\e[0m"
|
|
||||||
set -x
|
|
||||||
LOGLVL=debug
|
|
||||||
fi
|
|
||||||
|
|
||||||
[[ ! -d /etc/postfix-tlspol ]] && mkdir -p /etc/postfix-tlspol
|
|
||||||
[[ ! -d /var/lib/postfix-tlspol ]] && mkdir -p /var/lib/postfix-tlspol
|
|
||||||
|
|
||||||
until dig +short mailcow.email > /dev/null; do
|
|
||||||
echo "Waiting for DNS..."
|
|
||||||
sleep 1
|
|
||||||
done
|
|
||||||
|
|
||||||
# Do not attempt to write to slave
|
|
||||||
if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
|
|
||||||
export REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT} -a ${REDISPASS} --no-auth-warning"
|
|
||||||
else
|
|
||||||
export REDIS_CMDLINE="redis-cli -h redis -p 6379 -a ${REDISPASS} --no-auth-warning"
|
|
||||||
fi
|
|
||||||
|
|
||||||
until [[ $(${REDIS_CMDLINE} PING) == "PONG" ]]; do
|
|
||||||
echo "Waiting for Redis..."
|
|
||||||
sleep 2
|
|
||||||
done
|
|
||||||
|
|
||||||
echo "Waiting for Postfix..."
|
|
||||||
until ping postfix -c1 > /dev/null; do
|
|
||||||
sleep 1
|
|
||||||
done
|
|
||||||
echo "Postfix OK"
|
|
||||||
|
|
||||||
cat <<EOF > /etc/postfix-tlspol/config.yaml
|
|
||||||
server:
|
|
||||||
address: 0.0.0.0:8642
|
|
||||||
|
|
||||||
log-level: ${LOGLVL}
|
|
||||||
|
|
||||||
prefetch: true
|
|
||||||
|
|
||||||
cache-file: /var/lib/postfix-tlspol/cache.db
|
|
||||||
|
|
||||||
dns:
|
|
||||||
# must support DNSSEC
|
|
||||||
address: 127.0.0.11:53
|
|
||||||
EOF
|
|
||||||
|
|
||||||
/usr/local/bin/postfix-tlspol -config /etc/postfix-tlspol/config.yaml
|
|
||||||
@@ -1,45 +0,0 @@
|
|||||||
@version: 3.38
|
|
||||||
@include "scl.conf"
|
|
||||||
options {
|
|
||||||
chain_hostnames(off);
|
|
||||||
flush_lines(0);
|
|
||||||
use_dns(no);
|
|
||||||
dns_cache(no);
|
|
||||||
use_fqdn(no);
|
|
||||||
owner("root"); group("adm"); perm(0640);
|
|
||||||
stats_freq(0);
|
|
||||||
bad_hostname("^gconfd$");
|
|
||||||
};
|
|
||||||
source s_src {
|
|
||||||
unix-stream("/dev/log");
|
|
||||||
internal();
|
|
||||||
};
|
|
||||||
destination d_stdout { pipe("/dev/stdout"); };
|
|
||||||
destination d_redis_ui_log {
|
|
||||||
redis(
|
|
||||||
host("`REDIS_SLAVEOF_IP`")
|
|
||||||
persist-name("redis1")
|
|
||||||
port(`REDIS_SLAVEOF_PORT`)
|
|
||||||
auth("`REDISPASS`")
|
|
||||||
command("LPUSH" "POSTFIX_MAILLOG" "$(format-json time=\"$S_UNIXTIME\" priority=\"$PRIORITY\" program=\"$PROGRAM\" message=\"$MESSAGE\")\n")
|
|
||||||
);
|
|
||||||
};
|
|
||||||
filter f_mail { facility(mail); };
|
|
||||||
# start
|
|
||||||
# overriding warnings are still displayed when the entrypoint runs its initial check
|
|
||||||
# warnings logged by postfix-mailcow to syslog are hidden to reduce repeating msgs
|
|
||||||
# Some other warnings are ignored
|
|
||||||
filter f_ignore {
|
|
||||||
not match("overriding earlier entry" value("MESSAGE"));
|
|
||||||
not match("TLS SNI from checks.mailcow.email" value("MESSAGE"));
|
|
||||||
not match("no SASL support" value("MESSAGE"));
|
|
||||||
not facility (local0, local1, local2, local3, local4, local5, local6, local7);
|
|
||||||
};
|
|
||||||
# end
|
|
||||||
log {
|
|
||||||
source(s_src);
|
|
||||||
filter(f_ignore);
|
|
||||||
destination(d_stdout);
|
|
||||||
filter(f_mail);
|
|
||||||
destination(d_redis_ui_log);
|
|
||||||
};
|
|
||||||
@@ -1,45 +0,0 @@
|
|||||||
@version: 3.38
|
|
||||||
@include "scl.conf"
|
|
||||||
options {
|
|
||||||
chain_hostnames(off);
|
|
||||||
flush_lines(0);
|
|
||||||
use_dns(no);
|
|
||||||
dns_cache(no);
|
|
||||||
use_fqdn(no);
|
|
||||||
owner("root"); group("adm"); perm(0640);
|
|
||||||
stats_freq(0);
|
|
||||||
bad_hostname("^gconfd$");
|
|
||||||
};
|
|
||||||
source s_src {
|
|
||||||
unix-stream("/dev/log");
|
|
||||||
internal();
|
|
||||||
};
|
|
||||||
destination d_stdout { pipe("/dev/stdout"); };
|
|
||||||
destination d_redis_ui_log {
|
|
||||||
redis(
|
|
||||||
host("redis-mailcow")
|
|
||||||
persist-name("redis1")
|
|
||||||
port(6379)
|
|
||||||
auth("`REDISPASS`")
|
|
||||||
command("LPUSH" "POSTFIX_MAILLOG" "$(format-json time=\"$S_UNIXTIME\" priority=\"$PRIORITY\" program=\"$PROGRAM\" message=\"$MESSAGE\")\n")
|
|
||||||
);
|
|
||||||
};
|
|
||||||
filter f_mail { facility(mail); };
|
|
||||||
# start
|
|
||||||
# overriding warnings are still displayed when the entrypoint runs its initial check
|
|
||||||
# warnings logged by postfix-mailcow to syslog are hidden to reduce repeating msgs
|
|
||||||
# Some other warnings are ignored
|
|
||||||
filter f_ignore {
|
|
||||||
not match("overriding earlier entry" value("MESSAGE"));
|
|
||||||
not match("TLS SNI from checks.mailcow.email" value("MESSAGE"));
|
|
||||||
not match("no SASL support" value("MESSAGE"));
|
|
||||||
not facility (local0, local1, local2, local3, local4, local5, local6, local7);
|
|
||||||
};
|
|
||||||
# end
|
|
||||||
log {
|
|
||||||
source(s_src);
|
|
||||||
filter(f_ignore);
|
|
||||||
destination(d_stdout);
|
|
||||||
filter(f_mail);
|
|
||||||
destination(d_redis_ui_log);
|
|
||||||
};
|
|
||||||
@@ -1,9 +1,9 @@
|
|||||||
FROM debian:bookworm-slim
|
FROM debian:bookworm-slim
|
||||||
|
|
||||||
LABEL maintainer="The Infrastructure Company GmbH <info@servercow.de>"
|
LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"
|
||||||
|
|
||||||
ARG DEBIAN_FRONTEND=noninteractive
|
ARG DEBIAN_FRONTEND=noninteractive
|
||||||
ENV LC_ALL=C
|
ENV LC_ALL C
|
||||||
|
|
||||||
RUN dpkg-divert --local --rename --add /sbin/initctl \
|
RUN dpkg-divert --local --rename --add /sbin/initctl \
|
||||||
&& ln -sf /bin/true /sbin/initctl \
|
&& ln -sf /bin/true /sbin/initctl \
|
||||||
@@ -34,23 +34,31 @@ RUN groupadd -g 102 postfix \
|
|||||||
syslog-ng-core \
|
syslog-ng-core \
|
||||||
syslog-ng-mod-redis \
|
syslog-ng-mod-redis \
|
||||||
tzdata \
|
tzdata \
|
||||||
|
python3 python3-pip \
|
||||||
&& rm -rf /var/lib/apt/lists/* \
|
&& rm -rf /var/lib/apt/lists/* \
|
||||||
&& touch /etc/default/locale \
|
&& touch /etc/default/locale \
|
||||||
&& printf '#!/bin/bash\n/usr/sbin/postconf -c /opt/postfix/conf "$@"' > /usr/local/sbin/postconf \
|
&& printf '#!/bin/bash\n/usr/sbin/postconf -c /opt/postfix/conf "$@"' > /usr/local/sbin/postconf \
|
||||||
&& chmod +x /usr/local/sbin/postconf
|
&& chmod +x /usr/local/sbin/postconf
|
||||||
|
|
||||||
COPY supervisord.conf /etc/supervisor/supervisord.conf
|
RUN pip install --break-system-packages \
|
||||||
COPY syslog-ng.conf /etc/syslog-ng/syslog-ng.conf
|
mysql-connector-python \
|
||||||
COPY syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng-redis_slave.conf
|
jinja2 \
|
||||||
COPY postfix.sh /opt/postfix.sh
|
redis \
|
||||||
COPY rspamd-pipe-ham /usr/local/bin/rspamd-pipe-ham
|
dnspython \
|
||||||
COPY rspamd-pipe-spam /usr/local/bin/rspamd-pipe-spam
|
psutil
|
||||||
COPY whitelist_forwardinghosts.sh /usr/local/bin/whitelist_forwardinghosts.sh
|
|
||||||
COPY stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh
|
|
||||||
COPY docker-entrypoint.sh /docker-entrypoint.sh
|
|
||||||
|
|
||||||
RUN chmod +x /opt/postfix.sh \
|
COPY data/Dockerfiles/bootstrap /bootstrap
|
||||||
/usr/local/bin/rspamd-pipe-ham \
|
COPY data/Dockerfiles/postfix/supervisord.conf /etc/supervisor/supervisord.conf
|
||||||
|
COPY data/Dockerfiles/postfix/syslog-ng.conf /etc/syslog-ng/syslog-ng.conf
|
||||||
|
COPY data/Dockerfiles/postfix/syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng-redis_slave.conf
|
||||||
|
COPY data/Dockerfiles/postfix/rspamd-pipe-ham /usr/local/bin/rspamd-pipe-ham
|
||||||
|
COPY data/Dockerfiles/postfix/rspamd-pipe-spam /usr/local/bin/rspamd-pipe-spam
|
||||||
|
COPY data/Dockerfiles/postfix/whitelist_forwardinghosts.sh /usr/local/bin/whitelist_forwardinghosts.sh
|
||||||
|
COPY data/Dockerfiles/postfix/stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh
|
||||||
|
COPY data/Dockerfiles/postfix/docker-entrypoint.sh /docker-entrypoint.sh
|
||||||
|
|
||||||
|
RUN chmod +x /usr/local/bin/rspamd-pipe-ham \
|
||||||
|
/docker-entrypoint.sh \
|
||||||
/usr/local/bin/rspamd-pipe-spam \
|
/usr/local/bin/rspamd-pipe-spam \
|
||||||
/usr/local/bin/whitelist_forwardinghosts.sh \
|
/usr/local/bin/whitelist_forwardinghosts.sh \
|
||||||
/usr/local/sbin/stop-supervisor.sh
|
/usr/local/sbin/stop-supervisor.sh
|
||||||
@@ -58,6 +66,5 @@ RUN rm -rf /tmp/* /var/tmp/*
|
|||||||
|
|
||||||
EXPOSE 588
|
EXPOSE 588
|
||||||
|
|
||||||
ENTRYPOINT ["/docker-entrypoint.sh"]
|
|
||||||
|
|
||||||
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]
|
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]
|
||||||
|
|||||||
@@ -8,8 +8,12 @@ for file in /hooks/*; do
|
|||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
|
|
||||||
if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
|
python3 -u /bootstrap/main.py
|
||||||
cp /etc/syslog-ng/syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng.conf
|
BOOTSTRAP_EXIT_CODE=$?
|
||||||
|
|
||||||
|
if [ $BOOTSTRAP_EXIT_CODE -ne 0 ]; then
|
||||||
|
echo "Bootstrap failed with exit code $BOOTSTRAP_EXIT_CODE. Not starting Postfix."
|
||||||
|
exit $BOOTSTRAP_EXIT_CODE
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Fix OpenSSL 3.X TLS1.0, 1.1 support (https://community.mailcow.email/d/4062-hi-all/20)
|
# Fix OpenSSL 3.X TLS1.0, 1.1 support (https://community.mailcow.email/d/4062-hi-all/20)
|
||||||
@@ -21,6 +25,16 @@ if grep -qE '\!SSLv2|\!SSLv3|>=TLSv1(\.[0-1])?$' /opt/postfix/conf/main.cf /opt/
|
|||||||
echo "[tls_system_default]" >> /etc/ssl/openssl.cnf
|
echo "[tls_system_default]" >> /etc/ssl/openssl.cnf
|
||||||
echo "MinProtocol = TLSv1" >> /etc/ssl/openssl.cnf
|
echo "MinProtocol = TLSv1" >> /etc/ssl/openssl.cnf
|
||||||
echo "CipherString = DEFAULT@SECLEVEL=0" >> /etc/ssl/openssl.cnf
|
echo "CipherString = DEFAULT@SECLEVEL=0" >> /etc/ssl/openssl.cnf
|
||||||
fi
|
fi
|
||||||
|
|
||||||
exec "$@"
|
|
||||||
|
# Start Postfix
|
||||||
|
postconf -c /opt/postfix/conf > /dev/null
|
||||||
|
if [[ $? != 0 ]]; then
|
||||||
|
echo "Postfix configuration error, refusing to start."
|
||||||
|
exit 1
|
||||||
|
else
|
||||||
|
echo "Bootstrap succeeded. Starting Postfix..."
|
||||||
|
postfix -c /opt/postfix/conf start
|
||||||
|
sleep 126144000
|
||||||
|
fi
|
||||||
|
|||||||
@@ -1,527 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
trap "postfix stop" EXIT
|
|
||||||
|
|
||||||
[[ ! -d /opt/postfix/conf/sql/ ]] && mkdir -p /opt/postfix/conf/sql/
|
|
||||||
|
|
||||||
# Wait for MySQL to warm-up
|
|
||||||
while ! mariadb-admin status --ssl=false --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
|
|
||||||
echo "Waiting for database to come up..."
|
|
||||||
sleep 2
|
|
||||||
done
|
|
||||||
|
|
||||||
until dig +short mailcow.email > /dev/null; do
|
|
||||||
echo "Waiting for DNS..."
|
|
||||||
sleep 1
|
|
||||||
done
|
|
||||||
|
|
||||||
cat <<EOF > /etc/aliases
|
|
||||||
# Autogenerated by mailcow
|
|
||||||
null: /dev/null
|
|
||||||
watchdog: /dev/null
|
|
||||||
ham: "|/usr/local/bin/rspamd-pipe-ham"
|
|
||||||
spam: "|/usr/local/bin/rspamd-pipe-spam"
|
|
||||||
EOF
|
|
||||||
newaliases;
|
|
||||||
|
|
||||||
# create sni configuration
|
|
||||||
if [[ "${SKIP_LETS_ENCRYPT}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
|
|
||||||
echo -n "" > /opt/postfix/conf/sni.map
|
|
||||||
else
|
|
||||||
echo -n "" > /opt/postfix/conf/sni.map;
|
|
||||||
for cert_dir in /etc/ssl/mail/*/ ; do
|
|
||||||
if [[ ! -f ${cert_dir}domains ]] || [[ ! -f ${cert_dir}cert.pem ]] || [[ ! -f ${cert_dir}key.pem ]]; then
|
|
||||||
continue;
|
|
||||||
fi
|
|
||||||
IFS=" " read -r -a domains <<< "$(cat "${cert_dir}domains")"
|
|
||||||
for domain in "${domains[@]}"; do
|
|
||||||
echo -n "${domain} ${cert_dir}key.pem ${cert_dir}cert.pem" >> /opt/postfix/conf/sni.map;
|
|
||||||
echo "" >> /opt/postfix/conf/sni.map;
|
|
||||||
done
|
|
||||||
done
|
|
||||||
fi
|
|
||||||
postmap -F hash:/opt/postfix/conf/sni.map;
|
|
||||||
|
|
||||||
cat <<EOF > /opt/postfix/conf/sql/mysql_relay_ne.cf
|
|
||||||
# Autogenerated by mailcow
|
|
||||||
user = ${DBUSER}
|
|
||||||
password = ${DBPASS}
|
|
||||||
hosts = unix:/var/run/mysqld/mysqld.sock
|
|
||||||
dbname = ${DBNAME}
|
|
||||||
query = SELECT IF(EXISTS(SELECT address, domain FROM alias
|
|
||||||
WHERE address = '%s'
|
|
||||||
AND domain IN (
|
|
||||||
SELECT domain FROM domain
|
|
||||||
WHERE backupmx = '1'
|
|
||||||
AND relay_all_recipients = '1'
|
|
||||||
AND relay_unknown_only = '1')
|
|
||||||
|
|
||||||
), 'lmtp:inet:dovecot:24', NULL) AS 'transport'
|
|
||||||
EOF
|
|
||||||
|
|
||||||
cat <<EOF > /opt/postfix/conf/sql/mysql_relay_recipient_maps.cf
|
|
||||||
# Autogenerated by mailcow
|
|
||||||
user = ${DBUSER}
|
|
||||||
password = ${DBPASS}
|
|
||||||
hosts = unix:/var/run/mysqld/mysqld.sock
|
|
||||||
dbname = ${DBNAME}
|
|
||||||
query = SELECT DISTINCT
|
|
||||||
CASE WHEN '%d' IN (
|
|
||||||
SELECT domain FROM domain
|
|
||||||
WHERE relay_all_recipients=1
|
|
||||||
AND domain='%d'
|
|
||||||
AND backupmx=1
|
|
||||||
)
|
|
||||||
THEN '%s' ELSE (
|
|
||||||
SELECT goto FROM alias WHERE address='%s' AND active='1'
|
|
||||||
)
|
|
||||||
END AS result;
|
|
||||||
EOF
|
|
||||||
|
|
||||||
cat <<EOF > /opt/postfix/conf/sql/mysql_tls_policy_override_maps.cf
|
|
||||||
# Autogenerated by mailcow
|
|
||||||
user = ${DBUSER}
|
|
||||||
password = ${DBPASS}
|
|
||||||
hosts = unix:/var/run/mysqld/mysqld.sock
|
|
||||||
dbname = ${DBNAME}
|
|
||||||
query = SELECT CONCAT(policy, ' ', parameters) AS tls_policy FROM tls_policy_override WHERE active = '1' AND dest = '%s'
|
|
||||||
EOF
|
|
||||||
|
|
||||||
cat <<EOF > /opt/postfix/conf/sql/mysql_tls_enforce_in_policy.cf
|
|
||||||
# Autogenerated by mailcow
|
|
||||||
user = ${DBUSER}
|
|
||||||
password = ${DBPASS}
|
|
||||||
hosts = unix:/var/run/mysqld/mysqld.sock
|
|
||||||
dbname = ${DBNAME}
|
|
||||||
query = SELECT IF(EXISTS(
|
|
||||||
SELECT 'TLS_ACTIVE' FROM alias
|
|
||||||
LEFT OUTER JOIN mailbox ON mailbox.username = alias.goto
|
|
||||||
WHERE (address='%s'
|
|
||||||
OR address IN (
|
|
||||||
SELECT CONCAT('%u', '@', target_domain) FROM alias_domain
|
|
||||||
WHERE alias_domain='%d'
|
|
||||||
)
|
|
||||||
) AND JSON_UNQUOTE(JSON_VALUE(attributes, '$.tls_enforce_in')) = '1' AND mailbox.active = '1'
|
|
||||||
), 'reject_plaintext_session', NULL) AS 'tls_enforce_in';
|
|
||||||
EOF
|
|
||||||
|
|
||||||
cat <<EOF > /opt/postfix/conf/sql/mysql_sender_dependent_default_transport_maps.cf
|
|
||||||
# Autogenerated by mailcow
|
|
||||||
user = ${DBUSER}
|
|
||||||
password = ${DBPASS}
|
|
||||||
hosts = unix:/var/run/mysqld/mysqld.sock
|
|
||||||
dbname = ${DBNAME}
|
|
||||||
query = SELECT GROUP_CONCAT(transport SEPARATOR '') AS transport_maps
|
|
||||||
FROM (
|
|
||||||
SELECT IF(EXISTS(SELECT 'smtp_type' FROM alias
|
|
||||||
LEFT OUTER JOIN mailbox ON mailbox.username = alias.goto
|
|
||||||
WHERE (address = '%s'
|
|
||||||
OR address IN (
|
|
||||||
SELECT CONCAT('%u', '@', target_domain) FROM alias_domain
|
|
||||||
WHERE alias_domain = '%d'
|
|
||||||
)
|
|
||||||
)
|
|
||||||
AND JSON_UNQUOTE(JSON_VALUE(attributes, '$.tls_enforce_out')) = '1'
|
|
||||||
AND mailbox.active = '1'
|
|
||||||
), 'smtp_enforced_tls:', 'smtp:') AS 'transport'
|
|
||||||
UNION ALL
|
|
||||||
SELECT COALESCE(
|
|
||||||
(SELECT hostname FROM relayhosts
|
|
||||||
LEFT OUTER JOIN mailbox ON JSON_UNQUOTE(JSON_VALUE(mailbox.attributes, '$.relayhost')) = relayhosts.id
|
|
||||||
WHERE relayhosts.active = '1'
|
|
||||||
AND (
|
|
||||||
mailbox.username IN (SELECT alias.goto from alias
|
|
||||||
JOIN mailbox ON mailbox.username = alias.goto
|
|
||||||
WHERE alias.active = '1'
|
|
||||||
AND alias.address = '%s'
|
|
||||||
AND alias.address NOT LIKE '@%%'
|
|
||||||
)
|
|
||||||
)
|
|
||||||
),
|
|
||||||
(SELECT hostname FROM relayhosts
|
|
||||||
LEFT OUTER JOIN domain ON domain.relayhost = relayhosts.id
|
|
||||||
WHERE relayhosts.active = '1'
|
|
||||||
AND (domain.domain = '%d'
|
|
||||||
OR domain.domain IN (
|
|
||||||
SELECT target_domain FROM alias_domain
|
|
||||||
WHERE alias_domain = '%d'
|
|
||||||
)
|
|
||||||
)
|
|
||||||
)
|
|
||||||
)
|
|
||||||
) AS transport_view;
|
|
||||||
EOF
|
|
||||||
|
|
||||||
cat <<EOF > /opt/postfix/conf/sql/mysql_transport_maps.cf
|
|
||||||
# Autogenerated by mailcow
|
|
||||||
user = ${DBUSER}
|
|
||||||
password = ${DBPASS}
|
|
||||||
hosts = unix:/var/run/mysqld/mysqld.sock
|
|
||||||
dbname = ${DBNAME}
|
|
||||||
query = SELECT CONCAT('smtp_via_transport_maps:', nexthop) AS transport FROM transports
|
|
||||||
WHERE active = '1'
|
|
||||||
AND destination = '%s';
|
|
||||||
EOF
|
|
||||||
|
|
||||||
cat <<EOF > /opt/postfix/conf/sql/mysql_virtual_resource_maps.cf
|
|
||||||
# Autogenerated by mailcow
|
|
||||||
user = ${DBUSER}
|
|
||||||
password = ${DBPASS}
|
|
||||||
hosts = unix:/var/run/mysqld/mysqld.sock
|
|
||||||
dbname = ${DBNAME}
|
|
||||||
query = SELECT 'null@localhost' FROM mailbox
|
|
||||||
WHERE kind REGEXP 'location|thing|group' AND username = '%s';
|
|
||||||
EOF
|
|
||||||
|
|
||||||
cat <<EOF > /opt/postfix/conf/sql/mysql_sasl_passwd_maps_sender_dependent.cf
|
|
||||||
# Autogenerated by mailcow
|
|
||||||
user = ${DBUSER}
|
|
||||||
password = ${DBPASS}
|
|
||||||
hosts = unix:/var/run/mysqld/mysqld.sock
|
|
||||||
dbname = ${DBNAME}
|
|
||||||
query = SELECT CONCAT_WS(':', username, password) AS auth_data FROM relayhosts
|
|
||||||
WHERE id IN (
|
|
||||||
SELECT COALESCE(
|
|
||||||
(SELECT id FROM relayhosts
|
|
||||||
LEFT OUTER JOIN domain ON domain.relayhost = relayhosts.id
|
|
||||||
WHERE relayhosts.active = '1'
|
|
||||||
AND (domain.domain = '%d'
|
|
||||||
OR domain.domain IN (
|
|
||||||
SELECT target_domain FROM alias_domain
|
|
||||||
WHERE alias_domain = '%d'
|
|
||||||
)
|
|
||||||
)
|
|
||||||
),
|
|
||||||
(SELECT id FROM relayhosts
|
|
||||||
LEFT OUTER JOIN mailbox ON JSON_UNQUOTE(JSON_VALUE(mailbox.attributes, '$.relayhost')) = relayhosts.id
|
|
||||||
WHERE relayhosts.active = '1'
|
|
||||||
AND (
|
|
||||||
mailbox.username IN (
|
|
||||||
SELECT alias.goto from alias
|
|
||||||
JOIN mailbox ON mailbox.username = alias.goto
|
|
||||||
WHERE alias.active = '1'
|
|
||||||
AND alias.address = '%s'
|
|
||||||
AND alias.address NOT LIKE '@%%'
|
|
||||||
)
|
|
||||||
)
|
|
||||||
)
|
|
||||||
)
|
|
||||||
)
|
|
||||||
AND active = '1'
|
|
||||||
AND username != '';
|
|
||||||
EOF
|
|
||||||
|
|
||||||
cat <<EOF > /opt/postfix/conf/sql/mysql_sasl_passwd_maps_transport_maps.cf
|
|
||||||
# Autogenerated by mailcow
|
|
||||||
user = ${DBUSER}
|
|
||||||
password = ${DBPASS}
|
|
||||||
hosts = unix:/var/run/mysqld/mysqld.sock
|
|
||||||
dbname = ${DBNAME}
|
|
||||||
query = SELECT CONCAT_WS(':', username, password) AS auth_data FROM transports
|
|
||||||
WHERE nexthop = '%s'
|
|
||||||
AND active = '1'
|
|
||||||
AND username != ''
|
|
||||||
LIMIT 1;
|
|
||||||
EOF
|
|
||||||
|
|
||||||
cat <<EOF > /opt/postfix/conf/sql/mysql_virtual_alias_domain_maps.cf
|
|
||||||
# Autogenerated by mailcow
|
|
||||||
user = ${DBUSER}
|
|
||||||
password = ${DBPASS}
|
|
||||||
hosts = unix:/var/run/mysqld/mysqld.sock
|
|
||||||
dbname = ${DBNAME}
|
|
||||||
query = SELECT username FROM mailbox, alias_domain
|
|
||||||
WHERE alias_domain.alias_domain = '%d'
|
|
||||||
AND mailbox.username = CONCAT('%u', '@', alias_domain.target_domain)
|
|
||||||
AND (mailbox.active = '1' OR mailbox.active = '2')
|
|
||||||
AND alias_domain.active='1'
|
|
||||||
EOF
|
|
||||||
|
|
||||||
cat <<EOF > /opt/postfix/conf/sql/mysql_virtual_alias_maps.cf
|
|
||||||
# Autogenerated by mailcow
|
|
||||||
user = ${DBUSER}
|
|
||||||
password = ${DBPASS}
|
|
||||||
hosts = unix:/var/run/mysqld/mysqld.sock
|
|
||||||
dbname = ${DBNAME}
|
|
||||||
query = SELECT goto FROM alias
|
|
||||||
WHERE address='%s'
|
|
||||||
AND (active='1' OR active='2');
|
|
||||||
EOF
|
|
||||||
|
|
||||||
cat <<EOF > /opt/postfix/conf/sql/mysql_recipient_bcc_maps.cf
|
|
||||||
# Autogenerated by mailcow
|
|
||||||
user = ${DBUSER}
|
|
||||||
password = ${DBPASS}
|
|
||||||
hosts = unix:/var/run/mysqld/mysqld.sock
|
|
||||||
dbname = ${DBNAME}
|
|
||||||
query = SELECT bcc_dest FROM bcc_maps
|
|
||||||
WHERE local_dest='%s'
|
|
||||||
AND type='rcpt'
|
|
||||||
AND active='1';
|
|
||||||
EOF
|
|
||||||
|
|
||||||
cat <<EOF > /opt/postfix/conf/sql/mysql_sender_bcc_maps.cf
|
|
||||||
# Autogenerated by mailcow
|
|
||||||
user = ${DBUSER}
|
|
||||||
password = ${DBPASS}
|
|
||||||
hosts = unix:/var/run/mysqld/mysqld.sock
|
|
||||||
dbname = ${DBNAME}
|
|
||||||
query = SELECT bcc_dest FROM bcc_maps
|
|
||||||
WHERE local_dest='%s'
|
|
||||||
AND type='sender'
|
|
||||||
AND active='1';
|
|
||||||
EOF
|
|
||||||
|
|
||||||
cat <<EOF > /opt/postfix/conf/sql/mysql_recipient_canonical_maps.cf
|
|
||||||
# Autogenerated by mailcow
|
|
||||||
user = ${DBUSER}
|
|
||||||
password = ${DBPASS}
|
|
||||||
hosts = unix:/var/run/mysqld/mysqld.sock
|
|
||||||
dbname = ${DBNAME}
|
|
||||||
query = SELECT new_dest FROM recipient_maps
|
|
||||||
WHERE old_dest='%s'
|
|
||||||
AND active='1';
|
|
||||||
EOF
|
|
||||||
|
|
||||||
cat <<EOF > /opt/postfix/conf/sql/mysql_virtual_domains_maps.cf
|
|
||||||
# Autogenerated by mailcow
|
|
||||||
user = ${DBUSER}
|
|
||||||
password = ${DBPASS}
|
|
||||||
hosts = unix:/var/run/mysqld/mysqld.sock
|
|
||||||
dbname = ${DBNAME}
|
|
||||||
query = SELECT alias_domain from alias_domain WHERE alias_domain='%s' AND active='1'
|
|
||||||
UNION
|
|
||||||
SELECT domain FROM domain
|
|
||||||
WHERE domain='%s'
|
|
||||||
AND active = '1'
|
|
||||||
AND backupmx = '0'
|
|
||||||
EOF
|
|
||||||
|
|
||||||
cat <<EOF > /opt/postfix/conf/sql/mysql_virtual_mailbox_maps.cf
|
|
||||||
# Autogenerated by mailcow
|
|
||||||
user = ${DBUSER}
|
|
||||||
password = ${DBPASS}
|
|
||||||
hosts = unix:/var/run/mysqld/mysqld.sock
|
|
||||||
dbname = ${DBNAME}
|
|
||||||
query = SELECT CONCAT(JSON_UNQUOTE(JSON_VALUE(attributes, '$.mailbox_format')), mailbox_path_prefix, '%d/%u/') FROM mailbox WHERE username='%s' AND (active = '1' OR active = '2')
|
|
||||||
EOF
|
|
||||||
|
|
||||||
cat <<EOF > /opt/postfix/conf/sql/mysql_virtual_relay_domain_maps.cf
|
|
||||||
# Autogenerated by mailcow
|
|
||||||
user = ${DBUSER}
|
|
||||||
password = ${DBPASS}
|
|
||||||
hosts = unix:/var/run/mysqld/mysqld.sock
|
|
||||||
dbname = ${DBNAME}
|
|
||||||
query = SELECT domain FROM domain WHERE domain='%s' AND backupmx = '1' AND active = '1'
|
|
||||||
EOF
|
|
||||||
|
|
||||||
cat <<EOF > /opt/postfix/conf/sql/mysql_virtual_sender_acl.cf
|
|
||||||
# Autogenerated by mailcow
|
|
||||||
user = ${DBUSER}
|
|
||||||
password = ${DBPASS}
|
|
||||||
hosts = unix:/var/run/mysqld/mysqld.sock
|
|
||||||
dbname = ${DBNAME}
|
|
||||||
# First select queries domain and alias_domain to determine if domains are active.
|
|
||||||
query = SELECT goto FROM alias
|
|
||||||
WHERE id IN (
|
|
||||||
SELECT COALESCE (
|
|
||||||
(
|
|
||||||
SELECT id FROM alias
|
|
||||||
WHERE address='%s'
|
|
||||||
AND (active='1' OR active='2')
|
|
||||||
), (
|
|
||||||
SELECT id FROM alias
|
|
||||||
WHERE address='@%d'
|
|
||||||
AND (active='1' OR active='2')
|
|
||||||
)
|
|
||||||
)
|
|
||||||
)
|
|
||||||
AND active='1'
|
|
||||||
AND (domain IN
|
|
||||||
(SELECT domain FROM domain
|
|
||||||
WHERE domain='%d'
|
|
||||||
AND active='1')
|
|
||||||
OR domain in (
|
|
||||||
SELECT alias_domain FROM alias_domain
|
|
||||||
WHERE alias_domain='%d'
|
|
||||||
AND active='1'
|
|
||||||
)
|
|
||||||
)
|
|
||||||
UNION
|
|
||||||
SELECT logged_in_as FROM sender_acl
|
|
||||||
WHERE send_as='@%d'
|
|
||||||
OR send_as='%s'
|
|
||||||
OR send_as='*'
|
|
||||||
OR send_as IN (
|
|
||||||
SELECT CONCAT('@',target_domain) FROM alias_domain
|
|
||||||
WHERE alias_domain = '%d')
|
|
||||||
OR send_as IN (
|
|
||||||
SELECT CONCAT('%u','@',target_domain) FROM alias_domain
|
|
||||||
WHERE alias_domain = '%d')
|
|
||||||
AND logged_in_as NOT IN (
|
|
||||||
SELECT goto FROM alias
|
|
||||||
WHERE address='%s')
|
|
||||||
UNION
|
|
||||||
SELECT username FROM mailbox, alias_domain
|
|
||||||
WHERE alias_domain.alias_domain = '%d'
|
|
||||||
AND mailbox.username = CONCAT('%u','@',alias_domain.target_domain)
|
|
||||||
AND (mailbox.active = '1' OR mailbox.active ='2')
|
|
||||||
AND alias_domain.active='1';
|
|
||||||
EOF
|
|
||||||
|
|
||||||
# MX based routing
|
|
||||||
cat <<EOF > /opt/postfix/conf/sql/mysql_mbr_access_maps.cf
|
|
||||||
# Autogenerated by mailcow
|
|
||||||
user = ${DBUSER}
|
|
||||||
password = ${DBPASS}
|
|
||||||
hosts = unix:/var/run/mysqld/mysqld.sock
|
|
||||||
dbname = ${DBNAME}
|
|
||||||
query = SELECT CONCAT('FILTER smtp_via_transport_maps:', nexthop) as transport FROM transports
|
|
||||||
WHERE '%s' REGEXP destination
|
|
||||||
AND active='1'
|
|
||||||
AND is_mx_based='1';
|
|
||||||
EOF
|
|
||||||
|
|
||||||
cat <<EOF > /opt/postfix/conf/sql/mysql_virtual_spamalias_maps.cf
|
|
||||||
# Autogenerated by mailcow
|
|
||||||
user = ${DBUSER}
|
|
||||||
password = ${DBPASS}
|
|
||||||
hosts = unix:/var/run/mysqld/mysqld.sock
|
|
||||||
dbname = ${DBNAME}
|
|
||||||
query = SELECT goto FROM spamalias
|
|
||||||
WHERE address='%s'
|
|
||||||
AND validity >= UNIX_TIMESTAMP()
|
|
||||||
EOF
|
|
||||||
|
|
||||||
if [ ! -f /opt/postfix/conf/dns_blocklists.cf ]; then
|
|
||||||
cat <<EOF > /opt/postfix/conf/dns_blocklists.cf
|
|
||||||
# This file can be edited.
|
|
||||||
# Delete this file and restart postfix container to revert any changes.
|
|
||||||
postscreen_dnsbl_sites = wl.mailspike.net=127.0.0.[18;19;20]*-2
|
|
||||||
hostkarma.junkemailfilter.com=127.0.0.1*-2
|
|
||||||
list.dnswl.org=127.0.[0..255].0*-2
|
|
||||||
list.dnswl.org=127.0.[0..255].1*-4
|
|
||||||
list.dnswl.org=127.0.[0..255].2*-6
|
|
||||||
list.dnswl.org=127.0.[0..255].3*-8
|
|
||||||
bl.spamcop.net*2
|
|
||||||
bl.suomispam.net*2
|
|
||||||
hostkarma.junkemailfilter.com=127.0.0.2*3
|
|
||||||
hostkarma.junkemailfilter.com=127.0.0.4*2
|
|
||||||
hostkarma.junkemailfilter.com=127.0.1.2*1
|
|
||||||
backscatter.spameatingmonkey.net*2
|
|
||||||
bl.ipv6.spameatingmonkey.net*2
|
|
||||||
bl.spameatingmonkey.net*2
|
|
||||||
b.barracudacentral.org=127.0.0.2*7
|
|
||||||
bl.mailspike.net=127.0.0.2*5
|
|
||||||
bl.mailspike.net=127.0.0.[10;11;12]*4
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Remove discontinued DNSBLs from existing dns_blocklists.cf
|
|
||||||
sed -i '/ix\.dnsbl\.manitu\.net\*2/d' /opt/postfix/conf/dns_blocklists.cf # Nixspam
|
|
||||||
|
|
||||||
DNSBL_CONFIG=$(grep -v '^#' /opt/postfix/conf/dns_blocklists.cf | grep '\S')
|
|
||||||
|
|
||||||
if [ ! -z "$DNSBL_CONFIG" ]; then
|
|
||||||
echo -e "\e[33mChecking if ASN for your IP is listed for Spamhaus Bad ASN List...\e[0m"
|
|
||||||
if [ -n "$SPAMHAUS_DQS_KEY" ]; then
|
|
||||||
echo -e "\e[32mDetected SPAMHAUS_DQS_KEY variable from mailcow.conf...\e[0m"
|
|
||||||
echo -e "\e[33mUsing DQS Blocklists from Spamhaus!\e[0m"
|
|
||||||
SPAMHAUS_DNSBL_CONFIG=$(cat <<EOF
|
|
||||||
${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net=127.0.0.[4..7]*6
|
|
||||||
${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net=127.0.0.[10;11]*8
|
|
||||||
${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net=127.0.0.3*4
|
|
||||||
${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net=127.0.0.2*3
|
|
||||||
postscreen_dnsbl_reply_map = texthash:/opt/postfix/conf/dnsbl_reply.map
|
|
||||||
EOF
|
|
||||||
|
|
||||||
cat <<EOF > /opt/postfix/conf/dnsbl_reply.map
|
|
||||||
# Autogenerated by mailcow, using Spamhaus DQS reply domains
|
|
||||||
${SPAMHAUS_DQS_KEY}.sbl.dq.spamhaus.net sbl.spamhaus.org
|
|
||||||
${SPAMHAUS_DQS_KEY}.xbl.dq.spamhaus.net xbl.spamhaus.org
|
|
||||||
${SPAMHAUS_DQS_KEY}.pbl.dq.spamhaus.net pbl.spamhaus.org
|
|
||||||
${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net zen.spamhaus.org
|
|
||||||
${SPAMHAUS_DQS_KEY}.dbl.dq.spamhaus.net dbl.spamhaus.org
|
|
||||||
${SPAMHAUS_DQS_KEY}.zrd.dq.spamhaus.net zrd.spamhaus.org
|
|
||||||
EOF
|
|
||||||
)
|
|
||||||
else
|
|
||||||
if [ -f "/opt/postfix/conf/dnsbl_reply.map" ]; then
|
|
||||||
rm /opt/postfix/conf/dnsbl_reply.map
|
|
||||||
fi
|
|
||||||
response=$(curl --connect-timeout 15 --max-time 30 -s -o /dev/null -w "%{http_code}" "https://asn-check.mailcow.email")
|
|
||||||
if [ "$response" -eq 503 ]; then
|
|
||||||
echo -e "\e[31mThe AS of your IP is listed as a banned AS from Spamhaus!\e[0m"
|
|
||||||
echo -e "\e[33mNo SPAMHAUS_DQS_KEY found... Skipping Spamhaus blocklists entirely!\e[0m"
|
|
||||||
SPAMHAUS_DNSBL_CONFIG=""
|
|
||||||
elif [ "$response" -eq 200 ]; then
|
|
||||||
echo -e "\e[32mThe AS of your IP is NOT listed as a banned AS from Spamhaus!\e[0m"
|
|
||||||
echo -e "\e[33mUsing the open Spamhaus blocklists.\e[0m"
|
|
||||||
SPAMHAUS_DNSBL_CONFIG=$(cat <<EOF
|
|
||||||
zen.spamhaus.org=127.0.0.[10;11]*8
|
|
||||||
zen.spamhaus.org=127.0.0.[4..7]*6
|
|
||||||
zen.spamhaus.org=127.0.0.3*4
|
|
||||||
zen.spamhaus.org=127.0.0.2*3
|
|
||||||
EOF
|
|
||||||
)
|
|
||||||
|
|
||||||
else
|
|
||||||
echo -e "\e[31mWe couldn't determine your AS... (maybe DNS/Network issue?) Response Code: $response\e[0m"
|
|
||||||
echo -e "\e[33mDeactivating Spamhaus DNS Blocklists to be on the safe site!\e[0m"
|
|
||||||
SPAMHAUS_DNSBL_CONFIG=""
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Reset main.cf
|
|
||||||
sed -i '/Overrides/q' /opt/postfix/conf/main.cf
|
|
||||||
echo >> /opt/postfix/conf/main.cf
|
|
||||||
# Append postscreen dnsbl sites to main.cf
|
|
||||||
if [ ! -z "$DNSBL_CONFIG" ]; then
|
|
||||||
echo -e "${DNSBL_CONFIG}\n${SPAMHAUS_DNSBL_CONFIG}" >> /opt/postfix/conf/main.cf
|
|
||||||
fi
|
|
||||||
# Append user overrides
|
|
||||||
echo -e "\n# User Overrides" >> /opt/postfix/conf/main.cf
|
|
||||||
touch /opt/postfix/conf/extra.cf
|
|
||||||
sed -i '/\$myhostname/! { /myhostname/d }' /opt/postfix/conf/extra.cf
|
|
||||||
echo -e "myhostname = ${MAILCOW_HOSTNAME}\n$(cat /opt/postfix/conf/extra.cf)" > /opt/postfix/conf/extra.cf
|
|
||||||
cat /opt/postfix/conf/extra.cf >> /opt/postfix/conf/main.cf
|
|
||||||
|
|
||||||
if [ ! -f /opt/postfix/conf/custom_transport.pcre ]; then
|
|
||||||
echo "Creating dummy custom_transport.pcre"
|
|
||||||
touch /opt/postfix/conf/custom_transport.pcre
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ ! -f /opt/postfix/conf/custom_postscreen_whitelist.cidr ]]; then
|
|
||||||
echo "Creating dummy custom_postscreen_whitelist.cidr"
|
|
||||||
cat <<EOF > /opt/postfix/conf/custom_postscreen_whitelist.cidr
|
|
||||||
# Autogenerated by mailcow
|
|
||||||
# Rules are evaluated in the order as specified.
|
|
||||||
# Blacklist 192.168.* except 192.168.0.1.
|
|
||||||
# 192.168.0.1 permit
|
|
||||||
# 192.168.0.0/16 reject
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Fix Postfix permissions
|
|
||||||
chown -R root:postfix /opt/postfix/conf/sql/ /opt/postfix/conf/custom_transport.pcre
|
|
||||||
chmod 640 /opt/postfix/conf/sql/*.cf /opt/postfix/conf/custom_transport.pcre
|
|
||||||
chgrp -R postdrop /var/spool/postfix/public
|
|
||||||
chgrp -R postdrop /var/spool/postfix/maildrop
|
|
||||||
postfix set-permissions
|
|
||||||
|
|
||||||
# Checking if there is a leftover of a crashed postfix container before starting a new one
|
|
||||||
if [ -e /var/spool/postfix/pid/master.pid ]; then
|
|
||||||
rm -rf /var/spool/postfix/pid/master.pid
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Check Postfix configuration
|
|
||||||
postconf -c /opt/postfix/conf > /dev/null
|
|
||||||
|
|
||||||
if [[ $? != 0 ]]; then
|
|
||||||
echo "Postfix configuration error, refusing to start."
|
|
||||||
exit 1
|
|
||||||
else
|
|
||||||
postfix -c /opt/postfix/conf start
|
|
||||||
sleep 126144000
|
|
||||||
fi
|
|
||||||
@@ -11,8 +11,8 @@ stderr_logfile=/dev/stderr
|
|||||||
stderr_logfile_maxbytes=0
|
stderr_logfile_maxbytes=0
|
||||||
autostart=true
|
autostart=true
|
||||||
|
|
||||||
[program:postfix]
|
[program:bootstrap]
|
||||||
command=/opt/postfix.sh
|
command=/docker-entrypoint.sh
|
||||||
stdout_logfile=/dev/stdout
|
stdout_logfile=/dev/stdout
|
||||||
stdout_logfile_maxbytes=0
|
stdout_logfile_maxbytes=0
|
||||||
stderr_logfile=/dev/stderr
|
stderr_logfile=/dev/stderr
|
||||||
|
|||||||
@@ -2,7 +2,7 @@ FROM debian:bookworm-slim
|
|||||||
LABEL maintainer="The Infrastructure Company GmbH <info@servercow.de>"
|
LABEL maintainer="The Infrastructure Company GmbH <info@servercow.de>"
|
||||||
|
|
||||||
ARG DEBIAN_FRONTEND=noninteractive
|
ARG DEBIAN_FRONTEND=noninteractive
|
||||||
ARG RSPAMD_VER=rspamd_3.13.2-1~8bf602278
|
ARG RSPAMD_VER=rspamd_3.11.1-1~ab0b44951
|
||||||
ARG CODENAME=bookworm
|
ARG CODENAME=bookworm
|
||||||
ENV LC_ALL=C
|
ENV LC_ALL=C
|
||||||
|
|
||||||
@@ -18,6 +18,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
|
|||||||
procps \
|
procps \
|
||||||
nano \
|
nano \
|
||||||
lua-cjson \
|
lua-cjson \
|
||||||
|
python3 python3-pip \
|
||||||
&& arch=$(arch | sed s/aarch64/arm64/ | sed s/x86_64/amd64/) \
|
&& arch=$(arch | sed s/aarch64/arm64/ | sed s/x86_64/amd64/) \
|
||||||
&& wget -P /tmp https://rspamd.com/apt-stable/pool/main/r/rspamd/${RSPAMD_VER}~${CODENAME}_${arch}.deb\
|
&& wget -P /tmp https://rspamd.com/apt-stable/pool/main/r/rspamd/${RSPAMD_VER}~${CODENAME}_${arch}.deb\
|
||||||
&& apt install -y /tmp/${RSPAMD_VER}~${CODENAME}_${arch}.deb \
|
&& apt install -y /tmp/${RSPAMD_VER}~${CODENAME}_${arch}.deb \
|
||||||
@@ -29,12 +30,20 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
|
|||||||
&& echo 'alias ll="ls -la --color"' >> ~/.bashrc \
|
&& echo 'alias ll="ls -la --color"' >> ~/.bashrc \
|
||||||
&& sed -i 's/#analysis_keyword_table > 0/analysis_cat_table.macro_exist == "M"/g' /usr/share/rspamd/lualib/lua_scanners/oletools.lua
|
&& sed -i 's/#analysis_keyword_table > 0/analysis_cat_table.macro_exist == "M"/g' /usr/share/rspamd/lualib/lua_scanners/oletools.lua
|
||||||
|
|
||||||
COPY settings.conf /etc/rspamd/settings.conf
|
RUN pip install --break-system-packages \
|
||||||
COPY set_worker_password.sh /set_worker_password.sh
|
mysql-connector-python \
|
||||||
COPY docker-entrypoint.sh /docker-entrypoint.sh
|
jinja2 \
|
||||||
|
redis \
|
||||||
|
dnspython \
|
||||||
|
psutil
|
||||||
|
|
||||||
|
|
||||||
|
COPY data/Dockerfiles/bootstrap /bootstrap
|
||||||
|
COPY data/Dockerfiles/rspamd/settings.conf /etc/rspamd/settings.conf
|
||||||
|
COPY data/Dockerfiles/rspamd/set_worker_password.sh /set_worker_password.sh
|
||||||
|
COPY data/Dockerfiles/rspamd/docker-entrypoint.sh /docker-entrypoint.sh
|
||||||
|
|
||||||
ENTRYPOINT ["/docker-entrypoint.sh"]
|
|
||||||
|
|
||||||
STOPSIGNAL SIGTERM
|
STOPSIGNAL SIGTERM
|
||||||
|
ENTRYPOINT ["/docker-entrypoint.sh"]
|
||||||
CMD ["/usr/bin/rspamd", "-f", "-u", "_rspamd", "-g", "_rspamd"]
|
CMD ["/usr/bin/rspamd", "-f", "-u", "_rspamd", "-g", "_rspamd"]
|
||||||
|
|||||||
@@ -1,146 +1,5 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
until nc phpfpm 9001 -z; do
|
|
||||||
echo "Waiting for PHP on port 9001..."
|
|
||||||
sleep 3
|
|
||||||
done
|
|
||||||
|
|
||||||
until nc phpfpm 9002 -z; do
|
|
||||||
echo "Waiting for PHP on port 9002..."
|
|
||||||
sleep 3
|
|
||||||
done
|
|
||||||
|
|
||||||
mkdir -p /etc/rspamd/plugins.d \
|
|
||||||
/etc/rspamd/custom
|
|
||||||
|
|
||||||
touch /etc/rspamd/rspamd.conf.local \
|
|
||||||
/etc/rspamd/rspamd.conf.override
|
|
||||||
|
|
||||||
chmod 755 /var/lib/rspamd
|
|
||||||
|
|
||||||
|
|
||||||
[[ ! -f /etc/rspamd/override.d/worker-controller-password.inc ]] && echo '# Autogenerated by mailcow' > /etc/rspamd/override.d/worker-controller-password.inc
|
|
||||||
|
|
||||||
echo ${IPV4_NETWORK}.0/24 > /etc/rspamd/custom/mailcow_networks.map
|
|
||||||
echo ${IPV6_NETWORK} >> /etc/rspamd/custom/mailcow_networks.map
|
|
||||||
|
|
||||||
DOVECOT_V4=
|
|
||||||
DOVECOT_V6=
|
|
||||||
until [[ ! -z ${DOVECOT_V4} ]]; do
|
|
||||||
DOVECOT_V4=$(dig a dovecot +short)
|
|
||||||
DOVECOT_V6=$(dig aaaa dovecot +short)
|
|
||||||
[[ ! -z ${DOVECOT_V4} ]] && break;
|
|
||||||
echo "Waiting for Dovecot..."
|
|
||||||
sleep 3
|
|
||||||
done
|
|
||||||
echo ${DOVECOT_V4}/32 > /etc/rspamd/custom/dovecot_trusted.map
|
|
||||||
if [[ ! -z ${DOVECOT_V6} ]]; then
|
|
||||||
echo ${DOVECOT_V6}/128 >> /etc/rspamd/custom/dovecot_trusted.map
|
|
||||||
fi
|
|
||||||
|
|
||||||
RSPAMD_V4=
|
|
||||||
RSPAMD_V6=
|
|
||||||
until [[ ! -z ${RSPAMD_V4} ]]; do
|
|
||||||
RSPAMD_V4=$(dig a rspamd +short)
|
|
||||||
RSPAMD_V6=$(dig aaaa rspamd +short)
|
|
||||||
[[ ! -z ${RSPAMD_V4} ]] && break;
|
|
||||||
echo "Waiting for Rspamd..."
|
|
||||||
sleep 3
|
|
||||||
done
|
|
||||||
echo ${RSPAMD_V4}/32 > /etc/rspamd/custom/rspamd_trusted.map
|
|
||||||
if [[ ! -z ${RSPAMD_V6} ]]; then
|
|
||||||
echo ${RSPAMD_V6}/128 >> /etc/rspamd/custom/rspamd_trusted.map
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
|
|
||||||
cat <<EOF > /etc/rspamd/local.d/redis.conf
|
|
||||||
read_servers = "redis:6379";
|
|
||||||
write_servers = "${REDIS_SLAVEOF_IP}:${REDIS_SLAVEOF_PORT}";
|
|
||||||
password = "${REDISPASS}";
|
|
||||||
timeout = 10;
|
|
||||||
EOF
|
|
||||||
until [[ $(redis-cli -h redis-mailcow -a ${REDISPASS} --no-auth-warning PING) == "PONG" ]]; do
|
|
||||||
echo "Waiting for Redis @redis-mailcow..."
|
|
||||||
sleep 2
|
|
||||||
done
|
|
||||||
until [[ $(redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT} -a ${REDISPASS} --no-auth-warning PING) == "PONG" ]]; do
|
|
||||||
echo "Waiting for Redis @${REDIS_SLAVEOF_IP}..."
|
|
||||||
sleep 2
|
|
||||||
done
|
|
||||||
redis-cli -h redis-mailcow -a ${REDISPASS} --no-auth-warning SLAVEOF ${REDIS_SLAVEOF_IP} ${REDIS_SLAVEOF_PORT}
|
|
||||||
else
|
|
||||||
cat <<EOF > /etc/rspamd/local.d/redis.conf
|
|
||||||
servers = "redis:6379";
|
|
||||||
password = "${REDISPASS}";
|
|
||||||
timeout = 10;
|
|
||||||
EOF
|
|
||||||
until [[ $(redis-cli -h redis-mailcow -a ${REDISPASS} --no-auth-warning PING) == "PONG" ]]; do
|
|
||||||
echo "Waiting for Redis slave..."
|
|
||||||
sleep 2
|
|
||||||
done
|
|
||||||
redis-cli -h redis-mailcow -a ${REDISPASS} --no-auth-warning SLAVEOF NO ONE
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ "${SKIP_OLEFY}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
|
|
||||||
if [[ -f /etc/rspamd/local.d/external_services.conf ]]; then
|
|
||||||
rm /etc/rspamd/local.d/external_services.conf
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
if [[ ! -f /etc/rspamd/local.d/external_services.conf ]]; then
|
|
||||||
cat <<EOF > /etc/rspamd/local.d/external_services.conf
|
|
||||||
oletools {
|
|
||||||
# default olefy settings
|
|
||||||
servers = "olefy:10055";
|
|
||||||
# needs to be set explicitly for Rspamd < 1.9.5
|
|
||||||
scan_mime_parts = true;
|
|
||||||
# mime-part regex matching in content-type or filename
|
|
||||||
# block all macros
|
|
||||||
extended = true;
|
|
||||||
max_size = 3145728;
|
|
||||||
timeout = 20.0;
|
|
||||||
retransmits = 1;
|
|
||||||
}
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Provide additional lua modules
|
|
||||||
ln -s /usr/lib/$(uname -m)-linux-gnu/liblua5.1-cjson.so.0.0.0 /usr/lib/rspamd/cjson.so
|
|
||||||
|
|
||||||
chown -R _rspamd:_rspamd /var/lib/rspamd \
|
|
||||||
/etc/rspamd/local.d \
|
|
||||||
/etc/rspamd/override.d \
|
|
||||||
/etc/rspamd/rspamd.conf.local \
|
|
||||||
/etc/rspamd/rspamd.conf.override \
|
|
||||||
/etc/rspamd/plugins.d
|
|
||||||
|
|
||||||
# Fix missing default global maps, if any
|
|
||||||
# These exists in mailcow UI and should not be removed
|
|
||||||
touch /etc/rspamd/custom/global_mime_from_blacklist.map \
|
|
||||||
/etc/rspamd/custom/global_rcpt_blacklist.map \
|
|
||||||
/etc/rspamd/custom/global_smtp_from_blacklist.map \
|
|
||||||
/etc/rspamd/custom/global_mime_from_whitelist.map \
|
|
||||||
/etc/rspamd/custom/global_rcpt_whitelist.map \
|
|
||||||
/etc/rspamd/custom/global_smtp_from_whitelist.map \
|
|
||||||
/etc/rspamd/custom/bad_languages.map \
|
|
||||||
/etc/rspamd/custom/sa-rules \
|
|
||||||
/etc/rspamd/custom/dovecot_trusted.map \
|
|
||||||
/etc/rspamd/custom/rspamd_trusted.map \
|
|
||||||
/etc/rspamd/custom/mailcow_networks.map \
|
|
||||||
/etc/rspamd/custom/ip_wl.map \
|
|
||||||
/etc/rspamd/custom/fishy_tlds.map \
|
|
||||||
/etc/rspamd/custom/bad_words.map \
|
|
||||||
/etc/rspamd/custom/bad_asn.map \
|
|
||||||
/etc/rspamd/custom/bad_words_de.map \
|
|
||||||
/etc/rspamd/custom/bulk_header.map \
|
|
||||||
/etc/rspamd/custom/bad_header.map
|
|
||||||
|
|
||||||
# www-data (82) group needs to write to these files
|
|
||||||
chown _rspamd:_rspamd /etc/rspamd/custom/
|
|
||||||
chmod 0755 /etc/rspamd/custom/.
|
|
||||||
chown -R 82:82 /etc/rspamd/custom/*
|
|
||||||
chmod 644 -R /etc/rspamd/custom/*
|
|
||||||
|
|
||||||
# Run hooks
|
# Run hooks
|
||||||
for file in /hooks/*; do
|
for file in /hooks/*; do
|
||||||
if [ -x "${file}" ]; then
|
if [ -x "${file}" ]; then
|
||||||
@@ -149,190 +8,13 @@ for file in /hooks/*; do
|
|||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
|
|
||||||
# If DQS KEY is set in mailcow.conf add Spamhaus DQS RBLs
|
python3 -u /bootstrap/main.py
|
||||||
if [[ ! -z ${SPAMHAUS_DQS_KEY} ]]; then
|
BOOTSTRAP_EXIT_CODE=$?
|
||||||
cat <<EOF > /etc/rspamd/custom/dqs-rbl.conf
|
|
||||||
# Autogenerated by mailcow. DO NOT TOUCH!
|
|
||||||
spamhaus {
|
|
||||||
rbl = "${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net";
|
|
||||||
from = false;
|
|
||||||
}
|
|
||||||
spamhaus_from {
|
|
||||||
from = true;
|
|
||||||
received = false;
|
|
||||||
rbl = "${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net";
|
|
||||||
returncodes {
|
|
||||||
SPAMHAUS_ZEN = [ "127.0.0.2", "127.0.0.3", "127.0.0.4", "127.0.0.5", "127.0.0.6", "127.0.0.7", "127.0.0.9", "127.0.0.10", "127.0.0.11" ];
|
|
||||||
}
|
|
||||||
}
|
|
||||||
spamhaus_authbl_received {
|
|
||||||
# Check if the sender client is listed in AuthBL (AuthBL is *not* part of ZEN)
|
|
||||||
rbl = "${SPAMHAUS_DQS_KEY}.authbl.dq.spamhaus.net";
|
|
||||||
from = false;
|
|
||||||
received = true;
|
|
||||||
ipv6 = true;
|
|
||||||
returncodes {
|
|
||||||
SH_AUTHBL_RECEIVED = "127.0.0.20"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
spamhaus_dbl {
|
|
||||||
# Add checks on the HELO string
|
|
||||||
rbl = "${SPAMHAUS_DQS_KEY}.dbl.dq.spamhaus.net";
|
|
||||||
helo = true;
|
|
||||||
rdns = true;
|
|
||||||
dkim = true;
|
|
||||||
disable_monitoring = true;
|
|
||||||
returncodes {
|
|
||||||
RBL_DBL_SPAM = "127.0.1.2";
|
|
||||||
RBL_DBL_PHISH = "127.0.1.4";
|
|
||||||
RBL_DBL_MALWARE = "127.0.1.5";
|
|
||||||
RBL_DBL_BOTNET = "127.0.1.6";
|
|
||||||
RBL_DBL_ABUSED_SPAM = "127.0.1.102";
|
|
||||||
RBL_DBL_ABUSED_PHISH = "127.0.1.104";
|
|
||||||
RBL_DBL_ABUSED_MALWARE = "127.0.1.105";
|
|
||||||
RBL_DBL_ABUSED_BOTNET = "127.0.1.106";
|
|
||||||
RBL_DBL_DONT_QUERY_IPS = "127.0.1.255";
|
|
||||||
}
|
|
||||||
}
|
|
||||||
spamhaus_dbl_fullurls {
|
|
||||||
ignore_defaults = true;
|
|
||||||
no_ip = true;
|
|
||||||
rbl = "${SPAMHAUS_DQS_KEY}.dbl.dq.spamhaus.net";
|
|
||||||
selector = 'urls:get_host'
|
|
||||||
disable_monitoring = true;
|
|
||||||
returncodes {
|
|
||||||
DBLABUSED_SPAM_FULLURLS = "127.0.1.102";
|
|
||||||
DBLABUSED_PHISH_FULLURLS = "127.0.1.104";
|
|
||||||
DBLABUSED_MALWARE_FULLURLS = "127.0.1.105";
|
|
||||||
DBLABUSED_BOTNET_FULLURLS = "127.0.1.106";
|
|
||||||
}
|
|
||||||
}
|
|
||||||
spamhaus_zrd {
|
|
||||||
# Add checks on the HELO string also for DQS
|
|
||||||
rbl = "${SPAMHAUS_DQS_KEY}.zrd.dq.spamhaus.net";
|
|
||||||
helo = true;
|
|
||||||
rdns = true;
|
|
||||||
dkim = true;
|
|
||||||
disable_monitoring = true;
|
|
||||||
returncodes {
|
|
||||||
RBL_ZRD_VERY_FRESH_DOMAIN = ["127.0.2.2", "127.0.2.3", "127.0.2.4"];
|
|
||||||
RBL_ZRD_FRESH_DOMAIN = [
|
|
||||||
"127.0.2.5", "127.0.2.6", "127.0.2.7", "127.0.2.8", "127.0.2.9", "127.0.2.10", "127.0.2.11", "127.0.2.12", "127.0.2.13", "127.0.2.14", "127.0.2.15", "127.0.2.16", "127.0.2.17", "127.0.2.18", "127.0.2.19", "127.0.2.20", "127.0.2.21", "127.0.2.22", "127.0.2.23", "127.0.2.24"
|
|
||||||
];
|
|
||||||
RBL_ZRD_DONT_QUERY_IPS = "127.0.2.255";
|
|
||||||
}
|
|
||||||
}
|
|
||||||
"SPAMHAUS_ZEN_URIBL" {
|
|
||||||
enabled = true;
|
|
||||||
rbl = "${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net";
|
|
||||||
resolve_ip = true;
|
|
||||||
checks = ['urls'];
|
|
||||||
replyto = true;
|
|
||||||
emails = true;
|
|
||||||
ipv4 = true;
|
|
||||||
ipv6 = true;
|
|
||||||
emails_domainonly = true;
|
|
||||||
returncodes {
|
|
||||||
URIBL_SBL = "127.0.0.2";
|
|
||||||
URIBL_SBL_CSS = "127.0.0.3";
|
|
||||||
URIBL_XBL = ["127.0.0.4", "127.0.0.5", "127.0.0.6", "127.0.0.7"];
|
|
||||||
URIBL_PBL = ["127.0.0.10", "127.0.0.11"];
|
|
||||||
URIBL_DROP = "127.0.0.9";
|
|
||||||
}
|
|
||||||
}
|
|
||||||
SH_EMAIL_DBL {
|
|
||||||
ignore_defaults = true;
|
|
||||||
replyto = true;
|
|
||||||
emails_domainonly = true;
|
|
||||||
disable_monitoring = true;
|
|
||||||
rbl = "${SPAMHAUS_DQS_KEY}.dbl.dq.spamhaus.net";
|
|
||||||
returncodes = {
|
|
||||||
SH_EMAIL_DBL = [
|
|
||||||
"127.0.1.2",
|
|
||||||
"127.0.1.4",
|
|
||||||
"127.0.1.5",
|
|
||||||
"127.0.1.6"
|
|
||||||
];
|
|
||||||
SH_EMAIL_DBL_ABUSED = [
|
|
||||||
"127.0.1.102",
|
|
||||||
"127.0.1.104",
|
|
||||||
"127.0.1.105",
|
|
||||||
"127.0.1.106"
|
|
||||||
];
|
|
||||||
SH_EMAIL_DBL_DONT_QUERY_IPS = [ "127.0.1.255" ];
|
|
||||||
}
|
|
||||||
}
|
|
||||||
SH_EMAIL_ZRD {
|
|
||||||
ignore_defaults = true;
|
|
||||||
replyto = true;
|
|
||||||
emails_domainonly = true;
|
|
||||||
disable_monitoring = true;
|
|
||||||
rbl = "${SPAMHAUS_DQS_KEY}.zrd.dq.spamhaus.net";
|
|
||||||
returncodes = {
|
|
||||||
SH_EMAIL_ZRD_VERY_FRESH_DOMAIN = ["127.0.2.2", "127.0.2.3", "127.0.2.4"];
|
|
||||||
SH_EMAIL_ZRD_FRESH_DOMAIN = [
|
|
||||||
"127.0.2.5", "127.0.2.6", "127.0.2.7", "127.0.2.8", "127.0.2.9", "127.0.2.10", "127.0.2.11", "127.0.2.12", "127.0.2.13", "127.0.2.14", "127.0.2.15", "127.0.2.16", "127.0.2.17", "127.0.2.18", "127.0.2.19", "127.0.2.20", "127.0.2.21", "127.0.2.22", "127.0.2.23", "127.0.2.24"
|
|
||||||
];
|
|
||||||
SH_EMAIL_ZRD_DONT_QUERY_IPS = [ "127.0.2.255" ];
|
|
||||||
}
|
|
||||||
}
|
|
||||||
"DBL" {
|
|
||||||
# override the defaults for DBL defined in modules.d/rbl.conf
|
|
||||||
rbl = "${SPAMHAUS_DQS_KEY}.dbl.dq.spamhaus.net";
|
|
||||||
disable_monitoring = true;
|
|
||||||
}
|
|
||||||
"ZRD" {
|
|
||||||
ignore_defaults = true;
|
|
||||||
rbl = "${SPAMHAUS_DQS_KEY}.zrd.dq.spamhaus.net";
|
|
||||||
no_ip = true;
|
|
||||||
dkim = true;
|
|
||||||
emails = true;
|
|
||||||
emails_domainonly = true;
|
|
||||||
urls = true;
|
|
||||||
returncodes = {
|
|
||||||
ZRD_VERY_FRESH_DOMAIN = ["127.0.2.2", "127.0.2.3", "127.0.2.4"];
|
|
||||||
ZRD_FRESH_DOMAIN = ["127.0.2.5", "127.0.2.6", "127.0.2.7", "127.0.2.8", "127.0.2.9", "127.0.2.10", "127.0.2.11", "127.0.2.12", "127.0.2.13", "127.0.2.14", "127.0.2.15", "127.0.2.16", "127.0.2.17", "127.0.2.18", "127.0.2.19", "127.0.2.20", "127.0.2.21", "127.0.2.22", "127.0.2.23", "127.0.2.24"];
|
|
||||||
}
|
|
||||||
}
|
|
||||||
spamhaus_sbl_url {
|
|
||||||
ignore_defaults = true
|
|
||||||
rbl = "${SPAMHAUS_DQS_KEY}.sbl.dq.spamhaus.net";
|
|
||||||
checks = ['urls'];
|
|
||||||
disable_monitoring = true;
|
|
||||||
returncodes {
|
|
||||||
SPAMHAUS_SBL_URL = "127.0.0.2";
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
SH_HBL_EMAIL {
|
if [ $BOOTSTRAP_EXIT_CODE -ne 0 ]; then
|
||||||
ignore_defaults = true;
|
echo "Bootstrap failed with exit code $BOOTSTRAP_EXIT_CODE. Not starting Rspamd."
|
||||||
rbl = "_email.${SPAMHAUS_DQS_KEY}.hbl.dq.spamhaus.net";
|
exit $BOOTSTRAP_EXIT_CODE
|
||||||
emails_domainonly = false;
|
|
||||||
selector = "from('smtp').lower;from('mime').lower";
|
|
||||||
ignore_whitelist = true;
|
|
||||||
checks = ['emails', 'replyto'];
|
|
||||||
hash = "sha1";
|
|
||||||
returncodes = {
|
|
||||||
SH_HBL_EMAIL = [
|
|
||||||
"127.0.3.2"
|
|
||||||
];
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
spamhaus_dqs_hbl {
|
|
||||||
symbol = "HBL_FILE_UNKNOWN";
|
|
||||||
rbl = "_file.${SPAMHAUS_DQS_KEY}.hbl.dq.spamhaus.net.";
|
|
||||||
selector = "attachments('rbase32', 'sha256')";
|
|
||||||
ignore_whitelist = true;
|
|
||||||
ignore_defaults = true;
|
|
||||||
returncodes {
|
|
||||||
SH_HBL_FILE_MALICIOUS = "127.0.3.10";
|
|
||||||
SH_HBL_FILE_SUSPICIOUS = "127.0.3.15";
|
|
||||||
}
|
|
||||||
}
|
|
||||||
EOF
|
|
||||||
else
|
|
||||||
rm -rf /etc/rspamd/custom/dqs-rbl.conf
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
echo "Bootstrap succeeded. Starting Rspamd..."
|
||||||
exec "$@"
|
exec "$@"
|
||||||
|
|||||||
@@ -27,6 +27,7 @@ RUN echo "Building from repository $SOGO_DEBIAN_REPOSITORY" \
|
|||||||
psmisc \
|
psmisc \
|
||||||
wget \
|
wget \
|
||||||
patch \
|
patch \
|
||||||
|
python3 python3-pip \
|
||||||
&& dpkgArch="$(dpkg --print-architecture | awk -F- '{ print $NF }')" \
|
&& dpkgArch="$(dpkg --print-architecture | awk -F- '{ print $NF }')" \
|
||||||
&& wget -O /usr/local/bin/gosu "https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$dpkgArch" \
|
&& wget -O /usr/local/bin/gosu "https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$dpkgArch" \
|
||||||
&& chmod +x /usr/local/bin/gosu \
|
&& chmod +x /usr/local/bin/gosu \
|
||||||
@@ -42,18 +43,21 @@ RUN echo "Building from repository $SOGO_DEBIAN_REPOSITORY" \
|
|||||||
&& rm -rf /var/lib/apt/lists/* \
|
&& rm -rf /var/lib/apt/lists/* \
|
||||||
&& touch /etc/default/locale
|
&& touch /etc/default/locale
|
||||||
|
|
||||||
COPY ./bootstrap-sogo.sh /bootstrap-sogo.sh
|
RUN pip install --break-system-packages \
|
||||||
COPY syslog-ng.conf /etc/syslog-ng/syslog-ng.conf
|
mysql-connector-python \
|
||||||
COPY syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng-redis_slave.conf
|
jinja2 \
|
||||||
COPY supervisord.conf /etc/supervisor/supervisord.conf
|
redis \
|
||||||
COPY acl.diff /acl.diff
|
dnspython \
|
||||||
COPY navMailcowBtns.diff /navMailcowBtns.diff
|
psutil
|
||||||
COPY stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh
|
|
||||||
COPY docker-entrypoint.sh /
|
|
||||||
|
|
||||||
RUN chmod +x /bootstrap-sogo.sh \
|
|
||||||
/usr/local/sbin/stop-supervisor.sh
|
|
||||||
|
|
||||||
ENTRYPOINT ["/docker-entrypoint.sh"]
|
COPY data/Dockerfiles/bootstrap /bootstrap
|
||||||
|
COPY data/Dockerfiles/sogo/syslog-ng.conf /etc/syslog-ng/syslog-ng.conf
|
||||||
|
COPY data/Dockerfiles/sogo/syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng-redis_slave.conf
|
||||||
|
COPY data/Dockerfiles/sogo/supervisord.conf /etc/supervisor/supervisord.conf
|
||||||
|
COPY data/Dockerfiles/sogo/stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh
|
||||||
|
COPY data/Dockerfiles/sogo/docker-entrypoint.sh /
|
||||||
|
|
||||||
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]
|
RUN chmod +x /usr/local/sbin/stop-supervisor.sh
|
||||||
|
|
||||||
|
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]
|
||||||
|
|||||||
@@ -1,11 +0,0 @@
|
|||||||
--- /usr/lib/GNUstep/SOGo/Templates/UIxAclEditor.wox 2018-08-17 18:29:57.987504204 +0200
|
|
||||||
+++ /usr/lib/GNUstep/SOGo/Templates/UIxAclEditor.wox 2018-08-17 18:29:35.918291298 +0200
|
|
||||||
@@ -46,7 +46,7 @@
|
|
||||||
</md-item-template>
|
|
||||||
</md-autocomplete>
|
|
||||||
</div>
|
|
||||||
- <md-card ng-repeat="user in acl.users | orderBy:['userClass', 'cn']"
|
|
||||||
+ <md-card ng-repeat="user in acl.users | filter:{ userClass: 'normal' } | orderBy:['cn']"
|
|
||||||
class="sg-collapsed"
|
|
||||||
ng-class="{ 'sg-expanded': user.uid == acl.selectedUid }">
|
|
||||||
<a class="md-flex md-button" ng-click="acl.selectUser(user, $event)">
|
|
||||||
@@ -1,161 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
# Wait for MySQL to warm-up
|
|
||||||
while ! mariadb-admin status --ssl=false --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
|
|
||||||
echo "Waiting for database to come up..."
|
|
||||||
sleep 2
|
|
||||||
done
|
|
||||||
|
|
||||||
# Wait until port becomes free and send sig
|
|
||||||
until ! nc -z sogo-mailcow 20000;
|
|
||||||
do
|
|
||||||
killall -TERM sogod
|
|
||||||
sleep 3
|
|
||||||
done
|
|
||||||
|
|
||||||
# Wait for updated schema
|
|
||||||
DBV_NOW=$(mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT version FROM versions WHERE application = 'db_schema';" -BN)
|
|
||||||
DBV_NEW=$(grep -oE '\$db_version = .*;' init_db.inc.php | sed 's/$db_version = //g;s/;//g' | cut -d \" -f2)
|
|
||||||
while [[ "${DBV_NOW}" != "${DBV_NEW}" ]]; do
|
|
||||||
echo "Waiting for schema update..."
|
|
||||||
DBV_NOW=$(mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT version FROM versions WHERE application = 'db_schema';" -BN)
|
|
||||||
DBV_NEW=$(grep -oE '\$db_version = .*;' init_db.inc.php | sed 's/$db_version = //g;s/;//g' | cut -d \" -f2)
|
|
||||||
sleep 5
|
|
||||||
done
|
|
||||||
echo "DB schema is ${DBV_NOW}"
|
|
||||||
|
|
||||||
if [[ "${MASTER}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
|
|
||||||
mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "DROP TRIGGER IF EXISTS sogo_update_password"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# cat /dev/urandom seems to hang here occasionally and is not recommended anyway, better use openssl
|
|
||||||
RAND_PASS=$(openssl rand -base64 16 | tr -dc _A-Z-a-z-0-9)
|
|
||||||
|
|
||||||
# Generate plist header with timezone data
|
|
||||||
mkdir -p /var/lib/sogo/GNUstep/Defaults/
|
|
||||||
cat <<EOF > /var/lib/sogo/GNUstep/Defaults/sogod.plist
|
|
||||||
<?xml version="1.0" encoding="UTF-8"?>
|
|
||||||
<!DOCTYPE plist PUBLIC "-//GNUstep//DTD plist 0.9//EN" "http://www.gnustep.org/plist-0_9.xml">
|
|
||||||
<plist version="0.9">
|
|
||||||
<dict>
|
|
||||||
<key>OCSAclURL</key>
|
|
||||||
<string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_acl</string>
|
|
||||||
<key>SOGoIMAPServer</key>
|
|
||||||
<string>imap://${IPV4_NETWORK}.250:143/?TLS=YES&tlsVerifyMode=none</string>
|
|
||||||
<key>SOGoSieveServer</key>
|
|
||||||
<string>sieve://${IPV4_NETWORK}.250:4190/?TLS=YES&tlsVerifyMode=none</string>
|
|
||||||
<key>SOGoSMTPServer</key>
|
|
||||||
<string>smtp://${IPV4_NETWORK}.253:588/?TLS=YES&tlsVerifyMode=none</string>
|
|
||||||
<key>SOGoTrustProxyAuthentication</key>
|
|
||||||
<string>YES</string>
|
|
||||||
<key>SOGoEncryptionKey</key>
|
|
||||||
<string>${RAND_PASS}</string>
|
|
||||||
<key>SOGoURLEncryptionEnabled</key>
|
|
||||||
<string>YES</string>
|
|
||||||
<key>SOGoURLEncryptionPassphrase</key>
|
|
||||||
<string>${SOGO_URL_ENCRYPTION_KEY}</string>
|
|
||||||
<key>OCSAdminURL</key>
|
|
||||||
<string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_admin</string>
|
|
||||||
<key>OCSCacheFolderURL</key>
|
|
||||||
<string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_cache_folder</string>
|
|
||||||
<key>OCSEMailAlarmsFolderURL</key>
|
|
||||||
<string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_alarms_folder</string>
|
|
||||||
<key>OCSFolderInfoURL</key>
|
|
||||||
<string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_folder_info</string>
|
|
||||||
<key>OCSSessionsFolderURL</key>
|
|
||||||
<string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_sessions_folder</string>
|
|
||||||
<key>OCSStoreURL</key>
|
|
||||||
<string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_store</string>
|
|
||||||
<key>SOGoProfileURL</key>
|
|
||||||
<string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_user_profile</string>
|
|
||||||
<key>SOGoTimeZone</key>
|
|
||||||
<string>${TZ}</string>
|
|
||||||
<key>domains</key>
|
|
||||||
<dict>
|
|
||||||
EOF
|
|
||||||
|
|
||||||
# Generate multi-domain setup
|
|
||||||
while read -r line gal
|
|
||||||
do
|
|
||||||
echo " <key>${line}</key>
|
|
||||||
<dict>
|
|
||||||
<key>SOGoMailDomain</key>
|
|
||||||
<string>${line}</string>
|
|
||||||
<key>SOGoUserSources</key>
|
|
||||||
<array>
|
|
||||||
<dict>
|
|
||||||
<key>MailFieldNames</key>
|
|
||||||
<array>
|
|
||||||
<string>aliases</string>
|
|
||||||
<string>ad_aliases</string>
|
|
||||||
<string>ext_acl</string>
|
|
||||||
</array>
|
|
||||||
<key>KindFieldName</key>
|
|
||||||
<string>kind</string>
|
|
||||||
<key>DomainFieldName</key>
|
|
||||||
<string>domain</string>
|
|
||||||
<key>MultipleBookingsFieldName</key>
|
|
||||||
<string>multiple_bookings</string>
|
|
||||||
<key>listRequiresDot</key>
|
|
||||||
<string>NO</string>
|
|
||||||
<key>canAuthenticate</key>
|
|
||||||
<string>YES</string>
|
|
||||||
<key>displayName</key>
|
|
||||||
<string>GAL ${line}</string>
|
|
||||||
<key>id</key>
|
|
||||||
<string>${line}</string>
|
|
||||||
<key>isAddressBook</key>
|
|
||||||
<string>${gal}</string>
|
|
||||||
<key>type</key>
|
|
||||||
<string>sql</string>
|
|
||||||
<key>userPasswordAlgorithm</key>
|
|
||||||
<string>${MAILCOW_PASS_SCHEME}</string>
|
|
||||||
<key>prependPasswordScheme</key>
|
|
||||||
<string>YES</string>
|
|
||||||
<key>viewURL</key>
|
|
||||||
<string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/_sogo_static_view</string>
|
|
||||||
</dict>" >> /var/lib/sogo/GNUstep/Defaults/sogod.plist
|
|
||||||
# Generate alternative LDAP authentication dict, when SQL authentication fails
|
|
||||||
# This will nevertheless read attributes from LDAP
|
|
||||||
/etc/sogo/plist_ldap.sh ${line} ${gal} >> /var/lib/sogo/GNUstep/Defaults/sogod.plist
|
|
||||||
echo " </array>
|
|
||||||
</dict>" >> /var/lib/sogo/GNUstep/Defaults/sogod.plist
|
|
||||||
done < <(mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT domain, CASE gal WHEN '1' THEN 'YES' ELSE 'NO' END AS gal FROM domain;" -B -N)
|
|
||||||
|
|
||||||
# Generate footer
|
|
||||||
echo ' </dict>
|
|
||||||
</dict>
|
|
||||||
</plist>' >> /var/lib/sogo/GNUstep/Defaults/sogod.plist
|
|
||||||
|
|
||||||
# Fix permissions
|
|
||||||
chown sogo:sogo -R /var/lib/sogo/
|
|
||||||
chmod 600 /var/lib/sogo/GNUstep/Defaults/sogod.plist
|
|
||||||
|
|
||||||
# Patch ACLs
|
|
||||||
#if [[ ${ACL_ANYONE} == 'allow' ]]; then
|
|
||||||
# #enable any or authenticated targets for ACL
|
|
||||||
# if patch -R -sfN --dry-run /usr/lib/GNUstep/SOGo/Templates/UIxAclEditor.wox < /acl.diff > /dev/null; then
|
|
||||||
# patch -R /usr/lib/GNUstep/SOGo/Templates/UIxAclEditor.wox < /acl.diff;
|
|
||||||
# fi
|
|
||||||
#else
|
|
||||||
# #disable any or authenticated targets for ACL
|
|
||||||
# if patch -sfN --dry-run /usr/lib/GNUstep/SOGo/Templates/UIxAclEditor.wox < /acl.diff > /dev/null; then
|
|
||||||
# patch /usr/lib/GNUstep/SOGo/Templates/UIxAclEditor.wox < /acl.diff;
|
|
||||||
# fi
|
|
||||||
#fi
|
|
||||||
|
|
||||||
if patch -R -sfN --dry-run /usr/lib/GNUstep/SOGo/Templates/UIxTopnavToolbar.wox < /navMailcowBtns.diff > /dev/null; then
|
|
||||||
patch -R /usr/lib/GNUstep/SOGo/Templates/UIxTopnavToolbar.wox < /navMailcowBtns.diff;
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Rename custom logo, if any
|
|
||||||
[[ -f /etc/sogo/sogo-full.svg ]] && mv /etc/sogo/sogo-full.svg /etc/sogo/custom-fulllogo.svg
|
|
||||||
|
|
||||||
# Rsync web content
|
|
||||||
echo "Syncing web content with named volume"
|
|
||||||
rsync -a /usr/lib/GNUstep/SOGo/. /sogo_web/
|
|
||||||
|
|
||||||
# Chown backup path
|
|
||||||
chown -R sogo:sogo /sogo_backup
|
|
||||||
|
|
||||||
exec gosu sogo /usr/sbin/sogod
|
|
||||||
@@ -1,17 +1,5 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
if [[ "${SKIP_SOGO}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
|
|
||||||
echo "SKIP_SOGO=y, skipping SOGo..."
|
|
||||||
sleep 365d
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
|
|
||||||
cp /etc/syslog-ng/syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng.conf
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "$TZ" > /etc/timezone
|
|
||||||
|
|
||||||
# Run hooks
|
# Run hooks
|
||||||
for file in /hooks/*; do
|
for file in /hooks/*; do
|
||||||
if [ -x "${file}" ]; then
|
if [ -x "${file}" ]; then
|
||||||
@@ -20,4 +8,13 @@ for file in /hooks/*; do
|
|||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
|
|
||||||
exec "$@"
|
python3 -u /bootstrap/main.py
|
||||||
|
BOOTSTRAP_EXIT_CODE=$?
|
||||||
|
|
||||||
|
if [ $BOOTSTRAP_EXIT_CODE -ne 0 ]; then
|
||||||
|
echo "Bootstrap failed with exit code $BOOTSTRAP_EXIT_CODE. Not starting SOGo."
|
||||||
|
exit $BOOTSTRAP_EXIT_CODE
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Bootstrap succeeded. Starting SOGo..."
|
||||||
|
exec gosu sogo /usr/sbin/sogod
|
||||||
|
|||||||
@@ -1,15 +0,0 @@
|
|||||||
60,65d58
|
|
||||||
< var:ng-click="navButtonClick"
|
|
||||||
< ng-href="/user">
|
|
||||||
< <md-icon>build</md-icon>
|
|
||||||
< <md-tooltip>mailcow <var:string label:value="Preferences"/></md-tooltip>
|
|
||||||
< </md-button>
|
|
||||||
< <md-button class="md-icon-button"
|
|
||||||
83c76
|
|
||||||
< onclick="mc_logout();"
|
|
||||||
---
|
|
||||||
> ng-show="::activeUser.path.logoff.length"
|
|
||||||
85c78
|
|
||||||
< ng-href="#">
|
|
||||||
---
|
|
||||||
> ng-href="{{::activeUser.path.logoff}}">
|
|
||||||
@@ -11,8 +11,8 @@ stderr_logfile_maxbytes=0
|
|||||||
autostart=true
|
autostart=true
|
||||||
priority=1
|
priority=1
|
||||||
|
|
||||||
[program:bootstrap-sogo]
|
[program:bootstrap]
|
||||||
command=/bootstrap-sogo.sh
|
command=/docker-entrypoint.sh
|
||||||
stdout_logfile=/dev/stdout
|
stdout_logfile=/dev/stdout
|
||||||
stdout_logfile_maxbytes=0
|
stdout_logfile_maxbytes=0
|
||||||
stderr_logfile=/dev/stderr
|
stderr_logfile=/dev/stderr
|
||||||
|
|||||||
@@ -16,6 +16,7 @@ RUN apk add --update \
|
|||||||
fcgi \
|
fcgi \
|
||||||
openssl \
|
openssl \
|
||||||
nagios-plugins-mysql \
|
nagios-plugins-mysql \
|
||||||
|
nagios-plugins-dns \
|
||||||
nagios-plugins-disk \
|
nagios-plugins-disk \
|
||||||
bind-tools \
|
bind-tools \
|
||||||
redis \
|
redis \
|
||||||
@@ -31,11 +32,9 @@ RUN apk add --update \
|
|||||||
tzdata \
|
tzdata \
|
||||||
whois \
|
whois \
|
||||||
&& curl https://raw.githubusercontent.com/mludvig/smtp-cli/v3.10/smtp-cli -o /smtp-cli \
|
&& curl https://raw.githubusercontent.com/mludvig/smtp-cli/v3.10/smtp-cli -o /smtp-cli \
|
||||||
&& chmod +x smtp-cli \
|
&& chmod +x smtp-cli
|
||||||
&& mkdir /usr/lib/mailcow
|
|
||||||
|
|
||||||
COPY watchdog.sh /watchdog.sh
|
COPY data/Dockerfiles/watchdog/watchdog.sh /watchdog.sh
|
||||||
COPY check_mysql_slavestatus.sh /usr/lib/nagios/plugins/check_mysql_slavestatus.sh
|
COPY data/Dockerfiles/watchdog/check_mysql_slavestatus.sh /usr/lib/nagios/plugins/check_mysql_slavestatus.sh
|
||||||
COPY check_dns.sh /usr/lib/mailcow/check_dns.sh
|
|
||||||
|
|
||||||
CMD ["/watchdog.sh"]
|
CMD ["/watchdog.sh"]
|
||||||
|
|||||||
@@ -1,39 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
|
|
||||||
while getopts "H:s:" opt; do
|
|
||||||
case "$opt" in
|
|
||||||
H) HOST="$OPTARG" ;;
|
|
||||||
s) SERVER="$OPTARG" ;;
|
|
||||||
*) echo "Usage: $0 -H host -s server"; exit 3 ;;
|
|
||||||
esac
|
|
||||||
done
|
|
||||||
|
|
||||||
if [ -z "$SERVER" ]; then
|
|
||||||
echo "No DNS Server provided"
|
|
||||||
exit 3
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -z "$HOST" ]; then
|
|
||||||
echo "No host to test provided"
|
|
||||||
exit 3
|
|
||||||
fi
|
|
||||||
|
|
||||||
# run dig and measure the time it takes to run
|
|
||||||
START_TIME=$(date +%s%3N)
|
|
||||||
dig_output=$(dig +short +timeout=2 +tries=1 "$HOST" @"$SERVER" 2>/dev/null)
|
|
||||||
dig_rc=$?
|
|
||||||
dig_output_ips=$(echo "$dig_output" | grep -E '^[0-9.]+$' | sort | paste -sd ',' -)
|
|
||||||
END_TIME=$(date +%s%3N)
|
|
||||||
ELAPSED_TIME=$((END_TIME - START_TIME))
|
|
||||||
|
|
||||||
# validate and perform nagios like output and exit codes
|
|
||||||
if [ $dig_rc -ne 0 ] || [ -z "$dig_output" ]; then
|
|
||||||
echo "Domain $HOST was not found by the server"
|
|
||||||
exit 2
|
|
||||||
elif [ $dig_rc -eq 0 ]; then
|
|
||||||
echo "DNS OK: $ELAPSED_TIME ms response time. $HOST returns $dig_output_ips"
|
|
||||||
exit 0
|
|
||||||
else
|
|
||||||
echo "Unknown error"
|
|
||||||
exit 3
|
|
||||||
fi
|
|
||||||
@@ -1,10 +1,5 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
if [ "${DEV_MODE}" != "n" ]; then
|
|
||||||
echo -e "\e[31mEnabled Debug Mode\e[0m"
|
|
||||||
set -x
|
|
||||||
fi
|
|
||||||
|
|
||||||
trap "exit" INT TERM
|
trap "exit" INT TERM
|
||||||
trap "kill 0" EXIT
|
trap "kill 0" EXIT
|
||||||
|
|
||||||
@@ -302,7 +297,7 @@ unbound_checks() {
|
|||||||
touch /tmp/unbound-mailcow; echo "$(tail -50 /tmp/unbound-mailcow)" > /tmp/unbound-mailcow
|
touch /tmp/unbound-mailcow; echo "$(tail -50 /tmp/unbound-mailcow)" > /tmp/unbound-mailcow
|
||||||
host_ip=$(get_container_ip unbound-mailcow)
|
host_ip=$(get_container_ip unbound-mailcow)
|
||||||
err_c_cur=${err_count}
|
err_c_cur=${err_count}
|
||||||
/usr/lib/mailcow/check_dns.sh -s ${host_ip} -H stackoverflow.com 2>> /tmp/unbound-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
|
/usr/lib/nagios/plugins/check_dns -s ${host_ip} -H stackoverflow.com 2>> /tmp/unbound-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
|
||||||
DNSSEC=$(dig com +dnssec | egrep 'flags:.+ad')
|
DNSSEC=$(dig com +dnssec | egrep 'flags:.+ad')
|
||||||
if [[ -z ${DNSSEC} ]]; then
|
if [[ -z ${DNSSEC} ]]; then
|
||||||
echo "DNSSEC failure" 2>> /tmp/unbound-mailcow 1>&2
|
echo "DNSSEC failure" 2>> /tmp/unbound-mailcow 1>&2
|
||||||
@@ -407,7 +402,7 @@ sogo_checks() {
|
|||||||
trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
|
trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
|
||||||
while [ ${err_count} -lt ${THRESHOLD} ]; do
|
while [ ${err_count} -lt ${THRESHOLD} ]; do
|
||||||
touch /tmp/sogo-mailcow; echo "$(tail -50 /tmp/sogo-mailcow)" > /tmp/sogo-mailcow
|
touch /tmp/sogo-mailcow; echo "$(tail -50 /tmp/sogo-mailcow)" > /tmp/sogo-mailcow
|
||||||
host_ip=$(get_container_ip sogo-mailcow)
|
host_ip=$SOGO_HOST
|
||||||
err_c_cur=${err_count}
|
err_c_cur=${err_count}
|
||||||
/usr/lib/nagios/plugins/check_http -4 -H ${host_ip} -u /SOGo.index/ -p 20000 2>> /tmp/sogo-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
|
/usr/lib/nagios/plugins/check_http -4 -H ${host_ip} -u /SOGo.index/ -p 20000 2>> /tmp/sogo-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
|
||||||
[ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
|
[ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
|
||||||
@@ -450,31 +445,6 @@ postfix_checks() {
|
|||||||
return 1
|
return 1
|
||||||
}
|
}
|
||||||
|
|
||||||
postfix-tlspol_checks() {
|
|
||||||
err_count=0
|
|
||||||
diff_c=0
|
|
||||||
THRESHOLD=${POSTFIX_TLSPOL_THRESHOLD}
|
|
||||||
# Reduce error count by 2 after restarting an unhealthy container
|
|
||||||
trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
|
|
||||||
while [ ${err_count} -lt ${THRESHOLD} ]; do
|
|
||||||
touch /tmp/postfix-tlspol-mailcow; echo "$(tail -50 /tmp/postfix-tlspol-mailcow)" > /tmp/postfix-tlspol-mailcow
|
|
||||||
host_ip=$(get_container_ip postfix-tlspol-mailcow)
|
|
||||||
err_c_cur=${err_count}
|
|
||||||
/usr/lib/nagios/plugins/check_tcp -4 -H ${host_ip} -p 8642 2>> /tmp/postfix-tlspol-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
|
|
||||||
[ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
|
|
||||||
[ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
|
|
||||||
progress "Postfix TLS Policy companion" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
|
|
||||||
if [[ $? == 10 ]]; then
|
|
||||||
diff_c=0
|
|
||||||
sleep 1
|
|
||||||
else
|
|
||||||
diff_c=0
|
|
||||||
sleep $(( ( RANDOM % 60 ) + 20 ))
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
clamd_checks() {
|
clamd_checks() {
|
||||||
err_count=0
|
err_count=0
|
||||||
diff_c=0
|
diff_c=0
|
||||||
@@ -952,18 +922,6 @@ PID=$!
|
|||||||
echo "Spawned mailq_checks with PID ${PID}"
|
echo "Spawned mailq_checks with PID ${PID}"
|
||||||
BACKGROUND_TASKS+=(${PID})
|
BACKGROUND_TASKS+=(${PID})
|
||||||
|
|
||||||
(
|
|
||||||
while true; do
|
|
||||||
if ! postfix-tlspol_checks; then
|
|
||||||
log_msg "Postfix TLS Policy hit error limit"
|
|
||||||
echo postfix-tlspol-mailcow > /tmp/com_pipe
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
) &
|
|
||||||
PID=$!
|
|
||||||
echo "Spawned postfix-tlspol_checks with PID ${PID}"
|
|
||||||
BACKGROUND_TASKS+=(${PID})
|
|
||||||
|
|
||||||
(
|
(
|
||||||
while true; do
|
while true; do
|
||||||
if ! dovecot_checks; then
|
if ! dovecot_checks; then
|
||||||
|
|||||||
15
data/conf/clamav/config.json
Normal file
15
data/conf/clamav/config.json
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
[
|
||||||
|
{
|
||||||
|
"template": "whitelist.ign2.j2",
|
||||||
|
"output": "/var/lib/clamav/whitelist.ign2",
|
||||||
|
"clean_blank_lines": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"template": "clamd.conf.j2",
|
||||||
|
"output": "/etc/clamav/clamd.conf"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"template": "freshclam.conf.j2",
|
||||||
|
"output": "/etc/clamav/freshclam.conf"
|
||||||
|
}
|
||||||
|
]
|
||||||
5
data/conf/clamav/config_templates/whitelist.ign2.j2
Normal file
5
data/conf/clamav/config_templates/whitelist.ign2.j2
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
# Please restart ClamAV after changing signatures
|
||||||
|
Example-Signature.Ignore-1
|
||||||
|
PUA.Win.Trojan.EmbeddedPDF-1
|
||||||
|
PUA.Pdf.Trojan.EmbeddedJavaScript-1
|
||||||
|
PUA.Pdf.Trojan.OpenActionObjectwithJavascript-1
|
||||||
0
data/conf/clamav/custom_templates/.gitkeep
Normal file
0
data/conf/clamav/custom_templates/.gitkeep
Normal file
@@ -67,7 +67,7 @@ require_once 'functions.ratelimit.inc.php';
|
|||||||
require_once 'functions.acl.inc.php';
|
require_once 'functions.acl.inc.php';
|
||||||
|
|
||||||
|
|
||||||
$isSOGoRequest = $post['real_rip'] == getenv('IPV4_NETWORK') . '.248';
|
$isSOGoRequest = $post['real_rip'] == getenv('SOGO_HOST');
|
||||||
$result = false;
|
$result = false;
|
||||||
if ($isSOGoRequest) {
|
if ($isSOGoRequest) {
|
||||||
// This is a SOGo Auth request. First check for SSO password.
|
// This is a SOGo Auth request. First check for SSO password.
|
||||||
@@ -86,7 +86,7 @@ if ($result === false){
|
|||||||
'remote_addr' => $post['real_rip']
|
'remote_addr' => $post['real_rip']
|
||||||
));
|
));
|
||||||
if ($result) {
|
if ($result) {
|
||||||
error_log('MAILCOWAUTH: App auth for user ' . $post['username'] . " with service " . $post['service'] . " from IP " . $post['real_rip']);
|
error_log('MAILCOWAUTH: App auth for user ' . $post['username']);
|
||||||
set_sasl_log($post['username'], $post['real_rip'], $post['service']);
|
set_sasl_log($post['username'], $post['real_rip'], $post['service']);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -94,9 +94,9 @@ if ($result === false){
|
|||||||
// Init Identity Provider
|
// Init Identity Provider
|
||||||
$iam_provider = identity_provider('init');
|
$iam_provider = identity_provider('init');
|
||||||
$iam_settings = identity_provider('get');
|
$iam_settings = identity_provider('get');
|
||||||
$result = user_login($post['username'], $post['password'], array('is_internal' => true, 'service' => $post['service']));
|
$result = user_login($post['username'], $post['password'], array('is_internal' => true));
|
||||||
if ($result) {
|
if ($result) {
|
||||||
error_log('MAILCOWAUTH: User auth for user ' . $post['username'] . " with service " . $post['service'] . " from IP " . $post['real_rip']);
|
error_log('MAILCOWAUTH: User auth for user ' . $post['username']);
|
||||||
set_sasl_log($post['username'], $post['real_rip'], $post['service']);
|
set_sasl_log($post['username'], $post['real_rip'], $post['service']);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -105,7 +105,7 @@ if ($result) {
|
|||||||
http_response_code(200); // OK
|
http_response_code(200); // OK
|
||||||
$return['success'] = true;
|
$return['success'] = true;
|
||||||
} else {
|
} else {
|
||||||
error_log("MAILCOWAUTH: Login failed for user " . $post['username'] . " with service " . $post['service'] . " from IP " . $post['real_rip']);
|
error_log("MAILCOWAUTH: Login failed for user " . $post['username']);
|
||||||
http_response_code(401); // Unauthorized
|
http_response_code(401); // Unauthorized
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
70
data/conf/dovecot/config.json
Normal file
70
data/conf/dovecot/config.json
Normal file
@@ -0,0 +1,70 @@
|
|||||||
|
[
|
||||||
|
{
|
||||||
|
"template": "dovecot-dict-sql-quota.conf.j2",
|
||||||
|
"output": "/etc/dovecot/sql/dovecot-dict-sql-quota.conf"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"template": "dovecot-dict-sql-userdb.conf.j2",
|
||||||
|
"output": "/etc/dovecot/sql/dovecot-dict-sql-userdb.conf"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"template": "dovecot-dict-sql-sieve_before.conf.j2",
|
||||||
|
"output": "/etc/dovecot/sql/dovecot-dict-sql-sieve_before.conf"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"template": "dovecot-dict-sql-sieve_after.conf.j2",
|
||||||
|
"output": "/etc/dovecot/sql/dovecot-dict-sql-sieve_after.conf"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"template": "mail_plugins.j2",
|
||||||
|
"output": "/etc/dovecot/mail_plugins"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"template": "mail_plugins_imap.j2",
|
||||||
|
"output": "/etc/dovecot/mail_plugins_imap"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"template": "mail_plugins_lmtp.j2",
|
||||||
|
"output": "/etc/dovecot/mail_plugins_lmtp"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"template": "global_sieve_after.sieve.j2",
|
||||||
|
"output": "/var/vmail/sieve/global_sieve_after.sieve"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"template": "global_sieve_before.sieve.j2",
|
||||||
|
"output": "/var/vmail/sieve/global_sieve_before.sieve"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"template": "dovecot-master.passwd.j2",
|
||||||
|
"output": "/etc/dovecot/dovecot-master.passwd"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"template": "dovecot-master.userdb.j2",
|
||||||
|
"output": "/etc/dovecot/dovecot-master.userdb"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"template": "sieve.creds.j2",
|
||||||
|
"output": "/etc/sogo/sieve.creds"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"template": "sogo-sso.pass.j2",
|
||||||
|
"output": "/etc/phpfpm/sogo-sso.pass"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"template": "cron.creds.j2",
|
||||||
|
"output": "/etc/sogo/cron.creds"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"template": "source_env.sh.j2",
|
||||||
|
"output": "/source_env.sh"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"template": "maildir_gc.sh.j2",
|
||||||
|
"output": "/usr/local/bin/maildir_gc.sh"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"template": "dovecot.conf.j2",
|
||||||
|
"output": "/etc/dovecot/dovecot.conf"
|
||||||
|
}
|
||||||
|
]
|
||||||
1
data/conf/dovecot/config_templates/cron.creds.j2
Normal file
1
data/conf/dovecot/config_templates/cron.creds.j2
Normal file
@@ -0,0 +1 @@
|
|||||||
|
{{ RAND_USER }}@mailcow.local:{{ RAND_PASS2 }}
|
||||||
@@ -0,0 +1,14 @@
|
|||||||
|
{% set QUOTA_TABLE = "quota2" if MASTER|lower in ["y", "yes"] else "quota2replica" %}
|
||||||
|
connect = "host=/var/run/mysqld/mysqld.sock dbname={{ DBNAME }} user={{ DBUSER }} password={{ DBPASS | escape_quotes }}"
|
||||||
|
map {
|
||||||
|
pattern = priv/quota/storage
|
||||||
|
table = {{ QUOTA_TABLE }}
|
||||||
|
username_field = username
|
||||||
|
value_field = bytes
|
||||||
|
}
|
||||||
|
map {
|
||||||
|
pattern = priv/quota/messages
|
||||||
|
table = {{ QUOTA_TABLE }}
|
||||||
|
username_field = username
|
||||||
|
value_field = messages
|
||||||
|
}
|
||||||
@@ -0,0 +1,21 @@
|
|||||||
|
connect = "host=/var/run/mysqld/mysqld.sock dbname={{ DBNAME }} user={{ DBUSER }} password={{ DBPASS | escape_quotes }}"
|
||||||
|
map {
|
||||||
|
pattern = priv/sieve/name/$script_name
|
||||||
|
table = sieve_after
|
||||||
|
username_field = username
|
||||||
|
value_field = id
|
||||||
|
fields {
|
||||||
|
script_name = $script_name
|
||||||
|
}
|
||||||
|
}
|
||||||
|
map {
|
||||||
|
pattern = priv/sieve/data/$id
|
||||||
|
table = sieve_after
|
||||||
|
username_field = username
|
||||||
|
value_field = script_data
|
||||||
|
fields {
|
||||||
|
id = $id
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
@@ -0,0 +1,19 @@
|
|||||||
|
connect = "host=/var/run/mysqld/mysqld.sock dbname={{ DBNAME }} user={{ DBUSER }} password={{ DBPASS | escape_quotes }}"
|
||||||
|
map {
|
||||||
|
pattern = priv/sieve/name/$script_name
|
||||||
|
table = sieve_before
|
||||||
|
username_field = username
|
||||||
|
value_field = id
|
||||||
|
fields {
|
||||||
|
script_name = $script_name
|
||||||
|
}
|
||||||
|
}
|
||||||
|
map {
|
||||||
|
pattern = priv/sieve/data/$id
|
||||||
|
table = sieve_before
|
||||||
|
username_field = username
|
||||||
|
value_field = script_data
|
||||||
|
fields {
|
||||||
|
id = $id
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
driver = mysql
|
||||||
|
connect = "host=/var/run/mysqld/mysqld.sock dbname={{ DBNAME }} user={{ DBUSER }} password={{ DBPASS | escape_quotes }}"
|
||||||
|
user_query = SELECT CONCAT(JSON_UNQUOTE(JSON_VALUE(attributes, '$.mailbox_format')), mailbox_path_prefix, '%d/%n/{{ MAILDIR_SUB }}:VOLATILEDIR=/var/volatile/%u:INDEX=/var/vmail_index/%u') AS mail, '%s' AS protocol, 5000 AS uid, 5000 AS gid, concat('*:bytes=', quota) AS quota_rule FROM mailbox WHERE username = '%u' AND (active = '1' OR active = '2')
|
||||||
|
iterate_query = SELECT username FROM mailbox WHERE active = '1' OR active = '2';
|
||||||
@@ -0,0 +1,3 @@
|
|||||||
|
{%- set master_user = DOVECOT_MASTER_USER or RAND_USER %}
|
||||||
|
{%- set master_pass = DOVECOT_MASTER_PASS or RAND_PASS %}
|
||||||
|
{{ master_user }}@mailcow.local:{SHA1}{{ master_pass | sha1 }}::::::
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
{{ DOVECOT_MASTER_USER or RAND_USER }}@mailcow.local::5000:5000::::
|
||||||
@@ -1,23 +1,16 @@
|
|||||||
# --------------------------------------------------------------------------
|
|
||||||
# Please create a file "extra.conf" for persistent overrides to dovecot.conf
|
|
||||||
# --------------------------------------------------------------------------
|
|
||||||
# LDAP example:
|
|
||||||
#passdb {
|
|
||||||
# args = /etc/dovecot/ldap/passdb.conf
|
|
||||||
# driver = ldap
|
|
||||||
#}
|
|
||||||
|
|
||||||
auth_mechanisms = plain login
|
auth_mechanisms = plain login
|
||||||
#mail_debug = yes
|
#mail_debug = yes
|
||||||
#auth_debug = yes
|
#auth_debug = yes
|
||||||
#log_debug = category=fts-flatcurve # Activate Logging for Flatcurve FTS Searchings
|
#log_debug = category=fts-flatcurve # Activate Logging for Flatcurve FTS Searchings
|
||||||
log_path = syslog
|
log_path = syslog
|
||||||
disable_plaintext_auth = yes
|
disable_plaintext_auth = yes
|
||||||
|
|
||||||
# Uncomment on NFS share
|
# Uncomment on NFS share
|
||||||
#mmap_disable = yes
|
#mmap_disable = yes
|
||||||
#mail_fsync = always
|
#mail_fsync = always
|
||||||
#mail_nfs_index = yes
|
#mail_nfs_index = yes
|
||||||
#mail_nfs_storage = yes
|
#mail_nfs_storage = yes
|
||||||
|
|
||||||
login_log_format_elements = "user=<%u> method=%m rip=%r lip=%l mpid=%e %c %k"
|
login_log_format_elements = "user=<%u> method=%m rip=%r lip=%l mpid=%e %c %k"
|
||||||
mail_home = /var/vmail/%d/%n
|
mail_home = /var/vmail/%d/%n
|
||||||
mail_location = maildir:~/
|
mail_location = maildir:~/
|
||||||
@@ -78,7 +71,9 @@ service doveadm {
|
|||||||
}
|
}
|
||||||
vsz_limit=2048 MB
|
vsz_limit=2048 MB
|
||||||
}
|
}
|
||||||
!include /etc/dovecot/dovecot.folders.conf
|
|
||||||
|
{% include 'dovecot.folders.conf.j2' %}
|
||||||
|
|
||||||
protocols = imap sieve lmtp pop3
|
protocols = imap sieve lmtp pop3
|
||||||
service dict {
|
service dict {
|
||||||
unix_listener dict {
|
unix_listener dict {
|
||||||
@@ -193,7 +188,7 @@ protocol sieve {
|
|||||||
}
|
}
|
||||||
plugin {
|
plugin {
|
||||||
# Allow "any" or "authenticated" to be used in ACLs
|
# Allow "any" or "authenticated" to be used in ACLs
|
||||||
acl_anyone = </etc/dovecot/acl_anyone
|
acl_anyone = {{ ACL_ANYONE }}
|
||||||
acl_shared_dict = file:/var/vmail/shared-mailboxes.db
|
acl_shared_dict = file:/var/vmail/shared-mailboxes.db
|
||||||
acl = vfile
|
acl = vfile
|
||||||
acl_user = %u
|
acl_user = %u
|
||||||
@@ -249,7 +244,7 @@ plugin {
|
|||||||
mail_log_cached_only = yes
|
mail_log_cached_only = yes
|
||||||
|
|
||||||
# Try set mail_replica
|
# Try set mail_replica
|
||||||
!include_try /etc/dovecot/mail_replica.conf
|
{% include 'mail_replica.conf.j2' %}
|
||||||
}
|
}
|
||||||
service quota-warning {
|
service quota-warning {
|
||||||
executable = script /usr/local/bin/quota_notify.py
|
executable = script /usr/local/bin/quota_notify.py
|
||||||
@@ -300,12 +295,15 @@ service replicator {
|
|||||||
replication_max_conns = 10
|
replication_max_conns = 10
|
||||||
doveadm_port = 12345
|
doveadm_port = 12345
|
||||||
replication_dsync_parameters = -d -l 30 -U -n INBOX
|
replication_dsync_parameters = -d -l 30 -U -n INBOX
|
||||||
|
|
||||||
|
{% include 'sogo_trusted_ip.conf.j2' %}
|
||||||
|
{% include 'shared_namespace.conf.j2' %}
|
||||||
|
{% include 'fts.conf.j2' %}
|
||||||
|
{% include 'sni.conf.j2' %}
|
||||||
|
|
||||||
# <Includes>
|
# <Includes>
|
||||||
!include_try /etc/dovecot/sni.conf
|
|
||||||
!include_try /etc/dovecot/sogo_trusted_ip.conf
|
|
||||||
!include_try /etc/dovecot/extra.conf
|
!include_try /etc/dovecot/extra.conf
|
||||||
!include_try /etc/dovecot/shared_namespace.conf
|
|
||||||
!include_try /etc/dovecot/conf.d/fts.conf
|
|
||||||
# </Includes>
|
# </Includes>
|
||||||
|
|
||||||
default_client_limit = 10400
|
default_client_limit = 10400
|
||||||
default_vsz_limit = 1024 M
|
default_vsz_limit = 1024 M
|
||||||
@@ -1,308 +1,308 @@
|
|||||||
namespace inbox {
|
namespace inbox {
|
||||||
inbox = yes
|
inbox = yes
|
||||||
location =
|
location =
|
||||||
separator = /
|
separator = /
|
||||||
mailbox "Trash" {
|
mailbox "Trash" {
|
||||||
auto = subscribe
|
auto = subscribe
|
||||||
special_use = \Trash
|
special_use = \Trash
|
||||||
}
|
}
|
||||||
mailbox "Deleted Messages" {
|
mailbox "Deleted Messages" {
|
||||||
special_use = \Trash
|
special_use = \Trash
|
||||||
}
|
}
|
||||||
mailbox "Deleted Items" {
|
mailbox "Deleted Items" {
|
||||||
special_use = \Trash
|
special_use = \Trash
|
||||||
}
|
}
|
||||||
mailbox "Rubbish" {
|
mailbox "Rubbish" {
|
||||||
special_use = \Trash
|
special_use = \Trash
|
||||||
}
|
}
|
||||||
mailbox "Gelöschte Objekte" {
|
mailbox "Gelöschte Objekte" {
|
||||||
special_use = \Trash
|
special_use = \Trash
|
||||||
}
|
}
|
||||||
mailbox "Gelöschte Elemente" {
|
mailbox "Gelöschte Elemente" {
|
||||||
special_use = \Trash
|
special_use = \Trash
|
||||||
}
|
}
|
||||||
mailbox "Papierkorb" {
|
mailbox "Papierkorb" {
|
||||||
special_use = \Trash
|
special_use = \Trash
|
||||||
}
|
}
|
||||||
mailbox "Itens Excluidos" {
|
mailbox "Itens Excluidos" {
|
||||||
special_use = \Trash
|
special_use = \Trash
|
||||||
}
|
}
|
||||||
mailbox "Itens Excluídos" {
|
mailbox "Itens Excluídos" {
|
||||||
special_use = \Trash
|
special_use = \Trash
|
||||||
}
|
}
|
||||||
mailbox "Lixeira" {
|
mailbox "Lixeira" {
|
||||||
special_use = \Trash
|
special_use = \Trash
|
||||||
}
|
}
|
||||||
mailbox "Prullenbak" {
|
mailbox "Prullenbak" {
|
||||||
special_use = \Trash
|
special_use = \Trash
|
||||||
}
|
}
|
||||||
mailbox "Odstránené položky" {
|
mailbox "Odstránené položky" {
|
||||||
special_use = \Trash
|
special_use = \Trash
|
||||||
}
|
}
|
||||||
mailbox "Koš" {
|
mailbox "Koš" {
|
||||||
special_use = \Trash
|
special_use = \Trash
|
||||||
}
|
}
|
||||||
mailbox "Verwijderde items" {
|
mailbox "Verwijderde items" {
|
||||||
special_use = \Trash
|
special_use = \Trash
|
||||||
}
|
}
|
||||||
mailbox "Удаленные" {
|
mailbox "Удаленные" {
|
||||||
special_use = \Trash
|
special_use = \Trash
|
||||||
}
|
}
|
||||||
mailbox "Удаленные элементы" {
|
mailbox "Удаленные элементы" {
|
||||||
special_use = \Trash
|
special_use = \Trash
|
||||||
}
|
}
|
||||||
mailbox "Корзина" {
|
mailbox "Корзина" {
|
||||||
special_use = \Trash
|
special_use = \Trash
|
||||||
}
|
}
|
||||||
mailbox "Видалені" {
|
mailbox "Видалені" {
|
||||||
special_use = \Trash
|
special_use = \Trash
|
||||||
}
|
}
|
||||||
mailbox "Видалені елементи" {
|
mailbox "Видалені елементи" {
|
||||||
special_use = \Trash
|
special_use = \Trash
|
||||||
}
|
}
|
||||||
mailbox "Кошик" {
|
mailbox "Кошик" {
|
||||||
special_use = \Trash
|
special_use = \Trash
|
||||||
}
|
}
|
||||||
mailbox "废件箱" {
|
mailbox "废件箱" {
|
||||||
special_use = \Trash
|
special_use = \Trash
|
||||||
}
|
}
|
||||||
mailbox "已删除消息" {
|
mailbox "已删除消息" {
|
||||||
special_use = \Trash
|
special_use = \Trash
|
||||||
}
|
}
|
||||||
mailbox "已删除邮件" {
|
mailbox "已删除邮件" {
|
||||||
special_use = \Trash
|
special_use = \Trash
|
||||||
}
|
}
|
||||||
mailbox "Archive" {
|
mailbox "Archive" {
|
||||||
auto = subscribe
|
auto = subscribe
|
||||||
special_use = \Archive
|
special_use = \Archive
|
||||||
}
|
}
|
||||||
mailbox "Archiv" {
|
mailbox "Archiv" {
|
||||||
special_use = \Archive
|
special_use = \Archive
|
||||||
}
|
}
|
||||||
mailbox "Archives" {
|
mailbox "Archives" {
|
||||||
special_use = \Archive
|
special_use = \Archive
|
||||||
}
|
}
|
||||||
mailbox "Arquivo" {
|
mailbox "Arquivo" {
|
||||||
special_use = \Archive
|
special_use = \Archive
|
||||||
}
|
}
|
||||||
mailbox "Arquivos" {
|
mailbox "Arquivos" {
|
||||||
special_use = \Archive
|
special_use = \Archive
|
||||||
}
|
}
|
||||||
mailbox "Archief" {
|
mailbox "Archief" {
|
||||||
special_use = \Archive
|
special_use = \Archive
|
||||||
}
|
}
|
||||||
mailbox "Archív" {
|
mailbox "Archív" {
|
||||||
special_use = \Archive
|
special_use = \Archive
|
||||||
}
|
}
|
||||||
mailbox "Archivovať" {
|
mailbox "Archivovať" {
|
||||||
special_use = \Archive
|
special_use = \Archive
|
||||||
}
|
}
|
||||||
mailbox "归档" {
|
mailbox "归档" {
|
||||||
special_use = \Archive
|
special_use = \Archive
|
||||||
}
|
}
|
||||||
mailbox "Архив" {
|
mailbox "Архив" {
|
||||||
special_use = \Archive
|
special_use = \Archive
|
||||||
}
|
}
|
||||||
mailbox "Архів" {
|
mailbox "Архів" {
|
||||||
special_use = \Archive
|
special_use = \Archive
|
||||||
}
|
}
|
||||||
mailbox "Sent" {
|
mailbox "Sent" {
|
||||||
auto = subscribe
|
auto = subscribe
|
||||||
special_use = \Sent
|
special_use = \Sent
|
||||||
}
|
}
|
||||||
mailbox "Sent Messages" {
|
mailbox "Sent Messages" {
|
||||||
special_use = \Sent
|
special_use = \Sent
|
||||||
}
|
}
|
||||||
mailbox "Sent Items" {
|
mailbox "Sent Items" {
|
||||||
special_use = \Sent
|
special_use = \Sent
|
||||||
}
|
}
|
||||||
mailbox "已发送" {
|
mailbox "已发送" {
|
||||||
special_use = \Sent
|
special_use = \Sent
|
||||||
}
|
}
|
||||||
mailbox "已发送消息" {
|
mailbox "已发送消息" {
|
||||||
special_use = \Sent
|
special_use = \Sent
|
||||||
}
|
}
|
||||||
mailbox "已发送邮件" {
|
mailbox "已发送邮件" {
|
||||||
special_use = \Sent
|
special_use = \Sent
|
||||||
}
|
}
|
||||||
mailbox "Отправленные" {
|
mailbox "Отправленные" {
|
||||||
special_use = \Sent
|
special_use = \Sent
|
||||||
}
|
}
|
||||||
mailbox "Отправленные элементы" {
|
mailbox "Отправленные элементы" {
|
||||||
special_use = \Sent
|
special_use = \Sent
|
||||||
}
|
}
|
||||||
mailbox "Надіслані" {
|
mailbox "Надіслані" {
|
||||||
special_use = \Sent
|
special_use = \Sent
|
||||||
}
|
}
|
||||||
mailbox "Надіслані елементи" {
|
mailbox "Надіслані елементи" {
|
||||||
special_use = \Sent
|
special_use = \Sent
|
||||||
}
|
}
|
||||||
mailbox "Gesendet" {
|
mailbox "Gesendet" {
|
||||||
special_use = \Sent
|
special_use = \Sent
|
||||||
}
|
}
|
||||||
mailbox "Gesendete Objekte" {
|
mailbox "Gesendete Objekte" {
|
||||||
special_use = \Sent
|
special_use = \Sent
|
||||||
}
|
}
|
||||||
mailbox "Gesendete Elemente" {
|
mailbox "Gesendete Elemente" {
|
||||||
special_use = \Sent
|
special_use = \Sent
|
||||||
}
|
}
|
||||||
mailbox "Itens Enviados" {
|
mailbox "Itens Enviados" {
|
||||||
special_use = \Sent
|
special_use = \Sent
|
||||||
}
|
}
|
||||||
mailbox "Enviados" {
|
mailbox "Enviados" {
|
||||||
special_use = \Sent
|
special_use = \Sent
|
||||||
}
|
}
|
||||||
mailbox "Verzonden items" {
|
mailbox "Verzonden items" {
|
||||||
special_use = \Sent
|
special_use = \Sent
|
||||||
}
|
}
|
||||||
mailbox "Verzonden" {
|
mailbox "Verzonden" {
|
||||||
special_use = \Sent
|
special_use = \Sent
|
||||||
}
|
}
|
||||||
mailbox "Odoslaná pošta" {
|
mailbox "Odoslaná pošta" {
|
||||||
special_use = \Sent
|
special_use = \Sent
|
||||||
}
|
}
|
||||||
mailbox "Odoslané" {
|
mailbox "Odoslané" {
|
||||||
special_use = \Sent
|
special_use = \Sent
|
||||||
}
|
}
|
||||||
mailbox "Drafts" {
|
mailbox "Drafts" {
|
||||||
auto = subscribe
|
auto = subscribe
|
||||||
special_use = \Drafts
|
special_use = \Drafts
|
||||||
}
|
}
|
||||||
mailbox "Entwürfe" {
|
mailbox "Entwürfe" {
|
||||||
special_use = \Drafts
|
special_use = \Drafts
|
||||||
}
|
}
|
||||||
mailbox "Rascunhos" {
|
mailbox "Rascunhos" {
|
||||||
special_use = \Drafts
|
special_use = \Drafts
|
||||||
}
|
}
|
||||||
mailbox "Concepten" {
|
mailbox "Concepten" {
|
||||||
special_use = \Drafts
|
special_use = \Drafts
|
||||||
}
|
}
|
||||||
mailbox "Koncepty" {
|
mailbox "Koncepty" {
|
||||||
special_use = \Drafts
|
special_use = \Drafts
|
||||||
}
|
}
|
||||||
mailbox "草稿" {
|
mailbox "草稿" {
|
||||||
special_use = \Drafts
|
special_use = \Drafts
|
||||||
}
|
}
|
||||||
mailbox "草稿箱" {
|
mailbox "草稿箱" {
|
||||||
special_use = \Drafts
|
special_use = \Drafts
|
||||||
}
|
}
|
||||||
mailbox "Черновики" {
|
mailbox "Черновики" {
|
||||||
special_use = \Drafts
|
special_use = \Drafts
|
||||||
}
|
}
|
||||||
mailbox "Чернетки" {
|
mailbox "Чернетки" {
|
||||||
special_use = \Drafts
|
special_use = \Drafts
|
||||||
}
|
}
|
||||||
mailbox "Junk" {
|
mailbox "Junk" {
|
||||||
auto = subscribe
|
auto = subscribe
|
||||||
special_use = \Junk
|
special_use = \Junk
|
||||||
}
|
}
|
||||||
mailbox "Junk-E-Mail" {
|
mailbox "Junk-E-Mail" {
|
||||||
special_use = \Junk
|
special_use = \Junk
|
||||||
}
|
}
|
||||||
mailbox "Junk E-Mail" {
|
mailbox "Junk E-Mail" {
|
||||||
special_use = \Junk
|
special_use = \Junk
|
||||||
}
|
}
|
||||||
mailbox "Spam" {
|
mailbox "Spam" {
|
||||||
special_use = \Junk
|
special_use = \Junk
|
||||||
}
|
}
|
||||||
mailbox "Lixo Eletrônico" {
|
mailbox "Lixo Eletrônico" {
|
||||||
special_use = \Junk
|
special_use = \Junk
|
||||||
}
|
}
|
||||||
mailbox "Nevyžiadaná pošta" {
|
mailbox "Nevyžiadaná pošta" {
|
||||||
special_use = \Junk
|
special_use = \Junk
|
||||||
}
|
}
|
||||||
mailbox "Infikované položky" {
|
mailbox "Infikované položky" {
|
||||||
special_use = \Junk
|
special_use = \Junk
|
||||||
}
|
}
|
||||||
mailbox "Ongewenste e-mail" {
|
mailbox "Ongewenste e-mail" {
|
||||||
special_use = \Junk
|
special_use = \Junk
|
||||||
}
|
}
|
||||||
mailbox "垃圾" {
|
mailbox "垃圾" {
|
||||||
special_use = \Junk
|
special_use = \Junk
|
||||||
}
|
}
|
||||||
mailbox "垃圾箱" {
|
mailbox "垃圾箱" {
|
||||||
special_use = \Junk
|
special_use = \Junk
|
||||||
}
|
}
|
||||||
mailbox "Нежелательная почта" {
|
mailbox "Нежелательная почта" {
|
||||||
special_use = \Junk
|
special_use = \Junk
|
||||||
}
|
}
|
||||||
mailbox "Спам" {
|
mailbox "Спам" {
|
||||||
special_use = \Junk
|
special_use = \Junk
|
||||||
}
|
}
|
||||||
mailbox "Небажана пошта" {
|
mailbox "Небажана пошта" {
|
||||||
special_use = \Junk
|
special_use = \Junk
|
||||||
}
|
}
|
||||||
mailbox "Koncepty" {
|
mailbox "Koncepty" {
|
||||||
special_use = \Drafts
|
special_use = \Drafts
|
||||||
}
|
}
|
||||||
mailbox "Nevyžádaná pošta" {
|
mailbox "Nevyžádaná pošta" {
|
||||||
special_use = \Junk
|
special_use = \Junk
|
||||||
}
|
}
|
||||||
mailbox "Odstraněná pošta" {
|
mailbox "Odstraněná pošta" {
|
||||||
special_use = \Trash
|
special_use = \Trash
|
||||||
}
|
}
|
||||||
mailbox "Odeslaná pošta" {
|
mailbox "Odeslaná pošta" {
|
||||||
special_use = \Sent
|
special_use = \Sent
|
||||||
}
|
}
|
||||||
mailbox "Skräp" {
|
mailbox "Skräp" {
|
||||||
special_use = \Trash
|
special_use = \Trash
|
||||||
}
|
}
|
||||||
mailbox "Borttagna Meddelanden" {
|
mailbox "Borttagna Meddelanden" {
|
||||||
special_use = \Trash
|
special_use = \Trash
|
||||||
}
|
}
|
||||||
mailbox "Arkiv" {
|
mailbox "Arkiv" {
|
||||||
special_use = \Archive
|
special_use = \Archive
|
||||||
}
|
}
|
||||||
mailbox "Arkeverat" {
|
mailbox "Arkeverat" {
|
||||||
special_use = \Archive
|
special_use = \Archive
|
||||||
}
|
}
|
||||||
mailbox "Skickat" {
|
mailbox "Skickat" {
|
||||||
special_use = \Sent
|
special_use = \Sent
|
||||||
}
|
}
|
||||||
mailbox "Skickade Meddelanden" {
|
mailbox "Skickade Meddelanden" {
|
||||||
special_use = \Sent
|
special_use = \Sent
|
||||||
}
|
}
|
||||||
mailbox "Utkast" {
|
mailbox "Utkast" {
|
||||||
special_use = \Drafts
|
special_use = \Drafts
|
||||||
}
|
}
|
||||||
mailbox "Skraldespand" {
|
mailbox "Skraldespand" {
|
||||||
special_use = \Trash
|
special_use = \Trash
|
||||||
}
|
}
|
||||||
mailbox "Slettet mails" {
|
mailbox "Slettet mails" {
|
||||||
special_use = \Trash
|
special_use = \Trash
|
||||||
}
|
}
|
||||||
mailbox "Arkiv" {
|
mailbox "Arkiv" {
|
||||||
special_use = \Archive
|
special_use = \Archive
|
||||||
}
|
}
|
||||||
mailbox "Arkiveret mails" {
|
mailbox "Arkiveret mails" {
|
||||||
special_use = \Archive
|
special_use = \Archive
|
||||||
}
|
}
|
||||||
mailbox "Sendt" {
|
mailbox "Sendt" {
|
||||||
special_use = \Sent
|
special_use = \Sent
|
||||||
}
|
}
|
||||||
mailbox "Sendte mails" {
|
mailbox "Sendte mails" {
|
||||||
special_use = \Sent
|
special_use = \Sent
|
||||||
}
|
}
|
||||||
mailbox "Udkast" {
|
mailbox "Udkast" {
|
||||||
special_use = \Drafts
|
special_use = \Drafts
|
||||||
}
|
}
|
||||||
mailbox "Kladde" {
|
mailbox "Kladde" {
|
||||||
special_use = \Drafts
|
special_use = \Drafts
|
||||||
}
|
}
|
||||||
mailbox "Πρόχειρα" {
|
mailbox "Πρόχειρα" {
|
||||||
special_use = \Drafts
|
special_use = \Drafts
|
||||||
}
|
}
|
||||||
mailbox "Απεσταλμένα" {
|
mailbox "Απεσταλμένα" {
|
||||||
special_use = \Sent
|
special_use = \Sent
|
||||||
}
|
}
|
||||||
mailbox "Κάδος απορριμάτων" {
|
mailbox "Κάδος απορριμάτων" {
|
||||||
special_use = \Trash
|
special_use = \Trash
|
||||||
}
|
}
|
||||||
mailbox "Ανεπιθύμητα" {
|
mailbox "Ανεπιθύμητα" {
|
||||||
special_use = \Junk
|
special_use = \Junk
|
||||||
}
|
}
|
||||||
mailbox "Αρχειοθετημένα" {
|
mailbox "Αρχειοθετημένα" {
|
||||||
special_use = \Archive
|
special_use = \Archive
|
||||||
}
|
}
|
||||||
prefix =
|
prefix =
|
||||||
}
|
}
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
# mailcow FTS Flatcurve Settings, change them as you like.
|
{% if SKIP_FTS|lower in ['n', 'no'] %}
|
||||||
plugin {
|
plugin {
|
||||||
fts_autoindex = yes
|
fts_autoindex = yes
|
||||||
fts_autoindex_exclude = \Junk
|
fts_autoindex_exclude = \Junk
|
||||||
@@ -24,14 +24,11 @@ plugin {
|
|||||||
fts_index_timeout = 300s
|
fts_index_timeout = 300s
|
||||||
}
|
}
|
||||||
|
|
||||||
### THIS PART WILL BE CHANGED BY MODIFYING mailcow.conf AUTOMATICALLY DURING RUNTIME! ###
|
|
||||||
|
|
||||||
service indexer-worker {
|
service indexer-worker {
|
||||||
# Max amount of simultaniously running indexer jobs.
|
# Max amount of simultaniously running indexer jobs.
|
||||||
process_limit=1
|
process_limit = {{ FTS_PROCS }}
|
||||||
|
|
||||||
# Max amount of RAM used by EACH indexer process.
|
# Max amount of RAM used by EACH indexer process.
|
||||||
vsz_limit=128 MB
|
vsz_limit = {{ FTS_HEAP }} MB
|
||||||
}
|
}
|
||||||
|
{% endif %}
|
||||||
### THIS PART WILL BE CHANGED BY MODIFYING mailcow.conf AUTOMATICALLY DURING RUNTIME! ###
|
|
||||||
5
data/conf/dovecot/config_templates/mail_plugins.j2
Normal file
5
data/conf/dovecot/config_templates/mail_plugins.j2
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
{%- if SKIP_FTS|lower in ["y", "yes"] -%}
|
||||||
|
quota acl zlib mail_crypt mail_crypt_acl mail_log notify listescape replication lazy_expunge
|
||||||
|
{%- else -%}
|
||||||
|
quota acl zlib mail_crypt mail_crypt_acl mail_log notify fts fts_flatcurve listescape replication lazy_expunge
|
||||||
|
{%- endif -%}
|
||||||
5
data/conf/dovecot/config_templates/mail_plugins_imap.j2
Normal file
5
data/conf/dovecot/config_templates/mail_plugins_imap.j2
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
{%- if SKIP_FTS|lower in ["y", "yes"] -%}
|
||||||
|
quota imap_quota imap_acl acl zlib imap_zlib imap_sieve mail_crypt mail_crypt_acl notify listescape replication mail_log
|
||||||
|
{%- else -%}
|
||||||
|
quota imap_quota imap_acl acl zlib imap_zlib imap_sieve mail_crypt mail_crypt_acl notify mail_log fts fts_flatcurve listescape replication
|
||||||
|
{%- endif -%}
|
||||||
5
data/conf/dovecot/config_templates/mail_plugins_lmtp.j2
Normal file
5
data/conf/dovecot/config_templates/mail_plugins_lmtp.j2
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
{%- if SKIP_FTS|lower in ["y", "yes"] -%}
|
||||||
|
quota sieve acl zlib mail_crypt mail_crypt_acl notify listescape replication
|
||||||
|
{%- else -%}
|
||||||
|
quota sieve acl zlib mail_crypt mail_crypt_acl fts fts_flatcurve notify listescape replication
|
||||||
|
{%- endif -%}
|
||||||
3
data/conf/dovecot/config_templates/mail_replica.conf.j2
Normal file
3
data/conf/dovecot/config_templates/mail_replica.conf.j2
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
{% if MAILCOW_REPLICA_IP and DOVEADM_REPLICA_PORT %}
|
||||||
|
mail_replica = tcp:{{ MAILCOW_REPLICA_IP }}:{{ DOVEADM_REPLICA_PORT }}
|
||||||
|
{% endif %}
|
||||||
2
data/conf/dovecot/config_templates/maildir_gc.sh.j2
Normal file
2
data/conf/dovecot/config_templates/maildir_gc.sh.j2
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
[ -d /var/vmail/_garbage/ ] && /usr/bin/find /var/vmail/_garbage/ -mindepth 1 -maxdepth 1 -type d -cmin +{{ MAILDIR_GC_TIME }} -exec rm -r {} \;
|
||||||
@@ -0,0 +1,9 @@
|
|||||||
|
{% set MAILDIR_SUB_SHARED = '' if not MAILDIR_SUB else '/' ~ MAILDIR_SUB %}
|
||||||
|
namespace {
|
||||||
|
type = shared
|
||||||
|
separator = /
|
||||||
|
prefix = Shared/%%u/
|
||||||
|
location = maildir:%%h{{ MAILDIR_SUB_SHARED }}:INDEX=~{{ MAILDIR_SUB_SHARED }}/Shared/%%u
|
||||||
|
subscriptions = no
|
||||||
|
list = children
|
||||||
|
}
|
||||||
1
data/conf/dovecot/config_templates/sieve.creds.j2
Normal file
1
data/conf/dovecot/config_templates/sieve.creds.j2
Normal file
@@ -0,0 +1 @@
|
|||||||
|
{{ DOVECOT_MASTER_USER or RAND_USER }}@mailcow.local:{{ DOVECOT_MASTER_PASS or RAND_PASS }}
|
||||||
6
data/conf/dovecot/config_templates/sni.conf.j2
Normal file
6
data/conf/dovecot/config_templates/sni.conf.j2
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
{% for domain, path in VALID_CERT_DIRS.items() %}
|
||||||
|
local_name "{{ domain }}" {
|
||||||
|
ssl_cert = <{{ path }}/cert.pem
|
||||||
|
ssl_key = <{{ path }}/key.pem
|
||||||
|
}
|
||||||
|
{% endfor %}
|
||||||
1
data/conf/dovecot/config_templates/sogo-sso.pass.j2
Normal file
1
data/conf/dovecot/config_templates/sogo-sso.pass.j2
Normal file
@@ -0,0 +1 @@
|
|||||||
|
{{ RAND_PASS2 }}
|
||||||
@@ -0,0 +1,3 @@
|
|||||||
|
remote {{ SOGO_HOST }} {
|
||||||
|
disable_plaintext_auth = no
|
||||||
|
}
|
||||||
3
data/conf/dovecot/config_templates/source_env.sh.j2
Normal file
3
data/conf/dovecot/config_templates/source_env.sh.j2
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
{% for key, value in ENV_VARS.items() %}
|
||||||
|
export {{ key }}="{{ value | replace('"', '\\"') }}"
|
||||||
|
{% endfor %}
|
||||||
0
data/conf/dovecot/custom_templates/.gitkeep
Normal file
0
data/conf/dovecot/custom_templates/.gitkeep
Normal file
@@ -1,9 +0,0 @@
|
|||||||
#hosts = 1.2.3.4
|
|
||||||
#dn = cn=admin,dc=example,dc=local
|
|
||||||
#dnpass = password
|
|
||||||
#ldap_version = 3
|
|
||||||
#base = ou=People,dc=example,dc=local
|
|
||||||
#auth_bind = no
|
|
||||||
#pass_filter = (&(objectClass=posixAccount)(mail=%u))
|
|
||||||
#pass_attrs = mail=user,userPassword=password
|
|
||||||
#default_pass_scheme = SSHA
|
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user