mirror of
https://github.com/mailcow/mailcow-dockerized.git
synced 2026-02-19 07:36:23 +00:00
Compare commits
561 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
37beed6ad9 | ||
|
|
75f18df143 | ||
|
|
89398c4726 | ||
|
|
8971b11c49 | ||
|
|
bb7fd483f7 | ||
|
|
439a936fd8 | ||
|
|
567ebbc324 | ||
|
|
f9a7712025 | ||
|
|
3d62869664 | ||
|
|
b70bcd36fb | ||
|
|
cb50d08605 | ||
|
|
f3da8bb85f | ||
|
|
12e4d639f0 | ||
|
|
eb3f88fc91 | ||
|
|
9a729d89bf | ||
|
|
74b4097ee0 | ||
|
|
e00d0d5f8d | ||
|
|
c5e399ebc2 | ||
|
|
cb9ca772b1 | ||
|
|
162f05ccda | ||
|
|
6c97c4f372 | ||
|
|
6d4fcacd83 | ||
|
|
1994f706c0 | ||
|
|
e34afd3fdd | ||
|
|
a6f71faf46 | ||
|
|
b26ccc2019 | ||
|
|
b1c1e403d2 | ||
|
|
8753ea2be6 | ||
|
|
9fee568082 | ||
|
|
294a406b91 | ||
|
|
8b933f1967 | ||
|
|
824a473fea | ||
|
|
7f790c5360 | ||
|
|
52431a3942 | ||
|
|
8017394e9d | ||
|
|
76194be7dd | ||
|
|
3b23afa0ff | ||
|
|
6e00d653ce | ||
|
|
b6c036496d | ||
|
|
5d7c9b20bc | ||
|
|
4b400eadb1 | ||
|
|
ab2abda8cc | ||
|
|
2fe21e9641 | ||
|
|
b7ed6982d8 | ||
|
|
fd927853cb | ||
|
|
c48f4f4ab8 | ||
|
|
a4c006828e | ||
|
|
b56291f62b | ||
|
|
0cdf7647c4 | ||
|
|
8fe1cc4961 | ||
|
|
bf050f17c4 | ||
|
|
edd85dea8d | ||
|
|
3bf90c1f73 | ||
|
|
292306b191 | ||
|
|
b3e0a66222 | ||
|
|
e994cf4d05 | ||
|
|
cc0dc2eae0 | ||
|
|
a001a0584f | ||
|
|
926af87cfb | ||
|
|
b0339372b5 | ||
|
|
e398cb91e9 | ||
|
|
6ee0303b0f | ||
|
|
68616c2d57 | ||
|
|
f8de520d29 | ||
|
|
10077ece31 | ||
|
|
c918726143 | ||
|
|
3885b07a99 | ||
|
|
fcf27d640d | ||
|
|
82fde23cc1 | ||
|
|
cbca306fc1 | ||
|
|
6a8986fe4f | ||
|
|
ff34eb12e2 | ||
|
|
fbecd60e56 | ||
|
|
c37bf0bb32 | ||
|
|
2208d7e6fb | ||
|
|
e426c3a7e7 | ||
|
|
03fccb28e9 | ||
|
|
8fbfd99dd6 | ||
|
|
7f7a869678 | ||
|
|
73257151c4 | ||
|
|
efb2572f0f | ||
|
|
66aa28b5de | ||
|
|
987a027339 | ||
|
|
eea81e21f6 | ||
|
|
a689109f44 | ||
|
|
58c0a46459 | ||
|
|
2dbe8bf4ca | ||
|
|
ef7ec06947 | ||
|
|
fc7ea7a247 | ||
|
|
9b478b3859 | ||
|
|
384e5a2e64 | ||
|
|
aadeeb0df3 | ||
|
|
f33d82ffc1 | ||
|
|
ffeeb179e1 | ||
|
|
8e2d3a6db5 | ||
|
|
70126e1f0c | ||
|
|
b9ae174a6a | ||
|
|
9715c57314 | ||
|
|
b9f8959d92 | ||
|
|
9c814cc182 | ||
|
|
cf6594220c | ||
|
|
2cf952eb36 | ||
|
|
6fc86dd7d3 | ||
|
|
bf13af9691 | ||
|
|
1af9c21a50 | ||
|
|
443941e687 | ||
|
|
527577b438 | ||
|
|
9daf2d80c0 | ||
|
|
38b0641742 | ||
|
|
f675af5bb0 | ||
|
|
533c4e7956 | ||
|
|
1b2c2c0037 | ||
|
|
97768494e1 | ||
|
|
4a052da289 | ||
|
|
18d7a55b15 | ||
|
|
9ca2fb7ccf | ||
|
|
b4e8355827 | ||
|
|
e0bde1c459 | ||
|
|
27c007ebd3 | ||
|
|
8f3ea09732 | ||
|
|
af626d98d3 | ||
|
|
34b0574e56 | ||
|
|
49d738809b | ||
|
|
2fa3a22eca | ||
|
|
dc5eb6f92e | ||
|
|
ba8902f0b1 | ||
|
|
11e9a77840 | ||
|
|
64cd7e74c5 | ||
|
|
cac65d081e | ||
|
|
e5ada994be | ||
|
|
6ba2459645 | ||
|
|
58f63aad08 | ||
|
|
8a8687a63c | ||
|
|
f7f93c360d | ||
|
|
c160e1f68e | ||
|
|
47c08ab8d2 | ||
|
|
cd83ffbaa2 | ||
|
|
e12981a821 | ||
|
|
47fd1bb894 | ||
|
|
20582b6353 | ||
|
|
c8ff5387c0 | ||
|
|
7cb138d515 | ||
|
|
3dd4c45fab | ||
|
|
549539bec9 | ||
|
|
e449cac464 | ||
|
|
62e458f39b | ||
|
|
b37caaf9e5 | ||
|
|
7660ca89ae | ||
|
|
36b5cccd18 | ||
|
|
9decfa9c31 | ||
|
|
3aee2b6cf5 | ||
|
|
17d797cee4 | ||
|
|
75550eeea3 | ||
|
|
0d09c86c12 | ||
|
|
2db8f482db | ||
|
|
00d4b32a1b | ||
|
|
8a82bab1f3 | ||
|
|
237a25e6b0 | ||
|
|
5dc836671d | ||
|
|
26be1cb602 | ||
|
|
dc7a48cbf9 | ||
|
|
52455be815 | ||
|
|
5c851f2935 | ||
|
|
bbbdcfb625 | ||
|
|
b054a57e16 | ||
|
|
fd73b3ad88 | ||
|
|
8c0637b556 | ||
|
|
914a8204d4 | ||
|
|
d92ffe8fc7 | ||
|
|
e0eb3a4f13 | ||
|
|
1fb0060a73 | ||
|
|
d7430bf516 | ||
|
|
35f039a119 | ||
|
|
79432a40d7 | ||
|
|
98cdb95bc0 | ||
|
|
02a55ce9db | ||
|
|
6f4720e1ea | ||
|
|
6a807b7799 | ||
|
|
8d4ef147d2 | ||
|
|
8ed6217d1c | ||
|
|
7dae4a976d | ||
|
|
3b83949ba3 | ||
|
|
d8baadb991 | ||
|
|
7d3f9fa407 | ||
|
|
705d144a85 | ||
|
|
ff05cff36c | ||
|
|
861fa7b145 | ||
|
|
d65a0bba44 | ||
|
|
dac1bd88dc | ||
|
|
288dbfa37c | ||
|
|
a0e55cb9b1 | ||
|
|
86ba019ca0 | ||
|
|
3cb9c2ece5 | ||
|
|
1787c53d98 | ||
|
|
8ae762a8c8 | ||
|
|
63426c3cd0 | ||
|
|
e184713c67 | ||
|
|
1926625297 | ||
|
|
63bb8e8cef | ||
|
|
583c5b48a0 | ||
|
|
d08ccbce78 | ||
|
|
5a9702771c | ||
| eb91d9905b | |||
| 38cc85fa4c | |||
|
|
77e6ef218c | ||
|
|
464b6f2e93 | ||
|
|
20c90642f9 | ||
|
|
57e67ea8f7 | ||
|
|
c9e9628383 | ||
|
|
909f07939e | ||
|
|
a310493485 | ||
|
|
1e09df20b6 | ||
|
|
087481ac12 | ||
|
|
c941e802d4 | ||
|
|
39589bd441 | ||
|
|
2e57325dde | ||
|
|
2072301d89 | ||
|
|
b236fd3ac6 | ||
|
|
b968695e31 | ||
|
|
694f1d1623 | ||
|
|
93e4d58606 | ||
|
|
cc77caad67 | ||
|
|
f74573f5d0 | ||
|
|
deb6f0babc | ||
|
|
cb978136bd | ||
|
|
1159450cc4 | ||
|
|
a0613e4b10 | ||
|
|
68989f0a45 | ||
|
|
7da5e3697e | ||
|
|
6e7a0eb662 | ||
|
|
b25ac855ca | ||
|
|
3e02dcbb95 | ||
|
|
53be119e39 | ||
|
|
25bdc4c9ed | ||
|
|
9d4055fc4d | ||
|
|
d2edf359ac | ||
|
|
aa1d92dfbb | ||
|
|
b89d71e6e4 | ||
|
|
ed493f9c3a | ||
|
|
76f8a5b7de | ||
|
|
cb3bc207b9 | ||
|
|
b5db5dd0b4 | ||
|
|
90a7cff2c9 | ||
|
|
cc3adbe78c | ||
|
|
bd6a7210b7 | ||
|
|
905a202873 | ||
|
|
accedf0280 | ||
|
|
99d9a2eacd | ||
|
|
ac4f131fa8 | ||
|
|
7f6f7e0e9f | ||
|
|
43bb26f28c | ||
|
|
b29dc37991 | ||
|
|
cf9f02adbb | ||
|
|
6dc0bdbfa3 | ||
|
|
b5a1a18b04 | ||
|
|
b4eeb0ffae | ||
|
|
48549ead7f | ||
|
|
01b0ad0fd9 | ||
|
|
2b21501450 | ||
|
|
b491f6af9b | ||
|
|
942ef7c254 | ||
|
|
1ee3bb42f3 | ||
|
|
25007b1963 | ||
|
|
f442378377 | ||
|
|
333b7ebc0c | ||
|
|
5896766fc3 | ||
|
|
89540aec28 | ||
|
|
b960143045 | ||
|
|
6ab45cf668 | ||
|
|
fd206a7ef6 | ||
|
|
1c7347d38d | ||
|
|
7f58c422f2 | ||
|
|
0a0e2b5e93 | ||
|
|
de00c424f4 | ||
|
|
a249e2028d | ||
|
|
68036eeccf | ||
|
|
cb0b0235f0 | ||
|
|
6ff6f7a28d | ||
|
|
0b628fb22d | ||
|
|
b4bb11320f | ||
|
|
c61938db23 | ||
|
|
acf9d5480c | ||
|
|
a1cb7fd778 | ||
|
|
c24543fea0 | ||
|
|
100e8ab00d | ||
|
|
38497b04ac | ||
|
|
7bd27b920a | ||
|
|
efab11720d | ||
|
|
121f0120f0 | ||
|
|
515b85bb2f | ||
|
|
f27e41d19c | ||
|
|
603d451fc9 | ||
|
|
89adaabb64 | ||
|
|
987ca68ca6 | ||
|
|
71defbf2f9 | ||
|
|
5c35b42844 | ||
|
|
904b37c4be | ||
|
|
4e252f8243 | ||
|
|
dc3e52a900 | ||
|
|
06ad5f6652 | ||
|
|
c3b5474cbf | ||
|
|
69e3b830ed | ||
|
|
96a5891ce7 | ||
|
|
66b9245b28 | ||
|
|
f38ec68695 | ||
|
|
996772a27d | ||
|
|
7f4e9c1ad4 | ||
|
|
218ba69501 | ||
|
|
c2e5dfd933 | ||
|
|
3e40bbc603 | ||
|
|
3498d4b9c5 | ||
|
|
f4b838cad8 | ||
|
|
86fa8634ee | ||
|
|
8882006700 | ||
|
|
40fdf99a55 | ||
|
|
0257736c64 | ||
|
|
2024cda560 | ||
|
|
03aaf4ad76 | ||
|
|
550b88861f | ||
|
|
02ae5fa007 | ||
|
|
d81f105ed7 | ||
|
|
d3ed225675 | ||
|
|
efcca61f5a | ||
|
|
4dad0002cd | ||
|
|
9ffc83f0f6 | ||
|
|
981c7d5974 | ||
|
|
5da089ccd7 | ||
|
|
91e00f7d97 | ||
|
|
3a675fb541 | ||
|
|
9a5d8d2d22 | ||
|
|
de812221ef | ||
|
|
340980bdd0 | ||
|
|
f68a28fa2b | ||
|
|
7b7798e8c4 | ||
|
|
b3ac94115e | ||
|
|
b1a172cad9 | ||
|
|
f2e21c68d0 | ||
|
|
8b784c0eb1 | ||
|
|
bc59f32b96 | ||
|
|
a4fa8a4fae | ||
|
|
f730192c98 | ||
|
|
f994501296 | ||
|
|
9c3e73606c | ||
|
|
5619e16b70 | ||
|
|
d2e3867893 | ||
|
|
979f5475c3 | ||
|
|
5a10f2dd7c | ||
|
|
a80b5b7dd0 | ||
|
|
392967d664 | ||
|
|
d4dd1e37ce | ||
|
|
a8dfa95126 | ||
|
|
3b3c2b7141 | ||
|
|
f55c3c0887 | ||
|
|
f423ad77f3 | ||
|
|
8ba1e1ba9e | ||
|
|
55576084fc | ||
|
|
03311b06c9 | ||
|
|
b5c3d01834 | ||
|
|
f398ecbe39 | ||
|
|
8f1ae0f099 | ||
|
|
c8bee57732 | ||
|
|
85641794c3 | ||
|
|
849decaa59 | ||
|
|
6e88550f92 | ||
|
|
7c52483887 | ||
|
|
0aa520c030 | ||
|
|
548999f163 | ||
|
|
63df547306 | ||
|
|
547d2ca308 | ||
|
|
46b995f9e3 | ||
|
|
4f109c1a94 | ||
|
|
1fdf704cb4 | ||
|
|
5ec9c4c750 | ||
|
|
28cec99699 | ||
|
|
3e194c7906 | ||
|
|
afed94cc0e | ||
|
|
6f48c5ace0 | ||
|
|
9a7e1c2b5a | ||
|
|
2ef7539d55 | ||
|
|
4e52542e33 | ||
|
|
a1895ad924 | ||
|
|
d5a2c96887 | ||
|
|
3f30fe3113 | ||
|
|
d89f24a1a3 | ||
|
|
413354ff29 | ||
|
|
a28ba5bebb | ||
|
|
b93375b671 | ||
|
|
f39005b72d | ||
|
|
b568a33581 | ||
|
|
b05ef8edac | ||
|
|
015f9b663f | ||
|
|
b6167257c9 | ||
|
|
687fe044b2 | ||
|
|
cfa47eb873 | ||
|
|
7079000ee0 | ||
|
|
f60c4f39ee | ||
|
|
473713219f | ||
|
|
03ed81dc3f | ||
|
|
53543ccf26 | ||
|
|
3b183933e3 | ||
|
|
6c6fde8e2e | ||
|
|
61e23b6b81 | ||
|
|
6c649debc9 | ||
|
|
87b0683f77 | ||
|
|
59c1e7a18a | ||
|
|
4f9dad5dd3 | ||
|
|
adc6a0054c | ||
|
|
5425cca47e | ||
|
|
8a70cdb48b | ||
|
|
bb4bc11383 | ||
|
|
a366494c34 | ||
|
|
99de302ec9 | ||
|
|
907912046f | ||
|
|
2c0d379dc5 | ||
|
|
5b8efeb2ba | ||
|
|
f1c93fa337 | ||
|
|
a94a29a6ac | ||
|
|
7e3d736ee1 | ||
|
|
437534556e | ||
|
|
ce4b9c98dc | ||
|
|
c134078d60 | ||
|
|
a8bc6aff2e | ||
|
|
0b627017e0 | ||
|
|
eb3be80286 | ||
|
|
1fda71e4fa | ||
|
|
a02bd4beff | ||
|
|
d7f3ee16aa | ||
|
|
87e3c91c26 | ||
|
|
33a38e6fde | ||
|
|
3d8f45db43 | ||
|
|
40df25dcf0 | ||
|
|
5de151a966 | ||
|
|
115d0681a7 | ||
|
|
1c403a6d60 | ||
|
|
e67ba60863 | ||
|
|
0c0ec7be58 | ||
|
|
a72b3689b0 | ||
|
|
c4c76e0945 | ||
|
|
1a793e0b7e | ||
|
|
d0562ddbd9 | ||
|
|
3851a48ea0 | ||
|
|
40dcf86846 | ||
|
|
257e104d2b | ||
|
|
3f2a9b6973 | ||
|
|
ed365c35e7 | ||
|
|
24ff70759a | ||
|
|
c55c38f77b | ||
|
|
934bc15fae | ||
|
|
c2c994bfbb | ||
|
|
b1c2ffba6e | ||
|
|
b4a56052c5 | ||
|
|
69d15df221 | ||
|
|
e5752755d1 | ||
|
|
d98cfe0fc7 | ||
|
|
1a1955c1c2 | ||
|
|
0303dbc1d2 | ||
|
|
acee742822 | ||
|
|
8d792fbd62 | ||
|
|
d132a51a4d | ||
|
|
2111115a73 | ||
|
|
160c9caee3 | ||
|
|
33de788453 | ||
|
|
f86f5657d9 | ||
|
|
e02a92a0d0 | ||
|
|
5ae9605e77 | ||
|
|
88fbec1e53 | ||
|
|
d098e7b9e6 | ||
|
|
a8930e8060 | ||
|
|
e26501261e | ||
|
|
89bc11ce0f | ||
|
|
4b096962a9 | ||
|
|
c64fdf9aa3 | ||
|
|
9caaaa6498 | ||
|
|
105a7a4c74 | ||
|
|
09782e5b47 | ||
|
|
8d75b570c8 | ||
|
|
21121f9827 | ||
|
|
8e87e76dcf | ||
|
|
2629f3d865 | ||
|
|
8e5cd90707 | ||
|
|
9ffa810054 | ||
|
|
db9562e843 | ||
|
|
3540075b61 | ||
|
|
d0ba061f7a | ||
|
|
871ae5d7d2 | ||
|
|
633ebe5e8d | ||
|
|
1b7cc830ca | ||
|
|
d48193fd0e | ||
|
|
bb69f39976 | ||
|
|
f059db54d0 | ||
|
|
e4e8abb1b9 | ||
|
|
1a207f4d88 | ||
|
|
25d6e0bbd0 | ||
|
|
8e5323023a | ||
|
|
6d9805109a | ||
|
|
1822d56efb | ||
|
|
1e3766e2f1 | ||
|
|
718dcb69be | ||
|
|
372b1c7bbc | ||
|
|
9ba5c13702 | ||
|
|
30e241babe | ||
|
|
956b170674 | ||
|
|
2c52753adb | ||
|
|
095d59c01b | ||
|
|
1a2f145b28 | ||
|
|
930473a980 | ||
|
|
1db8990271 | ||
|
|
025fd03310 | ||
|
|
e468c59dfc | ||
|
|
340ef866d2 | ||
|
|
533bd36572 | ||
|
|
5bf29e6ac1 | ||
|
|
d6c3c58f42 | ||
|
|
b050cb9864 | ||
|
|
e176724775 | ||
|
|
8f9ed9e0df | ||
|
|
003eecf131 | ||
|
|
180b9fc8d2 | ||
|
|
5d3491c801 | ||
|
|
c45684b986 | ||
|
|
5c886d2f4e | ||
|
|
9f39af46aa | ||
|
|
7cda9f063f | ||
|
|
5e7583c5e6 | ||
|
|
a1fb962215 | ||
|
|
57d849a51b | ||
|
|
3000da6b88 | ||
|
|
db75cbbcb0 | ||
|
|
22acbb6b57 | ||
|
|
31cb0f7db1 | ||
|
|
6d17b9f504 | ||
|
|
0f337971ff | ||
|
|
6cf2775e7e | ||
|
|
dabf9104ed | ||
|
|
952ddb18fd | ||
|
|
34d990a800 | ||
|
|
020cb21b35 | ||
|
|
525364ba65 | ||
|
|
731fabef58 | ||
|
|
c10be77a1b | ||
|
|
d8fd023cdb | ||
|
|
db2759b7d1 | ||
|
|
3c3b9575a2 | ||
|
|
987cfd5dae | ||
|
|
1537fb39c0 | ||
|
|
65cbc478b8 | ||
|
|
e2e8fbe313 | ||
|
|
5619175108 | ||
|
|
f295b8cd91 | ||
|
|
2eafd89412 | ||
|
|
a3c5f785e9 | ||
|
|
7877215d59 | ||
|
|
e4347792b8 | ||
|
|
50fde60899 | ||
|
|
38f5e293b0 | ||
|
|
b6b399a590 | ||
|
|
b83841d253 | ||
|
|
3e69304f0f | ||
|
|
fe8131f743 | ||
|
|
9ef14a20d1 | ||
|
|
5897b97065 |
1
.github/FUNDING.yml
vendored
1
.github/FUNDING.yml
vendored
@@ -1 +1,2 @@
|
||||
github: mailcow
|
||||
custom: ["https://www.servercow.de/mailcow?lang=en#sal"]
|
||||
|
||||
10
.github/ISSUE_TEMPLATE/Bug_report.yml
vendored
10
.github/ISSUE_TEMPLATE/Bug_report.yml
vendored
@@ -62,6 +62,16 @@ body:
|
||||
- nightly
|
||||
validations:
|
||||
required: true
|
||||
- type: dropdown
|
||||
attributes:
|
||||
label: "Which architecture are you using?"
|
||||
description: "#### `uname -m`"
|
||||
multiple: false
|
||||
options:
|
||||
- x86
|
||||
- ARM64 (aarch64)
|
||||
validations:
|
||||
required: true
|
||||
- type: input
|
||||
attributes:
|
||||
label: "Operating System:"
|
||||
|
||||
9
.github/ISSUE_TEMPLATE/config.yml
vendored
9
.github/ISSUE_TEMPLATE/config.yml
vendored
@@ -1,8 +1,11 @@
|
||||
blank_issues_enabled: false
|
||||
contact_links:
|
||||
- name: ❓ Community-driven support
|
||||
url: https://mailcow.github.io/mailcow-dockerized-docs/#get-support
|
||||
- name: ❓ Community-driven support (Free)
|
||||
url: https://docs.mailcow.email/#get-support
|
||||
about: Please use the community forum for questions or assistance
|
||||
- name: 🔥 Premium Support (Paid)
|
||||
url: https://www.servercow.de/mailcow?lang=en#support
|
||||
about: Buy a support subscription for any critical issues and get assisted by the mailcow Team. See conditions!
|
||||
- name: 🚨 Report a security vulnerability
|
||||
url: https://www.servercow.de/anfrage?lang=en
|
||||
url: "mailto:info@servercow.de?subject=mailcow: dockerized Security Vulnerability"
|
||||
about: Please give us appropriate time to verify, respond and fix before disclosure.
|
||||
|
||||
12
.github/ISSUE_TEMPLATE/pr_to_nighty_template.yml
vendored
12
.github/ISSUE_TEMPLATE/pr_to_nighty_template.yml
vendored
@@ -1,13 +1,3 @@
|
||||
## :memo: Brief description
|
||||
<!-- Diff summary - START -->
|
||||
<!-- Diff summary - END -->
|
||||
|
||||
|
||||
## :computer: Commits
|
||||
<!-- Diff commits - START -->
|
||||
<!-- Diff commits - END -->
|
||||
|
||||
|
||||
## :file_folder: Modified files
|
||||
<!-- Diff files - START -->
|
||||
<!-- Diff files - END -->
|
||||
<!-- Diff files - END -->
|
||||
38
.github/PULL_REQUEST_TEMPLATE.md
vendored
Normal file
38
.github/PULL_REQUEST_TEMPLATE.md
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
<!-- _Please make sure to review and check all of these items, otherwise we might refuse your PR:_ -->
|
||||
|
||||
## Contribution Guidelines
|
||||
|
||||
* [ ] I've read the [contribution guidelines](https://github.com/mailcow/mailcow-dockerized/blob/master/CONTRIBUTING.md) and wholeheartedly agree them
|
||||
|
||||
<!-- _NOTE: this tickbox is needed to fullfil on order to get your PR reviewed._ -->
|
||||
|
||||
## What does this PR include?
|
||||
|
||||
### Short Description
|
||||
|
||||
<!-- Please write a short description, what your PR does here. -->
|
||||
|
||||
### Affected Containers
|
||||
|
||||
<!-- Please list all affected Docker containers here, which you commited changes to -->
|
||||
|
||||
<!--
|
||||
|
||||
Please list them like this:
|
||||
|
||||
- container1
|
||||
- container2
|
||||
- container3
|
||||
etc.
|
||||
|
||||
-->
|
||||
|
||||
## Did you run tests?
|
||||
|
||||
### What did you tested?
|
||||
|
||||
<!-- Please write shortly, what you've tested (which components etc.). -->
|
||||
|
||||
### What were the final results? (Awaited, got)
|
||||
|
||||
<!-- Please write shortly, what your final tests results were. What did you awaited? Was the outcome the awaited one? -->
|
||||
4
.github/renovate.json
vendored
4
.github/renovate.json
vendored
@@ -12,7 +12,7 @@
|
||||
"baseBranches": ["staging"],
|
||||
"enabledManagers": ["github-actions", "regex", "docker-compose"],
|
||||
"ignorePaths": [
|
||||
"data\/web\/inc\/lib\/vendor\/matthiasmullie\/minify\/**"
|
||||
"data\/web\/inc\/lib\/vendor\/**"
|
||||
],
|
||||
"regexManagers": [
|
||||
{
|
||||
@@ -24,7 +24,7 @@
|
||||
{
|
||||
"fileMatch": ["(^|/)Dockerfile[^/]*$"],
|
||||
"matchStrings": [
|
||||
"#\\srenovate:\\sdatasource=(?<datasource>.*?) depName=(?<depName>.*?)( versioning=(?<versioning>.*?))?\\s(ENV|ARG) .*?_VERSION=(?<currentValue>.*)\\s"
|
||||
"#\\srenovate:\\sdatasource=(?<datasource>.*?) depName=(?<depName>.*?)( versioning=(?<versioning>.*?))?( extractVersion=(?<extractVersion>.*?))?\\s(ENV|ARG) .*?_VERSION=(?<currentValue>.*)\\s"
|
||||
]
|
||||
}
|
||||
]
|
||||
|
||||
37
.github/workflows/check_if_support_labeled.yml
vendored
Normal file
37
.github/workflows/check_if_support_labeled.yml
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
name: Check if labeled support, if so send message and close issue
|
||||
on:
|
||||
issues:
|
||||
types:
|
||||
- labeled
|
||||
jobs:
|
||||
add-comment:
|
||||
if: github.event.label.name == 'support'
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
issues: write
|
||||
steps:
|
||||
- name: Add comment
|
||||
run: gh issue comment "$NUMBER" --body "$BODY"
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.SUPPORTISSUES_ACTION_PAT }}
|
||||
GH_REPO: ${{ github.repository }}
|
||||
NUMBER: ${{ github.event.issue.number }}
|
||||
BODY: |
|
||||
**THIS IS A AUTOMATED MESSAGE!**
|
||||
|
||||
It seems your issue is not a bug.
|
||||
Therefore we highly advise you to get support!
|
||||
|
||||
You can get support either by:
|
||||
- ordering a paid [support contract at Servercow](https://www.servercow.de/mailcow?lang=en#support/) (Directly from the developers) or
|
||||
- using the [community forum](https://community.mailcow.email) (**Based on volunteers! NO guaranteed answer**) or
|
||||
- using the [Telegram support channel](https://t.me/mailcow) (**Based on volunteers! NO guaranteed answer**)
|
||||
|
||||
This issue will be closed. If you think your reported issue is not a support case feel free to comment above and if so the issue will reopened.
|
||||
|
||||
- name: Close issue
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.SUPPORTISSUES_ACTION_PAT }}
|
||||
GH_REPO: ${{ github.repository }}
|
||||
NUMBER: ${{ github.event.issue.number }}
|
||||
run: gh issue close "$NUMBER" -r "not planned"
|
||||
@@ -10,7 +10,7 @@ jobs:
|
||||
if: github.event.pull_request.base.ref != 'staging' #check if the target branch is not staging
|
||||
steps:
|
||||
- name: Send message
|
||||
uses: thollander/actions-comment-pull-request@v2.4.0
|
||||
uses: thollander/actions-comment-pull-request@v2.5.0
|
||||
with:
|
||||
GITHUB_TOKEN: ${{ secrets.CHECKIFPRISSTAGING_ACTION_PAT }}
|
||||
message: |
|
||||
|
||||
@@ -14,7 +14,7 @@ jobs:
|
||||
pull-requests: write
|
||||
steps:
|
||||
- name: Mark/Close Stale Issues and Pull Requests 🗑️
|
||||
uses: actions/stale@v8.0.0
|
||||
uses: actions/stale@v9.0.0
|
||||
with:
|
||||
repo-token: ${{ secrets.STALE_ACTION_PAT }}
|
||||
days-before-stale: 60
|
||||
|
||||
2
.github/workflows/image_builds.yml
vendored
2
.github/workflows/image_builds.yml
vendored
@@ -28,7 +28,7 @@ jobs:
|
||||
- "watchdog-mailcow"
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
- name: Setup Docker
|
||||
run: |
|
||||
curl -sSL https://get.docker.com/ | CHANNEL=stable sudo sh
|
||||
|
||||
2
.github/workflows/pr_to_nightly.yml
vendored
2
.github/workflows/pr_to_nightly.yml
vendored
@@ -8,7 +8,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Run the Action
|
||||
|
||||
11
.github/workflows/rebuild_backup_image.yml
vendored
11
.github/workflows/rebuild_backup_image.yml
vendored
@@ -11,24 +11,25 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.BACKUPIMAGEBUILD_ACTION_DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.BACKUPIMAGEBUILD_ACTION_DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v4
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
platforms: linux/amd64,linux/arm64
|
||||
file: data/Dockerfiles/backup/Dockerfile
|
||||
push: true
|
||||
tags: mailcow/backup:latest
|
||||
|
||||
@@ -15,14 +15,14 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Generate postscreen_access.cidr
|
||||
run: |
|
||||
bash helper-scripts/update_postscreen_whitelist.sh
|
||||
|
||||
- name: Create Pull Request
|
||||
uses: peter-evans/create-pull-request@v5
|
||||
uses: peter-evans/create-pull-request@v6
|
||||
with:
|
||||
token: ${{ secrets.mailcow_action_Update_postscreen_access_cidr_pat }}
|
||||
commit-message: update postscreen_access.cidr
|
||||
|
||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -13,6 +13,7 @@ data/conf/dovecot/acl_anyone
|
||||
data/conf/dovecot/dovecot-master.passwd
|
||||
data/conf/dovecot/dovecot-master.userdb
|
||||
data/conf/dovecot/extra.conf
|
||||
data/conf/dovecot/mail_replica.conf
|
||||
data/conf/dovecot/global_sieve_*
|
||||
data/conf/dovecot/last_login
|
||||
data/conf/dovecot/lua
|
||||
@@ -37,6 +38,7 @@ data/conf/postfix/sni.map
|
||||
data/conf/postfix/sni.map.db
|
||||
data/conf/postfix/sql
|
||||
data/conf/postfix/dns_blocklists.cf
|
||||
data/conf/postfix/dnsbl_reply.map
|
||||
data/conf/rspamd/custom/*
|
||||
data/conf/rspamd/local.d/*
|
||||
data/conf/rspamd/override.d/*
|
||||
|
||||
@@ -1,9 +1,58 @@
|
||||
When a problem occurs, then always for a reason! What you want to do in such a case is:
|
||||
# Contribution Guidelines
|
||||
**_Last modified on 15th August 2024_**
|
||||
|
||||
First of all, thank you for wanting to provide a bugfix or a new feature for the mailcow community, it's because of your help that the project can continue to grow!
|
||||
|
||||
As we want to keep mailcow's development structured we setup these Guidelines which helps you to create your issue/pull request accordingly.
|
||||
|
||||
**PLEASE NOTE, THAT WE MIGHT CLOSE ISSUES/PULL REQUESTS IF THEY DON'T FULLFIL OUR WRITTEN GUIDELINES WRITTEN INSIDE THIS DOCUMENT**. So please check this guidelines before you propose a Issue/Pull Request.
|
||||
|
||||
## Topics
|
||||
|
||||
- [Pull Requests](#pull-requests)
|
||||
- [Issue Reporting](#issue-reporting)
|
||||
- [Guidelines](#issue-reporting-guidelines)
|
||||
- [Issue Report Guide](#issue-report-guide)
|
||||
|
||||
## Pull Requests
|
||||
**_Last modified on 15th August 2024_**
|
||||
|
||||
However, please note the following regarding pull requests:
|
||||
|
||||
1. **ALWAYS** create your PR using the staging branch of your locally cloned mailcow instance, as the pull request will end up in said staging branch of mailcow once approved. Ideally, you should simply create a new branch for your pull request that is named after the type of your PR (e.g. `feat/` for function updates or `fix/` for bug fixes) and the actual content (e.g. `sogo-6.0.0` for an update from SOGo to version 6 or `html-escape` for a fix that includes escaping HTML in mailcow).
|
||||
2. **ALWAYS** report/request issues/features in the english language, even though mailcow is a german based company. This is done to allow other GitHub users to reply to your issues/requests too which did not speak german or other languages besides english.
|
||||
3. Please **keep** this pull request branch **clean** and free of commits that have nothing to do with the changes you have made (e.g. commits from other users from other branches). *If you make changes to the `update.sh` script or other scripts that trigger a commit, there is usually a developer mode for clean working in this case.*
|
||||
4. **Test your changes before you commit them as a pull request.** <ins>If possible</ins>, write a small **test log** or demonstrate the functionality with a **screenshot or GIF**. *We will of course also test your pull request ourselves, but proof from you will save us the question of whether you have tested your own changes yourself.*
|
||||
5. **Please use** the pull request template we provide once creating a pull request. *HINT: During editing you encounter comments which looks like: `<!-- CONTENT -->`. These can be removed or kept, as they will not rendered later on GitHub! Please only create actual content without the said comments.*
|
||||
6. Please **ALWAYS** create the actual pull request against the staging branch and **NEVER** directly against the master branch. *If you forget to do this, our moobot will remind you to switch the branch to staging.*
|
||||
7. Wait for a merge commit: It may happen that we do not accept your pull request immediately or sometimes not at all for various reasons. Please do not be disappointed if this is the case. We always endeavor to incorporate any meaningful changes from the community into the mailcow project.
|
||||
8. If you are planning larger and therefore more complex pull requests, it would be advisable to first announce this in a separate issue and then start implementing it after the idea has been accepted in order to avoid unnecessary frustration and effort!
|
||||
|
||||
---
|
||||
|
||||
## Issue Reporting
|
||||
**_Last modified on 15th August 2024_**
|
||||
|
||||
If you plan to report a issue within mailcow please read and understand the following rules:
|
||||
|
||||
### Issue Reporting Guidelines
|
||||
|
||||
1. **ONLY** use the issue tracker for bug reports or improvement requests and NOT for support questions. For support questions you can either contact the [mailcow community on Telegram](https://docs.mailcow.email/#community-support-and-chat) or the mailcow team directly in exchange for a [support fee](https://docs.mailcow.email/#commercial-support).
|
||||
2. **ONLY** report an error if you have the **necessary know-how (at least the basics)** for the administration of an e-mail server and the usage of Docker. mailcow is a complex and fully-fledged e-mail server including groupware components on a Docker basement and it requires a bit of technical know-how for debugging and operating.
|
||||
3. **ALWAYS** report/request issues/features in the english language, even though mailcow is a german based company. This is done to allow other GitHub users to reply to your issues/requests too which did not speak german or other languages besides english.
|
||||
4. **ONLY** report bugs that are contained in the latest mailcow release series. *The definition of the latest release series includes the last major patch (e.g. 2023-12) and all minor patches (revisions) below it (e.g. 2023-12a, b, c etc.).* New issue reports published starting from January 1, 2024 must meet this criterion, as versions below the latest releases are no longer supported by us.
|
||||
5. When reporting a problem, please be as detailed as possible and include even the smallest changes to your mailcow installation. Simply fill out the corresponding bug report form in detail and accurately to minimize possible questions.
|
||||
6. **Before you open an issue/feature request**, please first check whether a similar request already exists in the mailcow tracker on GitHub. If so, please include yourself in this request.
|
||||
7. When you create a issue/feature request: Please note that the creation does <ins>**not guarantee an instant implementation or fix by the mailcow team or the community**</ins>.
|
||||
8. Please **ALWAYS** anonymize any sensitive information in your bug report or feature request before submitting it.
|
||||
|
||||
### Issue Report Guide
|
||||
1. Read your logs; follow them to see what the reason for your problem is.
|
||||
2. Follow the leads given to you in your logfiles and start investigating.
|
||||
3. Restarting the troubled service or the whole stack to see if the problem persists.
|
||||
4. Read the [documentation](https://mailcow.github.io/mailcow-dockerized-docs/) of the troubled service and search its bugtracker for your problem.
|
||||
4. Read the [documentation](https://docs.mailcow.email/) of the troubled service and search its bugtracker for your problem.
|
||||
5. Search our [issues](https://github.com/mailcow/mailcow-dockerized/issues) for your problem.
|
||||
6. [Create an issue](https://github.com/mailcow/mailcow-dockerized/issues/new/choose) over at our GitHub repository if you think your problem might be a bug or a missing feature you badly need. But please make sure, that you include **all the logs** and a full description to your problem.
|
||||
7. Ask your questions in our community-driven [support channels](https://mailcow.github.io/mailcow-dockerized-docs/#community-support-and-chat).
|
||||
7. Ask your questions in our community-driven [support channels](https://docs.mailcow.email/#community-support-and-chat).
|
||||
|
||||
## When creating an issue/feature request or a pull request, you will be asked to confirm these guidelines.
|
||||
|
||||
10
README.md
10
README.md
@@ -2,6 +2,8 @@
|
||||
|
||||
[](https://translate.mailcow.email/engage/mailcow-dockerized/)
|
||||
[](https://twitter.com/mailcow_email)
|
||||

|
||||
|
||||
|
||||
## Want to support mailcow?
|
||||
|
||||
@@ -13,7 +15,7 @@ Or just spread the word: moo.
|
||||
|
||||
## Info, documentation and support
|
||||
|
||||
Please see [the official documentation](https://mailcow.github.io/mailcow-dockerized-docs/) for installation and support instructions. 🐄
|
||||
Please see [the official documentation](https://docs.mailcow.email/) for installation and support instructions. 🐄
|
||||
|
||||
🐛 **If you found a critical security issue, please mail us to [info at servercow.de](mailto:info@servercow.de).**
|
||||
|
||||
@@ -25,7 +27,9 @@ Please see [the official documentation](https://mailcow.github.io/mailcow-docker
|
||||
|
||||
[Telegram mailcow Off-Topic channel](https://t.me/mailcowOfftopic)
|
||||
|
||||
[Official Twitter Account](https://twitter.com/mailcow_email)
|
||||
[Official 𝕏 (Twitter) Account](https://twitter.com/mailcow_email)
|
||||
|
||||
[Official Mastodon Account](https://mailcow.social/@doncow)
|
||||
|
||||
Telegram desktop clients are available for [multiple platforms](https://desktop.telegram.org). You can search the groups history for keywords.
|
||||
|
||||
@@ -38,4 +42,4 @@ mailcow is a registered word mark of The Infrastructure Company GmbH, Parkstr. 4
|
||||
|
||||
The project is managed and maintained by The Infrastructure Company GmbH.
|
||||
|
||||
Originated from @andryyy (André)
|
||||
Originated from @andryyy (André)
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
FROM alpine:3.17
|
||||
FROM alpine:3.20
|
||||
|
||||
LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"
|
||||
|
||||
LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
|
||||
|
||||
RUN apk upgrade --no-cache \
|
||||
&& apk add --update --no-cache \
|
||||
@@ -14,9 +15,7 @@ RUN apk upgrade --no-cache \
|
||||
tini \
|
||||
tzdata \
|
||||
python3 \
|
||||
py3-pip \
|
||||
&& pip3 install --upgrade pip \
|
||||
&& pip3 install acme-tiny
|
||||
acme-tiny --repository=http://dl-cdn.alpinelinux.org/alpine/edge/community/
|
||||
|
||||
COPY acme.sh /srv/acme.sh
|
||||
COPY functions.sh /srv/functions.sh
|
||||
|
||||
@@ -33,6 +33,10 @@ if [[ "${ONLY_MAILCOW_HOSTNAME}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
|
||||
ONLY_MAILCOW_HOSTNAME=y
|
||||
fi
|
||||
|
||||
if [[ "${AUTODISCOVER_SAN}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
|
||||
AUTODISCOVER_SAN=y
|
||||
fi
|
||||
|
||||
# Request individual certificate for every domain
|
||||
if [[ "${ENABLE_SSL_SNI}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
|
||||
ENABLE_SSL_SNI=y
|
||||
@@ -113,13 +117,13 @@ fi
|
||||
chmod 600 ${ACME_BASE}/key.pem
|
||||
|
||||
log_f "Waiting for database..."
|
||||
while ! mysqladmin status --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent > /dev/null; do
|
||||
while ! /usr/bin/mariadb-admin status --ssl=false --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent > /dev/null; do
|
||||
sleep 2
|
||||
done
|
||||
log_f "Database OK"
|
||||
|
||||
log_f "Waiting for Nginx..."
|
||||
until $(curl --output /dev/null --silent --head --fail http://nginx:8081); do
|
||||
until $(curl --output /dev/null --silent --head --fail http://nginx.${COMPOSE_PROJECT_NAME}_mailcow-network:8081); do
|
||||
sleep 2
|
||||
done
|
||||
log_f "Nginx OK"
|
||||
@@ -133,7 +137,7 @@ log_f "Resolver OK"
|
||||
# Waiting for domain table
|
||||
log_f "Waiting for domain table..."
|
||||
while [[ -z ${DOMAIN_TABLE} ]]; do
|
||||
curl --silent http://nginx/ >/dev/null 2>&1
|
||||
curl --silent http://nginx.${COMPOSE_PROJECT_NAME}_mailcow-network/ >/dev/null 2>&1
|
||||
DOMAIN_TABLE=$(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SHOW TABLES LIKE 'domain'" -Bs)
|
||||
[[ -z ${DOMAIN_TABLE} ]] && sleep 10
|
||||
done
|
||||
@@ -211,7 +215,11 @@ while true; do
|
||||
ADDITIONAL_SAN_ARR+=($i)
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ ${AUTODISCOVER_SAN} == "y" ]]; then
|
||||
# Fetch certs for autoconfig and autodiscover subdomains
|
||||
ADDITIONAL_WC_ARR+=('autodiscover' 'autoconfig')
|
||||
fi
|
||||
|
||||
if [[ ${SKIP_IP_CHECK} != "y" ]]; then
|
||||
# Start IP detection
|
||||
|
||||
@@ -2,32 +2,32 @@
|
||||
|
||||
# Reading container IDs
|
||||
# Wrapping as array to ensure trimmed content when calling $NGINX etc.
|
||||
NGINX=($(curl --silent --insecure https://dockerapi/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"nginx-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" | tr "\n" " "))
|
||||
DOVECOT=($(curl --silent --insecure https://dockerapi/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"dovecot-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" | tr "\n" " "))
|
||||
POSTFIX=($(curl --silent --insecure https://dockerapi/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"postfix-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" | tr "\n" " "))
|
||||
NGINX=($(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"nginx-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" | tr "\n" " "))
|
||||
DOVECOT=($(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"dovecot-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" | tr "\n" " "))
|
||||
POSTFIX=($(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"postfix-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" | tr "\n" " "))
|
||||
|
||||
reload_nginx(){
|
||||
echo "Reloading Nginx..."
|
||||
NGINX_RELOAD_RET=$(curl -X POST --insecure https://dockerapi/containers/${NGINX}/exec -d '{"cmd":"reload", "task":"nginx"}' --silent -H 'Content-type: application/json' | jq -r .type)
|
||||
NGINX_RELOAD_RET=$(curl -X POST --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${NGINX}/exec -d '{"cmd":"reload", "task":"nginx"}' --silent -H 'Content-type: application/json' | jq -r .type)
|
||||
[[ ${NGINX_RELOAD_RET} != 'success' ]] && { echo "Could not reload Nginx, restarting container..."; restart_container ${NGINX} ; }
|
||||
}
|
||||
|
||||
reload_dovecot(){
|
||||
echo "Reloading Dovecot..."
|
||||
DOVECOT_RELOAD_RET=$(curl -X POST --insecure https://dockerapi/containers/${DOVECOT}/exec -d '{"cmd":"reload", "task":"dovecot"}' --silent -H 'Content-type: application/json' | jq -r .type)
|
||||
DOVECOT_RELOAD_RET=$(curl -X POST --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${DOVECOT}/exec -d '{"cmd":"reload", "task":"dovecot"}' --silent -H 'Content-type: application/json' | jq -r .type)
|
||||
[[ ${DOVECOT_RELOAD_RET} != 'success' ]] && { echo "Could not reload Dovecot, restarting container..."; restart_container ${DOVECOT} ; }
|
||||
}
|
||||
|
||||
reload_postfix(){
|
||||
echo "Reloading Postfix..."
|
||||
POSTFIX_RELOAD_RET=$(curl -X POST --insecure https://dockerapi/containers/${POSTFIX}/exec -d '{"cmd":"reload", "task":"postfix"}' --silent -H 'Content-type: application/json' | jq -r .type)
|
||||
POSTFIX_RELOAD_RET=$(curl -X POST --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${POSTFIX}/exec -d '{"cmd":"reload", "task":"postfix"}' --silent -H 'Content-type: application/json' | jq -r .type)
|
||||
[[ ${POSTFIX_RELOAD_RET} != 'success' ]] && { echo "Could not reload Postfix, restarting container..."; restart_container ${POSTFIX} ; }
|
||||
}
|
||||
|
||||
restart_container(){
|
||||
for container in $*; do
|
||||
echo "Restarting ${container}..."
|
||||
C_REST_OUT=$(curl -X POST --insecure https://dockerapi/containers/${container}/restart --silent | jq -r '.msg')
|
||||
C_REST_OUT=$(curl -X POST --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${container}/restart --silent | jq -r '.msg')
|
||||
echo "${C_REST_OUT}"
|
||||
done
|
||||
}
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
FROM debian:bullseye-slim
|
||||
FROM debian:bookworm-slim
|
||||
|
||||
RUN apt update && apt install pigz
|
||||
@@ -1,12 +1,14 @@
|
||||
FROM clamav/clamav:1.0.1-1_base
|
||||
FROM alpine:3.20
|
||||
|
||||
LABEL maintainer "André Peters <andre.peters@servercow.de>"
|
||||
LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"
|
||||
|
||||
RUN apk upgrade --no-cache \
|
||||
&& apk add --update --no-cache \
|
||||
rsync \
|
||||
clamav \
|
||||
bind-tools \
|
||||
bash
|
||||
bash \
|
||||
tini
|
||||
|
||||
# init
|
||||
COPY clamd.sh /clamd.sh
|
||||
@@ -14,7 +16,9 @@ RUN chmod +x /sbin/tini
|
||||
|
||||
# healthcheck
|
||||
COPY healthcheck.sh /healthcheck.sh
|
||||
COPY clamdcheck.sh /usr/local/bin
|
||||
RUN chmod +x /healthcheck.sh
|
||||
RUN chmod +x /usr/local/bin/clamdcheck.sh
|
||||
HEALTHCHECK --start-period=6m CMD "/healthcheck.sh"
|
||||
|
||||
ENTRYPOINT []
|
||||
|
||||
14
data/Dockerfiles/clamd/clamdcheck.sh
Normal file
14
data/Dockerfiles/clamd/clamdcheck.sh
Normal file
@@ -0,0 +1,14 @@
|
||||
#!/bin/sh
|
||||
|
||||
set -eu
|
||||
|
||||
if [ "${CLAMAV_NO_CLAMD:-}" != "false" ]; then
|
||||
if [ "$(echo "PING" | nc localhost 3310)" != "PONG" ]; then
|
||||
echo "ERROR: Unable to contact server"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Clamd is up"
|
||||
fi
|
||||
|
||||
exit 0
|
||||
@@ -1,7 +1,8 @@
|
||||
FROM alpine:3.17
|
||||
FROM alpine:3.20
|
||||
|
||||
LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
|
||||
LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"
|
||||
|
||||
ARG PIP_BREAK_SYSTEM_PACKAGES=1
|
||||
WORKDIR /app
|
||||
|
||||
RUN apk add --update --no-cache python3 \
|
||||
@@ -9,12 +10,13 @@ RUN apk add --update --no-cache python3 \
|
||||
openssl \
|
||||
tzdata \
|
||||
py3-psutil \
|
||||
py3-redis \
|
||||
py3-async-timeout \
|
||||
&& pip3 install --upgrade pip \
|
||||
fastapi \
|
||||
uvicorn \
|
||||
aiodocker \
|
||||
docker \
|
||||
aioredis
|
||||
docker
|
||||
RUN mkdir /app/modules
|
||||
|
||||
COPY docker-entrypoint.sh /app/
|
||||
@@ -22,4 +24,4 @@ COPY main.py /app/main.py
|
||||
COPY modules/ /app/modules/
|
||||
|
||||
ENTRYPOINT ["/bin/sh", "/app/docker-entrypoint.sh"]
|
||||
CMD exec python main.py
|
||||
CMD ["python", "main.py"]
|
||||
@@ -5,16 +5,63 @@ import json
|
||||
import uuid
|
||||
import async_timeout
|
||||
import asyncio
|
||||
import aioredis
|
||||
import aiodocker
|
||||
import docker
|
||||
import logging
|
||||
from logging.config import dictConfig
|
||||
from fastapi import FastAPI, Response, Request
|
||||
from modules.DockerApi import DockerApi
|
||||
from redis import asyncio as aioredis
|
||||
from contextlib import asynccontextmanager
|
||||
|
||||
dockerapi = None
|
||||
app = FastAPI()
|
||||
|
||||
@asynccontextmanager
|
||||
async def lifespan(app: FastAPI):
|
||||
global dockerapi
|
||||
|
||||
# Initialize a custom logger
|
||||
logger = logging.getLogger("dockerapi")
|
||||
logger.setLevel(logging.INFO)
|
||||
# Configure the logger to output logs to the terminal
|
||||
handler = logging.StreamHandler()
|
||||
handler.setLevel(logging.INFO)
|
||||
formatter = logging.Formatter("%(levelname)s: %(message)s")
|
||||
handler.setFormatter(formatter)
|
||||
logger.addHandler(handler)
|
||||
|
||||
logger.info("Init APP")
|
||||
|
||||
# Init redis client
|
||||
if os.environ['REDIS_SLAVEOF_IP'] != "":
|
||||
redis_client = redis = await aioredis.from_url(f"redis://{os.environ['REDIS_SLAVEOF_IP']}:{os.environ['REDIS_SLAVEOF_PORT']}/0")
|
||||
else:
|
||||
redis_client = redis = await aioredis.from_url("redis://redis-mailcow:6379/0")
|
||||
|
||||
# Init docker clients
|
||||
sync_docker_client = docker.DockerClient(base_url='unix://var/run/docker.sock', version='auto')
|
||||
async_docker_client = aiodocker.Docker(url='unix:///var/run/docker.sock')
|
||||
|
||||
dockerapi = DockerApi(redis_client, sync_docker_client, async_docker_client, logger)
|
||||
|
||||
logger.info("Subscribe to redis channel")
|
||||
# Subscribe to redis channel
|
||||
dockerapi.pubsub = redis.pubsub()
|
||||
await dockerapi.pubsub.subscribe("MC_CHANNEL")
|
||||
asyncio.create_task(handle_pubsub_messages(dockerapi.pubsub))
|
||||
|
||||
|
||||
yield
|
||||
|
||||
# Close docker connections
|
||||
dockerapi.sync_docker_client.close()
|
||||
await dockerapi.async_docker_client.close()
|
||||
|
||||
# Close redis
|
||||
await dockerapi.pubsub.unsubscribe("MC_CHANNEL")
|
||||
await dockerapi.redis_client.close()
|
||||
|
||||
app = FastAPI(lifespan=lifespan)
|
||||
|
||||
# Define Routes
|
||||
@app.get("/host/stats")
|
||||
@@ -144,53 +191,7 @@ async def post_container_update_stats(container_id : str):
|
||||
|
||||
stats = json.loads(await dockerapi.redis_client.get(container_id + '_stats'))
|
||||
return Response(content=json.dumps(stats, indent=4), media_type="application/json")
|
||||
|
||||
# Events
|
||||
@app.on_event("startup")
|
||||
async def startup_event():
|
||||
global dockerapi
|
||||
|
||||
# Initialize a custom logger
|
||||
logger = logging.getLogger("dockerapi")
|
||||
logger.setLevel(logging.INFO)
|
||||
# Configure the logger to output logs to the terminal
|
||||
handler = logging.StreamHandler()
|
||||
handler.setLevel(logging.INFO)
|
||||
formatter = logging.Formatter("%(levelname)s: %(message)s")
|
||||
handler.setFormatter(formatter)
|
||||
logger.addHandler(handler)
|
||||
|
||||
logger.info("Init APP")
|
||||
|
||||
# Init redis client
|
||||
if os.environ['REDIS_SLAVEOF_IP'] != "":
|
||||
redis_client = redis = await aioredis.from_url(f"redis://{os.environ['REDIS_SLAVEOF_IP']}:{os.environ['REDIS_SLAVEOF_PORT']}/0")
|
||||
else:
|
||||
redis_client = redis = await aioredis.from_url("redis://redis-mailcow:6379/0")
|
||||
|
||||
# Init docker clients
|
||||
sync_docker_client = docker.DockerClient(base_url='unix://var/run/docker.sock', version='auto')
|
||||
async_docker_client = aiodocker.Docker(url='unix:///var/run/docker.sock')
|
||||
|
||||
dockerapi = DockerApi(redis_client, sync_docker_client, async_docker_client, logger)
|
||||
|
||||
logger.info("Subscribe to redis channel")
|
||||
# Subscribe to redis channel
|
||||
dockerapi.pubsub = redis.pubsub()
|
||||
await dockerapi.pubsub.subscribe("MC_CHANNEL")
|
||||
asyncio.create_task(handle_pubsub_messages(dockerapi.pubsub))
|
||||
|
||||
@app.on_event("shutdown")
|
||||
async def shutdown_event():
|
||||
global dockerapi
|
||||
|
||||
# Close docker connections
|
||||
dockerapi.sync_docker_client.close()
|
||||
await dockerapi.async_docker_client.close()
|
||||
|
||||
# Close redis
|
||||
await dockerapi.pubsub.unsubscribe("MC_CHANNEL")
|
||||
await dockerapi.redis_client.close()
|
||||
|
||||
|
||||
# PubSub Handler
|
||||
async def handle_pubsub_messages(channel: aioredis.client.PubSub):
|
||||
@@ -198,8 +199,8 @@ async def handle_pubsub_messages(channel: aioredis.client.PubSub):
|
||||
|
||||
while True:
|
||||
try:
|
||||
async with async_timeout.timeout(1):
|
||||
message = await channel.get_message(ignore_subscribe_messages=True)
|
||||
async with async_timeout.timeout(60):
|
||||
message = await channel.get_message(ignore_subscribe_messages=True, timeout=30)
|
||||
if message is not None:
|
||||
# Parse message
|
||||
data_json = json.loads(message['data'].decode('utf-8'))
|
||||
@@ -244,7 +245,7 @@ async def handle_pubsub_messages(channel: aioredis.client.PubSub):
|
||||
else:
|
||||
dockerapi.logger.error("Unknwon PubSub recieved - %s" % json.dumps(data_json))
|
||||
|
||||
await asyncio.sleep(0.01)
|
||||
await asyncio.sleep(0.0)
|
||||
except asyncio.TimeoutError:
|
||||
pass
|
||||
|
||||
|
||||
@@ -358,8 +358,8 @@ class DockerApi:
|
||||
for line in cmd_response.split("\n"):
|
||||
if '$2$' in line:
|
||||
hash = line.strip()
|
||||
hash_out = re.search('\$2\$.+$', hash).group(0)
|
||||
rspamd_passphrase_hash = re.sub('[^0-9a-zA-Z\$]+', '', hash_out.rstrip())
|
||||
hash_out = re.search(r'\$2\$.+$', hash).group(0)
|
||||
rspamd_passphrase_hash = re.sub(r'[^0-9a-zA-Z\$]+', '', hash_out.rstrip())
|
||||
rspamd_password_filename = "/etc/rspamd/override.d/worker-controller-password.inc"
|
||||
cmd = '''/bin/echo 'enable_password = "%s";' > %s && cat %s''' % (rspamd_passphrase_hash, rspamd_password_filename, rspamd_password_filename)
|
||||
cmd_response = self.exec_cmd_container(container, cmd, user="_rspamd")
|
||||
|
||||
@@ -1,119 +1,116 @@
|
||||
FROM debian:bullseye-slim
|
||||
LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
|
||||
FROM alpine:3.20
|
||||
|
||||
ARG DEBIAN_FRONTEND=noninteractive
|
||||
# renovate: datasource=github-tags depName=dovecot/core versioning=semver-coerced
|
||||
ARG DOVECOT=2.3.20
|
||||
# renovate: datasource=github-releases depName=tianon/gosu versioning=semver-coerced
|
||||
LABEL maintainer="The Infrastructure Company GmbH <info@servercow.de>"
|
||||
|
||||
# renovate: datasource=github-releases depName=tianon/gosu versioning=semver-coerced extractVersion=^(?<version>.*)$
|
||||
ARG GOSU_VERSION=1.16
|
||||
ENV LC_ALL C
|
||||
|
||||
ENV LANG=C.UTF-8
|
||||
ENV LC_ALL=C.UTF-8
|
||||
|
||||
# Add groups and users before installing Dovecot to not break compatibility
|
||||
RUN groupadd -g 5000 vmail \
|
||||
&& groupadd -g 401 dovecot \
|
||||
&& groupadd -g 402 dovenull \
|
||||
&& groupadd -g 999 sogo \
|
||||
&& usermod -a -G sogo nobody \
|
||||
&& useradd -g vmail -u 5000 vmail -d /var/vmail \
|
||||
&& useradd -c "Dovecot unprivileged user" -d /dev/null -u 401 -g dovecot -s /bin/false dovecot \
|
||||
&& useradd -c "Dovecot login user" -d /dev/null -u 402 -g dovenull -s /bin/false dovenull \
|
||||
&& touch /etc/default/locale \
|
||||
&& apt-get update \
|
||||
&& apt-get -y --no-install-recommends install \
|
||||
build-essential \
|
||||
apt-transport-https \
|
||||
RUN addgroup -g 5000 vmail \
|
||||
&& addgroup -g 401 dovecot \
|
||||
&& addgroup -g 402 dovenull \
|
||||
&& sed -i "s/999/99/" /etc/group \
|
||||
&& addgroup -g 999 sogo \
|
||||
&& addgroup nobody sogo \
|
||||
&& adduser -D -u 5000 -G vmail -h /var/vmail vmail \
|
||||
&& adduser -D -G dovecot -u 401 -h /dev/null -s /sbin/nologin dovecot \
|
||||
&& adduser -D -G dovenull -u 402 -h /dev/null -s /sbin/nologin dovenull \
|
||||
&& apk add --no-cache --update \
|
||||
bash \
|
||||
bind-tools \
|
||||
findutils \
|
||||
envsubst \
|
||||
ca-certificates \
|
||||
cpanminus \
|
||||
curl \
|
||||
dnsutils \
|
||||
dirmngr \
|
||||
gettext \
|
||||
gnupg2 \
|
||||
coreutils \
|
||||
jq \
|
||||
libauthen-ntlm-perl \
|
||||
libcgi-pm-perl \
|
||||
libcrypt-openssl-rsa-perl \
|
||||
libcrypt-ssleay-perl \
|
||||
libdata-uniqid-perl \
|
||||
libdbd-mysql-perl \
|
||||
libdbi-perl \
|
||||
libdigest-hmac-perl \
|
||||
libdist-checkconflicts-perl \
|
||||
libencode-imaputf7-perl \
|
||||
libfile-copy-recursive-perl \
|
||||
libfile-tail-perl \
|
||||
libhtml-parser-perl \
|
||||
libio-compress-perl \
|
||||
libio-socket-inet6-perl \
|
||||
libio-socket-ssl-perl \
|
||||
libio-tee-perl \
|
||||
libipc-run-perl \
|
||||
libjson-webtoken-perl \
|
||||
liblockfile-simple-perl \
|
||||
libmail-imapclient-perl \
|
||||
libmodule-implementation-perl \
|
||||
libmodule-scandeps-perl \
|
||||
libnet-ssleay-perl \
|
||||
libpackage-stash-perl \
|
||||
libpackage-stash-xs-perl \
|
||||
libpar-packer-perl \
|
||||
libparse-recdescent-perl \
|
||||
libproc-processtable-perl \
|
||||
libreadonly-perl \
|
||||
libregexp-common-perl \
|
||||
libssl-dev \
|
||||
libsys-meminfo-perl \
|
||||
libterm-readkey-perl \
|
||||
libtest-deep-perl \
|
||||
libtest-fatal-perl \
|
||||
libtest-mock-guard-perl \
|
||||
libtest-mockobject-perl \
|
||||
libtest-nowarnings-perl \
|
||||
libtest-pod-perl \
|
||||
libtest-requires-perl \
|
||||
libtest-simple-perl \
|
||||
libtest-warn-perl \
|
||||
libtry-tiny-perl \
|
||||
libunicode-string-perl \
|
||||
liburi-perl \
|
||||
libwww-perl \
|
||||
lua-sql-mysql \
|
||||
lua \
|
||||
lua-cjson \
|
||||
lua-socket \
|
||||
lua-sql-mysql \
|
||||
lua5.3-sql-mysql \
|
||||
icu-data-full \
|
||||
mariadb-connector-c \
|
||||
gcompat \
|
||||
mariadb-client \
|
||||
perl \
|
||||
perl-ntlm \
|
||||
perl-cgi \
|
||||
perl-crypt-openssl-rsa \
|
||||
perl-utils \
|
||||
perl-crypt-ssleay \
|
||||
perl-data-uniqid \
|
||||
perl-dbd-mysql \
|
||||
perl-dbi \
|
||||
perl-digest-hmac \
|
||||
perl-dist-checkconflicts \
|
||||
perl-encode-imaputf7 \
|
||||
perl-file-copy-recursive \
|
||||
perl-file-tail \
|
||||
perl-io-socket-inet6 \
|
||||
perl-io-gzip \
|
||||
perl-io-socket-ssl \
|
||||
perl-io-tee \
|
||||
perl-ipc-run \
|
||||
perl-json-webtoken \
|
||||
perl-mail-imapclient \
|
||||
perl-module-implementation \
|
||||
perl-module-scandeps \
|
||||
perl-net-ssleay \
|
||||
perl-package-stash \
|
||||
perl-package-stash-xs \
|
||||
perl-par-packer \
|
||||
perl-parse-recdescent \
|
||||
perl-lockfile-simple \
|
||||
libproc \
|
||||
perl-readonly \
|
||||
perl-regexp-common \
|
||||
perl-sys-meminfo \
|
||||
perl-term-readkey \
|
||||
perl-test-deep \
|
||||
perl-test-fatal \
|
||||
perl-test-mockobject \
|
||||
perl-test-mock-guard \
|
||||
perl-test-pod \
|
||||
perl-test-requires \
|
||||
perl-test-simple \
|
||||
perl-test-warn \
|
||||
perl-try-tiny \
|
||||
perl-unicode-string \
|
||||
perl-proc-processtable \
|
||||
perl-app-cpanminus \
|
||||
procps \
|
||||
python3-pip \
|
||||
redis-server \
|
||||
supervisor \
|
||||
python3 \
|
||||
py3-mysqlclient \
|
||||
py3-html2text \
|
||||
py3-jinja2 \
|
||||
py3-redis \
|
||||
redis \
|
||||
syslog-ng \
|
||||
syslog-ng-core \
|
||||
syslog-ng-mod-redis \
|
||||
syslog-ng-redis \
|
||||
syslog-ng-json \
|
||||
supervisor \
|
||||
tzdata \
|
||||
wget \
|
||||
&& dpkgArch="$(dpkg --print-architecture | awk -F- '{ print $NF }')" \
|
||||
&& wget -O /usr/local/bin/gosu "https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$dpkgArch" \
|
||||
&& chmod +x /usr/local/bin/gosu \
|
||||
&& gosu nobody true \
|
||||
&& apt-key adv --fetch-keys https://repo.dovecot.org/DOVECOT-REPO-GPG \
|
||||
&& echo "deb https://repo.dovecot.org/ce-${DOVECOT}/debian/bullseye bullseye main" > /etc/apt/sources.list.d/dovecot.list \
|
||||
&& apt-get update \
|
||||
&& apt-get -y --no-install-recommends install \
|
||||
dovecot-lua \
|
||||
dovecot-managesieved \
|
||||
dovecot-sieve \
|
||||
dovecot \
|
||||
dovecot-dev \
|
||||
dovecot-lmtpd \
|
||||
dovecot-lua \
|
||||
dovecot-ldap \
|
||||
dovecot-mysql \
|
||||
dovecot-core \
|
||||
dovecot-sql \
|
||||
dovecot-submissiond \
|
||||
dovecot-pigeonhole-plugin \
|
||||
dovecot-pop3d \
|
||||
dovecot-imapd \
|
||||
dovecot-solr \
|
||||
&& pip3 install mysql-connector-python html2text jinja2 redis \
|
||||
&& apt-get autoremove --purge -y \
|
||||
&& apt-get autoclean \
|
||||
&& rm -rf /var/lib/apt/lists/* \
|
||||
&& rm -rf /tmp/* /var/tmp/* /root/.cache/
|
||||
# imapsync dependencies
|
||||
RUN cpan Crypt::OpenSSL::PKCS12
|
||||
dovecot-fts-solr \
|
||||
dovecot-fts-flatcurve \
|
||||
&& arch=$(arch | sed s/aarch64/arm64/ | sed s/x86_64/amd64/) \
|
||||
&& wget -O /usr/local/bin/gosu "https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$arch" \
|
||||
&& chmod +x /usr/local/bin/gosu \
|
||||
&& gosu nobody true
|
||||
|
||||
COPY trim_logs.sh /usr/local/bin/trim_logs.sh
|
||||
COPY clean_q_aged.sh /usr/local/bin/clean_q_aged.sh
|
||||
@@ -133,6 +130,7 @@ COPY stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh
|
||||
COPY quarantine_notify.py /usr/local/bin/quarantine_notify.py
|
||||
COPY quota_notify.py /usr/local/bin/quota_notify.py
|
||||
COPY repl_health.sh /usr/local/bin/repl_health.sh
|
||||
COPY optimize-fts.sh /usr/local/bin/optimize-fts.sh
|
||||
|
||||
ENTRYPOINT ["/docker-entrypoint.sh"]
|
||||
CMD exec /usr/bin/supervisord -c /etc/supervisor/supervisord.conf
|
||||
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
set -e
|
||||
|
||||
# Wait for MySQL to warm-up
|
||||
while ! mysqladmin status --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
|
||||
while ! mariadb-admin status --ssl=false --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
|
||||
echo "Waiting for database to come up..."
|
||||
sleep 2
|
||||
done
|
||||
@@ -29,6 +29,7 @@ ${REDIS_CMDLINE} SET DOVECOT_REPL_HEALTH 1 > /dev/null
|
||||
# Create missing directories
|
||||
[[ ! -d /etc/dovecot/sql/ ]] && mkdir -p /etc/dovecot/sql/
|
||||
[[ ! -d /etc/dovecot/lua/ ]] && mkdir -p /etc/dovecot/lua/
|
||||
[[ ! -d /etc/dovecot/conf.d/ ]] && mkdir -p /etc/dovecot/conf.d/
|
||||
[[ ! -d /var/vmail/_garbage ]] && mkdir -p /var/vmail/_garbage
|
||||
[[ ! -d /var/vmail/sieve ]] && mkdir -p /var/vmail/sieve
|
||||
[[ ! -d /etc/sogo ]] && mkdir -p /etc/sogo
|
||||
@@ -109,7 +110,14 @@ EOF
|
||||
|
||||
echo -n ${ACL_ANYONE} > /etc/dovecot/acl_anyone
|
||||
|
||||
if [[ "${SKIP_SOLR}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
|
||||
if [[ "${FLATCURVE_EXPERIMENTAL}" =~ ^([yY][eE][sS]|[yY]) ]]; then
|
||||
echo -e "\e[33mActivating Flatcurve as FTS Backend...\e[0m"
|
||||
echo -e "\e[33mDepending on your previous setup a full reindex might be needed... \e[0m"
|
||||
echo -e "\e[34mVisit https://docs.mailcow.email/manual-guides/Dovecot/u_e-dovecot-fts/#fts-related-dovecot-commands to learn how to reindex\e[0m"
|
||||
echo -n 'quota acl zlib mail_crypt mail_crypt_acl mail_log notify fts fts_flatcurve listescape replication' > /etc/dovecot/mail_plugins
|
||||
echo -n 'quota imap_quota imap_acl acl zlib imap_zlib imap_sieve mail_crypt mail_crypt_acl notify mail_log fts fts_flatcurve listescape replication' > /etc/dovecot/mail_plugins_imap
|
||||
echo -n 'quota sieve acl zlib mail_crypt mail_crypt_acl fts fts_flatcurve notify listescape replication' > /etc/dovecot/mail_plugins_lmtp
|
||||
elif [[ "${SKIP_SOLR}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
|
||||
echo -n 'quota acl zlib mail_crypt mail_crypt_acl mail_log notify listescape replication' > /etc/dovecot/mail_plugins
|
||||
echo -n 'quota imap_quota imap_acl acl zlib imap_zlib imap_sieve mail_crypt mail_crypt_acl notify listescape replication mail_log' > /etc/dovecot/mail_plugins_imap
|
||||
echo -n 'quota sieve acl zlib mail_crypt mail_crypt_acl notify listescape replication' > /etc/dovecot/mail_plugins_lmtp
|
||||
@@ -239,6 +247,51 @@ function script_deinit()
|
||||
end
|
||||
EOF
|
||||
|
||||
# Temporarily set FTS depending on user choice inside mailcow.conf. Will be removed as soon as Solr is dropped
|
||||
if [[ "${FLATCURVE_EXPERIMENTAL}" =~ ^([yY][eE][sS]|[yY])$ ]]; then
|
||||
cat <<EOF > /etc/dovecot/conf.d/fts.conf
|
||||
# Autogenerated by mailcow
|
||||
plugin {
|
||||
fts_autoindex = yes
|
||||
fts_autoindex_exclude = \Junk
|
||||
fts_autoindex_exclude2 = \Trash
|
||||
fts = flatcurve
|
||||
|
||||
# Maximum term length can be set via the 'maxlen' argument (maxlen is
|
||||
# specified in bytes, not number of UTF-8 characters)
|
||||
fts_tokenizer_email_address = maxlen=100
|
||||
fts_tokenizer_generic = algorithm=simple maxlen=30
|
||||
|
||||
# These are not flatcurve settings, but required for Dovecot FTS. See
|
||||
# Dovecot FTS Configuration link above for further information.
|
||||
fts_languages = en es de
|
||||
fts_tokenizers = generic email-address
|
||||
|
||||
# OPTIONAL: Recommended default FTS core configuration
|
||||
fts_filters = normalizer-icu snowball stopwords
|
||||
fts_filters_en = lowercase snowball english-possessive stopwords
|
||||
}
|
||||
EOF
|
||||
elif [[ ! "${SKIP_SOLR}" =~ ^([yY][eE][sS]|[yY])$ ]]; then
|
||||
cat <<EOF > /etc/dovecot/conf.d/fts.conf
|
||||
# Autogenerated by mailcow
|
||||
plugin {
|
||||
fts = solr
|
||||
fts_autoindex = yes
|
||||
fts_autoindex_exclude = \Junk
|
||||
fts_autoindex_exclude2 = \Trash
|
||||
fts_solr = url=http://solr:8983/solr/dovecot-fts/
|
||||
|
||||
fts_tokenizers = generic email-address
|
||||
fts_tokenizer_generic = algorithm=simple
|
||||
|
||||
fts_filters = normalizer-icu snowball stopwords
|
||||
fts_filters_en = lowercase snowball english-possessive stopwords
|
||||
}
|
||||
EOF
|
||||
fi
|
||||
|
||||
|
||||
# Replace patterns in app-passdb.lua
|
||||
sed -i "s/__DBUSER__/${DBUSER}/g" /etc/dovecot/lua/passwd-verify.lua
|
||||
sed -i "s/__DBPASS__/${DBPASS}/g" /etc/dovecot/lua/passwd-verify.lua
|
||||
@@ -335,6 +388,14 @@ sys.exit()
|
||||
EOF
|
||||
fi
|
||||
|
||||
# Set mail_replica for HA setups
|
||||
if [[ -n ${MAILCOW_REPLICA_IP} && -n ${DOVEADM_REPLICA_PORT} ]]; then
|
||||
cat <<EOF > /etc/dovecot/mail_replica.conf
|
||||
# Autogenerated by mailcow
|
||||
mail_replica = tcp:${MAILCOW_REPLICA_IP}:${DOVEADM_REPLICA_PORT}
|
||||
EOF
|
||||
fi
|
||||
|
||||
# 401 is user dovecot
|
||||
if [[ ! -s /mail_crypt/ecprivkey.pem || ! -s /mail_crypt/ecpubkey.pem ]]; then
|
||||
openssl ecparam -name prime256v1 -genkey | openssl pkey -out /mail_crypt/ecprivkey.pem
|
||||
@@ -350,14 +411,6 @@ sievec /var/vmail/sieve/global_sieve_after.sieve
|
||||
sievec /usr/lib/dovecot/sieve/report-spam.sieve
|
||||
sievec /usr/lib/dovecot/sieve/report-ham.sieve
|
||||
|
||||
for file in /var/vmail/*/*/sieve/*.sieve ; do
|
||||
if [[ "$file" == "/var/vmail/*/*/sieve/*.sieve" ]]; then
|
||||
continue
|
||||
fi
|
||||
sievec "$file" "$(dirname "$file")/../.dovecot.svbin"
|
||||
chown vmail:vmail "$(dirname "$file")/../.dovecot.svbin"
|
||||
done
|
||||
|
||||
# Fix permissions
|
||||
chown root:root /etc/dovecot/sql/*.conf
|
||||
chown root:dovecot /etc/dovecot/sql/dovecot-dict-sql-sieve* /etc/dovecot/sql/dovecot-dict-sql-quota* /etc/dovecot/lua/passwd-verify.lua
|
||||
@@ -378,7 +431,8 @@ chmod +x /usr/lib/dovecot/sieve/rspamd-pipe-ham \
|
||||
/usr/local/bin/maildir_gc.sh \
|
||||
/usr/local/sbin/stop-supervisor.sh \
|
||||
/usr/local/bin/quota_notify.py \
|
||||
/usr/local/bin/repl_health.sh
|
||||
/usr/local/bin/repl_health.sh \
|
||||
/usr/local/bin/optimize-fts.sh
|
||||
|
||||
# Prepare environment file for cronjobs
|
||||
printenv | sed 's/^\(.*\)$/export \1/g' > /source_env.sh
|
||||
@@ -432,4 +486,8 @@ done
|
||||
# May be related to something inside Docker, I seriously don't know
|
||||
touch /etc/dovecot/lua/passwd-verify.lua
|
||||
|
||||
if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
|
||||
cp /etc/syslog-ng/syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng.conf
|
||||
fi
|
||||
|
||||
exec "$@"
|
||||
|
||||
@@ -75,7 +75,8 @@ my $sth = $dbh->prepare("SELECT id,
|
||||
custom_params,
|
||||
subscribeall,
|
||||
timeout1,
|
||||
timeout2
|
||||
timeout2,
|
||||
dry
|
||||
FROM imapsync
|
||||
WHERE active = 1
|
||||
AND is_running = 0
|
||||
@@ -111,13 +112,16 @@ while ($row = $sth->fetchrow_arrayref()) {
|
||||
$subscribeall = @$row[18];
|
||||
$timeout1 = @$row[19];
|
||||
$timeout2 = @$row[20];
|
||||
$dry = @$row[21];
|
||||
|
||||
if ($enc1 eq "TLS") { $enc1 = "--tls1"; } elsif ($enc1 eq "SSL") { $enc1 = "--ssl1"; } else { undef $enc1; }
|
||||
|
||||
my $template = $run_dir . '/imapsync.XXXXXXX';
|
||||
my $passfile1 = File::Temp->new(TEMPLATE => $template);
|
||||
my $passfile2 = File::Temp->new(TEMPLATE => $template);
|
||||
|
||||
|
||||
binmode( $passfile1, ":utf8" );
|
||||
|
||||
print $passfile1 "$password1\n";
|
||||
print $passfile2 trim($master_pass) . "\n";
|
||||
|
||||
@@ -148,6 +152,7 @@ while ($row = $sth->fetchrow_arrayref()) {
|
||||
"--host2", "localhost",
|
||||
"--user2", $user2 . '*' . trim($master_user),
|
||||
"--passfile2", $passfile2->filename,
|
||||
($dry eq "1" ? ('--dry') : ()),
|
||||
'--no-modulesversion',
|
||||
'--noreleasecheck'];
|
||||
|
||||
|
||||
7
data/Dockerfiles/dovecot/optimize-fts.sh
Normal file
7
data/Dockerfiles/dovecot/optimize-fts.sh
Normal file
@@ -0,0 +1,7 @@
|
||||
#!/bin/bash
|
||||
|
||||
if [[ "${SKIP_SOLR}" =~ ^([yY][eE][sS]|[yY])+$ && ! "${FLATCURVE_EXPERIMENTAL}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
|
||||
exit 0
|
||||
else
|
||||
doveadm fts optimize -A
|
||||
fi
|
||||
@@ -3,11 +3,10 @@
|
||||
import smtplib
|
||||
import os
|
||||
import sys
|
||||
import mysql.connector
|
||||
import MySQLdb
|
||||
from email.mime.multipart import MIMEMultipart
|
||||
from email.mime.text import MIMEText
|
||||
from email.utils import COMMASPACE, formatdate
|
||||
import cgi
|
||||
import jinja2
|
||||
from jinja2 import Template
|
||||
import json
|
||||
@@ -50,7 +49,7 @@ try:
|
||||
def query_mysql(query, headers = True, update = False):
|
||||
while True:
|
||||
try:
|
||||
cnx = mysql.connector.connect(unix_socket = '/var/run/mysqld/mysqld.sock', user=os.environ.get('DBUSER'), passwd=os.environ.get('DBPASS'), database=os.environ.get('DBNAME'), charset="utf8mb4", collation="utf8mb4_general_ci")
|
||||
cnx = MySQLdb.connect(user=os.environ.get('DBUSER'), password=os.environ.get('DBPASS'), database=os.environ.get('DBNAME'), charset="utf8mb4", collation="utf8mb4_general_ci")
|
||||
except Exception as ex:
|
||||
print('%s - trying again...' % (ex))
|
||||
time.sleep(3)
|
||||
|
||||
@@ -55,7 +55,7 @@ try:
|
||||
msg.attach(text_part)
|
||||
msg.attach(html_part)
|
||||
msg['To'] = username
|
||||
p = Popen(['/usr/lib/dovecot/dovecot-lda', '-d', username, '-o', '"plugin/quota=maildir:User quota:noenforcing"'], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
|
||||
p = Popen(['/usr/libexec/dovecot/dovecot-lda', '-d', username, '-o', '"plugin/quota=maildir:User quota:noenforcing"'], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
|
||||
p.communicate(input=bytes(msg.as_string(), 'utf-8'))
|
||||
|
||||
domain = username.split("@")[-1]
|
||||
|
||||
@@ -11,7 +11,7 @@ fi
|
||||
|
||||
# Is replication active?
|
||||
# grep on file is less expensive than doveconf
|
||||
if ! grep -qi mail_replica /etc/dovecot/dovecot.conf; then
|
||||
if [ -n ${MAILCOW_REPLICA_IP} ]; then
|
||||
${REDIS_CMDLINE} SET DOVECOT_REPL_HEALTH 1 > /dev/null
|
||||
exit
|
||||
fi
|
||||
|
||||
@@ -3,8 +3,8 @@ FILE=/tmp/mail$$
|
||||
cat > $FILE
|
||||
trap "/bin/rm -f $FILE" 0 1 2 3 13 15
|
||||
|
||||
cat ${FILE} | /usr/bin/curl -H "Flag: 11" -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/fuzzydel
|
||||
cat ${FILE} | /usr/bin/curl -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/learnham
|
||||
cat ${FILE} | /usr/bin/curl -H "Flag: 13" -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/fuzzyadd
|
||||
cat ${FILE} | /usr/bin/curl -H "Flag: 11" -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd.${COMPOSE_PROJECT_NAME}_mailcow-network/fuzzydel
|
||||
cat ${FILE} | /usr/bin/curl -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd.${COMPOSE_PROJECT_NAME}_mailcow-network/learnham
|
||||
cat ${FILE} | /usr/bin/curl -H "Flag: 13" -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd.${COMPOSE_PROJECT_NAME}_mailcow-network/fuzzyadd
|
||||
|
||||
exit 0
|
||||
|
||||
@@ -3,8 +3,8 @@ FILE=/tmp/mail$$
|
||||
cat > $FILE
|
||||
trap "/bin/rm -f $FILE" 0 1 2 3 13 15
|
||||
|
||||
cat ${FILE} | /usr/bin/curl -H "Flag: 13" -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/fuzzydel
|
||||
cat ${FILE} | /usr/bin/curl -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/learnspam
|
||||
cat ${FILE} | /usr/bin/curl -H "Flag: 11" -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/fuzzyadd
|
||||
cat ${FILE} | /usr/bin/curl -H "Flag: 13" -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd.${COMPOSE_PROJECT_NAME}_mailcow-network/fuzzydel
|
||||
cat ${FILE} | /usr/bin/curl -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd.${COMPOSE_PROJECT_NAME}_mailcow-network/learnspam
|
||||
cat ${FILE} | /usr/bin/curl -H "Flag: 11" -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd.${COMPOSE_PROJECT_NAME}_mailcow-network/fuzzyadd
|
||||
|
||||
exit 0
|
||||
|
||||
@@ -11,21 +11,25 @@ else
|
||||
fi
|
||||
|
||||
# Deploy
|
||||
curl --connect-timeout 15 --retry 10 --max-time 30 http://www.spamassassin.heinlein-support.de/$(dig txt 1.4.3.spamassassin.heinlein-support.de +short | tr -d '"' | tr -dc '0-9').tar.gz --output /tmp/sa-rules-heinlein.tar.gz
|
||||
if gzip -t /tmp/sa-rules-heinlein.tar.gz; then
|
||||
tar xfvz /tmp/sa-rules-heinlein.tar.gz -C /tmp/sa-rules-heinlein
|
||||
cat /tmp/sa-rules-heinlein/*cf > /etc/rspamd/custom/sa-rules
|
||||
if curl --connect-timeout 15 --retry 10 --max-time 30 https://www.spamassassin.heinlein-support.de/$(dig txt 1.4.3.spamassassin.heinlein-support.de +short | tr -d '"' | tr -dc '0-9').tar.gz --output /tmp/sa-rules-heinlein.tar.gz; then
|
||||
if gzip -t /tmp/sa-rules-heinlein.tar.gz; then
|
||||
tar xfvz /tmp/sa-rules-heinlein.tar.gz -C /tmp/sa-rules-heinlein
|
||||
cat /tmp/sa-rules-heinlein/*cf > /etc/rspamd/custom/sa-rules
|
||||
fi
|
||||
else
|
||||
echo "Failed to download SA rules. Exiting."
|
||||
exit 0 # Must be 0 otherwise dovecot would not start at all
|
||||
fi
|
||||
|
||||
sed -i -e 's/\([^\\]\)\$\([^\/]\)/\1\\$\2/g' /etc/rspamd/custom/sa-rules
|
||||
|
||||
if [[ "$(cat /etc/rspamd/custom/sa-rules | md5sum | cut -d' ' -f1)" != "${HASH_SA_RULES}" ]]; then
|
||||
CONTAINER_NAME=rspamd-mailcow
|
||||
CONTAINER_ID=$(curl --silent --insecure https://dockerapi/containers/json | \
|
||||
CONTAINER_ID=$(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | \
|
||||
jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | \
|
||||
jq -rc "select( .name | tostring | contains(\"${CONTAINER_NAME}\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id")
|
||||
if [[ ! -z ${CONTAINER_ID} ]]; then
|
||||
curl --silent --insecure -XPOST --connect-timeout 15 --max-time 120 https://dockerapi/containers/${CONTAINER_ID}/restart
|
||||
curl --silent --insecure -XPOST --connect-timeout 15 --max-time 120 https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${CONTAINER_ID}/restart
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
@@ -13,6 +13,10 @@ autostart=true
|
||||
|
||||
[program:dovecot]
|
||||
command=/usr/sbin/dovecot -F
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
autorestart=true
|
||||
|
||||
[eventlistener:processes]
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
@version: 3.28
|
||||
@version: 4.5
|
||||
@include "scl.conf"
|
||||
options {
|
||||
chain_hostnames(off);
|
||||
@@ -6,11 +6,12 @@ options {
|
||||
use_dns(no);
|
||||
use_fqdn(no);
|
||||
owner("root"); group("adm"); perm(0640);
|
||||
stats_freq(0);
|
||||
stats(freq(0));
|
||||
keep_timestamp(no);
|
||||
bad_hostname("^gconfd$");
|
||||
};
|
||||
source s_src {
|
||||
unix-stream("/dev/log");
|
||||
source s_dgram {
|
||||
unix-dgram("/dev/log");
|
||||
internal();
|
||||
};
|
||||
destination d_stdout { pipe("/dev/stdout"); };
|
||||
@@ -36,7 +37,7 @@ filter f_replica {
|
||||
not match("Error: sync: Unknown user in remote" value("MESSAGE"));
|
||||
};
|
||||
log {
|
||||
source(s_src);
|
||||
source(s_dgram);
|
||||
filter(f_replica);
|
||||
destination(d_stdout);
|
||||
filter(f_mail);
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
@version: 3.28
|
||||
@version: 4.5
|
||||
@include "scl.conf"
|
||||
options {
|
||||
chain_hostnames(off);
|
||||
@@ -6,11 +6,12 @@ options {
|
||||
use_dns(no);
|
||||
use_fqdn(no);
|
||||
owner("root"); group("adm"); perm(0640);
|
||||
stats_freq(0);
|
||||
stats(freq(0));
|
||||
keep_timestamp(no);
|
||||
bad_hostname("^gconfd$");
|
||||
};
|
||||
source s_src {
|
||||
unix-stream("/dev/log");
|
||||
source s_dgram {
|
||||
unix-dgram("/dev/log");
|
||||
internal();
|
||||
};
|
||||
destination d_stdout { pipe("/dev/stdout"); };
|
||||
@@ -36,7 +37,7 @@ filter f_replica {
|
||||
not match("Error: sync: Unknown user in remote" value("MESSAGE"));
|
||||
};
|
||||
log {
|
||||
source(s_src);
|
||||
source(s_dgram);
|
||||
filter(f_replica);
|
||||
destination(d_stdout);
|
||||
filter(f_mail);
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
FROM alpine:3.17
|
||||
LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
|
||||
FROM alpine:3.20
|
||||
|
||||
LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
ARG PIP_BREAK_SYSTEM_PACKAGES=1
|
||||
ENV XTABLES_LIBDIR /usr/lib/xtables
|
||||
ENV PYTHON_IPTABLES_XTABLES_VERSION 12
|
||||
ENV IPTABLES_LIBDIR /usr/lib
|
||||
@@ -12,12 +16,16 @@ RUN apk add --virtual .build-deps \
|
||||
openssl-dev \
|
||||
&& apk add -U python3 \
|
||||
iptables \
|
||||
iptables-dev \
|
||||
ip6tables \
|
||||
xtables-addons \
|
||||
nftables \
|
||||
tzdata \
|
||||
py3-pip \
|
||||
py3-nftables \
|
||||
musl-dev \
|
||||
&& pip3 install --ignore-installed --upgrade pip \
|
||||
jsonschema \
|
||||
python-iptables \
|
||||
redis \
|
||||
ipaddress \
|
||||
@@ -26,5 +34,10 @@ RUN apk add --virtual .build-deps \
|
||||
|
||||
# && pip3 install --upgrade pip python-iptables==0.13.0 redis ipaddress dnspython \
|
||||
|
||||
COPY server.py /
|
||||
CMD ["python3", "-u", "/server.py"]
|
||||
COPY modules /app/modules
|
||||
COPY main.py /app/
|
||||
COPY ./docker-entrypoint.sh /app/
|
||||
|
||||
RUN chmod +x /app/docker-entrypoint.sh
|
||||
|
||||
CMD ["/bin/sh", "-c", "/app/docker-entrypoint.sh"]
|
||||
29
data/Dockerfiles/netfilter/docker-entrypoint.sh
Executable file
29
data/Dockerfiles/netfilter/docker-entrypoint.sh
Executable file
@@ -0,0 +1,29 @@
|
||||
#!/bin/sh
|
||||
|
||||
backend=iptables
|
||||
|
||||
nft list table ip filter &>/dev/null
|
||||
nftables_found=$?
|
||||
|
||||
iptables -L -n &>/dev/null
|
||||
iptables_found=$?
|
||||
|
||||
if [ $nftables_found -lt $iptables_found ]; then
|
||||
backend=nftables
|
||||
fi
|
||||
|
||||
if [ $nftables_found -gt $iptables_found ]; then
|
||||
backend=iptables
|
||||
fi
|
||||
|
||||
if [ $nftables_found -eq 0 ] && [ $nftables_found -eq $iptables_found ]; then
|
||||
nftables_lines=$(nft list ruleset | wc -l)
|
||||
iptables_lines=$(iptables-save | wc -l)
|
||||
if [ $nftables_lines -gt $iptables_lines ]; then
|
||||
backend=nftables
|
||||
else
|
||||
backend=iptables
|
||||
fi
|
||||
fi
|
||||
|
||||
exec python -u /app/main.py $backend
|
||||
503
data/Dockerfiles/netfilter/main.py
Normal file
503
data/Dockerfiles/netfilter/main.py
Normal file
@@ -0,0 +1,503 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import re
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import atexit
|
||||
import signal
|
||||
import ipaddress
|
||||
from collections import Counter
|
||||
from random import randint
|
||||
from threading import Thread
|
||||
from threading import Lock
|
||||
import redis
|
||||
import json
|
||||
import dns.resolver
|
||||
import dns.exception
|
||||
import uuid
|
||||
from modules.Logger import Logger
|
||||
from modules.IPTables import IPTables
|
||||
from modules.NFTables import NFTables
|
||||
|
||||
|
||||
# globals
|
||||
WHITELIST = []
|
||||
BLACKLIST= []
|
||||
bans = {}
|
||||
quit_now = False
|
||||
exit_code = 0
|
||||
lock = Lock()
|
||||
chain_name = "MAILCOW"
|
||||
r = None
|
||||
pubsub = None
|
||||
clear_before_quit = False
|
||||
|
||||
|
||||
def refreshF2boptions():
|
||||
global f2boptions
|
||||
global quit_now
|
||||
global exit_code
|
||||
|
||||
f2boptions = {}
|
||||
|
||||
if not r.get('F2B_OPTIONS'):
|
||||
f2boptions['ban_time'] = r.get('F2B_BAN_TIME')
|
||||
f2boptions['max_ban_time'] = r.get('F2B_MAX_BAN_TIME')
|
||||
f2boptions['ban_time_increment'] = r.get('F2B_BAN_TIME_INCREMENT')
|
||||
f2boptions['max_attempts'] = r.get('F2B_MAX_ATTEMPTS')
|
||||
f2boptions['retry_window'] = r.get('F2B_RETRY_WINDOW')
|
||||
f2boptions['netban_ipv4'] = r.get('F2B_NETBAN_IPV4')
|
||||
f2boptions['netban_ipv6'] = r.get('F2B_NETBAN_IPV6')
|
||||
else:
|
||||
try:
|
||||
f2boptions = json.loads(r.get('F2B_OPTIONS'))
|
||||
except ValueError:
|
||||
logger.logCrit('Error loading F2B options: F2B_OPTIONS is not json')
|
||||
quit_now = True
|
||||
exit_code = 2
|
||||
|
||||
verifyF2boptions(f2boptions)
|
||||
r.set('F2B_OPTIONS', json.dumps(f2boptions, ensure_ascii=False))
|
||||
|
||||
def verifyF2boptions(f2boptions):
|
||||
verifyF2boption(f2boptions,'ban_time', 1800)
|
||||
verifyF2boption(f2boptions,'max_ban_time', 10000)
|
||||
verifyF2boption(f2boptions,'ban_time_increment', True)
|
||||
verifyF2boption(f2boptions,'max_attempts', 10)
|
||||
verifyF2boption(f2boptions,'retry_window', 600)
|
||||
verifyF2boption(f2boptions,'netban_ipv4', 32)
|
||||
verifyF2boption(f2boptions,'netban_ipv6', 128)
|
||||
verifyF2boption(f2boptions,'banlist_id', str(uuid.uuid4()))
|
||||
verifyF2boption(f2boptions,'manage_external', 0)
|
||||
|
||||
def verifyF2boption(f2boptions, f2boption, f2bdefault):
|
||||
f2boptions[f2boption] = f2boptions[f2boption] if f2boption in f2boptions and f2boptions[f2boption] is not None else f2bdefault
|
||||
|
||||
def refreshF2bregex():
|
||||
global f2bregex
|
||||
global quit_now
|
||||
global exit_code
|
||||
if not r.get('F2B_REGEX'):
|
||||
f2bregex = {}
|
||||
f2bregex[1] = r'mailcow UI: Invalid password for .+ by ([0-9a-f\.:]+)'
|
||||
f2bregex[2] = r'Rspamd UI: Invalid password by ([0-9a-f\.:]+)'
|
||||
f2bregex[3] = r'warning: .*\[([0-9a-f\.:]+)\]: SASL .+ authentication failed: (?!.*Connection lost to authentication server).+'
|
||||
f2bregex[4] = r'warning: non-SMTP command from .*\[([0-9a-f\.:]+)]:.+'
|
||||
f2bregex[5] = r'NOQUEUE: reject: RCPT from \[([0-9a-f\.:]+)].+Protocol error.+'
|
||||
f2bregex[6] = r'-login: Disconnected.+ \(auth failed, .+\): user=.*, method=.+, rip=([0-9a-f\.:]+),'
|
||||
f2bregex[7] = r'-login: Aborted login.+ \(auth failed .+\): user=.+, rip=([0-9a-f\.:]+), lip.+'
|
||||
f2bregex[8] = r'-login: Aborted login.+ \(tried to use disallowed .+\): user=.+, rip=([0-9a-f\.:]+), lip.+'
|
||||
f2bregex[9] = r'SOGo.+ Login from \'([0-9a-f\.:]+)\' for user .+ might not have worked'
|
||||
f2bregex[10] = r'([0-9a-f\.:]+) \"GET \/SOGo\/.* HTTP.+\" 403 .+'
|
||||
r.set('F2B_REGEX', json.dumps(f2bregex, ensure_ascii=False))
|
||||
else:
|
||||
try:
|
||||
f2bregex = {}
|
||||
f2bregex = json.loads(r.get('F2B_REGEX'))
|
||||
except ValueError:
|
||||
logger.logCrit('Error loading F2B options: F2B_REGEX is not json')
|
||||
quit_now = True
|
||||
exit_code = 2
|
||||
|
||||
def get_ip(address):
|
||||
ip = ipaddress.ip_address(address)
|
||||
if type(ip) is ipaddress.IPv6Address and ip.ipv4_mapped:
|
||||
ip = ip.ipv4_mapped
|
||||
if ip.is_private or ip.is_loopback:
|
||||
return False
|
||||
|
||||
return ip
|
||||
|
||||
def ban(address):
|
||||
global f2boptions
|
||||
global lock
|
||||
|
||||
refreshF2boptions()
|
||||
MAX_ATTEMPTS = int(f2boptions['max_attempts'])
|
||||
RETRY_WINDOW = int(f2boptions['retry_window'])
|
||||
NETBAN_IPV4 = '/' + str(f2boptions['netban_ipv4'])
|
||||
NETBAN_IPV6 = '/' + str(f2boptions['netban_ipv6'])
|
||||
|
||||
ip = get_ip(address)
|
||||
if not ip: return
|
||||
address = str(ip)
|
||||
self_network = ipaddress.ip_network(address)
|
||||
|
||||
with lock:
|
||||
temp_whitelist = set(WHITELIST)
|
||||
if temp_whitelist:
|
||||
for wl_key in temp_whitelist:
|
||||
wl_net = ipaddress.ip_network(wl_key, False)
|
||||
if wl_net.overlaps(self_network):
|
||||
logger.logInfo('Address %s is whitelisted by rule %s' % (self_network, wl_net))
|
||||
return
|
||||
|
||||
net = ipaddress.ip_network((address + (NETBAN_IPV4 if type(ip) is ipaddress.IPv4Address else NETBAN_IPV6)), strict=False)
|
||||
net = str(net)
|
||||
|
||||
if not net in bans:
|
||||
bans[net] = {'attempts': 0, 'last_attempt': 0, 'ban_counter': 0}
|
||||
|
||||
current_attempt = time.time()
|
||||
if current_attempt - bans[net]['last_attempt'] > RETRY_WINDOW:
|
||||
bans[net]['attempts'] = 0
|
||||
|
||||
bans[net]['attempts'] += 1
|
||||
bans[net]['last_attempt'] = current_attempt
|
||||
|
||||
if bans[net]['attempts'] >= MAX_ATTEMPTS:
|
||||
cur_time = int(round(time.time()))
|
||||
NET_BAN_TIME = calcNetBanTime(bans[net]['ban_counter'])
|
||||
logger.logCrit('Banning %s for %d minutes' % (net, NET_BAN_TIME / 60 ))
|
||||
if type(ip) is ipaddress.IPv4Address and int(f2boptions['manage_external']) != 1:
|
||||
with lock:
|
||||
tables.banIPv4(net)
|
||||
elif int(f2boptions['manage_external']) != 1:
|
||||
with lock:
|
||||
tables.banIPv6(net)
|
||||
|
||||
r.hset('F2B_ACTIVE_BANS', '%s' % net, cur_time + NET_BAN_TIME)
|
||||
else:
|
||||
logger.logWarn('%d more attempts in the next %d seconds until %s is banned' % (MAX_ATTEMPTS - bans[net]['attempts'], RETRY_WINDOW, net))
|
||||
|
||||
def unban(net):
|
||||
global lock
|
||||
|
||||
if not net in bans:
|
||||
logger.logInfo('%s is not banned, skipping unban and deleting from queue (if any)' % net)
|
||||
r.hdel('F2B_QUEUE_UNBAN', '%s' % net)
|
||||
return
|
||||
|
||||
logger.logInfo('Unbanning %s' % net)
|
||||
if type(ipaddress.ip_network(net)) is ipaddress.IPv4Network:
|
||||
with lock:
|
||||
tables.unbanIPv4(net)
|
||||
else:
|
||||
with lock:
|
||||
tables.unbanIPv6(net)
|
||||
|
||||
r.hdel('F2B_ACTIVE_BANS', '%s' % net)
|
||||
r.hdel('F2B_QUEUE_UNBAN', '%s' % net)
|
||||
if net in bans:
|
||||
bans[net]['attempts'] = 0
|
||||
bans[net]['ban_counter'] += 1
|
||||
|
||||
def permBan(net, unban=False):
|
||||
global f2boptions
|
||||
global lock
|
||||
|
||||
is_unbanned = False
|
||||
is_banned = False
|
||||
if type(ipaddress.ip_network(net, strict=False)) is ipaddress.IPv4Network:
|
||||
with lock:
|
||||
if unban:
|
||||
is_unbanned = tables.unbanIPv4(net)
|
||||
elif int(f2boptions['manage_external']) != 1:
|
||||
is_banned = tables.banIPv4(net)
|
||||
else:
|
||||
with lock:
|
||||
if unban:
|
||||
is_unbanned = tables.unbanIPv6(net)
|
||||
elif int(f2boptions['manage_external']) != 1:
|
||||
is_banned = tables.banIPv6(net)
|
||||
|
||||
|
||||
if is_unbanned:
|
||||
r.hdel('F2B_PERM_BANS', '%s' % net)
|
||||
logger.logCrit('Removed host/network %s from blacklist' % net)
|
||||
elif is_banned:
|
||||
r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time())))
|
||||
logger.logCrit('Added host/network %s to blacklist' % net)
|
||||
|
||||
def clear():
|
||||
global lock
|
||||
logger.logInfo('Clearing all bans')
|
||||
for net in bans.copy():
|
||||
unban(net)
|
||||
with lock:
|
||||
tables.clearIPv4Table()
|
||||
tables.clearIPv6Table()
|
||||
try:
|
||||
if r is not None:
|
||||
r.delete('F2B_ACTIVE_BANS')
|
||||
r.delete('F2B_PERM_BANS')
|
||||
except Exception as ex:
|
||||
logger.logWarn('Error clearing redis keys F2B_ACTIVE_BANS and F2B_PERM_BANS: %s' % ex)
|
||||
|
||||
def watch():
|
||||
global pubsub
|
||||
global quit_now
|
||||
global exit_code
|
||||
|
||||
logger.logInfo('Watching Redis channel F2B_CHANNEL')
|
||||
pubsub.subscribe('F2B_CHANNEL')
|
||||
|
||||
while not quit_now:
|
||||
try:
|
||||
for item in pubsub.listen():
|
||||
refreshF2bregex()
|
||||
for rule_id, rule_regex in f2bregex.items():
|
||||
if item['data'] and item['type'] == 'message':
|
||||
try:
|
||||
result = re.search(rule_regex, item['data'])
|
||||
except re.error:
|
||||
result = False
|
||||
if result:
|
||||
addr = result.group(1)
|
||||
ip = ipaddress.ip_address(addr)
|
||||
if ip.is_private or ip.is_loopback:
|
||||
continue
|
||||
logger.logWarn('%s matched rule id %s (%s)' % (addr, rule_id, item['data']))
|
||||
ban(addr)
|
||||
except Exception as ex:
|
||||
logger.logWarn('Error reading log line from pubsub: %s' % ex)
|
||||
pubsub = None
|
||||
quit_now = True
|
||||
exit_code = 2
|
||||
|
||||
def snat4(snat_target):
|
||||
global lock
|
||||
global quit_now
|
||||
|
||||
while not quit_now:
|
||||
time.sleep(10)
|
||||
with lock:
|
||||
tables.snat4(snat_target, os.getenv('IPV4_NETWORK', '172.22.1') + '.0/24')
|
||||
|
||||
def snat6(snat_target):
|
||||
global lock
|
||||
global quit_now
|
||||
|
||||
while not quit_now:
|
||||
time.sleep(10)
|
||||
with lock:
|
||||
tables.snat6(snat_target, os.getenv('IPV6_NETWORK', 'fd4d:6169:6c63:6f77::/64'))
|
||||
|
||||
def autopurge():
|
||||
global f2boptions
|
||||
|
||||
while not quit_now:
|
||||
time.sleep(10)
|
||||
refreshF2boptions()
|
||||
MAX_ATTEMPTS = int(f2boptions['max_attempts'])
|
||||
QUEUE_UNBAN = r.hgetall('F2B_QUEUE_UNBAN')
|
||||
if QUEUE_UNBAN:
|
||||
for net in QUEUE_UNBAN:
|
||||
unban(str(net))
|
||||
for net in bans.copy():
|
||||
if bans[net]['attempts'] >= MAX_ATTEMPTS:
|
||||
NET_BAN_TIME = calcNetBanTime(bans[net]['ban_counter'])
|
||||
TIME_SINCE_LAST_ATTEMPT = time.time() - bans[net]['last_attempt']
|
||||
if TIME_SINCE_LAST_ATTEMPT > NET_BAN_TIME:
|
||||
unban(net)
|
||||
|
||||
def mailcowChainOrder():
|
||||
global lock
|
||||
global quit_now
|
||||
global exit_code
|
||||
while not quit_now:
|
||||
time.sleep(10)
|
||||
with lock:
|
||||
quit_now, exit_code = tables.checkIPv4ChainOrder()
|
||||
if quit_now: return
|
||||
quit_now, exit_code = tables.checkIPv6ChainOrder()
|
||||
|
||||
def calcNetBanTime(ban_counter):
|
||||
global f2boptions
|
||||
|
||||
BAN_TIME = int(f2boptions['ban_time'])
|
||||
MAX_BAN_TIME = int(f2boptions['max_ban_time'])
|
||||
BAN_TIME_INCREMENT = bool(f2boptions['ban_time_increment'])
|
||||
NET_BAN_TIME = BAN_TIME if not BAN_TIME_INCREMENT else BAN_TIME * 2 ** ban_counter
|
||||
NET_BAN_TIME = max([BAN_TIME, min([NET_BAN_TIME, MAX_BAN_TIME])])
|
||||
return NET_BAN_TIME
|
||||
|
||||
def isIpNetwork(address):
|
||||
try:
|
||||
ipaddress.ip_network(address, False)
|
||||
except ValueError:
|
||||
return False
|
||||
return True
|
||||
|
||||
def genNetworkList(list):
|
||||
resolver = dns.resolver.Resolver()
|
||||
hostnames = []
|
||||
networks = []
|
||||
for key in list:
|
||||
if isIpNetwork(key):
|
||||
networks.append(key)
|
||||
else:
|
||||
hostnames.append(key)
|
||||
for hostname in hostnames:
|
||||
hostname_ips = []
|
||||
for rdtype in ['A', 'AAAA']:
|
||||
try:
|
||||
answer = resolver.resolve(qname=hostname, rdtype=rdtype, lifetime=3)
|
||||
except dns.exception.Timeout:
|
||||
logger.logInfo('Hostname %s timedout on resolve' % hostname)
|
||||
break
|
||||
except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
|
||||
continue
|
||||
except dns.exception.DNSException as dnsexception:
|
||||
logger.logInfo('%s' % dnsexception)
|
||||
continue
|
||||
for rdata in answer:
|
||||
hostname_ips.append(rdata.to_text())
|
||||
networks.extend(hostname_ips)
|
||||
return set(networks)
|
||||
|
||||
def whitelistUpdate():
|
||||
global lock
|
||||
global quit_now
|
||||
global WHITELIST
|
||||
while not quit_now:
|
||||
start_time = time.time()
|
||||
list = r.hgetall('F2B_WHITELIST')
|
||||
new_whitelist = []
|
||||
if list:
|
||||
new_whitelist = genNetworkList(list)
|
||||
with lock:
|
||||
if Counter(new_whitelist) != Counter(WHITELIST):
|
||||
WHITELIST = new_whitelist
|
||||
logger.logInfo('Whitelist was changed, it has %s entries' % len(WHITELIST))
|
||||
time.sleep(60.0 - ((time.time() - start_time) % 60.0))
|
||||
|
||||
def blacklistUpdate():
|
||||
global quit_now
|
||||
global BLACKLIST
|
||||
while not quit_now:
|
||||
start_time = time.time()
|
||||
list = r.hgetall('F2B_BLACKLIST')
|
||||
new_blacklist = []
|
||||
if list:
|
||||
new_blacklist = genNetworkList(list)
|
||||
if Counter(new_blacklist) != Counter(BLACKLIST):
|
||||
addban = set(new_blacklist).difference(BLACKLIST)
|
||||
delban = set(BLACKLIST).difference(new_blacklist)
|
||||
BLACKLIST = new_blacklist
|
||||
logger.logInfo('Blacklist was changed, it has %s entries' % len(BLACKLIST))
|
||||
if addban:
|
||||
for net in addban:
|
||||
permBan(net=net)
|
||||
if delban:
|
||||
for net in delban:
|
||||
permBan(net=net, unban=True)
|
||||
time.sleep(60.0 - ((time.time() - start_time) % 60.0))
|
||||
|
||||
def sigterm_quit(signum, frame):
|
||||
global clear_before_quit
|
||||
clear_before_quit = True
|
||||
sys.exit(exit_code)
|
||||
|
||||
def berfore_quit():
|
||||
if clear_before_quit:
|
||||
clear()
|
||||
if pubsub is not None:
|
||||
pubsub.unsubscribe()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
atexit.register(berfore_quit)
|
||||
signal.signal(signal.SIGTERM, sigterm_quit)
|
||||
|
||||
# init Logger
|
||||
logger = Logger()
|
||||
|
||||
# init backend
|
||||
backend = sys.argv[1]
|
||||
if backend == "nftables":
|
||||
logger.logInfo('Using NFTables backend')
|
||||
tables = NFTables(chain_name, logger)
|
||||
else:
|
||||
logger.logInfo('Using IPTables backend')
|
||||
tables = IPTables(chain_name, logger)
|
||||
|
||||
# In case a previous session was killed without cleanup
|
||||
clear()
|
||||
|
||||
# Reinit MAILCOW chain
|
||||
# Is called before threads start, no locking
|
||||
logger.logInfo("Initializing mailcow netfilter chain")
|
||||
tables.initChainIPv4()
|
||||
tables.initChainIPv6()
|
||||
|
||||
if os.getenv("DISABLE_NETFILTER_ISOLATION_RULE").lower() in ("y", "yes"):
|
||||
logger.logInfo(f"Skipping {chain_name} isolation")
|
||||
else:
|
||||
logger.logInfo(f"Setting {chain_name} isolation")
|
||||
tables.create_mailcow_isolation_rule("br-mailcow", [3306, 6379, 8983, 12345], os.getenv("MAILCOW_REPLICA_IP"))
|
||||
|
||||
# connect to redis
|
||||
while True:
|
||||
try:
|
||||
redis_slaveof_ip = os.getenv('REDIS_SLAVEOF_IP', '')
|
||||
redis_slaveof_port = os.getenv('REDIS_SLAVEOF_PORT', '')
|
||||
if "".__eq__(redis_slaveof_ip):
|
||||
r = redis.StrictRedis(host=os.getenv('IPV4_NETWORK', '172.22.1') + '.249', decode_responses=True, port=6379, db=0)
|
||||
else:
|
||||
r = redis.StrictRedis(host=redis_slaveof_ip, decode_responses=True, port=redis_slaveof_port, db=0)
|
||||
r.ping()
|
||||
pubsub = r.pubsub()
|
||||
except Exception as ex:
|
||||
print('%s - trying again in 3 seconds' % (ex))
|
||||
time.sleep(3)
|
||||
else:
|
||||
break
|
||||
logger.set_redis(r)
|
||||
|
||||
# rename fail2ban to netfilter
|
||||
if r.exists('F2B_LOG'):
|
||||
r.rename('F2B_LOG', 'NETFILTER_LOG')
|
||||
# clear bans in redis
|
||||
r.delete('F2B_ACTIVE_BANS')
|
||||
r.delete('F2B_PERM_BANS')
|
||||
|
||||
refreshF2boptions()
|
||||
|
||||
watch_thread = Thread(target=watch)
|
||||
watch_thread.daemon = True
|
||||
watch_thread.start()
|
||||
|
||||
if os.getenv('SNAT_TO_SOURCE') and os.getenv('SNAT_TO_SOURCE') != 'n':
|
||||
try:
|
||||
snat_ip = os.getenv('SNAT_TO_SOURCE')
|
||||
snat_ipo = ipaddress.ip_address(snat_ip)
|
||||
if type(snat_ipo) is ipaddress.IPv4Address:
|
||||
snat4_thread = Thread(target=snat4,args=(snat_ip,))
|
||||
snat4_thread.daemon = True
|
||||
snat4_thread.start()
|
||||
except ValueError:
|
||||
print(os.getenv('SNAT_TO_SOURCE') + ' is not a valid IPv4 address')
|
||||
|
||||
if os.getenv('SNAT6_TO_SOURCE') and os.getenv('SNAT6_TO_SOURCE') != 'n':
|
||||
try:
|
||||
snat_ip = os.getenv('SNAT6_TO_SOURCE')
|
||||
snat_ipo = ipaddress.ip_address(snat_ip)
|
||||
if type(snat_ipo) is ipaddress.IPv6Address:
|
||||
snat6_thread = Thread(target=snat6,args=(snat_ip,))
|
||||
snat6_thread.daemon = True
|
||||
snat6_thread.start()
|
||||
except ValueError:
|
||||
print(os.getenv('SNAT6_TO_SOURCE') + ' is not a valid IPv6 address')
|
||||
|
||||
autopurge_thread = Thread(target=autopurge)
|
||||
autopurge_thread.daemon = True
|
||||
autopurge_thread.start()
|
||||
|
||||
mailcowchainwatch_thread = Thread(target=mailcowChainOrder)
|
||||
mailcowchainwatch_thread.daemon = True
|
||||
mailcowchainwatch_thread.start()
|
||||
|
||||
blacklistupdate_thread = Thread(target=blacklistUpdate)
|
||||
blacklistupdate_thread.daemon = True
|
||||
blacklistupdate_thread.start()
|
||||
|
||||
whitelistupdate_thread = Thread(target=whitelistUpdate)
|
||||
whitelistupdate_thread.daemon = True
|
||||
whitelistupdate_thread.start()
|
||||
|
||||
while not quit_now:
|
||||
time.sleep(0.5)
|
||||
|
||||
sys.exit(exit_code)
|
||||
252
data/Dockerfiles/netfilter/modules/IPTables.py
Normal file
252
data/Dockerfiles/netfilter/modules/IPTables.py
Normal file
@@ -0,0 +1,252 @@
|
||||
import iptc
|
||||
import time
|
||||
import os
|
||||
|
||||
class IPTables:
|
||||
def __init__(self, chain_name, logger):
|
||||
self.chain_name = chain_name
|
||||
self.logger = logger
|
||||
|
||||
def initChainIPv4(self):
|
||||
if not iptc.Chain(iptc.Table(iptc.Table.FILTER), self.chain_name) in iptc.Table(iptc.Table.FILTER).chains:
|
||||
iptc.Table(iptc.Table.FILTER).create_chain(self.chain_name)
|
||||
for c in ['FORWARD', 'INPUT']:
|
||||
chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), c)
|
||||
rule = iptc.Rule()
|
||||
rule.src = '0.0.0.0/0'
|
||||
rule.dst = '0.0.0.0/0'
|
||||
target = iptc.Target(rule, self.chain_name)
|
||||
rule.target = target
|
||||
if rule not in chain.rules:
|
||||
chain.insert_rule(rule)
|
||||
|
||||
def initChainIPv6(self):
|
||||
if not iptc.Chain(iptc.Table6(iptc.Table6.FILTER), self.chain_name) in iptc.Table6(iptc.Table6.FILTER).chains:
|
||||
iptc.Table6(iptc.Table6.FILTER).create_chain(self.chain_name)
|
||||
for c in ['FORWARD', 'INPUT']:
|
||||
chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), c)
|
||||
rule = iptc.Rule6()
|
||||
rule.src = '::/0'
|
||||
rule.dst = '::/0'
|
||||
target = iptc.Target(rule, self.chain_name)
|
||||
rule.target = target
|
||||
if rule not in chain.rules:
|
||||
chain.insert_rule(rule)
|
||||
|
||||
def checkIPv4ChainOrder(self):
|
||||
filter_table = iptc.Table(iptc.Table.FILTER)
|
||||
filter_table.refresh()
|
||||
return self.checkChainOrder(filter_table)
|
||||
|
||||
def checkIPv6ChainOrder(self):
|
||||
filter_table = iptc.Table6(iptc.Table6.FILTER)
|
||||
filter_table.refresh()
|
||||
return self.checkChainOrder(filter_table)
|
||||
|
||||
def checkChainOrder(self, filter_table):
|
||||
err = False
|
||||
exit_code = None
|
||||
|
||||
forward_chain = iptc.Chain(filter_table, 'FORWARD')
|
||||
input_chain = iptc.Chain(filter_table, 'INPUT')
|
||||
for chain in [forward_chain, input_chain]:
|
||||
target_found = False
|
||||
for position, item in enumerate(chain.rules):
|
||||
if item.target.name == self.chain_name:
|
||||
target_found = True
|
||||
if position > 2:
|
||||
self.logger.logCrit('Error in %s chain: %s target not found, restarting container' % (chain.name, self.chain_name))
|
||||
err = True
|
||||
exit_code = 2
|
||||
if not target_found:
|
||||
self.logger.logCrit('Error in %s chain: %s target not found, restarting container' % (chain.name, self.chain_name))
|
||||
err = True
|
||||
exit_code = 2
|
||||
|
||||
return err, exit_code
|
||||
|
||||
def clearIPv4Table(self):
|
||||
self.clearTable(iptc.Table(iptc.Table.FILTER))
|
||||
|
||||
def clearIPv6Table(self):
|
||||
self.clearTable(iptc.Table6(iptc.Table6.FILTER))
|
||||
|
||||
def clearTable(self, filter_table):
|
||||
filter_table.autocommit = False
|
||||
forward_chain = iptc.Chain(filter_table, "FORWARD")
|
||||
input_chain = iptc.Chain(filter_table, "INPUT")
|
||||
mailcow_chain = iptc.Chain(filter_table, self.chain_name)
|
||||
if mailcow_chain in filter_table.chains:
|
||||
for rule in mailcow_chain.rules:
|
||||
mailcow_chain.delete_rule(rule)
|
||||
for rule in forward_chain.rules:
|
||||
if rule.target.name == self.chain_name:
|
||||
forward_chain.delete_rule(rule)
|
||||
for rule in input_chain.rules:
|
||||
if rule.target.name == self.chain_name:
|
||||
input_chain.delete_rule(rule)
|
||||
filter_table.delete_chain(self.chain_name)
|
||||
filter_table.commit()
|
||||
filter_table.refresh()
|
||||
filter_table.autocommit = True
|
||||
|
||||
def banIPv4(self, source):
|
||||
chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), self.chain_name)
|
||||
rule = iptc.Rule()
|
||||
rule.src = source
|
||||
target = iptc.Target(rule, "REJECT")
|
||||
rule.target = target
|
||||
if rule in chain.rules:
|
||||
return False
|
||||
chain.insert_rule(rule)
|
||||
return True
|
||||
|
||||
def banIPv6(self, source):
|
||||
chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), self.chain_name)
|
||||
rule = iptc.Rule6()
|
||||
rule.src = source
|
||||
target = iptc.Target(rule, "REJECT")
|
||||
rule.target = target
|
||||
if rule in chain.rules:
|
||||
return False
|
||||
chain.insert_rule(rule)
|
||||
return True
|
||||
|
||||
def unbanIPv4(self, source):
|
||||
chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), self.chain_name)
|
||||
rule = iptc.Rule()
|
||||
rule.src = source
|
||||
target = iptc.Target(rule, "REJECT")
|
||||
rule.target = target
|
||||
if rule not in chain.rules:
|
||||
return False
|
||||
chain.delete_rule(rule)
|
||||
return True
|
||||
|
||||
def unbanIPv6(self, source):
|
||||
chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), self.chain_name)
|
||||
rule = iptc.Rule6()
|
||||
rule.src = source
|
||||
target = iptc.Target(rule, "REJECT")
|
||||
rule.target = target
|
||||
if rule not in chain.rules:
|
||||
return False
|
||||
chain.delete_rule(rule)
|
||||
return True
|
||||
|
||||
def snat4(self, snat_target, source):
|
||||
try:
|
||||
table = iptc.Table('nat')
|
||||
table.refresh()
|
||||
chain = iptc.Chain(table, 'POSTROUTING')
|
||||
table.autocommit = False
|
||||
new_rule = self.getSnat4Rule(snat_target, source)
|
||||
|
||||
if not chain.rules:
|
||||
# if there are no rules in the chain, insert the new rule directly
|
||||
self.logger.logInfo(f'Added POSTROUTING rule for source network {new_rule.src} to SNAT target {snat_target}')
|
||||
chain.insert_rule(new_rule)
|
||||
else:
|
||||
for position, rule in enumerate(chain.rules):
|
||||
if not hasattr(rule.target, 'parameter'):
|
||||
continue
|
||||
match = all((
|
||||
new_rule.get_src() == rule.get_src(),
|
||||
new_rule.get_dst() == rule.get_dst(),
|
||||
new_rule.target.parameters == rule.target.parameters,
|
||||
new_rule.target.name == rule.target.name
|
||||
))
|
||||
if position == 0:
|
||||
if not match:
|
||||
self.logger.logInfo(f'Added POSTROUTING rule for source network {new_rule.src} to SNAT target {snat_target}')
|
||||
chain.insert_rule(new_rule)
|
||||
else:
|
||||
if match:
|
||||
self.logger.logInfo(f'Remove rule for source network {new_rule.src} to SNAT target {snat_target} from POSTROUTING chain at position {position}')
|
||||
chain.delete_rule(rule)
|
||||
|
||||
table.commit()
|
||||
table.autocommit = True
|
||||
return True
|
||||
except:
|
||||
self.logger.logCrit('Error running SNAT4, retrying...')
|
||||
return False
|
||||
|
||||
def snat6(self, snat_target, source):
|
||||
try:
|
||||
table = iptc.Table6('nat')
|
||||
table.refresh()
|
||||
chain = iptc.Chain(table, 'POSTROUTING')
|
||||
table.autocommit = False
|
||||
new_rule = self.getSnat6Rule(snat_target, source)
|
||||
|
||||
if new_rule not in chain.rules:
|
||||
self.logger.logInfo('Added POSTROUTING rule for source network %s to SNAT target %s' % (new_rule.src, snat_target))
|
||||
chain.insert_rule(new_rule)
|
||||
else:
|
||||
for position, item in enumerate(chain.rules):
|
||||
if item == new_rule:
|
||||
if position != 0:
|
||||
chain.delete_rule(new_rule)
|
||||
|
||||
table.commit()
|
||||
table.autocommit = True
|
||||
except:
|
||||
self.logger.logCrit('Error running SNAT6, retrying...')
|
||||
|
||||
|
||||
def getSnat4Rule(self, snat_target, source):
|
||||
rule = iptc.Rule()
|
||||
rule.src = source
|
||||
rule.dst = '!' + rule.src
|
||||
target = rule.create_target("SNAT")
|
||||
target.to_source = snat_target
|
||||
match = rule.create_match("comment")
|
||||
match.comment = f'{int(round(time.time()))}'
|
||||
return rule
|
||||
|
||||
def getSnat6Rule(self, snat_target, source):
|
||||
rule = iptc.Rule6()
|
||||
rule.src = source
|
||||
rule.dst = '!' + rule.src
|
||||
target = rule.create_target("SNAT")
|
||||
target.to_source = snat_target
|
||||
return rule
|
||||
|
||||
def create_mailcow_isolation_rule(self, _interface:str, _dports:list, _allow:str = ""):
|
||||
try:
|
||||
chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), self.chain_name)
|
||||
|
||||
# insert mailcow isolation rule
|
||||
rule = iptc.Rule()
|
||||
rule.in_interface = f'!{_interface}'
|
||||
rule.out_interface = _interface
|
||||
rule.protocol = 'tcp'
|
||||
rule.create_target("DROP")
|
||||
match = rule.create_match("multiport")
|
||||
match.dports = ','.join(map(str, _dports))
|
||||
|
||||
if rule in chain.rules:
|
||||
chain.delete_rule(rule)
|
||||
chain.insert_rule(rule, position=0)
|
||||
|
||||
# insert mailcow isolation exception rule
|
||||
if _allow != "":
|
||||
rule = iptc.Rule()
|
||||
rule.src = _allow
|
||||
rule.in_interface = f'!{_interface}'
|
||||
rule.out_interface = _interface
|
||||
rule.protocol = 'tcp'
|
||||
rule.create_target("ACCEPT")
|
||||
match = rule.create_match("multiport")
|
||||
match.dports = ','.join(map(str, _dports))
|
||||
|
||||
if rule in chain.rules:
|
||||
chain.delete_rule(rule)
|
||||
chain.insert_rule(rule, position=0)
|
||||
|
||||
|
||||
return True
|
||||
except Exception as e:
|
||||
self.logger.logCrit(f"Error adding {self.chain_name} isolation: {e}")
|
||||
return False
|
||||
30
data/Dockerfiles/netfilter/modules/Logger.py
Normal file
30
data/Dockerfiles/netfilter/modules/Logger.py
Normal file
@@ -0,0 +1,30 @@
|
||||
import time
|
||||
import json
|
||||
|
||||
class Logger:
  """Minimal logger that prints every message to stdout and, once a Redis
  client has been attached, mirrors structured entries to the
  NETFILTER_LOG list."""

  def __init__(self):
    # The Redis connection is attached later via set_redis(); until then
    # entries are only printed.
    self.r = None

  def set_redis(self, redis):
    """Attach the Redis client used to mirror log entries."""
    self.r = redis

  def log(self, priority, message):
    """Print `message` and push a {time, priority, message} JSON record
    onto the NETFILTER_LOG Redis list (best effort)."""
    entry = {}
    entry['time'] = int(round(time.time()))
    entry['priority'] = priority
    entry['message'] = message
    print(message)
    if self.r is None:
      return
    try:
      self.r.lpush('NETFILTER_LOG', json.dumps(entry, ensure_ascii=False))
    except Exception as ex:
      # Logging must never take the service down; report and move on.
      print('Failed logging to redis: %s' % (ex))

  def logWarn(self, message):
    """Log with priority 'warn'."""
    self.log('warn', message)

  def logCrit(self, message):
    """Log with priority 'crit'."""
    self.log('crit', message)

  def logInfo(self, message):
    """Log with priority 'info'."""
    self.log('info', message)
|
||||
659
data/Dockerfiles/netfilter/modules/NFTables.py
Normal file
659
data/Dockerfiles/netfilter/modules/NFTables.py
Normal file
@@ -0,0 +1,659 @@
|
||||
import nftables
|
||||
import ipaddress
|
||||
import os
|
||||
|
||||
class NFTables:
  """nftables backend for the mailcow netfilter container.

  Mirrors the IPTables class interface (ban/unban, SNAT, chain checks)
  but drives the kernel through libnftables JSON commands.
  """

  def __init__(self, chain_name, logger):
    # Name of the dedicated mailcow chain (e.g. "MAILCOW") and shared logger.
    self.chain_name = chain_name
    self.logger = logger

    self.nft = nftables.Nftables()
    # All commands/results are exchanged as JSON; handles are included in
    # output so rules can later be deleted by handle.
    self.nft.set_json_output(True)
    self.nft.set_handle_output(True)
    # Detected names of the existing base chains per family/table/hook.
    # An empty string means "no such base chain found (yet)".
    self.nft_chain_names = {'ip': {'filter': {'input': '', 'forward': ''}, 'nat': {'postrouting': ''} },
                            'ip6': {'filter': {'input': '', 'forward': ''}, 'nat': {'postrouting': ''} } }

    # Populate nft_chain_names from the live ruleset.
    self.search_current_chains()
|
||||
|
||||
  def initChainIPv4(self):
    # Create the mailcow chain and jump rules for IPv4.
    self.insert_mailcow_chains("ip")

  def initChainIPv6(self):
    # Create the mailcow chain and jump rules for IPv6.
    self.insert_mailcow_chains("ip6")

  def checkIPv4ChainOrder(self):
    # Verify the IPv4 mailcow jump rules are first; returns (err, exit_code).
    return self.checkChainOrder("ip")

  def checkIPv6ChainOrder(self):
    # Verify the IPv6 mailcow jump rules are first; returns (err, exit_code).
    return self.checkChainOrder("ip6")
|
||||
|
||||
  def checkChainOrder(self, filter_table):
    """Check that the mailcow jump rule is the FIRST rule of the input and
    forward chains of `filter_table` ('ip' or 'ip6').

    Returns (err, exit_code): (False, None) when everything is in order,
    (True, 2) when the jump rule is missing or not at position 0 — the
    caller restarts the container to repair the ruleset.
    """
    err = False
    exit_code = None

    for chain in ['input', 'forward']:
      # None: base chain unknown; False: jump rule missing; int: position.
      chain_position = self.check_mailcow_chains(filter_table, chain)
      if chain_position is None: continue

      if chain_position is False:
        self.logger.logCrit(f'MAILCOW target not found in {filter_table} {chain} table, restarting container to fix it...')
        err = True
        exit_code = 2

      # Note: False > 0 is False in Python, so the missing-rule case above
      # does not fall into this branch.
      if chain_position > 0:
        # Convert 0-based position to 1-based for the log message.
        chain_position += 1
        self.logger.logCrit(f'MAILCOW target is in position {chain_position} in the {filter_table} {chain} table, restarting container to fix it...')
        err = True
        exit_code = 2

    return err, exit_code
|
||||
|
||||
  def clearIPv4Table(self):
    # Remove all mailcow rules and the mailcow chain from the IPv4 filter table.
    self.clearTable("ip")

  def clearIPv6Table(self):
    # Remove all mailcow rules and the mailcow chain from the IPv6 filter table.
    self.clearTable("ip6")
|
||||
|
||||
  def clearTable(self, _family):
    """Tear down everything mailcow added to the `_family` filter table:
    flush the mailcow chain, delete the jump rules in the base input and
    forward chains, then delete the (now unreferenced) mailcow chain.

    All operations are batched into one libnftables transaction so the
    teardown is atomic.
    """
    is_empty_dict = True
    json_command = self.get_base_dict()
    chain_handle = self.get_chain_handle(_family, "filter", self.chain_name)
    # if no handle, the chain doesn't exists
    if chain_handle is not None:
      is_empty_dict = False
      # flush chain
      mailcow_chain = {'family': _family, 'table': 'filter', 'name': self.chain_name}
      flush_chain = {'flush': {'chain': mailcow_chain}}
      json_command["nftables"].append(flush_chain)

    # remove rule in forward chain
    # remove rule in input chain
    chains_family = [self.nft_chain_names[_family]['filter']['input'],
                     self.nft_chain_names[_family]['filter']['forward'] ]

    for chain_base in chains_family:
      # Empty string: base chain was never discovered for this family.
      if not chain_base: continue

      rules_handle = self.get_rules_handle(_family, "filter", chain_base)
      if rules_handle is not None:
        for r_handle in rules_handle:
          is_empty_dict = False
          mailcow_rule = {'family':_family,
                          'table': 'filter',
                          'chain': chain_base,
                          'handle': r_handle }
          delete_rules = {'delete': {'rule': mailcow_rule} }
          json_command["nftables"].append(delete_rules)

    # remove chain
    # after delete all rules referencing this chain
    if chain_handle is not None:
      mc_chain_handle = {'family':_family,
                        'table': 'filter',
                        'name': self.chain_name,
                        'handle': chain_handle }
      delete_chain = {'delete': {'chain': mc_chain_handle} }
      json_command["nftables"].append(delete_chain)

    # Only execute when at least one delete/flush was queued.
    if is_empty_dict == False:
      if self.nft_exec_dict(json_command):
        self.logger.logInfo(f"Clear completed: {_family}")
|
||||
|
||||
  def banIPv4(self, source):
    # Insert a drop rule for `source` into the IPv4 mailcow chain.
    ban_dict = self.get_ban_ip_dict(source, "ip")
    return self.nft_exec_dict(ban_dict)

  def banIPv6(self, source):
    # Insert a drop rule for `source` into the IPv6 mailcow chain.
    ban_dict = self.get_ban_ip_dict(source, "ip6")
    return self.nft_exec_dict(ban_dict)

  def unbanIPv4(self, source):
    # Delete the drop rule for `source`; False when no such rule exists.
    unban_dict = self.get_unban_ip_dict(source, "ip")
    if not unban_dict:
      return False
    return self.nft_exec_dict(unban_dict)

  def unbanIPv6(self, source):
    # Delete the drop rule for `source`; False when no such rule exists.
    unban_dict = self.get_unban_ip_dict(source, "ip6")
    if not unban_dict:
      return False
    return self.nft_exec_dict(unban_dict)
|
||||
|
||||
  def snat4(self, snat_target, source):
    # Maintain the IPv4 postrouting SNAT rule (see snat_rule).
    self.snat_rule("ip", snat_target, source)

  def snat6(self, snat_target, source):
    # Maintain the IPv6 postrouting SNAT rule (see snat_rule).
    self.snat_rule("ip6", snat_target, source)
|
||||
|
||||
|
||||
  def nft_exec_dict(self, query: dict):
    """Run a libnftables JSON command.

    Returns False on error or empty query, True for commands that produce
    no output (most mutations), or the parsed JSON output for list-style
    commands.
    """
    if not query: return False

    rc, output, error = self.nft.json_cmd(query)
    if rc != 0:
      #self.logger.logCrit(f"Nftables Error: {error}")
      return False

    # Prevent returning False or empty string on commands that do not produce output
    if rc == 0 and len(output) == 0:
      return True

    return output

  def get_base_dict(self):
    # Skeleton for every libnftables JSON command batch.
    return {'nftables': [{ 'metainfo': { 'json_schema_version': 1} } ] }
|
||||
|
||||
  def search_current_chains(self):
    """Discover the kernel's base chains and record their names in
    self.nft_chain_names.

    For each family/table/hook combination, the base chain with the LOWEST
    priority wins — that is the first chain a packet traverses, which is
    where mailcow's jump rules must live.
    """
    # Tracks the lowest priority seen so far per family/table/hook.
    nft_chain_priority = {'ip': {'filter': {'input': None, 'forward': None}, 'nat': {'postrouting': None} },
                          'ip6': {'filter': {'input': None, 'forward': None}, 'nat': {'postrouting': None} } }

    # Command: 'nft list chains'
    _list = {'list' : {'chains': 'null'} }
    command = self.get_base_dict()
    command['nftables'].append(_list)
    kernel_ruleset = self.nft_exec_dict(command)
    if kernel_ruleset:
      for _object in kernel_ruleset['nftables']:
        chain = _object.get("chain")
        if not chain: continue

        _family = chain['family']
        _table = chain['table']
        _hook = chain.get("hook")
        _priority = chain.get("prio")
        _name = chain['name']

        # Ignore chains outside the families/tables/hooks we care about,
        # and non-base chains (no hook/priority).
        if _family not in self.nft_chain_names: continue
        if _table not in self.nft_chain_names[_family]: continue
        if _hook not in self.nft_chain_names[_family][_table]: continue
        if _priority is None: continue

        _saved_priority = nft_chain_priority[_family][_table][_hook]
        if _saved_priority is None or _priority < _saved_priority:
          # at this point, we know the chain has:
          # hook and priority set
          # and it has the lowest priority
          nft_chain_priority[_family][_table][_hook] = _priority
          self.nft_chain_names[_family][_table][_hook] = _name
|
||||
|
||||
def search_for_chain(self, kernel_ruleset: dict, chain_name: str):
|
||||
found = False
|
||||
for _object in kernel_ruleset["nftables"]:
|
||||
chain = _object.get("chain")
|
||||
if not chain:
|
||||
continue
|
||||
ch_name = chain.get("name")
|
||||
if ch_name == chain_name:
|
||||
found = True
|
||||
break
|
||||
return found
|
||||
|
||||
def get_chain_dict(self, _family: str, _name: str):
|
||||
# nft (add | create) chain [<family>] <table> <name>
|
||||
_chain_opts = {'family': _family, 'table': 'filter', 'name': _name }
|
||||
_add = {'add': {'chain': _chain_opts} }
|
||||
final_chain = self.get_base_dict()
|
||||
final_chain["nftables"].append(_add)
|
||||
return final_chain
|
||||
|
||||
  def get_mailcow_jump_rule_dict(self, _family: str, _chain: str):
    """Build the JSON command that inserts (at position 0) a counter+jump
    rule from base chain `_chain` into the mailcow chain, tagged with the
    comment "mailcow" so it can be found and deleted later.
    """
    _jump_rule = self.get_base_dict()
    _expr_opt=[]
    _expr_counter = {'family': _family, 'table': 'filter', 'packets': 0, 'bytes': 0}
    _counter_dict = {'counter': _expr_counter}
    _expr_opt.append(_counter_dict)

    _jump_opts = {'jump': {'target': self.chain_name} }

    _expr_opt.append(_jump_opts)

    _rule_params = {'family': _family,
                    'table': 'filter',
                    'chain': _chain,
                    'expr': _expr_opt,
                    'comment': "mailcow" }

    # 'insert' places the rule at the top of the chain.
    _add_rule = {'insert': {'rule': _rule_params} }

    _jump_rule["nftables"].append(_add_rule)

    return _jump_rule
|
||||
|
||||
  def insert_mailcow_chains(self, _family: str):
    """Create the mailcow chain (if missing) in the `_family` filter table
    and make sure the base input and forward chains each carry a
    "mailcow"-tagged jump rule into it.
    """
    nft_input_chain = self.nft_chain_names[_family]['filter']['input']
    nft_forward_chain = self.nft_chain_names[_family]['filter']['forward']
    # Command: 'nft list table <family> filter'
    _table_opts = {'family': _family, 'name': 'filter'}
    _list = {'list': {'table': _table_opts} }
    command = self.get_base_dict()
    command['nftables'].append(_list)
    kernel_ruleset = self.nft_exec_dict(command)
    if kernel_ruleset:
      # chain
      if not self.search_for_chain(kernel_ruleset, self.chain_name):
        cadena = self.get_chain_dict(_family, self.chain_name)
        if self.nft_exec_dict(cadena):
          self.logger.logInfo(f"MAILCOW {_family} chain created successfully.")

      input_jump_found, forward_jump_found = False, False

      # Scan the table dump for existing "mailcow" jump rules.
      for _object in kernel_ruleset["nftables"]:
        if not _object.get("rule"):
          continue

        rule = _object["rule"]
        if nft_input_chain and rule["chain"] == nft_input_chain:
          if rule.get("comment") and rule["comment"] == "mailcow":
            input_jump_found = True
        if nft_forward_chain and rule["chain"] == nft_forward_chain:
          if rule.get("comment") and rule["comment"] == "mailcow":
            forward_jump_found = True

      if not input_jump_found:
        command = self.get_mailcow_jump_rule_dict(_family, nft_input_chain)
        self.nft_exec_dict(command)

      if not forward_jump_found:
        command = self.get_mailcow_jump_rule_dict(_family, nft_forward_chain)
        self.nft_exec_dict(command)
|
||||
|
||||
def delete_nat_rule(self, _family:str, _chain: str, _handle:str):
|
||||
delete_command = self.get_base_dict()
|
||||
_rule_opts = {'family': _family,
|
||||
'table': 'nat',
|
||||
'chain': _chain,
|
||||
'handle': _handle }
|
||||
_delete = {'delete': {'rule': _rule_opts} }
|
||||
delete_command["nftables"].append(_delete)
|
||||
|
||||
return self.nft_exec_dict(delete_command)
|
||||
|
||||
def delete_filter_rule(self, _family:str, _chain: str, _handle:str):
|
||||
delete_command = self.get_base_dict()
|
||||
_rule_opts = {'family': _family,
|
||||
'table': 'filter',
|
||||
'chain': _chain,
|
||||
'handle': _handle }
|
||||
_delete = {'delete': {'rule': _rule_opts} }
|
||||
delete_command["nftables"].append(_delete)
|
||||
|
||||
return self.nft_exec_dict(delete_command)
|
||||
|
||||
  def snat_rule(self, _family: str, snat_target: str, source_address: str):
    """Maintain the single mailcow SNAT rule in the `_family` nat
    postrouting chain.

    The desired rule translates traffic FROM `source_address` that is NOT
    destined to `source_address` into `snat_target`. If a "mailcow"-tagged
    rule exists but sits below position 0 or has different parameters it
    is deleted (a later invocation re-adds it); if none exists the rule is
    inserted at the top of the chain.
    """
    chain_name = self.nft_chain_names[_family]['nat']['postrouting']

    # no postrouting chain, may occur if docker has ipv6 disabled.
    if not chain_name: return

    # Command: nft list chain <family> nat <chain_name>
    _chain_opts = {'family': _family, 'table': 'nat', 'name': chain_name}
    _list = {'list':{'chain': _chain_opts} }
    command = self.get_base_dict()
    command['nftables'].append(_list)
    kernel_ruleset = self.nft_exec_dict(command)
    if not kernel_ruleset:
      return

    # Find the first "mailcow"-tagged rule and its position among all rules.
    rule_position = 0
    rule_handle = None
    rule_found = False
    for _object in kernel_ruleset["nftables"]:
      if not _object.get("rule"):
        continue

      rule = _object["rule"]
      if not rule.get("comment") or not rule["comment"] == "mailcow":
        rule_position +=1
        continue

      rule_found = True
      rule_handle = rule["handle"]
      break

    dest_net = ipaddress.ip_network(source_address, strict=False)
    target_net = ipaddress.ip_network(snat_target, strict=False)

    if rule_found:
      # Decode the existing rule's saddr match, daddr match and snat target.
      # NOTE(review): assumes the expr layout produced by the insert branch
      # below (match, match, counter, snat) — holds for rules we created.
      saddr_ip = rule["expr"][0]["match"]["right"]["prefix"]["addr"]
      saddr_len = int(rule["expr"][0]["match"]["right"]["prefix"]["len"])

      daddr_ip = rule["expr"][1]["match"]["right"]["prefix"]["addr"]
      daddr_len = int(rule["expr"][1]["match"]["right"]["prefix"]["len"])

      target_ip = rule["expr"][3]["snat"]["addr"]

      saddr_net = ipaddress.ip_network(saddr_ip + '/' + str(saddr_len), strict=False)
      daddr_net = ipaddress.ip_network(daddr_ip + '/' + str(daddr_len), strict=False)
      current_target_net = ipaddress.ip_network(target_ip, strict=False)

      match = all((
        dest_net == saddr_net,
        dest_net == daddr_net,
        target_net == current_target_net
      ))
      try:
        if rule_position == 0:
          if not match:
            # Position 0 , it is a mailcow rule , but it does not have the same parameters
            if self.delete_nat_rule(_family, chain_name, rule_handle):
              self.logger.logInfo(f'Remove rule for source network {saddr_net} to SNAT target {target_net} from {_family} nat {chain_name} chain, rule does not match configured parameters')
        else:
          # Position > 0 and is mailcow rule
          if self.delete_nat_rule(_family, chain_name, rule_handle):
            self.logger.logInfo(f'Remove rule for source network {saddr_net} to SNAT target {target_net} from {_family} nat {chain_name} chain, rule is at position {rule_position}')
      except:
        self.logger.logCrit(f"Error running SNAT on {_family}, retrying..." )
    else:
      # rule not found
      json_command = self.get_base_dict()
      try:
        snat_dict = {'snat': {'addr': str(target_net.network_address)} }

        expr_counter = {'family': _family, 'table': 'nat', 'packets': 0, 'bytes': 0}
        counter_dict = {'counter': expr_counter}

        # saddr == dest_net AND daddr != dest_net (same prefix, negated).
        prefix_dict = {'prefix': {'addr': str(dest_net.network_address), 'len': int(dest_net.prefixlen)} }
        payload_dict = {'payload': {'protocol': _family, 'field': "saddr"} }
        match_dict1 = {'match': {'op': '==', 'left': payload_dict, 'right': prefix_dict} }

        payload_dict2 = {'payload': {'protocol': _family, 'field': "daddr"} }
        match_dict2 = {'match': {'op': '!=', 'left': payload_dict2, 'right': prefix_dict } }
        expr_list = [
          match_dict1,
          match_dict2,
          counter_dict,
          snat_dict
        ]
        rule_fields = {'family': _family,
                       'table': 'nat',
                       'chain': chain_name,
                       'comment': "mailcow",
                       'expr': expr_list }

        insert_dict = {'insert': {'rule': rule_fields} }
        json_command["nftables"].append(insert_dict)
        if self.nft_exec_dict(json_command):
          self.logger.logInfo(f'Added {_family} nat {chain_name} rule for source network {dest_net} to {target_net}')
      except:
        self.logger.logCrit(f"Error running SNAT on {_family}, retrying...")
|
||||
|
||||
  def get_chain_handle(self, _family: str, _table: str, chain_name: str):
    """Return the kernel handle of chain `chain_name` in `_family`/`_table`,
    or None when no such chain exists."""
    chain_handle = None
    # Command: 'nft list chains {family}'
    _list = {'list': {'chains': {'family': _family} } }
    command = self.get_base_dict()
    command['nftables'].append(_list)
    kernel_ruleset = self.nft_exec_dict(command)
    if kernel_ruleset:
      for _object in kernel_ruleset["nftables"]:
        if not _object.get("chain"):
          continue
        chain = _object["chain"]
        if chain["family"] == _family and chain["table"] == _table and chain["name"] == chain_name:
          chain_handle = chain["handle"]
          break
    return chain_handle
|
||||
|
||||
  def get_rules_handle(self, _family: str, _table: str, chain_name: str, _comment_filter = "mailcow"):
    """Return the kernel handles of all rules in the given chain whose
    comment equals `_comment_filter` (default "mailcow"). Returns an empty
    list when none match or the chain cannot be listed."""
    rule_handle = []
    # Command: 'nft list chain {family} {table} {chain_name}'
    _chain_opts = {'family': _family, 'table': _table, 'name': chain_name}
    _list = {'list': {'chain': _chain_opts} }
    command = self.get_base_dict()
    command['nftables'].append(_list)

    kernel_ruleset = self.nft_exec_dict(command)
    if kernel_ruleset:
      for _object in kernel_ruleset["nftables"]:
        if not _object.get("rule"):
          continue

        rule = _object["rule"]
        if rule["family"] == _family and rule["table"] == _table and rule["chain"] == chain_name:
          if rule.get("comment") and rule["comment"] == _comment_filter:
            rule_handle.append(rule["handle"])
    return rule_handle
|
||||
|
||||
  def get_ban_ip_dict(self, ipaddr: str, _family: str):
    """Build the JSON command that inserts a counter+drop rule for source
    `ipaddr` (address or CIDR) at the top of the mailcow filter chain."""
    json_command = self.get_base_dict()

    expr_opt = []
    # Normalize to a network so single addresses and CIDRs are handled alike.
    ipaddr_net = ipaddress.ip_network(ipaddr, strict=False)
    right_dict = {'prefix': {'addr': str(ipaddr_net.network_address), 'len': int(ipaddr_net.prefixlen) } }

    left_dict = {'payload': {'protocol': _family, 'field': 'saddr'} }
    match_dict = {'op': '==', 'left': left_dict, 'right': right_dict }
    expr_opt.append({'match': match_dict})

    counter_dict = {'counter': {'family': _family, 'table': "filter", 'packets': 0, 'bytes': 0} }
    expr_opt.append(counter_dict)

    expr_opt.append({'drop': "null"})

    rule_dict = {'family': _family, 'table': "filter", 'chain': self.chain_name, 'expr': expr_opt}

    # 'insert' places the ban above any isolation rules in the chain.
    base_dict = {'insert': {'rule': rule_dict} }
    json_command["nftables"].append(base_dict)

    return json_command
|
||||
|
||||
  def get_unban_ip_dict(self, ipaddr:str, _family: str):
    """Build the JSON command that deletes the ban rule for `ipaddr` from
    the mailcow chain, or return False when no matching rule exists."""
    json_command = self.get_base_dict()
    # Command: 'nft list chain {s_family} filter MAILCOW'
    _chain_opts = {'family': _family, 'table': 'filter', 'name': self.chain_name}
    _list = {'list': {'chain': _chain_opts} }
    command = self.get_base_dict()
    command['nftables'].append(_list)
    kernel_ruleset = self.nft_exec_dict(command)
    rule_handle = None
    if kernel_ruleset:
      for _object in kernel_ruleset["nftables"]:
        if not _object.get("rule"):
          continue

        # NOTE(review): assumes every rule in the mailcow chain starts with
        # a 'match' expression (true for rules this module creates) — a
        # foreign rule without one would raise KeyError here; confirm.
        rule = _object["rule"]["expr"][0]["match"]
        if not "payload" in rule["left"]:
          continue
        left_opt = rule["left"]["payload"]
        if not left_opt["protocol"] == _family:
          continue
        if not left_opt["field"] =="saddr":
          continue

        # ip currently banned
        # The right side is either a prefix dict (CIDR) or a plain address.
        rule_right = rule["right"]
        if isinstance(rule_right, dict):
          current_rule_ip = rule_right["prefix"]["addr"] + '/' + str(rule_right["prefix"]["len"])
        else:
          current_rule_ip = rule_right
        current_rule_net = ipaddress.ip_network(current_rule_ip)

        # ip to ban
        candidate_net = ipaddress.ip_network(ipaddr, strict=False)

        if current_rule_net == candidate_net:
          rule_handle = _object["rule"]["handle"]
          break

    if rule_handle is not None:
      mailcow_rule = {'family': _family, 'table': 'filter', 'chain': self.chain_name, 'handle': rule_handle}
      delete_rule = {'delete': {'rule': mailcow_rule} }
      json_command["nftables"].append(delete_rule)
    else:
      return False

    return json_command
|
||||
|
||||
  def check_mailcow_chains(self, family: str, chain: str):
    """Locate the mailcow jump rule in base chain `chain` ('input' or
    'forward') of the `family` filter table.

    Returns None when the base chain is unknown, the 0-based rule position
    when a "mailcow"-tagged rule was found, or False when it is missing.
    """
    position = 0
    rule_found = False
    chain_name = self.nft_chain_names[family]['filter'][chain]

    if not chain_name: return None

    _chain_opts = {'family': family, 'table': 'filter', 'name': chain_name}
    _list = {'list': {'chain': _chain_opts}}
    command = self.get_base_dict()
    command['nftables'].append(_list)
    kernel_ruleset = self.nft_exec_dict(command)
    if kernel_ruleset:
      for _object in kernel_ruleset["nftables"]:
        if not _object.get("rule"):
          continue
        rule = _object["rule"]
        if rule.get("comment") and rule["comment"] == "mailcow":
          rule_found = True
          break

        position+=1

    return position if rule_found else False
|
||||
|
||||
def create_mailcow_isolation_rule(self, _interface:str, _dports:list, _allow:str = ""):
|
||||
family = "ip"
|
||||
table = "filter"
|
||||
comment_filter_drop = "mailcow isolation"
|
||||
comment_filter_allow = "mailcow isolation allow"
|
||||
json_command = self.get_base_dict()
|
||||
|
||||
# Delete old mailcow isolation rules
|
||||
handles = self.get_rules_handle(family, table, self.chain_name, comment_filter_drop)
|
||||
for handle in handles:
|
||||
self.delete_filter_rule(family, self.chain_name, handle)
|
||||
handles = self.get_rules_handle(family, table, self.chain_name, comment_filter_allow)
|
||||
for handle in handles:
|
||||
self.delete_filter_rule(family, self.chain_name, handle)
|
||||
|
||||
# insert mailcow isolation rule
|
||||
_match_dict_drop = [
|
||||
{
|
||||
"match": {
|
||||
"op": "!=",
|
||||
"left": {
|
||||
"meta": {
|
||||
"key": "iifname"
|
||||
}
|
||||
},
|
||||
"right": _interface
|
||||
}
|
||||
},
|
||||
{
|
||||
"match": {
|
||||
"op": "==",
|
||||
"left": {
|
||||
"meta": {
|
||||
"key": "oifname"
|
||||
}
|
||||
},
|
||||
"right": _interface
|
||||
}
|
||||
},
|
||||
{
|
||||
"match": {
|
||||
"op": "==",
|
||||
"left": {
|
||||
"payload": {
|
||||
"protocol": "tcp",
|
||||
"field": "dport"
|
||||
}
|
||||
},
|
||||
"right": {
|
||||
"set": _dports
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"counter": {
|
||||
"packets": 0,
|
||||
"bytes": 0
|
||||
}
|
||||
},
|
||||
{
|
||||
"drop": None
|
||||
}
|
||||
]
|
||||
rule_drop = { "insert": { "rule": {
|
||||
"family": family,
|
||||
"table": table,
|
||||
"chain": self.chain_name,
|
||||
"comment": comment_filter_drop,
|
||||
"expr": _match_dict_drop
|
||||
}}}
|
||||
json_command["nftables"].append(rule_drop)
|
||||
|
||||
# insert mailcow isolation allow rule
|
||||
if _allow != "":
|
||||
_match_dict_allow = [
|
||||
{
|
||||
"match": {
|
||||
"op": "==",
|
||||
"left": {
|
||||
"payload": {
|
||||
"protocol": "ip",
|
||||
"field": "saddr"
|
||||
}
|
||||
},
|
||||
"right": _allow
|
||||
}
|
||||
},
|
||||
{
|
||||
"match": {
|
||||
"op": "!=",
|
||||
"left": {
|
||||
"meta": {
|
||||
"key": "iifname"
|
||||
}
|
||||
},
|
||||
"right": _interface
|
||||
}
|
||||
},
|
||||
{
|
||||
"match": {
|
||||
"op": "==",
|
||||
"left": {
|
||||
"meta": {
|
||||
"key": "oifname"
|
||||
}
|
||||
},
|
||||
"right": _interface
|
||||
}
|
||||
},
|
||||
{
|
||||
"match": {
|
||||
"op": "==",
|
||||
"left": {
|
||||
"payload": {
|
||||
"protocol": "tcp",
|
||||
"field": "dport"
|
||||
}
|
||||
},
|
||||
"right": {
|
||||
"set": _dports
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"counter": {
|
||||
"packets": 0,
|
||||
"bytes": 0
|
||||
}
|
||||
},
|
||||
{
|
||||
"accept": None
|
||||
}
|
||||
]
|
||||
rule_allow = { "insert": { "rule": {
|
||||
"family": family,
|
||||
"table": table,
|
||||
"chain": self.chain_name,
|
||||
"comment": comment_filter_allow,
|
||||
"expr": _match_dict_allow
|
||||
}}}
|
||||
json_command["nftables"].append(rule_allow)
|
||||
|
||||
success = self.nft_exec_dict(json_command)
|
||||
if success == False:
|
||||
self.logger.logCrit(f"Error adding {self.chain_name} isolation")
|
||||
return False
|
||||
|
||||
return True
|
||||
0
data/Dockerfiles/netfilter/modules/__init__.py
Normal file
0
data/Dockerfiles/netfilter/modules/__init__.py
Normal file
@@ -1,610 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import re
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import atexit
|
||||
import signal
|
||||
import ipaddress
|
||||
from collections import Counter
|
||||
from random import randint
|
||||
from threading import Thread
|
||||
from threading import Lock
|
||||
import redis
|
||||
import json
|
||||
import iptc
|
||||
import dns.resolver
|
||||
import dns.exception
|
||||
|
||||
# Connect to Redis, retrying every 3 seconds until the server is reachable.
# When REDIS_SLAVEOF_IP is set, connect to that replica instead of the
# default in-network Redis at <IPV4_NETWORK>.249:6379.
while True:
  try:
    redis_slaveof_ip = os.getenv('REDIS_SLAVEOF_IP', '')
    redis_slaveof_port = os.getenv('REDIS_SLAVEOF_PORT', '')
    if "".__eq__(redis_slaveof_ip):
      r = redis.StrictRedis(host=os.getenv('IPV4_NETWORK', '172.22.1') + '.249', decode_responses=True, port=6379, db=0)
    else:
      r = redis.StrictRedis(host=redis_slaveof_ip, decode_responses=True, port=redis_slaveof_port, db=0)
    # Verify the connection actually works before proceeding.
    r.ping()
  except Exception as ex:
    print('%s - trying again in 3 seconds' % (ex))
    time.sleep(3)
  else:
    break
|
||||
|
||||
pubsub = r.pubsub()

# Whitelist/blacklist of networks, refreshed from Redis by watcher threads.
WHITELIST = []
BLACKLIST= []

# Per-network ban state:
# {net: {'attempts': int, 'last_attempt': float, 'ban_counter': int}}
bans = {}

# Shutdown coordination between worker threads and the main loop.
quit_now = False
exit_code = 0
# Serializes iptables access across threads.
lock = Lock()
|
||||
|
||||
def log(priority, message):
  # Push a {time, priority, message} record to the NETFILTER_LOG Redis
  # list and echo the message to stdout.
  tolog = {}
  tolog['time'] = int(round(time.time()))
  tolog['priority'] = priority
  tolog['message'] = message
  r.lpush('NETFILTER_LOG', json.dumps(tolog, ensure_ascii=False))
  print(message)

def logWarn(message):
  # Log with priority 'warn'.
  log('warn', message)

def logCrit(message):
  # Log with priority 'crit'.
  log('crit', message)

def logInfo(message):
  # Log with priority 'info'.
  log('info', message)
|
||||
|
||||
def refreshF2boptions():
  """Load fail2ban options into the global `f2boptions` dict.

  Prefers the consolidated F2B_OPTIONS JSON key; falls back to the
  individual legacy F2B_* keys when it is absent. Invalid JSON requests a
  container restart (quit_now/exit_code). Missing values are filled with
  defaults and the result is written back to Redis.
  """
  global f2boptions
  global quit_now
  global exit_code

  f2boptions = {}

  if not r.get('F2B_OPTIONS'):
    # Legacy per-key configuration; values may be None when unset.
    f2boptions['ban_time'] = r.get('F2B_BAN_TIME')
    f2boptions['max_ban_time'] = r.get('F2B_MAX_BAN_TIME')
    f2boptions['ban_time_increment'] = r.get('F2B_BAN_TIME_INCREMENT')
    f2boptions['max_attempts'] = r.get('F2B_MAX_ATTEMPTS')
    f2boptions['retry_window'] = r.get('F2B_RETRY_WINDOW')
    f2boptions['netban_ipv4'] = r.get('F2B_NETBAN_IPV4')
    f2boptions['netban_ipv6'] = r.get('F2B_NETBAN_IPV6')
  else:
    try:
      f2boptions = json.loads(r.get('F2B_OPTIONS'))
    except ValueError:
      print('Error loading F2B options: F2B_OPTIONS is not json')
      quit_now = True
      exit_code = 2

  # Fill any missing/None option with its default, then persist.
  verifyF2boptions(f2boptions)
  r.set('F2B_OPTIONS', json.dumps(f2boptions, ensure_ascii=False))
|
||||
|
||||
def verifyF2boptions(f2boptions):
  # Ensure every fail2ban option is present, falling back to its default.
  defaults = [
    ('ban_time', 1800),
    ('max_ban_time', 10000),
    ('ban_time_increment', True),
    ('max_attempts', 10),
    ('retry_window', 600),
    ('netban_ipv4', 32),
    ('netban_ipv6', 128),
  ]
  for option, fallback in defaults:
    verifyF2boption(f2boptions, option, fallback)

def verifyF2boption(f2boptions, f2boption, f2bdefault):
  # Keep the configured value only when the key exists and is not None;
  # otherwise install the default.
  if f2boptions.get(f2boption) is None:
    f2boptions[f2boption] = f2bdefault
|
||||
|
||||
def refreshF2bregex():
  """Load the fail2ban regex set into the global `f2bregex` dict.

  When Redis has no F2B_REGEX key, seed it with the default patterns for
  mailcow UI, Rspamd, Postfix, Dovecot and SOGo authentication failures;
  each pattern's first capture group is the offending IP address. Invalid
  JSON requests a container restart (quit_now/exit_code).
  """
  global f2bregex
  global quit_now
  global exit_code
  if not r.get('F2B_REGEX'):
    f2bregex = {}
    f2bregex[1] = 'mailcow UI: Invalid password for .+ by ([0-9a-f\.:]+)'
    f2bregex[2] = 'Rspamd UI: Invalid password by ([0-9a-f\.:]+)'
    f2bregex[3] = 'warning: .*\[([0-9a-f\.:]+)\]: SASL .+ authentication failed: (?!.*Connection lost to authentication server).+'
    f2bregex[4] = 'warning: non-SMTP command from .*\[([0-9a-f\.:]+)]:.+'
    f2bregex[5] = 'NOQUEUE: reject: RCPT from \[([0-9a-f\.:]+)].+Protocol error.+'
    f2bregex[6] = '-login: Disconnected.+ \(auth failed, .+\): user=.*, method=.+, rip=([0-9a-f\.:]+),'
    f2bregex[7] = '-login: Aborted login.+ \(auth failed .+\): user=.+, rip=([0-9a-f\.:]+), lip.+'
    f2bregex[8] = '-login: Aborted login.+ \(tried to use disallowed .+\): user=.+, rip=([0-9a-f\.:]+), lip.+'
    f2bregex[9] = 'SOGo.+ Login from \'([0-9a-f\.:]+)\' for user .+ might not have worked'
    f2bregex[10] = '([0-9a-f\.:]+) \"GET \/SOGo\/.* HTTP.+\" 403 .+'
    r.set('F2B_REGEX', json.dumps(f2bregex, ensure_ascii=False))
  else:
    try:
      f2bregex = {}
      f2bregex = json.loads(r.get('F2B_REGEX'))
    except ValueError:
      print('Error loading F2B options: F2B_REGEX is not json')
      quit_now = True
      exit_code = 2

# One-time migration: the log list was renamed from F2B_LOG to NETFILTER_LOG.
if r.exists('F2B_LOG'):
  r.rename('F2B_LOG', 'NETFILTER_LOG')
|
||||
|
||||
def mailcowChainOrder():
  """Watchdog thread: every 10 seconds verify that the MAILCOW target is
  present and near the top (position <= 2) of the IPv4/IPv6 FORWARD and
  INPUT chains.

  Any violation requests a container restart (quit_now=True, exit_code=2)
  so the chain layout is rebuilt from scratch.
  """
  global lock
  global quit_now
  global exit_code
  while not quit_now:
    time.sleep(10)
    with lock:
      filter4_table = iptc.Table(iptc.Table.FILTER)
      filter6_table = iptc.Table6(iptc.Table6.FILTER)
      # Refresh to see rules added by other processes (e.g. docker).
      filter4_table.refresh()
      filter6_table.refresh()
      for f in [filter4_table, filter6_table]:
        forward_chain = iptc.Chain(f, 'FORWARD')
        input_chain = iptc.Chain(f, 'INPUT')
        for chain in [forward_chain, input_chain]:
          target_found = False
          for position, item in enumerate(chain.rules):
            if item.target.name == 'MAILCOW':
              target_found = True
              # Positions 0-2 are tolerated (docker inserts its own rules).
              if position > 2:
                logCrit('Error in %s chain order: MAILCOW on position %d, restarting container' % (chain.name, position))
                quit_now = True
                exit_code = 2
          if not target_found:
            logCrit('Error in %s chain: MAILCOW target not found, restarting container' % (chain.name))
            quit_now = True
            exit_code = 2
|
||||
|
||||
def ban(address):
  """Register a failed attempt for *address* and ban its network once the
  configured attempt threshold is reached.

  The address is first normalized (IPv4-mapped IPv6 -> IPv4) and skipped if
  private/loopback or covered by the whitelist.  Attempts are counted per
  network (address widened by netban_ipv4/netban_ipv6 prefix); on the
  threshold a REJECT rule is inserted into the MAILCOW chain and the ban is
  recorded in Redis (F2B_ACTIVE_BANS) with its expiry timestamp.
  """
  global lock
  refreshF2boptions()
  BAN_TIME = int(f2boptions['ban_time'])
  BAN_TIME_INCREMENT = bool(f2boptions['ban_time_increment'])
  MAX_ATTEMPTS = int(f2boptions['max_attempts'])
  RETRY_WINDOW = int(f2boptions['retry_window'])
  NETBAN_IPV4 = '/' + str(f2boptions['netban_ipv4'])
  NETBAN_IPV6 = '/' + str(f2boptions['netban_ipv6'])

  ip = ipaddress.ip_address(address)
  # Treat ::ffff:a.b.c.d as its embedded IPv4 address
  if type(ip) is ipaddress.IPv6Address and ip.ipv4_mapped:
    ip = ip.ipv4_mapped
    address = str(ip)
  if ip.is_private or ip.is_loopback:
    return

  self_network = ipaddress.ip_network(address)

  # Snapshot the whitelist under the lock, then check without holding it
  with lock:
    temp_whitelist = set(WHITELIST)

  if temp_whitelist:
    for wl_key in temp_whitelist:
      wl_net = ipaddress.ip_network(wl_key, False)
      if wl_net.overlaps(self_network):
        logInfo('Address %s is whitelisted by rule %s' % (self_network, wl_net))
        return

  # Widen the single address to the configured ban-network size
  net = ipaddress.ip_network((address + (NETBAN_IPV4 if type(ip) is ipaddress.IPv4Address else NETBAN_IPV6)), strict=False)
  net = str(net)

  if not net in bans:
    bans[net] = {'attempts': 0, 'last_attempt': 0, 'ban_counter': 0}

  bans[net]['attempts'] += 1
  bans[net]['last_attempt'] = time.time()

  if bans[net]['attempts'] >= MAX_ATTEMPTS:
    cur_time = int(round(time.time()))
    # Exponential backoff: each previous ban of this net doubles the time
    NET_BAN_TIME = BAN_TIME if not BAN_TIME_INCREMENT else BAN_TIME * 2 ** bans[net]['ban_counter']
    logCrit('Banning %s for %d minutes' % (net, NET_BAN_TIME / 60 ))
    if type(ip) is ipaddress.IPv4Address:
      with lock:
        chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'MAILCOW')
        rule = iptc.Rule()
        rule.src = net
        target = iptc.Target(rule, "REJECT")
        rule.target = target
        if rule not in chain.rules:
          chain.insert_rule(rule)
    else:
      with lock:
        chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), 'MAILCOW')
        rule = iptc.Rule6()
        rule.src = net
        target = iptc.Target(rule, "REJECT")
        rule.target = target
        if rule not in chain.rules:
          chain.insert_rule(rule)
    # Publish ban expiry so the UI / autopurge can see it
    r.hset('F2B_ACTIVE_BANS', '%s' % net, cur_time + NET_BAN_TIME)
  else:
    logWarn('%d more attempts in the next %d seconds until %s is banned' % (MAX_ATTEMPTS - bans[net]['attempts'], RETRY_WINDOW, net))
|
||||
|
||||
def unban(net):
  """Remove the temporary ban for *net*: delete its REJECT rule from the
  MAILCOW chain (IPv4 or IPv6 table as appropriate), clear the Redis ban
  state, reset the attempt counter and bump ban_counter so a repeat offense
  gets a longer ban.  A net that was never banned is only dequeued.
  """
  global lock
  if not net in bans:
    logInfo('%s is not banned, skipping unban and deleting from queue (if any)' % net)
    r.hdel('F2B_QUEUE_UNBAN', '%s' % net)
    return
  logInfo('Unbanning %s' % net)
  if type(ipaddress.ip_network(net)) is ipaddress.IPv4Network:
    with lock:
      chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'MAILCOW')
      rule = iptc.Rule()
      rule.src = net
      target = iptc.Target(rule, "REJECT")
      rule.target = target
      if rule in chain.rules:
        chain.delete_rule(rule)
  else:
    with lock:
      chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), 'MAILCOW')
      rule = iptc.Rule6()
      rule.src = net
      target = iptc.Target(rule, "REJECT")
      rule.target = target
      if rule in chain.rules:
        chain.delete_rule(rule)
  r.hdel('F2B_ACTIVE_BANS', '%s' % net)
  r.hdel('F2B_QUEUE_UNBAN', '%s' % net)
  if net in bans:
    bans[net]['attempts'] = 0
    # Remember this net was banned before -> next ban doubles (see ban())
    bans[net]['ban_counter'] += 1
|
||||
|
||||
def permBan(net, unban=False):
  """Add (default) or remove (unban=True) a permanent blacklist entry.

  Inserts/deletes a REJECT rule for *net* in the MAILCOW chain of the
  matching (IPv4/IPv6) filter table and mirrors the state in the Redis hash
  F2B_PERM_BANS.  Both operations are idempotent: the rule is only touched
  when its presence does not already match the requested state.
  """
  global lock
  if type(ipaddress.ip_network(net, strict=False)) is ipaddress.IPv4Network:
    with lock:
      chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'MAILCOW')
      rule = iptc.Rule()
      rule.src = net
      target = iptc.Target(rule, "REJECT")
      rule.target = target
      if rule not in chain.rules and not unban:
        logCrit('Add host/network %s to blacklist' % net)
        chain.insert_rule(rule)
        r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time())))
      elif rule in chain.rules and unban:
        logCrit('Remove host/network %s from blacklist' % net)
        chain.delete_rule(rule)
        r.hdel('F2B_PERM_BANS', '%s' % net)
  else:
    with lock:
      chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), 'MAILCOW')
      rule = iptc.Rule6()
      rule.src = net
      target = iptc.Target(rule, "REJECT")
      rule.target = target
      if rule not in chain.rules and not unban:
        logCrit('Add host/network %s to blacklist' % net)
        chain.insert_rule(rule)
        r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time())))
      elif rule in chain.rules and unban:
        logCrit('Remove host/network %s from blacklist' % net)
        chain.delete_rule(rule)
        r.hdel('F2B_PERM_BANS', '%s' % net)
|
||||
|
||||
def quit(signum, frame):
  # SIGTERM handler: ask every worker loop to stop; the main loop then
  # falls through to sys.exit() and atexit runs clear().
  # NOTE: intentionally shadows the builtin quit(); kept for compatibility
  # because it is registered via signal.signal() under this name.
  global quit_now
  quit_now = True
|
||||
|
||||
def clear():
  """Tear down all netfilter state owned by this container.

  Unbans every tracked net, then (under the lock, with autocommit off so the
  whole cleanup commits atomically per table) empties the MAILCOW chain,
  removes the jump rules from FORWARD/INPUT, deletes the chain itself, and
  finally clears the Redis ban hashes and unsubscribes from pubsub.
  Called on startup (stale state from a killed session) and via atexit.
  """
  global lock
  logInfo('Clearing all bans')
  # Iterate a copy: unban() mutates the bans dict
  for net in bans.copy():
    unban(net)
  with lock:
    filter4_table = iptc.Table(iptc.Table.FILTER)
    filter6_table = iptc.Table6(iptc.Table6.FILTER)
    for filter_table in [filter4_table, filter6_table]:
      # Batch all deletions into one commit per table
      filter_table.autocommit = False
      forward_chain = iptc.Chain(filter_table, "FORWARD")
      input_chain = iptc.Chain(filter_table, "INPUT")
      mailcow_chain = iptc.Chain(filter_table, "MAILCOW")
      if mailcow_chain in filter_table.chains:
        for rule in mailcow_chain.rules:
          mailcow_chain.delete_rule(rule)
        for rule in forward_chain.rules:
          if rule.target.name == 'MAILCOW':
            forward_chain.delete_rule(rule)
        for rule in input_chain.rules:
          if rule.target.name == 'MAILCOW':
            input_chain.delete_rule(rule)
        filter_table.delete_chain("MAILCOW")
      filter_table.commit()
      filter_table.refresh()
      filter_table.autocommit = True
    r.delete('F2B_ACTIVE_BANS')
    r.delete('F2B_PERM_BANS')
    pubsub.unsubscribe()
|
||||
|
||||
def watch():
  """Consumer thread: read log lines from the Redis pubsub channel
  F2B_CHANNEL and feed matching source addresses into ban().

  Each message is tested against every configured F2B regex (rule group 1
  must capture the IP).  Private/loopback addresses are ignored.  Any
  pubsub error aborts the loop and requests a container restart.
  """
  logInfo('Watching Redis channel F2B_CHANNEL')
  pubsub.subscribe('F2B_CHANNEL')

  global quit_now
  global exit_code

  while not quit_now:
    try:
      for item in pubsub.listen():
        # Pick up regex changes made at runtime via Redis
        refreshF2bregex()
        for rule_id, rule_regex in f2bregex.items():
          if item['data'] and item['type'] == 'message':
            try:
              result = re.search(rule_regex, item['data'])
            except re.error:
              # User-supplied regex may be invalid; treat as no match
              result = False
            if result:
              addr = result.group(1)
              ip = ipaddress.ip_address(addr)
              if ip.is_private or ip.is_loopback:
                continue
              logWarn('%s matched rule id %s (%s)' % (addr, rule_id, item['data']))
              ban(addr)
    except Exception as ex:
      logWarn('Error reading log line from pubsub: %s' % ex)
      quit_now = True
      exit_code = 2
|
||||
|
||||
def snat4(snat_target):
  """Worker thread: keep exactly one IPv4 SNAT rule in place.

  Every 10 seconds, ensure the nat/POSTROUTING chain has a rule
  (src = IPV4_NETWORK, dst != src, SNAT --to-source *snat_target*) at
  position 0, and delete duplicate copies found further down the chain.
  Runs until quit_now is set.
  """
  global lock
  global quit_now

  def get_snat4_rule():
    # Build the desired rule; a comment match carrying the current
    # timestamp is attached so the rule is recognizable in iptables output.
    rule = iptc.Rule()
    rule.src = os.getenv('IPV4_NETWORK', '172.22.1') + '.0/24'
    rule.dst = '!' + rule.src
    target = rule.create_target("SNAT")
    target.to_source = snat_target
    match = rule.create_match("comment")
    match.comment = f'{int(round(time.time()))}'
    return rule

  while not quit_now:
    time.sleep(10)
    with lock:
      try:
        table = iptc.Table('nat')
        table.refresh()
        chain = iptc.Chain(table, 'POSTROUTING')
        # Batch insert/delete into a single commit
        table.autocommit = False
        new_rule = get_snat4_rule()

        if not chain.rules:
          # if there are no rules in the chain, insert the new rule directly
          logInfo(f'Added POSTROUTING rule for source network {new_rule.src} to SNAT target {snat_target}')
          chain.insert_rule(new_rule)
        else:
          for position, rule in enumerate(chain.rules):
            if not hasattr(rule.target, 'parameter'):
              continue
            # Compare src/dst and the SNAT target (the timestamp comment is
            # deliberately ignored so re-created rules still match).
            match = all((
              new_rule.get_src() == rule.get_src(),
              new_rule.get_dst() == rule.get_dst(),
              new_rule.target.parameters == rule.target.parameters,
              new_rule.target.name == rule.target.name
            ))
            if position == 0:
              if not match:
                logInfo(f'Added POSTROUTING rule for source network {new_rule.src} to SNAT target {snat_target}')
                chain.insert_rule(new_rule)
            else:
              if match:
                logInfo(f'Remove rule for source network {new_rule.src} to SNAT target {snat_target} from POSTROUTING chain at position {position}')
                chain.delete_rule(rule)

        table.commit()
        table.autocommit = True
      except Exception as e:
        # Was a bare `except:`, which also swallowed SystemExit /
        # KeyboardInterrupt and hid the failure reason from the logs.
        print('Error running SNAT4, retrying... (%s)' % e)
|
||||
|
||||
def snat6(snat_target):
  """Worker thread: keep an IPv6 SNAT rule in place.

  Every 10 seconds, ensure the ip6 nat/POSTROUTING chain contains a rule
  (src = IPV6_NETWORK, dst != src, SNAT --to-source *snat_target*); if the
  rule exists but is not at position 0, it is removed (and re-inserted on a
  later iteration).  Runs until quit_now is set.
  """
  global lock
  global quit_now

  def get_snat6_rule():
    rule = iptc.Rule6()
    rule.src = os.getenv('IPV6_NETWORK', 'fd4d:6169:6c63:6f77::/64')
    rule.dst = '!' + rule.src
    target = rule.create_target("SNAT")
    target.to_source = snat_target
    return rule

  while not quit_now:
    time.sleep(10)
    with lock:
      try:
        table = iptc.Table6('nat')
        table.refresh()
        chain = iptc.Chain(table, 'POSTROUTING')
        table.autocommit = False
        if get_snat6_rule() not in chain.rules:
          logInfo('Added POSTROUTING rule for source network %s to SNAT target %s' % (get_snat6_rule().src, snat_target))
          chain.insert_rule(get_snat6_rule())
          table.commit()
        else:
          for position, item in enumerate(chain.rules):
            if item == get_snat6_rule():
              # Rule drifted away from the top of the chain: drop it here;
              # the next iteration re-inserts it at position 0.
              if position != 0:
                chain.delete_rule(get_snat6_rule())
          table.commit()
        table.autocommit = True
      except Exception as e:
        # Was a bare `except:`, which also swallowed SystemExit /
        # KeyboardInterrupt and hid the failure reason from the logs.
        print('Error running SNAT6, retrying... (%s)' % e)
|
||||
|
||||
def autopurge():
  """Worker thread: every 10s process queued manual unbans and expire bans.

  A banned net is unbanned once the time since its last attempt exceeds its
  (possibly increment-doubled) ban time or the configured maximum ban time.
  """
  while not quit_now:
    time.sleep(10)
    refreshF2boptions()
    BAN_TIME = int(f2boptions['ban_time'])
    MAX_BAN_TIME = int(f2boptions['max_ban_time'])
    BAN_TIME_INCREMENT = bool(f2boptions['ban_time_increment'])
    MAX_ATTEMPTS = int(f2boptions['max_attempts'])
    # Manual unban requests queued by the UI via Redis
    QUEUE_UNBAN = r.hgetall('F2B_QUEUE_UNBAN')
    if QUEUE_UNBAN:
      for net in QUEUE_UNBAN:
        unban(str(net))
    # Iterate a copy: unban() mutates the bans dict
    for net in bans.copy():
      if bans[net]['attempts'] >= MAX_ATTEMPTS:
        # Mirror the backoff formula used in ban()
        NET_BAN_TIME = BAN_TIME if not BAN_TIME_INCREMENT else BAN_TIME * 2 ** bans[net]['ban_counter']
        TIME_SINCE_LAST_ATTEMPT = time.time() - bans[net]['last_attempt']
        if TIME_SINCE_LAST_ATTEMPT > NET_BAN_TIME or TIME_SINCE_LAST_ATTEMPT > MAX_BAN_TIME:
          unban(net)
|
||||
|
||||
def isIpNetwork(address):
  """Return True when *address* parses as an IP address or CIDR network
  (host bits allowed), False otherwise."""
  try:
    ipaddress.ip_network(address, False)
    return True
  except ValueError:
    return False
|
||||
|
||||
|
||||
def genNetworkList(list):
  """Expand a mixed iterable of IP networks and hostnames into a set of
  network strings.

  Entries that already parse as IP addresses/networks are kept as-is;
  hostnames are resolved (A and AAAA, 3s lifetime) and replaced by their
  addresses.  On resolver timeout the remaining record types for that
  hostname are skipped; NXDOMAIN/NoAnswer and other DNS errors are logged
  or ignored and resolution continues.
  NOTE: the parameter name shadows the builtin `list`; kept unchanged to
  preserve the call signature.
  """
  resolver = dns.resolver.Resolver()
  hostnames = []
  networks = []
  for key in list:
    if isIpNetwork(key):
      networks.append(key)
    else:
      hostnames.append(key)
  for hostname in hostnames:
    hostname_ips = []
    for rdtype in ['A', 'AAAA']:
      try:
        answer = resolver.resolve(qname=hostname, rdtype=rdtype, lifetime=3)
      except dns.exception.Timeout:
        logInfo('Hostname %s timedout on resolve' % hostname)
        break
      except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
        continue
      except dns.exception.DNSException as dnsexception:
        logInfo('%s' % dnsexception)
        continue
      for rdata in answer:
        hostname_ips.append(rdata.to_text())
    networks.extend(hostname_ips)
  return set(networks)
|
||||
|
||||
def whitelistUpdate():
  """Worker thread: refresh the in-memory WHITELIST from the Redis hash
  F2B_WHITELIST roughly once a minute (hostnames are resolved to IPs).
  The swap happens under the lock so ban() sees a consistent snapshot.
  """
  global lock
  global quit_now
  global WHITELIST
  while not quit_now:
    start_time = time.time()
    # Renamed from `list`: shadowing the builtin hid it for the whole scope
    whitelist_hash = r.hgetall('F2B_WHITELIST')
    new_whitelist = []
    if whitelist_hash:
      new_whitelist = genNetworkList(whitelist_hash)
    with lock:
      if Counter(new_whitelist) != Counter(WHITELIST):
        WHITELIST = new_whitelist
        logInfo('Whitelist was changed, it has %s entries' % len(WHITELIST))
    # Align wake-ups to a ~60s cadence regardless of work duration
    time.sleep(60.0 - ((time.time() - start_time) % 60.0))
||||
|
||||
def blacklistUpdate():
  """Worker thread: refresh BLACKLIST from the Redis hash F2B_BLACKLIST
  roughly once a minute and apply the delta as permanent bans: new entries
  get permBan(), removed entries get permBan(unban=True).
  """
  global quit_now
  global BLACKLIST
  while not quit_now:
    start_time = time.time()
    # Renamed from `list`: shadowing the builtin hid it for the whole scope
    blacklist_hash = r.hgetall('F2B_BLACKLIST')
    new_blacklist = []
    if blacklist_hash:
      new_blacklist = genNetworkList(blacklist_hash)
    if Counter(new_blacklist) != Counter(BLACKLIST):
      addban = set(new_blacklist).difference(BLACKLIST)
      delban = set(BLACKLIST).difference(new_blacklist)
      BLACKLIST = new_blacklist
      logInfo('Blacklist was changed, it has %s entries' % len(BLACKLIST))
      if addban:
        for net in addban:
          permBan(net=net)
      if delban:
        for net in delban:
          permBan(net=net, unban=True)
    # Align wake-ups to a ~60s cadence regardless of work duration
    time.sleep(60.0 - ((time.time() - start_time) % 60.0))
|
||||
|
||||
def initChain():
  """Create the MAILCOW chain (IPv4 and IPv6 filter tables) if missing and
  make FORWARD and INPUT jump into it for all traffic.  Idempotent: existing
  chains/rules are left untouched.
  """
  # Is called before threads start, no locking
  print("Initializing mailcow netfilter chain")
  # IPv4
  if not iptc.Chain(iptc.Table(iptc.Table.FILTER), "MAILCOW") in iptc.Table(iptc.Table.FILTER).chains:
    iptc.Table(iptc.Table.FILTER).create_chain("MAILCOW")
  for c in ['FORWARD', 'INPUT']:
    chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), c)
    rule = iptc.Rule()
    rule.src = '0.0.0.0/0'
    rule.dst = '0.0.0.0/0'
    target = iptc.Target(rule, "MAILCOW")
    rule.target = target
    if rule not in chain.rules:
      chain.insert_rule(rule)
  # IPv6
  if not iptc.Chain(iptc.Table6(iptc.Table6.FILTER), "MAILCOW") in iptc.Table6(iptc.Table6.FILTER).chains:
    iptc.Table6(iptc.Table6.FILTER).create_chain("MAILCOW")
  for c in ['FORWARD', 'INPUT']:
    chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), c)
    rule = iptc.Rule6()
    rule.src = '::/0'
    rule.dst = '::/0'
    target = iptc.Target(rule, "MAILCOW")
    rule.target = target
    if rule not in chain.rules:
      chain.insert_rule(rule)
|
||||
|
||||
if __name__ == '__main__':

  # In case a previous session was killed without cleanup
  clear()
  # Reinit MAILCOW chain
  initChain()

  # Log consumer: bans addresses matched by the F2B regexes
  watch_thread = Thread(target=watch)
  watch_thread.daemon = True
  watch_thread.start()

  # Optional IPv4 SNAT maintenance ('n' disables it)
  if os.getenv('SNAT_TO_SOURCE') and os.getenv('SNAT_TO_SOURCE') != 'n':
    try:
      snat_ip = os.getenv('SNAT_TO_SOURCE')
      snat_ipo = ipaddress.ip_address(snat_ip)
      if type(snat_ipo) is ipaddress.IPv4Address:
        snat4_thread = Thread(target=snat4,args=(snat_ip,))
        snat4_thread.daemon = True
        snat4_thread.start()
    except ValueError:
      print(os.getenv('SNAT_TO_SOURCE') + ' is not a valid IPv4 address')

  # Optional IPv6 SNAT maintenance ('n' disables it)
  if os.getenv('SNAT6_TO_SOURCE') and os.getenv('SNAT6_TO_SOURCE') != 'n':
    try:
      snat_ip = os.getenv('SNAT6_TO_SOURCE')
      snat_ipo = ipaddress.ip_address(snat_ip)
      if type(snat_ipo) is ipaddress.IPv6Address:
        snat6_thread = Thread(target=snat6,args=(snat_ip,))
        snat6_thread.daemon = True
        snat6_thread.start()
    except ValueError:
      print(os.getenv('SNAT6_TO_SOURCE') + ' is not a valid IPv6 address')

  # Expires bans and processes manual unban requests
  autopurge_thread = Thread(target=autopurge)
  autopurge_thread.daemon = True
  autopurge_thread.start()

  # Watchdog for MAILCOW chain presence/position
  mailcowchainwatch_thread = Thread(target=mailcowChainOrder)
  mailcowchainwatch_thread.daemon = True
  mailcowchainwatch_thread.start()

  # Periodic permanent-ban sync from Redis
  blacklistupdate_thread = Thread(target=blacklistUpdate)
  blacklistupdate_thread.daemon = True
  blacklistupdate_thread.start()

  # Periodic whitelist sync from Redis
  whitelistupdate_thread = Thread(target=whitelistUpdate)
  whitelistupdate_thread.daemon = True
  whitelistupdate_thread.start()

  # Clean shutdown: SIGTERM flips quit_now, atexit removes all rules
  signal.signal(signal.SIGTERM, quit)
  atexit.register(clear)

  # All workers are daemon threads; this loop just waits for the stop flag
  while not quit_now:
    time.sleep(0.5)

  sys.exit(exit_code)
|
||||
@@ -1,6 +1,8 @@
|
||||
FROM alpine:3.17
|
||||
LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
|
||||
FROM alpine:3.20
|
||||
|
||||
LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"
|
||||
|
||||
ARG PIP_BREAK_SYSTEM_PACKAGES=1
|
||||
WORKDIR /app
|
||||
|
||||
#RUN addgroup -S olefy && adduser -S olefy -G olefy \
|
||||
|
||||
@@ -1,18 +1,19 @@
|
||||
FROM php:8.2-fpm-alpine3.17
|
||||
LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
|
||||
FROM php:8.2-fpm-alpine3.18
|
||||
|
||||
# renovate: datasource=github-tags depName=krakjoe/apcu versioning=semver-coerced
|
||||
ARG APCU_PECL_VERSION=5.1.22
|
||||
# renovate: datasource=github-tags depName=Imagick/imagick versioning=semver-coerced
|
||||
LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"
|
||||
|
||||
# renovate: datasource=github-tags depName=krakjoe/apcu versioning=semver-coerced extractVersion=^v(?<version>.*)$
|
||||
ARG APCU_PECL_VERSION=5.1.23
|
||||
# renovate: datasource=github-tags depName=Imagick/imagick versioning=semver-coerced extractVersion=(?<version>.*)$
|
||||
ARG IMAGICK_PECL_VERSION=3.7.0
|
||||
# renovate: datasource=github-tags depName=php/pecl-mail-mailparse versioning=semver-coerced
|
||||
ARG MAILPARSE_PECL_VERSION=3.1.4
|
||||
# renovate: datasource=github-tags depName=php-memcached-dev/php-memcached versioning=semver-coerced
|
||||
# renovate: datasource=github-tags depName=php/pecl-mail-mailparse versioning=semver-coerced extractVersion=^v(?<version>.*)$
|
||||
ARG MAILPARSE_PECL_VERSION=3.1.6
|
||||
# renovate: datasource=github-tags depName=php-memcached-dev/php-memcached versioning=semver-coerced extractVersion=^v(?<version>.*)$
|
||||
ARG MEMCACHED_PECL_VERSION=3.2.0
|
||||
# renovate: datasource=github-tags depName=phpredis/phpredis versioning=semver-coerced
|
||||
ARG REDIS_PECL_VERSION=5.3.7
|
||||
# renovate: datasource=github-tags depName=composer/composer versioning=semver-coerced
|
||||
ARG COMPOSER_VERSION=2.5.5
|
||||
# renovate: datasource=github-tags depName=phpredis/phpredis versioning=semver-coerced extractVersion=(?<version>.*)$
|
||||
ARG REDIS_PECL_VERSION=6.0.2
|
||||
# renovate: datasource=github-tags depName=composer/composer versioning=semver-coerced extractVersion=(?<version>.*)$
|
||||
ARG COMPOSER_VERSION=2.6.6
|
||||
|
||||
RUN apk add -U --no-cache autoconf \
|
||||
aspell-dev \
|
||||
@@ -110,4 +111,4 @@ COPY ./docker-entrypoint.sh /
|
||||
|
||||
ENTRYPOINT ["/docker-entrypoint.sh"]
|
||||
|
||||
CMD ["php-fpm"]
|
||||
CMD ["php-fpm"]
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
function array_by_comma { local IFS=","; echo "$*"; }
|
||||
|
||||
# Wait for containers
|
||||
while ! mysqladmin status --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
|
||||
while ! mariadb-admin status --ssl=false --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
|
||||
echo "Waiting for SQL..."
|
||||
sleep 2
|
||||
done
|
||||
@@ -23,7 +23,8 @@ done
|
||||
# Check mysql_upgrade (master and slave)
|
||||
CONTAINER_ID=
|
||||
until [[ ! -z "${CONTAINER_ID}" ]] && [[ "${CONTAINER_ID}" =~ ^[[:alnum:]]*$ ]]; do
|
||||
CONTAINER_ID=$(curl --silent --insecure https://dockerapi/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" 2> /dev/null | jq -rc "select( .name | tostring | contains(\"mysql-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" 2> /dev/null)
|
||||
CONTAINER_ID=$(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" 2> /dev/null | jq -rc "select( .name | tostring | contains(\"mysql-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" 2> /dev/null)
|
||||
echo "Could not get mysql-mailcow container id... trying again"
|
||||
sleep 2
|
||||
done
|
||||
echo "MySQL @ ${CONTAINER_ID}"
|
||||
@@ -34,7 +35,7 @@ until [[ ${SQL_UPGRADE_STATUS} == 'success' ]]; do
|
||||
echo "Tried to upgrade MySQL and failed, giving up after ${SQL_LOOP_C} retries and starting container (oops, not good)"
|
||||
break
|
||||
fi
|
||||
SQL_FULL_UPGRADE_RETURN=$(curl --silent --insecure -XPOST https://dockerapi/containers/${CONTAINER_ID}/exec -d '{"cmd":"system", "task":"mysql_upgrade"}' --silent -H 'Content-type: application/json')
|
||||
SQL_FULL_UPGRADE_RETURN=$(curl --silent --insecure -XPOST https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${CONTAINER_ID}/exec -d '{"cmd":"system", "task":"mysql_upgrade"}' --silent -H 'Content-type: application/json')
|
||||
SQL_UPGRADE_STATUS=$(echo ${SQL_FULL_UPGRADE_RETURN} | jq -r .type)
|
||||
SQL_LOOP_C=$((SQL_LOOP_C+1))
|
||||
echo "SQL upgrade iteration #${SQL_LOOP_C}"
|
||||
@@ -43,7 +44,7 @@ until [[ ${SQL_UPGRADE_STATUS} == 'success' ]]; do
|
||||
echo "MySQL applied an upgrade, debug output:"
|
||||
echo ${SQL_FULL_UPGRADE_RETURN}
|
||||
sleep 3
|
||||
while ! mysqladmin status --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
|
||||
while ! mariadb-admin status --ssl=false --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
|
||||
echo "Waiting for SQL to return, please wait"
|
||||
sleep 2
|
||||
done
|
||||
@@ -59,12 +60,12 @@ done
|
||||
|
||||
# doing post-installation stuff, if SQL was upgraded (master and slave)
|
||||
if [ ${SQL_CHANGED} -eq 1 ]; then
|
||||
POSTFIX=$(curl --silent --insecure https://dockerapi/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" 2> /dev/null | jq -rc "select( .name | tostring | contains(\"postfix-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" 2> /dev/null)
|
||||
POSTFIX=$(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" 2> /dev/null | jq -rc "select( .name | tostring | contains(\"postfix-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" 2> /dev/null)
|
||||
if [[ -z "${POSTFIX}" ]] || ! [[ "${POSTFIX}" =~ ^[[:alnum:]]*$ ]]; then
|
||||
echo "Could not determine Postfix container ID, skipping Postfix restart."
|
||||
else
|
||||
echo "Restarting Postfix"
|
||||
curl -X POST --silent --insecure https://dockerapi/containers/${POSTFIX}/restart | jq -r '.msg'
|
||||
curl -X POST --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${POSTFIX}/restart | jq -r '.msg'
|
||||
echo "Sleeping 5 seconds..."
|
||||
sleep 5
|
||||
fi
|
||||
@@ -73,7 +74,7 @@ fi
|
||||
# Check mysql tz import (master and slave)
|
||||
TZ_CHECK=$(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT CONVERT_TZ('2019-11-02 23:33:00','Europe/Berlin','UTC') AS time;" -BN 2> /dev/null)
|
||||
if [[ -z ${TZ_CHECK} ]] || [[ "${TZ_CHECK}" == "NULL" ]]; then
|
||||
SQL_FULL_TZINFO_IMPORT_RETURN=$(curl --silent --insecure -XPOST https://dockerapi/containers/${CONTAINER_ID}/exec -d '{"cmd":"system", "task":"mysql_tzinfo_to_sql"}' --silent -H 'Content-type: application/json')
|
||||
SQL_FULL_TZINFO_IMPORT_RETURN=$(curl --silent --insecure -XPOST https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${CONTAINER_ID}/exec -d '{"cmd":"system", "task":"mysql_tzinfo_to_sql"}' --silent -H 'Content-type: application/json')
|
||||
echo "MySQL mysql_tzinfo_to_sql - debug output:"
|
||||
echo ${SQL_FULL_TZINFO_IMPORT_RETURN}
|
||||
fi
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
FROM debian:bullseye-slim
|
||||
LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
|
||||
FROM debian:bookworm-slim
|
||||
|
||||
LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"
|
||||
|
||||
ARG DEBIAN_FRONTEND=noninteractive
|
||||
ENV LC_ALL C
|
||||
@@ -17,10 +18,10 @@ RUN groupadd -g 102 postfix \
|
||||
ca-certificates \
|
||||
curl \
|
||||
dirmngr \
|
||||
dnsutils \
|
||||
dnsutils \
|
||||
gnupg \
|
||||
libsasl2-modules \
|
||||
mariadb-client \
|
||||
mariadb-client \
|
||||
perl \
|
||||
postfix \
|
||||
postfix-mysql \
|
||||
@@ -32,8 +33,7 @@ RUN groupadd -g 102 postfix \
|
||||
syslog-ng \
|
||||
syslog-ng-core \
|
||||
syslog-ng-mod-redis \
|
||||
tzdata \
|
||||
whois \
|
||||
tzdata \
|
||||
&& rm -rf /var/lib/apt/lists/* \
|
||||
&& touch /etc/default/locale \
|
||||
&& printf '#!/bin/bash\n/usr/sbin/postconf -c /opt/postfix/conf "$@"' > /usr/local/sbin/postconf \
|
||||
@@ -60,4 +60,4 @@ EXPOSE 588
|
||||
|
||||
ENTRYPOINT ["/docker-entrypoint.sh"]
|
||||
|
||||
CMD exec /usr/bin/supervisord -c /etc/supervisor/supervisord.conf
|
||||
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]
|
||||
|
||||
@@ -5,7 +5,7 @@ trap "postfix stop" EXIT
|
||||
[[ ! -d /opt/postfix/conf/sql/ ]] && mkdir -p /opt/postfix/conf/sql/
|
||||
|
||||
# Wait for MySQL to warm-up
|
||||
while ! mysqladmin status --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
|
||||
while ! mariadb-admin status --ssl=false --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
|
||||
echo "Waiting for database to come up..."
|
||||
sleep 2
|
||||
done
|
||||
@@ -393,117 +393,95 @@ query = SELECT goto FROM spamalias
|
||||
AND validity >= UNIX_TIMESTAMP()
|
||||
EOF
|
||||
|
||||
if [ -n "$SPAMHAUS_DQS_KEY" ]; then
|
||||
echo -e "\e[32mDetected SPAMHAUS_DQS_KEY variable from mailcow.conf...\e[0m"
|
||||
echo -e "\e[33mUsing DQS Blocklists from Spamhaus!\e[0m"
|
||||
if [ ! -f /opt/postfix/conf/dns_blocklists.cf ]; then
|
||||
cat <<EOF > /opt/postfix/conf/dns_blocklists.cf
|
||||
# Autogenerated by mailcow
|
||||
postscreen_dnsbl_sites = wl.mailspike.net=127.0.0.[18;19;20]*-2
|
||||
hostkarma.junkemailfilter.com=127.0.0.1*-2
|
||||
list.dnswl.org=127.0.[0..255].0*-2
|
||||
list.dnswl.org=127.0.[0..255].1*-4
|
||||
list.dnswl.org=127.0.[0..255].2*-6
|
||||
list.dnswl.org=127.0.[0..255].3*-8
|
||||
ix.dnsbl.manitu.net*2
|
||||
bl.spamcop.net*2
|
||||
bl.suomispam.net*2
|
||||
hostkarma.junkemailfilter.com=127.0.0.2*3
|
||||
hostkarma.junkemailfilter.com=127.0.0.4*2
|
||||
hostkarma.junkemailfilter.com=127.0.1.2*1
|
||||
backscatter.spameatingmonkey.net*2
|
||||
bl.ipv6.spameatingmonkey.net*2
|
||||
bl.spameatingmonkey.net*2
|
||||
b.barracudacentral.org=127.0.0.2*7
|
||||
bl.mailspike.net=127.0.0.2*5
|
||||
bl.mailspike.net=127.0.0.[10;11;12]*4
|
||||
dnsbl.sorbs.net=127.0.0.10*8
|
||||
dnsbl.sorbs.net=127.0.0.5*6
|
||||
dnsbl.sorbs.net=127.0.0.7*3
|
||||
dnsbl.sorbs.net=127.0.0.8*2
|
||||
dnsbl.sorbs.net=127.0.0.6*2
|
||||
dnsbl.sorbs.net=127.0.0.9*2
|
||||
${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net=127.0.0.[4..7]*6
|
||||
${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.org=127.0.0.[10;11]*8
|
||||
${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.org=127.0.0.3*4
|
||||
${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.org=127.0.0.2*3
|
||||
${SPAMHAUS_DQS_KEY}.dbl.dq.spamhaus.net=127.0.0.3*4
|
||||
${SPAMHAUS_DQS_KEY}.zrd.dq.spamhaus.net=127.0.0.2*3
|
||||
# This file can be edited.
|
||||
# Delete this file and restart postfix container to revert any changes.
|
||||
postscreen_dnsbl_sites = wl.mailspike.net=127.0.0.[18;19;20]*-2
|
||||
hostkarma.junkemailfilter.com=127.0.0.1*-2
|
||||
list.dnswl.org=127.0.[0..255].0*-2
|
||||
list.dnswl.org=127.0.[0..255].1*-4
|
||||
list.dnswl.org=127.0.[0..255].2*-6
|
||||
list.dnswl.org=127.0.[0..255].3*-8
|
||||
ix.dnsbl.manitu.net*2
|
||||
bl.spamcop.net*2
|
||||
bl.suomispam.net*2
|
||||
hostkarma.junkemailfilter.com=127.0.0.2*3
|
||||
hostkarma.junkemailfilter.com=127.0.0.4*2
|
||||
hostkarma.junkemailfilter.com=127.0.1.2*1
|
||||
backscatter.spameatingmonkey.net*2
|
||||
bl.ipv6.spameatingmonkey.net*2
|
||||
bl.spameatingmonkey.net*2
|
||||
b.barracudacentral.org=127.0.0.2*7
|
||||
bl.mailspike.net=127.0.0.2*5
|
||||
bl.mailspike.net=127.0.0.[10;11;12]*4
|
||||
EOF
|
||||
fi
|
||||
DNSBL_CONFIG=$(grep -v '^#' /opt/postfix/conf/dns_blocklists.cf | grep '\S')
|
||||
|
||||
if [ ! -z "$DNSBL_CONFIG" ]; then
|
||||
echo -e "\e[33mChecking if ASN for your IP is listed for Spamhaus Bad ASN List...\e[0m"
|
||||
if [ -n "$SPAMHAUS_DQS_KEY" ]; then
|
||||
echo -e "\e[32mDetected SPAMHAUS_DQS_KEY variable from mailcow.conf...\e[0m"
|
||||
echo -e "\e[33mUsing DQS Blocklists from Spamhaus!\e[0m"
|
||||
SPAMHAUS_DNSBL_CONFIG=$(cat <<EOF
|
||||
${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net=127.0.0.[4..7]*6
|
||||
${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net=127.0.0.[10;11]*8
|
||||
${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net=127.0.0.3*4
|
||||
${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net=127.0.0.2*3
|
||||
postscreen_dnsbl_reply_map = texthash:/opt/postfix/conf/dnsbl_reply.map
|
||||
EOF
|
||||
|
||||
else
|
||||
if curl -s http://fuzzy.mailcow.email/asn_list.txt | grep $(whois -h whois.radb.net $(curl -s http://ipv4.mailcow.email) | grep -i origin | tr -s " " | cut -d " " -f2 | head -1) > /dev/null; then
|
||||
echo -e "\e[31mThe AS of your IP is listed as a banned AS from Spamhaus!\e[0m"
|
||||
echo -e "\e[33mNo SPAMHAUS_DQS_KEY found... Skipping Spamhaus blocklists entirely!\e[0m"
|
||||
cat <<EOF > /opt/postfix/conf/dns_blocklists.cf
|
||||
# Autogenerated by mailcow
|
||||
postscreen_dnsbl_sites = wl.mailspike.net=127.0.0.[18;19;20]*-2
|
||||
hostkarma.junkemailfilter.com=127.0.0.1*-2
|
||||
list.dnswl.org=127.0.[0..255].0*-2
|
||||
list.dnswl.org=127.0.[0..255].1*-4
|
||||
list.dnswl.org=127.0.[0..255].2*-6
|
||||
list.dnswl.org=127.0.[0..255].3*-8
|
||||
ix.dnsbl.manitu.net*2
|
||||
bl.spamcop.net*2
|
||||
bl.suomispam.net*2
|
||||
hostkarma.junkemailfilter.com=127.0.0.2*3
|
||||
hostkarma.junkemailfilter.com=127.0.0.4*2
|
||||
hostkarma.junkemailfilter.com=127.0.1.2*1
|
||||
backscatter.spameatingmonkey.net*2
|
||||
bl.ipv6.spameatingmonkey.net*2
|
||||
bl.spameatingmonkey.net*2
|
||||
b.barracudacentral.org=127.0.0.2*7
|
||||
bl.mailspike.net=127.0.0.2*5
|
||||
bl.mailspike.net=127.0.0.[10;11;12]*4
|
||||
dnsbl.sorbs.net=127.0.0.10*8
|
||||
dnsbl.sorbs.net=127.0.0.5*6
|
||||
dnsbl.sorbs.net=127.0.0.7*3
|
||||
dnsbl.sorbs.net=127.0.0.8*2
|
||||
dnsbl.sorbs.net=127.0.0.6*2
|
||||
dnsbl.sorbs.net=127.0.0.9*2
|
||||
cat <<EOF > /opt/postfix/conf/dnsbl_reply.map
|
||||
# Autogenerated by mailcow, using Spamhaus DQS reply domains
|
||||
${SPAMHAUS_DQS_KEY}.sbl.dq.spamhaus.net sbl.spamhaus.org
|
||||
${SPAMHAUS_DQS_KEY}.xbl.dq.spamhaus.net xbl.spamhaus.org
|
||||
${SPAMHAUS_DQS_KEY}.pbl.dq.spamhaus.net pbl.spamhaus.org
|
||||
${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net zen.spamhaus.org
|
||||
${SPAMHAUS_DQS_KEY}.dbl.dq.spamhaus.net dbl.spamhaus.org
|
||||
${SPAMHAUS_DQS_KEY}.zrd.dq.spamhaus.net zrd.spamhaus.org
|
||||
EOF
|
||||
)
|
||||
else
|
||||
echo -e "\e[32mThe AS of your IP is NOT listed as a banned AS from Spamhaus!\e[0m"
|
||||
echo -e "\e[33mUsing the open Spamhaus blocklists.\e[0m"
|
||||
cat <<EOF > /opt/postfix/conf/dns_blocklists.cf
|
||||
# Autogenerated by mailcow
|
||||
postscreen_dnsbl_sites = wl.mailspike.net=127.0.0.[18;19;20]*-2
|
||||
hostkarma.junkemailfilter.com=127.0.0.1*-2
|
||||
list.dnswl.org=127.0.[0..255].0*-2
|
||||
list.dnswl.org=127.0.[0..255].1*-4
|
||||
list.dnswl.org=127.0.[0..255].2*-6
|
||||
list.dnswl.org=127.0.[0..255].3*-8
|
||||
ix.dnsbl.manitu.net*2
|
||||
bl.spamcop.net*2
|
||||
bl.suomispam.net*2
|
||||
hostkarma.junkemailfilter.com=127.0.0.2*3
|
||||
hostkarma.junkemailfilter.com=127.0.0.4*2
|
||||
hostkarma.junkemailfilter.com=127.0.1.2*1
|
||||
backscatter.spameatingmonkey.net*2
|
||||
bl.ipv6.spameatingmonkey.net*2
|
||||
bl.spameatingmonkey.net*2
|
||||
b.barracudacentral.org=127.0.0.2*7
|
||||
bl.mailspike.net=127.0.0.2*5
|
||||
bl.mailspike.net=127.0.0.[10;11;12]*4
|
||||
dnsbl.sorbs.net=127.0.0.10*8
|
||||
dnsbl.sorbs.net=127.0.0.5*6
|
||||
dnsbl.sorbs.net=127.0.0.7*3
|
||||
dnsbl.sorbs.net=127.0.0.8*2
|
||||
dnsbl.sorbs.net=127.0.0.6*2
|
||||
dnsbl.sorbs.net=127.0.0.9*2
|
||||
zen.spamhaus.org=127.0.0.[10;11]*8
|
||||
zen.spamhaus.org=127.0.0.[4..7]*6
|
||||
zen.spamhaus.org=127.0.0.3*4
|
||||
zen.spamhaus.org=127.0.0.2*3
|
||||
if [ -f "/opt/postfix/conf/dnsbl_reply.map" ]; then
|
||||
rm /opt/postfix/conf/dnsbl_reply.map
|
||||
fi
|
||||
response=$(curl --connect-timeout 15 --max-time 30 -s -o /dev/null -w "%{http_code}" "https://asn-check.mailcow.email")
|
||||
if [ "$response" -eq 503 ]; then
|
||||
echo -e "\e[31mThe AS of your IP is listed as a banned AS from Spamhaus!\e[0m"
|
||||
echo -e "\e[33mNo SPAMHAUS_DQS_KEY found... Skipping Spamhaus blocklists entirely!\e[0m"
|
||||
SPAMHAUS_DNSBL_CONFIG=""
|
||||
elif [ "$response" -eq 200 ]; then
|
||||
echo -e "\e[32mThe AS of your IP is NOT listed as a banned AS from Spamhaus!\e[0m"
|
||||
echo -e "\e[33mUsing the open Spamhaus blocklists.\e[0m"
|
||||
SPAMHAUS_DNSBL_CONFIG=$(cat <<EOF
|
||||
zen.spamhaus.org=127.0.0.[10;11]*8
|
||||
zen.spamhaus.org=127.0.0.[4..7]*6
|
||||
zen.spamhaus.org=127.0.0.3*4
|
||||
zen.spamhaus.org=127.0.0.2*3
|
||||
EOF
|
||||
)
|
||||
|
||||
else
|
||||
echo -e "\e[31mWe couldn't determine your AS... (maybe DNS/Network issue?) Response Code: $response\e[0m"
|
||||
echo -e "\e[33mDeactivating Spamhaus DNS Blocklists to be on the safe site!\e[0m"
|
||||
SPAMHAUS_DNSBL_CONFIG=""
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
sed -i '/User overrides/q' /opt/postfix/conf/main.cf
|
||||
# Reset main.cf
|
||||
sed -i '/Overrides/q' /opt/postfix/conf/main.cf
|
||||
echo >> /opt/postfix/conf/main.cf
|
||||
# Append postscreen dnsbl sites to main.cf
|
||||
if [ ! -z "$DNSBL_CONFIG" ]; then
|
||||
echo -e "${DNSBL_CONFIG}\n${SPAMHAUS_DNSBL_CONFIG}" >> /opt/postfix/conf/main.cf
|
||||
fi
|
||||
# Append user overrides
|
||||
echo -e "\n# User Overrides" >> /opt/postfix/conf/main.cf
|
||||
touch /opt/postfix/conf/extra.cf
|
||||
sed -i '/myhostname/d' /opt/postfix/conf/extra.cf
|
||||
sed -i '/\$myhostname/! { /myhostname/d }' /opt/postfix/conf/extra.cf
|
||||
echo -e "myhostname = ${MAILCOW_HOSTNAME}\n$(cat /opt/postfix/conf/extra.cf)" > /opt/postfix/conf/extra.cf
|
||||
|
||||
cat /opt/postfix/conf/extra.cf >> /opt/postfix/conf/main.cf
|
||||
|
||||
if [ ! -f /opt/postfix/conf/custom_transport.pcre ]; then
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
@version: 3.28
|
||||
@version: 3.38
|
||||
@include "scl.conf"
|
||||
options {
|
||||
chain_hostnames(off);
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
@version: 3.28
|
||||
@version: 3.38
|
||||
@include "scl.conf"
|
||||
options {
|
||||
chain_hostnames(off);
|
||||
|
||||
@@ -1,9 +1,10 @@
|
||||
FROM debian:bullseye-slim
|
||||
LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
|
||||
FROM debian:bookworm-slim
|
||||
LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"
|
||||
|
||||
ARG DEBIAN_FRONTEND=noninteractive
|
||||
ARG CODENAME=bullseye
|
||||
ENV LC_ALL C
|
||||
ARG RSPAMD_VER=rspamd_3.9.1-1~82f43560f
|
||||
ARG CODENAME=bookworm
|
||||
ENV LC_ALL=C
|
||||
|
||||
RUN apt-get update && apt-get install -y \
|
||||
tzdata \
|
||||
@@ -11,12 +12,16 @@ RUN apt-get update && apt-get install -y \
|
||||
gnupg2 \
|
||||
apt-transport-https \
|
||||
dnsutils \
|
||||
netcat \
|
||||
&& apt-key adv --fetch-keys https://rspamd.com/apt-stable/gpg.key \
|
||||
&& echo "deb [arch=amd64] https://rspamd.com/apt-stable/ $CODENAME main" > /etc/apt/sources.list.d/rspamd.list \
|
||||
&& apt-get update \
|
||||
&& apt-get --no-install-recommends -y install rspamd redis-tools procps nano \
|
||||
&& rm -rf /var/lib/apt/lists/* \
|
||||
netcat-traditional \
|
||||
wget \
|
||||
redis-tools \
|
||||
procps \
|
||||
nano \
|
||||
lua-cjson \
|
||||
&& arch=$(arch | sed s/aarch64/arm64/ | sed s/x86_64/amd64/) \
|
||||
&& wget -P /tmp https://rspamd.com/apt-stable/pool/main/r/rspamd/${RSPAMD_VER}~${CODENAME}_${arch}.deb\
|
||||
&& apt install -y /tmp/${RSPAMD_VER}~${CODENAME}_${arch}.deb \
|
||||
&& rm -rf /var/lib/apt/lists/* /tmp/*\
|
||||
&& apt-get autoremove --purge \
|
||||
&& apt-get clean \
|
||||
&& mkdir -p /run/rspamd \
|
||||
@@ -25,7 +30,6 @@ RUN apt-get update && apt-get install -y \
|
||||
&& sed -i 's/#analysis_keyword_table > 0/analysis_cat_table.macro_exist == "M"/g' /usr/share/rspamd/lualib/lua_scanners/oletools.lua
|
||||
|
||||
COPY settings.conf /etc/rspamd/settings.conf
|
||||
COPY metadata_exporter.lua /usr/share/rspamd/plugins/metadata_exporter.lua
|
||||
COPY set_worker_password.sh /set_worker_password.sh
|
||||
COPY docker-entrypoint.sh /docker-entrypoint.sh
|
||||
|
||||
|
||||
@@ -79,6 +79,9 @@ EOF
|
||||
redis-cli -h redis-mailcow SLAVEOF NO ONE
|
||||
fi
|
||||
|
||||
# Provide additional lua modules
|
||||
ln -s /usr/lib/$(uname -m)-linux-gnu/liblua5.1-cjson.so.0.0.0 /usr/lib/rspamd/cjson.so
|
||||
|
||||
chown -R _rspamd:_rspamd /var/lib/rspamd \
|
||||
/etc/rspamd/local.d \
|
||||
/etc/rspamd/override.d \
|
||||
@@ -121,4 +124,190 @@ for file in /hooks/*; do
|
||||
fi
|
||||
done
|
||||
|
||||
# If DQS KEY is set in mailcow.conf add Spamhaus DQS RBLs
|
||||
if [[ ! -z ${SPAMHAUS_DQS_KEY} ]]; then
|
||||
cat <<EOF > /etc/rspamd/custom/dqs-rbl.conf
|
||||
# Autogenerated by mailcow. DO NOT TOUCH!
|
||||
spamhaus {
|
||||
rbl = "${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net";
|
||||
from = false;
|
||||
}
|
||||
spamhaus_from {
|
||||
from = true;
|
||||
received = false;
|
||||
rbl = "${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net";
|
||||
returncodes {
|
||||
SPAMHAUS_ZEN = [ "127.0.0.2", "127.0.0.3", "127.0.0.4", "127.0.0.5", "127.0.0.6", "127.0.0.7", "127.0.0.9", "127.0.0.10", "127.0.0.11" ];
|
||||
}
|
||||
}
|
||||
spamhaus_authbl_received {
|
||||
# Check if the sender client is listed in AuthBL (AuthBL is *not* part of ZEN)
|
||||
rbl = "${SPAMHAUS_DQS_KEY}.authbl.dq.spamhaus.net";
|
||||
from = false;
|
||||
received = true;
|
||||
ipv6 = true;
|
||||
returncodes {
|
||||
SH_AUTHBL_RECEIVED = "127.0.0.20"
|
||||
}
|
||||
}
|
||||
spamhaus_dbl {
|
||||
# Add checks on the HELO string
|
||||
rbl = "${SPAMHAUS_DQS_KEY}.dbl.dq.spamhaus.net";
|
||||
helo = true;
|
||||
rdns = true;
|
||||
dkim = true;
|
||||
disable_monitoring = true;
|
||||
returncodes {
|
||||
RBL_DBL_SPAM = "127.0.1.2";
|
||||
RBL_DBL_PHISH = "127.0.1.4";
|
||||
RBL_DBL_MALWARE = "127.0.1.5";
|
||||
RBL_DBL_BOTNET = "127.0.1.6";
|
||||
RBL_DBL_ABUSED_SPAM = "127.0.1.102";
|
||||
RBL_DBL_ABUSED_PHISH = "127.0.1.104";
|
||||
RBL_DBL_ABUSED_MALWARE = "127.0.1.105";
|
||||
RBL_DBL_ABUSED_BOTNET = "127.0.1.106";
|
||||
RBL_DBL_DONT_QUERY_IPS = "127.0.1.255";
|
||||
}
|
||||
}
|
||||
spamhaus_dbl_fullurls {
|
||||
ignore_defaults = true;
|
||||
no_ip = true;
|
||||
rbl = "${SPAMHAUS_DQS_KEY}.dbl.dq.spamhaus.net";
|
||||
selector = 'urls:get_host'
|
||||
disable_monitoring = true;
|
||||
returncodes {
|
||||
DBLABUSED_SPAM_FULLURLS = "127.0.1.102";
|
||||
DBLABUSED_PHISH_FULLURLS = "127.0.1.104";
|
||||
DBLABUSED_MALWARE_FULLURLS = "127.0.1.105";
|
||||
DBLABUSED_BOTNET_FULLURLS = "127.0.1.106";
|
||||
}
|
||||
}
|
||||
spamhaus_zrd {
|
||||
# Add checks on the HELO string also for DQS
|
||||
rbl = "${SPAMHAUS_DQS_KEY}.zrd.dq.spamhaus.net";
|
||||
helo = true;
|
||||
rdns = true;
|
||||
dkim = true;
|
||||
disable_monitoring = true;
|
||||
returncodes {
|
||||
RBL_ZRD_VERY_FRESH_DOMAIN = ["127.0.2.2", "127.0.2.3", "127.0.2.4"];
|
||||
RBL_ZRD_FRESH_DOMAIN = [
|
||||
"127.0.2.5", "127.0.2.6", "127.0.2.7", "127.0.2.8", "127.0.2.9", "127.0.2.10", "127.0.2.11", "127.0.2.12", "127.0.2.13", "127.0.2.14", "127.0.2.15", "127.0.2.16", "127.0.2.17", "127.0.2.18", "127.0.2.19", "127.0.2.20", "127.0.2.21", "127.0.2.22", "127.0.2.23", "127.0.2.24"
|
||||
];
|
||||
RBL_ZRD_DONT_QUERY_IPS = "127.0.2.255";
|
||||
}
|
||||
}
|
||||
"SPAMHAUS_ZEN_URIBL" {
|
||||
enabled = true;
|
||||
rbl = "${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net";
|
||||
resolve_ip = true;
|
||||
checks = ['urls'];
|
||||
replyto = true;
|
||||
emails = true;
|
||||
ipv4 = true;
|
||||
ipv6 = true;
|
||||
emails_domainonly = true;
|
||||
returncodes {
|
||||
URIBL_SBL = "127.0.0.2";
|
||||
URIBL_SBL_CSS = "127.0.0.3";
|
||||
URIBL_XBL = ["127.0.0.4", "127.0.0.5", "127.0.0.6", "127.0.0.7"];
|
||||
URIBL_PBL = ["127.0.0.10", "127.0.0.11"];
|
||||
URIBL_DROP = "127.0.0.9";
|
||||
}
|
||||
}
|
||||
SH_EMAIL_DBL {
|
||||
ignore_defaults = true;
|
||||
replyto = true;
|
||||
emails_domainonly = true;
|
||||
disable_monitoring = true;
|
||||
rbl = "${SPAMHAUS_DQS_KEY}.dbl.dq.spamhaus.net";
|
||||
returncodes = {
|
||||
SH_EMAIL_DBL = [
|
||||
"127.0.1.2",
|
||||
"127.0.1.4",
|
||||
"127.0.1.5",
|
||||
"127.0.1.6"
|
||||
];
|
||||
SH_EMAIL_DBL_ABUSED = [
|
||||
"127.0.1.102",
|
||||
"127.0.1.104",
|
||||
"127.0.1.105",
|
||||
"127.0.1.106"
|
||||
];
|
||||
SH_EMAIL_DBL_DONT_QUERY_IPS = [ "127.0.1.255" ];
|
||||
}
|
||||
}
|
||||
SH_EMAIL_ZRD {
|
||||
ignore_defaults = true;
|
||||
replyto = true;
|
||||
emails_domainonly = true;
|
||||
disable_monitoring = true;
|
||||
rbl = "${SPAMHAUS_DQS_KEY}.zrd.dq.spamhaus.net";
|
||||
returncodes = {
|
||||
SH_EMAIL_ZRD_VERY_FRESH_DOMAIN = ["127.0.2.2", "127.0.2.3", "127.0.2.4"];
|
||||
SH_EMAIL_ZRD_FRESH_DOMAIN = [
|
||||
"127.0.2.5", "127.0.2.6", "127.0.2.7", "127.0.2.8", "127.0.2.9", "127.0.2.10", "127.0.2.11", "127.0.2.12", "127.0.2.13", "127.0.2.14", "127.0.2.15", "127.0.2.16", "127.0.2.17", "127.0.2.18", "127.0.2.19", "127.0.2.20", "127.0.2.21", "127.0.2.22", "127.0.2.23", "127.0.2.24"
|
||||
];
|
||||
SH_EMAIL_ZRD_DONT_QUERY_IPS = [ "127.0.2.255" ];
|
||||
}
|
||||
}
|
||||
"DBL" {
|
||||
# override the defaults for DBL defined in modules.d/rbl.conf
|
||||
rbl = "${SPAMHAUS_DQS_KEY}.dbl.dq.spamhaus.net";
|
||||
disable_monitoring = true;
|
||||
}
|
||||
"ZRD" {
|
||||
ignore_defaults = true;
|
||||
rbl = "${SPAMHAUS_DQS_KEY}.zrd.dq.spamhaus.net";
|
||||
no_ip = true;
|
||||
dkim = true;
|
||||
emails = true;
|
||||
emails_domainonly = true;
|
||||
urls = true;
|
||||
returncodes = {
|
||||
ZRD_VERY_FRESH_DOMAIN = ["127.0.2.2", "127.0.2.3", "127.0.2.4"];
|
||||
ZRD_FRESH_DOMAIN = ["127.0.2.5", "127.0.2.6", "127.0.2.7", "127.0.2.8", "127.0.2.9", "127.0.2.10", "127.0.2.11", "127.0.2.12", "127.0.2.13", "127.0.2.14", "127.0.2.15", "127.0.2.16", "127.0.2.17", "127.0.2.18", "127.0.2.19", "127.0.2.20", "127.0.2.21", "127.0.2.22", "127.0.2.23", "127.0.2.24"];
|
||||
}
|
||||
}
|
||||
spamhaus_sbl_url {
|
||||
ignore_defaults = true
|
||||
rbl = "${SPAMHAUS_DQS_KEY}.sbl.dq.spamhaus.net";
|
||||
checks = ['urls'];
|
||||
disable_monitoring = true;
|
||||
returncodes {
|
||||
SPAMHAUS_SBL_URL = "127.0.0.2";
|
||||
}
|
||||
}
|
||||
|
||||
SH_HBL_EMAIL {
|
||||
ignore_defaults = true;
|
||||
rbl = "_email.${SPAMHAUS_DQS_KEY}.hbl.dq.spamhaus.net";
|
||||
emails_domainonly = false;
|
||||
selector = "from('smtp').lower;from('mime').lower";
|
||||
ignore_whitelist = true;
|
||||
checks = ['emails', 'replyto'];
|
||||
hash = "sha1";
|
||||
returncodes = {
|
||||
SH_HBL_EMAIL = [
|
||||
"127.0.3.2"
|
||||
];
|
||||
}
|
||||
}
|
||||
|
||||
spamhaus_dqs_hbl {
|
||||
symbol = "HBL_FILE_UNKNOWN";
|
||||
rbl = "_file.${SPAMHAUS_DQS_KEY}.hbl.dq.spamhaus.net.";
|
||||
selector = "attachments('rbase32', 'sha256')";
|
||||
ignore_whitelist = true;
|
||||
ignore_defaults = true;
|
||||
returncodes {
|
||||
SH_HBL_FILE_MALICIOUS = "127.0.3.10";
|
||||
SH_HBL_FILE_SUSPICIOUS = "127.0.3.15";
|
||||
}
|
||||
}
|
||||
EOF
|
||||
else
|
||||
rm -rf /etc/rspamd/custom/dqs-rbl.conf
|
||||
fi
|
||||
|
||||
exec "$@"
|
||||
|
||||
@@ -1,632 +0,0 @@
|
||||
--[[
|
||||
Copyright (c) 2016, Andrew Lewis <nerf@judo.za.org>
|
||||
Copyright (c) 2016, Vsevolod Stakhov <vsevolod@highsecure.ru>
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
]]--
|
||||
|
||||
if confighelp then
|
||||
return
|
||||
end
|
||||
|
||||
-- A plugin that pushes metadata (or whole messages) to external services
|
||||
|
||||
local redis_params
|
||||
local lua_util = require "lua_util"
|
||||
local rspamd_http = require "rspamd_http"
|
||||
local rspamd_util = require "rspamd_util"
|
||||
local rspamd_logger = require "rspamd_logger"
|
||||
local ucl = require "ucl"
|
||||
local E = {}
|
||||
local N = 'metadata_exporter'
|
||||
|
||||
local settings = {
|
||||
pusher_enabled = {},
|
||||
pusher_format = {},
|
||||
pusher_select = {},
|
||||
mime_type = 'text/plain',
|
||||
defer = false,
|
||||
mail_from = '',
|
||||
mail_to = 'postmaster@localhost',
|
||||
helo = 'rspamd',
|
||||
email_template = [[From: "Rspamd" <$mail_from>
|
||||
To: $mail_to
|
||||
Subject: Spam alert
|
||||
Date: $date
|
||||
MIME-Version: 1.0
|
||||
Message-ID: <$our_message_id>
|
||||
Content-type: text/plain; charset=utf-8
|
||||
Content-Transfer-Encoding: 8bit
|
||||
|
||||
Authenticated username: $user
|
||||
IP: $ip
|
||||
Queue ID: $qid
|
||||
SMTP FROM: $from
|
||||
SMTP RCPT: $rcpt
|
||||
MIME From: $header_from
|
||||
MIME To: $header_to
|
||||
MIME Date: $header_date
|
||||
Subject: $header_subject
|
||||
Message-ID: $message_id
|
||||
Action: $action
|
||||
Score: $score
|
||||
Symbols: $symbols]],
|
||||
}
|
||||
|
||||
local function get_general_metadata(task, flatten, no_content)
|
||||
local r = {}
|
||||
local ip = task:get_from_ip()
|
||||
if ip and ip:is_valid() then
|
||||
r.ip = tostring(ip)
|
||||
else
|
||||
r.ip = 'unknown'
|
||||
end
|
||||
r.user = task:get_user() or 'unknown'
|
||||
r.qid = task:get_queue_id() or 'unknown'
|
||||
r.subject = task:get_subject() or 'unknown'
|
||||
r.action = task:get_metric_action('default')
|
||||
|
||||
local s = task:get_metric_score('default')[1]
|
||||
r.score = flatten and string.format('%.2f', s) or s
|
||||
|
||||
local fuzzy = task:get_mempool():get_variable("fuzzy_hashes", "fstrings")
|
||||
if fuzzy and #fuzzy > 0 then
|
||||
local fz = {}
|
||||
for _,h in ipairs(fuzzy) do
|
||||
table.insert(fz, h)
|
||||
end
|
||||
if not flatten then
|
||||
r.fuzzy = fz
|
||||
else
|
||||
r.fuzzy = table.concat(fz, ', ')
|
||||
end
|
||||
else
|
||||
r.fuzzy = 'unknown'
|
||||
end
|
||||
|
||||
local rcpt = task:get_recipients('smtp')
|
||||
if rcpt then
|
||||
local l = {}
|
||||
for _, a in ipairs(rcpt) do
|
||||
table.insert(l, a['addr'])
|
||||
end
|
||||
if not flatten then
|
||||
r.rcpt = l
|
||||
else
|
||||
r.rcpt = table.concat(l, ', ')
|
||||
end
|
||||
else
|
||||
r.rcpt = 'unknown'
|
||||
end
|
||||
local from = task:get_from('smtp')
|
||||
if ((from or E)[1] or E).addr then
|
||||
r.from = from[1].addr
|
||||
else
|
||||
r.from = 'unknown'
|
||||
end
|
||||
local syminf = task:get_symbols_all()
|
||||
if flatten then
|
||||
local l = {}
|
||||
for _, sym in ipairs(syminf) do
|
||||
local txt
|
||||
if sym.options then
|
||||
local topt = table.concat(sym.options, ', ')
|
||||
txt = sym.name .. '(' .. string.format('%.2f', sym.score) .. ')' .. ' [' .. topt .. ']'
|
||||
else
|
||||
txt = sym.name .. '(' .. string.format('%.2f', sym.score) .. ')'
|
||||
end
|
||||
table.insert(l, txt)
|
||||
end
|
||||
r.symbols = table.concat(l, '\n\t')
|
||||
else
|
||||
r.symbols = syminf
|
||||
end
|
||||
local function process_header(name)
|
||||
local hdr = task:get_header_full(name)
|
||||
if hdr then
|
||||
local l = {}
|
||||
for _, h in ipairs(hdr) do
|
||||
table.insert(l, h.decoded)
|
||||
end
|
||||
if not flatten then
|
||||
return l
|
||||
else
|
||||
return table.concat(l, '\n')
|
||||
end
|
||||
else
|
||||
return 'unknown'
|
||||
end
|
||||
end
|
||||
if not no_content then
|
||||
r.header_from = process_header('from')
|
||||
r.header_to = process_header('to')
|
||||
r.header_subject = process_header('subject')
|
||||
r.header_date = process_header('date')
|
||||
r.message_id = task:get_message_id()
|
||||
end
|
||||
return r
|
||||
end
|
||||
|
||||
local formatters = {
|
||||
default = function(task)
|
||||
return task:get_content(), {}
|
||||
end,
|
||||
email_alert = function(task, rule, extra)
|
||||
local meta = get_general_metadata(task, true)
|
||||
local display_emails = {}
|
||||
local mail_targets = {}
|
||||
meta.mail_from = rule.mail_from or settings.mail_from
|
||||
local mail_rcpt = rule.mail_to or settings.mail_to
|
||||
if type(mail_rcpt) ~= 'table' then
|
||||
table.insert(display_emails, string.format('<%s>', mail_rcpt))
|
||||
table.insert(mail_targets, mail_rcpt)
|
||||
else
|
||||
for _, e in ipairs(mail_rcpt) do
|
||||
table.insert(display_emails, string.format('<%s>', e))
|
||||
table.insert(mail_targets, mail_rcpt)
|
||||
end
|
||||
end
|
||||
if rule.email_alert_sender then
|
||||
local x = task:get_from('smtp')
|
||||
if x and string.len(x[1].addr) > 0 then
|
||||
table.insert(mail_targets, x)
|
||||
table.insert(display_emails, string.format('<%s>', x[1].addr))
|
||||
end
|
||||
end
|
||||
if rule.email_alert_user then
|
||||
local x = task:get_user()
|
||||
if x then
|
||||
table.insert(mail_targets, x)
|
||||
table.insert(display_emails, string.format('<%s>', x))
|
||||
end
|
||||
end
|
||||
if rule.email_alert_recipients then
|
||||
local x = task:get_recipients('smtp')
|
||||
if x then
|
||||
for _, e in ipairs(x) do
|
||||
if string.len(e.addr) > 0 then
|
||||
table.insert(mail_targets, e.addr)
|
||||
table.insert(display_emails, string.format('<%s>', e.addr))
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
meta.mail_to = table.concat(display_emails, ', ')
|
||||
meta.our_message_id = rspamd_util.random_hex(12) .. '@rspamd'
|
||||
meta.date = rspamd_util.time_to_string(rspamd_util.get_time())
|
||||
return lua_util.template(rule.email_template or settings.email_template, meta), { mail_targets = mail_targets}
|
||||
end,
|
||||
json = function(task)
|
||||
return ucl.to_format(get_general_metadata(task), 'json-compact')
|
||||
end
|
||||
}
|
||||
|
||||
local function is_spam(action)
|
||||
return (action == 'reject' or action == 'add header' or action == 'rewrite subject')
|
||||
end
|
||||
|
||||
local selectors = {
|
||||
default = function(task)
|
||||
return true
|
||||
end,
|
||||
is_spam = function(task)
|
||||
local action = task:get_metric_action('default')
|
||||
return is_spam(action)
|
||||
end,
|
||||
is_spam_authed = function(task)
|
||||
if not task:get_user() then
|
||||
return false
|
||||
end
|
||||
local action = task:get_metric_action('default')
|
||||
return is_spam(action)
|
||||
end,
|
||||
is_reject = function(task)
|
||||
local action = task:get_metric_action('default')
|
||||
return (action == 'reject')
|
||||
end,
|
||||
is_reject_authed = function(task)
|
||||
if not task:get_user() then
|
||||
return false
|
||||
end
|
||||
local action = task:get_metric_action('default')
|
||||
return (action == 'reject')
|
||||
end,
|
||||
}
|
||||
|
||||
local function maybe_defer(task, rule)
|
||||
if rule.defer then
|
||||
rspamd_logger.warnx(task, 'deferring message')
|
||||
task:set_pre_result('soft reject', 'deferred', N)
|
||||
end
|
||||
end
|
||||
|
||||
local pushers = {
|
||||
redis_pubsub = function(task, formatted, rule)
|
||||
local _,ret,upstream
|
||||
local function redis_pub_cb(err)
|
||||
if err then
|
||||
rspamd_logger.errx(task, 'got error %s when publishing on server %s',
|
||||
err, upstream:get_addr())
|
||||
return maybe_defer(task, rule)
|
||||
end
|
||||
return true
|
||||
end
|
||||
ret,_,upstream = rspamd_redis_make_request(task,
|
||||
redis_params, -- connect params
|
||||
nil, -- hash key
|
||||
true, -- is write
|
||||
redis_pub_cb, --callback
|
||||
'PUBLISH', -- command
|
||||
{rule.channel, formatted} -- arguments
|
||||
)
|
||||
if not ret then
|
||||
rspamd_logger.errx(task, 'error connecting to redis')
|
||||
maybe_defer(task, rule)
|
||||
end
|
||||
end,
|
||||
http = function(task, formatted, rule)
|
||||
local function http_callback(err, code)
|
||||
if err then
|
||||
rspamd_logger.errx(task, 'got error %s in http callback', err)
|
||||
return maybe_defer(task, rule)
|
||||
end
|
||||
if code ~= 200 then
|
||||
rspamd_logger.errx(task, 'got unexpected http status: %s', code)
|
||||
return maybe_defer(task, rule)
|
||||
end
|
||||
return true
|
||||
end
|
||||
local hdrs = {}
|
||||
if rule.meta_headers then
|
||||
local gm = get_general_metadata(task, false, true)
|
||||
local pfx = rule.meta_header_prefix or 'X-Rspamd-'
|
||||
for k, v in pairs(gm) do
|
||||
if type(v) == 'table' then
|
||||
hdrs[pfx .. k] = ucl.to_format(v, 'json-compact')
|
||||
else
|
||||
hdrs[pfx .. k] = v
|
||||
end
|
||||
end
|
||||
end
|
||||
rspamd_http.request({
|
||||
task=task,
|
||||
url=rule.url,
|
||||
body=formatted,
|
||||
callback=http_callback,
|
||||
mime_type=rule.mime_type or settings.mime_type,
|
||||
headers=hdrs,
|
||||
})
|
||||
end,
|
||||
send_mail = function(task, formatted, rule, extra)
|
||||
local lua_smtp = require "lua_smtp"
|
||||
local function sendmail_cb(ret, err)
|
||||
if not ret then
|
||||
rspamd_logger.errx(task, 'SMTP export error: %s', err)
|
||||
maybe_defer(task, rule)
|
||||
end
|
||||
end
|
||||
|
||||
lua_smtp.sendmail({
|
||||
task = task,
|
||||
host = rule.smtp,
|
||||
port = rule.smtp_port or settings.smtp_port or 25,
|
||||
from = rule.mail_from or settings.mail_from,
|
||||
recipients = extra.mail_targets or rule.mail_to or settings.mail_to,
|
||||
helo = rule.helo or settings.helo,
|
||||
timeout = rule.timeout or settings.timeout,
|
||||
}, formatted, sendmail_cb)
|
||||
end,
|
||||
}
|
||||
|
||||
local opts = rspamd_config:get_all_opt(N)
|
||||
if not opts then return end
|
||||
local process_settings = {
|
||||
select = function(val)
|
||||
selectors.custom = assert(load(val))()
|
||||
end,
|
||||
format = function(val)
|
||||
formatters.custom = assert(load(val))()
|
||||
end,
|
||||
push = function(val)
|
||||
pushers.custom = assert(load(val))()
|
||||
end,
|
||||
custom_push = function(val)
|
||||
if type(val) == 'table' then
|
||||
for k, v in pairs(val) do
|
||||
pushers[k] = assert(load(v))()
|
||||
end
|
||||
end
|
||||
end,
|
||||
custom_select = function(val)
|
||||
if type(val) == 'table' then
|
||||
for k, v in pairs(val) do
|
||||
selectors[k] = assert(load(v))()
|
||||
end
|
||||
end
|
||||
end,
|
||||
custom_format = function(val)
|
||||
if type(val) == 'table' then
|
||||
for k, v in pairs(val) do
|
||||
formatters[k] = assert(load(v))()
|
||||
end
|
||||
end
|
||||
end,
|
||||
pusher_enabled = function(val)
|
||||
if type(val) == 'string' then
|
||||
if pushers[val] then
|
||||
settings.pusher_enabled[val] = true
|
||||
else
|
||||
rspamd_logger.errx(rspamd_config, 'Pusher type: %s is invalid', val)
|
||||
end
|
||||
elseif type(val) == 'table' then
|
||||
for _, v in ipairs(val) do
|
||||
if pushers[v] then
|
||||
settings.pusher_enabled[v] = true
|
||||
else
|
||||
rspamd_logger.errx(rspamd_config, 'Pusher type: %s is invalid', val)
|
||||
end
|
||||
end
|
||||
end
|
||||
end,
|
||||
}
|
||||
for k, v in pairs(opts) do
|
||||
local f = process_settings[k]
|
||||
if f then
|
||||
f(opts[k])
|
||||
else
|
||||
settings[k] = v
|
||||
end
|
||||
end
|
||||
if type(settings.rules) ~= 'table' then
|
||||
-- Legacy config
|
||||
settings.rules = {}
|
||||
if not next(settings.pusher_enabled) then
|
||||
if pushers.custom then
|
||||
rspamd_logger.infox(rspamd_config, 'Custom pusher implicitly enabled')
|
||||
settings.pusher_enabled.custom = true
|
||||
else
|
||||
-- Check legacy options
|
||||
if settings.url then
|
||||
rspamd_logger.warnx(rspamd_config, 'HTTP pusher implicitly enabled')
|
||||
settings.pusher_enabled.http = true
|
||||
end
|
||||
if settings.channel then
|
||||
rspamd_logger.warnx(rspamd_config, 'Redis Pubsub pusher implicitly enabled')
|
||||
settings.pusher_enabled.redis_pubsub = true
|
||||
end
|
||||
if settings.smtp and settings.mail_to then
|
||||
rspamd_logger.warnx(rspamd_config, 'SMTP pusher implicitly enabled')
|
||||
settings.pusher_enabled.send_mail = true
|
||||
end
|
||||
end
|
||||
end
|
||||
if not next(settings.pusher_enabled) then
|
||||
rspamd_logger.errx(rspamd_config, 'No push backend enabled')
|
||||
return
|
||||
end
|
||||
if settings.formatter then
|
||||
settings.format = formatters[settings.formatter]
|
||||
if not settings.format then
|
||||
rspamd_logger.errx(rspamd_config, 'No such formatter: %s', settings.formatter)
|
||||
return
|
||||
end
|
||||
end
|
||||
if settings.selector then
|
||||
settings.select = selectors[settings.selector]
|
||||
if not settings.select then
|
||||
rspamd_logger.errx(rspamd_config, 'No such selector: %s', settings.selector)
|
||||
return
|
||||
end
|
||||
end
|
||||
for k in pairs(settings.pusher_enabled) do
|
||||
local formatter = settings.pusher_format[k]
|
||||
local selector = settings.pusher_select[k]
|
||||
if not formatter then
|
||||
settings.pusher_format[k] = settings.formatter or 'default'
|
||||
rspamd_logger.infox(rspamd_config, 'Using default formatter for %s pusher', k)
|
||||
else
|
||||
if not formatters[formatter] then
|
||||
rspamd_logger.errx(rspamd_config, 'No such formatter: %s - disabling %s', formatter, k)
|
||||
settings.pusher_enabled.k = nil
|
||||
end
|
||||
end
|
||||
if not selector then
|
||||
settings.pusher_select[k] = settings.selector or 'default'
|
||||
rspamd_logger.infox(rspamd_config, 'Using default selector for %s pusher', k)
|
||||
else
|
||||
if not selectors[selector] then
|
||||
rspamd_logger.errx(rspamd_config, 'No such selector: %s - disabling %s', selector, k)
|
||||
settings.pusher_enabled.k = nil
|
||||
end
|
||||
end
|
||||
end
|
||||
if settings.pusher_enabled.redis_pubsub then
|
||||
redis_params = rspamd_parse_redis_server(N)
|
||||
if not redis_params then
|
||||
rspamd_logger.errx(rspamd_config, 'No redis servers are specified')
|
||||
settings.pusher_enabled.redis_pubsub = nil
|
||||
else
|
||||
local r = {}
|
||||
r.backend = 'redis_pubsub'
|
||||
r.channel = settings.channel
|
||||
r.defer = settings.defer
|
||||
r.selector = settings.pusher_select.redis_pubsub
|
||||
r.formatter = settings.pusher_format.redis_pubsub
|
||||
settings.rules[r.backend:upper()] = r
|
||||
end
|
||||
end
|
||||
if settings.pusher_enabled.http then
|
||||
if not settings.url then
|
||||
rspamd_logger.errx(rspamd_config, 'No URL is specified')
|
||||
settings.pusher_enabled.http = nil
|
||||
else
|
||||
local r = {}
|
||||
r.backend = 'http'
|
||||
r.url = settings.url
|
||||
r.mime_type = settings.mime_type
|
||||
r.defer = settings.defer
|
||||
r.selector = settings.pusher_select.http
|
||||
r.formatter = settings.pusher_format.http
|
||||
settings.rules[r.backend:upper()] = r
|
||||
end
|
||||
end
|
||||
if settings.pusher_enabled.send_mail then
|
||||
if not (settings.mail_to and settings.smtp) then
|
||||
rspamd_logger.errx(rspamd_config, 'No mail_to and/or smtp setting is specified')
|
||||
settings.pusher_enabled.send_mail = nil
|
||||
else
|
||||
local r = {}
|
||||
r.backend = 'send_mail'
|
||||
r.mail_to = settings.mail_to
|
||||
r.mail_from = settings.mail_from
|
||||
r.helo = settings.hello
|
||||
r.smtp = settings.smtp
|
||||
r.smtp_port = settings.smtp_port
|
||||
r.email_template = settings.email_template
|
||||
r.defer = settings.defer
|
||||
r.selector = settings.pusher_select.send_mail
|
||||
r.formatter = settings.pusher_format.send_mail
|
||||
settings.rules[r.backend:upper()] = r
|
||||
end
|
||||
end
|
||||
if not next(settings.pusher_enabled) then
|
||||
rspamd_logger.errx(rspamd_config, 'No push backend enabled')
|
||||
return
|
||||
end
|
||||
elseif not next(settings.rules) then
|
||||
lua_util.debugm(N, rspamd_config, 'No rules enabled')
|
||||
return
|
||||
end
|
||||
if not settings.rules or not next(settings.rules) then
|
||||
rspamd_logger.errx(rspamd_config, 'No rules enabled')
|
||||
return
|
||||
end
|
||||
local backend_required_elements = {
|
||||
http = {
|
||||
'url',
|
||||
},
|
||||
smtp = {
|
||||
'mail_to',
|
||||
'smtp',
|
||||
},
|
||||
redis_pubsub = {
|
||||
'channel',
|
||||
},
|
||||
}
|
||||
local check_element = {
|
||||
selector = function(k, v)
|
||||
if not selectors[v] then
|
||||
rspamd_logger.errx(rspamd_config, 'Rule %s has invalid selector %s', k, v)
|
||||
return false
|
||||
else
|
||||
return true
|
||||
end
|
||||
end,
|
||||
formatter = function(k, v)
|
||||
if not formatters[v] then
|
||||
rspamd_logger.errx(rspamd_config, 'Rule %s has invalid formatter %s', k, v)
|
||||
return false
|
||||
else
|
||||
return true
|
||||
end
|
||||
end,
|
||||
}
|
||||
local backend_check = {
|
||||
default = function(k, rule)
|
||||
local reqset = backend_required_elements[rule.backend]
|
||||
if reqset then
|
||||
for _, e in ipairs(reqset) do
|
||||
if not rule[e] then
|
||||
rspamd_logger.errx(rspamd_config, 'Rule %s misses required setting %s', k, e)
|
||||
settings.rules[k] = nil
|
||||
end
|
||||
end
|
||||
end
|
||||
for sett, v in pairs(rule) do
|
||||
local f = check_element[sett]
|
||||
if f then
|
||||
if not f(sett, v) then
|
||||
settings.rules[k] = nil
|
||||
end
|
||||
end
|
||||
end
|
||||
end,
|
||||
}
|
||||
backend_check.redis_pubsub = function(k, rule)
|
||||
if not redis_params then
|
||||
redis_params = rspamd_parse_redis_server(N)
|
||||
end
|
||||
if not redis_params then
|
||||
rspamd_logger.errx(rspamd_config, 'No redis servers are specified')
|
||||
settings.rules[k] = nil
|
||||
else
|
||||
backend_check.default(k, rule)
|
||||
end
|
||||
end
|
||||
setmetatable(backend_check, {
|
||||
__index = function()
|
||||
return backend_check.default
|
||||
end,
|
||||
})
|
||||
for k, v in pairs(settings.rules) do
|
||||
if type(v) == 'table' then
|
||||
local backend = v.backend
|
||||
if not backend then
|
||||
rspamd_logger.errx(rspamd_config, 'Rule %s has no backend', k)
|
||||
settings.rules[k] = nil
|
||||
elseif not pushers[backend] then
|
||||
rspamd_logger.errx(rspamd_config, 'Rule %s has invalid backend %s', k, backend)
|
||||
settings.rules[k] = nil
|
||||
else
|
||||
local f = backend_check[backend]
|
||||
f(k, v)
|
||||
end
|
||||
else
|
||||
rspamd_logger.errx(rspamd_config, 'Rule %s has bad type: %s', k, type(v))
|
||||
settings.rules[k] = nil
|
||||
end
|
||||
end
|
||||
|
||||
local function gen_exporter(rule)
|
||||
return function (task)
|
||||
if task:has_flag('skip') then return end
|
||||
local selector = rule.selector or 'default'
|
||||
local selected = selectors[selector](task)
|
||||
if selected then
|
||||
lua_util.debugm(N, task, 'Message selected for processing')
|
||||
local formatter = rule.formatter or 'default'
|
||||
local formatted, extra = formatters[formatter](task, rule)
|
||||
if formatted then
|
||||
pushers[rule.backend](task, formatted, rule, extra)
|
||||
else
|
||||
lua_util.debugm(N, task, 'Formatter [%s] returned non-truthy value [%s]', formatter, formatted)
|
||||
end
|
||||
else
|
||||
lua_util.debugm(N, task, 'Selector [%s] returned non-truthy value [%s]', selector, selected)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
if not next(settings.rules) then
|
||||
rspamd_logger.errx(rspamd_config, 'No rules enabled')
|
||||
lua_util.disable_module(N, "config")
|
||||
end
|
||||
for k, r in pairs(settings.rules) do
|
||||
rspamd_config:register_symbol({
|
||||
name = 'EXPORT_METADATA_' .. k,
|
||||
type = 'idempotent',
|
||||
callback = gen_exporter(r),
|
||||
priority = 10,
|
||||
flags = 'empty,explicit_disable,ignore_passthrough',
|
||||
})
|
||||
end
|
||||
@@ -1,11 +1,13 @@
|
||||
FROM debian:bullseye-slim
|
||||
LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
|
||||
FROM debian:bookworm-slim
|
||||
|
||||
LABEL maintainer="The Infrastructure Company GmbH <info@servercow.de>"
|
||||
|
||||
ARG DEBIAN_FRONTEND=noninteractive
|
||||
ARG SOGO_DEBIAN_REPOSITORY=http://packages.sogo.nu/nightly/5/debian/
|
||||
# renovate: datasource=github-releases depName=tianon/gosu versioning=semver-coerced
|
||||
ARG GOSU_VERSION=1.16
|
||||
ENV LC_ALL C
|
||||
ARG DEBIAN_VERSION=bookworm
|
||||
ARG SOGO_DEBIAN_REPOSITORY=http://www.axis.cz/linux/debian
|
||||
# renovate: datasource=github-releases depName=tianon/gosu versioning=semver-coerced extractVersion=^(?<version>.*)$
|
||||
ARG GOSU_VERSION=1.17
|
||||
ENV LC_ALL=C
|
||||
|
||||
# Prerequisites
|
||||
RUN echo "Building from repository $SOGO_DEBIAN_REPOSITORY" \
|
||||
@@ -21,7 +23,7 @@ RUN echo "Building from repository $SOGO_DEBIAN_REPOSITORY" \
|
||||
syslog-ng-core \
|
||||
syslog-ng-mod-redis \
|
||||
dirmngr \
|
||||
netcat \
|
||||
netcat-traditional \
|
||||
psmisc \
|
||||
wget \
|
||||
patch \
|
||||
@@ -32,7 +34,7 @@ RUN echo "Building from repository $SOGO_DEBIAN_REPOSITORY" \
|
||||
&& mkdir /usr/share/doc/sogo \
|
||||
&& touch /usr/share/doc/sogo/empty.sh \
|
||||
&& apt-key adv --keyserver keys.openpgp.org --recv-key 74FFC6D72B925A34B5D356BDF8A27B36A6E2EAE9 \
|
||||
&& echo "deb ${SOGO_DEBIAN_REPOSITORY} bullseye bullseye" > /etc/apt/sources.list.d/sogo.list \
|
||||
&& echo "deb [trusted=yes] ${SOGO_DEBIAN_REPOSITORY} ${DEBIAN_VERSION} sogo-v5" > /etc/apt/sources.list.d/sogo.list \
|
||||
&& apt-get update && apt-get install -y --no-install-recommends \
|
||||
sogo \
|
||||
sogo-activesync \
|
||||
@@ -53,4 +55,4 @@ RUN chmod +x /bootstrap-sogo.sh \
|
||||
|
||||
ENTRYPOINT ["/docker-entrypoint.sh"]
|
||||
|
||||
CMD exec /usr/bin/supervisord -c /etc/supervisor/supervisord.conf
|
||||
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]
|
||||
@@ -1,7 +1,7 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Wait for MySQL to warm-up
|
||||
while ! mysqladmin status --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
|
||||
while ! mariadb-admin status --ssl=false --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
|
||||
echo "Waiting for database to come up..."
|
||||
sleep 2
|
||||
done
|
||||
@@ -150,6 +150,8 @@ cat <<EOF > /var/lib/sogo/GNUstep/Defaults/sogod.plist
|
||||
<string>YES</string>
|
||||
<key>SOGoEncryptionKey</key>
|
||||
<string>${RAND_PASS}</string>
|
||||
<key>OCSAdminURL</key>
|
||||
<string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_admin</string>
|
||||
<key>OCSCacheFolderURL</key>
|
||||
<string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_cache_folder</string>
|
||||
<key>OCSEMailAlarmsFolderURL</key>
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
@version: 3.28
|
||||
@version: 3.38
|
||||
@include "scl.conf"
|
||||
options {
|
||||
chain_hostnames(off);
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
@version: 3.28
|
||||
@version: 3.38
|
||||
@include "scl.conf"
|
||||
options {
|
||||
chain_hostnames(off);
|
||||
|
||||
@@ -2,8 +2,8 @@ FROM solr:7.7-slim
|
||||
|
||||
USER root
|
||||
|
||||
# renovate: datasource=github-releases depName=tianon/gosu versioning=semver-coerced
|
||||
ARG GOSU_VERSION=1.16
|
||||
# renovate: datasource=github-releases depName=tianon/gosu versioning=semver-coerced extractVersion=(?<version>.*)$
|
||||
ARG GOSU_VERSION=1.17
|
||||
|
||||
COPY solr.sh /
|
||||
COPY solr-config-7.7.0.xml /
|
||||
|
||||
@@ -1,7 +1,15 @@
|
||||
#!/bin/bash
|
||||
|
||||
if [[ "${SKIP_SOLR}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
|
||||
if [[ "${FLATCURVE_EXPERIMENTAL}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
|
||||
echo "FLATCURVE_EXPERIMENTAL=y, skipping Solr but enabling Flatcurve as FTS for Dovecot!"
|
||||
echo "Solr will be removed in the future!"
|
||||
sleep 365d
|
||||
exit 0
|
||||
elif [[ "${SKIP_SOLR}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
|
||||
echo "SKIP_SOLR=y, skipping Solr..."
|
||||
echo "HINT: You could try the newer FTS Backend Flatcurve, which is currently in experimental state..."
|
||||
echo "Simply set FLATCURVE_EXPERIMENTAL=y inside your mailcow.conf and restart the stack afterwards!"
|
||||
echo "Solr will be removed in the future!"
|
||||
sleep 365d
|
||||
exit 0
|
||||
fi
|
||||
@@ -57,5 +65,11 @@ if [[ "${1}" == "--bootstrap" ]]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "Starting up Solr..."
|
||||
echo -e "\e[31mSolr is deprecated! You can try the new FTS System now by enabling FLATCURVE_EXPERIMENTAL=y inside mailcow.conf and restarting the stack\e[0m"
|
||||
echo -e "\e[31mSolr will be removed completely soon!\e[0m"
|
||||
|
||||
sleep 15
|
||||
|
||||
exec gosu solr solr-foreground
|
||||
|
||||
|
||||
@@ -1,23 +1,36 @@
|
||||
FROM alpine:3.17
|
||||
FROM alpine:3.20
|
||||
|
||||
LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
|
||||
LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"
|
||||
|
||||
RUN apk add --update --no-cache \
|
||||
curl \
|
||||
bind-tools \
|
||||
coreutils \
|
||||
unbound \
|
||||
bash \
|
||||
openssl \
|
||||
drill \
|
||||
tzdata \
|
||||
syslog-ng \
|
||||
supervisor \
|
||||
&& curl -o /etc/unbound/root.hints https://www.internic.net/domain/named.cache \
|
||||
&& chown root:unbound /etc/unbound \
|
||||
&& adduser unbound tty \
|
||||
&& adduser unbound tty \
|
||||
&& chmod 775 /etc/unbound
|
||||
|
||||
EXPOSE 53/udp 53/tcp
|
||||
|
||||
COPY docker-entrypoint.sh /docker-entrypoint.sh
|
||||
|
||||
ENTRYPOINT ["/docker-entrypoint.sh"]
|
||||
# healthcheck (dig, ping)
|
||||
COPY healthcheck.sh /healthcheck.sh
|
||||
COPY syslog-ng.conf /etc/syslog-ng/syslog-ng.conf
|
||||
COPY supervisord.conf /etc/supervisor/supervisord.conf
|
||||
COPY stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh
|
||||
|
||||
CMD ["/usr/sbin/unbound"]
|
||||
RUN chmod +x /healthcheck.sh
|
||||
HEALTHCHECK --interval=30s --timeout=10s \
|
||||
CMD sh -c '[ -f /tmp/healthcheck_status ] && [ "$(cat /tmp/healthcheck_status)" -eq 0 ] || exit 1'
|
||||
|
||||
ENTRYPOINT ["/docker-entrypoint.sh"]
|
||||
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]
|
||||
|
||||
102
data/Dockerfiles/unbound/healthcheck.sh
Normal file
102
data/Dockerfiles/unbound/healthcheck.sh
Normal file
@@ -0,0 +1,102 @@
|
||||
#!/bin/bash
|
||||
|
||||
STATUS_FILE="/tmp/healthcheck_status"
|
||||
RUNS=0
|
||||
|
||||
# Declare log function for logfile to stdout
|
||||
function log_to_stdout() {
|
||||
echo "$(date +"%Y-%m-%d %H:%M:%S"): $1"
|
||||
}
|
||||
|
||||
# General Ping function to check general pingability
|
||||
function check_ping() {
|
||||
declare -a ipstoping=("1.1.1.1" "8.8.8.8" "9.9.9.9")
|
||||
local fail_tolerance=1
|
||||
local failures=0
|
||||
|
||||
for ip in "${ipstoping[@]}" ; do
|
||||
success=false
|
||||
for ((i=1; i<=3; i++)); do
|
||||
ping -q -c 3 -w 5 "$ip" > /dev/null
|
||||
if [ $? -eq 0 ]; then
|
||||
success=true
|
||||
break
|
||||
else
|
||||
log_to_stdout "Healthcheck: Failed to ping $ip on attempt $i. Trying again..."
|
||||
fi
|
||||
done
|
||||
|
||||
if [ "$success" = false ]; then
|
||||
log_to_stdout "Healthcheck: Couldn't ping $ip after 3 attempts. Marking this IP as failed."
|
||||
((failures++))
|
||||
fi
|
||||
done
|
||||
|
||||
if [ $failures -gt $fail_tolerance ]; then
|
||||
log_to_stdout "Healthcheck: Too many ping failures ($fail_tolerance failures allowed, you got $failures failures), marking Healthcheck as unhealthy..."
|
||||
return 1
|
||||
fi
|
||||
|
||||
return 0
|
||||
|
||||
}
|
||||
|
||||
# General DNS Resolve Check against Unbound Resolver himself
|
||||
function check_dns() {
|
||||
declare -a domains=("fuzzy.mailcow.email" "github.com" "hub.docker.com")
|
||||
local fail_tolerance=1
|
||||
local failures=0
|
||||
|
||||
for domain in "${domains[@]}" ; do
|
||||
success=false
|
||||
for ((i=1; i<=3; i++)); do
|
||||
dig_output=$(dig +short +timeout=2 +tries=1 "$domain" @127.0.0.1 2>/dev/null)
|
||||
dig_rc=$?
|
||||
|
||||
if [ $dig_rc -ne 0 ] || [ -z "$dig_output" ]; then
|
||||
log_to_stdout "Healthcheck: DNS Resolution Failed on attempt $i for $domain! Trying again..."
|
||||
else
|
||||
success=true
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
if [ "$success" = false ]; then
|
||||
log_to_stdout "Healthcheck: DNS Resolution not possible after 3 attempts for $domain... Gave up!"
|
||||
((failures++))
|
||||
fi
|
||||
done
|
||||
|
||||
if [ $failures -gt $fail_tolerance ]; then
|
||||
log_to_stdout "Healthcheck: Too many DNS failures ($fail_tolerance failures allowed, you got $failures failures), marking Healthcheck as unhealthy..."
|
||||
return 1
|
||||
fi
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
while true; do
|
||||
|
||||
if [[ ${SKIP_UNBOUND_HEALTHCHECK} == "y" ]]; then
|
||||
log_to_stdout "Healthcheck: ALL CHECKS WERE SKIPPED! Unbound is healthy!"
|
||||
echo "0" > $STATUS_FILE
|
||||
sleep 365d
|
||||
fi
|
||||
|
||||
# run checks, if check is not returning 0 (return value if check is ok), healthcheck will exit with 1 (marked in docker as unhealthy)
|
||||
check_ping
|
||||
PING_STATUS=$?
|
||||
|
||||
check_dns
|
||||
DNS_STATUS=$?
|
||||
|
||||
if [ $PING_STATUS -ne 0 ] || [ $DNS_STATUS -ne 0 ]; then
|
||||
echo "1" > $STATUS_FILE
|
||||
|
||||
else
|
||||
echo "0" > $STATUS_FILE
|
||||
fi
|
||||
|
||||
sleep 30
|
||||
|
||||
done
|
||||
10
data/Dockerfiles/unbound/stop-supervisor.sh
Executable file
10
data/Dockerfiles/unbound/stop-supervisor.sh
Executable file
@@ -0,0 +1,10 @@
|
||||
#!/bin/bash
|
||||
|
||||
printf "READY\n";
|
||||
|
||||
while read line; do
|
||||
echo "Processing Event: $line" >&2;
|
||||
kill -3 $(cat "/var/run/supervisord.pid")
|
||||
done < /dev/stdin
|
||||
|
||||
rm -rf /tmp/healthcheck_status
|
||||
32
data/Dockerfiles/unbound/supervisord.conf
Normal file
32
data/Dockerfiles/unbound/supervisord.conf
Normal file
@@ -0,0 +1,32 @@
|
||||
[supervisord]
|
||||
nodaemon=true
|
||||
user=root
|
||||
pidfile=/var/run/supervisord.pid
|
||||
|
||||
[program:syslog-ng]
|
||||
command=/usr/sbin/syslog-ng --foreground --no-caps
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
autostart=true
|
||||
|
||||
[program:unbound]
|
||||
command=/usr/sbin/unbound
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
autorestart=true
|
||||
|
||||
[program:unbound-healthcheck]
|
||||
command=/bin/bash /healthcheck.sh
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
autorestart=true
|
||||
|
||||
[eventlistener:processes]
|
||||
command=/usr/local/sbin/stop-supervisor.sh
|
||||
events=PROCESS_STATE_STOPPED, PROCESS_STATE_EXITED, PROCESS_STATE_FATAL
|
||||
21
data/Dockerfiles/unbound/syslog-ng.conf
Normal file
21
data/Dockerfiles/unbound/syslog-ng.conf
Normal file
@@ -0,0 +1,21 @@
|
||||
@version: 4.5
|
||||
@include "scl.conf"
|
||||
options {
|
||||
chain_hostnames(off);
|
||||
flush_lines(0);
|
||||
use_dns(no);
|
||||
use_fqdn(no);
|
||||
owner("root"); group("adm"); perm(0640);
|
||||
stats(freq(0));
|
||||
keep_timestamp(no);
|
||||
bad_hostname("^gconfd$");
|
||||
};
|
||||
source s_dgram {
|
||||
unix-dgram("/dev/log");
|
||||
internal();
|
||||
};
|
||||
destination d_stdout { pipe("/dev/stdout"); };
|
||||
log {
|
||||
source(s_dgram);
|
||||
destination(d_stdout);
|
||||
};
|
||||
@@ -1,5 +1,6 @@
|
||||
FROM alpine:3.17
|
||||
LABEL maintainer "André Peters <andre.peters@servercow.de>"
|
||||
FROM alpine:3.20
|
||||
|
||||
LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"
|
||||
|
||||
# Installation
|
||||
RUN apk add --update \
|
||||
@@ -36,4 +37,4 @@ RUN apk add --update \
|
||||
COPY watchdog.sh /watchdog.sh
|
||||
COPY check_mysql_slavestatus.sh /usr/lib/nagios/plugins/check_mysql_slavestatus.sh
|
||||
|
||||
CMD /watchdog.sh
|
||||
CMD ["/watchdog.sh"]
|
||||
|
||||
@@ -19,9 +19,11 @@ fi
|
||||
|
||||
if [[ "${WATCHDOG_VERBOSE}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
|
||||
SMTP_VERBOSE="--verbose"
|
||||
CURL_VERBOSE="--verbose"
|
||||
set -xv
|
||||
else
|
||||
SMTP_VERBOSE=""
|
||||
CURL_VERBOSE=""
|
||||
exec 2>/dev/null
|
||||
fi
|
||||
|
||||
@@ -31,7 +33,7 @@ if [[ ! -p /tmp/com_pipe ]]; then
|
||||
fi
|
||||
|
||||
# Wait for containers
|
||||
while ! mysqladmin status --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
|
||||
while ! mariadb-admin status --ssl=false --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
|
||||
echo "Waiting for SQL..."
|
||||
sleep 2
|
||||
done
|
||||
@@ -97,7 +99,9 @@ log_msg() {
|
||||
echo $(date) $(printf '%s\n' "${1}")
|
||||
}
|
||||
|
||||
function mail_error() {
|
||||
function notify_error() {
|
||||
# Check if one of the notification options is enabled
|
||||
[[ -z ${WATCHDOG_NOTIFY_EMAIL} ]] && [[ -z ${WATCHDOG_NOTIFY_WEBHOOK} ]] && return 0
|
||||
THROTTLE=
|
||||
[[ -z ${1} ]] && return 1
|
||||
# If exists, body will be the content of "/tmp/${1}", even if ${2} is set
|
||||
@@ -122,37 +126,61 @@ function mail_error() {
|
||||
else
|
||||
SUBJECT="${WATCHDOG_SUBJECT}: ${1}"
|
||||
fi
|
||||
IFS=',' read -r -a MAIL_RCPTS <<< "${WATCHDOG_NOTIFY_EMAIL}"
|
||||
for rcpt in "${MAIL_RCPTS[@]}"; do
|
||||
RCPT_DOMAIN=
|
||||
RCPT_MX=
|
||||
RCPT_DOMAIN=$(echo ${rcpt} | awk -F @ {'print $NF'})
|
||||
CHECK_FOR_VALID_MX=$(dig +short ${RCPT_DOMAIN} mx)
|
||||
if [[ -z ${CHECK_FOR_VALID_MX} ]]; then
|
||||
log_msg "Cannot determine MX for ${rcpt}, skipping email notification..."
|
||||
|
||||
# Send mail notification if enabled
|
||||
if [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]]; then
|
||||
IFS=',' read -r -a MAIL_RCPTS <<< "${WATCHDOG_NOTIFY_EMAIL}"
|
||||
for rcpt in "${MAIL_RCPTS[@]}"; do
|
||||
RCPT_DOMAIN=
|
||||
RCPT_MX=
|
||||
RCPT_DOMAIN=$(echo ${rcpt} | awk -F @ {'print $NF'})
|
||||
CHECK_FOR_VALID_MX=$(dig +short ${RCPT_DOMAIN} mx)
|
||||
if [[ -z ${CHECK_FOR_VALID_MX} ]]; then
|
||||
log_msg "Cannot determine MX for ${rcpt}, skipping email notification..."
|
||||
return 1
|
||||
fi
|
||||
[ -f "/tmp/${1}" ] && BODY="/tmp/${1}"
|
||||
timeout 10s ./smtp-cli --missing-modules-ok \
|
||||
"${SMTP_VERBOSE}" \
|
||||
--charset=UTF-8 \
|
||||
--subject="${SUBJECT}" \
|
||||
--body-plain="${BODY}" \
|
||||
--add-header="X-Priority: 1" \
|
||||
--to=${rcpt} \
|
||||
--from="watchdog@${MAILCOW_HOSTNAME}" \
|
||||
--hello-host=${MAILCOW_HOSTNAME} \
|
||||
--ipv4
|
||||
if [[ $? -eq 1 ]]; then # exit code 1 is fine
|
||||
log_msg "Sent notification email to ${rcpt}"
|
||||
else
|
||||
if [[ "${SMTP_VERBOSE}" == "" ]]; then
|
||||
log_msg "Error while sending notification email to ${rcpt}. You can enable verbose logging by setting 'WATCHDOG_VERBOSE=y' in mailcow.conf."
|
||||
else
|
||||
log_msg "Error while sending notification email to ${rcpt}."
|
||||
fi
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
# Send webhook notification if enabled
|
||||
if [[ ! -z ${WATCHDOG_NOTIFY_WEBHOOK} ]]; then
|
||||
if [[ -z ${WATCHDOG_NOTIFY_WEBHOOK_BODY} ]]; then
|
||||
log_msg "No webhook body set, skipping webhook notification..."
|
||||
return 1
|
||||
fi
|
||||
[ -f "/tmp/${1}" ] && BODY="/tmp/${1}"
|
||||
timeout 10s ./smtp-cli --missing-modules-ok \
|
||||
"${SMTP_VERBOSE}" \
|
||||
--charset=UTF-8 \
|
||||
--subject="${SUBJECT}" \
|
||||
--body-plain="${BODY}" \
|
||||
--add-header="X-Priority: 1" \
|
||||
--to=${rcpt} \
|
||||
--from="watchdog@${MAILCOW_HOSTNAME}" \
|
||||
--hello-host=${MAILCOW_HOSTNAME} \
|
||||
--ipv4
|
||||
if [[ $? -eq 1 ]]; then # exit code 1 is fine
|
||||
log_msg "Sent notification email to ${rcpt}"
|
||||
else
|
||||
if [[ "${SMTP_VERBOSE}" == "" ]]; then
|
||||
log_msg "Error while sending notification email to ${rcpt}. You can enable verbose logging by setting 'WATCHDOG_VERBOSE=y' in mailcow.conf."
|
||||
else
|
||||
log_msg "Error while sending notification email to ${rcpt}."
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
# Escape subject and body (https://stackoverflow.com/a/2705678)
|
||||
ESCAPED_SUBJECT=$(echo ${SUBJECT} | sed -e 's/[\/&]/\\&/g')
|
||||
ESCAPED_BODY=$(echo ${BODY} | sed -e 's/[\/&]/\\&/g')
|
||||
|
||||
# Replace subject and body placeholders
|
||||
WEBHOOK_BODY=$(echo ${WATCHDOG_NOTIFY_WEBHOOK_BODY} | sed -e "s/\$SUBJECT\|\${SUBJECT}/$ESCAPED_SUBJECT/g" -e "s/\$BODY\|\${BODY}/$ESCAPED_BODY/g")
|
||||
|
||||
# POST to webhook
|
||||
curl -X POST -H "Content-Type: application/json" ${CURL_VERBOSE} -d "${WEBHOOK_BODY}" ${WATCHDOG_NOTIFY_WEBHOOK}
|
||||
|
||||
log_msg "Sent notification using webhook"
|
||||
fi
|
||||
}
|
||||
|
||||
get_container_ip() {
|
||||
@@ -167,12 +195,12 @@ get_container_ip() {
|
||||
else
|
||||
sleep 0.5
|
||||
# get long container id for exact match
|
||||
CONTAINER_ID=($(curl --silent --insecure https://dockerapi/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring == \"${1}\") | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id"))
|
||||
CONTAINER_ID=($(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring == \"${1}\") | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id"))
|
||||
# returned id can have multiple elements (if scaled), shuffle for random test
|
||||
CONTAINER_ID=($(printf "%s\n" "${CONTAINER_ID[@]}" | shuf))
|
||||
if [[ ! -z ${CONTAINER_ID} ]]; then
|
||||
for matched_container in "${CONTAINER_ID[@]}"; do
|
||||
CONTAINER_IPS=($(curl --silent --insecure https://dockerapi/containers/${matched_container}/json | jq -r '.NetworkSettings.Networks[].IPAddress'))
|
||||
CONTAINER_IPS=($(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${matched_container}/json | jq -r '.NetworkSettings.Networks[].IPAddress'))
|
||||
for ip_match in "${CONTAINER_IPS[@]}"; do
|
||||
# grep will do nothing if one of these vars is empty
|
||||
[[ -z ${ip_match} ]] && continue
|
||||
@@ -197,7 +225,7 @@ get_container_ip() {
|
||||
# One-time check
|
||||
if grep -qi "$(echo ${IPV6_NETWORK} | cut -d: -f1-3)" <<< "$(ip a s)"; then
|
||||
if [[ -z "$(get_ipv6)" ]]; then
|
||||
mail_error "ipv6-config" "enable_ipv6 is true in docker-compose.yml, but an IPv6 link could not be established. Please verify your IPv6 connection."
|
||||
notify_error "ipv6-config" "enable_ipv6 is true in docker-compose.yml, but an IPv6 link could not be established. Please verify your IPv6 connection."
|
||||
fi
|
||||
fi
|
||||
|
||||
@@ -692,8 +720,8 @@ rspamd_checks() {
|
||||
From: watchdog@localhost
|
||||
|
||||
Empty
|
||||
' | usr/bin/curl --max-time 10 -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/scan | jq -rc .default.required_score)
|
||||
if [[ ${SCORE} != "9999" ]]; then
|
||||
' | usr/bin/curl --max-time 10 -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd.${COMPOSE_PROJECT_NAME}_mailcow-network/scan | jq -rc .default.required_score | sed 's/\..*//' )
|
||||
if [[ ${SCORE} -ne 9999 ]]; then
|
||||
echo "Rspamd settings check failed, score returned: ${SCORE}" 2>> /tmp/rspamd-mailcow 1>&2
|
||||
err_count=$(( ${err_count} + 1))
|
||||
else
|
||||
@@ -746,8 +774,8 @@ olefy_checks() {
|
||||
}
|
||||
|
||||
# Notify about start
|
||||
if [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]]; then
|
||||
mail_error "watchdog-mailcow" "Watchdog started monitoring mailcow."
|
||||
if [[ ${WATCHDOG_NOTIFY_START} =~ ^([yY][eE][sS]|[yY])+$ ]]; then
|
||||
notify_error "watchdog-mailcow" "Watchdog started monitoring mailcow."
|
||||
fi
|
||||
|
||||
# Create watchdog agents
|
||||
@@ -1029,33 +1057,33 @@ while true; do
|
||||
fi
|
||||
if [[ ${com_pipe_answer} == "ratelimit" ]]; then
|
||||
log_msg "At least one ratelimit was applied"
|
||||
[[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}"
|
||||
notify_error "${com_pipe_answer}"
|
||||
elif [[ ${com_pipe_answer} == "mail_queue_status" ]]; then
|
||||
log_msg "Mail queue status is critical"
|
||||
[[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}"
|
||||
notify_error "${com_pipe_answer}"
|
||||
elif [[ ${com_pipe_answer} == "external_checks" ]]; then
|
||||
log_msg "Your mailcow is an open relay!"
|
||||
# Define $2 to override message text, else print service was restarted at ...
|
||||
[[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}" "Please stop mailcow now and check your network configuration!"
|
||||
notify_error "${com_pipe_answer}" "Please stop mailcow now and check your network configuration!"
|
||||
elif [[ ${com_pipe_answer} == "mysql_repl_checks" ]]; then
|
||||
log_msg "MySQL replication is not working properly"
|
||||
# Define $2 to override message text, else print service was restarted at ...
|
||||
# Once mail per 10 minutes
|
||||
[[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}" "Please check the SQL replication status" 600
|
||||
notify_error "${com_pipe_answer}" "Please check the SQL replication status" 600
|
||||
elif [[ ${com_pipe_answer} == "dovecot_repl_checks" ]]; then
|
||||
log_msg "Dovecot replication is not working properly"
|
||||
# Define $2 to override message text, else print service was restarted at ...
|
||||
# Once mail per 10 minutes
|
||||
[[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}" "Please check the Dovecot replicator status" 600
|
||||
notify_error "${com_pipe_answer}" "Please check the Dovecot replicator status" 600
|
||||
elif [[ ${com_pipe_answer} == "certcheck" ]]; then
|
||||
log_msg "Certificates are about to expire"
|
||||
# Define $2 to override message text, else print service was restarted at ...
|
||||
# Only mail once a day
|
||||
[[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}" "Please renew your certificate" 86400
|
||||
notify_error "${com_pipe_answer}" "Please renew your certificate" 86400
|
||||
elif [[ ${com_pipe_answer} == "acme-mailcow" ]]; then
|
||||
log_msg "acme-mailcow did not complete successfully"
|
||||
# Define $2 to override message text, else print service was restarted at ...
|
||||
[[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}" "Please check acme-mailcow for further information."
|
||||
notify_error "${com_pipe_answer}" "Please check acme-mailcow for further information."
|
||||
elif [[ ${com_pipe_answer} == "fail2ban" ]]; then
|
||||
F2B_RES=($(timeout 4s ${REDIS_CMDLINE} --raw GET F2B_RES 2> /dev/null))
|
||||
if [[ ! -z "${F2B_RES}" ]]; then
|
||||
@@ -1065,18 +1093,18 @@ while true; do
|
||||
log_msg "Banned ${host}"
|
||||
rm /tmp/fail2ban 2> /dev/null
|
||||
timeout 2s whois "${host}" > /tmp/fail2ban
|
||||
[[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && [[ ${WATCHDOG_NOTIFY_BAN} =~ ^([yY][eE][sS]|[yY])+$ ]] && mail_error "${com_pipe_answer}" "IP ban: ${host}"
|
||||
[[ ${WATCHDOG_NOTIFY_BAN} =~ ^([yY][eE][sS]|[yY])+$ ]] && notify_error "${com_pipe_answer}" "IP ban: ${host}"
|
||||
done
|
||||
fi
|
||||
elif [[ ${com_pipe_answer} =~ .+-mailcow ]]; then
|
||||
kill -STOP ${BACKGROUND_TASKS[*]}
|
||||
sleep 10
|
||||
CONTAINER_ID=$(curl --silent --insecure https://dockerapi/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"${com_pipe_answer}\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id")
|
||||
CONTAINER_ID=$(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"${com_pipe_answer}\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id")
|
||||
if [[ ! -z ${CONTAINER_ID} ]]; then
|
||||
if [[ "${com_pipe_answer}" == "php-fpm-mailcow" ]]; then
|
||||
HAS_INITDB=$(curl --silent --insecure -XPOST https://dockerapi/containers/${CONTAINER_ID}/top | jq '.msg.Processes[] | contains(["php -c /usr/local/etc/php -f /web/inc/init_db.inc.php"])' | grep true)
|
||||
HAS_INITDB=$(curl --silent --insecure -XPOST https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${CONTAINER_ID}/top | jq '.msg.Processes[] | contains(["php -c /usr/local/etc/php -f /web/inc/init_db.inc.php"])' | grep true)
|
||||
fi
|
||||
S_RUNNING=$(($(date +%s) - $(curl --silent --insecure https://dockerapi/containers/${CONTAINER_ID}/json | jq .State.StartedAt | xargs -n1 date +%s -d)))
|
||||
S_RUNNING=$(($(date +%s) - $(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${CONTAINER_ID}/json | jq .State.StartedAt | xargs -n1 date +%s -d)))
|
||||
if [ ${S_RUNNING} -lt 360 ]; then
|
||||
log_msg "Container is running for less than 360 seconds, skipping action..."
|
||||
elif [[ ! -z ${HAS_INITDB} ]]; then
|
||||
@@ -1084,8 +1112,8 @@ while true; do
|
||||
sleep 60
|
||||
else
|
||||
log_msg "Sending restart command to ${CONTAINER_ID}..."
|
||||
curl --silent --insecure -XPOST https://dockerapi/containers/${CONTAINER_ID}/restart
|
||||
[[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}"
|
||||
curl --silent --insecure -XPOST https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${CONTAINER_ID}/restart
|
||||
notify_error "${com_pipe_answer}"
|
||||
log_msg "Wait for restarted container to settle and continue watching..."
|
||||
sleep 35
|
||||
fi
|
||||
@@ -1095,3 +1123,4 @@ while true; do
|
||||
kill -USR1 ${BACKGROUND_TASKS[*]}
|
||||
fi
|
||||
done
|
||||
|
||||
|
||||
@@ -86,7 +86,7 @@ server {
|
||||
deny all;
|
||||
}
|
||||
|
||||
location ~ ^\/(?:index|remote|public|cron|core\/ajax\/update|status|ocs\/v[12]|updater\/.+|oc[ms]-provider\/.+)\.php(?:$|\/) {
|
||||
location ~ ^\/(?:index|remote|public|cron|core\/ajax\/update|status|ocs\/v[12]|updater\/.+|ocs-provider\/.+)\.php(?:$|\/) {
|
||||
fastcgi_split_path_info ^(.+?\.php)(\/.*|)$;
|
||||
set $path_info $fastcgi_path_info;
|
||||
try_files $fastcgi_script_name =404;
|
||||
@@ -105,7 +105,7 @@ server {
|
||||
fastcgi_read_timeout 1200;
|
||||
}
|
||||
|
||||
location ~ ^\/(?:updater|oc[ms]-provider)(?:$|\/) {
|
||||
location ~ ^\/(?:updater|ocs-provider)(?:$|\/) {
|
||||
try_files $uri/ =404;
|
||||
index index.php;
|
||||
}
|
||||
|
||||
29
data/assets/templates/pw_reset_html.tpl
Normal file
29
data/assets/templates/pw_reset_html.tpl
Normal file
@@ -0,0 +1,29 @@
|
||||
<html>
|
||||
<head>
|
||||
<meta name="x-apple-disable-message-reformatting" />
|
||||
<style>
|
||||
body {
|
||||
font-family: Helvetica, Arial, Sans-Serif;
|
||||
}
|
||||
/* mobile devices */
|
||||
@media all and (max-width: 480px) {
|
||||
.mob {
|
||||
display: none;
|
||||
}
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
Hello {{username2}},<br><br>
|
||||
|
||||
Somebody requested a new password for the {{hostname}} account associated with {{username}}.<br>
|
||||
<small>Date of the password reset request: {{date}}</small><br><br>
|
||||
|
||||
You can reset your password by clicking the link below:<br>
|
||||
<a href="{{link}}">{{link}}</a><br><br>
|
||||
|
||||
The link will be valid for the next {{token_lifetime}} minutes.<br><br>
|
||||
|
||||
If you did not request a new password, please ignore this email.<br>
|
||||
</body>
|
||||
</html>
|
||||
11
data/assets/templates/pw_reset_text.tpl
Normal file
11
data/assets/templates/pw_reset_text.tpl
Normal file
@@ -0,0 +1,11 @@
|
||||
Hello {{username2}},
|
||||
|
||||
Somebody requested a new password for the {{hostname}} account associated with {{username}}.
|
||||
Date of the password reset request: {{date}}
|
||||
|
||||
You can reset your password by clicking the link below:
|
||||
{{link}}
|
||||
|
||||
The link will be valid for the next {{token_lifetime}} minutes.
|
||||
|
||||
If you did not request a new password, please ignore this email.
|
||||
@@ -10,6 +10,7 @@
|
||||
auth_mechanisms = plain login
|
||||
#mail_debug = yes
|
||||
#auth_debug = yes
|
||||
#log_debug = category=fts-flatcurve # Activate Logging for Flatcurve FTS Searchings
|
||||
log_path = syslog
|
||||
disable_plaintext_auth = yes
|
||||
# Uncomment on NFS share
|
||||
@@ -194,9 +195,6 @@ plugin {
|
||||
acl_shared_dict = file:/var/vmail/shared-mailboxes.db
|
||||
acl = vfile
|
||||
acl_user = %u
|
||||
fts = solr
|
||||
fts_autoindex = yes
|
||||
fts_solr = url=http://solr:8983/solr/dovecot-fts/
|
||||
quota = dict:Userquota::proxy::sqlquota
|
||||
quota_rule2 = Trash:storage=+100%%
|
||||
sieve = /var/vmail/sieve/%u.sieve
|
||||
@@ -247,6 +245,9 @@ plugin {
|
||||
mail_log_events = delete undelete expunge copy mailbox_delete mailbox_rename
|
||||
mail_log_fields = uid box msgid size
|
||||
mail_log_cached_only = yes
|
||||
|
||||
# Try set mail_replica
|
||||
!include_try /etc/dovecot/mail_replica.conf
|
||||
}
|
||||
service quota-warning {
|
||||
executable = script /usr/local/bin/quota_notify.py
|
||||
@@ -302,6 +303,7 @@ replication_dsync_parameters = -d -l 30 -U -n INBOX
|
||||
!include_try /etc/dovecot/extra.conf
|
||||
!include_try /etc/dovecot/sogo-sso.conf
|
||||
!include_try /etc/dovecot/shared_namespace.conf
|
||||
!include_try /etc/dovecot/conf.d/fts.conf
|
||||
# </Includes>
|
||||
default_client_limit = 10400
|
||||
default_vsz_limit = 1024 M
|
||||
|
||||
@@ -289,5 +289,20 @@ namespace inbox {
|
||||
mailbox "Kladde" {
|
||||
special_use = \Drafts
|
||||
}
|
||||
mailbox "Πρόχειρα" {
|
||||
special_use = \Drafts
|
||||
}
|
||||
mailbox "Απεσταλμένα" {
|
||||
special_use = \Sent
|
||||
}
|
||||
mailbox "Κάδος απορριμάτων" {
|
||||
special_use = \Trash
|
||||
}
|
||||
mailbox "Ανεπιθύμητα" {
|
||||
special_use = \Junk
|
||||
}
|
||||
mailbox "Αρχειοθετημένα" {
|
||||
special_use = \Archive
|
||||
}
|
||||
prefix =
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
proxy_cache_path /tmp levels=1:2 keys_zone=sogo:10m inactive=24h max_size=1g;
|
||||
server_names_hash_bucket_size 64;
|
||||
server_names_hash_max_size 512;
|
||||
server_names_hash_bucket_size 128;
|
||||
|
||||
map $http_x_forwarded_proto $client_req_scheme {
|
||||
default $scheme;
|
||||
|
||||
@@ -1,2 +1,3 @@
|
||||
listen ${HTTPS_PORT} ssl http2;
|
||||
listen [::]:${HTTPS_PORT} ssl http2;
|
||||
listen ${HTTPS_PORT} ssl;
|
||||
listen [::]:${HTTPS_PORT} ssl;
|
||||
http2 on;
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
if /^\s*Received:.*Authenticated sender.*\(Postcow\)/
|
||||
#/^Received: from .*? \([\w-.]* \[.*?\]\)\s+\(Authenticated sender: (.+)\)\s+by.+\(Postcow\) with (E?SMTPS?A?) id ([A-F0-9]+).+;.*?/
|
||||
/^Received: from .*? \([\w-.]* \[.*?\]\)(.*|\n.*)\(Authenticated sender: (.+)\)\s+by.+\(Postcow\) with (.*)/
|
||||
/^Received: from .*? \([\w\-.]* \[.*?\]\)(.*|\n.*)\(Authenticated sender: (.+)\)\s+by.+\(Postcow\) with (.*)/
|
||||
REPLACE Received: from [127.0.0.1] (localhost [127.0.0.1]) by localhost (Mailerdaemon) with $3
|
||||
endif
|
||||
if /^\s*Received: from.* \(.*dovecot-mailcow.*mailcow-network.*\).*\(Postcow\)/
|
||||
@@ -12,7 +12,8 @@ if /^\s*Received: from.* \(.*rspamd-mailcow.*mailcow-network.*\).*\(Postcow\)/
|
||||
REPLACE Received: from rspamd (rspamd $3) by $4 (Postcow) with $5
|
||||
endif
|
||||
/^\s*X-Enigmail/ IGNORE
|
||||
/^\s*X-Mailer/ IGNORE
|
||||
# Not removing Mailer by default, might be signed
|
||||
#/^\s*X-Mailer/ IGNORE
|
||||
/^\s*X-Originating-IP/ IGNORE
|
||||
/^\s*X-Forward/ IGNORE
|
||||
# Not removing UA by default, might be signed
|
||||
|
||||
@@ -11,6 +11,7 @@ smtp_tls_session_cache_database = btree:${data_directory}/smtp_scache
|
||||
smtpd_relay_restrictions = permit_mynetworks,
|
||||
permit_sasl_authenticated,
|
||||
defer_unauth_destination
|
||||
smtpd_forbid_bare_newline = yes
|
||||
# alias maps are auto-generated in postfix.sh on startup
|
||||
alias_maps = hash:/etc/aliases
|
||||
alias_database = hash:/etc/aliases
|
||||
@@ -84,6 +85,7 @@ smtp_tls_security_level = dane
|
||||
smtpd_data_restrictions = reject_unauth_pipelining, permit
|
||||
smtpd_delay_reject = yes
|
||||
smtpd_error_sleep_time = 10s
|
||||
smtpd_forbid_bare_newline = yes
|
||||
smtpd_hard_error_limit = ${stress?1}${stress:5}
|
||||
smtpd_helo_required = yes
|
||||
smtpd_proxy_timeout = 600s
|
||||
@@ -112,14 +114,14 @@ smtpd_tls_loglevel = 1
|
||||
|
||||
# Mandatory protocols and ciphers are used when a connections is enforced to use TLS
|
||||
# Does _not_ apply to enforced incoming TLS settings per mailbox
|
||||
smtp_tls_mandatory_protocols = !SSLv2, !SSLv3, !TLSv1, !TLSv1.1
|
||||
lmtp_tls_mandatory_protocols = !SSLv2, !SSLv3, !TLSv1, !TLSv1.1
|
||||
smtpd_tls_mandatory_protocols = !SSLv2, !SSLv3, !TLSv1, !TLSv1.1
|
||||
smtp_tls_mandatory_protocols = >=TLSv1.2
|
||||
lmtp_tls_mandatory_protocols = >=TLSv1.2
|
||||
smtpd_tls_mandatory_protocols = >=TLSv1.2
|
||||
smtpd_tls_mandatory_ciphers = high
|
||||
|
||||
smtp_tls_protocols = !SSLv2, !SSLv3
|
||||
lmtp_tls_protocols = !SSLv2, !SSLv3, !TLSv1, !TLSv1.1
|
||||
smtpd_tls_protocols = !SSLv2, !SSLv3
|
||||
smtp_tls_protocols = >=TLSv1.2
|
||||
lmtp_tls_protocols = >=TLSv1.2
|
||||
smtpd_tls_protocols = >=TLSv1.2
|
||||
|
||||
smtpd_tls_security_level = may
|
||||
tls_preempt_cipherlist = yes
|
||||
@@ -160,13 +162,14 @@ transport_maps = pcre:/opt/postfix/conf/custom_transport.pcre,
|
||||
proxy:mysql:/opt/postfix/conf/sql/mysql_relay_ne.cf,
|
||||
proxy:mysql:/opt/postfix/conf/sql/mysql_transport_maps.cf
|
||||
smtp_sasl_auth_soft_bounce = no
|
||||
postscreen_discard_ehlo_keywords = silent-discard, dsn
|
||||
compatibility_level = 2
|
||||
postscreen_discard_ehlo_keywords = silent-discard, dsn, chunking
|
||||
smtpd_discard_ehlo_keywords = chunking, silent-discard
|
||||
compatibility_level = 3.7
|
||||
smtputf8_enable = no
|
||||
# Define protocols for SMTPS and submission service
|
||||
submission_smtpd_tls_mandatory_protocols = !SSLv2, !SSLv3, !TLSv1, !TLSv1.1
|
||||
smtps_smtpd_tls_mandatory_protocols = !SSLv2, !SSLv3, !TLSv1, !TLSv1.1
|
||||
submission_smtpd_tls_mandatory_protocols = >=TLSv1.2
|
||||
smtps_smtpd_tls_mandatory_protocols = >=TLSv1.2
|
||||
parent_domain_matches_subdomains = debug_peer_list,fast_flush_domains,mynetworks,qmqpd_authorized_clients
|
||||
|
||||
# DO NOT EDIT ANYTHING BELOW #
|
||||
# User overrides #
|
||||
# Overrides #
|
||||
|
||||
@@ -4,7 +4,6 @@ smtp inet n - n - 1 postscreen
|
||||
-o postscreen_upstream_proxy_protocol=haproxy
|
||||
-o syslog_name=haproxy
|
||||
smtpd pass - - n - - smtpd
|
||||
-o smtpd_helo_restrictions=permit_mynetworks,reject_non_fqdn_helo_hostname
|
||||
-o smtpd_sasl_auth_enable=no
|
||||
-o smtpd_sender_restrictions=permit_mynetworks,reject_unlisted_sender,reject_unknown_sender_domain
|
||||
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
113
data/conf/rspamd/dynmaps/footer.php
Normal file
113
data/conf/rspamd/dynmaps/footer.php
Normal file
@@ -0,0 +1,113 @@
|
||||
<?php
|
||||
// File size is limited by Nginx site to 10M
|
||||
// To speed things up, we do not include prerequisites
|
||||
header('Content-Type: text/plain');
|
||||
require_once "vars.inc.php";
|
||||
// Do not show errors, we log to using error_log
|
||||
ini_set('error_reporting', 0);
|
||||
// Init database
|
||||
//$dsn = $database_type . ':host=' . $database_host . ';dbname=' . $database_name;
|
||||
$dsn = $database_type . ":unix_socket=" . $database_sock . ";dbname=" . $database_name;
|
||||
$opt = [
|
||||
PDO::ATTR_ERRMODE => PDO::ERRMODE_EXCEPTION,
|
||||
PDO::ATTR_DEFAULT_FETCH_MODE => PDO::FETCH_ASSOC,
|
||||
PDO::ATTR_EMULATE_PREPARES => false,
|
||||
];
|
||||
try {
|
||||
$pdo = new PDO($dsn, $database_user, $database_pass, $opt);
|
||||
}
|
||||
catch (PDOException $e) {
|
||||
error_log("FOOTER: " . $e . PHP_EOL);
|
||||
http_response_code(501);
|
||||
exit;
|
||||
}
|
||||
|
||||
if (!function_exists('getallheaders')) {
|
||||
function getallheaders() {
|
||||
if (!is_array($_SERVER)) {
|
||||
return array();
|
||||
}
|
||||
$headers = array();
|
||||
foreach ($_SERVER as $name => $value) {
|
||||
if (substr($name, 0, 5) == 'HTTP_') {
|
||||
$headers[str_replace(' ', '-', ucwords(strtolower(str_replace('_', ' ', substr($name, 5)))))] = $value;
|
||||
}
|
||||
}
|
||||
return $headers;
|
||||
}
|
||||
}
|
||||
|
||||
// Read headers
|
||||
$headers = getallheaders();
|
||||
// Get Domain
|
||||
$domain = $headers['Domain'];
|
||||
// Get Username
|
||||
$username = $headers['Username'];
|
||||
// Get From
|
||||
$from = $headers['From'];
|
||||
// define empty footer
|
||||
$empty_footer = json_encode(array(
|
||||
'html' => '',
|
||||
'plain' => '',
|
||||
'skip_replies' => 0,
|
||||
'vars' => array()
|
||||
));
|
||||
|
||||
error_log("FOOTER: checking for domain " . $domain . ", user " . $username . " and address " . $from . PHP_EOL);
|
||||
|
||||
try {
|
||||
// try get $target_domain if $domain is an alias_domain
|
||||
$stmt = $pdo->prepare("SELECT `target_domain` FROM `alias_domain`
|
||||
WHERE `alias_domain` = :alias_domain");
|
||||
$stmt->execute(array(
|
||||
':alias_domain' => $domain
|
||||
));
|
||||
$alias_domain = $stmt->fetch(PDO::FETCH_ASSOC);
|
||||
if (!$alias_domain) {
|
||||
$target_domain = $domain;
|
||||
} else {
|
||||
$target_domain = $alias_domain['target_domain'];
|
||||
}
|
||||
|
||||
// get footer associated with the domain
|
||||
$stmt = $pdo->prepare("SELECT `plain`, `html`, `mbox_exclude`, `alias_domain_exclude`, `skip_replies` FROM `domain_wide_footer`
|
||||
WHERE `domain` = :domain");
|
||||
$stmt->execute(array(
|
||||
':domain' => $target_domain
|
||||
));
|
||||
$footer = $stmt->fetch(PDO::FETCH_ASSOC);
|
||||
|
||||
// check if the sender is excluded
|
||||
if (in_array($from, json_decode($footer['mbox_exclude']))){
|
||||
$footer = false;
|
||||
}
|
||||
if (in_array($domain, json_decode($footer['alias_domain_exclude']))){
|
||||
$footer = false;
|
||||
}
|
||||
if (empty($footer)){
|
||||
echo $empty_footer;
|
||||
exit;
|
||||
}
|
||||
error_log("FOOTER: " . json_encode($footer) . PHP_EOL);
|
||||
|
||||
// footer will be applied
|
||||
// get custom mailbox attributes to insert into the footer
|
||||
$stmt = $pdo->prepare("SELECT `custom_attributes` FROM `mailbox` WHERE `username` = :username");
|
||||
$stmt->execute(array(
|
||||
':username' => $username
|
||||
));
|
||||
$custom_attributes = $stmt->fetch(PDO::FETCH_ASSOC)['custom_attributes'];
|
||||
if (empty($custom_attributes)){
|
||||
$custom_attributes = (object)array();
|
||||
}
|
||||
}
|
||||
catch (Exception $e) {
|
||||
error_log("FOOTER: " . $e->getMessage() . PHP_EOL);
|
||||
http_response_code(502);
|
||||
exit;
|
||||
}
|
||||
|
||||
|
||||
// return footer
|
||||
$footer["vars"] = $custom_attributes;
|
||||
echo json_encode($footer);
|
||||
@@ -21,6 +21,10 @@ FREEMAIL_TO_UNDISC_RCPT {
|
||||
SOGO_CONTACT_EXCLUDE {
|
||||
expression = "(-WHITELISTED_FWD_HOST | -g+:policies) & ^SOGO_CONTACT & !DMARC_POLICY_ALLOW";
|
||||
}
|
||||
# Remove MAILCOW_WHITE symbol for senders with broken policy recieved not from fwd hosts
|
||||
MAILCOW_WHITE_EXCLUDE {
|
||||
expression = "^MAILCOW_WHITE & (-DMARC_POLICY_REJECT | -DMARC_POLICY_QUARANTINE | -R_SPF_PERMFAIL) & !WHITELISTED_FWD_HOST";
|
||||
}
|
||||
# Spoofed header from and broken policy (excluding sieve host, rspamd host, whitelisted senders, authenticated senders and forward hosts)
|
||||
SPOOFED_UNAUTH {
|
||||
expression = "!MAILCOW_AUTH & !MAILCOW_WHITE & !RSPAMD_HOST & !SIEVE_HOST & MAILCOW_DOMAIN_HEADER_FROM & !WHITELISTED_FWD_HOST & -g+:policies";
|
||||
@@ -103,4 +107,4 @@ CLAMD_JS_MALWARE {
|
||||
expression = "CLAM_SECI_JS & !MAILCOW_WHITE";
|
||||
description = "JS malware found, Securite JS malware Flag set through ClamAV";
|
||||
score = 8;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,3 +6,4 @@ disable_monitoring = true;
|
||||
# In case a task times out (like DNS lookup), soft reject the message
|
||||
# instead of silently accepting the message without further processing.
|
||||
soft_reject_on_timeout = true;
|
||||
local_addrs = /etc/rspamd/custom/mailcow_networks.map;
|
||||
|
||||
9
data/conf/rspamd/local.d/ratelimit.conf
Normal file
9
data/conf/rspamd/local.d/ratelimit.conf
Normal file
@@ -0,0 +1,9 @@
|
||||
# Uncomment below to apply the ratelimits globally. Use Ratelimits inside mailcow UI to overwrite them for a specific domain/mailbox.
|
||||
# rates {
|
||||
# # Format: "1 / 1h" or "20 / 1m" etc.
|
||||
# to = "100 / 1s";
|
||||
# to_ip = "100 / 1s";
|
||||
# to_ip_from = "100 / 1s";
|
||||
# bounce_to = "100 / 1h";
|
||||
# bounce_to_ip = "7 / 1m";
|
||||
# }
|
||||
@@ -1,23 +1,8 @@
|
||||
rbls {
|
||||
sorbs {
|
||||
symbol = "RBL_SORBS";
|
||||
rbl = "dnsbl.sorbs.net";
|
||||
returncodes {
|
||||
# http:// www.sorbs.net/general/using.shtml
|
||||
RBL_SORBS_HTTP = "127.0.0.2";
|
||||
RBL_SORBS_SOCKS = "127.0.0.3";
|
||||
RBL_SORBS_MISC = "127.0.0.4";
|
||||
RBL_SORBS_SMTP = "127.0.0.5";
|
||||
RBL_SORBS_RECENT = "127.0.0.6";
|
||||
RBL_SORBS_WEB = "127.0.0.7";
|
||||
RBL_SORBS_DUL = "127.0.0.10";
|
||||
RBL_SORBS_BLOCK = "127.0.0.8";
|
||||
RBL_SORBS_ZOMBIE = "127.0.0.9";
|
||||
}
|
||||
}
|
||||
interserver_ip {
|
||||
symbol = "RBL_INTERSERVER_IP";
|
||||
rbl = "rbl.interserver.net";
|
||||
from = true;
|
||||
ipv6 = false;
|
||||
returncodes {
|
||||
RBL_INTERSERVER_BAD_IP = "127.0.0.2";
|
||||
@@ -35,4 +20,7 @@ rbls {
|
||||
RBL_INTERSERVER_BAD_URI = "127.0.0.2";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
.include(try=true,override=true,priority=5) "$LOCAL_CONFDIR/custom/dqs-rbl.conf"
|
||||
|
||||
}
|
||||
@@ -5,46 +5,6 @@ symbols = {
|
||||
"RBL_UCEPROTECT_LEVEL2" {
|
||||
score = 1.5;
|
||||
}
|
||||
"RBL_SORBS" {
|
||||
score = 0.0;
|
||||
description = "Unrecognised result from SORBS RBL";
|
||||
}
|
||||
"RBL_SORBS_HTTP" {
|
||||
score = 2.5;
|
||||
description = "List of Open HTTP Proxy Servers.";
|
||||
}
|
||||
"RBL_SORBS_SOCKS" {
|
||||
score = 2.5;
|
||||
description = "List of Open SOCKS Proxy Servers.";
|
||||
}
|
||||
"RBL_SORBS_MISC" {
|
||||
score = 1.0;
|
||||
description = "List of open Proxy Servers not listed in the SOCKS or HTTP lists.";
|
||||
}
|
||||
"RBL_SORBS_SMTP" {
|
||||
score = 4.0;
|
||||
description = "List of Open SMTP relay servers.";
|
||||
}
|
||||
"RBL_SORBS_RECENT" {
|
||||
score = 2.0;
|
||||
description = "List of hosts that have been noted as sending spam/UCE/UBE to the admins of SORBS within the last 28 days (includes new.spam.dnsbl.sorbs.net).";
|
||||
}
|
||||
"RBL_SORBS_WEB" {
|
||||
score = 2.0;
|
||||
description = "List of web (WWW) servers which have spammer abusable vulnerabilities (e.g. FormMail scripts)";
|
||||
}
|
||||
"RBL_SORBS_DUL" {
|
||||
score = 2.0;
|
||||
description = "Dynamic IP Address ranges (NOT a Dial Up list!)";
|
||||
}
|
||||
"RBL_SORBS_BLOCK" {
|
||||
score = 0.5;
|
||||
description = "List of hosts demanding that they never be tested by SORBS.";
|
||||
}
|
||||
"RBL_SORBS_ZOMBIE" {
|
||||
score = 2.0;
|
||||
description = "List of networks hijacked from their original owners, some of which have already used for spamming.";
|
||||
}
|
||||
"RECEIVED_SPAMHAUS_XBL" {
|
||||
weight = 0.0;
|
||||
description = "Received address is listed in ZEN XBL";
|
||||
@@ -57,4 +17,261 @@ symbols = {
|
||||
score = 4.0;
|
||||
description = "Listed on Interserver RBL";
|
||||
}
|
||||
|
||||
"SPAMHAUS_ZEN" {
|
||||
weight = 7.0;
|
||||
}
|
||||
"SH_AUTHBL_RECEIVED" {
|
||||
weight = 4.0;
|
||||
}
|
||||
"RBL_DBL_SPAM" {
|
||||
weight = 7.0;
|
||||
}
|
||||
"RBL_DBL_PHISH" {
|
||||
weight = 7.0;
|
||||
}
|
||||
"RBL_DBL_MALWARE" {
|
||||
weight = 7.0;
|
||||
}
|
||||
"RBL_DBL_BOTNET" {
|
||||
weight = 7.0;
|
||||
}
|
||||
"RBL_DBL_ABUSED_SPAM" {
|
||||
weight = 3.0;
|
||||
}
|
||||
"RBL_DBL_ABUSED_PHISH" {
|
||||
weight = 3.0;
|
||||
}
|
||||
"RBL_DBL_ABUSED_MALWARE" {
|
||||
weight = 3.0;
|
||||
}
|
||||
"RBL_DBL_ABUSED_BOTNET" {
|
||||
weight = 3.0;
|
||||
}
|
||||
"RBL_ZRD_VERY_FRESH_DOMAIN" {
|
||||
weight = 7.0;
|
||||
}
|
||||
"RBL_ZRD_FRESH_DOMAIN" {
|
||||
weight = 4.0;
|
||||
}
|
||||
"ZRD_VERY_FRESH_DOMAIN" {
|
||||
weight = 7.0;
|
||||
}
|
||||
"ZRD_FRESH_DOMAIN" {
|
||||
weight = 4.0;
|
||||
}
|
||||
"SH_EMAIL_DBL" {
|
||||
weight = 7.0;
|
||||
}
|
||||
"SH_EMAIL_DBL_ABUSED" {
|
||||
weight = 7.0;
|
||||
}
|
||||
"SH_EMAIL_ZRD_VERY_FRESH_DOMAIN" {
|
||||
weight = 7.0;
|
||||
}
|
||||
"SH_EMAIL_ZRD_FRESH_DOMAIN" {
|
||||
weight = 4.0;
|
||||
}
|
||||
"RBL_DBL_DONT_QUERY_IPS" {
|
||||
weight = 0.0;
|
||||
}
|
||||
"RBL_ZRD_DONT_QUERY_IPS" {
|
||||
weight = 0.0;
|
||||
}
|
||||
"SH_EMAIL_ZRD_DONT_QUERY_IPS" {
|
||||
weight = 0.0;
|
||||
}
|
||||
"SH_EMAIL_DBL_DONT_QUERY_IPS" {
|
||||
weight = 0.0;
|
||||
}
|
||||
"DBL" {
|
||||
weight = 0.0;
|
||||
description = "DBL unknown result";
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
"DBL_SPAM" {
|
||||
weight = 7;
|
||||
description = "DBL uribl spam";
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
"DBL_PHISH" {
|
||||
weight = 7;
|
||||
description = "DBL uribl phishing";
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
"DBL_MALWARE" {
|
||||
weight = 7;
|
||||
description = "DBL uribl malware";
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
"DBL_BOTNET" {
|
||||
weight = 7;
|
||||
description = "DBL uribl botnet C&C domain";
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
|
||||
|
||||
"DBLABUSED_SPAM_FULLURLS" {
|
||||
weight = 5.5;
|
||||
description = "DBL uribl abused legit spam";
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
"DBLABUSED_PHISH_FULLURLS" {
|
||||
weight = 5.5;
|
||||
description = "DBL uribl abused legit phish";
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
"DBLABUSED_MALWARE_FULLURLS" {
|
||||
weight = 5.5;
|
||||
description = "DBL uribl abused legit malware";
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
"DBLABUSED_BOTNET_FULLURLS" {
|
||||
weight = 5.5;
|
||||
description = "DBL uribl abused legit botnet";
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
|
||||
"DBL_ABUSE" {
|
||||
weight = 5.5;
|
||||
description = "DBL uribl abused legit spam";
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
"DBL_ABUSE_REDIR" {
|
||||
weight = 1.5;
|
||||
description = "DBL uribl abused spammed redirector domain";
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
"DBL_ABUSE_PHISH" {
|
||||
weight = 5.5;
|
||||
description = "DBL uribl abused legit phish";
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
"DBL_ABUSE_MALWARE" {
|
||||
weight = 5.5;
|
||||
description = "DBL uribl abused legit malware";
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
"DBL_ABUSE_BOTNET" {
|
||||
weight = 5.5;
|
||||
description = "DBL uribl abused legit botnet C&C";
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
"DBL_PROHIBIT" {
|
||||
weight = 0.0;
|
||||
description = "DBL uribl IP queries prohibited!";
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
"DBL_BLOCKED_OPENRESOLVER" {
|
||||
weight = 0.0;
|
||||
description = "You are querying Spamhaus from an open resolver, please see https://www.spamhaus.org/returnc/pub/";
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
"DBL_BLOCKED" {
|
||||
weight = 0.0;
|
||||
description = "You are exceeding the query limit, please see https://www.spamhaus.org/returnc/vol/";
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
"SPAMHAUS_ZEN_URIBL" {
|
||||
weight = 0.0;
|
||||
description = "Spamhaus ZEN URIBL: Filtered result";
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
"URIBL_SBL" {
|
||||
weight = 6.5;
|
||||
description = "A domain in the message body resolves to an IP listed in Spamhaus SBL";
|
||||
one_shot = true;
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
"URIBL_SBL_CSS" {
|
||||
weight = 6.5;
|
||||
description = "A domain in the message body resolves to an IP listed in Spamhaus SBL CSS";
|
||||
one_shot = true;
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
"URIBL_PBL" {
|
||||
weight = 0.01;
|
||||
description = "A domain in the message body resolves to an IP listed in Spamhaus PBL";
|
||||
one_shot = true;
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
"URIBL_DROP" {
|
||||
weight = 6.5;
|
||||
description = "A domain in the message body resolves to an IP listed in Spamhaus DROP";
|
||||
one_shot = true;
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
"URIBL_XBL" {
|
||||
weight = 5.0;
|
||||
description = "A domain in the message body resolves to an IP listed in Spamhaus XBL";
|
||||
one_shot = true;
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
"SPAMHAUS_SBL_URL" {
|
||||
weight = 6.5;
|
||||
description = "A numeric URL in the message body is listed in Spamhaus SBL";
|
||||
one_shot = true;
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
|
||||
"SH_HBL_EMAIL" {
|
||||
weight = 7;
|
||||
description = "Email listed in HBL";
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
|
||||
"SH_HBL_FILE_MALICIOUS" {
|
||||
weight = 7;
|
||||
description = "An attachment hash is listed in Spamhaus HBL as malicious";
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
|
||||
"SH_HBL_FILE_SUSPICIOUS" {
|
||||
weight = 5;
|
||||
description = "An attachment hash is listed in Spamhaus HBL as suspicious";
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
|
||||
"RBL_SPAMHAUS_CW_BTC" {
|
||||
score = 7;
|
||||
description = "Bitcoin found in Spamhaus cryptowallet list";
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
|
||||
"RBL_SPAMHAUS_CW_ETH" {
|
||||
score = 7;
|
||||
description = "Ethereum found in Spamhaus cryptowallet list";
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
|
||||
"RBL_SPAMHAUS_CW_BCH" {
|
||||
score = 7;
|
||||
description = "Bitcoinhash found in Spamhaus cryptowallet list";
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
|
||||
"RBL_SPAMHAUS_CW_XMR" {
|
||||
score = 7;
|
||||
description = "Monero found in Spamhaus cryptowallet list";
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
|
||||
"RBL_SPAMHAUS_CW_LTC" {
|
||||
score = 7;
|
||||
description = "Litecoin found in Spamhaus cryptowallet list";
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
|
||||
"RBL_SPAMHAUS_CW_XRP" {
|
||||
score = 7;
|
||||
description = "Ripple found in Spamhaus cryptowallet list";
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
|
||||
"RBL_SPAMHAUS_HBL_URL" {
|
||||
score = 7;
|
||||
description = "URL found in spamhaus HBL blocklist";
|
||||
groups = ["spamhaus"];
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -1,12 +1,14 @@
|
||||
classifier "bayes" {
|
||||
# name = "custom"; # 'name' parameter must be set if multiple classifiers are defined
|
||||
learn_condition = 'return require("lua_bayes_learn").can_learn';
|
||||
new_schema = true;
|
||||
tokenizer {
|
||||
name = "osb";
|
||||
}
|
||||
backend = "redis";
|
||||
min_tokens = 11;
|
||||
min_learns = 5;
|
||||
new_schema = true;
|
||||
expire = 2592000;
|
||||
expire = 7776000;
|
||||
statfile {
|
||||
symbol = "BAYES_HAM";
|
||||
spam = false;
|
||||
|
||||
@@ -221,6 +221,16 @@ rspamd_config:register_symbol({
|
||||
local tagged_rcpt = task:get_symbol("TAGGED_RCPT")
|
||||
local mailcow_domain = task:get_symbol("RCPT_MAILCOW_DOMAIN")
|
||||
|
||||
local function remove_moo_tag()
|
||||
local moo_tag_header = task:get_header('X-Moo-Tag', false)
|
||||
if moo_tag_header then
|
||||
task:set_milter_reply({
|
||||
remove_headers = {['X-Moo-Tag'] = 0},
|
||||
})
|
||||
end
|
||||
return true
|
||||
end
|
||||
|
||||
if tagged_rcpt and tagged_rcpt[1].options and mailcow_domain then
|
||||
local tag = tagged_rcpt[1].options[1]
|
||||
rspamd_logger.infox("found tag: %s", tag)
|
||||
@@ -229,6 +239,7 @@ rspamd_config:register_symbol({
|
||||
|
||||
if action ~= 'no action' and action ~= 'greylist' then
|
||||
rspamd_logger.infox("skipping tag handler for action: %s", action)
|
||||
remove_moo_tag()
|
||||
return true
|
||||
end
|
||||
|
||||
@@ -243,6 +254,7 @@ rspamd_config:register_symbol({
|
||||
local function tag_callback_subfolder(err, data)
|
||||
if err or type(data) ~= 'string' then
|
||||
rspamd_logger.infox(rspamd_config, "subfolder tag handler for rcpt %s returned invalid or empty data (\"%s\") or error (\"%s\")", body, data, err)
|
||||
remove_moo_tag()
|
||||
else
|
||||
rspamd_logger.infox("Add X-Moo-Tag header")
|
||||
task:set_milter_reply({
|
||||
@@ -261,6 +273,7 @@ rspamd_config:register_symbol({
|
||||
)
|
||||
if not redis_ret_subfolder then
|
||||
rspamd_logger.infox(rspamd_config, "cannot make request to load tag handler for rcpt")
|
||||
remove_moo_tag()
|
||||
end
|
||||
|
||||
else
|
||||
@@ -268,7 +281,10 @@ rspamd_config:register_symbol({
|
||||
local sbj = task:get_header('Subject')
|
||||
new_sbj = '=?UTF-8?B?' .. tostring(util.encode_base64('[' .. tag .. '] ' .. sbj)) .. '?='
|
||||
task:set_milter_reply({
|
||||
remove_headers = {['Subject'] = 1},
|
||||
remove_headers = {
|
||||
['Subject'] = 1,
|
||||
['X-Moo-Tag'] = 0
|
||||
},
|
||||
add_headers = {['Subject'] = new_sbj}
|
||||
})
|
||||
end
|
||||
@@ -284,6 +300,7 @@ rspamd_config:register_symbol({
|
||||
)
|
||||
if not redis_ret_subject then
|
||||
rspamd_logger.infox(rspamd_config, "cannot make request to load tag handler for rcpt")
|
||||
remove_moo_tag()
|
||||
end
|
||||
|
||||
end
|
||||
@@ -295,6 +312,7 @@ rspamd_config:register_symbol({
|
||||
if #rcpt_split == 2 then
|
||||
if rcpt_split[1] == 'postmaster' then
|
||||
rspamd_logger.infox(rspamd_config, "not expanding postmaster alias")
|
||||
remove_moo_tag()
|
||||
else
|
||||
rspamd_http.request({
|
||||
task=task,
|
||||
@@ -307,7 +325,8 @@ rspamd_config:register_symbol({
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
else
|
||||
remove_moo_tag()
|
||||
end
|
||||
end,
|
||||
priority = 19
|
||||
@@ -503,3 +522,180 @@ rspamd_config:register_symbol({
|
||||
end
|
||||
end
|
||||
})
|
||||
|
||||
rspamd_config:register_symbol({
|
||||
name = 'MOO_FOOTER',
|
||||
type = 'prefilter',
|
||||
callback = function(task)
|
||||
local cjson = require "cjson"
|
||||
local lua_mime = require "lua_mime"
|
||||
local lua_util = require "lua_util"
|
||||
local rspamd_logger = require "rspamd_logger"
|
||||
local rspamd_http = require "rspamd_http"
|
||||
local envfrom = task:get_from(1)
|
||||
local uname = task:get_user()
|
||||
if not envfrom or not uname then
|
||||
return false
|
||||
end
|
||||
local uname = uname:lower()
|
||||
local env_from_domain = envfrom[1].domain:lower()
|
||||
local env_from_addr = envfrom[1].addr:lower()
|
||||
|
||||
-- determine newline type
|
||||
local function newline(task)
|
||||
local t = task:get_newlines_type()
|
||||
|
||||
if t == 'cr' then
|
||||
return '\r'
|
||||
elseif t == 'lf' then
|
||||
return '\n'
|
||||
end
|
||||
|
||||
return '\r\n'
|
||||
end
|
||||
-- retrieve footer
|
||||
local function footer_cb(err_message, code, data, headers)
|
||||
if err or type(data) ~= 'string' then
|
||||
rspamd_logger.infox(rspamd_config, "domain wide footer request for user %s returned invalid or empty data (\"%s\") or error (\"%s\")", uname, data, err)
|
||||
else
|
||||
|
||||
-- parse json string
|
||||
local footer = cjson.decode(data)
|
||||
if not footer then
|
||||
rspamd_logger.infox(rspamd_config, "parsing domain wide footer for user %s returned invalid or empty data (\"%s\") or error (\"%s\")", uname, data, err)
|
||||
else
|
||||
if footer and type(footer) == "table" and (footer.html and footer.html ~= "" or footer.plain and footer.plain ~= "") then
|
||||
rspamd_logger.infox(rspamd_config, "found domain wide footer for user %s: html=%s, plain=%s, vars=%s", uname, footer.html, footer.plain, footer.vars)
|
||||
|
||||
if footer.skip_replies ~= 0 then
|
||||
in_reply_to = task:get_header_raw('in-reply-to')
|
||||
if in_reply_to then
|
||||
rspamd_logger.infox(rspamd_config, "mail is a reply - skip footer")
|
||||
return
|
||||
end
|
||||
end
|
||||
|
||||
local envfrom_mime = task:get_from(2)
|
||||
local from_name = ""
|
||||
if envfrom_mime and envfrom_mime[1].name then
|
||||
from_name = envfrom_mime[1].name
|
||||
elseif envfrom and envfrom[1].name then
|
||||
from_name = envfrom[1].name
|
||||
end
|
||||
|
||||
-- default replacements
|
||||
local replacements = {
|
||||
auth_user = uname,
|
||||
from_user = envfrom[1].user,
|
||||
from_name = from_name,
|
||||
from_addr = envfrom[1].addr,
|
||||
from_domain = envfrom[1].domain:lower()
|
||||
}
|
||||
-- add custom mailbox attributes
|
||||
if footer.vars and type(footer.vars) == "string" then
|
||||
local footer_vars = cjson.decode(footer.vars)
|
||||
|
||||
if type(footer_vars) == "table" then
|
||||
for key, value in pairs(footer_vars) do
|
||||
replacements[key] = value
|
||||
end
|
||||
end
|
||||
end
|
||||
if footer.html and footer.html ~= "" then
|
||||
footer.html = lua_util.jinja_template(footer.html, replacements, true)
|
||||
end
|
||||
if footer.plain and footer.plain ~= "" then
|
||||
footer.plain = lua_util.jinja_template(footer.plain, replacements, true)
|
||||
end
|
||||
|
||||
-- add footer
|
||||
local out = {}
|
||||
local rewrite = lua_mime.add_text_footer(task, footer.html, footer.plain) or {}
|
||||
|
||||
local seen_cte
|
||||
local newline_s = newline(task)
|
||||
|
||||
local function rewrite_ct_cb(name, hdr)
|
||||
if rewrite.need_rewrite_ct then
|
||||
if name:lower() == 'content-type' then
|
||||
local nct = string.format('%s: %s/%s; charset=utf-8',
|
||||
'Content-Type', rewrite.new_ct.type, rewrite.new_ct.subtype)
|
||||
out[#out + 1] = nct
|
||||
-- update Content-Type header
|
||||
task:set_milter_reply({
|
||||
remove_headers = {['Content-Type'] = 0},
|
||||
})
|
||||
task:set_milter_reply({
|
||||
add_headers = {['Content-Type'] = string.format('%s/%s; charset=utf-8', rewrite.new_ct.type, rewrite.new_ct.subtype)}
|
||||
})
|
||||
return
|
||||
elseif name:lower() == 'content-transfer-encoding' then
|
||||
out[#out + 1] = string.format('%s: %s',
|
||||
'Content-Transfer-Encoding', 'quoted-printable')
|
||||
-- update Content-Transfer-Encoding header
|
||||
task:set_milter_reply({
|
||||
remove_headers = {['Content-Transfer-Encoding'] = 0},
|
||||
})
|
||||
task:set_milter_reply({
|
||||
add_headers = {['Content-Transfer-Encoding'] = 'quoted-printable'}
|
||||
})
|
||||
seen_cte = true
|
||||
return
|
||||
end
|
||||
end
|
||||
out[#out + 1] = hdr.raw:gsub('\r?\n?$', '')
|
||||
end
|
||||
|
||||
task:headers_foreach(rewrite_ct_cb, {full = true})
|
||||
|
||||
if not seen_cte and rewrite.need_rewrite_ct then
|
||||
out[#out + 1] = string.format('%s: %s', 'Content-Transfer-Encoding', 'quoted-printable')
|
||||
end
|
||||
|
||||
-- End of headers
|
||||
out[#out + 1] = newline_s
|
||||
|
||||
if rewrite.out then
|
||||
for _,o in ipairs(rewrite.out) do
|
||||
out[#out + 1] = o
|
||||
end
|
||||
else
|
||||
out[#out + 1] = task:get_rawbody()
|
||||
end
|
||||
local out_parts = {}
|
||||
for _,o in ipairs(out) do
|
||||
if type(o) ~= 'table' then
|
||||
out_parts[#out_parts + 1] = o
|
||||
out_parts[#out_parts + 1] = newline_s
|
||||
else
|
||||
local removePrefix = "--\x0D\x0AContent-Type"
|
||||
if string.lower(string.sub(tostring(o[1]), 1, string.len(removePrefix))) == string.lower(removePrefix) then
|
||||
o[1] = string.sub(tostring(o[1]), string.len("--\x0D\x0A") + 1)
|
||||
end
|
||||
out_parts[#out_parts + 1] = o[1]
|
||||
if o[2] then
|
||||
out_parts[#out_parts + 1] = newline_s
|
||||
end
|
||||
end
|
||||
end
|
||||
task:set_message(out_parts)
|
||||
else
|
||||
rspamd_logger.infox(rspamd_config, "domain wide footer request for user %s returned invalid or empty data (\"%s\")", uname, data)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
-- fetch footer
|
||||
rspamd_http.request({
|
||||
task=task,
|
||||
url='http://nginx:8081/footer.php',
|
||||
body='',
|
||||
callback=footer_cb,
|
||||
headers={Domain=env_from_domain,Username=uname,From=env_from_addr},
|
||||
})
|
||||
|
||||
return true
|
||||
end,
|
||||
priority = 1
|
||||
})
|
||||
|
||||
@@ -52,7 +52,7 @@ $headers = getallheaders();
|
||||
|
||||
$qid = $headers['X-Rspamd-Qid'];
|
||||
$fuzzy = $headers['X-Rspamd-Fuzzy'];
|
||||
$subject = $headers['X-Rspamd-Subject'];
|
||||
$subject = iconv_mime_decode($headers['X-Rspamd-Subject']);
|
||||
$score = $headers['X-Rspamd-Score'];
|
||||
$rcpts = $headers['X-Rspamd-Rcpt'];
|
||||
$user = $headers['X-Rspamd-User'];
|
||||
|
||||
@@ -53,7 +53,7 @@ $qid = $headers['X-Rspamd-Qid'];
|
||||
$rcpts = $headers['X-Rspamd-Rcpt'];
|
||||
$sender = $headers['X-Rspamd-From'];
|
||||
$ip = $headers['X-Rspamd-Ip'];
|
||||
$subject = $headers['X-Rspamd-Subject'];
|
||||
$subject = iconv_mime_decode($headers['X-Rspamd-Subject']);
|
||||
$messageid= $json_body->message_id;
|
||||
$priority = 0;
|
||||
|
||||
|
||||
@@ -1,11 +1,3 @@
|
||||
rates {
|
||||
# Format: "1 / 1h" or "20 / 1m" etc. - global ratelimits are disabled by default
|
||||
to = "100 / 1s";
|
||||
to_ip = "100 / 1s";
|
||||
to_ip_from = "100 / 1s";
|
||||
bounce_to = "100 / 1h";
|
||||
bounce_to_ip = "7 / 1m";
|
||||
}
|
||||
whitelisted_rcpts = "postmaster,mailer-daemon";
|
||||
max_rcpt = 25;
|
||||
custom_keywords = "/etc/rspamd/lua/ratelimit.lua";
|
||||
|
||||
@@ -12,9 +12,13 @@
|
||||
SOGoJunkFolderName= "Junk";
|
||||
SOGoMailDomain = "sogo.local";
|
||||
SOGoEnableEMailAlarms = YES;
|
||||
SOGoMailHideInlineAttachments = YES;
|
||||
SOGoFoldersSendEMailNotifications = YES;
|
||||
SOGoForwardEnabled = YES;
|
||||
|
||||
// Option to set Users as admin to globally manage calendar permissions etc. Disabled by default
|
||||
// SOGoSuperUsernames = ("moo@example.com");
|
||||
|
||||
SOGoUIAdditionalJSFiles = (
|
||||
js/theme.js,
|
||||
js/custom-sogo.js
|
||||
@@ -37,6 +41,7 @@
|
||||
|
||||
SOGoLanguage = English;
|
||||
SOGoMailAuxiliaryUserAccountsEnabled = YES;
|
||||
// SOGoCreateIdentitiesDisabled = NO;
|
||||
SOGoMailCustomFromEnabled = YES;
|
||||
SOGoMailingMechanism = smtp;
|
||||
SOGoSMTPAuthenticationType = plain;
|
||||
@@ -83,6 +88,7 @@
|
||||
//SoDebugBaseURL = YES;
|
||||
//ImapDebugEnabled = YES;
|
||||
//SOGoEASDebugEnabled = YES;
|
||||
SOGoEASSearchInBody = YES; // Experimental. Enabled since 2023-10
|
||||
//LDAPDebugEnabled = YES;
|
||||
//PGDebugEnabled = YES;
|
||||
//MySQL4DebugEnabled = YES;
|
||||
|
||||
@@ -20,6 +20,6 @@
|
||||
<pre>BACKUP_LOCATION=/tmp/ ./helper-scripts/backup_and_restore.sh backup all</pre>
|
||||
<pre>docker compose down --volumes ; docker compose up -d</pre>
|
||||
<p>Make sure your timezone is correct. Use "America/New_York" for example, do not use spaces. Check <a href="https://en.wikipedia.org/wiki/List_of_tz_database_time_zones">here</a> for a list.</p>
|
||||
<br>Click to learn more about <a style="color:red;text-decoration:none;" href="https://mailcow.github.io/mailcow-dockerized-docs/#get-support" target="_blank">getting support.</a>
|
||||
<br>Click to learn more about <a style="color:red;text-decoration:none;" href="https://docs.mailcow.email/#get-support" target="_blank">getting support.</a>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
@@ -85,6 +85,8 @@ $cors_settings = cors('get');
|
||||
$cors_settings['allowed_origins'] = str_replace(", ", "\n", $cors_settings['allowed_origins']);
|
||||
$cors_settings['allowed_methods'] = explode(", ", $cors_settings['allowed_methods']);
|
||||
|
||||
$f2b_data = fail2ban('get');
|
||||
|
||||
$template = 'admin.twig';
|
||||
$template_data = [
|
||||
'tfa_data' => $tfa_data,
|
||||
@@ -101,17 +103,21 @@ $template_data = [
|
||||
'domains' => $domains,
|
||||
'all_domains' => $all_domains,
|
||||
'mailboxes' => $mailboxes,
|
||||
'f2b_data' => fail2ban('get'),
|
||||
'f2b_data' => $f2b_data,
|
||||
'f2b_banlist_url' => getBaseUrl() . "/api/v1/get/fail2ban/banlist/" . $f2b_data['banlist_id'],
|
||||
'q_data' => quarantine('settings'),
|
||||
'qn_data' => quota_notification('get'),
|
||||
'pw_reset_data' => reset_password('get_notification'),
|
||||
'rsettings_map' => file_get_contents('http://nginx:8081/settings.php'),
|
||||
'rsettings' => $rsettings,
|
||||
'rspamd_regex_maps' => $rspamd_regex_maps,
|
||||
'logo_specs' => customize('get', 'main_logo_specs'),
|
||||
'logo_dark_specs' => customize('get', 'main_logo_dark_specs'),
|
||||
'ip_check' => customize('get', 'ip_check'),
|
||||
'password_complexity' => password_complexity('get'),
|
||||
'show_rspamd_global_filters' => @$_SESSION['show_rspamd_global_filters'],
|
||||
'cors_settings' => $cors_settings,
|
||||
'is_https' => isset($_SERVER['HTTPS']) && $_SERVER['HTTPS'] === 'on',
|
||||
'lang_admin' => json_encode($lang['admin']),
|
||||
'lang_datatables' => json_encode($lang['datatables'])
|
||||
];
|
||||
|
||||
@@ -3137,6 +3137,86 @@ paths:
|
||||
type: string
|
||||
type: object
|
||||
summary: Update domain
|
||||
/api/v1/edit/domain/footer:
|
||||
post:
|
||||
responses:
|
||||
"401":
|
||||
$ref: "#/components/responses/Unauthorized"
|
||||
"200":
|
||||
content:
|
||||
application/json:
|
||||
examples:
|
||||
response:
|
||||
value:
|
||||
- log:
|
||||
- mailbox
|
||||
- edit
|
||||
- domain_wide_footer
|
||||
- domains:
|
||||
- mailcow.tld
|
||||
html: "<br>foo {= foo =}"
|
||||
plain: "<foo {= foo =}"
|
||||
mbox_exclude:
|
||||
- moo@mailcow.tld
|
||||
- null
|
||||
msg:
|
||||
- domain_footer_modified
|
||||
- mailcow.tld
|
||||
type: success
|
||||
schema:
|
||||
properties:
|
||||
log:
|
||||
description: contains request object
|
||||
items: {}
|
||||
type: array
|
||||
msg:
|
||||
items: {}
|
||||
type: array
|
||||
type:
|
||||
enum:
|
||||
- success
|
||||
- danger
|
||||
- error
|
||||
type: string
|
||||
type: object
|
||||
description: OK
|
||||
headers: {}
|
||||
tags:
|
||||
- Domains
|
||||
description: >-
|
||||
You can update the footer of one or more domains per request.
|
||||
operationId: Update domain wide footer
|
||||
requestBody:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
example:
|
||||
attr:
|
||||
html: "<br>foo {= foo =}"
|
||||
plain: "foo {= foo =}"
|
||||
mbox_exclude:
|
||||
- moo@mailcow.tld
|
||||
items: mailcow.tld
|
||||
properties:
|
||||
attr:
|
||||
properties:
|
||||
html:
|
||||
description: Footer text in HTML format
|
||||
type: string
|
||||
plain:
|
||||
description: Footer text in PLAIN text format
|
||||
type: string
|
||||
mbox_exclude:
|
||||
description: Array of mailboxes to exclude from domain wide footer
|
||||
type: object
|
||||
type: object
|
||||
items:
|
||||
description: contains a list of domain names where you want to update the footer
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
type: object
|
||||
summary: Update domain wide footer
|
||||
/api/v1/edit/fail2ban:
|
||||
post:
|
||||
responses:
|
||||
@@ -3336,6 +3416,86 @@ paths:
|
||||
type: object
|
||||
type: object
|
||||
summary: Update mailbox
|
||||
/api/v1/edit/mailbox/custom-attribute:
|
||||
post:
|
||||
responses:
|
||||
"401":
|
||||
$ref: "#/components/responses/Unauthorized"
|
||||
"200":
|
||||
content:
|
||||
application/json:
|
||||
examples:
|
||||
response:
|
||||
value:
|
||||
- log:
|
||||
- mailbox
|
||||
- edit
|
||||
- mailbox_custom_attribute
|
||||
- mailboxes:
|
||||
- moo@mailcow.tld
|
||||
attribute:
|
||||
- role
|
||||
- foo
|
||||
value:
|
||||
- cow
|
||||
- bar
|
||||
- null
|
||||
msg:
|
||||
- mailbox_modified
|
||||
- moo@mailcow.tld
|
||||
type: success
|
||||
schema:
|
||||
properties:
|
||||
log:
|
||||
description: contains request object
|
||||
items: {}
|
||||
type: array
|
||||
msg:
|
||||
items: {}
|
||||
type: array
|
||||
type:
|
||||
enum:
|
||||
- success
|
||||
- danger
|
||||
- error
|
||||
type: string
|
||||
type: object
|
||||
description: OK
|
||||
headers: {}
|
||||
tags:
|
||||
- Mailboxes
|
||||
description: >-
|
||||
You can update custom attributes of one or more mailboxes per request.
|
||||
operationId: Update mailbox custom attributes
|
||||
requestBody:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
example:
|
||||
attr:
|
||||
attribute:
|
||||
- role
|
||||
- foo
|
||||
value:
|
||||
- cow
|
||||
- bar
|
||||
items:
|
||||
- moo@mailcow.tld
|
||||
properties:
|
||||
attr:
|
||||
properties:
|
||||
attribute:
|
||||
description: Array of attribute keys
|
||||
type: object
|
||||
value:
|
||||
description: Array of attribute values
|
||||
type: object
|
||||
type: object
|
||||
items:
|
||||
description: contains list of mailboxes you want update
|
||||
type: object
|
||||
type: object
|
||||
summary: Update mailbox custom attributes
|
||||
/api/v1/edit/mailq:
|
||||
post:
|
||||
responses:
|
||||
@@ -5581,6 +5741,7 @@ paths:
|
||||
sogo_access: "1"
|
||||
tls_enforce_in: "0"
|
||||
tls_enforce_out: "0"
|
||||
custom_attributes: {}
|
||||
domain: domain3.tld
|
||||
is_relayed: 0
|
||||
local_part: info
|
||||
@@ -5646,6 +5807,40 @@ paths:
|
||||
items:
|
||||
type: string
|
||||
summary: Edit Cross-Origin Resource Sharing (CORS) settings
|
||||
"/api/v1/get/spam-score/{mailbox}":
|
||||
get:
|
||||
parameters:
|
||||
- description: name of mailbox or empty for current user - admin user will retrieve the global spam filter score
|
||||
in: path
|
||||
name: mailbox
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
- description: e.g. api-key-string
|
||||
example: api-key-string
|
||||
in: header
|
||||
name: X-API-Key
|
||||
required: false
|
||||
schema:
|
||||
type: string
|
||||
responses:
|
||||
"401":
|
||||
$ref: "#/components/responses/Unauthorized"
|
||||
"200":
|
||||
content:
|
||||
application/json:
|
||||
examples:
|
||||
response:
|
||||
value:
|
||||
spam_score: "8,15"
|
||||
description: OK
|
||||
headers: {}
|
||||
tags:
|
||||
- Mailboxes
|
||||
description: >-
|
||||
Using this endpoint you can get the global spam filter score or the spam filter score of a certain mailbox.
|
||||
operationId: Get mailbox or global spam filter score
|
||||
summary: Get mailbox or global spam filter score
|
||||
|
||||
tags:
|
||||
- name: Domains
|
||||
|
||||
@@ -42,11 +42,6 @@ table.dataTable.dtr-inline.collapsed>tbody>tr.parent>th.dtr-control:before,
|
||||
table.dataTable td.dt-control:before {
|
||||
background-color: #979797 !important;
|
||||
}
|
||||
table.dataTable.dtr-inline.collapsed>tbody>tr>td.child,
|
||||
table.dataTable.dtr-inline.collapsed>tbody>tr>th.child,
|
||||
table.dataTable.dtr-inline.collapsed>tbody>tr>td.dataTables_empty {
|
||||
background-color: #fbfbfb;
|
||||
}
|
||||
table.dataTable.table-striped>tbody>tr>td {
|
||||
vertical-align: middle;
|
||||
}
|
||||
|
||||
@@ -228,8 +228,8 @@ legend {
|
||||
margin-top: 20px;
|
||||
}
|
||||
.slave-info {
|
||||
padding: 15px 0px 15px 15px;
|
||||
font-weight: bold;
|
||||
color: orange;
|
||||
}
|
||||
.alert-hr {
|
||||
margin:3px 0px;
|
||||
@@ -357,6 +357,7 @@ button[aria-expanded='true'] > .caret {
|
||||
}
|
||||
|
||||
.progress {
|
||||
height: 16px;
|
||||
background-color: #d5d5d5;
|
||||
}
|
||||
|
||||
@@ -370,3 +371,22 @@ button[aria-expanded='true'] > .caret {
|
||||
.btn-check:checked+.btn-outline-secondary, .btn-check:active+.btn-outline-secondary, .btn-outline-secondary:active, .btn-outline-secondary.active, .btn-outline-secondary.dropdown-toggle.show {
|
||||
background-color: #f0f0f0 !important;
|
||||
}
|
||||
.btn-check:checked+.btn-light, .btn-check:active+.btn-light, .btn-light:active, .btn-light.active, .show>.btn-light.dropdown-toggle {
|
||||
color: #fff;
|
||||
background-color: #555;
|
||||
background-image: none;
|
||||
border-color: #4d4d4d;
|
||||
}
|
||||
.btn-check:checked+.btn-light:focus, .btn-check:active+.btn-light:focus, .btn-light:active:focus, .btn-light.active:focus, .show>.btn-light.dropdown-toggle:focus,
|
||||
.btn-check:focus+.btn-light, .btn-light:focus {
|
||||
box-shadow: none;
|
||||
}
|
||||
.btn-group>.btn:not(:last-of-type) {
|
||||
border-top-right-radius: 0;
|
||||
border-bottom-right-radius: 0;
|
||||
}
|
||||
.badge.bg-info > a,
|
||||
.badge.bg-danger > a {
|
||||
color: #fff !important;
|
||||
text-decoration: none;
|
||||
}
|
||||
@@ -38,7 +38,7 @@
|
||||
|
||||
|
||||
@media (max-width: 767px) {
|
||||
.responsive-tabs .tab-pane {
|
||||
.responsive-tabs .tab-pane:not(.rsettings) {
|
||||
display: block !important;
|
||||
opacity: 1;
|
||||
}
|
||||
@@ -206,6 +206,19 @@
|
||||
.senders-mw220 {
|
||||
max-width: 100% !important;
|
||||
}
|
||||
|
||||
table.dataTable.dtr-inline.collapsed>tbody>tr>td.dtr-control:before,
|
||||
table.dataTable.dtr-inline.collapsed>tbody>tr>th.dtr-control:before,
|
||||
table.dataTable td.dt-control:before {
|
||||
height: 2rem;
|
||||
width: 2rem;
|
||||
line-height: 2rem;
|
||||
margin-top: -15px;
|
||||
}
|
||||
|
||||
li .dtr-data {
|
||||
padding: 0;
|
||||
}
|
||||
}
|
||||
|
||||
@media (max-width: 350px) {
|
||||
|
||||
@@ -1,90 +1,128 @@
|
||||
body {
|
||||
background-color: #414141;
|
||||
color: #e0e0e0;
|
||||
background-color: #1c1c1e;
|
||||
color: #f2f2f7;
|
||||
}
|
||||
|
||||
.card {
|
||||
border: 1px solid #1c1c1c;
|
||||
background-color: #3a3a3a;
|
||||
border: 1px solid #2c2c2e;
|
||||
background-color: #2c2c2e;
|
||||
}
|
||||
|
||||
legend {
|
||||
color: #f5f5f5;
|
||||
color: #f2f2f7;
|
||||
}
|
||||
|
||||
.card-header {
|
||||
color: #bbb;
|
||||
background-color: #2c2c2c;
|
||||
color: #8e8e93;
|
||||
background-color: #1c1c1e;
|
||||
border-color: transparent;
|
||||
}
|
||||
|
||||
.card-body {
|
||||
--bs-card-color: #bbb;
|
||||
}
|
||||
|
||||
.btn-secondary, .paginate_button, .page-link, .btn-light {
|
||||
color: #fff !important;
|
||||
background-color: #7a7a7a !important;
|
||||
border-color: #5c5c5c !important;
|
||||
color: #f2f2f7 !important;
|
||||
background-color: #5e5e5e !important;
|
||||
border-color: #4c4c4e !important;
|
||||
}
|
||||
|
||||
.btn-dark {
|
||||
color: #000 !important;;
|
||||
background-color: #f6f6f6 !important;;
|
||||
border-color: #ddd !important;;
|
||||
}
|
||||
.btn-check:checked+.btn-secondary, .btn-check:active+.btn-secondary, .btn-secondary:active, .btn-secondary.active, .show>.btn-secondary.dropdown-toggle {
|
||||
border-color: #7a7a7a !important;
|
||||
}
|
||||
.alert-secondary {
|
||||
color: #fff !important;
|
||||
background-color: #7a7a7a !important;
|
||||
border-color: #5c5c5c !important;
|
||||
}
|
||||
.bg-secondary {
|
||||
color: #fff !important;
|
||||
background-color: #7a7a7a !important;
|
||||
}
|
||||
.alert-secondary, .alert-secondary a, .alert-secondary .alert-link {
|
||||
color: #fff;
|
||||
}
|
||||
.page-item.active .page-link {
|
||||
background-color: #158cba !important;
|
||||
border-color: #127ba3 !important;
|
||||
color: #f2f2f7 !important;
|
||||
background-color: #242424 !important;
|
||||
border-color: #1c1c1e !important;
|
||||
}
|
||||
|
||||
.btn-secondary:focus, .btn-secondary:hover, .btn-group.open .dropdown-toggle.btn-secondary {
|
||||
background-color: #7a7a7a;
|
||||
border-color: #5c5c5c !important;
|
||||
color: #fff;
|
||||
background-color: #444444;
|
||||
border-color: #4c4c4e !important;
|
||||
color: #f2f2f7;
|
||||
}
|
||||
|
||||
.btn-check:checked+.btn-secondary, .btn-check:active+.btn-secondary, .btn-secondary:active, .btn-secondary.active, .show>.btn-secondary.dropdown-toggle {
|
||||
border-color: #5e5e5e !important;
|
||||
}
|
||||
|
||||
.alert-secondary {
|
||||
color: #f2f2f7 !important;
|
||||
background-color: #5e5e5e !important;
|
||||
border-color: #4c4c4e !important;
|
||||
}
|
||||
|
||||
.bg-secondary {
|
||||
color: #f2f2f7 !important;
|
||||
background-color: #5e5e5e !important;
|
||||
}
|
||||
|
||||
.alert-secondary, .alert-secondary a, .alert-secondary .alert-link {
|
||||
color: #f2f2f7;
|
||||
}
|
||||
|
||||
.page-item.active .page-link {
|
||||
background-color: #3e3e3e !important;
|
||||
border-color: #3e3e3e !important;
|
||||
}
|
||||
|
||||
.btn-secondary:focus, .btn-secondary:hover, .btn-group.open .dropdown-toggle.btn-secondary {
|
||||
background-color: #5e5e5e;
|
||||
border-color: #4c4c4e !important;
|
||||
color: #f2f2f7;
|
||||
}
|
||||
|
||||
.btn-secondary:disabled, .btn-secondary.disabled {
|
||||
border-color: #7a7a7a !important;
|
||||
border-color: #5e5e5e !important;
|
||||
}
|
||||
|
||||
.modal-content {
|
||||
background-color: #414141;
|
||||
--bs-modal-color: #bbb;
|
||||
background-color: #2c2c2e;
|
||||
}
|
||||
|
||||
.modal-header {
|
||||
border-bottom: 1px solid #161616;
|
||||
border-bottom: 1px solid #999;
|
||||
}
|
||||
|
||||
.modal-title {
|
||||
color: white;
|
||||
color: #bbb;
|
||||
}
|
||||
|
||||
.modal .btn-close {
|
||||
filter: invert(1) grayscale(100%) brightness(200%);
|
||||
}
|
||||
|
||||
.navbar.bg-light {
|
||||
background-color: #222222 !important;
|
||||
border-color: #181818;
|
||||
background-color: #1c1c1e !important;
|
||||
border-color: #2c2c2e;
|
||||
}
|
||||
|
||||
.nav-link {
|
||||
color: #ccc !important;
|
||||
color: #8e8e93 !important;
|
||||
}
|
||||
|
||||
.nav-tabs .nav-link.active, .nav-tabs .nav-item.show .nav-link {
|
||||
background: none;
|
||||
}
|
||||
|
||||
.nav-tabs, .nav-tabs .nav-link {
|
||||
border-color: #444444 !important;
|
||||
}
|
||||
|
||||
.nav-tabs .nav-link:not(.disabled):hover, .nav-tabs .nav-link:not(.disabled):focus, .nav-tabs .nav-link.active {
|
||||
border-bottom-color: #414141;
|
||||
border-bottom-color: #1c1c1e !important;
|
||||
}
|
||||
|
||||
.card .nav-tabs .nav-link:not(.disabled):hover, .card .nav-tabs .nav-link:not(.disabled):focus, .card .nav-tabs .nav-link.active {
|
||||
border-bottom-color: #2c2c2e !important;
|
||||
}
|
||||
|
||||
.table, .table-striped>tbody>tr:nth-of-type(odd)>*, tbody tr {
|
||||
color: #ccc !important;
|
||||
color: #f2f2f7 !important;
|
||||
}
|
||||
|
||||
.dropdown-menu {
|
||||
background-color: #585858;
|
||||
border: 1px solid #333;
|
||||
background-color: #424242;
|
||||
border: 1px solid #282828;
|
||||
}
|
||||
.dropdown-menu>li>a:focus, .dropdown-menu>li>a:hover {
|
||||
color: #fafafa;
|
||||
@@ -97,7 +135,7 @@ legend {
|
||||
color: #d4d4d4 !important;
|
||||
}
|
||||
tbody tr {
|
||||
color: #555;
|
||||
color: #ccc;
|
||||
}
|
||||
.navbar-default .navbar-nav>.open>a, .navbar-default .navbar-nav>.open>a:focus, .navbar-default .navbar-nav>.open>a:hover {
|
||||
color: #ccc;
|
||||
@@ -106,18 +144,15 @@ tbody tr {
|
||||
color: #ccc;
|
||||
}
|
||||
.list-group-item {
|
||||
background-color: #333;
|
||||
background-color: #282828;
|
||||
border: 1px solid #555;
|
||||
}
|
||||
.table-striped>tbody>tr:nth-of-type(odd) {
|
||||
background-color: #333;
|
||||
background-color: #424242;
|
||||
}
|
||||
table.dataTable>tbody>tr.child ul.dtr-details>li {
|
||||
border-bottom: 1px solid rgba(255, 255, 255, 0.13);
|
||||
}
|
||||
tbody tr {
|
||||
color: #ccc;
|
||||
}
|
||||
.label.label-last-login {
|
||||
color: #ccc !important;
|
||||
background-color: #555 !important;
|
||||
@@ -133,20 +168,23 @@ div.numberedtextarea-number {
|
||||
}
|
||||
.well {
|
||||
border: 1px solid #555;
|
||||
background-color: #333;
|
||||
background-color: #282828;
|
||||
}
|
||||
pre {
|
||||
color: #ccc;
|
||||
background-color: #333;
|
||||
background-color: #282828;
|
||||
border: 1px solid #555;
|
||||
}
|
||||
.form-control {
|
||||
background-color: transparent;
|
||||
}
|
||||
input.form-control, textarea.form-control {
|
||||
color: #e2e2e2 !important;
|
||||
background-color: #555 !important;
|
||||
background-color: #424242 !important;
|
||||
border: 1px solid #999;
|
||||
}
|
||||
input.form-control:focus, textarea.form-control {
|
||||
background-color: #555 !important;
|
||||
background-color: #424242 !important;
|
||||
}
|
||||
input.form-control:disabled, textarea.form-disabled {
|
||||
color: #a8a8a8 !important;
|
||||
@@ -154,16 +192,14 @@ input.form-control:disabled, textarea.form-disabled {
|
||||
}
|
||||
.input-group-addon {
|
||||
color: #ccc;
|
||||
background-color: #555 !important;
|
||||
background-color: #424242 !important;
|
||||
border: 1px solid #999;
|
||||
}
|
||||
.input-group-text {
|
||||
color: #ccc;
|
||||
background-color: #242424;
|
||||
background-color: #1c1c1c;
|
||||
}
|
||||
|
||||
|
||||
|
||||
.list-group-item {
|
||||
color: #ccc;
|
||||
}
|
||||
@@ -175,11 +211,11 @@ input.form-control:disabled, textarea.form-disabled {
|
||||
}
|
||||
.dropdown-item.active:hover {
|
||||
color: #fff !important;
|
||||
background-color: #31b1e4;
|
||||
background-color: #007aff;
|
||||
}
|
||||
.form-select {
|
||||
color: #e2e2e2!important;
|
||||
background-color: #555!important;
|
||||
background-color: #424242!important;
|
||||
border: 1px solid #999;
|
||||
}
|
||||
|
||||
@@ -191,31 +227,6 @@ input.form-control:disabled, textarea.form-disabled {
|
||||
color: #fff !important;
|
||||
}
|
||||
|
||||
|
||||
.table-secondary {
|
||||
--bs-table-bg: #7a7a7a;
|
||||
--bs-table-striped-bg: #e4e4e4;
|
||||
--bs-table-striped-color: #000;
|
||||
--bs-table-active-bg: #d8d8d8;
|
||||
--bs-table-active-color: #000;
|
||||
--bs-table-hover-bg: #dedede;
|
||||
--bs-table-hover-color: #000;
|
||||
color: #000;
|
||||
border-color: #d8d8d8;
|
||||
}
|
||||
|
||||
.table-light {
|
||||
--bs-table-bg: #f6f6f6;
|
||||
--bs-table-striped-bg: #eaeaea;
|
||||
--bs-table-striped-color: #000;
|
||||
--bs-table-active-bg: #dddddd;
|
||||
--bs-table-active-color: #000;
|
||||
--bs-table-hover-bg: #e4e4e4;
|
||||
--bs-table-hover-color: #000;
|
||||
color: #000;
|
||||
border-color: #dddddd;
|
||||
}
|
||||
|
||||
.form-control-plaintext {
|
||||
color: #e0e0e0;
|
||||
}
|
||||
@@ -289,12 +300,12 @@ a:hover {
|
||||
}
|
||||
|
||||
.tag-box {
|
||||
background-color: #555;
|
||||
border: 1px solid #999;
|
||||
background-color: #282828;
|
||||
border: 1px solid #555;
|
||||
}
|
||||
.tag-input {
|
||||
color: #fff;
|
||||
background-color: #555;
|
||||
background-color: #282828;
|
||||
}
|
||||
.tag-add {
|
||||
color: #ccc;
|
||||
@@ -303,43 +314,24 @@ a:hover {
|
||||
color: #d1d1d1;
|
||||
}
|
||||
|
||||
|
||||
table.dataTable.dtr-inline.collapsed>tbody>tr>td.dtr-control:before:hover,
|
||||
table.dataTable.dtr-inline.collapsed>tbody>tr>th.dtr-control:before:hover {
|
||||
background-color: #7a7a7a !important;
|
||||
}
|
||||
table.dataTable.dtr-inline.collapsed>tbody>tr>td.dtr-control:before,
|
||||
table.dataTable.dtr-inline.collapsed>tbody>tr>th.dtr-control:before {
|
||||
background-color: #7a7a7a !important;
|
||||
border: 1.5px solid #5c5c5c !important;
|
||||
color: #fff !important;
|
||||
}
|
||||
table.dataTable.dtr-inline.collapsed>tbody>tr.parent>td.dtr-control:before,
|
||||
table.dataTable.dtr-inline.collapsed>tbody>tr.parent>th.dtr-control:before {
|
||||
background-color: #949494;
|
||||
}
|
||||
table.dataTable.dtr-inline.collapsed>tbody>tr>td.child,
|
||||
table.dataTable.dtr-inline.collapsed>tbody>tr>th.child,
|
||||
table.dataTable.dtr-inline.collapsed>tbody>tr>td.dataTables_empty {
|
||||
background-color: #444444;
|
||||
}
|
||||
|
||||
.btn-check-label {
|
||||
color: #fff;
|
||||
}
|
||||
.btn-outline-secondary:hover {
|
||||
background-color: #c3c3c3;
|
||||
background-color: #5c5c5c;
|
||||
}
|
||||
.btn.btn-outline-secondary {
|
||||
color: #fff !important;
|
||||
color: #e0e0e0 !important;
|
||||
border-color: #7a7a7a !important;
|
||||
}
|
||||
.btn-check:checked+.btn-outline-secondary, .btn-check:active+.btn-outline-secondary, .btn-outline-secondary:active, .btn-outline-secondary.active, .btn-outline-secondary.dropdown-toggle.show {
|
||||
background-color: #9b9b9b !important;
|
||||
background-color: #7a7a7a !important;
|
||||
}
|
||||
.btn-check:checked+.btn-light, .btn-check:active+.btn-light, .btn-light:active, .btn-light.active, .show>.btn-light.dropdown-toggle {
|
||||
color: #f2f2f7 !important;
|
||||
background-color: #242424 !important;
|
||||
border-color: #1c1c1e !important;
|
||||
}
|
||||
|
||||
|
||||
|
||||
.btn-input-missing,
|
||||
.btn-input-missing:hover,
|
||||
.btn-input-missing:active,
|
||||
@@ -347,27 +339,119 @@ table.dataTable.dtr-inline.collapsed>tbody>tr>td.dataTables_empty {
|
||||
.btn-input-missing:active:hover,
|
||||
.btn-input-missing:active:focus {
|
||||
color: #fff !important;
|
||||
background-color: #ff2f24 !important;
|
||||
border-color: #e21207 !important;
|
||||
background-color: #ff3b30 !important;
|
||||
border-color: #ff3b30 !important;
|
||||
}
|
||||
|
||||
.inputMissingAttr {
|
||||
border-color: #FF4136 !important;
|
||||
border-color: #ff4136 !important;
|
||||
}
|
||||
|
||||
|
||||
.list-group-details {
|
||||
background: #444444;
|
||||
background: #555;
|
||||
}
|
||||
.list-group-header {
|
||||
background: #333;
|
||||
background: #444;
|
||||
}
|
||||
|
||||
span.mail-address-item {
|
||||
background-color: #333;
|
||||
background-color: #444;
|
||||
border-radius: 4px;
|
||||
border: 1px solid #555;
|
||||
padding: 2px 7px;
|
||||
display: inline-block;
|
||||
margin: 2px 6px 2px 0;
|
||||
}
|
||||
|
||||
table.dataTable.dtr-inline.collapsed>tbody>tr>td.dtr-control:before:hover,
|
||||
table.dataTable.dtr-inline.collapsed>tbody>tr>th.dtr-control:before:hover {
|
||||
background-color: #7a7a7a !important;
|
||||
}
|
||||
|
||||
table.dataTable.dtr-inline.collapsed>tbody>tr>td.dtr-control:before,
|
||||
table.dataTable.dtr-inline.collapsed>tbody>tr>th.dtr-control:before {
|
||||
background-color: #7a7a7a !important;
|
||||
border: 1.5px solid #5c5c5c !important;
|
||||
color: #e0e0e0 !important;
|
||||
}
|
||||
|
||||
table.dataTable.dtr-inline.collapsed>tbody>tr.parent>td.dtr-control:before,
|
||||
table.dataTable.dtr-inline.collapsed>tbody>tr.parent>th.dtr-control:before {
|
||||
background-color: #949494;
|
||||
}
|
||||
|
||||
table.dataTable.dtr-inline.collapsed>tbody>tr>td.child,
|
||||
table.dataTable.dtr-inline.collapsed>tbody>tr>th.child,
|
||||
table.dataTable.dtr-inline.collapsed>tbody>tr>td.dataTables_empty {
|
||||
background-color: #414141;
|
||||
}
|
||||
|
||||
table.table, .table-striped>tbody>tr:nth-of-type(odd)>*, tbody tr {
|
||||
color: #ccc !important;
|
||||
}
|
||||
|
||||
.table-secondary {
|
||||
--bs-table-bg: #282828;
|
||||
--bs-table-striped-bg: #343434;
|
||||
--bs-table-striped-color: #f2f2f7;
|
||||
--bs-table-active-bg: #4c4c4c;
|
||||
--bs-table-active-color: #f2f2f7;
|
||||
--bs-table-hover-bg: #3a3a3a;
|
||||
--bs-table-hover-color: #f2f2f7;
|
||||
color: #ccc;
|
||||
border-color: #3a3a3a;
|
||||
}
|
||||
|
||||
.table-light {
|
||||
--bs-table-bg: #3a3a3a;
|
||||
--bs-table-striped-bg: #444444;
|
||||
--bs-table-striped-color: #f2f2f7;
|
||||
--bs-table-active-bg: #5c5c5c;
|
||||
--bs-table-active-color: #f2f2f7;
|
||||
--bs-table-hover-bg: #4c4c4c;
|
||||
--bs-table-hover-color: #f2f2f7;
|
||||
color: #ccc;
|
||||
border-color: #4c4c4c;
|
||||
}
|
||||
|
||||
.table-bordered {
|
||||
border-color: #3a3a3a;
|
||||
}
|
||||
|
||||
.table-bordered th,
|
||||
.table-bordered td {
|
||||
border-color: #3a3a3a !important;
|
||||
}
|
||||
|
||||
.table-bordered thead th,
|
||||
.table-bordered thead td {
|
||||
border-bottom-width: 2px;
|
||||
}
|
||||
|
||||
.table-striped>tbody>tr:nth-of-type(odd)>td,
|
||||
.table-striped>tbody>tr:nth-of-type(odd)>th {
|
||||
background-color: #282828;
|
||||
}
|
||||
|
||||
.table-hover>tbody>tr:hover {
|
||||
background-color: #343434;
|
||||
}
|
||||
|
||||
.table>:not(caption)>*>* {
|
||||
border-color: #5c5c5c;
|
||||
--bs-table-color-state:#bbb;
|
||||
--bs-table-bg: #3a3a3a;
|
||||
}
|
||||
.text-muted {
|
||||
--bs-secondary-color: #8e8e93;
|
||||
}
|
||||
input::placeholder {
|
||||
color: #8e8e93 !important;
|
||||
}
|
||||
|
||||
.form-select {
|
||||
background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16'%3e%3cpath fill='none' stroke='%238e8e93' stroke-linecap='round' stroke-linejoin='round' stroke-width='2' d='m2 5 6 6 6-6'/%3e%3c/svg%3e");
|
||||
}
|
||||
.btn-light, .btn-light:hover {
|
||||
background-image: none;
|
||||
}
|
||||
@@ -39,9 +39,13 @@ foreach ($containers as $container => $container_info) {
|
||||
$StartedAt['month'],
|
||||
$StartedAt['day'],
|
||||
$StartedAt['year']));
|
||||
$user_tz = new DateTimeZone(getenv('TZ'));
|
||||
$date->setTimezone($user_tz);
|
||||
$started = $date->format('r');
|
||||
try {
|
||||
$user_tz = new DateTimeZone(getenv('TZ'));
|
||||
$date->setTimezone($user_tz);
|
||||
$started = $date->format('r');
|
||||
} catch(Exception $e) {
|
||||
$started = '?';
|
||||
}
|
||||
}
|
||||
else {
|
||||
$started = '?';
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user