Mirror of https://github.com/domainaware/parsedmarc.git (synced 2026-03-12 01:31:26 +00:00)

Compare commits: 101 commits, 8.18.2 ... copilot/re
Commit SHA1s:

2174f23eb5, febbb107c4, 9a64b494e7, e93209c766, d1c22466be, 3d1b2522d3,
af9ad568ec, 748164d177, 487e5e1149, 73010cf964, a4a5475aa8, dab78880df,
fb54e3b742, 6799f10364, 445c9565a4, 4b786846ae, 23ae563cd8, cdd000e675,
7d58abc67b, a18ae439de, d7061330a8, 9d5654b8ec, a0e0070dd0, cf3b7f2c29,
d312522ab7, 888d717476, 1127f65fbb, d017dfcddf, 5fae99aacc, ba57368ac3,
dc6ee5de98, 158d63d205, f1933b906c, 4b98d795ff, b1356f7dfc, 1969196e1a,
553f15f6a9, 1fc9f638e2, 48bff504b4, 681b7cbf85, 0922d6e83a, baf3f95fb1,
a51f945305, 55dbf8e3db, 00267c9847, 51356175e1, 3be10d30dd, 98342ecac6,
38a3d4eaae, a05c230152, 17bdc3a134, 858be00f22, 597ca64f9f, c5dbe2c4dc,
082b3d355f, 2a7ce47bb1, 9882405d96, fce84763b9, 8a299b8600, b4c2b21547,
865c249437, 013859f10e, 6d4a31a120, 45d3dc3b2e, 4bbd97dbaa, 5df152d469,
d990bef342, caf77ca6d4, 4b3d32c5a6, 5df5c10f80, 308d4657ab, 0f74e33094,
9f339e11f5, 391e84b717, 8bf06ce5af, 2b7ae50a27, 3feb478793, 01630bb61c,
39347cb244, ed25526d59, 880d7110fe, d62001f5a4, 0720bffcb6, fecd55a97d,
a121306eed, 980c9c7904, 963f5d796f, 6532f3571b, ea878443a8, 9f6de41958,
119192701c, 1d650be48a, a85553fb18, 5975d8eb21, 87ae6175f2, 68b93ed580,
55508b513b, 71511c0cfc, 7c45812284, 607a091a5f, c308bf938c
10  .github/workflows/docker.yml  (vendored)

@@ -24,11 +24,11 @@ jobs:
 
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v5
 
       - name: Docker meta
         id: meta
-        uses: docker/metadata-action@v3
+        uses: docker/metadata-action@v5
         with:
           images: |
             ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
@@ -40,16 +40,14 @@ jobs:
             type=semver,pattern={{major}}.{{minor}}
 
       - name: Log in to the Container registry
-        # https://github.com/docker/login-action/releases/tag/v2.0.0
-        uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b
+        uses: docker/login-action@v3
         with:
           registry: ${{ env.REGISTRY }}
           username: ${{ github.actor }}
           password: ${{ secrets.GITHUB_TOKEN }}
 
       - name: Build and push Docker image
-        # https://github.com/docker/build-push-action/releases/tag/v3.0.0
-        uses: docker/build-push-action@e551b19e49efd4e98792db7592c17c09b89db8d8
+        uses: docker/build-push-action@v6
         with:
          context: .
          push: ${{ github.event_name == 'release' }}
14  .github/workflows/python-tests.yml  (vendored)

@@ -15,7 +15,7 @@ jobs:
 
     services:
       elasticsearch:
-        image: elasticsearch:8.18.2
+        image: elasticsearch:8.19.7
         env:
           discovery.type: single-node
           cluster.name: parsedmarc-cluster
@@ -30,18 +30,18 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python-version: ["3.9", "3.10", "3.11", "3.12"]
+        python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
       - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v5
+        uses: actions/setup-python@v6
        with:
          python-version: ${{ matrix.python-version }}
       - name: Install system dependencies
         run: |
-          sudo apt-get update
-          sudo apt-get install -y libemail-outlook-message-perl
+          sudo apt-get -q update
+          sudo apt-get -qy install libemail-outlook-message-perl
       - name: Install Python dependencies
         run: |
           python -m pip install --upgrade pip
@@ -65,6 +65,6 @@ jobs:
         run: |
           hatch build
       - name: Upload coverage to Codecov
-        uses: codecov/codecov-action@v4
+        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
5  .gitignore  (vendored)

@@ -106,7 +106,7 @@ ENV/
 .idea/
 
 # VS Code launch config
-.vscode/launch.json
+#.vscode/launch.json
 
 # Visual Studio Code settings
 #.vscode/
@@ -142,3 +142,6 @@ scratch.py
 
 parsedmarc/resources/maps/base_reverse_dns.csv
 parsedmarc/resources/maps/unknown_base_reverse_dns.csv
+parsedmarc/resources/maps/sus_domains.csv
+parsedmarc/resources/maps/unknown_domains.txt
+*.bak
45  .vscode/launch.json  (vendored, new file)

@@ -0,0 +1,45 @@
+{
+    // Use IntelliSense to learn about possible attributes.
+    // Hover to view descriptions of existing attributes.
+    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
+    "version": "0.2.0",
+    "configurations": [
+        {
+            "name": "Python Debugger: Current File",
+            "type": "debugpy",
+            "request": "launch",
+            "program": "${file}",
+            "console": "integratedTerminal"
+        },
+        {
+            "name": "tests.py",
+            "type": "debugpy",
+            "request": "launch",
+            "program": "tests.py",
+            "console": "integratedTerminal"
+        },
+        {
+            "name": "sample",
+            "type": "debugpy",
+            "request": "launch",
+            "module": "parsedmarc.cli",
+            "args": ["samples/private/sample"]
+        },
+        {
+            "name": "sortlists.py",
+            "type": "debugpy",
+            "request": "launch",
+            "program": "sortlists.py",
+            "cwd": "${workspaceFolder}/parsedmarc/resources/maps",
+            "console": "integratedTerminal"
+        },
+        {
+            "name": "find_unknown_base_reverse_dns.py",
+            "type": "debugpy",
+            "request": "launch",
+            "program": "find_unknown_base_reverse_dns.py",
+            "cwd": "${workspaceFolder}/parsedmarc/resources/maps",
+            "console": "integratedTerminal"
+        }
+    ]
+}
28  .vscode/settings.json  (vendored)

@@ -1,4 +1,14 @@
 {
+    "[python]": {
+        "editor.defaultFormatter": "charliermarsh.ruff",
+        "editor.formatOnSave": true,
+
+        // Let Ruff handle lint fixes + import sorting on save
+        "editor.codeActionsOnSave": {
+            "source.fixAll.ruff": "explicit",
+            "source.organizeImports.ruff": "explicit"
+        }
+    },
     "markdownlint.config": {
         "MD024": false
     },
@@ -13,6 +23,7 @@
         "automodule",
         "backported",
         "bellsouth",
+        "boto",
         "brakhane",
         "Brightmail",
         "CEST",
@@ -35,7 +46,9 @@
         "exampleuser",
         "expiringdict",
         "fieldlist",
+        "GELF",
         "genindex",
+        "geoip",
         "geoipupdate",
         "Geolite",
         "geolocation",
@@ -44,7 +57,10 @@
         "hostnames",
         "htpasswd",
         "httpasswd",
+        "httplib",
         "IMAP",
+        "imapclient",
+        "infile",
         "Interaktive",
         "IPDB",
         "journalctl",
@@ -60,17 +76,20 @@
         "mailrelay",
         "mailsuite",
         "maxdepth",
+        "MAXHEADERS",
         "maxmind",
         "mbox",
         "mfrom",
         "michaeldavie",
         "mikesiegel",
+        "Mimecast",
         "mitigations",
         "MMDB",
         "modindex",
         "msgconvert",
         "msgraph",
         "MSSP",
+        "multiprocess",
         "Munge",
         "ndjson",
         "newkey",
@@ -80,14 +99,19 @@
         "nosecureimap",
         "nosniff",
         "nwettbewerb",
+        "opensearch",
+        "opensearchpy",
         "parsedmarc",
         "passsword",
         "Postorius",
         "premade",
         "procs",
         "publicsuffix",
+        "publicsuffixlist",
         "publixsuffix",
+        "pygelf",
         "pypy",
+        "pytest",
         "quickstart",
         "Reindex",
         "replyto",
@@ -95,10 +119,13 @@
         "Rollup",
         "Rpdm",
         "SAMEORIGIN",
+        "sdist",
         "Servernameone",
         "setuptools",
         "smartquotes",
         "SMTPTLS",
+        "sortlists",
+        "sortmaps",
         "sourcetype",
         "STARTTLS",
         "tasklist",
@@ -111,6 +138,7 @@
         "truststore",
         "Übersicht",
         "uids",
+        "Uncategorized",
         "unparasable",
         "uper",
         "urllib",
629  CHANGELOG.md  (file diff suppressed because it is too large)
@@ -1,4 +1,4 @@
-ARG BASE_IMAGE=python:3.9-slim
+ARG BASE_IMAGE=python:3.13-slim
 ARG USERNAME=parsedmarc
 ARG USER_UID=1000
 ARG USER_GID=$USER_UID
41  README.md

@@ -9,7 +9,7 @@ Package](https://img.shields.io/pypi/v/parsedmarc.svg)](https://pypi.org/project
 [](https://pypistats.org/packages/parsedmarc)
 
 <p align="center">
-  <img src="https://github.com/domainaware/parsedmarc/raw/master/docs/source/_static/screenshots/dmarc-summary-charts.png?raw=true" alt="A screenshot of DMARC summary charts in Kibana"/>
+  <img src="https://raw.githubusercontent.com/domainaware/parsedmarc/refs/heads/master/docs/source/_static/screenshots/dmarc-summary-charts.png?raw=true" alt="A screenshot of DMARC summary charts in Kibana"/>
 </p>
 
 `parsedmarc` is a Python module and CLI utility for parsing DMARC
@@ -23,25 +23,42 @@ ProofPoint Email Fraud Defense, and Valimail.
 
 ## Help Wanted
 
-This project is maintained by one developer. Please consider
-reviewing the open
-[issues](https://github.com/domainaware/parsedmarc/issues) to see how
-you can contribute code, documentation, or user support. Assistance on
-the pinned issues would be particularly helpful.
+This project is maintained by one developer. Please consider reviewing the open
+[issues](https://github.com/domainaware/parsedmarc/issues) to see how you can
+contribute code, documentation, or user support. Assistance on the pinned
+issues would be particularly helpful.
 
 Thanks to all
 [contributors](https://github.com/domainaware/parsedmarc/graphs/contributors)!
 
 ## Features
 
-- Parses draft and 1.0 standard aggregate/rua reports
-- Parses forensic/failure/ruf reports
-- Can parse reports from an inbox over IMAP, Microsoft Graph, or Gmail
-  API
+- Parses draft and 1.0 standard aggregate/rua DMARC reports
+- Parses forensic/failure/ruf DMARC reports
+- Parses reports from SMTP TLS Reporting
+- Can parse reports from an inbox over IMAP, Microsoft Graph, or Gmail API
 - Transparently handles gzip or zip compressed reports
 - Consistent data structures
 - Simple JSON and/or CSV output
 - Optionally email the results
-- Optionally send the results to Elasticsearch, Opensearch, and/or Splunk, for use
-  with premade dashboards
+- Optionally send the results to Elasticsearch, Opensearch, and/or Splunk, for
+  use with premade dashboards
 - Optionally send reports to Apache Kafka
 
+## Python Compatibility
+
+This project supports the following Python versions, which are either actively maintained or are the default versions
+for RHEL or Debian.
+
+| Version | Supported | Reason |
+|---------|-----------|------------------------------------------------------------|
+| < 3.6   | ❌        | End of Life (EOL) |
+| 3.6     | ❌        | Used in RHEL 8, but not supported by project dependencies |
+| 3.7     | ❌        | End of Life (EOL) |
+| 3.8     | ❌        | End of Life (EOL) |
+| 3.9     | ✅        | Supported until August 2026 (Debian 11); May 2032 (RHEL 9) |
+| 3.10    | ✅        | Actively maintained |
+| 3.11    | ✅        | Actively maintained; supported until June 2028 (Debian 12) |
+| 3.12    | ✅        | Actively maintained; supported until May 2035 (RHEL 10) |
+| 3.13    | ✅        | Actively maintained; supported until June 2030 (Debian 13) |
+| 3.14    | ❌        | Not currently supported due to [this imapclient bug](https://github.com/mjs/imapclient/issues/618)|
12  build.sh

@@ -9,17 +9,19 @@ fi
 . venv/bin/activate
 pip install .[build]
 ruff format .
-ruff check .
 cd docs
 make clean
 make html
 touch build/html/.nojekyll
-if [ -d "./../parsedmarc-docs" ]; then
+if [ -d "../../parsedmarc-docs" ]; then
     cp -rf build/html/* ../../parsedmarc-docs/
 fi
 cd ..
-sort -o "parsedmarc/resources/maps/known_unknown_base_reverse_dns.txt" "parsedmarc/resources/maps/known_unknown_base_reverse_dns.txt"
-./sortmaps.py
+cd parsedmarc/resources/maps
+python3 sortlists.py
+echo "Checking for invalid UTF-8 bytes in base_reverse_dns_map.csv"
+python3 find_bad_utf8.py base_reverse_dns_map.csv
+cd ../../..
 python3 tests.py
 rm -rf dist/ build/
 hatch build
@@ -1,8 +1,6 @@
-version: '3.7'
-
 services:
   elasticsearch:
-    image: docker.elastic.co/elasticsearch/elasticsearch:8.3.1
+    image: docker.elastic.co/elasticsearch/elasticsearch:8.19.7
     environment:
       - network.host=127.0.0.1
       - http.host=0.0.0.0
@@ -14,7 +12,7 @@ services:
       - xpack.security.enabled=false
       - xpack.license.self_generated.type=basic
     ports:
-      - 127.0.0.1:9200:9200
+      - "127.0.0.1:9200:9200"
     ulimits:
       memlock:
         soft: -1
@@ -30,7 +28,7 @@ services:
         retries: 24
 
   opensearch:
-    image: opensearchproject/opensearch:2.18.0
+    image: opensearchproject/opensearch:2
     environment:
       - network.host=127.0.0.1
       - http.host=0.0.0.0
@@ -41,7 +39,7 @@ services:
      - bootstrap.memory_lock=true
      - OPENSEARCH_INITIAL_ADMIN_PASSWORD=${OPENSEARCH_INITIAL_ADMIN_PASSWORD}
     ports:
-      - 127.0.0.1:9201:9200
+      - "127.0.0.1:9201:9200"
     ulimits:
       memlock:
         soft: -1
@@ -21,7 +21,6 @@
 :members:
 ```
 
-
 ## parsedmarc.splunk
 
 ```{eval-rst}
@@ -20,7 +20,7 @@ from parsedmarc import __version__
 # -- Project information -----------------------------------------------------
 
 project = "parsedmarc"
-copyright = "2018 - 2023, Sean Whalen and contributors"
+copyright = "2018 - 2025, Sean Whalen and contributors"
 author = "Sean Whalen and contributors"
 
 # The version info for the project you're documenting, acts as replacement for
@@ -33,17 +33,36 @@ and Valimail.
 
 ## Features
 
-- Parses draft and 1.0 standard aggregate/rua reports
-- Parses forensic/failure/ruf reports
+- Parses draft and 1.0 standard aggregate/rua DMARC reports
+- Parses forensic/failure/ruf DMARC reports
+- Parses reports from SMTP TLS Reporting
 - Can parse reports from an inbox over IMAP, Microsoft Graph, or Gmail API
 - Transparently handles gzip or zip compressed reports
 - Consistent data structures
 - Simple JSON and/or CSV output
 - Optionally email the results
-- Optionally send the results to Elasticsearch/OpenSearch and/or Splunk, for use with
-  premade dashboards
+- Optionally send the results to Elasticsearch, Opensearch, and/or Splunk, for use
+  with premade dashboards
 - Optionally send reports to Apache Kafka
 
+## Python Compatibility
+
+This project supports the following Python versions, which are either actively maintained or are the default versions
+for RHEL or Debian.
+
+| Version | Supported | Reason |
+|---------|-----------|------------------------------------------------------------|
+| < 3.6   | ❌        | End of Life (EOL) |
+| 3.6     | ❌        | Used in RHEL 8, but not supported by project dependencies |
+| 3.7     | ❌        | End of Life (EOL) |
+| 3.8     | ❌        | End of Life (EOL) |
+| 3.9     | ✅        | Supported until August 2026 (Debian 11); May 2032 (RHEL 9) |
+| 3.10    | ✅        | Actively maintained |
+| 3.11    | ✅        | Actively maintained; supported until June 2028 (Debian 12) |
+| 3.12    | ✅        | Actively maintained; supported until May 2035 (RHEL 10) |
+| 3.13    | ✅        | Actively maintained; supported until June 2030 (Debian 13) |
+| 3.14    | ❌        | Not currently supported due to [this imapclient bug](https://github.com/mjs/imapclient/issues/618)|
+
 ```{toctree}
 :caption: 'Contents'
 :maxdepth: 2
@@ -199,7 +199,7 @@ sudo apt-get install libemail-outlook-message-perl
 [geoipupdate releases page on github]: https://github.com/maxmind/geoipupdate/releases
 [ip to country lite database]: https://db-ip.com/db/download/ip-to-country-lite
 [license keys]: https://www.maxmind.com/en/accounts/current/license-key
-[maxmind geoipupdate page]: https://dev.maxmind.com/geoip/geoipupdate/
+[maxmind geoipupdate page]: https://dev.maxmind.com/geoip/updating-databases/
 [maxmind geolite2 country database]: https://dev.maxmind.com/geoip/geolite2-free-geolocation-data
 [registering for a free geolite2 account]: https://www.maxmind.com/en/geolite2/signup
 [to comply with various privacy regulations]: https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases/
@@ -23,6 +23,8 @@ of the report schema.
     "report_id": "9391651994964116463",
     "begin_date": "2012-04-27 20:00:00",
     "end_date": "2012-04-28 19:59:59",
+    "timespan_requires_normalization": false,
+    "original_timespan_seconds": 86399,
     "errors": []
   },
   "policy_published": {
@@ -39,8 +41,10 @@ of the report schema.
       "source": {
         "ip_address": "72.150.241.94",
         "country": "US",
-        "reverse_dns": "adsl-72-150-241-94.shv.bellsouth.net",
-        "base_domain": "bellsouth.net"
+        "reverse_dns": null,
+        "base_domain": null,
+        "name": null,
+        "type": null
       },
       "count": 2,
       "alignment": {
@@ -74,7 +78,10 @@ of the report schema.
             "result": "pass"
           }
         ]
-      }
+      },
+      "normalized_timespan": false,
+      "interval_begin": "2012-04-28 00:00:00",
+      "interval_end": "2012-04-28 23:59:59"
     }
   ]
 }
@@ -83,8 +90,10 @@ of the report schema.
 ### CSV aggregate report
 
 ```text
-xml_schema,org_name,org_email,org_extra_contact_info,report_id,begin_date,end_date,errors,domain,adkim,aspf,p,sp,pct,fo,source_ip_address,source_country,source_reverse_dns,source_base_domain,count,spf_aligned,dkim_aligned,dmarc_aligned,disposition,policy_override_reasons,policy_override_comments,envelope_from,header_from,envelope_to,dkim_domains,dkim_selectors,dkim_results,spf_domains,spf_scopes,spf_results
-draft,acme.com,noreply-dmarc-support@acme.com,http://acme.com/dmarc/support,9391651994964116463,2012-04-27 20:00:00,2012-04-28 19:59:59,,example.com,r,r,none,none,100,0,72.150.241.94,US,adsl-72-150-241-94.shv.bellsouth.net,bellsouth.net,2,True,False,True,none,,,example.com,example.com,,example.com,none,fail,example.com,mfrom,pass
+xml_schema,org_name,org_email,org_extra_contact_info,report_id,begin_date,end_date,normalized_timespan,errors,domain,adkim,aspf,p,sp,pct,fo,source_ip_address,source_country,source_reverse_dns,source_base_domain,source_name,source_type,count,spf_aligned,dkim_aligned,dmarc_aligned,disposition,policy_override_reasons,policy_override_comments,envelope_from,header_from,envelope_to,dkim_domains,dkim_selectors,dkim_results,spf_domains,spf_scopes,spf_results
+draft,acme.com,noreply-dmarc-support@acme.com,http://acme.com/dmarc/support,9391651994964116463,2012-04-28 00:00:00,2012-04-28 23:59:59,False,,example.com,r,r,none,none,100,0,72.150.241.94,US,,,,,2,True,False,True,none,,,example.com,example.com,,example.com,none,fail,example.com,mfrom,pass
+draft,acme.com,noreply-dmarc-support@acme.com,http://acme.com/dmarc/support,9391651994964116463,2012-04-28 00:00:00,2012-04-28 23:59:59,False,,example.com,r,r,none,none,100,0,72.150.241.94,US,,,,,2,True,False,True,none,,,example.com,example.com,,example.com,none,fail,example.com,mfrom,pass
+
 ```
 
 ## Sample forensic report output
@@ -4,47 +4,50 @@
 
 ```text
 usage: parsedmarc [-h] [-c CONFIG_FILE] [--strip-attachment-payloads] [-o OUTPUT]
-                  [--aggregate-json-filename AGGREGATE_JSON_FILENAME]
-                  [--forensic-json-filename FORENSIC_JSON_FILENAME]
-                  [--aggregate-csv-filename AGGREGATE_CSV_FILENAME]
-                  [--forensic-csv-filename FORENSIC_CSV_FILENAME]
-                  [-n NAMESERVERS [NAMESERVERS ...]] [-t DNS_TIMEOUT] [--offline]
-                  [-s] [--verbose] [--debug] [--log-file LOG_FILE] [-v]
-                  [file_path ...]
+                  [--aggregate-json-filename AGGREGATE_JSON_FILENAME] [--forensic-json-filename FORENSIC_JSON_FILENAME]
+                  [--smtp-tls-json-filename SMTP_TLS_JSON_FILENAME] [--aggregate-csv-filename AGGREGATE_CSV_FILENAME]
+                  [--forensic-csv-filename FORENSIC_CSV_FILENAME] [--smtp-tls-csv-filename SMTP_TLS_CSV_FILENAME]
+                  [-n NAMESERVERS [NAMESERVERS ...]] [-t DNS_TIMEOUT] [--offline] [-s] [-w] [--verbose] [--debug]
+                  [--log-file LOG_FILE] [--no-prettify-json] [-v]
+                  [file_path ...]
 
 Parses DMARC reports
 
 positional arguments:
-  file_path             one or more paths to aggregate or forensic report
-                        files, emails, or mbox files'
+  file_path             one or more paths to aggregate or forensic report files, emails, or mbox files'
 
-optional arguments:
+options:
   -h, --help            show this help message and exit
   -c CONFIG_FILE, --config-file CONFIG_FILE
                         a path to a configuration file (--silent implied)
   --strip-attachment-payloads
                         remove attachment payloads from forensic report output
   -o OUTPUT, --output OUTPUT
                         write output files to the given directory
   --aggregate-json-filename AGGREGATE_JSON_FILENAME
                         filename for the aggregate JSON output file
   --forensic-json-filename FORENSIC_JSON_FILENAME
                         filename for the forensic JSON output file
-  --aggregate-csv-filename AGGREGATE_CSV_FILENAME
-                        filename for the aggregate CSV output file
-  --forensic-csv-filename FORENSIC_CSV_FILENAME
-                        filename for the forensic CSV output file
-  -n NAMESERVERS [NAMESERVERS ...], --nameservers NAMESERVERS [NAMESERVERS ...]
-                        nameservers to query
-  -t DNS_TIMEOUT, --dns_timeout DNS_TIMEOUT
-                        number of seconds to wait for an answer from DNS
-                        (default: 2.0)
-  --offline             do not make online queries for geolocation or DNS
-  -s, --silent          only print errors and warnings
-  --verbose             more verbose output
-  --debug               print debugging information
-  --log-file LOG_FILE   output logging to a file
-  -v, --version         show program's version number and exit
+  --smtp-tls-json-filename SMTP_TLS_JSON_FILENAME
+                        filename for the SMTP TLS JSON output file
+  --aggregate-csv-filename AGGREGATE_CSV_FILENAME
+                        filename for the aggregate CSV output file
+  --forensic-csv-filename FORENSIC_CSV_FILENAME
+                        filename for the forensic CSV output file
+  --smtp-tls-csv-filename SMTP_TLS_CSV_FILENAME
+                        filename for the SMTP TLS CSV output file
+  -n NAMESERVERS [NAMESERVERS ...], --nameservers NAMESERVERS [NAMESERVERS ...]
+                        nameservers to query
+  -t DNS_TIMEOUT, --dns_timeout DNS_TIMEOUT
+                        number of seconds to wait for an answer from DNS (default: 2.0)
+  --offline             do not make online queries for geolocation or DNS
+  -s, --silent          only print errors
+  -w, --warnings        print warnings in addition to errors
+  --verbose             more verbose output
+  --debug               print debugging information
+  --log-file LOG_FILE   output logging to a file
+  --no-prettify-json    output JSON in a single line without indentation
+  -v, --version         show program's version number and exit
 ```
 
 :::{note}
@@ -120,8 +123,10 @@ The full set of configuration options are:
   Elasticsearch, Splunk and/or S3
 - `save_smtp_tls` - bool: Save SMTP-STS report data to
   Elasticsearch, Splunk and/or S3
+- `index_prefix_domain_map` - bool: A path mapping of Opensearch/Elasticsearch index prefixes to domain names
 - `strip_attachment_payloads` - bool: Remove attachment
   payloads from results
+- `silent` - bool: Set this to `False` to output results to STDOUT
 - `output` - str: Directory to place JSON and CSV files in. This is required if you set either of the JSON output file options.
 - `aggregate_json_filename` - str: filename for the aggregate
   JSON output file
@@ -167,7 +172,7 @@ The full set of configuration options are:
   IDLE response or the number of seconds until the next
   mail check (Default: `30`)
 - `since` - str: Search for messages since certain time. (Examples: `5m|3h|2d|1w`)
-  Acceptable units - {"m":"minutes", "h":"hours", "d":"days", "w":"weeks"}).
+  Acceptable units - {"m":"minutes", "h":"hours", "d":"days", "w":"weeks"}.
   Defaults to `1d` if incorrect value is provided.
 - `imap`
   - `host` - str: The IMAP server hostname or IP address
@@ -252,7 +257,7 @@ The full set of configuration options are:
   :::
   - `user` - str: Basic auth username
   - `password` - str: Basic auth password
-  - `apiKey` - str: API key
+  - `api_key` - str: API key
   - `ssl` - bool: Use an encrypted SSL/TLS connection
     (Default: `True`)
   - `timeout` - float: Timeout in seconds (Default: 60)
@@ -275,7 +280,7 @@ The full set of configuration options are:
   :::
   - `user` - str: Basic auth username
   - `password` - str: Basic auth password
-  - `apiKey` - str: API key
+  - `api_key` - str: API key
   - `ssl` - bool: Use an encrypted SSL/TLS connection
     (Default: `True`)
   - `timeout` - float: Timeout in seconds (Default: 60)
@@ -369,7 +374,7 @@ The full set of configuration options are:
   - `mode` - str: The GELF transport type to use. Valid modes: `tcp`, `udp`, `tls`
 
 - `maildir`
-  - `reports_folder` - str: Full path for mailbox maidir location (Default: `INBOX`)
+  - `maildir_path` - str: Full path for mailbox maidir location (Default: `INBOX`)
   - `maildir_create` - bool: Create maildir if not present (Default: False)
 
 - `webhook` - Post the individual reports to a webhook url with the report as the JSON body
@@ -445,6 +450,28 @@ PUT _cluster/settings
   Increasing this value increases resource usage.
 :::
 
+## Multi-tenant support
+
+Starting in `8.19.0`, ParseDMARC provides multi-tenant support by placing data into separate OpenSearch or Elasticsearch index prefixes. To set this up, create a YAML file that is formatted where each key is a tenant name, and the value is a list of domains related to that tenant, not including subdomains, like this:
+
+```yaml
+example:
+  - example.com
+  - example.net
+  - example.org
+
+whalensolutions:
+  - whalensolutions.com
+```
+
+Save it to disk where the user running ParseDMARC can read it, then set `index_prefix_domain_map` to that filepath in the `[general]` section of the ParseDMARC configuration file and do not set an `index_prefix` option in the `[elasticsearch]` or `[opensearch]` sections.
+
+When configured correctly, if ParseDMARC finds that a report is related to a domain in the mapping, the report will be saved in an index name that has the tenant name prefixed to it with a trailing underscore. Then, you can use the security features of Opensearch or the ELK stack to only grant users access to the indexes that they need.
+
+:::{note}
+A domain cannot be used in multiple tenant lists. Only the first prefix list that contains the matching domain is used.
+:::
+
 ## Running parsedmarc as a systemd service
 
 Use systemd to run `parsedmarc` as a service and process reports as
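For illustration only, a minimal sketch of how the `index_prefix_domain_map` option described above could be wired into a parsedmarc configuration file; the file path and the other option values here are hypothetical and are not taken from this diff:

```ini
[general]
save_aggregate = True
save_forensic = True
; Hypothetical path to the tenant-to-domain YAML mapping shown above
index_prefix_domain_map = /etc/parsedmarc/index_prefix_domain_map.yml

[elasticsearch]
hosts = 127.0.0.1:9200
ssl = False
; No index_prefix is set here: the per-tenant prefix comes from the mapping file
```

With a mapping like the YAML example above, a report for `whalensolutions.com` would be written to indexes prefixed with `whalensolutions_`, per the behavior described in the Multi-tenant support section.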
File diff suppressed because one or more lines are too long
File diff suppressed because it is too large
@@ -9,7 +9,7 @@ from configparser import ConfigParser
 from glob import glob
 import logging
 import math
-from collections import OrderedDict
+import yaml
 import json
 from ssl import CERT_NONE, create_default_context
 from multiprocessing import Pipe, Process
@@ -46,7 +46,7 @@ from parsedmarc.mail import (
 from parsedmarc.mail.graph import AuthMethod
 
 from parsedmarc.log import logger
-from parsedmarc.utils import is_mbox, get_reverse_dns
+from parsedmarc.utils import is_mbox, get_reverse_dns, get_base_domain
 from parsedmarc import SEEN_AGGREGATE_REPORT_IDS
 
 http.client._MAXHEADERS = 200  # pylint:disable=protected-access
@@ -76,6 +76,7 @@ def cli_parse(
     always_use_local_files,
     reverse_dns_map_path,
     reverse_dns_map_url,
+    normalize_timespan_threshold_hours,
     conn,
 ):
     """Separated this function for multiprocessing"""
@@ -90,6 +91,7 @@ def cli_parse(
             nameservers=nameservers,
             dns_timeout=dns_timeout,
             strip_attachment_payloads=sa,
+            normalize_timespan_threshold_hours=normalize_timespan_threshold_hours,
         )
         conn.send([file_results, file_path])
     except ParserError as error:
@@ -101,8 +103,35 @@
 def _main():
     """Called when the module is executed"""
 
+    def get_index_prefix(report):
+        if index_prefix_domain_map is None:
+            return None
+        if "policy_published" in report:
+            domain = report["policy_published"]["domain"]
+        elif "reported_domain" in report:
+            domain = report("reported_domain")
+        elif "policies" in report:
+            domain = report["policies"][0]["domain"]
+        if domain:
+            domain = get_base_domain(domain)
+            for prefix in index_prefix_domain_map:
+                if domain in index_prefix_domain_map[prefix]:
+                    prefix = (
+                        prefix.lower()
+                        .strip()
+                        .strip("_")
+                        .replace(" ", "_")
+                        .replace("-", "_")
+                    )
+                    prefix = f"{prefix}_"
+                    return prefix
+        return None
+
     def process_reports(reports_):
-        output_str = "{0}\n".format(json.dumps(reports_, ensure_ascii=False, indent=2))
+        indent_value = 2 if opts.prettify_json else None
+        output_str = "{0}\n".format(
+            json.dumps(reports_, ensure_ascii=False, indent=indent_value)
+        )
 
         if not opts.silent:
             print(output_str)
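As a rough, standalone illustration of the tenant-prefix lookup added above (this is not the project's code; the sample mapping and domains are invented), the normalization lowercases the tenant key, trims whitespace and underscores, replaces spaces and hyphens with underscores, and appends a trailing underscore:

```python
# Standalone sketch of the prefix normalization shown in the diff above.
# The mapping and domain values below are hypothetical sample data.
index_prefix_domain_map = {"Whalen Solutions": ["whalensolutions.com"]}


def prefix_for(base_domain):
    for tenant, domains in index_prefix_domain_map.items():
        if base_domain in domains:
            tenant = (
                tenant.lower().strip().strip("_").replace(" ", "_").replace("-", "_")
            )
            return f"{tenant}_"
    return None


print(prefix_for("whalensolutions.com"))  # whalen_solutions_
print(prefix_for("example.org"))  # None
```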
@@ -126,7 +155,8 @@ def _main():
                         elastic.save_aggregate_report_to_elasticsearch(
                             report,
                             index_suffix=opts.elasticsearch_index_suffix,
-                            index_prefix=opts.elasticsearch_index_prefix,
+                            index_prefix=opts.elasticsearch_index_prefix
+                            or get_index_prefix(report),
                             monthly_indexes=opts.elasticsearch_monthly_indexes,
                             number_of_shards=shards,
                             number_of_replicas=replicas,
@@ -147,7 +177,8 @@ def _main():
                         opensearch.save_aggregate_report_to_opensearch(
                             report,
                             index_suffix=opts.opensearch_index_suffix,
-                            index_prefix=opts.opensearch_index_prefix,
+                            index_prefix=opts.opensearch_index_prefix
+                            or get_index_prefix(report),
                             monthly_indexes=opts.opensearch_monthly_indexes,
                             number_of_shards=shards,
                             number_of_replicas=replicas,
@@ -189,8 +220,9 @@ def _main():
 
                 try:
                     if opts.webhook_aggregate_url:
+                        indent_value = 2 if opts.prettify_json else None
                         webhook_client.save_aggregate_report_to_webhook(
-                            json.dumps(report, ensure_ascii=False, indent=2)
+                            json.dumps(report, ensure_ascii=False, indent=indent_value)
                         )
                 except Exception as error_:
                     logger.error("Webhook Error: {0}".format(error_.__str__()))
@@ -212,7 +244,8 @@ def _main():
                         elastic.save_forensic_report_to_elasticsearch(
                             report,
                             index_suffix=opts.elasticsearch_index_suffix,
-                            index_prefix=opts.elasticsearch_index_prefix,
+                            index_prefix=opts.elasticsearch_index_prefix
+                            or get_index_prefix(report),
                             monthly_indexes=opts.elasticsearch_monthly_indexes,
                             number_of_shards=shards,
                             number_of_replicas=replicas,
@@ -231,7 +264,8 @@ def _main():
                         opensearch.save_forensic_report_to_opensearch(
                             report,
                             index_suffix=opts.opensearch_index_suffix,
-                            index_prefix=opts.opensearch_index_prefix,
+                            index_prefix=opts.opensearch_index_prefix
+                            or get_index_prefix(report),
                             monthly_indexes=opts.opensearch_monthly_indexes,
                             number_of_shards=shards,
                             number_of_replicas=replicas,
@@ -271,8 +305,9 @@ def _main():
 
                 try:
                     if opts.webhook_forensic_url:
+                        indent_value = 2 if opts.prettify_json else None
                         webhook_client.save_forensic_report_to_webhook(
-                            json.dumps(report, ensure_ascii=False, indent=2)
+                            json.dumps(report, ensure_ascii=False, indent=indent_value)
                         )
                 except Exception as error_:
                     logger.error("Webhook Error: {0}".format(error_.__str__()))
@@ -294,7 +329,8 @@ def _main():
                         elastic.save_smtp_tls_report_to_elasticsearch(
                             report,
                             index_suffix=opts.elasticsearch_index_suffix,
-                            index_prefix=opts.elasticsearch_index_prefix,
+                            index_prefix=opts.elasticsearch_index_prefix
+                            or get_index_prefix(report),
                             monthly_indexes=opts.elasticsearch_monthly_indexes,
                             number_of_shards=shards,
                             number_of_replicas=replicas,
@@ -313,7 +349,8 @@ def _main():
                         opensearch.save_smtp_tls_report_to_opensearch(
                             report,
                             index_suffix=opts.opensearch_index_suffix,
-                            index_prefix=opts.opensearch_index_prefix,
+                            index_prefix=opts.opensearch_index_prefix
+                            or get_index_prefix(report),
                             monthly_indexes=opts.opensearch_monthly_indexes,
                             number_of_shards=shards,
                             number_of_replicas=replicas,
@@ -353,8 +390,9 @@ def _main():
 
                 try:
                     if opts.webhook_smtp_tls_url:
+                        indent_value = 2 if opts.prettify_json else None
                         webhook_client.save_smtp_tls_report_to_webhook(
-                            json.dumps(report, ensure_ascii=False, indent=2)
+                            json.dumps(report, ensure_ascii=False, indent=indent_value)
                         )
                 except Exception as error_:
                     logger.error("Webhook Error: {0}".format(error_.__str__()))
@@ -475,6 +513,12 @@ def _main():
         "--debug", action="store_true", help="print debugging information"
     )
     arg_parser.add_argument("--log-file", default=None, help="output logging to a file")
+    arg_parser.add_argument(
+        "--no-prettify-json",
+        action="store_false",
+        dest="prettify_json",
+        help="output JSON in a single line without indentation",
+    )
     arg_parser.add_argument("-v", "--version", action="version", version=__version__)
 
     aggregate_reports = []
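For context, a small self-contained sketch (separate from the project code) of how a flag declared with `action="store_false"` and `dest="prettify_json"` behaves: the destination defaults to `True`, passing the flag flips it to `False`, and that value maps directly onto `json.dumps(..., indent=None)` for single-line output:

```python
import argparse
import json

parser = argparse.ArgumentParser()
# Same pattern as the --no-prettify-json flag added above:
# store_false makes the destination default to True.
parser.add_argument(
    "--no-prettify-json",
    action="store_false",
    dest="prettify_json",
    help="output JSON in a single line without indentation",
)

args = parser.parse_args([])  # prettify_json is True by default
print(json.dumps({"a": 1}, indent=2 if args.prettify_json else None))

args = parser.parse_args(["--no-prettify-json"])  # prettify_json is now False
print(json.dumps({"a": 1}, indent=2 if args.prettify_json else None))
```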
@@ -504,6 +548,7 @@ def _main():
         dns_timeout=args.dns_timeout,
         debug=args.debug,
         verbose=args.verbose,
+        prettify_json=args.prettify_json,
         save_aggregate=False,
         save_forensic=False,
         save_smtp_tls=False,
@@ -547,7 +592,7 @@ def _main():
         elasticsearch_monthly_indexes=False,
         elasticsearch_username=None,
         elasticsearch_password=None,
-        elasticsearch_apiKey=None,
+        elasticsearch_api_key=None,
         opensearch_hosts=None,
         opensearch_timeout=60,
         opensearch_number_of_shards=1,
@@ -559,7 +604,7 @@ def _main():
         opensearch_monthly_indexes=False,
         opensearch_username=None,
         opensearch_password=None,
-        opensearch_apiKey=None,
+        opensearch_api_key=None,
         kafka_hosts=None,
         kafka_username=None,
         kafka_password=None,
@@ -615,6 +660,7 @@ def _main():
         webhook_forensic_url=None,
         webhook_smtp_tls_url=None,
         webhook_timeout=60,
+        normalize_timespan_threshold_hours=24.0,
     )
     args = arg_parser.parse_args()
 
@@ -625,9 +671,19 @@ def _main():
             exit(-1)
         opts.silent = True
         config = ConfigParser()
+        index_prefix_domain_map = None
         config.read(args.config_file)
         if "general" in config.sections():
             general_config = config["general"]
+            if "silent" in general_config:
+                opts.silent = general_config.getboolean("silent")
+            if "normalize_timespan_threshold_hours" in general_config:
+                opts.normalize_timespan_threshold_hours = general_config.getfloat(
+                    "normalize_timespan_threshold_hours"
+                )
+            if "index_prefix_domain_map" in general_config:
+                with open(general_config["index_prefix_domain_map"]) as f:
+                    index_prefix_domain_map = yaml.safe_load(f)
             if "offline" in general_config:
                 opts.offline = general_config.getboolean("offline")
             if "strip_attachment_payloads" in general_config:
@@ -672,11 +728,11 @@ def _main():
                 )
                 exit(-1)
             if "save_aggregate" in general_config:
-                opts.save_aggregate = general_config["save_aggregate"]
+                opts.save_aggregate = general_config.getboolean("save_aggregate")
             if "save_forensic" in general_config:
-                opts.save_forensic = general_config["save_forensic"]
+                opts.save_forensic = general_config.getboolean("save_forensic")
             if "save_smtp_tls" in general_config:
-                opts.save_smtp_tls = general_config["save_smtp_tls"]
+                opts.save_smtp_tls = general_config.getboolean("save_smtp_tls")
             if "debug" in general_config:
                 opts.debug = general_config.getboolean("debug")
             if "verbose" in general_config:
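A short, standalone illustration of why the switch to `getboolean` matters (not project code; the option value is made up): `ConfigParser` stores raw strings, so plain indexing returns the truthy string `"False"`, while `getboolean` parses it into an actual `bool`:

```python
from configparser import ConfigParser

config = ConfigParser()
config.read_string("[general]\nsave_aggregate = False\n")
general = config["general"]

raw = general["save_aggregate"]  # the string "False", which is truthy
parsed = general.getboolean("save_aggregate")  # the bool False

print(bool(raw), parsed)  # True False
```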
@@ -701,6 +757,8 @@ def _main():
|
|||||||
opts.reverse_dns_map_path = general_config["reverse_dns_path"]
|
opts.reverse_dns_map_path = general_config["reverse_dns_path"]
|
||||||
if "reverse_dns_map_url" in general_config:
|
if "reverse_dns_map_url" in general_config:
|
||||||
opts.reverse_dns_map_url = general_config["reverse_dns_url"]
|
opts.reverse_dns_map_url = general_config["reverse_dns_url"]
|
||||||
|
if "prettify_json" in general_config:
|
||||||
|
opts.prettify_json = general_config.getboolean("prettify_json")
|
||||||
|
|
||||||
if "mailbox" in config.sections():
|
if "mailbox" in config.sections():
|
||||||
mailbox_config = config["mailbox"]
|
mailbox_config = config["mailbox"]
|
||||||
@@ -745,8 +803,9 @@ def _main():
|
|||||||
if "ssl" in imap_config:
|
if "ssl" in imap_config:
|
||||||
opts.imap_ssl = imap_config.getboolean("ssl")
|
opts.imap_ssl = imap_config.getboolean("ssl")
|
||||||
if "skip_certificate_verification" in imap_config:
|
if "skip_certificate_verification" in imap_config:
|
||||||
imap_verify = imap_config.getboolean("skip_certificate_verification")
|
opts.imap_skip_certificate_verification = imap_config.getboolean(
|
||||||
opts.imap_skip_certificate_verification = imap_verify
|
"skip_certificate_verification"
|
||||||
|
)
|
||||||
if "user" in imap_config:
|
if "user" in imap_config:
|
||||||
opts.imap_user = imap_config["user"]
|
opts.imap_user = imap_config["user"]
|
||||||
else:
|
else:
|
||||||
@@ -922,8 +981,12 @@ def _main():
|
|||||||
opts.elasticsearch_username = elasticsearch_config["user"]
|
opts.elasticsearch_username = elasticsearch_config["user"]
|
||||||
if "password" in elasticsearch_config:
|
if "password" in elasticsearch_config:
|
||||||
opts.elasticsearch_password = elasticsearch_config["password"]
|
opts.elasticsearch_password = elasticsearch_config["password"]
|
||||||
|
# Until 8.20
|
||||||
if "apiKey" in elasticsearch_config:
|
if "apiKey" in elasticsearch_config:
|
||||||
opts.elasticsearch_apiKey = elasticsearch_config["apiKey"]
|
opts.elasticsearch_apiKey = elasticsearch_config["apiKey"]
|
||||||
|
# Since 8.20
|
||||||
|
if "api_key" in elasticsearch_config:
|
||||||
|
opts.elasticsearch_apiKey = elasticsearch_config["api_key"]
|
||||||
|
|
||||||
if "opensearch" in config:
|
if "opensearch" in config:
|
||||||
opensearch_config = config["opensearch"]
|
opensearch_config = config["opensearch"]
|
||||||
@@ -958,8 +1021,12 @@ def _main():
|
|||||||
opts.opensearch_username = opensearch_config["user"]
|
opts.opensearch_username = opensearch_config["user"]
|
||||||
if "password" in opensearch_config:
|
if "password" in opensearch_config:
|
||||||
opts.opensearch_password = opensearch_config["password"]
|
opts.opensearch_password = opensearch_config["password"]
|
||||||
|
# Until 8.20
|
||||||
if "apiKey" in opensearch_config:
|
if "apiKey" in opensearch_config:
|
||||||
opts.opensearch_apiKey = opensearch_config["apiKey"]
|
opts.opensearch_apiKey = opensearch_config["apiKey"]
|
||||||
|
# Since 8.20
|
||||||
|
if "api_key" in opensearch_config:
|
||||||
|
opts.opensearch_apiKey = opensearch_config["api_key"]
|
||||||
|
|
||||||
if "splunk_hec" in config.sections():
|
if "splunk_hec" in config.sections():
|
||||||
hec_config = config["splunk_hec"]
|
hec_config = config["splunk_hec"]
|
||||||
@@ -1116,7 +1183,9 @@ def _main():
|
|||||||
)
|
)
|
||||||
opts.gmail_api_scopes = _str_to_list(opts.gmail_api_scopes)
|
opts.gmail_api_scopes = _str_to_list(opts.gmail_api_scopes)
|
||||||
if "oauth2_port" in gmail_api_config:
|
if "oauth2_port" in gmail_api_config:
|
||||||
opts.gmail_api_oauth2_port = gmail_api_config.get("oauth2_port", 8080)
|
opts.gmail_api_oauth2_port = gmail_api_config.getint(
|
||||||
|
"oauth2_port", 8080
|
||||||
|
)
|
||||||
|
|
||||||
if "maildir" in config.sections():
|
if "maildir" in config.sections():
|
||||||
maildir_api_config = config["maildir"]
|
maildir_api_config = config["maildir"]
|
||||||
@@ -1167,7 +1236,7 @@ def _main():
|
|||||||
if "smtp_tls_url" in webhook_config:
|
if "smtp_tls_url" in webhook_config:
|
||||||
opts.webhook_smtp_tls_url = webhook_config["smtp_tls_url"]
|
opts.webhook_smtp_tls_url = webhook_config["smtp_tls_url"]
|
||||||
if "timeout" in webhook_config:
|
if "timeout" in webhook_config:
|
||||||
opts.webhook_timeout = webhook_config["timeout"]
|
opts.webhook_timeout = webhook_config.getint("timeout")
|
||||||
|
|
||||||
logger.setLevel(logging.ERROR)
|
logger.setLevel(logging.ERROR)
|
||||||
|
|
||||||
@@ -1218,11 +1287,11 @@ def _main():
es_smtp_tls_index = "{0}{1}".format(prefix, es_smtp_tls_index)
elastic.set_hosts(
opts.elasticsearch_hosts,
- opts.elasticsearch_ssl,
+ use_ssl=opts.elasticsearch_ssl,
- opts.elasticsearch_ssl_cert_path,
+ ssl_cert_path=opts.elasticsearch_ssl_cert_path,
- opts.elasticsearch_username,
+ username=opts.elasticsearch_username,
- opts.elasticsearch_password,
+ password=opts.elasticsearch_password,
- opts.elasticsearch_apiKey,
+ api_key=opts.elasticsearch_api_key,
timeout=opts.elasticsearch_timeout,
)
elastic.migrate_indexes(
@@ -1250,11 +1319,11 @@ def _main():
os_smtp_tls_index = "{0}{1}".format(prefix, os_smtp_tls_index)
opensearch.set_hosts(
opts.opensearch_hosts,
- opts.opensearch_ssl,
+ use_ssl=opts.opensearch_ssl,
- opts.opensearch_ssl_cert_path,
+ ssl_cert_path=opts.opensearch_ssl_cert_path,
- opts.opensearch_username,
+ username=opts.opensearch_username,
- opts.opensearch_password,
+ password=opts.opensearch_password,
- opts.opensearch_apiKey,
+ api_key=opts.opensearch_api_key,
timeout=opts.opensearch_timeout,
)
opensearch.migrate_indexes(
@@ -1392,6 +1461,7 @@ def _main():
opts.always_use_local_files,
opts.reverse_dns_map_path,
opts.reverse_dns_map_url,
+ opts.normalize_timespan_threshold_hours,
child_conn,
),
)
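For reference, the new `normalize_timespan_threshold_hours` value threaded through `_main()` above would come from parsedmarc.ini. A purely hypothetical sketch; the section name and default below are assumptions for illustration, not taken from this diff:

```ini
[general]
# Hypothetical example: records spanning more than this many hours get normalized.
normalize_timespan_threshold_hours = 24
```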
@@ -1410,7 +1480,7 @@ def _main():
pbar.update(counter - pbar.n)

for result in results:
- if type(result[0]) is ParserError:
+ if isinstance(result[0], ParserError) or result[0] is None:
logger.error("Failed to parse {0} - {1}".format(result[1], result[0]))
else:
if result[0]["report_type"] == "aggregate":
@@ -1442,6 +1512,7 @@ def _main():
reverse_dns_map_path=opts.reverse_dns_map_path,
reverse_dns_map_url=opts.reverse_dns_map_url,
offline=opts.offline,
+ normalize_timespan_threshold_hours=opts.normalize_timespan_threshold_hours,
)
aggregate_reports += reports["aggregate_reports"]
forensic_reports += reports["forensic_reports"]
@@ -1460,7 +1531,7 @@ def _main():
if opts.imap_skip_certificate_verification:
logger.debug("Skipping IMAP certificate verification")
verify = False
- if opts.imap_ssl is False:
+ if not opts.imap_ssl:
ssl = False

mailbox_connection = IMAPConnection(
@@ -1551,6 +1622,7 @@ def _main():
test=opts.mailbox_test,
strip_attachment_payloads=opts.strip_attachment_payloads,
since=opts.mailbox_since,
+ normalize_timespan_threshold_hours=opts.normalize_timespan_threshold_hours,
)

aggregate_reports += reports["aggregate_reports"]
@@ -1561,7 +1633,7 @@ def _main():
logger.exception("Mailbox Error")
exit(1)

- results = OrderedDict(
+ results = dict(
[
("aggregate_reports", aggregate_reports),
("forensic_reports", forensic_reports),
@@ -1586,6 +1658,7 @@ def _main():
username=opts.smtp_user,
password=opts.smtp_password,
subject=opts.smtp_subject,
+ require_encryption=opts.smtp_ssl,
)
except Exception:
logger.exception("Failed to email results")
@@ -1612,6 +1685,7 @@ def _main():
reverse_dns_map_path=opts.reverse_dns_map_path,
reverse_dns_map_url=opts.reverse_dns_map_url,
offline=opts.offline,
+ normalize_timespan_threshold_hours=opts.normalize_timespan_threshold_hours,
)
except FileExistsError as error:
logger.error("{0}".format(error.__str__()))
3 parsedmarc/constants.py Normal file
@@ -0,0 +1,3 @@
+ __version__ = "9.0.5"
+
+ USER_AGENT = f"parsedmarc/{__version__}"
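The new constants module centralizes the version string. A hedged sketch of how such a constant is typically consumed; the request below and its URL are illustrative, not taken from this diff:

```python
import requests

from parsedmarc.constants import USER_AGENT, __version__

# Illustrative only: send the parsedmarc User-Agent with an outbound request.
headers = {"User-Agent": USER_AGENT}  # e.g. "parsedmarc/9.0.5"
response = requests.get("https://example.com/reverse_dns_map.csv", headers=headers)
print(__version__, response.status_code)
```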
@@ -1,6 +1,9 @@
# -*- coding: utf-8 -*-

- from collections import OrderedDict
+ from __future__ import annotations
+
+ from typing import Optional, Union, Any

from elasticsearch_dsl.search import Q
from elasticsearch_dsl import (
@@ -67,6 +70,8 @@ class _AggregateReportDoc(Document):
date_range = Date()
date_begin = Date()
date_end = Date()
+ normalized_timespan = Boolean()
+ original_timespan_seconds = Integer
errors = Text()
published_policy = Object(_PublishedPolicy)
source_ip_address = Ip()
@@ -87,15 +92,15 @@ class _AggregateReportDoc(Document):
dkim_results = Nested(_DKIMResult)
spf_results = Nested(_SPFResult)

- def add_policy_override(self, type_, comment):
+ def add_policy_override(self, type_: str, comment: str):
self.policy_overrides.append(_PolicyOverride(type=type_, comment=comment))

- def add_dkim_result(self, domain, selector, result):
+ def add_dkim_result(self, domain: str, selector: str, result: _DKIMResult):
self.dkim_results.append(
_DKIMResult(domain=domain, selector=selector, result=result)
)

- def add_spf_result(self, domain, scope, result):
+ def add_spf_result(self, domain: str, scope: str, result: _SPFResult):
self.spf_results.append(_SPFResult(domain=domain, scope=scope, result=result))

def save(self, **kwargs):
@@ -131,21 +136,21 @@ class _ForensicSampleDoc(InnerDoc):
body = Text()
attachments = Nested(_EmailAttachmentDoc)

- def add_to(self, display_name, address):
+ def add_to(self, display_name: str, address: str):
self.to.append(_EmailAddressDoc(display_name=display_name, address=address))

- def add_reply_to(self, display_name, address):
+ def add_reply_to(self, display_name: str, address: str):
self.reply_to.append(
_EmailAddressDoc(display_name=display_name, address=address)
)

- def add_cc(self, display_name, address):
+ def add_cc(self, display_name: str, address: str):
self.cc.append(_EmailAddressDoc(display_name=display_name, address=address))

- def add_bcc(self, display_name, address):
+ def add_bcc(self, display_name: str, address: str):
self.bcc.append(_EmailAddressDoc(display_name=display_name, address=address))

- def add_attachment(self, filename, content_type, sha256):
+ def add_attachment(self, filename: str, content_type: str, sha256: str):
self.attachments.append(
_EmailAttachmentDoc(
filename=filename, content_type=content_type, sha256=sha256
@@ -197,15 +202,15 @@ class _SMTPTLSPolicyDoc(InnerDoc):

def add_failure_details(
self,
- result_type,
+ result_type: Optional[str] = None,
- ip_address,
+ ip_address: Optional[str] = None,
- receiving_ip,
+ receiving_ip: Optional[str] = None,
- receiving_mx_helo,
+ receiving_mx_helo: Optional[str] = None,
- failed_session_count,
+ failed_session_count: Optional[int] = None,
- sending_mta_ip=None,
+ sending_mta_ip: Optional[str] = None,
- receiving_mx_hostname=None,
+ receiving_mx_hostname: Optional[str] = None,
- additional_information_uri=None,
+ additional_information_uri: Optional[str] = None,
- failure_reason_code=None,
+ failure_reason_code: Union[str, int, None] = None,
):
_details = _SMTPTLSFailureDetailsDoc(
result_type=result_type,
@@ -235,13 +240,14 @@ class _SMTPTLSReportDoc(Document):

def add_policy(
self,
- policy_type,
+ policy_type: str,
- policy_domain,
+ policy_domain: str,
- successful_session_count,
+ successful_session_count: int,
- failed_session_count,
+ failed_session_count: int,
- policy_string=None,
+ *,
- mx_host_patterns=None,
+ policy_string: Optional[str] = None,
- failure_details=None,
+ mx_host_patterns: Optional[list[str]] = None,
+ failure_details: Optional[str] = None,
):
self.policies.append(
policy_type=policy_type,
@@ -259,24 +265,25 @@ class AlreadySaved(ValueError):


def set_hosts(
- hosts,
+ hosts: Union[str, list[str]],
- use_ssl=False,
+ *,
- ssl_cert_path=None,
+ use_ssl: Optional[bool] = False,
- username=None,
+ ssl_cert_path: Optional[str] = None,
- password=None,
+ username: Optional[str] = None,
- apiKey=None,
+ password: Optional[str] = None,
- timeout=60.0,
+ api_key: Optional[str] = None,
+ timeout: Optional[float] = 60.0,
):
"""
Sets the Elasticsearch hosts to use

Args:
- hosts (str): A single hostname or URL, or list of hostnames or URLs
+ hosts (Union[str, list[str]]): A single hostname or URL, or list of hostnames or URLs
- use_ssl (bool): Use a HTTPS connection to the server
+ use_ssl (bool): Use an HTTPS connection to the server
ssl_cert_path (str): Path to the certificate chain
username (str): The username to use for authentication
password (str): The password to use for authentication
- apiKey (str): The Base64 encoded API key to use for authentication
+ api_key (str): The Base64 encoded API key to use for authentication
timeout (float): Timeout in seconds
"""
if not isinstance(hosts, list):
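Because the rewritten `set_hosts` makes everything after `hosts` keyword-only and renames `apiKey` to `api_key`, call sites must pass keywords. A minimal sketch of the new calling convention; the host URL and credentials are placeholders:

```python
from parsedmarc import elastic

# Placeholders only; every option after hosts is keyword-only in the new signature.
elastic.set_hosts(
    "https://elasticsearch.example.com:9200",
    use_ssl=True,
    ssl_cert_path="/etc/ssl/certs/ca.pem",
    api_key="base64-encoded-key",  # replaces the old apiKey parameter
    timeout=60.0,
)
```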
@@ -289,14 +296,14 @@ def set_hosts(
conn_params["ca_certs"] = ssl_cert_path
else:
conn_params["verify_certs"] = False
- if username:
+ if username and password:
conn_params["http_auth"] = username + ":" + password
- if apiKey:
+ if api_key:
- conn_params["api_key"] = apiKey
+ conn_params["api_key"] = api_key
connections.create_connection(**conn_params)


- def create_indexes(names, settings=None):
+ def create_indexes(names: list[str], settings: Optional[dict[str, Any]] = None):
"""
Create Elasticsearch indexes

@@ -319,7 +326,10 @@ def create_indexes(names, settings=None):
raise ElasticsearchError("Elasticsearch error: {0}".format(e.__str__()))


- def migrate_indexes(aggregate_indexes=None, forensic_indexes=None):
+ def migrate_indexes(
+ aggregate_indexes: Optional[list[str]] = None,
+ forensic_indexes: Optional[list[str]] = None,
+ ):
"""
Updates index mappings

@@ -366,18 +376,18 @@ def migrate_indexes(aggregate_indexes=None, forensic_indexes=None):


def save_aggregate_report_to_elasticsearch(
- aggregate_report,
+ aggregate_report: dict[str, Any],
- index_suffix=None,
+ index_suffix: Optional[str] = None,
- index_prefix=None,
+ index_prefix: Optional[str] = None,
- monthly_indexes=False,
+ monthly_indexes: Optional[bool] = False,
- number_of_shards=1,
+ number_of_shards: Optional[int] = 1,
- number_of_replicas=0,
+ number_of_replicas: Optional[int] = 0,
):
"""
Saves a parsed DMARC aggregate report to Elasticsearch

Args:
- aggregate_report (OrderedDict): A parsed forensic report
+ aggregate_report (dict): A parsed forensic report
index_suffix (str): The suffix of the name of the index to save to
index_prefix (str): The prefix of the name of the index to save to
monthly_indexes (bool): Use monthly indexes instead of daily indexes
@@ -395,15 +405,11 @@ def save_aggregate_report_to_elasticsearch(
domain = aggregate_report["policy_published"]["domain"]
begin_date = human_timestamp_to_datetime(metadata["begin_date"], to_utc=True)
end_date = human_timestamp_to_datetime(metadata["end_date"], to_utc=True)
- begin_date_human = begin_date.strftime("%Y-%m-%d %H:%M:%SZ")
- end_date_human = end_date.strftime("%Y-%m-%d %H:%M:%SZ")
if monthly_indexes:
index_date = begin_date.strftime("%Y-%m")
else:
index_date = begin_date.strftime("%Y-%m-%d")
- aggregate_report["begin_date"] = begin_date
- aggregate_report["end_date"] = end_date
- date_range = [aggregate_report["begin_date"], aggregate_report["end_date"]]

org_name_query = Q(dict(match_phrase=dict(org_name=org_name)))
report_id_query = Q(dict(match_phrase=dict(report_id=report_id)))
@@ -425,6 +431,9 @@ def save_aggregate_report_to_elasticsearch(
try:
existing = search.execute()
except Exception as error_:
+ begin_date_human = begin_date.strftime("%Y-%m-%d %H:%M:%SZ")
+ end_date_human = end_date.strftime("%Y-%m-%d %H:%M:%SZ")
+
raise ElasticsearchError(
"Elasticsearch's search for existing report \
error: {}".format(error_.__str__())
@@ -450,6 +459,17 @@ def save_aggregate_report_to_elasticsearch(
)

for record in aggregate_report["records"]:
+ begin_date = human_timestamp_to_datetime(record["interval_begin"], to_utc=True)
+ end_date = human_timestamp_to_datetime(record["interval_end"], to_utc=True)
+ normalized_timespan = record["normalized_timespan"]
+
+ if monthly_indexes:
+ index_date = begin_date.strftime("%Y-%m")
+ else:
+ index_date = begin_date.strftime("%Y-%m-%d")
+ aggregate_report["begin_date"] = begin_date
+ aggregate_report["end_date"] = end_date
+ date_range = [aggregate_report["begin_date"], aggregate_report["end_date"]]
agg_doc = _AggregateReportDoc(
xml_schema=aggregate_report["xml_schema"],
org_name=metadata["org_name"],
@@ -457,8 +477,9 @@ def save_aggregate_report_to_elasticsearch(
org_extra_contact_info=metadata["org_extra_contact_info"],
report_id=metadata["report_id"],
date_range=date_range,
- date_begin=aggregate_report["begin_date"],
+ date_begin=begin_date,
- date_end=aggregate_report["end_date"],
+ date_end=end_date,
+ normalized_timespan=normalized_timespan,
errors=metadata["errors"],
published_policy=published_policy,
source_ip_address=record["source"]["ip_address"],
@@ -517,18 +538,18 @@ def save_aggregate_report_to_elasticsearch(


def save_forensic_report_to_elasticsearch(
- forensic_report,
+ forensic_report: dict[str, Any],
- index_suffix=None,
+ index_suffix: Optional[Any] = None,
- index_prefix=None,
+ index_prefix: Optional[str] = None,
- monthly_indexes=False,
+ monthly_indexes: Optional[bool] = False,
- number_of_shards=1,
+ number_of_shards: int = 1,
- number_of_replicas=0,
+ number_of_replicas: int = 0,
):
"""
Saves a parsed DMARC forensic report to Elasticsearch

Args:
- forensic_report (OrderedDict): A parsed forensic report
+ forensic_report (dict): A parsed forensic report
index_suffix (str): The suffix of the name of the index to save to
index_prefix (str): The prefix of the name of the index to save to
monthly_indexes (bool): Use monthly indexes instead of daily
@@ -548,7 +569,7 @@ def save_forensic_report_to_elasticsearch(
sample_date = forensic_report["parsed_sample"]["date"]
sample_date = human_timestamp_to_datetime(sample_date)
original_headers = forensic_report["parsed_sample"]["headers"]
- headers = OrderedDict()
+ headers = dict()
for original_header in original_headers:
headers[original_header.lower()] = original_headers[original_header]

@@ -684,18 +705,18 @@ def save_forensic_report_to_elasticsearch(


def save_smtp_tls_report_to_elasticsearch(
- report,
+ report: dict[str, Any],
- index_suffix=None,
+ index_suffix: Optional[str] = None,
- index_prefix=None,
+ index_prefix: Optional[str] = None,
- monthly_indexes=False,
+ monthly_indexes: Optional[bool] = False,
- number_of_shards=1,
+ number_of_shards: Optional[int] = 1,
- number_of_replicas=0,
+ number_of_replicas: Optional[int] = 0,
):
"""
Saves a parsed SMTP TLS report to Elasticsearch

Args:
- report (OrderedDict): A parsed SMTP TLS report
+ report (dict): A parsed SMTP TLS report
index_suffix (str): The suffix of the name of the index to save to
index_prefix (str): The prefix of the name of the index to save to
monthly_indexes (bool): Use monthly indexes instead of daily indexes
@@ -781,7 +802,7 @@ def save_smtp_tls_report_to_elasticsearch(
policy_doc = _SMTPTLSPolicyDoc(
policy_domain=policy["policy_domain"],
policy_type=policy["policy_type"],
- succesful_session_count=policy["successful_session_count"],
+ successful_session_count=policy["successful_session_count"],
failed_session_count=policy["failed_session_count"],
policy_string=policy_strings,
mx_host_patterns=mx_host_patterns,
@@ -1,5 +1,9 @@
# -*- coding: utf-8 -*-

+ from __future__ import annotations
+
+ from typing import Any
+
import logging
import logging.handlers
import json
@@ -48,7 +52,9 @@ class GelfClient(object):
)
self.logger.addHandler(self.handler)

- def save_aggregate_report_to_gelf(self, aggregate_reports):
+ def save_aggregate_report_to_gelf(
+ self, aggregate_reports: list[dict[str, Any]]
+ ):
rows = parsed_aggregate_reports_to_csv_rows(aggregate_reports)
for row in rows:
log_context_data.parsedmarc = row
@@ -56,12 +62,14 @@ class GelfClient(object):

log_context_data.parsedmarc = None

- def save_forensic_report_to_gelf(self, forensic_reports):
+ def save_forensic_report_to_gelf(
+ self, forensic_reports: list[dict[str, Any]]
+ ):
rows = parsed_forensic_reports_to_csv_rows(forensic_reports)
for row in rows:
self.logger.info(json.dumps(row))

- def save_smtp_tls_report_to_gelf(self, smtp_tls_reports):
+ def save_smtp_tls_report_to_gelf(self, smtp_tls_reports: dict[str, Any]):
rows = parsed_smtp_tls_reports_to_csv_rows(smtp_tls_reports)
for row in rows:
self.logger.info(json.dumps(row))
@@ -1,11 +1,15 @@
# -*- coding: utf-8 -*-

+ from __future__ import annotations
+
+ from typing import Any, Optional, Union
+ from ssl import SSLContext
+
import json
from ssl import create_default_context

from kafka import KafkaProducer
from kafka.errors import NoBrokersAvailable, UnknownTopicOrPartitionError
- from collections import OrderedDict
from parsedmarc.utils import human_timestamp_to_datetime

from parsedmarc import __version__
@@ -18,7 +22,13 @@ class KafkaError(RuntimeError):

class KafkaClient(object):
def __init__(
- self, kafka_hosts, ssl=False, username=None, password=None, ssl_context=None
+ self,
+ kafka_hosts: list[str],
+ *,
+ ssl: Optional[bool] = False,
+ username: Optional[str] = None,
+ password: Optional[str] = None,
+ ssl_context: Optional[SSLContext] = None,
):
"""
Initializes the Kafka client
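With the constructor now keyword-only after `kafka_hosts`, instantiation looks like the sketch below. The broker address and credentials are placeholders, and the module path is assumed to be `parsedmarc.kafkaclient`:

```python
from parsedmarc.kafkaclient import KafkaClient

# Placeholders only; every option after kafka_hosts must be passed by keyword.
client = KafkaClient(
    ["kafka-1.example.com:9092"],
    ssl=True,
    username="parsedmarc",
    password="changeme",
)
```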
@@ -28,7 +38,7 @@ class KafkaClient(object):
ssl (bool): Use a SSL/TLS connection
username (str): An optional username
password (str): An optional password
- ssl_context: SSL context options
+ ssl_context (SSLContext): SSL context options

Notes:
``use_ssl=True`` is implied when a username or password are
@@ -55,7 +65,7 @@ class KafkaClient(object):
raise KafkaError("No Kafka brokers available")

@staticmethod
- def strip_metadata(report):
+ def strip_metadata(report: dict[str, Any]):
"""
Duplicates org_name, org_email and report_id into JSON root
and removes report_metadata key to bring it more inline
@@ -69,7 +79,7 @@ class KafkaClient(object):
return report

@staticmethod
- def generate_daterange(report):
+ def generate_date_range(report: dict[str, Any]):
"""
Creates a date_range timestamp with format YYYY-MM-DD-T-HH:MM:SS
based on begin and end dates for easier parsing in Kibana.
@@ -86,7 +96,11 @@ class KafkaClient(object):
logger.debug("date_range is {}".format(date_range))
return date_range

- def save_aggregate_reports_to_kafka(self, aggregate_reports, aggregate_topic):
+ def save_aggregate_reports_to_kafka(
+ self,
+ aggregate_reports: Union[dict[str, Any], list[dict[str, Any]]],
+ aggregate_topic: str,
+ ):
"""
Saves aggregate DMARC reports to Kafka

@@ -96,16 +110,14 @@ class KafkaClient(object):
aggregate_topic (str): The name of the Kafka topic

"""
- if isinstance(aggregate_reports, dict) or isinstance(
+ if isinstance(aggregate_reports, dict):
- aggregate_reports, OrderedDict
- ):
aggregate_reports = [aggregate_reports]

if len(aggregate_reports) < 1:
return

for report in aggregate_reports:
- report["date_range"] = self.generate_daterange(report)
+ report["date_range"] = self.generate_date_range(report)
report = self.strip_metadata(report)

for slice in report["records"]:
@@ -129,7 +141,11 @@ class KafkaClient(object):
except Exception as e:
raise KafkaError("Kafka error: {0}".format(e.__str__()))

- def save_forensic_reports_to_kafka(self, forensic_reports, forensic_topic):
+ def save_forensic_reports_to_kafka(
+ self,
+ forensic_reports: Union[dict[str, Any], list[dict[str, Any]]],
+ forensic_topic: str,
+ ):
"""
Saves forensic DMARC reports to Kafka, sends individual
records (slices) since Kafka requires messages to be <= 1MB
@@ -159,7 +175,11 @@ class KafkaClient(object):
except Exception as e:
raise KafkaError("Kafka error: {0}".format(e.__str__()))

- def save_smtp_tls_reports_to_kafka(self, smtp_tls_reports, smtp_tls_topic):
+ def save_smtp_tls_reports_to_kafka(
+ self,
+ smtp_tls_reports: Union[list[dict[str, Any]], dict[str, Any]],
+ smtp_tls_topic: str,
+ ):
"""
Saves SMTP TLS reports to Kafka, sends individual
records (slices) since Kafka requires messages to be <= 1MB
@@ -1,4 +1,9 @@
# -*- coding: utf-8 -*-

+ from __future__ import annotations
+
+ from typing import Any
+
from parsedmarc.log import logger
from azure.core.exceptions import HttpResponseError
from azure.identity import ClientSecretCredential
@@ -102,7 +107,12 @@ class LogAnalyticsClient(object):
"Invalid configuration. " + "One or more required settings are missing."
)

- def publish_json(self, results, logs_client: LogsIngestionClient, dcr_stream: str):
+ def publish_json(
+ self,
+ results,
+ logs_client: LogsIngestionClient,
+ dcr_stream: str,
+ ):
"""
Background function to publish given
DMARC report to specific Data Collection Rule.
@@ -121,7 +131,11 @@ class LogAnalyticsClient(object):
raise LogAnalyticsException("Upload failed: {error}".format(error=e))

def publish_results(
- self, results, save_aggregate: bool, save_forensic: bool, save_smtp_tls: bool
+ self,
+ results: dict[str, dict[str, Any]],
+ save_aggregate: bool,
+ save_forensic: bool,
+ save_smtp_tls: bool,
):
"""
Function to publish DMARC and/or SMTP TLS reports to Log Analytics
@@ -1,3 +1,7 @@
+ # -*- coding: utf-8 -*-
+
+ from __future__ import annotations
+
from base64 import urlsafe_b64decode
from functools import lru_cache
from pathlib import Path
@@ -152,3 +156,4 @@ class GmailConnection(MailboxConnection):
for label in labels:
if label_name == label["id"] or label_name == label["name"]:
return label["id"]
+ return ""
@@ -1,3 +1,7 @@
+ # -*- coding: utf-8 -*-
+
+ from __future__ import annotations
+
from enum import Enum
from functools import lru_cache
from pathlib import Path
@@ -1,3 +1,9 @@
+ # -*- coding: utf-8 -*-
+
+ from __future__ import annotations
+
+ from typing import Optional
+
from time import sleep

from imapclient.exceptions import IMAPClientError
@@ -11,14 +17,15 @@ from parsedmarc.mail.mailbox_connection import MailboxConnection
class IMAPConnection(MailboxConnection):
def __init__(
self,
- host=None,
+ host: Optional[str] = None,
- user=None,
+ *,
- password=None,
+ user: Optional[str] = None,
- port=None,
+ password: Optional[str] = None,
- ssl=True,
+ port: Optional[str] = None,
- verify=True,
+ ssl: Optional[bool] = True,
- timeout=30,
+ verify: Optional[bool] = True,
- max_retries=4,
+ timeout: Optional[int] = 30,
+ max_retries: Optional[int] = 4,
):
self._username = user
self._password = password
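The reworked `IMAPConnection.__init__` keeps `host` positional and makes the remaining options keyword-only. A brief sketch of the new call style; the host, credentials, and import path are placeholders and assumptions:

```python
from parsedmarc.mail import IMAPConnection

# Placeholders only; options after host are keyword-only in the new signature.
connection = IMAPConnection(
    "imap.example.com",
    user="dmarc@example.com",
    password="changeme",
    ssl=True,
    timeout=30,
)
```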
@@ -45,13 +52,13 @@ class IMAPConnection(MailboxConnection):
else:
return self._client.search()

- def fetch_message(self, message_id):
+ def fetch_message(self, message_id: int):
return self._client.fetch_message(message_id, parse=False)

- def delete_message(self, message_id: str):
+ def delete_message(self, message_id: int):
self._client.delete_messages([message_id])

- def move_message(self, message_id: str, folder_name: str):
+ def move_message(self, message_id: int, folder_name: str):
self._client.move_messages([message_id], folder_name)

def keepalive(self):
@@ -1,5 +1,8 @@
+ # -*- coding: utf-8 -*-
+
+ from __future__ import annotations
+
from abc import ABC
- from typing import List


class MailboxConnection(ABC):
@@ -10,7 +13,7 @@ class MailboxConnection(ABC):
def create_folder(self, folder_name: str):
raise NotImplementedError

- def fetch_messages(self, reports_folder: str, **kwargs) -> List[str]:
+ def fetch_messages(self, reports_folder: str, **kwargs) -> list[str]:
raise NotImplementedError

def fetch_message(self, message_id) -> str:
@@ -1,3 +1,9 @@
+ # -*- coding: utf-8 -*-
+
+ from __future__ import annotations
+
+ from typing import Optional
+
from time import sleep

from parsedmarc.log import logger
@@ -9,8 +15,8 @@ import os
class MaildirConnection(MailboxConnection):
def __init__(
self,
- maildir_path=None,
+ maildir_path: Optional[bool] = None,
- maildir_create=False,
+ maildir_create: Optional[bool] = False,
):
self._maildir_path = maildir_path
self._maildir_create = maildir_create
@@ -36,7 +42,7 @@ class MaildirConnection(MailboxConnection):
def fetch_messages(self, reports_folder: str, **kwargs):
return self._client.keys()

- def fetch_message(self, message_id):
+ def fetch_message(self, message_id: str):
return self._client.get(message_id).as_string()

def delete_message(self, message_id: str):
@@ -1,6 +1,9 @@
# -*- coding: utf-8 -*-

- from collections import OrderedDict
+ from __future__ import annotations
+
+ from typing import Optional, Union, Any

from opensearchpy import (
Q,
@@ -67,6 +70,8 @@ class _AggregateReportDoc(Document):
date_range = Date()
date_begin = Date()
date_end = Date()
+ normalized_timespan = Boolean()
+ original_timespan_seconds = Integer
errors = Text()
published_policy = Object(_PublishedPolicy)
source_ip_address = Ip()
@@ -87,15 +92,15 @@ class _AggregateReportDoc(Document):
dkim_results = Nested(_DKIMResult)
spf_results = Nested(_SPFResult)

- def add_policy_override(self, type_, comment):
+ def add_policy_override(self, type_: str, comment: str):
self.policy_overrides.append(_PolicyOverride(type=type_, comment=comment))

- def add_dkim_result(self, domain, selector, result):
+ def add_dkim_result(self, domain: str, selector: str, result: _DKIMResult):
self.dkim_results.append(
_DKIMResult(domain=domain, selector=selector, result=result)
)

- def add_spf_result(self, domain, scope, result):
+ def add_spf_result(self, domain: str, scope: str, result: _SPFResult):
self.spf_results.append(_SPFResult(domain=domain, scope=scope, result=result))

def save(self, **kwargs):
@@ -131,21 +136,21 @@ class _ForensicSampleDoc(InnerDoc):
body = Text()
attachments = Nested(_EmailAttachmentDoc)

- def add_to(self, display_name, address):
+ def add_to(self, display_name: str, address: str):
self.to.append(_EmailAddressDoc(display_name=display_name, address=address))

- def add_reply_to(self, display_name, address):
+ def add_reply_to(self, display_name: str, address: str):
self.reply_to.append(
_EmailAddressDoc(display_name=display_name, address=address)
)

- def add_cc(self, display_name, address):
+ def add_cc(self, display_name: str, address: str):
self.cc.append(_EmailAddressDoc(display_name=display_name, address=address))

- def add_bcc(self, display_name, address):
+ def add_bcc(self, display_name: str, address: str):
self.bcc.append(_EmailAddressDoc(display_name=display_name, address=address))

- def add_attachment(self, filename, content_type, sha256):
+ def add_attachment(self, filename: str, content_type: str, sha256: str):
self.attachments.append(
_EmailAttachmentDoc(
filename=filename, content_type=content_type, sha256=sha256
@@ -197,15 +202,15 @@ class _SMTPTLSPolicyDoc(InnerDoc):

def add_failure_details(
self,
- result_type,
+ result_type: Optional[str] = None,
- ip_address,
+ ip_address: Optional[str] = None,
- receiving_ip,
+ receiving_ip: Optional[str] = None,
- receiving_mx_helo,
+ receiving_mx_helo: Optional[str] = None,
- failed_session_count,
+ failed_session_count: Optional[int] = None,
- sending_mta_ip=None,
+ sending_mta_ip: Optional[str] = None,
- receiving_mx_hostname=None,
+ receiving_mx_hostname: Optional[str] = None,
- additional_information_uri=None,
+ additional_information_uri: Optional[str] = None,
- failure_reason_code=None,
+ failure_reason_code: Union[str, int, None] = None,
):
_details = _SMTPTLSFailureDetailsDoc(
result_type=result_type,
@@ -235,13 +240,14 @@ class _SMTPTLSReportDoc(Document):

def add_policy(
self,
- policy_type,
+ policy_type: str,
- policy_domain,
+ policy_domain: str,
- successful_session_count,
+ successful_session_count: int,
- failed_session_count,
+ failed_session_count: int,
- policy_string=None,
+ *,
- mx_host_patterns=None,
+ policy_string: Optional[str] = None,
- failure_details=None,
+ mx_host_patterns: Optional[list[str]] = None,
+ failure_details: Optional[str] = None,
):
self.policies.append(
policy_type=policy_type,
@@ -259,24 +265,25 @@ class AlreadySaved(ValueError):


def set_hosts(
- hosts,
+ hosts: Union[str, list[str]],
- use_ssl=False,
+ *,
- ssl_cert_path=None,
+ use_ssl: Optional[bool] = False,
- username=None,
+ ssl_cert_path: Optional[str] = None,
- password=None,
+ username: Optional[str] = None,
- apiKey=None,
+ password: Optional[str] = None,
- timeout=60.0,
+ api_key: Optional[str] = None,
+ timeout: Optional[float] = 60.0,
):
"""
Sets the OpenSearch hosts to use

Args:
- hosts (str|list): A hostname or URL, or list of hostnames or URLs
+ hosts (str|list[str]): A single hostname or URL, or list of hostnames or URLs
use_ssl (bool): Use an HTTPS connection to the server
ssl_cert_path (str): Path to the certificate chain
username (str): The username to use for authentication
password (str): The password to use for authentication
- apiKey (str): The Base64 encoded API key to use for authentication
+ api_key (str): The Base64 encoded API key to use for authentication
timeout (float): Timeout in seconds
"""
if not isinstance(hosts, list):
@@ -289,14 +296,14 @@ def set_hosts(
conn_params["ca_certs"] = ssl_cert_path
else:
conn_params["verify_certs"] = False
- if username:
+ if username and password:
conn_params["http_auth"] = username + ":" + password
- if apiKey:
+ if api_key:
- conn_params["api_key"] = apiKey
+ conn_params["api_key"] = api_key
connections.create_connection(**conn_params)


- def create_indexes(names, settings=None):
+ def create_indexes(names: list[str], settings: Optional[dict[str, Any]] = None):
"""
Create OpenSearch indexes

@@ -319,7 +326,10 @@ def create_indexes(names, settings=None):
raise OpenSearchError("OpenSearch error: {0}".format(e.__str__()))


- def migrate_indexes(aggregate_indexes=None, forensic_indexes=None):
+ def migrate_indexes(
+ aggregate_indexes: Optional[list[str]] = None,
+ forensic_indexes: Optional[list[str]] = None,
+ ):
"""
Updates index mappings

@@ -366,18 +376,18 @@ def migrate_indexes(aggregate_indexes=None, forensic_indexes=None):


def save_aggregate_report_to_opensearch(
- aggregate_report,
+ aggregate_report: dict[str, Any],
- index_suffix=None,
+ index_suffix: Optional[str] = None,
- index_prefix=None,
+ index_prefix: Optional[str] = None,
- monthly_indexes=False,
+ monthly_indexes: Optional[bool] = False,
- number_of_shards=1,
+ number_of_shards: Optional[int] = 1,
- number_of_replicas=0,
+ number_of_replicas: Optional[int] = 0,
):
"""
Saves a parsed DMARC aggregate report to OpenSearch

Args:
- aggregate_report (OrderedDict): A parsed forensic report
+ aggregate_report (dict): A parsed forensic report
index_suffix (str): The suffix of the name of the index to save to
index_prefix (str): The prefix of the name of the index to save to
monthly_indexes (bool): Use monthly indexes instead of daily indexes
@@ -395,15 +405,11 @@ def save_aggregate_report_to_opensearch(
domain = aggregate_report["policy_published"]["domain"]
begin_date = human_timestamp_to_datetime(metadata["begin_date"], to_utc=True)
end_date = human_timestamp_to_datetime(metadata["end_date"], to_utc=True)
- begin_date_human = begin_date.strftime("%Y-%m-%d %H:%M:%SZ")
- end_date_human = end_date.strftime("%Y-%m-%d %H:%M:%SZ")
if monthly_indexes:
index_date = begin_date.strftime("%Y-%m")
else:
index_date = begin_date.strftime("%Y-%m-%d")
- aggregate_report["begin_date"] = begin_date
- aggregate_report["end_date"] = end_date
- date_range = [aggregate_report["begin_date"], aggregate_report["end_date"]]

org_name_query = Q(dict(match_phrase=dict(org_name=org_name)))
report_id_query = Q(dict(match_phrase=dict(report_id=report_id)))
@@ -425,6 +431,9 @@ def save_aggregate_report_to_opensearch(
try:
existing = search.execute()
except Exception as error_:
+ begin_date_human = begin_date.strftime("%Y-%m-%d %H:%M:%SZ")
+ end_date_human = end_date.strftime("%Y-%m-%d %H:%M:%SZ")
+
raise OpenSearchError(
"OpenSearch's search for existing report \
error: {}".format(error_.__str__())
@@ -450,6 +459,17 @@ def save_aggregate_report_to_opensearch(
)

for record in aggregate_report["records"]:
+ begin_date = human_timestamp_to_datetime(record["interval_begin"], to_utc=True)
+ end_date = human_timestamp_to_datetime(record["interval_end"], to_utc=True)
+ normalized_timespan = record["normalized_timespan"]
+
+ if monthly_indexes:
+ index_date = begin_date.strftime("%Y-%m")
+ else:
+ index_date = begin_date.strftime("%Y-%m-%d")
+ aggregate_report["begin_date"] = begin_date
+ aggregate_report["end_date"] = end_date
+ date_range = [aggregate_report["begin_date"], aggregate_report["end_date"]]
agg_doc = _AggregateReportDoc(
xml_schema=aggregate_report["xml_schema"],
org_name=metadata["org_name"],
@@ -457,8 +477,9 @@ def save_aggregate_report_to_opensearch(
org_extra_contact_info=metadata["org_extra_contact_info"],
report_id=metadata["report_id"],
date_range=date_range,
- date_begin=aggregate_report["begin_date"],
+ date_begin=begin_date,
- date_end=aggregate_report["end_date"],
+ date_end=end_date,
+ normalized_timespan=normalized_timespan,
errors=metadata["errors"],
published_policy=published_policy,
source_ip_address=record["source"]["ip_address"],
@@ -517,18 +538,18 @@ def save_aggregate_report_to_opensearch(


def save_forensic_report_to_opensearch(
- forensic_report,
+ forensic_report: dict[str, Any],
- index_suffix=None,
+ index_suffix: Optional[str] = None,
- index_prefix=None,
+ index_prefix: Optional[str] = None,
- monthly_indexes=False,
+ monthly_indexes: Optional[bool] = False,
- number_of_shards=1,
+ number_of_shards: int = 1,
- number_of_replicas=0,
+ number_of_replicas: int = 0,
):
"""
Saves a parsed DMARC forensic report to OpenSearch

Args:
- forensic_report (OrderedDict): A parsed forensic report
+ forensic_report (dict): A parsed forensic report
index_suffix (str): The suffix of the name of the index to save to
index_prefix (str): The prefix of the name of the index to save to
monthly_indexes (bool): Use monthly indexes instead of daily
@@ -548,7 +569,7 @@ def save_forensic_report_to_opensearch(
sample_date = forensic_report["parsed_sample"]["date"]
sample_date = human_timestamp_to_datetime(sample_date)
original_headers = forensic_report["parsed_sample"]["headers"]
- headers = OrderedDict()
+ headers = dict()
for original_header in original_headers:
headers[original_header.lower()] = original_headers[original_header]

@@ -684,18 +705,18 @@ def save_forensic_report_to_opensearch(


def save_smtp_tls_report_to_opensearch(
- report,
+ report: dict[str, Any],
- index_suffix=None,
+ index_suffix: Optional[str] = None,
- index_prefix=None,
+ index_prefix: Optional[str] = None,
- monthly_indexes=False,
+ monthly_indexes: Optional[bool] = False,
- number_of_shards=1,
+ number_of_shards: Optional[int] = 1,
- number_of_replicas=0,
+ number_of_replicas: Optional[int] = 0,
):
"""
Saves a parsed SMTP TLS report to OpenSearch

Args:
- report (OrderedDict): A parsed SMTP TLS report
+ report (dict): A parsed SMTP TLS report
index_suffix (str): The suffix of the name of the index to save to
index_prefix (str): The prefix of the name of the index to save to
monthly_indexes (bool): Use monthly indexes instead of daily indexes
@@ -705,7 +726,7 @@ def save_smtp_tls_report_to_opensearch(
Raises:
AlreadySaved
"""
- logger.info("Saving aggregate report to OpenSearch")
+ logger.info("Saving SMTP TLS report to OpenSearch")
org_name = report["organization_name"]
report_id = report["report_id"]
begin_date = human_timestamp_to_datetime(report["begin_date"], to_utc=True)
@@ -781,7 +802,7 @@ def save_smtp_tls_report_to_opensearch(
policy_doc = _SMTPTLSPolicyDoc(
policy_domain=policy["policy_domain"],
policy_type=policy["policy_type"],
- succesful_session_count=policy["successful_session_count"],
+ successful_session_count=policy["successful_session_count"],
failed_session_count=policy["failed_session_count"],
policy_string=policy_strings,
mx_host_patterns=mx_host_patterns,
@@ -3,6 +3,8 @@
|
|||||||
A mapping is meant to make it easier to identify who or what a sending source is. Please consider contributing
|
A mapping is meant to make it easier to identify who or what a sending source is. Please consider contributing
|
||||||
additional mappings in a GitHub Pull Request.
|
additional mappings in a GitHub Pull Request.
|
||||||
|
|
||||||
|
Do not open these CSV files in Excel. It will replace Unicode characters with question marks. Use LibreOffice Calc instead.
|
||||||
|
|
||||||
## base_reverse_dns_map.csv
|
## base_reverse_dns_map.csv
|
||||||
|
|
||||||
A CSV file with three fields: `base_reverse_dns`, `name`, and `type`.
|
A CSV file with three fields: `base_reverse_dns`, `name`, and `type`.
|
||||||
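For illustration only (this snippet is not part of the repository), the mapping can be loaded into a dictionary keyed by the base reverse DNS value using the standard library:

```python
# Minimal sketch: load base_reverse_dns_map.csv into a lookup dict.
import csv

with open("base_reverse_dns_map.csv", newline="", encoding="utf-8") as f:
    reverse_dns_map = {
        row["base_reverse_dns"].lower(): (row["name"], row["type"])
        for row in csv.DictReader(f)
    }

print(reverse_dns_map.get("amazonses.com"))  # hypothetical lookup
```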
@@ -25,6 +27,7 @@ The `service_type` is based on the following rule precedence:
|
|||||||
- Agriculture
|
- Agriculture
|
||||||
- Automotive
|
- Automotive
|
||||||
- Beauty
|
- Beauty
|
||||||
|
- Conglomerate
|
||||||
- Construction
|
- Construction
|
||||||
- Consulting
|
- Consulting
|
||||||
- Defense
|
- Defense
|
||||||
@@ -41,6 +44,7 @@ The `service_type` is based on the following rule precedence:
|
|||||||
- IaaS
|
- IaaS
|
||||||
- Industrial
|
- Industrial
|
||||||
- ISP
|
- ISP
|
||||||
|
- Legal
|
||||||
- Logistics
|
- Logistics
|
||||||
- Manufacturing
|
- Manufacturing
|
||||||
- Marketing
|
- Marketing
|
||||||
@@ -50,6 +54,7 @@ The `service_type` is based on the following rule precedence:
|
|||||||
- Nonprofit
|
- Nonprofit
|
||||||
- PaaS
|
- PaaS
|
||||||
- Photography
|
- Photography
|
||||||
|
- Physical Security
|
||||||
- Print
|
- Print
|
||||||
- Publishing
|
- Publishing
|
||||||
- Real Estate
|
- Real Estate
|
||||||
@@ -72,12 +77,16 @@ A list of reverse DNS base domains that could not be identified as belonging to
|
|||||||
|
|
||||||
## base_reverse_dns.csv
|
## base_reverse_dns.csv
|
||||||
|
|
||||||
A CSV with the fields `source_name` and optionally `message_countcount`. This CSV can be generated byy exporting the base DNS data from the Kibana on Splunk dashboards provided by parsedmarc. This file is not tracked by Git.
|
A CSV with the fields `source_name` and optionally `message_count`. This CSV can be generated by exporting the base DNS data from the Kibana or Splunk dashboards provided by parsedmarc. This file is not tracked by Git.
|
||||||
|
|
||||||
## unknown_base_reverse_dns.csv
|
## unknown_base_reverse_dns.csv
|
||||||
|
|
||||||
A CSV file with the fields `source_name` and `message_count`. This file is not tracked by Git.
|
A CSV file with the fields `source_name` and `message_count`. This file is not tracked by Git.
|
||||||
|
|
||||||
|
## find_bad_utf8.py
|
||||||
|
|
||||||
|
Locates invalid UTF-8 bytes in files and optionally tries to correct them. Generated by GPT-5. It helped me find where I had introduced invalid bytes in `base_reverse_dns_map.csv`.
|
||||||
|
|
||||||
## find_unknown_base_reverse_dns.py
|
## find_unknown_base_reverse_dns.py
|
||||||
|
|
||||||
This is a python script that reads the domains in `base_reverse_dns.csv` and writes the domains that are not in `base_reverse_dns_map.csv` or `known_unknown_base_reverse_dns.txt` to `unknown_base_reverse_dns.csv`. This is useful for identifying potential additional domains to contribute to `base_reverse_dns_map.csv` and `known_unknown_base_reverse_dns.txt`.
|
This is a python script that reads the domains in `base_reverse_dns.csv` and writes the domains that are not in `base_reverse_dns_map.csv` or `known_unknown_base_reverse_dns.txt` to `unknown_base_reverse_dns.csv`. This is useful for identifying potential additional domains to contribute to `base_reverse_dns_map.csv` and `known_unknown_base_reverse_dns.txt`.
|
||||||
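As a usage sketch (assuming the script is run from this maps directory with the three input files present), it can be invoked directly, e.g. `python3 find_unknown_base_reverse_dns.py`, after which `unknown_base_reverse_dns.csv` contains the rows whose `source_name` was not matched.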
|
|||||||
File diff suppressed because it is too large
44
parsedmarc/resources/maps/base_reverse_dns_types.txt
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
Agriculture
|
||||||
|
Automotive
|
||||||
|
Beauty
|
||||||
|
Conglomerate
|
||||||
|
Construction
|
||||||
|
Consulting
|
||||||
|
Defense
|
||||||
|
Education
|
||||||
|
Email Provider
|
||||||
|
Email Security
|
||||||
|
Entertainment
|
||||||
|
Event Planning
|
||||||
|
Finance
|
||||||
|
Food
|
||||||
|
Government
|
||||||
|
Government Media
|
||||||
|
Healthcare
|
||||||
|
ISP
|
||||||
|
IaaS
|
||||||
|
Industrial
|
||||||
|
Legal
|
||||||
|
Logistics
|
||||||
|
MSP
|
||||||
|
MSSP
|
||||||
|
Manufacturing
|
||||||
|
Marketing
|
||||||
|
News
|
||||||
|
Nonprofit
|
||||||
|
PaaS
|
||||||
|
Photography
|
||||||
|
Physical Security
|
||||||
|
Print
|
||||||
|
Publishing
|
||||||
|
Real Estate
|
||||||
|
Retail
|
||||||
|
SaaS
|
||||||
|
Science
|
||||||
|
Search Engine
|
||||||
|
Social Media
|
||||||
|
Sports
|
||||||
|
Staffing
|
||||||
|
Technology
|
||||||
|
Travel
|
||||||
|
Web Host
|
||||||
488
parsedmarc/resources/maps/find_bad_utf8.py
Executable file
@@ -0,0 +1,488 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import codecs
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import shutil
|
||||||
|
from typing import List, Tuple
|
||||||
|
|
||||||
|
"""
|
||||||
|
Locates and optionally corrects bad UTF-8 bytes in a file.
|
||||||
|
Generated by GPT-5. Use at your own risk.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# -------------------------
|
||||||
|
# UTF-8 scanning
|
||||||
|
# -------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def scan_line_for_utf8_errors(
|
||||||
|
line_bytes: bytes, line_no: int, base_offset: int, context: int
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
Scan one line of raw bytes for UTF-8 decoding errors.
|
||||||
|
Returns a list of dicts describing each error.
|
||||||
|
"""
|
||||||
|
pos = 0
|
||||||
|
results = []
|
||||||
|
while pos < len(line_bytes):
|
||||||
|
dec = codecs.getincrementaldecoder("utf-8")("strict")
|
||||||
|
try:
|
||||||
|
dec.decode(line_bytes[pos:], final=True)
|
||||||
|
break
|
||||||
|
except UnicodeDecodeError as e:
|
||||||
|
rel_index = e.start
|
||||||
|
abs_index_in_line = pos + rel_index
|
||||||
|
abs_offset = base_offset + abs_index_in_line
|
||||||
|
|
||||||
|
start_ctx = max(0, abs_index_in_line - context)
|
||||||
|
end_ctx = min(len(line_bytes), abs_index_in_line + 1 + context)
|
||||||
|
ctx_bytes = line_bytes[start_ctx:end_ctx]
|
||||||
|
bad_byte = line_bytes[abs_index_in_line : abs_index_in_line + 1]
|
||||||
|
col = abs_index_in_line + 1 # 1-based byte column
|
||||||
|
|
||||||
|
results.append(
|
||||||
|
{
|
||||||
|
"line": line_no,
|
||||||
|
"column": col,
|
||||||
|
"abs_offset": abs_offset,
|
||||||
|
"bad_byte_hex": bad_byte.hex(),
|
||||||
|
"context_hex": ctx_bytes.hex(),
|
||||||
|
"context_preview": ctx_bytes.decode("utf-8", errors="replace"),
|
||||||
|
}
|
||||||
|
)
|
||||||
|
# Move past the offending byte and continue
|
||||||
|
pos = abs_index_in_line + 1
|
||||||
|
return results
|
||||||
|
|
||||||
|
|
||||||
|
def scan_file_for_utf8_errors(path: str, context: int, limit: int):
|
||||||
|
errors_found = 0
|
||||||
|
limit_val = limit if limit != 0 else float("inf")
|
||||||
|
|
||||||
|
with open(path, "rb") as f:
|
||||||
|
total_offset = 0
|
||||||
|
line_no = 0
|
||||||
|
while True:
|
||||||
|
line = f.readline()
|
||||||
|
if not line:
|
||||||
|
break
|
||||||
|
line_no += 1
|
||||||
|
results = scan_line_for_utf8_errors(line, line_no, total_offset, context)
|
||||||
|
for r in results:
|
||||||
|
errors_found += 1
|
||||||
|
print(
|
||||||
|
f"[ERROR {errors_found}] Line {r['line']}, Column {r['column']}, "
|
||||||
|
f"Absolute byte offset {r['abs_offset']}"
|
||||||
|
)
|
||||||
|
print(f" Bad byte: 0x{r['bad_byte_hex']}")
|
||||||
|
print(f" Context (hex): {r['context_hex']}")
|
||||||
|
print(f" Context (preview): {r['context_preview']}")
|
||||||
|
print()
|
||||||
|
if errors_found >= limit_val:
|
||||||
|
print(f"Reached limit of {limit} errors. Stopping.")
|
||||||
|
return errors_found
|
||||||
|
total_offset += len(line)
|
||||||
|
|
||||||
|
if errors_found == 0:
|
||||||
|
print("No invalid UTF-8 bytes found. 🎉")
|
||||||
|
else:
|
||||||
|
print(f"Found {errors_found} invalid UTF-8 byte(s).")
|
||||||
|
return errors_found
|
||||||
|
|
||||||
|
|
||||||
|
# -------------------------
|
||||||
|
# Whole-file conversion
|
||||||
|
# -------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def detect_encoding_text(path: str) -> Tuple[str, str]:
|
||||||
|
"""
|
||||||
|
Use charset-normalizer to detect file encoding.
|
||||||
|
Return (encoding_name, decoded_text). Falls back to cp1252 if needed.
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
from charset_normalizer import from_path
|
||||||
|
except ImportError:
|
||||||
|
print(
|
||||||
|
"Please install charset-normalizer: pip install charset-normalizer",
|
||||||
|
file=sys.stderr,
|
||||||
|
)
|
||||||
|
sys.exit(4)
|
||||||
|
|
||||||
|
matches = from_path(path)
|
||||||
|
match = matches.best()
|
||||||
|
if match is None or match.encoding is None:
|
||||||
|
# Fallback heuristic for Western single-byte text
|
||||||
|
with open(path, "rb") as fb:
|
||||||
|
data = fb.read()
|
||||||
|
try:
|
||||||
|
return "cp1252", data.decode("cp1252", errors="strict")
|
||||||
|
except UnicodeDecodeError:
|
||||||
|
print("Unable to detect encoding reliably.", file=sys.stderr)
|
||||||
|
sys.exit(5)
|
||||||
|
|
||||||
|
return match.encoding, str(match)
|
||||||
|
|
||||||
|
|
||||||
|
def convert_to_utf8(src_path: str, out_path: str, src_encoding: str = None) -> str:
|
||||||
|
"""
|
||||||
|
Convert an entire file to UTF-8 (re-decoding everything).
|
||||||
|
If src_encoding is provided, use it; else auto-detect.
|
||||||
|
Returns the encoding actually used.
|
||||||
|
"""
|
||||||
|
if src_encoding:
|
||||||
|
with open(src_path, "rb") as fb:
|
||||||
|
data = fb.read()
|
||||||
|
try:
|
||||||
|
text = data.decode(src_encoding, errors="strict")
|
||||||
|
except LookupError:
|
||||||
|
print(f"Unknown encoding: {src_encoding}", file=sys.stderr)
|
||||||
|
sys.exit(6)
|
||||||
|
except UnicodeDecodeError as e:
|
||||||
|
print(f"Decoding failed with {src_encoding}: {e}", file=sys.stderr)
|
||||||
|
sys.exit(7)
|
||||||
|
used = src_encoding
|
||||||
|
else:
|
||||||
|
used, text = detect_encoding_text(src_path)
|
||||||
|
|
||||||
|
with open(out_path, "w", encoding="utf-8", newline="") as fw:
|
||||||
|
fw.write(text)
|
||||||
|
return used
|
||||||
|
|
||||||
|
|
||||||
|
def verify_utf8_file(path: str) -> Tuple[bool, str]:
|
||||||
|
try:
|
||||||
|
with open(path, "rb") as fb:
|
||||||
|
fb.read().decode("utf-8", errors="strict")
|
||||||
|
return True, ""
|
||||||
|
except UnicodeDecodeError as e:
|
||||||
|
return False, str(e)
|
||||||
|
|
||||||
|
|
||||||
|
# -------------------------
|
||||||
|
# Targeted single-byte fixer
|
||||||
|
# -------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def iter_lines_with_offsets(b: bytes):
|
||||||
|
"""
|
||||||
|
Yield (line_bytes, line_start_abs_offset). Preserves LF/CRLF/CR in bytes.
|
||||||
|
"""
|
||||||
|
start = 0
|
||||||
|
for i, byte in enumerate(b):
|
||||||
|
if byte == 0x0A: # LF
|
||||||
|
yield b[start : i + 1], start
|
||||||
|
start = i + 1
|
||||||
|
if start < len(b):
|
||||||
|
yield b[start:], start
|
||||||
|
|
||||||
|
|
||||||
|
def detect_probable_fallbacks() -> List[str]:
|
||||||
|
# Good defaults for Western/Portuguese text
|
||||||
|
return ["cp1252", "iso-8859-1", "iso-8859-15"]
|
||||||
|
|
||||||
|
|
||||||
|
def repair_mixed_utf8_line(line: bytes, base_offset: int, fallback_chain: List[str]):
|
||||||
|
"""
|
||||||
|
Strictly validate UTF-8 and fix *only* the exact offending byte when an error occurs.
|
||||||
|
This avoids touching adjacent valid UTF-8 (prevents mojibake like 'é').
|
||||||
|
"""
|
||||||
|
out_fragments: List[str] = []
|
||||||
|
fixes = []
|
||||||
|
pos = 0
|
||||||
|
n = len(line)
|
||||||
|
|
||||||
|
while pos < n:
|
||||||
|
dec = codecs.getincrementaldecoder("utf-8")("strict")
|
||||||
|
try:
|
||||||
|
s = dec.decode(line[pos:], final=True)
|
||||||
|
out_fragments.append(s)
|
||||||
|
break
|
||||||
|
except UnicodeDecodeError as e:
|
||||||
|
# Append the valid prefix before the error
|
||||||
|
if e.start > 0:
|
||||||
|
out_fragments.append(
|
||||||
|
line[pos : pos + e.start].decode("utf-8", errors="strict")
|
||||||
|
)
|
||||||
|
|
||||||
|
bad_index = pos + e.start # absolute index in 'line'
|
||||||
|
bad_slice = line[bad_index : bad_index + 1] # FIX EXACTLY ONE BYTE
|
||||||
|
|
||||||
|
# Decode that single byte using the first working fallback
|
||||||
|
decoded = None
|
||||||
|
used_enc = None
|
||||||
|
for enc in fallback_chain:
|
||||||
|
try:
|
||||||
|
decoded = bad_slice.decode(enc, errors="strict")
|
||||||
|
used_enc = enc
|
||||||
|
break
|
||||||
|
except Exception:
|
||||||
|
continue
|
||||||
|
if decoded is None:
|
||||||
|
# latin-1 always succeeds (byte->same code point)
|
||||||
|
decoded = bad_slice.decode("latin-1")
|
||||||
|
used_enc = "latin-1 (fallback)"
|
||||||
|
|
||||||
|
out_fragments.append(decoded)
|
||||||
|
|
||||||
|
# Log the fix
|
||||||
|
col_1based = bad_index + 1 # byte-based column
|
||||||
|
fixes.append(
|
||||||
|
{
|
||||||
|
"line_base_offset": base_offset,
|
||||||
|
"line": None, # caller fills line number
|
||||||
|
"column": col_1based,
|
||||||
|
"abs_offset": base_offset + bad_index,
|
||||||
|
"bad_bytes_hex": bad_slice.hex(),
|
||||||
|
"used_encoding": used_enc,
|
||||||
|
"replacement_preview": decoded,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
# Advance exactly one byte past the offending byte and continue
|
||||||
|
pos = bad_index + 1
|
||||||
|
|
||||||
|
return "".join(out_fragments), fixes
|
||||||
|
|
||||||
|
|
||||||
|
def targeted_fix_to_utf8(
|
||||||
|
src_path: str,
|
||||||
|
out_path: str,
|
||||||
|
fallback_chain: List[str],
|
||||||
|
dry_run: bool,
|
||||||
|
max_fixes: int,
|
||||||
|
):
|
||||||
|
with open(src_path, "rb") as fb:
|
||||||
|
data = fb.read()
|
||||||
|
|
||||||
|
total_fixes = 0
|
||||||
|
repaired_lines: List[str] = []
|
||||||
|
line_no = 0
|
||||||
|
max_val = max_fixes if max_fixes != 0 else float("inf")
|
||||||
|
|
||||||
|
for line_bytes, base_offset in iter_lines_with_offsets(data):
|
||||||
|
line_no += 1
|
||||||
|
# Fast path: keep lines that are already valid UTF-8
|
||||||
|
try:
|
||||||
|
repaired_lines.append(line_bytes.decode("utf-8", errors="strict"))
|
||||||
|
continue
|
||||||
|
except UnicodeDecodeError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
fixed_text, fixes = repair_mixed_utf8_line(
|
||||||
|
line_bytes, base_offset, fallback_chain=fallback_chain
|
||||||
|
)
|
||||||
|
for f in fixes:
|
||||||
|
f["line"] = line_no
|
||||||
|
|
||||||
|
repaired_lines.append(fixed_text)
|
||||||
|
|
||||||
|
# Log fixes
|
||||||
|
for f in fixes:
|
||||||
|
total_fixes += 1
|
||||||
|
print(
|
||||||
|
f"[FIX {total_fixes}] Line {f['line']}, Column {f['column']}, Abs offset {f['abs_offset']}"
|
||||||
|
)
|
||||||
|
print(f" Bad bytes: 0x{f['bad_bytes_hex']}")
|
||||||
|
print(f" Used encoding: {f['used_encoding']}")
|
||||||
|
preview = f["replacement_preview"].replace("\r", "\\r").replace("\n", "\\n")
|
||||||
|
if len(preview) > 40:
|
||||||
|
preview = preview[:40] + "…"
|
||||||
|
print(f" Replacement preview: {preview}")
|
||||||
|
print()
|
||||||
|
if total_fixes >= max_val:
|
||||||
|
print(f"Reached max fixes limit ({max_fixes}). Stopping scan.")
|
||||||
|
break
|
||||||
|
if total_fixes >= max_val:
|
||||||
|
break
|
||||||
|
|
||||||
|
if dry_run:
|
||||||
|
print(f"Dry run complete. Detected {total_fixes} fix(es). No file written.")
|
||||||
|
return total_fixes
|
||||||
|
|
||||||
|
# Join and verify result can be encoded to UTF-8
|
||||||
|
repaired_text = "".join(repaired_lines)
|
||||||
|
try:
|
||||||
|
repaired_text.encode("utf-8", errors="strict")
|
||||||
|
except UnicodeEncodeError as e:
|
||||||
|
print(f"Internal error: repaired text not valid UTF-8: {e}", file=sys.stderr)
|
||||||
|
sys.exit(3)
|
||||||
|
|
||||||
|
with open(out_path, "w", encoding="utf-8", newline="") as fw:
|
||||||
|
fw.write(repaired_text)
|
||||||
|
|
||||||
|
print(f"Fixed file written to: {out_path}")
|
||||||
|
print(f"Total fixes applied: {total_fixes}")
|
||||||
|
return total_fixes
|
||||||
|
|
||||||
|
|
||||||
|
# -------------------------
|
||||||
|
# CLI
|
||||||
|
# -------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
ap = argparse.ArgumentParser(
|
||||||
|
description=(
|
||||||
|
"Scan for invalid UTF-8; optionally convert whole file or fix only invalid bytes.\n\n"
|
||||||
|
"By default, --convert and --fix **edit the input file in place** and create a backup "
|
||||||
|
"named '<input>.bak' before writing. If you pass --output, the original file is left "
|
||||||
|
"unchanged and no backup is created. Use --dry-run to preview fixes without writing."
|
||||||
|
),
|
||||||
|
formatter_class=argparse.RawTextHelpFormatter,
|
||||||
|
)
|
||||||
|
ap.add_argument("path", help="Path to the CSV/text file")
|
||||||
|
ap.add_argument(
|
||||||
|
"--context",
|
||||||
|
type=int,
|
||||||
|
default=20,
|
||||||
|
help="Bytes of context to show around errors (default: 20)",
|
||||||
|
)
|
||||||
|
ap.add_argument(
|
||||||
|
"--limit",
|
||||||
|
type=int,
|
||||||
|
default=100,
|
||||||
|
help="Max errors to report during scan (0 = unlimited)",
|
||||||
|
)
|
||||||
|
ap.add_argument(
|
||||||
|
"--skip-scan", action="store_true", help="Skip initial scan for speed"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Whole-file convert
|
||||||
|
ap.add_argument(
|
||||||
|
"--convert",
|
||||||
|
action="store_true",
|
||||||
|
help="Convert entire file to UTF-8 using auto/forced encoding "
|
||||||
|
"(in-place by default; creates '<input>.bak').",
|
||||||
|
)
|
||||||
|
ap.add_argument(
|
||||||
|
"--encoding",
|
||||||
|
help="Force source encoding for --convert or first fallback for --fix",
|
||||||
|
)
|
||||||
|
ap.add_argument(
|
||||||
|
"--output",
|
||||||
|
help="Write to this path instead of in-place (no .bak is created in that case)",
|
||||||
|
)
|
||||||
|
|
||||||
|
# Targeted fix
|
||||||
|
ap.add_argument(
|
||||||
|
"--fix",
|
||||||
|
action="store_true",
|
||||||
|
help="Fix only invalid byte(s) via fallback encodings "
|
||||||
|
"(in-place by default; creates '<input>.bak').",
|
||||||
|
)
|
||||||
|
ap.add_argument(
|
||||||
|
"--fallbacks",
|
||||||
|
help="Comma-separated fallback encodings (default: cp1252,iso-8859-1,iso-8859-15)",
|
||||||
|
)
|
||||||
|
ap.add_argument(
|
||||||
|
"--dry-run",
|
||||||
|
action="store_true",
|
||||||
|
help="(fix) Print fixes but do not write or create a .bak",
|
||||||
|
)
|
||||||
|
ap.add_argument(
|
||||||
|
"--max-fixes",
|
||||||
|
type=int,
|
||||||
|
default=0,
|
||||||
|
help="(fix) Stop after N fixes (0 = unlimited)",
|
||||||
|
)
|
||||||
|
|
||||||
|
args = ap.parse_args()
|
||||||
|
path = args.path
|
||||||
|
|
||||||
|
if not os.path.isfile(path):
|
||||||
|
print(f"File not found: {path}", file=sys.stderr)
|
||||||
|
sys.exit(2)
|
||||||
|
|
||||||
|
# Optional scan first
|
||||||
|
if not args.skip_scan:
|
||||||
|
scan_file_for_utf8_errors(path, context=args.context, limit=args.limit)
|
||||||
|
|
||||||
|
# Mode selection guards
|
||||||
|
if args.convert and args.fix:
|
||||||
|
print("Choose either --convert or --fix (not both).", file=sys.stderr)
|
||||||
|
sys.exit(9)
|
||||||
|
if not args.convert and not args.fix and args.skip_scan:
|
||||||
|
print("No action selected (use --convert or --fix).")
|
||||||
|
return
|
||||||
|
if not args.convert and not args.fix:
|
||||||
|
# User only wanted a scan
|
||||||
|
return
|
||||||
|
|
||||||
|
# Determine output path and backup behavior
|
||||||
|
# In-place by default: create '<input>.bak' before overwriting.
|
||||||
|
if args.output:
|
||||||
|
out_path = args.output
|
||||||
|
in_place = False
|
||||||
|
else:
|
||||||
|
out_path = path
|
||||||
|
in_place = True
|
||||||
|
|
||||||
|
# CONVERT mode
|
||||||
|
if args.convert:
|
||||||
|
print("\n[CONVERT MODE] Converting file to UTF-8...")
|
||||||
|
if in_place:
|
||||||
|
# Create backup before overwriting original
|
||||||
|
backup_path = path + ".bak"
|
||||||
|
shutil.copy2(path, backup_path)
|
||||||
|
print(f"Backup created: {backup_path}")
|
||||||
|
used = convert_to_utf8(path, out_path, src_encoding=args.encoding)
|
||||||
|
print(f"Source encoding used: {used}")
|
||||||
|
print(f"Saved UTF-8 file as: {out_path}")
|
||||||
|
ok, err = verify_utf8_file(out_path)
|
||||||
|
if ok:
|
||||||
|
print("Verification: output is valid UTF-8 ✅")
|
||||||
|
else:
|
||||||
|
print(f"Verification failed: {err}")
|
||||||
|
sys.exit(8)
|
||||||
|
return
|
||||||
|
|
||||||
|
# FIX mode (targeted, single-byte)
|
||||||
|
if args.fix:
|
||||||
|
print("\n[FIX MODE] Fixing only invalid bytes to UTF-8...")
|
||||||
|
if args.dry_run:
|
||||||
|
# Dry-run: never write or create backup
|
||||||
|
out_path_effective = os.devnull
|
||||||
|
in_place_effective = False
|
||||||
|
else:
|
||||||
|
out_path_effective = out_path
|
||||||
|
in_place_effective = in_place
|
||||||
|
|
||||||
|
# Build fallback chain (if --encoding provided, try it first)
|
||||||
|
if args.fallbacks:
|
||||||
|
fallback_chain = [e.strip() for e in args.fallbacks.split(",") if e.strip()]
|
||||||
|
else:
|
||||||
|
fallback_chain = detect_probable_fallbacks()
|
||||||
|
if args.encoding and args.encoding not in fallback_chain:
|
||||||
|
fallback_chain = [args.encoding] + fallback_chain
|
||||||
|
|
||||||
|
if in_place_effective:
|
||||||
|
# Create backup before overwriting original (only when actually writing)
|
||||||
|
backup_path = path + ".bak"
|
||||||
|
shutil.copy2(path, backup_path)
|
||||||
|
print(f"Backup created: {backup_path}")
|
||||||
|
|
||||||
|
fix_count = targeted_fix_to_utf8(
|
||||||
|
path,
|
||||||
|
out_path_effective,
|
||||||
|
fallback_chain=fallback_chain,
|
||||||
|
dry_run=args.dry_run,
|
||||||
|
max_fixes=args.max_fixes,
|
||||||
|
)
|
||||||
|
|
||||||
|
if not args.dry_run:
|
||||||
|
ok, err = verify_utf8_file(out_path_effective)
|
||||||
|
if ok:
|
||||||
|
print("Verification: output is valid UTF-8 ✅")
|
||||||
|
print(f"Fix mode completed — {fix_count} byte(s) corrected.")
|
||||||
|
else:
|
||||||
|
print(f"Verification failed: {err}")
|
||||||
|
sys.exit(8)
|
||||||
|
return
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
||||||
@@ -1,6 +1,5 @@
|
|||||||
#!/usr/bin/env python
|
#!/usr/bin/env python
|
||||||
|
|
||||||
import logging
|
|
||||||
import os
|
import os
|
||||||
import csv
|
import csv
|
||||||
|
|
||||||
@@ -9,60 +8,66 @@ def _main():
|
|||||||
input_csv_file_path = "base_reverse_dns.csv"
|
input_csv_file_path = "base_reverse_dns.csv"
|
||||||
base_reverse_dns_map_file_path = "base_reverse_dns_map.csv"
|
base_reverse_dns_map_file_path = "base_reverse_dns_map.csv"
|
||||||
known_unknown_list_file_path = "known_unknown_base_reverse_dns.txt"
|
known_unknown_list_file_path = "known_unknown_base_reverse_dns.txt"
|
||||||
|
psl_overrides_file_path = "psl_overrides.txt"
|
||||||
output_csv_file_path = "unknown_base_reverse_dns.csv"
|
output_csv_file_path = "unknown_base_reverse_dns.csv"
|
||||||
|
|
||||||
csv_headers = ["source_name", "message_count"]
|
csv_headers = ["source_name", "message_count"]
|
||||||
|
|
||||||
|
known_unknown_domains = []
|
||||||
|
psl_overrides = []
|
||||||
|
known_domains = []
|
||||||
output_rows = []
|
output_rows = []
|
||||||
|
|
||||||
logging.basicConfig()
|
def load_list(file_path, list_var):
|
||||||
logger = logging.getLogger(__name__)
|
if not os.path.exists(file_path):
|
||||||
logger.setLevel(logging.INFO)
|
print(f"Error: {file_path} does not exist")
|
||||||
|
print(f"Loading {file_path}")
|
||||||
|
with open(file_path) as f:
|
||||||
|
for line in f.readlines():
|
||||||
|
domain = line.lower().strip()
|
||||||
|
if domain in list_var:
|
||||||
|
print(f"Error: {domain} is in {file_path} multiple times")
|
||||||
|
exit(1)
|
||||||
|
elif domain != "":
|
||||||
|
list_var.append(domain)
|
||||||
|
|
||||||
for p in [
|
load_list(known_unknown_list_file_path, known_unknown_domains)
|
||||||
input_csv_file_path,
|
load_list(psl_overrides_file_path, psl_overrides)
|
||||||
base_reverse_dns_map_file_path,
|
if not os.path.exists(base_reverse_dns_map_file_path):
|
||||||
known_unknown_list_file_path,
|
print(f"Error: {base_reverse_dns_map_file_path} does not exist")
|
||||||
]:
|
print(f"Loading {base_reverse_dns_map_file_path}")
|
||||||
if not os.path.exists(p):
|
|
||||||
logger.error(f"{p} does not exist")
|
|
||||||
exit(1)
|
|
||||||
logger.info(f"Loading {known_unknown_list_file_path}")
|
|
||||||
known_unknown_domains = []
|
|
||||||
with open(known_unknown_list_file_path) as f:
|
|
||||||
for line in f.readlines():
|
|
||||||
domain = line.lower().strip()
|
|
||||||
if domain in known_unknown_domains:
|
|
||||||
logger.warning(
|
|
||||||
f"{domain} is in {known_unknown_list_file_path} multiple times"
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
known_unknown_domains.append(domain)
|
|
||||||
logger.info(f"Loading {base_reverse_dns_map_file_path}")
|
|
||||||
known_domains = []
|
|
||||||
with open(base_reverse_dns_map_file_path) as f:
|
with open(base_reverse_dns_map_file_path) as f:
|
||||||
for row in csv.DictReader(f):
|
for row in csv.DictReader(f):
|
||||||
domain = row["base_reverse_dns"].lower().strip()
|
domain = row["base_reverse_dns"].lower().strip()
|
||||||
if domain in known_domains:
|
if domain in known_domains:
|
||||||
logger.warning(
|
print(
|
||||||
f"{domain} is in {base_reverse_dns_map_file_path} multiple times"
|
f"Error: {domain} is in {base_reverse_dns_map_file_path} multiple times"
|
||||||
)
|
)
|
||||||
|
exit()
|
||||||
else:
|
else:
|
||||||
known_domains.append(domain)
|
known_domains.append(domain)
|
||||||
if domain in known_unknown_domains and known_domains:
|
if domain in known_unknown_domains and known_domains:
|
||||||
pass
|
print(
|
||||||
logger.warning(
|
f"Error:{domain} is in {known_unknown_list_file_path} and \
|
||||||
f"{domain} is in {known_unknown_list_file_path} and {base_reverse_dns_map_file_path}"
|
{base_reverse_dns_map_file_path}"
|
||||||
)
|
)
|
||||||
|
exit(1)
|
||||||
logger.info(f"Checking domains against {base_reverse_dns_map_file_path}")
|
if not os.path.exists(input_csv_file_path):
|
||||||
|
print(f"Error: {base_reverse_dns_map_file_path} does not exist")
|
||||||
|
exit(1)
|
||||||
with open(input_csv_file_path) as f:
|
with open(input_csv_file_path) as f:
|
||||||
for row in csv.DictReader(f):
|
for row in csv.DictReader(f):
|
||||||
domain = row["source_name"].lower().strip()
|
domain = row["source_name"].lower().strip()
|
||||||
|
if domain == "":
|
||||||
|
continue
|
||||||
|
for psl_domain in psl_overrides:
|
||||||
|
if domain.endswith(psl_domain):
|
||||||
|
domain = psl_domain.strip(".").strip("-")
|
||||||
|
break
|
||||||
if domain not in known_domains and domain not in known_unknown_domains:
|
if domain not in known_domains and domain not in known_unknown_domains:
|
||||||
logger.info(f"New unknown domain found: {domain}")
|
print(f"New unknown domain found: {domain}")
|
||||||
output_rows.append(row)
|
output_rows.append(row)
|
||||||
logger.info(f"Writing {output_csv_file_path}")
|
print(f"Writing {output_csv_file_path}")
|
||||||
with open(output_csv_file_path, "w") as f:
|
with open(output_csv_file_path, "w") as f:
|
||||||
writer = csv.DictWriter(f, fieldnames=csv_headers)
|
writer = csv.DictWriter(f, fieldnames=csv_headers)
|
||||||
writer.writeheader()
|
writer.writeheader()
|
||||||
|
|||||||
@@ -1,125 +1,601 @@
|
|||||||
200.in-addr.arpa
|
1jli.site
|
||||||
|
26.107
|
||||||
|
444qcuhilla.com
|
||||||
|
4xr1.com
|
||||||
|
9services.com
|
||||||
|
a7e.ru
|
||||||
|
a94434500-blog.com
|
||||||
|
aams8.jp
|
||||||
|
abv-10.top
|
||||||
|
acemail.co.in
|
||||||
|
activaicon.com
|
||||||
|
adcritic.net
|
||||||
adlucrumnewsletter.com
|
adlucrumnewsletter.com
|
||||||
admin.corpivensa.gob.ve
|
admin.corpivensa.gob.ve
|
||||||
|
advantageiq.com
|
||||||
|
advrider.ro
|
||||||
aerospacevitro.us.com
|
aerospacevitro.us.com
|
||||||
|
agenturserver.de
|
||||||
|
aghories.com
|
||||||
|
ai270.net
|
||||||
albagroup-eg.com
|
albagroup-eg.com
|
||||||
|
alchemy.net
|
||||||
|
alohabeachcamp.net
|
||||||
|
alsiscad.com
|
||||||
|
aluminumpipetubing.com
|
||||||
|
americanstorageca.com
|
||||||
|
amplusserver.info
|
||||||
|
anchorfundhub.com
|
||||||
|
anglishment.com
|
||||||
anteldata.net.uy
|
anteldata.net.uy
|
||||||
|
antis.edu
|
||||||
antonaoll.com
|
antonaoll.com
|
||||||
|
anviklass.org
|
||||||
|
anwrgrp.lat
|
||||||
aosau.net
|
aosau.net
|
||||||
arandomserver.com
|
arandomserver.com
|
||||||
|
aransk.ru
|
||||||
|
ardcs.cn
|
||||||
|
armninl.met
|
||||||
|
as29550.net
|
||||||
|
asahachimaru.com
|
||||||
|
aserv.co.za
|
||||||
asmecam.it
|
asmecam.it
|
||||||
|
ateky.net.br
|
||||||
|
aurelienvos.com
|
||||||
|
automatech.lat
|
||||||
|
avistaadvantage.com
|
||||||
b8sales.com
|
b8sales.com
|
||||||
|
bahjs.com
|
||||||
|
baliaura.com
|
||||||
|
banaras.co
|
||||||
|
bearandbullmarketnews.com
|
||||||
bestinvestingtime.com
|
bestinvestingtime.com
|
||||||
|
bhjui.com
|
||||||
biocorp.com
|
biocorp.com
|
||||||
bisno1.co.jp
|
biosophy.net
|
||||||
|
bitter-echo.com
|
||||||
|
bizhostingservices.com
|
||||||
|
blguss.com
|
||||||
|
bluenet.ch
|
||||||
bluhosting.com
|
bluhosting.com
|
||||||
|
bnasg.com
|
||||||
bodiax.pp.ua
|
bodiax.pp.ua
|
||||||
bost-law.com
|
bost-law.com
|
||||||
|
brainity.com
|
||||||
|
brazalnde.net
|
||||||
|
brellatransplc.shop
|
||||||
brnonet.cz
|
brnonet.cz
|
||||||
|
broadwaycover.com
|
||||||
brushinglegal.de
|
brushinglegal.de
|
||||||
|
brw.net
|
||||||
|
btes.tv
|
||||||
|
budgeteasehub.com
|
||||||
|
buoytoys.com
|
||||||
|
buyjapanese.jp
|
||||||
|
c53dw7m24rj.com
|
||||||
|
cahtelrandom.org
|
||||||
|
casadelmarsamara.com
|
||||||
|
cashflowmasterypro.com
|
||||||
|
cavabeen.com
|
||||||
|
cbti.net
|
||||||
|
centralmalaysia.com
|
||||||
|
chauffeurplan.co.uk
|
||||||
|
checkpox.fun
|
||||||
|
chegouseuvlache.org
|
||||||
|
chinaxingyu.xyz
|
||||||
christus.mx
|
christus.mx
|
||||||
|
churchills.market
|
||||||
|
ci-xyz.fit
|
||||||
|
cisumrecords.com
|
||||||
|
ckaik.cn
|
||||||
|
clcktoact.com
|
||||||
|
cli-eurosignal.cz
|
||||||
|
cloud-admin.it
|
||||||
cloud-edm.com
|
cloud-edm.com
|
||||||
|
cloudflare-email.org
|
||||||
|
cloudhosting.rs
|
||||||
cloudlogin.co
|
cloudlogin.co
|
||||||
|
cloudplatformpro.com
|
||||||
cnode.io
|
cnode.io
|
||||||
|
cntcloud.com
|
||||||
|
code-it.net
|
||||||
|
codefriend.top
|
||||||
|
colombiaceropapel.org
|
||||||
commerceinsurance.com
|
commerceinsurance.com
|
||||||
|
comsharempc.com
|
||||||
|
conexiona.com
|
||||||
coolblaze.com
|
coolblaze.com
|
||||||
|
coowo.com
|
||||||
|
corpemail.net
|
||||||
|
cp2-myorderbox.com
|
||||||
cps.com.ar
|
cps.com.ar
|
||||||
|
crnagora.net
|
||||||
|
cross-d-bar-troutranch.com
|
||||||
|
ctla.co.kr
|
||||||
|
cumbalikonakhotel.com
|
||||||
|
currencyexconverter.com
|
||||||
|
daakbabu.com
|
||||||
|
daikinmae.com
|
||||||
|
dairyvalley.com.my
|
||||||
|
dastans.ru
|
||||||
|
datahost36.de
|
||||||
|
ddii.network
|
||||||
|
deep-sek.shop
|
||||||
|
deetownsounds.com
|
||||||
|
descarca-counter-strike.net
|
||||||
detrot.xyz
|
detrot.xyz
|
||||||
|
dettlaffinc.com
|
||||||
|
dextoolse.net
|
||||||
|
digestivedaily.com
|
||||||
digi.net.my
|
digi.net.my
|
||||||
|
dinofelis.cn
|
||||||
|
diwkyncbi.top
|
||||||
dkginternet.com
|
dkginternet.com
|
||||||
|
dnexpress.info
|
||||||
|
dns-oid.com
|
||||||
|
dnsindia.net
|
||||||
|
domainserver.ne.jp
|
||||||
|
domconfig.com
|
||||||
doorsrv.com
|
doorsrv.com
|
||||||
|
dreampox.fun
|
||||||
dreamtechmedia.com
|
dreamtechmedia.com
|
||||||
ds.network
|
ds.network
|
||||||
|
dss-group.net
|
||||||
|
dvj.theworkpc.com
|
||||||
|
dwlcka.com
|
||||||
|
dynamic-wiretel.in
|
||||||
|
dyntcorp.com
|
||||||
|
easternkingspei.com
|
||||||
|
economiceagles.com
|
||||||
|
egosimail.com
|
||||||
|
eliotporterphotos.us
|
||||||
|
emailgids.net
|
||||||
emailperegrine.com
|
emailperegrine.com
|
||||||
|
entendercopilot.com
|
||||||
|
entretothom.net
|
||||||
|
epaycontrol.com
|
||||||
|
epicinvestmentsreview.co
|
||||||
|
epicinvestmentsreview.com
|
||||||
|
epik.com
|
||||||
epsilon-group.com
|
epsilon-group.com
|
||||||
|
erestaff.com
|
||||||
|
euro-trade-gmbh.com
|
||||||
|
example.com
|
||||||
|
exposervers.com-new
|
||||||
|
extendcp.co.uk
|
||||||
eyecandyhosting.xyz
|
eyecandyhosting.xyz
|
||||||
|
fastwebnet.it
|
||||||
|
fd9ing7wfn.com
|
||||||
|
feipnghardware.com
|
||||||
fetscorp.shop
|
fetscorp.shop
|
||||||
|
fewo-usedom.net
|
||||||
|
fin-crime.com
|
||||||
|
financeaimpoint.com
|
||||||
|
financeupward.com
|
||||||
|
firmflat.com
|
||||||
|
flex-video.bnr.la
|
||||||
|
flourishfusionlife.com
|
||||||
formicidaehunt.net
|
formicidaehunt.net
|
||||||
fosterheap.com
|
fosterheap.com
|
||||||
|
fredi.shop
|
||||||
|
frontiernet.net
|
||||||
|
ftifb7tk3c.com
|
||||||
|
gamersprotectionvpn.online
|
||||||
gendns.com
|
gendns.com
|
||||||
|
getgreencardsfast.com
|
||||||
|
getthatroi.com
|
||||||
|
gibbshosting.com
|
||||||
|
gigidea.net
|
||||||
|
giize.com
|
||||||
ginous.eu.com
|
ginous.eu.com
|
||||||
|
gis.net
|
||||||
gist-th.com
|
gist-th.com
|
||||||
|
globalglennpartners.com
|
||||||
|
goldsboroughplace.com
|
||||||
gophermedia.com
|
gophermedia.com
|
||||||
gqlists.us.com
|
gqlists.us.com
|
||||||
gratzl.de
|
gratzl.de
|
||||||
|
greatestworldnews.com
|
||||||
|
greennutritioncare.com
|
||||||
|
gsbb.com
|
||||||
|
gumbolimbo.net
|
||||||
|
h-serv.co.uk
|
||||||
|
haedefpartners.com
|
||||||
|
halcyon-aboveboard.com
|
||||||
|
hanzubon.org
|
||||||
|
healthfuljourneyjoy.com
|
||||||
hgnbroken.us.com
|
hgnbroken.us.com
|
||||||
|
highwey-diesel.com
|
||||||
|
hirofactory.com
|
||||||
|
hjd.asso.fr
|
||||||
|
hongchenggco.pro
|
||||||
|
hongkongtaxi.co
|
||||||
|
hopsinthehanger.com
|
||||||
|
hosted-by-worldstream.net
|
||||||
|
hostelsucre.com
|
||||||
hosting1337.com
|
hosting1337.com
|
||||||
|
hostinghane.com
|
||||||
|
hostinglotus.cloud
|
||||||
hostingmichigan.com
|
hostingmichigan.com
|
||||||
|
hostiran.name
|
||||||
|
hostmnl.com
|
||||||
hostname.localhost
|
hostname.localhost
|
||||||
hostnetwork.com
|
hostnetwork.com
|
||||||
|
hosts.net.nz
|
||||||
|
hostserv.eu
|
||||||
hostwhitelabel.com
|
hostwhitelabel.com
|
||||||
|
hpms1.jp
|
||||||
|
hunariojmk.net
|
||||||
|
hunriokinmuim.net
|
||||||
|
hypericine.com
|
||||||
|
i-mecca.net
|
||||||
|
iaasdns.com
|
||||||
|
iam.net.ma
|
||||||
|
iconmarketingguy.com
|
||||||
idcfcloud.net
|
idcfcloud.net
|
||||||
|
idealconcept.live
|
||||||
|
igmohji.com
|
||||||
|
igppevents.org.uk
|
||||||
|
ihglobaldns.com
|
||||||
|
ilmessicano.com
|
||||||
|
imjtmn.cn
|
||||||
immenzaces.com
|
immenzaces.com
|
||||||
|
in-addr-arpa
|
||||||
|
in-addr.arpa
|
||||||
|
indsalelimited.com
|
||||||
|
indulgent-holistic.com
|
||||||
|
industechint.org
|
||||||
|
inshaaegypt.com
|
||||||
|
intal.uz
|
||||||
|
interfarma.kz
|
||||||
|
intocpanel.com
|
||||||
|
ip-147-135-108.us
|
||||||
|
ip-178-33-109.eu
|
||||||
|
ip-ptr.tech
|
||||||
|
iswhatpercent.com
|
||||||
|
itsidc.com
|
||||||
|
itwebs.com
|
||||||
|
iuon.net
|
||||||
ivol.co
|
ivol.co
|
||||||
jalanet.co.id
|
jalanet.co.id
|
||||||
|
jimishare.com
|
||||||
|
jlccptt.net.cn
|
||||||
|
jlenterprises.co.uk
|
||||||
|
jmontalto.com
|
||||||
|
joyomokei.com
|
||||||
|
jumanra.org
|
||||||
|
justlongshirts.com
|
||||||
kahlaa.com
|
kahlaa.com
|
||||||
|
kaw.theworkpc.com
|
||||||
kbronet.com.tw
|
kbronet.com.tw
|
||||||
kdnursing.org
|
kdnursing.org
|
||||||
|
kielnet.net
|
||||||
|
kihy.theworkpc.com
|
||||||
|
kingschurchwirral.org
|
||||||
kitchenaildbd.com
|
kitchenaildbd.com
|
||||||
|
klaomi.shop
|
||||||
|
knkconsult.net
|
||||||
|
kohshikai.com
|
||||||
|
krhfund.org
|
||||||
|
krillaglass.com
|
||||||
|
lancorhomes.com
|
||||||
|
landpedia.org
|
||||||
|
lanzatuseo.es
|
||||||
|
layerdns.cloud
|
||||||
|
learninglinked.com
|
||||||
legenditds.com
|
legenditds.com
|
||||||
|
levertechcentre.com
|
||||||
|
lhost.no
|
||||||
|
lideri.net.br
|
||||||
lighthouse-media.com
|
lighthouse-media.com
|
||||||
|
lightpath.net
|
||||||
|
limogesporcelainboxes.com
|
||||||
|
lindsaywalt.net
|
||||||
|
linuxsunucum.com
|
||||||
|
listertermoformadoa.com
|
||||||
|
llsend.com
|
||||||
|
local.net
|
||||||
lohkal.com
|
lohkal.com
|
||||||
|
londionrtim.net
|
||||||
lonestarmm.net
|
lonestarmm.net
|
||||||
|
longmarquis.com
|
||||||
|
longwoodmgmt.com
|
||||||
|
lse.kz
|
||||||
|
lunvoy.com
|
||||||
|
luxarpro.ru
|
||||||
|
lwl-puehringer.at
|
||||||
|
lynx.net.lb
|
||||||
|
lyse.net
|
||||||
|
m-sender.com.ua
|
||||||
|
maggiolicloud.it
|
||||||
magnetmail.net
|
magnetmail.net
|
||||||
|
magnumgo.uz
|
||||||
|
maia11.com
|
||||||
|
mail-fire.com
|
||||||
|
mailsentinel.net
|
||||||
|
mailset.cn
|
||||||
|
malardino.net
|
||||||
|
managed-vps.net
|
||||||
manhattanbulletpoint.com
|
manhattanbulletpoint.com
|
||||||
|
manpowerservices.com
|
||||||
|
marketmysterycode.com
|
||||||
|
marketwizardspro.com
|
||||||
masterclassjournal.com
|
masterclassjournal.com
|
||||||
|
matroguel.cam
|
||||||
|
maximpactipo.com
|
||||||
|
mechanicalwalk.store
|
||||||
|
mediavobis.com
|
||||||
|
meqlobal.com
|
||||||
|
mgts.by
|
||||||
|
migrans.net
|
||||||
|
miixta.com
|
||||||
|
milleniumsrv.com
|
||||||
|
mindworksunlimited.com
|
||||||
|
mirth-gale.com
|
||||||
|
misorpresa.com
|
||||||
|
mitomobile.com
|
||||||
|
mitsubachi-kibako.net
|
||||||
|
mjinn.com
|
||||||
|
mkegs.shop
|
||||||
|
mobius.fr
|
||||||
|
model-ac.ink
|
||||||
moderntradingnews.com
|
moderntradingnews.com
|
||||||
|
monnaiegroup.com
|
||||||
|
monopolizeright.com
|
||||||
moonjaws.com
|
moonjaws.com
|
||||||
|
morningnewscatcher.com
|
||||||
motion4ever.net
|
motion4ever.net
|
||||||
mschosting.com
|
mschosting.com
|
||||||
|
msdp1.com
|
||||||
mspnet.pro
|
mspnet.pro
|
||||||
mts-nn.ru
|
mts-nn.ru
|
||||||
|
multifamilydesign.com
|
||||||
|
mxserver.ro
|
||||||
mxthunder.net
|
mxthunder.net
|
||||||
|
my-ihor.ru
|
||||||
|
mycloudmailbox.com
|
||||||
|
myfriendforum.com
|
||||||
myrewards.net
|
myrewards.net
|
||||||
mysagestore.com
|
mysagestore.com
|
||||||
|
mysecurewebserver.com
|
||||||
|
myshanet.net
|
||||||
|
myvps.jp
|
||||||
|
mywedsite.net
|
||||||
|
mywic.eu
|
||||||
|
name.tools
|
||||||
|
nanshenqfurniture.com
|
||||||
|
nask.pl
|
||||||
|
navertise.net
|
||||||
|
ncbb.kz
|
||||||
ncport.ru
|
ncport.ru
|
||||||
|
ncsdi.ws
|
||||||
nebdig.com
|
nebdig.com
|
||||||
neovet-base.ru
|
neovet-base.ru
|
||||||
|
netbri.com
|
||||||
|
netcentertelecom.net.br
|
||||||
|
neti.ee
|
||||||
|
netkl.org
|
||||||
|
newinvestingguide.com
|
||||||
|
newwallstreetcode.com
|
||||||
|
ngvcv.cn
|
||||||
nic.name
|
nic.name
|
||||||
nidix.net
|
nidix.net
|
||||||
|
nieuwedagnetwerk.net
|
||||||
|
nlscanme.com
|
||||||
|
nmeuh.cn
|
||||||
|
noisndametal.com
|
||||||
|
nucleusemail.com
|
||||||
|
nutriboostlife.com
|
||||||
|
nwo.giize.com
|
||||||
|
nwwhalewatchers.org
|
||||||
|
ny.adsl
|
||||||
|
nyt1.com
|
||||||
|
offerslatedeals.com
|
||||||
|
office365.us
|
||||||
ogicom.net
|
ogicom.net
|
||||||
|
olivettilexikon.co.uk
|
||||||
omegabrasil.inf.br
|
omegabrasil.inf.br
|
||||||
onnet21.com
|
onnet21.com
|
||||||
|
onumubunumu.com
|
||||||
|
oppt-ac.fit
|
||||||
|
orbitel.net.co
|
||||||
|
orfsurface.com
|
||||||
|
orientalspot.com
|
||||||
|
outsidences.com
|
||||||
ovaltinalization.co
|
ovaltinalization.co
|
||||||
overta.ru
|
overta.ru
|
||||||
|
ox28vgrurc.com
|
||||||
|
pamulang.net
|
||||||
|
panaltyspot.space
|
||||||
|
panolacountysheriffms.com
|
||||||
passionatesmiles.com
|
passionatesmiles.com
|
||||||
|
paulinelam.com
|
||||||
|
pdi-corp.com
|
||||||
|
peloquinbeck.com
|
||||||
|
perimetercenter.net
|
||||||
|
permanentscreen.com
|
||||||
|
permasteellisagroup.com
|
||||||
|
perumkijhyu.net
|
||||||
|
pesnia.com.ua
|
||||||
|
ph8ltwdi12o.com
|
||||||
|
pharmada.com.de
|
||||||
|
phdns3.es
|
||||||
|
pigelixval1.com
|
||||||
|
pipefittingsindia.com
|
||||||
planethoster.net
|
planethoster.net
|
||||||
|
playamedia.io
|
||||||
|
plesk.page
|
||||||
pmnhost.net
|
pmnhost.net
|
||||||
|
pokiloandhu.net
|
||||||
|
pokupki5.ru
|
||||||
|
polandi.net
|
||||||
popiup.com
|
popiup.com
|
||||||
|
ports.net
|
||||||
|
posolstvostilya.com
|
||||||
|
potia.net
|
||||||
prima.com.ar
|
prima.com.ar
|
||||||
prima.net.ar
|
prima.net.ar
|
||||||
|
profsol.co.uk
|
||||||
|
prohealthmotion.com
|
||||||
|
promooffermarket.site
|
||||||
proudserver.com
|
proudserver.com
|
||||||
|
proxado.com
|
||||||
|
psnm.ru
|
||||||
|
pvcwindowsprices.live
|
||||||
qontenciplc.autos
|
qontenciplc.autos
|
||||||
|
quakeclick.com
|
||||||
|
quasarstate.store
|
||||||
|
quatthonggiotico.com
|
||||||
|
qxyxab44njd.com
|
||||||
|
radianthealthrenaissance.com
|
||||||
|
rapidns.com
|
||||||
raxa.host
|
raxa.host
|
||||||
|
reberte.com
|
||||||
|
reethvikintl.com
|
||||||
|
regruhosting.ru
|
||||||
|
reliablepanel.com
|
||||||
|
rgb365.eu
|
||||||
|
riddlecamera.net
|
||||||
|
riddletrends.com
|
||||||
|
roccopugliese.com
|
||||||
|
runnin-rebels.com
|
||||||
|
rupar.puglia.it
|
||||||
|
rwdhosting.ca
|
||||||
|
s500host.com
|
||||||
|
sageevents.co.ke
|
||||||
sahacker-2020.com
|
sahacker-2020.com
|
||||||
samsales.site
|
samsales.site
|
||||||
|
sante-lorraine.fr
|
||||||
|
saransk.ru
|
||||||
satirogluet.com
|
satirogluet.com
|
||||||
securednshost.com
|
scioncontacts.com
|
||||||
|
sdcc.my
|
||||||
|
seaspraymta3.net
|
||||||
|
secorp.mx
|
||||||
securen.net
|
securen.net
|
||||||
securerelay.in
|
securerelay.in
|
||||||
securev.net
|
securev.net
|
||||||
|
seductiveeyes.com
|
||||||
|
seizethedayconsulting.com
|
||||||
|
serroplast.shop
|
||||||
|
server290.com
|
||||||
|
server342.com
|
||||||
|
server3559.cc
|
||||||
servershost.biz
|
servershost.biz
|
||||||
|
sfek.kz
|
||||||
|
sgnetway.net
|
||||||
|
shopfox.ca
|
||||||
|
silvestrejaguar.sbs
|
||||||
|
silvestreonca.sbs
|
||||||
|
simplediagnostics.org
|
||||||
|
siriuscloud.jp
|
||||||
|
sisglobalresearch.com
|
||||||
|
sixpacklink.net
|
||||||
|
sjestyle.com
|
||||||
smallvillages.com
|
smallvillages.com
|
||||||
|
smartape-vps.com
|
||||||
solusoftware.com
|
solusoftware.com
|
||||||
|
sourcedns.com
|
||||||
|
southcoastwebhosting12.com
|
||||||
|
specialtvvs.com
|
||||||
spiritualtechnologies.io
|
spiritualtechnologies.io
|
||||||
sprout.org
|
sprout.org
|
||||||
|
srv.cat
|
||||||
stableserver.net
|
stableserver.net
|
||||||
|
statlerfa.co.uk
|
||||||
|
stock-smtp.top
|
||||||
|
stockepictigers.com
|
||||||
stockexchangejournal.com
|
stockexchangejournal.com
|
||||||
|
subterranean-concave.com
|
||||||
suksangroup.com
|
suksangroup.com
|
||||||
|
swissbluetopaz.com
|
||||||
|
switer.shop
|
||||||
|
sysop4.com
|
||||||
system.eu.com
|
system.eu.com
|
||||||
|
szhongbing.com
|
||||||
t-jon.com
|
t-jon.com
|
||||||
|
tacaindo.net
|
||||||
|
tacom.tj
|
||||||
|
tankertelz.co
|
||||||
|
tataidc.com
|
||||||
|
teamveiw.com
|
||||||
|
tecnoxia.net
|
||||||
|
tel-xyz.fit
|
||||||
tenkids.net
|
tenkids.net
|
||||||
|
terminavalley.com
|
||||||
thaicloudsolutions.com
|
thaicloudsolutions.com
|
||||||
|
thaikinghost.com
|
||||||
thaimonster.com
|
thaimonster.com
|
||||||
|
thegermainetruth.net
|
||||||
|
thehandmaderose.com
|
||||||
|
thepushcase.com
|
||||||
|
ticdns.com
|
||||||
|
tigo.bo
|
||||||
|
toledofibra.net.br
|
||||||
|
topdns.com
|
||||||
|
totaal.net
|
||||||
|
totalplay.net
|
||||||
|
tqh.ro
|
||||||
|
traderlearningcenter.com
|
||||||
|
tradeukraine.site
|
||||||
|
traveleza.com
|
||||||
|
trwww.com
|
||||||
|
tsuzakij.com
|
||||||
tullostrucking.com
|
tullostrucking.com
|
||||||
|
turbinetrends.com
|
||||||
|
twincitiesdistinctivehomes.com
|
||||||
|
tylerfordonline.com
|
||||||
|
uiyum.com
|
||||||
|
ultragate.com
|
||||||
|
uneedacollie.com
|
||||||
|
unified.services
|
||||||
unite.services
|
unite.services
|
||||||
urawasl.com
|
urawasl.com
|
||||||
us.servername.us
|
us.servername.us
|
||||||
|
vagebond.net
|
||||||
|
varvia.de
|
||||||
|
vbcploo.com
|
||||||
|
vdc.vn
|
||||||
vendimetry.com
|
vendimetry.com
|
||||||
vibrantwellnesscorp.com
|
vibrantwellnesscorp.com
|
||||||
|
virtualine.org
|
||||||
|
visit.docotor
|
||||||
|
viviotech.us
|
||||||
|
vlflgl.com
|
||||||
|
volganet.ru
|
||||||
|
vrns.net
|
||||||
|
vulterdi.edu
|
||||||
|
vvondertex.com
|
||||||
wallstreetsgossip.com
|
wallstreetsgossip.com
|
||||||
|
wamego.net
|
||||||
|
wanekoohost.com
|
||||||
|
wealthexpertisepro.com
|
||||||
|
web-login.eu
|
||||||
weblinkinternational.com
|
weblinkinternational.com
|
||||||
|
webnox.io
|
||||||
|
websale.net
|
||||||
|
welllivinghive.com
|
||||||
|
westparkcom.com
|
||||||
|
wetransfer-eu.com
|
||||||
|
wheelch.me
|
||||||
|
whoflew.com
|
||||||
|
whpservers.com
|
||||||
|
wisdomhard.com
|
||||||
|
wisewealthcircle.com
|
||||||
|
wisvis.com
|
||||||
|
wodeniowa.com
|
||||||
|
wordpresshosting.xyz
|
||||||
|
wsiph2.com
|
||||||
|
xnt.mx
|
||||||
|
xodiax.com
|
||||||
|
xpnuf.cn
|
||||||
xsfati.us.com
|
xsfati.us.com
|
||||||
xspmail.jp
|
xspmail.jp
|
||||||
|
yourciviccompass.com
|
||||||
|
yourinvestworkbook.com
|
||||||
|
yoursitesecure.net
|
||||||
zerowebhosting.net
|
zerowebhosting.net
|
||||||
|
zmml.uk
|
||||||
znlc.jp
|
znlc.jp
|
||||||
|
ztomy.com
|
||||||
|
|||||||
23
parsedmarc/resources/maps/psl_overrides.txt
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
-applefibernet.com
|
||||||
|
-c3.net.pl
|
||||||
|
-celsiainternet.com
|
||||||
|
-clientes-izzi.mx
|
||||||
|
-clientes-zap-izzi.mx
|
||||||
|
-imnet.com.br
|
||||||
|
-mcnbd.com
|
||||||
|
-smile.com.bd
|
||||||
|
-tataidc.co.in
|
||||||
|
-veloxfiber.com.br
|
||||||
|
-wconect.com.br
|
||||||
|
.amazonaws.com
|
||||||
|
.cloudaccess.net
|
||||||
|
.ddnsgeek.com
|
||||||
|
.fastvps-server.com
|
||||||
|
.in-addr-arpa
|
||||||
|
.in-addr.arpa
|
||||||
|
.kasserver.com
|
||||||
|
.kinghost.net
|
||||||
|
.linode.com
|
||||||
|
.linodeusercontent.com
|
||||||
|
.na4u.ru
|
||||||
|
.sakura.ne.jp
|
||||||
184
parsedmarc/resources/maps/sortlists.py
Executable file
@@ -0,0 +1,184 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import os
|
||||||
|
import csv
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Mapping, Iterable, Optional, Collection, Union, List, Dict
|
||||||
|
|
||||||
|
|
||||||
|
class CSVValidationError(Exception):
|
||||||
|
def __init__(self, errors: list[str]):
|
||||||
|
super().__init__("\n".join(errors))
|
||||||
|
self.errors = errors
|
||||||
|
|
||||||
|
|
||||||
|
def sort_csv(
|
||||||
|
filepath: Union[str, Path],
|
||||||
|
field: str,
|
||||||
|
*,
|
||||||
|
sort_field_value_must_be_unique: bool = True,
|
||||||
|
strip_whitespace: bool = True,
|
||||||
|
fields_to_lowercase: Optional[Iterable[str]] = None,
|
||||||
|
case_insensitive_sort: bool = False,
|
||||||
|
required_fields: Optional[Iterable[str]] = None,
|
||||||
|
allowed_values: Optional[Mapping[str, Collection[str]]] = None,
|
||||||
|
) -> List[Dict[str, str]]:
|
||||||
|
"""
|
||||||
|
Read a CSV, optionally normalize rows (strip whitespace, lowercase certain fields),
|
||||||
|
validate field values, and write the sorted CSV back to the same path.
|
||||||
|
|
||||||
|
- filepath: Path to the CSV to sort.
|
||||||
|
- field: The field name to sort by.
|
||||||
|
- fields_to_lowercase: Permanently lowercases these field(s) in the data.
|
||||||
|
- strip_whitespace: Remove all whitespace at the beginning and end of field values.
|
||||||
|
- case_insensitive_sort: Ignore case when sorting without changing values.
|
||||||
|
- required_fields: A list of fields that must have data in all rows.
|
||||||
|
- allowed_values: A mapping of allowed values for fields.
|
||||||
|
"""
|
||||||
|
path = Path(filepath)
|
||||||
|
required_fields = set(required_fields or [])
|
||||||
|
lower_set = set(fields_to_lowercase or [])
|
||||||
|
allowed_sets = {k: set(v) for k, v in (allowed_values or {}).items()}
|
||||||
|
if sort_field_value_must_be_unique:
|
||||||
|
seen_sort_field_values = []
|
||||||
|
|
||||||
|
with path.open("r", newline="") as infile:
|
||||||
|
reader = csv.DictReader(infile)
|
||||||
|
fieldnames = reader.fieldnames or []
|
||||||
|
if field not in fieldnames:
|
||||||
|
raise CSVValidationError([f"Missing sort column: {field!r}"])
|
||||||
|
missing_headers = required_fields - set(fieldnames)
|
||||||
|
if missing_headers:
|
||||||
|
raise CSVValidationError(
|
||||||
|
[f"Missing required header(s): {sorted(missing_headers)}"]
|
||||||
|
)
|
||||||
|
rows = list(reader)
|
||||||
|
|
||||||
|
def normalize_row(row: Dict[str, str]) -> None:
|
||||||
|
if strip_whitespace:
|
||||||
|
for k, v in row.items():
|
||||||
|
if isinstance(v, str):
|
||||||
|
row[k] = v.strip()
|
||||||
|
for fld in lower_set:
|
||||||
|
if fld in row and isinstance(row[fld], str):
|
||||||
|
row[fld] = row[fld].lower()
|
||||||
|
|
||||||
|
def validate_row(
|
||||||
|
row: Dict[str, str], sort_field: str, line_no: int, errors: list[str]
|
||||||
|
) -> None:
|
||||||
|
if sort_field_value_must_be_unique:
|
||||||
|
if row[sort_field] in seen_sort_field_values:
|
||||||
|
errors.append(f"Line {line_no}: Duplicate row for '{row[sort_field]}'")
|
||||||
|
else:
|
||||||
|
seen_sort_field_values.append(row[sort_field])
|
||||||
|
for rf in required_fields:
|
||||||
|
val = row.get(rf)
|
||||||
|
if val is None or val == "":
|
||||||
|
errors.append(
|
||||||
|
f"Line {line_no}: Missing value for required field '{rf}'"
|
||||||
|
)
|
||||||
|
for field, allowed_values in allowed_sets.items():
|
||||||
|
if field in row:
|
||||||
|
val = row[field]
|
||||||
|
if val not in allowed_values:
|
||||||
|
errors.append(
|
||||||
|
f"Line {line_no}: '{val}' is not an allowed value for '{field}' "
|
||||||
|
f"(allowed: {sorted(allowed_values)})"
|
||||||
|
)
|
||||||
|
|
||||||
|
errors: list[str] = []
|
||||||
|
for idx, row in enumerate(rows, start=2): # header is line 1
|
||||||
|
normalize_row(row)
|
||||||
|
validate_row(row, field, idx, errors)
|
||||||
|
|
||||||
|
if errors:
|
||||||
|
raise CSVValidationError(errors)
|
||||||
|
|
||||||
|
def sort_key(r: Dict[str, str]):
|
||||||
|
v = r.get(field, "")
|
||||||
|
if isinstance(v, str) and case_insensitive_sort:
|
||||||
|
return v.casefold()
|
||||||
|
return v
|
||||||
|
|
||||||
|
rows.sort(key=sort_key)
|
||||||
|
|
||||||
|
with open(filepath, "w", newline="") as outfile:
|
||||||
|
writer = csv.DictWriter(outfile, fieldnames=fieldnames)
|
||||||
|
writer.writeheader()
|
||||||
|
writer.writerows(rows)
|
||||||
|
|
||||||
|
|
||||||
|
def sort_list_file(
|
||||||
|
filepath: Union[str, Path],
|
||||||
|
*,
|
||||||
|
lowercase: bool = True,
|
||||||
|
strip: bool = True,
|
||||||
|
deduplicate: bool = True,
|
||||||
|
remove_blank_lines: bool = True,
|
||||||
|
ending_newline: bool = True,
|
||||||
|
newline: Optional[str] = "\n",
|
||||||
|
):
|
||||||
|
"""Read a list from a file, sort it, optionally strip and deduplicate the values,
|
||||||
|
then write that list back to the file.
|
||||||
|
|
||||||
|
- filepath: The path to the file.
|
||||||
|
- lowercase: Lowercase all values prior to sorting.
|
||||||
|
- remove_blank_lines: Remove any blank lines.
|
||||||
|
- ending_newline: End the file with a newline, even if remove_blank_lines is true.
|
||||||
|
- newline: The newline character to use.
|
||||||
|
"""
|
||||||
|
with open(filepath, mode="r", newline=newline) as infile:
|
||||||
|
lines = infile.readlines()
|
||||||
|
for i in range(len(lines)):
|
||||||
|
if lowercase:
|
||||||
|
lines[i] = lines[i].lower()
|
||||||
|
if strip:
|
||||||
|
lines[i] = lines[i].strip()
|
||||||
|
if deduplicate:
|
||||||
|
lines = list(set(lines))
|
||||||
|
if remove_blank_lines:
|
||||||
|
while "" in lines:
|
||||||
|
lines.remove("")
|
||||||
|
lines = sorted(lines)
|
||||||
|
if ending_newline:
|
||||||
|
if lines[-1] != "":
|
||||||
|
lines.append("")
|
||||||
|
with open(filepath, mode="w", newline=newline) as outfile:
|
||||||
|
outfile.write("\n".join(lines))
|
||||||
|
|
||||||
|
|
||||||
|
def _main():
|
||||||
|
map_file = "base_reverse_dns_map.csv"
|
||||||
|
map_key = "base_reverse_dns"
|
||||||
|
list_files = ["known_unknown_base_reverse_dns.txt", "psl_overrides.txt"]
|
||||||
|
types_file = "base_reverse_dns_types.txt"
|
||||||
|
|
||||||
|
with open(types_file) as f:
|
||||||
|
types = f.readlines()
|
||||||
|
while "" in types:
|
||||||
|
types.remove("")
|
||||||
|
|
||||||
|
map_allowed_values = {"Type": types}
|
||||||
|
|
||||||
|
for list_file in list_files:
|
||||||
|
if not os.path.exists(list_file):
|
||||||
|
print(f"Error: {list_file} does not exist")
|
||||||
|
exit(1)
|
||||||
|
sort_list_file(list_file)
|
||||||
|
if not os.path.exists(types_file):
|
||||||
|
print(f"Error: {types_file} does not exist")
|
||||||
|
exit(1)
|
||||||
|
sort_list_file(types_file, lowercase=False)
|
||||||
|
if not os.path.exists(map_file):
|
||||||
|
print(f"Error: {map_file} does not exist")
|
||||||
|
exit(1)
|
||||||
|
try:
|
||||||
|
sort_csv(map_file, map_key, allowed_values=map_allowed_values)
|
||||||
|
except CSVValidationError as e:
|
||||||
|
print(f"{map_file} did not validate: {e}")
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
_main()
|
||||||
@@ -1,5 +1,9 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from typing import Any
|
||||||
|
|
||||||
import json
|
import json
|
||||||
import boto3
|
import boto3
|
||||||
|
|
||||||
@@ -8,16 +12,16 @@ from parsedmarc.utils import human_timestamp_to_datetime
|
|||||||
|
|
||||||
|
|
||||||
class S3Client(object):
|
class S3Client(object):
|
||||||
"""A client for a Amazon S3"""
|
"""A client for interacting with Amazon S3"""
|
||||||
|
|
||||||
def __init__(
|
def __init__(
|
||||||
self,
|
self,
|
||||||
bucket_name,
|
bucket_name: str,
|
||||||
bucket_path,
|
bucket_path: str,
|
||||||
region_name,
|
region_name: str,
|
||||||
endpoint_url,
|
endpoint_url: str,
|
||||||
access_key_id,
|
access_key_id: str,
|
||||||
secret_access_key,
|
secret_access_key: str,
|
||||||
):
|
):
|
||||||
"""
|
"""
|
||||||
Initializes the S3Client
|
Initializes the S3Client
|
||||||
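A minimal instantiation sketch (all values here are placeholders, not taken from this diff) might look like:

```python
# Hypothetical sketch: store parsed reports in an S3 bucket.
client = S3Client(
    bucket_name="example-dmarc-bucket",
    bucket_path="parsedmarc",
    region_name="us-east-1",
    endpoint_url="https://s3.us-east-1.amazonaws.com",
    access_key_id="EXAMPLE_KEY_ID",
    secret_access_key="EXAMPLE_SECRET",
)
client.save_aggregate_report_to_s3(report)  # `report` is a parsed aggregate report dict
```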
@@ -47,18 +51,18 @@ class S3Client(object):
|
|||||||
aws_access_key_id=access_key_id,
|
aws_access_key_id=access_key_id,
|
||||||
aws_secret_access_key=secret_access_key,
|
aws_secret_access_key=secret_access_key,
|
||||||
)
|
)
|
||||||
self.bucket = self.s3.Bucket(self.bucket_name)
|
self.bucket: Any = self.s3.Bucket(self.bucket_name)
|
||||||
|
|
||||||
def save_aggregate_report_to_s3(self, report):
|
def save_aggregate_report_to_s3(self, report: dict[str, Any]):
|
||||||
self.save_report_to_s3(report, "aggregate")
|
self.save_report_to_s3(report, "aggregate")
|
||||||
|
|
||||||
def save_forensic_report_to_s3(self, report):
|
def save_forensic_report_to_s3(self, report: dict[str, Any]):
|
||||||
self.save_report_to_s3(report, "forensic")
|
self.save_report_to_s3(report, "forensic")
|
||||||
|
|
||||||
def save_smtp_tls_report_to_s3(self, report):
|
def save_smtp_tls_report_to_s3(self, report: dict[str, Any]):
|
||||||
self.save_report_to_s3(report, "smtp_tls")
|
self.save_report_to_s3(report, "smtp_tls")
|
||||||
|
|
||||||
def save_report_to_s3(self, report, report_type):
|
def save_report_to_s3(self, report: dict[str, Any], report_type: str):
|
||||||
if report_type == "smtp_tls":
|
if report_type == "smtp_tls":
|
||||||
report_date = report["begin_date"]
|
report_date = report["begin_date"]
|
||||||
report_id = report["report_id"]
|
report_id = report["report_id"]
|
||||||
|
|||||||
@@ -1,3 +1,10 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import annotations
+
+from typing import Any, Union
+
+
 from urllib.parse import urlparse
 import socket
 import json
@@ -5,7 +12,7 @@ import json
 import urllib3
 import requests
 
-from parsedmarc import __version__
+from parsedmarc.constants import USER_AGENT
 from parsedmarc.log import logger
 from parsedmarc.utils import human_timestamp_to_unix_timestamp
 
@@ -23,7 +30,13 @@ class HECClient(object):
     # http://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTinput#services.2Fcollector
 
     def __init__(
-        self, url, access_token, index, source="parsedmarc", verify=True, timeout=60
+        self,
+        url: str,
+        access_token: str,
+        index: str,
+        source: str = "parsedmarc",
+        verify=True,
+        timeout=60,
     ):
         """
         Initializes the HECClient
@@ -37,9 +50,9 @@ class HECClient(object):
             timeout (float): Number of seconds to wait for the server to send
                 data before giving up
         """
-        url = urlparse(url)
+        parsed_url = urlparse(url)
         self.url = "{0}://{1}/services/collector/event/1.0".format(
-            url.scheme, url.netloc
+            parsed_url.scheme, parsed_url.netloc
         )
         self.access_token = access_token.lstrip("Splunk ")
         self.index = index
@@ -48,14 +61,19 @@ class HECClient(object):
         self.session = requests.Session()
         self.timeout = timeout
         self.session.verify = verify
-        self._common_data = dict(host=self.host, source=self.source, index=self.index)
+        self._common_data: dict[str, Union[str, int, float, dict]] = dict(
+            host=self.host, source=self.source, index=self.index
+        )
 
         self.session.headers = {
-            "User-Agent": "parsedmarc/{0}".format(__version__),
+            "User-Agent": USER_AGENT,
            "Authorization": "Splunk {0}".format(self.access_token),
         }
 
-    def save_aggregate_reports_to_splunk(self, aggregate_reports):
+    def save_aggregate_reports_to_splunk(
+        self,
+        aggregate_reports: Union[list[dict[str, Any]], dict[str, Any]],
+    ):
         """
         Saves aggregate DMARC reports to Splunk
 
@@ -75,9 +93,12 @@ class HECClient(object):
         json_str = ""
         for report in aggregate_reports:
             for record in report["records"]:
-                new_report = dict()
+                new_report: dict[str, Union[str, int, float, dict]] = dict()
                 for metadata in report["report_metadata"]:
                     new_report[metadata] = report["report_metadata"][metadata]
+                new_report["interval_begin"] = record["interval_begin"]
+                new_report["interval_end"] = record["interval_end"]
+                new_report["normalized_timespan"] = record["normalized_timespan"]
                 new_report["published_policy"] = report["policy_published"]
                 new_report["source_ip_address"] = record["source"]["ip_address"]
                 new_report["source_country"] = record["source"]["country"]
@@ -98,7 +119,9 @@ class HECClient(object):
                 new_report["spf_results"] = record["auth_results"]["spf"]
 
                 data["sourcetype"] = "dmarc:aggregate"
-                timestamp = human_timestamp_to_unix_timestamp(new_report["begin_date"])
+                timestamp = human_timestamp_to_unix_timestamp(
+                    new_report["interval_begin"]
+                )
                 data["time"] = timestamp
                 data["event"] = new_report.copy()
                 json_str += "{0}\n".format(json.dumps(data))
@@ -113,7 +136,10 @@ class HECClient(object):
         if response["code"] != 0:
             raise SplunkError(response["text"])
 
-    def save_forensic_reports_to_splunk(self, forensic_reports):
+    def save_forensic_reports_to_splunk(
+        self,
+        forensic_reports: Union[list[dict[str, Any]], dict[str, Any]],
+    ):
         """
         Saves forensic DMARC reports to Splunk
 
@@ -147,7 +173,9 @@ class HECClient(object):
         if response["code"] != 0:
             raise SplunkError(response["text"])
 
-    def save_smtp_tls_reports_to_splunk(self, reports):
+    def save_smtp_tls_reports_to_splunk(
+        self, reports: Union[list[dict[str, Any]], dict[str, Any]]
+    ):
         """
         Saves aggregate DMARC reports to Splunk
 
@@ -1,7 +1,14 @@
 # -*- coding: utf-8 -*-
 
+
+from __future__ import annotations
+
 import logging
 import logging.handlers
+
+from typing import Any
+
+
 import json
 
 from parsedmarc import (
@@ -14,7 +21,7 @@ from parsedmarc import (
 class SyslogClient(object):
     """A client for Syslog"""
 
-    def __init__(self, server_name, server_port):
+    def __init__(self, server_name: str, server_port: int):
         """
         Initializes the SyslogClient
         Args:
@@ -28,17 +35,23 @@ class SyslogClient(object):
         log_handler = logging.handlers.SysLogHandler(address=(server_name, server_port))
         self.logger.addHandler(log_handler)
 
-    def save_aggregate_report_to_syslog(self, aggregate_reports):
+    def save_aggregate_report_to_syslog(
+        self, aggregate_reports: list[dict[str, Any]]
+    ):
         rows = parsed_aggregate_reports_to_csv_rows(aggregate_reports)
         for row in rows:
             self.logger.info(json.dumps(row))
 
-    def save_forensic_report_to_syslog(self, forensic_reports):
+    def save_forensic_report_to_syslog(
+        self, forensic_reports: list[dict[str, Any]]
+    ):
         rows = parsed_forensic_reports_to_csv_rows(forensic_reports)
         for row in rows:
             self.logger.info(json.dumps(row))
 
-    def save_smtp_tls_report_to_syslog(self, smtp_tls_reports):
+    def save_smtp_tls_report_to_syslog(
+        self, smtp_tls_reports: list[dict[str, Any]]
+    ):
         rows = parsed_smtp_tls_reports_to_csv_rows(smtp_tls_reports)
         for row in rows:
             self.logger.info(json.dumps(row))
@@ -1,11 +1,17 @@
+# -*- coding: utf-8 -*-
+
 """Utility functions that might be useful for other projects"""
 
+from __future__ import annotations
+
+from typing import Optional, Union, TypedDict, Any
+
 import logging
 import os
 from datetime import datetime
 from datetime import timezone
 from datetime import timedelta
-from collections import OrderedDict
+from expiringdict import ExpiringDict
 import tempfile
 import subprocess
 import shutil
@@ -37,13 +43,19 @@ import requests
 from parsedmarc.log import logger
 import parsedmarc.resources.dbip
 import parsedmarc.resources.maps
+from parsedmarc.constants import USER_AGENT
 
 parenthesis_regex = re.compile(r"\s*\(.*\)\s*")
 
 null_file = open(os.devnull, "w")
 mailparser_logger = logging.getLogger("mailparser")
 mailparser_logger.setLevel(logging.CRITICAL)
+psl = publicsuffixlist.PublicSuffixList()
+psl_overrides_path = str(files(parsedmarc.resources.maps).joinpath("psl_overrides.txt"))
+with open(psl_overrides_path) as f:
+    psl_overrides = [line.rstrip() for line in f.readlines()]
+while "" in psl_overrides:
+    psl_overrides.remove("")
 
 
 class EmailParserError(RuntimeError):
@@ -54,12 +66,20 @@ class DownloadError(RuntimeError):
     """Raised when an error occurs when downloading a file"""
 
 
-def decode_base64(data):
+class EmailAddress(TypedDict):
+    """Parsed email address information"""
+    display_name: Optional[str]
+    address: str
+    local: Optional[str]
+    domain: Optional[str]
+
+
+def decode_base64(data: str) -> bytes:
     """
     Decodes a base64 string, with padding being optional
 
     Args:
-        data: A base64 encoded string
+        data (str): A base64 encoded string
 
     Returns:
         bytes: The decoded bytes
@@ -72,13 +92,14 @@ def decode_base64(data):
     return base64.b64decode(data)
 
 
-def get_base_domain(domain):
+def get_base_domain(domain: str) -> str:
     """
     Gets the base domain name for the given domain
 
     .. note::
         Results are based on a list of public domain suffixes at
-        https://publicsuffix.org/list/public_suffix_list.dat.
+        https://publicsuffix.org/list/public_suffix_list.dat and overrides included in
+        parsedmarc.resources.maps.psl_overrides.txt
 
     Args:
         domain (str): A domain or subdomain
@@ -87,11 +108,22 @@ def get_base_domain(domain):
         str: The base domain of the given domain
 
     """
-    psl = publicsuffixlist.PublicSuffixList()
-    return psl.privatesuffix(domain)
+    domain = domain.lower()
+    publicsuffix = psl.privatesuffix(domain)
+    for override in psl_overrides:
+        if domain.endswith(override):
+            return override.strip(".").strip("-")
+    return publicsuffix
 
 
-def query_dns(domain, record_type, cache=None, nameservers=None, timeout=2.0):
+def query_dns(
+    domain: str,
+    record_type: str,
+    *,
+    cache: Optional[ExpiringDict] = None,
+    nameservers: list[str] = None,
+    timeout: int = 2.0,
+) -> list[str]:
     """
     Queries DNS
 
@@ -152,7 +184,13 @@ def query_dns(domain, record_type, cache=None, nameservers=None, timeout=2.0):
     return records
 
 
-def get_reverse_dns(ip_address, cache=None, nameservers=None, timeout=2.0):
+def get_reverse_dns(
+    ip_address,
+    *,
+    cache: Optional[ExpiringDict] = None,
+    nameservers: list[str] = None,
+    timeout: int = 2.0,
+) -> str:
     """
     Resolves an IP address to a hostname using a reverse DNS query
 
@@ -180,7 +218,7 @@ def get_reverse_dns(ip_address, cache=None, nameservers=None, timeout=2.0):
     return hostname
 
 
-def timestamp_to_datetime(timestamp):
+def timestamp_to_datetime(timestamp: int) -> datetime:
     """
     Converts a UNIX/DMARC timestamp to a Python ``datetime`` object
 
@@ -193,7 +231,7 @@ def timestamp_to_datetime(timestamp):
     return datetime.fromtimestamp(int(timestamp))
 
 
-def timestamp_to_human(timestamp):
+def timestamp_to_human(timestamp: int) -> str:
     """
     Converts a UNIX/DMARC timestamp to a human-readable string
 
@@ -206,7 +244,9 @@ def timestamp_to_human(timestamp):
     return timestamp_to_datetime(timestamp).strftime("%Y-%m-%d %H:%M:%S")
 
 
-def human_timestamp_to_datetime(human_timestamp, to_utc=False):
+def human_timestamp_to_datetime(
+    human_timestamp: str, *, to_utc: Optional[bool] = False
+) -> datetime:
     """
     Converts a human-readable timestamp into a Python ``datetime`` object
 
@@ -225,7 +265,7 @@ def human_timestamp_to_datetime(human_timestamp, to_utc=False):
     return dt.astimezone(timezone.utc) if to_utc else dt
 
 
-def human_timestamp_to_unix_timestamp(human_timestamp):
+def human_timestamp_to_unix_timestamp(human_timestamp: str) -> int:
     """
     Converts a human-readable timestamp into a UNIX timestamp
 
@@ -239,7 +279,7 @@ def human_timestamp_to_unix_timestamp(human_timestamp):
     return human_timestamp_to_datetime(human_timestamp).timestamp()
 
 
-def get_ip_address_country(ip_address, db_path=None):
+def get_ip_address_country(ip_address: str, *, db_path: Optional[str] = None) -> str:
     """
     Returns the ISO code for the country associated
     with the given IPv4 or IPv6 address
@@ -266,7 +306,7 @@ def get_ip_address_country(ip_address, db_path=None):
     ]
 
     if db_path is not None:
-        if os.path.isfile(db_path) is False:
+        if not os.path.isfile(db_path):
             db_path = None
             logger.warning(
                 f"No file exists at {db_path}. Falling back to an "
@@ -302,13 +342,14 @@ def get_ip_address_country(ip_address, db_path=None):
 
 
 def get_service_from_reverse_dns_base_domain(
-    base_domain,
-    always_use_local_file=False,
-    local_file_path=None,
-    url=None,
-    offline=False,
-    reverse_dns_map=None,
-):
+    base_domain: str,
+    *,
+    always_use_local_file: Optional[bool] = False,
+    local_file_path: Optional[str] = None,
+    url: Optional[str] = None,
+    offline: Optional[bool] = False,
+    reverse_dns_map: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
     """
     Returns the service name of a given base domain name from reverse DNS.
 
@@ -345,7 +386,8 @@ def get_service_from_reverse_dns_base_domain(
     if not (offline or always_use_local_file) and len(reverse_dns_map) == 0:
         try:
             logger.debug(f"Trying to fetch reverse DNS map from {url}...")
-            response = requests.get(url)
+            headers = {"User-Agent": USER_AGENT}
+            response = requests.get(url, headers=headers)
             response.raise_for_status()
             csv_file.write(response.text)
             csv_file.seek(0)
@@ -355,6 +397,7 @@ def get_service_from_reverse_dns_base_domain(
         except Exception:
             logger.warning("Not a valid CSV file")
             csv_file.seek(0)
+            logging.debug("Response body:")
             logger.debug(csv_file.read())
 
     if len(reverse_dns_map) == 0:
@@ -375,17 +418,18 @@ def get_service_from_reverse_dns_base_domain(
 
 
 def get_ip_address_info(
-    ip_address,
-    ip_db_path=None,
-    reverse_dns_map_path=None,
-    always_use_local_files=False,
-    reverse_dns_map_url=None,
-    cache=None,
-    reverse_dns_map=None,
-    offline=False,
-    nameservers=None,
-    timeout=2.0,
-):
+    ip_address: str,
+    *,
+    ip_db_path: Optional[str] = None,
+    reverse_dns_map_path: Optional[str] = None,
+    always_use_local_files: Optional[bool] = False,
+    reverse_dns_map_url: Optional[str] = None,
+    cache: Optional[ExpiringDict] = None,
+    reverse_dns_map: Optional[dict[str, Any]] = None,
+    offline: Optional[bool] = False,
+    nameservers: Optional[list[str]] = None,
+    timeout: Optional[float] = 2.0,
+) -> dict[str, Any]:
     """
     Returns reverse DNS and country information for the given IP address
 
@@ -403,7 +447,7 @@ def get_ip_address_info(
         timeout (float): Sets the DNS timeout in seconds
 
     Returns:
-        OrderedDict: ``ip_address``, ``reverse_dns``
+        dict: ``ip_address``, ``reverse_dns``, ``country``
 
     """
     ip_address = ip_address.lower()
@@ -412,7 +456,7 @@ def get_ip_address_info(
     if info:
         logger.debug(f"IP address {ip_address} was found in cache")
         return info
-    info = OrderedDict()
+    info = dict()
     info["ip_address"] = ip_address
     if offline:
         reverse_dns = None
@@ -450,7 +494,7 @@ def get_ip_address_info(
     return info
 
 
-def parse_email_address(original_address):
+def parse_email_address(original_address: str) -> EmailAddress:
     if original_address[0] == "":
         display_name = None
     else:
@@ -463,17 +507,15 @@ def parse_email_address(original_address):
     local = address_parts[0].lower()
     domain = address_parts[-1].lower()
 
-    return OrderedDict(
-        [
-            ("display_name", display_name),
-            ("address", address),
-            ("local", local),
-            ("domain", domain),
-        ]
-    )
+    return {
+        "display_name": display_name,
+        "address": address,
+        "local": local,
+        "domain": domain,
+    }
 
 
-def get_filename_safe_string(string):
+def get_filename_safe_string(string: str) -> str:
     """
     Converts a string to a string that is safe for a filename
 
@@ -495,7 +537,7 @@ def get_filename_safe_string(string):
     return string
 
 
-def is_mbox(path):
+def is_mbox(path: str) -> bool:
     """
     Checks if the given content is an MBOX mailbox file
 
@@ -516,7 +558,7 @@ def is_mbox(path):
     return _is_mbox
 
 
-def is_outlook_msg(content):
+def is_outlook_msg(content: Union[bytes, Any]) -> bool:
     """
     Checks if the given content is an Outlook msg OLE/MSG file
 
@@ -531,7 +573,7 @@ def is_outlook_msg(content):
     )
 
 
-def convert_outlook_msg(msg_bytes):
+def convert_outlook_msg(msg_bytes: bytes) -> str:
     """
     Uses the ``msgconvert`` Perl utility to convert an Outlook MS file to
     standard RFC 822 format
@@ -549,13 +591,14 @@ def convert_outlook_msg(msg_bytes):
     os.chdir(tmp_dir)
     with open("sample.msg", "wb") as msg_file:
         msg_file.write(msg_bytes)
+    rfc822_bytes: bytes
     try:
         subprocess.check_call(
             ["msgconvert", "sample.msg"], stdout=null_file, stderr=null_file
         )
         eml_path = "sample.eml"
         with open(eml_path, "rb") as eml_file:
-            rfc822 = eml_file.read()
+            rfc822_bytes = eml_file.read()
     except FileNotFoundError:
         raise EmailParserError(
            "Failed to convert Outlook MSG: msgconvert utility not found"
@@ -564,10 +607,12 @@ def convert_outlook_msg(msg_bytes):
     os.chdir(orig_dir)
     shutil.rmtree(tmp_dir)
 
-    return rfc822
+    return rfc822_bytes.decode("utf-8", errors="replace")
 
 
-def parse_email(data, strip_attachment_payloads=False):
+def parse_email(
+    data: Union[bytes, str], *, strip_attachment_payloads: Optional[bool] = False
+) -> dict[str, Any]:
     """
     A simplified email parser
 
@@ -582,7 +627,8 @@ def parse_email(data, strip_attachment_payloads=False):
     if isinstance(data, bytes):
         if is_outlook_msg(data):
             data = convert_outlook_msg(data)
-        data = data.decode("utf-8", errors="replace")
+        else:
+            data = data.decode("utf-8", errors="replace")
     parsed_email = mailparser.parse_from_string(data)
     headers = json.loads(parsed_email.headers_json).copy()
     parsed_email = json.loads(parsed_email.mail_json).copy()
@@ -1,12 +1,25 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import annotations
+
+from typing import Any, Optional, Union
+
 import requests
 
 from parsedmarc import logger
+from parsedmarc.constants import USER_AGENT
 
 
 class WebhookClient(object):
     """A client for webhooks"""
 
-    def __init__(self, aggregate_url, forensic_url, smtp_tls_url, timeout=60):
+    def __init__(
+        self,
+        aggregate_url: str,
+        forensic_url: str,
+        smtp_tls_url: str,
+        timeout: Optional[int] = 60,
+    ):
         """
         Initializes the WebhookClient
         Args:
@@ -21,29 +34,31 @@ class WebhookClient(object):
         self.timeout = timeout
         self.session = requests.Session()
         self.session.headers = {
-            "User-Agent": "parsedmarc",
+            "User-Agent": USER_AGENT,
             "Content-Type": "application/json",
         }
 
-    def save_forensic_report_to_webhook(self, report):
+    def save_forensic_report_to_webhook(self, report: str):
         try:
             self._send_to_webhook(self.forensic_url, report)
         except Exception as error_:
             logger.error("Webhook Error: {0}".format(error_.__str__()))
 
-    def save_smtp_tls_report_to_webhook(self, report):
+    def save_smtp_tls_report_to_webhook(self, report: str):
         try:
             self._send_to_webhook(self.smtp_tls_url, report)
         except Exception as error_:
             logger.error("Webhook Error: {0}".format(error_.__str__()))
 
-    def save_aggregate_report_to_webhook(self, report):
+    def save_aggregate_report_to_webhook(self, report: str):
         try:
             self._send_to_webhook(self.aggregate_url, report)
         except Exception as error_:
             logger.error("Webhook Error: {0}".format(error_.__str__()))
 
-    def _send_to_webhook(self, webhook_url, payload):
+    def _send_to_webhook(
+        self, webhook_url: str, payload: Union[bytes, str, dict[str, Any]]
+    ):
         try:
             self.session.post(webhook_url, data=payload, timeout=self.timeout)
         except Exception as error_:
@@ -2,6 +2,7 @@
 requires = [
     "hatchling>=1.27.0",
 ]
+requires_python = ">=3.9,<3.14"
 build-backend = "hatchling.build"
 
 [project]
@@ -28,6 +29,7 @@ classifiers = [
     "Operating System :: OS Independent",
     "Programming Language :: Python :: 3"
 ]
+requires-python = ">=3.9, <3.14"
 dependencies = [
     "azure-identity>=1.8.0",
     "azure-monitor-ingestion>=1.0.0",
@@ -55,6 +57,7 @@ dependencies = [
     "tqdm>=4.31.1",
     "urllib3>=1.25.7",
     "xmltodict>=0.12.0",
+    "PyYAML>=6.0.3"
 ]
 
 [project.optional-dependencies]
@@ -76,9 +79,20 @@ parsedmarc = "parsedmarc.cli:_main"
 Homepage = "https://domainaware.github.io/parsedmarc"
 
 [tool.hatch.version]
-path = "parsedmarc/__init__.py"
+path = "parsedmarc/constants.py"
 
 [tool.hatch.build.targets.sdist]
 include = [
     "/parsedmarc",
 ]
+
+[tool.hatch.build]
+exclude = [
+    "base_reverse_dns.csv",
+    "find_bad_utf8.py",
+    "find_unknown_base_reverse_dns.py",
+    "unknown_base_reverse_dns.csv",
+    "sortmaps.py",
+    "README.md",
+    "*.bak"
+]
sortmaps.py (25 lines)
@@ -1,25 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import glob
-import csv
-
-
-maps_dir = os.path.join("parsedmarc", "resources", "maps")
-csv_files = glob.glob(os.path.join(maps_dir, "*.csv"))
-
-
-def sort_csv(filepath, column=0):
-    with open(filepath, mode="r", newline="") as infile:
-        reader = csv.reader(infile)
-        header = next(reader)
-        sorted_rows = sorted(reader, key=lambda row: row[column])
-
-    with open(filepath, mode="w", newline="\n") as outfile:
-        writer = csv.writer(outfile)
-        writer.writerow(header)
-        writer.writerows(sorted_rows)
-
-
-for csv_file in csv_files:
-    sort_csv(csv_file)
splunk/smtp_tls_dashboard.xml (new file, 107 lines)
@@ -0,0 +1,107 @@
+<form version="1.1" theme="dark">
+  <label>SMTP TLS Reporting</label>
+  <fieldset submitButton="false" autoRun="true">
+    <input type="time" token="time">
+      <label></label>
+      <default>
+        <earliest>-7d@h</earliest>
+        <latest>now</latest>
+      </default>
+    </input>
+    <input type="text" token="organization_name" searchWhenChanged="true">
+      <label>Organization name</label>
+      <default>*</default>
+      <initialValue>*</initialValue>
+    </input>
+    <input type="text" token="policy_domain">
+      <label>Policy domain</label>
+      <default>*</default>
+      <initialValue>*</initialValue>
+    </input>
+    <input type="dropdown" token="policy_type" searchWhenChanged="true">
+      <label>Policy type</label>
+      <choice value="*">Any</choice>
+      <choice value="tlsa">tlsa</choice>
+      <choice value="sts">sts</choice>
+      <choice value="no-policy-found">no-policy-found</choice>
+      <default>*</default>
+      <initialValue>*</initialValue>
+    </input>
+  </fieldset>
+  <row>
+    <panel>
+      <title>Reporting organizations</title>
+      <table>
+        <search>
+          <query>index=email sourcetype=smtp:tls organization_name=$organization_name$ policies{}.policy_domain=$policy_domain$
+            | rename policies{}.policy_domain as policy_domain
+            | rename policies{}.policy_type as policy_type
+            | rename policies{}.failed_session_count as failed_sessions
+            | rename policies{}.failure_details{}.failed_session_count as failed_sessions
+            | rename policies{}.successful_session_count as successful_sessions
+            | rename policies{}.failure_details{}.sending_mta_ip as sending_mta_ip
+            | rename policies{}.failure_details{}.receiving_ip as receiving_ip
+            | rename policies{}.failure_details{}.receiving_mx_hostname as receiving_mx_hostname
+            | rename policies{}.failure_details{}.result_type as failure_type
+            | fillnull value=0 failed_sessions
+            | stats sum(failed_sessions) as failed_sessions sum(successful_sessions) as successful_sessions by organization_name
+            | sort -successful_sessions 0</query>
+          <earliest>$time.earliest$</earliest>
+          <latest>$time.latest$</latest>
+        </search>
+        <option name="drilldown">none</option>
+        <option name="refresh.display">progressbar</option>
+      </table>
+    </panel>
+    <panel>
+      <title>Domains</title>
+      <table>
+        <search>
+          <query>index=email sourcetype=smtp:tls organization_name=$organization_name$ policies{}.policy_domain=$policy_domain$
+            | rename policies{}.policy_domain as policy_domain
+            | rename policies{}.policy_type as policy_type
+            | rename policies{}.failed_session_count as failed_sessions
+            | rename policies{}.failure_details{}.failed_session_count as failed_sessions
+            | rename policies{}.successful_session_count as successful_sessions
+            | rename policies{}.failure_details{}.sending_mta_ip as sending_mta_ip
+            | rename policies{}.failure_details{}.receiving_ip as receiving_ip
+            | rename policies{}.failure_details{}.receiving_mx_hostname as receiving_mx_hostname
+            | rename policies{}.failure_details{}.result_type as failure_type
+            | fillnull value=0 failed_sessions
+            | stats sum(failed_sessions) as failed_sessions sum(successful_sessions) as successful_sessions by policy_domain
+            | sort -successful_sessions 0</query>
+          <earliest>$time.earliest$</earliest>
+          <latest>$time.latest$</latest>
+        </search>
+        <option name="drilldown">none</option>
+        <option name="refresh.display">progressbar</option>
+      </table>
+    </panel>
+  </row>
+  <row>
+    <panel>
+      <title>Failure details</title>
+      <table>
+        <search>
+          <query>index=email sourcetype=smtp:tls organization_name=$organization_name$ policies{}.policy_domain=$policy_domain$ policies{}.failure_details{}.result_type=*
+            | rename policies{}.policy_domain as policy_domain
+            | rename policies{}.policy_type as policy_type
+            | rename policies{}.failed_session_count as failed_sessions
+            | rename policies{}.failure_details{}.failed_session_count as failed_sessions
+            | rename policies{}.successful_session_count as successful_sessions
+            | rename policies{}.failure_details{}.sending_mta_ip as sending_mta_ip
+            | rename policies{}.failure_details{}.receiving_ip as receiving_ip
+            | rename policies{}.failure_details{}.receiving_mx_hostname as receiving_mx_hostname
+            | fillnull value=0 failed_sessions
+            | rename policies{}.failure_details{}.result_type as failure_type
+            | table _time organization_name policy_domain policy_type failed_sessions successful_sessions sending_mta_ip receiving_ip receiving_mx_hostname failure_type
+            | sort by -_time 0</query>
+          <earliest>$time.earliest$</earliest>
+          <latest>$time.latest$</latest>
+        </search>
+        <option name="drilldown">none</option>
+        <option name="refresh.display">progressbar</option>
+      </table>
+    </panel>
+  </row>
+</form>
tests.py (39 lines)
@@ -1,3 +1,6 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
 from __future__ import absolute_import, print_function, unicode_literals
 
 import os
@@ -43,11 +46,12 @@ class Test(unittest.TestCase):
 
     def testExtractReportXMLComparator(self):
         """Test XML comparator function"""
-        print()
-        xmlnice = open("samples/extract_report/nice-input.xml").read()
-        print(xmlnice)
-        xmlchanged = minify_xml(open("samples/extract_report/changed-input.xml").read())
-        print(xmlchanged)
+        xmlnice_file = open("samples/extract_report/nice-input.xml")
+        xmlnice = xmlnice_file.read()
+        xmlnice_file.close()
+        xmlchanged_file = open("samples/extract_report/changed-input.xml")
+        xmlchanged = minify_xml(xmlchanged_file.read())
+        xmlchanged_file.close()
         self.assertTrue(compare_xml(xmlnice, xmlnice))
         self.assertTrue(compare_xml(xmlchanged, xmlchanged))
         self.assertFalse(compare_xml(xmlnice, xmlchanged))
@@ -62,7 +66,9 @@ class Test(unittest.TestCase):
             data = f.read()
             print("Testing {0}: ".format(file), end="")
             xmlout = parsedmarc.extract_report(data)
-            xmlin = open("samples/extract_report/nice-input.xml").read()
+            xmlin_file = open("samples/extract_report/nice-input.xml")
+            xmlin = xmlin_file.read()
+            xmlin_file.close()
             self.assertTrue(compare_xml(xmlout, xmlin))
             print("Passed!")
 
@@ -71,8 +77,10 @@ class Test(unittest.TestCase):
         print()
         file = "samples/extract_report/nice-input.xml"
         print("Testing {0}: ".format(file), end="")
-        xmlout = parsedmarc.extract_report(file)
-        xmlin = open("samples/extract_report/nice-input.xml").read()
+        xmlout = parsedmarc.extract_report_from_file_path(file)
+        xmlin_file = open("samples/extract_report/nice-input.xml")
+        xmlin = xmlin_file.read()
+        xmlin_file.close()
         self.assertTrue(compare_xml(xmlout, xmlin))
         print("Passed!")
 
@@ -82,7 +90,9 @@ class Test(unittest.TestCase):
         file = "samples/extract_report/nice-input.xml.gz"
         print("Testing {0}: ".format(file), end="")
         xmlout = parsedmarc.extract_report_from_file_path(file)
-        xmlin = open("samples/extract_report/nice-input.xml").read()
+        xmlin_file = open("samples/extract_report/nice-input.xml")
+        xmlin = xmlin_file.read()
+        xmlin_file.close()
         self.assertTrue(compare_xml(xmlout, xmlin))
         print("Passed!")
 
@@ -92,12 +102,13 @@ class Test(unittest.TestCase):
         file = "samples/extract_report/nice-input.xml.zip"
         print("Testing {0}: ".format(file), end="")
         xmlout = parsedmarc.extract_report_from_file_path(file)
-        print(xmlout)
-        xmlin = minify_xml(open("samples/extract_report/nice-input.xml").read())
-        print(xmlin)
+        xmlin_file = open("samples/extract_report/nice-input.xml")
+        xmlin = minify_xml(xmlin_file.read())
+        xmlin_file.close()
         self.assertTrue(compare_xml(xmlout, xmlin))
-        xmlin = minify_xml(open("samples/extract_report/changed-input.xml").read())
-        print(xmlin)
+        xmlin_file = open("samples/extract_report/changed-input.xml")
+        xmlin = xmlin_file.read()
+        xmlin_file.close()
         self.assertFalse(compare_xml(xmlout, xmlin))
         print("Passed!")
 