Compare commits


2 Commits

Author       SHA1        Message                                             Date
Sean Whalen  66506056ac  Upgrade elasticsearch version to >=8.0.0            2025-03-22 14:15:05 -04:00
Sean Whalen  eb912ce68d  Upgrade elasticsearch version from 7.4.0 to 8.17.1  2025-03-22 14:07:52 -04:00
60 changed files with 1703 additions and 7639 deletions

View File

@@ -1,72 +0,0 @@
name: Bug report
description: Report a reproducible parsedmarc bug
title: "[Bug]: "
labels:
- bug
body:
- type: input
id: version
attributes:
label: parsedmarc version
description: Include the parsedmarc version or commit if known.
placeholder: 9.x.x
validations:
required: true
- type: dropdown
id: input_backend
attributes:
label: Input backend
description: Which input path or mailbox backend is involved?
options:
- IMAP
- MS Graph
- Gmail API
- Maildir
- mbox
- Local file / direct parse
- Other
validations:
required: true
- type: textarea
id: environment
attributes:
label: Environment
description: Runtime, container image, OS, Python version, or deployment details.
placeholder: Docker on Debian, Python 3.12, parsedmarc installed from PyPI
validations:
required: true
- type: textarea
id: config
attributes:
label: Sanitized config
description: Include the relevant config fragment with secrets removed.
render: ini
- type: textarea
id: steps
attributes:
label: Steps to reproduce
description: Describe the smallest reproducible sequence you can.
placeholder: |
1. Configure parsedmarc with ...
2. Run ...
3. Observe ...
validations:
required: true
- type: textarea
id: expected_actual
attributes:
label: Expected vs actual behavior
description: What did you expect, and what happened instead?
validations:
required: true
- type: textarea
id: logs
attributes:
label: Logs or traceback
description: Paste sanitized logs or a traceback if available.
render: text
- type: textarea
id: samples
attributes:
label: Sample report availability
description: If you can share a sanitized sample report or message, note that here.

View File

@@ -1,5 +0,0 @@
blank_issues_enabled: true
contact_links:
- name: Security issue
url: https://github.com/domainaware/parsedmarc/security/policy
about: Please use the security policy and avoid filing public issues for undisclosed vulnerabilities.

View File

@@ -1,30 +0,0 @@
name: Feature request
description: Suggest a new feature or behavior change
title: "[Feature]: "
labels:
- enhancement
body:
- type: textarea
id: problem
attributes:
label: Problem statement
description: What workflow or limitation are you trying to solve?
validations:
required: true
- type: textarea
id: proposal
attributes:
label: Proposed behavior
description: Describe the feature or behavior you want.
validations:
required: true
- type: textarea
id: alternatives
attributes:
label: Alternatives considered
description: Describe workarounds or alternative approaches you considered.
- type: textarea
id: impact
attributes:
label: Compatibility or operational impact
description: Note config, output, performance, or deployment implications if relevant.

View File

@@ -1,24 +0,0 @@
## Summary
-
## Why
-
## Testing
-
## Backward Compatibility / Risk
-
## Related Issue
- Closes #
## Checklist
- [ ] Tests added or updated if behavior changed
- [ ] Docs updated if config or user-facing behavior changed

View File

@@ -24,11 +24,11 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v3
- name: Docker meta
id: meta
uses: docker/metadata-action@v5
uses: docker/metadata-action@v3
with:
images: |
${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
@@ -40,14 +40,16 @@ jobs:
type=semver,pattern={{major}}.{{minor}}
- name: Log in to the Container registry
uses: docker/login-action@v3
# https://github.com/docker/login-action/releases/tag/v2.0.0
uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push Docker image
uses: docker/build-push-action@v6
# https://github.com/docker/build-push-action/releases/tag/v3.0.0
uses: docker/build-push-action@e551b19e49efd4e98792db7592c17c09b89db8d8
with:
context: .
push: ${{ github.event_name == 'release' }}

View File

@@ -10,67 +10,43 @@ on:
branches: [ master ]
jobs:
lint-docs-build:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- name: Set up Python
uses: actions/setup-python@v6
with:
python-version: "3.13"
- name: Install Python dependencies
run: |
python -m pip install --upgrade pip
pip install .[build]
- name: Check code style
run: |
ruff check .
- name: Test building documentation
run: |
cd docs
make html
- name: Test building packages
run: |
hatch build
test:
needs: lint-docs-build
runs-on: ubuntu-latest
services:
elasticsearch:
image: elasticsearch:8.19.7
env:
discovery.type: single-node
cluster.name: parsedmarc-cluster
discovery.seed_hosts: elasticsearch
bootstrap.memory_lock: true
xpack.security.enabled: false
xpack.license.self_generated.type: basic
ports:
- 9200:9200
- 9300:9300
strategy:
fail-fast: false
matrix:
python-version: ["3.10", "3.11", "3.12", "3.13", "3.14"]
python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v6
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install system dependencies
run: |
sudo apt-get -q update
sudo apt-get -qy install libemail-outlook-message-perl
sudo apt-get update
sudo apt-get install -y libemail-outlook-message-perl
wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo gpg --dearmor -o /usr/share/keyrings/elasticsearch-keyring.gpg
sudo apt-get install apt-transport-https
echo "deb [signed-by=/usr/share/keyrings/elasticsearch-keyring.gpg] https://artifacts.elastic.co/packages/8.x/apt stable main" | sudo tee /etc/apt/sources.list.d/elastic-8.x.list
sudo apt-get update && sudo apt-get install elasticsearch
sudo sed -i 's/xpack.security.enabled: true/xpack.security.enabled: false/' /etc/elasticsearch/elasticsearch.yml
sudo systemctl restart elasticsearch
sudo systemctl --no-pager status elasticsearch
- name: Install Python dependencies
run: |
python -m pip install --upgrade pip
pip install .[build]
- name: Test building documentation
run: |
cd docs
make html
- name: Check code style
run: |
ruff check .
- name: Run unit tests
run: |
pytest --cov --cov-report=xml tests.py
@@ -79,7 +55,10 @@ jobs:
pip install -e .
parsedmarc --debug -c ci.ini samples/aggregate/*
parsedmarc --debug -c ci.ini samples/forensic/*
- name: Test building packages
run: |
hatch build
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v5
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}

.gitignore (vendored, 8 changed lines)
View File

@@ -106,7 +106,7 @@ ENV/
.idea/
# VS Code launch config
#.vscode/launch.json
.vscode/launch.json
# Visual Studio Code settings
#.vscode/
@@ -139,9 +139,3 @@ samples/private
parsedmarc.ini
scratch.py
parsedmarc/resources/maps/base_reverse_dns.csv
parsedmarc/resources/maps/unknown_base_reverse_dns.csv
parsedmarc/resources/maps/sus_domains.csv
parsedmarc/resources/maps/unknown_domains.txt
*.bak

.vscode/launch.json (vendored, 45 changed lines)
View File

@@ -1,45 +0,0 @@
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"name": "Python Debugger: Current File",
"type": "debugpy",
"request": "launch",
"program": "${file}",
"console": "integratedTerminal"
},
{
"name": "tests.py",
"type": "debugpy",
"request": "launch",
"program": "tests.py",
"console": "integratedTerminal"
},
{
"name": "sample",
"type": "debugpy",
"request": "launch",
"module": "parsedmarc.cli",
"args": ["samples/private/sample"]
},
{
"name": "sortlists.py",
"type": "debugpy",
"request": "launch",
"program": "sortlists.py",
"cwd": "${workspaceFolder}/parsedmarc/resources/maps",
"console": "integratedTerminal"
},
{
"name": "find_unknown_base_reverse_dns.py",
"type": "debugpy",
"request": "launch",
"program": "find_unknown_base_reverse_dns.py",
"cwd": "${workspaceFolder}/parsedmarc/resources/maps",
"console": "integratedTerminal"
}
]
}

.vscode/settings.json (vendored, 284 changed lines)
View File

@@ -1,166 +1,132 @@
{
"[python]": {
"editor.defaultFormatter": "charliermarsh.ruff",
"editor.formatOnSave": true,
// Let Ruff handle lint fixes + import sorting on save
"editor.codeActionsOnSave": {
"source.fixAll.ruff": "explicit",
"source.organizeImports.ruff": "explicit"
}
},
"markdownlint.config": {
"MD024": false
},
"cSpell.words": [
"adkim",
"akamaiedge",
"amsmath",
"andrewmcgilvray",
"arcname",
"aspf",
"autoclass",
"automodule",
"backported",
"bellsouth",
"boto",
"brakhane",
"Brightmail",
"CEST",
"CHACHA",
"checkdmarc",
"Codecov",
"confnew",
"dateparser",
"dateutil",
"Davmail",
"DBIP",
"dearmor",
"deflist",
"devel",
"DMARC",
"Dmarcian",
"dnspython",
"dollarmath",
"dpkg",
"exampleuser",
"expiringdict",
"fieldlist",
"GELF",
"genindex",
"geoip",
"geoipupdate",
"Geolite",
"geolocation",
"githubpages",
"Grafana",
"hostnames",
"htpasswd",
"httpasswd",
"httplib",
"ifhost",
"IMAP",
"imapclient",
"infile",
"Interaktive",
"IPDB",
"journalctl",
"kafkaclient",
"keepalive",
"keyout",
"keyrings",
"Leeman",
"libemail",
"linkify",
"LISTSERV",
"loganalytics",
"lxml",
"mailparser",
"mailrelay",
"mailsuite",
"maxdepth",
"MAXHEADERS",
"maxmind",
"mbox",
"mfrom",
"mhdw",
"michaeldavie",
"mikesiegel",
"Mimecast",
"mitigations",
"MMDB",
"modindex",
"msgconvert",
"msgraph",
"MSSP",
"multiprocess",
"Munge",
"ndjson",
"newkey",
"Nhcm",
"nojekyll",
"nondigest",
"nosecureimap",
"nosniff",
"nwettbewerb",
"opensearch",
"opensearchpy",
"parsedmarc",
"passsword",
"pbar",
"Postorius",
"premade",
"privatesuffix",
"procs",
"publicsuffix",
"publicsuffixlist",
"publixsuffix",
"pygelf",
"pypy",
"pytest",
"quickstart",
"Reindex",
"replyto",
"reversename",
"Rollup",
"Rpdm",
"SAMEORIGIN",
"sdist",
"Servernameone",
"setuptools",
"smartquotes",
"SMTPTLS",
"sortlists",
"sortmaps",
"sourcetype",
"STARTTLS",
"tasklist",
"timespan",
"tlsa",
"tlsrpt",
"toctree",
"TQDDM",
"tqdm",
"truststore",
"Übersicht",
"uids",
"Uncategorized",
"unparasable",
"uper",
"urllib",
"Valimail",
"venv",
"Vhcw",
"viewcode",
"virtualenv",
"WBITS",
"webmail",
"Wettbewerber",
"Whalen",
"whitespaces",
"xennn",
"xmltodict",
"xpack",
"zscholl"
"adkim",
"akamaiedge",
"amsmath",
"andrewmcgilvray",
"arcname",
"aspf",
"autoclass",
"automodule",
"backported",
"bellsouth",
"brakhane",
"Brightmail",
"CEST",
"CHACHA",
"checkdmarc",
"Codecov",
"confnew",
"dateparser",
"dateutil",
"Davmail",
"DBIP",
"dearmor",
"deflist",
"devel",
"DMARC",
"Dmarcian",
"dnspython",
"dollarmath",
"dpkg",
"exampleuser",
"expiringdict",
"fieldlist",
"genindex",
"geoipupdate",
"Geolite",
"geolocation",
"githubpages",
"Grafana",
"hostnames",
"htpasswd",
"httpasswd",
"IMAP",
"Interaktive",
"IPDB",
"journalctl",
"keepalive",
"keyout",
"keyrings",
"Leeman",
"libemail",
"linkify",
"LISTSERV",
"lxml",
"mailparser",
"mailrelay",
"mailsuite",
"maxdepth",
"maxmind",
"mbox",
"mfrom",
"michaeldavie",
"mikesiegel",
"mitigations",
"MMDB",
"modindex",
"msgconvert",
"msgraph",
"MSSP",
"Munge",
"ndjson",
"newkey",
"Nhcm",
"nojekyll",
"nondigest",
"nosecureimap",
"nosniff",
"nwettbewerb",
"parsedmarc",
"passsword",
"Postorius",
"premade",
"procs",
"publicsuffix",
"publixsuffix",
"pypy",
"quickstart",
"Reindex",
"replyto",
"reversename",
"Rollup",
"Rpdm",
"SAMEORIGIN",
"Servernameone",
"setuptools",
"smartquotes",
"SMTPTLS",
"sourcetype",
"STARTTLS",
"tasklist",
"timespan",
"tlsa",
"tlsrpt",
"toctree",
"TQDDM",
"tqdm",
"truststore",
"Übersicht",
"uids",
"unparasable",
"uper",
"urllib",
"Valimail",
"venv",
"Vhcw",
"viewcode",
"virtualenv",
"WBITS",
"webmail",
"Wettbewerber",
"Whalen",
"whitespaces",
"xennn",
"xmltodict",
"xpack",
"zscholl"
],
}

View File

@@ -1,64 +0,0 @@
# AGENTS.md
This file provides guidance to AI agents when working with code in this repository.
## Project Overview
parsedmarc is a Python module and CLI utility for parsing DMARC aggregate (RUA), forensic (RUF), and SMTP TLS reports. It reads reports from IMAP, Microsoft Graph, Gmail API, Maildir, mbox files, or direct file paths, and outputs to JSON/CSV, Elasticsearch, OpenSearch, Splunk, Kafka, S3, Azure Log Analytics, syslog, or webhooks.
## Common Commands
```bash
# Install with dev/build dependencies
pip install .[build]
# Run all tests with coverage
pytest --cov --cov-report=xml tests.py
# Run a single test
pytest tests.py::Test::testAggregateSamples
# Lint and format
ruff check .
ruff format .
# Test CLI with sample reports
parsedmarc --debug -c ci.ini samples/aggregate/*
parsedmarc --debug -c ci.ini samples/forensic/*
# Build docs
cd docs && make html
# Build distribution
hatch build
```
To skip DNS lookups during testing, set `GITHUB_ACTIONS=true`.
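For example, a minimal sketch (assuming a POSIX shell) that runs the suite with DNS lookups skipped:

```bash
# Setting GITHUB_ACTIONS=true makes the tests skip live DNS lookups
GITHUB_ACTIONS=true pytest --cov --cov-report=xml tests.py
```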
## Architecture
**Data flow:** Input sources → CLI (`cli.py:_main`) → Parse (`__init__.py`) → Enrich (DNS/GeoIP via `utils.py`) → Output integrations
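A minimal usage sketch of that flow from the library side (the sample path is a hypothetical placeholder, and the exact shape of the returned structure is an assumption based on the entry points listed below):

```python
from parsedmarc import parse_report_file

# Hypothetical sample path; parse_report_file() detects whether the file
# is an aggregate, forensic, or SMTP TLS report and parses it accordingly.
results = parse_report_file("samples/aggregate/example.xml")

# The parsed output is a dict-like structure that the CLI then routes to
# JSON/CSV files or to the configured output integrations.
print(results)
```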
### Key modules
- `parsedmarc/__init__.py` — Core parsing logic. Main functions: `parse_report_file()`, `parse_report_email()`, `parse_aggregate_report_xml()`, `parse_forensic_report()`, `parse_smtp_tls_report_json()`, `get_dmarc_reports_from_mailbox()`, `watch_inbox()`
- `parsedmarc/cli.py` — CLI entry point (`_main`), config file parsing, output orchestration
- `parsedmarc/types.py` — TypedDict definitions for all report types (`AggregateReport`, `ForensicReport`, `SMTPTLSReport`, `ParsingResults`)
- `parsedmarc/utils.py` — IP/DNS/GeoIP enrichment, base64 decoding, compression handling
- `parsedmarc/mail/` — Polymorphic mail connections: `IMAPConnection`, `GmailConnection`, `MSGraphConnection`, `MaildirConnection`
- `parsedmarc/{elastic,opensearch,splunk,kafkaclient,loganalytics,syslog,s3,webhook,gelf}.py` — Output integrations
### Report type system
`ReportType = Literal["aggregate", "forensic", "smtp_tls"]`. Exception hierarchy: `ParserError` → `InvalidDMARCReport` → `InvalidAggregateReport`/`InvalidForensicReport`, and `InvalidSMTPTLSReport`.
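A minimal sketch of that hierarchy (the class names come from the source; the `RuntimeError` base, the docstrings, and the placement of `InvalidSMTPTLSReport` directly under `ParserError` are assumptions):

```python
from typing import Literal

ReportType = Literal["aggregate", "forensic", "smtp_tls"]


class ParserError(RuntimeError):  # base class is an assumption
    """Base class for parsedmarc parsing errors."""


class InvalidDMARCReport(ParserError):
    """Raised when a DMARC report cannot be parsed."""


class InvalidAggregateReport(InvalidDMARCReport):
    """Raised for malformed aggregate (rua) reports."""


class InvalidForensicReport(InvalidDMARCReport):
    """Raised for malformed forensic (ruf) reports."""


class InvalidSMTPTLSReport(ParserError):
    """Raised for malformed SMTP TLS reports."""
```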
### Caching
IP address info cached for 4 hours, seen aggregate report IDs cached for 1 hour (via `ExpiringDict`).
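A minimal sketch of that caching pattern with the `expiringdict` package (the `max_len` values are illustrative assumptions; only the expiry ages come from the source):

```python
from expiringdict import ExpiringDict

# IP address enrichment results expire after 4 hours
IP_ADDRESS_CACHE = ExpiringDict(max_len=10000, max_age_seconds=4 * 60 * 60)

# Seen aggregate report IDs expire after 1 hour, deduplicating reports
SEEN_AGGREGATE_REPORT_IDS = ExpiringDict(max_len=10000, max_age_seconds=60 * 60)
```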
## Code Style
- Ruff for formatting and linting (configured in `.vscode/settings.json`)
- TypedDict for structured data, type hints throughout
- Python ≥3.10 required
- Tests are in a single `tests.py` file using unittest; sample reports live in `samples/`

File diff suppressed because it is too large

View File

@@ -1,3 +0,0 @@
# CLAUDE.md
@AGENTS.md

View File

@@ -1,78 +0,0 @@
# Contributing
Thanks for contributing to parsedmarc.
## Local setup
Use a virtual environment for local development.
```bash
python3 -m venv .venv
. .venv/bin/activate
python -m pip install --upgrade pip
pip install .[build]
```
## Before opening a pull request
Run the checks that match your change:
```bash
ruff check .
pytest --cov --cov-report=xml tests.py
```
If you changed documentation:
```bash
cd docs
make html
```
If you changed CLI behavior or parsing logic, it is also useful to exercise the
sample reports:
```bash
parsedmarc --debug -c ci.ini samples/aggregate/*
parsedmarc --debug -c ci.ini samples/forensic/*
```
To skip DNS lookups during tests, set:
```bash
GITHUB_ACTIONS=true
```
## Pull request guidelines
- Keep pull requests small and focused. Separate bug fixes, docs updates, and
repo-maintenance changes where practical.
- Add or update tests when behavior changes.
- Update docs when configuration or user-facing behavior changes.
- Include a short summary, the reason for the change, and the testing you ran.
- Link the related issue when there is one.
## Branch maintenance
Upstream `master` may move quickly. Before asking for review or after another PR
lands, rebase your branch onto the current upstream branch and force-push with
lease if needed:
```bash
git fetch upstream
git rebase upstream/master
git push --force-with-lease
```
## CI and coverage
GitHub Actions is the source of truth for linting, docs, and test status.
Codecov patch coverage is usually the most relevant signal for small PRs. Project
coverage can be noisier when the base comparison is stale, so interpret it in
the context of the actual diff.
## Questions
Use GitHub issues for bugs and feature requests. If you are not sure whether a
change is wanted, opening an issue first is usually the safest path.

View File

@@ -1,4 +1,4 @@
ARG BASE_IMAGE=python:3.13-slim
ARG BASE_IMAGE=python:3.9-slim
ARG USERNAME=parsedmarc
ARG USER_UID=1000
ARG USER_GID=$USER_UID

View File

@@ -9,7 +9,7 @@ Package](https://img.shields.io/pypi/v/parsedmarc.svg)](https://pypi.org/project
[![PyPI - Downloads](https://img.shields.io/pypi/dm/parsedmarc?color=blue)](https://pypistats.org/packages/parsedmarc)
<p align="center">
<img src="https://raw.githubusercontent.com/domainaware/parsedmarc/refs/heads/master/docs/source/_static/screenshots/dmarc-summary-charts.png?raw=true" alt="A screenshot of DMARC summary charts in Kibana"/>
<img src="https://github.com/domainaware/parsedmarc/raw/master/docs/source/_static/screenshots/dmarc-summary-charts.png?raw=true" alt="A screenshot of DMARC summary charts in Kibana"/>
</p>
`parsedmarc` is a Python module and CLI utility for parsing DMARC
@@ -23,42 +23,25 @@ ProofPoint Email Fraud Defense, and Valimail.
## Help Wanted
This project is maintained by one developer. Please consider reviewing the open
[issues](https://github.com/domainaware/parsedmarc/issues) to see how you can
contribute code, documentation, or user support. Assistance on the pinned
issues would be particularly helpful.
This project is maintained by one developer. Please consider
reviewing the open
[issues](https://github.com/domainaware/parsedmarc/issues) to see how
you can contribute code, documentation, or user support. Assistance on
the pinned issues would be particularly helpful.
Thanks to all
[contributors](https://github.com/domainaware/parsedmarc/graphs/contributors)!
## Features
- Parses draft and 1.0 standard aggregate/rua DMARC reports
- Parses forensic/failure/ruf DMARC reports
- Parses reports from SMTP TLS Reporting
- Can parse reports from an inbox over IMAP, Microsoft Graph, or Gmail API
- Parses draft and 1.0 standard aggregate/rua reports
- Parses forensic/failure/ruf reports
- Can parse reports from an inbox over IMAP, Microsoft Graph, or Gmail
API
- Transparently handles gzip or zip compressed reports
- Consistent data structures
- Simple JSON and/or CSV output
- Optionally email the results
- Optionally send the results to Elasticsearch, Opensearch, and/or Splunk, for
use with premade dashboards
- Optionally send the results to Elasticsearch, Opensearch, and/or Splunk, for use
with premade dashboards
- Optionally send reports to Apache Kafka
## Python Compatibility
This project supports the following Python versions, which are either actively maintained or are the default versions
for RHEL or Debian.
| Version | Supported | Reason |
|---------|-----------|------------------------------------------------------------|
| < 3.6 | ❌ | End of Life (EOL) |
| 3.6 | ❌ | Used in RHEL 8, but not supported by project dependencies |
| 3.7 | ❌ | End of Life (EOL) |
| 3.8 | ❌ | End of Life (EOL) |
| 3.9 | ❌ | Used in Debian 11 and RHEL 9, but not supported by project dependencies |
| 3.10 | ✅ | Actively maintained |
| 3.11 | ✅ | Actively maintained; supported until June 2028 (Debian 12) |
| 3.12 | ✅ | Actively maintained; supported until May 2035 (RHEL 10) |
| 3.13 | ✅ | Actively maintained; supported until June 2030 (Debian 13) |
| 3.14 | ✅ | Supported (requires `imapclient>=3.1.0`) |

View File

@@ -1,29 +0,0 @@
# Security Policy
## Reporting a vulnerability
Please do not open a public GitHub issue for an undisclosed security
vulnerability. Use GitHub private vulnerability reporting in the Security tab of this project instead.
When reporting a vulnerability, include:
- the affected parsedmarc version or commit
- the component or integration involved
- clear reproduction details if available
- potential impact
- any suggested mitigation or workaround
## Supported versions
Security fixes will be applied to the latest released version and
the current `master` branch.
Older versions will not receive backported fixes.
## Disclosure process
After a report is received, maintainers will validate the issue, assess its
impact, and coordinate a fix before public disclosure.
Please avoid publishing proof-of-concept details until maintainers have had a
reasonable opportunity to investigate and release a fix or mitigation.

View File

@@ -9,19 +9,16 @@ fi
. venv/bin/activate
pip install .[build]
ruff format .
ruff check .
cd docs
make clean
make html
touch build/html/.nojekyll
if [ -d "../../parsedmarc-docs" ]; then
if [ -d "./../parsedmarc-docs" ]; then
cp -rf build/html/* ../../parsedmarc-docs/
fi
cd ..
cd parsedmarc/resources/maps
python3 sortlists.py
echo "Checking for invalid UTF-8 bytes in base_reverse_dns_map.csv"
python3 find_bad_utf8.py base_reverse_dns_map.csv
cd ../../..
./sortmaps.py
python3 tests.py
rm -rf dist/ build/
hatch build
hatch build

ci.ini (1 changed line)
View File

@@ -3,7 +3,6 @@ save_aggregate = True
save_forensic = True
save_smtp_tls = True
debug = True
offline = True
[elasticsearch]
hosts = http://localhost:9200

View File

@@ -1,11 +0,0 @@
codecov:
require_ci_to_pass: true
coverage:
status:
project:
default:
informational: true
patch:
default:
informational: false

View File

@@ -1,6 +1,8 @@
version: '3.7'
services:
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch:8.19.7
image: docker.elastic.co/elasticsearch/elasticsearch:8.3.1
environment:
- network.host=127.0.0.1
- http.host=0.0.0.0
@@ -12,7 +14,7 @@ services:
- xpack.security.enabled=false
- xpack.license.self_generated.type=basic
ports:
- "127.0.0.1:9200:9200"
- 127.0.0.1:9200:9200
ulimits:
memlock:
soft: -1
@@ -28,7 +30,7 @@ services:
retries: 24
opensearch:
image: opensearchproject/opensearch:2
image: opensearchproject/opensearch:2.18.0
environment:
- network.host=127.0.0.1
- http.host=0.0.0.0
@@ -39,7 +41,7 @@ services:
- bootstrap.memory_lock=true
- OPENSEARCH_INITIAL_ADMIN_PASSWORD=${OPENSEARCH_INITIAL_ADMIN_PASSWORD}
ports:
- "127.0.0.1:9201:9200"
- 127.0.0.1:9201:9200
ulimits:
memlock:
soft: -1

View File

@@ -21,6 +21,7 @@
:members:
```
## parsedmarc.splunk
```{eval-rst}
@@ -28,13 +29,6 @@
:members:
```
## parsedmarc.types
```{eval-rst}
.. automodule:: parsedmarc.types
:members:
```
## parsedmarc.utils
```{eval-rst}

View File

@@ -20,7 +20,7 @@ from parsedmarc import __version__
# -- Project information -----------------------------------------------------
project = "parsedmarc"
copyright = "2018 - 2025, Sean Whalen and contributors"
copyright = "2018 - 2023, Sean Whalen and contributors"
author = "Sean Whalen and contributors"
# The version info for the project you're documenting, acts as replacement for

View File

@@ -33,36 +33,17 @@ and Valimail.
## Features
- Parses draft and 1.0 standard aggregate/rua DMARC reports
- Parses forensic/failure/ruf DMARC reports
- Parses reports from SMTP TLS Reporting
- Parses draft and 1.0 standard aggregate/rua reports
- Parses forensic/failure/ruf reports
- Can parse reports from an inbox over IMAP, Microsoft Graph, or Gmail API
- Transparently handles gzip or zip compressed reports
- Consistent data structures
- Simple JSON and/or CSV output
- Optionally email the results
- Optionally send the results to Elasticsearch, Opensearch, and/or Splunk, for use
with premade dashboards
- Optionally send the results to Elasticsearch/OpenSearch and/or Splunk, for use with
premade dashboards
- Optionally send reports to Apache Kafka
## Python Compatibility
This project supports the following Python versions, which are either actively maintained or are the default versions
for RHEL or Debian.
| Version | Supported | Reason |
|---------|-----------|------------------------------------------------------------|
| < 3.6 | ❌ | End of Life (EOL) |
| 3.6 | ❌ | Used in RHEL 8, but not supported by project dependencies |
| 3.7 | ❌ | End of Life (EOL) |
| 3.8 | ❌ | End of Life (EOL) |
| 3.9 | ❌ | Used in Debian 11 and RHEL 9, but not supported by project dependencies |
| 3.10 | ✅ | Actively maintained |
| 3.11 | ✅ | Actively maintained; supported until June 2028 (Debian 12) |
| 3.12 | ✅ | Actively maintained; supported until May 2035 (RHEL 10) |
| 3.13 | ✅ | Actively maintained; supported until June 2030 (Debian 13) |
| 3.14 | ✅ | Supported (requires `imapclient>=3.1.0`) |
```{toctree}
:caption: 'Contents'
:maxdepth: 2

View File

@@ -162,10 +162,10 @@ sudo -u parsedmarc virtualenv /opt/parsedmarc/venv
```
CentOS/RHEL 8 systems use Python 3.6 by default, so on those systems
explicitly tell `virtualenv` to use `python3.10` instead
explicitly tell `virtualenv` to use `python3.9` instead
```bash
sudo -u parsedmarc virtualenv -p python3.10 /opt/parsedmarc/venv
sudo -u parsedmarc virtualenv -p python3.9 /opt/parsedmarc/venv
```
Activate the virtualenv
@@ -199,7 +199,7 @@ sudo apt-get install libemail-outlook-message-perl
[geoipupdate releases page on github]: https://github.com/maxmind/geoipupdate/releases
[ip to country lite database]: https://db-ip.com/db/download/ip-to-country-lite
[license keys]: https://www.maxmind.com/en/accounts/current/license-key
[maxmind geoipupdate page]: https://dev.maxmind.com/geoip/updating-databases/
[maxmind geoipupdate page]: https://dev.maxmind.com/geoip/geoipupdate/
[maxmind geolite2 country database]: https://dev.maxmind.com/geoip/geolite2-free-geolocation-data
[registering for a free geolite2 account]: https://www.maxmind.com/en/geolite2/signup
[to comply with various privacy regulations]: https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases/

View File

@@ -23,8 +23,6 @@ of the report schema.
"report_id": "9391651994964116463",
"begin_date": "2012-04-27 20:00:00",
"end_date": "2012-04-28 19:59:59",
"timespan_requires_normalization": false,
"original_timespan_seconds": 86399,
"errors": []
},
"policy_published": {
@@ -41,10 +39,8 @@ of the report schema.
"source": {
"ip_address": "72.150.241.94",
"country": "US",
"reverse_dns": null,
"base_domain": null,
"name": null,
"type": null
"reverse_dns": "adsl-72-150-241-94.shv.bellsouth.net",
"base_domain": "bellsouth.net"
},
"count": 2,
"alignment": {
@@ -78,10 +74,7 @@ of the report schema.
"result": "pass"
}
]
},
"normalized_timespan": false,
"interval_begin": "2012-04-28 00:00:00",
"interval_end": "2012-04-28 23:59:59"
}
}
]
}
@@ -90,10 +83,8 @@ of the report schema.
### CSV aggregate report
```text
xml_schema,org_name,org_email,org_extra_contact_info,report_id,begin_date,end_date,normalized_timespan,errors,domain,adkim,aspf,p,sp,pct,fo,source_ip_address,source_country,source_reverse_dns,source_base_domain,source_name,source_type,count,spf_aligned,dkim_aligned,dmarc_aligned,disposition,policy_override_reasons,policy_override_comments,envelope_from,header_from,envelope_to,dkim_domains,dkim_selectors,dkim_results,spf_domains,spf_scopes,spf_results
draft,acme.com,noreply-dmarc-support@acme.com,http://acme.com/dmarc/support,9391651994964116463,2012-04-28 00:00:00,2012-04-28 23:59:59,False,,example.com,r,r,none,none,100,0,72.150.241.94,US,,,,,2,True,False,True,none,,,example.com,example.com,,example.com,none,fail,example.com,mfrom,pass
draft,acme.com,noreply-dmarc-support@acme.com,http://acme.com/dmarc/support,9391651994964116463,2012-04-28 00:00:00,2012-04-28 23:59:59,False,,example.com,r,r,none,none,100,0,72.150.241.94,US,,,,,2,True,False,True,none,,,example.com,example.com,,example.com,none,fail,example.com,mfrom,pass
xml_schema,org_name,org_email,org_extra_contact_info,report_id,begin_date,end_date,errors,domain,adkim,aspf,p,sp,pct,fo,source_ip_address,source_country,source_reverse_dns,source_base_domain,count,spf_aligned,dkim_aligned,dmarc_aligned,disposition,policy_override_reasons,policy_override_comments,envelope_from,header_from,envelope_to,dkim_domains,dkim_selectors,dkim_results,spf_domains,spf_scopes,spf_results
draft,acme.com,noreply-dmarc-support@acme.com,http://acme.com/dmarc/support,9391651994964116463,2012-04-27 20:00:00,2012-04-28 19:59:59,,example.com,r,r,none,none,100,0,72.150.241.94,US,adsl-72-150-241-94.shv.bellsouth.net,bellsouth.net,2,True,False,True,none,,,example.com,example.com,,example.com,none,fail,example.com,mfrom,pass
```
## Sample forensic report output

View File

@@ -4,50 +4,47 @@
```text
usage: parsedmarc [-h] [-c CONFIG_FILE] [--strip-attachment-payloads] [-o OUTPUT]
[--aggregate-json-filename AGGREGATE_JSON_FILENAME] [--forensic-json-filename FORENSIC_JSON_FILENAME]
[--smtp-tls-json-filename SMTP_TLS_JSON_FILENAME] [--aggregate-csv-filename AGGREGATE_CSV_FILENAME]
[--forensic-csv-filename FORENSIC_CSV_FILENAME] [--smtp-tls-csv-filename SMTP_TLS_CSV_FILENAME]
[-n NAMESERVERS [NAMESERVERS ...]] [-t DNS_TIMEOUT] [--offline] [-s] [-w] [--verbose] [--debug]
[--log-file LOG_FILE] [--no-prettify-json] [-v]
[file_path ...]
[--aggregate-json-filename AGGREGATE_JSON_FILENAME]
[--forensic-json-filename FORENSIC_JSON_FILENAME]
[--aggregate-csv-filename AGGREGATE_CSV_FILENAME]
[--forensic-csv-filename FORENSIC_CSV_FILENAME]
[-n NAMESERVERS [NAMESERVERS ...]] [-t DNS_TIMEOUT] [--offline]
[-s] [--verbose] [--debug] [--log-file LOG_FILE] [-v]
[file_path ...]
Parses DMARC reports
Parses DMARC reports
positional arguments:
file_path one or more paths to aggregate or forensic report files, emails, or mbox files'
positional arguments:
file_path one or more paths to aggregate or forensic report
files, emails, or mbox files'
options:
-h, --help show this help message and exit
-c CONFIG_FILE, --config-file CONFIG_FILE
a path to a configuration file (--silent implied)
--strip-attachment-payloads
remove attachment payloads from forensic report output
-o OUTPUT, --output OUTPUT
write output files to the given directory
--aggregate-json-filename AGGREGATE_JSON_FILENAME
filename for the aggregate JSON output file
--forensic-json-filename FORENSIC_JSON_FILENAME
filename for the forensic JSON output file
--smtp-tls-json-filename SMTP_TLS_JSON_FILENAME
filename for the SMTP TLS JSON output file
--aggregate-csv-filename AGGREGATE_CSV_FILENAME
filename for the aggregate CSV output file
--forensic-csv-filename FORENSIC_CSV_FILENAME
filename for the forensic CSV output file
--smtp-tls-csv-filename SMTP_TLS_CSV_FILENAME
filename for the SMTP TLS CSV output file
-n NAMESERVERS [NAMESERVERS ...], --nameservers NAMESERVERS [NAMESERVERS ...]
nameservers to query
-t DNS_TIMEOUT, --dns_timeout DNS_TIMEOUT
number of seconds to wait for an answer from DNS (default: 2.0)
--offline do not make online queries for geolocation or DNS
-s, --silent only print errors
-w, --warnings print warnings in addition to errors
--verbose more verbose output
--debug print debugging information
--log-file LOG_FILE output logging to a file
--no-prettify-json output JSON in a single line without indentation
-v, --version show program's version number and exit
optional arguments:
-h, --help show this help message and exit
-c CONFIG_FILE, --config-file CONFIG_FILE
a path to a configuration file (--silent implied)
--strip-attachment-payloads
remove attachment payloads from forensic report output
-o OUTPUT, --output OUTPUT
write output files to the given directory
--aggregate-json-filename AGGREGATE_JSON_FILENAME
filename for the aggregate JSON output file
--forensic-json-filename FORENSIC_JSON_FILENAME
filename for the forensic JSON output file
--aggregate-csv-filename AGGREGATE_CSV_FILENAME
filename for the aggregate CSV output file
--forensic-csv-filename FORENSIC_CSV_FILENAME
filename for the forensic CSV output file
-n NAMESERVERS [NAMESERVERS ...], --nameservers NAMESERVERS [NAMESERVERS ...]
nameservers to query
-t DNS_TIMEOUT, --dns_timeout DNS_TIMEOUT
number of seconds to wait for an answer from DNS
(default: 2.0)
--offline do not make online queries for geolocation or DNS
-s, --silent only print errors and warnings
--verbose more verbose output
--debug print debugging information
--log-file LOG_FILE output logging to a file
-v, --version show program's version number and exit
```
:::{note}
@@ -123,10 +120,8 @@ The full set of configuration options are:
Elasticsearch, Splunk and/or S3
- `save_smtp_tls` - bool: Save SMTP-STS report data to
Elasticsearch, Splunk and/or S3
- `index_prefix_domain_map` - str: A path to a YAML file mapping Opensearch/Elasticsearch index prefixes to domain names
- `strip_attachment_payloads` - bool: Remove attachment
payloads from results
- `silent` - bool: Set this to `False` to output results to STDOUT
- `output` - str: Directory to place JSON and CSV files in. This is required if you set either of the JSON output file options.
- `aggregate_json_filename` - str: filename for the aggregate
JSON output file
@@ -146,9 +141,6 @@ The full set of configuration options are:
- `dns_timeout` - float: DNS timeout period
- `debug` - bool: Print debugging messages
- `silent` - bool: Only print errors (Default: `True`)
- `fail_on_output_error` - bool: Exit with a non-zero status code if
any configured output destination fails while saving/publishing
reports (Default: `False`)
- `log_file` - str: Write log messages to a file at this path
- `n_procs` - int: Number of process to run in parallel when
parsing in CLI mode (Default: `1`)
@@ -174,8 +166,8 @@ The full set of configuration options are:
- `check_timeout` - int: Number of seconds to wait for a IMAP
IDLE response or the number of seconds until the next
mail check (Default: `30`)
- `since` - str: Search for messages since certain time. (Examples: `5m|3h|2d|1w`)
Acceptable units - {"m":"minutes", "h":"hours", "d":"days", "w":"weeks"}.
- `since` - str: Search for messages since certain time. (Examples: `5m|3h|2d|1w`)
Acceptable units - {"m":"minutes", "h":"hours", "d":"days", "w":"weeks"}).
Defaults to `1d` if incorrect value is provided.
- `imap`
- `host` - str: The IMAP server hostname or IP address
@@ -203,7 +195,7 @@ The full set of configuration options are:
- `password` - str: The IMAP password
- `msgraph`
- `auth_method` - str: Authentication method, valid types are
`UsernamePassword`, `DeviceCode`, `ClientSecret`, or `Certificate`
`UsernamePassword`, `DeviceCode`, or `ClientSecret`
(Default: `UsernamePassword`).
- `user` - str: The M365 user, required when the auth method is
UsernamePassword
@@ -211,11 +203,6 @@ The full set of configuration options are:
method is UsernamePassword
- `client_id` - str: The app registration's client ID
- `client_secret` - str: The app registration's secret
- `certificate_path` - str: Path to a PEM or PKCS12 certificate
including the private key. Required when the auth method is
`Certificate`
- `certificate_password` - str: Optional password for the
certificate file when using `Certificate` auth
- `tenant_id` - str: The Azure AD tenant ID. This is required
for all auth methods except UsernamePassword.
- `mailbox` - str: The mailbox name. This defaults to the
@@ -248,14 +235,11 @@ The full set of configuration options are:
group and use that as the group id.
```powershell
New-ApplicationAccessPolicy -AccessRight RestrictAccess
New-ApplicationAccessPolicy -AccessRight RestrictAccess
-AppId "<CLIENT_ID>" -PolicyScopeGroupId "<MAILBOX>"
-Description "Restrict access to dmarc reports mailbox."
```
The same application permission and mailbox scoping guidance
applies to the `Certificate` auth method.
:::
- `elasticsearch`
- `hosts` - str: A comma separated list of hostnames and ports
@@ -268,7 +252,7 @@ The full set of configuration options are:
:::
- `user` - str: Basic auth username
- `password` - str: Basic auth password
- `api_key` - str: API key
- `apiKey` - str: API key
- `ssl` - bool: Use an encrypted SSL/TLS connection
(Default: `True`)
- `timeout` - float: Timeout in seconds (Default: 60)
@@ -291,11 +275,7 @@ The full set of configuration options are:
:::
- `user` - str: Basic auth username
- `password` - str: Basic auth password
- `api_key` - str: API key
- `auth_type` - str: Authentication type: `basic` (default) or `awssigv4` (the key `authentication_type` is accepted as an alias for this option)
- `aws_region` - str: AWS region for SigV4 authentication
(required when `auth_type = awssigv4`)
- `aws_service` - str: AWS service for SigV4 signing (Default: `es`)
- `apiKey` - str: API key
- `ssl` - bool: Use an encrypted SSL/TLS connection
(Default: `True`)
- `timeout` - float: Timeout in seconds (Default: 60)
@@ -351,77 +331,16 @@ The full set of configuration options are:
- `secret_access_key` - str: The secret access key (Optional)
- `syslog`
- `server` - str: The Syslog server name or IP address
- `port` - int: The port to use (Default: `514`)
- `protocol` - str: The protocol to use: `udp`, `tcp`, or `tls` (Default: `udp`)
- `cafile_path` - str: Path to CA certificate file for TLS server verification (Optional)
- `certfile_path` - str: Path to client certificate file for TLS authentication (Optional)
- `keyfile_path` - str: Path to client private key file for TLS authentication (Optional)
- `timeout` - float: Connection timeout in seconds for TCP/TLS (Default: `5.0`)
- `retry_attempts` - int: Number of retry attempts for failed connections (Default: `3`)
- `retry_delay` - int: Delay in seconds between retry attempts (Default: `5`)
**Example UDP configuration (default):**
```ini
[syslog]
server = syslog.example.com
port = 514
```
**Example TCP configuration:**
```ini
[syslog]
server = syslog.example.com
port = 6514
protocol = tcp
timeout = 10.0
retry_attempts = 5
```
**Example TLS configuration with server verification:**
```ini
[syslog]
server = syslog.example.com
port = 6514
protocol = tls
cafile_path = /path/to/ca-cert.pem
timeout = 10.0
```
**Example TLS configuration with mutual authentication:**
```ini
[syslog]
server = syslog.example.com
port = 6514
protocol = tls
cafile_path = /path/to/ca-cert.pem
certfile_path = /path/to/client-cert.pem
keyfile_path = /path/to/client-key.pem
timeout = 10.0
retry_attempts = 3
retry_delay = 5
```
- `port` - int: The UDP port to use (Default: `514`)
- `gmail_api`
- `credentials_file` - str: Path to file containing the
credentials, None to disable (Default: `None`)
- `token_file` - str: Path to save the token file
(Default: `.token`)
- `auth_mode` - str: Authentication mode, `installed_app` (default)
or `service_account`
- `service_account_user` - str: Delegated mailbox user for Gmail
service account auth (required for domain-wide delegation). Also
accepted as `delegated_user` for backward compatibility.
:::{note}
credentials_file and token_file can be obtained by following the [quickstart](https://developers.google.com/gmail/api/quickstart/python). Please change the scope to `https://www.googleapis.com/auth/gmail.modify`.
:::
:::{note}
When `auth_mode = service_account`, `credentials_file` must point to a
Google service account key JSON file, and `token_file` is not used.
:::
- `include_spam_trash` - bool: Include messages in Spam and
Trash when searching reports (Default: `False`)
- `scopes` - str: Comma separated list of scopes to use when
@@ -450,7 +369,7 @@ The full set of configuration options are:
- `mode` - str: The GELF transport type to use. Valid modes: `tcp`, `udp`, `tls`
- `maildir`
- `maildir_path` - str: Full path of the mailbox maildir location (Default: `INBOX`)
- `reports_folder` - str: Full path of the mailbox maildir location (Default: `INBOX`)
- `maildir_create` - bool: Create maildir if not present (Default: False)
- `webhook` - Post the individual reports to a webhook url with the report as the JSON body
@@ -518,7 +437,7 @@ Update the limit to 2k per example:
PUT _cluster/settings
{
"persistent" : {
"cluster.max_shards_per_node" : 2000
"cluster.max_shards_per_node" : 2000
}
}
```
@@ -526,55 +445,6 @@ PUT _cluster/settings
Increasing this value increases resource usage.
:::
## Performance tuning
For large mailbox imports or backfills, parsedmarc can consume a noticeable amount
of memory, especially when it runs on the same host as Elasticsearch or
OpenSearch. The following settings can reduce peak memory usage and make long
imports more predictable:
- Reduce `mailbox.batch_size` to smaller values such as `100-500` instead of
processing a very large message set at once. Smaller batches trade throughput
for lower peak memory use and less sink pressure.
- Keep `n_procs` low for mailbox-heavy runs. In practice, `1-2` workers is often
a safer starting point for large backfills than aggressive parallelism.
- Use `mailbox.since` to process reports in smaller time windows such as `1d`,
`7d`, or another interval that fits the backlog. This makes it easier to catch
up incrementally instead of loading an entire mailbox history in one run.
- Set `strip_attachment_payloads = True` when forensic reports contain large
attachments and you do not need to retain the raw payloads in the parsed
output.
- Prefer running parsedmarc separately from Elasticsearch or OpenSearch, or
reserve enough RAM for both services if they must share a host.
- For very large imports, prefer incremental supervised runs, such as a
scheduler or systemd service, over infrequent massive backfills.
These are operational tuning recommendations rather than hard requirements, but
they are often enough to avoid memory pressure and reduce failures during
high-volume mailbox processing.
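As a rough starting point, a hedged `ini` sketch that applies those recommendations (all values are illustrative, not prescriptive):

```ini
[general]
# Keep parallelism modest for mailbox-heavy backfills
n_procs = 2
# Drop raw attachment payloads from forensic output
strip_attachment_payloads = True

[mailbox]
# Smaller batches trade throughput for lower peak memory
batch_size = 250
# Catch up incrementally instead of loading the whole history
since = 7d
```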
## Multi-tenant support
Starting in `8.19.0`, ParseDMARC provides multi-tenant support by placing data into separate OpenSearch or Elasticsearch index prefixes. To set this up, create a YAML file where each key is a tenant name and the value is a list of domains that belong to that tenant, not including subdomains, like this:
```yaml
example:
- example.com
- example.net
- example.org
whalensolutions:
- whalensolutions.com
```
Save it to disk where the user running ParseDMARC can read it, then set `index_prefix_domain_map` to that filepath in the `[general]` section of the ParseDMARC configuration file and do not set an `index_prefix` option in the `[elasticsearch]` or `[opensearch]` sections.
When configured correctly, if ParseDMARC finds that a report is related to a domain in the mapping, the report is saved in an index whose name is prefixed with the tenant name and a trailing underscore. You can then use the security features of OpenSearch or the ELK stack to grant users access only to the indexes they need.
:::{note}
A domain cannot be used in multiple tenant lists. Only the first prefix list that contains the matching domain is used.
:::
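For example, a minimal sketch of the matching `[general]` fragment (the file path is a hypothetical placeholder):

```ini
[general]
# Path to the tenant-to-domain YAML mapping shown above (hypothetical path);
# leave index_prefix unset in [elasticsearch] and [opensearch]
index_prefix_domain_map = /etc/parsedmarc/index_prefix_domain_map.yml
```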
## Running parsedmarc as a systemd service
Use systemd to run `parsedmarc` as a service and process reports as

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,3 +0,0 @@
__version__ = "9.2.0"
USER_AGENT = f"parsedmarc/{__version__}"

View File

@@ -1,29 +1,27 @@
# -*- coding: utf-8 -*-
from __future__ import annotations
from collections import OrderedDict
from typing import Any, Optional, Union
from elasticsearch.helpers import reindex
from elasticsearch_dsl.search import Q
from elasticsearch_dsl import (
Boolean,
Date,
connections,
Object,
Document,
Index,
Nested,
InnerDoc,
Integer,
Ip,
Nested,
Object,
Search,
Text,
connections,
Boolean,
Ip,
Date,
Search,
)
from elasticsearch_dsl.search import Q
from elasticsearch.helpers import reindex
from parsedmarc import InvalidForensicReport
from parsedmarc.log import logger
from parsedmarc.utils import human_timestamp_to_datetime
from parsedmarc import InvalidForensicReport
class ElasticsearchError(Exception):
@@ -69,8 +67,6 @@ class _AggregateReportDoc(Document):
date_range = Date()
date_begin = Date()
date_end = Date()
normalized_timespan = Boolean()
original_timespan_seconds = Integer()
errors = Text()
published_policy = Object(_PublishedPolicy)
source_ip_address = Ip()
@@ -91,18 +87,18 @@ class _AggregateReportDoc(Document):
dkim_results = Nested(_DKIMResult)
spf_results = Nested(_SPFResult)
def add_policy_override(self, type_: str, comment: str):
self.policy_overrides.append(_PolicyOverride(type=type_, comment=comment)) # pyright: ignore[reportCallIssue]
def add_policy_override(self, type_, comment):
self.policy_overrides.append(_PolicyOverride(type=type_, comment=comment))
def add_dkim_result(self, domain: str, selector: str, result: _DKIMResult):
def add_dkim_result(self, domain, selector, result):
self.dkim_results.append(
_DKIMResult(domain=domain, selector=selector, result=result)
) # pyright: ignore[reportCallIssue]
)
def add_spf_result(self, domain: str, scope: str, result: _SPFResult):
self.spf_results.append(_SPFResult(domain=domain, scope=scope, result=result)) # pyright: ignore[reportCallIssue]
def add_spf_result(self, domain, scope, result):
self.spf_results.append(_SPFResult(domain=domain, scope=scope, result=result))
def save(self, **kwargs): # pyright: ignore[reportIncompatibleMethodOverride]
def save(self, **kwargs):
self.passed_dmarc = False
self.passed_dmarc = self.spf_aligned or self.dkim_aligned
@@ -135,26 +131,26 @@ class _ForensicSampleDoc(InnerDoc):
body = Text()
attachments = Nested(_EmailAttachmentDoc)
def add_to(self, display_name: str, address: str):
self.to.append(_EmailAddressDoc(display_name=display_name, address=address)) # pyright: ignore[reportCallIssue]
def add_to(self, display_name, address):
self.to.append(_EmailAddressDoc(display_name=display_name, address=address))
def add_reply_to(self, display_name: str, address: str):
def add_reply_to(self, display_name, address):
self.reply_to.append(
_EmailAddressDoc(display_name=display_name, address=address)
) # pyright: ignore[reportCallIssue]
)
def add_cc(self, display_name: str, address: str):
self.cc.append(_EmailAddressDoc(display_name=display_name, address=address)) # pyright: ignore[reportCallIssue]
def add_cc(self, display_name, address):
self.cc.append(_EmailAddressDoc(display_name=display_name, address=address))
def add_bcc(self, display_name: str, address: str):
self.bcc.append(_EmailAddressDoc(display_name=display_name, address=address)) # pyright: ignore[reportCallIssue]
def add_bcc(self, display_name, address):
self.bcc.append(_EmailAddressDoc(display_name=display_name, address=address))
def add_attachment(self, filename: str, content_type: str, sha256: str):
def add_attachment(self, filename, content_type, sha256):
self.attachments.append(
_EmailAttachmentDoc(
filename=filename, content_type=content_type, sha256=sha256
)
) # pyright: ignore[reportCallIssue]
)
class _ForensicReportDoc(Document):
@@ -201,15 +197,15 @@ class _SMTPTLSPolicyDoc(InnerDoc):
def add_failure_details(
self,
result_type: Optional[str] = None,
ip_address: Optional[str] = None,
receiving_ip: Optional[str] = None,
receiving_mx_helo: Optional[str] = None,
failed_session_count: Optional[int] = None,
sending_mta_ip: Optional[str] = None,
receiving_mx_hostname: Optional[str] = None,
additional_information_uri: Optional[str] = None,
failure_reason_code: Union[str, int, None] = None,
result_type,
ip_address,
receiving_ip,
receiving_mx_helo,
failed_session_count,
sending_mta_ip=None,
receiving_mx_hostname=None,
additional_information_uri=None,
failure_reason_code=None,
):
_details = _SMTPTLSFailureDetailsDoc(
result_type=result_type,
@@ -222,7 +218,7 @@ class _SMTPTLSPolicyDoc(InnerDoc):
additional_information=additional_information_uri,
failure_reason_code=failure_reason_code,
)
self.failure_details.append(_details) # pyright: ignore[reportCallIssue]
self.failure_details.append(_details)
class _SMTPTLSReportDoc(Document):
@@ -239,14 +235,13 @@ class _SMTPTLSReportDoc(Document):
def add_policy(
self,
policy_type: str,
policy_domain: str,
successful_session_count: int,
failed_session_count: int,
*,
policy_string: Optional[str] = None,
mx_host_patterns: Optional[list[str]] = None,
failure_details: Optional[str] = None,
policy_type,
policy_domain,
successful_session_count,
failed_session_count,
policy_string=None,
mx_host_patterns=None,
failure_details=None,
):
self.policies.append(
policy_type=policy_type,
@@ -256,7 +251,7 @@ class _SMTPTLSReportDoc(Document):
policy_string=policy_string,
mx_host_patterns=mx_host_patterns,
failure_details=failure_details,
) # pyright: ignore[reportCallIssue]
)
class AlreadySaved(ValueError):
@@ -264,25 +259,24 @@ class AlreadySaved(ValueError):
def set_hosts(
hosts: Union[str, list[str]],
*,
use_ssl: bool = False,
ssl_cert_path: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
api_key: Optional[str] = None,
timeout: float = 60.0,
hosts,
use_ssl=False,
ssl_cert_path=None,
username=None,
password=None,
apiKey=None,
timeout=60.0,
):
"""
Sets the Elasticsearch hosts to use
Args:
hosts (str | list[str]): A single hostname or URL, or list of hostnames or URLs
use_ssl (bool): Use an HTTPS connection to the server
hosts (str): A single hostname or URL, or list of hostnames or URLs
use_ssl (bool): Use a HTTPS connection to the server
ssl_cert_path (str): Path to the certificate chain
username (str): The username to use for authentication
password (str): The password to use for authentication
api_key (str): The Base64 encoded API key to use for authentication
apiKey (str): The Base64 encoded API key to use for authentication
timeout (float): Timeout in seconds
"""
if not isinstance(hosts, list):
@@ -295,14 +289,14 @@ def set_hosts(
conn_params["ca_certs"] = ssl_cert_path
else:
conn_params["verify_certs"] = False
if username and password:
if username:
conn_params["http_auth"] = username + ":" + password
if api_key:
conn_params["api_key"] = api_key
if apiKey:
conn_params["api_key"] = apiKey
connections.create_connection(**conn_params)
def create_indexes(names: list[str], settings: Optional[dict[str, Any]] = None):
def create_indexes(names, settings=None):
"""
Create Elasticsearch indexes
@@ -325,10 +319,7 @@ def create_indexes(names: list[str], settings: Optional[dict[str, Any]] = None):
raise ElasticsearchError("Elasticsearch error: {0}".format(e.__str__()))
def migrate_indexes(
aggregate_indexes: Optional[list[str]] = None,
forensic_indexes: Optional[list[str]] = None,
):
def migrate_indexes(aggregate_indexes=None, forensic_indexes=None):
"""
Updates index mappings
@@ -367,7 +358,7 @@ def migrate_indexes(
}
Index(new_index_name).create()
Index(new_index_name).put_mapping(doc_type=doc, body=body)
reindex(connections.get_connection(), aggregate_index_name, new_index_name) # pyright: ignore[reportArgumentType]
reindex(connections.get_connection(), aggregate_index_name, new_index_name)
Index(aggregate_index_name).delete()
for forensic_index in forensic_indexes:
@@ -375,18 +366,18 @@ def migrate_indexes(
def save_aggregate_report_to_elasticsearch(
aggregate_report: dict[str, Any],
index_suffix: Optional[str] = None,
index_prefix: Optional[str] = None,
monthly_indexes: Optional[bool] = False,
number_of_shards: int = 1,
number_of_replicas: int = 0,
aggregate_report,
index_suffix=None,
index_prefix=None,
monthly_indexes=False,
number_of_shards=1,
number_of_replicas=0,
):
"""
Saves a parsed DMARC aggregate report to Elasticsearch
Args:
aggregate_report (dict): A parsed forensic report
aggregate_report (OrderedDict): A parsed forensic report
index_suffix (str): The suffix of the name of the index to save to
index_prefix (str): The prefix of the name of the index to save to
monthly_indexes (bool): Use monthly indexes instead of daily indexes
@@ -404,17 +395,21 @@ def save_aggregate_report_to_elasticsearch(
domain = aggregate_report["policy_published"]["domain"]
begin_date = human_timestamp_to_datetime(metadata["begin_date"], to_utc=True)
end_date = human_timestamp_to_datetime(metadata["end_date"], to_utc=True)
begin_date_human = begin_date.strftime("%Y-%m-%d %H:%M:%SZ")
end_date_human = end_date.strftime("%Y-%m-%d %H:%M:%SZ")
if monthly_indexes:
index_date = begin_date.strftime("%Y-%m")
else:
index_date = begin_date.strftime("%Y-%m-%d")
aggregate_report["begin_date"] = begin_date
aggregate_report["end_date"] = end_date
date_range = [aggregate_report["begin_date"], aggregate_report["end_date"]]
org_name_query = Q(dict(match_phrase=dict(org_name=org_name))) # type: ignore
report_id_query = Q(dict(match_phrase=dict(report_id=report_id))) # pyright: ignore[reportArgumentType]
domain_query = Q(dict(match_phrase={"published_policy.domain": domain})) # pyright: ignore[reportArgumentType]
begin_date_query = Q(dict(range=dict(date_begin=dict(gte=begin_date)))) # pyright: ignore[reportArgumentType]
end_date_query = Q(dict(range=dict(date_end=dict(lte=end_date)))) # pyright: ignore[reportArgumentType]
org_name_query = Q(dict(match_phrase=dict(org_name=org_name)))
report_id_query = Q(dict(match_phrase=dict(report_id=report_id)))
domain_query = Q(dict(match_phrase={"published_policy.domain": domain}))
begin_date_query = Q(dict(match=dict(date_begin=begin_date)))
end_date_query = Q(dict(match=dict(date_end=end_date)))
if index_suffix is not None:
search_index = "dmarc_aggregate_{0}*".format(index_suffix)
@@ -426,8 +421,6 @@ def save_aggregate_report_to_elasticsearch(
query = org_name_query & report_id_query & domain_query
query = query & begin_date_query & end_date_query
search.query = query
begin_date_human = begin_date.strftime("%Y-%m-%d %H:%M:%SZ")
end_date_human = end_date.strftime("%Y-%m-%d %H:%M:%SZ")
try:
existing = search.execute()
@@ -457,17 +450,6 @@ def save_aggregate_report_to_elasticsearch(
)
for record in aggregate_report["records"]:
begin_date = human_timestamp_to_datetime(record["interval_begin"], to_utc=True)
end_date = human_timestamp_to_datetime(record["interval_end"], to_utc=True)
normalized_timespan = record["normalized_timespan"]
if monthly_indexes:
index_date = begin_date.strftime("%Y-%m")
else:
index_date = begin_date.strftime("%Y-%m-%d")
aggregate_report["begin_date"] = begin_date
aggregate_report["end_date"] = end_date
date_range = [aggregate_report["begin_date"], aggregate_report["end_date"]]
agg_doc = _AggregateReportDoc(
xml_schema=aggregate_report["xml_schema"],
org_name=metadata["org_name"],
@@ -475,9 +457,8 @@ def save_aggregate_report_to_elasticsearch(
org_extra_contact_info=metadata["org_extra_contact_info"],
report_id=metadata["report_id"],
date_range=date_range,
date_begin=begin_date,
date_end=end_date,
normalized_timespan=normalized_timespan,
date_begin=aggregate_report["begin_date"],
date_end=aggregate_report["end_date"],
errors=metadata["errors"],
published_policy=published_policy,
source_ip_address=record["source"]["ip_address"],
@@ -527,7 +508,7 @@ def save_aggregate_report_to_elasticsearch(
number_of_shards=number_of_shards, number_of_replicas=number_of_replicas
)
create_indexes([index], index_settings)
agg_doc.meta.index = index # pyright: ignore[reportOptionalMemberAccess, reportAttributeAccessIssue]
agg_doc.meta.index = index
try:
agg_doc.save()
@@ -536,18 +517,18 @@ def save_aggregate_report_to_elasticsearch(
def save_forensic_report_to_elasticsearch(
forensic_report: dict[str, Any],
index_suffix: Optional[Any] = None,
index_prefix: Optional[str] = None,
monthly_indexes: Optional[bool] = False,
number_of_shards: int = 1,
number_of_replicas: int = 0,
forensic_report,
index_suffix=None,
index_prefix=None,
monthly_indexes=False,
number_of_shards=1,
number_of_replicas=0,
):
"""
Saves a parsed DMARC forensic report to Elasticsearch
Args:
forensic_report (dict): A parsed forensic report
forensic_report (OrderedDict): A parsed forensic report
index_suffix (str): The suffix of the name of the index to save to
index_prefix (str): The prefix of the name of the index to save to
monthly_indexes (bool): Use monthly indexes instead of daily
@@ -567,12 +548,12 @@ def save_forensic_report_to_elasticsearch(
sample_date = forensic_report["parsed_sample"]["date"]
sample_date = human_timestamp_to_datetime(sample_date)
original_headers = forensic_report["parsed_sample"]["headers"]
headers: dict[str, Any] = {}
headers = OrderedDict()
for original_header in original_headers:
headers[original_header.lower()] = original_headers[original_header]
arrival_date = human_timestamp_to_datetime(forensic_report["arrival_date_utc"])
arrival_date_epoch_milliseconds = int(arrival_date.timestamp() * 1000)
arrival_date_human = forensic_report["arrival_date_utc"]
arrival_date = human_timestamp_to_datetime(arrival_date_human)
if index_suffix is not None:
search_index = "dmarc_forensic_{0}*".format(index_suffix)
@@ -581,39 +562,24 @@ def save_forensic_report_to_elasticsearch(
if index_prefix is not None:
search_index = "{0}{1}".format(index_prefix, search_index)
search = Search(index=search_index)
q = Q(dict(match=dict(arrival_date=arrival_date_epoch_milliseconds))) # pyright: ignore[reportArgumentType]
arrival_query = {"match": {"arrival_date": arrival_date}}
q = Q(arrival_query)
from_ = None
to_ = None
subject = None
if "from" in headers:
# We convert the FROM header from a string list to a flat string.
headers["from"] = headers["from"][0]
if headers["from"][0] == "":
headers["from"] = headers["from"][1]
else:
headers["from"] = " <".join(headers["from"]) + ">"
from_ = dict()
from_["sample.headers.from"] = headers["from"]
from_query = Q(dict(match_phrase=from_)) # pyright: ignore[reportArgumentType]
q = q & from_query
from_ = headers["from"]
from_query = {"match_phrase": {"sample.headers.from": from_}}
q = q & Q(from_query)
if "to" in headers:
# We convert the TO header from a string list to a flat string.
headers["to"] = headers["to"][0]
if headers["to"][0] == "":
headers["to"] = headers["to"][1]
else:
headers["to"] = " <".join(headers["to"]) + ">"
to_ = dict()
to_["sample.headers.to"] = headers["to"]
to_query = Q(dict(match_phrase=to_)) # pyright: ignore[reportArgumentType]
q = q & to_query
to_ = headers["to"]
to_query = {"match_phrase": {"sample.headers.to": to_}}
q = q & Q(to_query)
if "subject" in headers:
subject = headers["subject"]
subject_query = {"match_phrase": {"sample.headers.subject": subject}}
q = q & Q(subject_query) # pyright: ignore[reportArgumentType]
q = q & Q(subject_query)
search.query = q
existing = search.execute()
@@ -623,9 +589,7 @@ def save_forensic_report_to_elasticsearch(
"A forensic sample to {0} from {1} "
"with a subject of {2} and arrival date of {3} "
"already exists in "
"Elasticsearch".format(
to_, from_, subject, forensic_report["arrival_date_utc"]
)
"Elasticsearch".format(to_, from_, subject, arrival_date_human)
)
parsed_sample = forensic_report["parsed_sample"]
@@ -661,7 +625,7 @@ def save_forensic_report_to_elasticsearch(
user_agent=forensic_report["user_agent"],
version=forensic_report["version"],
original_mail_from=forensic_report["original_mail_from"],
arrival_date=arrival_date_epoch_milliseconds,
arrival_date=arrival_date,
domain=forensic_report["reported_domain"],
original_envelope_id=forensic_report["original_envelope_id"],
authentication_results=forensic_report["authentication_results"],
@@ -691,7 +655,7 @@ def save_forensic_report_to_elasticsearch(
number_of_shards=number_of_shards, number_of_replicas=number_of_replicas
)
create_indexes([index], index_settings)
forensic_doc.meta.index = index # pyright: ignore[reportAttributeAccessIssue, reportOptionalMemberAccess]
forensic_doc.meta.index = index
try:
forensic_doc.save()
except Exception as e:
@@ -703,18 +667,18 @@ def save_forensic_report_to_elasticsearch(
def save_smtp_tls_report_to_elasticsearch(
report: dict[str, Any],
index_suffix: Optional[str] = None,
index_prefix: Optional[str] = None,
monthly_indexes: bool = False,
number_of_shards: int = 1,
number_of_replicas: int = 0,
report,
index_suffix=None,
index_prefix=None,
monthly_indexes=False,
number_of_shards=1,
number_of_replicas=0,
):
"""
Saves a parsed SMTP TLS report to Elasticsearch
Args:
report (dict): A parsed SMTP TLS report
report (OrderedDict): A parsed SMTP TLS report
index_suffix (str): The suffix of the name of the index to save to
index_prefix (str): The prefix of the name of the index to save to
monthly_indexes (bool): Use monthly indexes instead of daily indexes
@@ -738,10 +702,10 @@ def save_smtp_tls_report_to_elasticsearch(
report["begin_date"] = begin_date
report["end_date"] = end_date
org_name_query = Q(dict(match_phrase=dict(org_name=org_name))) # pyright: ignore[reportArgumentType]
report_id_query = Q(dict(match_phrase=dict(report_id=report_id))) # pyright: ignore[reportArgumentType]
begin_date_query = Q(dict(match=dict(date_begin=begin_date))) # pyright: ignore[reportArgumentType]
end_date_query = Q(dict(match=dict(date_end=end_date))) # pyright: ignore[reportArgumentType]
org_name_query = Q(dict(match_phrase=dict(org_name=org_name)))
report_id_query = Q(dict(match_phrase=dict(report_id=report_id)))
begin_date_query = Q(dict(match=dict(date_begin=begin_date)))
end_date_query = Q(dict(match=dict(date_end=end_date)))
if index_suffix is not None:
search_index = "smtp_tls_{0}*".format(index_suffix)
@@ -800,7 +764,7 @@ def save_smtp_tls_report_to_elasticsearch(
policy_doc = _SMTPTLSPolicyDoc(
policy_domain=policy["policy_domain"],
policy_type=policy["policy_type"],
successful_session_count=policy["successful_session_count"],
succesful_session_count=policy["successful_session_count"],
failed_session_count=policy["failed_session_count"],
policy_string=policy_strings,
mx_host_patterns=mx_host_patterns,
@@ -842,10 +806,10 @@ def save_smtp_tls_report_to_elasticsearch(
additional_information_uri=additional_information_uri,
failure_reason_code=failure_reason_code,
)
smtp_tls_doc.policies.append(policy_doc) # pyright: ignore[reportCallIssue]
smtp_tls_doc.policies.append(policy_doc)
create_indexes([index], index_settings)
smtp_tls_doc.meta.index = index # pyright: ignore[reportOptionalMemberAccess, reportAttributeAccessIssue]
smtp_tls_doc.meta.index = index
try:
smtp_tls_doc.save()

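The duplicate checks in this module all follow one pattern: build a Search over a wildcard index, AND together match/match_phrase clauses, and treat any hit as AlreadySaved. A minimal sketch of that pattern, assuming elasticsearch-dsl with a default connection already registered (the function and index names here are illustrative, not parsedmarc's exact code):

from elasticsearch_dsl import Q, Search

def report_already_saved(org_name: str, report_id: str) -> bool:
    # Both phrase matches must hold for a document to count as a duplicate.
    query = Q("match_phrase", org_name=org_name) & Q("match_phrase", report_id=report_id)
    search = Search(index="dmarc_aggregate*").query(query)
    return search.execute().hits.total.value > 0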
View File

@@ -1,19 +1,17 @@
# -*- coding: utf-8 -*-
from __future__ import annotations
import logging
import logging.handlers
import json
import threading
from typing import Any
from pygelf import GelfTcpHandler, GelfTlsHandler, GelfUdpHandler
from parsedmarc import (
parsed_aggregate_reports_to_csv_rows,
parsed_forensic_reports_to_csv_rows,
parsed_smtp_tls_reports_to_csv_rows,
)
from pygelf import GelfTcpHandler, GelfUdpHandler, GelfTlsHandler
log_context_data = threading.local()
@@ -50,7 +48,7 @@ class GelfClient(object):
)
self.logger.addHandler(self.handler)
def save_aggregate_report_to_gelf(self, aggregate_reports: list[dict[str, Any]]):
def save_aggregate_report_to_gelf(self, aggregate_reports):
rows = parsed_aggregate_reports_to_csv_rows(aggregate_reports)
for row in rows:
log_context_data.parsedmarc = row
@@ -58,14 +56,12 @@ class GelfClient(object):
log_context_data.parsedmarc = None
def save_forensic_report_to_gelf(self, forensic_reports: list[dict[str, Any]]):
def save_forensic_report_to_gelf(self, forensic_reports):
rows = parsed_forensic_reports_to_csv_rows(forensic_reports)
for row in rows:
log_context_data.parsedmarc = row
self.logger.info("parsedmarc forensic report")
self.logger.info(json.dumps(row))
def save_smtp_tls_report_to_gelf(self, smtp_tls_reports: dict[str, Any]):
def save_smtp_tls_report_to_gelf(self, smtp_tls_reports):
rows = parsed_smtp_tls_reports_to_csv_rows(smtp_tls_reports)
for row in rows:
log_context_data.parsedmarc = row
self.logger.info("parsedmarc smtptls report")
self.logger.info(json.dumps(row))
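The GELF methods above stash each CSV row in the module-level thread-local before logging. One common way to surface a thread-local value as a structured field is a logging.Filter; a standard-library sketch of that wiring, under the assumption that the GELF handler forwards extra record attributes (the filter name is illustrative):

import logging
import threading

log_context_data = threading.local()

class ParsedmarcContextFilter(logging.Filter):
    def filter(self, record: logging.LogRecord) -> bool:
        # Copy the thread-local row onto the record so a handler such as
        # pygelf can ship it as an additional structured field.
        record.parsedmarc = getattr(log_context_data, "parsedmarc", None)
        return True

logger = logging.getLogger("gelf")
logger.addFilter(ParsedmarcContextFilter())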

View File

@@ -1,17 +1,15 @@
# -*- coding: utf-8 -*-
from __future__ import annotations
import json
from ssl import SSLContext, create_default_context
from typing import Any, Optional, Union
from ssl import create_default_context
from kafka import KafkaProducer
from kafka.errors import NoBrokersAvailable, UnknownTopicOrPartitionError
from collections import OrderedDict
from parsedmarc.utils import human_timestamp_to_datetime
from parsedmarc import __version__
from parsedmarc.log import logger
from parsedmarc.utils import human_timestamp_to_datetime
class KafkaError(RuntimeError):
@@ -20,13 +18,7 @@ class KafkaError(RuntimeError):
class KafkaClient(object):
def __init__(
self,
kafka_hosts: list[str],
*,
ssl: Optional[bool] = False,
username: Optional[str] = None,
password: Optional[str] = None,
ssl_context: Optional[SSLContext] = None,
self, kafka_hosts, ssl=False, username=None, password=None, ssl_context=None
):
"""
Initializes the Kafka client
@@ -36,7 +28,7 @@ class KafkaClient(object):
ssl (bool): Use an SSL/TLS connection
username (str): An optional username
password (str): An optional password
ssl_context (SSLContext): SSL context options
ssl_context: SSL context options
Notes:
``use_ssl=True`` is implied when a username or password are
@@ -46,7 +38,7 @@ class KafkaClient(object):
``$ConnectionString``, and the password is the
Azure Event Hub connection string.
"""
config: dict[str, Any] = dict(
config = dict(
value_serializer=lambda v: json.dumps(v).encode("utf-8"),
bootstrap_servers=kafka_hosts,
client_id="parsedmarc-{0}".format(__version__),
@@ -63,7 +55,7 @@ class KafkaClient(object):
raise KafkaError("No Kafka brokers available")
@staticmethod
def strip_metadata(report: dict[str, Any]):
def strip_metadata(report):
"""
Duplicates org_name, org_email and report_id into JSON root
and removes report_metadata key to bring it more inline
@@ -77,7 +69,7 @@ class KafkaClient(object):
return report
@staticmethod
def generate_date_range(report: dict[str, Any]):
def generate_daterange(report):
"""
Creates a date_range timestamp with format YYYY-MM-DD-T-HH:MM:SS
based on begin and end dates for easier parsing in Kibana.
@@ -94,11 +86,7 @@ class KafkaClient(object):
logger.debug("date_range is {}".format(date_range))
return date_range
def save_aggregate_reports_to_kafka(
self,
aggregate_reports: Union[dict[str, Any], list[dict[str, Any]]],
aggregate_topic: str,
):
def save_aggregate_reports_to_kafka(self, aggregate_reports, aggregate_topic):
"""
Saves aggregate DMARC reports to Kafka
@@ -108,14 +96,16 @@ class KafkaClient(object):
aggregate_topic (str): The name of the Kafka topic
"""
if isinstance(aggregate_reports, dict):
if isinstance(aggregate_reports, dict) or isinstance(
aggregate_reports, OrderedDict
):
aggregate_reports = [aggregate_reports]
if len(aggregate_reports) < 1:
return
for report in aggregate_reports:
report["date_range"] = self.generate_date_range(report)
report["date_range"] = self.generate_daterange(report)
report = self.strip_metadata(report)
for slice in report["records"]:
@@ -139,11 +129,7 @@ class KafkaClient(object):
except Exception as e:
raise KafkaError("Kafka error: {0}".format(e.__str__()))
def save_forensic_reports_to_kafka(
self,
forensic_reports: Union[dict[str, Any], list[dict[str, Any]]],
forensic_topic: str,
):
def save_forensic_reports_to_kafka(self, forensic_reports, forensic_topic):
"""
Saves forensic DMARC reports to Kafka, sends individual
records (slices) since Kafka requires messages to be <= 1MB
@@ -173,11 +159,7 @@ class KafkaClient(object):
except Exception as e:
raise KafkaError("Kafka error: {0}".format(e.__str__()))
def save_smtp_tls_reports_to_kafka(
self,
smtp_tls_reports: Union[list[dict[str, Any]], dict[str, Any]],
smtp_tls_topic: str,
):
def save_smtp_tls_reports_to_kafka(self, smtp_tls_reports, smtp_tls_topic):
"""
Saves SMTP TLS reports to Kafka, sends individual
records (slices) since Kafka requires messages to be <= 1MB

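Per the docstring notes above, Azure Event Hubs' Kafka endpoint authenticates with the literal username ``$ConnectionString`` and the Event Hub connection string as the password. A hedged usage sketch (the host and connection string are placeholders, and aggregate_reports would be the list of parsed report dicts produced by parsedmarc):

client = KafkaClient(
    ["example.servicebus.windows.net:9093"],  # Event Hubs Kafka endpoint (placeholder)
    ssl=True,
    username="$ConnectionString",
    password="Endpoint=sb://example.servicebus.windows.net/;SharedAccessKeyName=...",  # placeholder
)
client.save_aggregate_reports_to_kafka(aggregate_reports, "dmarc_aggregate")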
View File

@@ -1,15 +1,9 @@
# -*- coding: utf-8 -*-
from __future__ import annotations
from typing import Any
from parsedmarc.log import logger
from azure.core.exceptions import HttpResponseError
from azure.identity import ClientSecretCredential
from azure.monitor.ingestion import LogsIngestionClient
from parsedmarc.log import logger
class LogAnalyticsException(Exception):
"""Raised when an Elasticsearch error occurs"""
@@ -108,12 +102,7 @@ class LogAnalyticsClient(object):
"Invalid configuration. " + "One or more required settings are missing."
)
def publish_json(
self,
results,
logs_client: LogsIngestionClient,
dcr_stream: str,
):
def publish_json(self, results, logs_client: LogsIngestionClient, dcr_stream: str):
"""
Background function to publish given
DMARC report to specific Data Collection Rule.
@@ -132,11 +121,7 @@ class LogAnalyticsClient(object):
raise LogAnalyticsException("Upload failed: {error}".format(error=e))
def publish_results(
self,
results: dict[str, Any],
save_aggregate: bool,
save_forensic: bool,
save_smtp_tls: bool,
self, results, save_aggregate: bool, save_forensic: bool, save_smtp_tls: bool
):
"""
Function to publish DMARC and/or SMTP TLS reports to Log Analytics

View File
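For context on what publish_json wraps: the azure-monitor-ingestion client uploads a list of JSON records to a Data Collection Rule stream. A minimal sketch, assuming a ClientSecretCredential and placeholder IDs (the endpoint, IDs, and stream name are illustrative):

from azure.identity import ClientSecretCredential
from azure.monitor.ingestion import LogsIngestionClient

credential = ClientSecretCredential(
    tenant_id="<tenant-id>", client_id="<client-id>", client_secret="<client-secret>"
)
logs_client = LogsIngestionClient(
    endpoint="https://example.ingest.monitor.azure.com", credential=credential
)
record = {"example_field": "example_value"}  # illustrative payload
# rule_id is the DCR immutable ID; stream_name must match a stream defined in the DCR.
logs_client.upload(rule_id="<dcr-immutable-id>", stream_name="Custom-DMARC", logs=[record])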

@@ -1,7 +1,3 @@
# -*- coding: utf-8 -*-
from __future__ import annotations
from base64 import urlsafe_b64decode
from functools import lru_cache
from pathlib import Path
@@ -10,7 +6,6 @@ from typing import List
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from google.oauth2 import service_account
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
@@ -19,29 +14,7 @@ from parsedmarc.log import logger
from parsedmarc.mail.mailbox_connection import MailboxConnection
def _get_creds(
token_file,
credentials_file,
scopes,
oauth2_port,
auth_mode="installed_app",
service_account_user=None,
):
normalized_auth_mode = (auth_mode or "installed_app").strip().lower()
if normalized_auth_mode == "service_account":
creds = service_account.Credentials.from_service_account_file(
credentials_file,
scopes=scopes,
)
if service_account_user:
creds = creds.with_subject(service_account_user)
return creds
if normalized_auth_mode != "installed_app":
raise ValueError(
f"Unsupported Gmail auth_mode '{auth_mode}'. "
"Expected 'installed_app' or 'service_account'."
)
def _get_creds(token_file, credentials_file, scopes, oauth2_port):
creds = None
if Path(token_file).exists():
@@ -70,17 +43,8 @@ class GmailConnection(MailboxConnection):
reports_folder: str,
oauth2_port: int,
paginate_messages: bool,
auth_mode: str = "installed_app",
service_account_user: str | None = None,
):
creds = _get_creds(
token_file,
credentials_file,
scopes,
oauth2_port,
auth_mode=auth_mode,
service_account_user=service_account_user,
)
creds = _get_creds(token_file, credentials_file, scopes, oauth2_port)
self.service = build("gmail", "v1", credentials=creds)
self.include_spam_trash = include_spam_trash
self.reports_label_id = self._find_label_id_for_label(reports_folder)
@@ -148,17 +112,17 @@ class GmailConnection(MailboxConnection):
else:
return [id for id in self._fetch_all_message_ids(reports_label_id)]
def fetch_message(self, message_id) -> str:
def fetch_message(self, message_id):
msg = (
self.service.users()
.messages()
.get(userId="me", id=message_id, format="raw")
.execute()
)
return urlsafe_b64decode(msg["raw"]).decode(errors="replace")
return urlsafe_b64decode(msg["raw"])
def delete_message(self, message_id: str):
self.service.users().messages().delete(userId="me", id=message_id).execute()
self.service.users().messages().delete(userId="me", id=message_id)
def move_message(self, message_id: str, folder_name: str):
label_id = self._find_label_id_for_label(folder_name)
@@ -188,4 +152,3 @@ class GmailConnection(MailboxConnection):
for label in labels:
if label_name == label["id"] or label_name == label["name"]:
return label["id"]
return ""

View File

@@ -1,40 +1,30 @@
# -*- coding: utf-8 -*-
from __future__ import annotations
from enum import Enum
from functools import lru_cache
from pathlib import Path
from time import sleep
from typing import Any, List, Optional, Union
from typing import List, Optional
from azure.identity import (
UsernamePasswordCredential,
DeviceCodeCredential,
ClientSecretCredential,
CertificateCredential,
TokenCachePersistenceOptions,
AuthenticationRecord,
)
from msgraph.core import GraphClient
from requests.exceptions import RequestException
from parsedmarc.log import logger
from parsedmarc.mail.mailbox_connection import MailboxConnection
GRAPH_REQUEST_RETRY_ATTEMPTS = 3
GRAPH_REQUEST_RETRY_DELAY_SECONDS = 5
class AuthMethod(Enum):
DeviceCode = 1
UsernamePassword = 2
ClientSecret = 3
Certificate = 4
def _get_cache_args(token_path: Path, allow_unencrypted_storage):
cache_args: dict[str, Any] = {
cache_args = {
"cache_persistence_options": TokenCachePersistenceOptions(
name="parsedmarc", allow_unencrypted_storage=allow_unencrypted_storage
)
@@ -89,55 +79,30 @@ def _generate_credential(auth_method: str, token_path: Path, **kwargs):
tenant_id=kwargs["tenant_id"],
client_secret=kwargs["client_secret"],
)
elif auth_method == AuthMethod.Certificate.name:
cert_path = kwargs.get("certificate_path")
if not cert_path:
raise ValueError(
"certificate_path is required when auth_method is 'Certificate'"
)
credential = CertificateCredential(
client_id=kwargs["client_id"],
tenant_id=kwargs["tenant_id"],
certificate_path=cert_path,
password=kwargs.get("certificate_password"),
)
else:
raise RuntimeError(f"Auth method {auth_method} not found")
return credential
class MSGraphConnection(MailboxConnection):
_WELL_KNOWN_FOLDERS = {
"inbox": "inbox",
"archive": "archive",
"drafts": "drafts",
"sentitems": "sentitems",
"deleteditems": "deleteditems",
"junkemail": "junkemail",
}
def __init__(
self,
auth_method: str,
mailbox: str,
graph_url: str,
client_id: str,
client_secret: Optional[str],
username: Optional[str],
password: Optional[str],
client_secret: str,
username: str,
password: str,
tenant_id: str,
token_file: str,
allow_unencrypted_storage: bool,
certificate_path: Optional[str] = None,
certificate_password: Optional[Union[str, bytes]] = None,
):
token_path = Path(token_file)
credential = _generate_credential(
auth_method,
client_id=client_id,
client_secret=client_secret,
certificate_path=certificate_path,
certificate_password=certificate_password,
username=username,
password=password,
tenant_id=tenant_id,
@@ -148,10 +113,10 @@ class MSGraphConnection(MailboxConnection):
"credential": credential,
"cloud": graph_url,
}
if not isinstance(credential, (ClientSecretCredential, CertificateCredential)):
if not isinstance(credential, ClientSecretCredential):
scopes = ["Mail.ReadWrite"]
# Detect if mailbox is shared
if mailbox and username and username != mailbox:
if mailbox and username != mailbox:
scopes = ["Mail.ReadWrite.Shared"]
auth_record = credential.authenticate(scopes=scopes)
_cache_auth_record(auth_record, token_path)
@@ -160,23 +125,6 @@ class MSGraphConnection(MailboxConnection):
self._client = GraphClient(**client_params)
self.mailbox_name = mailbox
def _request_with_retries(self, method_name: str, *args, **kwargs):
for attempt in range(1, GRAPH_REQUEST_RETRY_ATTEMPTS + 1):
try:
return getattr(self._client, method_name)(*args, **kwargs)
except RequestException as error:
if attempt == GRAPH_REQUEST_RETRY_ATTEMPTS:
raise
logger.warning(
"Transient MS Graph %s error on attempt %s/%s: %s",
method_name.upper(),
attempt,
GRAPH_REQUEST_RETRY_ATTEMPTS,
error,
)
sleep(GRAPH_REQUEST_RETRY_DELAY_SECONDS)
raise RuntimeError("no retry attempts configured")
def create_folder(self, folder_name: str):
sub_url = ""
path_parts = folder_name.split("/")
@@ -191,7 +139,7 @@ class MSGraphConnection(MailboxConnection):
request_body = {"displayName": folder_name}
request_url = f"/users/{self.mailbox_name}/mailFolders{sub_url}"
resp = self._request_with_retries("post", request_url, json=request_body)
resp = self._client.post(request_url, json=request_body)
if resp.status_code == 409:
logger.debug(f"Folder {folder_name} already exists, skipping creation")
elif resp.status_code == 201:
@@ -199,9 +147,9 @@ class MSGraphConnection(MailboxConnection):
else:
logger.warning(f"Unknown response {resp.status_code} {resp.json()}")
def fetch_messages(self, reports_folder: str, **kwargs) -> List[str]:
def fetch_messages(self, folder_name: str, **kwargs) -> List[str]:
"""Returns a list of message UIDs in the specified folder"""
folder_id = self._find_folder_id_from_folder_path(reports_folder)
folder_id = self._find_folder_id_from_folder_path(folder_name)
url = f"/users/{self.mailbox_name}/mailFolders/{folder_id}/messages"
since = kwargs.get("since")
if not since:
@@ -214,14 +162,14 @@ class MSGraphConnection(MailboxConnection):
def _get_all_messages(self, url, batch_size, since):
messages: list
params: dict[str, Union[str, int]] = {"$select": "id"}
params = {"$select": "id"}
if since:
params["$filter"] = f"receivedDateTime ge {since}"
if batch_size and batch_size > 0:
params["$top"] = batch_size
else:
params["$top"] = 100
result = self._request_with_retries("get", url, params=params)
result = self._client.get(url, params=params)
if result.status_code != 200:
raise RuntimeError(f"Failed to fetch messages {result.text}")
messages = result.json()["value"]
@@ -229,7 +177,7 @@ class MSGraphConnection(MailboxConnection):
while "@odata.nextLink" in result.json() and (
since is not None or (batch_size == 0 or batch_size - len(messages) > 0)
):
result = self._request_with_retries("get", result.json()["@odata.nextLink"])
result = self._client.get(result.json()["@odata.nextLink"])
if result.status_code != 200:
raise RuntimeError(f"Failed to fetch messages {result.text}")
messages.extend(result.json()["value"])
@@ -238,7 +186,7 @@ class MSGraphConnection(MailboxConnection):
def mark_message_read(self, message_id: str):
"""Marks a message as read"""
url = f"/users/{self.mailbox_name}/messages/{message_id}"
resp = self._request_with_retries("patch", url, json={"isRead": "true"})
resp = self._client.patch(url, json={"isRead": "true"})
if resp.status_code != 200:
raise RuntimeWarning(
f"Failed to mark message read{resp.status_code}: {resp.json()}"
@@ -246,7 +194,7 @@ class MSGraphConnection(MailboxConnection):
def fetch_message(self, message_id: str, **kwargs):
url = f"/users/{self.mailbox_name}/messages/{message_id}/$value"
result = self._request_with_retries("get", url)
result = self._client.get(url)
if result.status_code != 200:
raise RuntimeWarning(
f"Failed to fetch message{result.status_code}: {result.json()}"
@@ -258,7 +206,7 @@ class MSGraphConnection(MailboxConnection):
def delete_message(self, message_id: str):
url = f"/users/{self.mailbox_name}/messages/{message_id}"
resp = self._request_with_retries("delete", url)
resp = self._client.delete(url)
if resp.status_code != 204:
raise RuntimeWarning(
f"Failed to delete message {resp.status_code}: {resp.json()}"
@@ -268,7 +216,7 @@ class MSGraphConnection(MailboxConnection):
folder_id = self._find_folder_id_from_folder_path(folder_name)
request_body = {"destinationId": folder_id}
url = f"/users/{self.mailbox_name}/messages/{message_id}/move"
resp = self._request_with_retries("post", url, json=request_body)
resp = self._client.post(url, json=request_body)
if resp.status_code != 201:
raise RuntimeWarning(
f"Failed to move message {resp.status_code}: {resp.json()}"
@@ -296,19 +244,6 @@ class MSGraphConnection(MailboxConnection):
else:
return self._find_folder_id_with_parent(folder_name, None)
def _get_well_known_folder_id(self, folder_name: str) -> Optional[str]:
folder_key = folder_name.lower().replace(" ", "").replace("-", "")
alias = self._WELL_KNOWN_FOLDERS.get(folder_key)
if alias is None:
return None
url = f"/users/{self.mailbox_name}/mailFolders/{alias}?$select=id,displayName"
folder_resp = self._request_with_retries("get", url)
if folder_resp.status_code != 200:
return None
payload = folder_resp.json()
return payload.get("id")
def _find_folder_id_with_parent(
self, folder_name: str, parent_folder_id: Optional[str]
):
@@ -317,12 +252,8 @@ class MSGraphConnection(MailboxConnection):
sub_url = f"/{parent_folder_id}/childFolders"
url = f"/users/{self.mailbox_name}/mailFolders{sub_url}"
filter = f"?$filter=displayName eq '{folder_name}'"
folders_resp = self._request_with_retries("get", url + filter)
folders_resp = self._client.get(url + filter)
if folders_resp.status_code != 200:
if parent_folder_id is None:
well_known_folder_id = self._get_well_known_folder_id(folder_name)
if well_known_folder_id:
return well_known_folder_id
raise RuntimeWarning(f"Failed to list folders.{folders_resp.json()}")
folders: list = folders_resp.json()["value"]
matched_folders = [

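One caveat with the $filter=displayName eq '...' expression above: OData string literals escape an embedded single quote by doubling it, so folder names containing an apostrophe need escaping before interpolation. A small hedged helper illustrating this (not part of parsedmarc):

def odata_quote(value: str) -> str:
    # OData string literals escape an embedded single quote as ''.
    return value.replace("'", "''")

folder = "O'Brien reports"  # illustrative folder name containing an apostrophe
filter_expr = "?$filter=displayName eq '{0}'".format(odata_quote(folder))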
View File

@@ -1,9 +1,3 @@
# -*- coding: utf-8 -*-
from __future__ import annotations
from typing import cast
from time import sleep
from imapclient.exceptions import IMAPClientError
@@ -17,14 +11,14 @@ from parsedmarc.mail.mailbox_connection import MailboxConnection
class IMAPConnection(MailboxConnection):
def __init__(
self,
host: str,
user: str,
password: str,
port: int = 993,
ssl: bool = True,
verify: bool = True,
timeout: int = 30,
max_retries: int = 4,
host=None,
user=None,
password=None,
port=None,
ssl=True,
verify=True,
timeout=30,
max_retries=4,
):
self._username = user
self._password = password
@@ -46,37 +40,19 @@ class IMAPConnection(MailboxConnection):
def fetch_messages(self, reports_folder: str, **kwargs):
self._client.select_folder(reports_folder)
since = kwargs.get("since")
if since is not None:
return self._client.search(f"SINCE {since}")
if since:
return self._client.search(["SINCE", since])
else:
return self._client.search()
def fetch_message(self, message_id: int):
return cast(str, self._client.fetch_message(message_id, parse=False))
def fetch_message(self, message_id):
return self._client.fetch_message(message_id, parse=False)
def delete_message(self, message_id: int):
try:
self._client.delete_messages([message_id])
except IMAPClientError as error:
logger.warning(
"IMAP delete fallback for message %s due to server error: %s",
message_id,
error,
)
self._client.add_flags([message_id], [r"\Deleted"], silent=True)
self._client.expunge()
def delete_message(self, message_id: str):
self._client.delete_messages([message_id])
def move_message(self, message_id: int, folder_name: str):
try:
self._client.move_messages([message_id], folder_name)
except IMAPClientError as error:
logger.warning(
"IMAP move fallback for message %s due to server error: %s",
message_id,
error,
)
self._client.copy([message_id], folder_name)
self.delete_message(message_id)
def move_message(self, message_id: str, folder_name: str):
self._client.move_messages([message_id], folder_name)
def keepalive(self):
self._client.noop()
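A hedged usage sketch of the connection above; note that IMAP SINCE expects an RFC 3501 date such as 01-Jan-2025, which the two signatures in this diff pass either as a raw string or as a criteria list (the host and credentials are placeholders):

conn = IMAPConnection(host="imap.example.com", user="dmarc", password="example-password")
for message_id in conn.fetch_messages("INBOX", since="01-Jan-2025"):
    raw_message = conn.fetch_message(message_id)  # raw RFC 822 message
    conn.move_message(message_id, "Archive")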

View File

@@ -1,8 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import annotations
from abc import ABC
from typing import List
class MailboxConnection(ABC):
@@ -13,16 +10,16 @@ class MailboxConnection(ABC):
def create_folder(self, folder_name: str):
raise NotImplementedError
def fetch_messages(self, reports_folder: str, **kwargs):
def fetch_messages(self, reports_folder: str, **kwargs) -> List[str]:
raise NotImplementedError
def fetch_message(self, message_id) -> str:
raise NotImplementedError
def delete_message(self, message_id):
def delete_message(self, message_id: str):
raise NotImplementedError
def move_message(self, message_id, folder_name: str):
def move_message(self, message_id: str, folder_name: str):
raise NotImplementedError
def keepalive(self):

View File

@@ -1,21 +1,16 @@
# -*- coding: utf-8 -*-
from __future__ import annotations
import mailbox
import os
from time import sleep
from typing import Dict
from parsedmarc.log import logger
from parsedmarc.mail.mailbox_connection import MailboxConnection
import mailbox
import os
class MaildirConnection(MailboxConnection):
def __init__(
self,
maildir_path: str,
maildir_create: bool = False,
maildir_path=None,
maildir_create=False,
):
self._maildir_path = maildir_path
self._maildir_create = maildir_create
@@ -32,31 +27,27 @@ class MaildirConnection(MailboxConnection):
)
raise Exception(ex)
self._client = mailbox.Maildir(maildir_path, create=maildir_create)
self._subfolder_client: Dict[str, mailbox.Maildir] = {}
self._subfolder_client = {}
def create_folder(self, folder_name: str):
self._subfolder_client[folder_name] = self._client.add_folder(folder_name)
self._client.add_folder(folder_name)
def fetch_messages(self, reports_folder: str, **kwargs):
return self._client.keys()
def fetch_message(self, message_id: str) -> str:
msg = self._client.get(message_id)
if msg is not None:
msg = msg.as_string()
if msg is not None:
return msg
return ""
def fetch_message(self, message_id):
return self._client.get(message_id).as_string()
def delete_message(self, message_id: str):
self._client.remove(message_id)
def move_message(self, message_id: str, folder_name: str):
message_data = self._client.get(message_id)
if message_data is None:
return
if folder_name not in self._subfolder_client:
self._subfolder_client[folder_name] = self._client.add_folder(folder_name)
if folder_name not in self._subfolder_client.keys():
self._subfolder_client = mailbox.Maildir(
os.join(self.maildir_path, folder_name), create=self.maildir_create
)
self._subfolder_client[folder_name].add(message_data)
self._client.remove(message_id)
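Note that the older branch shown above calls os.join, which does not exist (os.path.join is meant), and references self.maildir_path and self.maildir_create rather than the underscored attributes set in __init__. A corrected method sketch under those assumptions, caching one Maildir client per subfolder instead of clobbering the dict:

import mailbox
import os

def _get_subfolder(self, folder_name: str) -> mailbox.Maildir:
    # Lazily open and cache a Maildir client for the requested subfolder.
    if folder_name not in self._subfolder_client:
        self._subfolder_client[folder_name] = mailbox.Maildir(
            os.path.join(self._maildir_path, folder_name),
            create=self._maildir_create,
        )
    return self._subfolder_client[folder_name]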

View File

@@ -1,32 +1,27 @@
# -*- coding: utf-8 -*-
from __future__ import annotations
from collections import OrderedDict
from typing import Any, Optional, Union
import boto3
from opensearchpy import (
AWSV4SignerAuth,
Boolean,
Date,
Q,
connections,
Object,
Document,
Index,
Nested,
InnerDoc,
Integer,
Ip,
Nested,
Object,
Q,
RequestsHttpConnection,
Search,
Text,
connections,
Boolean,
Ip,
Date,
Search,
)
from opensearchpy.helpers import reindex
from parsedmarc import InvalidForensicReport
from parsedmarc.log import logger
from parsedmarc.utils import human_timestamp_to_datetime
from parsedmarc import InvalidForensicReport
class OpenSearchError(Exception):
@@ -72,8 +67,6 @@ class _AggregateReportDoc(Document):
date_range = Date()
date_begin = Date()
date_end = Date()
normalized_timespan = Boolean()
original_timespan_seconds = Integer()
errors = Text()
published_policy = Object(_PublishedPolicy)
source_ip_address = Ip()
@@ -94,18 +87,18 @@ class _AggregateReportDoc(Document):
dkim_results = Nested(_DKIMResult)
spf_results = Nested(_SPFResult)
def add_policy_override(self, type_: str, comment: str):
def add_policy_override(self, type_, comment):
self.policy_overrides.append(_PolicyOverride(type=type_, comment=comment))
def add_dkim_result(self, domain: str, selector: str, result: _DKIMResult):
def add_dkim_result(self, domain, selector, result):
self.dkim_results.append(
_DKIMResult(domain=domain, selector=selector, result=result)
)
def add_spf_result(self, domain: str, scope: str, result: _SPFResult):
def add_spf_result(self, domain, scope, result):
self.spf_results.append(_SPFResult(domain=domain, scope=scope, result=result))
def save(self, **kwargs): # pyright: ignore[reportIncompatibleMethodOverride]
def save(self, **kwargs):
self.passed_dmarc = False
self.passed_dmarc = self.spf_aligned or self.dkim_aligned
@@ -138,21 +131,21 @@ class _ForensicSampleDoc(InnerDoc):
body = Text()
attachments = Nested(_EmailAttachmentDoc)
def add_to(self, display_name: str, address: str):
def add_to(self, display_name, address):
self.to.append(_EmailAddressDoc(display_name=display_name, address=address))
def add_reply_to(self, display_name: str, address: str):
def add_reply_to(self, display_name, address):
self.reply_to.append(
_EmailAddressDoc(display_name=display_name, address=address)
)
def add_cc(self, display_name: str, address: str):
def add_cc(self, display_name, address):
self.cc.append(_EmailAddressDoc(display_name=display_name, address=address))
def add_bcc(self, display_name: str, address: str):
def add_bcc(self, display_name, address):
self.bcc.append(_EmailAddressDoc(display_name=display_name, address=address))
def add_attachment(self, filename: str, content_type: str, sha256: str):
def add_attachment(self, filename, content_type, sha256):
self.attachments.append(
_EmailAttachmentDoc(
filename=filename, content_type=content_type, sha256=sha256
@@ -204,15 +197,15 @@ class _SMTPTLSPolicyDoc(InnerDoc):
def add_failure_details(
self,
result_type: Optional[str] = None,
ip_address: Optional[str] = None,
receiving_ip: Optional[str] = None,
receiving_mx_helo: Optional[str] = None,
failed_session_count: Optional[int] = None,
sending_mta_ip: Optional[str] = None,
receiving_mx_hostname: Optional[str] = None,
additional_information_uri: Optional[str] = None,
failure_reason_code: Union[str, int, None] = None,
result_type,
ip_address,
receiving_ip,
receiving_mx_helo,
failed_session_count,
sending_mta_ip=None,
receiving_mx_hostname=None,
additional_information_uri=None,
failure_reason_code=None,
):
_details = _SMTPTLSFailureDetailsDoc(
result_type=result_type,
@@ -242,14 +235,13 @@ class _SMTPTLSReportDoc(Document):
def add_policy(
self,
policy_type: str,
policy_domain: str,
successful_session_count: int,
failed_session_count: int,
*,
policy_string: Optional[str] = None,
mx_host_patterns: Optional[list[str]] = None,
failure_details: Optional[str] = None,
policy_type,
policy_domain,
successful_session_count,
failed_session_count,
policy_string=None,
mx_host_patterns=None,
failure_details=None,
):
self.policies.append(
policy_type=policy_type,
@@ -267,32 +259,25 @@ class AlreadySaved(ValueError):
def set_hosts(
hosts: Union[str, list[str]],
*,
use_ssl: Optional[bool] = False,
ssl_cert_path: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
api_key: Optional[str] = None,
timeout: Optional[float] = 60.0,
auth_type: str = "basic",
aws_region: Optional[str] = None,
aws_service: str = "es",
hosts,
use_ssl=False,
ssl_cert_path=None,
username=None,
password=None,
apiKey=None,
timeout=60.0,
):
"""
Sets the OpenSearch hosts to use
Args:
hosts (str|list[str]): A single hostname or URL, or list of hostnames or URLs
hosts (str|list): A hostname or URL, or list of hostnames or URLs
use_ssl (bool): Use an HTTPS connection to the server
ssl_cert_path (str): Path to the certificate chain
username (str): The username to use for authentication
password (str): The password to use for authentication
api_key (str): The Base64 encoded API key to use for authentication
apiKey (str): The Base64 encoded API key to use for authentication
timeout (float): Timeout in seconds
auth_type (str): OpenSearch auth mode: basic (default) or awssigv4
aws_region (str): AWS region for SigV4 auth (required for awssigv4)
aws_service (str): AWS service for SigV4 signing (default: es)
"""
if not isinstance(hosts, list):
hosts = [hosts]
@@ -304,36 +289,14 @@ def set_hosts(
conn_params["ca_certs"] = ssl_cert_path
else:
conn_params["verify_certs"] = False
normalized_auth_type = (auth_type or "basic").strip().lower()
if normalized_auth_type == "awssigv4":
if not aws_region:
raise OpenSearchError(
"OpenSearch AWS SigV4 auth requires 'aws_region' to be set"
)
session = boto3.Session()
credentials = session.get_credentials()
if credentials is None:
raise OpenSearchError(
"Unable to load AWS credentials for OpenSearch SigV4 authentication"
)
conn_params["http_auth"] = AWSV4SignerAuth(
credentials, aws_region, aws_service
)
conn_params["connection_class"] = RequestsHttpConnection
elif normalized_auth_type == "basic":
if username and password:
conn_params["http_auth"] = username + ":" + password
if api_key:
conn_params["api_key"] = api_key
else:
raise OpenSearchError(
f"Unsupported OpenSearch auth_type '{auth_type}'. "
"Expected 'basic' or 'awssigv4'."
)
if username:
conn_params["http_auth"] = username + ":" + password
if apiKey:
conn_params["api_key"] = apiKey
connections.create_connection(**conn_params)
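A hedged usage sketch of the SigV4 branch above, assuming AWS credentials are resolvable from the default boto3 chain (the domain endpoint is a placeholder):

set_hosts(
    "https://search-example.us-east-1.es.amazonaws.com",  # placeholder domain endpoint
    use_ssl=True,
    auth_type="awssigv4",
    aws_region="us-east-1",
)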
def create_indexes(names: list[str], settings: Optional[dict[str, Any]] = None):
def create_indexes(names, settings=None):
"""
Create OpenSearch indexes
@@ -356,10 +319,7 @@ def create_indexes(names: list[str], settings: Optional[dict[str, Any]] = None):
raise OpenSearchError("OpenSearch error: {0}".format(e.__str__()))
def migrate_indexes(
aggregate_indexes: Optional[list[str]] = None,
forensic_indexes: Optional[list[str]] = None,
):
def migrate_indexes(aggregate_indexes=None, forensic_indexes=None):
"""
Updates index mappings
@@ -406,18 +366,18 @@ def migrate_indexes(
def save_aggregate_report_to_opensearch(
aggregate_report: dict[str, Any],
index_suffix: Optional[str] = None,
index_prefix: Optional[str] = None,
monthly_indexes: bool = False,
number_of_shards: int = 1,
number_of_replicas: int = 0,
aggregate_report,
index_suffix=None,
index_prefix=None,
monthly_indexes=False,
number_of_shards=1,
number_of_replicas=0,
):
"""
Saves a parsed DMARC aggregate report to OpenSearch
Args:
aggregate_report (dict): A parsed aggregate report
aggregate_report (OrderedDict): A parsed aggregate report
index_suffix (str): The suffix of the name of the index to save to
index_prefix (str): The prefix of the name of the index to save to
monthly_indexes (bool): Use monthly indexes instead of daily indexes
@@ -435,17 +395,21 @@ def save_aggregate_report_to_opensearch(
domain = aggregate_report["policy_published"]["domain"]
begin_date = human_timestamp_to_datetime(metadata["begin_date"], to_utc=True)
end_date = human_timestamp_to_datetime(metadata["end_date"], to_utc=True)
begin_date_human = begin_date.strftime("%Y-%m-%d %H:%M:%SZ")
end_date_human = end_date.strftime("%Y-%m-%d %H:%M:%SZ")
if monthly_indexes:
index_date = begin_date.strftime("%Y-%m")
else:
index_date = begin_date.strftime("%Y-%m-%d")
aggregate_report["begin_date"] = begin_date
aggregate_report["end_date"] = end_date
date_range = [aggregate_report["begin_date"], aggregate_report["end_date"]]
org_name_query = Q(dict(match_phrase=dict(org_name=org_name)))
report_id_query = Q(dict(match_phrase=dict(report_id=report_id)))
domain_query = Q(dict(match_phrase={"published_policy.domain": domain}))
begin_date_query = Q(dict(range=dict(date_begin=dict(gte=begin_date))))
end_date_query = Q(dict(range=dict(date_end=dict(lte=end_date))))
begin_date_query = Q(dict(match=dict(date_begin=begin_date)))
end_date_query = Q(dict(match=dict(date_end=end_date)))
if index_suffix is not None:
search_index = "dmarc_aggregate_{0}*".format(index_suffix)
@@ -457,8 +421,6 @@ def save_aggregate_report_to_opensearch(
query = org_name_query & report_id_query & domain_query
query = query & begin_date_query & end_date_query
search.query = query
begin_date_human = begin_date.strftime("%Y-%m-%d %H:%M:%SZ")
end_date_human = end_date.strftime("%Y-%m-%d %H:%M:%SZ")
try:
existing = search.execute()
@@ -488,17 +450,6 @@ def save_aggregate_report_to_opensearch(
)
for record in aggregate_report["records"]:
begin_date = human_timestamp_to_datetime(record["interval_begin"], to_utc=True)
end_date = human_timestamp_to_datetime(record["interval_end"], to_utc=True)
normalized_timespan = record["normalized_timespan"]
if monthly_indexes:
index_date = begin_date.strftime("%Y-%m")
else:
index_date = begin_date.strftime("%Y-%m-%d")
aggregate_report["begin_date"] = begin_date
aggregate_report["end_date"] = end_date
date_range = [aggregate_report["begin_date"], aggregate_report["end_date"]]
agg_doc = _AggregateReportDoc(
xml_schema=aggregate_report["xml_schema"],
org_name=metadata["org_name"],
@@ -506,9 +457,8 @@ def save_aggregate_report_to_opensearch(
org_extra_contact_info=metadata["org_extra_contact_info"],
report_id=metadata["report_id"],
date_range=date_range,
date_begin=begin_date,
date_end=end_date,
normalized_timespan=normalized_timespan,
date_begin=aggregate_report["begin_date"],
date_end=aggregate_report["end_date"],
errors=metadata["errors"],
published_policy=published_policy,
source_ip_address=record["source"]["ip_address"],
@@ -567,18 +517,18 @@ def save_aggregate_report_to_opensearch(
def save_forensic_report_to_opensearch(
forensic_report: dict[str, Any],
index_suffix: Optional[str] = None,
index_prefix: Optional[str] = None,
monthly_indexes: bool = False,
number_of_shards: int = 1,
number_of_replicas: int = 0,
forensic_report,
index_suffix=None,
index_prefix=None,
monthly_indexes=False,
number_of_shards=1,
number_of_replicas=0,
):
"""
Saves a parsed DMARC forensic report to OpenSearch
Args:
forensic_report (dict): A parsed forensic report
forensic_report (OrderedDict): A parsed forensic report
index_suffix (str): The suffix of the name of the index to save to
index_prefix (str): The prefix of the name of the index to save to
monthly_indexes (bool): Use monthly indexes instead of daily
@@ -598,12 +548,12 @@ def save_forensic_report_to_opensearch(
sample_date = forensic_report["parsed_sample"]["date"]
sample_date = human_timestamp_to_datetime(sample_date)
original_headers = forensic_report["parsed_sample"]["headers"]
headers: dict[str, Any] = {}
headers = OrderedDict()
for original_header in original_headers:
headers[original_header.lower()] = original_headers[original_header]
arrival_date = human_timestamp_to_datetime(forensic_report["arrival_date_utc"])
arrival_date_epoch_milliseconds = int(arrival_date.timestamp() * 1000)
arrival_date_human = forensic_report["arrival_date_utc"]
arrival_date = human_timestamp_to_datetime(arrival_date_human)
if index_suffix is not None:
search_index = "dmarc_forensic_{0}*".format(index_suffix)
@@ -612,35 +562,20 @@ def save_forensic_report_to_opensearch(
if index_prefix is not None:
search_index = "{0}{1}".format(index_prefix, search_index)
search = Search(index=search_index)
q = Q(dict(match=dict(arrival_date=arrival_date_epoch_milliseconds)))
arrival_query = {"match": {"arrival_date": arrival_date}}
q = Q(arrival_query)
from_ = None
to_ = None
subject = None
if "from" in headers:
# We convert the FROM header from a string list to a flat string.
headers["from"] = headers["from"][0]
if headers["from"][0] == "":
headers["from"] = headers["from"][1]
else:
headers["from"] = " <".join(headers["from"]) + ">"
from_ = dict()
from_["sample.headers.from"] = headers["from"]
from_query = Q(dict(match_phrase=from_))
q = q & from_query
from_ = headers["from"]
from_query = {"match_phrase": {"sample.headers.from": from_}}
q = q & Q(from_query)
if "to" in headers:
# We convert the TO header from a string list to a flat string.
headers["to"] = headers["to"][0]
if headers["to"][0] == "":
headers["to"] = headers["to"][1]
else:
headers["to"] = " <".join(headers["to"]) + ">"
to_ = dict()
to_["sample.headers.to"] = headers["to"]
to_query = Q(dict(match_phrase=to_))
q = q & to_query
to_ = headers["to"]
to_query = {"match_phrase": {"sample.headers.to": to_}}
q = q & Q(to_query)
if "subject" in headers:
subject = headers["subject"]
subject_query = {"match_phrase": {"sample.headers.subject": subject}}
@@ -654,9 +589,7 @@ def save_forensic_report_to_opensearch(
"A forensic sample to {0} from {1} "
"with a subject of {2} and arrival date of {3} "
"already exists in "
"OpenSearch".format(
to_, from_, subject, forensic_report["arrival_date_utc"]
)
"OpenSearch".format(to_, from_, subject, arrival_date_human)
)
parsed_sample = forensic_report["parsed_sample"]
@@ -692,7 +625,7 @@ def save_forensic_report_to_opensearch(
user_agent=forensic_report["user_agent"],
version=forensic_report["version"],
original_mail_from=forensic_report["original_mail_from"],
arrival_date=arrival_date_epoch_milliseconds,
arrival_date=arrival_date,
domain=forensic_report["reported_domain"],
original_envelope_id=forensic_report["original_envelope_id"],
authentication_results=forensic_report["authentication_results"],
@@ -734,18 +667,18 @@ def save_forensic_report_to_opensearch(
def save_smtp_tls_report_to_opensearch(
report: dict[str, Any],
index_suffix: Optional[str] = None,
index_prefix: Optional[str] = None,
monthly_indexes: bool = False,
number_of_shards: int = 1,
number_of_replicas: int = 0,
report,
index_suffix=None,
index_prefix=None,
monthly_indexes=False,
number_of_shards=1,
number_of_replicas=0,
):
"""
Saves a parsed SMTP TLS report to OpenSearch
Args:
report (dict): A parsed SMTP TLS report
report (OrderedDict): A parsed SMTP TLS report
index_suffix (str): The suffix of the name of the index to save to
index_prefix (str): The prefix of the name of the index to save to
monthly_indexes (bool): Use monthly indexes instead of daily indexes
@@ -755,7 +688,7 @@ def save_smtp_tls_report_to_opensearch(
Raises:
AlreadySaved
"""
logger.info("Saving SMTP TLS report to OpenSearch")
logger.info("Saving aggregate report to OpenSearch")
org_name = report["organization_name"]
report_id = report["report_id"]
begin_date = human_timestamp_to_datetime(report["begin_date"], to_utc=True)
@@ -831,7 +764,7 @@ def save_smtp_tls_report_to_opensearch(
policy_doc = _SMTPTLSPolicyDoc(
policy_domain=policy["policy_domain"],
policy_type=policy["policy_type"],
successful_session_count=policy["successful_session_count"],
succesful_session_count=policy["successful_session_count"],
failed_session_count=policy["failed_session_count"],
policy_string=policy_strings,
mx_host_patterns=mx_host_patterns,

View File

@@ -1,7 +1,7 @@
# About
`dbip-country-lite.mmdb` is provided by [dbip][dbip] under a
[Creative Commons Attribution 4.0 International License][cc].
[ Creative Commons Attribution 4.0 International License][cc].
[dbip]: https://db-ip.com/db/download/ip-to-country-lite
[dbip]: https://db-ip.com/db/lite.php
[cc]: http://creativecommons.org/licenses/by/4.0/

View File

@@ -3,8 +3,6 @@
A mapping is meant to make it easier to identify who or what a sending source is. Please consider contributing
additional mappings in a GitHub Pull Request.
Do not open these CSV files in Excel. It will replace Unicode characters with question marks. Use LibreOffice Calc instead.
## base_reverse_dns_map.csv
A CSV file with three fields: `base_reverse_dns`, `name`, and `type`.
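For illustration, each row pairs a base reverse DNS domain with a human-readable name and a service type; hypothetical example rows (not necessarily the file's exact entries):

base_reverse_dns,name,type
amazonses.com,Amazon SES,Email Provider
examplehost.net,Example Host,Web Hosting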
@@ -21,72 +19,33 @@ The `service_type` is based on the following rule precedence:
3. All telecommunications providers that offer internet access are identified as `ISP`, even if they also offer other services, such as web hosting or email hosting.
4. All web hosting providers are identified as `Web Hosting`, even if the service also offers email hosting.
5. All email account providers are identified as `Email Provider`, no matter how or where they are hosted.
6. All legitimate platforms offering their Software as a Service (SaaS) are identified as `SaaS`, regardless of industry. This helps simplify metrics.
6. All legitimate platforms offering their Software as a Service SaaS) are identified as `SaaS`, regardless of industry. This helps simplify metrics.
7. All other senders that use their own domain as a Reverse DNS base domain should be identified based on their industry
- Agriculture
- Automotive
- Beauty
- Conglomerate
- Construction
- Consulting
- Defense
- Education
- Email Provider
- Email Security
- Education
- Entertainment
- Event Planning
- Finance
- Food
- Government
- Government Media
- Healthcare
- IaaS
- Industrial
- ISP
- Legal
- Logistics
- Manufacturing
- Marketing
- MSP
- MSSP
- News
- Nonprofit
- PaaS
- Photography
- Physical Security
- Print
- Publishing
- Real Estate
- Retail
- SaaS
- Science
- Search Engine
- Social Media
- Sports
- Staffing
- Technology
- Travel
- Web Host
The file currently contains over 1,400 mappings from a wide variety of email sending sources.
## known_unknown_base_reverse_dns.txt
A list of reverse DNS base domains that could not be identified as belonging to a particular organization, service, or industry.
## base_reverse_dns.csv
A CSV with the fields `source_name` and optionally `message_count`. This CSV can be generated by exporting the base DNS data from the Kibana or Splunk dashboards provided by parsedmarc. This file is not tracked by Git.
## unknown_base_reverse_dns.csv
A CSV file with the fields `source_name` and `message_count`. This file is not tracked by Git.
## find_bad_utf8.py
Locates invalid UTF-8 bytes in files and optionally tries to correct them. Generated by GPT-5. Helped me find where I had introduced invalid bytes in `base_reverse_dns_map.csv`.
## find_unknown_base_reverse_dns.py
This is a Python script that reads the domains in `base_reverse_dns.csv` and writes the domains that are not in `base_reverse_dns_map.csv` or `known_unknown_base_reverse_dns.txt` to `unknown_base_reverse_dns.csv`. This is useful for identifying potential additional domains to contribute to `base_reverse_dns_map.csv` and `known_unknown_base_reverse_dns.txt`.
The file currently contains over 600 mappings from a wide variety of email sending services, including large email
providers, SaaS platforms, small web hosts, and healthcare companies. Ideally this mapping will continuously grow to
include many other services and industries.

File diff suppressed because it is too large

View File

@@ -1,44 +0,0 @@
Agriculture
Automotive
Beauty
Conglomerate
Construction
Consulting
Defense
Education
Email Provider
Email Security
Entertainment
Event Planning
Finance
Food
Government
Government Media
Healthcare
ISP
IaaS
Industrial
Legal
Logistics
MSP
MSSP
Manufacturing
Marketing
News
Nonprofit
PaaS
Photography
Physical Security
Print
Publishing
Real Estate
Retail
SaaS
Science
Search Engine
Social Media
Sports
Staffing
Technology
Travel
Web Host

View File

@@ -1,488 +0,0 @@
#!/usr/bin/env python3
import argparse
import codecs
import os
import sys
import shutil
from typing import List, Tuple
"""
Locates and optionally corrects bad UTF-8 bytes in a file.
Generated by GPT-5. Use at your own risk.
"""
# -------------------------
# UTF-8 scanning
# -------------------------
def scan_line_for_utf8_errors(
line_bytes: bytes, line_no: int, base_offset: int, context: int
):
"""
Scan one line of raw bytes for UTF-8 decoding errors.
Returns a list of dicts describing each error.
"""
pos = 0
results = []
while pos < len(line_bytes):
dec = codecs.getincrementaldecoder("utf-8")("strict")
try:
dec.decode(line_bytes[pos:], final=True)
break
except UnicodeDecodeError as e:
rel_index = e.start
abs_index_in_line = pos + rel_index
abs_offset = base_offset + abs_index_in_line
start_ctx = max(0, abs_index_in_line - context)
end_ctx = min(len(line_bytes), abs_index_in_line + 1 + context)
ctx_bytes = line_bytes[start_ctx:end_ctx]
bad_byte = line_bytes[abs_index_in_line : abs_index_in_line + 1]
col = abs_index_in_line + 1 # 1-based byte column
results.append(
{
"line": line_no,
"column": col,
"abs_offset": abs_offset,
"bad_byte_hex": bad_byte.hex(),
"context_hex": ctx_bytes.hex(),
"context_preview": ctx_bytes.decode("utf-8", errors="replace"),
}
)
# Move past the offending byte and continue
pos = abs_index_in_line + 1
return results
def scan_file_for_utf8_errors(path: str, context: int, limit: int):
errors_found = 0
limit_val = limit if limit != 0 else float("inf")
with open(path, "rb") as f:
total_offset = 0
line_no = 0
while True:
line = f.readline()
if not line:
break
line_no += 1
results = scan_line_for_utf8_errors(line, line_no, total_offset, context)
for r in results:
errors_found += 1
print(
f"[ERROR {errors_found}] Line {r['line']}, Column {r['column']}, "
f"Absolute byte offset {r['abs_offset']}"
)
print(f" Bad byte: 0x{r['bad_byte_hex']}")
print(f" Context (hex): {r['context_hex']}")
print(f" Context (preview): {r['context_preview']}")
print()
if errors_found >= limit_val:
print(f"Reached limit of {limit} errors. Stopping.")
return errors_found
total_offset += len(line)
if errors_found == 0:
print("No invalid UTF-8 bytes found. 🎉")
else:
print(f"Found {errors_found} invalid UTF-8 byte(s).")
return errors_found
# -------------------------
# Whole-file conversion
# -------------------------
def detect_encoding_text(path: str) -> Tuple[str, str]:
"""
Use charset-normalizer to detect file encoding.
Return (encoding_name, decoded_text). Falls back to cp1252 if needed.
"""
try:
from charset_normalizer import from_path
except ImportError:
print(
"Please install charset-normalizer: pip install charset-normalizer",
file=sys.stderr,
)
sys.exit(4)
matches = from_path(path)
match = matches.best()
if match is None or match.encoding is None:
# Fallback heuristic for Western single-byte text
with open(path, "rb") as fb:
data = fb.read()
try:
return "cp1252", data.decode("cp1252", errors="strict")
except UnicodeDecodeError:
print("Unable to detect encoding reliably.", file=sys.stderr)
sys.exit(5)
return match.encoding, str(match)
def convert_to_utf8(src_path: str, out_path: str, src_encoding: str = None) -> str:
"""
Convert an entire file to UTF-8 (re-decoding everything).
If src_encoding is provided, use it; else auto-detect.
Returns the encoding actually used.
"""
if src_encoding:
with open(src_path, "rb") as fb:
data = fb.read()
try:
text = data.decode(src_encoding, errors="strict")
except LookupError:
print(f"Unknown encoding: {src_encoding}", file=sys.stderr)
sys.exit(6)
except UnicodeDecodeError as e:
print(f"Decoding failed with {src_encoding}: {e}", file=sys.stderr)
sys.exit(7)
used = src_encoding
else:
used, text = detect_encoding_text(src_path)
with open(out_path, "w", encoding="utf-8", newline="") as fw:
fw.write(text)
return used
def verify_utf8_file(path: str) -> Tuple[bool, str]:
try:
with open(path, "rb") as fb:
fb.read().decode("utf-8", errors="strict")
return True, ""
except UnicodeDecodeError as e:
return False, str(e)
# -------------------------
# Targeted single-byte fixer
# -------------------------
def iter_lines_with_offsets(b: bytes):
"""
Yield (line_bytes, line_start_abs_offset). Preserves LF/CRLF/CR in bytes.
"""
start = 0
for i, byte in enumerate(b):
if byte == 0x0A: # LF
yield b[start : i + 1], start
start = i + 1
if start < len(b):
yield b[start:], start
def detect_probable_fallbacks() -> List[str]:
# Good defaults for Western/Portuguese text
return ["cp1252", "iso-8859-1", "iso-8859-15"]
def repair_mixed_utf8_line(line: bytes, base_offset: int, fallback_chain: List[str]):
"""
Strictly validate UTF-8 and fix *only* the exact offending byte when an error occurs.
This avoids touching adjacent valid UTF-8 (prevents mojibake like 'Ã©').
"""
out_fragments: List[str] = []
fixes = []
pos = 0
n = len(line)
while pos < n:
dec = codecs.getincrementaldecoder("utf-8")("strict")
try:
s = dec.decode(line[pos:], final=True)
out_fragments.append(s)
break
except UnicodeDecodeError as e:
# Append the valid prefix before the error
if e.start > 0:
out_fragments.append(
line[pos : pos + e.start].decode("utf-8", errors="strict")
)
bad_index = pos + e.start # absolute index in 'line'
bad_slice = line[bad_index : bad_index + 1] # FIX EXACTLY ONE BYTE
# Decode that single byte using the first working fallback
decoded = None
used_enc = None
for enc in fallback_chain:
try:
decoded = bad_slice.decode(enc, errors="strict")
used_enc = enc
break
except Exception:
continue
if decoded is None:
# latin-1 always succeeds (byte->same code point)
decoded = bad_slice.decode("latin-1")
used_enc = "latin-1 (fallback)"
out_fragments.append(decoded)
# Log the fix
col_1based = bad_index + 1 # byte-based column
fixes.append(
{
"line_base_offset": base_offset,
"line": None, # caller fills line number
"column": col_1based,
"abs_offset": base_offset + bad_index,
"bad_bytes_hex": bad_slice.hex(),
"used_encoding": used_enc,
"replacement_preview": decoded,
}
)
# Advance exactly one byte past the offending byte and continue
pos = bad_index + 1
return "".join(out_fragments), fixes
def targeted_fix_to_utf8(
src_path: str,
out_path: str,
fallback_chain: List[str],
dry_run: bool,
max_fixes: int,
):
with open(src_path, "rb") as fb:
data = fb.read()
total_fixes = 0
repaired_lines: List[str] = []
line_no = 0
max_val = max_fixes if max_fixes != 0 else float("inf")
for line_bytes, base_offset in iter_lines_with_offsets(data):
line_no += 1
# Fast path: keep lines that are already valid UTF-8
try:
repaired_lines.append(line_bytes.decode("utf-8", errors="strict"))
continue
except UnicodeDecodeError:
pass
fixed_text, fixes = repair_mixed_utf8_line(
line_bytes, base_offset, fallback_chain=fallback_chain
)
for f in fixes:
f["line"] = line_no
repaired_lines.append(fixed_text)
# Log fixes
for f in fixes:
total_fixes += 1
print(
f"[FIX {total_fixes}] Line {f['line']}, Column {f['column']}, Abs offset {f['abs_offset']}"
)
print(f" Bad bytes: 0x{f['bad_bytes_hex']}")
print(f" Used encoding: {f['used_encoding']}")
preview = f["replacement_preview"].replace("\r", "\\r").replace("\n", "\\n")
if len(preview) > 40:
preview = preview[:40] + "…"
print(f" Replacement preview: {preview}")
print()
if total_fixes >= max_val:
print(f"Reached max fixes limit ({max_fixes}). Stopping scan.")
break
if total_fixes >= max_val:
break
if dry_run:
print(f"Dry run complete. Detected {total_fixes} fix(es). No file written.")
return total_fixes
# Join and verify result can be encoded to UTF-8
repaired_text = "".join(repaired_lines)
try:
repaired_text.encode("utf-8", errors="strict")
except UnicodeEncodeError as e:
print(f"Internal error: repaired text not valid UTF-8: {e}", file=sys.stderr)
sys.exit(3)
with open(out_path, "w", encoding="utf-8", newline="") as fw:
fw.write(repaired_text)
print(f"Fixed file written to: {out_path}")
print(f"Total fixes applied: {total_fixes}")
return total_fixes
# -------------------------
# CLI
# -------------------------
def main():
ap = argparse.ArgumentParser(
description=(
"Scan for invalid UTF-8; optionally convert whole file or fix only invalid bytes.\n\n"
"By default, --convert and --fix **edit the input file in place** and create a backup "
"named '<input>.bak' before writing. If you pass --output, the original file is left "
"unchanged and no backup is created. Use --dry-run to preview fixes without writing."
),
formatter_class=argparse.RawTextHelpFormatter,
)
ap.add_argument("path", help="Path to the CSV/text file")
ap.add_argument(
"--context",
type=int,
default=20,
help="Bytes of context to show around errors (default: 20)",
)
ap.add_argument(
"--limit",
type=int,
default=100,
help="Max errors to report during scan (0 = unlimited)",
)
ap.add_argument(
"--skip-scan", action="store_true", help="Skip initial scan for speed"
)
# Whole-file convert
ap.add_argument(
"--convert",
action="store_true",
help="Convert entire file to UTF-8 using auto/forced encoding "
"(in-place by default; creates '<input>.bak').",
)
ap.add_argument(
"--encoding",
help="Force source encoding for --convert or first fallback for --fix",
)
ap.add_argument(
"--output",
help="Write to this path instead of in-place (no .bak is created in that case)",
)
# Targeted fix
ap.add_argument(
"--fix",
action="store_true",
help="Fix only invalid byte(s) via fallback encodings "
"(in-place by default; creates '<input>.bak').",
)
ap.add_argument(
"--fallbacks",
help="Comma-separated fallback encodings (default: cp1252,iso-8859-1,iso-8859-15)",
)
ap.add_argument(
"--dry-run",
action="store_true",
help="(fix) Print fixes but do not write or create a .bak",
)
ap.add_argument(
"--max-fixes",
type=int,
default=0,
help="(fix) Stop after N fixes (0 = unlimited)",
)
args = ap.parse_args()
path = args.path
if not os.path.isfile(path):
print(f"File not found: {path}", file=sys.stderr)
sys.exit(2)
# Optional scan first
if not args.skip_scan:
scan_file_for_utf8_errors(path, context=args.context, limit=args.limit)
# Mode selection guards
if args.convert and args.fix:
print("Choose either --convert or --fix (not both).", file=sys.stderr)
sys.exit(9)
if not args.convert and not args.fix and args.skip_scan:
print("No action selected (use --convert or --fix).")
return
if not args.convert and not args.fix:
# User only wanted a scan
return
# Determine output path and backup behavior
# In-place by default: create '<input>.bak' before overwriting.
if args.output:
out_path = args.output
in_place = False
else:
out_path = path
in_place = True
# CONVERT mode
if args.convert:
print("\n[CONVERT MODE] Converting file to UTF-8...")
if in_place:
# Create backup before overwriting original
backup_path = path + ".bak"
shutil.copy2(path, backup_path)
print(f"Backup created: {backup_path}")
used = convert_to_utf8(path, out_path, src_encoding=args.encoding)
print(f"Source encoding used: {used}")
print(f"Saved UTF-8 file as: {out_path}")
ok, err = verify_utf8_file(out_path)
if ok:
print("Verification: output is valid UTF-8 ✅")
else:
print(f"Verification failed: {err}")
sys.exit(8)
return
# FIX mode (targeted, single-byte)
if args.fix:
print("\n[FIX MODE] Fixing only invalid bytes to UTF-8...")
if args.dry_run:
# Dry-run: never write or create backup
out_path_effective = os.devnull
in_place_effective = False
else:
out_path_effective = out_path
in_place_effective = in_place
# Build fallback chain (if --encoding provided, try it first)
if args.fallbacks:
fallback_chain = [e.strip() for e in args.fallbacks.split(",") if e.strip()]
else:
fallback_chain = detect_probable_fallbacks()
if args.encoding and args.encoding not in fallback_chain:
fallback_chain = [args.encoding] + fallback_chain
if in_place_effective:
# Create backup before overwriting original (only when actually writing)
backup_path = path + ".bak"
shutil.copy2(path, backup_path)
print(f"Backup created: {backup_path}")
fix_count = targeted_fix_to_utf8(
path,
out_path_effective,
fallback_chain=fallback_chain,
dry_run=args.dry_run,
max_fixes=args.max_fixes,
)
if not args.dry_run:
ok, err = verify_utf8_file(out_path_effective)
if ok:
print("Verification: output is valid UTF-8 ✅")
print(f"Fix mode completed — {fix_count} byte(s) corrected.")
else:
print(f"Verification failed: {err}")
sys.exit(8)
return
if __name__ == "__main__":
main()
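
For orientation, the repair strategy above reduces to: decode UTF-8 until a byte fails, re-decode only the offending byte with a legacy fallback, then resume one byte later. A minimal standalone sketch of that idea (the function name and sample bytes are illustrative, not part of the script):

def repair(line: bytes, fallbacks=("cp1252", "iso-8859-1")) -> str:
    """Decode mostly-UTF-8 bytes, re-decoding lone bad bytes via fallbacks."""
    out = []
    pos = 0
    while pos < len(line):
        try:
            out.append(line[pos:].decode("utf-8"))  # decode the rest in one go
            break
        except UnicodeDecodeError as e:
            bad = pos + e.start  # absolute index of the offending byte
            out.append(line[pos:bad].decode("utf-8"))  # keep the valid prefix
            for enc in fallbacks:
                try:
                    out.append(line[bad:bad + 1].decode(enc))
                    break
                except UnicodeDecodeError:
                    continue
            else:
                out.append("\ufffd")  # no fallback fit; emit the replacement char
            pos = bad + 1  # resume one byte past the bad one
    return "".join(out)

print(repair(b"Caf\xe9 au lait"))  # cp1252 0xE9 inside UTF-8 text -> "Café au lait"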

View File

@@ -1,78 +0,0 @@
#!/usr/bin/env python3
import os
import csv
def _main():
input_csv_file_path = "base_reverse_dns.csv"
base_reverse_dns_map_file_path = "base_reverse_dns_map.csv"
known_unknown_list_file_path = "known_unknown_base_reverse_dns.txt"
psl_overrides_file_path = "psl_overrides.txt"
output_csv_file_path = "unknown_base_reverse_dns.csv"
csv_headers = ["source_name", "message_count"]
known_unknown_domains = []
psl_overrides = []
known_domains = []
output_rows = []
def load_list(file_path, list_var):
        if not os.path.exists(file_path):
            print(f"Error: {file_path} does not exist")
            exit(1)
print(f"Loading {file_path}")
with open(file_path) as f:
for line in f.readlines():
domain = line.lower().strip()
if domain in list_var:
print(f"Error: {domain} is in {file_path} multiple times")
exit(1)
elif domain != "":
list_var.append(domain)
load_list(known_unknown_list_file_path, known_unknown_domains)
load_list(psl_overrides_file_path, psl_overrides)
    if not os.path.exists(base_reverse_dns_map_file_path):
        print(f"Error: {base_reverse_dns_map_file_path} does not exist")
        exit(1)
print(f"Loading {base_reverse_dns_map_file_path}")
with open(base_reverse_dns_map_file_path) as f:
for row in csv.DictReader(f):
domain = row["base_reverse_dns"].lower().strip()
if domain in known_domains:
print(
f"Error: {domain} is in {base_reverse_dns_map_file_path} multiple times"
)
                exit(1)
else:
known_domains.append(domain)
            if domain in known_unknown_domains:
                print(
                    f"Error: {domain} is in {known_unknown_list_file_path} and "
                    f"{base_reverse_dns_map_file_path}"
                )
exit(1)
if not os.path.exists(input_csv_file_path):
print(f"Error: {base_reverse_dns_map_file_path} does not exist")
exit(1)
with open(input_csv_file_path) as f:
for row in csv.DictReader(f):
domain = row["source_name"].lower().strip()
if domain == "":
continue
for psl_domain in psl_overrides:
if domain.endswith(psl_domain):
domain = psl_domain.strip(".").strip("-")
break
if domain not in known_domains and domain not in known_unknown_domains:
print(f"New unknown domain found: {domain}")
output_rows.append(row)
print(f"Writing {output_csv_file_path}")
with open(output_csv_file_path, "w") as f:
writer = csv.DictWriter(f, fieldnames=csv_headers)
writer.writeheader()
writer.writerows(output_rows)
if __name__ == "__main__":
_main()
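
The psl_overrides loop in _main collapses any source name that ends with an override entry down to a cleaned form of that entry. A toy sketch of that matching (the function name is invented; the override values come from psl_overrides.txt):

def apply_psl_override(domain: str, overrides: list) -> str:
    for override in overrides:
        if domain.endswith(override):
            # Drop the leading "." or "-" marker, matching the loop above
            return override.strip(".").strip("-")
    return domain

overrides = [".amazonaws.com", "-clientes-izzi.mx"]
print(apply_psl_override("ec2-1-2-3-4.compute-1.amazonaws.com", overrides))
# -> "amazonaws.com"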

View File

@@ -1,601 +0,0 @@
1jli.site
26.107
444qcuhilla.com
4xr1.com
9services.com
a7e.ru
a94434500-blog.com
aams8.jp
abv-10.top
acemail.co.in
activaicon.com
adcritic.net
adlucrumnewsletter.com
admin.corpivensa.gob.ve
advantageiq.com
advrider.ro
aerospacevitro.us.com
agenturserver.de
aghories.com
ai270.net
albagroup-eg.com
alchemy.net
alohabeachcamp.net
alsiscad.com
aluminumpipetubing.com
americanstorageca.com
amplusserver.info
anchorfundhub.com
anglishment.com
anteldata.net.uy
antis.edu
antonaoll.com
anviklass.org
anwrgrp.lat
aosau.net
arandomserver.com
aransk.ru
ardcs.cn
armninl.met
as29550.net
asahachimaru.com
aserv.co.za
asmecam.it
ateky.net.br
aurelienvos.com
automatech.lat
avistaadvantage.com
b8sales.com
bahjs.com
baliaura.com
banaras.co
bearandbullmarketnews.com
bestinvestingtime.com
bhjui.com
biocorp.com
biosophy.net
bitter-echo.com
bizhostingservices.com
blguss.com
bluenet.ch
bluhosting.com
bnasg.com
bodiax.pp.ua
bost-law.com
brainity.com
brazalnde.net
brellatransplc.shop
brnonet.cz
broadwaycover.com
brushinglegal.de
brw.net
btes.tv
budgeteasehub.com
buoytoys.com
buyjapanese.jp
c53dw7m24rj.com
cahtelrandom.org
casadelmarsamara.com
cashflowmasterypro.com
cavabeen.com
cbti.net
centralmalaysia.com
chauffeurplan.co.uk
checkpox.fun
chegouseuvlache.org
chinaxingyu.xyz
christus.mx
churchills.market
ci-xyz.fit
cisumrecords.com
ckaik.cn
clcktoact.com
cli-eurosignal.cz
cloud-admin.it
cloud-edm.com
cloudflare-email.org
cloudhosting.rs
cloudlogin.co
cloudplatformpro.com
cnode.io
cntcloud.com
code-it.net
codefriend.top
colombiaceropapel.org
commerceinsurance.com
comsharempc.com
conexiona.com
coolblaze.com
coowo.com
corpemail.net
cp2-myorderbox.com
cps.com.ar
crnagora.net
cross-d-bar-troutranch.com
ctla.co.kr
cumbalikonakhotel.com
currencyexconverter.com
daakbabu.com
daikinmae.com
dairyvalley.com.my
dastans.ru
datahost36.de
ddii.network
deep-sek.shop
deetownsounds.com
descarca-counter-strike.net
detrot.xyz
dettlaffinc.com
dextoolse.net
digestivedaily.com
digi.net.my
dinofelis.cn
diwkyncbi.top
dkginternet.com
dnexpress.info
dns-oid.com
dnsindia.net
domainserver.ne.jp
domconfig.com
doorsrv.com
dreampox.fun
dreamtechmedia.com
ds.network
dss-group.net
dvj.theworkpc.com
dwlcka.com
dynamic-wiretel.in
dyntcorp.com
easternkingspei.com
economiceagles.com
egosimail.com
eliotporterphotos.us
emailgids.net
emailperegrine.com
entendercopilot.com
entretothom.net
epaycontrol.com
epicinvestmentsreview.co
epicinvestmentsreview.com
epik.com
epsilon-group.com
erestaff.com
euro-trade-gmbh.com
example.com
exposervers.com-new
extendcp.co.uk
eyecandyhosting.xyz
fastwebnet.it
fd9ing7wfn.com
feipnghardware.com
fetscorp.shop
fewo-usedom.net
fin-crime.com
financeaimpoint.com
financeupward.com
firmflat.com
flex-video.bnr.la
flourishfusionlife.com
formicidaehunt.net
fosterheap.com
fredi.shop
frontiernet.net
ftifb7tk3c.com
gamersprotectionvpn.online
gendns.com
getgreencardsfast.com
getthatroi.com
gibbshosting.com
gigidea.net
giize.com
ginous.eu.com
gis.net
gist-th.com
globalglennpartners.com
goldsboroughplace.com
gophermedia.com
gqlists.us.com
gratzl.de
greatestworldnews.com
greennutritioncare.com
gsbb.com
gumbolimbo.net
h-serv.co.uk
haedefpartners.com
halcyon-aboveboard.com
hanzubon.org
healthfuljourneyjoy.com
hgnbroken.us.com
highwey-diesel.com
hirofactory.com
hjd.asso.fr
hongchenggco.pro
hongkongtaxi.co
hopsinthehanger.com
hosted-by-worldstream.net
hostelsucre.com
hosting1337.com
hostinghane.com
hostinglotus.cloud
hostingmichigan.com
hostiran.name
hostmnl.com
hostname.localhost
hostnetwork.com
hosts.net.nz
hostserv.eu
hostwhitelabel.com
hpms1.jp
hunariojmk.net
hunriokinmuim.net
hypericine.com
i-mecca.net
iaasdns.com
iam.net.ma
iconmarketingguy.com
idcfcloud.net
idealconcept.live
igmohji.com
igppevents.org.uk
ihglobaldns.com
ilmessicano.com
imjtmn.cn
immenzaces.com
in-addr-arpa
in-addr.arpa
indsalelimited.com
indulgent-holistic.com
industechint.org
inshaaegypt.com
intal.uz
interfarma.kz
intocpanel.com
ip-147-135-108.us
ip-178-33-109.eu
ip-ptr.tech
iswhatpercent.com
itsidc.com
itwebs.com
iuon.net
ivol.co
jalanet.co.id
jimishare.com
jlccptt.net.cn
jlenterprises.co.uk
jmontalto.com
joyomokei.com
jumanra.org
justlongshirts.com
kahlaa.com
kaw.theworkpc.com
kbronet.com.tw
kdnursing.org
kielnet.net
kihy.theworkpc.com
kingschurchwirral.org
kitchenaildbd.com
klaomi.shop
knkconsult.net
kohshikai.com
krhfund.org
krillaglass.com
lancorhomes.com
landpedia.org
lanzatuseo.es
layerdns.cloud
learninglinked.com
legenditds.com
levertechcentre.com
lhost.no
lideri.net.br
lighthouse-media.com
lightpath.net
limogesporcelainboxes.com
lindsaywalt.net
linuxsunucum.com
listertermoformadoa.com
llsend.com
local.net
lohkal.com
londionrtim.net
lonestarmm.net
longmarquis.com
longwoodmgmt.com
lse.kz
lunvoy.com
luxarpro.ru
lwl-puehringer.at
lynx.net.lb
lyse.net
m-sender.com.ua
maggiolicloud.it
magnetmail.net
magnumgo.uz
maia11.com
mail-fire.com
mailsentinel.net
mailset.cn
malardino.net
managed-vps.net
manhattanbulletpoint.com
manpowerservices.com
marketmysterycode.com
marketwizardspro.com
masterclassjournal.com
matroguel.cam
maximpactipo.com
mechanicalwalk.store
mediavobis.com
meqlobal.com
mgts.by
migrans.net
miixta.com
milleniumsrv.com
mindworksunlimited.com
mirth-gale.com
misorpresa.com
mitomobile.com
mitsubachi-kibako.net
mjinn.com
mkegs.shop
mobius.fr
model-ac.ink
moderntradingnews.com
monnaiegroup.com
monopolizeright.com
moonjaws.com
morningnewscatcher.com
motion4ever.net
mschosting.com
msdp1.com
mspnet.pro
mts-nn.ru
multifamilydesign.com
mxserver.ro
mxthunder.net
my-ihor.ru
mycloudmailbox.com
myfriendforum.com
myrewards.net
mysagestore.com
mysecurewebserver.com
myshanet.net
myvps.jp
mywedsite.net
mywic.eu
name.tools
nanshenqfurniture.com
nask.pl
navertise.net
ncbb.kz
ncport.ru
ncsdi.ws
nebdig.com
neovet-base.ru
netbri.com
netcentertelecom.net.br
neti.ee
netkl.org
newinvestingguide.com
newwallstreetcode.com
ngvcv.cn
nic.name
nidix.net
nieuwedagnetwerk.net
nlscanme.com
nmeuh.cn
noisndametal.com
nucleusemail.com
nutriboostlife.com
nwo.giize.com
nwwhalewatchers.org
ny.adsl
nyt1.com
offerslatedeals.com
office365.us
ogicom.net
olivettilexikon.co.uk
omegabrasil.inf.br
onnet21.com
onumubunumu.com
oppt-ac.fit
orbitel.net.co
orfsurface.com
orientalspot.com
outsidences.com
ovaltinalization.co
overta.ru
ox28vgrurc.com
pamulang.net
panaltyspot.space
panolacountysheriffms.com
passionatesmiles.com
paulinelam.com
pdi-corp.com
peloquinbeck.com
perimetercenter.net
permanentscreen.com
permasteellisagroup.com
perumkijhyu.net
pesnia.com.ua
ph8ltwdi12o.com
pharmada.com.de
phdns3.es
pigelixval1.com
pipefittingsindia.com
planethoster.net
playamedia.io
plesk.page
pmnhost.net
pokiloandhu.net
pokupki5.ru
polandi.net
popiup.com
ports.net
posolstvostilya.com
potia.net
prima.com.ar
prima.net.ar
profsol.co.uk
prohealthmotion.com
promooffermarket.site
proudserver.com
proxado.com
psnm.ru
pvcwindowsprices.live
qontenciplc.autos
quakeclick.com
quasarstate.store
quatthonggiotico.com
qxyxab44njd.com
radianthealthrenaissance.com
rapidns.com
raxa.host
reberte.com
reethvikintl.com
regruhosting.ru
reliablepanel.com
rgb365.eu
riddlecamera.net
riddletrends.com
roccopugliese.com
runnin-rebels.com
rupar.puglia.it
rwdhosting.ca
s500host.com
sageevents.co.ke
sahacker-2020.com
samsales.site
sante-lorraine.fr
saransk.ru
satirogluet.com
scioncontacts.com
sdcc.my
seaspraymta3.net
secorp.mx
securen.net
securerelay.in
securev.net
seductiveeyes.com
seizethedayconsulting.com
serroplast.shop
server290.com
server342.com
server3559.cc
servershost.biz
sfek.kz
sgnetway.net
shopfox.ca
silvestrejaguar.sbs
silvestreonca.sbs
simplediagnostics.org
siriuscloud.jp
sisglobalresearch.com
sixpacklink.net
sjestyle.com
smallvillages.com
smartape-vps.com
solusoftware.com
sourcedns.com
southcoastwebhosting12.com
specialtvvs.com
spiritualtechnologies.io
sprout.org
srv.cat
stableserver.net
statlerfa.co.uk
stock-smtp.top
stockepictigers.com
stockexchangejournal.com
subterranean-concave.com
suksangroup.com
swissbluetopaz.com
switer.shop
sysop4.com
system.eu.com
szhongbing.com
t-jon.com
tacaindo.net
tacom.tj
tankertelz.co
tataidc.com
teamveiw.com
tecnoxia.net
tel-xyz.fit
tenkids.net
terminavalley.com
thaicloudsolutions.com
thaikinghost.com
thaimonster.com
thegermainetruth.net
thehandmaderose.com
thepushcase.com
ticdns.com
tigo.bo
toledofibra.net.br
topdns.com
totaal.net
totalplay.net
tqh.ro
traderlearningcenter.com
tradeukraine.site
traveleza.com
trwww.com
tsuzakij.com
tullostrucking.com
turbinetrends.com
twincitiesdistinctivehomes.com
tylerfordonline.com
uiyum.com
ultragate.com
uneedacollie.com
unified.services
unite.services
urawasl.com
us.servername.us
vagebond.net
varvia.de
vbcploo.com
vdc.vn
vendimetry.com
vibrantwellnesscorp.com
virtualine.org
visit.docotor
viviotech.us
vlflgl.com
volganet.ru
vrns.net
vulterdi.edu
vvondertex.com
wallstreetsgossip.com
wamego.net
wanekoohost.com
wealthexpertisepro.com
web-login.eu
weblinkinternational.com
webnox.io
websale.net
welllivinghive.com
westparkcom.com
wetransfer-eu.com
wheelch.me
whoflew.com
whpservers.com
wisdomhard.com
wisewealthcircle.com
wisvis.com
wodeniowa.com
wordpresshosting.xyz
wsiph2.com
xnt.mx
xodiax.com
xpnuf.cn
xsfati.us.com
xspmail.jp
yourciviccompass.com
yourinvestworkbook.com
yoursitesecure.net
zerowebhosting.net
zmml.uk
znlc.jp
ztomy.com

View File

@@ -1,23 +0,0 @@
-applefibernet.com
-c3.net.pl
-celsiainternet.com
-clientes-izzi.mx
-clientes-zap-izzi.mx
-imnet.com.br
-mcnbd.com
-smile.com.bd
-tataidc.co.in
-veloxfiber.com.br
-wconect.com.br
.amazonaws.com
.cloudaccess.net
.ddnsgeek.com
.fastvps-server.com
.in-addr-arpa
.in-addr.arpa
.kasserver.com
.kinghost.net
.linode.com
.linodeusercontent.com
.na4u.ru
.sakura.ne.jp

View File

@@ -1,184 +0,0 @@
#!/usr/bin/env python3
from __future__ import annotations
import os
import csv
from pathlib import Path
from typing import Mapping, Iterable, Optional, Collection, Union, List, Dict
class CSVValidationError(Exception):
def __init__(self, errors: list[str]):
super().__init__("\n".join(errors))
self.errors = errors
def sort_csv(
filepath: Union[str, Path],
field: str,
*,
sort_field_value_must_be_unique: bool = True,
strip_whitespace: bool = True,
fields_to_lowercase: Optional[Iterable[str]] = None,
case_insensitive_sort: bool = False,
required_fields: Optional[Iterable[str]] = None,
allowed_values: Optional[Mapping[str, Collection[str]]] = None,
) -> List[Dict[str, str]]:
"""
Read a CSV, optionally normalize rows (strip whitespace, lowercase certain fields),
validate field values, and write the sorted CSV back to the same path.
- filepath: Path to the CSV to sort.
- field: The field name to sort by.
    - sort_field_value_must_be_unique: Error if a sort field value repeats across rows.
    - fields_to_lowercase: Permanently lowercases these field(s) in the data.
    - strip_whitespace: Remove whitespace at the beginning and end of field values.
- case_insensitive_sort: Ignore case when sorting without changing values.
- required_fields: A list of fields that must have data in all rows.
- allowed_values: A mapping of allowed values for fields.
"""
path = Path(filepath)
required_fields = set(required_fields or [])
lower_set = set(fields_to_lowercase or [])
allowed_sets = {k: set(v) for k, v in (allowed_values or {}).items()}
if sort_field_value_must_be_unique:
        seen_sort_field_values = set()
with path.open("r", newline="") as infile:
reader = csv.DictReader(infile)
fieldnames = reader.fieldnames or []
if field not in fieldnames:
raise CSVValidationError([f"Missing sort column: {field!r}"])
missing_headers = required_fields - set(fieldnames)
if missing_headers:
raise CSVValidationError(
[f"Missing required header(s): {sorted(missing_headers)}"]
)
rows = list(reader)
def normalize_row(row: Dict[str, str]) -> None:
if strip_whitespace:
for k, v in row.items():
if isinstance(v, str):
row[k] = v.strip()
for fld in lower_set:
if fld in row and isinstance(row[fld], str):
row[fld] = row[fld].lower()
def validate_row(
row: Dict[str, str], sort_field: str, line_no: int, errors: list[str]
) -> None:
if sort_field_value_must_be_unique:
            if row[sort_field] in seen_sort_field_values:
                errors.append(f"Line {line_no}: Duplicate row for '{row[sort_field]}'")
            else:
                seen_sort_field_values.add(row[sort_field])
for rf in required_fields:
val = row.get(rf)
if val is None or val == "":
errors.append(
f"Line {line_no}: Missing value for required field '{rf}'"
)
        for fld, allowed in allowed_sets.items():
            if fld in row:
                val = row[fld]
                if val not in allowed:
                    errors.append(
                        f"Line {line_no}: '{val}' is not an allowed value for '{fld}' "
                        f"(allowed: {sorted(allowed)})"
                    )
errors: list[str] = []
for idx, row in enumerate(rows, start=2): # header is line 1
normalize_row(row)
validate_row(row, field, idx, errors)
if errors:
raise CSVValidationError(errors)
def sort_key(r: Dict[str, str]):
v = r.get(field, "")
if isinstance(v, str) and case_insensitive_sort:
return v.casefold()
return v
rows.sort(key=sort_key)
with open(filepath, "w", newline="") as outfile:
writer = csv.DictWriter(outfile, fieldnames=fieldnames)
writer.writeheader()
        writer.writerows(rows)
    return rows
def sort_list_file(
filepath: Union[str, Path],
*,
lowercase: bool = True,
strip: bool = True,
deduplicate: bool = True,
remove_blank_lines: bool = True,
ending_newline: bool = True,
newline: Optional[str] = "\n",
):
"""Read a list from a file, sort it, optionally strip and deduplicate the values,
then write that list back to the file.
    - filepath: The path to the file.
    - lowercase: Lowercase all values prior to sorting.
    - remove_blank_lines: Remove any blank lines.
- ending_newline: End the file with a newline, even if remove_blank_lines is true.
- newline: The newline character to use.
"""
with open(filepath, mode="r", newline=newline) as infile:
lines = infile.readlines()
for i in range(len(lines)):
if lowercase:
lines[i] = lines[i].lower()
if strip:
lines[i] = lines[i].strip()
if deduplicate:
lines = list(set(lines))
if remove_blank_lines:
while "" in lines:
lines.remove("")
lines = sorted(lines)
if ending_newline:
if lines[-1] != "":
lines.append("")
with open(filepath, mode="w", newline=newline) as outfile:
outfile.write("\n".join(lines))
def _main():
map_file = "base_reverse_dns_map.csv"
map_key = "base_reverse_dns"
list_files = ["known_unknown_base_reverse_dns.txt", "psl_overrides.txt"]
types_file = "base_reverse_dns_types.txt"
    with open(types_file) as f:
        types = [line.strip() for line in f.readlines()]
    while "" in types:
        types.remove("")
map_allowed_values = {"Type": types}
for list_file in list_files:
if not os.path.exists(list_file):
print(f"Error: {list_file} does not exist")
exit(1)
sort_list_file(list_file)
if not os.path.exists(types_file):
print(f"Error: {types_file} does not exist")
exit(1)
sort_list_file(types_file, lowercase=False)
if not os.path.exists(map_file):
print(f"Error: {map_file} does not exist")
exit(1)
try:
sort_csv(map_file, map_key, allowed_values=map_allowed_values)
    except CSVValidationError as e:
        print(f"{map_file} did not validate: {e}")
        exit(1)
if __name__ == "__main__":
_main()
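
As a usage illustration for sort_csv above (the file name and allowed values are examples modeled on _main, not fixed API values):

try:
    sort_csv(
        "base_reverse_dns_map.csv",
        "base_reverse_dns",
        fields_to_lowercase=["base_reverse_dns"],
        required_fields=["name"],
        allowed_values={"Type": {"Email Provider", "ISP", "MSP"}},
    )
except CSVValidationError as e:
    for error in e.errors:  # each problem carries its 1-based line number
        print(error)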

View File

@@ -1,10 +1,6 @@
# -*- coding: utf-8 -*-
from __future__ import annotations
import json
from typing import Any
import boto3
from parsedmarc.log import logger
@@ -12,16 +8,16 @@ from parsedmarc.utils import human_timestamp_to_datetime
class S3Client(object):
"""A client for interacting with Amazon S3"""
"""A client for a Amazon S3"""
def __init__(
self,
bucket_name: str,
bucket_path: str,
region_name: str,
endpoint_url: str,
access_key_id: str,
secret_access_key: str,
bucket_name,
bucket_path,
region_name,
endpoint_url,
access_key_id,
secret_access_key,
):
"""
Initializes the S3Client
@@ -51,18 +47,18 @@ class S3Client(object):
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key,
)
self.bucket = self.s3.Bucket(self.bucket_name) # type: ignore
self.bucket = self.s3.Bucket(self.bucket_name)
def save_aggregate_report_to_s3(self, report: dict[str, Any]):
def save_aggregate_report_to_s3(self, report):
self.save_report_to_s3(report, "aggregate")
def save_forensic_report_to_s3(self, report: dict[str, Any]):
def save_forensic_report_to_s3(self, report):
self.save_report_to_s3(report, "forensic")
def save_smtp_tls_report_to_s3(self, report: dict[str, Any]):
def save_smtp_tls_report_to_s3(self, report):
self.save_report_to_s3(report, "smtp_tls")
def save_report_to_s3(self, report: dict[str, Any], report_type: str):
def save_report_to_s3(self, report, report_type):
if report_type == "smtp_tls":
report_date = report["begin_date"]
report_id = report["report_id"]
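For reference, a hypothetical construction of the S3Client shown above; all argument values are placeholders:

client = S3Client(
    bucket_name="dmarc-reports",
    bucket_path="parsedmarc",
    region_name="us-east-1",
    endpoint_url=None,       # None lets boto3 use the default S3 endpoint
    access_key_id=None,      # None falls back to the boto3 credential chain
    secret_access_key=None,
)
client.save_aggregate_report_to_s3(report)  # `report` is a parsed aggregate dict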

View File

@@ -1,16 +1,11 @@
# -*- coding: utf-8 -*-
from __future__ import annotations
import json
import socket
from typing import Any, Union
from urllib.parse import urlparse
import socket
import json
import requests
import urllib3
import requests
from parsedmarc.constants import USER_AGENT
from parsedmarc import __version__
from parsedmarc.log import logger
from parsedmarc.utils import human_timestamp_to_unix_timestamp
@@ -28,13 +23,7 @@ class HECClient(object):
# http://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTinput#services.2Fcollector
def __init__(
self,
url: str,
access_token: str,
index: str,
source: str = "parsedmarc",
verify=True,
timeout=60,
self, url, access_token, index, source="parsedmarc", verify=True, timeout=60
):
"""
Initializes the HECClient
@@ -48,9 +37,9 @@ class HECClient(object):
timeout (float): Number of seconds to wait for the server to send
data before giving up
"""
parsed_url = urlparse(url)
url = urlparse(url)
self.url = "{0}://{1}/services/collector/event/1.0".format(
parsed_url.scheme, parsed_url.netloc
url.scheme, url.netloc
)
self.access_token = access_token.lstrip("Splunk ")
self.index = index
@@ -59,19 +48,14 @@ class HECClient(object):
self.session = requests.Session()
self.timeout = timeout
self.session.verify = verify
self._common_data: dict[str, Union[str, int, float, dict]] = dict(
host=self.host, source=self.source, index=self.index
)
self._common_data = dict(host=self.host, source=self.source, index=self.index)
self.session.headers = {
"User-Agent": USER_AGENT,
"User-Agent": "parsedmarc/{0}".format(__version__),
"Authorization": "Splunk {0}".format(self.access_token),
}
def save_aggregate_reports_to_splunk(
self,
aggregate_reports: Union[list[dict[str, Any]], dict[str, Any]],
):
def save_aggregate_reports_to_splunk(self, aggregate_reports):
"""
Saves aggregate DMARC reports to Splunk
@@ -91,12 +75,9 @@ class HECClient(object):
json_str = ""
for report in aggregate_reports:
for record in report["records"]:
new_report: dict[str, Union[str, int, float, dict]] = dict()
new_report = dict()
for metadata in report["report_metadata"]:
new_report[metadata] = report["report_metadata"][metadata]
new_report["interval_begin"] = record["interval_begin"]
new_report["interval_end"] = record["interval_end"]
new_report["normalized_timespan"] = record["normalized_timespan"]
new_report["published_policy"] = report["policy_published"]
new_report["source_ip_address"] = record["source"]["ip_address"]
new_report["source_country"] = record["source"]["country"]
@@ -117,9 +98,7 @@ class HECClient(object):
new_report["spf_results"] = record["auth_results"]["spf"]
data["sourcetype"] = "dmarc:aggregate"
timestamp = human_timestamp_to_unix_timestamp(
new_report["interval_begin"]
)
timestamp = human_timestamp_to_unix_timestamp(new_report["begin_date"])
data["time"] = timestamp
data["event"] = new_report.copy()
json_str += "{0}\n".format(json.dumps(data))
@@ -134,10 +113,7 @@ class HECClient(object):
if response["code"] != 0:
raise SplunkError(response["text"])
def save_forensic_reports_to_splunk(
self,
forensic_reports: Union[list[dict[str, Any]], dict[str, Any]],
):
def save_forensic_reports_to_splunk(self, forensic_reports):
"""
Saves forensic DMARC reports to Splunk
@@ -171,9 +147,7 @@ class HECClient(object):
if response["code"] != 0:
raise SplunkError(response["text"])
def save_smtp_tls_reports_to_splunk(
self, reports: Union[list[dict[str, Any]], dict[str, Any]]
):
def save_smtp_tls_reports_to_splunk(self, reports):
"""
        Saves SMTP TLS reports to Splunk
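
A hypothetical HECClient setup matching the signature above (the URL and token are placeholders):

hec = HECClient(
    "https://splunk.example.com:8088",       # collector path is appended internally
    "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",  # HEC token (placeholder value)
    index="email",
    source="parsedmarc",
)
hec.save_aggregate_reports_to_splunk(aggregate_reports)  # parsed report dict(s)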

View File

@@ -1,15 +1,8 @@
# -*- coding: utf-8 -*-
from __future__ import annotations
import json
import logging
import logging.handlers
import socket
import ssl
import time
from typing import Any, Optional
import json
from parsedmarc import (
parsed_aggregate_reports_to_csv_rows,
@@ -21,161 +14,31 @@ from parsedmarc import (
class SyslogClient(object):
"""A client for Syslog"""
def __init__(
self,
server_name: str,
server_port: int,
protocol: str = "udp",
cafile_path: Optional[str] = None,
certfile_path: Optional[str] = None,
keyfile_path: Optional[str] = None,
timeout: float = 5.0,
retry_attempts: int = 3,
retry_delay: int = 5,
):
def __init__(self, server_name, server_port):
"""
Initializes the SyslogClient
Args:
server_name (str): The Syslog server
server_port (int): The Syslog port
protocol (str): The protocol to use: "udp", "tcp", or "tls" (Default: "udp")
cafile_path (str): Path to CA certificate file for TLS server verification (Optional)
certfile_path (str): Path to client certificate file for TLS authentication (Optional)
keyfile_path (str): Path to client private key file for TLS authentication (Optional)
timeout (float): Connection timeout in seconds for TCP/TLS (Default: 5.0)
retry_attempts (int): Number of retry attempts for failed connections (Default: 3)
retry_delay (int): Delay in seconds between retry attempts (Default: 5)
server_port (int): The Syslog UDP port
"""
self.server_name = server_name
self.server_port = server_port
self.protocol = protocol.lower()
self.timeout = timeout
self.retry_attempts = retry_attempts
self.retry_delay = retry_delay
self.logger = logging.getLogger("parsedmarc_syslog")
self.logger.setLevel(logging.INFO)
# Create the appropriate syslog handler based on protocol
log_handler = self._create_syslog_handler(
server_name,
server_port,
self.protocol,
cafile_path,
certfile_path,
keyfile_path,
timeout,
retry_attempts,
retry_delay,
)
log_handler = logging.handlers.SysLogHandler(address=(server_name, server_port))
self.logger.addHandler(log_handler)
def _create_syslog_handler(
self,
server_name: str,
server_port: int,
protocol: str,
cafile_path: Optional[str],
certfile_path: Optional[str],
keyfile_path: Optional[str],
timeout: float,
retry_attempts: int,
retry_delay: int,
) -> logging.handlers.SysLogHandler:
"""
Creates a SysLogHandler with the specified protocol and TLS settings
"""
if protocol == "udp":
# UDP protocol (default, backward compatible)
return logging.handlers.SysLogHandler(
address=(server_name, server_port),
socktype=socket.SOCK_DGRAM,
)
elif protocol in ["tcp", "tls"]:
# TCP or TLS protocol with retry logic
for attempt in range(1, retry_attempts + 1):
try:
if protocol == "tcp":
# TCP without TLS
handler = logging.handlers.SysLogHandler(
address=(server_name, server_port),
socktype=socket.SOCK_STREAM,
)
# Set timeout on the socket
if hasattr(handler, "socket") and handler.socket:
handler.socket.settimeout(timeout)
return handler
else:
# TLS protocol
# Create SSL context with secure defaults
ssl_context = ssl.create_default_context()
# Explicitly set minimum TLS version to 1.2 for security
ssl_context.minimum_version = ssl.TLSVersion.TLSv1_2
# Configure server certificate verification
if cafile_path:
ssl_context.load_verify_locations(cafile=cafile_path)
# Configure client certificate authentication
if certfile_path and keyfile_path:
ssl_context.load_cert_chain(
certfile=certfile_path,
keyfile=keyfile_path,
)
elif certfile_path or keyfile_path:
# Warn if only one of the two required parameters is provided
self.logger.warning(
"Both certfile_path and keyfile_path are required for "
"client certificate authentication. Client authentication "
"will not be used."
)
# Create TCP handler first
handler = logging.handlers.SysLogHandler(
address=(server_name, server_port),
socktype=socket.SOCK_STREAM,
)
# Wrap socket with TLS
if hasattr(handler, "socket") and handler.socket:
handler.socket = ssl_context.wrap_socket(
handler.socket,
server_hostname=server_name,
)
handler.socket.settimeout(timeout)
return handler
except Exception as e:
if attempt < retry_attempts:
self.logger.warning(
f"Syslog connection attempt {attempt}/{retry_attempts} failed: {e}. "
f"Retrying in {retry_delay} seconds..."
)
time.sleep(retry_delay)
else:
self.logger.error(
f"Syslog connection failed after {retry_attempts} attempts: {e}"
)
raise
else:
raise ValueError(
f"Invalid protocol '{protocol}'. Must be 'udp', 'tcp', or 'tls'."
)
def save_aggregate_report_to_syslog(self, aggregate_reports: list[dict[str, Any]]):
def save_aggregate_report_to_syslog(self, aggregate_reports):
rows = parsed_aggregate_reports_to_csv_rows(aggregate_reports)
for row in rows:
self.logger.info(json.dumps(row))
def save_forensic_report_to_syslog(self, forensic_reports: list[dict[str, Any]]):
def save_forensic_report_to_syslog(self, forensic_reports):
rows = parsed_forensic_reports_to_csv_rows(forensic_reports)
for row in rows:
self.logger.info(json.dumps(row))
def save_smtp_tls_report_to_syslog(self, smtp_tls_reports: list[dict[str, Any]]):
def save_smtp_tls_report_to_syslog(self, smtp_tls_reports):
rows = parsed_smtp_tls_reports_to_csv_rows(smtp_tls_reports)
for row in rows:
self.logger.info(json.dumps(row))
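
A hypothetical use of the protocol-aware SyslogClient variant shown above (host, port, and CA path are placeholders):

client = SyslogClient(
    "syslog.example.com",
    6514,                    # common port for syslog over TLS
    protocol="tls",
    cafile_path="/etc/ssl/certs/ca-bundle.pem",
    timeout=10.0,
)
client.save_aggregate_report_to_syslog(aggregate_reports)  # parsed report dicts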

View File

@@ -1,220 +0,0 @@
from __future__ import annotations
from typing import Any, Dict, List, Literal, Optional, TypedDict, Union
# NOTE: This module is intentionally Python 3.10 compatible.
# - No PEP 604 unions (A | B)
# - No typing.NotRequired / Required (3.11+) to avoid an extra dependency.
# For optional keys, use total=False TypedDicts.
ReportType = Literal["aggregate", "forensic", "smtp_tls"]
class AggregateReportMetadata(TypedDict):
org_name: str
org_email: str
org_extra_contact_info: Optional[str]
report_id: str
begin_date: str
end_date: str
timespan_requires_normalization: bool
original_timespan_seconds: int
errors: List[str]
class AggregatePolicyPublished(TypedDict):
domain: str
adkim: str
aspf: str
p: str
sp: str
pct: str
fo: str
class IPSourceInfo(TypedDict):
ip_address: str
country: Optional[str]
reverse_dns: Optional[str]
base_domain: Optional[str]
name: Optional[str]
type: Optional[str]
class AggregateAlignment(TypedDict):
spf: bool
dkim: bool
dmarc: bool
class AggregateIdentifiers(TypedDict):
header_from: str
envelope_from: Optional[str]
envelope_to: Optional[str]
class AggregatePolicyOverrideReason(TypedDict):
type: Optional[str]
comment: Optional[str]
class AggregateAuthResultDKIM(TypedDict):
domain: str
result: str
selector: str
class AggregateAuthResultSPF(TypedDict):
domain: str
result: str
scope: str
class AggregateAuthResults(TypedDict):
dkim: List[AggregateAuthResultDKIM]
spf: List[AggregateAuthResultSPF]
class AggregatePolicyEvaluated(TypedDict):
disposition: str
dkim: str
spf: str
policy_override_reasons: List[AggregatePolicyOverrideReason]
class AggregateRecord(TypedDict):
interval_begin: str
interval_end: str
source: IPSourceInfo
count: int
alignment: AggregateAlignment
policy_evaluated: AggregatePolicyEvaluated
disposition: str
identifiers: AggregateIdentifiers
auth_results: AggregateAuthResults
class AggregateReport(TypedDict):
xml_schema: str
report_metadata: AggregateReportMetadata
policy_published: AggregatePolicyPublished
records: List[AggregateRecord]
class EmailAddress(TypedDict):
display_name: Optional[str]
address: str
local: Optional[str]
domain: Optional[str]
class EmailAttachment(TypedDict, total=False):
filename: Optional[str]
mail_content_type: Optional[str]
sha256: Optional[str]
ParsedEmail = TypedDict(
"ParsedEmail",
{
# This is a lightly-specified version of mailsuite/mailparser JSON.
# It focuses on the fields parsedmarc uses in forensic handling.
"headers": Dict[str, Any],
"subject": Optional[str],
"filename_safe_subject": Optional[str],
"date": Optional[str],
"from": EmailAddress,
"to": List[EmailAddress],
"cc": List[EmailAddress],
"bcc": List[EmailAddress],
"attachments": List[EmailAttachment],
"body": Optional[str],
"has_defects": bool,
"defects": Any,
"defects_categories": Any,
},
total=False,
)
class ForensicReport(TypedDict):
feedback_type: Optional[str]
user_agent: Optional[str]
version: Optional[str]
original_envelope_id: Optional[str]
original_mail_from: Optional[str]
original_rcpt_to: Optional[str]
arrival_date: str
arrival_date_utc: str
authentication_results: Optional[str]
delivery_result: Optional[str]
auth_failure: List[str]
authentication_mechanisms: List[str]
dkim_domain: Optional[str]
reported_domain: str
sample_headers_only: bool
source: IPSourceInfo
sample: str
parsed_sample: ParsedEmail
class SMTPTLSFailureDetails(TypedDict):
result_type: str
failed_session_count: int
class SMTPTLSFailureDetailsOptional(SMTPTLSFailureDetails, total=False):
sending_mta_ip: str
receiving_ip: str
receiving_mx_hostname: str
receiving_mx_helo: str
additional_info_uri: str
failure_reason_code: str
ip_address: str
class SMTPTLSPolicySummary(TypedDict):
policy_domain: str
policy_type: str
successful_session_count: int
failed_session_count: int
class SMTPTLSPolicy(SMTPTLSPolicySummary, total=False):
policy_strings: List[str]
mx_host_patterns: List[str]
failure_details: List[SMTPTLSFailureDetailsOptional]
class SMTPTLSReport(TypedDict):
organization_name: str
begin_date: str
end_date: str
contact_info: Union[str, List[str]]
report_id: str
policies: List[SMTPTLSPolicy]
class AggregateParsedReport(TypedDict):
report_type: Literal["aggregate"]
report: AggregateReport
class ForensicParsedReport(TypedDict):
report_type: Literal["forensic"]
report: ForensicReport
class SMTPTLSParsedReport(TypedDict):
report_type: Literal["smtp_tls"]
report: SMTPTLSReport
ParsedReport = Union[AggregateParsedReport, ForensicParsedReport, SMTPTLSParsedReport]
class ParsingResults(TypedDict):
aggregate_reports: List[AggregateReport]
forensic_reports: List[ForensicReport]
smtp_tls_reports: List[SMTPTLSReport]
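
As a quick illustration of how these shapes compose, a synthetic SMTPTLSReport literal (all values made up):

report: SMTPTLSReport = {
    "organization_name": "Example Corp",
    "begin_date": "2025-03-01 00:00:00",
    "end_date": "2025-03-01 23:59:59",
    "contact_info": "smtp-tls-reports@example.com",
    "report_id": "2025-03-01T00:00:00Z_example.com",
    "policies": [
        {
            "policy_domain": "example.com",
            "policy_type": "sts",
            "successful_session_count": 42,
            "failed_session_count": 1,
            "failure_details": [
                {"result_type": "certificate-expired", "failed_session_count": 1}
            ],
        }
    ],
}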

View File

@@ -1,59 +1,48 @@
# -*- coding: utf-8 -*-
"""Utility functions that might be useful for other projects"""
from __future__ import annotations
import base64
import csv
import hashlib
import io
import json
import logging
import mailbox
import os
import re
import shutil
import subprocess
from datetime import datetime
from datetime import timezone
from datetime import timedelta
from collections import OrderedDict
import tempfile
from datetime import datetime, timedelta, timezone
from typing import Optional, TypedDict, Union, cast
import subprocess
import shutil
import mailparser
from expiringdict import ExpiringDict
import json
import hashlib
import base64
import mailbox
import re
import csv
import io
try:
from importlib.resources import files
import importlib.resources as pkg_resources
except ImportError:
    # Try backported to PY<3.9 `importlib_resources`
    from importlib_resources import files
# Try backported to PY<37 `importlib_resources`
import importlib_resources as pkg_resources
import dns.exception
import dns.resolver
from dateutil.parser import parse as parse_date
import dns.reversename
import dns.resolver
import dns.exception
import geoip2.database
import geoip2.errors
import publicsuffixlist
import requests
from dateutil.parser import parse as parse_date
from parsedmarc.log import logger
import parsedmarc.resources.dbip
import parsedmarc.resources.maps
from parsedmarc.constants import USER_AGENT
from parsedmarc.log import logger
parenthesis_regex = re.compile(r"\s*\(.*\)\s*")
null_file = open(os.devnull, "w")
mailparser_logger = logging.getLogger("mailparser")
mailparser_logger.setLevel(logging.CRITICAL)
psl = publicsuffixlist.PublicSuffixList()
psl_overrides_path = str(files(parsedmarc.resources.maps).joinpath("psl_overrides.txt"))
with open(psl_overrides_path) as f:
psl_overrides = [line.rstrip() for line in f.readlines()]
while "" in psl_overrides:
psl_overrides.remove("")
class EmailParserError(RuntimeError):
@@ -64,49 +53,31 @@ class DownloadError(RuntimeError):
"""Raised when an error occurs when downloading a file"""
class ReverseDNSService(TypedDict):
name: str
type: Optional[str]
ReverseDNSMap = dict[str, ReverseDNSService]
class IPAddressInfo(TypedDict):
ip_address: str
reverse_dns: Optional[str]
country: Optional[str]
base_domain: Optional[str]
name: Optional[str]
type: Optional[str]
def decode_base64(data: str) -> bytes:
def decode_base64(data):
"""
Decodes a base64 string, with padding being optional
Args:
data (str): A base64 encoded string
data: A base64 encoded string
Returns:
bytes: The decoded bytes
"""
data_bytes = bytes(data, encoding="ascii")
missing_padding = len(data_bytes) % 4
data = bytes(data, encoding="ascii")
missing_padding = len(data) % 4
if missing_padding != 0:
data_bytes += b"=" * (4 - missing_padding)
return base64.b64decode(data_bytes)
data += b"=" * (4 - missing_padding)
return base64.b64decode(data)
def get_base_domain(domain: str) -> Optional[str]:
def get_base_domain(domain):
"""
Gets the base domain name for the given domain
.. note::
Results are based on a list of public domain suffixes at
https://publicsuffix.org/list/public_suffix_list.dat and overrides included in
parsedmarc.resources.maps.psl_overrides.txt
https://publicsuffix.org/list/public_suffix_list.dat.
Args:
domain (str): A domain or subdomain
@@ -115,22 +86,11 @@ def get_base_domain(domain: str) -> Optional[str]:
str: The base domain of the given domain
"""
domain = domain.lower()
publicsuffix = psl.privatesuffix(domain)
for override in psl_overrides:
if domain.endswith(override):
return override.strip(".").strip("-")
return publicsuffix
psl = publicsuffixlist.PublicSuffixList()
return psl.privatesuffix(domain)
def query_dns(
domain: str,
record_type: str,
*,
cache: Optional[ExpiringDict] = None,
nameservers: Optional[list[str]] = None,
timeout: float = 2.0,
) -> list[str]:
def query_dns(domain, record_type, cache=None, nameservers=None, timeout=2.0):
"""
Queries DNS
@@ -149,9 +109,9 @@ def query_dns(
record_type = record_type.upper()
cache_key = "{0}_{1}".format(domain, record_type)
if cache:
cached_records = cache.get(cache_key, None)
if isinstance(cached_records, list):
return cast(list[str], cached_records)
records = cache.get(cache_key, None)
if records:
return records
resolver = dns.resolver.Resolver()
timeout = float(timeout)
@@ -165,25 +125,33 @@ def query_dns(
resolver.nameservers = nameservers
resolver.timeout = timeout
resolver.lifetime = timeout
records = list(
map(
lambda r: r.to_text().replace('"', "").rstrip("."),
resolver.resolve(domain, record_type, lifetime=timeout),
if record_type == "TXT":
resource_records = list(
map(
lambda r: r.strings,
resolver.resolve(domain, record_type, lifetime=timeout),
)
)
_resource_record = [
resource_record[0][:0].join(resource_record)
for resource_record in resource_records
if resource_record
]
records = [r.decode() for r in _resource_record]
else:
records = list(
map(
lambda r: r.to_text().replace('"', "").rstrip("."),
resolver.resolve(domain, record_type, lifetime=timeout),
)
)
)
if cache:
cache[cache_key] = records
return records
def get_reverse_dns(
ip_address,
*,
cache: Optional[ExpiringDict] = None,
nameservers: Optional[list[str]] = None,
timeout: float = 2.0,
) -> Optional[str]:
def get_reverse_dns(ip_address, cache=None, nameservers=None, timeout=2.0):
"""
Resolves an IP address to a hostname using a reverse DNS query
@@ -201,7 +169,7 @@ def get_reverse_dns(
try:
address = dns.reversename.from_address(ip_address)
hostname = query_dns(
str(address), "PTR", cache=cache, nameservers=nameservers, timeout=timeout
address, "PTR", cache=cache, nameservers=nameservers, timeout=timeout
)[0]
except dns.exception.DNSException as e:
@@ -211,7 +179,7 @@ def get_reverse_dns(
return hostname
def timestamp_to_datetime(timestamp: int) -> datetime:
def timestamp_to_datetime(timestamp):
"""
Converts a UNIX/DMARC timestamp to a Python ``datetime`` object
@@ -224,7 +192,7 @@ def timestamp_to_datetime(timestamp: int) -> datetime:
return datetime.fromtimestamp(int(timestamp))
def timestamp_to_human(timestamp: int) -> str:
def timestamp_to_human(timestamp):
"""
Converts a UNIX/DMARC timestamp to a human-readable string
@@ -237,9 +205,7 @@ def timestamp_to_human(timestamp: int) -> str:
return timestamp_to_datetime(timestamp).strftime("%Y-%m-%d %H:%M:%S")
def human_timestamp_to_datetime(
human_timestamp: str, *, to_utc: bool = False
) -> datetime:
def human_timestamp_to_datetime(human_timestamp, to_utc=False):
"""
Converts a human-readable timestamp into a Python ``datetime`` object
@@ -258,7 +224,7 @@ def human_timestamp_to_datetime(
return dt.astimezone(timezone.utc) if to_utc else dt
def human_timestamp_to_unix_timestamp(human_timestamp: str) -> int:
def human_timestamp_to_unix_timestamp(human_timestamp):
"""
Converts a human-readable timestamp into a UNIX timestamp
@@ -269,12 +235,10 @@ def human_timestamp_to_unix_timestamp(human_timestamp: str) -> int:
float: The converted timestamp
"""
human_timestamp = human_timestamp.replace("T", " ")
return int(human_timestamp_to_datetime(human_timestamp).timestamp())
return human_timestamp_to_datetime(human_timestamp).timestamp()
def get_ip_address_country(
ip_address: str, *, db_path: Optional[str] = None
) -> Optional[str]:
def get_ip_address_country(ip_address, db_path=None):
"""
Returns the ISO code for the country associated
with the given IPv4 or IPv6 address
@@ -301,7 +265,7 @@ def get_ip_address_country(
]
if db_path is not None:
if not os.path.isfile(db_path):
if os.path.isfile(db_path) is False:
db_path = None
logger.warning(
f"No file exists at {db_path}. Falling back to an "
@@ -316,13 +280,14 @@ def get_ip_address_country(
break
if db_path is None:
db_path = str(
files(parsedmarc.resources.dbip).joinpath("dbip-country-lite.mmdb")
)
with pkg_resources.path(
parsedmarc.resources.dbip, "dbip-country-lite.mmdb"
) as path:
db_path = path
db_age = datetime.now() - datetime.fromtimestamp(os.stat(db_path).st_mtime)
if db_age > timedelta(days=30):
logger.warning("IP database is more than a month old")
db_age = datetime.now() - datetime.fromtimestamp(os.stat(db_path).st_mtime)
if db_age > timedelta(days=30):
logger.warning("IP database is more than a month old")
db_reader = geoip2.database.Reader(db_path)
@@ -338,13 +303,12 @@ def get_ip_address_country(
def get_service_from_reverse_dns_base_domain(
base_domain,
*,
always_use_local_file: bool = False,
local_file_path: Optional[str] = None,
url: Optional[str] = None,
offline: bool = False,
reverse_dns_map: Optional[ReverseDNSMap] = None,
) -> ReverseDNSService:
always_use_local_file=False,
local_file_path=None,
url=None,
offline=False,
reverse_dns_map=None,
):
"""
Returns the service name of a given base domain name from reverse DNS.
@@ -361,6 +325,12 @@ def get_service_from_reverse_dns_base_domain(
the supplied reverse_dns_base_domain and the type will be None
"""
def load_csv(_csv_file):
reader = csv.DictReader(_csv_file)
for row in reader:
key = row["base_reverse_dns"].lower().strip()
reverse_dns_map[key] = dict(name=row["name"], type=row["type"])
base_domain = base_domain.lower().strip()
if url is None:
url = (
@@ -368,71 +338,49 @@ def get_service_from_reverse_dns_base_domain(
"/parsedmarc/master/parsedmarc/"
"resources/maps/base_reverse_dns_map.csv"
)
reverse_dns_map_value: ReverseDNSMap
if reverse_dns_map is None:
reverse_dns_map_value = {}
else:
reverse_dns_map_value = reverse_dns_map
def load_csv(_csv_file):
reader = csv.DictReader(_csv_file)
for row in reader:
key = row["base_reverse_dns"].lower().strip()
reverse_dns_map_value[key] = {
"name": row["name"],
"type": row["type"],
}
reverse_dns_map = dict()
csv_file = io.StringIO()
if not (offline or always_use_local_file) and len(reverse_dns_map_value) == 0:
if not (offline or always_use_local_file) and len(reverse_dns_map) == 0:
try:
logger.debug(f"Trying to fetch reverse DNS map from {url}...")
headers = {"User-Agent": USER_AGENT}
response = requests.get(url, headers=headers)
response = requests.get(url)
response.raise_for_status()
csv_file.write(response.text)
csv_file.seek(0)
load_csv(csv_file)
except requests.exceptions.RequestException as e:
logger.warning(f"Failed to fetch reverse DNS map: {e}")
except Exception:
logger.warning("Not a valid CSV file")
csv_file.seek(0)
logging.debug("Response body:")
logger.debug(csv_file.read())
if len(reverse_dns_map_value) == 0:
if len(reverse_dns_map) == 0:
logger.info("Loading included reverse DNS map...")
path = str(
files(parsedmarc.resources.maps).joinpath("base_reverse_dns_map.csv")
)
if local_file_path is not None:
path = local_file_path
with open(path) as csv_file:
load_csv(csv_file)
service: ReverseDNSService
with pkg_resources.path(
parsedmarc.resources.maps, "base_reverse_dns_map.csv"
) as path:
if local_file_path is not None:
path = local_file_path
with open(path) as csv_file:
load_csv(csv_file)
try:
service = reverse_dns_map_value[base_domain]
service = reverse_dns_map[base_domain]
except KeyError:
service = {"name": base_domain, "type": None}
service = dict(name=base_domain, type=None)
return service
def get_ip_address_info(
ip_address,
*,
ip_db_path: Optional[str] = None,
reverse_dns_map_path: Optional[str] = None,
always_use_local_files: bool = False,
reverse_dns_map_url: Optional[str] = None,
cache: Optional[ExpiringDict] = None,
reverse_dns_map: Optional[ReverseDNSMap] = None,
offline: bool = False,
nameservers: Optional[list[str]] = None,
timeout: float = 2.0,
) -> IPAddressInfo:
ip_db_path=None,
reverse_dns_map_path=None,
always_use_local_files=False,
reverse_dns_map_url=None,
cache=None,
reverse_dns_map=None,
offline=False,
nameservers=None,
timeout=2.0,
):
"""
Returns reverse DNS and country information for the given IP address
@@ -450,27 +398,17 @@ def get_ip_address_info(
timeout (float): Sets the DNS timeout in seconds
Returns:
dict: ``ip_address``, ``reverse_dns``, ``country``
OrderedDict: ``ip_address``, ``reverse_dns``
"""
ip_address = ip_address.lower()
if cache is not None:
cached_info = cache.get(ip_address, None)
if (
cached_info
and isinstance(cached_info, dict)
and "ip_address" in cached_info
):
info = cache.get(ip_address, None)
if info:
logger.debug(f"IP address {ip_address} was found in cache")
return cast(IPAddressInfo, cached_info)
info: IPAddressInfo = {
"ip_address": ip_address,
"reverse_dns": None,
"country": None,
"base_domain": None,
"name": None,
"type": None,
}
return info
info = OrderedDict()
info["ip_address"] = ip_address
if offline:
reverse_dns = None
else:
@@ -480,6 +418,9 @@ def get_ip_address_info(
country = get_ip_address_country(ip_address, db_path=ip_db_path)
info["country"] = country
info["reverse_dns"] = reverse_dns
info["base_domain"] = None
info["name"] = None
info["type"] = None
if reverse_dns is not None:
base_domain = get_base_domain(reverse_dns)
if base_domain is not None:
@@ -504,7 +445,7 @@ def get_ip_address_info(
return info
def parse_email_address(original_address: str) -> dict[str, Optional[str]]:
def parse_email_address(original_address):
if original_address[0] == "":
display_name = None
else:
@@ -517,15 +458,17 @@ def parse_email_address(original_address: str) -> dict[str, Optional[str]]:
local = address_parts[0].lower()
domain = address_parts[-1].lower()
return {
"display_name": display_name,
"address": address,
"local": local,
"domain": domain,
}
return OrderedDict(
[
("display_name", display_name),
("address", address),
("local", local),
("domain", domain),
]
)
def get_filename_safe_string(string: str) -> str:
def get_filename_safe_string(string):
"""
Converts a string to a string that is safe for a filename
@@ -547,7 +490,7 @@ def get_filename_safe_string(string: str) -> str:
return string
def is_mbox(path: str) -> bool:
def is_mbox(path):
"""
Checks if the given content is an MBOX mailbox file
@@ -568,7 +511,7 @@ def is_mbox(path: str) -> bool:
return _is_mbox
def is_outlook_msg(content) -> bool:
def is_outlook_msg(content):
"""
Checks if the given content is an Outlook msg OLE/MSG file
@@ -583,7 +526,7 @@ def is_outlook_msg(content) -> bool:
)
def convert_outlook_msg(msg_bytes: bytes) -> bytes:
def convert_outlook_msg(msg_bytes):
"""
Uses the ``msgconvert`` Perl utility to convert an Outlook MS file to
standard RFC 822 format
@@ -592,7 +535,7 @@ def convert_outlook_msg(msg_bytes: bytes) -> bytes:
msg_bytes (bytes): the content of the .msg file
Returns:
        An RFC 822 bytes payload
        An RFC 822 string
"""
if not is_outlook_msg(msg_bytes):
raise ValueError("The supplied bytes are not an Outlook MSG file")
@@ -619,9 +562,7 @@ def convert_outlook_msg(msg_bytes: bytes) -> bytes:
return rfc822
def parse_email(
data: Union[bytes, str], *, strip_attachment_payloads: bool = False
) -> dict:
def parse_email(data, strip_attachment_payloads=False):
"""
A simplified email parser

View File

@@ -1,25 +1,12 @@
# -*- coding: utf-8 -*-
from __future__ import annotations
from typing import Any, Optional, Union
import requests
from parsedmarc import logger
from parsedmarc.constants import USER_AGENT
class WebhookClient(object):
"""A client for webhooks"""
def __init__(
self,
aggregate_url: str,
forensic_url: str,
smtp_tls_url: str,
timeout: Optional[int] = 60,
):
def __init__(self, aggregate_url, forensic_url, smtp_tls_url, timeout=60):
"""
Initializes the WebhookClient
Args:
@@ -34,31 +21,29 @@ class WebhookClient(object):
self.timeout = timeout
self.session = requests.Session()
self.session.headers = {
"User-Agent": USER_AGENT,
"User-Agent": "parsedmarc",
"Content-Type": "application/json",
}
def save_forensic_report_to_webhook(self, report: str):
def save_forensic_report_to_webhook(self, report):
try:
self._send_to_webhook(self.forensic_url, report)
except Exception as error_:
logger.error("Webhook Error: {0}".format(error_.__str__()))
def save_smtp_tls_report_to_webhook(self, report: str):
def save_smtp_tls_report_to_webhook(self, report):
try:
self._send_to_webhook(self.smtp_tls_url, report)
except Exception as error_:
logger.error("Webhook Error: {0}".format(error_.__str__()))
def save_aggregate_report_to_webhook(self, report: str):
def save_aggregate_report_to_webhook(self, report):
try:
self._send_to_webhook(self.aggregate_url, report)
except Exception as error_:
logger.error("Webhook Error: {0}".format(error_.__str__()))
def _send_to_webhook(
self, webhook_url: str, payload: Union[bytes, str, dict[str, Any]]
):
def _send_to_webhook(self, webhook_url, payload):
try:
self.session.post(webhook_url, data=payload, timeout=self.timeout)
except Exception as error_:

View File

@@ -2,7 +2,6 @@
requires = [
"hatchling>=1.27.0",
]
requires_python = ">=3.10,<3.15"
build-backend = "hatchling.build"
[project]
@@ -29,15 +28,14 @@ classifiers = [
"Operating System :: OS Independent",
"Programming Language :: Python :: 3"
]
requires-python = ">=3.10"
dependencies = [
"azure-identity>=1.8.0",
"azure-monitor-ingestion>=1.0.0",
"boto3>=1.16.63",
"dateparser>=1.1.1",
"dnspython>=2.0.0",
"elasticsearch-dsl==7.4.0",
"elasticsearch<7.14.0",
"elasticsearch-dsl==8.17.1",
"elasticsearch<=8.0.0",
"expiringdict>=1.1.4",
"geoip2>=3.0.0",
"google-api-core>=2.4.0",
@@ -45,10 +43,10 @@ dependencies = [
"google-auth-httplib2>=0.1.0",
"google-auth-oauthlib>=0.4.6",
"google-auth>=2.3.3",
"imapclient>=3.1.0",
"imapclient>=2.1.0",
"kafka-python-ng>=2.2.2",
"lxml>=4.4.0",
"mailsuite>=1.11.2",
"mailsuite>=1.9.18",
"msgraph-core==0.2.2",
"opensearch-py>=2.4.2,<=3.0.0",
"publicsuffixlist>=0.10.0",
@@ -57,7 +55,6 @@ dependencies = [
"tqdm>=4.31.1",
"urllib3>=1.25.7",
"xmltodict>=0.12.0",
"PyYAML>=6.0.3"
]
[project.optional-dependencies]
@@ -79,20 +76,9 @@ parsedmarc = "parsedmarc.cli:_main"
Homepage = "https://domainaware.github.io/parsedmarc"
[tool.hatch.version]
path = "parsedmarc/constants.py"
path = "parsedmarc/__init__.py"
[tool.hatch.build.targets.sdist]
include = [
"/parsedmarc",
]
[tool.hatch.build]
exclude = [
"base_reverse_dns.csv",
"find_bad_utf8.py",
"find_unknown_base_reverse_dns.py",
"unknown_base_reverse_dns.csv",
"sortmaps.py",
"README.md",
"*.bak"
]

25
sortmaps.py Executable file
View File

@@ -0,0 +1,25 @@
#!/usr/bin/env python3
import os
import glob
import csv
maps_dir = os.path.join("parsedmarc", "resources", "maps")
csv_files = glob.glob(os.path.join(maps_dir, "*.csv"))
def sort_csv(filepath, column=0):
with open(filepath, mode="r", newline="") as infile:
reader = csv.reader(infile)
header = next(reader)
sorted_rows = sorted(reader, key=lambda row: row[column])
with open(filepath, mode="w", newline="\n") as outfile:
writer = csv.writer(outfile)
writer.writerow(header)
writer.writerows(sorted_rows)
for csv_file in csv_files:
sort_csv(csv_file)
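
If a map ever needs to be ordered by a different column, the same helper can be pointed at it directly (hypothetical call):

sort_csv(os.path.join(maps_dir, "base_reverse_dns_map.csv"), column=1)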

View File

@@ -1,107 +0,0 @@
<form version="1.1" theme="dark">
<label>SMTP TLS Reporting</label>
<fieldset submitButton="false" autoRun="true">
<input type="time" token="time">
<label></label>
<default>
<earliest>-7d@h</earliest>
<latest>now</latest>
</default>
</input>
<input type="text" token="organization_name" searchWhenChanged="true">
<label>Organization name</label>
<default>*</default>
<initialValue>*</initialValue>
</input>
<input type="text" token="policy_domain">
<label>Policy domain</label>
<default>*</default>
<initialValue>*</initialValue>
</input>
<input type="dropdown" token="policy_type" searchWhenChanged="true">
<label>Policy type</label>
<choice value="*">Any</choice>
<choice value="tlsa">tlsa</choice>
<choice value="sts">sts</choice>
<choice value="no-policy-found">no-policy-found</choice>
<default>*</default>
<initialValue>*</initialValue>
</input>
</fieldset>
<row>
<panel>
<title>Reporting organizations</title>
<table>
<search>
<query>index=email sourcetype=smtp:tls organization_name=$organization_name$ policies{}.policy_domain=$policy_domain$
| rename policies{}.policy_domain as policy_domain
| rename policies{}.policy_type as policy_type
| rename policies{}.failed_session_count as failed_sessions
| rename policies{}.failure_details{}.failed_session_count as failed_sessions
| rename policies{}.successful_session_count as successful_sessions
| rename policies{}.failure_details{}.sending_mta_ip as sending_mta_ip
| rename policies{}.failure_details{}.receiving_ip as receiving_ip
| rename policies{}.failure_details{}.receiving_mx_hostname as receiving_mx_hostname
| rename policies{}.failure_details{}.result_type as failure_type
| fillnull value=0 failed_sessions
| stats sum(failed_sessions) as failed_sessions sum(successful_sessions) as successful_sessions by organization_name
| sort -successful_sessions 0</query>
<earliest>$time.earliest$</earliest>
<latest>$time.latest$</latest>
</search>
<option name="drilldown">none</option>
<option name="refresh.display">progressbar</option>
</table>
</panel>
<panel>
<title>Domains</title>
<table>
<search>
<query>index=email sourcetype=smtp:tls organization_name=$organization_name$ policies{}.policy_domain=$policy_domain$
| rename policies{}.policy_domain as policy_domain
| rename policies{}.policy_type as policy_type
| rename policies{}.failed_session_count as failed_sessions
| rename policies{}.failure_details{}.failed_session_count as failed_sessions
| rename policies{}.successful_session_count as successful_sessions
| rename policies{}.failure_details{}.sending_mta_ip as sending_mta_ip
| rename policies{}.failure_details{}.receiving_ip as receiving_ip
| rename policies{}.failure_details{}.receiving_mx_hostname as receiving_mx_hostname
| rename policies{}.failure_details{}.result_type as failure_type
| fillnull value=0 failed_sessions
| stats sum(failed_sessions) as failed_sessions sum(successful_sessions) as successful_sessions by policy_domain
| sort -successful_sessions 0</query>
<earliest>$time.earliest$</earliest>
<latest>$time.latest$</latest>
</search>
<option name="drilldown">none</option>
<option name="refresh.display">progressbar</option>
</table>
</panel>
</row>
<row>
<panel>
<title>Failure details</title>
<table>
<search>
<query>index=email sourcetype=smtp:tls organization_name=$organization_name$ policies{}.policy_domain=$policy_domain$ policies{}.failure_details{}.result_type=*
| rename policies{}.policy_domain as policy_domain
| rename policies{}.policy_type as policy_type
| rename policies{}.failed_session_count as failed_sessions
| rename policies{}.failure_details{}.failed_session_count as failed_sessions
| rename policies{}.successful_session_count as successful_sessions
| rename policies{}.failure_details{}.sending_mta_ip as sending_mta_ip
| rename policies{}.failure_details{}.receiving_ip as receiving_ip
| rename policies{}.failure_details{}.receiving_mx_hostname as receiving_mx_hostname
| fillnull value=0 failed_sessions
| rename policies{}.failure_details{}.result_type as failure_type
| table _time organization_name policy_domain policy_type failed_sessions successful_sessions sending_mta_ip receiving_ip receiving_mx_hostname failure_type
| sort by -_time 0</query>
<earliest>$time.earliest$</earliest>
<latest>$time.latest$</latest>
</search>
<option name="drilldown">none</option>
<option name="refresh.display">progressbar</option>
</table>
</panel>
</row>
</form>

1405
tests.py Executable file → Normal file

File diff suppressed because it is too large