Mirror of https://github.com/domainaware/parsedmarc.git (synced 2026-02-19 16:06:22 +00:00)

Compare commits: 50 commits
| SHA1 |
|---|
| 38a3d4eaae |
| a05c230152 |
| 17bdc3a134 |
| 858be00f22 |
| 597ca64f9f |
| c5dbe2c4dc |
| 082b3d355f |
| 2a7ce47bb1 |
| 9882405d96 |
| fce84763b9 |
| 8a299b8600 |
| b4c2b21547 |
| 865c249437 |
| 013859f10e |
| 6d4a31a120 |
| 45d3dc3b2e |
| 4bbd97dbaa |
| 5df152d469 |
| d990bef342 |
| caf77ca6d4 |
| 4b3d32c5a6 |
| 5df5c10f80 |
| 308d4657ab |
| 0f74e33094 |
| 9f339e11f5 |
| 391e84b717 |
| 8bf06ce5af |
| 2b7ae50a27 |
| 3feb478793 |
| 01630bb61c |
| 39347cb244 |
| ed25526d59 |
| 880d7110fe |
| d62001f5a4 |
| 0720bffcb6 |
| fecd55a97d |
| a121306eed |
| 980c9c7904 |
| 963f5d796f |
| 6532f3571b |
| ea878443a8 |
| 9f6de41958 |
| 119192701c |
| 1d650be48a |
| a85553fb18 |
| 5975d8eb21 |
| 87ae6175f2 |
| 68b93ed580 |
| 55508b513b |
| 71511c0cfc |
2 .github/workflows/python-tests.yml vendored
@@ -30,7 +30,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python-version: ["3.9", "3.10", "3.11", "3.12"]
+        python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]

     steps:
       - uses: actions/checkout@v4
5 .gitignore vendored
@@ -106,7 +106,7 @@ ENV/
 .idea/

 # VS Code launch config
-.vscode/launch.json
+#.vscode/launch.json

 # Visual Studio Code settings
 #.vscode/
@@ -142,3 +142,6 @@ scratch.py

 parsedmarc/resources/maps/base_reverse_dns.csv
 parsedmarc/resources/maps/unknown_base_reverse_dns.csv
+parsedmarc/resources/maps/sus_domains.csv
+parsedmarc/resources/maps/unknown_domains.txt
+*.bak
54 .vscode/launch.json vendored Normal file
@@ -0,0 +1,54 @@
{
  // Use IntelliSense to learn about possible attributes.
  // Hover to view descriptions of existing attributes.
  // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
  "version": "0.2.0",
  "configurations": [
    {
      "name": "Python Debugger: Current File",
      "type": "debugpy",
      "request": "launch",
      "program": "${file}",
      "console": "integratedTerminal"
    },
    {
      "name": "tests.py",
      "type": "debugpy",
      "request": "launch",
      "program": "tests.py",
      "console": "integratedTerminal"
    },
    {
      "name": "sample.eml",
      "type": "debugpy",
      "request": "launch",
      "module": "parsedmarc.cli",
      "args": ["samples/private/sample.eml"]
    },
    {
      "name": "find_sus_domains.py",
      "type": "debugpy",
      "request": "launch",
      "program": "find_sus_domains.py",
      "args": ["-i", "unknown_domains.txt", "-o", "sus_domains.csv"],
      "cwd": "${workspaceFolder}/parsedmarc/resources/maps",
      "console": "integratedTerminal"
    },
    {
      "name": "sortlists.py",
      "type": "debugpy",
      "request": "launch",
      "program": "sortlists.py",
      "cwd": "${workspaceFolder}/parsedmarc/resources/maps",
      "console": "integratedTerminal"
    },
    {
      "name": "find_unknown_base_reverse_dns.py",
      "type": "debugpy",
      "request": "launch",
      "program": "find_unknown_base_reverse_dns.py",
      "cwd": "${workspaceFolder}/parsedmarc/resources/maps",
      "console": "integratedTerminal"
    }
  ]
}
12 .vscode/settings.json vendored
@@ -13,6 +13,7 @@
|
||||
"automodule",
|
||||
"backported",
|
||||
"bellsouth",
|
||||
"boto",
|
||||
"brakhane",
|
||||
"Brightmail",
|
||||
"CEST",
|
||||
@@ -36,6 +37,7 @@
|
||||
"expiringdict",
|
||||
"fieldlist",
|
||||
"genindex",
|
||||
"geoip",
|
||||
"geoipupdate",
|
||||
"Geolite",
|
||||
"geolocation",
|
||||
@@ -44,7 +46,10 @@
|
||||
"hostnames",
|
||||
"htpasswd",
|
||||
"httpasswd",
|
||||
"httplib",
|
||||
"IMAP",
|
||||
"imapclient",
|
||||
"infile",
|
||||
"Interaktive",
|
||||
"IPDB",
|
||||
"journalctl",
|
||||
@@ -80,14 +85,18 @@
|
||||
"nosecureimap",
|
||||
"nosniff",
|
||||
"nwettbewerb",
|
||||
"opensearch",
|
||||
"parsedmarc",
|
||||
"passsword",
|
||||
"Postorius",
|
||||
"premade",
|
||||
"procs",
|
||||
"publicsuffix",
|
||||
"publicsuffixlist",
|
||||
"publixsuffix",
|
||||
"pygelf",
|
||||
"pypy",
|
||||
"pytest",
|
||||
"quickstart",
|
||||
"Reindex",
|
||||
"replyto",
|
||||
@@ -95,10 +104,13 @@
|
||||
"Rollup",
|
||||
"Rpdm",
|
||||
"SAMEORIGIN",
|
||||
"sdist",
|
||||
"Servernameone",
|
||||
"setuptools",
|
||||
"smartquotes",
|
||||
"SMTPTLS",
|
||||
"sortlists",
|
||||
"sortmaps",
|
||||
"sourcetype",
|
||||
"STARTTLS",
|
||||
"tasklist",
|
||||
|
||||
42 CHANGELOG.md
@@ -1,6 +1,46 @@
Changelog
=========

8.19.0
------

- Add multi-tenant support via an index-prefix domain mapping file
- PSL overrides so that services like AWS are correctly identified
- Additional improvements to report type detection
- Fix webhook timeout parsing (PR #623)
- Output to STDOUT when the new general config boolean `silent` is set to `False` (Closes #614)
- Additional services added to `base_reverse_dns_map.csv`

8.18.9
------

- Complete fix for #687 and more robust report type detection

8.18.8
------

- Fix parsing emails with an uncompressed aggregate report attachment (Closes #607)
- Add `--no-prettify-json` CLI option (PR #617)

8.18.7
------

Removed improper spaces from `base_reverse_dns_map.csv` (Closes #612)

8.18.6
------

- Fix since option to correctly work with weeks (PR #604)
- Add 183 entries to `base_reverse_dns_map.csv`
- Add 57 entries to `known_unknown_base_reverse_dns.txt`
- Check for invalid UTF-8 bytes in `base_reverse_dns_map.csv` at build
- Exclude unneeded items from the `parsedmarc.resources` module at build

8.18.5
------

- Fix CSV download

8.18.4
------

@@ -688,7 +728,7 @@ in the ``elasticsearch`` configuration file section (closes issue #78)
-----

- Add filename and line number to logging output
- Improved IMAP error handling
- Improved IMAP error handling
- Add CLI options

```text
@@ -1,4 +1,4 @@
-ARG BASE_IMAGE=python:3.9-slim
+ARG BASE_IMAGE=python:3.13-slim
 ARG USERNAME=parsedmarc
 ARG USER_UID=1000
 ARG USER_GID=$USER_UID
10 README.md
@@ -9,7 +9,7 @@ Package](https://img.shields.io/pypi/v/parsedmarc.svg)](https://pypi.org/project
 [](https://pypistats.org/packages/parsedmarc)

 <p align="center">
-  <img src="https://github.com/domainaware/parsedmarc/raw/master/docs/source/_static/screenshots/dmarc-summary-charts.png?raw=true" alt="A screenshot of DMARC summary charts in Kibana"/>
+  <img src="https://raw.githubusercontent.com/domainaware/parsedmarc/refs/heads/master/docs/source/_static/screenshots/dmarc-summary-charts.png?raw=true" alt="A screenshot of DMARC summary charts in Kibana"/>
 </p>

 `parsedmarc` is a Python module and CLI utility for parsing DMARC
@@ -34,10 +34,10 @@ Thanks to all

 ## Features

-- Parses draft and 1.0 standard aggregate/rua reports
-- Parses forensic/failure/ruf reports
-- Can parse reports from an inbox over IMAP, Microsoft Graph, or Gmail
-  API
+- Parses draft and 1.0 standard aggregate/rua DMARC reports
+- Parses forensic/failure/ruf DMARC reports
+- Parses reports from SMTP TLS Reporting
+- Can parse reports from an inbox over IMAP, Microsoft Graph, or Gmail API
 - Transparently handles gzip or zip compressed reports
 - Consistent data structures
 - Simple JSON and/or CSV output
9 build.sh
@@ -18,8 +18,11 @@ if [ -d "./../parsedmarc-docs" ]; then
   cp -rf build/html/* ../../parsedmarc-docs/
 fi
 cd ..
-sort -o "parsedmarc/resources/maps/known_unknown_base_reverse_dns.txt" "parsedmarc/resources/maps/known_unknown_base_reverse_dns.txt"
-./sortmaps.py
+cd parsedmarc/resources/maps
+python3 sortlists.py
+echo "Checking for invalid UTF-8 bytes in base_reverse_dns_map.csv"
+python3 find_bad_utf8.py base_reverse_dns_map.csv
+cd ../../..
 python3 tests.py
 rm -rf dist/ build/
 hatch build
@@ -21,7 +21,6 @@
   :members:
```

## parsedmarc.splunk

```{eval-rst}

@@ -33,15 +33,16 @@ and Valimail.

 ## Features

-- Parses draft and 1.0 standard aggregate/rua reports
-- Parses forensic/failure/ruf reports
+- Parses draft and 1.0 standard aggregate/rua DMARC reports
+- Parses forensic/failure/ruf DMARC reports
+- Parses reports from SMTP TLS Reporting
 - Can parse reports from an inbox over IMAP, Microsoft Graph, or Gmail API
 - Transparently handles gzip or zip compressed reports
 - Consistent data structures
 - Simple JSON and/or CSV output
 - Optionally email the results
-- Optionally send the results to Elasticsearch/OpenSearch and/or Splunk, for use with
-  premade dashboards
+- Optionally send the results to Elasticsearch, Opensearch, and/or Splunk, for use
+  with premade dashboards
 - Optionally send reports to Apache Kafka

 ```{toctree}

@@ -120,8 +120,10 @@ The full set of configuration options are:
  Elasticsearch, Splunk and/or S3
- `save_smtp_tls` - bool: Save SMTP-STS report data to
  Elasticsearch, Splunk and/or S3
- `index_prefix_domain_map` - str: A path to a YAML file mapping Opensearch/Elasticsearch index prefixes to domain names
- `strip_attachment_payloads` - bool: Remove attachment
  payloads from results
- `silent` - bool: Set this to `False` to output results to STDOUT
- `output` - str: Directory to place JSON and CSV files in. This is required if you set either of the JSON output file options.
- `aggregate_json_filename` - str: filename for the aggregate
  JSON output file

@@ -369,7 +371,7 @@ The full set of configuration options are:
 - `mode` - str: The GELF transport type to use. Valid modes: `tcp`, `udp`, `tls`

 - `maildir`
-  - `reports_folder` - str: Full path for mailbox maildir location (Default: `INBOX`)
+  - `maildir_path` - str: Full path for mailbox maildir location (Default: `INBOX`)
   - `maildir_create` - bool: Create maildir if not present (Default: False)

 - `webhook` - Post the individual reports to a webhook url with the report as the JSON body

@@ -445,6 +447,28 @@ PUT _cluster/settings
Increasing this value increases resource usage.
:::

## Multi-tenant support

Starting in `8.19.0`, ParseDMARC provides multi-tenant support by placing data into separate OpenSearch or Elasticsearch index prefixes. To set this up, create a YAML file where each key is a tenant name and the value is a list of domains related to that tenant (not including subdomains), like this:

```yaml
example:
  - example.com
  - example.net
  - example.org

whalensolutions:
  - whalensolutions.com
```

Save it to disk where the user running ParseDMARC can read it, then set `index_prefix_domain_map` to that filepath in the `[general]` section of the ParseDMARC configuration file, and do not set an `index_prefix` option in the `[elasticsearch]` or `[opensearch]` sections.

When configured correctly, if ParseDMARC finds that a report is related to a domain in the mapping, the report will be saved in an index name that has the tenant name prefixed to it with a trailing underscore. You can then use the security features of Opensearch or the ELK stack to grant users access to only the indexes they need.

:::{note}
A domain cannot be used in multiple tenant lists. Only the first prefix list that contains the matching domain is used.
:::
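A minimal sketch of how a tenant prefix can be resolved from such a mapping (simplified from the `get_index_prefix` helper added to `parsedmarc/cli.py` in this release; the file path below is only an example, and parsedmarc applies `get_base_domain()` to the report domain before the lookup):

```python
import yaml

# Example path; point this at your own mapping file
with open("/etc/parsedmarc/index_prefix_domain_map.yaml") as f:
    index_prefix_domain_map = yaml.safe_load(f)


def resolve_index_prefix(base_domain):
    """Return 'tenant_' when the base domain is listed for a tenant, else None."""
    for tenant, domains in index_prefix_domain_map.items():
        if base_domain in domains:
            # Normalize the tenant name the same way parsedmarc does:
            # lowercase, strip surrounding underscores, spaces/dashes -> underscores
            tenant = (
                tenant.lower().strip().strip("_").replace(" ", "_").replace("-", "_")
            )
            return f"{tenant}_"
    return None


print(resolve_index_prefix("example.net"))          # "example_"
print(resolve_index_prefix("whalensolutions.com"))  # "whalensolutions_"
print(resolve_index_prefix("unknown.test"))         # None
```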
## Running parsedmarc as a systemd service

Use systemd to run `parsedmarc` as a service and process reports as
@@ -17,7 +17,7 @@ import zlib
 from base64 import b64decode
 from collections import OrderedDict
 from csv import DictWriter
-from datetime import datetime, timedelta
+from datetime import datetime, timedelta, timezone
 from io import BytesIO, StringIO
 from typing import Callable
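The `timezone` import supports the move away from `datetime.utcnow()` elsewhere in this diff; the replacement call returns an aware datetime rather than a naive one. A quick standalone check:

```python
from datetime import datetime, timezone

naive = datetime.utcnow()           # deprecated since Python 3.12, tzinfo is None
aware = datetime.now(timezone.utc)  # timezone-aware UTC timestamp
print(naive.tzinfo, aware.tzinfo)   # None UTC
```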
@@ -1184,7 +1184,7 @@ def parse_report_email(
|
||||
input_ = input_.decode(encoding="utf8", errors="replace")
|
||||
msg = mailparser.parse_from_string(input_)
|
||||
msg_headers = json.loads(msg.headers_json)
|
||||
date = email.utils.format_datetime(datetime.utcnow())
|
||||
date = email.utils.format_datetime(datetime.now(timezone.utc))
|
||||
if "Date" in msg_headers:
|
||||
date = human_timestamp_to_datetime(msg_headers["Date"])
|
||||
msg = email.message_from_string(input_)
|
||||
@@ -1200,12 +1200,14 @@ def parse_report_email(
|
||||
if "Subject" in msg_headers:
|
||||
subject = msg_headers["Subject"]
|
||||
for part in msg.walk():
|
||||
content_type = part.get_content_type()
|
||||
content_type = part.get_content_type().lower()
|
||||
payload = part.get_payload()
|
||||
if not isinstance(payload, list):
|
||||
payload = [payload]
|
||||
payload = payload[0].__str__()
|
||||
if content_type == "message/feedback-report":
|
||||
if content_type.startswith("multipart/"):
|
||||
continue
|
||||
elif content_type == "message/feedback-report":
|
||||
try:
|
||||
if "Feedback-Type" in payload:
|
||||
feedback_report = payload
|
||||
@@ -1216,13 +1218,12 @@ def parse_report_email(
|
||||
feedback_report = feedback_report.replace("\\n", "\n")
|
||||
except (ValueError, TypeError, binascii.Error):
|
||||
feedback_report = payload
|
||||
|
||||
elif content_type == "text/rfc822-headers":
|
||||
sample = payload
|
||||
elif content_type == "message/rfc822":
|
||||
sample = payload
|
||||
elif content_type == "application/tlsrpt+json":
|
||||
if "{" not in payload:
|
||||
if not payload.strip().startswith("{"):
|
||||
payload = str(b64decode(payload))
|
||||
smtp_tls_report = parse_smtp_tls_report_json(payload)
|
||||
return OrderedDict(
|
||||
@@ -1234,7 +1235,6 @@ def parse_report_email(
|
||||
return OrderedDict(
|
||||
[("report_type", "smtp_tls"), ("report", smtp_tls_report)]
|
||||
)
|
||||
|
||||
elif content_type == "text/plain":
|
||||
if "A message claiming to be from you has failed" in payload:
|
||||
try:
|
||||
@@ -1261,13 +1261,14 @@ def parse_report_email(
|
||||
payload = b64decode(payload)
|
||||
if payload.startswith(MAGIC_ZIP) or payload.startswith(MAGIC_GZIP):
|
||||
payload = extract_report(payload)
|
||||
ns = nameservers
|
||||
if payload.startswith("{"):
|
||||
smtp_tls_report = parse_smtp_tls_report_json(payload)
|
||||
result = OrderedDict(
|
||||
[("report_type", "smtp_tls"), ("report", smtp_tls_report)]
|
||||
)
|
||||
return result
|
||||
if isinstance(payload, bytes):
|
||||
payload = payload.decode("utf-8", errors="replace")
|
||||
if payload.strip().startswith("{"):
|
||||
smtp_tls_report = parse_smtp_tls_report_json(payload)
|
||||
result = OrderedDict(
|
||||
[("report_type", "smtp_tls"), ("report", smtp_tls_report)]
|
||||
)
|
||||
elif payload.strip().startswith("<"):
|
||||
aggregate_report = parse_aggregate_report_xml(
|
||||
payload,
|
||||
ip_db_path=ip_db_path,
|
||||
@@ -1275,25 +1276,24 @@ def parse_report_email(
|
||||
reverse_dns_map_path=reverse_dns_map_path,
|
||||
reverse_dns_map_url=reverse_dns_map_url,
|
||||
offline=offline,
|
||||
nameservers=ns,
|
||||
nameservers=nameservers,
|
||||
timeout=dns_timeout,
|
||||
keep_alive=keep_alive,
|
||||
)
|
||||
result = OrderedDict(
|
||||
[("report_type", "aggregate"), ("report", aggregate_report)]
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
except (TypeError, ValueError, binascii.Error):
|
||||
pass
|
||||
|
||||
except InvalidAggregateReport as e:
|
||||
error = (
|
||||
'Message with subject "{0}" '
|
||||
"is not a valid "
|
||||
"aggregate DMARC report: {1}".format(subject, e)
|
||||
except InvalidDMARCReport:
|
||||
error = 'Message with subject "{0}" is not a valid DMARC report'.format(
|
||||
subject
|
||||
)
|
||||
raise InvalidDMARCReport(error)
|
||||
raise ParserError(error)
|
||||
|
||||
except Exception as e:
|
||||
error = 'Unable to parse message with subject "{0}": {1}'.format(
|
||||
@@ -1580,7 +1580,7 @@ def get_dmarc_reports_from_mailbox(

     if since:
         _since = 1440  # default one day
-        if re.match(r"\d+[mhd]$", since):
+        if re.match(r"\d+[mhdw]$", since):
             s = re.split(r"(\d+)", since)
             if s[2] == "m":
                 _since = int(s[1])
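For reference, a small sketch of how a `since` value such as `2d` or `1w` maps to minutes under this pattern (a standalone illustration, not the exact library code):

```python
import re


def since_to_minutes(since, default=1440):
    """Convert '30m', '12h', '2d', or '1w' to minutes; fall back to one day."""
    if re.match(r"\d+[mhdw]$", since):
        number, unit = int(since[:-1]), since[-1]
        return number * {"m": 1, "h": 60, "d": 1440, "w": 10080}[unit]
    return default


print(since_to_minutes("2d"))  # 2880
print(since_to_minutes("1w"))  # 10080
```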
@@ -1604,14 +1604,18 @@ def get_dmarc_reports_from_mailbox(
|
||||
"Only days and weeks values in 'since' option are \
|
||||
considered for IMAP conections. Examples: 2d or 1w"
|
||||
)
|
||||
since = (datetime.utcnow() - timedelta(minutes=_since)).date()
|
||||
current_time = datetime.utcnow().date()
|
||||
since = (datetime.now(timezone.utc) - timedelta(minutes=_since)).date()
|
||||
current_time = datetime.now(timezone.utc).date()
|
||||
elif isinstance(connection, MSGraphConnection):
|
||||
since = (datetime.utcnow() - timedelta(minutes=_since)).isoformat() + "Z"
|
||||
current_time = datetime.utcnow().isoformat() + "Z"
|
||||
since = (
|
||||
datetime.now(timezone.utc) - timedelta(minutes=_since)
|
||||
).isoformat() + "Z"
|
||||
current_time = datetime.now(timezone.utc).isoformat() + "Z"
|
||||
elif isinstance(connection, GmailConnection):
|
||||
since = (datetime.utcnow() - timedelta(minutes=_since)).strftime("%s")
|
||||
current_time = datetime.utcnow().strftime("%s")
|
||||
since = (datetime.now(timezone.utc) - timedelta(minutes=_since)).strftime(
|
||||
"%s"
|
||||
)
|
||||
current_time = datetime.now(timezone.utc).strftime("%s")
|
||||
else:
|
||||
pass
|
||||
|
||||
|
||||
@@ -9,6 +9,7 @@ from configparser import ConfigParser
|
||||
from glob import glob
|
||||
import logging
|
||||
import math
|
||||
import yaml
|
||||
from collections import OrderedDict
|
||||
import json
|
||||
from ssl import CERT_NONE, create_default_context
|
||||
@@ -46,7 +47,7 @@ from parsedmarc.mail import (
|
||||
from parsedmarc.mail.graph import AuthMethod
|
||||
|
||||
from parsedmarc.log import logger
|
||||
from parsedmarc.utils import is_mbox, get_reverse_dns
|
||||
from parsedmarc.utils import is_mbox, get_reverse_dns, get_base_domain
|
||||
from parsedmarc import SEEN_AGGREGATE_REPORT_IDS
|
||||
|
||||
http.client._MAXHEADERS = 200 # pylint:disable=protected-access
|
||||
@@ -101,8 +102,35 @@ def cli_parse(
def _main():
    """Called when the module is executed"""

    def get_index_prefix(report):
        if index_prefix_domain_map is None:
            return None
        if "policy_published" in report:
            domain = report["policy_published"]["domain"]
        elif "reported_domain" in report:
            domain = report("reported_domain")
        elif "policies" in report:
            domain = report["policies"][0]["domain"]
        if domain:
            domain = get_base_domain(domain)
            for prefix in index_prefix_domain_map:
                if domain in index_prefix_domain_map[prefix]:
                    prefix = (
                        prefix.lower()
                        .strip()
                        .strip("_")
                        .replace(" ", "_")
                        .replace("-", "_")
                    )
                    prefix = f"{prefix}_"
                    return prefix
        return None

    def process_reports(reports_):
-        output_str = "{0}\n".format(json.dumps(reports_, ensure_ascii=False, indent=2))
+        indent_value = 2 if opts.prettify_json else None
+        output_str = "{0}\n".format(
+            json.dumps(reports_, ensure_ascii=False, indent=indent_value)
+        )

        if not opts.silent:
            print(output_str)
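For context on the `indent_value` change above: `json.dumps` emits pretty-printed multi-line JSON with `indent=2` and a compact single line with `indent=None`, which is what the new `--no-prettify-json` option selects. A quick standalone check:

```python
import json

report = {"report_type": "aggregate", "org_name": "example.com"}
print(json.dumps(report, ensure_ascii=False, indent=2))     # multi-line, human friendly
print(json.dumps(report, ensure_ascii=False, indent=None))  # one line, easier to pipe
```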
@@ -126,7 +154,8 @@ def _main():
|
||||
elastic.save_aggregate_report_to_elasticsearch(
|
||||
report,
|
||||
index_suffix=opts.elasticsearch_index_suffix,
|
||||
index_prefix=opts.elasticsearch_index_prefix,
|
||||
index_prefix=opts.elasticsearch_index_prefix
|
||||
or get_index_prefix(report),
|
||||
monthly_indexes=opts.elasticsearch_monthly_indexes,
|
||||
number_of_shards=shards,
|
||||
number_of_replicas=replicas,
|
||||
@@ -147,7 +176,8 @@ def _main():
|
||||
opensearch.save_aggregate_report_to_opensearch(
|
||||
report,
|
||||
index_suffix=opts.opensearch_index_suffix,
|
||||
index_prefix=opts.opensearch_index_prefix,
|
||||
index_prefix=opts.opensearch_index_prefix
|
||||
or get_index_prefix(report),
|
||||
monthly_indexes=opts.opensearch_monthly_indexes,
|
||||
number_of_shards=shards,
|
||||
number_of_replicas=replicas,
|
||||
@@ -189,8 +219,9 @@ def _main():
|
||||
|
||||
try:
|
||||
if opts.webhook_aggregate_url:
|
||||
indent_value = 2 if opts.prettify_json else None
|
||||
webhook_client.save_aggregate_report_to_webhook(
|
||||
json.dumps(report, ensure_ascii=False, indent=2)
|
||||
json.dumps(report, ensure_ascii=False, indent=indent_value)
|
||||
)
|
||||
except Exception as error_:
|
||||
logger.error("Webhook Error: {0}".format(error_.__str__()))
|
||||
@@ -212,7 +243,8 @@ def _main():
|
||||
elastic.save_forensic_report_to_elasticsearch(
|
||||
report,
|
||||
index_suffix=opts.elasticsearch_index_suffix,
|
||||
index_prefix=opts.elasticsearch_index_prefix,
|
||||
index_prefix=opts.elasticsearch_index_prefix
|
||||
or get_index_prefix(report),
|
||||
monthly_indexes=opts.elasticsearch_monthly_indexes,
|
||||
number_of_shards=shards,
|
||||
number_of_replicas=replicas,
|
||||
@@ -231,7 +263,8 @@ def _main():
|
||||
opensearch.save_forensic_report_to_opensearch(
|
||||
report,
|
||||
index_suffix=opts.opensearch_index_suffix,
|
||||
index_prefix=opts.opensearch_index_prefix,
|
||||
index_prefix=opts.opensearch_index_prefix
|
||||
or get_index_prefix(report),
|
||||
monthly_indexes=opts.opensearch_monthly_indexes,
|
||||
number_of_shards=shards,
|
||||
number_of_replicas=replicas,
|
||||
@@ -271,8 +304,9 @@ def _main():
|
||||
|
||||
try:
|
||||
if opts.webhook_forensic_url:
|
||||
indent_value = 2 if opts.prettify_json else None
|
||||
webhook_client.save_forensic_report_to_webhook(
|
||||
json.dumps(report, ensure_ascii=False, indent=2)
|
||||
json.dumps(report, ensure_ascii=False, indent=indent_value)
|
||||
)
|
||||
except Exception as error_:
|
||||
logger.error("Webhook Error: {0}".format(error_.__str__()))
|
||||
@@ -294,7 +328,8 @@ def _main():
|
||||
elastic.save_smtp_tls_report_to_elasticsearch(
|
||||
report,
|
||||
index_suffix=opts.elasticsearch_index_suffix,
|
||||
index_prefix=opts.elasticsearch_index_prefix,
|
||||
index_prefix=opts.elasticsearch_index_prefix
|
||||
or get_index_prefix(report),
|
||||
monthly_indexes=opts.elasticsearch_monthly_indexes,
|
||||
number_of_shards=shards,
|
||||
number_of_replicas=replicas,
|
||||
@@ -313,7 +348,8 @@ def _main():
|
||||
opensearch.save_smtp_tls_report_to_opensearch(
|
||||
report,
|
||||
index_suffix=opts.opensearch_index_suffix,
|
||||
index_prefix=opts.opensearch_index_prefix,
|
||||
index_prefix=opts.opensearch_index_prefix
|
||||
or get_index_prefix(report),
|
||||
monthly_indexes=opts.opensearch_monthly_indexes,
|
||||
number_of_shards=shards,
|
||||
number_of_replicas=replicas,
|
||||
@@ -353,8 +389,9 @@ def _main():
|
||||
|
||||
try:
|
||||
if opts.webhook_smtp_tls_url:
|
||||
indent_value = 2 if opts.prettify_json else None
|
||||
webhook_client.save_smtp_tls_report_to_webhook(
|
||||
json.dumps(report, ensure_ascii=False, indent=2)
|
||||
json.dumps(report, ensure_ascii=False, indent=indent_value)
|
||||
)
|
||||
except Exception as error_:
|
||||
logger.error("Webhook Error: {0}".format(error_.__str__()))
|
||||
@@ -475,6 +512,12 @@ def _main():
|
||||
"--debug", action="store_true", help="print debugging information"
|
||||
)
|
||||
arg_parser.add_argument("--log-file", default=None, help="output logging to a file")
|
||||
arg_parser.add_argument(
|
||||
"--no-prettify-json",
|
||||
action="store_false",
|
||||
dest="prettify_json",
|
||||
help="output JSON in a single line without indentation",
|
||||
)
|
||||
arg_parser.add_argument("-v", "--version", action="version", version=__version__)
|
||||
|
||||
aggregate_reports = []
|
||||
@@ -504,6 +547,7 @@ def _main():
|
||||
dns_timeout=args.dns_timeout,
|
||||
debug=args.debug,
|
||||
verbose=args.verbose,
|
||||
prettify_json=args.prettify_json,
|
||||
save_aggregate=False,
|
||||
save_forensic=False,
|
||||
save_smtp_tls=False,
|
||||
@@ -625,9 +669,16 @@ def _main():
            exit(-1)
        opts.silent = True
        config = ConfigParser()
        index_prefix_domain_map = None
        config.read(args.config_file)
        if "general" in config.sections():
            general_config = config["general"]
            if "silent" in general_config:
                if general_config["silent"].lower() == "false":
                    opts.silent = False
            if "index_prefix_domain_map" in general_config:
                with open(general_config["index_prefix_domain_map"]) as f:
                    index_prefix_domain_map = yaml.safe_load(f)
            if "offline" in general_config:
                opts.offline = general_config.getboolean("offline")
            if "strip_attachment_payloads" in general_config:
@@ -701,6 +752,8 @@ def _main():
|
||||
opts.reverse_dns_map_path = general_config["reverse_dns_path"]
|
||||
if "reverse_dns_map_url" in general_config:
|
||||
opts.reverse_dns_map_url = general_config["reverse_dns_url"]
|
||||
if "prettify_json" in general_config:
|
||||
opts.prettify_json = general_config.getboolean("prettify_json")
|
||||
|
||||
if "mailbox" in config.sections():
|
||||
mailbox_config = config["mailbox"]
|
||||
@@ -1167,7 +1220,7 @@ def _main():
        if "smtp_tls_url" in webhook_config:
            opts.webhook_smtp_tls_url = webhook_config["smtp_tls_url"]
        if "timeout" in webhook_config:
-            opts.webhook_timeout = webhook_config["timeout"]
+            opts.webhook_timeout = webhook_config.getint("timeout")

    logger.setLevel(logging.ERROR)
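The `getint()` change above matters because plain subscript access on a `ConfigParser` section always returns a string. A sketch of the difference:

```python
from configparser import ConfigParser

config = ConfigParser()
config.read_string("[webhook]\ntimeout = 60\n")

print(repr(config["webhook"]["timeout"]))         # '60'  (str)
print(repr(config["webhook"].getint("timeout")))  # 60    (int)
```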
@@ -1586,6 +1639,7 @@ def _main():
|
||||
username=opts.smtp_user,
|
||||
password=opts.smtp_password,
|
||||
subject=opts.smtp_subject,
|
||||
require_encryption=opts.smtp_ssl,
|
||||
)
|
||||
except Exception:
|
||||
logger.exception("Failed to email results")
|
||||
|
||||
@@ -1,2 +1,2 @@
-__version__ = "8.18.4"
+__version__ = "8.19.0"
 USER_AGENT = f"parsedmarc/{__version__}"
@@ -27,6 +27,7 @@ The `service_type` is based on the following rule precedence:
- Agriculture
- Automotive
- Beauty
- Conglomerate
- Construction
- Consulting
- Defense
@@ -43,6 +44,7 @@ The `service_type` is based on the following rule precedence:
- IaaS
- Industrial
- ISP
- Legal
- Logistics
- Manufacturing
- Marketing
@@ -52,6 +54,7 @@ The `service_type` is based on the following rule precedence:
- Nonprofit
- PaaS
- Photography
- Physical Security
- Print
- Publishing
- Real Estate
@@ -74,12 +77,16 @@ A list of reverse DNS base domains that could not be identified as belonging to

## base_reverse_dns.csv

-A CSV with the fields `source_name` and optionally `message_count`. This CSV can be generated byy exporting the base DNS data from the Kibana on Splunk dashboards provided by parsedmarc. This file is not tracked by Git.
+A CSV with the fields `source_name` and optionally `message_count`. This CSV can be generated by exporting the base DNS data from the Kibana or Splunk dashboards provided by parsedmarc. This file is not tracked by Git.

## unknown_base_reverse_dns.csv

A CSV file with the fields `source_name` and `message_count`. This file is not tracked by Git.

## find_bad_utf8.py

Locates invalid UTF-8 bytes in files and optionally tries to correct them. Generated by GPT-5. Helped me find where I had introduced invalid bytes in `base_reverse_dns_map.csv`.

## find_unknown_base_reverse_dns.py

This is a Python script that reads the domains in `base_reverse_dns.csv` and writes the domains that are not in `base_reverse_dns_map.csv` or `known_unknown_base_reverse_dns.txt` to `unknown_base_reverse_dns.csv`. This is useful for identifying potential additional domains to contribute to `base_reverse_dns_map.csv` and `known_unknown_base_reverse_dns.txt`.
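A minimal sketch of the check that script performs (illustrative only; the real script, shown further down in this diff, also applies `psl_overrides.txt` and additional validation):

```python
import csv

# Assumed filenames match the files described above
with open("base_reverse_dns_map.csv", newline="") as f:
    known = {row["base_reverse_dns"].lower().strip() for row in csv.DictReader(f)}

with open("known_unknown_base_reverse_dns.txt") as f:
    known_unknown = {line.lower().strip() for line in f if line.strip()}

with open("base_reverse_dns.csv", newline="") as f, open(
    "unknown_base_reverse_dns.csv", "w", newline=""
) as out:
    writer = csv.DictWriter(out, fieldnames=["source_name", "message_count"])
    writer.writeheader()
    for row in csv.DictReader(f):
        domain = row["source_name"].lower().strip()
        if domain and domain not in known and domain not in known_unknown:
            writer.writerow(row)  # keep only domains not yet classified
```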
File diff suppressed because it is too large
44 parsedmarc/resources/maps/base_reverse_dns_types.txt Normal file
@@ -0,0 +1,44 @@
Agriculture
Automotive
Beauty
Conglomerate
Construction
Consulting
Defense
Education
Email Provider
Email Security
Entertainment
Event Planning
Finance
Food
Government
Government Media
Healthcare
ISP
IaaS
Industrial
Legal
Logistics
MSP
MSSP
Manufacturing
Marketing
News
Nonprofit
PaaS
Photography
Physical Security
Print
Publishing
Real Estate
Retail
SaaS
Science
Search Engine
Social Media
Sports
Staffing
Technology
Travel
Web Host
488 parsedmarc/resources/maps/find_bad_utf8.py Executable file
@@ -0,0 +1,488 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
|
||||
import argparse
|
||||
import codecs
|
||||
import os
|
||||
import sys
|
||||
import shutil
|
||||
from typing import List, Tuple
|
||||
|
||||
"""
|
||||
Locates and optionally corrects bad UTF-8 bytes in a file.
|
||||
Generated by GPT-5 Use at your own risk.
|
||||
"""
|
||||
|
||||
# -------------------------
|
||||
# UTF-8 scanning
|
||||
# -------------------------
|
||||
|
||||
|
||||
def scan_line_for_utf8_errors(
|
||||
line_bytes: bytes, line_no: int, base_offset: int, context: int
|
||||
):
|
||||
"""
|
||||
Scan one line of raw bytes for UTF-8 decoding errors.
|
||||
Returns a list of dicts describing each error.
|
||||
"""
|
||||
pos = 0
|
||||
results = []
|
||||
while pos < len(line_bytes):
|
||||
dec = codecs.getincrementaldecoder("utf-8")("strict")
|
||||
try:
|
||||
dec.decode(line_bytes[pos:], final=True)
|
||||
break
|
||||
except UnicodeDecodeError as e:
|
||||
rel_index = e.start
|
||||
abs_index_in_line = pos + rel_index
|
||||
abs_offset = base_offset + abs_index_in_line
|
||||
|
||||
start_ctx = max(0, abs_index_in_line - context)
|
||||
end_ctx = min(len(line_bytes), abs_index_in_line + 1 + context)
|
||||
ctx_bytes = line_bytes[start_ctx:end_ctx]
|
||||
bad_byte = line_bytes[abs_index_in_line : abs_index_in_line + 1]
|
||||
col = abs_index_in_line + 1 # 1-based byte column
|
||||
|
||||
results.append(
|
||||
{
|
||||
"line": line_no,
|
||||
"column": col,
|
||||
"abs_offset": abs_offset,
|
||||
"bad_byte_hex": bad_byte.hex(),
|
||||
"context_hex": ctx_bytes.hex(),
|
||||
"context_preview": ctx_bytes.decode("utf-8", errors="replace"),
|
||||
}
|
||||
)
|
||||
# Move past the offending byte and continue
|
||||
pos = abs_index_in_line + 1
|
||||
return results
|
||||
|
||||
|
||||
def scan_file_for_utf8_errors(path: str, context: int, limit: int):
|
||||
errors_found = 0
|
||||
limit_val = limit if limit != 0 else float("inf")
|
||||
|
||||
with open(path, "rb") as f:
|
||||
total_offset = 0
|
||||
line_no = 0
|
||||
while True:
|
||||
line = f.readline()
|
||||
if not line:
|
||||
break
|
||||
line_no += 1
|
||||
results = scan_line_for_utf8_errors(line, line_no, total_offset, context)
|
||||
for r in results:
|
||||
errors_found += 1
|
||||
print(
|
||||
f"[ERROR {errors_found}] Line {r['line']}, Column {r['column']}, "
|
||||
f"Absolute byte offset {r['abs_offset']}"
|
||||
)
|
||||
print(f" Bad byte: 0x{r['bad_byte_hex']}")
|
||||
print(f" Context (hex): {r['context_hex']}")
|
||||
print(f" Context (preview): {r['context_preview']}")
|
||||
print()
|
||||
if errors_found >= limit_val:
|
||||
print(f"Reached limit of {limit} errors. Stopping.")
|
||||
return errors_found
|
||||
total_offset += len(line)
|
||||
|
||||
if errors_found == 0:
|
||||
print("No invalid UTF-8 bytes found. 🎉")
|
||||
else:
|
||||
print(f"Found {errors_found} invalid UTF-8 byte(s).")
|
||||
return errors_found
|
||||
|
||||
|
||||
# -------------------------
|
||||
# Whole-file conversion
|
||||
# -------------------------
|
||||
|
||||
|
||||
def detect_encoding_text(path: str) -> Tuple[str, str]:
|
||||
"""
|
||||
Use charset-normalizer to detect file encoding.
|
||||
Return (encoding_name, decoded_text). Falls back to cp1252 if needed.
|
||||
"""
|
||||
try:
|
||||
from charset_normalizer import from_path
|
||||
except ImportError:
|
||||
print(
|
||||
"Please install charset-normalizer: pip install charset-normalizer",
|
||||
file=sys.stderr,
|
||||
)
|
||||
sys.exit(4)
|
||||
|
||||
matches = from_path(path)
|
||||
match = matches.best()
|
||||
if match is None or match.encoding is None:
|
||||
# Fallback heuristic for Western single-byte text
|
||||
with open(path, "rb") as fb:
|
||||
data = fb.read()
|
||||
try:
|
||||
return "cp1252", data.decode("cp1252", errors="strict")
|
||||
except UnicodeDecodeError:
|
||||
print("Unable to detect encoding reliably.", file=sys.stderr)
|
||||
sys.exit(5)
|
||||
|
||||
return match.encoding, str(match)
|
||||
|
||||
|
||||
def convert_to_utf8(src_path: str, out_path: str, src_encoding: str = None) -> str:
|
||||
"""
|
||||
Convert an entire file to UTF-8 (re-decoding everything).
|
||||
If src_encoding is provided, use it; else auto-detect.
|
||||
Returns the encoding actually used.
|
||||
"""
|
||||
if src_encoding:
|
||||
with open(src_path, "rb") as fb:
|
||||
data = fb.read()
|
||||
try:
|
||||
text = data.decode(src_encoding, errors="strict")
|
||||
except LookupError:
|
||||
print(f"Unknown encoding: {src_encoding}", file=sys.stderr)
|
||||
sys.exit(6)
|
||||
except UnicodeDecodeError as e:
|
||||
print(f"Decoding failed with {src_encoding}: {e}", file=sys.stderr)
|
||||
sys.exit(7)
|
||||
used = src_encoding
|
||||
else:
|
||||
used, text = detect_encoding_text(src_path)
|
||||
|
||||
with open(out_path, "w", encoding="utf-8", newline="") as fw:
|
||||
fw.write(text)
|
||||
return used
|
||||
|
||||
|
||||
def verify_utf8_file(path: str) -> Tuple[bool, str]:
|
||||
try:
|
||||
with open(path, "rb") as fb:
|
||||
fb.read().decode("utf-8", errors="strict")
|
||||
return True, ""
|
||||
except UnicodeDecodeError as e:
|
||||
return False, str(e)
|
||||
|
||||
|
||||
# -------------------------
|
||||
# Targeted single-byte fixer
|
||||
# -------------------------
|
||||
|
||||
|
||||
def iter_lines_with_offsets(b: bytes):
|
||||
"""
|
||||
Yield (line_bytes, line_start_abs_offset). Preserves LF/CRLF/CR in bytes.
|
||||
"""
|
||||
start = 0
|
||||
for i, byte in enumerate(b):
|
||||
if byte == 0x0A: # LF
|
||||
yield b[start : i + 1], start
|
||||
start = i + 1
|
||||
if start < len(b):
|
||||
yield b[start:], start
|
||||
|
||||
|
||||
def detect_probable_fallbacks() -> List[str]:
|
||||
# Good defaults for Western/Portuguese text
|
||||
return ["cp1252", "iso-8859-1", "iso-8859-15"]
|
||||
|
||||
|
||||
def repair_mixed_utf8_line(line: bytes, base_offset: int, fallback_chain: List[str]):
|
||||
"""
|
||||
Strictly validate UTF-8 and fix *only* the exact offending byte when an error occurs.
|
||||
This avoids touching adjacent valid UTF-8 (prevents mojibake like 'é').
|
||||
"""
|
||||
out_fragments: List[str] = []
|
||||
fixes = []
|
||||
pos = 0
|
||||
n = len(line)
|
||||
|
||||
while pos < n:
|
||||
dec = codecs.getincrementaldecoder("utf-8")("strict")
|
||||
try:
|
||||
s = dec.decode(line[pos:], final=True)
|
||||
out_fragments.append(s)
|
||||
break
|
||||
except UnicodeDecodeError as e:
|
||||
# Append the valid prefix before the error
|
||||
if e.start > 0:
|
||||
out_fragments.append(
|
||||
line[pos : pos + e.start].decode("utf-8", errors="strict")
|
||||
)
|
||||
|
||||
bad_index = pos + e.start # absolute index in 'line'
|
||||
bad_slice = line[bad_index : bad_index + 1] # FIX EXACTLY ONE BYTE
|
||||
|
||||
# Decode that single byte using the first working fallback
|
||||
decoded = None
|
||||
used_enc = None
|
||||
for enc in fallback_chain:
|
||||
try:
|
||||
decoded = bad_slice.decode(enc, errors="strict")
|
||||
used_enc = enc
|
||||
break
|
||||
except Exception:
|
||||
continue
|
||||
if decoded is None:
|
||||
# latin-1 always succeeds (byte->same code point)
|
||||
decoded = bad_slice.decode("latin-1")
|
||||
used_enc = "latin-1 (fallback)"
|
||||
|
||||
out_fragments.append(decoded)
|
||||
|
||||
# Log the fix
|
||||
col_1based = bad_index + 1 # byte-based column
|
||||
fixes.append(
|
||||
{
|
||||
"line_base_offset": base_offset,
|
||||
"line": None, # caller fills line number
|
||||
"column": col_1based,
|
||||
"abs_offset": base_offset + bad_index,
|
||||
"bad_bytes_hex": bad_slice.hex(),
|
||||
"used_encoding": used_enc,
|
||||
"replacement_preview": decoded,
|
||||
}
|
||||
)
|
||||
|
||||
# Advance exactly one byte past the offending byte and continue
|
||||
pos = bad_index + 1
|
||||
|
||||
return "".join(out_fragments), fixes
|
||||
|
||||
|
||||
def targeted_fix_to_utf8(
|
||||
src_path: str,
|
||||
out_path: str,
|
||||
fallback_chain: List[str],
|
||||
dry_run: bool,
|
||||
max_fixes: int,
|
||||
):
|
||||
with open(src_path, "rb") as fb:
|
||||
data = fb.read()
|
||||
|
||||
total_fixes = 0
|
||||
repaired_lines: List[str] = []
|
||||
line_no = 0
|
||||
max_val = max_fixes if max_fixes != 0 else float("inf")
|
||||
|
||||
for line_bytes, base_offset in iter_lines_with_offsets(data):
|
||||
line_no += 1
|
||||
# Fast path: keep lines that are already valid UTF-8
|
||||
try:
|
||||
repaired_lines.append(line_bytes.decode("utf-8", errors="strict"))
|
||||
continue
|
||||
except UnicodeDecodeError:
|
||||
pass
|
||||
|
||||
fixed_text, fixes = repair_mixed_utf8_line(
|
||||
line_bytes, base_offset, fallback_chain=fallback_chain
|
||||
)
|
||||
for f in fixes:
|
||||
f["line"] = line_no
|
||||
|
||||
repaired_lines.append(fixed_text)
|
||||
|
||||
# Log fixes
|
||||
for f in fixes:
|
||||
total_fixes += 1
|
||||
print(
|
||||
f"[FIX {total_fixes}] Line {f['line']}, Column {f['column']}, Abs offset {f['abs_offset']}"
|
||||
)
|
||||
print(f" Bad bytes: 0x{f['bad_bytes_hex']}")
|
||||
print(f" Used encoding: {f['used_encoding']}")
|
||||
preview = f["replacement_preview"].replace("\r", "\\r").replace("\n", "\\n")
|
||||
if len(preview) > 40:
|
||||
preview = preview[:40] + "…"
|
||||
print(f" Replacement preview: {preview}")
|
||||
print()
|
||||
if total_fixes >= max_val:
|
||||
print(f"Reached max fixes limit ({max_fixes}). Stopping scan.")
|
||||
break
|
||||
if total_fixes >= max_val:
|
||||
break
|
||||
|
||||
if dry_run:
|
||||
print(f"Dry run complete. Detected {total_fixes} fix(es). No file written.")
|
||||
return total_fixes
|
||||
|
||||
# Join and verify result can be encoded to UTF-8
|
||||
repaired_text = "".join(repaired_lines)
|
||||
try:
|
||||
repaired_text.encode("utf-8", errors="strict")
|
||||
except UnicodeEncodeError as e:
|
||||
print(f"Internal error: repaired text not valid UTF-8: {e}", file=sys.stderr)
|
||||
sys.exit(3)
|
||||
|
||||
with open(out_path, "w", encoding="utf-8", newline="") as fw:
|
||||
fw.write(repaired_text)
|
||||
|
||||
print(f"Fixed file written to: {out_path}")
|
||||
print(f"Total fixes applied: {total_fixes}")
|
||||
return total_fixes
|
||||
|
||||
|
||||
# -------------------------
|
||||
# CLI
|
||||
# -------------------------
|
||||
|
||||
|
||||
def main():
|
||||
ap = argparse.ArgumentParser(
|
||||
description=(
|
||||
"Scan for invalid UTF-8; optionally convert whole file or fix only invalid bytes.\n\n"
|
||||
"By default, --convert and --fix **edit the input file in place** and create a backup "
|
||||
"named '<input>.bak' before writing. If you pass --output, the original file is left "
|
||||
"unchanged and no backup is created. Use --dry-run to preview fixes without writing."
|
||||
),
|
||||
formatter_class=argparse.RawTextHelpFormatter,
|
||||
)
|
||||
ap.add_argument("path", help="Path to the CSV/text file")
|
||||
ap.add_argument(
|
||||
"--context",
|
||||
type=int,
|
||||
default=20,
|
||||
help="Bytes of context to show around errors (default: 20)",
|
||||
)
|
||||
ap.add_argument(
|
||||
"--limit",
|
||||
type=int,
|
||||
default=100,
|
||||
help="Max errors to report during scan (0 = unlimited)",
|
||||
)
|
||||
ap.add_argument(
|
||||
"--skip-scan", action="store_true", help="Skip initial scan for speed"
|
||||
)
|
||||
|
||||
# Whole-file convert
|
||||
ap.add_argument(
|
||||
"--convert",
|
||||
action="store_true",
|
||||
help="Convert entire file to UTF-8 using auto/forced encoding "
|
||||
"(in-place by default; creates '<input>.bak').",
|
||||
)
|
||||
ap.add_argument(
|
||||
"--encoding",
|
||||
help="Force source encoding for --convert or first fallback for --fix",
|
||||
)
|
||||
ap.add_argument(
|
||||
"--output",
|
||||
help="Write to this path instead of in-place (no .bak is created in that case)",
|
||||
)
|
||||
|
||||
# Targeted fix
|
||||
ap.add_argument(
|
||||
"--fix",
|
||||
action="store_true",
|
||||
help="Fix only invalid byte(s) via fallback encodings "
|
||||
"(in-place by default; creates '<input>.bak').",
|
||||
)
|
||||
ap.add_argument(
|
||||
"--fallbacks",
|
||||
help="Comma-separated fallback encodings (default: cp1252,iso-8859-1,iso-8859-15)",
|
||||
)
|
||||
ap.add_argument(
|
||||
"--dry-run",
|
||||
action="store_true",
|
||||
help="(fix) Print fixes but do not write or create a .bak",
|
||||
)
|
||||
ap.add_argument(
|
||||
"--max-fixes",
|
||||
type=int,
|
||||
default=0,
|
||||
help="(fix) Stop after N fixes (0 = unlimited)",
|
||||
)
|
||||
|
||||
args = ap.parse_args()
|
||||
path = args.path
|
||||
|
||||
if not os.path.isfile(path):
|
||||
print(f"File not found: {path}", file=sys.stderr)
|
||||
sys.exit(2)
|
||||
|
||||
# Optional scan first
|
||||
if not args.skip_scan:
|
||||
scan_file_for_utf8_errors(path, context=args.context, limit=args.limit)
|
||||
|
||||
# Mode selection guards
|
||||
if args.convert and args.fix:
|
||||
print("Choose either --convert or --fix (not both).", file=sys.stderr)
|
||||
sys.exit(9)
|
||||
if not args.convert and not args.fix and args.skip_scan:
|
||||
print("No action selected (use --convert or --fix).")
|
||||
return
|
||||
if not args.convert and not args.fix:
|
||||
# User only wanted a scan
|
||||
return
|
||||
|
||||
# Determine output path and backup behavior
|
||||
# In-place by default: create '<input>.bak' before overwriting.
|
||||
if args.output:
|
||||
out_path = args.output
|
||||
in_place = False
|
||||
else:
|
||||
out_path = path
|
||||
in_place = True
|
||||
|
||||
# CONVERT mode
|
||||
if args.convert:
|
||||
print("\n[CONVERT MODE] Converting file to UTF-8...")
|
||||
if in_place:
|
||||
# Create backup before overwriting original
|
||||
backup_path = path + ".bak"
|
||||
shutil.copy2(path, backup_path)
|
||||
print(f"Backup created: {backup_path}")
|
||||
used = convert_to_utf8(path, out_path, src_encoding=args.encoding)
|
||||
print(f"Source encoding used: {used}")
|
||||
print(f"Saved UTF-8 file as: {out_path}")
|
||||
ok, err = verify_utf8_file(out_path)
|
||||
if ok:
|
||||
print("Verification: output is valid UTF-8 ✅")
|
||||
else:
|
||||
print(f"Verification failed: {err}")
|
||||
sys.exit(8)
|
||||
return
|
||||
|
||||
# FIX mode (targeted, single-byte)
|
||||
if args.fix:
|
||||
print("\n[FIX MODE] Fixing only invalid bytes to UTF-8...")
|
||||
if args.dry_run:
|
||||
# Dry-run: never write or create backup
|
||||
out_path_effective = os.devnull
|
||||
in_place_effective = False
|
||||
else:
|
||||
out_path_effective = out_path
|
||||
in_place_effective = in_place
|
||||
|
||||
# Build fallback chain (if --encoding provided, try it first)
|
||||
if args.fallbacks:
|
||||
fallback_chain = [e.strip() for e in args.fallbacks.split(",") if e.strip()]
|
||||
else:
|
||||
fallback_chain = detect_probable_fallbacks()
|
||||
if args.encoding and args.encoding not in fallback_chain:
|
||||
fallback_chain = [args.encoding] + fallback_chain
|
||||
|
||||
if in_place_effective:
|
||||
# Create backup before overwriting original (only when actually writing)
|
||||
backup_path = path + ".bak"
|
||||
shutil.copy2(path, backup_path)
|
||||
print(f"Backup created: {backup_path}")
|
||||
|
||||
fix_count = targeted_fix_to_utf8(
|
||||
path,
|
||||
out_path_effective,
|
||||
fallback_chain=fallback_chain,
|
||||
dry_run=args.dry_run,
|
||||
max_fixes=args.max_fixes,
|
||||
)
|
||||
|
||||
if not args.dry_run:
|
||||
ok, err = verify_utf8_file(out_path_effective)
|
||||
if ok:
|
||||
print("Verification: output is valid UTF-8 ✅")
|
||||
print(f"Fix mode completed — {fix_count} byte(s) corrected.")
|
||||
else:
|
||||
print(f"Verification failed: {err}")
|
||||
sys.exit(8)
|
||||
return
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,6 +1,5 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import logging
|
||||
import os
|
||||
import csv
|
||||
|
||||
@@ -9,60 +8,68 @@ def _main():
|
||||
input_csv_file_path = "base_reverse_dns.csv"
|
||||
base_reverse_dns_map_file_path = "base_reverse_dns_map.csv"
|
||||
known_unknown_list_file_path = "known_unknown_base_reverse_dns.txt"
|
||||
psl_overrides_file_path = "psl_overrides.txt"
|
||||
output_csv_file_path = "unknown_base_reverse_dns.csv"
|
||||
|
||||
csv_headers = ["source_name", "message_count"]
|
||||
|
||||
output_rows = []
|
||||
|
||||
logging.basicConfig()
|
||||
logger = logging.getLogger(__name__)
|
||||
logger.setLevel(logging.INFO)
|
||||
|
||||
for p in [
|
||||
input_csv_file_path,
|
||||
base_reverse_dns_map_file_path,
|
||||
known_unknown_list_file_path,
|
||||
]:
|
||||
if not os.path.exists(p):
|
||||
logger.error(f"{p} does not exist")
|
||||
exit(1)
|
||||
logger.info(f"Loading {known_unknown_list_file_path}")
|
||||
known_unknown_domains = []
|
||||
with open(known_unknown_list_file_path) as f:
|
||||
for line in f.readlines():
|
||||
domain = line.lower().strip()
|
||||
if domain in known_unknown_domains:
|
||||
logger.warning(
|
||||
f"{domain} is in {known_unknown_list_file_path} multiple times"
|
||||
)
|
||||
else:
|
||||
known_unknown_domains.append(domain)
|
||||
logger.info(f"Loading {base_reverse_dns_map_file_path}")
|
||||
psl_overrides = []
|
||||
known_domains = []
|
||||
output_rows = []
|
||||
|
||||
def load_list(file_path, list_var):
|
||||
if not os.path.exists(file_path):
|
||||
print(f"Error: {file_path} does not exist")
|
||||
print(f"Loading {file_path}")
|
||||
with open(file_path) as f:
|
||||
for line in f.readlines():
|
||||
domain = line.lower().strip()
|
||||
if domain in list_var:
|
||||
print(f"Error: {domain} is in {file_path} multiple times")
|
||||
exit(1)
|
||||
elif domain != "":
|
||||
list_var.append(domain)
|
||||
|
||||
load_list(known_unknown_list_file_path, known_unknown_domains)
|
||||
load_list(psl_overrides_file_path, psl_overrides)
|
||||
if not os.path.exists(base_reverse_dns_map_file_path):
|
||||
print(f"Error: {base_reverse_dns_map_file_path} does not exist")
|
||||
print(f"Loading {base_reverse_dns_map_file_path}")
|
||||
with open(base_reverse_dns_map_file_path) as f:
|
||||
for row in csv.DictReader(f):
|
||||
domain = row["base_reverse_dns"].lower().strip()
|
||||
if domain in known_domains:
|
||||
logger.warning(
|
||||
f"{domain} is in {base_reverse_dns_map_file_path} multiple times"
|
||||
print(
|
||||
f"Error: {domain} is in {base_reverse_dns_map_file_path} multiple times"
|
||||
)
|
||||
exit()
|
||||
else:
|
||||
known_domains.append(domain)
|
||||
if domain in known_unknown_domains and known_domains:
|
||||
pass
|
||||
logger.warning(
|
||||
f"{domain} is in {known_unknown_list_file_path} and {base_reverse_dns_map_file_path}"
|
||||
print(
|
||||
f"Error:{domain} is in {known_unknown_list_file_path} and \
|
||||
{base_reverse_dns_map_file_path}"
|
||||
)
|
||||
|
||||
logger.info(f"Checking domains against {base_reverse_dns_map_file_path}")
|
||||
exit(1)
|
||||
if not os.path.exists(input_csv_file_path):
|
||||
print(f"Error: {base_reverse_dns_map_file_path} does not exist")
|
||||
exit(1)
|
||||
with open(input_csv_file_path) as f:
|
||||
for row in csv.DictReader(f):
|
||||
domain = row["source_name"].lower().strip()
|
||||
if domain == "":
|
||||
continue
|
||||
for psl_domain in psl_overrides:
|
||||
if domain.endswith(psl_domain):
|
||||
domain = psl_domain.strip(".").strip("-")
|
||||
break
|
||||
if domain not in known_domains and domain not in known_unknown_domains:
|
||||
logger.info(f"New unknown domain found: {domain}")
|
||||
print(f"New unknown domain found: {domain}")
|
||||
output_rows.append(row)
|
||||
logger.info(f"Writing {output_csv_file_path}")
|
||||
print(f"Writing {output_csv_file_path}")
|
||||
with open(output_csv_file_path, "w") as f:
|
||||
writer = csv.DictWriter(f, fieldnames=csv_headers)
|
||||
writer.writeheader()
|
||||
|
||||
@@ -1,125 +1,601 @@
|
||||
200.in-addr.arpa
|
||||
1jli.site
|
||||
26.107
|
||||
444qcuhilla.com
|
||||
4xr1.com
|
||||
9services.com
|
||||
a7e.ru
|
||||
a94434500-blog.com
|
||||
aams8.jp
|
||||
abv-10.top
|
||||
acemail.co.in
|
||||
activaicon.com
|
||||
adcritic.net
|
||||
adlucrumnewsletter.com
|
||||
admin.corpivensa.gob.ve
|
||||
advantageiq.com
|
||||
advrider.ro
|
||||
aerospacevitro.us.com
|
||||
agenturserver.de
|
||||
aghories.com
|
||||
ai270.net
|
||||
albagroup-eg.com
|
||||
alchemy.net
|
||||
alohabeachcamp.net
|
||||
alsiscad.com
|
||||
aluminumpipetubing.com
|
||||
americanstorageca.com
|
||||
amplusserver.info
|
||||
anchorfundhub.com
|
||||
anglishment.com
|
||||
anteldata.net.uy
|
||||
antis.edu
|
||||
antonaoll.com
|
||||
anviklass.org
|
||||
anwrgrp.lat
|
||||
aosau.net
|
||||
arandomserver.com
|
||||
aransk.ru
|
||||
ardcs.cn
|
||||
armninl.met
|
||||
as29550.net
|
||||
asahachimaru.com
|
||||
aserv.co.za
|
||||
asmecam.it
|
||||
ateky.net.br
|
||||
aurelienvos.com
|
||||
automatech.lat
|
||||
avistaadvantage.com
|
||||
b8sales.com
|
||||
bahjs.com
|
||||
baliaura.com
|
||||
banaras.co
|
||||
bearandbullmarketnews.com
|
||||
bestinvestingtime.com
|
||||
bhjui.com
|
||||
biocorp.com
|
||||
bisno1.co.jp
|
||||
biosophy.net
|
||||
bitter-echo.com
|
||||
bizhostingservices.com
|
||||
blguss.com
|
||||
bluenet.ch
|
||||
bluhosting.com
|
||||
bnasg.com
|
||||
bodiax.pp.ua
|
||||
bost-law.com
|
||||
brainity.com
|
||||
brazalnde.net
|
||||
brellatransplc.shop
|
||||
brnonet.cz
|
||||
broadwaycover.com
|
||||
brushinglegal.de
|
||||
brw.net
|
||||
btes.tv
|
||||
budgeteasehub.com
|
||||
buoytoys.com
|
||||
buyjapanese.jp
|
||||
c53dw7m24rj.com
|
||||
cahtelrandom.org
|
||||
casadelmarsamara.com
|
||||
cashflowmasterypro.com
|
||||
cavabeen.com
|
||||
cbti.net
|
||||
centralmalaysia.com
|
||||
chauffeurplan.co.uk
|
||||
checkpox.fun
|
||||
chegouseuvlache.org
|
||||
chinaxingyu.xyz
|
||||
christus.mx
|
||||
churchills.market
|
||||
ci-xyz.fit
|
||||
cisumrecords.com
|
||||
ckaik.cn
|
||||
clcktoact.com
|
||||
cli-eurosignal.cz
|
||||
cloud-admin.it
|
||||
cloud-edm.com
|
||||
cloudflare-email.org
|
||||
cloudhosting.rs
|
||||
cloudlogin.co
|
||||
cloudplatformpro.com
|
||||
cnode.io
|
||||
cntcloud.com
|
||||
code-it.net
|
||||
codefriend.top
|
||||
colombiaceropapel.org
|
||||
commerceinsurance.com
|
||||
comsharempc.com
|
||||
conexiona.com
|
||||
coolblaze.com
|
||||
coowo.com
|
||||
corpemail.net
|
||||
cp2-myorderbox.com
|
||||
cps.com.ar
|
||||
crnagora.net
|
||||
cross-d-bar-troutranch.com
|
||||
ctla.co.kr
|
||||
cumbalikonakhotel.com
|
||||
currencyexconverter.com
|
||||
daakbabu.com
|
||||
daikinmae.com
|
||||
dairyvalley.com.my
|
||||
dastans.ru
|
||||
datahost36.de
|
||||
ddii.network
|
||||
deep-sek.shop
|
||||
deetownsounds.com
|
||||
descarca-counter-strike.net
|
||||
detrot.xyz
|
||||
dettlaffinc.com
|
||||
dextoolse.net
|
||||
digestivedaily.com
|
||||
digi.net.my
|
||||
dinofelis.cn
|
||||
diwkyncbi.top
|
||||
dkginternet.com
|
||||
dnexpress.info
|
||||
dns-oid.com
|
||||
dnsindia.net
|
||||
domainserver.ne.jp
|
||||
domconfig.com
|
||||
doorsrv.com
|
||||
dreampox.fun
|
||||
dreamtechmedia.com
ds.network
dss-group.net
dvj.theworkpc.com
dwlcka.com
dynamic-wiretel.in
dyntcorp.com
easternkingspei.com
economiceagles.com
egosimail.com
eliotporterphotos.us
emailgids.net
emailperegrine.com
entendercopilot.com
entretothom.net
epaycontrol.com
epicinvestmentsreview.co
epicinvestmentsreview.com
epik.com
epsilon-group.com
erestaff.com
euro-trade-gmbh.com
example.com
exposervers.com-new
extendcp.co.uk
eyecandyhosting.xyz
fastwebnet.it
fd9ing7wfn.com
feipnghardware.com
fetscorp.shop
fewo-usedom.net
fin-crime.com
financeaimpoint.com
financeupward.com
firmflat.com
flex-video.bnr.la
flourishfusionlife.com
formicidaehunt.net
fosterheap.com
fredi.shop
frontiernet.net
ftifb7tk3c.com
gamersprotectionvpn.online
gendns.com
getgreencardsfast.com
getthatroi.com
gibbshosting.com
gigidea.net
giize.com
ginous.eu.com
gis.net
gist-th.com
globalglennpartners.com
goldsboroughplace.com
gophermedia.com
gqlists.us.com
gratzl.de
greatestworldnews.com
greennutritioncare.com
gsbb.com
gumbolimbo.net
h-serv.co.uk
haedefpartners.com
halcyon-aboveboard.com
hanzubon.org
healthfuljourneyjoy.com
hgnbroken.us.com
highwey-diesel.com
hirofactory.com
hjd.asso.fr
hongchenggco.pro
hongkongtaxi.co
hopsinthehanger.com
hosted-by-worldstream.net
hostelsucre.com
hosting1337.com
hostinghane.com
hostinglotus.cloud
hostingmichigan.com
hostiran.name
hostmnl.com
hostname.localhost
hostnetwork.com
hosts.net.nz
hostserv.eu
hostwhitelabel.com
hpms1.jp
hunariojmk.net
hunriokinmuim.net
hypericine.com
i-mecca.net
iaasdns.com
iam.net.ma
iconmarketingguy.com
idcfcloud.net
idealconcept.live
igmohji.com
igppevents.org.uk
ihglobaldns.com
ilmessicano.com
imjtmn.cn
immenzaces.com
in-addr-arpa
in-addr.arpa
indsalelimited.com
indulgent-holistic.com
industechint.org
inshaaegypt.com
intal.uz
interfarma.kz
intocpanel.com
ip-147-135-108.us
ip-178-33-109.eu
ip-ptr.tech
iswhatpercent.com
itsidc.com
itwebs.com
iuon.net
ivol.co
jalanet.co.id
jimishare.com
jlccptt.net.cn
jlenterprises.co.uk
jmontalto.com
joyomokei.com
jumanra.org
justlongshirts.com
kahlaa.com
kaw.theworkpc.com
kbronet.com.tw
kdnursing.org
kielnet.net
kihy.theworkpc.com
kingschurchwirral.org
kitchenaildbd.com
klaomi.shop
knkconsult.net
kohshikai.com
krhfund.org
krillaglass.com
lancorhomes.com
landpedia.org
lanzatuseo.es
layerdns.cloud
learninglinked.com
legenditds.com
levertechcentre.com
lhost.no
lideri.net.br
lighthouse-media.com
lightpath.net
limogesporcelainboxes.com
lindsaywalt.net
linuxsunucum.com
listertermoformadoa.com
llsend.com
local.net
lohkal.com
londionrtim.net
lonestarmm.net
longmarquis.com
longwoodmgmt.com
lse.kz
lunvoy.com
luxarpro.ru
lwl-puehringer.at
lynx.net.lb
lyse.net
m-sender.com.ua
maggiolicloud.it
magnetmail.net
magnumgo.uz
maia11.com
mail-fire.com
mailsentinel.net
mailset.cn
malardino.net
managed-vps.net
manhattanbulletpoint.com
manpowerservices.com
marketmysterycode.com
marketwizardspro.com
masterclassjournal.com
matroguel.cam
maximpactipo.com
mechanicalwalk.store
mediavobis.com
meqlobal.com
mgts.by
migrans.net
miixta.com
milleniumsrv.com
mindworksunlimited.com
mirth-gale.com
misorpresa.com
mitomobile.com
mitsubachi-kibako.net
mjinn.com
mkegs.shop
mobius.fr
model-ac.ink
moderntradingnews.com
monnaiegroup.com
monopolizeright.com
moonjaws.com
morningnewscatcher.com
motion4ever.net
mschosting.com
msdp1.com
mspnet.pro
mts-nn.ru
multifamilydesign.com
mxserver.ro
mxthunder.net
my-ihor.ru
mycloudmailbox.com
myfriendforum.com
myrewards.net
mysagestore.com
mysecurewebserver.com
myshanet.net
myvps.jp
mywedsite.net
mywic.eu
name.tools
nanshenqfurniture.com
nask.pl
navertise.net
ncbb.kz
ncport.ru
ncsdi.ws
nebdig.com
neovet-base.ru
netbri.com
netcentertelecom.net.br
neti.ee
netkl.org
newinvestingguide.com
newwallstreetcode.com
ngvcv.cn
nic.name
nidix.net
nieuwedagnetwerk.net
nlscanme.com
nmeuh.cn
noisndametal.com
nucleusemail.com
nutriboostlife.com
nwo.giize.com
nwwhalewatchers.org
ny.adsl
nyt1.com
offerslatedeals.com
office365.us
ogicom.net
olivettilexikon.co.uk
omegabrasil.inf.br
onnet21.com
onumubunumu.com
oppt-ac.fit
orbitel.net.co
orfsurface.com
orientalspot.com
outsidences.com
ovaltinalization.co
overta.ru
ox28vgrurc.com
pamulang.net
panaltyspot.space
panolacountysheriffms.com
passionatesmiles.com
paulinelam.com
pdi-corp.com
peloquinbeck.com
perimetercenter.net
permanentscreen.com
permasteellisagroup.com
perumkijhyu.net
pesnia.com.ua
ph8ltwdi12o.com
pharmada.com.de
phdns3.es
pigelixval1.com
pipefittingsindia.com
planethoster.net
playamedia.io
plesk.page
pmnhost.net
pokiloandhu.net
pokupki5.ru
polandi.net
popiup.com
ports.net
posolstvostilya.com
potia.net
prima.com.ar
prima.net.ar
profsol.co.uk
prohealthmotion.com
promooffermarket.site
proudserver.com
proxado.com
psnm.ru
pvcwindowsprices.live
qontenciplc.autos
quakeclick.com
quasarstate.store
quatthonggiotico.com
qxyxab44njd.com
radianthealthrenaissance.com
rapidns.com
raxa.host
reberte.com
reethvikintl.com
regruhosting.ru
reliablepanel.com
rgb365.eu
riddlecamera.net
riddletrends.com
roccopugliese.com
runnin-rebels.com
rupar.puglia.it
rwdhosting.ca
s500host.com
sageevents.co.ke
sahacker-2020.com
samsales.site
sante-lorraine.fr
saransk.ru
satirogluet.com
securednshost.com
scioncontacts.com
sdcc.my
seaspraymta3.net
secorp.mx
securen.net
securerelay.in
securev.net
seductiveeyes.com
seizethedayconsulting.com
serroplast.shop
server290.com
server342.com
server3559.cc
servershost.biz
sfek.kz
sgnetway.net
shopfox.ca
silvestrejaguar.sbs
silvestreonca.sbs
simplediagnostics.org
siriuscloud.jp
sisglobalresearch.com
sixpacklink.net
sjestyle.com
smallvillages.com
smartape-vps.com
solusoftware.com
sourcedns.com
southcoastwebhosting12.com
specialtvvs.com
spiritualtechnologies.io
sprout.org
srv.cat
stableserver.net
statlerfa.co.uk
stock-smtp.top
stockepictigers.com
stockexchangejournal.com
subterranean-concave.com
suksangroup.com
swissbluetopaz.com
switer.shop
sysop4.com
system.eu.com
szhongbing.com
t-jon.com
tacaindo.net
tacom.tj
tankertelz.co
tataidc.com
teamveiw.com
tecnoxia.net
tel-xyz.fit
tenkids.net
terminavalley.com
thaicloudsolutions.com
thaikinghost.com
thaimonster.com
thegermainetruth.net
thehandmaderose.com
thepushcase.com
ticdns.com
tigo.bo
toledofibra.net.br
topdns.com
totaal.net
totalplay.net
tqh.ro
traderlearningcenter.com
tradeukraine.site
traveleza.com
trwww.com
tsuzakij.com
tullostrucking.com
turbinetrends.com
twincitiesdistinctivehomes.com
tylerfordonline.com
uiyum.com
ultragate.com
uneedacollie.com
unified.services
unite.services
urawasl.com
us.servername.us
vagebond.net
varvia.de
vbcploo.com
vdc.vn
vendimetry.com
vibrantwellnesscorp.com
virtualine.org
visit.docotor
viviotech.us
vlflgl.com
volganet.ru
vrns.net
vulterdi.edu
vvondertex.com
wallstreetsgossip.com
wamego.net
wanekoohost.com
wealthexpertisepro.com
web-login.eu
weblinkinternational.com
webnox.io
websale.net
welllivinghive.com
westparkcom.com
wetransfer-eu.com
wheelch.me
whoflew.com
whpservers.com
wisdomhard.com
wisewealthcircle.com
wisvis.com
wodeniowa.com
wordpresshosting.xyz
wsiph2.com
xnt.mx
xodiax.com
xpnuf.cn
xsfati.us.com
xspmail.jp
yourciviccompass.com
yourinvestworkbook.com
yoursitesecure.net
zerowebhosting.net
zmml.uk
znlc.jp
ztomy.com
parsedmarc/resources/maps/psl_overrides.txt (new file, 23 lines)
@@ -0,0 +1,23 @@
-applefibernet.com
-c3.net.pl
-celsiainternet.com
-clientes-izzi.mx
-clientes-zap-izzi.mx
-imnet.com.br
-mcnbd.com
-smile.com.bd
-tataidc.co.in
-veloxfiber.com.br
-wconect.com.br
.amazonaws.com
.cloudaccess.net
.ddnsgeek.com
.fastvps-server.com
.in-addr-arpa
.in-addr.arpa
.kasserver.com
.kinghost.net
.linode.com
.linodeusercontent.com
.na4u.ru
.sakura.ne.jp
parsedmarc/resources/maps/sortlists.py (new executable file, 184 lines)
@@ -0,0 +1,184 @@
#!/usr/bin/env python3

from __future__ import annotations

import os
import csv
from pathlib import Path
from typing import Mapping, Iterable, Optional, Collection, Union, List, Dict


class CSVValidationError(Exception):
    def __init__(self, errors: list[str]):
        super().__init__("\n".join(errors))
        self.errors = errors


def sort_csv(
    filepath: Union[str, Path],
    field: str,
    *,
    sort_field_value_must_be_unique: bool = True,
    strip_whitespace: bool = True,
    fields_to_lowercase: Optional[Iterable[str]] = None,
    case_insensitive_sort: bool = False,
    required_fields: Optional[Iterable[str]] = None,
    allowed_values: Optional[Mapping[str, Collection[str]]] = None,
) -> List[Dict[str, str]]:
    """
    Read a CSV, optionally normalize rows (strip whitespace, lowercase certain fields),
    validate field values, and write the sorted CSV back to the same path.

    - filepath: Path to the CSV to sort.
    - field: The field name to sort by.
    - fields_to_lowercase: Permanently lowercase these fields in the data.
    - strip_whitespace: Remove whitespace at the beginning and end of field values.
    - case_insensitive_sort: Ignore case when sorting without changing values.
    - required_fields: A list of fields that must have data in all rows.
    - allowed_values: A mapping of allowed values for fields.
    """
    path = Path(filepath)
    required_fields = set(required_fields or [])
    lower_set = set(fields_to_lowercase or [])
    allowed_sets = {k: set(v) for k, v in (allowed_values or {}).items()}
    if sort_field_value_must_be_unique:
        seen_sort_field_values = []

    with path.open("r", newline="") as infile:
        reader = csv.DictReader(infile)
        fieldnames = reader.fieldnames or []
        if field not in fieldnames:
            raise CSVValidationError([f"Missing sort column: {field!r}"])
        missing_headers = required_fields - set(fieldnames)
        if missing_headers:
            raise CSVValidationError(
                [f"Missing required header(s): {sorted(missing_headers)}"]
            )
        rows = list(reader)

    def normalize_row(row: Dict[str, str]) -> None:
        if strip_whitespace:
            for k, v in row.items():
                if isinstance(v, str):
                    row[k] = v.strip()
        for fld in lower_set:
            if fld in row and isinstance(row[fld], str):
                row[fld] = row[fld].lower()

    def validate_row(
        row: Dict[str, str], sort_field: str, line_no: int, errors: list[str]
    ) -> None:
        if sort_field_value_must_be_unique:
            if row[sort_field] in seen_sort_field_values:
                errors.append(f"Line {line_no}: Duplicate row for '{row[sort_field]}'")
            else:
                seen_sort_field_values.append(row[sort_field])
        for rf in required_fields:
            val = row.get(rf)
            if val is None or val == "":
                errors.append(
                    f"Line {line_no}: Missing value for required field '{rf}'"
                )
        for fld, allowed in allowed_sets.items():
            if fld in row:
                val = row[fld]
                if val not in allowed:
                    errors.append(
                        f"Line {line_no}: '{val}' is not an allowed value for '{fld}' "
                        f"(allowed: {sorted(allowed)})"
                    )

    errors: list[str] = []
    for idx, row in enumerate(rows, start=2):  # header is line 1
        normalize_row(row)
        validate_row(row, field, idx, errors)

    if errors:
        raise CSVValidationError(errors)

    def sort_key(r: Dict[str, str]):
        v = r.get(field, "")
        if isinstance(v, str) and case_insensitive_sort:
            return v.casefold()
        return v

    rows.sort(key=sort_key)

    with open(filepath, "w", newline="") as outfile:
        writer = csv.DictWriter(outfile, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(rows)

    # Return the normalized, sorted rows to match the declared return type.
    return rows


def sort_list_file(
    filepath: Union[str, Path],
    *,
    lowercase: bool = True,
    strip: bool = True,
    deduplicate: bool = True,
    remove_blank_lines: bool = True,
    ending_newline: bool = True,
    newline: Optional[str] = "\n",
):
    """Read a list from a file, sort it, optionally strip and deduplicate the values,
    then write that list back to the file.

    - filepath: The path to the file.
    - lowercase: Lowercase all values prior to sorting.
    - remove_blank_lines: Remove any blank lines.
    - ending_newline: End the file with a newline, even if remove_blank_lines is true.
    - newline: The newline character to use.
    """
    with open(filepath, mode="r", newline=newline) as infile:
        lines = infile.readlines()
    for i in range(len(lines)):
        if lowercase:
            lines[i] = lines[i].lower()
        if strip:
            lines[i] = lines[i].strip()
    if deduplicate:
        lines = list(set(lines))
    if remove_blank_lines:
        while "" in lines:
            lines.remove("")
    lines = sorted(lines)
    if ending_newline:
        if lines[-1] != "":
            lines.append("")
    with open(filepath, mode="w", newline=newline) as outfile:
        outfile.write("\n".join(lines))


def _main():
    map_file = "base_reverse_dns_map.csv"
    map_key = "base_reverse_dns"
    list_files = ["known_unknown_base_reverse_dns.txt", "psl_overrides.txt"]
    types_file = "base_reverse_dns_types.txt"

    with open(types_file) as f:
        # Strip newlines so the allowed "Type" values compare equal to CSV values.
        types = [line.strip() for line in f.readlines()]
    while "" in types:
        types.remove("")

    map_allowed_values = {"Type": types}

    for list_file in list_files:
        if not os.path.exists(list_file):
            print(f"Error: {list_file} does not exist")
            exit(1)
        sort_list_file(list_file)
    if not os.path.exists(types_file):
        print(f"Error: {types_file} does not exist")
        exit(1)
    sort_list_file(types_file, lowercase=False)
    if not os.path.exists(map_file):
        print(f"Error: {map_file} does not exist")
        exit(1)
    try:
        sort_csv(map_file, map_key, allowed_values=map_allowed_values)
    except CSVValidationError as e:
        print(f"{map_file} did not validate: {e}")


if __name__ == "__main__":
    _main()
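For orientation, here is a minimal usage sketch of these helpers; the file names and the Type values below are placeholders, not the real map contents.

# Minimal usage sketch (run from parsedmarc/resources/maps); the file names and
# Type values here are placeholders rather than the real map data.
from sortlists import CSVValidationError, sort_csv, sort_list_file

sort_list_file("example_list.txt")  # lowercases, strips, dedupes and sorts in place
try:
    sort_csv(
        "example_map.csv",
        "base_reverse_dns",
        required_fields=["base_reverse_dns", "Type"],
        allowed_values={"Type": ["Example Type A", "Example Type B"]},
    )
except CSVValidationError as e:
    print(f"Validation failed:\n{e}")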
@@ -44,6 +44,12 @@ parenthesis_regex = re.compile(r"\s*\(.*\)\s*")
 null_file = open(os.devnull, "w")
 mailparser_logger = logging.getLogger("mailparser")
 mailparser_logger.setLevel(logging.CRITICAL)
+psl = publicsuffixlist.PublicSuffixList()
+psl_overrides_path = str(files(parsedmarc.resources.maps).joinpath("psl_overrides.txt"))
+with open(psl_overrides_path) as f:
+    psl_overrides = [line.rstrip() for line in f.readlines()]
+while "" in psl_overrides:
+    psl_overrides.remove("")
 
 
 class EmailParserError(RuntimeError):
@@ -78,7 +84,8 @@ def get_base_domain(domain):
 
     .. note::
         Results are based on a list of public domain suffixes at
-        https://publicsuffix.org/list/public_suffix_list.dat.
+        https://publicsuffix.org/list/public_suffix_list.dat and overrides included in
+        parsedmarc.resources.maps.psl_overrides.txt
 
     Args:
         domain (str): A domain or subdomain
@@ -87,8 +94,12 @@ def get_base_domain(domain):
         str: The base domain of the given domain
 
     """
-    psl = publicsuffixlist.PublicSuffixList()
-    return psl.privatesuffix(domain)
+    domain = domain.lower()
+    publicsuffix = psl.privatesuffix(domain)
+    for override in psl_overrides:
+        if domain.endswith(override):
+            return override.strip(".").strip("-")
+    return publicsuffix
 
 
 def query_dns(domain, record_type, cache=None, nameservers=None, timeout=2.0):
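To make the override behavior concrete, here is a small hand-written illustration; the hostnames are invented and the import path is assumed, not taken from this diff.

# Invented hostnames illustrating the override matching above: an entry such as
# ".amazonaws.com" or "-tataidc.co.in" is matched with str.endswith(), and the
# leading "." or "-" is stripped from the returned base domain.
from parsedmarc.utils import get_base_domain  # assumed import path

print(get_base_domain("ec2-203-0-113-10.compute-1.amazonaws.com"))  # amazonaws.com (override)
print(get_base_domain("static-203-0-113-10-tataidc.co.in"))         # tataidc.co.in (override)
print(get_base_domain("mail.example.co.uk"))                        # example.co.uk (PSL result)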
@@ -345,7 +356,7 @@ def get_service_from_reverse_dns_base_domain(
     if not (offline or always_use_local_file) and len(reverse_dns_map) == 0:
         try:
             logger.debug(f"Trying to fetch reverse DNS map from {url}...")
-            headers = {"User-Agent", USER_AGENT}
+            headers = {"User-Agent": USER_AGENT}
             response = requests.get(url, headers=headers)
             response.raise_for_status()
             csv_file.write(response.text)
@@ -356,6 +367,7 @@ def get_service_from_reverse_dns_base_domain(
         except Exception:
             logger.warning("Not a valid CSV file")
             csv_file.seek(0)
+            logging.debug("Response body:")
             logger.debug(csv_file.read())
 
     if len(reverse_dns_map) == 0:
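The one-character fix above is easy to miss: with a comma, the braces build a set rather than a dict, so requests gets no usable User-Agent header. A minimal sketch with a placeholder agent string:

import requests

USER_AGENT = "parsedmarc example/0.0"  # placeholder value for illustration

broken_headers = {"User-Agent", USER_AGENT}  # set literal: two unrelated strings
fixed_headers = {"User-Agent": USER_AGENT}   # dict literal: header name mapped to its value

# requests expects a mapping for headers; the set form fails when headers are merged.
response = requests.get("https://example.com", headers=fixed_headers)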
@@ -55,6 +55,7 @@ dependencies = [
     "tqdm>=4.31.1",
     "urllib3>=1.25.7",
     "xmltodict>=0.12.0",
+    "PyYAML>=6.0.3"
 ]
 
 [project.optional-dependencies]
@@ -82,3 +83,14 @@ path = "parsedmarc/constants.py"
 include = [
     "/parsedmarc",
 ]
+
+[tool.hatch.build]
+exclude = [
+    "base_reverse_dns.csv",
+    "find_bad_utf8.py",
+    "find_unknown_base_reverse_dns.py",
+    "unknown_base_reverse_dns.csv",
+    "sortmaps.py",
+    "README.md",
+    "*.bak"
+]
sortmaps.py (deleted file, 25 lines)
@@ -1,25 +0,0 @@
#!/usr/bin/env python3

import os
import glob
import csv


maps_dir = os.path.join("parsedmarc", "resources", "maps")
csv_files = glob.glob(os.path.join(maps_dir, "*.csv"))


def sort_csv(filepath, column=0):
    with open(filepath, mode="r", newline="") as infile:
        reader = csv.reader(infile)
        header = next(reader)
        sorted_rows = sorted(reader, key=lambda row: row[column])

    with open(filepath, mode="w", newline="\n") as outfile:
        writer = csv.writer(outfile)
        writer.writerow(header)
        writer.writerows(sorted_rows)


for csv_file in csv_files:
    sort_csv(csv_file)
splunk/smtp_tls_dashboard.xml (new file, 107 lines)
@@ -0,0 +1,107 @@
<form version="1.1" theme="dark">
  <label>SMTP TLS Reporting</label>
  <fieldset submitButton="false" autoRun="true">
    <input type="time" token="time">
      <label></label>
      <default>
        <earliest>-7d@h</earliest>
        <latest>now</latest>
      </default>
    </input>
    <input type="text" token="organization_name" searchWhenChanged="true">
      <label>Organization name</label>
      <default>*</default>
      <initialValue>*</initialValue>
    </input>
    <input type="text" token="policy_domain">
      <label>Policy domain</label>
      <default>*</default>
      <initialValue>*</initialValue>
    </input>
    <input type="dropdown" token="policy_type" searchWhenChanged="true">
      <label>Policy type</label>
      <choice value="*">Any</choice>
      <choice value="tlsa">tlsa</choice>
      <choice value="sts">sts</choice>
      <choice value="no-policy-found">no-policy-found</choice>
      <default>*</default>
      <initialValue>*</initialValue>
    </input>
  </fieldset>
  <row>
    <panel>
      <title>Reporting organizations</title>
      <table>
        <search>
          <query>index=email sourcetype=smtp:tls organization_name=$organization_name$ policies{}.policy_domain=$policy_domain$
| rename policies{}.policy_domain as policy_domain
| rename policies{}.policy_type as policy_type
| rename policies{}.failed_session_count as failed_sessions
| rename policies{}.failure_details{}.failed_session_count as failed_sessions
| rename policies{}.successful_session_count as successful_sessions
| rename policies{}.failure_details{}.sending_mta_ip as sending_mta_ip
| rename policies{}.failure_details{}.receiving_ip as receiving_ip
| rename policies{}.failure_details{}.receiving_mx_hostname as receiving_mx_hostname
| rename policies{}.failure_details{}.result_type as failure_type
| fillnull value=0 failed_sessions
| stats sum(failed_sessions) as failed_sessions sum(successful_sessions) as successful_sessions by organization_name
| sort -successful_sessions 0</query>
          <earliest>$time.earliest$</earliest>
          <latest>$time.latest$</latest>
        </search>
        <option name="drilldown">none</option>
        <option name="refresh.display">progressbar</option>
      </table>
    </panel>
    <panel>
      <title>Domains</title>
      <table>
        <search>
          <query>index=email sourcetype=smtp:tls organization_name=$organization_name$ policies{}.policy_domain=$policy_domain$
| rename policies{}.policy_domain as policy_domain
| rename policies{}.policy_type as policy_type
| rename policies{}.failed_session_count as failed_sessions
| rename policies{}.failure_details{}.failed_session_count as failed_sessions
| rename policies{}.successful_session_count as successful_sessions
| rename policies{}.failure_details{}.sending_mta_ip as sending_mta_ip
| rename policies{}.failure_details{}.receiving_ip as receiving_ip
| rename policies{}.failure_details{}.receiving_mx_hostname as receiving_mx_hostname
| rename policies{}.failure_details{}.result_type as failure_type
| fillnull value=0 failed_sessions
| stats sum(failed_sessions) as failed_sessions sum(successful_sessions) as successful_sessions by policy_domain
| sort -successful_sessions 0</query>
          <earliest>$time.earliest$</earliest>
          <latest>$time.latest$</latest>
        </search>
        <option name="drilldown">none</option>
        <option name="refresh.display">progressbar</option>
      </table>
    </panel>
  </row>
  <row>
    <panel>
      <title>Failure details</title>
      <table>
        <search>
          <query>index=email sourcetype=smtp:tls organization_name=$organization_name$ policies{}.policy_domain=$policy_domain$ policies{}.failure_details{}.result_type=*
| rename policies{}.policy_domain as policy_domain
| rename policies{}.policy_type as policy_type
| rename policies{}.failed_session_count as failed_sessions
| rename policies{}.failure_details{}.failed_session_count as failed_sessions
| rename policies{}.successful_session_count as successful_sessions
| rename policies{}.failure_details{}.sending_mta_ip as sending_mta_ip
| rename policies{}.failure_details{}.receiving_ip as receiving_ip
| rename policies{}.failure_details{}.receiving_mx_hostname as receiving_mx_hostname
| fillnull value=0 failed_sessions
| rename policies{}.failure_details{}.result_type as failure_type
| table _time organization_name policy_domain policy_type failed_sessions successful_sessions sending_mta_ip receiving_ip receiving_mx_hostname failure_type
| sort by -_time 0</query>
          <earliest>$time.earliest$</earliest>
          <latest>$time.latest$</latest>
        </search>
        <option name="drilldown">none</option>
        <option name="refresh.display">progressbar</option>
      </table>
    </panel>
  </row>
</form>
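For reference, the policies{}.* fields renamed in these searches follow the flattened SMTP TLS report JSON that parsedmarc forwards to Splunk; the event below is invented purely to show the expected shape.

# Invented sample event shaped to match the field names used by the dashboard;
# it is not taken from a real SMTP TLS report.
sample_smtp_tls_event = {
    "organization_name": "Example Mail Provider",
    "policies": [
        {
            "policy_domain": "example.com",
            "policy_type": "sts",
            "successful_session_count": 42,
            "failed_session_count": 1,
            "failure_details": [
                {
                    "result_type": "certificate-expired",
                    "sending_mta_ip": "203.0.113.10",
                    "receiving_ip": "198.51.100.20",
                    "receiving_mx_hostname": "mx1.example.com",
                    "failed_session_count": 1,
                }
            ],
        }
    ],
}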
tests.py (34 lines changed)
@@ -43,11 +43,12 @@ class Test(unittest.TestCase):
 
     def testExtractReportXMLComparator(self):
         """Test XML comparator function"""
-        print()
-        xmlnice = open("samples/extract_report/nice-input.xml").read()
-        print(xmlnice)
-        xmlchanged = minify_xml(open("samples/extract_report/changed-input.xml").read())
-        print(xmlchanged)
+        xmlnice_file = open("samples/extract_report/nice-input.xml")
+        xmlnice = xmlnice_file.read()
+        xmlnice_file.close()
+        xmlchanged_file = open("samples/extract_report/changed-input.xml")
+        xmlchanged = minify_xml(xmlchanged_file.read())
+        xmlchanged_file.close()
         self.assertTrue(compare_xml(xmlnice, xmlnice))
         self.assertTrue(compare_xml(xmlchanged, xmlchanged))
         self.assertFalse(compare_xml(xmlnice, xmlchanged))
@@ -62,7 +63,9 @@ class Test(unittest.TestCase):
             data = f.read()
         print("Testing {0}: ".format(file), end="")
         xmlout = parsedmarc.extract_report(data)
-        xmlin = open("samples/extract_report/nice-input.xml").read()
+        xmlin_file = open("samples/extract_report/nice-input.xml")
+        xmlin = xmlin_file.read()
+        xmlin_file.close()
         self.assertTrue(compare_xml(xmlout, xmlin))
         print("Passed!")
 
@@ -72,7 +75,9 @@ class Test(unittest.TestCase):
         file = "samples/extract_report/nice-input.xml"
         print("Testing {0}: ".format(file), end="")
         xmlout = parsedmarc.extract_report(file)
-        xmlin = open("samples/extract_report/nice-input.xml").read()
+        xmlin_file = open("samples/extract_report/nice-input.xml")
+        xmlin = xmlin_file.read()
+        xmlin_file.close()
         self.assertTrue(compare_xml(xmlout, xmlin))
         print("Passed!")
 
@@ -82,7 +87,9 @@ class Test(unittest.TestCase):
         file = "samples/extract_report/nice-input.xml.gz"
         print("Testing {0}: ".format(file), end="")
         xmlout = parsedmarc.extract_report_from_file_path(file)
-        xmlin = open("samples/extract_report/nice-input.xml").read()
+        xmlin_file = open("samples/extract_report/nice-input.xml")
+        xmlin = xmlin_file.read()
+        xmlin_file.close()
         self.assertTrue(compare_xml(xmlout, xmlin))
         print("Passed!")
 
@@ -92,12 +99,13 @@ class Test(unittest.TestCase):
         file = "samples/extract_report/nice-input.xml.zip"
         print("Testing {0}: ".format(file), end="")
         xmlout = parsedmarc.extract_report_from_file_path(file)
-        print(xmlout)
-        xmlin = minify_xml(open("samples/extract_report/nice-input.xml").read())
-        print(xmlin)
+        xmlin_file = open("samples/extract_report/nice-input.xml")
+        xmlin = minify_xml(xmlin_file.read())
+        xmlin_file.close()
         self.assertTrue(compare_xml(xmlout, xmlin))
-        xmlin = minify_xml(open("samples/extract_report/changed-input.xml").read())
-        print(xmlin)
+        xmlin_file = open("samples/extract_report/changed-input.xml")
+        xmlin = xmlin_file.read()
+        xmlin_file.close()
         self.assertFalse(compare_xml(xmlout, xmlin))
         print("Passed!")
 
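The test changes above close each file handle explicitly; an equivalent, arguably more idiomatic pattern (shown only as a sketch) uses a context manager so the handle is closed even if an assertion fails.

# Sketch only: reading a sample report with a with-block; the helper name is
# hypothetical and mirrors the reads performed in the tests above.
def read_sample(path: str) -> str:
    with open(path) as sample_file:
        return sample_file.read()

xmlin = read_sample("samples/extract_report/nice-input.xml")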