diff --git a/.gitignore b/.gitignore
index d9b6413..e443375 100644
--- a/.gitignore
+++ b/.gitignore
@@ -46,6 +46,7 @@ nosetests.xml
 coverage.xml
 *.cover
 .hypothesis/
+.pytest_cache/
 
 # Translations
 *.mo
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 27d0603..171ec99 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,8 @@
+3.6.1
+-----
+
+- Parse aggregate reports with missing SPF domain
+
 3.6.0
 -----
 
diff --git a/README.rst b/README.rst
index abccc18..6e3612f 100644
--- a/README.rst
+++ b/README.rst
@@ -11,18 +11,27 @@ parsedmarc
    :target: https://raw.githubusercontent.com/domainaware/parsedmarc/master/docs/_static/screenshots/dmarc-summary-charts.png
 
 ``parsedmarc`` is a Python module and CLI utility for parsing DMARC reports.
+When used with Elasticsearch and Kibana, it works as a self-hosted open source
+alternative to commercial DMARC report processing services such as Agari,
+Dmarcian, and OnDMARC.
 
 Features
 ========
 
-* Parses draft and 1.0 standard aggregate reports
-* Parses forensic reports
+* Parses draft and 1.0 standard aggregate/rua reports
+* Parses forensic/failure/ruf reports
 * Can parse reports from an inbox over IMAP
 * Transparently handles gzip or zip compressed reports
 * Consistent data structures
 * Simple JSON and/or CSV output
 * Optionally email the results
-* Optionally send the results to Elasticsearch, for use with premade Kibana dashboards
+* Optionally send the results to Elasticsearch, for use with premade Kibana
+  dashboards
+
+Resources
+=========
+
+* `Demystifying DMARC`_
 
 CLI help
 ========
@@ -253,3 +262,5 @@ https://github.com/domainaware/parsedmarc/issues
 
 .. |Build Status| image:: https://travis-ci.org/domainaware/parsedmarc.svg?branch=master
    :target: https://travis-ci.org/domainaware/parsedmarc
+
+.. _Demystifying DMARC: https://seanthegeek.net/459/demystifying-dmarc/
\ No newline at end of file
diff --git a/parsedmarc/__init__.py b/parsedmarc/__init__.py
index 3b2a26c..2b08e19 100644
--- a/parsedmarc/__init__.py
+++ b/parsedmarc/__init__.py
@@ -43,7 +43,7 @@ import imapclient.exceptions
 import dateparser
 import mailparser
 
-__version__ = "3.6.0"
+__version__ = "3.6.1"
 
 logger = logging.getLogger(__name__)
 logger.setLevel(logging.INFO)
@@ -386,11 +386,15 @@ def _parse_report_record(record, nameservers=None, timeout=6.0):
         new_record["auth_results"]["spf"].append(new_result)
 
     if "envelope_from" not in new_record["identifiers"]:
-        envelope_from = new_record["auth_results"]["spf"][-1]["domain"].lower()
+        envelope_from = new_record["auth_results"]["spf"][-1]["domain"]
+        if envelope_from is not None:
+            envelope_from = str(envelope_from).lower()
         new_record["identifiers"]["envelope_from"] = envelope_from
 
     elif new_record["identifiers"]["envelope_from"] is None:
-        envelope_from = new_record["auth_results"]["spf"][-1]["domain"].lower()
+        envelope_from = new_record["auth_results"]["spf"][-1]["domain"]
+        if envelope_from is not None:
+            envelope_from = str(envelope_from).lower()
         new_record["identifiers"]["envelope_from"] = envelope_from
 
     envelope_to = None
@@ -583,6 +587,10 @@ def parsed_aggregate_reports_to_csv(reports):
     Returns:
         str: Parsed aggregate report data in flat CSV format, including headers
     """
+
+    def to_str(obj):
+        return str(obj).lower()
+
     fields = ["xml_schema", "org_name", "org_email",
               "org_extra_contact_info", "report_id", "begin_date", "end_date",
               "errors", "domain", "adkim", "aspf", "p", "sp", "pct", "fo",
@@ -656,9 +664,9 @@
                 if "selector" in dkim_result:
                     dkim_selectors.append(dkim_result["selector"])
                 dkim_results.append(dkim_result["result"])
-            row["dkim_domains"] = ",".join(dkim_domains)
-            row["dkim_selectors"] = ",".join(dkim_selectors)
-            row["dkim_results"] = ",".join(dkim_results)
+            row["dkim_domains"] = ",".join(map(to_str, dkim_domains))
+            row["dkim_selectors"] = ",".join(map(to_str, dkim_selectors))
+            row["dkim_results"] = ",".join(map(to_str, dkim_results))
             spf_domains = []
             spf_scopes = []
             spf_results = []
@@ -666,9 +674,9 @@
                 spf_domains.append(spf_result["domain"])
                 spf_scopes.append(spf_result["scope"])
                 spf_results.append(spf_result["result"])
-            row["spf_domains"] = ",".join(spf_domains)
-            row["spf_scopes"] = ",".join(spf_scopes)
-            row["spf_results"] = ",".join(spf_results)
+            row["spf_domains"] = ",".join(map(to_str, spf_domains))
+            row["spf_scopes"] = ",".join(map(to_str, spf_scopes))
+            row["spf_results"] = ",".join(map(to_str, spf_results))
 
             writer.writerow(row)
             csv_file_object.flush()
diff --git a/samples/veeam.com!example.com!1530133200!1530219600.xml.sample b/samples/veeam.com!example.com!1530133200!1530219600.xml.sample
new file mode 100644
index 0000000..93402f2
--- /dev/null
+++ b/samples/veeam.com!example.com!1530133200!1530219600.xml.sample
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<feedback>
+  <report_metadata>
+    <org_name>veeam.com</org_name>
+    <email>noreply.it.dmarc@veeam.com</email>
+    <report_id>sonexushealth.com:1530233361</report_id>
+    <date_range>
+      <begin>1530133200</begin>
+      <end>1530219600</end>
+    </date_range>
+  </report_metadata>
+  <policy_published>
+    <domain>example.com</domain>
+    <adkim>r</adkim>
+    <aspf>r</aspf>
+    <p>none</p>
+    <sp>none</sp>
+    <pct>100</pct>
+  </policy_published>
+  <record>
+    <row>
+      <source_ip>199.230.200.36</source_ip>
+      <count>1</count>
+      <policy_evaluated>
+        <disposition>none</disposition>
+        <dkim>fail</dkim>
+        <spf>fail</spf>
+      </policy_evaluated>
+    </row>
+    <identifiers>
+      <header_from>example.com</header_from>
+    </identifiers>
+    <auth_results>
+      <spf>
+        <domain></domain>
+        <result>none</result>
+      </spf>
+    </auth_results>
+  </record>
+</feedback>
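The sample report above is the regression case for this release: its SPF auth_results entry carries an empty <domain> element, so the parsed value is None and the unconditional .lower() call that 3.6.0 used in _parse_report_record raised AttributeError. A minimal standalone sketch of the guard pattern, using only the standard library (the element paths and variable names are illustrative, not parsedmarc's internal API):

    # Sketch of the None-safe handling added in 3.6.1, applied to a
    # stripped-down report record. This is not parsedmarc's internal code.
    import xml.etree.ElementTree as ET

    sample = """<feedback>
      <record>
        <auth_results>
          <spf>
            <domain></domain>
            <result>none</result>
          </spf>
        </auth_results>
      </record>
    </feedback>"""

    spf = ET.fromstring(sample).find("./record/auth_results/spf")
    envelope_from = spf.find("domain").text    # empty element -> None
    if envelope_from is not None:              # 3.6.0 lowercased unconditionally
        envelope_from = str(envelope_from).lower()
    print(envelope_from)                       # prints "None" instead of raising
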
diff --git a/setup.py b/setup.py
index 4da12fd..90dd1d5 100644
--- a/setup.py
+++ b/setup.py
@@ -14,7 +14,7 @@ from setuptools import setup
 from codecs import open
 from os import path
 
-__version__ = "3.6.0"
+__version__ = "3.6.1"
 
 description = "A Python package and CLI for parsing aggregate and " \
               "forensic DMARC reports"
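
The CSV changes in parsed_aggregate_reports_to_csv serve the same report: a missing SPF domain would otherwise reach ",".join() as None, which join() rejects. A short illustration of the behaviour the new to_str helper relies on (plain Python, no parsedmarc imports; the spf_domains list is made up for the example):

    # Why the 3.6.1 CSV columns are built with map(to_str, ...) before joining.
    def to_str(obj):
        return str(obj).lower()

    spf_domains = [None, "example.com"]        # None models the missing SPF domain
    # ",".join(spf_domains)                    # TypeError: expected str, got NoneType
    print(",".join(map(to_str, spf_domains)))  # -> "none,example.com"

The trade-off is that absent values show up literally as "none" in the CSV output rather than as empty cells.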