3.6.1 - Parse aggregate reports with missing SPF domain

This commit is contained in:
Sean Whalen
2018-06-29 11:56:47 -04:00
parent c269e49c2a
commit e30a5bb14f
6 changed files with 78 additions and 13 deletions

.gitignore

@@ -46,6 +46,7 @@ nosetests.xml
coverage.xml
*.cover
.hypothesis/
+.pytest_cache/
# Translations
*.mo

CHANGELOG.md

@@ -1,3 +1,8 @@
+3.6.1
+-----
+
+- Parse aggregate reports with missing SPF domain
+
 3.6.0
 -----

README.rst

@@ -11,18 +11,27 @@ parsedmarc
:target: https://raw.githubusercontent.com/domainaware/parsedmarc/master/docs/_static/screenshots/dmarc-summary-charts.png
``parsedmarc`` is a Python module and CLI utility for parsing DMARC reports.
When used with Elasticsearch and Kibana, it works as a self-hosted open source
alternative to commercial DMARC report processing services such as Agari,
Dmarcian, and OnDMARC.
Features
========
-* Parses draft and 1.0 standard aggregate reports
-* Parses forensic reports
+* Parses draft and 1.0 standard aggregate/rua reports
+* Parses forensic/failure/ruf reports
* Can parse reports from an inbox over IMAP
* Transparently handles gzip or zip compressed reports
* Consistent data structures
* Simple JSON and/or CSV output
* Optionally email the results
-* Optionally send the results to Elasticsearch, for use with premade Kibana dashboards
+* Optionally send the results to Elasticsearch, for use with premade Kibana
+  dashboards
+
+Resources
+=========
+
+* `Demystifying DMARC`_
+
CLI help
========
@@ -253,3 +262,5 @@ https://github.com/domainaware/parsedmarc/issues
.. |Build Status| image:: https://travis-ci.org/domainaware/parsedmarc.svg?branch=master
:target: https://travis-ci.org/domainaware/parsedmarc
+
+.. _Demystifying DMARC: https://seanthegeek.net/459/demystifying-dmarc/
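
A quick illustration of the "consistent data structures" and "simple JSON and/or CSV output" points above, as a minimal Python sketch (not part of this commit): the report file name is hypothetical, and parse_aggregate_report_file is assumed to be available as in other 3.x releases, while parsed_aggregate_reports_to_csv appears in the diff below.

    import json

    import parsedmarc

    # Parse a single aggregate report file (the path here is hypothetical)
    report = parsedmarc.parse_aggregate_report_file("aggregate_report.xml")

    # Consistent data structure, straightforward to serialize as JSON
    print(json.dumps(report, indent=2, default=str))

    # Flat CSV output, one row per record
    print(parsedmarc.parsed_aggregate_reports_to_csv([report]))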

parsedmarc/__init__.py

@@ -43,7 +43,7 @@ import imapclient.exceptions
import dateparser
import mailparser
__version__ = "3.6.0"
__version__ = "3.6.1"
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
@@ -386,11 +386,15 @@ def _parse_report_record(record, nameservers=None, timeout=6.0):
new_record["auth_results"]["spf"].append(new_result)
if "envelope_from" not in new_record["identifiers"]:
envelope_from = new_record["auth_results"]["spf"][-1]["domain"].lower()
envelope_from = new_record["auth_results"]["spf"][-1]["domain"]
if envelope_from is not None:
envelope_from = str(envelope_from).lower()
new_record["identifiers"]["envelope_from"] = envelope_from
elif new_record["identifiers"]["envelope_from"] is None:
envelope_from = new_record["auth_results"]["spf"][-1]["domain"].lower()
envelope_from = new_record["auth_results"]["spf"][-1]["domain"]
if envelope_from is not None:
envelope_from = str(envelope_from).lower()
new_record["identifiers"]["envelope_from"] = envelope_from
envelope_to = None
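
The hunk above is the core of the fix: an empty <domain></domain> element in a report's SPF auth results is parsed to None, so the old unconditional .lower() raised AttributeError. A standalone sketch of the same guard, using a hypothetical record rather than parsedmarc's internals:

    # Hypothetical SPF auth_results entry with a missing domain
    spf_result = {"domain": None, "scope": "mfrom", "result": "none"}

    # Old behaviour: spf_result["domain"].lower() -> AttributeError
    # New behaviour: only lowercase when a domain is actually present
    envelope_from = spf_result["domain"]
    if envelope_from is not None:
        envelope_from = str(envelope_from).lower()

    print(envelope_from)  # None, instead of an unhandled exception
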
@@ -583,6 +587,10 @@ def parsed_aggregate_reports_to_csv(reports):
     Returns:
         str: Parsed aggregate report data in flat CSV format, including headers
     """
+
+    def to_str(obj):
+        return str(obj).lower()
+
     fields = ["xml_schema", "org_name", "org_email",
               "org_extra_contact_info", "report_id", "begin_date", "end_date",
               "errors", "domain", "adkim", "aspf", "p", "sp", "pct", "fo",
@@ -656,9 +664,9 @@ def parsed_aggregate_reports_to_csv(reports):
if "selector" in dkim_result:
dkim_selectors.append(dkim_result["selector"])
dkim_results.append(dkim_result["result"])
row["dkim_domains"] = ",".join(dkim_domains)
row["dkim_selectors"] = ",".join(dkim_selectors)
row["dkim_results"] = ",".join(dkim_results)
row["dkim_domains"] = ",".join(map(to_str, dkim_domains))
row["dkim_selectors"] = ",".join(map(to_str, dkim_selectors))
row["dkim_results"] = ",".join(map(to_str, dkim_results))
spf_domains = []
spf_scopes = []
spf_results = []
@@ -666,9 +674,9 @@ def parsed_aggregate_reports_to_csv(reports):
                 spf_domains.append(spf_result["domain"])
                 spf_scopes.append(spf_result["scope"])
                 spf_results.append(spf_result["result"])
-            row["spf_domains"] = ",".join(spf_domains)
-            row["spf_scopes"] = ",".join(spf_scopes)
-            row["spf_results"] = ",".join(spf_results)
+            row["spf_domains"] = ",".join(map(to_str, spf_domains))
+            row["spf_scopes"] = ",".join(map(to_str, spf_scopes))
+            row["spf_results"] = ",".join(map(to_str, spf_results))
             writer.writerow(row)
             csv_file_object.flush()
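
The CSV changes above address the same report shape: str.join() rejects non-string items, so an SPF domain that parsed to None would break CSV output. The new to_str helper coerces and lowercases each value before joining; a standalone sketch with hypothetical values:

    def to_str(obj):
        return str(obj).lower()

    spf_domains = [None, "example.com"]  # a missing SPF domain parses to None

    # ",".join(spf_domains) would raise:
    # TypeError: sequence item 0: expected str instance, NoneType found
    print(",".join(map(to_str, spf_domains)))  # prints: none,example.com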

New sample: aggregate report with an empty SPF domain

@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<feedback>
+  <report_metadata>
+    <org_name>veeam.com</org_name>
+    <email>noreply.it.dmarc@veeam.com</email>
+    <report_id>sonexushealth.com:1530233361</report_id>
+    <date_range>
+      <begin>1530133200</begin>
+      <end>1530219600</end>
+    </date_range>
+  </report_metadata>
+  <policy_published>
+    <domain>example.com</domain>
+    <adkim>r</adkim>
+    <aspf>r</aspf>
+    <p>none</p>
+    <sp>none</sp>
+    <pct>100</pct>
+  </policy_published>
+  <record>
+    <row>
+      <source_ip>199.230.200.36</source_ip>
+      <count>1</count>
+      <policy_evaluated>
+        <disposition>none</disposition>
+        <dkim>fail</dkim>
+        <spf>fail</spf>
+      </policy_evaluated>
+    </row>
+    <identifiers>
+      <header_from>example.com</header_from>
+    </identifiers>
+    <auth_results>
+      <spf>
+        <domain></domain>
+        <result>none</result>
+      </spf>
+    </auth_results>
+  </record>
+</feedback>
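
This new sample exercises exactly the case the release fixes: the record's auth results contain an SPF entry whose <domain> element is empty. A quick check against the parser, assuming the sample is saved under a hypothetical name and that parse_aggregate_report_file is available as in other 3.x releases:

    from parsedmarc import parse_aggregate_report_file

    # File name is hypothetical; use the path the sample is saved under
    report = parse_aggregate_report_file("samples/empty_spf_domain.xml")
    record = report["records"][0]

    # With 3.6.1 the empty <domain></domain> no longer raises; the
    # envelope_from identifier is simply left as None
    print(record["identifiers"]["envelope_from"])  # None
    print(record["auth_results"]["spf"][0]["result"])  # none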

setup.py

@@ -14,7 +14,7 @@ from setuptools import setup
from codecs import open
from os import path
__version__ = "3.6.0"
__version__ = "3.6.1"
description = "A Python package and CLI for parsing aggregate and " \
"forensic DMARC reports"