mirror of https://github.com/domainaware/parsedmarc.git
synced 2026-03-21 05:55:59 +00:00

Compare commits

2 Commits (copilot/su... ... copilot/su...)

| Author | SHA1 | Date |
|---|---|---|
| | efec173012 | |
| | e78550bc3f | |
```diff
@@ -207,9 +207,8 @@ def _parse_config_file(config_file, opts):
                 index_prefix_domain_map = yaml.safe_load(f)
         except OSError as exc:
             raise ConfigurationError(
-                "Failed to read index_prefix_domain_map file '{0}': {1}".format(
-                    map_path, exc
-                )
+                "Failed to read index_prefix_domain_map file "
+                "'{0}': {1}".format(map_path, exc)
             ) from exc
         except yaml.YAMLError as exc:
             raise ConfigurationError(
```
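The `raise ... from exc` in this hunk is standard exception chaining: the new `ConfigurationError` keeps the original `OSError` attached as its `__cause__`, so both errors show up in the traceback. A minimal, self-contained sketch of the pattern (the helper name and error class below are stand-ins, not parsedmarc's actual definitions):

```python
class ConfigurationError(ValueError):
    """Stand-in for parsedmarc's configuration error type."""


def load_index_prefix_domain_map(map_path):
    try:
        with open(map_path) as f:
            return f.read()
    except OSError as exc:
        # "from exc" stores the original OSError as __cause__, so the
        # traceback shows both the low-level failure and this error.
        raise ConfigurationError(
            "Failed to read index_prefix_domain_map file "
            "'{0}': {1}".format(map_path, exc)
        ) from exc
```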
```diff
@@ -255,7 +254,7 @@ def _parse_config_file(config_file, opts):
             except Exception as ns_error:
                 raise ConfigurationError(
                     "DNS pre-flight check failed: {}".format(ns_error)
-                ) from ns_error
+                )
             if not dummy_hostname:
                 raise ConfigurationError(
                     "DNS pre-flight check failed: no PTR record for {} from {}".format(
```
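The pre-flight check above turns a failed reverse-DNS lookup into a `ConfigurationError`. A rough stdlib-only illustration of the same idea, using `socket.gethostbyaddr` rather than the resolver parsedmarc actually uses (the error class and function name are stand-ins):

```python
import socket


class ConfigurationError(ValueError):
    """Stand-in error type for the sketch."""


def preflight_ptr_check(ip_address):
    """Fail fast when the given address has no usable PTR record."""
    try:
        hostname, _aliases, _addresses = socket.gethostbyaddr(ip_address)
    except OSError as ns_error:
        # herror/gaierror are OSError subclasses, so this catches lookup failures.
        raise ConfigurationError(
            "DNS pre-flight check failed: {}".format(ns_error)
        ) from ns_error
    if not hostname:
        raise ConfigurationError(
            "DNS pre-flight check failed: no PTR record for {}".format(ip_address)
        )
    return hostname
```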
```diff
@@ -272,6 +271,8 @@ def _parse_config_file(config_file, opts):
             opts.debug = bool(general_config.getboolean("debug"))
         if "verbose" in general_config:
             opts.verbose = bool(general_config.getboolean("verbose"))
+        if "silent" in general_config:
+            opts.silent = bool(general_config.getboolean("silent"))
         if "warnings" in general_config:
             opts.warnings = bool(general_config.getboolean("warnings"))
         if "fail_on_output_error" in general_config:
```
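The `general_config.getboolean(...)` calls come from the standard `configparser` API, where a section behaves like a mapping and boolean options accept several spellings. A small illustrative sketch (the section and option names are only examples):

```python
from configparser import ConfigParser

ini_text = """
[general]
debug = True
verbose = yes
"""

parser = ConfigParser()
parser.read_string(ini_text)
general_config = parser["general"]

# getboolean() understands true/false, yes/no, on/off and 1/0.
if "debug" in general_config:
    debug = bool(general_config.getboolean("debug"))      # True
if "verbose" in general_config:
    verbose = bool(general_config.getboolean("verbose"))  # True
if "silent" in general_config:
    # "silent" is absent from the example section, so this branch is skipped.
    silent = bool(general_config.getboolean("silent"))
```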
```diff
@@ -905,39 +906,31 @@ def _init_output_clients(opts):
         )

     if opts.s3_bucket:
-        try:
-            clients["s3_client"] = s3.S3Client(
-                bucket_name=opts.s3_bucket,
-                bucket_path=opts.s3_path,
-                region_name=opts.s3_region_name,
-                endpoint_url=opts.s3_endpoint_url,
-                access_key_id=opts.s3_access_key_id,
-                secret_access_key=opts.s3_secret_access_key,
-            )
-        except Exception:
-            logger.warning("Failed to initialize S3 client; skipping", exc_info=True)
+        clients["s3_client"] = s3.S3Client(
+            bucket_name=opts.s3_bucket,
+            bucket_path=opts.s3_path,
+            region_name=opts.s3_region_name,
+            endpoint_url=opts.s3_endpoint_url,
+            access_key_id=opts.s3_access_key_id,
+            secret_access_key=opts.s3_secret_access_key,
+        )

     if opts.syslog_server:
-        try:
-            clients["syslog_client"] = syslog.SyslogClient(
-                server_name=opts.syslog_server,
-                server_port=int(opts.syslog_port),
-                protocol=opts.syslog_protocol or "udp",
-                cafile_path=opts.syslog_cafile_path,
-                certfile_path=opts.syslog_certfile_path,
-                keyfile_path=opts.syslog_keyfile_path,
-                timeout=opts.syslog_timeout if opts.syslog_timeout is not None else 5.0,
-                retry_attempts=opts.syslog_retry_attempts
-                if opts.syslog_retry_attempts is not None
-                else 3,
-                retry_delay=opts.syslog_retry_delay
-                if opts.syslog_retry_delay is not None
-                else 5,
-            )
-        except Exception:
-            logger.warning(
-                "Failed to initialize syslog client; skipping", exc_info=True
-            )
+        clients["syslog_client"] = syslog.SyslogClient(
+            server_name=opts.syslog_server,
+            server_port=int(opts.syslog_port),
+            protocol=opts.syslog_protocol or "udp",
+            cafile_path=opts.syslog_cafile_path,
+            certfile_path=opts.syslog_certfile_path,
+            keyfile_path=opts.syslog_keyfile_path,
+            timeout=opts.syslog_timeout if opts.syslog_timeout is not None else 5.0,
+            retry_attempts=opts.syslog_retry_attempts
+            if opts.syslog_retry_attempts is not None
+            else 3,
+            retry_delay=opts.syslog_retry_delay
+            if opts.syslog_retry_delay is not None
+            else 5,
+        )

     if opts.hec:
         if opts.hec_token is None or opts.hec_index is None:
```
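The left-hand side of these hunks wrapped each optional output client in `try`/`except` so that a backend that fails to initialize is logged and skipped instead of aborting startup. A generic sketch of that warn-and-skip pattern, with a hypothetical factory and option names rather than parsedmarc's actual classes:

```python
import logging

logger = logging.getLogger("example")


def init_optional_client(name, factory, **kwargs):
    """Build one optional output client; on failure, warn and skip it."""
    try:
        return factory(**kwargs)
    except Exception:
        # exc_info=True attaches the full traceback to the warning record.
        logger.warning("Failed to initialize %s client; skipping", name, exc_info=True)
        return None


# Hypothetical usage with made-up client classes and options:
# clients = {}
# s3_client = init_optional_client("S3", s3.S3Client, bucket_name=opts.s3_bucket)
# if s3_client is not None:
#     clients["s3_client"] = s3_client
```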
```diff
@@ -947,60 +940,42 @@ def _init_output_clients(opts):
         verify = True
         if opts.hec_skip_certificate_verification:
             verify = False
-        try:
-            clients["hec_client"] = splunk.HECClient(
-                opts.hec, opts.hec_token, opts.hec_index, verify=verify
-            )
-        except Exception:
-            logger.warning(
-                "Failed to initialize Splunk HEC client; skipping", exc_info=True
-            )
+        clients["hec_client"] = splunk.HECClient(
+            opts.hec, opts.hec_token, opts.hec_index, verify=verify
+        )

     if opts.kafka_hosts:
         ssl_context = None
         if opts.kafka_ssl:
-            if opts.kafka_skip_certificate_verification:
-                logger.debug("Skipping Kafka certificate verification")
             ssl_context = create_default_context()
             if opts.kafka_skip_certificate_verification:
                 logger.debug("Skipping Kafka certificate verification")
-                ssl_context.check_hostname = False
-                ssl_context.verify_mode = CERT_NONE
-        try:
-            clients["kafka_client"] = kafkaclient.KafkaClient(
-                opts.kafka_hosts,
-                username=opts.kafka_username,
-                password=opts.kafka_password,
-                ssl=opts.kafka_ssl,
-                ssl_context=ssl_context,
-            )
-        except Exception:
-            logger.warning("Failed to initialize Kafka client; skipping", exc_info=True)
+                ssl_context.check_hostname = False
+                ssl_context.verify_mode = CERT_NONE
+        clients["kafka_client"] = kafkaclient.KafkaClient(
+            opts.kafka_hosts,
+            username=opts.kafka_username,
+            password=opts.kafka_password,
+            ssl_context=ssl_context,
+        )

     if opts.gelf_host:
-        try:
-            clients["gelf_client"] = gelf.GelfClient(
-                host=opts.gelf_host,
-                port=int(opts.gelf_port),
-                mode=opts.gelf_mode,
-            )
-        except Exception:
-            logger.warning("Failed to initialize GELF client; skipping", exc_info=True)
+        clients["gelf_client"] = gelf.GelfClient(
+            host=opts.gelf_host,
+            port=int(opts.gelf_port),
+            mode=opts.gelf_mode,
+        )

     if (
         opts.webhook_aggregate_url
         or opts.webhook_forensic_url
         or opts.webhook_smtp_tls_url
     ):
-        try:
-            clients["webhook_client"] = webhook.WebhookClient(
-                aggregate_url=opts.webhook_aggregate_url,
-                forensic_url=opts.webhook_forensic_url,
-                smtp_tls_url=opts.webhook_smtp_tls_url,
-                timeout=opts.webhook_timeout,
-            )
-        except Exception:
-            logger.warning(
-                "Failed to initialize webhook client; skipping", exc_info=True
-            )
+        clients["webhook_client"] = webhook.WebhookClient(
+            aggregate_url=opts.webhook_aggregate_url,
+            forensic_url=opts.webhook_forensic_url,
+            smtp_tls_url=opts.webhook_smtp_tls_url,
+            timeout=opts.webhook_timeout,
+        )

     return clients
```
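The Kafka branch builds its `ssl_context` with the standard `ssl` module. A minimal sketch of that setup, including the verification-skipping variant behind an illustrative flag:

```python
from ssl import CERT_NONE, create_default_context

skip_certificate_verification = False  # illustrative flag

ssl_context = create_default_context()
if skip_certificate_verification:
    # check_hostname must be disabled before verify_mode can be relaxed,
    # otherwise assigning CERT_NONE raises a ValueError.
    ssl_context.check_hostname = False
    ssl_context.verify_mode = CERT_NONE
```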
```diff
@@ -1995,6 +1970,7 @@ def _main():
         logger.info("Watching for email - Quit with ctrl-c")

         while True:
+            _reload_requested = False
             try:
                 watch_inbox(
                     mailbox_connection=mailbox_connection,
```
```diff
@@ -2027,10 +2003,7 @@ def _main():
             if not _reload_requested:
                 break

-            # Reload configuration — clear the flag first so that any new
-            # SIGHUP arriving while we reload will be captured for the next
-            # iteration rather than being silently dropped.
-            _reload_requested = False
+            # Reload configuration
             logger.info("Reloading configuration...")
             try:
                 # Build a fresh opts starting from CLI-only defaults so that
```
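The `_reload_requested` flag used in these `_main()` hunks is the usual signal-driven reload pattern: a SIGHUP handler only records the request, and the loop acts on it between iterations. A stripped-down, Unix-only sketch of the idea (not parsedmarc's actual handler or loop):

```python
import signal
import time

_reload_requested = False


def _handle_sighup(signum, frame):
    # Do as little as possible inside the handler: just record the request.
    global _reload_requested
    _reload_requested = True


signal.signal(signal.SIGHUP, _handle_sighup)

while True:
    _reload_requested = False   # reset at the top of each iteration
    time.sleep(5)               # stand-in for the long-running watch call
    if not _reload_requested:
        break
    print("Reloading configuration...")
```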
```diff
@@ -2074,31 +2047,6 @@ def _main():
             if opts.debug:
                 logger.setLevel(logging.DEBUG)

-            # Refresh FileHandler if log_file changed
-            old_log_file = getattr(opts, "active_log_file", None)
-            new_log_file = opts.log_file
-            if old_log_file != new_log_file:
-                # Remove old FileHandlers
-                for h in list(logger.handlers):
-                    if isinstance(h, logging.FileHandler):
-                        h.close()
-                        logger.removeHandler(h)
-                # Add new FileHandler if configured
-                if new_log_file:
-                    try:
-                        fh = logging.FileHandler(new_log_file, "a")
-                        file_formatter = logging.Formatter(
-                            "%(asctime)s - %(levelname)s"
-                            " - [%(filename)s:%(lineno)d] - %(message)s"
-                        )
-                        fh.setFormatter(file_formatter)
-                        logger.addHandler(fh)
-                    except Exception as log_error:
-                        logger.warning(
-                            "Unable to write to log file: {}".format(log_error)
-                        )
-                opts.active_log_file = new_log_file
-
             logger.info("Configuration reloaded successfully")
         except Exception:
             logger.exception(
```
```diff
@@ -178,12 +178,10 @@ class GmailConnection(MailboxConnection):
     def watch(self, check_callback, check_timeout, should_reload=None):
         """Checks the mailbox for new messages every n seconds"""
         while True:
-            if should_reload and should_reload():
-                return
             sleep(check_timeout)
             check_callback(self)
             if should_reload and should_reload():
                 return
-            check_callback(self)

     @lru_cache(maxsize=10)
     def _find_label_id_for_label(self, label_name: str) -> str:
```
```diff
@@ -281,10 +281,10 @@ class MSGraphConnection(MailboxConnection):
     def watch(self, check_callback, check_timeout, should_reload=None):
         """Checks the mailbox for new messages every n seconds"""
         while True:
-            if should_reload and should_reload():
-                return
             sleep(check_timeout)
             check_callback(self)
+            if should_reload and should_reload():
+                return

     @lru_cache(maxsize=10)
     def _find_folder_id_from_folder_path(self, folder_name: str) -> str:
```
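Both `watch()` hunks converge on the same loop shape: sleep, poll the mailbox, then honor a pending reload request. A standalone sketch of that ordering with placeholder callbacks (the real methods take the connection itself as the callback argument):

```python
from time import sleep


def watch(check_callback, check_timeout, should_reload=None):
    """Poll for new messages every check_timeout seconds until a reload is requested."""
    while True:
        sleep(check_timeout)
        check_callback()
        # Checking after the callback lets an in-flight poll finish before
        # the connection is torn down for a reload.
        if should_reload and should_reload():
            return


# Example: poll twice, then stop.
calls = []
watch(lambda: calls.append("checked"), 0.01, should_reload=lambda: len(calls) >= 2)
```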