From 701735f6e55de83bba01d7eb112aa482c092ef2f Mon Sep 17 00:00:00 2001 From: Trenton H <797416+stumpylog@users.noreply.github.com> Date: Sun, 22 Mar 2026 06:53:32 -0700 Subject: [PATCH] Chore: Drop old signal and unneeded apps, transition to parser registry instead (#12405) * refactor: switch consumer and callers to ParserRegistry (Phase 4) Replace all Django signal-based parser discovery with direct registry calls. Removes `_parser_cleanup`, `parser_is_new_style` shims, and all old-style isinstance checks. All parser instantiation now uses the `with parser_class() as parser:` context manager pattern. - documents/parsers.py: delegate to get_parser_registry(); drop lru_cache - documents/consumer.py: use registry + context manager; remove shims - documents/tasks.py: same pattern - documents/management/commands/document_thumbnails.py: same pattern - documents/views.py: get_metadata uses context manager - documents/checks.py: use get_parser_registry().all_parsers() - paperless/parsers/registry.py: add all_parsers() public method - tests: update mocks to target documents.consumer.get_parser_class_for_mime_type Co-Authored-By: Claude Sonnet 4.6 * refactor: drop get_parser_class_for_mime_type; callers use registry directly All callers now call get_parser_registry().get_parser_for_file() with the actual filename and path, enabling score() to use file extension hints. The MIME-only helper is removed. 
- consumer.py: passes self.filename + self.working_copy - tasks.py: passes document.original_filename + document.source_path - document_thumbnails.py: same pattern - views.py: passes Path(file).name + Path(file) - parsers.py: internal helpers inline the registry call with filename="" - test_parsers.py: drop TestParserDiscovery (was testing mock behavior); TestParserAvailability uses registry directly - test_consumer.py: mocks switch to documents.consumer.get_parser_registry Co-Authored-By: Claude Sonnet 4.6 * refactor: remove document_consumer_declaration signal infrastructure Remove the document_consumer_declaration signal that was previously used for parser registration. Each parser app no longer connects to this signal, and the signal declaration itself has been removed from documents/signals. Changes: - Remove document_consumer_declaration from documents/signals/__init__.py - Remove ready() methods and signal imports from all parser app configs - Delete signal shim files (signals.py) from all parser apps: - paperless_tesseract/signals.py - paperless_text/signals.py - paperless_tika/signals.py - paperless_mail/signals.py - paperless_remote/signals.py Parser discovery now happens exclusively through the ParserRegistry system introduced in the previous refactor phases. Co-Authored-By: Claude Sonnet 4.6 * refactor: remove empty paperless_text and paperless_tika Django apps After parser classes were moved to paperless/parsers/ in the plugin refactor, these Django apps contained only empty AppConfig classes with no models, views, tasks, migrations, or other functionality. - Remove paperless_text and paperless_tika from INSTALLED_APPS - Delete empty app directories entirely - Update pyproject.toml test exclusions - Clean stale mypy baseline entries for moved parser files paperless_remote app is retained as it contains meaningful system checks for Azure AI configuration. 
Co-Authored-By: Claude Sonnet 4.6 * Moves the checks and tests to the main application and removes the old applications * Adds a comment to satisfy Sonar * refactor: remove automatic log_summary() call from get_parser_registry() The summary was logged once per process, causing it to appear repeatedly during Docker startup (management commands, web server, each Celery worker subprocess). External parsers are already announced individually at INFO when discovered; the full summary is redundant noise. log_summary() is retained on ParserRegistry for manual/debug use. Co-Authored-By: Claude Sonnet 4.6 * Cleans up the duplicate test file/fixture * Fixes a race condition where webserver threads could race to populate the registry --------- Co-authored-by: Claude Sonnet 4.6 --- .mypy-baseline.txt | 14 - pyproject.toml | 4 - src/documents/checks.py | 11 +- src/documents/consumer.py | 532 ++++++++---------- .../commands/document_thumbnails.py | 48 +- src/documents/parsers.py | 86 +-- src/documents/signals/__init__.py | 1 - src/documents/tasks.py | 210 +++---- src/documents/tests/test_checks.py | 6 +- src/documents/tests/test_consumer.py | 221 ++++---- src/documents/tests/test_parsers.py | 149 +---- src/documents/views.py | 16 +- src/paperless/checks.py | 60 ++ src/paperless/parsers/registry.py | 45 +- src/paperless/settings/__init__.py | 7 +- src/paperless/tests/parsers/conftest.py | 29 - .../tests/parsers/test_remote_parser.py | 58 +- .../tests/parsers/test_tika_parser.py | 6 +- .../tests/samples/remote/simple-digital.pdf | Bin 22926 -> 0 bytes src/paperless/tests/test_checks.py | 116 ++++ src/paperless_mail/apps.py | 10 - src/paperless_mail/signals.py | 19 - src/paperless_remote/__init__.py | 4 - src/paperless_remote/apps.py | 14 - src/paperless_remote/checks.py | 17 - src/paperless_remote/signals.py | 38 -- src/paperless_remote/tests/__init__.py | 0 src/paperless_remote/tests/test_checks.py | 24 - src/paperless_tesseract/__init__.py | 5 - src/paperless_tesseract/apps.py | 
14 - src/paperless_tesseract/checks.py | 52 -- src/paperless_tesseract/signals.py | 34 -- src/paperless_tesseract/tests/__init__.py | 0 src/paperless_tesseract/tests/test_checks.py | 67 --- src/paperless_text/__init__.py | 0 src/paperless_text/apps.py | 14 - src/paperless_text/signals.py | 29 - src/paperless_text/tests/__init__.py | 0 src/paperless_tika/__init__.py | 0 src/paperless_tika/apps.py | 15 - src/paperless_tika/signals.py | 33 -- 41 files changed, 713 insertions(+), 1295 deletions(-) delete mode 100644 src/paperless/tests/samples/remote/simple-digital.pdf delete mode 100644 src/paperless_mail/signals.py delete mode 100644 src/paperless_remote/__init__.py delete mode 100644 src/paperless_remote/apps.py delete mode 100644 src/paperless_remote/checks.py delete mode 100644 src/paperless_remote/signals.py delete mode 100644 src/paperless_remote/tests/__init__.py delete mode 100644 src/paperless_remote/tests/test_checks.py delete mode 100644 src/paperless_tesseract/__init__.py delete mode 100644 src/paperless_tesseract/apps.py delete mode 100644 src/paperless_tesseract/checks.py delete mode 100644 src/paperless_tesseract/signals.py delete mode 100644 src/paperless_tesseract/tests/__init__.py delete mode 100644 src/paperless_tesseract/tests/test_checks.py delete mode 100644 src/paperless_text/__init__.py delete mode 100644 src/paperless_text/apps.py delete mode 100644 src/paperless_text/signals.py delete mode 100644 src/paperless_text/tests/__init__.py delete mode 100644 src/paperless_tika/__init__.py delete mode 100644 src/paperless_tika/apps.py delete mode 100644 src/paperless_tika/signals.py diff --git a/.mypy-baseline.txt b/.mypy-baseline.txt index 2700bfc71..61ffe9c10 100644 --- a/.mypy-baseline.txt +++ b/.mypy-baseline.txt @@ -2437,17 +2437,3 @@ src/paperless_tesseract/tests/test_parser_custom_settings.py:0: error: Item "Non src/paperless_tesseract/tests/test_parser_custom_settings.py:0: error: Item "None" of "ApplicationConfiguration | None" has no 
attribute "unpaper_clean" [union-attr] src/paperless_tesseract/tests/test_parser_custom_settings.py:0: error: Item "None" of "ApplicationConfiguration | None" has no attribute "unpaper_clean" [union-attr] src/paperless_tesseract/tests/test_parser_custom_settings.py:0: error: Item "None" of "ApplicationConfiguration | None" has no attribute "user_args" [union-attr] -src/paperless_text/parsers.py:0: error: Function is missing a type annotation for one or more arguments [no-untyped-def] -src/paperless_text/parsers.py:0: error: Function is missing a type annotation for one or more arguments [no-untyped-def] -src/paperless_text/parsers.py:0: error: Incompatible types in assignment (expression has type "str", variable has type "None") [assignment] -src/paperless_text/signals.py:0: error: Function is missing a type annotation [no-untyped-def] -src/paperless_text/signals.py:0: error: Function is missing a type annotation [no-untyped-def] -src/paperless_tika/parsers.py:0: error: Argument 1 to "make_thumbnail_from_pdf" has incompatible type "None"; expected "Path" [arg-type] -src/paperless_tika/parsers.py:0: error: Function is missing a return type annotation [no-untyped-def] -src/paperless_tika/parsers.py:0: error: Function is missing a type annotation [no-untyped-def] -src/paperless_tika/parsers.py:0: error: Function is missing a type annotation [no-untyped-def] -src/paperless_tika/parsers.py:0: error: Function is missing a type annotation for one or more arguments [no-untyped-def] -src/paperless_tika/parsers.py:0: error: Function is missing a type annotation for one or more arguments [no-untyped-def] -src/paperless_tika/parsers.py:0: error: Incompatible types in assignment (expression has type "str | None", variable has type "None") [assignment] -src/paperless_tika/signals.py:0: error: Function is missing a type annotation [no-untyped-def] -src/paperless_tika/signals.py:0: error: Function is missing a type annotation [no-untyped-def] diff --git a/pyproject.toml 
b/pyproject.toml index f2a20ac47..ee89ae4dd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -269,10 +269,6 @@ testpaths = [ "src/documents/tests/", "src/paperless/tests/", "src/paperless_mail/tests/", - "src/paperless_tesseract/tests/", - "src/paperless_tika/tests", - "src/paperless_text/tests/", - "src/paperless_remote/tests/", "src/paperless_ai/tests", ] diff --git a/src/documents/checks.py b/src/documents/checks.py index b6e9e90fc..0867ef403 100644 --- a/src/documents/checks.py +++ b/src/documents/checks.py @@ -3,25 +3,20 @@ from django.core.checks import Error from django.core.checks import Warning from django.core.checks import register -from documents.signals import document_consumer_declaration from documents.templating.utils import convert_format_str_to_template_format +from paperless.parsers.registry import get_parser_registry @register() def parser_check(app_configs, **kwargs): - parsers = [] - for response in document_consumer_declaration.send(None): - parsers.append(response[1]) - - if len(parsers) == 0: + if not get_parser_registry().all_parsers(): return [ Error( "No parsers found. This is a bug. 
The consumer won't be " "able to consume any documents without parsers.", ), ] - else: - return [] + return [] @register() diff --git a/src/documents/consumer.py b/src/documents/consumer.py index 81d9eb456..809d6c647 100644 --- a/src/documents/consumer.py +++ b/src/documents/consumer.py @@ -32,9 +32,7 @@ from documents.models import DocumentType from documents.models import StoragePath from documents.models import Tag from documents.models import WorkflowTrigger -from documents.parsers import DocumentParser from documents.parsers import ParseError -from documents.parsers import get_parser_class_for_mime_type from documents.permissions import set_permissions_for_object from documents.plugins.base import AlwaysRunPluginMixin from documents.plugins.base import ConsumeTaskPlugin @@ -52,40 +50,12 @@ from documents.utils import copy_basic_file_stats from documents.utils import copy_file_with_basic_stats from documents.utils import run_subprocess from paperless.parsers import ParserContext -from paperless.parsers.mail import MailDocumentParser -from paperless.parsers.remote import RemoteDocumentParser -from paperless.parsers.tesseract import RasterisedDocumentParser -from paperless.parsers.text import TextDocumentParser -from paperless.parsers.tika import TikaDocumentParser +from paperless.parsers import ParserProtocol +from paperless.parsers.registry import get_parser_registry LOGGING_NAME: Final[str] = "paperless.consumer" -def _parser_cleanup(parser: DocumentParser) -> None: - """ - Call cleanup on a parser, handling the new-style context-manager parsers. - - New-style parsers (e.g. TextDocumentParser) use __exit__ for teardown - instead of a cleanup() method. 
This shim will be removed once all existing parsers - have switched to the new style and this consumer is updated to use it - - TODO(stumpylog): Remove me in the future - """ - if isinstance( - parser, - ( - MailDocumentParser, - RasterisedDocumentParser, - RemoteDocumentParser, - TextDocumentParser, - TikaDocumentParser, - ), - ): - parser.__exit__(None, None, None) - else: - parser.cleanup() - - class WorkflowTriggerPlugin( NoCleanupPluginMixin, NoSetupPluginMixin, @@ -422,8 +392,12 @@ class ConsumerPlugin( self.log.error(f"Error attempting to clean PDF: {e}") # Based on the mime type, get the parser for that type - parser_class: type[DocumentParser] | None = get_parser_class_for_mime_type( - mime_type, + parser_class: type[ParserProtocol] | None = ( + get_parser_registry().get_parser_for_file( + mime_type, + self.filename, + self.working_copy, + ) ) if not parser_class: tempdir.cleanup() @@ -446,313 +420,275 @@ class ConsumerPlugin( tempdir.cleanup() raise - def progress_callback( - current_progress, - max_progress, - ) -> None: # pragma: no cover - # recalculate progress to be within 20 and 80 - p = int((current_progress / max_progress) * 50 + 20) - self._send_progress(p, 100, ProgressStatusOptions.WORKING) - # This doesn't parse the document yet, but gives us a parser. - - document_parser: DocumentParser = parser_class( - self.logging_group, - progress_callback=progress_callback, - ) - - parser_is_new_style = isinstance( - document_parser, - ( - MailDocumentParser, - RasterisedDocumentParser, - RemoteDocumentParser, - TextDocumentParser, - TikaDocumentParser, - ), - ) - - # New-style parsers use __enter__/__exit__ for resource management. - # _parser_cleanup (below) handles __exit__; call __enter__ here. - # TODO(stumpylog): Remove me in the future - if parser_is_new_style: - document_parser.__enter__() - - self.log.debug(f"Parser: {type(document_parser).__name__}") - - # Parse the document. This may take some time. 
- - text = None - date = None - thumbnail = None - archive_path = None - page_count = None - - try: - self._send_progress( - 20, - 100, - ProgressStatusOptions.WORKING, - ConsumerStatusShortMessage.PARSING_DOCUMENT, + with parser_class() as document_parser: + document_parser.configure( + ParserContext(mailrule_id=self.input_doc.mailrule_id), ) - self.log.debug(f"Parsing {self.filename}...") - # TODO(stumpylog): Remove me in the future when all parsers use new protocol - if parser_is_new_style: - document_parser.configure( - ParserContext(mailrule_id=self.input_doc.mailrule_id), - ) - # TODO(stumpylog): Remove me in the future - document_parser.parse(self.working_copy, mime_type) - else: - document_parser.parse(self.working_copy, mime_type, self.filename) + self.log.debug(f"Parser: {document_parser.name} v{document_parser.version}") - self.log.debug(f"Generating thumbnail for {self.filename}...") - self._send_progress( - 70, - 100, - ProgressStatusOptions.WORKING, - ConsumerStatusShortMessage.GENERATING_THUMBNAIL, - ) - # TODO(stumpylog): Remove me in the future when all parsers use new protocol - if parser_is_new_style: - thumbnail = document_parser.get_thumbnail(self.working_copy, mime_type) - else: - thumbnail = document_parser.get_thumbnail( - self.working_copy, - mime_type, - self.filename, - ) + # Parse the document. This may take some time. 
- text = document_parser.get_text() - date = document_parser.get_date() - if date is None: + text = None + date = None + thumbnail = None + archive_path = None + page_count = None + + try: self._send_progress( - 90, + 20, 100, ProgressStatusOptions.WORKING, - ConsumerStatusShortMessage.PARSE_DATE, + ConsumerStatusShortMessage.PARSING_DOCUMENT, ) - with get_date_parser() as date_parser: - date = next(date_parser.parse(self.filename, text), None) - archive_path = document_parser.get_archive_path() - page_count = document_parser.get_page_count(self.working_copy, mime_type) + self.log.debug(f"Parsing {self.filename}...") - except ParseError as e: - _parser_cleanup(document_parser) - if tempdir: - tempdir.cleanup() - self._fail( - str(e), - f"Error occurred while consuming document {self.filename}: {e}", - exc_info=True, - exception=e, - ) - except Exception as e: - _parser_cleanup(document_parser) - if tempdir: - tempdir.cleanup() - self._fail( - str(e), - f"Unexpected error while consuming document {self.filename}: {e}", - exc_info=True, - exception=e, - ) + document_parser.parse(self.working_copy, mime_type) - # Prepare the document classifier. + self.log.debug(f"Generating thumbnail for {self.filename}...") + self._send_progress( + 70, + 100, + ProgressStatusOptions.WORKING, + ConsumerStatusShortMessage.GENERATING_THUMBNAIL, + ) + thumbnail = document_parser.get_thumbnail(self.working_copy, mime_type) - # TODO: I don't really like to do this here, but this way we avoid - # reloading the classifier multiple times, since there are multiple - # post-consume hooks that all require the classifier. - - classifier = load_classifier() - - self._send_progress( - 95, - 100, - ProgressStatusOptions.WORKING, - ConsumerStatusShortMessage.SAVE_DOCUMENT, - ) - # now that everything is done, we can start to store the document - # in the system. This will be a transaction and reasonably fast. - try: - with transaction.atomic(): - # store the document. 
- if self.input_doc.root_document_id: - # If this is a new version of an existing document, we need - # to make sure we're not creating a new document, but updating - # the existing one. - root_doc = Document.objects.get( - pk=self.input_doc.root_document_id, + text = document_parser.get_text() + date = document_parser.get_date() + if date is None: + self._send_progress( + 90, + 100, + ProgressStatusOptions.WORKING, + ConsumerStatusShortMessage.PARSE_DATE, ) - original_document = self._create_version_from_root( - root_doc, - text=text, - page_count=page_count, - mime_type=mime_type, - ) - actor = None + with get_date_parser() as date_parser: + date = next(date_parser.parse(self.filename, text), None) + archive_path = document_parser.get_archive_path() + page_count = document_parser.get_page_count( + self.working_copy, + mime_type, + ) - # Save the new version, potentially creating an audit log entry for the version addition if enabled. - if ( - settings.AUDIT_LOG_ENABLED - and self.metadata.actor_id is not None - ): - actor = User.objects.filter(pk=self.metadata.actor_id).first() - if actor is not None: - from auditlog.context import ( # type: ignore[import-untyped] - set_actor, - ) + except ParseError as e: + if tempdir: + tempdir.cleanup() + self._fail( + str(e), + f"Error occurred while consuming document {self.filename}: {e}", + exc_info=True, + exception=e, + ) + except Exception as e: + if tempdir: + tempdir.cleanup() + self._fail( + str(e), + f"Unexpected error while consuming document {self.filename}: {e}", + exc_info=True, + exception=e, + ) - with set_actor(actor): + # Prepare the document classifier. + + # TODO: I don't really like to do this here, but this way we avoid + # reloading the classifier multiple times, since there are multiple + # post-consume hooks that all require the classifier. 
+ + classifier = load_classifier() + + self._send_progress( + 95, + 100, + ProgressStatusOptions.WORKING, + ConsumerStatusShortMessage.SAVE_DOCUMENT, + ) + # now that everything is done, we can start to store the document + # in the system. This will be a transaction and reasonably fast. + try: + with transaction.atomic(): + # store the document. + if self.input_doc.root_document_id: + # If this is a new version of an existing document, we need + # to make sure we're not creating a new document, but updating + # the existing one. + root_doc = Document.objects.get( + pk=self.input_doc.root_document_id, + ) + original_document = self._create_version_from_root( + root_doc, + text=text, + page_count=page_count, + mime_type=mime_type, + ) + actor = None + + # Save the new version, potentially creating an audit log entry for the version addition if enabled. + if ( + settings.AUDIT_LOG_ENABLED + and self.metadata.actor_id is not None + ): + actor = User.objects.filter( + pk=self.metadata.actor_id, + ).first() + if actor is not None: + from auditlog.context import ( # type: ignore[import-untyped] + set_actor, + ) + + with set_actor(actor): + original_document.save() + else: original_document.save() else: original_document.save() + + # Create a log entry for the version addition, if enabled + if settings.AUDIT_LOG_ENABLED: + from auditlog.models import ( # type: ignore[import-untyped] + LogEntry, + ) + + LogEntry.objects.log_create( + instance=root_doc, + changes={ + "Version Added": ["None", original_document.id], + }, + action=LogEntry.Action.UPDATE, + actor=actor, + additional_data={ + "reason": "Version added", + "version_id": original_document.id, + }, + ) + document = original_document else: - original_document.save() - - # Create a log entry for the version addition, if enabled - if settings.AUDIT_LOG_ENABLED: - from auditlog.models import ( # type: ignore[import-untyped] - LogEntry, + document = self._store( + text=text, + date=date, + page_count=page_count, + 
mime_type=mime_type, ) - LogEntry.objects.log_create( - instance=root_doc, - changes={ - "Version Added": ["None", original_document.id], - }, - action=LogEntry.Action.UPDATE, - actor=actor, - additional_data={ - "reason": "Version added", - "version_id": original_document.id, - }, - ) - document = original_document - else: - document = self._store( - text=text, - date=date, - page_count=page_count, - mime_type=mime_type, - ) + # If we get here, it was successful. Proceed with post-consume + # hooks. If they fail, nothing will get changed. - # If we get here, it was successful. Proceed with post-consume - # hooks. If they fail, nothing will get changed. - - document_consumption_finished.send( - sender=self.__class__, - document=document, - logging_group=self.logging_group, - classifier=classifier, - original_file=self.unmodified_original - if self.unmodified_original - else self.working_copy, - ) - - # After everything is in the database, copy the files into - # place. If this fails, we'll also rollback the transaction. 
- with FileLock(settings.MEDIA_LOCK): - generated_filename = generate_unique_filename(document) - if ( - len(str(generated_filename)) - > Document.MAX_STORED_FILENAME_LENGTH - ): - self.log.warning( - "Generated source filename exceeds db path limit, falling back to default naming", - ) - generated_filename = generate_filename( - document, - use_format=False, - ) - document.filename = generated_filename - create_source_path_directory(document.source_path) - - self._write( - self.unmodified_original - if self.unmodified_original is not None + document_consumption_finished.send( + sender=self.__class__, + document=document, + logging_group=self.logging_group, + classifier=classifier, + original_file=self.unmodified_original + if self.unmodified_original else self.working_copy, - document.source_path, ) - self._write( - thumbnail, - document.thumbnail_path, - ) - - if archive_path and Path(archive_path).is_file(): - generated_archive_filename = generate_unique_filename( - document, - archive_filename=True, - ) + # After everything is in the database, copy the files into + # place. If this fails, we'll also rollback the transaction. 
+ with FileLock(settings.MEDIA_LOCK): + generated_filename = generate_unique_filename(document) if ( - len(str(generated_archive_filename)) + len(str(generated_filename)) > Document.MAX_STORED_FILENAME_LENGTH ): self.log.warning( - "Generated archive filename exceeds db path limit, falling back to default naming", + "Generated source filename exceeds db path limit, falling back to default naming", ) - generated_archive_filename = generate_filename( + generated_filename = generate_filename( document, - archive_filename=True, use_format=False, ) - document.archive_filename = generated_archive_filename - create_source_path_directory(document.archive_path) + document.filename = generated_filename + create_source_path_directory(document.source_path) + self._write( - archive_path, - document.archive_path, + self.unmodified_original + if self.unmodified_original is not None + else self.working_copy, + document.source_path, ) - with Path(archive_path).open("rb") as f: - document.archive_checksum = hashlib.md5( - f.read(), - ).hexdigest() + self._write( + thumbnail, + document.thumbnail_path, + ) - # Don't save with the lock active. Saving will cause the file - # renaming logic to acquire the lock as well. 
- # This triggers things like file renaming - document.save() + if archive_path and Path(archive_path).is_file(): + generated_archive_filename = generate_unique_filename( + document, + archive_filename=True, + ) + if ( + len(str(generated_archive_filename)) + > Document.MAX_STORED_FILENAME_LENGTH + ): + self.log.warning( + "Generated archive filename exceeds db path limit, falling back to default naming", + ) + generated_archive_filename = generate_filename( + document, + archive_filename=True, + use_format=False, + ) + document.archive_filename = generated_archive_filename + create_source_path_directory(document.archive_path) + self._write( + archive_path, + document.archive_path, + ) - if document.root_document_id: - document_updated.send( - sender=self.__class__, - document=document.root_document, - ) + with Path(archive_path).open("rb") as f: + document.archive_checksum = hashlib.md5( + f.read(), + ).hexdigest() - # Delete the file only if it was successfully consumed - self.log.debug(f"Deleting original file {self.input_doc.original_file}") - self.input_doc.original_file.unlink() - self.log.debug(f"Deleting working copy {self.working_copy}") - self.working_copy.unlink() - if self.unmodified_original is not None: # pragma: no cover + # Don't save with the lock active. Saving will cause the file + # renaming logic to acquire the lock as well. 
+ # This triggers things like file renaming + document.save() + + if document.root_document_id: + document_updated.send( + sender=self.__class__, + document=document.root_document, + ) + + # Delete the file only if it was successfully consumed self.log.debug( - f"Deleting unmodified original file {self.unmodified_original}", + f"Deleting original file {self.input_doc.original_file}", ) - self.unmodified_original.unlink() + self.input_doc.original_file.unlink() + self.log.debug(f"Deleting working copy {self.working_copy}") + self.working_copy.unlink() + if self.unmodified_original is not None: # pragma: no cover + self.log.debug( + f"Deleting unmodified original file {self.unmodified_original}", + ) + self.unmodified_original.unlink() - # https://github.com/jonaswinkler/paperless-ng/discussions/1037 - shadow_file = ( - Path(self.input_doc.original_file).parent - / f"._{Path(self.input_doc.original_file).name}" + # https://github.com/jonaswinkler/paperless-ng/discussions/1037 + shadow_file = ( + Path(self.input_doc.original_file).parent + / f"._{Path(self.input_doc.original_file).name}" + ) + + if Path(shadow_file).is_file(): + self.log.debug(f"Deleting shadow file {shadow_file}") + Path(shadow_file).unlink() + + except Exception as e: + self._fail( + str(e), + f"The following error occurred while storing document " + f"{self.filename} after parsing: {e}", + exc_info=True, + exception=e, ) - - if Path(shadow_file).is_file(): - self.log.debug(f"Deleting shadow file {shadow_file}") - Path(shadow_file).unlink() - - except Exception as e: - self._fail( - str(e), - f"The following error occurred while storing document " - f"{self.filename} after parsing: {e}", - exc_info=True, - exception=e, - ) - finally: - _parser_cleanup(document_parser) - tempdir.cleanup() + finally: + tempdir.cleanup() self.run_post_consume_script(document) diff --git a/src/documents/management/commands/document_thumbnails.py b/src/documents/management/commands/document_thumbnails.py index 
1756f8754..3d779ae18 100644 --- a/src/documents/management/commands/document_thumbnails.py +++ b/src/documents/management/commands/document_thumbnails.py @@ -3,19 +3,18 @@ import shutil from documents.management.commands.base import PaperlessCommand from documents.models import Document -from documents.parsers import get_parser_class_for_mime_type -from paperless.parsers.mail import MailDocumentParser -from paperless.parsers.remote import RemoteDocumentParser -from paperless.parsers.tesseract import RasterisedDocumentParser -from paperless.parsers.text import TextDocumentParser -from paperless.parsers.tika import TikaDocumentParser +from paperless.parsers.registry import get_parser_registry logger = logging.getLogger("paperless.management.thumbnails") def _process_document(doc_id: int) -> None: document: Document = Document.objects.get(id=doc_id) - parser_class = get_parser_class_for_mime_type(document.mime_type) + parser_class = get_parser_registry().get_parser_for_file( + document.mime_type, + document.original_filename or "", + document.source_path, + ) if parser_class is None: logger.warning( @@ -25,40 +24,9 @@ def _process_document(doc_id: int) -> None: ) return - parser = parser_class(logging_group=None) - - parser_is_new_style = isinstance( - parser, - ( - MailDocumentParser, - RasterisedDocumentParser, - RemoteDocumentParser, - TextDocumentParser, - TikaDocumentParser, - ), - ) - - # TODO(stumpylog): Remove branch in the future when all parsers use new protocol - if parser_is_new_style: - parser.__enter__() - - try: - # TODO(stumpylog): Remove branch in the future when all parsers use new protocol - if parser_is_new_style: - thumb = parser.get_thumbnail(document.source_path, document.mime_type) - else: - thumb = parser.get_thumbnail( - document.source_path, - document.mime_type, - document.get_public_filename(), - ) + with parser_class() as parser: + thumb = parser.get_thumbnail(document.source_path, document.mime_type) shutil.move(thumb, 
document.thumbnail_path) - finally: - # TODO(stumpylog): Cleanup once all parsers are handled - if parser_is_new_style: - parser.__exit__(None, None, None) - else: - parser.cleanup() class Command(PaperlessCommand): diff --git a/src/documents/parsers.py b/src/documents/parsers.py index 372cf0491..69ee4e285 100644 --- a/src/documents/parsers.py +++ b/src/documents/parsers.py @@ -3,84 +3,47 @@ from __future__ import annotations import logging import mimetypes import os -import re import shutil import subprocess import tempfile -from functools import lru_cache from pathlib import Path from typing import TYPE_CHECKING from django.conf import settings from documents.loggers import LoggingMixin -from documents.signals import document_consumer_declaration from documents.utils import copy_file_with_basic_stats from documents.utils import run_subprocess +from paperless.parsers.registry import get_parser_registry if TYPE_CHECKING: import datetime -# This regular expression will try to find dates in the document at -# hand and will match the following formats: -# - XX.YY.ZZZZ with XX + YY being 1 or 2 and ZZZZ being 2 or 4 digits -# - XX/YY/ZZZZ with XX + YY being 1 or 2 and ZZZZ being 2 or 4 digits -# - XX-YY-ZZZZ with XX + YY being 1 or 2 and ZZZZ being 2 or 4 digits -# - ZZZZ.XX.YY with XX + YY being 1 or 2 and ZZZZ being 2 or 4 digits -# - ZZZZ/XX/YY with XX + YY being 1 or 2 and ZZZZ being 2 or 4 digits -# - ZZZZ-XX-YY with XX + YY being 1 or 2 and ZZZZ being 2 or 4 digits -# - XX. MONTH ZZZZ with XX being 1 or 2 and ZZZZ being 2 or 4 digits -# - MONTH ZZZZ, with ZZZZ being 4 digits -# - MONTH XX, ZZZZ with XX being 1 or 2 and ZZZZ being 4 digits -# - XX MON ZZZZ with XX being 1 or 2 and ZZZZ being 4 digits. MONTH is 3 letters -# - XXPP MONTH ZZZZ with XX being 1 or 2 and PP being 2 letters and ZZZZ being 4 digits - -# TODO: isn't there a date parsing library for this? 
- -DATE_REGEX = re.compile( - r"(\b|(?!=([_-])))(\d{1,2})[\.\/-](\d{1,2})[\.\/-](\d{4}|\d{2})(\b|(?=([_-])))|" - r"(\b|(?!=([_-])))(\d{4}|\d{2})[\.\/-](\d{1,2})[\.\/-](\d{1,2})(\b|(?=([_-])))|" - r"(\b|(?!=([_-])))(\d{1,2}[\. ]+[a-zéûäëčžúřěáíóńźçŞğü]{3,9} \d{4}|[a-zéûäëčžúřěáíóńźçŞğü]{3,9} \d{1,2}, \d{4})(\b|(?=([_-])))|" - r"(\b|(?!=([_-])))([^\W\d_]{3,9} \d{1,2}, (\d{4}))(\b|(?=([_-])))|" - r"(\b|(?!=([_-])))([^\W\d_]{3,9} \d{4})(\b|(?=([_-])))|" - r"(\b|(?!=([_-])))(\d{1,2}[^ 0-9]{2}[\. ]+[^ ]{3,9}[ \.\/-]\d{4})(\b|(?=([_-])))|" - r"(\b|(?!=([_-])))(\b\d{1,2}[ \.\/-][a-zéûäëčžúřěáíóńźçŞğü]{3}[ \.\/-]\d{4})(\b|(?=([_-])))", - re.IGNORECASE, -) - - logger = logging.getLogger("paperless.parsing") -@lru_cache(maxsize=8) def is_mime_type_supported(mime_type: str) -> bool: """ Returns True if the mime type is supported, False otherwise """ - return get_parser_class_for_mime_type(mime_type) is not None + return get_parser_registry().get_parser_for_file(mime_type, "") is not None -@lru_cache(maxsize=8) def get_default_file_extension(mime_type: str) -> str: """ Returns the default file extension for a mimetype, or an empty string if it could not be determined """ - for response in document_consumer_declaration.send(None): - parser_declaration = response[1] - supported_mime_types = parser_declaration["mime_types"] - - if mime_type in supported_mime_types: - return supported_mime_types[mime_type] + parser_class = get_parser_registry().get_parser_for_file(mime_type, "") + if parser_class is not None: + supported = parser_class.supported_mime_types() + if mime_type in supported: + return supported[mime_type] ext = mimetypes.guess_extension(mime_type) - if ext: - return ext - else: - return "" + return ext if ext else "" -@lru_cache(maxsize=8) def is_file_ext_supported(ext: str) -> bool: """ Returns True if the file extension is supported, False otherwise @@ -94,44 +57,17 @@ def is_file_ext_supported(ext: str) -> bool: def get_supported_file_extensions() -> set[str]: 
extensions = set() - for response in document_consumer_declaration.send(None): - parser_declaration = response[1] - supported_mime_types = parser_declaration["mime_types"] - - for mime_type in supported_mime_types: + for parser_class in get_parser_registry().all_parsers(): + for mime_type, ext in parser_class.supported_mime_types().items(): extensions.update(mimetypes.guess_all_extensions(mime_type)) # Python's stdlib might be behind, so also add what the parser # says is the default extension # This makes image/webp supported on Python < 3.11 - extensions.add(supported_mime_types[mime_type]) + extensions.add(ext) return extensions -def get_parser_class_for_mime_type(mime_type: str) -> type[DocumentParser] | None: - """ - Returns the best parser (by weight) for the given mimetype or - None if no parser exists - """ - - options = [] - - for response in document_consumer_declaration.send(None): - parser_declaration = response[1] - supported_mime_types = parser_declaration["mime_types"] - - if mime_type in supported_mime_types: - options.append(parser_declaration) - - if not options: - return None - - best_parser = sorted(options, key=lambda _: _["weight"], reverse=True)[0] - - # Return the parser with the highest weight. 
- return best_parser["parser"] - - def run_convert( input_file, output_file, diff --git a/src/documents/signals/__init__.py b/src/documents/signals/__init__.py index fbb55d9fe..864fec09f 100644 --- a/src/documents/signals/__init__.py +++ b/src/documents/signals/__init__.py @@ -2,5 +2,4 @@ from django.dispatch import Signal document_consumption_started = Signal() document_consumption_finished = Signal() -document_consumer_declaration = Signal() document_updated = Signal() diff --git a/src/documents/tasks.py b/src/documents/tasks.py index a8ca0cc5f..751990c62 100644 --- a/src/documents/tasks.py +++ b/src/documents/tasks.py @@ -52,8 +52,6 @@ from documents.models import StoragePath from documents.models import Tag from documents.models import WorkflowRun from documents.models import WorkflowTrigger -from documents.parsers import DocumentParser -from documents.parsers import get_parser_class_for_mime_type from documents.plugins.base import ConsumeTaskPlugin from documents.plugins.base import ProgressManager from documents.plugins.base import StopConsumeTaskError @@ -66,11 +64,7 @@ from documents.signals.handlers import send_websocket_document_updated from documents.workflows.utils import get_workflows_for_trigger from paperless.config import AIConfig from paperless.parsers import ParserContext -from paperless.parsers.mail import MailDocumentParser -from paperless.parsers.remote import RemoteDocumentParser -from paperless.parsers.tesseract import RasterisedDocumentParser -from paperless.parsers.text import TextDocumentParser -from paperless.parsers.tika import TikaDocumentParser +from paperless.parsers.registry import get_parser_registry from paperless_ai.indexing import llm_index_add_or_update_document from paperless_ai.indexing import llm_index_remove_document from paperless_ai.indexing import update_llm_index @@ -310,8 +304,10 @@ def update_document_content_maybe_archive_file(document_id) -> None: mime_type = document.mime_type - parser_class: type[DocumentParser] | 
None = get_parser_class_for_mime_type( + parser_class = get_parser_registry().get_parser_for_file( mime_type, + document.original_filename or "", + document.source_path, ) if not parser_class: @@ -321,138 +317,92 @@ def update_document_content_maybe_archive_file(document_id) -> None: ) return - parser: DocumentParser = parser_class(logging_group=uuid.uuid4()) + with parser_class() as parser: + parser.configure(ParserContext()) - parser_is_new_style = isinstance( - parser, - ( - MailDocumentParser, - RasterisedDocumentParser, - RemoteDocumentParser, - TextDocumentParser, - TikaDocumentParser, - ), - ) - - # TODO(stumpylog): Remove branch in the future when all parsers use new protocol - if parser_is_new_style: - parser.__enter__() - - try: - # TODO(stumpylog): Remove branch in the future when all parsers use new protocol - if parser_is_new_style: - parser.configure(ParserContext()) + try: parser.parse(document.source_path, mime_type) - else: - parser.parse( - document.source_path, - mime_type, - document.get_public_filename(), - ) - # TODO(stumpylog): Remove branch in the future when all parsers use new protocol - if parser_is_new_style: thumbnail = parser.get_thumbnail(document.source_path, mime_type) - else: - thumbnail = parser.get_thumbnail( - document.source_path, - mime_type, - document.get_public_filename(), - ) - with transaction.atomic(): - oldDocument = Document.objects.get(pk=document.pk) - if parser.get_archive_path(): - with Path(parser.get_archive_path()).open("rb") as f: - checksum = hashlib.md5(f.read()).hexdigest() - # I'm going to save first so that in case the file move - # fails, the database is rolled back. 
- # We also don't use save() since that triggers the filehandling - # logic, and we don't want that yet (file not yet in place) - document.archive_filename = generate_unique_filename( - document, - archive_filename=True, - ) - Document.objects.filter(pk=document.pk).update( - archive_checksum=checksum, - content=parser.get_text(), - archive_filename=document.archive_filename, - ) - newDocument = Document.objects.get(pk=document.pk) - if settings.AUDIT_LOG_ENABLED: - LogEntry.objects.log_create( - instance=oldDocument, - changes={ - "content": [oldDocument.content, newDocument.content], - "archive_checksum": [ - oldDocument.archive_checksum, - newDocument.archive_checksum, - ], - "archive_filename": [ - oldDocument.archive_filename, - newDocument.archive_filename, - ], - }, - additional_data={ - "reason": "Update document content", - }, - action=LogEntry.Action.UPDATE, - ) - else: - Document.objects.filter(pk=document.pk).update( - content=parser.get_text(), - ) - - if settings.AUDIT_LOG_ENABLED: - LogEntry.objects.log_create( - instance=oldDocument, - changes={ - "content": [oldDocument.content, parser.get_text()], - }, - additional_data={ - "reason": "Update document content", - }, - action=LogEntry.Action.UPDATE, - ) - - with FileLock(settings.MEDIA_LOCK): + with transaction.atomic(): + oldDocument = Document.objects.get(pk=document.pk) if parser.get_archive_path(): - create_source_path_directory(document.archive_path) - shutil.move(parser.get_archive_path(), document.archive_path) - shutil.move(thumbnail, document.thumbnail_path) + with Path(parser.get_archive_path()).open("rb") as f: + checksum = hashlib.md5(f.read()).hexdigest() + # I'm going to save first so that in case the file move + # fails, the database is rolled back. 
+ # We also don't use save() since that triggers the filehandling + # logic, and we don't want that yet (file not yet in place) + document.archive_filename = generate_unique_filename( + document, + archive_filename=True, + ) + Document.objects.filter(pk=document.pk).update( + archive_checksum=checksum, + content=parser.get_text(), + archive_filename=document.archive_filename, + ) + newDocument = Document.objects.get(pk=document.pk) + if settings.AUDIT_LOG_ENABLED: + LogEntry.objects.log_create( + instance=oldDocument, + changes={ + "content": [oldDocument.content, newDocument.content], + "archive_checksum": [ + oldDocument.archive_checksum, + newDocument.archive_checksum, + ], + "archive_filename": [ + oldDocument.archive_filename, + newDocument.archive_filename, + ], + }, + additional_data={ + "reason": "Update document content", + }, + action=LogEntry.Action.UPDATE, + ) + else: + Document.objects.filter(pk=document.pk).update( + content=parser.get_text(), + ) - document.refresh_from_db() - logger.info( - f"Updating index for document {document_id} ({document.archive_checksum})", - ) - with index.open_index_writer() as writer: - index.update_document(writer, document) + if settings.AUDIT_LOG_ENABLED: + LogEntry.objects.log_create( + instance=oldDocument, + changes={ + "content": [oldDocument.content, parser.get_text()], + }, + additional_data={ + "reason": "Update document content", + }, + action=LogEntry.Action.UPDATE, + ) - ai_config = AIConfig() - if ai_config.llm_index_enabled: - llm_index_add_or_update_document(document) + with FileLock(settings.MEDIA_LOCK): + if parser.get_archive_path(): + create_source_path_directory(document.archive_path) + shutil.move(parser.get_archive_path(), document.archive_path) + shutil.move(thumbnail, document.thumbnail_path) - clear_document_caches(document.pk) + document.refresh_from_db() + logger.info( + f"Updating index for document {document_id} ({document.archive_checksum})", + ) + with index.open_index_writer() as writer: + 
index.update_document(writer, document) - except Exception: - logger.exception( - f"Error while parsing document {document} (ID: {document_id})", - ) - finally: - # TODO(stumpylog): Remove branch in the future when all parsers use new protocol - if isinstance( - parser, - ( - MailDocumentParser, - RasterisedDocumentParser, - RemoteDocumentParser, - TextDocumentParser, - TikaDocumentParser, - ), - ): - parser.__exit__(None, None, None) - else: - parser.cleanup() + ai_config = AIConfig() + if ai_config.llm_index_enabled: + llm_index_add_or_update_document(document) + + clear_document_caches(document.pk) + + except Exception: + logger.exception( + f"Error while parsing document {document} (ID: {document_id})", + ) @shared_task diff --git a/src/documents/tests/test_checks.py b/src/documents/tests/test_checks.py index b78946ba9..51d9cdddc 100644 --- a/src/documents/tests/test_checks.py +++ b/src/documents/tests/test_checks.py @@ -13,8 +13,10 @@ class TestDocumentChecks(TestCase): def test_parser_check(self) -> None: self.assertEqual(parser_check(None), []) - with mock.patch("documents.checks.document_consumer_declaration.send") as m: - m.return_value = [] + with mock.patch("documents.checks.get_parser_registry") as mock_registry_fn: + mock_registry = mock.MagicMock() + mock_registry.all_parsers.return_value = [] + mock_registry_fn.return_value = mock_registry self.assertEqual( parser_check(None), diff --git a/src/documents/tests/test_consumer.py b/src/documents/tests/test_consumer.py index a3574fdce..df4c7d9c4 100644 --- a/src/documents/tests/test_consumer.py +++ b/src/documents/tests/test_consumer.py @@ -27,7 +27,6 @@ from documents.models import Document from documents.models import DocumentType from documents.models import StoragePath from documents.models import Tag -from documents.parsers import DocumentParser from documents.parsers import ParseError from documents.plugins.helpers import ProgressStatusOptions from documents.tasks import sanity_check @@ -38,62 
+37,106 @@ from documents.tests.utils import GetConsumerMixin from paperless_mail.models import MailRule -class _BaseTestParser(DocumentParser): - def get_settings(self) -> None: +class _BaseNewStyleParser: + """Minimal ParserProtocol implementation for use in consumer tests.""" + + name: str = "test-parser" + version: str = "0.1" + author: str = "test" + url: str = "test" + + @classmethod + def supported_mime_types(cls) -> dict: + return { + "application/pdf": ".pdf", + "image/png": ".png", + "message/rfc822": ".eml", + } + + @classmethod + def score(cls, mime_type: str, filename: str, path=None): + return 0 if mime_type in cls.supported_mime_types() else None + + @property + def can_produce_archive(self) -> bool: + return True + + @property + def requires_pdf_rendition(self) -> bool: + return False + + def __init__(self) -> None: + self._tmpdir: Path | None = None + self._text: str | None = None + self._archive: Path | None = None + self._thumb: Path | None = None + + def __enter__(self): + self._tmpdir = Path( + tempfile.mkdtemp(prefix="paperless-test-", dir=settings.SCRATCH_DIR), + ) + _, thumb = tempfile.mkstemp(suffix=".webp", dir=self._tmpdir) + self._thumb = Path(thumb) + return self + + def __exit__(self, exc_type, exc_val, exc_tb) -> None: + if self._tmpdir and self._tmpdir.exists(): + shutil.rmtree(self._tmpdir, ignore_errors=True) + + def configure(self, context) -> None: """ - This parser does not implement additional settings yet + Test parser doesn't do anything with context """ + + def parse(self, document_path, mime_type, *, produce_archive: bool = True) -> None: + raise NotImplementedError + + def get_text(self) -> str | None: + return self._text + + def get_date(self): return None + def get_archive_path(self): + return self._archive -class DummyParser(_BaseTestParser): - def __init__(self, logging_group, scratch_dir, archive_path) -> None: - super().__init__(logging_group, None) - _, self.fake_thumb = tempfile.mkstemp(suffix=".webp", 
dir=scratch_dir) - self.archive_path = archive_path + def get_thumbnail(self, document_path, mime_type) -> Path: + return self._thumb - def get_thumbnail(self, document_path, mime_type, file_name=None): - return self.fake_thumb + def get_page_count(self, document_path, mime_type): + return None - def parse(self, document_path, mime_type, file_name=None) -> None: - self.text = "The Text" + def extract_metadata(self, document_path, mime_type) -> list: + return [] -class CopyParser(_BaseTestParser): - def get_thumbnail(self, document_path, mime_type, file_name=None): - return self.fake_thumb +class DummyParser(_BaseNewStyleParser): + _ARCHIVE_SRC = ( + Path(__file__).parent / "samples" / "documents" / "archive" / "0000001.pdf" + ) - def __init__(self, logging_group, progress_callback=None) -> None: - super().__init__(logging_group, progress_callback) - _, self.fake_thumb = tempfile.mkstemp(suffix=".webp", dir=self.tempdir) - - def parse(self, document_path, mime_type, file_name=None) -> None: - self.text = "The text" - self.archive_path = Path(self.tempdir / "archive.pdf") - shutil.copy(document_path, self.archive_path) + def parse(self, document_path, mime_type, *, produce_archive: bool = True) -> None: + self._text = "The Text" + if produce_archive and self._tmpdir: + self._archive = self._tmpdir / "archive.pdf" + shutil.copy(self._ARCHIVE_SRC, self._archive) -class FaultyParser(_BaseTestParser): - def __init__(self, logging_group, scratch_dir) -> None: - super().__init__(logging_group) - _, self.fake_thumb = tempfile.mkstemp(suffix=".webp", dir=scratch_dir) +class CopyParser(_BaseNewStyleParser): + def parse(self, document_path, mime_type, *, produce_archive: bool = True) -> None: + self._text = "The text" + if produce_archive and self._tmpdir: + self._archive = self._tmpdir / "archive.pdf" + shutil.copy(document_path, self._archive) - def get_thumbnail(self, document_path, mime_type, file_name=None): - return self.fake_thumb - def parse(self, document_path, 
mime_type, file_name=None): +class FaultyParser(_BaseNewStyleParser): + def parse(self, document_path, mime_type, *, produce_archive: bool = True) -> None: raise ParseError("Does not compute.") -class FaultyGenericExceptionParser(_BaseTestParser): - def __init__(self, logging_group, scratch_dir) -> None: - super().__init__(logging_group) - _, self.fake_thumb = tempfile.mkstemp(suffix=".webp", dir=scratch_dir) - - def get_thumbnail(self, document_path, mime_type, file_name=None): - return self.fake_thumb - - def parse(self, document_path, mime_type, file_name=None): +class FaultyGenericExceptionParser(_BaseNewStyleParser): + def parse(self, document_path, mime_type, *, produce_archive: bool = True) -> None: raise Exception("Generic exception.") @@ -147,38 +190,12 @@ class TestConsumer( self.assertEqual(payload["data"]["max_progress"], last_progress_max) self.assertEqual(payload["data"]["status"], last_status) - def make_dummy_parser(self, logging_group, progress_callback=None): - return DummyParser( - logging_group, - self.dirs.scratch_dir, - self.get_test_archive_file(), - ) - - def make_faulty_parser(self, logging_group, progress_callback=None): - return FaultyParser(logging_group, self.dirs.scratch_dir) - - def make_faulty_generic_exception_parser( - self, - logging_group, - progress_callback=None, - ): - return FaultyGenericExceptionParser(logging_group, self.dirs.scratch_dir) - def setUp(self) -> None: super().setUp() - patcher = mock.patch("documents.parsers.document_consumer_declaration.send") - m = patcher.start() - m.return_value = [ - ( - None, - { - "parser": self.make_dummy_parser, - "mime_types": {"application/pdf": ".pdf"}, - "weight": 0, - }, - ), - ] + patcher = mock.patch("documents.consumer.get_parser_registry") + mock_registry = patcher.start() + mock_registry.return_value.get_parser_for_file.return_value = DummyParser self.addCleanup(patcher.stop) def get_test_file(self): @@ -547,9 +564,9 @@ class TestConsumer( ) as consumer: consumer.run() - 
@mock.patch("documents.parsers.document_consumer_declaration.send") + @mock.patch("documents.consumer.get_parser_registry") def testNoParsers(self, m) -> None: - m.return_value = [] + m.return_value.get_parser_for_file.return_value = None with self.assertRaisesMessage( ConsumerError, @@ -560,18 +577,9 @@ class TestConsumer( self._assert_first_last_send_progress(last_status="FAILED") - @mock.patch("documents.parsers.document_consumer_declaration.send") + @mock.patch("documents.consumer.get_parser_registry") def testFaultyParser(self, m) -> None: - m.return_value = [ - ( - None, - { - "parser": self.make_faulty_parser, - "mime_types": {"application/pdf": ".pdf"}, - "weight": 0, - }, - ), - ] + m.return_value.get_parser_for_file.return_value = FaultyParser with self.get_consumer(self.get_test_file()) as consumer: with self.assertRaisesMessage( @@ -582,18 +590,9 @@ class TestConsumer( self._assert_first_last_send_progress(last_status="FAILED") - @mock.patch("documents.parsers.document_consumer_declaration.send") + @mock.patch("documents.consumer.get_parser_registry") def testGenericParserException(self, m) -> None: - m.return_value = [ - ( - None, - { - "parser": self.make_faulty_generic_exception_parser, - "mime_types": {"application/pdf": ".pdf"}, - "weight": 0, - }, - ), - ] + m.return_value.get_parser_for_file.return_value = FaultyGenericExceptionParser with self.get_consumer(self.get_test_file()) as consumer: with self.assertRaisesMessage( @@ -1017,7 +1016,7 @@ class TestConsumer( self._assert_first_last_send_progress() @override_settings(FILENAME_FORMAT="{title}") - @mock.patch("documents.parsers.document_consumer_declaration.send") + @mock.patch("documents.consumer.get_parser_registry") def test_similar_filenames(self, m) -> None: shutil.copy( Path(__file__).parent / "samples" / "simple.pdf", @@ -1031,16 +1030,7 @@ class TestConsumer( Path(__file__).parent / "samples" / "simple-noalpha.png", settings.CONSUMPTION_DIR / "simple.png.pdf", ) - m.return_value = [ - ( 
- None, - { - "parser": CopyParser, - "mime_types": {"application/pdf": ".pdf", "image/png": ".png"}, - "weight": 0, - }, - ), - ] + m.return_value.get_parser_for_file.return_value = CopyParser with self.get_consumer(settings.CONSUMPTION_DIR / "simple.png") as consumer: consumer.run() @@ -1068,8 +1058,10 @@ class TestConsumer( sanity_check() + @mock.patch("documents.consumer.get_parser_registry") @mock.patch("documents.consumer.run_subprocess") - def test_try_to_clean_invalid_pdf(self, m) -> None: + def test_try_to_clean_invalid_pdf(self, m, mock_registry) -> None: + mock_registry.return_value.get_parser_for_file.return_value = None shutil.copy( Path(__file__).parent / "samples" / "invalid_pdf.pdf", settings.CONSUMPTION_DIR / "invalid_pdf.pdf", @@ -1091,10 +1083,10 @@ class TestConsumer( @mock.patch("paperless_mail.models.MailRule.objects.get") @mock.patch("paperless.parsers.mail.MailDocumentParser.parse") - @mock.patch("documents.parsers.document_consumer_declaration.send") + @mock.patch("documents.consumer.get_parser_registry") def test_mail_parser_receives_mailrule( self, - mock_consumer_declaration_send: mock.Mock, + mock_get_parser_registry: mock.Mock, mock_mail_parser_parse: mock.Mock, mock_mailrule_get: mock.Mock, ) -> None: @@ -1106,18 +1098,11 @@ class TestConsumer( THEN: - The mail parser should receive the mail rule """ - from paperless_mail.signals import get_parser as mail_get_parser + from paperless.parsers.mail import MailDocumentParser - mock_consumer_declaration_send.return_value = [ - ( - None, - { - "parser": mail_get_parser, - "mime_types": {"message/rfc822": ".eml"}, - "weight": 0, - }, - ), - ] + mock_get_parser_registry.return_value.get_parser_for_file.return_value = ( + MailDocumentParser + ) mock_mailrule_get.return_value = mock.Mock( pdf_layout=MailRule.PdfLayout.HTML_ONLY, ) diff --git a/src/documents/tests/test_parsers.py b/src/documents/tests/test_parsers.py index 5ea1b361e..30963df70 100644 --- a/src/documents/tests/test_parsers.py +++ 
b/src/documents/tests/test_parsers.py @@ -1,132 +1,16 @@ -from tempfile import TemporaryDirectory -from unittest import mock - -from django.apps import apps from django.test import TestCase from django.test import override_settings from documents.parsers import get_default_file_extension -from documents.parsers import get_parser_class_for_mime_type from documents.parsers import get_supported_file_extensions from documents.parsers import is_file_ext_supported +from paperless.parsers.registry import get_parser_registry +from paperless.parsers.registry import reset_parser_registry from paperless.parsers.tesseract import RasterisedDocumentParser from paperless.parsers.text import TextDocumentParser from paperless.parsers.tika import TikaDocumentParser -class TestParserDiscovery(TestCase): - @mock.patch("documents.parsers.document_consumer_declaration.send") - def test_get_parser_class_1_parser(self, m, *args) -> None: - """ - GIVEN: - - Parser declared for a given mimetype - WHEN: - - Attempt to get parser for the mimetype - THEN: - - Declared parser class is returned - """ - - class DummyParser: - pass - - m.return_value = ( - ( - None, - { - "weight": 0, - "parser": DummyParser, - "mime_types": {"application/pdf": ".pdf"}, - }, - ), - ) - - self.assertEqual(get_parser_class_for_mime_type("application/pdf"), DummyParser) - - @mock.patch("documents.parsers.document_consumer_declaration.send") - def test_get_parser_class_n_parsers(self, m, *args) -> None: - """ - GIVEN: - - Two parsers declared for a given mimetype - - Second parser has a higher weight - WHEN: - - Attempt to get parser for the mimetype - THEN: - - Second parser class is returned - """ - - class DummyParser1: - pass - - class DummyParser2: - pass - - m.return_value = ( - ( - None, - { - "weight": 0, - "parser": DummyParser1, - "mime_types": {"application/pdf": ".pdf"}, - }, - ), - ( - None, - { - "weight": 1, - "parser": DummyParser2, - "mime_types": {"application/pdf": ".pdf"}, - }, - ), - ) - - 
self.assertEqual( - get_parser_class_for_mime_type("application/pdf"), - DummyParser2, - ) - - @mock.patch("documents.parsers.document_consumer_declaration.send") - def test_get_parser_class_0_parsers(self, m, *args) -> None: - """ - GIVEN: - - No parsers are declared - WHEN: - - Attempt to get parser for the mimetype - THEN: - - No parser class is returned - """ - m.return_value = [] - with TemporaryDirectory(): - self.assertIsNone(get_parser_class_for_mime_type("application/pdf")) - - @mock.patch("documents.parsers.document_consumer_declaration.send") - def test_get_parser_class_no_valid_parser(self, m, *args) -> None: - """ - GIVEN: - - No parser declared for a given mimetype - - Parser declared for a different mimetype - WHEN: - - Attempt to get parser for the given mimetype - THEN: - - No parser class is returned - """ - - class DummyParser: - pass - - m.return_value = ( - ( - None, - { - "weight": 0, - "parser": DummyParser, - "mime_types": {"application/pdf": ".pdf"}, - }, - ), - ) - - self.assertIsNone(get_parser_class_for_mime_type("image/tiff")) - - class TestParserAvailability(TestCase): def test_tesseract_parser(self) -> None: """ @@ -151,7 +35,7 @@ class TestParserAvailability(TestCase): self.assertIn(ext, supported_exts) self.assertEqual(get_default_file_extension(mime_type), ext) self.assertIsInstance( - get_parser_class_for_mime_type(mime_type)(logging_group=None), + get_parser_registry().get_parser_for_file(mime_type, "")(), RasterisedDocumentParser, ) @@ -175,7 +59,7 @@ class TestParserAvailability(TestCase): self.assertIn(ext, supported_exts) self.assertEqual(get_default_file_extension(mime_type), ext) self.assertIsInstance( - get_parser_class_for_mime_type(mime_type)(logging_group=None), + get_parser_registry().get_parser_for_file(mime_type, "")(), TextDocumentParser, ) @@ -198,22 +82,23 @@ class TestParserAvailability(TestCase): ), ] - # Force the app ready to notice the settings override - with override_settings(TIKA_ENABLED=True, 
INSTALLED_APPS=["paperless_tika"]): - app = apps.get_app_config("paperless_tika") - app.ready() + self.addCleanup(reset_parser_registry) + + # Reset and rebuild the registry with Tika enabled. + with override_settings(TIKA_ENABLED=True): + reset_parser_registry() supported_exts = get_supported_file_extensions() - for mime_type, ext in supported_mimes_and_exts: - self.assertIn(ext, supported_exts) - self.assertEqual(get_default_file_extension(mime_type), ext) - self.assertIsInstance( - get_parser_class_for_mime_type(mime_type)(logging_group=None), - TikaDocumentParser, - ) + for mime_type, ext in supported_mimes_and_exts: + self.assertIn(ext, supported_exts) + self.assertEqual(get_default_file_extension(mime_type), ext) + self.assertIsInstance( + get_parser_registry().get_parser_for_file(mime_type, "")(), + TikaDocumentParser, + ) def test_no_parser_for_mime(self) -> None: - self.assertIsNone(get_parser_class_for_mime_type("text/sdgsdf")) + self.assertIsNone(get_parser_registry().get_parser_for_file("text/sdgsdf", "")) def test_default_extension(self) -> None: # Test no parser declared still returns a an extension diff --git a/src/documents/views.py b/src/documents/views.py index ffdc309fd..0716ce66d 100644 --- a/src/documents/views.py +++ b/src/documents/views.py @@ -7,7 +7,6 @@ import tempfile import zipfile from collections import defaultdict from collections import deque -from contextlib import nullcontext from datetime import datetime from pathlib import Path from time import mktime @@ -159,7 +158,6 @@ from documents.models import UiSettings from documents.models import Workflow from documents.models import WorkflowAction from documents.models import WorkflowTrigger -from documents.parsers import get_parser_class_for_mime_type from documents.permissions import AcknowledgeTasksPermissions from documents.permissions import PaperlessAdminPermissions from documents.permissions import PaperlessNotePermissions @@ -227,7 +225,7 @@ from paperless.celery import app as 
celery_app from paperless.config import AIConfig from paperless.config import GeneralConfig from paperless.models import ApplicationConfiguration -from paperless.parsers import ParserProtocol +from paperless.parsers.registry import get_parser_registry from paperless.serialisers import GroupSerializer from paperless.serialisers import UserSerializer from paperless.views import StandardPagination @@ -1084,17 +1082,17 @@ class DocumentViewSet( if not Path(file).is_file(): return None - parser_class = get_parser_class_for_mime_type(mime_type) + parser_class = get_parser_registry().get_parser_for_file( + mime_type, + Path(file).name, + Path(file), + ) if parser_class: - parser = parser_class(progress_callback=None, logging_group=None) - cm = parser if isinstance(parser, ParserProtocol) else nullcontext(parser) - try: - with cm: + with parser_class() as parser: return parser.extract_metadata(file, mime_type) except Exception: # pragma: no cover logger.exception(f"Issue getting metadata for {file}") - # TODO: cover GPG errors, remove later. 
return [] else: # pragma: no cover logger.warning(f"No parser for {mime_type}") diff --git a/src/paperless/checks.py b/src/paperless/checks.py index bcea6ef24..5f069b547 100644 --- a/src/paperless/checks.py +++ b/src/paperless/checks.py @@ -3,6 +3,7 @@ import os import pwd import shutil import stat +import subprocess from pathlib import Path from django.conf import settings @@ -299,3 +300,62 @@ def check_deprecated_db_settings( ) return warnings + + +@register() +def check_remote_parser_configured(app_configs, **kwargs) -> list[Error]: + if settings.REMOTE_OCR_ENGINE == "azureai" and not ( + settings.REMOTE_OCR_ENDPOINT and settings.REMOTE_OCR_API_KEY + ): + return [ + Error( + "Azure AI remote parser requires endpoint and API key to be configured.", + ), + ] + + return [] + + +def get_tesseract_langs(): + proc = subprocess.run( + [shutil.which("tesseract"), "--list-langs"], + capture_output=True, + ) + + # Decode bytes to string, split on newlines, trim out the header + proc_lines = proc.stdout.decode("utf8", errors="ignore").strip().split("\n")[1:] + + return [x.strip() for x in proc_lines] + + +@register() +def check_default_language_available(app_configs, **kwargs): + errs = [] + + if not settings.OCR_LANGUAGE: + errs.append( + Warning( + "No OCR language has been specified with PAPERLESS_OCR_LANGUAGE. " + "This means that tesseract will fallback to english.", + ), + ) + return errs + + # binaries_check in paperless will check and report if this doesn't exist + # So skip trying to do anything here and let that handle missing binaries + if shutil.which("tesseract") is not None: + installed_langs = get_tesseract_langs() + + specified_langs = [x.strip() for x in settings.OCR_LANGUAGE.split("+")] + + for lang in specified_langs: + if lang not in installed_langs: + errs.append( + Error( + f"The selected ocr language {lang} is " + f"not installed. Paperless cannot OCR your documents " + f"without it. 
Please fix PAPERLESS_OCR_LANGUAGE.", + ), + ) + + return errs diff --git a/src/paperless/parsers/registry.py b/src/paperless/parsers/registry.py index 7effe554f..c81fb1c45 100644 --- a/src/paperless/parsers/registry.py +++ b/src/paperless/parsers/registry.py @@ -33,6 +33,7 @@ name, version, author, url, supported_mime_types (callable), score (callable). from __future__ import annotations import logging +import threading from importlib.metadata import entry_points from typing import TYPE_CHECKING @@ -49,6 +50,7 @@ logger = logging.getLogger("paperless.parsers.registry") _registry: ParserRegistry | None = None _discovery_complete: bool = False +_lock = threading.Lock() # Attribute names that every registered external parser class must expose. _REQUIRED_ATTRS: tuple[str, ...] = ( @@ -74,7 +76,6 @@ def get_parser_registry() -> ParserRegistry: 1. Creates a new ParserRegistry. 2. Calls register_defaults to install built-in parsers. 3. Calls discover to load third-party plugins via importlib.metadata entrypoints. - 4. Calls log_summary to emit a startup summary. Subsequent calls return the same instance immediately. 
@@ -85,14 +86,15 @@ def get_parser_registry() -> ParserRegistry: """ global _registry, _discovery_complete - if _registry is None: - _registry = ParserRegistry() - _registry.register_defaults() + with _lock: + if _registry is None: + r = ParserRegistry() + r.register_defaults() + _registry = r - if not _discovery_complete: - _registry.discover() - _registry.log_summary() - _discovery_complete = True + if not _discovery_complete: + _registry.discover() + _discovery_complete = True return _registry @@ -113,9 +115,11 @@ def init_builtin_parsers() -> None: """ global _registry - if _registry is None: - _registry = ParserRegistry() - _registry.register_defaults() + with _lock: + if _registry is None: + r = ParserRegistry() + r.register_defaults() + _registry = r def reset_parser_registry() -> None: @@ -304,6 +308,23 @@ class ParserRegistry: getattr(cls, "url", "unknown"), ) + # ------------------------------------------------------------------ + # Inspection helpers + # ------------------------------------------------------------------ + + def all_parsers(self) -> list[type[ParserProtocol]]: + """Return all registered parser classes (external first, then builtins). + + Used by compatibility wrappers that need to iterate every parser to + compute the full set of supported MIME types and file extensions. + + Returns + ------- + list[type[ParserProtocol]] + External parsers followed by built-in parsers. + """ + return [*self._external, *self._builtins] + # ------------------------------------------------------------------ # Parser resolution # ------------------------------------------------------------------ @@ -334,7 +355,7 @@ class ParserRegistry: mime_type: The detected MIME type of the file. filename: - The original filename, including extension. + The original filename, including extension. May be empty in some cases path: Optional filesystem path to the file. Forwarded to each parser's score method. 
diff --git a/src/paperless/settings/__init__.py b/src/paperless/settings/__init__.py index 011f776b5..1c33db7c6 100644 --- a/src/paperless/settings/__init__.py +++ b/src/paperless/settings/__init__.py @@ -121,10 +121,7 @@ INSTALLED_APPS = [ "django_extensions", "paperless", "documents.apps.DocumentsConfig", - "paperless_tesseract.apps.PaperlessTesseractConfig", - "paperless_text.apps.PaperlessTextConfig", "paperless_mail.apps.PaperlessMailConfig", - "paperless_remote.apps.PaperlessRemoteParserConfig", "django.contrib.admin", "rest_framework", "rest_framework.authtoken", @@ -974,8 +971,8 @@ TIKA_GOTENBERG_ENDPOINT = os.getenv( "http://localhost:3000", ) -if TIKA_ENABLED: - INSTALLED_APPS.append("paperless_tika.apps.PaperlessTikaConfig") +# Tika parser is now integrated into the main parser registry +# No separate Django app needed AUDIT_LOG_ENABLED = get_bool_from_env("PAPERLESS_AUDIT_LOG_ENABLED", "true") if AUDIT_LOG_ENABLED: diff --git a/src/paperless/tests/parsers/conftest.py b/src/paperless/tests/parsers/conftest.py index a484f02c8..8747ac9bd 100644 --- a/src/paperless/tests/parsers/conftest.py +++ b/src/paperless/tests/parsers/conftest.py @@ -90,35 +90,6 @@ def text_parser() -> Generator[TextDocumentParser, None, None]: yield parser -# ------------------------------------------------------------------ -# Remote parser sample files -# ------------------------------------------------------------------ - - -@pytest.fixture(scope="session") -def remote_samples_dir(samples_dir: Path) -> Path: - """Absolute path to the remote parser sample files directory. - - Returns - ------- - Path - ``/remote/`` - """ - return samples_dir / "remote" - - -@pytest.fixture(scope="session") -def sample_pdf_file(remote_samples_dir: Path) -> Path: - """Path to a simple digital PDF sample file. - - Returns - ------- - Path - Absolute path to ``remote/simple-digital.pdf``. 
- """ - return remote_samples_dir / "simple-digital.pdf" - - # ------------------------------------------------------------------ # Remote parser instance # ------------------------------------------------------------------ diff --git a/src/paperless/tests/parsers/test_remote_parser.py b/src/paperless/tests/parsers/test_remote_parser.py index 69199a6e8..892915bb5 100644 --- a/src/paperless/tests/parsers/test_remote_parser.py +++ b/src/paperless/tests/parsers/test_remote_parser.py @@ -277,20 +277,20 @@ class TestRemoteParserParse: def test_parse_returns_text_from_azure( self, remote_parser: RemoteDocumentParser, - sample_pdf_file: Path, + simple_digital_pdf_file: Path, azure_client: Mock, ) -> None: - remote_parser.parse(sample_pdf_file, "application/pdf") + remote_parser.parse(simple_digital_pdf_file, "application/pdf") assert remote_parser.get_text() == _DEFAULT_TEXT def test_parse_sets_archive_path( self, remote_parser: RemoteDocumentParser, - sample_pdf_file: Path, + simple_digital_pdf_file: Path, azure_client: Mock, ) -> None: - remote_parser.parse(sample_pdf_file, "application/pdf") + remote_parser.parse(simple_digital_pdf_file, "application/pdf") archive = remote_parser.get_archive_path() assert archive is not None @@ -300,11 +300,11 @@ class TestRemoteParserParse: def test_parse_closes_client_on_success( self, remote_parser: RemoteDocumentParser, - sample_pdf_file: Path, + simple_digital_pdf_file: Path, azure_client: Mock, ) -> None: remote_parser.configure(ParserContext()) - remote_parser.parse(sample_pdf_file, "application/pdf") + remote_parser.parse(simple_digital_pdf_file, "application/pdf") azure_client.close.assert_called_once() @@ -312,9 +312,9 @@ class TestRemoteParserParse: def test_parse_sets_empty_text_when_not_configured( self, remote_parser: RemoteDocumentParser, - sample_pdf_file: Path, + simple_digital_pdf_file: Path, ) -> None: - remote_parser.parse(sample_pdf_file, "application/pdf") + remote_parser.parse(simple_digital_pdf_file, 
"application/pdf") assert remote_parser.get_text() == "" assert remote_parser.get_archive_path() is None @@ -328,10 +328,10 @@ class TestRemoteParserParse: def test_get_date_always_none( self, remote_parser: RemoteDocumentParser, - sample_pdf_file: Path, + simple_digital_pdf_file: Path, azure_client: Mock, ) -> None: - remote_parser.parse(sample_pdf_file, "application/pdf") + remote_parser.parse(simple_digital_pdf_file, "application/pdf") assert remote_parser.get_date() is None @@ -345,33 +345,33 @@ class TestRemoteParserParseError: def test_parse_returns_none_on_azure_error( self, remote_parser: RemoteDocumentParser, - sample_pdf_file: Path, + simple_digital_pdf_file: Path, failing_azure_client: Mock, ) -> None: - remote_parser.parse(sample_pdf_file, "application/pdf") + remote_parser.parse(simple_digital_pdf_file, "application/pdf") assert remote_parser.get_text() is None def test_parse_closes_client_on_error( self, remote_parser: RemoteDocumentParser, - sample_pdf_file: Path, + simple_digital_pdf_file: Path, failing_azure_client: Mock, ) -> None: - remote_parser.parse(sample_pdf_file, "application/pdf") + remote_parser.parse(simple_digital_pdf_file, "application/pdf") failing_azure_client.close.assert_called_once() def test_parse_logs_error_on_azure_failure( self, remote_parser: RemoteDocumentParser, - sample_pdf_file: Path, + simple_digital_pdf_file: Path, failing_azure_client: Mock, mocker: MockerFixture, ) -> None: mock_log = mocker.patch("paperless.parsers.remote.logger") - remote_parser.parse(sample_pdf_file, "application/pdf") + remote_parser.parse(simple_digital_pdf_file, "application/pdf") mock_log.error.assert_called_once() assert "Azure AI Vision parsing failed" in mock_log.error.call_args[0][0] @@ -386,18 +386,18 @@ class TestRemoteParserPageCount: def test_page_count_for_pdf( self, remote_parser: RemoteDocumentParser, - sample_pdf_file: Path, + simple_digital_pdf_file: Path, ) -> None: - count = remote_parser.get_page_count(sample_pdf_file, 
"application/pdf") + count = remote_parser.get_page_count(simple_digital_pdf_file, "application/pdf") assert isinstance(count, int) assert count >= 1 def test_page_count_returns_none_for_image_mime( self, remote_parser: RemoteDocumentParser, - sample_pdf_file: Path, + simple_digital_pdf_file: Path, ) -> None: - count = remote_parser.get_page_count(sample_pdf_file, "image/png") + count = remote_parser.get_page_count(simple_digital_pdf_file, "image/png") assert count is None def test_page_count_returns_none_for_invalid_pdf( @@ -420,25 +420,31 @@ class TestRemoteParserMetadata: def test_extract_metadata_non_pdf_returns_empty( self, remote_parser: RemoteDocumentParser, - sample_pdf_file: Path, + simple_digital_pdf_file: Path, ) -> None: - result = remote_parser.extract_metadata(sample_pdf_file, "image/png") + result = remote_parser.extract_metadata(simple_digital_pdf_file, "image/png") assert result == [] def test_extract_metadata_pdf_returns_list( self, remote_parser: RemoteDocumentParser, - sample_pdf_file: Path, + simple_digital_pdf_file: Path, ) -> None: - result = remote_parser.extract_metadata(sample_pdf_file, "application/pdf") + result = remote_parser.extract_metadata( + simple_digital_pdf_file, + "application/pdf", + ) assert isinstance(result, list) def test_extract_metadata_pdf_entries_have_required_keys( self, remote_parser: RemoteDocumentParser, - sample_pdf_file: Path, + simple_digital_pdf_file: Path, ) -> None: - result = remote_parser.extract_metadata(sample_pdf_file, "application/pdf") + result = remote_parser.extract_metadata( + simple_digital_pdf_file, + "application/pdf", + ) for entry in result: assert "namespace" in entry assert "prefix" in entry diff --git a/src/paperless/tests/parsers/test_tika_parser.py b/src/paperless/tests/parsers/test_tika_parser.py index 010969259..560527934 100644 --- a/src/paperless/tests/parsers/test_tika_parser.py +++ b/src/paperless/tests/parsers/test_tika_parser.py @@ -77,10 +77,10 @@ class 
TestTikaParserRegistryInterface: def test_get_page_count_returns_int_with_pdf_archive( self, tika_parser: TikaDocumentParser, - sample_pdf_file: Path, + simple_digital_pdf_file: Path, ) -> None: - tika_parser._archive_path = sample_pdf_file - count = tika_parser.get_page_count(sample_pdf_file, "application/pdf") + tika_parser._archive_path = simple_digital_pdf_file + count = tika_parser.get_page_count(simple_digital_pdf_file, "application/pdf") assert isinstance(count, int) assert count > 0 diff --git a/src/paperless/tests/samples/remote/simple-digital.pdf b/src/paperless/tests/samples/remote/simple-digital.pdf deleted file mode 100644 index e450de48269ce43785b8344c63e233a1794abae6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 22926 zcmeFZ1ymeg@;^!l!6mrE;Lb3(yL)iAVQ_bM2@U~*y9EgZNJ4OTf?IHc27(3mH{|=> z-S7V7z4P{+J?EYO***+?`*z*Bb*rj-daCNvG^&!)EFe~HWSZ{c?w0P)-Fe9D05*W5 znGLd_AW#wFVCiNB;DGk10i~_&+#oJMX**Llh$IB;Xbuq;Ms{^`ftcDOdu6l44>H?6H;@?p^vKWPs#P#ZGbM{;Bcf{BGr+ELKNlvYYRNcNgtX+7@aLXT zMQN!S?3XnMOGXzd6?Y;rsx^sOB+DXSS48V9%8C_*Nre0Ge09*fJ;tB)nym>uKSOw1 z2i!o0IGFz_FtqiwM&zfZJvBhw+)rnJ_woEU1@Qha3iwk&AOMJsm!0je>e%A*b|c=( zS#^}IgXn*zCa)u*nWXP(wA20EkL1j%VyEC?M)$S`K=_(QzmCRCLHrFiECjSQtYn`iKg zf}%nOaWK%_&+Ku&A#j>Q@-?@j>#2p9dZv4QKhun z=@em(Dge&env$D{x9Q_-*cI_>U>>Rgrg4#rb67eijW{P8;mu->2nuC92$yD~)|^om zof)g{JNi%po%qS2uXL^$$;LVc720v6ksjPB{pbm!yHQ(d{s&oogF>puBi3^YH8K8~ ztf=^&Z>QNYfr%PP%}Ba_X=avrD9bVAkH*pka_wzWhja;v5}TSXTYZnCH!OGA` z3&Wr_z7-7B5)oa9ALHmvT?5AkgZZZC23wcJ>T-OElbRVKU0r;Baq_`7Pq-kT3Z{JZ znzD>tk*w~s6hTUEMXXn7y`Gwr?fjkxs;nIJ_~l-gs<>$-h<Ro53Nw-;(BpU?f_z^C3`oT3wrR`6@gqyKrECgMXzc67xJHqs zAT-Dx8^>$LdmKT)E37b`Q9HMosc9RLS$SU}H%%K8sPn~!;@wJl8+r3Ni~|WNKE`!R z=<|F3*4*42fu`oqj{85Inim%J8su5@xg8h26nNi<@6U2i3+$78s?@4}*VfUtWWkk6 zeAe{ldtn!>Qb4X=udArd3&K0rj2b-D!=Pmdh8w@li!_F%!*}lAmIHJV5!u@`n11Hu z#F}Fagcv7kuQ4S`TnuB$$LG*7l+ct^QSXK;nPa<~;%{0m9&|Yq?448ky<0xS-kd^R z=`@)^j=-TZt1p0$iI!&iVt%=D96Ou<>au%fn$^mpvHOmuK3obBkAk|UuHVvh2G0bh 
zVd#_TTdGX6Jv;Kv{=TJ2sA4{=8zx zVGa>A?xEGeV@B7Swbkd+ z`Yz5K(Xo}_Tt4>o8W?%ftQ37A^?FYCHR{9eQ0jBvmGBcLV z7USBIYAT_SguJkPyK>eTf=DHgI?IA7lk=OMias-*WM_{oKsStX;f1tbxPT*rG)H@JdR-qiMbg%YftI!VPiy zZR^}EJtn@&S8k+jFr_tRn+KzvT{naNdgcjyzj@^-Cw4W{vM74GX3aG zA8%4J&>|DQ4h1z-uCB}oY#P?Uy|GYrt+1K%w)kn}x`2wFUsXPfXf!%W(eEb!UUN;={~aG}&ptOzqXF$UaFB(W2)RzJSXYod?!X>MwuK0cJ@kv?_Z)Wq0~* zGOg&X#OHioX*4tz8_S6BMI3fc-aPx9SV>#!LJ6SP0Y&o|8J_vzMoHtuuMdn&y(V1R zK3q=dZ`GNZv1=&=LdVU94vAbHoVU;;EGI@!=NH-SOr^m`dwB(Y2hn07Nsh@#q^8!b zog){pP5B33E|Gl)J?KO2>I$`2g4eMdGHjsV|+;o9-(THn7OA24?NE{GgWdW$|4i-A%#Om9y@vU~Gu zf_#`FM|CtNfv^t=Vv#jFC!namky9zp<6{Wl8^lNw%}gptv8L=)vGr7JU$w5d0xfO@ zO`Hb6y3uS@5GCb|O^vME)$Um$SdSk5l-cGS^vgLtmnCt;I?6gFaT1e^Kycs3X~0)8 z#@Ld>x3EadXZ=fh*Sy_b)t4{;X-ds?7e@fOpdJ~0__})@Tj!i~EmyhR zMIaQ*Gq&r}C!;53!hbq4PU6b(^$S5J$HvCwPj~NadHT7-)7`vvRWj>x(94OQT=S)QiT2GGZDghdV$l(WmRmJFIsV5<7Q&=*@b_>z0*3@5vvn##f4+iAtctFB4n-0 zwal!;jo%)3jY*cxR)?YS9BGm&4jLFzMgE%Zds9|GHgwt$G;dYa(PPb(`E&Rb*J(?S z_{*t4;H1me92Saibz9)2`y4aeaEOjeuoRE9t$Nj#&&W$5r|$}8Eg86;nv zY>xf(Dh_F-t`;Xnc;xxNV!5UqHMfq0Mn~fae3Tz`4iS{D8W|NQbS!2j1 zFH<*9e-3L`+3Q8VSR14DPu+Z%TC5kTag`HZQN$w}xA&Ek)xR!ydk{s_4Go>SZMbzn zL_!NQZ`ynqXsi}XRqLZv8&^~H(aUdJUdVX!Wb3r=2iHsE=MP+Ky3f0aysXerYl3sR z5~I>gd=d9wF?6mJ6Nf#spfYIGs^W}}v(3s@?XPuWV*1IXJ)gFtnL^COB6#`zXTs8I zjVGs@;mP!J+c-_!s)%4fjqBG1$)mOSoCb_f^J1>2>yVNogSmjmRHb3NgQCNlWix+| z$sg8^D~-jq)%N)JT%m5PZNtW+B61^Nnib_);Fa7z$&cGqY6z0urs1<5oo6tjMwHBh zLT5Uxy+ebokmIfyM`}Yfy!<3ZTCpOuLq1}?{DPAe)JsR}5mWMY)a^u$BWv&snUh|Z z@w5RenTyjt7*A*MW61mAPy2v&IL4vgK6m^sl*=XlPajm@ruzpjPB@b8&6E8!ZhOcJ zGVt6uW;rN|yGwpNKIh=k;PasC_;_3KzGd&CgJ!hc@&E-d)yGrgbLMkskg>WWifmaok5-jbv%Y8R!_ZR!a*c(d+@u|ReL8tA^_wR4s z(=t^yBG?}G5mo>c1}UuHA&WLhsuxu1Sd>%h>@wQo;x$#s>+9K^UCE#8x!0)JX4ePV z1sCD*+67yq^suZo1ogxA!0I2<=;p7$hP*h#OSg2PPx(e#C$Lc>?`kEZI6BfLi$SJO zjJt)G59v2|fqDU8FJ>>^UfrMHs7BPnfo3i=cG%(VWP5TAq^)XV21 z>6;rtTl(aT+79zB=gbYc&^^nVu<_A&2Xe&RJh8r#PMxAtj2=F z)%fPs)dVAg;B8O)`^^>5hk(N#67s$PyzgN`w1&2-_?TSfoYwM!!g0AwmnPyNNUxEU 
zGdjQ_KG?fTY-8g)N^Je5hwqTkvrHD?oUyNz02zSybtl5ozu44-iuMSpv>)lG6f4(H zCxLhD77fEc*}vi;X4N!6E_&~1A$gs;Yve||em_#RSFR3h6Yjd>=9CVaFK(>8{5wA! zKjaDD@8=nN@71b1-k<$AYee6E82M|*{myK$S7Dr2yYe_m6F!X?Kf@arw%S@Lck%O4NiFh)aHP0`uSyE} zzVqrj;R7Q2We;?xXxoRme}!`>0F&jz_Z z+4Z{~oZjNYI|?`TY}u5vk?2_o@$&Ar9*%ca``_lrUe4KB*;HE_UKP0{o(jKXVAguS zm??JaKRl`d!4R~I&*nvK@8{E#-!86{)6FQCt^CTf1*~W8O+HjJB&3v1?$@@eqAvj# zm4QzUQCjSS%UFWQaZ+DoVm5ZeGN8b*u$csVpm6H0!J?!S61UqK9D1U)Ta4)gZU=`a zU77grJahcUgf4TnJ1nQvW4@gP6LXRM6#^`!0#*5iP7e2R2vXlZDxQ&NV}PPF(@o>dC__(T55|`~t+14O#brDBA9x;pGlC zIt@R7J(&;skAWXW9<%BL2Mzt0YUGt3VXHfjly1aZ{T4%F3{r4IP9N?n$87sn305g* z7{!MgE!4V!RL?bXL?rn!f&C2#-is$IQOC-DS+C0ASC!-G1LXpdJ-XDFi0=5hTEAkP zv}<{5TvjT1XZ;GuxUBxJsX~b71ikv8*)L#9xQV1jh`h9Y+0p zbeVCkVQ>QJ4%^~<31eR8ncCGnt2xE^h0nnDQmmJWO-5eB|}CM)u=DRO#rwrrNJlnQ3GDQG*v`~ES$4E(xmH{pAOhk;6c~hK@@-&o1!BJ87^L2x~HgP5i7gB1bZi0mJ$O00#VQur~ zfSb@JH+r2cP*wTMj95A|U)|XFO)E;A=)!tYHa4t`*Tv&{yordhX1yQmk|wAt5%I)d zBNU>~X;9)}m=@!^yXE2bz)a03_R(GxS~8NNg@;U2MYQs82FveyQQ|F1oD#;+u#KBA z{v3$%W5!b|uu(*W3r*+MV&dlKlDiFqC__R}Vrk6=VpQPNtRU>V_{t^UiL*!jZ!czE zq|C;eS?-P3oeq1I-=8E-Ccu4S=bMN-NSK^es(qNGzG-kr5pu3dAP&XG3D%?F$ly(j zMqvh1=gQC0P+_~*_I#=DUshU>3ya;z>AIE2q7Xlk#yW~gzl26iMH!1Y|86GU+i>iS* zEN(?Bh%T-N$i;cDD8GDF`|8Xd?hKb$mrDJLDU4l=mf|JCiZNuP@~W92WyFODR1NPD zn7gh0EiRuXSd+h(mvd4M@1Cr`(<~?2;(QA*I4(8L*1O#G-@OI zDRO9j@%?0Hyb(7=Rmp^duPm~ZB^O*!*FUID4S!fc!V^pU0dY|%+!^y&JGCszlWIu2 zUh3S%QDHY?`YuOPV<_fF$wP_^dJsDNE=kmsd zBmG7+@)hcp)4s7n$s>+9{)o(NZfRrZEFbt^=*&#zyT8;qZmsoD5o9%3)&*@6r^ zm%bGXWtg1My4uO;e(bc1|6!wEj?}Bpzk&3b{9y3HGWM%}Q=yo}*OM1qdkZ`CPkW*O%DR3o%(%G_(6lTi~BdHmdw zjg=M}x*YEEBctv534OShxFd$-QFhG91mK3co1$-;JgM~o967uFLhrH;@SPz6OY&P` zr_AG;t`E5xE5Na@ltOWsJ?hN4)Rhp@VN;T8q1}S4+=8iguB9Lf|4}aewQu~e6tFKN zoYkbO*60LG(^0#$>IkX*6b5X2J&C`c86sp2at_%`-{AG|Q20|F=4knSURw^mWbvvz zR#uoU3d3Gie&Go)iXj?X$DUUzXOS&AI(Y5*y;;J$Iq9&gL7}sOBHrN$kWJ!3phqwv6>k} zay@rFY5dW+v!nd=>zH+E9`7C8>Z=W)}?i- zd;v3|?}A;cuAVvPhv+fq)dJdjD$9IfM+pYV^=bGypcaT7$$t`|JF@zEJzdHWM^gX z>vwzR5s>Bk=~F$pRddetz{x{!?!=O-|ny^XIrdD 
z2V@1aoNqLZ<`NW#H<+*9@NUDc$#j{E-$+Iek;fj6izKqU{`ox@mtp@Y{%wRL` z`oPTn8N)gHpf`d!JFi=6m&Uukv#8}xTxAj~k4Gs^#v_jt-^JS980*?D>Xp7`MVN8&@n+X(`cc64x)gt&rT ztexB(p_w1(p|Ytx1Sl@{v@t1Kn?YPm-K-rQ)FCd`7RXPjAxl>PC$s^j6c=~&(r4k~ z^wXGK3qV&`N3 zf5=@KCJj4u1E9GpzZb)9nAb_X7=v{ww6&X3a{A>c;~5#g z2n27yt~OOjgO%i(C}J0VHW*W0r^uTxs{5ju(-z)2C0J=o>_}p@9tgf5AX-d-FuZBd z$qLEa`kvp9BKUf<*Y>#2XzTmNeBYN=zRMTS=K<=kG}R=?Zw1{C8;JoFb9EE7c3my8 zZ$&z9e~ND7So+~|*`ynkb$DJN7b;H4zjZf6`fy{+!Zh#lW5#l)h%;uAlVw(OGux|P z!U%MfG}p6;>mrt2Hb6XUKA=CVnn${WvMI!|g)zeQi;Cl1urBS4ZZt1(Y{YUib4+Ne z=&+%GXZio4Gx? zj)sdnJUN4+>7Q*77cx-W+T6{`Ri6jyINVUXaA#VCWXl50t0r7G({3ri} z5Ksf+(6f|uM zlz>VMozfNXTj;-DKb-#%P#M7Sw+t|d>$hzG_bK4_nVK3}hPQ`1dMZ4EZl2IT&jvSH z&xW9nAppi4O8Kb#pdfnW*CEj%h>Y(L8R<;`@cHx~yxTp0p4?+xJld@y>_5cWjyBE; zRojxW!dxwP`*nj&`C+=KzMK7wJ@?8nH1Gxu2f@4xLL~@}ZE&Mtn4iG_LC1j~#vV;p^V6MRt~AjvP#y%j1EWc96Oe>Y zI!&G5bYebw%Co3(=P z^oDtP3WF+hcj>pu2cyA_aW?fh1GoPye-mchbEiF1p~05##4< zqOdrk&rv&`-F5m?kbRNl(3I{-81)mq>n+|<9tpAaLj9Is%_mh}BEiL@G8URg2^Vu*f1zhM!SL3?Q-x zj{sZ;u<9c000INg_~H1(SQD8x(Y?hZDI5pze?`I;0O;Y6Ln-rR>4~DlzZbyKBk{wQ zh|3hX(PIxqxD;%t3cJBFhDGO3se&|+QzdX!aW$ULh@GoGcY9_NqL&{tPV}U zkB`J&H|NC_Mz-wwb`0XhU=32~DqA=Ef?6F^xvuwx%pr()-QtSU52+2+vrBv3=nFYn zkYi`}F`^*yYGnU9(iKP$O(fKEJ?+@m3o`%#*v)h-bH#Co`+)A)wRnu)f^tM<0*4$d zwT4LzM;h|1Gt5NF3GfB81@T!JTS!Ers4Rs!CNd%6POV_jY z*G^(zs9IhMBL+&oq{P7tel6WYfrTma()u;3BpxMxQY5`74#g-y9y9edQID?V^Fqvt z5Gx5c06(TSrvK~x*IBPAdJxTUPGC+DPY6J9UJ>aME#l0SD-alfONSQ~zB+klu0h0z zofyV)o!&H`22};_Ong&FQ*=`VktkZhVg6wOSs_`Gg=+mf?RQesST--t zkH! 
zuxCm4xZ#)YMJeS*r{ci_prnNj3E$2+81xuUz<>?QVaYL zGY|Pmf!4c7H0C;u;5UlzZBmZ+AnmzW*MF3}r8ZbNS4Z^IXR zySAP0VP-k}#VtK5>qMoN=XHI#jGftUe_5u$?gP zE`cthbtco~<48nIkR(=8@PPCt4Kg;(YZpn}LcEDYE9H+g{Fp+o1A1PX;edkErAKJD zu~jgKqdxVV_Go>_H3K>a@rt*oWK#=MwNbXwRAbFWW%YAv_rKPr=qT18O-+-iCikt_Z_-+guRMNQ)`rSGsX7T>uBbS&*m@FWlJdv% z9~?grtRqpAK<4ZjQ6pm8bW;P9<}`J*--7I#w+=kpWOHG@M&wJ*mSLq#OR^or zAF$tKbrJDS6qFB;%%y0jZl|ev)BdJmpc#icT(m%Kp1uVKGa5%KsZea9Ed7-o!zd0= z9)>0xOGd81{M9dpGKSO?A9;?F&`Alx{8-gK1{HeO6saMEA^aiQEg)$Kx{3@mK)z9e zU65R=UN}|Ekzb!*U*=kJT7Xg-Q>aj=P$nR)EvYT7Ei)(U8Fk3GjMjwNgy5#`=IR#X z#^ko|rtj8#M(;x@L?wjK3e!r^O47>M%G!$5iq?wViti8bhx3Q^C-P^#m%CBB(Yukp zQ8?57fv_vH+yA5Nhw*rOiE+_t{-e|j4dzsFNWm{vsds7`G!=L=uWh7+B+v72)Vs@1 zra5iUKPBqPzc1ldTPzov&YEtXmYFV_)}E%>quaySBZr539t_<=#e&4L#d5`RI)gex zI)giNIuj^jDdH(oDIyCJ3sMT=3!(}_3)0r{y6L;&x?Lk)L|~xqqw1sH#u7*)Q=w1+ zi|C6`i;#<0ix`SnikONpz=&XGFfy1MOaW#9vw*R|2w?IJzy`qvxKFo_C$=(nl^~Eq zOwo&$5gQ>HCK)anHkme=A{jRsI~ia7r8cEn($Yh-d{XoP>Hbi`w%dZcZ{bmU}2CL;lqXI5?QVB!F78xrjB zOY;u7<~yZ4Wjy8CCdGq8NeWL2kC953%9IM1O6!g7&F#(U&7%pSNumj*$ty`Od0P^- z#XijS*7Pj`^AvN9&RgBK4|esiS|m)VsiB&Qn$emWnvpXpGx0MyGkIJgTuEHX=Ww=2 zwk)xWj0+m6qjpS$Bt=xRCYPwQf8hwE+Yh-()=G}qnMMb^{R zS=Qy503jm7D_twS!X3gx!h^#75ApXLx3ssy2=Sr8p)nF+5)l${-9g>a-5K4HRIyaK zR5?_6MPWtpMd3v;;2?0y2B;q;o+chhvY7pnE;u!7g^>0oEEd?yeckD+av&VSx`Q~^7T@Ia18{ZjZ8&De58SEKE7;qa@8w40684MbH zGmtV!HP~t7|5EL|yzSF-p1sd9M^~-n&=I#|@qK$8Z$oQs_B;31=bej9^zZRo${VB5 z`(^cp!`9(;Td#kDZ>DdqZ}_dir7;;QMrvfTOqfi(OlW_4e|&!qT_RlsUFr_63P^=i z1x1Btj3xJ0a7wVoTgyA(_~(h4H=$Rfw{;70%R-Aq3wn!D3q{LG3t`KPmaP`FmV=f# zFBva7uN|*6uQ~6)Q;KVPzn^~Me(ZjGe$sxQ{SN(t{Xl*)*T1giulKGOu6(W|uE|cm z{%YO0?YfL1A5>Yl+`<2zwm!GUv)!<9xrwozu%WU8Tm$Ynt{tr%?X-74ra#g=B0MOlK(lsF~2B3!6eM2&ZK&cuv4#7tCOsgqBAn2K13m8KSV&>ilX9W!%HRESF+jz zY6DUO#`N;^n)E8~Glx|@PceNBPus_T;L zBESfij;noZzwA$r71lqb15k(ktn}WmPy)3>Ph-Y zMo5uGwY>BO{!%j6#sLm7GXfY{Dp+kO_W(XRoCch_XX#e8ySft!g>#Ze(aKS6c!t=^ zg`J$paiO&G>iIt=nXRiois~`5glfrF7IIBBSxbDB`N+56yTG%MOCT1jsi>-`rKm8X 
zI3hQq!mP)v^fOE+T&GAUZJwppxmLFpQtN2u``ybK&soe_*O|rH)!EWH@Py#R?L_>9 z@r3ro;e_Hu<~y)Q5cL2S305IiG?ow6Ec9!w6AKCJ8&(<3sJy|O-86#K^tU=G zSCq9>QAy@0o5?9}rxQ6--BQkyW8Vrg>H>6WzA<}i{`$OGyva1kHORtHjMIwKkP?n%tyZa~KZ$7VW9?v#WX)v_{vk0bJo#!;rUa$9 zvRJ>^w^*jQtk_r~@XZ5D=v&j2^|Yk76DgoH=Om<*sN}&nd@Qb7rH0#0BSIOL>UF6AnQQD+vTM0APVbV0F7^Jsm(q(953TGsJHKbjkYNNHM z*;L6{bf#r3VQrjIHB!P{P*rVL@w1BLU3PwUr9p+CSfl>+=U*wm3V${Isz!blE+M5T zrP(XlD@P+rV?-lUBDvKfpEhKl%8)9R>Yi$-S;!b!@uecOqP(KsCf}xT4>ZF*BRxYl zBTz9_;i%bO{){V{C?PW?voy0Lvsv$*Ui1ReC!0?(HupA{GZr&i)1Rj&_7wNT_mKDC z_b&F@_CR~OdzRD0(*iS*GiB5GGwl^e)`?tSxTv`}xtO_@xVkvixR5z*I9aWi#tG)U z%1>TxG|(z1DNt#q2ZU9}y({TWX>MoK|aL3dbZSdK}R z>DB%ht#GZFS+4iKdi8otdW#DL_IUP~_7HopJ>7xm0ri34f&78iLBPT70`TM9$H4tH zw|Tc7w`sQ#Hv@NZcQrR;Hyif_cZWU2-Im$w!um3W*{#{s*_qi-vqDwGC0XUZ(w8j` z4Tq%5gv*fSJRf=?S|L**8X>R{T`QoKy4A1Mrj@((S!<-vsgLoQ$NAum;LYR>|IO!{ zl^f8_x0{lixSQ4+@J;;<(@i_76Z}v3@9=x@c<^NKnD9T~%g~5Wo6w|DEl}U0aidK{ zFh>+dR76-rI7FC6SVu@joJZtDG@zBCrJ)8A6~yYqO2%o#jl?d+cEwJ{Dih{#?6DJp zGC_LW3+#Oa^+Z##GjS}jLa|h_9I;~rc|3<8GOhrJ7Mr_8IL9t)O6x*%CwAD$DQClw-gLxi`NzrB@&(@2!;rw}P{Rl7g**Bh-`z!^p!iZ+TegSXOlc z8pQQ#btbB&-!s2&sWPi_sTz9Es)wedq{E`;-yp5Wp+8kM^ZtDmhJL@!j-IwogYN3b z+>Zga_ucUJ-8NJXMb=>O1{mAXb*XfrwA*69V!`5@#Uh^Nur-r?rR}~6u~n9xzLmb? 
zBE)yjW7gyAMGa|h{Hk*2==|sm>k?~s^?dbU_3B~4a>z1|Z?RCl&{w1Gm4%gl;T7Sr zw!VkDd;8n63#JPJbZP`61aAZ?1pG+0NViC{NZQDN$VhY)^i_-k^jb13kz)6Oi^Ru* zVu=@#qhx#hoIX~E<7E=Q@ftD|GPp9XGQfU2Me@FElDN);ildbzgm(eQSv^5IPvTUGT$n zfBne0r)XfKMJBIbB0(yFJXc3WS%pyLxeET6(-`xZ%NX|<#Telj?pV8#n~}It1@smv zY!qem!f2<(r-jU`_2MRgz(0NC!Nu9ynYUfD<#SMDt zleMXiPaXGwK0qztAYVd^LX1#sODrwK0YM1kh6EL%9!>#%A3*>?25tvV`-L$Q8Cn2t z3uXxVI%*OM2x7-4Fw#AR}D` ze+AQ?(X{yVn3XtZ;79q7?rx%PHg0lm`e#mOc4t1Q>=9JbtbNpdOtci+=8E8vxO6&D zCFr(xq!z38rnb6vqSm|iyw<-KyLPx%!1>I%(0Rc*-g(`b&zZ}4*%{;eN{@Yi@<#Xu z*9Pf#iH-X2mEQ$6j(UMTjx>o_aM;t>Ke5EI*s&S0tErb1bY$lhscDgEE9e$zBB(8B zwP-zPR}>6nm*gpFkZ7!_`KZHbAvBtF?leNOKFMcH8E=^1RHgn*CrQfAGDurWzN7bM zxE{ldpfZy=i~m9vCHy9^cwP1}shp~k0T50j1ub~!Wul=dp)cWsux)t{`7ko3GHE`2 z(TQ-ed}97Y<<99Yd~QhuQeso0`bxpV%tH5``#qgK<$?dg;j7Z3H!^4mJ|uA>KEJG{ zCOV785~|CJrW&VGr`}IJpPHItniQBiDr=W}%n{2I8y^4a^!4>1Z;~?w7_ed8x1(rI zt6geVs%ar=;bS3XVYz)j(vZ`rptcTW0Pg zr@hgB>&bUyw35}5;}_u<+-XdAfcn3pd=;UWGQ)*lsyU8Zax2y!ud z=&^WKBKlFZrsF-J4f=%r$)tEqv(tj2QT3y0&G>t^wqp%b+jZG>zx8doA-M&)*5pGv zY~@^G?~~1(14qDQ0u%KV<;fGLOZUL%5W zgGC)Epn<39QCL-O_vI(#n+0EqLDkZSrOG>5Z(VQ8Yp?5DWYx}B1v;kcn0n4tg#~8$ z-@SVqfnAZ8F}h|JdgP{~j$<7dWj#{G)Kilquy&F7k9m2H~Xl~NVw6+)HC zl?s(L8VTC%HP5)RxrVqrxYoJ6x#GAaxPZ3Ywm)oXY~pPFZ4+j-W-exxX96m^r%&dr zXB($`XL72WHF&fxs~s+x7j?tyxh*Be$V7Y}ey)8>v*g+Tu}`z7wCBG+wQsw3yH~b9 zH?}^Clrtr?6Ttez;78-AE5_J+P5_Y?(KHbWu^CY`(E?E)(F*s^TuXr(uayhGtLwW{ zr5(q-K>vr!7o{k5_{=~!VXD^`9hSFKS?)U)PG zn~Nzc8dKA&XCJMLGUgR+x$Q0-gcl5!m*xbg?mmT&Q!bc|yL_Q}Us6$018Lf_Wh&78 zIFmoq%=P|L)2ABu((`?(?@|j=-7&)W2}V_o^MNNndL8XIxof#MxZ6L+es20qY+r9L z!a8C2rp4TY;aKgW#5K*q(8t@lIj7dJbh%Eb_}2YzS+R^Cq63XSTGnU6kaFtL*a}ULK7dI_{G11uj4C@GrWj?#It< zRO1^TwFrCP52JOVEud`>KjU$6$ZiMuflvAlVfup>f=)%dM89>+1k*hy<{9Lnam-vr zZOrhjzuw;cTB3i2-jl&X3R68`lpO6&r9J7FXjN{kJ`K4*DDBWMrwZsJ5vn zsIIBrQ(a06FNrNs9s!7X`b<{S}vR-n)7RlwRk z_%iD#VI}Yp=3MkRxbt~C!Z!?7!7rz4%e;c4BGm$iLb-e^Y{G&M+Z0{{x2ih@bKPf= z=NOS>tinryJ*Tp_NjpVF38TV9ejksm97BA?ZM$5*@AiC)>Wm7HYL&~CJCLgxJR7_j 
zEEwz^yzg&Ga3>=V$UeDR+E`3jRTfkCQ}n$`iGQX8r$Vxr*_MFRhsig1@Et8 zPcpu9^jk)aG3K6&_@3D9@ZGwe?;MUz<>vVdpZPiCjr{t$_w{CUdxZOI{MYIc(;X{U z3)ceIa92{-WLIP3@zz9t(@V+=sUI;b_+!R*K7$u(2PKQc(`lB*=&gRI$UeP~Lu;vz zmEamZVI_-sY=Yv|Hjho?OStRnC^i)9rixGyRDM3&-x2TtDBfgX9L0 z6;P{sz|OD3f#wCz(4!URPK6K!X$3I_y0;5yv}xRL%b^eY@xyY%Gs8S-_$v}eFII@# zF#Pf!A0}_3E{VzCk>d#S2FO0Bp03YE_TqX;IrUo6c$JuxfFU+pw~QMVzbb}phW5VA ztk0BiO=s?Ae$%^JDF1Z$smykf{F|_i-`QRIb?wQ*7SyeOc6KW=1-pUIq0gGrJd2LbtBmbve)?DF{`hwezl6?wCoZdx z#@ZflI!`2SBgYb!$ngUOZ)#7H*UKhV=JKpXhyzyc8=K_jQ&v7MD$Z9ew0yd@Kxw3@ zgS6o`YPQTYoSWHtW;rgJ2v{{cHM{y+_}=381K%49x5wYx+;Uy-FFKa4R$QM`X1lk`0gJq8cws? z&$;keH27Wi9d?nmK*OAE_>J04bL~9s7|>g%(q;E$%;oKc@xjsK?NN^y#e=s&`il;V z7cenMFclTgUZTSUeSZjqVZ#?${htbHo@y(eYC?E9dH(@I2R&7T{Iya<#of)x-A(Ry z-3GMCLq1_J(Z0$R~wZv9k?C1(yT?69`5hPcqXLTf$%a#9l9AU)^?j}S@`6pv1a|_caiT}de)Y{tqH}|J=KnGW#wfS$6pbY<@=7(0>xCwIz0{?ml zo$v4G!Hy0VjxP454q%9|_}|I?+WRCO@JXD%i1+_OI4;s?Qh~h>G&m} zuaAJ%7HT*;+6imBSVLbhAqf1h{l69Y_fCi;^!*dU>}>38EFcgIh*tx|#m~;h&&kWg z2I6OfuH(N@{x2)}L)lP&g8Cou?+*B+@&7Zn|19MHZ+85TMgC)vf0G9O<68eF#s14| z|6`GVS#|%Wf&a0{ze%zGGTZ-Hn-u#m zv;9BEBL9e=LGg@2l%7w~vHvMz_!pq23KTQv4Gmx+16A!z!4P|B@DC_%=Loj_E9&fT z5yrov&i(+8N!dZ{p|DCQl+4t@+|S$s1FAXNL6l5?1K<8!22d3WB>i*pzc-$=0aVccy}W;(^f#kFCslU0chyH`1Ly<( zCY|A50;B&mBKw5hb4C7dfIVd3-w}Jz$m-uAd(c^d3fAVX`hRHl=}^Ma{Rueumnqo) zE2vTe8h5pGw1lo4I@ccvL1dtc6U0Fb{1oif|7|4zP)An};NWg&_s2grDDQt(-QO*s zXzE}|4{>18RA+$7Y3gDt2MtU@-QuYUG5-@D2%X_C!Dd(e-?3;lL+HBx2m}K_zj3#J zmHRJn$v;DSg3?mb(rj#8JZ#V^CpI>2KIonkx`)CtdDx-G_*)?IPdZMCKWMn2w7==0 zbez!f+}wYDpyRnYpdV;Il!T1~%9mXVx@Uv(`aLC79Z#V_dP!(Fkev+#0`Y>l*x7!^ z1?kw>=otPYu9pkM0%~S<4rC~&KQ{n37Z(Q?zyk1R9}gD?)ZkAWz~TQY+=_7iIzwS=we$U&?!S+P!s`D6h(o=UGn;zf@Ux^bR~{QiN51tfzOz8%?u_+(c;KQ zhSD}T@LJ)eXGF>TK0gtN`{OwrawtjqNr zu=Ty=!&Y~Iu{Jf(7qWchg&`<|%RL{t1SLLk>*XB}8ke?Pd;Gj%M2wgYKkM)?7506b z)Cc-o(|s#&`Rh0lg~C4S#JPxi2#hpVu65C6obhw`Ur2K@=FM_GeSBSizlWDuDCw-V kPGfa=d?`gW`dlS1xI->&7F}1o7(8T1_&&7T@9RN#cXxA{fdBvi diff --git a/src/paperless/tests/test_checks.py 
b/src/paperless/tests/test_checks.py index 3572f02a4..87e64a90e 100644 --- a/src/paperless/tests/test_checks.py +++ b/src/paperless/tests/test_checks.py @@ -5,6 +5,7 @@ from pathlib import Path from unittest import mock import pytest +from django.core.checks import ERROR from django.core.checks import Error from django.core.checks import Warning from pytest_django.fixtures import SettingsWrapper @@ -12,7 +13,9 @@ from pytest_mock import MockerFixture from paperless.checks import audit_log_check from paperless.checks import binaries_check +from paperless.checks import check_default_language_available from paperless.checks import check_deprecated_db_settings +from paperless.checks import check_remote_parser_configured from paperless.checks import check_v3_minimum_upgrade_version from paperless.checks import debug_mode_check from paperless.checks import paths_check @@ -626,3 +629,116 @@ class TestV3MinimumUpgradeVersionCheck: conn.introspection.table_names.side_effect = OperationalError("DB unavailable") mocker.patch.dict("paperless.checks.connections", {"default": conn}) assert check_v3_minimum_upgrade_version(None) == [] + + +class TestRemoteParserChecks: + def test_no_engine(self, settings: SettingsWrapper) -> None: + settings.REMOTE_OCR_ENGINE = None + msgs = check_remote_parser_configured(None) + + assert len(msgs) == 0 + + def test_azure_no_endpoint(self, settings: SettingsWrapper) -> None: + + settings.REMOTE_OCR_ENGINE = "azureai" + settings.REMOTE_OCR_API_KEY = "somekey" + settings.REMOTE_OCR_ENDPOINT = None + + msgs = check_remote_parser_configured(None) + + assert len(msgs) == 1 + + msg = msgs[0] + + assert ( + "Azure AI remote parser requires endpoint and API key to be configured." 
+ in msg.msg + ) + + +class TestTesseractChecks: + def test_default_language(self) -> None: + check_default_language_available(None) + + def test_no_language(self, settings: SettingsWrapper) -> None: + + settings.OCR_LANGUAGE = "" + + msgs = check_default_language_available(None) + + assert len(msgs) == 1 + msg = msgs[0] + + assert ( + "No OCR language has been specified with PAPERLESS_OCR_LANGUAGE" in msg.msg + ) + + def test_invalid_language( + self, + settings: SettingsWrapper, + mocker: MockerFixture, + ) -> None: + + settings.OCR_LANGUAGE = "ita" + + tesser_lang_mock = mocker.patch("paperless.checks.get_tesseract_langs") + tesser_lang_mock.return_value = ["deu", "eng"] + + msgs = check_default_language_available(None) + + assert len(msgs) == 1 + msg = msgs[0] + + assert msg.level == ERROR + assert "The selected ocr language ita is not installed" in msg.msg + + def test_multi_part_language( + self, + settings: SettingsWrapper, + mocker: MockerFixture, + ) -> None: + """ + GIVEN: + - An OCR language which is multi part (ie chi-sim) + - The language is correctly formatted + WHEN: + - Installed packages are checked + THEN: + - No errors are reported + """ + + settings.OCR_LANGUAGE = "chi_sim" + + tesser_lang_mock = mocker.patch("paperless.checks.get_tesseract_langs") + tesser_lang_mock.return_value = ["chi_sim", "eng"] + + msgs = check_default_language_available(None) + + assert len(msgs) == 0 + + def test_multi_part_language_bad_format( + self, + settings: SettingsWrapper, + mocker: MockerFixture, + ) -> None: + """ + GIVEN: + - An OCR language which is multi part (ie chi-sim) + - The language is correctly NOT formatted + WHEN: + - Installed packages are checked + THEN: + - No errors are reported + """ + settings.OCR_LANGUAGE = "chi-sim" + + tesser_lang_mock = mocker.patch("paperless.checks.get_tesseract_langs") + tesser_lang_mock.return_value = ["chi_sim", "eng"] + + msgs = check_default_language_available(None) + + assert len(msgs) == 1 + msg = msgs[0] + + 
assert msg.level == ERROR + assert "The selected ocr language chi-sim is not installed" in msg.msg diff --git a/src/paperless_mail/apps.py b/src/paperless_mail/apps.py index dd3e71f82..1c5d656e0 100644 --- a/src/paperless_mail/apps.py +++ b/src/paperless_mail/apps.py @@ -1,18 +1,8 @@ from django.apps import AppConfig -from django.conf import settings from django.utils.translation import gettext_lazy as _ -from paperless_mail.signals import mail_consumer_declaration - class PaperlessMailConfig(AppConfig): name = "paperless_mail" verbose_name = _("Paperless mail") - - def ready(self) -> None: - from documents.signals import document_consumer_declaration - - if settings.TIKA_ENABLED: - document_consumer_declaration.connect(mail_consumer_declaration) - AppConfig.ready(self) diff --git a/src/paperless_mail/signals.py b/src/paperless_mail/signals.py deleted file mode 100644 index 8fe046393..000000000 --- a/src/paperless_mail/signals.py +++ /dev/null @@ -1,19 +0,0 @@ -def get_parser(*args, **kwargs): - from paperless.parsers.mail import MailDocumentParser - - # MailDocumentParser accepts no constructor args in the new-style protocol. - # Pop legacy args that arrive from the signal-based consumer path. - # Phase 4 will replace this signal path with the ParserRegistry. - kwargs.pop("logging_group", None) - kwargs.pop("progress_callback", None) - return MailDocumentParser() - - -def mail_consumer_declaration(sender, **kwargs): - return { - "parser": get_parser, - "weight": 20, - "mime_types": { - "message/rfc822": ".eml", - }, - } diff --git a/src/paperless_remote/__init__.py b/src/paperless_remote/__init__.py deleted file mode 100644 index 5380ea5ac..000000000 --- a/src/paperless_remote/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# this is here so that django finds the checks. 
-from paperless_remote.checks import check_remote_parser_configured - -__all__ = ["check_remote_parser_configured"] diff --git a/src/paperless_remote/apps.py b/src/paperless_remote/apps.py deleted file mode 100644 index 1997b0ae9..000000000 --- a/src/paperless_remote/apps.py +++ /dev/null @@ -1,14 +0,0 @@ -from django.apps import AppConfig - -from paperless_remote.signals import remote_consumer_declaration - - -class PaperlessRemoteParserConfig(AppConfig): - name = "paperless_remote" - - def ready(self) -> None: - from documents.signals import document_consumer_declaration - - document_consumer_declaration.connect(remote_consumer_declaration) - - AppConfig.ready(self) diff --git a/src/paperless_remote/checks.py b/src/paperless_remote/checks.py deleted file mode 100644 index b9abb0592..000000000 --- a/src/paperless_remote/checks.py +++ /dev/null @@ -1,17 +0,0 @@ -from django.conf import settings -from django.core.checks import Error -from django.core.checks import register - - -@register() -def check_remote_parser_configured(app_configs, **kwargs): - if settings.REMOTE_OCR_ENGINE == "azureai" and not ( - settings.REMOTE_OCR_ENDPOINT and settings.REMOTE_OCR_API_KEY - ): - return [ - Error( - "Azure AI remote parser requires endpoint and API key to be configured.", - ), - ] - - return [] diff --git a/src/paperless_remote/signals.py b/src/paperless_remote/signals.py deleted file mode 100644 index 2300be760..000000000 --- a/src/paperless_remote/signals.py +++ /dev/null @@ -1,38 +0,0 @@ -from __future__ import annotations - -from typing import Any - - -def get_parser(*args: Any, **kwargs: Any) -> Any: - from paperless.parsers.remote import RemoteDocumentParser - - # The new RemoteDocumentParser does not accept the progress_callback - # kwarg injected by the old signal-based consumer. logging_group is - # forwarded as a positional arg. - # Phase 4 will replace this signal path with the new ParserRegistry. 
- kwargs.pop("progress_callback", None) - return RemoteDocumentParser(*args, **kwargs) - - -def get_supported_mime_types() -> dict[str, str]: - from django.conf import settings - - from paperless.parsers.remote import RemoteDocumentParser - from paperless.parsers.remote import RemoteEngineConfig - - config = RemoteEngineConfig( - engine=settings.REMOTE_OCR_ENGINE, - api_key=settings.REMOTE_OCR_API_KEY, - endpoint=settings.REMOTE_OCR_ENDPOINT, - ) - if not config.engine_is_valid(): - return {} - return RemoteDocumentParser.supported_mime_types() - - -def remote_consumer_declaration(sender: Any, **kwargs: Any) -> dict[str, Any]: - return { - "parser": get_parser, - "weight": 5, - "mime_types": get_supported_mime_types(), - } diff --git a/src/paperless_remote/tests/__init__.py b/src/paperless_remote/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/paperless_remote/tests/test_checks.py b/src/paperless_remote/tests/test_checks.py deleted file mode 100644 index 0512fb257..000000000 --- a/src/paperless_remote/tests/test_checks.py +++ /dev/null @@ -1,24 +0,0 @@ -from unittest import TestCase - -from django.test import override_settings - -from paperless_remote import check_remote_parser_configured - - -class TestChecks(TestCase): - @override_settings(REMOTE_OCR_ENGINE=None) - def test_no_engine(self) -> None: - msgs = check_remote_parser_configured(None) - self.assertEqual(len(msgs), 0) - - @override_settings(REMOTE_OCR_ENGINE="azureai") - @override_settings(REMOTE_OCR_API_KEY="somekey") - @override_settings(REMOTE_OCR_ENDPOINT=None) - def test_azure_no_endpoint(self) -> None: - msgs = check_remote_parser_configured(None) - self.assertEqual(len(msgs), 1) - self.assertTrue( - msgs[0].msg.startswith( - "Azure AI remote parser requires endpoint and API key to be configured.", - ), - ) diff --git a/src/paperless_tesseract/__init__.py b/src/paperless_tesseract/__init__.py deleted file mode 100644 index cc0b886aa..000000000 --- 
a/src/paperless_tesseract/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# this is here so that django finds the checks. -from paperless_tesseract.checks import check_default_language_available -from paperless_tesseract.checks import get_tesseract_langs - -__all__ = ["check_default_language_available", "get_tesseract_langs"] diff --git a/src/paperless_tesseract/apps.py b/src/paperless_tesseract/apps.py deleted file mode 100644 index 8ade88400..000000000 --- a/src/paperless_tesseract/apps.py +++ /dev/null @@ -1,14 +0,0 @@ -from django.apps import AppConfig - -from paperless_tesseract.signals import tesseract_consumer_declaration - - -class PaperlessTesseractConfig(AppConfig): - name = "paperless_tesseract" - - def ready(self) -> None: - from documents.signals import document_consumer_declaration - - document_consumer_declaration.connect(tesseract_consumer_declaration) - - AppConfig.ready(self) diff --git a/src/paperless_tesseract/checks.py b/src/paperless_tesseract/checks.py deleted file mode 100644 index 0d7a1d90d..000000000 --- a/src/paperless_tesseract/checks.py +++ /dev/null @@ -1,52 +0,0 @@ -import shutil -import subprocess - -from django.conf import settings -from django.core.checks import Error -from django.core.checks import Warning -from django.core.checks import register - - -def get_tesseract_langs(): - proc = subprocess.run( - [shutil.which("tesseract"), "--list-langs"], - capture_output=True, - ) - - # Decode bytes to string, split on newlines, trim out the header - proc_lines = proc.stdout.decode("utf8", errors="ignore").strip().split("\n")[1:] - - return [x.strip() for x in proc_lines] - - -@register() -def check_default_language_available(app_configs, **kwargs): - errs = [] - - if not settings.OCR_LANGUAGE: - errs.append( - Warning( - "No OCR language has been specified with PAPERLESS_OCR_LANGUAGE. 
" - "This means that tesseract will fallback to english.", - ), - ) - return errs - - # binaries_check in paperless will check and report if this doesn't exist - # So skip trying to do anything here and let that handle missing binaries - if shutil.which("tesseract") is not None: - installed_langs = get_tesseract_langs() - - specified_langs = [x.strip() for x in settings.OCR_LANGUAGE.split("+")] - - for lang in specified_langs: - if lang not in installed_langs: - errs.append( - Error( - f"The selected ocr language {lang} is " - f"not installed. Paperless cannot OCR your documents " - f"without it. Please fix PAPERLESS_OCR_LANGUAGE.", - ), - ) - - return errs diff --git a/src/paperless_tesseract/signals.py b/src/paperless_tesseract/signals.py deleted file mode 100644 index d80d13614..000000000 --- a/src/paperless_tesseract/signals.py +++ /dev/null @@ -1,34 +0,0 @@ -from __future__ import annotations - -from typing import Any - - -def get_parser(*args: Any, **kwargs: Any) -> Any: - from paperless.parsers.tesseract import RasterisedDocumentParser - - # RasterisedDocumentParser accepts logging_group for constructor compatibility but - # does not store or use it (no legacy DocumentParser base class). - # progress_callback is also not used. Both may arrive as a positional arg - # (consumer) or a keyword arg (views); *args absorbs the positional form, - # kwargs.pop handles the keyword form. Phase 4 will replace this signal - # path with the new ParserRegistry so the shim can be removed at that point. 
- kwargs.pop("logging_group", None) - kwargs.pop("progress_callback", None) - return RasterisedDocumentParser(*args, **kwargs) - - -def tesseract_consumer_declaration(sender: Any, **kwargs: Any) -> dict[str, Any]: - return { - "parser": get_parser, - "weight": 0, - "mime_types": { - "application/pdf": ".pdf", - "image/jpeg": ".jpg", - "image/png": ".png", - "image/tiff": ".tif", - "image/gif": ".gif", - "image/bmp": ".bmp", - "image/webp": ".webp", - "image/heic": ".heic", - }, - } diff --git a/src/paperless_tesseract/tests/__init__.py b/src/paperless_tesseract/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/paperless_tesseract/tests/test_checks.py b/src/paperless_tesseract/tests/test_checks.py deleted file mode 100644 index ab3ba0c16..000000000 --- a/src/paperless_tesseract/tests/test_checks.py +++ /dev/null @@ -1,67 +0,0 @@ -from unittest import mock - -from django.core.checks import ERROR -from django.test import TestCase -from django.test import override_settings - -from paperless_tesseract import check_default_language_available - - -class TestChecks(TestCase): - def test_default_language(self) -> None: - check_default_language_available(None) - - @override_settings(OCR_LANGUAGE="") - def test_no_language(self) -> None: - msgs = check_default_language_available(None) - self.assertEqual(len(msgs), 1) - self.assertTrue( - msgs[0].msg.startswith( - "No OCR language has been specified with PAPERLESS_OCR_LANGUAGE", - ), - ) - - @override_settings(OCR_LANGUAGE="ita") - @mock.patch("paperless_tesseract.checks.get_tesseract_langs") - def test_invalid_language(self, m) -> None: - m.return_value = ["deu", "eng"] - msgs = check_default_language_available(None) - self.assertEqual(len(msgs), 1) - self.assertEqual(msgs[0].level, ERROR) - - @override_settings(OCR_LANGUAGE="chi_sim") - @mock.patch("paperless_tesseract.checks.get_tesseract_langs") - def test_multi_part_language(self, m) -> None: - """ - GIVEN: - - An OCR language which is 
multi part (ie chi-sim) - - The language is correctly formatted - WHEN: - - Installed packages are checked - THEN: - - No errors are reported - """ - m.return_value = ["chi_sim", "eng"] - - msgs = check_default_language_available(None) - - self.assertEqual(len(msgs), 0) - - @override_settings(OCR_LANGUAGE="chi-sim") - @mock.patch("paperless_tesseract.checks.get_tesseract_langs") - def test_multi_part_language_bad_format(self, m) -> None: - """ - GIVEN: - - An OCR language which is multi part (ie chi-sim) - - The language is correctly NOT formatted - WHEN: - - Installed packages are checked - THEN: - - No errors are reported - """ - m.return_value = ["chi_sim", "eng"] - - msgs = check_default_language_available(None) - - self.assertEqual(len(msgs), 1) - self.assertEqual(msgs[0].level, ERROR) diff --git a/src/paperless_text/__init__.py b/src/paperless_text/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/paperless_text/apps.py b/src/paperless_text/apps.py deleted file mode 100644 index 619d71886..000000000 --- a/src/paperless_text/apps.py +++ /dev/null @@ -1,14 +0,0 @@ -from django.apps import AppConfig - -from paperless_text.signals import text_consumer_declaration - - -class PaperlessTextConfig(AppConfig): - name = "paperless_text" - - def ready(self) -> None: - from documents.signals import document_consumer_declaration - - document_consumer_declaration.connect(text_consumer_declaration) - - AppConfig.ready(self) diff --git a/src/paperless_text/signals.py b/src/paperless_text/signals.py deleted file mode 100644 index 916f0a7c0..000000000 --- a/src/paperless_text/signals.py +++ /dev/null @@ -1,29 +0,0 @@ -from __future__ import annotations - -from typing import Any - - -def get_parser(*args: Any, **kwargs: Any) -> Any: - from paperless.parsers.text import TextDocumentParser - - # TextDocumentParser accepts logging_group for constructor compatibility but - # does not store or use it (no legacy DocumentParser base class). 
- # progress_callback is also not used. Both may arrive as a positional arg - # (consumer) or a keyword arg (views); *args absorbs the positional form, - # kwargs.pop handles the keyword form. Phase 4 will replace this signal - # path with the new ParserRegistry so the shim can be removed at that point. - kwargs.pop("logging_group", None) - kwargs.pop("progress_callback", None) - return TextDocumentParser(*args, **kwargs) - - -def text_consumer_declaration(sender: Any, **kwargs: Any) -> dict[str, Any]: - return { - "parser": get_parser, - "weight": 10, - "mime_types": { - "text/plain": ".txt", - "text/csv": ".csv", - "application/csv": ".csv", - }, - } diff --git a/src/paperless_text/tests/__init__.py b/src/paperless_text/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/paperless_tika/__init__.py b/src/paperless_tika/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/paperless_tika/apps.py b/src/paperless_tika/apps.py deleted file mode 100644 index 714a05188..000000000 --- a/src/paperless_tika/apps.py +++ /dev/null @@ -1,15 +0,0 @@ -from django.apps import AppConfig -from django.conf import settings - -from paperless_tika.signals import tika_consumer_declaration - - -class PaperlessTikaConfig(AppConfig): - name = "paperless_tika" - - def ready(self) -> None: - from documents.signals import document_consumer_declaration - - if settings.TIKA_ENABLED: - document_consumer_declaration.connect(tika_consumer_declaration) - AppConfig.ready(self) diff --git a/src/paperless_tika/signals.py b/src/paperless_tika/signals.py deleted file mode 100644 index f1fd17ef6..000000000 --- a/src/paperless_tika/signals.py +++ /dev/null @@ -1,33 +0,0 @@ -def get_parser(*args, **kwargs): - from paperless.parsers.tika import TikaDocumentParser - - # TikaDocumentParser accepts logging_group for constructor compatibility but - # does not store or use it (no legacy DocumentParser base class). 
- # progress_callback is also not used. Both may arrive as a positional arg - # (consumer) or a keyword arg (views); *args absorbs the positional form, - # kwargs.pop handles the keyword form. Phase 4 will replace this signal - # path with the new ParserRegistry so the shim can be removed at that point. - kwargs.pop("logging_group", None) - kwargs.pop("progress_callback", None) - return TikaDocumentParser() - - -def tika_consumer_declaration(sender, **kwargs): - return { - "parser": get_parser, - "weight": 10, - "mime_types": { - "application/msword": ".doc", - "application/vnd.openxmlformats-officedocument.wordprocessingml.document": ".docx", - "application/vnd.ms-excel": ".xls", - "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": ".xlsx", - "application/vnd.ms-powerpoint": ".ppt", - "application/vnd.openxmlformats-officedocument.presentationml.presentation": ".pptx", - "application/vnd.openxmlformats-officedocument.presentationml.slideshow": ".ppsx", - "application/vnd.oasis.opendocument.presentation": ".odp", - "application/vnd.oasis.opendocument.spreadsheet": ".ods", - "application/vnd.oasis.opendocument.text": ".odt", - "application/vnd.oasis.opendocument.graphics": ".odg", - "text/rtf": ".rtf", - }, - }