Mirror of https://github.com/paperless-ngx/paperless-ngx.git (synced 2026-04-06 08:08:51 +00:00)

Compare commits: feature-ar...feature-se (12 commits)
- 0b5b6fdad5
- d98dbd50f4
- 7649e4a6b1
- 610ba27891
- 7c50e0077c
- 288740ea62
- d998d3fbaf
- 6cf01dd383
- 0d915c58a4
- 46008d2da7
- 6768c1e6f8
- 5a94291b79
.gitignore (vendored) — 1 change
@@ -111,4 +111,3 @@ celerybeat-schedule*
# ignore pnpm package store folder created when setting up the devcontainer
.pnpm-store/
.worktrees
@@ -821,14 +821,11 @@ parsing documents.

#### [`PAPERLESS_OCR_MODE=<mode>`](#PAPERLESS_OCR_MODE) {#PAPERLESS_OCR_MODE}

: Tell paperless when and how to perform ocr on your documents. Four
: Tell paperless when and how to perform ocr on your documents. Three
modes are available:

- `auto` (default): Paperless detects whether a document already
  has embedded text via pdftotext. If sufficient text is found,
  OCR is skipped for that document (`--skip-text`). If no text is
  present, OCR runs normally. This is the safest option for mixed
  document collections.
- `skip`: Paperless skips all pages and will perform ocr only on
  pages where no text is present. This is the safest option.

- `redo`: Paperless will OCR all pages of your documents and
  attempt to replace any existing text layers with new text. This

@@ -846,59 +843,24 @@ modes are available:
  significantly larger and text won't appear as sharp when zoomed
  in.

- `off`: Paperless never invokes the OCR engine. For PDFs, text
  is extracted via pdftotext only. For image documents, text will
  be empty. Archive file generation still works via format
  conversion (no Tesseract or Ghostscript required).
The default is `skip`, which only performs OCR when necessary and
always creates archived documents.

The default is `auto`.

For the `skip`, `redo`, and `force` modes, read more about OCR
behaviour in the [OCRmyPDF
Read more about this in the [OCRmyPDF
documentation](https://ocrmypdf.readthedocs.io/en/latest/advanced.html#when-ocr-is-skipped).
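
As an illustration of the `auto` decision above, here is a minimal sketch of the embedded-text probe. It is hedged: `pdftotext` from poppler-utils is assumed to be on PATH, and `MIN_TEXT_LENGTH` (name and value) is an illustrative stand-in, not the threshold paperless uses:

```python
import subprocess

MIN_TEXT_LENGTH = 50  # illustrative threshold, not the paperless value

def has_embedded_text(pdf_path: str) -> bool:
    """Return True when pdftotext finds enough text to skip OCR."""
    # "-" sends the extracted text to stdout instead of a file
    result = subprocess.run(
        ["pdftotext", pdf_path, "-"],
        capture_output=True,
        text=True,
        check=True,
    )
    return len(result.stdout.strip()) > MIN_TEXT_LENGTH

# In `auto` mode, OCR runs only when this probe fails; otherwise the
# document is processed with --skip-text semantics.
```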

#### [`PAPERLESS_ARCHIVE_FILE_GENERATION=<mode>`](#PAPERLESS_ARCHIVE_FILE_GENERATION) {#PAPERLESS_ARCHIVE_FILE_GENERATION}
#### [`PAPERLESS_OCR_SKIP_ARCHIVE_FILE=<mode>`](#PAPERLESS_OCR_SKIP_ARCHIVE_FILE) {#PAPERLESS_OCR_SKIP_ARCHIVE_FILE}

: Controls when paperless creates a PDF/A archive version of your
documents. Archive files are stored alongside the original and are used
for display in the web interface.
: Specify when you would like paperless to skip creating an archived
version of your documents. This is useful if you don't want to have two
almost-identical versions of your documents in the media folder.

- `auto` (default): Produce archives for scanned or image-based
  documents. Skip archive generation for born-digital PDFs that
  already contain embedded text. This is the recommended setting
  for mixed document collections.
- `always`: Always produce a PDF/A archive when the parser
  supports it, regardless of whether the document already has
  text.
- `never`: Never produce an archive. Only the original file is
  stored. Saves disk space but the web viewer will display the
  original file directly.
- `never`: Never skip creating an archived version.
- `with_text`: Skip creating an archived version for documents
  that already have embedded text.
- `always`: Always skip creating an archived version.

**Behaviour by file type and mode** (`auto` column shows the default):

| Document type | `never` | `auto` (default) | `always` |
| -------------------------- | ------- | -------------------------- | -------- |
| Scanned image (TIFF, JPEG) | No | **Yes** | Yes |
| Image-based PDF | No | **Yes** (short/no text, untagged) | Yes |
| Born-digital PDF | No | No (tagged or has embedded text) | Yes |
| Plain text, email, HTML | No | No | No |
| DOCX / ODT (via Tika) | Yes\* | Yes\* | Yes\* |

\* Tika always produces a PDF rendition for display; this counts as
the archive regardless of the setting.
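
Read as code, the `auto` column of the table boils down to a small predicate. A sketch only; it mirrors the table, not the actual implementation, and `tagged_pdf` / `has_text` stand in for the real PDF inspection:

```python
def auto_produces_archive(mime_type: str, *, tagged_pdf: bool, has_text: bool) -> bool:
    if mime_type.startswith("image/"):
        return True  # scanned images always get a PDF/A archive
    if mime_type == "application/pdf":
        # born-digital PDFs (tagged or with embedded text) are skipped
        return not (tagged_pdf or has_text)
    return False  # plain text, email, HTML never get an archive here
```

Tika-based parsers fall outside this predicate: they always produce a PDF rendition, per the footnote above.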

!!! note

    This setting applies to the built-in Tesseract parser. Parsers
    that must always convert documents to PDF for display (e.g. DOCX,
    ODT via Tika) will produce a PDF regardless of this setting.

!!! note

    The **remote OCR parser** (Azure AI) always produces a searchable
    PDF and stores it as the archive copy, regardless of this setting.
    `ARCHIVE_FILE_GENERATION=never` has no effect when the remote
    parser handles a document.

The default is `never`.
#### [`PAPERLESS_OCR_CLEAN=<mode>`](#PAPERLESS_OCR_CLEAN) {#PAPERLESS_OCR_CLEAN}

@@ -123,68 +123,7 @@ Multiple options are combined in a single value:

```
PAPERLESS_DB_OPTIONS="sslmode=require;sslrootcert=/certs/ca.pem;pool.max_size=10"
```

## OCR and Archive File Generation Settings

The settings that control OCR behaviour and archive file generation have been redesigned. The old settings that coupled these two concerns together are **removed** — old values are not silently honoured; a startup warning is logged if any removed variable is still set in your environment.

### Removed settings

| Removed Setting | Replacement |
| ------------------------------------------- | --------------------------------------------------------------------- |
| `PAPERLESS_OCR_MODE=skip` | `PAPERLESS_OCR_MODE=auto` (new default) |
| `PAPERLESS_OCR_MODE=skip_noarchive` | `PAPERLESS_OCR_MODE=auto` + `PAPERLESS_ARCHIVE_FILE_GENERATION=never` |
| `PAPERLESS_OCR_SKIP_ARCHIVE_FILE=never` | `PAPERLESS_ARCHIVE_FILE_GENERATION=always` |
| `PAPERLESS_OCR_SKIP_ARCHIVE_FILE=with_text` | `PAPERLESS_ARCHIVE_FILE_GENERATION=auto` (new default) |
| `PAPERLESS_OCR_SKIP_ARCHIVE_FILE=always` | `PAPERLESS_ARCHIVE_FILE_GENERATION=never` |

### What changed and why

Previously, `OCR_MODE` conflated two independent concerns: whether to run OCR and whether to produce an archive. `skip` meant "skip OCR if text exists, but always produce an archive". `skip_noarchive` meant "skip OCR if text exists, and also skip the archive". This made it impossible to, for example, disable OCR entirely while still producing archives.

The new settings are independent:

- [`PAPERLESS_OCR_MODE`](configuration.md#PAPERLESS_OCR_MODE) controls OCR: `auto` (default), `force`, `redo`, `off`.
- [`PAPERLESS_ARCHIVE_FILE_GENERATION`](configuration.md#PAPERLESS_ARCHIVE_FILE_GENERATION) controls archive production: `auto` (default), `always`, `never`.

### Database configuration

If you changed OCR settings via the admin UI (ApplicationConfiguration), the database values are **migrated automatically** during the upgrade. `mode` values (`skip` / `skip_noarchive`) are mapped to their new equivalents and `skip_archive_file` values are converted to the new `archive_file_generation` field. After upgrading, review the OCR settings in the admin UI to confirm the migrated values match your intent.
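
In effect, the data migration applies a value translation like the following sketch. It is illustrative, not the actual migration code; the mapping follows the table under "Removed settings":

```python
OCR_MODE_MAP = {"skip": "auto", "skip_noarchive": "auto"}
SKIP_ARCHIVE_FILE_MAP = {"never": "always", "with_text": "auto", "always": "never"}

def migrate_ocr_config(mode: str | None, skip_archive_file: str | None):
    new_mode = OCR_MODE_MAP.get(mode, mode)
    new_generation = SKIP_ARCHIVE_FILE_MAP.get(skip_archive_file)
    if mode == "skip_noarchive":
        # skip_noarchive also implied "no archive", so the migration must
        # set the new independent setting accordingly
        new_generation = "never"
    return new_mode, new_generation
```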

### Action required

Remove any `PAPERLESS_OCR_SKIP_ARCHIVE_FILE` variable from your environment. If you relied on `OCR_MODE=skip` or `OCR_MODE=skip_noarchive`, update accordingly:

```bash
# v2: skip OCR when text present, always archive
PAPERLESS_OCR_MODE=skip
# v3: equivalent (auto is the new default)
# No change needed — auto is the default

# v2: skip OCR when text present, skip archive too
PAPERLESS_OCR_MODE=skip_noarchive
# v3: equivalent
PAPERLESS_OCR_MODE=auto
PAPERLESS_ARCHIVE_FILE_GENERATION=never

# v2: always skip archive
PAPERLESS_OCR_SKIP_ARCHIVE_FILE=always
# v3: equivalent
PAPERLESS_ARCHIVE_FILE_GENERATION=never

# v2: skip archive only for born-digital docs
PAPERLESS_OCR_SKIP_ARCHIVE_FILE=with_text
# v3: equivalent (auto is the new default)
PAPERLESS_ARCHIVE_FILE_GENERATION=auto
```

### Remote OCR parser

If you use the **remote OCR parser** (Azure AI), note that it always produces a
searchable PDF and stores it as the archive copy. `ARCHIVE_FILE_GENERATION=never`
has no effect for documents handled by the remote parser — the archive is produced
unconditionally by the remote engine.

# Search Index (Whoosh -> Tantivy)
## Search Index (Whoosh -> Tantivy)

The full-text search backend has been replaced with [Tantivy](https://github.com/quickwit-oss/tantivy).
The index format is incompatible with Whoosh, so **the search index is automatically rebuilt from
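
If the automatic rebuild does not happen (for example after restoring data from a backup), the index can also be rebuilt by hand. A sketch, assuming the `document_index` management command keeps its existing name:

```python
# Equivalent to `python manage.py document_index reindex` in the container.
from django.core.management import call_command

call_command("document_index", "reindex")
```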

@@ -633,11 +633,12 @@ hardware, but a few settings can improve performance:

  consumption, so you might want to lower these settings (example: 2
  workers and 1 thread to always have some computing power left for
  other tasks).
- Keep [`PAPERLESS_OCR_MODE`](configuration.md#PAPERLESS_OCR_MODE) at its default value `auto` and consider
- Keep [`PAPERLESS_OCR_MODE`](configuration.md#PAPERLESS_OCR_MODE) at its default value `skip` and consider
  OCRing your documents before feeding them into Paperless. Some
  scanners are able to do this!
- Set [`PAPERLESS_ARCHIVE_FILE_GENERATION`](configuration.md#PAPERLESS_ARCHIVE_FILE_GENERATION) to `never` to skip archive
  file generation entirely, saving disk space at the cost of in-browser PDF/A viewing.
- Set [`PAPERLESS_OCR_SKIP_ARCHIVE_FILE`](configuration.md#PAPERLESS_OCR_SKIP_ARCHIVE_FILE) to `with_text` to skip archive
  file generation for already OCRed documents, or `always` to skip it
  for all documents.
- If you want to perform OCR on the device, consider using
  `PAPERLESS_OCR_CLEAN=none`. This will speed up OCR times and use
  less memory at the expense of slightly worse OCR results.
docs/superpowers/plans/2026-04-03-search-performance.md (new file, 1176 lines; diff suppressed because it is too large)
docs/superpowers/plans/profiling-baseline.txt (new file, 121 lines)
@@ -0,0 +1,121 @@
============================= test session starts ==============================
platform linux -- Python 3.14.3, pytest-9.0.2, pluggy-1.6.0 -- /home/trenton/Documents/projects/paperless-ngx/.venv/bin/python
cachedir: .pytest_cache
django: version: 5.2.12, settings: paperless.settings (from ini)
rootdir: /home/trenton/Documents/projects/paperless-ngx
configfile: pyproject.toml
plugins: sugar-1.1.1, xdist-3.8.0, cov-7.0.0, httpx-0.36.0, django-4.12.0, Faker-40.8.0, env-1.5.0, time-machine-3.2.0, mock-3.15.1, anyio-4.12.1, rerunfailures-16.1
collecting ... collected 6 items

src/documents/tests/test_search_profiling.py::TestSearchProfilingBaseline::test_profile_relevance_search Creating test database for alias 'default'...

============================================================
Profile: BEFORE — relevance search (no ordering)
============================================================
Wall time: 0.9622s
Queries: 33 (0.0000s)
Memory delta: 16557.2 KiB
Peak memory: 16584.0 KiB

Top 5 allocations:
<frozen importlib._bootstrap_external>:511: size=5480 KiB (+5480 KiB), count=45642 (+45642), average=123 B
/home/trenton/Documents/projects/paperless-ngx/.venv/lib/python3.14/site-packages/fido2/rpid.py:47: size=518 KiB (+518 KiB), count=9769 (+9769), average=54 B
<frozen abc>:106: size=432 KiB (+432 KiB), count=1480 (+1480), average=299 B
/home/trenton/Documents/projects/paperless-ngx/.venv/lib/python3.14/site-packages/langdetect/utils/ngram.py:257: size=391 KiB (+391 KiB), count=6667 (+6667), average=60 B
<frozen importlib._bootstrap>:491: size=284 KiB (+284 KiB), count=2543 (+2543), average=114 B
============================================================

PASSED
src/documents/tests/test_search_profiling.py::TestSearchProfilingBaseline::test_profile_sorted_search
============================================================
Profile: BEFORE — sorted search (ordering=created)
============================================================
Wall time: 0.1320s
Queries: 32 (0.0010s)
Memory delta: 880.8 KiB
Peak memory: 906.8 KiB

Top 5 allocations:
/home/trenton/Documents/projects/paperless-ngx/src/documents/search/_backend.py:575: size=50.1 KiB (+50.1 KiB), count=521 (+521), average=99 B
/home/trenton/.local/share/uv/python/cpython-3.14.3-linux-x86_64-gnu/lib/python3.14/copyreg.py:104: size=49.7 KiB (+49.7 KiB), count=315 (+315), average=162 B
/home/trenton/Documents/projects/paperless-ngx/.venv/lib/python3.14/site-packages/django/db/models/sql/query.py:386: size=38.0 KiB (+38.0 KiB), count=160 (+160), average=243 B
/home/trenton/Documents/projects/paperless-ngx/.venv/lib/python3.14/site-packages/django_filters/filterset.py:209: size=32.0 KiB (+32.0 KiB), count=82 (+82), average=400 B
/home/trenton/Documents/projects/paperless-ngx/.venv/lib/python3.14/site-packages/django_filters/filters.py:158: size=21.4 KiB (+21.4 KiB), count=104 (+104), average=210 B
============================================================

PASSED
src/documents/tests/test_search_profiling.py::TestSearchProfilingBaseline::test_profile_paginated_search
============================================================
Profile: BEFORE — paginated search (page=2, page_size=25)
============================================================
Wall time: 0.1395s
Queries: 32 (0.0000s)
Memory delta: 868.1 KiB
Peak memory: 893.5 KiB

Top 5 allocations:
/home/trenton/Documents/projects/paperless-ngx/src/documents/search/_backend.py:575: size=50.1 KiB (+50.1 KiB), count=521 (+521), average=99 B
/home/trenton/.local/share/uv/python/cpython-3.14.3-linux-x86_64-gnu/lib/python3.14/copyreg.py:104: size=49.2 KiB (+49.2 KiB), count=315 (+315), average=160 B
/home/trenton/Documents/projects/paperless-ngx/.venv/lib/python3.14/site-packages/django/db/models/sql/query.py:386: size=38.1 KiB (+38.1 KiB), count=161 (+161), average=242 B
/home/trenton/Documents/projects/paperless-ngx/.venv/lib/python3.14/site-packages/django_filters/filterset.py:209: size=32.0 KiB (+32.0 KiB), count=82 (+82), average=400 B
/home/trenton/Documents/projects/paperless-ngx/.venv/lib/python3.14/site-packages/django_filters/filters.py:158: size=21.3 KiB (+21.3 KiB), count=104 (+104), average=209 B
============================================================

PASSED
src/documents/tests/test_search_profiling.py::TestSearchProfilingBaseline::test_profile_search_with_selection_data
============================================================
Profile: BEFORE — search with selection_data
============================================================
Wall time: 0.1656s
Queries: 37 (0.0020s)
Memory delta: 926.9 KiB
Peak memory: 1084.3 KiB

Top 5 allocations:
/home/trenton/Documents/projects/paperless-ngx/src/documents/search/_backend.py:575: size=50.1 KiB (+50.1 KiB), count=521 (+521), average=99 B
/home/trenton/.local/share/uv/python/cpython-3.14.3-linux-x86_64-gnu/lib/python3.14/copyreg.py:104: size=49.6 KiB (+49.6 KiB), count=327 (+327), average=155 B
/home/trenton/Documents/projects/paperless-ngx/.venv/lib/python3.14/site-packages/django/db/models/sql/query.py:386: size=38.1 KiB (+38.1 KiB), count=161 (+161), average=242 B
/home/trenton/Documents/projects/paperless-ngx/.venv/lib/python3.14/site-packages/django_filters/filterset.py:209: size=32.0 KiB (+32.0 KiB), count=82 (+82), average=400 B
/home/trenton/Documents/projects/paperless-ngx/.venv/lib/python3.14/site-packages/django/db/backends/sqlite3/operations.py:193: size=27.1 KiB (+27.1 KiB), count=37 (+37), average=751 B
============================================================

PASSED
src/documents/tests/test_search_profiling.py::TestSearchProfilingBaseline::test_profile_backend_search_only
============================================================
Profile: BEFORE — backend.search(page_size=10000, all highlights)
============================================================
Wall time: 0.0175s
Queries: 0 (0.0000s)
Memory delta: 88.6 KiB
Peak memory: 100.3 KiB

Top 5 allocations:
/home/trenton/Documents/projects/paperless-ngx/src/documents/search/_backend.py:575: size=51.2 KiB (+51.2 KiB), count=530 (+530), average=99 B
/home/trenton/Documents/projects/paperless-ngx/src/documents/search/_backend.py:557: size=17.8 KiB (+17.8 KiB), count=200 (+200), average=91 B
/home/trenton/Documents/projects/paperless-ngx/src/documents/search/_backend.py:542: size=8576 B (+8576 B), count=134 (+134), average=64 B
/home/trenton/Documents/projects/paperless-ngx/src/documents/search/_backend.py:522: size=4800 B (+4800 B), count=200 (+200), average=24 B
/home/trenton/Documents/projects/paperless-ngx/src/documents/search/_backend.py:515: size=2376 B (+2376 B), count=99 (+99), average=24 B
============================================================

PASSED
src/documents/tests/test_search_profiling.py::TestSearchProfilingBaseline::test_profile_backend_search_single_page
============================================================
Profile: BEFORE — backend.search(page_size=25)
============================================================
Wall time: 0.0070s
Queries: 0 (0.0000s)
Memory delta: 5.9 KiB
Peak memory: 11.3 KiB

Top 5 allocations:
/home/trenton/Documents/projects/paperless-ngx/src/documents/search/_backend.py:557: size=2275 B (+2275 B), count=25 (+25), average=91 B
/home/trenton/Documents/projects/paperless-ngx/src/documents/search/_backend.py:575: size=1600 B (+1600 B), count=25 (+25), average=64 B
/home/trenton/.local/share/uv/python/cpython-3.14.3-linux-x86_64-gnu/lib/python3.14/weakref.py:73: size=1280 B (+1280 B), count=20 (+20), average=64 B
/home/trenton/Documents/projects/paperless-ngx/src/documents/search/_backend.py:574: size=256 B (+256 B), count=1 (+1), average=256 B
/home/trenton/.local/share/uv/python/cpython-3.14.3-linux-x86_64-gnu/lib/python3.14/tracemalloc.py:560: size=240 B (+240 B), count=1 (+1), average=240 B
============================================================

PASSED
Destroying test database for alias 'default'...

======================== 6 passed in 241.83s (0:04:01) =========================
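
Reports in the format above can be produced with a small harness around `tracemalloc`. The sketch below is illustrative and is not the code in `test_search_profiling.py` (query counting, done there via Django, e.g. `django.test.utils.CaptureQueriesContext`, is omitted):

```python
import time
import tracemalloc

def profile(label, fn):
    """Run fn() once and print a wall-time / memory report for it."""
    tracemalloc.start()
    start = time.perf_counter()
    fn()
    wall = time.perf_counter() - start
    current, peak = tracemalloc.get_traced_memory()
    snapshot = tracemalloc.take_snapshot()
    tracemalloc.stop()
    print("=" * 60)
    print(f"Profile: {label}")
    print(f"Wall time: {wall:.4f}s")
    print(f"Memory delta: {current / 1024:.1f} KiB")
    print(f"Peak memory: {peak / 1024:.1f} KiB")
    print("Top 5 allocations:")
    for stat in snapshot.statistics("lineno")[:5]:
        print(f"  {stat}")
    print("=" * 60)
```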

@@ -134,9 +134,9 @@ following operations on your documents:

!!! tip

    This process can be configured to fit your needs. If you don't want
    paperless to create archived versions for born-digital documents, set
    [`PAPERLESS_ARCHIVE_FILE_GENERATION=auto`](configuration.md#PAPERLESS_ARCHIVE_FILE_GENERATION)
    (the default). To skip archives entirely, use `never`. Please read the
    paperless to create archived versions for digital documents, you can
    configure that by configuring
    `PAPERLESS_OCR_SKIP_ARCHIVE_FILE=with_text`. Please read the
    [relevant section in the documentation](configuration.md#ocr).

!!! note
@@ -11,16 +11,16 @@ export enum OutputTypeConfig {
}

export enum ModeConfig {
  AUTO = 'auto',
  FORCE = 'force',
  SKIP = 'skip',
  REDO = 'redo',
  OFF = 'off',
  FORCE = 'force',
  SKIP_NO_ARCHIVE = 'skip_noarchive',
}

export enum ArchiveFileConfig {
  AUTO = 'auto',
  ALWAYS = 'always',
  NEVER = 'never',
  WITH_TEXT = 'with_text',
  ALWAYS = 'always',
}

export enum CleanConfig {

@@ -115,11 +115,11 @@ export const PaperlessConfigOptions: ConfigOption[] = [
    category: ConfigCategory.OCR,
  },
  {
    key: 'archive_file_generation',
    title: $localize`Archive File Generation`,
    key: 'skip_archive_file',
    title: $localize`Skip Archive File`,
    type: ConfigOptionType.Select,
    choices: mapToItems(ArchiveFileConfig),
    config_key: 'PAPERLESS_ARCHIVE_FILE_GENERATION',
    config_key: 'PAPERLESS_OCR_SKIP_ARCHIVE_FILE',
    category: ConfigCategory.OCR,
  },
  {

@@ -337,7 +337,7 @@ export interface PaperlessConfig extends ObjectWithId {
  pages: number
  language: string
  mode: ModeConfig
  archive_file_generation: ArchiveFileConfig
  skip_archive_file: ArchiveFileConfig
  image_dpi: number
  unpaper_clean: CleanConfig
  deskew: boolean
@@ -1,5 +1,4 @@
import datetime
import logging
import os
import shutil
import tempfile

@@ -51,14 +50,9 @@ from documents.utils import compute_checksum
from documents.utils import copy_basic_file_stats
from documents.utils import copy_file_with_basic_stats
from documents.utils import run_subprocess
from paperless.config import OcrConfig
from paperless.models import ArchiveFileGenerationChoices
from paperless.parsers import ParserContext
from paperless.parsers import ParserProtocol
from paperless.parsers.registry import get_parser_registry
from paperless.parsers.utils import PDF_TEXT_MIN_LENGTH
from paperless.parsers.utils import extract_pdf_text
from paperless.parsers.utils import is_tagged_pdf

LOGGING_NAME: Final[str] = "paperless.consumer"

@@ -111,74 +105,6 @@ class ConsumerStatusShortMessage(StrEnum):
    FAILED = "failed"


def should_produce_archive(
    parser: "ParserProtocol",
    mime_type: str,
    document_path: Path,
    log: logging.Logger | None = None,
) -> bool:
    """Return True if a PDF/A archive should be produced for this document.

    IMPORTANT: *parser* must be an instantiated parser, not the class.
    ``requires_pdf_rendition`` and ``can_produce_archive`` are instance
    ``@property`` methods — accessing them on the class returns the descriptor
    (always truthy).
    """
    _log = log or logging.getLogger(LOGGING_NAME)

    # Must produce a PDF so the frontend can display the original format at all.
    if parser.requires_pdf_rendition:
        _log.debug("Archive: yes — parser requires PDF rendition for frontend display")
        return True

    # Parser cannot produce an archive (e.g. TextDocumentParser).
    if not parser.can_produce_archive:
        _log.debug("Archive: no — parser cannot produce archives")
        return False

    generation = OcrConfig().archive_file_generation

    if generation == ArchiveFileGenerationChoices.ALWAYS:
        _log.debug("Archive: yes — ARCHIVE_FILE_GENERATION=always")
        return True
    if generation == ArchiveFileGenerationChoices.NEVER:
        _log.debug("Archive: no — ARCHIVE_FILE_GENERATION=never")
        return False

    # auto: produce archives for scanned/image documents; skip for born-digital PDFs.
    if mime_type.startswith("image/"):
        _log.debug("Archive: yes — image document, ARCHIVE_FILE_GENERATION=auto")
        return True
    if mime_type == "application/pdf":
        if is_tagged_pdf(document_path):
            _log.debug(
                "Archive: no — born-digital PDF (structure tags detected),"
                " ARCHIVE_FILE_GENERATION=auto",
            )
            return False
        text = extract_pdf_text(document_path)
        if text is None or len(text) <= PDF_TEXT_MIN_LENGTH:
            _log.debug(
                "Archive: yes — scanned PDF (text_length=%d ≤ %d),"
                " ARCHIVE_FILE_GENERATION=auto",
                len(text) if text else 0,
                PDF_TEXT_MIN_LENGTH,
            )
            return True
        _log.debug(
            "Archive: no — born-digital PDF (text_length=%d > %d),"
            " ARCHIVE_FILE_GENERATION=auto",
            len(text),
            PDF_TEXT_MIN_LENGTH,
        )
        return False
    _log.debug(
        "Archive: no — MIME type %r not eligible for auto archive generation",
        mime_type,
    )
    return False
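
# Aside (illustrative sketch, not module code): why *parser* must be an
# instance. A @property accessed on the class returns the property object
# itself, which is always truthy:
#
#     class P:
#         @property
#         def can_produce_archive(self) -> bool:
#             return False
#
#     bool(P.can_produce_archive)    # True, the descriptor itself
#     bool(P().can_produce_archive)  # False, the actual value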

class ConsumerPluginMixin:
    if TYPE_CHECKING:
        from logging import Logger

@@ -510,17 +436,7 @@ class ConsumerPlugin(
        )
        self.log.debug(f"Parsing {self.filename}...")

        produce_archive = should_produce_archive(
            document_parser,
            mime_type,
            self.working_copy,
            self.log,
        )
        document_parser.parse(
            self.working_copy,
            mime_type,
            produce_archive=produce_archive,
        )
        document_parser.parse(self.working_copy, mime_type)

        self.log.debug(f"Generating thumbnail for {self.filename}...")
        self._send_progress(

@@ -869,7 +785,7 @@ class ConsumerPlugin(

        return document

    def apply_overrides(self, document: Document) -> None:
    def apply_overrides(self, document) -> None:
        if self.metadata.correspondent_id:
            document.correspondent = Correspondent.objects.get(
                pk=self.metadata.correspondent_id,
@@ -1,3 +1,4 @@
from documents.search._backend import SearchHit
from documents.search._backend import SearchIndexLockError
from documents.search._backend import SearchMode
from documents.search._backend import SearchResults

@@ -10,6 +11,7 @@ from documents.search._schema import needs_rebuild
from documents.search._schema import wipe_index

__all__ = [
    "SearchHit",
    "SearchIndexLockError",
    "SearchMode",
    "SearchResults",
@@ -106,27 +106,61 @@ class SearchResults:

class TantivyRelevanceList:
    """
    DRF-compatible list wrapper for Tantivy search hits.
    DRF-compatible list wrapper for Tantivy search results.

    Provides paginated access to search results while storing all hits in memory
    for efficient ID retrieval. Used by Django REST framework for pagination.
    Holds a lightweight ordered list of IDs (for pagination count and
    ``selection_data``) together with a small page of rich ``SearchHit``
    dicts (for serialization). DRF's ``PageNumberPagination`` calls
    ``__len__`` to compute the total page count and ``__getitem__`` to
    slice the displayed page.

    Methods:
        __len__: Returns total hit count for pagination calculations
        __getitem__: Slices the hit list for page-specific results

    Note: Stores ALL post-filter hits so get_all_result_ids() can return
    every matching document ID without requiring a second search query.
    Args:
        ordered_ids: All matching document IDs in display order.
        page_hits: Rich SearchHit dicts for the requested DRF page only.
        page_offset: Index into *ordered_ids* where *page_hits* starts.
    """

    def __init__(self, hits: list[SearchHit]) -> None:
        self._hits = hits
    def __init__(
        self,
        ordered_ids: list[int],
        page_hits: list[SearchHit],
        page_offset: int = 0,
    ) -> None:
        self._ordered_ids = ordered_ids
        self._page_hits = page_hits
        self._page_offset = page_offset

    def __len__(self) -> int:
        return len(self._hits)
        return len(self._ordered_ids)

    def __getitem__(self, key: slice) -> list[SearchHit]:
        return self._hits[key]
    def __getitem__(self, key: int | slice) -> SearchHit | list[SearchHit]:
        if isinstance(key, int):
            idx = key if key >= 0 else len(self._ordered_ids) + key
            if self._page_offset <= idx < self._page_offset + len(self._page_hits):
                return self._page_hits[idx - self._page_offset]
            return SearchHit(
                id=self._ordered_ids[key],
                score=0.0,
                rank=idx + 1,
                highlights={},
            )
        start = key.start or 0
        stop = key.stop or len(self._ordered_ids)
        # DRF slices to extract the current page. If the slice aligns
        # with our pre-fetched page_hits, return them directly.
        if start == self._page_offset and stop <= self._page_offset + len(
            self._page_hits,
        ):
            return self._page_hits[: stop - start]
        # Fallback: return stub dicts (no highlights).
        return [
            SearchHit(id=doc_id, score=0.0, rank=start + i + 1, highlights={})
            for i, doc_id in enumerate(self._ordered_ids[key])
        ]

    def get_all_ids(self) -> list[int]:
        """Return all matching document IDs in display order."""
        return self._ordered_ids
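
    # Illustrative use (sketch): 100 matches, DRF page 2 with page_size=25
    # pre-fetched, so page_offset=25.
    #
    #     lst = TantivyRelevanceList(ordered_ids, page_hits, page_offset=25)
    #     len(lst)      # 100, total count for PageNumberPagination
    #     lst[25:50]    # the 25 rich SearchHit dicts (with highlights)
    #     lst[0]        # stub SearchHit outside the pre-fetched window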

class SearchIndexLockError(Exception):

@@ -234,6 +268,34 @@ class TantivyBackend:
    the underlying index directory changes (e.g., during test isolation).
    """

    # Maps DRF ordering field names to Tantivy index field names.
    SORT_FIELD_MAP: dict[str, str] = {
        "title": "title_sort",
        "correspondent__name": "correspondent_sort",
        "document_type__name": "type_sort",
        "created": "created",
        "added": "added",
        "modified": "modified",
        "archive_serial_number": "asn",
        "page_count": "page_count",
        "num_notes": "num_notes",
    }

    # Fields where Tantivy's sort order matches the ORM's sort order.
    # Text-based fields (title, correspondent__name, document_type__name)
    # are excluded because Tantivy's tokenized fast fields produce different
    # ordering than the ORM's collation-based ordering.
    SORTABLE_FIELDS: frozenset[str] = frozenset(
        {
            "created",
            "added",
            "modified",
            "archive_serial_number",
            "page_count",
            "num_notes",
        },
    )

    def __init__(self, path: Path | None = None):
        # path=None → in-memory index (for tests)
        # path=some_dir → on-disk index (for production)

@@ -272,6 +334,36 @@ class TantivyBackend:
        if self._index is None:
            self.open()  # pragma: no cover
    def _parse_query(
        self,
        query: str,
        search_mode: SearchMode,
    ) -> tantivy.Query:
        """Parse a user query string into a Tantivy Query object."""
        tz = get_current_timezone()
        if search_mode is SearchMode.TEXT:
            return parse_simple_text_query(self._index, query)
        elif search_mode is SearchMode.TITLE:
            return parse_simple_title_query(self._index, query)
        else:
            return parse_user_query(self._index, query, tz)

    def _apply_permission_filter(
        self,
        query: tantivy.Query,
        user: AbstractBaseUser | None,
    ) -> tantivy.Query:
        """Wrap a query with a permission filter if the user is not a superuser."""
        if user is not None:
            permission_filter = build_permission_filter(self._schema, user)
            return tantivy.Query.boolean_query(
                [
                    (tantivy.Occur.Must, query),
                    (tantivy.Occur.Must, permission_filter),
                ],
            )
        return query

    def _build_tantivy_doc(
        self,
        document: Document,

@@ -435,6 +527,8 @@ class TantivyBackend:
        *,
        sort_reverse: bool,
        search_mode: SearchMode = SearchMode.QUERY,
        highlight_page: int | None = None,
        highlight_page_size: int | None = None,
    ) -> SearchResults:
        """
        Execute a search query against the document index.

@@ -462,45 +556,15 @@
            SearchResults with hits, total count, and processed query
        """
        self._ensure_open()
        tz = get_current_timezone()
        if search_mode is SearchMode.TEXT:
            user_query = parse_simple_text_query(self._index, query)
        elif search_mode is SearchMode.TITLE:
            user_query = parse_simple_title_query(self._index, query)
        else:
            user_query = parse_user_query(self._index, query, tz)

        # Apply permission filter if user is not None (not superuser)
        if user is not None:
            permission_filter = build_permission_filter(self._schema, user)
            final_query = tantivy.Query.boolean_query(
                [
                    (tantivy.Occur.Must, user_query),
                    (tantivy.Occur.Must, permission_filter),
                ],
            )
        else:
            final_query = user_query
        user_query = self._parse_query(query, search_mode)
        final_query = self._apply_permission_filter(user_query, user)

        searcher = self._index.searcher()
        offset = (page - 1) * page_size

        # Map sort fields
        sort_field_map = {
            "title": "title_sort",
            "correspondent__name": "correspondent_sort",
            "document_type__name": "type_sort",
            "created": "created",
            "added": "added",
            "modified": "modified",
            "archive_serial_number": "asn",
            "page_count": "page_count",
            "num_notes": "num_notes",
        }

        # Perform search
        if sort_field and sort_field in sort_field_map:
            mapped_field = sort_field_map[sort_field]
        if sort_field and sort_field in self.SORT_FIELD_MAP:
            mapped_field = self.SORT_FIELD_MAP[sort_field]
            results = searcher.search(
                final_query,
                limit=offset + page_size,

@@ -534,6 +598,15 @@
        snippet_generator = None
        notes_snippet_generator = None

        # Determine which hits need highlights
        if highlight_page is not None and highlight_page_size is not None:
            hl_start = (highlight_page - 1) * highlight_page_size
            hl_end = hl_start + highlight_page_size
        else:
            # Highlight all hits (backward-compatible default)
            hl_start = 0
            hl_end = len(page_hits)

        for rank, (doc_address, score) in enumerate(page_hits, start=offset + 1):
            # Get the actual document from the searcher using the doc address
            actual_doc = searcher.doc(doc_address)

@@ -542,8 +615,9 @@

            highlights: dict[str, str] = {}

            # Generate highlights if score > 0
            if score > 0:
            # Generate highlights if score > 0 and hit is in the highlight window
            hit_index = rank - offset - 1  # 0-based index within page_hits
            if score > 0 and hl_start <= hit_index < hl_end:
                try:
                    if snippet_generator is None:
                        snippet_generator = tantivy.SnippetGenerator.create(

@@ -590,6 +664,160 @@
            query=query,
        )
    def highlight_hits(
        self,
        query: str,
        doc_ids: list[int],
        *,
        search_mode: SearchMode = SearchMode.QUERY,
    ) -> list[SearchHit]:
        """
        Generate SearchHit dicts with highlights for specific document IDs.

        Unlike search(), this does not execute a ranked query — it looks up
        each document by ID and generates snippets against the provided query.
        Use this when you already know which documents to display (from
        search_ids + ORM filtering) and just need highlight data.

        Note: Each doc_id requires an individual index lookup because tantivy-py
        does not expose a batch doc-address-by-ID API. This is acceptable for
        page-sized batches (typically 25 docs) but should not be called with
        thousands of IDs.

        Args:
            query: The search query (used for snippet generation)
            doc_ids: Ordered list of document IDs to generate hits for
            search_mode: Query parsing mode (for building the snippet query)

        Returns:
            List of SearchHit dicts in the same order as doc_ids
        """
        if not doc_ids:
            return []

        self._ensure_open()
        user_query = self._parse_query(query, search_mode)

        searcher = self._index.searcher()
        snippet_generator = None
        hits: list[SearchHit] = []

        for rank, doc_id in enumerate(doc_ids, start=1):
            # Look up document by ID
            id_query = tantivy.Query.range_query(
                self._schema,
                "id",
                tantivy.FieldType.Unsigned,
                doc_id,
                doc_id,
            )
            results = searcher.search(id_query, limit=1)

            if not results.hits:
                continue

            doc_address = results.hits[0][1]
            actual_doc = searcher.doc(doc_address)
            doc_dict = actual_doc.to_dict()

            highlights: dict[str, str] = {}
            try:
                if snippet_generator is None:
                    snippet_generator = tantivy.SnippetGenerator.create(
                        searcher,
                        user_query,
                        self._schema,
                        "content",
                    )

                content_snippet = snippet_generator.snippet_from_doc(actual_doc)
                if content_snippet:
                    highlights["content"] = str(content_snippet)

                if "notes" in doc_dict:
                    notes_generator = tantivy.SnippetGenerator.create(
                        searcher,
                        user_query,
                        self._schema,
                        "notes",
                    )
                    notes_snippet = notes_generator.snippet_from_doc(actual_doc)
                    if notes_snippet:
                        highlights["notes"] = str(notes_snippet)

            except Exception:  # pragma: no cover
                logger.debug("Failed to generate highlights for doc %s", doc_id)

            hits.append(
                SearchHit(
                    id=doc_id,
                    score=0.0,
                    rank=rank,
                    highlights=highlights,
                ),
            )

        return hits

    def search_ids(
        self,
        query: str,
        user: AbstractBaseUser | None,
        *,
        sort_field: str | None = None,
        sort_reverse: bool = False,
        search_mode: SearchMode = SearchMode.QUERY,
        limit: int | None = None,
    ) -> list[int]:
        """
        Return document IDs matching a query — no highlights or scores.

        This is the lightweight companion to search(). Use it when you need the
        full set of matching IDs (e.g. for ``selection_data``) but don't need
        scores, ranks, or highlights.

        Args:
            query: User's search query
            user: User for permission filtering (None for superuser/no filtering)
            sort_field: Field to sort by (None for relevance ranking)
            sort_reverse: Whether to reverse the sort order
            search_mode: Query parsing mode (QUERY, TEXT, or TITLE)
            limit: Maximum number of IDs to return (None = all matching docs)

        Returns:
            List of document IDs in the requested order
        """
        self._ensure_open()
        user_query = self._parse_query(query, search_mode)
        final_query = self._apply_permission_filter(user_query, user)

        searcher = self._index.searcher()
        effective_limit = limit if limit is not None else searcher.num_docs

        if sort_field and sort_field in self.SORT_FIELD_MAP:
            mapped_field = self.SORT_FIELD_MAP[sort_field]
            results = searcher.search(
                final_query,
                limit=effective_limit,
                order_by_field=mapped_field,
                order=tantivy.Order.Desc if sort_reverse else tantivy.Order.Asc,
            )
            all_hits = [(hit[1],) for hit in results.hits]
        else:
            results = searcher.search(final_query, limit=effective_limit)
            all_hits = [(hit[1], hit[0]) for hit in results.hits]

            # Normalize scores and apply threshold (relevance search only)
            if all_hits:
                max_score = max(hit[1] for hit in all_hits) or 1.0
                all_hits = [(hit[0], hit[1] / max_score) for hit in all_hits]

                threshold = settings.ADVANCED_FUZZY_SEARCH_THRESHOLD
                if threshold is not None:
                    all_hits = [hit for hit in all_hits if hit[1] >= threshold]

        return [searcher.doc(doc_addr).to_dict()["id"][0] for doc_addr, *_ in all_hits]
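
    # Typical pairing (sketch): fetch the full ID set once, then generate
    # highlights only for the page being displayed.
    #
    #     ids = backend.search_ids("invoice 2023", user=request.user)
    #     page_ids = ids[offset : offset + page_size]
    #     page_hits = backend.highlight_hits("invoice 2023", page_ids)
    #     results = TantivyRelevanceList(ids, page_hits, page_offset=offset)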

    def autocomplete(
        self,
        term: str,

@@ -623,7 +851,7 @@ class TantivyBackend:
        else:
            base_query = tantivy.Query.all_query()

        results = searcher.search(base_query, limit=10000)
        results = searcher.search(base_query, limit=searcher.num_docs)

        # Count how many visible documents each word appears in.
        # Using Counter (not set) preserves per-word document frequency so

@@ -699,17 +927,7 @@ class TantivyBackend:
            boost_factor=None,
        )

        # Apply permission filter
        if user is not None:
            permission_filter = build_permission_filter(self._schema, user)
            final_query = tantivy.Query.boolean_query(
                [
                    (tantivy.Occur.Must, mlt_query),
                    (tantivy.Occur.Must, permission_filter),
                ],
            )
        else:
            final_query = mlt_query
        final_query = self._apply_permission_filter(mlt_query, user)

        # Search
        offset = (page - 1) * page_size

@@ -753,6 +971,66 @@ class TantivyBackend:
            query=f"more_like:{doc_id}",
        )

    def more_like_this_ids(
        self,
        doc_id: int,
        user: AbstractBaseUser | None,
        *,
        limit: int | None = None,
    ) -> list[int]:
        """
        Return IDs of documents similar to the given document — no highlights.

        Lightweight companion to more_like_this(). The original document is
        excluded from results.

        Args:
            doc_id: Primary key of the reference document
            user: User for permission filtering (None for no filtering)
            limit: Maximum number of IDs to return (None = all matching docs)

        Returns:
            List of similar document IDs (excluding the original)
        """
        self._ensure_open()
        searcher = self._index.searcher()

        id_query = tantivy.Query.range_query(
            self._schema,
            "id",
            tantivy.FieldType.Unsigned,
            doc_id,
            doc_id,
        )
        results = searcher.search(id_query, limit=1)

        if not results.hits:
            return []

        doc_address = results.hits[0][1]
        mlt_query = tantivy.Query.more_like_this_query(
            doc_address,
            min_doc_frequency=1,
            max_doc_frequency=None,
            min_term_frequency=1,
            max_query_terms=12,
            min_word_length=None,
            max_word_length=None,
            boost_factor=None,
        )

        final_query = self._apply_permission_filter(mlt_query, user)

        effective_limit = limit if limit is not None else searcher.num_docs
        results = searcher.search(final_query, limit=effective_limit)

        ids = []
        for _score, doc_address in results.hits:
            result_doc_id = searcher.doc(doc_address).to_dict()["id"][0]
            if result_doc_id != doc_id:
                ids.append(result_doc_id)
        return ids
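
    # Sketch: IDs for a "similar documents" panel, permission-filtered and
    # capped (the limit value is an assumption about a sensible panel size).
    #
    #     similar = backend.more_like_this_ids(doc_id=42, user=request.user, limit=10)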

    def batch_update(self, lock_timeout: float = 30.0) -> WriteBatch:
        """
        Get a batch context manager for bulk index operations.

@@ -30,7 +30,6 @@ from documents.consumer import AsnCheckPlugin
from documents.consumer import ConsumerPlugin
from documents.consumer import ConsumerPreflightPlugin
from documents.consumer import WorkflowTriggerPlugin
from documents.consumer import should_produce_archive
from documents.data_models import ConsumableDocument
from documents.data_models import DocumentMetadataOverrides
from documents.double_sided import CollatePlugin

@@ -312,16 +311,7 @@ def update_document_content_maybe_archive_file(document_id) -> None:
    parser.configure(ParserContext())

    try:
        produce_archive = should_produce_archive(
            parser,
            mime_type,
            document.source_path,
        )
        parser.parse(
            document.source_path,
            mime_type,
            produce_archive=produce_archive,
        )
        parser.parse(document.source_path, mime_type)

        thumbnail = parser.get_thumbnail(document.source_path, mime_type)
@@ -428,6 +428,162 @@ class TestSearch:
            == 0
        )

    def test_highlight_page_only_highlights_requested_slice(
        self,
        backend: TantivyBackend,
    ):
        """Only hits in the highlight_page slice should have non-empty highlights."""
        for i in range(6):
            doc = Document.objects.create(
                title=f"highlight doc {i}",
                content=f"searchable highlight content number {i}",
                checksum=f"HP{i}",
            )
            backend.add_or_update(doc)

        r = backend.search(
            "searchable",
            user=None,
            page=1,
            page_size=10000,
            sort_field=None,
            sort_reverse=False,
            highlight_page=1,
            highlight_page_size=3,
        )
        assert r.total == 6
        assert len(r.hits) == 6
        highlighted = [h for h in r.hits if h["highlights"]]
        not_highlighted = [h for h in r.hits if not h["highlights"]]
        assert len(highlighted) == 3
        assert len(not_highlighted) == 3

    def test_highlight_page_2_highlights_correct_slice(self, backend: TantivyBackend):
        """highlight_page=2 should highlight only the second page of results."""
        for i in range(6):
            doc = Document.objects.create(
                title=f"page2 doc {i}",
                content=f"searchable page2 content number {i}",
                checksum=f"HP2{i}",
            )
            backend.add_or_update(doc)

        r = backend.search(
            "searchable",
            user=None,
            page=1,
            page_size=10000,
            sort_field=None,
            sort_reverse=False,
            highlight_page=2,
            highlight_page_size=2,
        )
        assert r.total == 6
        assert len(r.hits) == 6
        highlighted = [h for h in r.hits if h["highlights"]]
        not_highlighted = [h for h in r.hits if not h["highlights"]]
        # Only 2 hits (the second page) should have highlights
        assert len(highlighted) == 2
        assert len(not_highlighted) == 4

    def test_no_highlight_page_highlights_all(self, backend: TantivyBackend):
        """When highlight_page is not specified, all hits get highlights (backward compat)."""
        for i in range(3):
            doc = Document.objects.create(
                title=f"compat doc {i}",
                content=f"searchable compat content {i}",
                checksum=f"HC{i}",
            )
            backend.add_or_update(doc)

        r = backend.search(
            "searchable",
            user=None,
            page=1,
            page_size=10000,
            sort_field=None,
            sort_reverse=False,
        )
        assert len(r.hits) == 3
        for hit in r.hits:
            assert "content" in hit["highlights"]

class TestSearchIds:
    """Test lightweight ID-only search."""

    def test_returns_matching_ids(self, backend: TantivyBackend):
        """search_ids must return IDs of all matching documents."""
        docs = []
        for i in range(5):
            doc = Document.objects.create(
                title=f"findable doc {i}",
                content="common keyword",
                checksum=f"SI{i}",
            )
            backend.add_or_update(doc)
            docs.append(doc)
        other = Document.objects.create(
            title="unrelated",
            content="nothing here",
            checksum="SI_other",
        )
        backend.add_or_update(other)

        ids = backend.search_ids(
            "common keyword",
            user=None,
            search_mode=SearchMode.QUERY,
        )
        assert set(ids) == {d.pk for d in docs}
        assert other.pk not in ids

    def test_respects_permission_filter(self, backend: TantivyBackend):
        """search_ids must respect user permission filtering."""
        owner = User.objects.create_user("ids_owner")
        other = User.objects.create_user("ids_other")
        doc = Document.objects.create(
            title="private doc",
            content="secret keyword",
            checksum="SIP1",
            owner=owner,
        )
        backend.add_or_update(doc)

        assert backend.search_ids(
            "secret",
            user=owner,
            search_mode=SearchMode.QUERY,
        ) == [doc.pk]
        assert (
            backend.search_ids("secret", user=other, search_mode=SearchMode.QUERY) == []
        )

    def test_respects_fuzzy_threshold(self, backend: TantivyBackend, settings):
        """search_ids must apply the same fuzzy threshold as search()."""
        doc = Document.objects.create(
            title="threshold test",
            content="unique term",
            checksum="SIT1",
        )
        backend.add_or_update(doc)

        settings.ADVANCED_FUZZY_SEARCH_THRESHOLD = 1.1
        ids = backend.search_ids("unique", user=None, search_mode=SearchMode.QUERY)
        assert ids == []

    def test_returns_ids_for_text_mode(self, backend: TantivyBackend):
        """search_ids must work with TEXT search mode."""
        doc = Document.objects.create(
            title="text mode doc",
            content="findable phrase",
            checksum="SIM1",
        )
        backend.add_or_update(doc)

        ids = backend.search_ids("findable", user=None, search_mode=SearchMode.TEXT)
        assert ids == [doc.pk]

class TestRebuild:
    """Test index rebuilding functionality."""

@@ -542,6 +698,27 @@ class TestMoreLikeThis:
        assert results.hits == []
        assert results.total == 0

    def test_more_like_this_ids_excludes_original(self, backend: TantivyBackend):
        """more_like_this_ids must return IDs of similar documents, excluding the original."""
        doc1 = Document.objects.create(
            title="Important document",
            content="financial information report",
            checksum="MLTI1",
            pk=150,
        )
        doc2 = Document.objects.create(
            title="Another document",
            content="financial information report",
            checksum="MLTI2",
            pk=151,
        )
        backend.add_or_update(doc1)
        backend.add_or_update(doc2)

        ids = backend.more_like_this_ids(doc_id=150, user=None)
        assert 150 not in ids
        assert 151 in ids


class TestSingleton:
    """Test get_backend() and reset_backend() singleton lifecycle."""
@@ -46,7 +46,7 @@ class TestApiAppConfig(DirectoriesMixin, APITestCase):
    "pages": None,
    "language": None,
    "mode": None,
    "archive_file_generation": None,
    "skip_archive_file": None,
    "image_dpi": None,
    "unpaper_clean": None,
    "deskew": None,
@@ -1503,6 +1503,89 @@ class TestDocumentSearchApi(DirectoriesMixin, APITestCase):
            [d2.id, d1.id, d3.id],
        )

    def test_search_with_tantivy_native_sort(self) -> None:
        """When ordering by a Tantivy-sortable field, results must be correctly sorted."""
        backend = get_backend()
        for i, asn in enumerate([30, 10, 20]):
            doc = Document.objects.create(
                title=f"sortable doc {i}",
                content="searchable content",
                checksum=f"TNS{i}",
                archive_serial_number=asn,
            )
            backend.add_or_update(doc)

        response = self.client.get(
            "/api/documents/?query=searchable&ordering=archive_serial_number",
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        asns = [doc["archive_serial_number"] for doc in response.data["results"]]
        self.assertEqual(asns, [10, 20, 30])

        response = self.client.get(
            "/api/documents/?query=searchable&ordering=-archive_serial_number",
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        asns = [doc["archive_serial_number"] for doc in response.data["results"]]
        self.assertEqual(asns, [30, 20, 10])

    def test_search_page_2_returns_correct_slice(self) -> None:
        """Page 2 must return the second slice, not overlap with page 1."""
        backend = get_backend()
        for i in range(10):
            doc = Document.objects.create(
                title=f"doc {i}",
                content="paginated content",
                checksum=f"PG2{i}",
                archive_serial_number=i + 1,
            )
            backend.add_or_update(doc)

        response = self.client.get(
            "/api/documents/?query=paginated&ordering=archive_serial_number&page=1&page_size=3",
        )
        page1_ids = [r["id"] for r in response.data["results"]]
        self.assertEqual(len(page1_ids), 3)

        response = self.client.get(
            "/api/documents/?query=paginated&ordering=archive_serial_number&page=2&page_size=3",
        )
        page2_ids = [r["id"] for r in response.data["results"]]
        self.assertEqual(len(page2_ids), 3)

        # No overlap between pages
        self.assertEqual(set(page1_ids) & set(page2_ids), set())
        # Page 2 ASNs are higher than page 1
        page1_asns = [
            Document.objects.get(pk=pk).archive_serial_number for pk in page1_ids
        ]
        page2_asns = [
            Document.objects.get(pk=pk).archive_serial_number for pk in page2_ids
        ]
        self.assertTrue(max(page1_asns) < min(page2_asns))

    def test_search_all_field_contains_all_ids_when_paginated(self) -> None:
        """The 'all' field must contain every matching ID, even when paginated."""
        backend = get_backend()
        doc_ids = []
        for i in range(10):
            doc = Document.objects.create(
                title=f"all field doc {i}",
                content="allfield content",
                checksum=f"AF{i}",
            )
            backend.add_or_update(doc)
            doc_ids.append(doc.pk)

        response = self.client.get(
            "/api/documents/?query=allfield&page=1&page_size=3",
            headers={"Accept": "application/json; version=9"},
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data["results"]), 3)
        # "all" must contain ALL 10 matching IDs
        self.assertCountEqual(response.data["all"], doc_ids)

    @mock.patch("documents.bulk_edit.bulk_update_documents")
    def test_global_search(self, m) -> None:
        """
@@ -1020,7 +1020,7 @@ class TestTagBarcode(DirectoriesMixin, SampleDirMixin, GetReaderPluginMixin, Tes
        CONSUMER_TAG_BARCODE_SPLIT=True,
        CONSUMER_TAG_BARCODE_MAPPING={"TAG:(.*)": "\\g<1>"},
        CELERY_TASK_ALWAYS_EAGER=True,
        OCR_MODE="auto",
        OCR_MODE="skip",
    )
    def test_consume_barcode_file_tag_split_and_assignment(self) -> None:
        """
@@ -230,11 +230,7 @@ class TestConsumer(
|
||||
shutil.copy(src, dst)
|
||||
return dst
|
||||
|
||||
@override_settings(
|
||||
FILENAME_FORMAT=None,
|
||||
TIME_ZONE="America/Chicago",
|
||||
ARCHIVE_FILE_GENERATION="always",
|
||||
)
|
||||
@override_settings(FILENAME_FORMAT=None, TIME_ZONE="America/Chicago")
|
||||
def testNormalOperation(self) -> None:
|
||||
filename = self.get_test_file()
|
||||
|
||||
@@ -633,10 +629,7 @@ class TestConsumer(
|
||||
# Database empty
|
||||
self.assertEqual(Document.objects.all().count(), 0)
|
||||
|
||||
@override_settings(
|
||||
FILENAME_FORMAT="{correspondent}/{title}",
|
||||
ARCHIVE_FILE_GENERATION="always",
|
||||
)
|
||||
@override_settings(FILENAME_FORMAT="{correspondent}/{title}")
|
||||
def testFilenameHandling(self) -> None:
|
||||
with self.get_consumer(
|
||||
self.get_test_file(),
|
||||
@@ -653,7 +646,7 @@ class TestConsumer(
|
||||
self._assert_first_last_send_progress()
|
||||
|
||||
@mock.patch("documents.consumer.generate_unique_filename")
|
||||
@override_settings(FILENAME_FORMAT="{pk}", ARCHIVE_FILE_GENERATION="always")
|
||||
@override_settings(FILENAME_FORMAT="{pk}")
|
||||
def testFilenameHandlingFallsBackWhenGeneratedPathExceedsDbLimit(self, m):
|
||||
m.side_effect = lambda doc, archive_filename=False: Path(
|
||||
("a" * 1100 + ".pdf") if not archive_filename else ("b" * 1100 + ".pdf"),
|
||||
@@ -680,10 +673,7 @@ class TestConsumer(
|
||||
|
||||
self._assert_first_last_send_progress()
|
||||
|
||||
@override_settings(
|
||||
FILENAME_FORMAT="{correspondent}/{title}",
|
||||
ARCHIVE_FILE_GENERATION="always",
|
||||
)
|
||||
@override_settings(FILENAME_FORMAT="{correspondent}/{title}")
|
||||
@mock.patch("documents.signals.handlers.generate_unique_filename")
|
||||
def testFilenameHandlingUnstableFormat(self, m) -> None:
|
||||
filenames = ["this", "that", "now this", "i cannot decide"]
|
||||
@@ -1031,7 +1021,7 @@ class TestConsumer(
|
||||
self.assertEqual(Document.objects.count(), 2)
|
||||
self._assert_first_last_send_progress()
|
||||
|
||||
@override_settings(FILENAME_FORMAT="{title}", ARCHIVE_FILE_GENERATION="always")
|
||||
@override_settings(FILENAME_FORMAT="{title}")
|
||||
@mock.patch("documents.consumer.get_parser_registry")
|
||||
def test_similar_filenames(self, m) -> None:
|
||||
shutil.copy(
|
||||
@@ -1142,7 +1132,6 @@ class TestConsumer(
|
||||
mock_mail_parser_parse.assert_called_once_with(
|
||||
consumer.working_copy,
|
||||
"message/rfc822",
|
||||
produce_archive=True,
|
||||
)
|
||||
|
||||
|
||||
@@ -1290,14 +1279,7 @@ class PreConsumeTestCase(DirectoriesMixin, GetConsumerMixin, TestCase):
|
||||
def test_no_pre_consume_script(self, m) -> None:
|
||||
with self.get_consumer(self.test_file) as c:
|
||||
c.run()
|
||||
# Verify no pre-consume script subprocess was invoked
|
||||
# (run_subprocess may still be called by _extract_text_for_archive_check)
|
||||
script_calls = [
|
||||
call
|
||||
for call in m.call_args_list
|
||||
if call.args and call.args[0] and call.args[0][0] not in ("pdftotext",)
|
||||
]
|
||||
self.assertEqual(script_calls, [])
|
||||
m.assert_not_called()
|
||||
|
||||
@mock.patch("documents.consumer.run_subprocess")
|
||||
@override_settings(PRE_CONSUME_SCRIPT="does-not-exist")
|
||||
@@ -1313,16 +1295,9 @@ class PreConsumeTestCase(DirectoriesMixin, GetConsumerMixin, TestCase):
|
||||
with self.get_consumer(self.test_file) as c:
|
||||
c.run()
|
||||
|
||||
self.assertTrue(m.called)
|
||||
m.assert_called_once()
|
||||
|
||||
# Find the call that invoked the pre-consume script
|
||||
# (run_subprocess may also be called by _extract_text_for_archive_check)
|
||||
script_call = next(
|
||||
call
|
||||
for call in m.call_args_list
|
||||
if call.args and call.args[0] and call.args[0][0] == script.name
|
||||
)
|
||||
args, _ = script_call
|
||||
args, _ = m.call_args
|
||||
|
||||
command = args[0]
|
||||
environment = args[1]
|
||||
|
||||
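
The filtering idiom the removed test used generalizes: when a mocked function is shared by several callers, inspect call_args_list rather than asserting a total call count. A self-contained sketch of the same pattern outside the test suite:

    # Sketch: isolating one caller's invocations of a shared mock.
    from unittest import mock

    m = mock.MagicMock()
    m(["pdftotext", "-q", "in.pdf", "out.txt"], {})    # background helper call
    m(["my-script.sh", "doc.pdf"], {"DOCUMENT": "1"})  # the call under test

    script_calls = [
        c for c in m.call_args_list
        if c.args and c.args[0] and c.args[0][0] != "pdftotext"
    ]
    assert len(script_calls) == 1
    assert script_calls[0].args[0][0] == "my-script.sh"
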
@@ -1,189 +0,0 @@
"""Tests for should_produce_archive()."""

from __future__ import annotations

from pathlib import Path
from typing import TYPE_CHECKING
from unittest.mock import MagicMock

import pytest

from documents.consumer import should_produce_archive

if TYPE_CHECKING:
    from pytest_mock import MockerFixture


def _parser_instance(
    *,
    can_produce: bool = True,
    requires_rendition: bool = False,
) -> MagicMock:
    """Return a mock parser instance with the given capability flags."""
    instance = MagicMock()
    instance.can_produce_archive = can_produce
    instance.requires_pdf_rendition = requires_rendition
    return instance


@pytest.fixture()
def null_app_config(mocker) -> MagicMock:
    """Mock ApplicationConfiguration with all fields None → falls back to Django settings."""
    return mocker.MagicMock(
        output_type=None,
        pages=None,
        language=None,
        mode=None,
        archive_file_generation=None,
        image_dpi=None,
        unpaper_clean=None,
        deskew=None,
        rotate_pages=None,
        rotate_pages_threshold=None,
        max_image_pixels=None,
        color_conversion_strategy=None,
        user_args=None,
    )


@pytest.fixture(autouse=True)
def patch_app_config(mocker, null_app_config):
    """Patch BaseConfig._get_config_instance for all tests in this module."""
    mocker.patch(
        "paperless.config.BaseConfig._get_config_instance",
        return_value=null_app_config,
    )


class TestShouldProduceArchive:
    @pytest.mark.parametrize(
        ("generation", "can_produce", "requires_rendition", "mime", "expected"),
        [
            pytest.param(
                "never",
                True,
                False,
                "application/pdf",
                False,
                id="never-returns-false",
            ),
            pytest.param(
                "always",
                True,
                False,
                "application/pdf",
                True,
                id="always-returns-true",
            ),
            pytest.param(
                "never",
                True,
                True,
                "application/pdf",
                True,
                id="requires-rendition-overrides-never",
            ),
            pytest.param(
                "always",
                False,
                False,
                "text/plain",
                False,
                id="cannot-produce-overrides-always",
            ),
            pytest.param(
                "always",
                False,
                True,
                "application/pdf",
                True,
                id="requires-rendition-wins-even-if-cannot-produce",
            ),
            pytest.param(
                "auto",
                True,
                False,
                "image/tiff",
                True,
                id="auto-image-returns-true",
            ),
            pytest.param(
                "auto",
                True,
                False,
                "message/rfc822",
                False,
                id="auto-non-pdf-non-image-returns-false",
            ),
        ],
    )
    def test_generation_setting(
        self,
        settings,
        generation: str,
        can_produce: bool,  # noqa: FBT001
        requires_rendition: bool,  # noqa: FBT001
        mime: str,
        expected: bool,  # noqa: FBT001
    ) -> None:
        settings.ARCHIVE_FILE_GENERATION = generation
        parser = _parser_instance(
            can_produce=can_produce,
            requires_rendition=requires_rendition,
        )
        assert should_produce_archive(parser, mime, Path("/tmp/doc")) is expected

    @pytest.mark.parametrize(
        ("extracted_text", "expected"),
        [
            pytest.param(
                "This is a born-digital PDF with lots of text content. " * 10,
                False,
                id="born-digital-long-text-skips-archive",
            ),
            pytest.param(None, True, id="no-text-scanned-produces-archive"),
            pytest.param("tiny", True, id="short-text-treated-as-scanned"),
        ],
    )
    def test_auto_pdf_archive_decision(
        self,
        mocker: MockerFixture,
        settings,
        extracted_text: str | None,
        expected: bool,  # noqa: FBT001
    ) -> None:
        settings.ARCHIVE_FILE_GENERATION = "auto"
        mocker.patch("documents.consumer.is_tagged_pdf", return_value=False)
        mocker.patch("documents.consumer.extract_pdf_text", return_value=extracted_text)
        parser = _parser_instance(can_produce=True, requires_rendition=False)
        assert (
            should_produce_archive(parser, "application/pdf", Path("/tmp/doc.pdf"))
            is expected
        )

    def test_tagged_pdf_skips_archive_in_auto_mode(
        self,
        mocker: MockerFixture,
        settings,
    ) -> None:
        """Tagged PDFs (e.g. Word exports) are treated as born-digital regardless of text length."""
        settings.ARCHIVE_FILE_GENERATION = "auto"
        mocker.patch("documents.consumer.is_tagged_pdf", return_value=True)
        parser = _parser_instance(can_produce=True, requires_rendition=False)
        assert (
            should_produce_archive(parser, "application/pdf", Path("/tmp/doc.pdf"))
            is False
        )

    def test_tagged_pdf_does_not_call_pdftotext(
        self,
        mocker: MockerFixture,
        settings,
    ) -> None:
        """When a PDF is tagged, pdftotext is not invoked (fast path)."""
        settings.ARCHIVE_FILE_GENERATION = "auto"
        mocker.patch("documents.consumer.is_tagged_pdf", return_value=True)
        mock_extract = mocker.patch("documents.consumer.extract_pdf_text")
        parser = _parser_instance(can_produce=True, requires_rendition=False)
        should_produce_archive(parser, "application/pdf", Path("/tmp/doc.pdf"))
        mock_extract.assert_not_called()
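
Read together, the parametrized cases above pin down a decision table. A minimal sketch of logic consistent with those cases (not the actual implementation, which lived in documents.consumer):

    # Sketch: archive decision consistent with the deleted test table.
    # is_born_digital stands in for the tagged-PDF / pdftotext probe.
    def should_produce_archive_sketch(parser, mime_type, generation, is_born_digital):
        if parser.requires_pdf_rendition:   # e.g. email needing a PDF rendition
            return True                     # wins over everything else
        if not parser.can_produce_archive:
            return False
        if generation == "never":
            return False
        if generation == "always":
            return True
        # auto: archive scans/images, skip born-digital PDFs, skip other types
        if mime_type.startswith("image/"):
            return True
        if mime_type == "application/pdf":
            return not is_born_digital
        return False
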
@@ -27,10 +27,7 @@ sample_file: Path = Path(__file__).parent / "samples" / "simple.pdf"


@pytest.mark.management
@override_settings(
    FILENAME_FORMAT="{correspondent}/{title}",
    ARCHIVE_FILE_GENERATION="always",
)
@override_settings(FILENAME_FORMAT="{correspondent}/{title}")
class TestArchiver(DirectoriesMixin, FileSystemAssertsMixin, TestCase):
    def make_models(self):
        return Document.objects.create(

@@ -213,7 +213,6 @@ class TestEmptyTrashTask(DirectoriesMixin, FileSystemAssertsMixin, TestCase):
        self.assertEqual(Document.global_objects.count(), 0)


@override_settings(ARCHIVE_FILE_GENERATION="always")
class TestUpdateContent(DirectoriesMixin, TestCase):
    def test_update_content_maybe_archive_file(self) -> None:
        """

@@ -2058,13 +2058,14 @@ class UnifiedSearchViewSet(DocumentViewSet):
        if not self._is_search_request():
            return super().list(request)

        from documents.search import SearchHit
        from documents.search import SearchMode
        from documents.search import TantivyBackend
        from documents.search import TantivyRelevanceList
        from documents.search import get_backend

        try:
            backend = get_backend()
            # ORM-filtered queryset: permissions + field filters + ordering (DRF backends applied)
            filtered_qs = self.filter_queryset(self.get_queryset())

            user = None if request.user.is_superuser else request.user
@@ -2079,6 +2080,28 @@ class UnifiedSearchViewSet(DocumentViewSet):
                },
            )

            # Parse ordering param
            ordering_param = request.query_params.get("ordering", "")
            sort_reverse = ordering_param.startswith("-")
            sort_field_name = ordering_param.lstrip("-") or None

            use_tantivy_sort = (
                sort_field_name in TantivyBackend.SORTABLE_FIELDS
                or sort_field_name is None
            )

            # Compute the DRF page so we can tell Tantivy which slice to highlight
            try:
                requested_page = int(request.query_params.get("page", 1))
            except (TypeError, ValueError):
                requested_page = 1
            try:
                requested_page_size = int(
                    request.query_params.get("page_size", self.paginator.page_size),
                )
            except (TypeError, ValueError):
                requested_page_size = self.paginator.page_size

            if (
                "text" in request.query_params
                or "title_search" in request.query_params
@@ -2093,17 +2116,44 @@ class UnifiedSearchViewSet(DocumentViewSet):
            else:
                search_mode = SearchMode.QUERY
                query_str = request.query_params["query"]
                results = backend.search(

                # Step 1: Get all matching IDs (lightweight, no highlights)
                all_ids = backend.search_ids(
                    query_str,
                    user=user,
                    page=1,
                    page_size=10000,
                    sort_field=None,
                    sort_reverse=False,
                    sort_field=sort_field_name if use_tantivy_sort else None,
                    sort_reverse=sort_reverse,
                    search_mode=search_mode,
                )

                # Step 2: Intersect with ORM-visible IDs (field filters)
                orm_ids = set(filtered_qs.values_list("pk", flat=True))

                if use_tantivy_sort:
                    # Fast path: Tantivy already ordered the IDs
                    ordered_ids = [doc_id for doc_id in all_ids if doc_id in orm_ids]
                else:
                    # Slow path: ORM must re-sort
                    id_set = set(all_ids) & orm_ids
                    ordered_ids = list(
                        filtered_qs.filter(id__in=id_set).values_list(
                            "pk",
                            flat=True,
                        ),
                    )

                # Step 3: Fetch highlights for the displayed page only
                page_offset = (requested_page - 1) * requested_page_size
                page_ids = ordered_ids[page_offset : page_offset + requested_page_size]

                page_hits = backend.highlight_hits(
                    query_str,
                    page_ids,
                    search_mode=search_mode,
                )

            else:
                # more_like_id — validate permission on the seed document first
                # more_like_id path
                try:
                    more_like_doc_id = int(request.query_params["more_like_id"])
                    more_like_doc = Document.objects.select_related("owner").get(
@@ -2119,33 +2169,24 @@ class UnifiedSearchViewSet(DocumentViewSet):
                ):
                    raise PermissionDenied(_("Insufficient permissions."))

                results = backend.more_like_this(
                # Step 1: Get all matching IDs (lightweight)
                all_ids = backend.more_like_this_ids(
                    more_like_doc_id,
                    user=user,
                    page=1,
                    page_size=10000,
                )

                hits_by_id = {h["id"]: h for h in results.hits}

                # Determine sort order: no ordering param -> Tantivy relevance; otherwise -> ORM order
                ordering_param = request.query_params.get("ordering", "").lstrip("-")
                if not ordering_param:
                    # Preserve Tantivy relevance order; intersect with ORM-visible IDs
                    orm_ids = set(filtered_qs.values_list("pk", flat=True))
                    ordered_hits = [h for h in results.hits if h["id"] in orm_ids]
                else:
                    # Use ORM ordering (already applied by DocumentsOrderingFilter)
                    hit_ids = set(hits_by_id.keys())
                    orm_ordered_ids = filtered_qs.filter(id__in=hit_ids).values_list(
                        "pk",
                        flat=True,
                    )
                    ordered_hits = [
                        hits_by_id[pk] for pk in orm_ordered_ids if pk in hits_by_id
                ordered_ids = [doc_id for doc_id in all_ids if doc_id in orm_ids]

                # Step 2: Build hit dicts for the displayed page
                # MLT has no text query, so no highlights needed
                page_offset = (requested_page - 1) * requested_page_size
                page_ids = ordered_ids[page_offset : page_offset + requested_page_size]
                page_hits = [
                    SearchHit(id=doc_id, score=0.0, rank=rank, highlights={})
                    for rank, doc_id in enumerate(page_ids, start=page_offset + 1)
                ]

                rl = TantivyRelevanceList(ordered_hits)
            rl = TantivyRelevanceList(ordered_ids, page_hits, page_offset)
            page = self.paginate_queryset(rl)

            if page is not None:
@@ -2155,15 +2196,14 @@ class UnifiedSearchViewSet(DocumentViewSet):
                if get_boolean(
                    str(request.query_params.get("include_selection_data", "false")),
                ):
                    all_ids = [h["id"] for h in ordered_hits]
                    response.data["selection_data"] = (
                        self._get_selection_data_for_queryset(
                            filtered_qs.filter(pk__in=all_ids),
                            filtered_qs.filter(pk__in=ordered_ids),
                        )
                    )
                return response

            serializer = self.get_serializer(ordered_hits, many=True)
            serializer = self.get_serializer(page_hits, many=True)
            return Response(serializer.data)

        except NotFound:
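
Reduced to its three steps, the new list() flow looks like this (a schematic using the backend method names from the diff; everything else is simplified):

    # Sketch: the ID-first search flow the view implements above.
    def paged_search(backend, filtered_qs, query, page, page_size):
        all_ids = backend.search_ids(query, page=1, page_size=10000)  # step 1
        visible = set(filtered_qs.values_list("pk", flat=True))       # step 2
        ordered_ids = [i for i in all_ids if i in visible]
        offset = (page - 1) * page_size
        page_ids = ordered_ids[offset : offset + page_size]
        hits = backend.highlight_hits(query, page_ids)                # step 3
        return ordered_ids, hits  # "all" field + highlighted page

The point of the split is cost: highlighting is the expensive part, so it runs only for the page actually displayed, while the cheap ID pass feeds both the "all" field and the pagination math.
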
@@ -5,7 +5,6 @@ import shutil
import stat
import subprocess
from pathlib import Path
from typing import Any

from django.conf import settings
from django.core.checks import Error
@@ -23,7 +22,7 @@ writeable_hint = (
)


def path_check(var: str, directory: Path) -> list[Error]:
def path_check(var, directory: Path) -> list[Error]:
    messages: list[Error] = []
    if directory:
        if not directory.is_dir():
@@ -60,7 +59,7 @@ def path_check(var: str, directory: Path) -> list[Error]:


@register()
def paths_check(app_configs: Any, **kwargs: Any) -> list[Error]:
def paths_check(app_configs, **kwargs) -> list[Error]:
    """
    Check the various paths for existence, readability and writeability
    """
@@ -74,7 +73,7 @@ def paths_check(app_configs: Any, **kwargs: Any) -> list[Error]:


@register()
def binaries_check(app_configs: Any, **kwargs: Any) -> list[Error]:
def binaries_check(app_configs, **kwargs):
    """
    Paperless requires the existence of a few binaries, so we do some checks
    for those here.
@@ -94,7 +93,7 @@ def binaries_check(app_configs: Any, **kwargs: Any) -> list[Error]:


@register()
def debug_mode_check(app_configs: Any, **kwargs: Any) -> list[Warning]:
def debug_mode_check(app_configs, **kwargs):
    if settings.DEBUG:
        return [
            Warning(
@@ -110,7 +109,7 @@ def debug_mode_check(app_configs: Any, **kwargs: Any) -> list[Warning]:


@register()
def settings_values_check(app_configs: Any, **kwargs: Any) -> list[Error | Warning]:
def settings_values_check(app_configs, **kwargs):
    """
    Validates at least some of the user provided settings
    """
@@ -133,14 +132,23 @@ def settings_values_check(app_configs: Any, **kwargs: Any) -> list[Error | Warni
            Error(f'OCR output type "{settings.OCR_OUTPUT_TYPE}" is not valid'),
        )

    if settings.OCR_MODE not in {"auto", "force", "redo", "off"}:
    if settings.OCR_MODE not in {"force", "skip", "redo", "skip_noarchive"}:
        msgs.append(Error(f'OCR output mode "{settings.OCR_MODE}" is not valid'))

    if settings.ARCHIVE_FILE_GENERATION not in {"auto", "always", "never"}:
    if settings.OCR_MODE == "skip_noarchive":
        msgs.append(
            Warning(
                'OCR output mode "skip_noarchive" is deprecated and will be '
                "removed in a future version. Please use "
                "PAPERLESS_OCR_SKIP_ARCHIVE_FILE instead.",
            ),
        )

    if settings.OCR_SKIP_ARCHIVE_FILE not in {"never", "with_text", "always"}:
        msgs.append(
            Error(
                "PAPERLESS_ARCHIVE_FILE_GENERATION setting "
                f'"{settings.ARCHIVE_FILE_GENERATION}" is not valid',
                "OCR_SKIP_ARCHIVE_FILE setting "
                f'"{settings.OCR_SKIP_ARCHIVE_FILE}" is not valid',
            ),
        )

@@ -183,7 +191,7 @@ def settings_values_check(app_configs: Any, **kwargs: Any) -> list[Error | Warni


@register()
def audit_log_check(app_configs: Any, **kwargs: Any) -> list[Error]:
def audit_log_check(app_configs, **kwargs):
    db_conn = connections["default"]
    all_tables = db_conn.introspection.table_names()
    result = []
@@ -295,42 +303,7 @@ def check_deprecated_db_settings(


@register()
def check_deprecated_v2_ocr_env_vars(
    app_configs: object,
    **kwargs: object,
) -> list[Warning]:
    """Warn when deprecated v2 OCR environment variables are set.

    Users upgrading from v2 may still have these in their environment or
    config files, where they are now silently ignored.
    """
    warnings: list[Warning] = []

    if os.environ.get("PAPERLESS_OCR_SKIP_ARCHIVE_FILE"):
        warnings.append(
            Warning(
                "PAPERLESS_OCR_SKIP_ARCHIVE_FILE is set but has no effect. "
                "Use PAPERLESS_ARCHIVE_FILE_GENERATION=never/always/auto instead.",
                id="paperless.W002",
            ),
        )

    ocr_mode = os.environ.get("PAPERLESS_OCR_MODE", "")
    if ocr_mode in {"skip", "skip_noarchive"}:
        warnings.append(
            Warning(
                f"PAPERLESS_OCR_MODE={ocr_mode!r} is not a valid value. "
                f"Use PAPERLESS_OCR_MODE=auto (and PAPERLESS_ARCHIVE_FILE_GENERATION=never "
                f"if you used skip_noarchive) instead.",
                id="paperless.W003",
            ),
        )

    return warnings


@register()
def check_remote_parser_configured(app_configs: Any, **kwargs: Any) -> list[Error]:
def check_remote_parser_configured(app_configs, **kwargs) -> list[Error]:
    if settings.REMOTE_OCR_ENGINE == "azureai" and not (
        settings.REMOTE_OCR_ENDPOINT and settings.REMOTE_OCR_API_KEY
    ):
@@ -356,7 +329,7 @@ def get_tesseract_langs():


@register()
def check_default_language_available(app_configs: Any, **kwargs: Any) -> list[Error]:
def check_default_language_available(app_configs, **kwargs):
    errs = []

    if not settings.OCR_LANGUAGE:

@@ -4,11 +4,6 @@ import json
from django.conf import settings

from paperless.models import ApplicationConfiguration
from paperless.models import ArchiveFileGenerationChoices
from paperless.models import CleanChoices
from paperless.models import ColorConvertChoices
from paperless.models import ModeChoices
from paperless.models import OutputTypeChoices


@dataclasses.dataclass
@@ -33,7 +28,7 @@ class OutputTypeConfig(BaseConfig):
    Almost all parsers care about the chosen PDF output format
    """

    output_type: OutputTypeChoices = dataclasses.field(init=False)
    output_type: str = dataclasses.field(init=False)

    def __post_init__(self) -> None:
        app_config = self._get_config_instance()
@@ -50,17 +45,15 @@ class OcrConfig(OutputTypeConfig):

    pages: int | None = dataclasses.field(init=False)
    language: str = dataclasses.field(init=False)
    mode: ModeChoices = dataclasses.field(init=False)
    archive_file_generation: ArchiveFileGenerationChoices = dataclasses.field(
        init=False,
    )
    mode: str = dataclasses.field(init=False)
    skip_archive_file: str = dataclasses.field(init=False)
    image_dpi: int | None = dataclasses.field(init=False)
    clean: CleanChoices = dataclasses.field(init=False)
    clean: str = dataclasses.field(init=False)
    deskew: bool = dataclasses.field(init=False)
    rotate: bool = dataclasses.field(init=False)
    rotate_threshold: float = dataclasses.field(init=False)
    max_image_pixel: float | None = dataclasses.field(init=False)
    color_conversion_strategy: ColorConvertChoices = dataclasses.field(init=False)
    color_conversion_strategy: str = dataclasses.field(init=False)
    user_args: dict[str, str] | None = dataclasses.field(init=False)

    def __post_init__(self) -> None:
@@ -71,8 +64,8 @@ class OcrConfig(OutputTypeConfig):
        self.pages = app_config.pages or settings.OCR_PAGES
        self.language = app_config.language or settings.OCR_LANGUAGE
        self.mode = app_config.mode or settings.OCR_MODE
        self.archive_file_generation = (
            app_config.archive_file_generation or settings.ARCHIVE_FILE_GENERATION
        self.skip_archive_file = (
            app_config.skip_archive_file or settings.OCR_SKIP_ARCHIVE_FILE
        )
        self.image_dpi = app_config.image_dpi or settings.OCR_IMAGE_DPI
        self.clean = app_config.unpaper_clean or settings.OCR_CLEAN
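
The `app_config.<field> or settings.<FIELD>` pattern above means a value set in the database wins, and anything falsy (None, empty string) falls through to the env-derived Django setting. A standalone sketch of that precedence, with placeholder classes standing in for the real objects:

    # Sketch: database-over-settings precedence used by OcrConfig.
    class _Settings:
        OCR_MODE = "skip"  # env-derived default

    class _AppConfig:
        mode = None        # nothing set in the UI yet

    def effective_mode(app_config, settings):
        # note: `or` also treats "" as unset, not just None
        return app_config.mode or settings.OCR_MODE

    assert effective_mode(_AppConfig(), _Settings()) == "skip"
    _AppConfig.mode = "redo"
    assert effective_mode(_AppConfig(), _Settings()) == "redo"
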
@@ -1,90 +0,0 @@
# Generated by Django 5.2.12 on 2026-03-26 20:31

from django.db import migrations
from django.db import models

_MODE_MAP = {
    "skip": "auto",
    "redo": "redo",
    "force": "force",
    "skip_noarchive": "auto",
}

_ARCHIVE_MAP = {
    # never skip -> always generate
    "never": "always",
    # skip when text present -> auto
    "with_text": "auto",
    # always skip -> never generate
    "always": "never",
}


def migrate_old_values(apps, schema_editor):
    ApplicationConfiguration = apps.get_model("paperless", "ApplicationConfiguration")
    for config in ApplicationConfiguration.objects.all():
        old_mode = config.mode
        old_skip = config.skip_archive_file

        # Map the old mode value
        if old_mode in _MODE_MAP:
            config.mode = _MODE_MAP[old_mode]

        # Map skip_archive_file -> archive_file_generation
        if old_skip in _ARCHIVE_MAP:
            config.archive_file_generation = _ARCHIVE_MAP[old_skip]

        # skip_noarchive implied no archive file; set that if the user
        # didn't already have an explicit skip_archive_file preference
        if old_mode == "skip_noarchive" and old_skip is None:
            config.archive_file_generation = "never"

        config.save()


class Migration(migrations.Migration):
    dependencies = [
        ("paperless", "0007_optimize_integer_field_sizes"),
    ]

    operations = [
        # 1. Update mode choices in-place (old values still in the column)
        migrations.AlterField(
            model_name="applicationconfiguration",
            name="mode",
            field=models.CharField(
                blank=True,
                choices=[
                    ("auto", "auto"),
                    ("force", "force"),
                    ("redo", "redo"),
                    ("off", "off"),
                ],
                max_length=16,
                null=True,
                verbose_name="Sets the OCR mode",
            ),
        ),
        # 2. Add the new field
        migrations.AddField(
            model_name="applicationconfiguration",
            name="archive_file_generation",
            field=models.CharField(
                blank=True,
                choices=[("auto", "auto"), ("always", "always"), ("never", "never")],
                max_length=8,
                null=True,
                verbose_name="Controls archive file generation",
            ),
        ),
        # 3. Migrate data from old values to new
        migrations.RunPython(
            migrate_old_values,
            migrations.RunPython.noop,
        ),
        # 4. Drop the old field
        migrations.RemoveField(
            model_name="applicationconfiguration",
            name="skip_archive_file",
        ),
    ]
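
A quick way to sanity-check the two mapping tables in the deleted migration is to apply them to every legacy combination; a hedged sketch (pure dictionaries, no Django required):

    # Sketch: exercising the migration's value maps outside Django.
    _MODE_MAP = {"skip": "auto", "redo": "redo", "force": "force", "skip_noarchive": "auto"}
    _ARCHIVE_MAP = {"never": "always", "with_text": "auto", "always": "never"}

    for old_mode in _MODE_MAP:
        for old_skip in (*_ARCHIVE_MAP, None):
            mode = _MODE_MAP[old_mode]
            generation = _ARCHIVE_MAP.get(old_skip)
            # skip_noarchive with no explicit preference implied "never"
            if old_mode == "skip_noarchive" and old_skip is None:
                generation = "never"
            assert mode in {"auto", "redo", "force"}
            assert generation in {"auto", "always", "never", None}
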
@@ -36,20 +36,20 @@ class ModeChoices(models.TextChoices):
    and our own custom setting
    """

    AUTO = ("auto", _("auto"))
    FORCE = ("force", _("force"))
    SKIP = ("skip", _("skip"))
    REDO = ("redo", _("redo"))
    OFF = ("off", _("off"))
    FORCE = ("force", _("force"))
    SKIP_NO_ARCHIVE = ("skip_noarchive", _("skip_noarchive"))


class ArchiveFileGenerationChoices(models.TextChoices):
class ArchiveFileChoices(models.TextChoices):
    """
    Settings to control creation of an archive PDF file
    """

    AUTO = ("auto", _("auto"))
    ALWAYS = ("always", _("always"))
    NEVER = ("never", _("never"))
    WITH_TEXT = ("with_text", _("with_text"))
    ALWAYS = ("always", _("always"))


class CleanChoices(models.TextChoices):
@@ -126,12 +126,12 @@ class ApplicationConfiguration(AbstractSingletonModel):
        choices=ModeChoices.choices,
    )

    archive_file_generation = models.CharField(
        verbose_name=_("Controls archive file generation"),
    skip_archive_file = models.CharField(
        verbose_name=_("Controls the generation of an archive file"),
        null=True,
        blank=True,
        max_length=8,
        choices=ArchiveFileGenerationChoices.choices,
        max_length=16,
        choices=ArchiveFileChoices.choices,
    )

    image_dpi = models.PositiveSmallIntegerField(
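
For reference, Django TextChoices members compare equal to their raw strings, which is why code elsewhere in this branch can mix ModeChoices.REDO and "redo" freely. A minimal sketch:

    # Sketch: TextChoices members are str subclasses.
    from django.db import models

    class Mode(models.TextChoices):
        AUTO = ("auto", "auto")
        REDO = ("redo", "redo")

    assert Mode.REDO == "redo"        # plain string comparison works
    assert Mode.REDO.value == "redo"  # explicit raw value
    assert "redo" in Mode.values      # valid-choice check
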
@@ -1,6 +1,5 @@
from __future__ import annotations

import importlib.resources
import logging
import os
import re
@@ -9,8 +8,6 @@ import tempfile
from pathlib import Path
from typing import TYPE_CHECKING
from typing import Any
from typing import Final
from typing import NoReturn
from typing import Self

from django.conf import settings
@@ -21,11 +18,9 @@ from documents.parsers import make_thumbnail_from_pdf
from documents.utils import maybe_override_pixel_limit
from documents.utils import run_subprocess
from paperless.config import OcrConfig
from paperless.models import ArchiveFileChoices
from paperless.models import CleanChoices
from paperless.models import ModeChoices
from paperless.parsers.utils import PDF_TEXT_MIN_LENGTH
from paperless.parsers.utils import extract_pdf_text
from paperless.parsers.utils import is_tagged_pdf
from paperless.parsers.utils import read_file_handle_unicode_errors
from paperless.version import __full_version_str__

@@ -38,11 +33,7 @@ if TYPE_CHECKING:

logger = logging.getLogger("paperless.parsing.tesseract")

_SRGB_ICC_DATA: Final[bytes] = (
    importlib.resources.files("ocrmypdf.data").joinpath("sRGB.icc").read_bytes()
)

_SUPPORTED_MIME_TYPES: Final[dict[str, str]] = {
_SUPPORTED_MIME_TYPES: dict[str, str] = {
    "application/pdf": ".pdf",
    "image/jpeg": ".jpg",
    "image/png": ".png",
@@ -108,7 +99,7 @@ class RasterisedDocumentParser:
    # Lifecycle
    # ------------------------------------------------------------------

    def __init__(self, logging_group: object | None = None) -> None:
    def __init__(self, logging_group: object = None) -> None:
        settings.SCRATCH_DIR.mkdir(parents=True, exist_ok=True)
        self.tempdir = Path(
            tempfile.mkdtemp(prefix="paperless-", dir=settings.SCRATCH_DIR),
@@ -242,7 +233,7 @@ class RasterisedDocumentParser:
        if (
            sidecar_file is not None
            and sidecar_file.is_file()
            and self.settings.mode != ModeChoices.REDO
            and self.settings.mode != "redo"
        ):
            text = read_file_handle_unicode_errors(sidecar_file)

@@ -259,7 +250,36 @@ class RasterisedDocumentParser:
        if not Path(pdf_file).is_file():
            return None

        return post_process_text(extract_pdf_text(Path(pdf_file), log=self.log))
        try:
            text = None
            with tempfile.NamedTemporaryFile(
                mode="w+",
                dir=self.tempdir,
            ) as tmp:
                run_subprocess(
                    [
                        "pdftotext",
                        "-q",
                        "-layout",
                        "-enc",
                        "UTF-8",
                        str(pdf_file),
                        tmp.name,
                    ],
                    logger=self.log,
                )
                text = read_file_handle_unicode_errors(Path(tmp.name))

            return post_process_text(text)

        except Exception:
            # If pdftotext fails, fall back to OCR.
            self.log.warning(
                "Error while getting text from PDF document with pdftotext",
                exc_info=True,
            )
            # probably not a PDF file.
            return None

    def construct_ocrmypdf_parameters(
        self,
@@ -269,7 +289,6 @@ class RasterisedDocumentParser:
        sidecar_file: Path,
        *,
        safe_fallback: bool = False,
        skip_text: bool = False,
    ) -> dict[str, Any]:
        ocrmypdf_args: dict[str, Any] = {
            "input_file_or_options": input_file,
@@ -288,14 +307,15 @@ class RasterisedDocumentParser:
            self.settings.color_conversion_strategy
        )

        if safe_fallback or self.settings.mode == ModeChoices.FORCE:
        if self.settings.mode == ModeChoices.FORCE or safe_fallback:
            ocrmypdf_args["force_ocr"] = True
        elif self.settings.mode in {
            ModeChoices.SKIP,
            ModeChoices.SKIP_NO_ARCHIVE,
        }:
            ocrmypdf_args["skip_text"] = True
        elif self.settings.mode == ModeChoices.REDO:
            ocrmypdf_args["redo_ocr"] = True
        elif skip_text or self.settings.mode == ModeChoices.OFF:
            ocrmypdf_args["skip_text"] = True
        elif self.settings.mode == ModeChoices.AUTO:
            pass  # no extra flag: normal OCR (text not found case)
        else:  # pragma: no cover
            raise ParseError(f"Invalid ocr mode: {self.settings.mode}")
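
The two elif chains above (old and new side of the diff) each map a mode to exactly one OCRmyPDF flag. A merged sketch of that mapping, for reading convenience; branch order follows the new side:

    # Sketch: mode -> OCRmyPDF flag selection, merging both sides of the diff.
    def select_ocr_flag(mode, *, safe_fallback=False, skip_text=False):
        if safe_fallback or mode == "force":
            return {"force_ocr": True}
        if mode == "redo":
            return {"redo_ocr": True}
        if skip_text or mode == "off" or mode in {"skip", "skip_noarchive"}:
            return {"skip_text": True}
        if mode == "auto":
            return {}  # plain OCR run, no extra flag
        raise ValueError(f"Invalid ocr mode: {mode}")

    assert select_ocr_flag("redo") == {"redo_ocr": True}
    assert select_ocr_flag("auto", skip_text=True) == {"skip_text": True}
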
@@ -380,74 +400,6 @@ class RasterisedDocumentParser:

        return ocrmypdf_args

    def _convert_image_to_pdfa(self, document_path: Path) -> Path:
        """Convert an image to a PDF/A-2b file without invoking the OCR engine.

        Uses img2pdf for the initial image->PDF wrapping, then pikepdf to stamp
        PDF/A-2b conformance metadata.

        No Tesseract and no Ghostscript are invoked.
        """
        import img2pdf
        import pikepdf

        plain_pdf_path = Path(self.tempdir) / "image_plain.pdf"
        try:
            convert_kwargs: dict = {}
            if self.settings.image_dpi is not None:
                convert_kwargs["layout_fun"] = img2pdf.get_fixed_dpi_layout_fun(
                    (self.settings.image_dpi, self.settings.image_dpi),
                )
            plain_pdf_path.write_bytes(
                img2pdf.convert(str(document_path), **convert_kwargs),
            )
        except Exception as e:
            raise ParseError(
                f"img2pdf conversion failed for {document_path}: {e!s}",
            ) from e

        pdfa_path = Path(self.tempdir) / "archive.pdf"
        try:
            with pikepdf.open(plain_pdf_path) as pdf:
                cs = pdf.make_stream(_SRGB_ICC_DATA)
                cs["/N"] = 3
                output_intent = pikepdf.Dictionary(
                    Type=pikepdf.Name("/OutputIntent"),
                    S=pikepdf.Name("/GTS_PDFA1"),
                    OutputConditionIdentifier=pikepdf.String("sRGB"),
                    DestOutputProfile=cs,
                )
                pdf.Root["/OutputIntents"] = pdf.make_indirect(
                    pikepdf.Array([output_intent]),
                )
                meta = pdf.open_metadata(set_pikepdf_as_editor=False)
                meta["pdfaid:part"] = "2"
                meta["pdfaid:conformance"] = "B"
                pdf.save(pdfa_path)
        except Exception as e:
            self.log.warning(
                f"PDF/A metadata stamping failed ({e!s}); falling back to plain PDF.",
            )
            pdfa_path.write_bytes(plain_pdf_path.read_bytes())

        return pdfa_path

    def _handle_subprocess_output_error(self, e: Exception) -> NoReturn:
        """Log context for Ghostscript failures and raise ParseError.

        Called from the SubprocessOutputError handlers in parse() to avoid
        duplicating the Ghostscript hint and re-raise logic.
        """
        if "Ghostscript PDF/A rendering" in str(e):
            self.log.warning(
                "Ghostscript PDF/A rendering failed, consider setting "
                "PAPERLESS_OCR_USER_ARGS: "
                "'{\"continue_on_soft_render_error\": true}'",
            )
        raise ParseError(
            f"SubprocessOutputError: {e!s}. See logs for more information.",
        ) from e

    def parse(
        self,
        document_path: Path,
@@ -457,118 +409,57 @@ class RasterisedDocumentParser:
    ) -> None:
        # This forces tesseract to use one core per page.
        os.environ["OMP_THREAD_LIMIT"] = "1"
        VALID_TEXT_LENGTH = 50

        if mime_type == "application/pdf":
            text_original = self.extract_text(None, document_path)
            original_has_text = (
                text_original is not None and len(text_original) > VALID_TEXT_LENGTH
            )
        else:
            text_original = None
            original_has_text = False

        # If the original has text, and the user doesn't want an archive,
        # we're done here
        skip_archive_for_text = (
            self.settings.mode == ModeChoices.SKIP_NO_ARCHIVE
            or self.settings.skip_archive_file
            in {
                ArchiveFileChoices.WITH_TEXT,
                ArchiveFileChoices.ALWAYS,
            }
        )
        if skip_archive_for_text and original_has_text:
            self.log.debug("Document has text, skipping OCRmyPDF entirely.")
            self.text = text_original
            return

        # Either no text was in the original or there should be an archive
        # file created, so OCR the file and create an archive with any
        # text located via OCR

        import ocrmypdf
        from ocrmypdf import EncryptedPdfError
        from ocrmypdf import InputFileError
        from ocrmypdf import SubprocessOutputError
        from ocrmypdf.exceptions import DigitalSignatureError
        from ocrmypdf.exceptions import PriorOcrFoundError

        if mime_type == "application/pdf":
            text_original = self.extract_text(None, document_path)
            original_has_text = is_tagged_pdf(document_path, log=self.log) or (
                text_original is not None and len(text_original) > PDF_TEXT_MIN_LENGTH
            )
        else:
            text_original = None
            original_has_text = False

        self.log.debug(
            "Text detection: original_has_text=%s (text_length=%d, mode=%s, produce_archive=%s)",
            original_has_text,
            len(text_original) if text_original else 0,
            self.settings.mode,
            produce_archive,
        )

        # --- OCR_MODE=off: never invoke OCR engine ---
        if self.settings.mode == ModeChoices.OFF:
            if not produce_archive:
                self.log.debug(
                    "OCR: skipped — OCR_MODE=off, no archive requested;"
                    " returning pdftotext content only",
                )
                self.text = text_original or ""
                return
            if self.is_image(mime_type):
                self.log.debug(
                    "OCR: skipped — OCR_MODE=off, image input;"
                    " converting to PDF/A without OCR",
                )
                try:
                    self.archive_path = self._convert_image_to_pdfa(
                        document_path,
                    )
                    self.text = ""
                except Exception as e:
                    raise ParseError(
                        f"Image to PDF/A conversion failed: {e!s}",
                    ) from e
                return
            # PDFs in off mode: PDF/A conversion only via skip_text
            archive_path = Path(self.tempdir) / "archive.pdf"
            sidecar_file = Path(self.tempdir) / "sidecar.txt"
            args = self.construct_ocrmypdf_parameters(
                document_path,
                mime_type,
                archive_path,
                sidecar_file,
                skip_text=True,
            )
            try:
                self.log.debug(
                    f"Calling OCRmyPDF (off mode, PDF/A conversion only): {args}",
                )
                ocrmypdf.ocr(**args)
                self.archive_path = archive_path
                self.text = self.extract_text(None, archive_path) or text_original or ""
            except SubprocessOutputError as e:
                self._handle_subprocess_output_error(e)
            except Exception as e:
                raise ParseError(f"{e.__class__.__name__}: {e!s}") from e
            return

        # --- OCR_MODE=auto: skip ocrmypdf entirely if text exists and no archive needed ---
        if (
            self.settings.mode == ModeChoices.AUTO
            and original_has_text
            and not produce_archive
        ):
            self.log.debug(
                "Document has text and no archive requested; skipping OCRmyPDF entirely.",
            )
            self.text = text_original
            return

        # --- All other paths: run ocrmypdf ---
        archive_path = Path(self.tempdir) / "archive.pdf"
        sidecar_file = Path(self.tempdir) / "sidecar.txt"

        # auto mode with existing text: PDF/A conversion only (no OCR).
        skip_text = self.settings.mode == ModeChoices.AUTO and original_has_text

        if skip_text:
            self.log.debug(
                "OCR strategy: PDF/A conversion only (skip_text)"
                " — OCR_MODE=auto, document already has text",
            )
        else:
            self.log.debug("OCR strategy: full OCR — OCR_MODE=%s", self.settings.mode)

        args = self.construct_ocrmypdf_parameters(
            document_path,
            mime_type,
            archive_path,
            sidecar_file,
            skip_text=skip_text,
        )

        try:
            self.log.debug(f"Calling OCRmyPDF with args: {args}")
            ocrmypdf.ocr(**args)

            if produce_archive:
                if self.settings.skip_archive_file != ArchiveFileChoices.ALWAYS:
                    self.archive_path = archive_path

            self.text = self.extract_text(sidecar_file, archive_path)
@@ -583,8 +474,16 @@ class RasterisedDocumentParser:
            if original_has_text:
                self.text = text_original
        except SubprocessOutputError as e:
            self._handle_subprocess_output_error(e)
        except (NoTextFoundException, InputFileError, PriorOcrFoundError) as e:
            if "Ghostscript PDF/A rendering" in str(e):
                self.log.warning(
                    "Ghostscript PDF/A rendering failed, consider setting "
                    "PAPERLESS_OCR_USER_ARGS: '{\"continue_on_soft_render_error\": true}'",
                )

            raise ParseError(
                f"SubprocessOutputError: {e!s}. See logs for more information.",
            ) from e
        except (NoTextFoundException, InputFileError) as e:
            self.log.warning(
                f"Encountered an error while running OCR: {e!s}. "
                f"Attempting force OCR to get the text.",
@@ -593,6 +492,8 @@ class RasterisedDocumentParser:
            archive_path_fallback = Path(self.tempdir) / "archive-fallback.pdf"
            sidecar_file_fallback = Path(self.tempdir) / "sidecar-fallback.txt"

            # Attempt to run OCR with safe settings.

            args = self.construct_ocrmypdf_parameters(
                document_path,
                mime_type,
@@ -604,18 +505,25 @@ class RasterisedDocumentParser:
            try:
                self.log.debug(f"Fallback: Calling OCRmyPDF with args: {args}")
                ocrmypdf.ocr(**args)

                # Don't return the archived file here, since this file
                # is bigger and blurry due to --force-ocr.

                self.text = self.extract_text(
                    sidecar_file_fallback,
                    archive_path_fallback,
                )
                if produce_archive:
                    self.archive_path = archive_path_fallback

            except Exception as e:
                # If this fails, we have a serious issue at hand.
                raise ParseError(f"{e.__class__.__name__}: {e!s}") from e

        except Exception as e:
            # Anything else is probably serious.
            raise ParseError(f"{e.__class__.__name__}: {e!s}") from e

        # As a last resort, if we still don't have any text for any reason,
        # try to extract the text from the original document.
        if not self.text:
            if original_has_text:
                self.text = text_original
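
Condensed, the new parse() control flow for PDFs reads as follows (a schematic of the branch outcomes above, not the full method, and ignoring the image-specific off-mode path):

    # Sketch: the parse() decision ladder for PDFs, condensed.
    def plan_ocr(mode, has_text, produce_archive):
        """Return "skip", "convert_only", or "full_ocr" per the flow above."""
        if mode == "off":
            # never OCR; optionally wrap into PDF/A via skip_text
            return "convert_only" if produce_archive else "skip"
        if mode == "auto" and has_text:
            # text already present: run OCRmyPDF only to build the archive
            return "convert_only" if produce_archive else "skip"
        return "full_ocr"  # redo, force, or auto without text

    assert plan_ocr("auto", has_text=True, produce_archive=False) == "skip"
    assert plan_ocr("off", has_text=False, produce_archive=True) == "convert_only"
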
@@ -10,105 +10,15 @@ from __future__ import annotations

import logging
import re
import tempfile
from pathlib import Path
from typing import TYPE_CHECKING
from typing import Final

if TYPE_CHECKING:
    from pathlib import Path

    from paperless.parsers import MetadataEntry

logger = logging.getLogger("paperless.parsers.utils")

# Minimum character count for a PDF to be considered "born-digital" (has real text).
# Used by both the consumer (archive decision) and the tesseract parser (skip-OCR decision).
PDF_TEXT_MIN_LENGTH: Final[int] = 50


def is_tagged_pdf(
    path: Path,
    log: logging.Logger | None = None,
) -> bool:
    """Return True if the PDF declares itself as tagged (born-digital indicator).

    Tagged PDFs (e.g. exported from Word or LibreOffice) have ``/MarkInfo``
    with ``/Marked true`` in the document root. This is a reliable signal
    that the document has a logical structure and embedded text — running OCR
    on it is unnecessary and archive generation can be skipped.

    https://github.com/ocrmypdf/OCRmyPDF/blob/4e974ebd465a5921b2e79004f098f5d203010282/src/ocrmypdf/pdfinfo/info.py#L449

    Parameters
    ----------
    path:
        Absolute path to the PDF file.
    log:
        Logger for warnings. Falls back to the module-level logger when omitted.

    Returns
    -------
    bool
        ``True`` when the PDF is tagged, ``False`` otherwise or on any error.
    """
    import pikepdf

    _log = log or logger
    try:
        with pikepdf.open(path) as pdf:
            mark_info = pdf.Root.get("/MarkInfo")
            if mark_info is None:
                return False
            return bool(mark_info.get("/Marked", False))
    except Exception:
        _log.warning("Could not check PDF tag status for %s", path, exc_info=True)
        return False


def extract_pdf_text(
    path: Path,
    log: logging.Logger | None = None,
) -> str | None:
    """Run pdftotext on *path* and return the extracted text, or None on failure.

    Parameters
    ----------
    path:
        Absolute path to the PDF file.
    log:
        Logger for warnings. Falls back to the module-level logger when omitted.

    Returns
    -------
    str | None
        Extracted text, or ``None`` if pdftotext fails or the file is not a PDF.
    """
    from documents.utils import run_subprocess

    _log = log or logger
    try:
        with tempfile.TemporaryDirectory() as tmpdir:
            out_path = Path(tmpdir) / "text.txt"
            run_subprocess(
                [
                    "pdftotext",
                    "-q",
                    "-layout",
                    "-enc",
                    "UTF-8",
                    str(path),
                    str(out_path),
                ],
                logger=_log,
            )
            text = read_file_handle_unicode_errors(out_path, log=_log)
            return text or None
    except Exception:
        _log.warning(
            "Error while getting text from PDF document with pdftotext",
            exc_info=True,
        )
        return None


def read_file_handle_unicode_errors(
    filepath: Path,
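
Combined, the two removed helpers give the born-digital probe used by the consumer and the tesseract parser in this branch; a usage sketch built only from names that appear in the diff:

    # Sketch: the born-digital check assembled from the helpers above.
    from pathlib import Path

    from paperless.parsers.utils import PDF_TEXT_MIN_LENGTH
    from paperless.parsers.utils import extract_pdf_text
    from paperless.parsers.utils import is_tagged_pdf

    def looks_born_digital(pdf: Path) -> bool:
        if is_tagged_pdf(pdf):        # cheap structural check first
            return True
        text = extract_pdf_text(pdf)  # falls back to a pdftotext run
        return text is not None and len(text) > PDF_TEXT_MIN_LENGTH
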
@@ -889,20 +889,10 @@ OCR_LANGUAGE = os.getenv("PAPERLESS_OCR_LANGUAGE", "eng")
# OCRmyPDF --output-type options are available.
OCR_OUTPUT_TYPE = os.getenv("PAPERLESS_OCR_OUTPUT_TYPE", "pdfa")

if os.environ.get("PAPERLESS_OCR_MODE", "") in ("skip", "skip_noarchive"):
    OCR_MODE = "auto"
else:
    OCR_MODE = get_choice_from_env(
        "PAPERLESS_OCR_MODE",
        {"auto", "force", "redo", "off"},
        default="auto",
    )
# skip, redo, force
OCR_MODE = os.getenv("PAPERLESS_OCR_MODE", "skip")

ARCHIVE_FILE_GENERATION = get_choice_from_env(
    "PAPERLESS_ARCHIVE_FILE_GENERATION",
    {"auto", "always", "never"},
    default="auto",
)
OCR_SKIP_ARCHIVE_FILE = os.getenv("PAPERLESS_OCR_SKIP_ARCHIVE_FILE", "never")

OCR_IMAGE_DPI = get_int_from_env("PAPERLESS_OCR_IMAGE_DPI")
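
get_choice_from_env itself is not shown in this diff; a plausible sketch of a helper consistent with the call sites above (an assumption; the real helper lives elsewhere in settings.py and may instead raise on an invalid value rather than silently defaulting):

    # Sketch: a validating env reader matching the calls above. Assumed, not
    # the project's actual implementation.
    import os

    def get_choice_from_env(key: str, choices: set[str], default: str) -> str:
        value = os.getenv(key, default)
        return value if value in choices else default
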
@@ -708,7 +708,7 @@ def null_app_config(mocker: MockerFixture) -> MagicMock:
|
||||
pages=None,
|
||||
language=None,
|
||||
mode=None,
|
||||
archive_file_generation=None,
|
||||
skip_archive_file=None,
|
||||
image_dpi=None,
|
||||
unpaper_clean=None,
|
||||
deskew=None,
|
||||
|
||||
@@ -1,141 +0,0 @@
|
||||
"""
|
||||
Tests for RasterisedDocumentParser._convert_image_to_pdfa.
|
||||
|
||||
The method converts an image to a PDF/A-2b file using img2pdf (wrapping)
|
||||
then pikepdf (PDF/A metadata stamping), with a fallback to plain PDF when
|
||||
pikepdf stamping fails. No Tesseract or Ghostscript is invoked.
|
||||
|
||||
These are unit/integration tests: img2pdf and pikepdf run for real; only
|
||||
error-path branches mock the respective library call.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import img2pdf
|
||||
import magic
|
||||
import pikepdf
|
||||
import pytest
|
||||
|
||||
from documents.parsers import ParseError
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pytest_mock import MockerFixture
|
||||
|
||||
from paperless.parsers.tesseract import RasterisedDocumentParser
|
||||
|
||||
|
||||
class TestConvertImageToPdfa:
|
||||
"""_convert_image_to_pdfa: output shape, error paths, DPI handling."""
|
||||
|
||||
def test_valid_png_produces_pdf_bytes(
|
||||
self,
|
||||
tesseract_parser: RasterisedDocumentParser,
|
||||
simple_png_file: Path,
|
||||
) -> None:
|
||||
"""
|
||||
GIVEN: a valid PNG with DPI metadata
|
||||
WHEN: _convert_image_to_pdfa is called
|
||||
THEN: the returned file is non-empty and begins with the PDF magic bytes
|
||||
"""
|
||||
result = tesseract_parser._convert_image_to_pdfa(simple_png_file)
|
||||
|
||||
assert result.exists()
|
||||
assert magic.from_file(str(result), mime=True) == "application/pdf"
|
||||
|
||||
def test_output_path_is_archive_pdf_in_tempdir(
|
||||
self,
|
||||
tesseract_parser: RasterisedDocumentParser,
|
||||
simple_png_file: Path,
|
||||
) -> None:
|
||||
"""
|
||||
GIVEN: any valid image
|
||||
WHEN: _convert_image_to_pdfa is called
|
||||
THEN: the returned path is exactly <tempdir>/archive.pdf
|
||||
"""
|
||||
result = tesseract_parser._convert_image_to_pdfa(simple_png_file)
|
||||
|
||||
assert result == Path(tesseract_parser.tempdir) / "archive.pdf"
|
||||
|
||||
def test_img2pdf_failure_raises_parse_error(
|
||||
self,
|
||||
mocker: MockerFixture,
|
||||
tesseract_parser: RasterisedDocumentParser,
|
||||
simple_png_file: Path,
|
||||
) -> None:
|
||||
"""
|
||||
GIVEN: img2pdf.convert raises an exception
|
||||
WHEN: _convert_image_to_pdfa is called
|
||||
THEN: a ParseError is raised that mentions "img2pdf conversion failed"
|
||||
"""
|
||||
mocker.patch.object(img2pdf, "convert", side_effect=Exception("boom"))
|
||||
|
||||
with pytest.raises(ParseError, match="img2pdf conversion failed"):
|
||||
tesseract_parser._convert_image_to_pdfa(simple_png_file)
|
||||
|
||||
def test_pikepdf_stamping_failure_falls_back_to_plain_pdf(
|
||||
self,
|
||||
mocker: MockerFixture,
|
||||
tesseract_parser: RasterisedDocumentParser,
|
||||
simple_png_file: Path,
|
||||
) -> None:
|
||||
"""
|
||||
GIVEN: pikepdf.open raises during PDF/A metadata stamping
|
||||
WHEN: _convert_image_to_pdfa is called
|
||||
THEN: no exception is raised and the returned file is still a valid PDF
|
||||
(plain PDF bytes are used as fallback)
|
||||
"""
|
||||
mocker.patch.object(pikepdf, "open", side_effect=Exception("pikepdf boom"))
|
||||
|
||||
result = tesseract_parser._convert_image_to_pdfa(simple_png_file)
|
||||
|
||||
assert result.exists()
|
||||
assert magic.from_file(str(result), mime=True) == "application/pdf"
|
||||
|
||||
def test_image_dpi_setting_applies_fixed_dpi_layout(
|
||||
self,
|
||||
mocker: MockerFixture,
|
||||
tesseract_parser: RasterisedDocumentParser,
|
||||
simple_no_dpi_png_file: Path,
|
||||
) -> None:
|
||||
"""
|
||||
GIVEN: parser.settings.image_dpi = 150
|
||||
WHEN: _convert_image_to_pdfa is called with a no-DPI PNG
|
||||
THEN: img2pdf.get_fixed_dpi_layout_fun is called with (150, 150)
|
||||
and the output is still a valid PDF
|
||||
"""
|
||||
spy = mocker.patch.object(
|
||||
img2pdf,
|
||||
"get_fixed_dpi_layout_fun",
|
||||
wraps=img2pdf.get_fixed_dpi_layout_fun,
|
||||
)
|
||||
tesseract_parser.settings.image_dpi = 150
|
||||
|
||||
result = tesseract_parser._convert_image_to_pdfa(simple_no_dpi_png_file)
|
||||
|
||||
spy.assert_called_once_with((150, 150))
|
||||
assert magic.from_file(str(result), mime=True) == "application/pdf"
|
||||
|
||||
def test_no_image_dpi_setting_skips_fixed_dpi_layout(
|
||||
self,
|
||||
mocker: MockerFixture,
|
||||
tesseract_parser: RasterisedDocumentParser,
|
||||
simple_png_file: Path,
|
||||
) -> None:
|
||||
"""
|
||||
GIVEN: parser.settings.image_dpi is None (default)
|
||||
WHEN: _convert_image_to_pdfa is called
|
||||
THEN: img2pdf.get_fixed_dpi_layout_fun is never called
|
||||
"""
|
||||
spy = mocker.patch.object(
|
||||
img2pdf,
|
||||
"get_fixed_dpi_layout_fun",
|
||||
wraps=img2pdf.get_fixed_dpi_layout_fun,
|
||||
)
|
||||
tesseract_parser.settings.image_dpi = None
|
||||
|
||||
tesseract_parser._convert_image_to_pdfa(simple_png_file)
|
||||
|
||||
spy.assert_not_called()
|
||||
@@ -1,436 +0,0 @@
|
||||
"""
|
||||
Focused tests for RasterisedDocumentParser.parse() mode behaviour.
|
||||
|
||||
These tests mock ``ocrmypdf.ocr`` so they run without a real Tesseract/OCRmyPDF
|
||||
installation and execute quickly. The intent is to verify the *control flow*
|
||||
introduced by the ``produce_archive`` flag and the ``OCR_MODE=auto/off`` logic,
|
||||
not to test OCRmyPDF itself.
|
||||
|
||||
Fixtures are pulled from conftest.py in this package.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import pytest
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pytest_mock import MockerFixture
|
||||
|
||||
from paperless.parsers.tesseract import RasterisedDocumentParser
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_LONG_TEXT = "This is a test document with enough text. " * 5 # >50 chars
|
||||
_SHORT_TEXT = "Hi." # <50 chars
|
||||
|
||||
|
||||
def _make_extract_text(text: str | None):
|
||||
"""Return a side_effect function for ``extract_text`` that returns *text*."""
|
||||
|
||||
def _extract(sidecar_file, pdf_file):
|
||||
return text
|
||||
|
||||
return _extract
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# AUTO mode — PDF with sufficient text layer
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestAutoModeWithText:
|
||||
"""AUTO mode, original PDF has detectable text (>50 chars)."""
|
||||
|
||||
def test_auto_text_no_archive_skips_ocrmypdf(
|
||||
self,
|
||||
mocker: MockerFixture,
|
||||
tesseract_parser: RasterisedDocumentParser,
|
||||
simple_digital_pdf_file: Path,
|
||||
) -> None:
|
||||
"""
|
||||
GIVEN:
|
||||
- AUTO mode, produce_archive=False
|
||||
- PDF with text > VALID_TEXT_LENGTH
|
||||
WHEN:
|
||||
- parse() is called
|
||||
THEN:
|
||||
- ocrmypdf.ocr is NOT called (early return path)
|
||||
- archive_path remains None
|
||||
- text is set from the original
|
||||
"""
|
||||
# Patch extract_text to return long text (simulating detectable text layer)
|
||||
mocker.patch.object(
|
||||
tesseract_parser,
|
||||
"extract_text",
|
||||
return_value=_LONG_TEXT,
|
||||
)
|
||||
mock_ocr = mocker.patch("ocrmypdf.ocr")
|
||||
|
||||
tesseract_parser.settings.mode = "auto"
|
||||
tesseract_parser.parse(
|
||||
simple_digital_pdf_file,
|
||||
"application/pdf",
|
||||
produce_archive=False,
|
||||
)
|
||||
|
||||
mock_ocr.assert_not_called()
|
||||
assert tesseract_parser.archive_path is None
|
||||
assert tesseract_parser.get_text() == _LONG_TEXT
|
||||
|
||||
def test_auto_text_with_archive_calls_ocrmypdf_skip_text(
|
||||
self,
|
||||
mocker: MockerFixture,
|
||||
tesseract_parser: RasterisedDocumentParser,
|
||||
simple_digital_pdf_file: Path,
|
||||
) -> None:
|
||||
"""
|
||||
GIVEN:
|
||||
- AUTO mode, produce_archive=True
|
||||
- PDF with text > VALID_TEXT_LENGTH
|
||||
WHEN:
|
||||
- parse() is called
|
||||
THEN:
|
||||
- ocrmypdf.ocr IS called with skip_text=True
|
||||
- archive_path is set
|
||||
"""
|
||||
mocker.patch.object(
|
||||
tesseract_parser,
|
||||
"extract_text",
|
||||
return_value=_LONG_TEXT,
|
||||
)
|
||||
mock_ocr = mocker.patch("ocrmypdf.ocr")
|
||||
|
||||
tesseract_parser.settings.mode = "auto"
|
||||
tesseract_parser.parse(
|
||||
simple_digital_pdf_file,
|
||||
"application/pdf",
|
||||
produce_archive=True,
|
||||
)
|
||||
|
||||
mock_ocr.assert_called_once()
|
||||
call_kwargs = mock_ocr.call_args.kwargs
|
||||
assert call_kwargs.get("skip_text") is True
|
||||
assert "force_ocr" not in call_kwargs
|
||||
assert "redo_ocr" not in call_kwargs
|
||||
assert tesseract_parser.archive_path is not None
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
# AUTO mode — PDF without text layer (or too short)
# ---------------------------------------------------------------------------


class TestAutoModeNoText:
    """AUTO mode, original PDF has no detectable text (<= 50 chars)."""

    def test_auto_no_text_with_archive_calls_ocrmypdf_no_extra_flag(
        self,
        mocker: MockerFixture,
        tesseract_parser: RasterisedDocumentParser,
        multi_page_images_pdf_file: Path,
    ) -> None:
        """
        GIVEN:
            - AUTO mode, produce_archive=True
            - PDF with no text (or text <= VALID_TEXT_LENGTH)
        WHEN:
            - parse() is called
        THEN:
            - ocrmypdf.ocr IS called WITHOUT skip_text/force_ocr/redo_ocr
            - archive_path is set (since produce_archive=True)
        """
        # Return "no text" for the original; return real text for archive
        extract_call_count = 0

        def _extract_side(sidecar_file, pdf_file):
            nonlocal extract_call_count
            extract_call_count += 1
            if extract_call_count == 1:
                return None  # original has no text
            return _LONG_TEXT  # text from archive after OCR

        mocker.patch.object(tesseract_parser, "extract_text", side_effect=_extract_side)
        mock_ocr = mocker.patch("ocrmypdf.ocr")

        tesseract_parser.settings.mode = "auto"
        tesseract_parser.parse(
            multi_page_images_pdf_file,
            "application/pdf",
            produce_archive=True,
        )

        mock_ocr.assert_called_once()
        call_kwargs = mock_ocr.call_args.kwargs
        assert "skip_text" not in call_kwargs
        assert "force_ocr" not in call_kwargs
        assert "redo_ocr" not in call_kwargs
        assert tesseract_parser.archive_path is not None

    def test_auto_no_text_no_archive_calls_ocrmypdf(
        self,
        mocker: MockerFixture,
        tesseract_parser: RasterisedDocumentParser,
        multi_page_images_pdf_file: Path,
    ) -> None:
        """
        GIVEN:
            - AUTO mode, produce_archive=False
            - PDF with no text
        WHEN:
            - parse() is called
        THEN:
            - ocrmypdf.ocr IS called (no early return since no text detected)
            - archive_path is NOT set (produce_archive=False)
        """
        extract_call_count = 0

        def _extract_side(sidecar_file, pdf_file):
            nonlocal extract_call_count
            extract_call_count += 1
            if extract_call_count == 1:
                return None
            return _LONG_TEXT

        mocker.patch.object(tesseract_parser, "extract_text", side_effect=_extract_side)
        mock_ocr = mocker.patch("ocrmypdf.ocr")

        tesseract_parser.settings.mode = "auto"
        tesseract_parser.parse(
            multi_page_images_pdf_file,
            "application/pdf",
            produce_archive=False,
        )

        mock_ocr.assert_called_once()
        assert tesseract_parser.archive_path is None

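A note on the mocking pattern in the two tests above: unittest.mock also accepts an iterable for side_effect, which yields one return value per call and removes the need for the nonlocal counter. An equivalent, more compact setup using the same fixture names:

mocker.patch.object(
    tesseract_parser,
    "extract_text",
    side_effect=[None, _LONG_TEXT],  # first call: original (no text); second: archive
)
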
# ---------------------------------------------------------------------------
# OFF mode — PDF
# ---------------------------------------------------------------------------


class TestOffModePdf:
    """OCR_MODE=off, document is a PDF."""

    def test_off_no_archive_returns_pdftotext(
        self,
        mocker: MockerFixture,
        tesseract_parser: RasterisedDocumentParser,
        simple_digital_pdf_file: Path,
    ) -> None:
        """
        GIVEN:
            - OFF mode, produce_archive=False
            - PDF with text
        WHEN:
            - parse() is called
        THEN:
            - ocrmypdf.ocr is NOT called
            - archive_path is None
            - text comes from pdftotext (extract_text)
        """
        mocker.patch.object(
            tesseract_parser,
            "extract_text",
            return_value=_LONG_TEXT,
        )
        mock_ocr = mocker.patch("ocrmypdf.ocr")

        tesseract_parser.settings.mode = "off"
        tesseract_parser.parse(
            simple_digital_pdf_file,
            "application/pdf",
            produce_archive=False,
        )

        mock_ocr.assert_not_called()
        assert tesseract_parser.archive_path is None
        assert tesseract_parser.get_text() == _LONG_TEXT

    def test_off_with_archive_calls_ocrmypdf_skip_text(
        self,
        mocker: MockerFixture,
        tesseract_parser: RasterisedDocumentParser,
        simple_digital_pdf_file: Path,
    ) -> None:
        """
        GIVEN:
            - OFF mode, produce_archive=True
            - PDF document
        WHEN:
            - parse() is called
        THEN:
            - ocrmypdf.ocr IS called with skip_text=True (PDF/A conversion only)
            - archive_path is set
        """
        mocker.patch.object(
            tesseract_parser,
            "extract_text",
            return_value=_LONG_TEXT,
        )
        mock_ocr = mocker.patch("ocrmypdf.ocr")

        tesseract_parser.settings.mode = "off"
        tesseract_parser.parse(
            simple_digital_pdf_file,
            "application/pdf",
            produce_archive=True,
        )

        mock_ocr.assert_called_once()
        call_kwargs = mock_ocr.call_args.kwargs
        assert call_kwargs.get("skip_text") is True
        assert "force_ocr" not in call_kwargs
        assert "redo_ocr" not in call_kwargs
        assert tesseract_parser.archive_path is not None

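Taken together, the OFF-mode PDF tests describe a parser that never runs the OCR engine on PDFs: text always comes from pdftotext, and an archive, when requested, is a PDF/A conversion that leaves the existing text layer untouched. A hedged sketch of that branch, in the same style as the AUTO sketch above (names assumed from the tests):

def off_mode_pdf_plan(*, produce_archive: bool) -> dict:
    """Plan the OFF-mode tests assert for application/pdf inputs (a sketch)."""
    if not produce_archive:
        # Nothing beyond pdftotext extraction; ocrmypdf is never invoked.
        return {"run_ocrmypdf": False}
    # PDF/A conversion only; skip_text keeps the OCR engine out of the loop.
    return {"run_ocrmypdf": True, "skip_text": True}
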
# ---------------------------------------------------------------------------
# OFF mode — image
# ---------------------------------------------------------------------------


class TestOffModeImage:
    """OCR_MODE=off, document is an image (PNG)."""

    def test_off_image_no_archive_no_ocrmypdf(
        self,
        mocker: MockerFixture,
        tesseract_parser: RasterisedDocumentParser,
        simple_png_file: Path,
    ) -> None:
        """
        GIVEN:
            - OFF mode, produce_archive=False
            - Image document (PNG)
        WHEN:
            - parse() is called
        THEN:
            - ocrmypdf.ocr is NOT called
            - archive_path is None
            - text is empty string (images have no text layer)
        """
        mock_ocr = mocker.patch("ocrmypdf.ocr")

        tesseract_parser.settings.mode = "off"
        tesseract_parser.parse(simple_png_file, "image/png", produce_archive=False)

        mock_ocr.assert_not_called()
        assert tesseract_parser.archive_path is None
        assert tesseract_parser.get_text() == ""

    def test_off_image_with_archive_uses_img2pdf_path(
        self,
        mocker: MockerFixture,
        tesseract_parser: RasterisedDocumentParser,
        simple_png_file: Path,
    ) -> None:
        """
        GIVEN:
            - OFF mode, produce_archive=True
            - Image document (PNG)
        WHEN:
            - parse() is called
        THEN:
            - _convert_image_to_pdfa() is called instead of ocrmypdf.ocr
            - archive_path is set to the returned path
            - text is empty string
        """
        fake_archive = Path("/tmp/fake-archive.pdf")
        mock_convert = mocker.patch.object(
            tesseract_parser,
            "_convert_image_to_pdfa",
            return_value=fake_archive,
        )
        mock_ocr = mocker.patch("ocrmypdf.ocr")

        tesseract_parser.settings.mode = "off"
        tesseract_parser.parse(simple_png_file, "image/png", produce_archive=True)

        mock_convert.assert_called_once_with(simple_png_file)
        mock_ocr.assert_not_called()
        assert tesseract_parser.archive_path == fake_archive
        assert tesseract_parser.get_text() == ""

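_convert_image_to_pdfa is mocked in the test above, so only its name and single-path signature are visible here. One plausible implementation under that assumption uses img2pdf, which embeds the image in a PDF container without OCR; note that full PDF/A conformance would need additional metadata, so this sketch shows only the lossless embedding step.

from pathlib import Path

import img2pdf


def convert_image_to_pdf(image_path: Path, out_dir: Path) -> Path:
    """Hypothetical helper: wrap an image losslessly in a one-page PDF."""
    out_file = out_dir / (image_path.stem + ".pdf")
    out_file.write_bytes(img2pdf.convert(str(image_path)))
    return out_file
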
# ---------------------------------------------------------------------------
# produce_archive=False never sets archive_path for FORCE / REDO / AUTO modes
# ---------------------------------------------------------------------------


class TestProduceArchiveFalse:
    """Verify produce_archive=False never results in an archive regardless of mode."""

    @pytest.mark.parametrize("mode", ["force", "redo"])
    def test_produce_archive_false_force_redo_modes(
        self,
        mode: str,
        mocker: MockerFixture,
        tesseract_parser: RasterisedDocumentParser,
        multi_page_images_pdf_file: Path,
    ) -> None:
        """
        GIVEN:
            - FORCE or REDO mode, produce_archive=False
            - Any PDF
        WHEN:
            - parse() is called (ocrmypdf mocked to succeed)
        THEN:
            - archive_path is NOT set even though ocrmypdf ran
        """
        mocker.patch.object(
            tesseract_parser,
            "extract_text",
            return_value=_LONG_TEXT,
        )
        mocker.patch("ocrmypdf.ocr")

        tesseract_parser.settings.mode = mode
        tesseract_parser.parse(
            multi_page_images_pdf_file,
            "application/pdf",
            produce_archive=False,
        )

        assert tesseract_parser.archive_path is None
        assert tesseract_parser.get_text() is not None

    def test_produce_archive_false_auto_with_text(
        self,
        mocker: MockerFixture,
        tesseract_parser: RasterisedDocumentParser,
        simple_digital_pdf_file: Path,
    ) -> None:
        """
        GIVEN:
            - AUTO mode, produce_archive=False
            - PDF with text > VALID_TEXT_LENGTH
        WHEN:
            - parse() is called
        THEN:
            - ocrmypdf is skipped entirely (early return)
            - archive_path is None
        """
        mocker.patch.object(
            tesseract_parser,
            "extract_text",
            return_value=_LONG_TEXT,
        )
        mock_ocr = mocker.patch("ocrmypdf.ocr")

        tesseract_parser.settings.mode = "auto"
        tesseract_parser.parse(
            simple_digital_pdf_file,
            "application/pdf",
            produce_archive=False,
        )

        mock_ocr.assert_not_called()
        assert tesseract_parser.archive_path is None

@@ -94,35 +94,15 @@ class TestParserSettingsFromDb(DirectoriesMixin, FileSystemAssertsMixin, TestCas
        WHEN:
            - OCR parameters are constructed
        THEN:
            - Configuration from database is utilized (AUTO mode with skip_text=True
              triggers skip_text; AUTO mode alone does not add any extra flag)
            - Configuration from database is utilized
        """
        # AUTO mode with skip_text=True explicitly passed: skip_text is set
        with override_settings(OCR_MODE="redo"):
            instance = ApplicationConfiguration.objects.all().first()
            instance.mode = ModeChoices.AUTO
            instance.save()

            params = RasterisedDocumentParser(None).construct_ocrmypdf_parameters(
                input_file="input.pdf",
                output_file="output.pdf",
                sidecar_file="sidecar.txt",
                mime_type="application/pdf",
                safe_fallback=False,
                skip_text=True,
            )
            self.assertTrue(params["skip_text"])
            self.assertNotIn("redo_ocr", params)
            self.assertNotIn("force_ocr", params)

        # AUTO mode alone (no skip_text): no extra OCR flag is set
        with override_settings(OCR_MODE="redo"):
            instance = ApplicationConfiguration.objects.all().first()
            instance.mode = ModeChoices.AUTO
            instance.mode = ModeChoices.SKIP
            instance.save()

            params = self.get_params()
            self.assertNotIn("skip_text", params)
            self.assertTrue(params["skip_text"])
            self.assertNotIn("redo_ocr", params)
            self.assertNotIn("force_ocr", params)

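On both sides of this hunk, construct_ocrmypdf_parameters maps the configured mode (plus an optional skip_text override) onto at most one ocrmypdf flag. The mapping, restated as a standalone function inferred from the assertions in this file; the real method builds many more options, so treat this as a summary, not the source:

def mode_flags(mode: str, *, skip_text: bool = False) -> dict:
    """Inferred mode-to-ocrmypdf-kwargs mapping (assumption, from the asserts)."""
    if mode == "force":
        return {"force_ocr": True}
    if mode == "redo":
        return {"redo_ocr": True}
    if mode == "skip" or skip_text:
        # skip mode, or auto mode once a text layer has been detected
        return {"skip_text": True}
    return {}  # auto mode with no detected text: no extra flag
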
@@ -370,26 +370,15 @@ class TestParsePdf:
        tesseract_parser: RasterisedDocumentParser,
        tesseract_samples_dir: Path,
    ) -> None:
        """
        GIVEN:
            - Multi-page digital PDF with sufficient text layer
            - Default settings (mode=auto, produce_archive=True)
        WHEN:
            - Document is parsed
        THEN:
            - Archive is created (AUTO mode + text present + produce_archive=True
              → PDF/A conversion via skip_text)
            - Text is extracted
        """
        tesseract_parser.parse(
            tesseract_samples_dir / "multi-page-digital.pdf",
            tesseract_samples_dir / "simple-digital.pdf",
            "application/pdf",
        )
        assert tesseract_parser.archive_path is not None
        assert tesseract_parser.archive_path.is_file()
        assert_ordered_substrings(
            tesseract_parser.get_text().lower(),
            ["page 1", "page 2", "page 3"],
            tesseract_parser.get_text(),
            ["This is a test document."],
        )

    def test_with_form_default(

@@ -408,7 +397,7 @@ class TestParsePdf:
            ["Please enter your name in here:", "This is a PDF document with a form."],
        )

    def test_with_form_redo_no_archive_when_not_requested(
    def test_with_form_redo_produces_no_archive(
        self,
        tesseract_parser: RasterisedDocumentParser,
        tesseract_samples_dir: Path,

@@ -417,7 +406,6 @@ class TestParsePdf:
        tesseract_parser.parse(
            tesseract_samples_dir / "with-form.pdf",
            "application/pdf",
            produce_archive=False,
        )
        assert tesseract_parser.archive_path is None
        assert_ordered_substrings(
@@ -445,7 +433,7 @@ class TestParsePdf:
        tesseract_parser: RasterisedDocumentParser,
        tesseract_samples_dir: Path,
    ) -> None:
        tesseract_parser.settings.mode = "auto"
        tesseract_parser.settings.mode = "skip"
        tesseract_parser.parse(tesseract_samples_dir / "signed.pdf", "application/pdf")
        assert tesseract_parser.archive_path is None
        assert_ordered_substrings(

@@ -461,7 +449,7 @@ class TestParsePdf:
        tesseract_parser: RasterisedDocumentParser,
        tesseract_samples_dir: Path,
    ) -> None:
        tesseract_parser.settings.mode = "auto"
        tesseract_parser.settings.mode = "skip"
        tesseract_parser.parse(
            tesseract_samples_dir / "encrypted.pdf",
            "application/pdf",

@@ -571,7 +559,7 @@ class TestParseMultiPage:
    @pytest.mark.parametrize(
        "mode",
        [
            pytest.param("auto", id="auto"),
            pytest.param("skip", id="skip"),
            pytest.param("redo", id="redo"),
            pytest.param("force", id="force"),
        ],

@@ -599,7 +587,7 @@ class TestParseMultiPage:
        tesseract_parser: RasterisedDocumentParser,
        tesseract_samples_dir: Path,
    ) -> None:
        tesseract_parser.settings.mode = "auto"
        tesseract_parser.settings.mode = "skip"
        tesseract_parser.parse(
            tesseract_samples_dir / "multi-page-images.pdf",
            "application/pdf",
@@ -747,18 +735,16 @@ class TestSkipArchive:
        """
        GIVEN:
            - File with existing text layer
            - Mode: auto, produce_archive=False
            - Mode: skip_noarchive
        WHEN:
            - Document is parsed
        THEN:
            - Text extracted from original; no archive created (text exists +
              produce_archive=False skips OCRmyPDF entirely)
            - Text extracted; no archive created
        """
        tesseract_parser.settings.mode = "auto"
        tesseract_parser.settings.mode = "skip_noarchive"
        tesseract_parser.parse(
            tesseract_samples_dir / "multi-page-digital.pdf",
            "application/pdf",
            produce_archive=False,
        )
        assert tesseract_parser.archive_path is None
        assert_ordered_substrings(

@@ -774,13 +760,13 @@ class TestSkipArchive:
        """
        GIVEN:
            - File with image-only pages (no text layer)
            - Mode: auto, skip_archive_file: auto
            - Mode: skip_noarchive
        WHEN:
            - Document is parsed
        THEN:
            - Text extracted; archive created (OCR needed, no existing text)
            - Text extracted; archive created (OCR needed)
        """
        tesseract_parser.settings.mode = "auto"
        tesseract_parser.settings.mode = "skip_noarchive"
        tesseract_parser.parse(
            tesseract_samples_dir / "multi-page-images.pdf",
            "application/pdf",
@@ -792,58 +778,41 @@ class TestSkipArchive:
        )

    @pytest.mark.parametrize(
        ("produce_archive", "filename", "expect_archive"),
        ("skip_archive_file", "filename", "expect_archive"),
        [
            pytest.param("never", "multi-page-digital.pdf", True, id="never-with-text"),
            pytest.param("never", "multi-page-images.pdf", True, id="never-no-text"),
            pytest.param(
                True,
                "multi-page-digital.pdf",
                True,
                id="produce-archive-with-text",
            ),
            pytest.param(
                True,
                "multi-page-images.pdf",
                True,
                id="produce-archive-no-text",
            ),
            pytest.param(
                False,
                "with_text",
                "multi-page-digital.pdf",
                False,
                id="no-archive-with-text-layer",
                id="with-text-layer",
            ),
            pytest.param(
                False,
                "with_text",
                "multi-page-images.pdf",
                False,
                id="no-archive-no-text-layer",
                True,
                id="with-text-no-layer",
            ),
            pytest.param(
                "always",
                "multi-page-digital.pdf",
                False,
                id="always-with-text",
            ),
            pytest.param("always", "multi-page-images.pdf", False, id="always-no-text"),
        ],
    )
    def test_produce_archive_flag(
    def test_skip_archive_file_setting(
        self,
        produce_archive: bool,  # noqa: FBT001
        skip_archive_file: str,
        filename: str,
        expect_archive: bool,  # noqa: FBT001
        expect_archive: str,
        tesseract_parser: RasterisedDocumentParser,
        tesseract_samples_dir: Path,
    ) -> None:
        """
        GIVEN:
            - Various PDFs (with and without text layers)
            - produce_archive flag set to True or False
        WHEN:
            - Document is parsed
        THEN:
            - archive_path is set if and only if produce_archive=True
            - Text is always extracted
        """
        tesseract_parser.settings.mode = "auto"
        tesseract_parser.parse(
            tesseract_samples_dir / filename,
            "application/pdf",
            produce_archive=produce_archive,
        )
        tesseract_parser.settings.skip_archive_file = skip_archive_file
        tesseract_parser.parse(tesseract_samples_dir / filename, "application/pdf")
        text = tesseract_parser.get_text().lower()
        assert_ordered_substrings(text, ["page 1", "page 2", "page 3"])
        if expect_archive:
@@ -851,59 +820,6 @@ class TestSkipArchive:
        else:
            assert tesseract_parser.archive_path is None

    def test_tagged_pdf_skips_ocr_in_auto_mode(
        self,
        mocker: MockerFixture,
        tesseract_parser: RasterisedDocumentParser,
        tesseract_samples_dir: Path,
    ) -> None:
        """
        GIVEN:
            - A tagged PDF (e.g. exported from Word, /MarkInfo /Marked true)
            - Mode: auto, produce_archive=False
        WHEN:
            - Document is parsed
        THEN:
            - OCRmyPDF is not invoked (tagged ⇒ original_has_text=True)
            - Text is extracted from the original via pdftotext
            - No archive is produced
        """
        tesseract_parser.settings.mode = "auto"
        mock_ocr = mocker.patch("ocrmypdf.ocr")
        tesseract_parser.parse(
            tesseract_samples_dir / "simple-digital.pdf",
            "application/pdf",
            produce_archive=False,
        )
        mock_ocr.assert_not_called()
        assert tesseract_parser.archive_path is None
        assert tesseract_parser.get_text()

    def test_tagged_pdf_produces_pdfa_archive_without_ocr(
        self,
        tesseract_parser: RasterisedDocumentParser,
        tesseract_samples_dir: Path,
    ) -> None:
        """
        GIVEN:
            - A tagged PDF (e.g. exported from Word, /MarkInfo /Marked true)
            - Mode: auto, produce_archive=True
        WHEN:
            - Document is parsed
        THEN:
            - OCRmyPDF runs with skip_text (PDF/A conversion only, no OCR)
            - Archive is produced
            - Text is preserved from the original
        """
        tesseract_parser.settings.mode = "auto"
        tesseract_parser.parse(
            tesseract_samples_dir / "simple-digital.pdf",
            "application/pdf",
            produce_archive=True,
        )
        assert tesseract_parser.archive_path is not None
        assert tesseract_parser.get_text()

# ---------------------------------------------------------------------------
# Parse — mixed pages / sidecar

@@ -919,13 +835,13 @@ class TestParseMixed:
        """
        GIVEN:
            - File with text in some pages (image) and some pages (digital)
            - Mode: auto (skip_text), skip_archive_file: always
            - Mode: skip
        WHEN:
            - Document is parsed
        THEN:
            - All pages extracted; archive created; sidecar notes skipped pages
        """
        tesseract_parser.settings.mode = "auto"
        tesseract_parser.settings.mode = "skip"
        tesseract_parser.parse(
            tesseract_samples_dir / "multi-page-mixed.pdf",
            "application/pdf",

@@ -982,18 +898,17 @@ class TestParseMixed:
    ) -> None:
        """
        GIVEN:
            - File with mixed pages (some with text, some image-only)
            - Mode: auto, produce_archive=False
            - File with mixed pages
            - Mode: skip_noarchive
        WHEN:
            - Document is parsed
        THEN:
            - No archive created (produce_archive=False); text from text layer present
            - No archive created (file has text layer); later-page text present
        """
        tesseract_parser.settings.mode = "auto"
        tesseract_parser.settings.mode = "skip_noarchive"
        tesseract_parser.parse(
            tesseract_samples_dir / "multi-page-mixed.pdf",
            "application/pdf",
            produce_archive=False,
        )
        assert tesseract_parser.archive_path is None
        assert_ordered_substrings(

@@ -1008,12 +923,12 @@ class TestParseMixed:


class TestParseRotate:
    def test_rotate_auto_mode(
    def test_rotate_skip_mode(
        self,
        tesseract_parser: RasterisedDocumentParser,
        tesseract_samples_dir: Path,
    ) -> None:
        tesseract_parser.settings.mode = "auto"
        tesseract_parser.settings.mode = "skip"
        tesseract_parser.settings.rotate = True
        tesseract_parser.parse(tesseract_samples_dir / "rotated.pdf", "application/pdf")
        assert_ordered_substrings(
@@ -1040,19 +955,12 @@ class TestParseRtl:
    ) -> None:
        """
        GIVEN:
            - PDF with RTL Arabic text in its text layer (short: 18 chars)
            - mode=off, produce_archive=True: PDF/A conversion via skip_text, no OCR engine
            - PDF with RTL Arabic text
        WHEN:
            - Document is parsed
        THEN:
            - Arabic content is extracted from the PDF text layer (normalised for bidi)

        Note: The RTL PDF has a short text layer (< VALID_TEXT_LENGTH=50) so AUTO mode
        would attempt full OCR, which fails due to PriorOcrFoundError and falls back to
        force-ocr with English Tesseract (producing garbage). Using mode="off" forces
        skip_text=True so the Arabic text layer is preserved through PDF/A conversion.
            - Arabic content is extracted (normalised for bidi)
        """
        tesseract_parser.settings.mode = "off"
        tesseract_parser.parse(
            tesseract_samples_dir / "rtl-test.pdf",
            "application/pdf",

@@ -1115,11 +1023,11 @@ class TestOcrmypdfParameters:
        assert ("clean" in params) == expected_clean
        assert ("clean_final" in params) == expected_clean_final

    def test_clean_final_auto_mode(
    def test_clean_final_skip_mode(
        self,
        make_tesseract_parser: MakeTesseractParser,
    ) -> None:
        with make_tesseract_parser(OCR_CLEAN="clean-final", OCR_MODE="auto") as parser:
        with make_tesseract_parser(OCR_CLEAN="clean-final", OCR_MODE="skip") as parser:
            params = parser.construct_ocrmypdf_parameters("", "", "", "")
        assert params["clean_final"] is True
        assert "clean" not in params

@@ -1136,9 +1044,9 @@ class TestOcrmypdfParameters:
    @pytest.mark.parametrize(
        ("ocr_mode", "ocr_deskew", "expect_deskew"),
        [
            pytest.param("auto", True, True, id="auto-deskew-on"),
            pytest.param("skip", True, True, id="skip-deskew-on"),
            pytest.param("redo", True, False, id="redo-deskew-off"),
            pytest.param("auto", False, False, id="auto-no-deskew"),
            pytest.param("skip", False, False, id="skip-no-deskew"),
        ],
    )
    def test_deskew_option(

@@ -132,13 +132,13 @@ class TestOcrSettingsChecks:
            pytest.param(
                "OCR_MODE",
                "skip_noarchive",
                'OCR output mode "skip_noarchive"',
                id="deprecated-mode-now-invalid",
                "deprecated",
                id="deprecated-mode",
            ),
            pytest.param(
                "ARCHIVE_FILE_GENERATION",
                "OCR_SKIP_ARCHIVE_FILE",
                "invalid",
                'PAPERLESS_ARCHIVE_FILE_GENERATION setting "invalid"',
                'OCR_SKIP_ARCHIVE_FILE setting "invalid"',
                id="invalid-skip-archive-file",
            ),
            pytest.param(

@@ -1,64 +0,0 @@
"""Tests for v3 system checks: deprecated v2 OCR env var warnings."""

from __future__ import annotations

import os
from typing import TYPE_CHECKING

import pytest

from paperless.checks import check_deprecated_v2_ocr_env_vars

if TYPE_CHECKING:
    from pytest_mock import MockerFixture


class TestDeprecatedV2OcrEnvVarWarnings:
    def test_no_deprecated_vars_returns_empty(self, mocker: MockerFixture) -> None:
        """No warnings when neither deprecated variable is set."""
        mocker.patch.dict(os.environ, {"PAPERLESS_OCR_MODE": "auto"}, clear=True)
        result = check_deprecated_v2_ocr_env_vars(None)
        assert result == []

    @pytest.mark.parametrize(
        ("env_var", "env_value", "expected_id", "expected_fragment"),
        [
            pytest.param(
                "PAPERLESS_OCR_SKIP_ARCHIVE_FILE",
                "always",
                "paperless.W002",
                "PAPERLESS_OCR_SKIP_ARCHIVE_FILE",
                id="skip-archive-file-warns",
            ),
            pytest.param(
                "PAPERLESS_OCR_MODE",
                "skip",
                "paperless.W003",
                "skip",
                id="ocr-mode-skip-warns",
            ),
            pytest.param(
                "PAPERLESS_OCR_MODE",
                "skip_noarchive",
                "paperless.W003",
                "skip_noarchive",
                id="ocr-mode-skip-noarchive-warns",
            ),
        ],
    )
    def test_deprecated_var_produces_one_warning(
        self,
        mocker: MockerFixture,
        env_var: str,
        env_value: str,
        expected_id: str,
        expected_fragment: str,
    ) -> None:
        """Each deprecated setting in isolation produces exactly one warning."""
        mocker.patch.dict(os.environ, {env_var: env_value}, clear=True)
        result = check_deprecated_v2_ocr_env_vars(None)

        assert len(result) == 1
        warning = result[0]
        assert warning.id == expected_id
        assert expected_fragment in warning.msg
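For readers following the diff, the deleted tests fully describe the check's observable behaviour: warning id paperless.W002 when PAPERLESS_OCR_SKIP_ARCHIVE_FILE is set, and paperless.W003 when PAPERLESS_OCR_MODE is "skip" or "skip_noarchive". A sketch of a check with that shape; the django.core.checks usage and the message wording are assumptions:

import os

from django.core.checks import Warning


def check_deprecated_v2_ocr_env_vars(app_configs, **kwargs):
    """Warn about deprecated v2 OCR environment variables (a sketch)."""
    warnings = []
    if "PAPERLESS_OCR_SKIP_ARCHIVE_FILE" in os.environ:
        warnings.append(
            Warning(
                "PAPERLESS_OCR_SKIP_ARCHIVE_FILE is deprecated; use "
                "PAPERLESS_ARCHIVE_FILE_GENERATION instead.",
                id="paperless.W002",
            ),
        )
    if os.environ.get("PAPERLESS_OCR_MODE") in {"skip", "skip_noarchive"}:
        mode = os.environ["PAPERLESS_OCR_MODE"]
        warnings.append(
            Warning(
                f'OCR mode "{mode}" is deprecated; use "auto" instead.',
                id="paperless.W003",
            ),
        )
    return warnings
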
@@ -1,89 +0,0 @@
from documents.tests.utils import TestMigrations


class TestMigrateSkipArchiveFile(TestMigrations):
    migrate_from = "0007_optimize_integer_field_sizes"
    migrate_to = "0008_replace_skip_archive_file"

    def setUpBeforeMigration(self, apps):
        ApplicationConfiguration = apps.get_model(
            "paperless",
            "ApplicationConfiguration",
        )
        ApplicationConfiguration.objects.all().delete()
        ApplicationConfiguration.objects.create(
            pk=1,
            mode="skip",
            skip_archive_file="always",
        )
        ApplicationConfiguration.objects.create(
            pk=2,
            mode="redo",
            skip_archive_file="with_text",
        )
        ApplicationConfiguration.objects.create(
            pk=3,
            mode="force",
            skip_archive_file="never",
        )
        ApplicationConfiguration.objects.create(
            pk=4,
            mode="skip_noarchive",
            skip_archive_file=None,
        )
        ApplicationConfiguration.objects.create(
            pk=5,
            mode="skip_noarchive",
            skip_archive_file="never",
        )
        ApplicationConfiguration.objects.create(pk=6, mode=None, skip_archive_file=None)

    def _get_config(self, pk):
        ApplicationConfiguration = self.apps.get_model(
            "paperless",
            "ApplicationConfiguration",
        )
        return ApplicationConfiguration.objects.get(pk=pk)

    def test_skip_mapped_to_auto(self):
        config = self._get_config(1)
        assert config.mode == "auto"

    def test_skip_archive_always_mapped_to_never(self):
        config = self._get_config(1)
        assert config.archive_file_generation == "never"

    def test_redo_unchanged(self):
        config = self._get_config(2)
        assert config.mode == "redo"

    def test_skip_archive_with_text_mapped_to_auto(self):
        config = self._get_config(2)
        assert config.archive_file_generation == "auto"

    def test_force_unchanged(self):
        config = self._get_config(3)
        assert config.mode == "force"

    def test_skip_archive_never_mapped_to_always(self):
        config = self._get_config(3)
        assert config.archive_file_generation == "always"

    def test_skip_noarchive_mapped_to_auto(self):
        config = self._get_config(4)
        assert config.mode == "auto"

    def test_skip_noarchive_implies_archive_never(self):
        config = self._get_config(4)
        assert config.archive_file_generation == "never"

    def test_skip_noarchive_explicit_skip_archive_takes_precedence(self):
        """skip_archive_file=never maps to always, not overridden by skip_noarchive."""
        config = self._get_config(5)
        assert config.mode == "auto"
        assert config.archive_file_generation == "always"

    def test_null_values_remain_null(self):
        config = self._get_config(6)
        assert config.mode is None
        assert config.archive_file_generation is None
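These assertions pin down the forward mapping that 0008_replace_skip_archive_file must apply. Restated as a standalone function (the dictionary form is mine; only the value pairs come from the tests above):

_MODE_MAP = {"skip": "auto", "skip_noarchive": "auto"}
_ARCHIVE_MAP = {"always": "never", "with_text": "auto", "never": "always"}


def map_v2_config(mode, skip_archive_file):
    """Map old (mode, skip_archive_file) to new (mode, archive_file_generation)."""
    new_mode = _MODE_MAP.get(mode, mode)
    if skip_archive_file is not None:
        # An explicit skip_archive_file value takes precedence (see pk=5 above).
        new_archive = _ARCHIVE_MAP[skip_archive_file]
    elif mode == "skip_noarchive":
        # The old mode implied "never produce an archive" (see pk=4 above).
        new_archive = "never"
    else:
        new_archive = None
    return new_mode, new_archive
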
@@ -1,66 +0,0 @@
"""Tests for OcrConfig archive_file_generation field behavior."""

from __future__ import annotations

from typing import TYPE_CHECKING

import pytest
from django.test import override_settings

from paperless.config import OcrConfig

if TYPE_CHECKING:
    from unittest.mock import MagicMock


@pytest.fixture()
def null_app_config(mocker) -> MagicMock:
    """Mock ApplicationConfiguration with all fields None → falls back to Django settings."""
    return mocker.MagicMock(
        output_type=None,
        pages=None,
        language=None,
        mode=None,
        archive_file_generation=None,
        image_dpi=None,
        unpaper_clean=None,
        deskew=None,
        rotate_pages=None,
        rotate_pages_threshold=None,
        max_image_pixels=None,
        color_conversion_strategy=None,
        user_args=None,
    )


@pytest.fixture()
def make_ocr_config(mocker, null_app_config):
    mocker.patch(
        "paperless.config.BaseConfig._get_config_instance",
        return_value=null_app_config,
    )

    def _make(**django_settings_overrides):
        with override_settings(**django_settings_overrides):
            return OcrConfig()

    return _make


class TestOcrConfigArchiveFileGeneration:
    def test_auto_from_settings(self, make_ocr_config) -> None:
        cfg = make_ocr_config(OCR_MODE="auto", ARCHIVE_FILE_GENERATION="auto")
        assert cfg.archive_file_generation == "auto"

    def test_always_from_settings(self, make_ocr_config) -> None:
        cfg = make_ocr_config(ARCHIVE_FILE_GENERATION="always")
        assert cfg.archive_file_generation == "always"

    def test_never_from_settings(self, make_ocr_config) -> None:
        cfg = make_ocr_config(ARCHIVE_FILE_GENERATION="never")
        assert cfg.archive_file_generation == "never"

    def test_db_value_overrides_setting(self, make_ocr_config, null_app_config) -> None:
        null_app_config.archive_file_generation = "never"
        cfg = make_ocr_config(ARCHIVE_FILE_GENERATION="always")
        assert cfg.archive_file_generation == "never"
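The fixture chain above tests a single rule: a non-None database value wins over the Django setting. A minimal sketch of that fallback, reduced to the one field under test; the constructor shape is an assumption, only the attribute and setting names come from the tests:

from django.conf import settings


class ArchiveFileGenerationConfig:
    """Hypothetical reduction of OcrConfig to the field tested above."""

    def __init__(self, app_config) -> None:
        # Database value (if set) overrides the ARCHIVE_FILE_GENERATION setting.
        db_value = app_config.archive_file_generation
        self.archive_file_generation = (
            db_value if db_value is not None else settings.ARCHIVE_FILE_GENERATION
        )
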
@@ -1,25 +0,0 @@
"""Tests for paperless.parsers.utils helpers."""

from __future__ import annotations

from pathlib import Path

from paperless.parsers.utils import is_tagged_pdf

SAMPLES = Path(__file__).parent / "samples" / "tesseract"


class TestIsTaggedPdf:
    def test_tagged_pdf_returns_true(self) -> None:
        assert is_tagged_pdf(SAMPLES / "simple-digital.pdf") is True

    def test_untagged_pdf_returns_false(self) -> None:
        assert is_tagged_pdf(SAMPLES / "multi-page-images.pdf") is False

    def test_nonexistent_path_returns_false(self) -> None:
        assert is_tagged_pdf(Path("/nonexistent/file.pdf")) is False

    def test_corrupt_pdf_returns_false(self, tmp_path: Path) -> None:
        bad = tmp_path / "bad.pdf"
        bad.write_bytes(b"not a pdf")
        assert is_tagged_pdf(bad) is False
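The deleted tests specify is_tagged_pdf completely: a tagged PDF returns True, an untagged one False, and any missing or unreadable input False. One way to satisfy that contract with pikepdf, reading the /MarkInfo /Marked flag mentioned earlier in this diff; the pikepdf usage is an assumption, not the actual helper source:

from pathlib import Path

import pikepdf


def is_tagged_pdf(path: Path) -> bool:
    """Return True if the PDF catalog carries /MarkInfo /Marked true (a sketch)."""
    try:
        with pikepdf.open(path) as pdf:
            mark_info = pdf.Root.get("/MarkInfo")
            return bool(mark_info is not None and mark_info.get("/Marked", False))
    except Exception:
        # Missing, corrupt, or unreadable files count as untagged.
        return False
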
@@ -89,7 +89,7 @@ class StandardPagination(PageNumberPagination):

        query = self.page.paginator.object_list
        if isinstance(query, TantivyRelevanceList):
            return [h["id"] for h in query._hits]
        return query.get_all_ids()
        return self.page.paginator.object_list.values_list("pk", flat=True)

    def get_paginated_response_schema(self, schema):