mirror of https://github.com/paperless-ngx/paperless-ngx.git
synced 2026-01-02 14:28:14 -06:00

Compare commits

3 Commits: feature-re ... feature-tr

| Author | SHA1 | Date |
|---|---|---|
| | 61e0bd7eb6 | |
| | d127361411 | |
| | d45dee6d39 | |
@@ -1804,23 +1804,3 @@ password. All of these options come from their similarly-named [Django settings]

#### [`PAPERLESS_EMAIL_USE_SSL=<bool>`](#PAPERLESS_EMAIL_USE_SSL) {#PAPERLESS_EMAIL_USE_SSL}

: Defaults to false.

## Remote OCR

#### [`PAPERLESS_REMOTE_OCR_ENGINE=<str>`](#PAPERLESS_REMOTE_OCR_ENGINE) {#PAPERLESS_REMOTE_OCR_ENGINE}

: The remote OCR engine to use. Currently only Azure AI is supported as "azureai".

    Defaults to None, which disables remote OCR.

#### [`PAPERLESS_REMOTE_OCR_API_KEY=<str>`](#PAPERLESS_REMOTE_OCR_API_KEY) {#PAPERLESS_REMOTE_OCR_API_KEY}

: The API key to use for the remote OCR engine.

    Defaults to None.

#### [`PAPERLESS_REMOTE_OCR_ENDPOINT=<str>`](#PAPERLESS_REMOTE_OCR_ENDPOINT) {#PAPERLESS_REMOTE_OCR_ENDPOINT}

: The endpoint to use for the remote OCR engine. This is required for Azure AI.

    Defaults to None.

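For orientation, here is a minimal sketch (not part of the change itself) of how these three settings combine; the same rule is enforced by `paperless_remote.checks` and `RemoteEngineConfig.engine_is_valid()` further down in this diff:

```python
import os

# Read the settings the same way paperless/settings.py does below.
engine = os.getenv("PAPERLESS_REMOTE_OCR_ENGINE")      # e.g. "azureai", or unset
api_key = os.getenv("PAPERLESS_REMOTE_OCR_API_KEY")
endpoint = os.getenv("PAPERLESS_REMOTE_OCR_ENDPOINT")

if engine is None:
    print("Remote OCR disabled; documents stay with the local OCR engine.")
elif engine == "azureai" and api_key and endpoint:
    print(f"Remote OCR via Azure AI at {endpoint}")
else:
    print("Incomplete remote OCR configuration; the system check will report an error.")
```
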
@@ -25,10 +25,9 @@ physical documents into a searchable online archive so you can keep, well, _less

## Features

- **Organize and index** your scanned documents with tags, correspondents, types, and more.
- _Your_ data is stored locally on _your_ server and is never transmitted or shared in any way, unless you explicitly choose to do so.
- _Your_ data is stored locally on _your_ server and is never transmitted or shared in any way.
- Performs **OCR** on your documents, adding searchable and selectable text, even to documents scanned with only images.
- Utilizes the open-source Tesseract engine to recognize more than 100 languages.
- _New!_ Supports remote OCR with Azure AI (opt-in).
- Utilizes the open-source Tesseract engine to recognize more than 100 languages.
- Documents are saved as PDF/A format which is designed for long term storage, alongside the unaltered originals.
- Uses machine-learning to automatically add tags, correspondents and document types to your documents.
- Supports PDF documents, images, plain text files, Office documents (Word, Excel, PowerPoint, and LibreOffice equivalents)[^1] and more.

@@ -901,21 +901,6 @@ how regularly you intend to scan documents and use paperless.

    performed the task associated with the document, move it to the
    inbox.

## Remote OCR

!!! important

    This feature is disabled by default and will always remain strictly "opt-in".

Paperless-ngx supports performing OCR on documents using remote services. At the moment, this is limited to
[Microsoft's Azure "Document Intelligence" service](https://azure.microsoft.com/en-us/products/ai-services/ai-document-intelligence).
This is of course a paid service (with a free tier) which requires an Azure account and subscription. Azure AI is not affiliated with
Paperless-ngx in any way. When enabled, Paperless-ngx will automatically send appropriate documents to Azure for OCR processing, bypassing
the local OCR engine. See the [configuration](configuration.md#PAPERLESS_REMOTE_OCR_ENGINE) options for more details.

Additionally, when using a commercial service with this feature, consider both potential costs as well as any associated file size
or page limitations (e.g. with a free tier).
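
Concretely, "appropriate documents" are those whose mime type the remote parser advertises, and it advertises nothing until the engine, API key and endpoint are all set. A rough sketch, assuming a configured Django shell (the same call appears in `paperless_remote.signals` further down):

```python
from paperless_remote.parsers import RemoteDocumentParser

# With an incomplete configuration this returns {}, so no document is routed remotely.
parser = RemoteDocumentParser(None)
mime_types = parser.supported_mime_types()

if mime_types:
    print("Remote OCR handles:", ", ".join(sorted(mime_types)))
else:
    print("Remote OCR not configured; local Tesseract OCR handles everything.")
```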

## Architecture

Paperless-ngx consists of the following components:

@@ -16,7 +16,6 @@ classifiers = [

# This will allow testing to not install a webserver, mysql, etc

dependencies = [
  "azure-ai-documentintelligence>=1.0.2",
  "babel>=2.17",
  "bleach~=6.3.0",
  "celery[redis]~=5.5.1",
@@ -254,7 +253,6 @@ testpaths = [
  "src/paperless_tesseract/tests/",
  "src/paperless_tika/tests",
  "src/paperless_text/tests/",
  "src/paperless_remote/tests/",
]
addopts = [
  "--pythonwarnings=all",

139 scripts/tag_perf_probe.py Normal file
@@ -0,0 +1,139 @@
# noqa: INP001

"""
Ad-hoc script to gauge Tag + treenode performance locally.

It bootstraps a fresh SQLite DB in a temp folder (or PAPERLESS_DATA_DIR),
uses locmem cache/redis to avoid external services, creates synthetic tags,
and measures:
- creation time
- query count and wall time for the Tag list view

Usage:
    PAPERLESS_DEBUG=1 PAPERLESS_REDIS=locmem:// PYTHONPATH=src \
    PAPERLESS_DATA_DIR=/tmp/paperless-tags-probe \
    .venv/bin/python scripts/tag_perf_probe.py
"""

import os
import sys
import time
from collections.abc import Iterable
from contextlib import contextmanager

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "paperless.settings")
os.environ.setdefault("PAPERLESS_DEBUG", "1")
os.environ.setdefault("PAPERLESS_REDIS", "locmem://")
os.environ.setdefault("PYTHONPATH", "src")

import django

django.setup()

from django.contrib.auth import get_user_model  # noqa: E402
from django.core.management import call_command  # noqa: E402
from django.db import connection  # noqa: E402
from django.test.client import RequestFactory  # noqa: E402
from rest_framework.test import force_authenticate  # noqa: E402
from treenode.signals import no_signals  # noqa: E402

from documents.models import Tag  # noqa: E402
from documents.views import TagViewSet  # noqa: E402

User = get_user_model()


@contextmanager
def count_queries():
    total = 0

    def wrapper(execute, sql, params, many, context):
        nonlocal total
        total += 1
        return execute(sql, params, many, context)

    with connection.execute_wrapper(wrapper):
        yield lambda: total


def measure_list(tag_count: int, user) -> tuple[int, float]:
    """Render Tag list with page_size=tag_count and return (queries, seconds)."""
    rf = RequestFactory()
    view = TagViewSet.as_view({"get": "list"})
    request = rf.get("/api/tags/", {"page_size": tag_count})
    force_authenticate(request, user=user)

    with count_queries() as get_count:
        start = time.perf_counter()
        response = view(request)
        response.render()
        elapsed = time.perf_counter() - start
        total_queries = get_count()

    return total_queries, elapsed


def bulk_create_tags(count: int, parents: Iterable[Tag] | None = None) -> None:
    """Create tags; when parents provided, create one child per parent."""
    if parents is None:
        Tag.objects.bulk_create([Tag(name=f"Flat {i}") for i in range(count)])
        return

    children = []
    for p in parents:
        children.append(Tag(name=f"Child {p.id}", tn_parent=p))
    Tag.objects.bulk_create(children)


def run():
    # Ensure tables exist when pointing at a fresh DATA_DIR.
    call_command("migrate", interactive=False, verbosity=0)

    user, _ = User.objects.get_or_create(
        username="admin",
        defaults={"is_superuser": True, "is_staff": True},
    )

    # Flat scenario
    Tag.objects.all().delete()
    start = time.perf_counter()
    bulk_create_tags(200)
    flat_create = time.perf_counter() - start
    q, t = measure_list(tag_count=200, user=user)
    print(f"Flat create 200 -> {flat_create:.2f}s; list -> {q} queries, {t:.2f}s")  # noqa: T201

    # Nested scenario (parents + 2 children each => 600 total)
    Tag.objects.all().delete()
    start = time.perf_counter()
    with no_signals():  # avoid per-save tree rebuild; rebuild once
        parents = Tag.objects.bulk_create([Tag(name=f"Parent {i}") for i in range(200)])
        children = []
        for p in parents:
            children.extend(
                Tag(name=f"Child {p.id}-{j}", tn_parent=p) for j in range(2)
            )
        Tag.objects.bulk_create(children)
    Tag.update_tree()
    nested_create = time.perf_counter() - start
    q, t = measure_list(tag_count=600, user=user)
    print(f"Nested create 600 -> {nested_create:.2f}s; list -> {q} queries, {t:.2f}s")  # noqa: T201

    # Larger nested scenario (1 child per parent, 3000 total)
    Tag.objects.all().delete()
    start = time.perf_counter()
    with no_signals():
        parents = Tag.objects.bulk_create(
            [Tag(name=f"Parent {i}") for i in range(1500)],
        )
        bulk_create_tags(0, parents=parents)
    Tag.update_tree()
    big_create = time.perf_counter() - start
    q, t = measure_list(tag_count=3000, user=user)
    print(f"Nested create 3000 -> {big_create:.2f}s; list -> {q} queries, {t:.2f}s")  # noqa: T201


if __name__ == "__main__":
    if "runserver" in sys.argv:
        print("Run directly: .venv/bin/python scripts/tag_perf_probe.py")  # noqa: T201
        sys.exit(1)
    run()

@@ -1,5 +1,9 @@
from django.apps import AppConfig
from django.db.models.signals import post_delete
from django.db.models.signals import post_save
from django.utils.translation import gettext_lazy as _
from treenode.signals import post_delete_treenode
from treenode.signals import post_save_treenode


class DocumentsConfig(AppConfig):
@@ -8,12 +12,14 @@ class DocumentsConfig(AppConfig):
    verbose_name = _("Documents")

    def ready(self):
        from documents.models import Tag
        from documents.signals import document_consumption_finished
        from documents.signals import document_updated
        from documents.signals.handlers import add_inbox_tags
        from documents.signals.handlers import add_to_index
        from documents.signals.handlers import run_workflows_added
        from documents.signals.handlers import run_workflows_updated
        from documents.signals.handlers import schedule_tag_tree_update
        from documents.signals.handlers import set_correspondent
        from documents.signals.handlers import set_document_type
        from documents.signals.handlers import set_storage_path
@@ -28,6 +34,29 @@ class DocumentsConfig(AppConfig):
        document_consumption_finished.connect(run_workflows_added)
        document_updated.connect(run_workflows_updated)

        # treenode updates the entire tree on every save/delete via hooks
        # so disconnect for Tags and run once-per-transaction.
        post_save.disconnect(
            post_save_treenode,
            sender=Tag,
            dispatch_uid="post_save_treenode",
        )
        post_delete.disconnect(
            post_delete_treenode,
            sender=Tag,
            dispatch_uid="post_delete_treenode",
        )
        post_save.connect(
            schedule_tag_tree_update,
            sender=Tag,
            dispatch_uid="paperless_tag_mark_dirty_save",
        )
        post_delete.connect(
            schedule_tag_tree_update,
            sender=Tag,
            dispatch_uid="paperless_tag_mark_dirty_delete",
        )

        import documents.schema  # noqa: F401

        AppConfig.ready(self)

@@ -580,30 +580,34 @@ class TagSerializer(MatchingModelSerializer, OwnedObjectSerializer):
        ),
    )
    def get_children(self, obj):
        filter_q = self.context.get("document_count_filter")
        request = self.context.get("request")
        if filter_q is None:
            user = getattr(request, "user", None) if request else None
            filter_q = get_document_count_filter_for_user(user)
            self.context["document_count_filter"] = filter_q
        children_map = self.context.get("children_map")
        if children_map is not None:
            children = children_map.get(obj.pk, [])
        else:
            filter_q = self.context.get("document_count_filter")
            request = self.context.get("request")
            if filter_q is None:
                user = getattr(request, "user", None) if request else None
                filter_q = get_document_count_filter_for_user(user)
                self.context["document_count_filter"] = filter_q

        children_queryset = (
            obj.get_children_queryset()
            .select_related("owner")
            .annotate(document_count=Count("documents", filter=filter_q))
        )
            children = (
                obj.get_children_queryset()
                .select_related("owner")
                .annotate(document_count=Count("documents", filter=filter_q))
            )

        view = self.context.get("view")
        ordering = (
            OrderingFilter().get_ordering(request, children_queryset, view)
            if request and view
            else None
        )
        ordering = ordering or (Lower("name"),)
        children_queryset = children_queryset.order_by(*ordering)
            view = self.context.get("view")
            ordering = (
                OrderingFilter().get_ordering(request, children, view)
                if request and view
                else None
            )
            ordering = ordering or (Lower("name"),)
            children = children.order_by(*ordering)

        serializer = TagSerializer(
            children_queryset,
            children,
            many=True,
            user=self.user,
            full_perms=self.full_perms,

@@ -19,6 +19,7 @@ from django.db import DatabaseError
from django.db import close_old_connections
from django.db import connections
from django.db import models
from django.db import transaction
from django.db.models import Q
from django.dispatch import receiver
from django.utils import timezone
@@ -60,6 +61,8 @@ if TYPE_CHECKING:

logger = logging.getLogger("paperless.handlers")

_tag_tree_update_scheduled = False


def add_inbox_tags(sender, document: Document, logging_group=None, **kwargs):
    if document.owner is not None:
@@ -944,3 +947,26 @@ def close_connection_pool_on_worker_init(**kwargs):
    for conn in connections.all(initialized_only=True):
        if conn.alias == "default" and hasattr(conn, "pool") and conn.pool:
            conn.close_pool()


def schedule_tag_tree_update(**_kwargs):
    """
    Schedule a single Tag.update_tree() at transaction commit.

    Treenode's default post_save hooks rebuild the entire tree on every save,
    which is very slow for large tag sets so collapse to one update per
    transaction.
    """
    global _tag_tree_update_scheduled
    if _tag_tree_update_scheduled:
        return
    _tag_tree_update_scheduled = True

    def _run():
        global _tag_tree_update_scheduled
        try:
            Tag.update_tree()
        finally:
            _tag_tree_update_scheduled = False

    transaction.on_commit(_run)

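Sketched below is the effect as it might look in a Django shell (the tag names are made up; the tests that follow exercise the same behaviour): any number of Tag saves inside one atomic block result in a single deferred `Tag.update_tree()` when the transaction commits.

```python
from django.db import transaction

from documents.models import Tag

with transaction.atomic():
    parent = Tag.objects.create(name="Invoices")
    for year in (2023, 2024, 2025):
        Tag.objects.create(name=f"Invoices {year}", tn_parent=parent)
    # schedule_tag_tree_update() fired once per save, but registered only a
    # single on_commit callback; the tree is rebuilt exactly once at commit.

parent.refresh_from_db()
print(parent.tn_children_count)  # tree fields are populated after commit (expected: 3)
```
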
@@ -1,6 +1,7 @@
from unittest import mock

from django.contrib.auth.models import User
from django.db import transaction
from rest_framework.test import APITestCase

from documents import bulk_edit
@@ -10,6 +11,7 @@ from documents.models import Workflow
from documents.models import WorkflowAction
from documents.models import WorkflowTrigger
from documents.serialisers import TagSerializer
from documents.signals import handlers
from documents.signals.handlers import run_workflows

@@ -250,3 +252,31 @@ class TestTagHierarchy(APITestCase):
            row for row in response.data["results"] if row["id"] == self.parent.pk
        )
        assert any(child["id"] == self.child.pk for child in parent_entry["children"])

    def test_tag_tree_deferred_update_runs_on_commit(self):
        from django.db import transaction

        # Create tags inside an explicit transaction and commit.
        with transaction.atomic():
            parent = Tag.objects.create(name="Parent 2")
            child = Tag.objects.create(name="Child 2", tn_parent=parent)
        # After commit, tn_* fields should be populated.
        parent.refresh_from_db()
        child.refresh_from_db()
        assert parent.tn_children_count == 1
        assert child.tn_ancestors_count == 1

    def test_tag_tree_update_runs_once_per_transaction(self):
        handlers._tag_tree_update_scheduled = False

        with mock.patch("documents.signals.handlers.Tag.update_tree") as update_tree:
            with self.captureOnCommitCallbacks(execute=True) as callbacks:
                with transaction.atomic():
                    handlers.schedule_tag_tree_update()
                    handlers.schedule_tag_tree_update()
                    update_tree.assert_not_called()
                    assert handlers._tag_tree_update_scheduled is True

            assert len(callbacks) == 1
            update_tree.assert_called_once()
            assert handlers._tag_tree_update_scheduled is False

@@ -448,8 +448,43 @@ class TagViewSet(ModelViewSet, PermissionsAwareDocumentCountMixin):
    def get_serializer_context(self):
        context = super().get_serializer_context()
        context["document_count_filter"] = self.get_document_count_filter()
        if hasattr(self, "_children_map"):
            context["children_map"] = self._children_map
        return context

    def list(self, request, *args, **kwargs):
        """
        Build a children map once to avoid per-parent queries in the serializer.
        """
        queryset = self.filter_queryset(self.get_queryset())
        ordering = OrderingFilter().get_ordering(request, queryset, self) or (
            Lower("name"),
        )
        queryset = queryset.order_by(*ordering)

        all_tags = list(queryset)
        descendant_pks = {pk for tag in all_tags for pk in tag.get_descendants_pks()}

        if descendant_pks:
            filter_q = self.get_document_count_filter()
            children_source = (
                Tag.objects.filter(pk__in=descendant_pks | {t.pk for t in all_tags})
                .select_related("owner")
                .annotate(document_count=Count("documents", filter=filter_q))
                .order_by(*ordering)
            )
        else:
            children_source = all_tags

        children_map = {}
        for tag in children_source:
            children_map.setdefault(tag.tn_parent_id, []).append(tag)
        self._children_map = children_map

        page = self.paginate_queryset(queryset)
        serializer = self.get_serializer(page, many=True)
        return self.get_paginated_response(serializer.data)

    def perform_update(self, serializer):
        old_parent = self.get_object().get_parent()
        tag = serializer.save()

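The data handed to the serializer is just a parent-id to children mapping, so `get_children()` becomes a dictionary lookup instead of a query per parent. A self-contained sketch (`FakeTag` is a hypothetical stand-in for `documents.models.Tag`; the real view groups the annotated queryset the same way with `setdefault`):

```python
from collections import namedtuple

# Hypothetical stand-in for documents.models.Tag, just for illustration.
FakeTag = namedtuple("FakeTag", ["pk", "name", "tn_parent_id"])

tags = [
    FakeTag(1, "Parent", None),
    FakeTag(2, "Child A", 1),
    FakeTag(3, "Child B", 1),
]

# Same grouping as TagViewSet.list() above: one pass, keyed by tn_parent_id.
children_map: dict[int | None, list[FakeTag]] = {}
for tag in tags:
    children_map.setdefault(tag.tn_parent_id, []).append(tag)

# The serializer then resolves children with a lookup instead of a per-parent query.
print([t.name for t in children_map.get(1, [])])  # ['Child A', 'Child B']
```
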
@@ -322,7 +322,6 @@ INSTALLED_APPS = [
    "paperless_tesseract.apps.PaperlessTesseractConfig",
    "paperless_text.apps.PaperlessTextConfig",
    "paperless_mail.apps.PaperlessMailConfig",
    "paperless_remote.apps.PaperlessRemoteParserConfig",
    "django.contrib.admin",
    "rest_framework",
    "rest_framework.authtoken",
@@ -1403,10 +1402,3 @@ WEBHOOKS_ALLOW_INTERNAL_REQUESTS = __get_boolean(
    "PAPERLESS_WEBHOOKS_ALLOW_INTERNAL_REQUESTS",
    "true",
)

###############################################################################
# Remote Parser                                                               #
###############################################################################
REMOTE_OCR_ENGINE = os.getenv("PAPERLESS_REMOTE_OCR_ENGINE")
REMOTE_OCR_API_KEY = os.getenv("PAPERLESS_REMOTE_OCR_API_KEY")
REMOTE_OCR_ENDPOINT = os.getenv("PAPERLESS_REMOTE_OCR_ENDPOINT")

@@ -1,4 +0,0 @@
# this is here so that django finds the checks.
from paperless_remote.checks import check_remote_parser_configured

__all__ = ["check_remote_parser_configured"]
@@ -1,14 +0,0 @@
from django.apps import AppConfig

from paperless_remote.signals import remote_consumer_declaration


class PaperlessRemoteParserConfig(AppConfig):
    name = "paperless_remote"

    def ready(self):
        from documents.signals import document_consumer_declaration

        document_consumer_declaration.connect(remote_consumer_declaration)

        AppConfig.ready(self)
@@ -1,17 +0,0 @@
from django.conf import settings
from django.core.checks import Error
from django.core.checks import register


@register()
def check_remote_parser_configured(app_configs, **kwargs):
    if settings.REMOTE_OCR_ENGINE == "azureai" and not (
        settings.REMOTE_OCR_ENDPOINT and settings.REMOTE_OCR_API_KEY
    ):
        return [
            Error(
                "Azure AI remote parser requires endpoint and API key to be configured.",
            ),
        ]

    return []
@@ -1,118 +0,0 @@
from pathlib import Path

from django.conf import settings

from paperless_tesseract.parsers import RasterisedDocumentParser


class RemoteEngineConfig:
    def __init__(
        self,
        engine: str,
        api_key: str | None = None,
        endpoint: str | None = None,
    ):
        self.engine = engine
        self.api_key = api_key
        self.endpoint = endpoint

    def engine_is_valid(self):
        valid = self.engine in ["azureai"] and self.api_key is not None
        if self.engine == "azureai":
            valid = valid and self.endpoint is not None
        return valid


class RemoteDocumentParser(RasterisedDocumentParser):
    """
    This parser uses a remote OCR engine to parse documents. Currently, it supports Azure AI Vision
    as this is the only service that provides a remote OCR API with text-embedded PDF output.
    """

    logging_name = "paperless.parsing.remote"

    def get_settings(self) -> RemoteEngineConfig:
        """
        Returns the configuration for the remote OCR engine, loaded from Django settings.
        """
        return RemoteEngineConfig(
            engine=settings.REMOTE_OCR_ENGINE,
            api_key=settings.REMOTE_OCR_API_KEY,
            endpoint=settings.REMOTE_OCR_ENDPOINT,
        )

    def supported_mime_types(self):
        if self.settings.engine_is_valid():
            return {
                "application/pdf": ".pdf",
                "image/png": ".png",
                "image/jpeg": ".jpg",
                "image/tiff": ".tiff",
                "image/bmp": ".bmp",
                "image/gif": ".gif",
                "image/webp": ".webp",
            }
        else:
            return {}

    def azure_ai_vision_parse(
        self,
        file: Path,
    ) -> str | None:
        """
        Uses Azure AI Vision to parse the document and return the text content.
        It requests a searchable PDF output with embedded text.
        The PDF is saved to the archive_path attribute.
        Returns the text content extracted from the document.
        If the parsing fails, it returns None.
        """
        from azure.ai.documentintelligence import DocumentIntelligenceClient
        from azure.ai.documentintelligence.models import AnalyzeDocumentRequest
        from azure.ai.documentintelligence.models import AnalyzeOutputOption
        from azure.ai.documentintelligence.models import DocumentContentFormat
        from azure.core.credentials import AzureKeyCredential

        client = DocumentIntelligenceClient(
            endpoint=self.settings.endpoint,
            credential=AzureKeyCredential(self.settings.api_key),
        )

        try:
            with file.open("rb") as f:
                analyze_request = AnalyzeDocumentRequest(bytes_source=f.read())
                poller = client.begin_analyze_document(
                    model_id="prebuilt-read",
                    body=analyze_request,
                    output_content_format=DocumentContentFormat.TEXT,
                    output=[AnalyzeOutputOption.PDF],  # request searchable PDF output
                    content_type="application/json",
                )

            poller.wait()
            result_id = poller.details["operation_id"]
            result = poller.result()

            # Download the PDF with embedded text
            self.archive_path = self.tempdir / "archive.pdf"
            with self.archive_path.open("wb") as f:
                for chunk in client.get_analyze_result_pdf(
                    model_id="prebuilt-read",
                    result_id=result_id,
                ):
                    f.write(chunk)
            return result.content
        except Exception as e:
            self.log.error(f"Azure AI Vision parsing failed: {e}")
        finally:
            client.close()

        return None

    def parse(self, document_path: Path, mime_type, file_name=None):
        if not self.settings.engine_is_valid():
            self.log.warning(
                "No valid remote parser engine is configured, content will be empty.",
            )
            self.text = ""
        elif self.settings.engine == "azureai":
            self.text = self.azure_ai_vision_parse(document_path)
@@ -1,18 +0,0 @@
def get_parser(*args, **kwargs):
    from paperless_remote.parsers import RemoteDocumentParser

    return RemoteDocumentParser(*args, **kwargs)


def get_supported_mime_types():
    from paperless_remote.parsers import RemoteDocumentParser

    return RemoteDocumentParser(None).supported_mime_types()


def remote_consumer_declaration(sender, **kwargs):
    return {
        "parser": get_parser,
        "weight": 5,
        "mime_types": get_supported_mime_types(),
    }
Binary file not shown.
@@ -1,24 +0,0 @@
from unittest import TestCase

from django.test import override_settings

from paperless_remote import check_remote_parser_configured


class TestChecks(TestCase):
    @override_settings(REMOTE_OCR_ENGINE=None)
    def test_no_engine(self):
        msgs = check_remote_parser_configured(None)
        self.assertEqual(len(msgs), 0)

    @override_settings(REMOTE_OCR_ENGINE="azureai")
    @override_settings(REMOTE_OCR_API_KEY="somekey")
    @override_settings(REMOTE_OCR_ENDPOINT=None)
    def test_azure_no_endpoint(self):
        msgs = check_remote_parser_configured(None)
        self.assertEqual(len(msgs), 1)
        self.assertTrue(
            msgs[0].msg.startswith(
                "Azure AI remote parser requires endpoint and API key to be configured.",
            ),
        )
@@ -1,128 +0,0 @@
import uuid
from pathlib import Path
from unittest import mock

from django.test import TestCase
from django.test import override_settings

from documents.tests.utils import DirectoriesMixin
from documents.tests.utils import FileSystemAssertsMixin
from paperless_remote.parsers import RemoteDocumentParser
from paperless_remote.signals import get_parser


class TestParser(DirectoriesMixin, FileSystemAssertsMixin, TestCase):
    SAMPLE_FILES = Path(__file__).resolve().parent / "samples"

    def assertContainsStrings(self, content: str, strings: list[str]):
        # Asserts that all strings appear in content, in the given order.
        indices = []
        for s in strings:
            if s in content:
                indices.append(content.index(s))
            else:
                self.fail(f"'{s}' is not in '{content}'")
        self.assertListEqual(indices, sorted(indices))

    @mock.patch("paperless_tesseract.parsers.run_subprocess")
    @mock.patch("azure.ai.documentintelligence.DocumentIntelligenceClient")
    def test_get_text_with_azure(self, mock_client_cls, mock_subprocess):
        # Arrange mock Azure client
        mock_client = mock.Mock()
        mock_client_cls.return_value = mock_client

        # Simulate poller result and its `.details`
        mock_poller = mock.Mock()
        mock_poller.wait.return_value = None
        mock_poller.details = {"operation_id": "fake-op-id"}
        mock_client.begin_analyze_document.return_value = mock_poller
        mock_poller.result.return_value.content = "This is a test document."

        # Return dummy PDF bytes
        mock_client.get_analyze_result_pdf.return_value = [
            b"%PDF-",
            b"1.7 ",
            b"FAKEPDF",
        ]

        # Simulate pdftotext by writing dummy text to sidecar file
        def fake_run(cmd, *args, **kwargs):
            with Path(cmd[-1]).open("w", encoding="utf-8") as f:
                f.write("This is a test document.")

        mock_subprocess.side_effect = fake_run

        with override_settings(
            REMOTE_OCR_ENGINE="azureai",
            REMOTE_OCR_API_KEY="somekey",
            REMOTE_OCR_ENDPOINT="https://endpoint.cognitiveservices.azure.com",
        ):
            parser = get_parser(uuid.uuid4())
            parser.parse(
                self.SAMPLE_FILES / "simple-digital.pdf",
                "application/pdf",
            )

            self.assertContainsStrings(
                parser.text.strip(),
                ["This is a test document."],
            )

    @mock.patch("azure.ai.documentintelligence.DocumentIntelligenceClient")
    def test_get_text_with_azure_error_logged_and_returns_none(self, mock_client_cls):
        mock_client = mock.Mock()
        mock_client.begin_analyze_document.side_effect = RuntimeError("fail")
        mock_client_cls.return_value = mock_client

        with override_settings(
            REMOTE_OCR_ENGINE="azureai",
            REMOTE_OCR_API_KEY="somekey",
            REMOTE_OCR_ENDPOINT="https://endpoint.cognitiveservices.azure.com",
        ):
            parser = get_parser(uuid.uuid4())
            with mock.patch.object(parser.log, "error") as mock_log_error:
                parser.parse(
                    self.SAMPLE_FILES / "simple-digital.pdf",
                    "application/pdf",
                )

            self.assertIsNone(parser.text)
            mock_client.begin_analyze_document.assert_called_once()
            mock_client.close.assert_called_once()
            mock_log_error.assert_called_once()
            self.assertIn(
                "Azure AI Vision parsing failed",
                mock_log_error.call_args[0][0],
            )

    @override_settings(
        REMOTE_OCR_ENGINE="azureai",
        REMOTE_OCR_API_KEY="key",
        REMOTE_OCR_ENDPOINT="https://endpoint.cognitiveservices.azure.com",
    )
    def test_supported_mime_types_valid_config(self):
        parser = RemoteDocumentParser(uuid.uuid4())
        expected_types = {
            "application/pdf": ".pdf",
            "image/png": ".png",
            "image/jpeg": ".jpg",
            "image/tiff": ".tiff",
            "image/bmp": ".bmp",
            "image/gif": ".gif",
            "image/webp": ".webp",
        }
        self.assertEqual(parser.supported_mime_types(), expected_types)

    def test_supported_mime_types_invalid_config(self):
        parser = get_parser(uuid.uuid4())
        self.assertEqual(parser.supported_mime_types(), {})

    @override_settings(
        REMOTE_OCR_ENGINE=None,
        REMOTE_OCR_API_KEY=None,
        REMOTE_OCR_ENDPOINT=None,
    )
    def test_parse_with_invalid_config(self):
        parser = get_parser(uuid.uuid4())
        parser.parse(self.SAMPLE_FILES / "simple-digital.pdf", "application/pdf")
        self.assertEqual(parser.text, "")
39 uv.lock generated
@@ -95,34 +95,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/02/ff/1175b0b7371e46244032d43a56862d0af455823b5280a50c63d99cc50f18/automat-25.4.16-py3-none-any.whl", hash = "sha256:04e9bce696a8d5671ee698005af6e5a9fa15354140a87f4870744604dcdd3ba1", size = 42842, upload-time = "2025-04-16T20:12:14.447Z" },
]

[[package]]
name = "azure-ai-documentintelligence"
version = "1.0.2"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "azure-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
    { name = "isodate", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
    { name = "typing-extensions", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/44/7b/8115cd713e2caa5e44def85f2b7ebd02a74ae74d7113ba20bdd41fd6dd80/azure_ai_documentintelligence-1.0.2.tar.gz", hash = "sha256:4d75a2513f2839365ebabc0e0e1772f5601b3a8c9a71e75da12440da13b63484", size = 170940 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/d9/75/c9ec040f23082f54ffb1977ff8f364c2d21c79a640a13d1c1809e7fd6b1a/azure_ai_documentintelligence-1.0.2-py3-none-any.whl", hash = "sha256:e1fb446abbdeccc9759d897898a0fe13141ed29f9ad11fc705f951925822ed59", size = 106005 },
]

[[package]]
name = "azure-core"
version = "1.33.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "requests", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
    { name = "six", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
    { name = "typing-extensions", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/75/aa/7c9db8edd626f1a7d99d09ef7926f6f4fb34d5f9fa00dc394afdfe8e2a80/azure_core-1.33.0.tar.gz", hash = "sha256:f367aa07b5e3005fec2c1e184b882b0b039910733907d001c20fb08ebb8c0eb9", size = 295633 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/07/b7/76b7e144aa53bd206bf1ce34fa75350472c3f69bf30e5c8c18bc9881035d/azure_core-1.33.0-py3-none-any.whl", hash = "sha256:9b5b6d0223a1d38c37500e6971118c1e0f13f54951e6893968b38910bc9cda8f", size = 207071 },
]

[[package]]
name = "babel"
version = "2.17.0"
@@ -1479,15 +1451,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/c7/fc/4e5a141c3f7c7bed550ac1f69e599e92b6be449dd4677ec09f325cad0955/inotifyrecursive-0.3.5-py3-none-any.whl", hash = "sha256:7e5f4a2e1dc2bef0efa3b5f6b339c41fb4599055a2b54909d020e9e932cc8d2f", size = 8009, upload-time = "2020-11-20T12:38:46.981Z" },
]

[[package]]
name = "isodate"
version = "0.7.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/54/4d/e940025e2ce31a8ce1202635910747e5a87cc3a6a6bb2d00973375014749/isodate-0.7.2.tar.gz", hash = "sha256:4cd1aa0f43ca76f4a6c6c0292a85f40b35ec2e43e315b59f06e6d32171a953e6", size = 29705 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/15/aa/0aca39a37d3c7eb941ba736ede56d689e7be91cab5d9ca846bde3999eba6/isodate-0.7.2-py3-none-any.whl", hash = "sha256:28009937d8031054830160fce6d409ed342816b543597cece116d966c6d99e15", size = 22320 },
]

[[package]]
name = "jinja2"
version = "3.1.6"
@@ -2155,7 +2118,6 @@ name = "paperless-ngx"
version = "2.20.3"
source = { virtual = "." }
dependencies = [
    { name = "azure-ai-documentintelligence", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
    { name = "babel", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
    { name = "bleach", marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
    { name = "celery", extra = ["redis"], marker = "sys_platform == 'darwin' or sys_platform == 'linux'" },
@@ -2293,7 +2255,6 @@ typing = [

[package.metadata]
requires-dist = [
    { name = "azure-ai-documentintelligence", specifier = ">=1.0.2" },
    { name = "babel", specifier = ">=2.17" },
    { name = "bleach", specifier = "~=6.3.0" },
    { name = "celery", extras = ["redis"], specifier = "~=5.5.1" },