Compare commits


1 Commit

Author: dependabot[bot]
SHA1: 2c94a8c717
Message: Chore(deps): Bump filelock from 3.20.0 to 3.20.3
Bumps [filelock](https://github.com/tox-dev/py-filelock) from 3.20.0 to 3.20.3.
- [Release notes](https://github.com/tox-dev/py-filelock/releases)
- [Changelog](https://github.com/tox-dev/filelock/blob/main/docs/changelog.rst)
- [Commits](https://github.com/tox-dev/py-filelock/compare/3.20.0...3.20.3)

---
updated-dependencies:
- dependency-name: filelock
  dependency-version: 3.20.3
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Date: 2026-01-13 23:03:54 +00:00
3 changed files with 317 additions and 455 deletions
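
Note that the pyproject.toml pin for filelock stays at ~=3.20.0: PEP 440's compatible-release operator already admits the 3.20.3 patch release, so only the lockfile has to record the new version. A minimal sketch of the specifier semantics, using the packaging library; the version numbers come from the diff below, everything else is illustrative:

# Compatible-release (~=) versus exact (==) pins, per PEP 440.
from packaging.specifiers import SpecifierSet

compatible = SpecifierSet("~=3.20.0")  # equivalent to: >=3.20.0, ==3.20.*
print(compatible.contains("3.20.3"))  # True  -> patch bump needs no pyproject.toml edit
print(compatible.contains("3.21.0"))  # False -> minor bump would require a pin change

exact = SpecifierSet("==3.2.12")  # exact pins (psycopg below) accept a single version
print(exact.contains("3.2.13"))  # False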

pyproject.toml

@@ -28,7 +28,7 @@ dependencies = [
   # Only patch versions are guaranteed to not introduce breaking changes.
   "django~=5.2.5",
   "django-allauth[mfa,socialaccount]~=65.12.1",
-  "django-auditlog~=3.4.1",
+  "django-auditlog~=3.3.0",
   "django-cachalot~=2.8.0",
   "django-celery-results~=2.6.0",
   "django-compression-middleware~=0.5.0",
@@ -47,7 +47,7 @@ dependencies = [
   "faiss-cpu>=1.10",
   "filelock~=3.20.0",
   "flower~=2.0.1",
-  "gotenberg-client~=0.13.1",
+  "gotenberg-client~=0.12.0",
   "httpx-oauth~=0.16",
   "imap-tools~=1.11.0",
   "inotifyrecursive~=0.3",
@@ -60,7 +60,7 @@ dependencies = [
   "llama-index-llms-openai>=0.3.38",
   "llama-index-vector-stores-faiss>=0.3",
   "nltk~=3.9.1",
-  "ocrmypdf~=16.13.0",
+  "ocrmypdf~=16.12.0",
   "openai>=1.76",
   "pathvalidate~=3.3.1",
   "pdf2image~=1.17.0",
@@ -91,7 +91,7 @@ optional-dependencies.postgres = [
   "psycopg[c,pool]==3.2.12",
   # Direct dependency for proper resolution of the pre-built wheels
   "psycopg-c==3.2.12",
-  "psycopg-pool==3.3",
+  "psycopg-pool==3.2.7",
 ]
 optional-dependencies.webserver = [
   "granian[uvloop]~=2.5.1",
@@ -126,7 +126,7 @@ testing = [
 ]
 lint = [
-  "pre-commit~=4.5.1",
+  "pre-commit~=4.4.0",
   "pre-commit-uv~=4.2.0",
   "ruff~=0.14.0",
 ]


@@ -11,12 +11,14 @@ from paperless_ai.chat import stream_chat_with_documents
 @pytest.fixture(autouse=True)
 def patch_embed_model():
     from llama_index.core import settings as llama_settings
-    from llama_index.core.embeddings.utils import MockEmbedding
-    mock_embed_model = MockEmbedding(embed_dim=8)
-    llama_settings.Settings.embed_model = mock_embed_model
+    mock_embed_model = MagicMock()
+    mock_embed_model._get_text_embedding_batch.return_value = [
+        [0.1] * 1536,
+    ] # 1 vector per input
+    llama_settings.Settings._embed_model = mock_embed_model
     yield
-    llama_settings.Settings.embed_model = None
+    llama_settings.Settings._embed_model = None
 @pytest.fixture(autouse=True)
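
The fixture swap above replaces llama-index's MockEmbedding, installed through the public Settings.embed_model property, with a plain MagicMock written to the private _embed_model slot, presumably because the property setter resolves and validates the assigned model, which a bare MagicMock would not survive. A standalone sketch of the same pattern; the fixture name is illustrative, and the side_effect variant below actually returns one vector per input for any batch size, rather than a fixed single-element list:

from unittest.mock import MagicMock

import pytest


@pytest.fixture(autouse=True)
def fake_embed_model():
    # Import path mirrors the diff above.
    from llama_index.core import settings as llama_settings

    fake = MagicMock()
    # One 1536-dimensional vector per input text, whatever the batch size.
    fake._get_text_embedding_batch.side_effect = lambda texts, **kwargs: [
        [0.1] * 1536 for _ in texts
    ]
    # Write to the private slot so the public property's resolution logic is bypassed.
    llama_settings.Settings._embed_model = fake
    yield
    # Reset so later tests resolve a real (or freshly mocked) model.
    llama_settings.Settings._embed_model = None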

uv.lock (generated; 752 changed lines)

File diff suppressed because it is too large.
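
Even with the rendered diff suppressed, the effect of the commit is easy to check from a checkout: uv's lockfile is TOML, with one [[package]] table per resolved distribution. A small sketch, assuming that name/version layout holds:

import tomllib  # stdlib since Python 3.11

with open("uv.lock", "rb") as fh:
    lock = tomllib.load(fh)

# Each resolved distribution is a [[package]] table with name and version keys.
filelock = next(p for p in lock["package"] if p["name"] == "filelock")
print(filelock["version"])  # expected to print 3.20.3 after this commit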