Compare commits

1 commit

Author      SHA1        Message                          Date
Trenton H   3ba5bde7dd  Output durations for all tests   2026-01-13 11:39:09 -08:00

31 changed files with 707 additions and 988 deletions

View File

@@ -1,4 +1,3 @@
-# https://docs.codecov.com/docs/codecovyml-reference#codecov
 codecov:
   require_ci_to_pass: true
 # https://docs.codecov.com/docs/components
@@ -10,70 +9,35 @@ component_management:
     - component_id: frontend
       paths:
         - src-ui/**
-# https://docs.codecov.com/docs/flags#step-2-flag-management-in-yaml
-# https://docs.codecov.com/docs/carryforward-flags
 flags:
-  # Backend Python versions
-  backend-python-3.10:
+  backend:
     paths:
       - src/**
     carryforward: true
-  backend-python-3.11:
-    paths:
-      - src/**
-    carryforward: true
-  backend-python-3.12:
-    paths:
-      - src/**
-    carryforward: true
-  # Frontend (shards merge into single flag)
-  frontend-node-24.x:
+  frontend:
     paths:
       - src-ui/**
     carryforward: true
-# https://docs.codecov.com/docs/pull-request-comments
 comment:
   layout: "header, diff, components, flags, files"
-  # https://docs.codecov.com/docs/javascript-bundle-analysis
   require_bundle_changes: true
   bundle_change_threshold: "50Kb"
 coverage:
-  # https://docs.codecov.com/docs/commit-status
   status:
     project:
-      backend:
-        flags:
-          - backend-python-3.10
-          - backend-python-3.11
-          - backend-python-3.12
-        paths:
-          - src/**
+      default:
         # https://docs.codecov.com/docs/commit-status#threshold
         threshold: 1%
-        removed_code_behavior: adjust_base
-      frontend:
-        flags:
-          - frontend-node-24.x
-        paths:
-          - src-ui/**
-        threshold: 1%
-        removed_code_behavior: adjust_base
     patch:
-      backend:
-        flags:
-          - backend-python-3.10
-          - backend-python-3.11
-          - backend-python-3.12
-        paths:
-          - src/**
-        target: 100%
-        threshold: 25%
-      frontend:
-        flags:
-          - frontend-node-24.x
-        paths:
-          - src-ui/**
+      default:
+        # For the changed lines only, target 100% covered, but
+        # allow as low as 75%
         target: 100%
         threshold: 25%
 # https://docs.codecov.com/docs/javascript-bundle-analysis
 bundle_analysis:
-  # Fail if the bundle size increases by more than 1MB
   warning_threshold: "1MB"
   status: true

View File

@@ -44,7 +44,6 @@ include-labels:
   - 'notable'
 exclude-labels:
   - 'skip-changelog'
-filter-by-commitish: true
 category-template: '### $TITLE'
 change-template: '- $TITLE @$AUTHOR ([#$NUMBER]($URL))'
 change-title-escapes: '\<*_&#@'

View File

@@ -88,13 +88,13 @@ jobs:
         if: always()
         uses: codecov/codecov-action@v5
         with:
-          flags: backend-python-${{ matrix.python-version }}
+          flags: backend,backend-python-${{ matrix.python-version }}
           files: junit.xml
           report_type: test_results
       - name: Upload coverage to Codecov
         uses: codecov/codecov-action@v5
         with:
-          flags: backend-python-${{ matrix.python-version }}
+          flags: backend,backend-python-${{ matrix.python-version }}
           files: coverage.xml
           report_type: coverage
       - name: Stop containers

View File

@@ -35,7 +35,7 @@ jobs:
       contents: read
       packages: write
     outputs:
-      should-push: ${{ steps.check-push.outputs.should-push }}
+      can-push: ${{ steps.check-push.outputs.can-push }}
      push-external: ${{ steps.check-push.outputs.push-external }}
      repository: ${{ steps.repo.outputs.name }}
      ref-name: ${{ steps.ref.outputs.name }}
@@ -59,28 +59,16 @@ jobs:
        env:
          REF_NAME: ${{ steps.ref.outputs.name }}
        run: |
-          # should-push: Should we push to GHCR?
-          # True for:
-          # 1. Pushes (tags/dev/beta) - filtered via the workflow triggers
-          # 2. Internal PRs where the branch name starts with 'feature-' - filtered here when a PR is synced
-          should_push="false"
-          if [[ "${{ github.event_name }}" == "push" ]]; then
-            should_push="true"
-          elif [[ "${{ github.event_name }}" == "pull_request" && "${{ github.event.pull_request.head.repo.full_name }}" == "${{ github.repository }}" ]]; then
-            if [[ "${REF_NAME}" == feature-* || "${REF_NAME}" == fix-* ]]; then
-              should_push="true"
-            fi
-          fi
-          echo "should-push=${should_push}"
-          echo "should-push=${should_push}" >> $GITHUB_OUTPUT
+          # can-push: Can we push to GHCR?
+          # True for: pushes, or PRs from the same repo (not forks)
+          can_push=${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
+          echo "can-push=${can_push}"
+          echo "can-push=${can_push}" >> $GITHUB_OUTPUT
           # push-external: Should we also push to Docker Hub and Quay.io?
           # Only for main repo on dev/beta branches or version tags
           push_external="false"
-          if [[ "${should_push}" == "true" && "${{ github.repository_owner }}" == "paperless-ngx" ]]; then
+          if [[ "${can_push}" == "true" && "${{ github.repository_owner }}" == "paperless-ngx" ]]; then
            case "${REF_NAME}" in
              dev|beta)
                push_external="true"
@@ -137,20 +125,20 @@ jobs:
          labels: ${{ steps.docker-meta.outputs.labels }}
          build-args: |
            PNGX_TAG_VERSION=${{ steps.docker-meta.outputs.version }}
-          outputs: type=image,name=${{ env.REGISTRY }}/${{ steps.repo.outputs.name }},push-by-digest=true,name-canonical=true,push=${{ steps.check-push.outputs.should-push }}
+          outputs: type=image,name=${{ env.REGISTRY }}/${{ steps.repo.outputs.name }},push-by-digest=true,name-canonical=true,push=${{ steps.check-push.outputs.can-push }}
          cache-from: |
            type=registry,ref=${{ env.REGISTRY }}/${{ steps.repo.outputs.name }}/cache/app:${{ steps.ref.outputs.cache-ref }}-${{ matrix.arch }}
            type=registry,ref=${{ env.REGISTRY }}/${{ steps.repo.outputs.name }}/cache/app:dev-${{ matrix.arch }}
-          cache-to: ${{ steps.check-push.outputs.should-push == 'true' && format('type=registry,mode=max,ref={0}/{1}/cache/app:{2}-{3}', env.REGISTRY, steps.repo.outputs.name, steps.ref.outputs.cache-ref, matrix.arch) || '' }}
+          cache-to: ${{ steps.check-push.outputs.can-push == 'true' && format('type=registry,mode=max,ref={0}/{1}/cache/app:{2}-{3}', env.REGISTRY, steps.repo.outputs.name, steps.ref.outputs.cache-ref, matrix.arch) || '' }}
      - name: Export digest
-        if: steps.check-push.outputs.should-push == 'true'
+        if: steps.check-push.outputs.can-push == 'true'
        run: |
          mkdir -p /tmp/digests
          digest="${{ steps.build.outputs.digest }}"
          echo "digest=${digest}"
          touch "/tmp/digests/${digest#sha256:}"
      - name: Upload digest
-        if: steps.check-push.outputs.should-push == 'true'
+        if: steps.check-push.outputs.can-push == 'true'
        uses: actions/upload-artifact@v6.0.0
        with:
          name: digests-${{ matrix.arch }}
@@ -161,7 +149,7 @@ jobs:
    name: Merge and Push Manifest
    runs-on: ubuntu-24.04
    needs: build-arch
-    if: needs.build-arch.outputs.should-push == 'true'
+    if: needs.build-arch.outputs.can-push == 'true'
    permissions:
      contents: read
      packages: write
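
To make the gating easier to follow, here is a minimal Python restatement of the base side's bash logic above. The function name and signature are mine; the real check runs inline in the workflow step.

```python
# Sketch of the base side's should-push decision, translated from the
# bash step above for readability. Inputs mirror the GitHub context values.
def should_push(event_name: str, head_repo: str, repo: str, ref_name: str) -> bool:
    # Pushes (tags/dev/beta) are already filtered by the workflow triggers.
    if event_name == "push":
        return True
    # Internal PRs (same repo, not a fork) on feature-*/fix-* branches.
    if event_name == "pull_request" and head_repo == repo:
        return ref_name.startswith(("feature-", "fix-"))
    return False
```

The head side collapses this to a single expression: push events, or any pull request whose head repo equals the target repo.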

View File

@@ -109,13 +109,13 @@ jobs:
         if: always()
         uses: codecov/codecov-action@v5
         with:
-          flags: frontend-node-${{ matrix.node-version }}
+          flags: frontend,frontend-node-${{ matrix.node-version }}
           directory: src-ui/
           report_type: test_results
       - name: Upload coverage to Codecov
         uses: codecov/codecov-action@v5
         with:
-          flags: frontend-node-${{ matrix.node-version }}
+          flags: frontend,frontend-node-${{ matrix.node-version }}
           directory: src-ui/coverage/
   e2e-tests:
     name: "E2E Tests (${{ matrix.shard-index }}/${{ matrix.shard-count }})"

View File

@@ -30,7 +30,7 @@ RUN set -eux \
 # Purpose: Installs s6-overlay and rootfs
 # Comments:
 #  - Don't leave anything extra in here either
-FROM ghcr.io/astral-sh/uv:0.9.26-python3.12-trixie-slim AS s6-overlay-base
+FROM ghcr.io/astral-sh/uv:0.9.15-python3.12-trixie-slim AS s6-overlay-base

 WORKDIR /usr/src/s6
@@ -196,11 +196,7 @@
   && apt-get install --yes --quiet --no-install-recommends ${BUILD_PACKAGES} \
   && echo "Installing Python requirements" \
   && uv export --quiet --no-dev --all-extras --format requirements-txt --output-file requirements.txt \
-  && uv pip install --no-cache --system --no-python-downloads --python-preference system \
-    --index https://pypi.org/simple \
-    --index https://download.pytorch.org/whl/cpu \
-    --index-strategy unsafe-best-match \
-    --requirements requirements.txt \
+  && uv pip install --no-cache --system --no-python-downloads --python-preference system --requirements requirements.txt \
   && echo "Installing NLTK data" \
   && python3 -W ignore::RuntimeWarning -m nltk.downloader -d "/usr/share/nltk_data" snowball_data \
   && python3 -W ignore::RuntimeWarning -m nltk.downloader -d "/usr/share/nltk_data" stopwords \

View File

@@ -1,60 +1,9 @@
 # Changelog

-## paperless-ngx 2.20.5
-
-### Bug Fixes
-
-- Fix: ensure horizontal scroll for long tag names in list, wrap tags without parent [@shamoon](https://github.com/shamoon) ([#11811](https://github.com/paperless-ngx/paperless-ngx/pull/11811))
-- Fix: use explicit order field for workflow actions [@shamoon](https://github.com/shamoon) [@stumpylog](https://github.com/stumpylog) ([#11781](https://github.com/paperless-ngx/paperless-ngx/pull/11781))
-
-### All App Changes
-
-<details>
-<summary>2 changes</summary>
-
-- Fix: ensure horizontal scroll for long tag names in list, wrap tags without parent [@shamoon](https://github.com/shamoon) ([#11811](https://github.com/paperless-ngx/paperless-ngx/pull/11811))
-- Fix: use explicit order field for workflow actions [@shamoon](https://github.com/shamoon) [@stumpylog](https://github.com/stumpylog) ([#11781](https://github.com/paperless-ngx/paperless-ngx/pull/11781))
-</details>
-
-## paperless-ngx 2.20.4
-
-### Security
-
-- Resolve [GHSA-28cf-xvcf-hw6m](https://github.com/paperless-ngx/paperless-ngx/security/advisories/GHSA-28cf-xvcf-hw6m)
-
-### Bug Fixes
-
-- Fix: propagate metadata override created value [@shamoon](https://github.com/shamoon) ([#11659](https://github.com/paperless-ngx/paperless-ngx/pull/11659))
-- Fix: support ordering by storage path name [@shamoon](https://github.com/shamoon) ([#11661](https://github.com/paperless-ngx/paperless-ngx/pull/11661))
-- Fix: validate cf integer values within PostgreSQL range [@shamoon](https://github.com/shamoon) ([#11666](https://github.com/paperless-ngx/paperless-ngx/pull/11666))
-- Fixhancement: add error handling and retry when opening index [@shamoon](https://github.com/shamoon) ([#11731](https://github.com/paperless-ngx/paperless-ngx/pull/11731))
-- Fix: fix recurring workflow to respect latest run time [@shamoon](https://github.com/shamoon) ([#11735](https://github.com/paperless-ngx/paperless-ngx/pull/11735))
-
-### All App Changes
-
-<details>
-<summary>5 changes</summary>
-
-- Fix: propagate metadata override created value [@shamoon](https://github.com/shamoon) ([#11659](https://github.com/paperless-ngx/paperless-ngx/pull/11659))
-- Fix: support ordering by storage path name [@shamoon](https://github.com/shamoon) ([#11661](https://github.com/paperless-ngx/paperless-ngx/pull/11661))
-- Fix: validate cf integer values within PostgreSQL range [@shamoon](https://github.com/shamoon) ([#11666](https://github.com/paperless-ngx/paperless-ngx/pull/11666))
-- Fixhancement: add error handling and retry when opening index [@shamoon](https://github.com/shamoon) ([#11731](https://github.com/paperless-ngx/paperless-ngx/pull/11731))
-- Fix: fix recurring workflow to respect latest run time [@shamoon](https://github.com/shamoon) ([#11735](https://github.com/paperless-ngx/paperless-ngx/pull/11735))
-</details>
-
 ## paperless-ngx 2.20.3

-### Security
-
-- Resolve [GHSA-7cq3-mhxq-w946](https://github.com/paperless-ngx/paperless-ngx/security/advisories/GHSA-7cq3-mhxq-w946)
-
 ## paperless-ngx 2.20.2

-### Security
-
-- Resolve [GHSA-6653-vcx4-69mc](https://github.com/paperless-ngx/paperless-ngx/security/advisories/GHSA-6653-vcx4-69mc)
-- Resolve [GHSA-24x5-wp64-9fcc](https://github.com/paperless-ngx/paperless-ngx/security/advisories/GHSA-24x5-wp64-9fcc)
-
 ### Features / Enhancements

 - Tweakhancement: dim inactive users in users-groups list [@shamoon](https://github.com/shamoon) ([#11537](https://github.com/paperless-ngx/paperless-ngx/pull/11537))

View File

@@ -170,18 +170,11 @@ Available options are `postgresql` and `mariadb`.
 !!! note

-    A pool of 8-10 connections per worker is typically sufficient.
-    If you encounter error messages such as `couldn't get a connection`
-    or database connection timeouts, you probably need to increase the pool size.
-
-!!! warning
-
-    Make sure your PostgreSQL `max_connections` setting is large enough to handle the connection pools:
-    `(NB_PAPERLESS_WORKERS + NB_CELERY_WORKERS) × POOL_SIZE + SAFETY_MARGIN`. For example, with
-    4 Paperless workers and 2 Celery workers, and a pool size of 8: `(4 + 2) × 8 + 10 = 58`,
-    so `max_connections = 60` (or even more) is appropriate.
-
-    This assumes only Paperless-ngx connects to your PostgreSQL instance. If you have other applications,
-    you should increase `max_connections` accordingly.
+    A small pool is typically sufficient — for example, a size of 4.
+    Make sure your PostgreSQL server's max_connections setting is large enough to handle:
+    `(Paperless workers + Celery workers) × pool size + safety margin`
+    For example, with 4 Paperless workers and 2 Celery workers, and a pool size of 4:
+    `(4 + 2) × 4 + 10 = 34` connections required.

 #### [`PAPERLESS_DB_READ_CACHE_ENABLED=<bool>`](#PAPERLESS_DB_READ_CACHE_ENABLED) {#PAPERLESS_DB_READ_CACHE_ENABLED}
@@ -1873,7 +1866,7 @@ using the OpenAI API. This setting is required to be set to use the AI features.
 #### [`PAPERLESS_AI_LLM_MODEL=<str>`](#PAPERLESS_AI_LLM_MODEL) {#PAPERLESS_AI_LLM_MODEL}

 : The model to use for the AI backend, i.e. "gpt-3.5-turbo", "gpt-4" or any of the models supported by the
-current backend. If not supplied, defaults to "gpt-3.5-turbo" for OpenAI and "llama3.1" for Ollama.
+current backend. If not supplied, defaults to "gpt-3.5-turbo" for OpenAI and "llama3" for Ollama.
 Defaults to None.
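
The connection-sizing rule in both versions of the note above is plain arithmetic. A quick sketch using the docs' own example numbers (illustrative only; substitute your deployment's values):

```python
# max_connections estimate per the formula in the docs above.
paperless_workers = 4
celery_workers = 2
pool_size = 4        # per-worker connection pool size
safety_margin = 10

required = (paperless_workers + celery_workers) * pool_size + safety_margin
print(required)  # 34 -- with the base side's pool size of 8, this becomes 58
```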

View File

@@ -1,6 +1,6 @@
 [project]
 name = "paperless-ngx"
-version = "2.20.5"
+version = "2.20.3"
 description = "A community-supported supercharged document management system: scan, index and archive all your physical documents"
 readme = "README.md"
 requires-python = ">=3.10"
@@ -28,7 +28,7 @@ dependencies = [
     # Only patch versions are guaranteed to not introduce breaking changes.
     "django~=5.2.5",
     "django-allauth[mfa,socialaccount]~=65.12.1",
-    "django-auditlog~=3.4.1",
+    "django-auditlog~=3.3.0",
     "django-cachalot~=2.8.0",
     "django-celery-results~=2.6.0",
     "django-compression-middleware~=0.5.0",
@@ -47,20 +47,20 @@ dependencies = [
     "faiss-cpu>=1.10",
     "filelock~=3.20.0",
     "flower~=2.0.1",
-    "gotenberg-client~=0.13.1",
+    "gotenberg-client~=0.12.0",
     "httpx-oauth~=0.16",
     "imap-tools~=1.11.0",
     "inotifyrecursive~=0.3",
     "jinja2~=3.1.5",
     "langdetect~=1.0.9",
-    "llama-index-core>=0.14.12",
-    "llama-index-embeddings-huggingface>=0.6.1",
-    "llama-index-embeddings-openai>=0.5.1",
-    "llama-index-llms-ollama>=0.9.1",
-    "llama-index-llms-openai>=0.6.13",
-    "llama-index-vector-stores-faiss>=0.5.2",
+    "llama-index-core>=0.12.33.post1",
+    "llama-index-embeddings-huggingface>=0.5.3",
+    "llama-index-embeddings-openai>=0.3.1",
+    "llama-index-llms-ollama>=0.5.4",
+    "llama-index-llms-openai>=0.3.38",
+    "llama-index-vector-stores-faiss>=0.3",
     "nltk~=3.9.1",
-    "ocrmypdf~=16.13.0",
+    "ocrmypdf~=16.12.0",
     "openai>=1.76",
     "pathvalidate~=3.3.1",
     "pdf2image~=1.17.0",
@@ -77,7 +77,6 @@ dependencies = [
     "sentence-transformers>=4.1",
     "setproctitle~=1.3.4",
     "tika-client~=0.10.0",
-    "torch~=2.9.1",
     "tqdm~=4.67.1",
     "watchdog~=6.0",
     "whitenoise~=6.9",
@@ -92,7 +91,7 @@ optional-dependencies.postgres = [
     "psycopg[c,pool]==3.2.12",
     # Direct dependency for proper resolution of the pre-built wheels
     "psycopg-c==3.2.12",
-    "psycopg-pool==3.3",
+    "psycopg-pool==3.2.7",
 ]
 optional-dependencies.webserver = [
     "granian[uvloop]~=2.5.1",
@@ -127,7 +126,7 @@ testing = [
 ]
 lint = [
-    "pre-commit~=4.5.1",
+    "pre-commit~=4.4.0",
     "pre-commit-uv~=4.2.0",
     "ruff~=0.14.0",
 ]
@@ -170,15 +169,6 @@ zxing-cpp = [
     { url = "https://github.com/paperless-ngx/builder/releases/download/zxing-2.3.0/zxing_cpp-2.3.0-cp312-cp312-linux_aarch64.whl", marker = "sys_platform == 'linux' and platform_machine == 'aarch64' and python_version == '3.12'" },
 ]
-torch = [
-    { index = "pytorch-cpu" },
-]
-
-[[tool.uv.index]]
-name = "pytorch-cpu"
-url = "https://download.pytorch.org/whl/cpu"
-explicit = true
-
 [tool.ruff]
 target-version = "py310"
 line-length = 88
@@ -284,7 +274,7 @@ addopts = [
     "--numprocesses=auto",
     "--maxprocesses=16",
     "--quiet",
-    "--durations=50",
+    "--durations=0",
     "--junitxml=junit.xml",
     "-o junit_family=legacy",
 ]
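
For context on the lone commit in this compare: pytest's `--durations=N` option prints the N slowest test durations after a run, and `--durations=0` prints the duration of every test. That is what the head side's `--durations=0` and the commit message "Output durations for all tests" refer to.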

View File

@@ -1,6 +1,6 @@
 {
   "name": "paperless-ngx-ui",
-  "version": "2.20.5",
+  "version": "2.20.3",
   "scripts": {
     "preinstall": "npx only-allow pnpm",
     "ng": "ng",

View File

@@ -252,7 +252,7 @@ describe('WorkflowEditDialogComponent', () => {
     expect(component.object.actions.length).toEqual(2)
   })

-  it('should update order on drag n drop', () => {
+  it('should update order and remove ids from actions on drag n drop', () => {
     const action1 = workflow.actions[0]
     const action2 = workflow.actions[1]
     component.object = workflow
@@ -261,6 +261,8 @@
       WorkflowAction[]
     >)
     expect(component.object.actions).toEqual([action2, action1])
+    expect(action1.id).toBeNull()
+    expect(action2.id).toBeNull()
   })

   it('should not include auto matching in algorithms', () => {

View File

@@ -1283,6 +1283,11 @@ export class WorkflowEditDialogComponent
     const actionField = this.actionFields.at(event.previousIndex)
     this.actionFields.removeAt(event.previousIndex)
     this.actionFields.insert(event.currentIndex, actionField)
+    // removing id will effectively re-create the actions in this order
+    this.object.actions.forEach((a) => (a.id = null))
+    this.actionFields.controls.forEach((c) =>
+      c.get('id').setValue(null, { emitEvent: false })
+    )
   }

   save(): void {

View File

@@ -28,7 +28,7 @@
   </button>
 </ng-template>
 <ng-template ng-option-tmp let-item="item" let-index="index" let-search="searchTerm">
-  <div class="tag-option-row d-flex align-items-center" [class.w-auto]="!getTag(item.id)?.parent">
+  <div class="tag-option-row d-flex align-items-center">
     @if (item.id && tags) {
       @if (getTag(item.id)?.parent) {
         <i-bs name="list-nested" class="me-1"></i-bs>

View File

@@ -22,8 +22,8 @@
 }

 // Dropdown hierarchy reveal for ng-select options
-:host ::ng-deep .ng-dropdown-panel .ng-option {
-  overflow-x: auto !important;
+::ng-deep .ng-dropdown-panel .ng-option {
+  overflow-x: scroll;

   .tag-option-row {
     font-size: 1rem;
@@ -41,12 +41,12 @@
   }
 }

-:host ::ng-deep .ng-dropdown-panel .ng-option:hover .hierarchy-reveal,
-:host ::ng-deep .ng-dropdown-panel .ng-option.ng-option-marked .hierarchy-reveal {
+::ng-deep .ng-dropdown-panel .ng-option:hover .hierarchy-reveal,
+::ng-deep .ng-dropdown-panel .ng-option.ng-option-marked .hierarchy-reveal {
   max-width: 1000px;
 }

 ::ng-deep .ng-dropdown-panel .ng-option:hover .hierarchy-indicator,
-:host ::ng-deep .ng-dropdown-panel .ng-option.ng-option-marked .hierarchy-indicator {
+::ng-deep .ng-dropdown-panel .ng-option.ng-option-marked .hierarchy-indicator {
   background: transparent;
 }

View File

@@ -285,10 +285,10 @@ export class DocumentDetailComponent
     if (
       element &&
       element.nativeElement.offsetParent !== null &&
-      this.nav?.activeId == DocumentDetailNavIDs.Preview
+      this.nav?.activeId == 4
     ) {
       // its visible
-      setTimeout(() => this.nav?.select(DocumentDetailNavIDs.Details))
+      setTimeout(() => this.nav?.select(1))
     }
   }

View File

@@ -6,7 +6,7 @@ export const environment = {
   apiVersion: '9', // match src/paperless/settings.py
   appTitle: 'Paperless-ngx',
   tag: 'prod',
-  version: '2.20.5',
+  version: '2.20.3',
   webSocketHost: window.location.host,
   webSocketProtocol: window.location.protocol == 'https:' ? 'wss:' : 'ws:',
   webSocketBaseUrl: base_url.pathname + 'ws/',

View File

@@ -6,7 +6,7 @@ from django.db import models
 class Migration(migrations.Migration):
     dependencies = [
-        ("documents", "1075_workflowaction_order"),
+        ("documents", "1074_workflowrun_deleted_at_workflowrun_restored_at_and_more"),
     ]

     operations = [

View File

@@ -1,28 +0,0 @@
-# Generated by Django 5.2.7 on 2026-01-14 16:53
-
-from django.db import migrations
-from django.db import models
-from django.db.models import F
-
-
-def populate_action_order(apps, schema_editor):
-    WorkflowAction = apps.get_model("documents", "WorkflowAction")
-    WorkflowAction.objects.all().update(order=F("id"))
-
-
-class Migration(migrations.Migration):
-    dependencies = [
-        ("documents", "1074_workflowrun_deleted_at_workflowrun_restored_at_and_more"),
-    ]
-
-    operations = [
-        migrations.AddField(
-            model_name="workflowaction",
-            name="order",
-            field=models.PositiveIntegerField(default=0, verbose_name="order"),
-        ),
-        migrations.RunPython(
-            populate_action_order,
-            reverse_code=migrations.RunPython.noop,
-        ),
-    ]

View File

@@ -1295,8 +1295,6 @@ class WorkflowAction(models.Model):
         default=WorkflowActionType.ASSIGNMENT,
     )

-    order = models.PositiveIntegerField(_("order"), default=0)
-
     assign_title = models.TextField(
         _("assign title"),
         null=True,

View File

@@ -2577,8 +2577,7 @@ class WorkflowSerializer(serializers.ModelSerializer):
                 set_triggers.append(trigger_instance)

         if actions is not None and actions is not serializers.empty:
-            for index, action in enumerate(actions):
-                action["order"] = index
+            for action in actions:
                 assign_tags = action.pop("assign_tags", None)
                 assign_view_users = action.pop("assign_view_users", None)
                 assign_view_groups = action.pop("assign_view_groups", None)
@@ -2705,16 +2704,6 @@
         return instance

-    def to_representation(self, instance):
-        data = super().to_representation(instance)
-        actions = instance.actions.order_by("order", "pk")
-        data["actions"] = WorkflowActionSerializer(
-            actions,
-            many=True,
-            context=self.context,
-        ).data
-        return data
-
 class TrashSerializer(SerializerWithPerms):
     documents = serializers.ListField(
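
The two sides persist action order differently: the head side nulls action ids on drag-and-drop so the actions are re-created in list order (see the component change above), while the base side keeps ids and has the serializer write an explicit `order` field from each action's list position. A minimal runnable sketch of the base-side idea, with toy dicts standing in for the validated action data:

```python
# Base-side ordering: derive `order` from each action's position in the
# submitted list, then read back sorted by ("order", "pk").
actions = [{"id": 7}, {"id": 3}, {"id": 9}]

for index, action in enumerate(actions):
    action["order"] = index

assert [a["order"] for a in actions] == [0, 1, 2]
# ids survive, so existing rows are updated in place instead of re-created
```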

View File

@@ -421,15 +421,7 @@ def update_filename_and_move_files(
         return
     instance = instance.document

-    def validate_move(instance, old_path: Path, new_path: Path, root: Path):
-        if not new_path.is_relative_to(root):
-            msg = (
-                f"Document {instance!s}: Refusing to move file outside root {root}: "
-                f"{new_path}."
-            )
-            logger.warning(msg)
-            raise CannotMoveFilesException(msg)
+    def validate_move(instance, old_path: Path, new_path: Path):
         if not old_path.is_file():
             # Can't do anything if the old file does not exist anymore.
             msg = f"Document {instance!s}: File {old_path} doesn't exist."
@@ -518,22 +510,12 @@
             return

         if move_original:
-            validate_move(
-                instance,
-                old_source_path,
-                instance.source_path,
-                settings.ORIGINALS_DIR,
-            )
+            validate_move(instance, old_source_path, instance.source_path)
             create_source_path_directory(instance.source_path)
             shutil.move(old_source_path, instance.source_path)

         if move_archive:
-            validate_move(
-                instance,
-                old_archive_path,
-                instance.archive_path,
-                settings.ARCHIVE_DIR,
-            )
+            validate_move(instance, old_archive_path, instance.archive_path)
             create_source_path_directory(instance.archive_path)
             shutil.move(old_archive_path, instance.archive_path)
@@ -781,7 +763,7 @@
         if matching.document_matches_workflow(document, workflow, trigger_type):
             action: WorkflowAction
-            for action in workflow.actions.order_by("order", "pk"):
+            for action in workflow.actions.all():
                 message = f"Applying {action} from {workflow}"
                 if not use_overrides:
                     logger.info(message, extra={"group": logging_group})

View File

@@ -262,17 +262,6 @@ def get_custom_fields_context(
     return field_data

-def _is_safe_relative_path(value: str) -> bool:
-    if value == "":
-        return True
-
-    path = PurePath(value)
-    if path.is_absolute() or path.drive:
-        return False
-
-    return ".." not in path.parts
-
 def validate_filepath_template_and_render(
     template_string: str,
     document: Document | None = None,
@@ -320,12 +309,6 @@
         )
         rendered_template = template.render(context)

-        if not _is_safe_relative_path(rendered_template):
-            logger.warning(
-                "Template rendered an unsafe path (absolute or containing traversal).",
-            )
-            return None
-
         # We're good!
         return rendered_template
     except UndefinedError:
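
The traversal guard the base side adds here is small enough to trace by hand. The function body below is copied from the base side of the hunk above; the sample paths are my own illustrations:

```python
from pathlib import PurePath

def _is_safe_relative_path(value: str) -> bool:
    # Copied from the base side of the diff above.
    if value == "":
        return True
    path = PurePath(value)
    if path.is_absolute() or path.drive:
        return False
    return ".." not in path.parts

assert _is_safe_relative_path("")                      # empty render is allowed
assert _is_safe_relative_path("archive/2024/doc.pdf")  # ordinary relative path
assert not _is_safe_relative_path("/tmp/proof")        # absolute path rejected
assert not _is_safe_relative_path("../../tmp/proof")   # traversal rejected
```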

View File

@@ -219,30 +219,6 @@ class TestApiStoragePaths(DirectoriesMixin, APITestCase):
         self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
         self.assertEqual(StoragePath.objects.count(), 1)

-    def test_api_create_storage_path_rejects_traversal(self):
-        """
-        GIVEN:
-            - API request to create a storage paths
-            - Storage path attempts directory traversal
-        WHEN:
-            - API is called
-        THEN:
-            - Correct HTTP 400 response
-            - No storage path is created
-        """
-        response = self.client.post(
-            self.ENDPOINT,
-            json.dumps(
-                {
-                    "name": "Traversal path",
-                    "path": "../../../../../tmp/proof",
-                },
-            ),
-            content_type="application/json",
-        )
-        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
-        self.assertEqual(StoragePath.objects.count(), 1)
-
     def test_api_storage_path_placeholders(self):
         """
         GIVEN:

View File

@@ -20,6 +20,9 @@ def get_workflows_for_trigger(
     wrap it in a list; otherwise fetch enabled workflows for the trigger with
     the prefetches used by the runner.
     """
+    if workflow_to_run is not None:
+        return [workflow_to_run]
+
     annotated_actions = (
         WorkflowAction.objects.select_related(
             "assign_correspondent",
@@ -102,25 +105,10 @@
         )
     )

-    action_prefetch = Prefetch(
-        "actions",
-        queryset=annotated_actions.order_by("order", "pk"),
-    )
-
-    if workflow_to_run is not None:
-        return (
-            Workflow.objects.filter(pk=workflow_to_run.pk)
-            .prefetch_related(
-                action_prefetch,
-                "triggers",
-            )
-            .distinct()
-        )
-
     return (
         Workflow.objects.filter(enabled=True, triggers__type=trigger_type)
         .prefetch_related(
-            action_prefetch,
+            Prefetch("actions", queryset=annotated_actions),
             "triggers",
         )
         .order_by("order")

View File

@@ -2,7 +2,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: paperless-ngx\n"
 "Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2026-01-15 23:01+0000\n"
+"POT-Creation-Date: 2026-01-13 16:26+0000\n"
 "PO-Revision-Date: 2022-02-17 04:17\n"
 "Last-Translator: \n"
 "Language-Team: English\n"
@@ -89,7 +89,7 @@
 msgid "Automatic"
 msgstr ""

-#: documents/models.py:64 documents/models.py:456 documents/models.py:1529
+#: documents/models.py:64 documents/models.py:456 documents/models.py:1527
 #: paperless_mail/models.py:23 paperless_mail/models.py:143
 msgid "name"
 msgstr ""
@@ -264,7 +264,7 @@ msgid "The position of this document in your physical document archive."
 msgstr ""

 #: documents/models.py:318 documents/models.py:700 documents/models.py:754
-#: documents/models.py:1572
+#: documents/models.py:1570
 msgid "document"
 msgstr ""
@@ -1047,180 +1047,179 @@
 msgid "Workflow Action Type"
 msgstr ""

-#: documents/models.py:1298 documents/models.py:1531
-#: paperless_mail/models.py:145
-msgid "order"
-msgstr ""
-
-#: documents/models.py:1301
+#: documents/models.py:1299
 msgid "assign title"
 msgstr ""

-#: documents/models.py:1305
+#: documents/models.py:1303
 msgid "Assign a document title, must be a Jinja2 template, see documentation."
 msgstr ""

-#: documents/models.py:1313 paperless_mail/models.py:274
+#: documents/models.py:1311 paperless_mail/models.py:274
 msgid "assign this tag"
 msgstr ""

-#: documents/models.py:1322 paperless_mail/models.py:282
+#: documents/models.py:1320 paperless_mail/models.py:282
 msgid "assign this document type"
 msgstr ""

-#: documents/models.py:1331 paperless_mail/models.py:296
+#: documents/models.py:1329 paperless_mail/models.py:296
 msgid "assign this correspondent"
 msgstr ""

-#: documents/models.py:1340
+#: documents/models.py:1338
 msgid "assign this storage path"
 msgstr ""

-#: documents/models.py:1349
+#: documents/models.py:1347
 msgid "assign this owner"
 msgstr ""

-#: documents/models.py:1356
+#: documents/models.py:1354
 msgid "grant view permissions to these users"
 msgstr ""

-#: documents/models.py:1363
+#: documents/models.py:1361
 msgid "grant view permissions to these groups"
 msgstr ""

-#: documents/models.py:1370
+#: documents/models.py:1368
 msgid "grant change permissions to these users"
 msgstr ""

-#: documents/models.py:1377
+#: documents/models.py:1375
 msgid "grant change permissions to these groups"
 msgstr ""

-#: documents/models.py:1384
+#: documents/models.py:1382
 msgid "assign these custom fields"
 msgstr ""

-#: documents/models.py:1388
+#: documents/models.py:1386
 msgid "custom field values"
 msgstr ""

-#: documents/models.py:1392
+#: documents/models.py:1390
 msgid "Optional values to assign to the custom fields."
 msgstr ""

-#: documents/models.py:1401
+#: documents/models.py:1399
 msgid "remove these tag(s)"
 msgstr ""

-#: documents/models.py:1406
+#: documents/models.py:1404
 msgid "remove all tags"
 msgstr ""

-#: documents/models.py:1413
+#: documents/models.py:1411
 msgid "remove these document type(s)"
 msgstr ""

-#: documents/models.py:1418
+#: documents/models.py:1416
 msgid "remove all document types"
 msgstr ""

-#: documents/models.py:1425
+#: documents/models.py:1423
 msgid "remove these correspondent(s)"
 msgstr ""

-#: documents/models.py:1430
+#: documents/models.py:1428
 msgid "remove all correspondents"
 msgstr ""

-#: documents/models.py:1437
+#: documents/models.py:1435
 msgid "remove these storage path(s)"
 msgstr ""

-#: documents/models.py:1442
+#: documents/models.py:1440
 msgid "remove all storage paths"
 msgstr ""

-#: documents/models.py:1449
+#: documents/models.py:1447
 msgid "remove these owner(s)"
 msgstr ""

-#: documents/models.py:1454
+#: documents/models.py:1452
 msgid "remove all owners"
 msgstr ""

-#: documents/models.py:1461
+#: documents/models.py:1459
 msgid "remove view permissions for these users"
 msgstr ""

-#: documents/models.py:1468
+#: documents/models.py:1466
 msgid "remove view permissions for these groups"
 msgstr ""

-#: documents/models.py:1475
+#: documents/models.py:1473
 msgid "remove change permissions for these users"
 msgstr ""

-#: documents/models.py:1482
+#: documents/models.py:1480
 msgid "remove change permissions for these groups"
 msgstr ""

-#: documents/models.py:1487
+#: documents/models.py:1485
 msgid "remove all permissions"
 msgstr ""

-#: documents/models.py:1494
+#: documents/models.py:1492
 msgid "remove these custom fields"
 msgstr ""

-#: documents/models.py:1499
+#: documents/models.py:1497
 msgid "remove all custom fields"
 msgstr ""

-#: documents/models.py:1508
+#: documents/models.py:1506
 msgid "email"
 msgstr ""

-#: documents/models.py:1517
+#: documents/models.py:1515
 msgid "webhook"
 msgstr ""

-#: documents/models.py:1521
+#: documents/models.py:1519
 msgid "workflow action"
 msgstr ""

-#: documents/models.py:1522
+#: documents/models.py:1520
 msgid "workflow actions"
 msgstr ""

-#: documents/models.py:1537
+#: documents/models.py:1529 paperless_mail/models.py:145
+msgid "order"
+msgstr ""
+
+#: documents/models.py:1535
 msgid "triggers"
 msgstr ""

-#: documents/models.py:1544
+#: documents/models.py:1542
 msgid "actions"
 msgstr ""

-#: documents/models.py:1547 paperless_mail/models.py:154
+#: documents/models.py:1545 paperless_mail/models.py:154
 msgid "enabled"
 msgstr ""

-#: documents/models.py:1558
+#: documents/models.py:1556
 msgid "workflow"
 msgstr ""

-#: documents/models.py:1562
+#: documents/models.py:1560
 msgid "workflow trigger type"
 msgstr ""

-#: documents/models.py:1576
+#: documents/models.py:1574
 msgid "date run"
 msgstr ""

-#: documents/models.py:1582
+#: documents/models.py:1580
 msgid "workflow run"
 msgstr ""

-#: documents/models.py:1583
+#: documents/models.py:1581
 msgid "workflow runs"
 msgstr ""

View File

@@ -1,6 +1,6 @@
 from typing import Final

-__version__: Final[tuple[int, int, int]] = (2, 20, 5)
+__version__: Final[tuple[int, int, int]] = (2, 20, 3)

 # Version string like X.Y.Z
 __full_version_str__: Final[str] = ".".join(map(str, __version__))

 # Version string like X.Y

View File

@@ -23,7 +23,7 @@ class AIClient:
     def get_llm(self) -> Ollama | OpenAI:
         if self.settings.llm_backend == "ollama":
             return Ollama(
-                model=self.settings.llm_model or "llama3.1",
+                model=self.settings.llm_model or "llama3",
                 base_url=self.settings.llm_endpoint or "http://localhost:11434",
                 request_timeout=120,
             )
@@ -52,7 +52,7 @@
         )
         tool_calls = self.llm.get_tool_calls_from_response(
             result,
-            error_on_no_tool_call=True,
+            error_on_no_tool_calls=True,
         )
         logger.debug("LLM query result: %s", tool_calls)
         parsed = DocumentClassifierSchema(**tool_calls[0].tool_kwargs)

View File

@@ -1,14 +1,11 @@
 import logging
 import shutil
-from datetime import timedelta
 from pathlib import Path

 import faiss
 import llama_index.core.settings as llama_settings
 import tqdm
-from celery import states
 from django.conf import settings
-from django.utils import timezone
 from llama_index.core import Document as LlamaDocument
 from llama_index.core import StorageContext
 from llama_index.core import VectorStoreIndex
@@ -24,7 +21,6 @@ from llama_index.core.text_splitter import TokenTextSplitter
 from llama_index.vector_stores.faiss import FaissVectorStore

 from documents.models import Document
-from documents.models import PaperlessTask
 from paperless_ai.embedding import build_llm_index_text
 from paperless_ai.embedding import get_embedding_dim
 from paperless_ai.embedding import get_embedding_model
@@ -32,29 +28,6 @@ from paperless_ai.embedding import get_embedding_model
 logger = logging.getLogger("paperless_ai.indexing")

-def queue_llm_index_update_if_needed(*, rebuild: bool, reason: str) -> bool:
-    from documents.tasks import llmindex_index
-
-    has_running = PaperlessTask.objects.filter(
-        task_name=PaperlessTask.TaskName.LLMINDEX_UPDATE,
-        status__in=[states.PENDING, states.STARTED],
-    ).exists()
-    has_recent = PaperlessTask.objects.filter(
-        task_name=PaperlessTask.TaskName.LLMINDEX_UPDATE,
-        date_created__gte=(timezone.now() - timedelta(minutes=5)),
-    ).exists()
-    if has_running or has_recent:
-        return False
-
-    llmindex_index.delay(rebuild=rebuild, scheduled=False, auto=True)
-    logger.warning(
-        "Queued LLM index update%s: %s",
-        " (rebuild)" if rebuild else "",
-        reason,
-    )
-    return True
-
 def get_or_create_storage_context(*, rebuild=False):
     """
     Loads or creates the StorageContext (vector store, docstore, index store).
@@ -120,10 +93,6 @@
     except ValueError as e:
         logger.warning("Failed to load index from storage: %s", e)
         if not nodes:
-            queue_llm_index_update_if_needed(
-                rebuild=vector_store_file_exists(),
-                reason="LLM index missing or invalid while loading.",
-            )
             logger.info("No nodes provided for index creation.")
             raise
     return VectorStoreIndex(
@@ -281,21 +250,7 @@
     """
     Runs a similarity query and returns top-k similar Document objects.
     """
-    if not vector_store_file_exists():
-        queue_llm_index_update_if_needed(
-            rebuild=False,
-            reason="LLM index not found for similarity query.",
-        )
-        return []
-
-    try:
-        index = load_or_build_index()
-    except ValueError:
-        queue_llm_index_update_if_needed(
-            rebuild=True,
-            reason="LLM index failed to load for similarity query.",
-        )
-        return []
+    index = load_or_build_index()

     # constrain only the node(s) that match the document IDs, if given
     doc_node_ids = (

View File

@@ -299,15 +299,11 @@ def test_query_similar_documents(
     with (
         patch("paperless_ai.indexing.get_or_create_storage_context") as mock_storage,
         patch("paperless_ai.indexing.load_or_build_index") as mock_load_or_build_index,
-        patch(
-            "paperless_ai.indexing.vector_store_file_exists",
-        ) as mock_vector_store_exists,
         patch("paperless_ai.indexing.VectorIndexRetriever") as mock_retriever_cls,
         patch("paperless_ai.indexing.Document.objects.filter") as mock_filter,
     ):
         mock_storage.return_value = MagicMock()
         mock_storage.return_value.persist_dir = temp_llm_index_dir
-        mock_vector_store_exists.return_value = True

         mock_index = MagicMock()
         mock_load_or_build_index.return_value = mock_index

View File

@@ -11,12 +11,14 @@ from paperless_ai.chat import stream_chat_with_documents
 @pytest.fixture(autouse=True)
 def patch_embed_model():
     from llama_index.core import settings as llama_settings
-    from llama_index.core.embeddings.mock_embed_model import MockEmbedding

-    # Use a real BaseEmbedding subclass to satisfy llama-index 0.14 validation
-    llama_settings.Settings.embed_model = MockEmbedding(embed_dim=1536)
+    mock_embed_model = MagicMock()
+    mock_embed_model._get_text_embedding_batch.return_value = [
+        [0.1] * 1536,
+    ]  # 1 vector per input
+    llama_settings.Settings._embed_model = mock_embed_model
     yield
-    llama_settings.Settings.embed_model = None
+    llama_settings.Settings._embed_model = None

 @pytest.fixture(autouse=True)
@pytest.fixture(autouse=True) @pytest.fixture(autouse=True)

uv.lock (generated): 1151 lines changed. File diff suppressed because it is too large.