Mirror of https://github.com/paperless-ngx/paperless-ngx.git (synced 2025-07-28 18:24:38 -05:00)

Compare commits: 61 commits, e35dad81d9 ... fix-merge-
Commit SHA1s:
37f323d836
e628c488cf
8cfa9061d4
f269919410
5f00066dff
705f542129
f036292b72
f8a43d5dab
53c106d448
0f37186c88
0fb55f3ae8
78822f6121
2ee1d7540e
43b2527275
248b573c03
d87b421f98
b9f7428f2f
e715e8a2db
ec4d5352df
35ccca6a86
192cfacfce
571f3b4ab2
8891d5ca16
83ca4ef69b
3e70e960f8
845f7be287
f80b16acad
e3b00cd46a
db9af62442
84e9a4ff57
67f82e117d
f04fe4de3f
cb1019e893
2037ba4123
de05d7a270
8279dec4e7
6850a8a4e7
fe404e4a24
bf57299501
b7b343222c
730636f38e
b92651aad2
2cc14dd5c3
ea5ec58967
2bb0a137f0
fbd5fb29ca
2cde6bd556
930c984788
60ad84645b
a5c75313f5
774070c783
cb429f9c3f
d00b04ac2e
531cda52b5
5c4f16d16c
2d0e32be61
721b3f5713
3a82e09028
bd86802333
ce287096ec
232f3d6ce4
@@ -44,7 +44,7 @@ services:
       - ..:/usr/src/paperless/paperless-ngx:delegated
       - ../.devcontainer/vscode:/usr/src/paperless/paperless-ngx/.vscode:delegated # VSCode config files
       - virtualenv:/usr/src/paperless/paperless-ngx/.venv # Virtual environment persisted in volume
-      - /usr/src/paperless/paperless-ngx/src/documents/static/frontend # Static frontend files exist only in container
+      - /usr/src/paperless/paperless-ngx/src/paperless/static/frontend # Static frontend files exist only in container
       - /usr/src/paperless/paperless-ngx/src/.pytest_cache
       - /usr/src/paperless/paperless-ngx/.ruff_cache
       - /usr/src/paperless/paperless-ngx/htmlcov
@@ -58,11 +58,11 @@ services:
       PAPERLESS_TIKA_ENABLED: 1
       PAPERLESS_TIKA_GOTENBERG_ENDPOINT: http://gotenberg:3000
       PAPERLESS_TIKA_ENDPOINT: http://tika:9998
-      PAPERLESS_STATICDIR: ./src/documents/static
+      PAPERLESS_STATICDIR: ./src/paperless/static
       PAPERLESS_DEBUG: true

     # Overrides default command so things don't shut down after the process ends.
-    command: /bin/sh -c "chown -R paperless:paperless /usr/src/paperless/paperless-ngx/src/documents/static/frontend && chown -R paperless:paperless /usr/src/paperless/paperless-ngx/.ruff_cache && while sleep 1000; do :; done"
+    command: /bin/sh -c "chown -R paperless:paperless /usr/src/paperless/paperless-ngx/src/paperless/static/frontend && chown -R paperless:paperless /usr/src/paperless/paperless-ngx/.ruff_cache && while sleep 1000; do :; done"

   gotenberg:
     image: docker.io/gotenberg/gotenberg:8.17
6 .github/workflows/ci.yml vendored
@@ -430,13 +430,13 @@ jobs:
         name: Export frontend artifact from docker
         run: |
           docker create --name frontend-extract ${{ fromJSON(steps.docker-meta.outputs.json).tags[0] }}
-          docker cp frontend-extract:/usr/src/paperless/src/documents/static/frontend src/documents/static/frontend/
+          docker cp frontend-extract:/usr/src/paperless/src/paperless/static/frontend src/paperless/static/frontend/
       -
         name: Upload frontend artifact
         uses: actions/upload-artifact@v4
         with:
           name: frontend-compiled
-          path: src/documents/static/frontend/
+          path: src/paperless/static/frontend/
           retention-days: 7

   build-release:
@@ -476,7 +476,7 @@ jobs:
         uses: actions/download-artifact@v4
         with:
           name: frontend-compiled
-          path: src/documents/static/frontend/
+          path: src/paperless/static/frontend/
       -
         name: Download documentation artifact
         uses: actions/download-artifact@v4
2 .gitignore vendored
@@ -94,7 +94,7 @@ scripts/nuke
 /export/

 # this is where the compiled frontend is moved to.
-/src/documents/static/frontend/
+/src/paperless/static/frontend/

 # mac os
 .DS_Store
@@ -81,7 +81,7 @@ Some notes about translation:

 If a language has already been added, and you would like to contribute new translations or change existing translations, please read the "Translation" section in the README.md file for further details on that.

-If you would like the project to be translated to another language, first head over to https://crwd.in/paperless-ngx to check if that language has already been enabled for translation.
+If you would like the project to be translated to another language, first head over to https://crowdin.com/project/paperless-ngx to check if that language has already been enabled for translation.
 If not, please request the language to be added by creating an issue on GitHub. The issue should contain:

 - English name of the language (the localized name can be added on Crowdin).
@@ -32,7 +32,7 @@ RUN set -eux \
 # Purpose: Installs s6-overlay and rootfs
 # Comments:
 #  - Don't leave anything extra in here either
-FROM ghcr.io/astral-sh/uv:0.6.11-python3.12-bookworm-slim AS s6-overlay-base
+FROM ghcr.io/astral-sh/uv:0.6.13-python3.12-bookworm-slim AS s6-overlay-base

 WORKDIR /usr/src/s6

@@ -234,7 +234,7 @@ RUN --mount=type=cache,target=${UV_CACHE_DIR},id=python-cache \
 COPY --chown=1000:1000 ./src ./

 # copy frontend
-COPY --from=compile-frontend --chown=1000:1000 /src/src/documents/static/frontend/ ./documents/static/frontend/
+COPY --from=compile-frontend --chown=1000:1000 /src/src/paperless/static/frontend/ ./paperless/static/frontend/

 # add users, setup scripts
 # Mount the compiled frontend to expected location
@@ -83,7 +83,7 @@ People interested in continuing the work on paperless-ngx are encouraged to reac

 ## Translation

-Paperless-ngx is available in many languages that are coordinated on Crowdin. If you want to help out by translating paperless-ngx into your language, please head over to https://crwd.in/paperless-ngx, and thank you! More details can be found in [CONTRIBUTING.md](https://github.com/paperless-ngx/paperless-ngx/blob/main/CONTRIBUTING.md#translating-paperless-ngx).
+Paperless-ngx is available in many languages that are coordinated on Crowdin. If you want to help out by translating paperless-ngx into your language, please head over to https://crowdin.com/project/paperless-ngx, and thank you! More details can be found in [CONTRIBUTING.md](https://github.com/paperless-ngx/paperless-ngx/blob/main/CONTRIBUTING.md#translating-paperless-ngx).

 ## Feature Requests
@@ -17,6 +17,9 @@ if find /run/s6/container_environment/*"_FILE" -maxdepth 1 > /dev/null 2>&1; then
         if [[ -f ${SECRETFILE} ]]; then
             # Trim off trailing _FILE
             FILESTRIP=${FILENAME//_FILE/}
+            if [[ $(tail -n1 "${SECRETFILE}" | wc -l) != 0 ]]; then
+                echo "${log_prefix} Your secret: ${FILENAME##*/} contains a trailing newline and may not work as expected"
+            fi
             # Set environment variable
             cat "${SECRETFILE}" > "${FILESTRIP}"
             echo "${log_prefix} ${FILESTRIP##*/} set from ${FILENAME##*/}"
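The added lines flag `*_FILE`-style secrets that end with a newline, a common cause of hard-to-diagnose authentication failures (the value is still used as-is; the script only warns). A rough Python equivalent of the same detection, as a minimal sketch — the path in the usage comment is hypothetical:

```python
from pathlib import Path


def warn_on_trailing_newline(secret_file: str) -> str:
    """Read a *_FILE style secret and warn if it ends with a newline."""
    raw = Path(secret_file).read_bytes()
    if raw.endswith((b"\n", b"\r")):
        # Mirrors the init script above: warn, but do not modify the value.
        print(
            f"Your secret: {Path(secret_file).name} contains a trailing "
            "newline and may not work as expected"
        )
    return raw.decode()


# Hypothetical usage:
# password = warn_on_trailing_newline("/run/secrets/paperless_dbpass")
```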
7 docker/rootfs/etc/s6-overlay/s6-rc.d/init-migrations/migrate.sh Executable file
@@ -0,0 +1,7 @@
+#!/command/with-contenv /usr/bin/bash
+# shellcheck shell=bash
+declare -r data_dir="${PAPERLESS_DATA_DIR:-/usr/src/paperless/data}"
+
+# shellcheck disable=SC2164
+cd "${PAPERLESS_SRC_DIR}"
+exec s6-setlock -n "${data_dir}/migration_lock" python3 manage.py migrate --skip-checks --no-input
@@ -1,20 +1,12 @@
 #!/command/with-contenv /usr/bin/bash
 # shellcheck shell=bash
 declare -r log_prefix="[init-migrations]"
-declare -r data_dir="${PAPERLESS_DATA_DIR:-/usr/src/paperless/data}"

-(
-    # flock is in place to prevent multiple containers from doing migrations
-    # simultaneously. This also ensures that the db is ready when the command
-    # of the current container starts.
-    flock 200
-    echo "${log_prefix} Apply database migrations..."
-    cd "${PAPERLESS_SRC_DIR}"
+echo "${log_prefix} Apply database migrations..."

-    if [[ -n "${USER_IS_NON_ROOT}" ]]; then
-        exec python3 manage.py migrate --skip-checks --no-input
-    else
-        exec s6-setuidgid paperless python3 manage.py migrate --skip-checks --no-input
-    fi
-
-) 200>"${data_dir}/migration_lock"
+# The whole migrate, with flock, needs to run as the right user
+if [[ -n "${USER_IS_NON_ROOT}" ]]; then
+    exec /etc/s6-overlay/s6-rc.d/init-migrations/migrate.sh
+else
+    exec s6-setuidgid paperless /etc/s6-overlay/s6-rc.d/init-migrations/migrate.sh
+fi
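The point of the refactor: the lock acquisition now happens inside `migrate.sh` (via `s6-setlock`), so the whole locked region runs as the `paperless` user instead of the old pattern where the subshell holding `flock 200` ran as root. Conceptually, what the old script did can be sketched in Python with `fcntl` (POSIX-only; blocking, like the old `flock 200` — note that `s6-setlock -n` instead fails fast if the lock is held):

```python
import fcntl
import subprocess


def migrate_with_lock(lock_path: str = "/usr/src/paperless/data/migration_lock") -> None:
    """Hold an advisory file lock while running Django migrations.

    Prevents several containers sharing one data volume from migrating
    the same database simultaneously, like flock/s6-setlock do.
    """
    with open(lock_path, "w") as lock_file:
        fcntl.flock(lock_file, fcntl.LOCK_EX)  # blocks until the lock is free
        subprocess.run(
            ["python3", "manage.py", "migrate", "--skip-checks", "--no-input"],
            check=True,
        )
    # The lock is released when the file is closed.
```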
@@ -1,5 +1,34 @@
 # Changelog

+## paperless-ngx 2.15.1
+
+### Bug Fixes
+
+- Fix: Run migration lock as the correct user [@stumpylog](https://github.com/stumpylog) ([#9604](https://github.com/paperless-ngx/paperless-ngx/pull/9604))
+- Fix: Adds a warning to the user if their secret file includes a trailing newline [@stumpylog](https://github.com/stumpylog) ([#9601](https://github.com/paperless-ngx/paperless-ngx/pull/9601))
+- Fix: correct download filename in 2.15.0 [@shamoon](https://github.com/shamoon) ([#9599](https://github.com/paperless-ngx/paperless-ngx/pull/9599))
+- Fix: dont exclude matching check for scheduled workflows [@shamoon](https://github.com/shamoon) ([#9594](https://github.com/paperless-ngx/paperless-ngx/pull/9594))
+
+### Maintenance
+
+- docker(deps): Bump astral-sh/uv from 0.6.9-python3.12-bookworm-slim to 0.6.13-python3.12-bookworm-slim @[dependabot[bot]](https://github.com/apps/dependabot) ([#9573](https://github.com/paperless-ngx/paperless-ngx/pull/9573))
+
+### Dependencies
+
+- docker(deps): Bump astral-sh/uv from 0.6.9-python3.12-bookworm-slim to 0.6.13-python3.12-bookworm-slim @[dependabot[bot]](https://github.com/apps/dependabot) ([#9573](https://github.com/paperless-ngx/paperless-ngx/pull/9573))
+- Chore: move to whoosh-reloaded, for now [@shamoon](https://github.com/shamoon) ([#9605](https://github.com/paperless-ngx/paperless-ngx/pull/9605))
+
+### All App Changes
+
+<details>
+<summary>4 changes</summary>
+
+- Fix: Run migration lock as the correct user [@stumpylog](https://github.com/stumpylog) ([#9604](https://github.com/paperless-ngx/paperless-ngx/pull/9604))
+- Fix: Adds a warning to the user if their secret file includes a trailing newline [@stumpylog](https://github.com/stumpylog) ([#9601](https://github.com/paperless-ngx/paperless-ngx/pull/9601))
+- Fix: correct download filename in 2.15.0 [@shamoon](https://github.com/shamoon) ([#9599](https://github.com/paperless-ngx/paperless-ngx/pull/9599))
+- Fix: dont exclude matching check for scheduled workflows [@shamoon](https://github.com/shamoon) ([#9594](https://github.com/paperless-ngx/paperless-ngx/pull/9594))
+</details>
+
 ## paperless-ngx 2.15.0

 ### Features
@@ -390,7 +390,7 @@ Custom parsers can be added to Paperless-ngx to support more file types. In
 order to do that, you need to write the parser itself and announce its
 existence to Paperless-ngx.

-The parser itself must extend `documents.parsers.DocumentParser` and
+The parser itself must extend `paperless.parsers.DocumentParser` and
 must implement the methods `parse` and `get_thumbnail`. You can provide
 your own implementation to `get_date` if you don't want to rely on
 Paperless-ngx' default date guessing mechanisms.
@@ -418,7 +418,7 @@ class MyCustomParser(DocumentParser):
 ```

 If you encounter any issues during parsing, raise a
-`documents.parsers.ParseError`.
+`paperless.parsers.ParseError`.

 The `self.tempdir` directory is a temporary directory that is guaranteed
 to be empty and removed after consumption finished. You can use that
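Putting the renamed module path together with the documented interface, a minimal parser might look like the sketch below. This only runs inside a Paperless-ngx checkout (it imports `paperless.parsers`), the `logging_name` is an assumed illustrative value, and the parsing/thumbnail logic is deliberately trivial:

```python
from pathlib import Path

from paperless.parsers import DocumentParser, ParseError


class MyCustomParser(DocumentParser):
    logging_name = "paperless.parsing.custom"  # assumed name, for illustration

    def parse(self, document_path, mime_type, file_name=None):
        try:
            # A trivially simple "parser": treat the input as UTF-8 text.
            self.text = Path(document_path).read_text(encoding="utf-8")
            self.date = None  # leave date guessing to Paperless-ngx
        except OSError as err:
            raise ParseError(f"Could not parse {document_path}") from err

    def get_thumbnail(self, document_path, mime_type, file_name=None):
        # Must return the path of a preview image. self.tempdir is
        # guaranteed empty and is cleaned up after consumption finishes.
        thumbnail = Path(self.tempdir) / "thumbnail.webp"
        thumbnail.write_bytes(b"")  # placeholder: render a real preview here
        return thumbnail
```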
@@ -197,7 +197,7 @@ People interested in continuing the work on paperless-ngx are encouraged to reac

 ### Translation

-Paperless-ngx is available in many languages that are coordinated on [Crowdin](https://crwd.in/paperless-ngx). If you want to help out by translating paperless-ngx into your language, please head over to the [Paperless-ngx project at Crowdin](https://crwd.in/paperless-ngx), and thank you!
+Paperless-ngx is available in many languages that are coordinated on [Crowdin](https://crowdin.com/project/paperless-ngx). If you want to help out by translating paperless-ngx into your language, please head over to the [Paperless-ngx project at Crowdin](https://crowdin.com/project/paperless-ngx), and thank you!

 ## Scanners & Software
@@ -191,7 +191,7 @@ This might have multiple reasons.
    either manually or as part of the docker image build.

    If the front end is still missing, make sure that the front end is
-   compiled (files present in `src/documents/static/frontend`). If it
+   compiled (files present in `src/paperless/static/frontend`). If it
    is not, you need to compile the front end yourself or download the
    release archive instead of cloning the repository.
@@ -1,6 +1,6 @@
|
||||
[project]
|
||||
name = "paperless-ngx"
|
||||
version = "2.15.0"
|
||||
version = "2.15.1"
|
||||
description = "A community-supported supercharged version of paperless: scan, index and archive all your physical documents"
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.10"
|
||||
@@ -65,7 +65,7 @@ dependencies = [
|
||||
"tqdm~=4.67.1",
|
||||
"watchdog~=6.0",
|
||||
"whitenoise~=6.9",
|
||||
"whoosh~=2.7",
|
||||
"whoosh-reloaded>=2.7.5",
|
||||
"zxing-cpp~=2.3.0",
|
||||
]
|
||||
|
||||
@@ -200,63 +200,60 @@ lint.per-file-ignores."docker/wait-for-redis.py" = [
   "INP001",
   "T201",
 ]
-lint.per-file-ignores."src/documents/file_handling.py" = [
-  "PTH",
-] # TODO Enable & remove
-lint.per-file-ignores."src/documents/management/commands/document_consumer.py" = [
-  "PTH",
-] # TODO Enable & remove
-lint.per-file-ignores."src/documents/management/commands/document_exporter.py" = [
-  "PTH",
-] # TODO Enable & remove
-lint.per-file-ignores."src/documents/migrations/1012_fix_archive_files.py" = [
-  "PTH",
-] # TODO Enable & remove
-lint.per-file-ignores."src/documents/models.py" = [
-  "SIM115",
-]
-lint.per-file-ignores."src/documents/parsers.py" = [
-  "PTH",
-] # TODO Enable & remove
-lint.per-file-ignores."src/documents/signals/handlers.py" = [
-  "PTH",
-] # TODO Enable & remove
-lint.per-file-ignores."src/documents/tests/test_consumer.py" = [
-  "PTH",
-] # TODO Enable & remove
-lint.per-file-ignores."src/documents/tests/test_file_handling.py" = [
-  "PTH",
-] # TODO Enable & remove
-lint.per-file-ignores."src/documents/tests/test_management.py" = [
-  "PTH",
-] # TODO Enable & remove
-lint.per-file-ignores."src/documents/tests/test_management_consumer.py" = [
-  "PTH",
-] # TODO Enable & remove
-lint.per-file-ignores."src/documents/tests/test_management_exporter.py" = [
-  "PTH",
-] # TODO Enable & remove
-lint.per-file-ignores."src/documents/tests/test_migration_archive_files.py" = [
-  "PTH",
-] # TODO Enable & remove
-lint.per-file-ignores."src/documents/tests/test_migration_document_pages_count.py" = [
-  "PTH",
-] # TODO Enable & remove
-lint.per-file-ignores."src/documents/tests/test_migration_mime_type.py" = [
-  "PTH",
-] # TODO Enable & remove
-lint.per-file-ignores."src/documents/tests/test_sanity_check.py" = [
-  "PTH",
-] # TODO Enable & remove
-lint.per-file-ignores."src/documents/views.py" = [
-  "PTH",
-] # TODO Enable & remove
+lint.per-file-ignores."src/paperless/checks.py" = [
+  "PTH",
+] # TODO Enable & remove
+lint.per-file-ignores."src/paperless/file_handling.py" = [
+  "PTH",
+] # TODO Enable & remove
+lint.per-file-ignores."src/paperless/management/commands/document_consumer.py" = [
+  "PTH",
+] # TODO Enable & remove
+lint.per-file-ignores."src/paperless/management/commands/document_exporter.py" = [
+  "PTH",
+] # TODO Enable & remove
+lint.per-file-ignores."src/paperless/models.py" = [
+  "SIM115",
+]
+lint.per-file-ignores."src/paperless/parsers.py" = [
+  "PTH",
+] # TODO Enable & remove
+lint.per-file-ignores."src/paperless/settings.py" = [
+  "PTH",
+] # TODO Enable & remove
+lint.per-file-ignores."src/paperless/signals/handlers.py" = [
+  "PTH",
+] # TODO Enable & remove
+lint.per-file-ignores."src/paperless/tests/test_consumer.py" = [
+  "PTH",
+] # TODO Enable & remove
+lint.per-file-ignores."src/paperless/tests/test_file_handling.py" = [
+  "PTH",
+] # TODO Enable & remove
+lint.per-file-ignores."src/paperless/tests/test_management.py" = [
+  "PTH",
+] # TODO Enable & remove
+lint.per-file-ignores."src/paperless/tests/test_management_consumer.py" = [
+  "PTH",
+] # TODO Enable & remove
+lint.per-file-ignores."src/paperless/tests/test_management_exporter.py" = [
+  "PTH",
+] # TODO Enable & remove
+lint.per-file-ignores."src/paperless/tests/test_migration_archive_files.py" = [
+  "PTH",
+] # TODO Enable & remove
+lint.per-file-ignores."src/paperless/tests/test_migration_document_pages_count.py" = [
+  "PTH",
+] # TODO Enable & remove
+lint.per-file-ignores."src/paperless/tests/test_migration_mime_type.py" = [
+  "PTH",
+] # TODO Enable & remove
+lint.per-file-ignores."src/paperless/tests/test_sanity_check.py" = [
+  "PTH",
+] # TODO Enable & remove
+lint.per-file-ignores."src/paperless/views.py" = [
+  "PTH",
+] # TODO Enable & remove
@@ -100,7 +100,7 @@
         "with": "src/environments/environment.prod.ts"
       }
     ],
-    "outputPath": "../src/documents/static/frontend/",
+    "outputPath": "../src/paperless/static/frontend/",
     "optimization": true,
     "outputHashing": "none",
     "sourceMap": false,
@@ -1,5 +1,10 @@
 import { DatePipe } from '@angular/common'
-import { provideHttpClient, withInterceptorsFromDi } from '@angular/common/http'
+import {
+  HttpHeaders,
+  HttpResponse,
+  provideHttpClient,
+  withInterceptorsFromDi,
+} from '@angular/common/http'
 import {
   HttpTestingController,
   provideHttpClientTesting,
@@ -1331,6 +1336,34 @@ describe('DocumentDetailComponent', () => {
     expect(urlRevokeSpy).toHaveBeenCalled()
   })

+  it('should download a file with the correct filename', () => {
+    const mockBlob = new Blob(['test content'], { type: 'text/plain' })
+    const mockResponse = new HttpResponse({
+      body: mockBlob,
+      headers: new HttpHeaders({
+        'Content-Disposition': 'attachment; filename="test-file.txt"',
+      }),
+    })
+
+    const downloadUrl = 'http://example.com/download'
+    component.documentId = 123
+    jest.spyOn(documentService, 'getDownloadUrl').mockReturnValue(downloadUrl)
+
+    const createSpy = jest.spyOn(document, 'createElement')
+    const anchor: HTMLAnchorElement = {} as HTMLAnchorElement
+    createSpy.mockReturnValueOnce(anchor)
+
+    component.download(false)
+
+    httpTestingController
+      .expectOne(downloadUrl)
+      .flush(mockBlob, { headers: mockResponse.headers })
+
+    expect(createSpy).toHaveBeenCalledWith('a')
+    expect(anchor.download).toBe('test-file.txt')
+    createSpy.mockClear()
+  })
+
   it('should get email enabled status from settings', () => {
     jest.spyOn(settingsService, 'get').mockReturnValue(true)
     expect(component.emailEnabled).toBeTruthy()
@@ -1,5 +1,5 @@
 import { AsyncPipe, NgTemplateOutlet } from '@angular/common'
-import { HttpClient } from '@angular/common/http'
+import { HttpClient, HttpResponse } from '@angular/common/http'
 import { Component, OnDestroy, OnInit, ViewChild } from '@angular/core'
 import {
   FormArray,
@@ -995,44 +995,48 @@ export class DocumentDetailComponent
       this.documentId,
       original
     )
-    this.http.get(downloadUrl, { responseType: 'blob' }).subscribe({
-      next: (blob) => {
-        this.downloading = false
-        const blobParts = [blob]
-        const file = new File(
-          blobParts,
-          original
-            ? this.document.original_file_name
-            : this.document.archived_file_name,
-          {
-            type: original ? this.document.mime_type : 'application/pdf',
-          }
-        )
-        if (
-          !this.deviceDetectorService.isDesktop() &&
-          navigator.canShare &&
-          navigator.canShare({ files: [file] })
-        ) {
-          navigator.share({
-            files: [file],
+    this.http
+      .get(downloadUrl, { observe: 'response', responseType: 'blob' })
+      .subscribe({
+        next: (response: HttpResponse<Blob>) => {
+          const filename = response.headers
+            .get('Content-Disposition')
+            ?.split(';')
+            ?.find((part) => part.trim().startsWith('filename='))
+            ?.split('=')[1]
+            ?.replace(/['"]/g, '')
+          const blob = new Blob([response.body], {
+            type: response.body.type,
           })
-        } else {
-          const url = URL.createObjectURL(blob)
-          const a = document.createElement('a')
-          a.href = url
-          a.download = this.document.title
-          a.click()
-          URL.revokeObjectURL(url)
-        }
-      },
-      error: (error) => {
-        this.downloading = false
-        this.toastService.showError(
-          $localize`Error downloading document`,
-          error
-        )
-      },
-    })
+          this.downloading = false
+          const file = new File([blob], filename, {
+            type: response.body.type,
+          })
+          if (
+            !this.deviceDetectorService.isDesktop() &&
+            navigator.canShare &&
+            navigator.canShare({ files: [file] })
+          ) {
+            navigator.share({
+              files: [file],
+            })
+          } else {
+            const url = URL.createObjectURL(blob)
+            const a = document.createElement('a')
+            a.href = url
+            a.download = filename
+            a.click()
+            URL.revokeObjectURL(url)
+          }
+        },
+        error: (error) => {
+          this.downloading = false
+          this.toastService.showError(
+            $localize`Error downloading document`,
+            error
+          )
+        },
+      })
  }

  hasNext() {
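The essence of the fix: instead of naming the downloaded file after the document title, the component now observes the full HTTP response and reads the filename the server advertises in the `Content-Disposition` header. The same header-parsing logic, sketched in Python (standard library only) since the backend that produces the header is Python:

```python
def filename_from_content_disposition(header: str | None) -> str | None:
    """Extract filename="..." from a Content-Disposition header.

    Mirrors the TypeScript above: split on ';', find the filename=
    parameter, strip surrounding quotes. Returns None when absent.
    """
    if not header:
        return None
    for part in header.split(";"):
        part = part.strip()
        if part.startswith("filename="):
            return part.split("=", 1)[1].strip("'\"")
    return None


assert (
    filename_from_content_disposition('attachment; filename="test-file.txt"')
    == "test-file.txt"
)
assert filename_from_content_disposition(None) is None
```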
@@ -5,7 +5,7 @@ export const environment = {
   apiBaseUrl: document.baseURI + 'api/',
   apiVersion: '7',
   appTitle: 'Paperless-ngx',
-  version: '2.15.0',
+  version: '2.15.1',
   webSocketHost: window.location.host,
   webSocketProtocol: window.location.protocol == 'https:' ? 'wss:' : 'ws:',
   webSocketBaseUrl: base_url.pathname + 'ws/',
@@ -556,7 +556,7 @@
         <context context-type="sourcefile">src/app/components/admin/config/config.component.html</context>
         <context context-type="linenumber">2</context>
       </context-group>
-      <target state="translated">應用程式設定</target>
+      <target state="translated">系統配置</target>
     </trans-unit>
     <trans-unit id="8528041182664173532" datatype="html">
       <source>Global app configuration options which apply to <strong>every</strong> user of this install of Paperless-ngx. Options can also be set using environment variables or the configuration file but the value here will always take precedence.</source>
@@ -564,7 +564,7 @@
         <context context-type="sourcefile">src/app/components/admin/config/config.component.html</context>
         <context context-type="linenumber">4</context>
       </context-group>
-      <target state="translated">全域應用程式設定選項適用於此安裝版本的<strong>每位</strong>使用者。雖然也可以透過環境變數或設定檔來設定，但這裡的設定將始終優先於其他設定。</target>
+      <target state="translated">全域系統配置會套用至該系統的<strong>每一位</strong>使用者。雖然環境變數或設定檔也可以調整相關設定，但此處的設定將優先於他處的設定。</target>
     </trans-unit>
     <trans-unit id="7991430199894172363" datatype="html">
       <source>Read the documentation about this setting</source>
@@ -864,7 +864,7 @@
         <context context-type="sourcefile">src/app/components/admin/settings/settings.component.html</context>
         <context context-type="linenumber">4</context>
       </context-group>
-      <target state="translated">自訂外觀、通知等選項。設定只適用於<strong>目前使用者</strong>。</target>
+      <target state="translated">自訂外觀、通知等選項。這些設定只套用於<strong>目前的使用者</strong>。</target>
     </trans-unit>
     <trans-unit id="1685061484835793745" datatype="html">
       <source>Start tour</source>
@@ -1,5 +1 @@
-# this is here so that django finds the checks.
-from documents.checks import changed_password_check
-from documents.checks import parser_check
-
-__all__ = ["changed_password_check", "parser_check"]
+__all__ = []
@@ -1,214 +0,0 @@
-from django.conf import settings
-from django.contrib import admin
-from guardian.admin import GuardedModelAdmin
-
-from documents.models import Correspondent
-from documents.models import CustomField
-from documents.models import CustomFieldInstance
-from documents.models import Document
-from documents.models import DocumentType
-from documents.models import Note
-from documents.models import PaperlessTask
-from documents.models import SavedView
-from documents.models import SavedViewFilterRule
-from documents.models import ShareLink
-from documents.models import StoragePath
-from documents.models import Tag
-
-if settings.AUDIT_LOG_ENABLED:
-    from auditlog.admin import LogEntryAdmin
-    from auditlog.models import LogEntry
-
-
-class CorrespondentAdmin(GuardedModelAdmin):
-    list_display = ("name", "match", "matching_algorithm")
-    list_filter = ("matching_algorithm",)
-    list_editable = ("match", "matching_algorithm")
-
-
-class TagAdmin(GuardedModelAdmin):
-    list_display = ("name", "color", "match", "matching_algorithm")
-    list_filter = ("matching_algorithm",)
-    list_editable = ("color", "match", "matching_algorithm")
-    search_fields = ("color", "name")
-
-
-class DocumentTypeAdmin(GuardedModelAdmin):
-    list_display = ("name", "match", "matching_algorithm")
-    list_filter = ("matching_algorithm",)
-    list_editable = ("match", "matching_algorithm")
-
-
-class DocumentAdmin(GuardedModelAdmin):
-    search_fields = ("correspondent__name", "title", "content", "tags__name")
-    readonly_fields = (
-        "added",
-        "modified",
-        "mime_type",
-        "storage_type",
-        "filename",
-        "checksum",
-        "archive_filename",
-        "archive_checksum",
-        "original_filename",
-        "deleted_at",
-    )
-
-    list_display_links = ("title",)
-
-    list_display = ("id", "title", "mime_type", "filename", "archive_filename")
-
-    list_filter = (
-        ("mime_type"),
-        ("archive_serial_number", admin.EmptyFieldListFilter),
-        ("archive_filename", admin.EmptyFieldListFilter),
-    )
-
-    filter_horizontal = ("tags",)
-
-    ordering = ["-id"]
-
-    date_hierarchy = "created"
-
-    def has_add_permission(self, request):
-        return False
-
-    def created_(self, obj):
-        return obj.created.date().strftime("%Y-%m-%d")
-
-    created_.short_description = "Created"
-
-    def get_queryset(self, request):  # pragma: no cover
-        """
-        Include trashed documents
-        """
-        return Document.global_objects.all()
-
-    def delete_queryset(self, request, queryset):
-        from documents import index
-
-        with index.open_index_writer() as writer:
-            for o in queryset:
-                index.remove_document(writer, o)
-
-        super().delete_queryset(request, queryset)
-
-    def delete_model(self, request, obj):
-        from documents import index
-
-        index.remove_document_from_index(obj)
-        super().delete_model(request, obj)
-
-    def save_model(self, request, obj, form, change):
-        from documents import index
-
-        index.add_or_update_document(obj)
-        super().save_model(request, obj, form, change)
-
-
-class RuleInline(admin.TabularInline):
-    model = SavedViewFilterRule
-
-
-class SavedViewAdmin(GuardedModelAdmin):
-    list_display = ("name", "owner")
-
-    inlines = [RuleInline]
-
-    def get_queryset(self, request):  # pragma: no cover
-        return super().get_queryset(request).select_related("owner")
-
-
-class StoragePathInline(admin.TabularInline):
-    model = StoragePath
-
-
-class StoragePathAdmin(GuardedModelAdmin):
-    list_display = ("name", "path", "match", "matching_algorithm")
-    list_filter = ("path", "matching_algorithm")
-    list_editable = ("path", "match", "matching_algorithm")
-
-
-class TaskAdmin(admin.ModelAdmin):
-    list_display = ("task_id", "task_file_name", "task_name", "date_done", "status")
-    list_filter = ("status", "date_done", "task_name")
-    search_fields = ("task_name", "task_id", "status", "task_file_name")
-    readonly_fields = (
-        "task_id",
-        "task_file_name",
-        "task_name",
-        "status",
-        "date_created",
-        "date_started",
-        "date_done",
-        "result",
-    )
-
-
-class NotesAdmin(GuardedModelAdmin):
-    list_display = ("user", "created", "note", "document")
-    list_filter = ("created", "user")
-    list_display_links = ("created",)
-    raw_id_fields = ("document",)
-    search_fields = ("document__title",)
-
-    def get_queryset(self, request):  # pragma: no cover
-        return (
-            super()
-            .get_queryset(request)
-            .select_related("user", "document__correspondent")
-        )
-
-
-class ShareLinksAdmin(GuardedModelAdmin):
-    list_display = ("created", "expiration", "document")
-    list_filter = ("created", "expiration", "owner")
-    list_display_links = ("created",)
-    raw_id_fields = ("document",)
-
-    def get_queryset(self, request):  # pragma: no cover
-        return super().get_queryset(request).select_related("document__correspondent")
-
-
-class CustomFieldsAdmin(GuardedModelAdmin):
-    fields = ("name", "created", "data_type")
-    readonly_fields = ("created", "data_type")
-    list_display = ("name", "created", "data_type")
-    list_filter = ("created", "data_type")
-
-
-class CustomFieldInstancesAdmin(GuardedModelAdmin):
-    fields = ("field", "document", "created", "value")
-    readonly_fields = ("field", "document", "created", "value")
-    list_display = ("field", "document", "value", "created")
-    search_fields = ("document__title",)
-    list_filter = ("created", "field")
-
-    def get_queryset(self, request):  # pragma: no cover
-        return (
-            super()
-            .get_queryset(request)
-            .select_related("field", "document__correspondent")
-        )
-
-
-admin.site.register(Correspondent, CorrespondentAdmin)
-admin.site.register(Tag, TagAdmin)
-admin.site.register(DocumentType, DocumentTypeAdmin)
-admin.site.register(Document, DocumentAdmin)
-admin.site.register(SavedView, SavedViewAdmin)
-admin.site.register(StoragePath, StoragePathAdmin)
-admin.site.register(PaperlessTask, TaskAdmin)
-admin.site.register(Note, NotesAdmin)
-admin.site.register(ShareLink, ShareLinksAdmin)
-admin.site.register(CustomField, CustomFieldsAdmin)
-admin.site.register(CustomFieldInstance, CustomFieldInstancesAdmin)
-
-if settings.AUDIT_LOG_ENABLED:
-
-    class LogEntryAUDIT(LogEntryAdmin):
-        def has_delete_permission(self, request, obj=None):
-            return False
-
-    admin.site.unregister(LogEntry)
-    admin.site.register(LogEntry, LogEntryAUDIT)
@@ -4,30 +4,4 @@ from django.utils.translation import gettext_lazy as _

 class DocumentsConfig(AppConfig):
     name = "documents"

-    verbose_name = _("Documents")
-
-    def ready(self):
-        from documents.signals import document_consumption_finished
-        from documents.signals import document_updated
-        from documents.signals.handlers import add_inbox_tags
-        from documents.signals.handlers import add_to_index
-        from documents.signals.handlers import run_workflows_added
-        from documents.signals.handlers import run_workflows_updated
-        from documents.signals.handlers import set_correspondent
-        from documents.signals.handlers import set_document_type
-        from documents.signals.handlers import set_storage_path
-        from documents.signals.handlers import set_tags
-
-        document_consumption_finished.connect(add_inbox_tags)
-        document_consumption_finished.connect(set_correspondent)
-        document_consumption_finished.connect(set_document_type)
-        document_consumption_finished.connect(set_tags)
-        document_consumption_finished.connect(set_storage_path)
-        document_consumption_finished.connect(add_to_index)
-        document_consumption_finished.connect(run_workflows_added)
-        document_updated.connect(run_workflows_updated)
-
-        import documents.schema  # noqa: F401
-
-        AppConfig.ready(self)
+    verbose_name = _("Documents (legacy)")
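For reference, the wiring being retired here is the standard Django pattern: connect signal handlers inside `AppConfig.ready()` so they are registered exactly once at startup, after the app registry is loaded. A minimal self-contained sketch of that pattern (the app and handler names are illustrative, not Paperless-ngx APIs):

```python
from django.apps import AppConfig
from django.dispatch import Signal

# In real code this Signal lives in the app's signals module.
document_consumption_finished = Signal()


def add_inbox_tags(sender, document=None, **kwargs):
    """Illustrative handler; the real ones live in documents.signals.handlers."""


class ExampleConfig(AppConfig):
    name = "example"  # hypothetical app label

    def ready(self):
        # Connect handlers here (not at module import time) so models
        # are fully loaded and the connection happens exactly once.
        document_consumption_finished.connect(add_inbox_tags)
```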
@@ -1,88 +0,0 @@
-import textwrap
-
-from django.conf import settings
-from django.core.checks import Error
-from django.core.checks import Warning
-from django.core.checks import register
-from django.core.exceptions import FieldError
-from django.db.utils import OperationalError
-from django.db.utils import ProgrammingError
-
-from documents.signals import document_consumer_declaration
-from documents.templating.utils import convert_format_str_to_template_format
-
-
-@register()
-def changed_password_check(app_configs, **kwargs):
-    from documents.models import Document
-    from paperless.db import GnuPG
-
-    try:
-        encrypted_doc = (
-            Document.objects.filter(
-                storage_type=Document.STORAGE_TYPE_GPG,
-            )
-            .only("pk", "storage_type")
-            .first()
-        )
-    except (OperationalError, ProgrammingError, FieldError):
-        return []  # No documents table yet
-
-    if encrypted_doc:
-        if not settings.PASSPHRASE:
-            return [
-                Error(
-                    "The database contains encrypted documents but no password is set.",
-                ),
-            ]
-
-        if not GnuPG.decrypted(encrypted_doc.source_file):
-            return [
-                Error(
-                    textwrap.dedent(
-                        """
-                        The current password doesn't match the password of the
-                        existing documents.
-
-                        If you intend to change your password, you must first export
-                        all of the old documents, start fresh with the new password
-                        and then re-import them."
-                        """,
-                    ),
-                ),
-            ]
-
-    return []
-
-
-@register()
-def parser_check(app_configs, **kwargs):
-    parsers = []
-    for response in document_consumer_declaration.send(None):
-        parsers.append(response[1])
-
-    if len(parsers) == 0:
-        return [
-            Error(
-                "No parsers found. This is a bug. The consumer won't be "
-                "able to consume any documents without parsers.",
-            ),
-        ]
-    else:
-        return []
-
-
-@register()
-def filename_format_check(app_configs, **kwargs):
-    if settings.FILENAME_FORMAT:
-        converted_format = convert_format_str_to_template_format(
-            settings.FILENAME_FORMAT,
-        )
-        if converted_format != settings.FILENAME_FORMAT:
-            return [
-                Warning(
-                    f"Filename format {settings.FILENAME_FORMAT} is using the old style, please update to use double curly brackets",
-                    hint=converted_format,
-                ),
-            ]
-    return []
@@ -1,950 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import functools
|
||||
import inspect
|
||||
import json
|
||||
import operator
|
||||
from contextlib import contextmanager
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from django.contrib.contenttypes.models import ContentType
|
||||
from django.db.models import Case
|
||||
from django.db.models import CharField
|
||||
from django.db.models import Count
|
||||
from django.db.models import Exists
|
||||
from django.db.models import IntegerField
|
||||
from django.db.models import OuterRef
|
||||
from django.db.models import Q
|
||||
from django.db.models import Subquery
|
||||
from django.db.models import Sum
|
||||
from django.db.models import Value
|
||||
from django.db.models import When
|
||||
from django.db.models.functions import Cast
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from django_filters.rest_framework import BooleanFilter
|
||||
from django_filters.rest_framework import Filter
|
||||
from django_filters.rest_framework import FilterSet
|
||||
from drf_spectacular.utils import extend_schema_field
|
||||
from guardian.utils import get_group_obj_perms_model
|
||||
from guardian.utils import get_user_obj_perms_model
|
||||
from rest_framework import serializers
|
||||
from rest_framework.filters import OrderingFilter
|
||||
from rest_framework_guardian.filters import ObjectPermissionsFilter
|
||||
|
||||
from documents.models import Correspondent
|
||||
from documents.models import CustomField
|
||||
from documents.models import CustomFieldInstance
|
||||
from documents.models import Document
|
||||
from documents.models import DocumentType
|
||||
from documents.models import PaperlessTask
|
||||
from documents.models import ShareLink
|
||||
from documents.models import StoragePath
|
||||
from documents.models import Tag
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from collections.abc import Callable
|
||||
|
||||
CHAR_KWARGS = ["istartswith", "iendswith", "icontains", "iexact"]
|
||||
ID_KWARGS = ["in", "exact"]
|
||||
INT_KWARGS = ["exact", "gt", "gte", "lt", "lte", "isnull"]
|
||||
DATE_KWARGS = [
|
||||
"year",
|
||||
"month",
|
||||
"day",
|
||||
"date__gt",
|
||||
"date__gte",
|
||||
"gt",
|
||||
"gte",
|
||||
"date__lt",
|
||||
"date__lte",
|
||||
"lt",
|
||||
"lte",
|
||||
]
|
||||
|
||||
CUSTOM_FIELD_QUERY_MAX_DEPTH = 10
|
||||
CUSTOM_FIELD_QUERY_MAX_ATOMS = 20
|
||||
|
||||
|
||||
class CorrespondentFilterSet(FilterSet):
|
||||
class Meta:
|
||||
model = Correspondent
|
||||
fields = {
|
||||
"id": ID_KWARGS,
|
||||
"name": CHAR_KWARGS,
|
||||
}
|
||||
|
||||
|
||||
class TagFilterSet(FilterSet):
|
||||
class Meta:
|
||||
model = Tag
|
||||
fields = {
|
||||
"id": ID_KWARGS,
|
||||
"name": CHAR_KWARGS,
|
||||
}
|
||||
|
||||
|
||||
class DocumentTypeFilterSet(FilterSet):
|
||||
class Meta:
|
||||
model = DocumentType
|
||||
fields = {
|
||||
"id": ID_KWARGS,
|
||||
"name": CHAR_KWARGS,
|
||||
}
|
||||
|
||||
|
||||
class StoragePathFilterSet(FilterSet):
|
||||
class Meta:
|
||||
model = StoragePath
|
||||
fields = {
|
||||
"id": ID_KWARGS,
|
||||
"name": CHAR_KWARGS,
|
||||
"path": CHAR_KWARGS,
|
||||
}
|
||||
|
||||
|
||||
class ObjectFilter(Filter):
|
||||
def __init__(self, *, exclude=False, in_list=False, field_name=""):
|
||||
super().__init__()
|
||||
self.exclude = exclude
|
||||
self.in_list = in_list
|
||||
self.field_name = field_name
|
||||
|
||||
def filter(self, qs, value):
|
||||
if not value:
|
||||
return qs
|
||||
|
||||
try:
|
||||
object_ids = [int(x) for x in value.split(",")]
|
||||
except ValueError:
|
||||
return qs
|
||||
|
||||
if self.in_list:
|
||||
qs = qs.filter(**{f"{self.field_name}__id__in": object_ids}).distinct()
|
||||
else:
|
||||
for obj_id in object_ids:
|
||||
if self.exclude:
|
||||
qs = qs.exclude(**{f"{self.field_name}__id": obj_id})
|
||||
else:
|
||||
qs = qs.filter(**{f"{self.field_name}__id": obj_id})
|
||||
|
||||
return qs
|
||||
|
||||
|
||||
@extend_schema_field(serializers.BooleanField)
|
||||
class InboxFilter(Filter):
|
||||
def filter(self, qs, value):
|
||||
if value == "true":
|
||||
return qs.filter(tags__is_inbox_tag=True)
|
||||
elif value == "false":
|
||||
return qs.exclude(tags__is_inbox_tag=True)
|
||||
else:
|
||||
return qs
|
||||
|
||||
|
||||
@extend_schema_field(serializers.CharField)
|
||||
class TitleContentFilter(Filter):
|
||||
def filter(self, qs, value):
|
||||
if value:
|
||||
return qs.filter(Q(title__icontains=value) | Q(content__icontains=value))
|
||||
else:
|
||||
return qs
|
||||
|
||||
|
||||
@extend_schema_field(serializers.BooleanField)
|
||||
class SharedByUser(Filter):
|
||||
def filter(self, qs, value):
|
||||
ctype = ContentType.objects.get_for_model(self.model)
|
||||
UserObjectPermission = get_user_obj_perms_model()
|
||||
GroupObjectPermission = get_group_obj_perms_model()
|
||||
# see https://github.com/paperless-ngx/paperless-ngx/issues/5392, we limit subqueries
|
||||
# to 1 because Postgres doesn't like returning > 1 row, but all we care about is > 0
|
||||
return (
|
||||
qs.filter(
|
||||
owner_id=value,
|
||||
)
|
||||
.annotate(
|
||||
num_shared_users=Count(
|
||||
UserObjectPermission.objects.filter(
|
||||
content_type=ctype,
|
||||
object_pk=Cast(OuterRef("pk"), CharField()),
|
||||
).values("user_id")[:1],
|
||||
),
|
||||
)
|
||||
.annotate(
|
||||
num_shared_groups=Count(
|
||||
GroupObjectPermission.objects.filter(
|
||||
content_type=ctype,
|
||||
object_pk=Cast(OuterRef("pk"), CharField()),
|
||||
).values("group_id")[:1],
|
||||
),
|
||||
)
|
||||
.filter(
|
||||
Q(num_shared_users__gt=0) | Q(num_shared_groups__gt=0),
|
||||
)
|
||||
if value is not None
|
||||
else qs
|
||||
)
|
||||
|
||||
|
||||
class CustomFieldFilterSet(FilterSet):
|
||||
class Meta:
|
||||
model = CustomField
|
||||
fields = {
|
||||
"id": ID_KWARGS,
|
||||
"name": CHAR_KWARGS,
|
||||
}
|
||||
|
||||
|
||||
@extend_schema_field(serializers.CharField)
|
||||
class CustomFieldsFilter(Filter):
|
||||
def filter(self, qs, value):
|
||||
if value:
|
||||
fields_with_matching_selects = CustomField.objects.filter(
|
||||
extra_data__icontains=value,
|
||||
)
|
||||
option_ids = []
|
||||
if fields_with_matching_selects.count() > 0:
|
||||
for field in fields_with_matching_selects:
|
||||
options = field.extra_data.get("select_options", [])
|
||||
for _, option in enumerate(options):
|
||||
if option.get("label").lower().find(value.lower()) != -1:
|
||||
option_ids.extend([option.get("id")])
|
||||
return (
|
||||
qs.filter(custom_fields__field__name__icontains=value)
|
||||
| qs.filter(custom_fields__value_text__icontains=value)
|
||||
| qs.filter(custom_fields__value_bool__icontains=value)
|
||||
| qs.filter(custom_fields__value_int__icontains=value)
|
||||
| qs.filter(custom_fields__value_float__icontains=value)
|
||||
| qs.filter(custom_fields__value_date__icontains=value)
|
||||
| qs.filter(custom_fields__value_url__icontains=value)
|
||||
| qs.filter(custom_fields__value_monetary__icontains=value)
|
||||
| qs.filter(custom_fields__value_document_ids__icontains=value)
|
||||
| qs.filter(custom_fields__value_select__in=option_ids)
|
||||
)
|
||||
else:
|
||||
return qs
|
||||
|
||||
|
||||
class MimeTypeFilter(Filter):
|
||||
def filter(self, qs, value):
|
||||
if value:
|
||||
return qs.filter(mime_type__icontains=value)
|
||||
else:
|
||||
return qs
|
||||
|
||||
|
||||
class SelectField(serializers.CharField):
|
||||
def __init__(self, custom_field: CustomField):
|
||||
self._options = custom_field.extra_data["select_options"]
|
||||
super().__init__(max_length=16)
|
||||
|
||||
def to_internal_value(self, data):
|
||||
# If the supplied value is the option label instead of the ID
|
||||
try:
|
||||
data = next(
|
||||
option.get("id")
|
||||
for option in self._options
|
||||
if option.get("label") == data
|
||||
)
|
||||
except StopIteration:
|
||||
pass
|
||||
return super().to_internal_value(data)
|
||||
|
||||
|
||||
def handle_validation_prefix(func: Callable):
|
||||
"""
|
||||
Catch ValidationErrors raised by the wrapped function
|
||||
and add a prefix to the exception detail to track what causes the exception,
|
||||
similar to nested serializers.
|
||||
"""
|
||||
|
||||
def wrapper(*args, validation_prefix=None, **kwargs):
|
||||
try:
|
||||
return func(*args, **kwargs)
|
||||
except serializers.ValidationError as e:
|
||||
raise serializers.ValidationError({validation_prefix: e.detail})
|
||||
|
||||
# Update the signature to include the validation_prefix argument
|
||||
old_sig = inspect.signature(func)
|
||||
new_param = inspect.Parameter("validation_prefix", inspect.Parameter.KEYWORD_ONLY)
|
||||
new_sig = old_sig.replace(parameters=[*old_sig.parameters.values(), new_param])
|
||||
|
||||
# Apply functools.wraps and manually set the new signature
|
||||
functools.update_wrapper(wrapper, func)
|
||||
wrapper.__signature__ = new_sig
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
class CustomFieldQueryParser:
|
||||
EXPR_BY_CATEGORY = {
|
||||
"basic": ["exact", "in", "isnull", "exists"],
|
||||
"string": [
|
||||
"icontains",
|
||||
"istartswith",
|
||||
"iendswith",
|
||||
],
|
||||
"arithmetic": [
|
||||
"gt",
|
||||
"gte",
|
||||
"lt",
|
||||
"lte",
|
||||
"range",
|
||||
],
|
||||
"containment": ["contains"],
|
||||
}
|
||||
|
||||
SUPPORTED_EXPR_CATEGORIES = {
|
||||
CustomField.FieldDataType.STRING: ("basic", "string"),
|
||||
CustomField.FieldDataType.URL: ("basic", "string"),
|
||||
CustomField.FieldDataType.DATE: ("basic", "arithmetic"),
|
||||
CustomField.FieldDataType.BOOL: ("basic",),
|
||||
CustomField.FieldDataType.INT: ("basic", "arithmetic"),
|
||||
CustomField.FieldDataType.FLOAT: ("basic", "arithmetic"),
|
||||
CustomField.FieldDataType.MONETARY: ("basic", "string", "arithmetic"),
|
||||
CustomField.FieldDataType.DOCUMENTLINK: ("basic", "containment"),
|
||||
CustomField.FieldDataType.SELECT: ("basic",),
|
||||
}
|
||||
|
||||
DATE_COMPONENTS = [
|
||||
"year",
|
||||
"iso_year",
|
||||
"month",
|
||||
"day",
|
||||
"week",
|
||||
"week_day",
|
||||
"iso_week_day",
|
||||
"quarter",
|
||||
]
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
validation_prefix,
|
||||
max_query_depth=10,
|
||||
max_atom_count=20,
|
||||
) -> None:
|
||||
"""
|
||||
A helper class that parses the query string into a `django.db.models.Q` for filtering
|
||||
documents based on custom field values.
|
||||
|
||||
The syntax of the query expression is illustrated with the below pseudo code rules:
|
||||
1. parse([`custom_field`, "exists", true]):
|
||||
matches documents with Q(custom_fields__field=`custom_field`)
|
||||
2. parse([`custom_field`, "exists", false]):
|
||||
matches documents with ~Q(custom_fields__field=`custom_field`)
|
||||
3. parse([`custom_field`, `op`, `value`]):
|
||||
matches documents with
|
||||
Q(custom_fields__field=`custom_field`, custom_fields__value_`type`__`op`= `value`)
|
||||
4. parse(["AND", [`q0`, `q1`, ..., `qn`]])
|
||||
-> parse(`q0`) & parse(`q1`) & ... & parse(`qn`)
|
||||
5. parse(["OR", [`q0`, `q1`, ..., `qn`]])
|
||||
-> parse(`q0`) | parse(`q1`) | ... | parse(`qn`)
|
||||
6. parse(["NOT", `q`])
|
||||
-> ~parse(`q`)
|
||||
|
||||
Args:
|
||||
validation_prefix: Used to generate the ValidationError message.
|
||||
max_query_depth: Limits the maximum nesting depth of queries.
|
||||
max_atom_count: Limits the maximum number of atoms (i.e., rule 1, 2, 3) in the query.
|
||||
|
||||
`max_query_depth` and `max_atom_count` can be set to guard against generating arbitrarily
|
||||
complex SQL queries.
|
||||
"""
|
||||
self._custom_fields: dict[int | str, CustomField] = {}
|
||||
self._validation_prefix = validation_prefix
|
||||
# Dummy ModelSerializer used to convert a Django models.Field to serializers.Field.
|
||||
self._model_serializer = serializers.ModelSerializer()
|
||||
# Used for sanity check
|
||||
self._max_query_depth = max_query_depth
|
||||
self._max_atom_count = max_atom_count
|
||||
self._current_depth = 0
|
||||
self._atom_count = 0
|
||||
# The set of annotations that we need to apply to the queryset
|
||||
self._annotations = {}
|
||||
|
||||
def parse(self, query: str) -> tuple[Q, dict[str, Count]]:
|
||||
"""
|
||||
Parses the query string into a `django.db.models.Q`
|
||||
and a set of annotations to be applied to the queryset.
|
||||
"""
|
||||
try:
|
||||
expr = json.loads(query)
|
||||
except json.JSONDecodeError:
|
||||
raise serializers.ValidationError(
|
||||
{self._validation_prefix: [_("Value must be valid JSON.")]},
|
||||
)
|
||||
return (
|
||||
self._parse_expr(expr, validation_prefix=self._validation_prefix),
|
||||
self._annotations,
|
||||
)
|
||||
|
||||
@handle_validation_prefix
|
||||
def _parse_expr(self, expr) -> Q:
|
||||
"""
|
||||
Applies rule (1, 2, 3) or (4, 5, 6) based on the length of the expr.
|
||||
"""
|
||||
with self._track_query_depth():
|
||||
if isinstance(expr, list | tuple):
|
||||
if len(expr) == 2:
|
||||
return self._parse_logical_expr(*expr)
|
||||
elif len(expr) == 3:
|
||||
return self._parse_atom(*expr)
|
||||
raise serializers.ValidationError(
|
||||
[_("Invalid custom field query expression")],
|
||||
)
|
||||
|
||||
@handle_validation_prefix
|
||||
def _parse_expr_list(self, exprs) -> list[Q]:
|
||||
"""
|
||||
Handles [`q0`, `q1`, ..., `qn`] in rule 4 & 5.
|
||||
"""
|
||||
if not isinstance(exprs, list | tuple) or not exprs:
|
||||
raise serializers.ValidationError(
|
||||
[_("Invalid expression list. Must be nonempty.")],
|
||||
)
|
||||
return [
|
||||
self._parse_expr(expr, validation_prefix=i) for i, expr in enumerate(exprs)
|
||||
]
|
||||
|
||||
def _parse_logical_expr(self, op, args) -> Q:
|
||||
"""
|
||||
Handles rule 4, 5, 6.
|
||||
"""
|
||||
op_lower = op.lower()
|
||||
|
||||
if op_lower == "not":
|
||||
return ~self._parse_expr(args, validation_prefix=1)
|
||||
|
||||
if op_lower == "and":
|
||||
op_func = operator.and_
|
||||
elif op_lower == "or":
|
||||
op_func = operator.or_
|
||||
else:
|
||||
raise serializers.ValidationError(
|
||||
{"0": [_("Invalid logical operator {op!r}").format(op=op)]},
|
||||
)
|
||||
|
||||
qs = self._parse_expr_list(args, validation_prefix="1")
|
||||
return functools.reduce(op_func, qs)
|
||||
|
||||
def _parse_atom(self, id_or_name, op, value) -> Q:
|
||||
"""
|
||||
Handles rule 1, 2, 3.
|
||||
"""
|
||||
# Guard against queries with too many conditions.
|
||||
self._atom_count += 1
|
||||
if self._atom_count > self._max_atom_count:
|
||||
raise serializers.ValidationError(
|
||||
[_("Maximum number of query conditions exceeded.")],
|
||||
)
|
||||
|
||||
custom_field = self._get_custom_field(id_or_name, validation_prefix="0")
|
||||
op = self._validate_atom_op(custom_field, op, validation_prefix="1")
|
||||
value = self._validate_atom_value(
|
||||
custom_field,
|
||||
op,
|
||||
value,
|
||||
validation_prefix="2",
|
||||
)
|
||||
|
||||
# Needed because not all DB backends support Array __contains
|
||||
if (
|
||||
custom_field.data_type == CustomField.FieldDataType.DOCUMENTLINK
|
||||
and op == "contains"
|
||||
):
|
||||
return self._parse_atom_doc_link_contains(custom_field, value)
|
||||
|
||||
value_field_name = CustomFieldInstance.get_value_field_name(
|
||||
custom_field.data_type,
|
||||
)
|
||||
if (
|
||||
custom_field.data_type == CustomField.FieldDataType.MONETARY
|
||||
and op in self.EXPR_BY_CATEGORY["arithmetic"]
|
||||
):
|
||||
value_field_name = "value_monetary_amount"
|
||||
has_field = Q(custom_fields__field=custom_field)
|
||||
|
||||
# We need to use an annotation here because different atoms
|
||||
# might be referring to different instances of custom fields.
|
||||
annotation_name = f"_custom_field_filter_{len(self._annotations)}"
|
||||
|
||||
        # Our special exists operator.
        if op == "exists":
            annotation = Count("custom_fields", filter=has_field)
            # A Document should have > 0 matches if it has this field, or 0 if it doesn't.
            query_op = "gt" if value else "exact"
            query = Q(**{f"{annotation_name}__{query_op}": 0})
        else:
            # Check if 1) the custom field name matches, and 2) the value satisfies the condition
            field_filter = has_field & Q(
                **{f"custom_fields__{value_field_name}__{op}": value},
            )
            # Annotate how many matching custom fields each document has
            annotation = Count("custom_fields", filter=field_filter)
            # Filter documents by that count
            query = Q(**{f"{annotation_name}__gt": 0})

        self._annotations[annotation_name] = annotation
        return query

    @handle_validation_prefix
    def _get_custom_field(self, id_or_name):
        """Get the CustomField instance by id or name."""
        if id_or_name in self._custom_fields:
            return self._custom_fields[id_or_name]

        kwargs = (
            {"id": id_or_name} if isinstance(id_or_name, int) else {"name": id_or_name}
        )
        try:
            custom_field = CustomField.objects.get(**kwargs)
        except CustomField.DoesNotExist:
            raise serializers.ValidationError(
                [_("{name!r} is not a valid custom field.").format(name=id_or_name)],
            )
        self._custom_fields[custom_field.id] = custom_field
        self._custom_fields[custom_field.name] = custom_field
        return custom_field

    @staticmethod
    def _split_op(full_op):
        *prefix, op = str(full_op).rsplit("__", maxsplit=1)
        prefix = prefix[0] if prefix else None
        return prefix, op

    @handle_validation_prefix
    def _validate_atom_op(self, custom_field, raw_op):
        """Check if the `op` is compatible with the type of the custom field."""
        prefix, op = self._split_op(raw_op)

        # Check if the operator is supported for the current data_type.
        supported = False
        for category in self.SUPPORTED_EXPR_CATEGORIES[custom_field.data_type]:
            if op in self.EXPR_BY_CATEGORY[category]:
                supported = True
                break

        # Check the prefix
        if prefix is not None:
            if (
                prefix in self.DATE_COMPONENTS
                and custom_field.data_type == CustomField.FieldDataType.DATE
            ):
                pass  # ok - e.g., "year__exact" for a date field
            else:
                supported = False  # anything else is invalid

        if not supported:
            raise serializers.ValidationError(
                [
                    _("{data_type} does not support query expr {expr!r}.").format(
                        data_type=custom_field.data_type,
                        expr=raw_op,
                    ),
                ],
            )

        return raw_op

    def _get_serializer_field(self, custom_field, full_op):
        """Return a serializers.Field for value validation."""
        prefix, op = self._split_op(full_op)
        field = None

        if op in ("isnull", "exists"):
            # `isnull` takes either True or False regardless of the data_type.
            field = serializers.BooleanField()
        elif (
            custom_field.data_type == CustomField.FieldDataType.DATE
            and prefix in self.DATE_COMPONENTS
        ):
            # DateField admits queries in the form of `year__exact`, etc. These take integers.
            field = serializers.IntegerField()
        elif custom_field.data_type == CustomField.FieldDataType.DOCUMENTLINK:
            # We can be more specific here and make sure the value is a list.
            field = serializers.ListField(child=serializers.IntegerField())
        elif custom_field.data_type == CustomField.FieldDataType.SELECT:
            # We use this custom field to permit SELECT option names.
            field = SelectField(custom_field)
        elif custom_field.data_type == CustomField.FieldDataType.URL:
            # For URL fields we don't need to be strict about validation (e.g., for istartswith).
            field = serializers.CharField()
        else:
            # The general case: inferred from the corresponding field in CustomFieldInstance.
            value_field_name = CustomFieldInstance.get_value_field_name(
                custom_field.data_type,
            )
            model_field = CustomFieldInstance._meta.get_field(value_field_name)
            field_name = model_field.deconstruct()[0]
            field_class, field_kwargs = self._model_serializer.build_standard_field(
                field_name,
                model_field,
            )
            field = field_class(**field_kwargs)
            field.allow_null = False

            # Need to set allow_blank manually because of the inconsistency in CustomFieldInstance validation.
            # See https://github.com/paperless-ngx/paperless-ngx/issues/7361.
            if isinstance(field, serializers.CharField):
                field.allow_blank = True

        if op == "in":
            # `in` takes a list of values.
            field = serializers.ListField(child=field, allow_empty=False)
        elif op == "range":
            # `range` takes a list of values, i.e., [start, end].
            field = serializers.ListField(
                child=field,
                min_length=2,
                max_length=2,
            )

        return field

    @handle_validation_prefix
    def _validate_atom_value(self, custom_field, op, value):
        """Check if `value` is valid for the custom field and `op`. Returns the validated value."""
        serializer_field = self._get_serializer_field(custom_field, op)
        return serializer_field.run_validation(value)

    def _parse_atom_doc_link_contains(self, custom_field, value) -> Q:
        """
        Handles document link `contains` in a way that is supported by all DB backends.
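
        For example (hypothetical IDs), value=[3, 5] should match exactly the
        documents that both document 3 and document 5 link back to; this is
        computed below as the intersection of their reverse links.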
        """

        # If the value is an empty set,
        # this is trivially true for any document with non-null document links.
        if not value:
            return Q(
                custom_fields__field=custom_field,
                custom_fields__value_document_ids__isnull=False,
            )

        # First we look up reverse links from the requested documents.
        links = CustomFieldInstance.objects.filter(
            document_id__in=value,
            field__data_type=CustomField.FieldDataType.DOCUMENTLINK,
        )

        # Check if any of the requested IDs are missing.
        missing_ids = set(value) - set(link.document_id for link in links)
        if missing_ids:
            # The result should be an empty set in this case.
            return Q(id__in=[])

        # Take the intersection of the reverse links - this should be what we are looking for.
        document_ids_we_want = functools.reduce(
            operator.and_,
            (set(link.value_document_ids) for link in links),
        )

        return Q(id__in=document_ids_we_want)

    @contextmanager
    def _track_query_depth(self):
        # Guard against queries that are too deeply nested.
        self._current_depth += 1
        if self._current_depth > self._max_query_depth:
            raise serializers.ValidationError([_("Maximum nesting depth exceeded.")])
        try:
            yield
        finally:
            self._current_depth -= 1


@extend_schema_field(serializers.CharField)
class CustomFieldQueryFilter(Filter):
    def __init__(self, validation_prefix):
        """
        A filter that filters documents based on custom field name and value.

        Args:
            validation_prefix: Used to generate the ValidationError message.
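
        Example query parameter value (hypothetical field name):
            ?custom_field_query=["invoice_number", "icontains", "2024"]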
        """
        super().__init__()
        self._validation_prefix = validation_prefix

    def filter(self, qs, value):
        if not value:
            return qs

        parser = CustomFieldQueryParser(
            self._validation_prefix,
            max_query_depth=CUSTOM_FIELD_QUERY_MAX_DEPTH,
            max_atom_count=CUSTOM_FIELD_QUERY_MAX_ATOMS,
        )
        q, annotations = parser.parse(value)

        return qs.annotate(**annotations).filter(q)


class DocumentFilterSet(FilterSet):
    is_tagged = BooleanFilter(
        label="Is tagged",
        field_name="tags",
        lookup_expr="isnull",
        exclude=True,
    )

    tags__id__all = ObjectFilter(field_name="tags")

    tags__id__none = ObjectFilter(field_name="tags", exclude=True)

    tags__id__in = ObjectFilter(field_name="tags", in_list=True)

    correspondent__id__none = ObjectFilter(field_name="correspondent", exclude=True)

    document_type__id__none = ObjectFilter(field_name="document_type", exclude=True)

    storage_path__id__none = ObjectFilter(field_name="storage_path", exclude=True)

    is_in_inbox = InboxFilter()

    title_content = TitleContentFilter()

    owner__id__none = ObjectFilter(field_name="owner", exclude=True)

    custom_fields__icontains = CustomFieldsFilter()

    custom_fields__id__all = ObjectFilter(field_name="custom_fields__field")

    custom_fields__id__none = ObjectFilter(
        field_name="custom_fields__field",
        exclude=True,
    )

    custom_fields__id__in = ObjectFilter(
        field_name="custom_fields__field",
        in_list=True,
    )

    has_custom_fields = BooleanFilter(
        label="Has custom field",
        field_name="custom_fields",
        lookup_expr="isnull",
        exclude=True,
    )

    custom_field_query = CustomFieldQueryFilter("custom_field_query")

    shared_by__id = SharedByUser()

    mime_type = MimeTypeFilter()

    class Meta:
        model = Document
        fields = {
            "id": ID_KWARGS,
            "title": CHAR_KWARGS,
            "content": CHAR_KWARGS,
            "archive_serial_number": INT_KWARGS,
            "created": DATE_KWARGS,
            "added": DATE_KWARGS,
            "modified": DATE_KWARGS,
            "original_filename": CHAR_KWARGS,
            "checksum": CHAR_KWARGS,
            "correspondent": ["isnull"],
            "correspondent__id": ID_KWARGS,
            "correspondent__name": CHAR_KWARGS,
            "tags__id": ID_KWARGS,
            "tags__name": CHAR_KWARGS,
            "document_type": ["isnull"],
            "document_type__id": ID_KWARGS,
            "document_type__name": CHAR_KWARGS,
            "storage_path": ["isnull"],
            "storage_path__id": ID_KWARGS,
            "storage_path__name": CHAR_KWARGS,
            "owner": ["isnull"],
            "owner__id": ID_KWARGS,
            "custom_fields": ["icontains"],
        }


class ShareLinkFilterSet(FilterSet):
    class Meta:
        model = ShareLink
        fields = {
            "created": DATE_KWARGS,
            "expiration": DATE_KWARGS,
        }


class PaperlessTaskFilterSet(FilterSet):
    acknowledged = BooleanFilter(
        label="Acknowledged",
        field_name="acknowledged",
    )

    class Meta:
        model = PaperlessTask
        fields = {
            "type": ["exact"],
            "task_name": ["exact"],
            "status": ["exact"],
        }


class ObjectOwnedOrGrantedPermissionsFilter(ObjectPermissionsFilter):
    """
    A filter backend that limits results to objects for which the requesting
    user has read object-level permissions, objects the user owns, and objects
    without an owner (for backwards compatibility).
    """

    def filter_queryset(self, request, queryset, view):
        objects_with_perms = super().filter_queryset(request, queryset, view)
        objects_owned = queryset.filter(owner=request.user)
        objects_unowned = queryset.filter(owner__isnull=True)
        return objects_with_perms | objects_owned | objects_unowned


class ObjectOwnedPermissionsFilter(ObjectPermissionsFilter):
    """
    A filter backend that limits results to objects the requesting user owns
    and objects without an owner (for backwards compatibility).
    """

    def filter_queryset(self, request, queryset, view):
        if request.user.is_superuser:
            return queryset
        objects_owned = queryset.filter(owner=request.user)
        objects_unowned = queryset.filter(owner__isnull=True)
        return objects_owned | objects_unowned


class DocumentsOrderingFilter(OrderingFilter):
    field_name = "ordering"
    prefix = "custom_field_"

    def filter_queryset(self, request, queryset, view):
        param = request.query_params.get("ordering")
        if param and self.prefix in param:
            custom_field_id = int(param.split(self.prefix)[1])
            try:
                field = CustomField.objects.get(pk=custom_field_id)
            except CustomField.DoesNotExist:
                raise serializers.ValidationError(
                    {self.prefix + str(custom_field_id): [_("Custom field not found")]},
                )

            annotation = None
            match field.data_type:
                case CustomField.FieldDataType.STRING:
                    annotation = Subquery(
                        CustomFieldInstance.objects.filter(
                            document_id=OuterRef("id"),
                            field_id=custom_field_id,
                        ).values("value_text")[:1],
                    )
                case CustomField.FieldDataType.INT:
                    annotation = Subquery(
                        CustomFieldInstance.objects.filter(
                            document_id=OuterRef("id"),
                            field_id=custom_field_id,
                        ).values("value_int")[:1],
                    )
                case CustomField.FieldDataType.FLOAT:
                    annotation = Subquery(
                        CustomFieldInstance.objects.filter(
                            document_id=OuterRef("id"),
                            field_id=custom_field_id,
                        ).values("value_float")[:1],
                    )
                case CustomField.FieldDataType.DATE:
                    annotation = Subquery(
                        CustomFieldInstance.objects.filter(
                            document_id=OuterRef("id"),
                            field_id=custom_field_id,
                        ).values("value_date")[:1],
                    )
                case CustomField.FieldDataType.MONETARY:
                    annotation = Subquery(
                        CustomFieldInstance.objects.filter(
                            document_id=OuterRef("id"),
                            field_id=custom_field_id,
                        ).values("value_monetary_amount")[:1],
                    )
                case CustomField.FieldDataType.SELECT:
                    # Select options are a little more complicated since the value is the id of the option, not
                    # the label. Additionally, to support sqlite we can't use StringAgg, so we need to create a
                    # case statement for each option, setting the value to the index of the option in a list
                    # sorted by label, and then summing the results to give a single value for the annotation
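                    # For example (hypothetical options), labels ["A", "B", "C"] map to
                    # indices 0/1/2, so a document whose value is the id of option "B"
                    # is annotated with 1, and documents without a value sort last.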
                    select_options = sorted(
                        field.extra_data.get("select_options", []),
                        key=lambda x: x.get("label"),
                    )
                    whens = [
                        When(
                            custom_fields__field_id=custom_field_id,
                            custom_fields__value_select=option.get("id"),
                            then=Value(idx, output_field=IntegerField()),
                        )
                        for idx, option in enumerate(select_options)
                    ]
                    whens.append(
                        When(
                            custom_fields__field_id=custom_field_id,
                            custom_fields__value_select__isnull=True,
                            then=Value(
                                len(select_options),
                                output_field=IntegerField(),
                            ),
                        ),
                    )
                    annotation = Sum(
                        Case(
                            *whens,
                            default=Value(0),
                            output_field=IntegerField(),
                        ),
                    )
                case CustomField.FieldDataType.DOCUMENTLINK:
                    annotation = Subquery(
                        CustomFieldInstance.objects.filter(
                            document_id=OuterRef("id"),
                            field_id=custom_field_id,
                        ).values("value_document_ids")[:1],
                    )
                case CustomField.FieldDataType.URL:
                    annotation = Subquery(
                        CustomFieldInstance.objects.filter(
                            document_id=OuterRef("id"),
                            field_id=custom_field_id,
                        ).values("value_url")[:1],
                    )
                case CustomField.FieldDataType.BOOL:
                    annotation = Subquery(
                        CustomFieldInstance.objects.filter(
                            document_id=OuterRef("id"),
                            field_id=custom_field_id,
                        ).values("value_bool")[:1],
                    )

            if not annotation:
                # Only happens if a new data type is added and not handled here
                raise ValueError("Invalid custom field data type")

            queryset = (
                queryset.annotate(
                    # We need to annotate the queryset with the custom field value
                    custom_field_value=annotation,
                    # We also annotate with a boolean for whether the field exists, so
                    # documents missing the field can be sorted after those that have it
                    has_field=Exists(
                        CustomFieldInstance.objects.filter(
                            document_id=OuterRef("id"),
                            field_id=custom_field_id,
                        ),
                    ),
                )
                .order_by(
                    "-has_field",
                    param.replace(
                        self.prefix + str(custom_field_id),
                        "custom_field_value",
                    ),
                )
                .distinct()
            )

        return super().filter_queryset(request, queryset, view)
@@ -189,9 +189,9 @@ def parse_wrapper(parser, path, mime_type, file_name):


def create_archive_version(doc, retry_count=3):
    from documents.parsers import DocumentParser
    from documents.parsers import ParseError
    from documents.parsers import get_parser_class_for_mime_type
    from paperless.parsers import DocumentParser
    from paperless.parsers import ParseError
    from paperless.parsers import get_parser_class_for_mime_type

    logger.info(f"Regenerating archive document for document ID:{doc.id}")
    parser_class = get_parser_class_for_mime_type(doc.mime_type)
@@ -271,7 +271,7 @@ def move_old_to_new_locations(apps, schema_editor):

    # check that we can regenerate affected archive versions
    for doc_id in affected_document_ids:
        from documents.parsers import get_parser_class_for_mime_type
        from paperless.parsers import get_parser_class_for_mime_type

        doc = Document.objects.get(id=doc_id)
        parser_class = get_parser_class_for_mime_type(doc.mime_type)

@@ -9,7 +9,7 @@ from pathlib import Path
from django.conf import settings
from django.db import migrations

from documents.parsers import run_convert
from paperless.parsers import run_convert

logger = logging.getLogger("paperless.migrations")


@@ -10,7 +10,7 @@ import gnupg
from django.conf import settings
from django.db import migrations

from documents.parsers import run_convert
from paperless.parsers import run_convert

logger = logging.getLogger("paperless.migrations")


@@ -6,7 +6,7 @@ from django.db import models
from django.db import transaction
from filelock import FileLock

from documents.templating.utils import convert_format_str_to_template_format
from paperless.templating.utils import convert_format_str_to_template_format


def convert_from_format_to_template(apps, schema_editor):
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,11 +0,0 @@
# Defines the names of file/thumbnail for the manifest
# for exporting/importing commands
EXPORTER_FILE_NAME = "__exported_file_name__"
EXPORTER_THUMBNAIL_NAME = "__exported_thumbnail_name__"
EXPORTER_ARCHIVE_NAME = "__exported_archive_name__"

EXPORTER_CRYPTO_SETTINGS_NAME = "__crypto__"
EXPORTER_CRYPTO_SALT_NAME = "__salt_hex__"
EXPORTER_CRYPTO_KEY_ITERATIONS_NAME = "__key_iters__"
EXPORTER_CRYPTO_KEY_SIZE_NAME = "__key_size__"
EXPORTER_CRYPTO_ALGO_NAME = "__key_algo__"
@@ -1,91 +0,0 @@
import textwrap
from unittest import mock

from django.core.checks import Error
from django.core.checks import Warning
from django.test import TestCase
from django.test import override_settings

from documents.checks import changed_password_check
from documents.checks import filename_format_check
from documents.checks import parser_check
from documents.models import Document
from documents.tests.factories import DocumentFactory


class TestDocumentChecks(TestCase):
    def test_changed_password_check_empty_db(self):
        self.assertListEqual(changed_password_check(None), [])

    def test_changed_password_check_no_encryption(self):
        DocumentFactory.create(storage_type=Document.STORAGE_TYPE_UNENCRYPTED)
        self.assertListEqual(changed_password_check(None), [])

    def test_encrypted_missing_passphrase(self):
        DocumentFactory.create(storage_type=Document.STORAGE_TYPE_GPG)
        msgs = changed_password_check(None)
        self.assertEqual(len(msgs), 1)
        msg_text = msgs[0].msg
        self.assertEqual(
            msg_text,
            "The database contains encrypted documents but no password is set.",
        )

    @override_settings(
        PASSPHRASE="test",
    )
    @mock.patch("paperless.db.GnuPG.decrypted")
    @mock.patch("documents.models.Document.source_file")
    def test_encrypted_decrypt_fails(self, mock_decrypted, mock_source_file):
        mock_decrypted.return_value = None
        mock_source_file.return_value = b""

        DocumentFactory.create(storage_type=Document.STORAGE_TYPE_GPG)

        msgs = changed_password_check(None)

        self.assertEqual(len(msgs), 1)
        msg_text = msgs[0].msg
        self.assertEqual(
            msg_text,
            textwrap.dedent(
                """
                The current password doesn't match the password of the
                existing documents.

                If you intend to change your password, you must first export
                all of the old documents, start fresh with the new password
                and then re-import them."
                """,
            ),
        )

    def test_parser_check(self):
        self.assertEqual(parser_check(None), [])

        with mock.patch("documents.checks.document_consumer_declaration.send") as m:
            m.return_value = []

            self.assertEqual(
                parser_check(None),
                [
                    Error(
                        "No parsers found. This is a bug. The consumer won't be "
                        "able to consume any documents without parsers.",
                    ),
                ],
            )

    def test_filename_format_check(self):
        self.assertEqual(filename_format_check(None), [])

        with override_settings(FILENAME_FORMAT="{created}/{title}"):
            self.assertEqual(
                filename_format_check(None),
                [
                    Warning(
                        "Filename format {created}/{title} is using the old style, please update to use double curly brackets",
                        hint="{{ created }}/{{ title }}",
                    ),
                ],
            )
File diff suppressed because it is too large
@@ -3,7 +3,7 @@ msgstr ""
"Project-Id-Version: paperless-ngx\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2025-03-26 21:04-0700\n"
"PO-Revision-Date: 2025-04-02 00:33\n"
"PO-Revision-Date: 2025-04-09 12:12\n"
"Last-Translator: \n"
"Language-Team: French\n"
"Language: fr_FR\n"
@@ -582,7 +582,7 @@ msgstr "Fichier à consommer"

#: documents/models.py:540
msgid "Train Classifier"
msgstr ""
msgstr "Entrainer le classificateur"

#: documents/models.py:541
msgid "Check Sanity"

@@ -3,7 +3,7 @@ msgstr ""
"Project-Id-Version: paperless-ngx\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2025-03-26 21:04-0700\n"
"PO-Revision-Date: 2025-03-29 17:14\n"
"PO-Revision-Date: 2025-04-09 21:44\n"
"Last-Translator: \n"
"Language-Team: Dutch\n"
"Language: nl_NL\n"
@@ -23,7 +23,7 @@ msgstr "Documenten"

#: documents/filters.py:374
msgid "Value must be valid JSON."
msgstr ""
msgstr "Waarde moet een geldige JSON zijn."

#: documents/filters.py:393
msgid "Invalid custom field query expression"
@@ -766,7 +766,7 @@ msgstr "aangepaste velden"

#: documents/models.py:766
msgid "custom fields"
msgstr "Aangepaste velden"
msgstr "aangepaste velden"

#: documents/models.py:863
msgid "custom field instance"

@@ -3,7 +3,7 @@ msgstr ""
"Project-Id-Version: paperless-ngx\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2025-03-26 21:04-0700\n"
"PO-Revision-Date: 2025-03-31 12:13\n"
"PO-Revision-Date: 2025-04-09 21:44\n"
"Last-Translator: \n"
"Language-Team: Chinese Traditional\n"
"Language: zh_TW\n"

@@ -1,6 +1,8 @@
from paperless.celery import app as celery_app
from paperless.checks import audit_log_check
from paperless.checks import binaries_check
from paperless.checks import changed_password_check
from paperless.checks import parser_check
from paperless.checks import paths_check
from paperless.checks import settings_values_check

@@ -8,6 +10,8 @@ __all__ = [
    "audit_log_check",
    "binaries_check",
    "celery_app",
    "changed_password_check",
    "parser_check",
    "paths_check",
    "settings_values_check",
]
@@ -10,8 +10,8 @@ from django.contrib.auth.models import User
from django.forms import ValidationError
from django.urls import reverse

from documents.models import Document
from paperless.signals import handle_social_account_updated
from paperless.models import Document
from paperless.signals.signals import handle_social_account_updated

logger = logging.getLogger("paperless.auth")


@@ -1,7 +1,26 @@
from django import forms
from django.conf import settings
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from guardian.admin import GuardedModelAdmin

from paperless.models import Correspondent
from paperless.models import CustomField
from paperless.models import CustomFieldInstance
from paperless.models import Document
from paperless.models import DocumentType
from paperless.models import Note
from paperless.models import PaperlessTask
from paperless.models import SavedView
from paperless.models import SavedViewFilterRule
from paperless.models import ShareLink
from paperless.models import StoragePath
from paperless.models import Tag

if settings.AUDIT_LOG_ENABLED:
    from auditlog.admin import LogEntryAdmin
    from auditlog.models import LogEntry


class PaperlessUserForm(forms.ModelForm):
@@ -51,3 +70,197 @@ class PaperlessUserAdmin(UserAdmin):

admin.site.unregister(User)
admin.site.register(User, PaperlessUserAdmin)


class CorrespondentAdmin(GuardedModelAdmin):
    list_display = ("name", "match", "matching_algorithm")
    list_filter = ("matching_algorithm",)
    list_editable = ("match", "matching_algorithm")


class TagAdmin(GuardedModelAdmin):
    list_display = ("name", "color", "match", "matching_algorithm")
    list_filter = ("matching_algorithm",)
    list_editable = ("color", "match", "matching_algorithm")
    search_fields = ("color", "name")


class DocumentTypeAdmin(GuardedModelAdmin):
    list_display = ("name", "match", "matching_algorithm")
    list_filter = ("matching_algorithm",)
    list_editable = ("match", "matching_algorithm")


class DocumentAdmin(GuardedModelAdmin):
    search_fields = ("correspondent__name", "title", "content", "tags__name")
    readonly_fields = (
        "added",
        "modified",
        "mime_type",
        "storage_type",
        "filename",
        "checksum",
        "archive_filename",
        "archive_checksum",
        "original_filename",
        "deleted_at",
    )

    list_display_links = ("title",)

    list_display = ("id", "title", "mime_type", "filename", "archive_filename")

    list_filter = (
        ("mime_type"),
        ("archive_serial_number", admin.EmptyFieldListFilter),
        ("archive_filename", admin.EmptyFieldListFilter),
    )

    filter_horizontal = ("tags",)

    ordering = ["-id"]

    date_hierarchy = "created"

    def has_add_permission(self, request):
        return False

    def created_(self, obj):
        return obj.created.date().strftime("%Y-%m-%d")

    created_.short_description = "Created"

    def get_queryset(self, request):  # pragma: no cover
        """
        Include trashed documents
        """
        return Document.global_objects.all()

    def delete_queryset(self, request, queryset):
        from paperless import index

        with index.open_index_writer() as writer:
            for o in queryset:
                index.remove_document(writer, o)

        super().delete_queryset(request, queryset)

    def delete_model(self, request, obj):
        from paperless import index

        index.remove_document_from_index(obj)
        super().delete_model(request, obj)

    def save_model(self, request, obj, form, change):
        from paperless import index

        index.add_or_update_document(obj)
        super().save_model(request, obj, form, change)


class RuleInline(admin.TabularInline):
    model = SavedViewFilterRule


class SavedViewAdmin(GuardedModelAdmin):
    list_display = ("name", "owner")

    inlines = [RuleInline]

    def get_queryset(self, request):  # pragma: no cover
        return super().get_queryset(request).select_related("owner")


class StoragePathInline(admin.TabularInline):
    model = StoragePath


class StoragePathAdmin(GuardedModelAdmin):
    list_display = ("name", "path", "match", "matching_algorithm")
    list_filter = ("path", "matching_algorithm")
    list_editable = ("path", "match", "matching_algorithm")


class TaskAdmin(admin.ModelAdmin):
    list_display = ("task_id", "task_file_name", "task_name", "date_done", "status")
    list_filter = ("status", "date_done", "task_name")
    search_fields = ("task_name", "task_id", "status", "task_file_name")
    readonly_fields = (
        "task_id",
        "task_file_name",
        "task_name",
        "status",
        "date_created",
        "date_started",
        "date_done",
        "result",
    )


class NotesAdmin(GuardedModelAdmin):
    list_display = ("user", "created", "note", "document")
    list_filter = ("created", "user")
    list_display_links = ("created",)
    raw_id_fields = ("document",)
    search_fields = ("document__title",)

    def get_queryset(self, request):  # pragma: no cover
        return (
            super()
            .get_queryset(request)
            .select_related("user", "document__correspondent")
        )


class ShareLinksAdmin(GuardedModelAdmin):
    list_display = ("created", "expiration", "document")
    list_filter = ("created", "expiration", "owner")
    list_display_links = ("created",)
    raw_id_fields = ("document",)

    def get_queryset(self, request):  # pragma: no cover
        return super().get_queryset(request).select_related("document__correspondent")


class CustomFieldsAdmin(GuardedModelAdmin):
    fields = ("name", "created", "data_type")
    readonly_fields = ("created", "data_type")
    list_display = ("name", "created", "data_type")
    list_filter = ("created", "data_type")


class CustomFieldInstancesAdmin(GuardedModelAdmin):
    fields = ("field", "document", "created", "value")
    readonly_fields = ("field", "document", "created", "value")
    list_display = ("field", "document", "value", "created")
    search_fields = ("document__title",)
    list_filter = ("created", "field")

    def get_queryset(self, request):  # pragma: no cover
        return (
            super()
            .get_queryset(request)
            .select_related("field", "document__correspondent")
        )


admin.site.register(Correspondent, CorrespondentAdmin)
admin.site.register(Tag, TagAdmin)
admin.site.register(DocumentType, DocumentTypeAdmin)
admin.site.register(Document, DocumentAdmin)
admin.site.register(SavedView, SavedViewAdmin)
admin.site.register(StoragePath, StoragePathAdmin)
admin.site.register(PaperlessTask, TaskAdmin)
admin.site.register(Note, NotesAdmin)
admin.site.register(ShareLink, ShareLinksAdmin)
admin.site.register(CustomField, CustomFieldsAdmin)
admin.site.register(CustomFieldInstance, CustomFieldInstancesAdmin)

if settings.AUDIT_LOG_ENABLED:

    class LogEntryAUDIT(LogEntryAdmin):
        def has_delete_permission(self, request, obj=None):
            return False

    admin.site.unregister(LogEntry)
    admin.site.register(LogEntry, LogEntryAUDIT)
@@ -1,8 +1,8 @@
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _

from paperless.signals import handle_failed_login
from paperless.signals import handle_social_account_updated
from paperless.signals.signals import handle_failed_login
from paperless.signals.signals import handle_social_account_updated


class PaperlessConfig(AppConfig):
@@ -19,4 +19,26 @@ class PaperlessConfig(AppConfig):

        social_account_updated.connect(handle_social_account_updated)

        from paperless.signals import document_consumption_finished
        from paperless.signals import document_updated
        from paperless.signals.handlers import add_inbox_tags
        from paperless.signals.handlers import add_to_index
        from paperless.signals.handlers import run_workflows_added
        from paperless.signals.handlers import run_workflows_updated
        from paperless.signals.handlers import set_correspondent
        from paperless.signals.handlers import set_document_type
        from paperless.signals.handlers import set_storage_path
        from paperless.signals.handlers import set_tags

        document_consumption_finished.connect(add_inbox_tags)
        document_consumption_finished.connect(set_correspondent)
        document_consumption_finished.connect(set_document_type)
        document_consumption_finished.connect(set_tags)
        document_consumption_finished.connect(set_storage_path)
        document_consumption_finished.connect(add_to_index)
        document_consumption_finished.connect(run_workflows_added)
        document_updated.connect(run_workflows_updated)

        import paperless.schema  # noqa: F401

        AppConfig.ready(self)
@@ -13,15 +13,15 @@ from pikepdf import Page
from pikepdf import PasswordError
from pikepdf import Pdf

from documents.converters import convert_from_tiff_to_pdf
from documents.data_models import ConsumableDocument
from documents.models import Tag
from documents.plugins.base import ConsumeTaskPlugin
from documents.plugins.base import StopConsumeTaskError
from documents.plugins.helpers import ProgressStatusOptions
from documents.utils import copy_basic_file_stats
from documents.utils import copy_file_with_basic_stats
from documents.utils import maybe_override_pixel_limit
from paperless.converters import convert_from_tiff_to_pdf
from paperless.data_models import ConsumableDocument
from paperless.models import Tag
from paperless.plugins.base import ConsumeTaskPlugin
from paperless.plugins.base import StopConsumeTaskError
from paperless.plugins.helpers import ProgressStatusOptions
from paperless.utils import copy_basic_file_stats
from paperless.utils import copy_file_with_basic_stats
from paperless.utils import maybe_override_pixel_limit

if TYPE_CHECKING:
    from collections.abc import Callable
@@ -123,7 +123,7 @@ class BarcodePlugin(ConsumeTaskPlugin):
            ),
        ).resolve()

        from documents import tasks
        from paperless import tasks

        # Create the split document tasks
        for new_document in self.separate_pages(separator_pages):
@@ -8,7 +8,7 @@ if TYPE_CHECKING:
    from collections.abc import Callable
    from zipfile import ZipFile

    from documents.models import Document
    from paperless.models import Document


class BulkArchiveStrategy:
@@ -16,20 +16,20 @@ from django.conf import settings
from django.db.models import Q
from django.utils import timezone

from documents.data_models import ConsumableDocument
from documents.data_models import DocumentMetadataOverrides
from documents.data_models import DocumentSource
from documents.models import Correspondent
from documents.models import CustomField
from documents.models import CustomFieldInstance
from documents.models import Document
from documents.models import DocumentType
from documents.models import StoragePath
from documents.permissions import set_permissions_for_object
from documents.plugins.helpers import DocumentsStatusManager
from documents.tasks import bulk_update_documents
from documents.tasks import consume_file
from documents.tasks import update_document_content_maybe_archive_file
from paperless.data_models import ConsumableDocument
from paperless.data_models import DocumentMetadataOverrides
from paperless.data_models import DocumentSource
from paperless.models import Correspondent
from paperless.models import CustomField
from paperless.models import CustomFieldInstance
from paperless.models import Document
from paperless.models import DocumentType
from paperless.models import StoragePath
from paperless.permissions import set_permissions_for_object
from paperless.plugins.helpers import DocumentsStatusManager
from paperless.tasks import bulk_update_documents
from paperless.tasks import consume_file
from paperless.tasks import update_document_content_maybe_archive_file

if TYPE_CHECKING:
    from django.contrib.auth.models import User
@@ -220,7 +220,7 @@ def delete(doc_ids: list[int]) -> Literal["OK"]:
    try:
        Document.objects.filter(id__in=doc_ids).delete()

        from documents import index
        from paperless import index

        with index.open_index_writer() as writer:
            for id in doc_ids:
@@ -8,10 +8,10 @@ from typing import Final

from django.core.cache import cache

from documents.models import Document
from paperless.models import Document

if TYPE_CHECKING:
    from documents.classifier import DocumentClassifier
    from paperless.classifier import DocumentClassifier

logger = logging.getLogger("paperless.caching")

@@ -53,7 +53,7 @@ def get_suggestion_cache(document_id: int) -> SuggestionCacheData | None:
    The classifier needs to be matching in format and hash and the suggestions need to
    have been cached once.
    """
    from documents.classifier import DocumentClassifier
    from paperless.classifier import DocumentClassifier

    doc_key = get_suggestion_cache_key(document_id)
    cache_hits = cache.get_many([CLASSIFIER_VERSION_KEY, CLASSIFIER_HASH_KEY, doc_key])
@@ -3,12 +3,19 @@ import os
import pwd
import shutil
import stat
import textwrap

from django.conf import settings
from django.core.checks import Error
from django.core.checks import Warning
from django.core.checks import register
from django.core.exceptions import FieldError
from django.db import connections
from django.db.utils import OperationalError
from django.db.utils import ProgrammingError

from paperless.signals import document_consumer_declaration
from paperless.templating.utils import convert_format_str_to_template_format

exists_message = "{} is set but doesn't exist."
exists_hint = "Create a directory at {}"
@@ -212,3 +219,79 @@ def audit_log_check(app_configs, **kwargs):
        )

    return result


@register()
def changed_password_check(app_configs, **kwargs):
    from paperless.db import GnuPG
    from paperless.models import Document

    try:
        encrypted_doc = (
            Document.objects.filter(
                storage_type=Document.STORAGE_TYPE_GPG,
            )
            .only("pk", "storage_type")
            .first()
        )
    except (OperationalError, ProgrammingError, FieldError):
        return []  # No documents table yet

    if encrypted_doc:
        if not settings.PASSPHRASE:
            return [
                Error(
                    "The database contains encrypted documents but no password is set.",
                ),
            ]

        if not GnuPG.decrypted(encrypted_doc.source_file):
            return [
                Error(
                    textwrap.dedent(
                        """
                        The current password doesn't match the password of the
                        existing documents.

                        If you intend to change your password, you must first export
                        all of the old documents, start fresh with the new password
                        and then re-import them."
                        """,
                    ),
                ),
            ]

    return []


@register()
def parser_check(app_configs, **kwargs):
    parsers = []
    for response in document_consumer_declaration.send(None):
        parsers.append(response[1])

    if len(parsers) == 0:
        return [
            Error(
                "No parsers found. This is a bug. The consumer won't be "
                "able to consume any documents without parsers.",
            ),
        ]
    else:
        return []


@register()
def filename_format_check(app_configs, **kwargs):
    if settings.FILENAME_FORMAT:
        converted_format = convert_format_str_to_template_format(
            settings.FILENAME_FORMAT,
        )
        if converted_format != settings.FILENAME_FORMAT:
            return [
                Warning(
                    f"Filename format {settings.FILENAME_FORMAT} is using the old style, please update to use double curly brackets",
                    hint=converted_format,
                ),
            ]
    return []
@@ -17,12 +17,12 @@ if TYPE_CHECKING:
from django.conf import settings
from django.core.cache import cache

from documents.caching import CACHE_50_MINUTES
from documents.caching import CLASSIFIER_HASH_KEY
from documents.caching import CLASSIFIER_MODIFIED_KEY
from documents.caching import CLASSIFIER_VERSION_KEY
from documents.models import Document
from documents.models import MatchingModel
from paperless.caching import CACHE_50_MINUTES
from paperless.caching import CLASSIFIER_HASH_KEY
from paperless.caching import CLASSIFIER_MODIFIED_KEY
from paperless.caching import CLASSIFIER_VERSION_KEY
from paperless.models import Document
from paperless.models import MatchingModel

logger = logging.getLogger("paperless.classifier")

@@ -4,14 +4,14 @@ from datetime import timezone
from django.conf import settings
from django.core.cache import cache

from documents.caching import CACHE_5_MINUTES
from documents.caching import CACHE_50_MINUTES
from documents.caching import CLASSIFIER_HASH_KEY
from documents.caching import CLASSIFIER_MODIFIED_KEY
from documents.caching import CLASSIFIER_VERSION_KEY
from documents.caching import get_thumbnail_modified_key
from documents.classifier import DocumentClassifier
from documents.models import Document
from paperless.caching import CACHE_5_MINUTES
from paperless.caching import CACHE_50_MINUTES
from paperless.caching import CLASSIFIER_HASH_KEY
from paperless.caching import CLASSIFIER_MODIFIED_KEY
from paperless.caching import CLASSIFIER_VERSION_KEY
from paperless.caching import get_thumbnail_modified_key
from paperless.classifier import DocumentClassifier
from paperless.models import Document


def suggestions_etag(request, pk: int) -> str | None:
@@ -15,38 +15,38 @@ from django.utils import timezone
from filelock import FileLock
from rest_framework.reverse import reverse

from documents.classifier import load_classifier
from documents.data_models import ConsumableDocument
from documents.data_models import DocumentMetadataOverrides
from documents.file_handling import create_source_path_directory
from documents.file_handling import generate_unique_filename
from documents.loggers import LoggingMixin
from documents.models import Correspondent
from documents.models import CustomField
from documents.models import CustomFieldInstance
from documents.models import Document
from documents.models import DocumentType
from documents.models import StoragePath
from documents.models import Tag
from documents.models import WorkflowTrigger
from documents.parsers import DocumentParser
from documents.parsers import ParseError
from documents.parsers import get_parser_class_for_mime_type
from documents.parsers import parse_date
from documents.permissions import set_permissions_for_object
from documents.plugins.base import AlwaysRunPluginMixin
from documents.plugins.base import ConsumeTaskPlugin
from documents.plugins.base import NoCleanupPluginMixin
from documents.plugins.base import NoSetupPluginMixin
from documents.plugins.helpers import ProgressManager
from documents.plugins.helpers import ProgressStatusOptions
from documents.signals import document_consumption_finished
from documents.signals import document_consumption_started
from documents.signals.handlers import run_workflows
from documents.templating.workflows import parse_w_workflow_placeholders
from documents.utils import copy_basic_file_stats
from documents.utils import copy_file_with_basic_stats
from documents.utils import run_subprocess
from paperless.classifier import load_classifier
from paperless.data_models import ConsumableDocument
from paperless.data_models import DocumentMetadataOverrides
from paperless.file_handling import create_source_path_directory
from paperless.file_handling import generate_unique_filename
from paperless.loggers import LoggingMixin
from paperless.models import Correspondent
from paperless.models import CustomField
from paperless.models import CustomFieldInstance
from paperless.models import Document
from paperless.models import DocumentType
from paperless.models import StoragePath
from paperless.models import Tag
from paperless.models import WorkflowTrigger
from paperless.parsers import DocumentParser
from paperless.parsers import ParseError
from paperless.parsers import get_parser_class_for_mime_type
from paperless.parsers import parse_date
from paperless.permissions import set_permissions_for_object
from paperless.plugins.base import AlwaysRunPluginMixin
from paperless.plugins.base import ConsumeTaskPlugin
from paperless.plugins.base import NoCleanupPluginMixin
from paperless.plugins.base import NoSetupPluginMixin
from paperless.plugins.helpers import ProgressManager
from paperless.plugins.helpers import ProgressStatusOptions
from paperless.signals import document_consumption_finished
from paperless.signals import document_consumption_started
from paperless.signals.handlers import run_workflows
from paperless.templating.workflows import parse_w_workflow_placeholders
from paperless.utils import copy_basic_file_stats
from paperless.utils import copy_file_with_basic_stats
from paperless.utils import run_subprocess
from paperless_mail.parsers import MailDocumentParser


@@ -1,8 +1,8 @@
from django.conf import settings as django_settings
from django.contrib.auth.models import User

from documents.models import Document
from paperless.config import GeneralConfig
from paperless.models import Document


def settings(request):
@@ -4,9 +4,9 @@ import img2pdf
from django.conf import settings
from PIL import Image

from documents.utils import copy_basic_file_stats
from documents.utils import maybe_override_pixel_limit
from documents.utils import run_subprocess
from paperless.utils import copy_basic_file_stats
from paperless.utils import maybe_override_pixel_limit
from paperless.utils import run_subprocess


def convert_from_tiff_to_pdf(tiff_path: Path, target_directory: Path) -> Path:
@@ -8,12 +8,12 @@ from typing import Final
from django.conf import settings
from pikepdf import Pdf

from documents.consumer import ConsumerError
from documents.converters import convert_from_tiff_to_pdf
from documents.plugins.base import ConsumeTaskPlugin
from documents.plugins.base import NoCleanupPluginMixin
from documents.plugins.base import NoSetupPluginMixin
from documents.plugins.base import StopConsumeTaskError
from paperless.consumer import ConsumerError
from paperless.converters import convert_from_tiff_to_pdf
from paperless.plugins.base import ConsumeTaskPlugin
from paperless.plugins.base import NoCleanupPluginMixin
from paperless.plugins.base import NoSetupPluginMixin
from paperless.plugins.base import StopConsumeTaskError

logger = logging.getLogger("paperless.double_sided")

@@ -2,9 +2,9 @@ import os

from django.conf import settings

from documents.models import Document
from documents.templating.filepath import validate_filepath_template_and_render
from documents.templating.utils import convert_format_str_to_template_format
from paperless.models import Document
from paperless.templating.filepath import validate_filepath_template_and_render
from paperless.templating.utils import convert_format_str_to_template_format


def create_source_path_directory(source_path):
@@ -1,8 +1,70 @@
from __future__ import annotations

import functools
import inspect
import json
import operator
from contextlib import contextmanager
from typing import TYPE_CHECKING

from django.contrib.auth.models import Group
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.db.models import Case
from django.db.models import CharField
from django.db.models import Count
from django.db.models import Exists
from django.db.models import IntegerField
from django.db.models import OuterRef
from django.db.models import Q
from django.db.models import Subquery
from django.db.models import Sum
from django.db.models import Value
from django.db.models import When
from django.db.models.functions import Cast
from django.utils.translation import gettext_lazy as _
from django_filters.rest_framework import BooleanFilter
from django_filters.rest_framework import Filter
from django_filters.rest_framework import FilterSet
from drf_spectacular.utils import extend_schema_field
from guardian.utils import get_group_obj_perms_model
from guardian.utils import get_user_obj_perms_model
from rest_framework import serializers
from rest_framework.filters import OrderingFilter
from rest_framework_guardian.filters import ObjectPermissionsFilter

from documents.filters import CHAR_KWARGS
from paperless.models import Correspondent
from paperless.models import CustomField
from paperless.models import CustomFieldInstance
from paperless.models import Document
from paperless.models import DocumentType
from paperless.models import PaperlessTask
from paperless.models import ShareLink
from paperless.models import StoragePath
from paperless.models import Tag

if TYPE_CHECKING:
    from collections.abc import Callable

CHAR_KWARGS = ["istartswith", "iendswith", "icontains", "iexact"]
ID_KWARGS = ["in", "exact"]
INT_KWARGS = ["exact", "gt", "gte", "lt", "lte", "isnull"]
DATE_KWARGS = [
    "year",
    "month",
    "day",
    "date__gt",
    "date__gte",
    "gt",
    "gte",
    "date__lt",
    "date__lte",
    "lt",
    "lte",
]

CUSTOM_FIELD_QUERY_MAX_DEPTH = 10
CUSTOM_FIELD_QUERY_MAX_ATOMS = 20


class UserFilterSet(FilterSet):
@@ -15,3 +77,888 @@ class GroupFilterSet(FilterSet):
    class Meta:
        model = Group
        fields = {"name": CHAR_KWARGS}


class CorrespondentFilterSet(FilterSet):
    class Meta:
        model = Correspondent
        fields = {
            "id": ID_KWARGS,
            "name": CHAR_KWARGS,
        }


class TagFilterSet(FilterSet):
    class Meta:
        model = Tag
        fields = {
            "id": ID_KWARGS,
            "name": CHAR_KWARGS,
        }


class DocumentTypeFilterSet(FilterSet):
    class Meta:
        model = DocumentType
        fields = {
            "id": ID_KWARGS,
            "name": CHAR_KWARGS,
        }


class StoragePathFilterSet(FilterSet):
    class Meta:
        model = StoragePath
        fields = {
            "id": ID_KWARGS,
            "name": CHAR_KWARGS,
            "path": CHAR_KWARGS,
        }


class ObjectFilter(Filter):
    def __init__(self, *, exclude=False, in_list=False, field_name=""):
        super().__init__()
        self.exclude = exclude
        self.in_list = in_list
        self.field_name = field_name

    def filter(self, qs, value):
        if not value:
            return qs

        try:
            object_ids = [int(x) for x in value.split(",")]
        except ValueError:
            return qs

        if self.in_list:
            qs = qs.filter(**{f"{self.field_name}__id__in": object_ids}).distinct()
        else:
            for obj_id in object_ids:
                if self.exclude:
                    qs = qs.exclude(**{f"{self.field_name}__id": obj_id})
                else:
                    qs = qs.filter(**{f"{self.field_name}__id": obj_id})

        return qs


@extend_schema_field(serializers.BooleanField)
class InboxFilter(Filter):
    def filter(self, qs, value):
        if value == "true":
            return qs.filter(tags__is_inbox_tag=True)
        elif value == "false":
            return qs.exclude(tags__is_inbox_tag=True)
        else:
            return qs


@extend_schema_field(serializers.CharField)
class TitleContentFilter(Filter):
    def filter(self, qs, value):
        if value:
            return qs.filter(Q(title__icontains=value) | Q(content__icontains=value))
        else:
            return qs


@extend_schema_field(serializers.BooleanField)
class SharedByUser(Filter):
    def filter(self, qs, value):
        ctype = ContentType.objects.get_for_model(self.model)
        UserObjectPermission = get_user_obj_perms_model()
        GroupObjectPermission = get_group_obj_perms_model()
        # see https://github.com/paperless-ngx/paperless-ngx/issues/5392, we limit subqueries
        # to 1 because Postgres doesn't like returning > 1 row, but all we care about is > 0
        return (
            qs.filter(
                owner_id=value,
            )
            .annotate(
                num_shared_users=Count(
                    UserObjectPermission.objects.filter(
                        content_type=ctype,
                        object_pk=Cast(OuterRef("pk"), CharField()),
                    ).values("user_id")[:1],
                ),
            )
            .annotate(
                num_shared_groups=Count(
                    GroupObjectPermission.objects.filter(
                        content_type=ctype,
                        object_pk=Cast(OuterRef("pk"), CharField()),
                    ).values("group_id")[:1],
                ),
            )
            .filter(
                Q(num_shared_users__gt=0) | Q(num_shared_groups__gt=0),
            )
            if value is not None
            else qs
        )


class CustomFieldFilterSet(FilterSet):
    class Meta:
        model = CustomField
        fields = {
            "id": ID_KWARGS,
            "name": CHAR_KWARGS,
        }


@extend_schema_field(serializers.CharField)
class CustomFieldsFilter(Filter):
    def filter(self, qs, value):
        if value:
            fields_with_matching_selects = CustomField.objects.filter(
                extra_data__icontains=value,
            )
            option_ids = []
            if fields_with_matching_selects.count() > 0:
                for field in fields_with_matching_selects:
                    options = field.extra_data.get("select_options", [])
                    for _, option in enumerate(options):
                        if option.get("label").lower().find(value.lower()) != -1:
                            option_ids.extend([option.get("id")])
            return (
                qs.filter(custom_fields__field__name__icontains=value)
                | qs.filter(custom_fields__value_text__icontains=value)
                | qs.filter(custom_fields__value_bool__icontains=value)
                | qs.filter(custom_fields__value_int__icontains=value)
                | qs.filter(custom_fields__value_float__icontains=value)
                | qs.filter(custom_fields__value_date__icontains=value)
                | qs.filter(custom_fields__value_url__icontains=value)
                | qs.filter(custom_fields__value_monetary__icontains=value)
                | qs.filter(custom_fields__value_document_ids__icontains=value)
                | qs.filter(custom_fields__value_select__in=option_ids)
            )
        else:
            return qs


class MimeTypeFilter(Filter):
    def filter(self, qs, value):
        if value:
            return qs.filter(mime_type__icontains=value)
        else:
            return qs


class SelectField(serializers.CharField):
    def __init__(self, custom_field: CustomField):
        self._options = custom_field.extra_data["select_options"]
        super().__init__(max_length=16)

    def to_internal_value(self, data):
        # If the supplied value is the option label instead of the ID
        try:
            data = next(
                option.get("id")
                for option in self._options
                if option.get("label") == data
            )
        except StopIteration:
            pass
        return super().to_internal_value(data)


def handle_validation_prefix(func: Callable):
    """
    Catch ValidationErrors raised by the wrapped function
    and add a prefix to the exception detail to track what causes the exception,
    similar to nested serializers.
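
    For example, if a decorated helper raises ValidationError(["bad value"]) and
    was called with validation_prefix="query", the error is re-raised as
    ValidationError({"query": ["bad value"]}), so the offending part of the
    query is identifiable.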
    """

    def wrapper(*args, validation_prefix=None, **kwargs):
        try:
            return func(*args, **kwargs)
        except serializers.ValidationError as e:
            raise serializers.ValidationError({validation_prefix: e.detail})

    # Update the signature to include the validation_prefix argument
    old_sig = inspect.signature(func)
    new_param = inspect.Parameter("validation_prefix", inspect.Parameter.KEYWORD_ONLY)
    new_sig = old_sig.replace(parameters=[*old_sig.parameters.values(), new_param])

    # Apply functools.wraps and manually set the new signature
    functools.update_wrapper(wrapper, func)
    wrapper.__signature__ = new_sig

    return wrapper


class CustomFieldQueryParser:
    EXPR_BY_CATEGORY = {
        "basic": ["exact", "in", "isnull", "exists"],
        "string": [
            "icontains",
            "istartswith",
            "iendswith",
        ],
        "arithmetic": [
            "gt",
            "gte",
            "lt",
            "lte",
            "range",
        ],
        "containment": ["contains"],
    }

    SUPPORTED_EXPR_CATEGORIES = {
        CustomField.FieldDataType.STRING: ("basic", "string"),
        CustomField.FieldDataType.URL: ("basic", "string"),
        CustomField.FieldDataType.DATE: ("basic", "arithmetic"),
        CustomField.FieldDataType.BOOL: ("basic",),
        CustomField.FieldDataType.INT: ("basic", "arithmetic"),
        CustomField.FieldDataType.FLOAT: ("basic", "arithmetic"),
        CustomField.FieldDataType.MONETARY: ("basic", "string", "arithmetic"),
        CustomField.FieldDataType.DOCUMENTLINK: ("basic", "containment"),
        CustomField.FieldDataType.SELECT: ("basic",),
    }

    DATE_COMPONENTS = [
        "year",
        "iso_year",
        "month",
        "day",
        "week",
        "week_day",
        "iso_week_day",
        "quarter",
    ]

    def __init__(
        self,
        validation_prefix,
        max_query_depth=10,
        max_atom_count=20,
    ) -> None:
        """
        A helper class that parses the query string into a `django.db.models.Q` for filtering
        documents based on custom field values.

        The syntax of the query expression is illustrated by the pseudo-code rules below:
        1. parse([`custom_field`, "exists", true]):
            matches documents with Q(custom_fields__field=`custom_field`)
        2. parse([`custom_field`, "exists", false]):
            matches documents with ~Q(custom_fields__field=`custom_field`)
        3. parse([`custom_field`, `op`, `value`]):
            matches documents with
            Q(custom_fields__field=`custom_field`, custom_fields__value_`type`__`op`=`value`)
        4. parse(["AND", [`q0`, `q1`, ..., `qn`]])
            -> parse(`q0`) & parse(`q1`) & ... & parse(`qn`)
        5. parse(["OR", [`q0`, `q1`, ..., `qn`]])
            -> parse(`q0`) | parse(`q1`) | ... | parse(`qn`)
        6. parse(["NOT", `q`])
            -> ~parse(`q`)

        Args:
            validation_prefix: Used to generate the ValidationError message.
            max_query_depth: Limits the maximum nesting depth of queries.
            max_atom_count: Limits the maximum number of atoms (i.e., rules 1, 2, 3) in the query.

        `max_query_depth` and `max_atom_count` can be set to guard against generating arbitrarily
        complex SQL queries.
|
||||
"""
        self._custom_fields: dict[int | str, CustomField] = {}
        self._validation_prefix = validation_prefix
        # Dummy ModelSerializer used to convert a Django models.Field to a serializers.Field.
        self._model_serializer = serializers.ModelSerializer()
        # Used for sanity checks
        self._max_query_depth = max_query_depth
        self._max_atom_count = max_atom_count
        self._current_depth = 0
        self._atom_count = 0
        # The set of annotations that we need to apply to the queryset
        self._annotations = {}

    def parse(self, query: str) -> tuple[Q, dict[str, Count]]:
        """
        Parses the query string into a `django.db.models.Q`
        and a set of annotations to be applied to the queryset.
        """
        try:
            expr = json.loads(query)
        except json.JSONDecodeError:
            raise serializers.ValidationError(
                {self._validation_prefix: [_("Value must be valid JSON.")]},
            )
        return (
            self._parse_expr(expr, validation_prefix=self._validation_prefix),
            self._annotations,
        )
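
    # Usage sketch (editorial, not part of the diff): parse() receives the raw
    # JSON string from the query parameter, e.g.
    #   q, annotations = parser.parse('["tax_year", "exact", 2023]')
    #   docs = Document.objects.annotate(**annotations).filter(q)
    # where "tax_year" is a hypothetical custom field name.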

    @handle_validation_prefix
    def _parse_expr(self, expr) -> Q:
        """
        Applies rule (1, 2, 3) or (4, 5, 6) based on the length of the expr.
        """
        with self._track_query_depth():
            if isinstance(expr, list | tuple):
                if len(expr) == 2:
                    return self._parse_logical_expr(*expr)
                elif len(expr) == 3:
                    return self._parse_atom(*expr)
            raise serializers.ValidationError(
                [_("Invalid custom field query expression")],
            )

    @handle_validation_prefix
    def _parse_expr_list(self, exprs) -> list[Q]:
        """
        Handles [`q0`, `q1`, ..., `qn`] in rules 4 & 5.
        """
        if not isinstance(exprs, list | tuple) or not exprs:
            raise serializers.ValidationError(
                [_("Invalid expression list. Must be nonempty.")],
            )
        return [
            self._parse_expr(expr, validation_prefix=i) for i, expr in enumerate(exprs)
        ]

    def _parse_logical_expr(self, op, args) -> Q:
        """
        Handles rules 4, 5, 6.
        """
        op_lower = op.lower()

        if op_lower == "not":
            return ~self._parse_expr(args, validation_prefix=1)

        if op_lower == "and":
            op_func = operator.and_
        elif op_lower == "or":
            op_func = operator.or_
        else:
            raise serializers.ValidationError(
                {"0": [_("Invalid logical operator {op!r}").format(op=op)]},
            )

        qs = self._parse_expr_list(args, validation_prefix="1")
        return functools.reduce(op_func, qs)

    def _parse_atom(self, id_or_name, op, value) -> Q:
        """
        Handles rules 1, 2, 3.
        """
        # Guard against queries with too many conditions.
        self._atom_count += 1
        if self._atom_count > self._max_atom_count:
            raise serializers.ValidationError(
                [_("Maximum number of query conditions exceeded.")],
            )

        custom_field = self._get_custom_field(id_or_name, validation_prefix="0")
        op = self._validate_atom_op(custom_field, op, validation_prefix="1")
        value = self._validate_atom_value(
            custom_field,
            op,
            value,
            validation_prefix="2",
        )

        # Needed because not all DB backends support Array __contains
        if (
            custom_field.data_type == CustomField.FieldDataType.DOCUMENTLINK
            and op == "contains"
        ):
            return self._parse_atom_doc_link_contains(custom_field, value)

        value_field_name = CustomFieldInstance.get_value_field_name(
            custom_field.data_type,
        )
        if (
            custom_field.data_type == CustomField.FieldDataType.MONETARY
            and op in self.EXPR_BY_CATEGORY["arithmetic"]
        ):
            value_field_name = "value_monetary_amount"
        has_field = Q(custom_fields__field=custom_field)

        # We need to use an annotation here because different atoms
        # might be referring to different instances of custom fields.
        annotation_name = f"_custom_field_filter_{len(self._annotations)}"

        # Our special exists operator.
        if op == "exists":
            annotation = Count("custom_fields", filter=has_field)
            # A document should have > 0 matches if it has this field, or 0 if it doesn't.
            query_op = "gt" if value else "exact"
            query = Q(**{f"{annotation_name}__{query_op}": 0})
        else:
            # Check that 1) the custom field name matches, and 2) the value satisfies the condition
            field_filter = has_field & Q(
                **{f"custom_fields__{value_field_name}__{op}": value},
            )
            # Annotate how many matching custom fields each document has
            annotation = Count("custom_fields", filter=field_filter)
            # Filter documents by that count
            query = Q(**{f"{annotation_name}__gt": 0})

        self._annotations[annotation_name] = annotation
        return query
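
    # Editorial note: every atom gets its own numbered annotation
    # (_custom_field_filter_0, _custom_field_filter_1, ...), so two atoms in one
    # query can match two different CustomFieldInstance rows on the same document.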

    @handle_validation_prefix
    def _get_custom_field(self, id_or_name):
        """Get the CustomField instance by id or name."""
        if id_or_name in self._custom_fields:
            return self._custom_fields[id_or_name]

        kwargs = (
            {"id": id_or_name} if isinstance(id_or_name, int) else {"name": id_or_name}
        )
        try:
            custom_field = CustomField.objects.get(**kwargs)
        except CustomField.DoesNotExist:
            raise serializers.ValidationError(
                [_("{name!r} is not a valid custom field.").format(name=id_or_name)],
            )
        self._custom_fields[custom_field.id] = custom_field
        self._custom_fields[custom_field.name] = custom_field
        return custom_field

    @staticmethod
    def _split_op(full_op):
        *prefix, op = str(full_op).rsplit("__", maxsplit=1)
        prefix = prefix[0] if prefix else None
        return prefix, op
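    # e.g. _split_op("year__exact") -> ("year", "exact"); _split_op("gt") -> (None, "gt")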

    @handle_validation_prefix
    def _validate_atom_op(self, custom_field, raw_op):
        """Check if the `op` is compatible with the type of the custom field."""
        prefix, op = self._split_op(raw_op)

        # Check if the operator is supported for the current data_type.
        supported = False
        for category in self.SUPPORTED_EXPR_CATEGORIES[custom_field.data_type]:
            if op in self.EXPR_BY_CATEGORY[category]:
                supported = True
                break

        # Check prefix
        if prefix is not None:
            if (
                prefix in self.DATE_COMPONENTS
                and custom_field.data_type == CustomField.FieldDataType.DATE
            ):
                pass  # ok - e.g., "year__exact" for date field
            else:
                supported = False  # anything else is invalid

        if not supported:
            raise serializers.ValidationError(
                [
                    _("{data_type} does not support query expr {expr!r}.").format(
                        data_type=custom_field.data_type,
                        expr=raw_op,
                    ),
                ],
            )

        return raw_op

    def _get_serializer_field(self, custom_field, full_op):
        """Return a serializers.Field for value validation."""
        prefix, op = self._split_op(full_op)
        field = None

        if op in ("isnull", "exists"):
            # `isnull` takes either True or False regardless of the data_type.
            field = serializers.BooleanField()
        elif (
            custom_field.data_type == CustomField.FieldDataType.DATE
            and prefix in self.DATE_COMPONENTS
        ):
            # DateField admits queries in the form of `year__exact`, etc. These take integers.
            field = serializers.IntegerField()
        elif custom_field.data_type == CustomField.FieldDataType.DOCUMENTLINK:
            # We can be more specific here and make sure the value is a list.
            field = serializers.ListField(child=serializers.IntegerField())
        elif custom_field.data_type == CustomField.FieldDataType.SELECT:
            # We use this custom serializer field to also permit SELECT option names.
            field = SelectField(custom_field)
        elif custom_field.data_type == CustomField.FieldDataType.URL:
            # For URL fields we don't need to be strict about validation (e.g., for istartswith).
            field = serializers.CharField()
        else:
            # The general case: inferred from the corresponding field in CustomFieldInstance.
            value_field_name = CustomFieldInstance.get_value_field_name(
                custom_field.data_type,
            )
            model_field = CustomFieldInstance._meta.get_field(value_field_name)
            field_name = model_field.deconstruct()[0]
            field_class, field_kwargs = self._model_serializer.build_standard_field(
                field_name,
                model_field,
            )
            field = field_class(**field_kwargs)
        field.allow_null = False

        # Need to set allow_blank manually because of the inconsistency in CustomFieldInstance validation.
        # See https://github.com/paperless-ngx/paperless-ngx/issues/7361.
        if isinstance(field, serializers.CharField):
            field.allow_blank = True

        if op == "in":
            # `in` takes a list of values.
            field = serializers.ListField(child=field, allow_empty=False)
        elif op == "range":
            # `range` takes a list of values, i.e., [start, end].
            field = serializers.ListField(
                child=field,
                min_length=2,
                max_length=2,
            )

        return field
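
    # Illustrative mapping (editorial sketch): a "range" op on a DATE field ends
    # up as ListField(child=DateField(), min_length=2, max_length=2), so a client
    # would send e.g. ["2024-01-01", "2024-12-31"].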

    @handle_validation_prefix
    def _validate_atom_value(self, custom_field, op, value):
        """Check if `value` is valid for the custom field and `op`. Returns the validated value."""
        serializer_field = self._get_serializer_field(custom_field, op)
        return serializer_field.run_validation(value)

    def _parse_atom_doc_link_contains(self, custom_field, value) -> Q:
        """
        Handles document link `contains` in a way that is supported by all DB backends.
        """

        # If the value is an empty set, this is trivially true for any
        # document with non-null document links.
        if not value:
            return Q(
                custom_fields__field=custom_field,
                custom_fields__value_document_ids__isnull=False,
            )

        # First we look up reverse links from the requested documents.
        links = CustomFieldInstance.objects.filter(
            document_id__in=value,
            field__data_type=CustomField.FieldDataType.DOCUMENTLINK,
        )

        # Check if any of the requested IDs are missing.
        missing_ids = set(value) - {link.document_id for link in links}
        if missing_ids:
            # The result should be an empty set in this case.
            return Q(id__in=[])

        # Take the intersection of the reverse links - this should be what we are looking for.
        document_ids_we_want = functools.reduce(
            operator.and_,
            (set(link.value_document_ids) for link in links),
        )

        return Q(id__in=document_ids_we_want)
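
    # Semantics sketch (editorial): ["links", "contains", [1, 2]] keeps exactly
    # the documents that both document 1 and document 2 link back to, computed
    # here via the reverse links so it also works on backends without a native
    # Array __contains lookup.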

    @contextmanager
    def _track_query_depth(self):
        # guard against queries that are too deeply nested
        self._current_depth += 1
        if self._current_depth > self._max_query_depth:
            raise serializers.ValidationError([_("Maximum nesting depth exceeded.")])
        try:
            yield
        finally:
            self._current_depth -= 1


@extend_schema_field(serializers.CharField)
class CustomFieldQueryFilter(Filter):
    def __init__(self, validation_prefix):
        """
        A filter that filters documents based on custom field name and value.

        Args:
            validation_prefix: Used to generate the ValidationError message.
        """
        super().__init__()
        self._validation_prefix = validation_prefix

    def filter(self, qs, value):
        if not value:
            return qs

        parser = CustomFieldQueryParser(
            self._validation_prefix,
            max_query_depth=CUSTOM_FIELD_QUERY_MAX_DEPTH,
            max_atom_count=CUSTOM_FIELD_QUERY_MAX_ATOMS,
        )
        q, annotations = parser.parse(value)

        return qs.annotate(**annotations).filter(q)
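
# Usage sketch (editorial, not part of the diff): wired up below as
# `custom_field_query = CustomFieldQueryFilter("custom_field_query")`, so an API
# client might call
#   GET /api/documents/?custom_field_query=["invoice_number", "exact", "INV-0001"]
# (URL-encoded; "invoice_number" and "INV-0001" are hypothetical values).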


class DocumentFilterSet(FilterSet):
    is_tagged = BooleanFilter(
        label="Is tagged",
        field_name="tags",
        lookup_expr="isnull",
        exclude=True,
    )

    tags__id__all = ObjectFilter(field_name="tags")

    tags__id__none = ObjectFilter(field_name="tags", exclude=True)

    tags__id__in = ObjectFilter(field_name="tags", in_list=True)

    correspondent__id__none = ObjectFilter(field_name="correspondent", exclude=True)

    document_type__id__none = ObjectFilter(field_name="document_type", exclude=True)

    storage_path__id__none = ObjectFilter(field_name="storage_path", exclude=True)

    is_in_inbox = InboxFilter()

    title_content = TitleContentFilter()

    owner__id__none = ObjectFilter(field_name="owner", exclude=True)

    custom_fields__icontains = CustomFieldsFilter()

    custom_fields__id__all = ObjectFilter(field_name="custom_fields__field")

    custom_fields__id__none = ObjectFilter(
        field_name="custom_fields__field",
        exclude=True,
    )

    custom_fields__id__in = ObjectFilter(
        field_name="custom_fields__field",
        in_list=True,
    )

    has_custom_fields = BooleanFilter(
        label="Has custom field",
        field_name="custom_fields",
        lookup_expr="isnull",
        exclude=True,
    )

    custom_field_query = CustomFieldQueryFilter("custom_field_query")

    shared_by__id = SharedByUser()

    mime_type = MimeTypeFilter()

    class Meta:
        model = Document
        fields = {
            "id": ID_KWARGS,
            "title": CHAR_KWARGS,
            "content": CHAR_KWARGS,
            "archive_serial_number": INT_KWARGS,
            "created": DATE_KWARGS,
            "added": DATE_KWARGS,
            "modified": DATE_KWARGS,
            "original_filename": CHAR_KWARGS,
            "checksum": CHAR_KWARGS,
            "correspondent": ["isnull"],
            "correspondent__id": ID_KWARGS,
            "correspondent__name": CHAR_KWARGS,
            "tags__id": ID_KWARGS,
            "tags__name": CHAR_KWARGS,
            "document_type": ["isnull"],
            "document_type__id": ID_KWARGS,
            "document_type__name": CHAR_KWARGS,
            "storage_path": ["isnull"],
            "storage_path__id": ID_KWARGS,
            "storage_path__name": CHAR_KWARGS,
            "owner": ["isnull"],
            "owner__id": ID_KWARGS,
            "custom_fields": ["icontains"],
        }


class ShareLinkFilterSet(FilterSet):
    class Meta:
        model = ShareLink
        fields = {
            "created": DATE_KWARGS,
            "expiration": DATE_KWARGS,
        }


class PaperlessTaskFilterSet(FilterSet):
    acknowledged = BooleanFilter(
        label="Acknowledged",
        field_name="acknowledged",
    )

    class Meta:
        model = PaperlessTask
        fields = {
            "type": ["exact"],
            "task_name": ["exact"],
            "status": ["exact"],
        }


class ObjectOwnedOrGrantedPermissionsFilter(ObjectPermissionsFilter):
    """
    A filter backend that limits results to objects for which the requesting
    user has read object-level permissions, objects the user owns, and objects
    without an owner (for backwards compatibility).
    """

    def filter_queryset(self, request, queryset, view):
        objects_with_perms = super().filter_queryset(request, queryset, view)
        objects_owned = queryset.filter(owner=request.user)
        objects_unowned = queryset.filter(owner__isnull=True)
        return objects_with_perms | objects_owned | objects_unowned
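
    # Editorial note: the union means an object is visible if any one condition
    # holds: an explicit object permission, ownership, or no owner at all.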


class ObjectOwnedPermissionsFilter(ObjectPermissionsFilter):
    """
    A filter backend that limits results to objects the requesting user owns,
    plus objects without an owner (for backwards compatibility).
    """

    def filter_queryset(self, request, queryset, view):
        if request.user.is_superuser:
            return queryset
        objects_owned = queryset.filter(owner=request.user)
        objects_unowned = queryset.filter(owner__isnull=True)
        return objects_owned | objects_unowned


class DocumentsOrderingFilter(OrderingFilter):
    field_name = "ordering"
    prefix = "custom_field_"

    def filter_queryset(self, request, queryset, view):
        param = request.query_params.get("ordering")
        if param and self.prefix in param:
            custom_field_id = int(param.split(self.prefix)[1])
            try:
                field = CustomField.objects.get(pk=custom_field_id)
            except CustomField.DoesNotExist:
                raise serializers.ValidationError(
                    {self.prefix + str(custom_field_id): [_("Custom field not found")]},
                )

            annotation = None
            match field.data_type:
                case CustomField.FieldDataType.STRING:
                    annotation = Subquery(
                        CustomFieldInstance.objects.filter(
                            document_id=OuterRef("id"),
                            field_id=custom_field_id,
                        ).values("value_text")[:1],
                    )
                case CustomField.FieldDataType.INT:
                    annotation = Subquery(
                        CustomFieldInstance.objects.filter(
                            document_id=OuterRef("id"),
                            field_id=custom_field_id,
                        ).values("value_int")[:1],
                    )
                case CustomField.FieldDataType.FLOAT:
                    annotation = Subquery(
                        CustomFieldInstance.objects.filter(
                            document_id=OuterRef("id"),
                            field_id=custom_field_id,
                        ).values("value_float")[:1],
                    )
                case CustomField.FieldDataType.DATE:
                    annotation = Subquery(
                        CustomFieldInstance.objects.filter(
                            document_id=OuterRef("id"),
                            field_id=custom_field_id,
                        ).values("value_date")[:1],
                    )
                case CustomField.FieldDataType.MONETARY:
                    annotation = Subquery(
                        CustomFieldInstance.objects.filter(
                            document_id=OuterRef("id"),
                            field_id=custom_field_id,
                        ).values("value_monetary_amount")[:1],
                    )
                case CustomField.FieldDataType.SELECT:
                    # Select options are a little more complicated, since the stored
                    # value is the id of the option, not its label. Additionally, to
                    # support SQLite we can't use StringAgg, so we create a Case
                    # statement per option whose value is the option's index in a
                    # list sorted by label, then Sum the results into a single
                    # sortable value for the annotation.

                    select_options = sorted(
                        field.extra_data.get("select_options", []),
                        key=lambda x: x.get("label"),
                    )
                    whens = [
                        When(
                            custom_fields__field_id=custom_field_id,
                            custom_fields__value_select=option.get("id"),
                            then=Value(idx, output_field=IntegerField()),
                        )
                        for idx, option in enumerate(select_options)
                    ]
                    whens.append(
                        When(
                            custom_fields__field_id=custom_field_id,
                            custom_fields__value_select__isnull=True,
                            then=Value(
                                len(select_options),
                                output_field=IntegerField(),
                            ),
                        ),
                    )
                    annotation = Sum(
                        Case(
                            *whens,
                            default=Value(0),
                            output_field=IntegerField(),
                        ),
                    )
                case CustomField.FieldDataType.DOCUMENTLINK:
                    annotation = Subquery(
                        CustomFieldInstance.objects.filter(
                            document_id=OuterRef("id"),
                            field_id=custom_field_id,
                        ).values("value_document_ids")[:1],
                    )
                case CustomField.FieldDataType.URL:
                    annotation = Subquery(
                        CustomFieldInstance.objects.filter(
                            document_id=OuterRef("id"),
                            field_id=custom_field_id,
                        ).values("value_url")[:1],
                    )
                case CustomField.FieldDataType.BOOL:
                    annotation = Subquery(
                        CustomFieldInstance.objects.filter(
                            document_id=OuterRef("id"),
                            field_id=custom_field_id,
                        ).values("value_bool")[:1],
                    )

            if not annotation:
                # Only happens if a new data type is added and not handled here
                raise ValueError("Invalid custom field data type")

            queryset = (
                queryset.annotate(
                    # Annotate the queryset with the custom field value
                    custom_field_value=annotation,
                    # Also annotate with a boolean for sorting on whether the field exists
                    has_field=Exists(
                        CustomFieldInstance.objects.filter(
                            document_id=OuterRef("id"),
                            field_id=custom_field_id,
                        ),
                    ),
                )
                .order_by(
                    "-has_field",
                    param.replace(
                        self.prefix + str(custom_field_id),
                        "custom_field_value",
                    ),
                )
                .distinct()
            )

        return super().filter_queryset(request, queryset, view)
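
# Usage sketch (editorial, not part of the diff): `?ordering=custom_field_5` or
# `?ordering=-custom_field_5` sorts by the value of custom field 5; documents
# that have the field always sort before those that don't, via the has_field
# annotation above.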
@@ -38,10 +38,10 @@ from whoosh.scoring import TF_IDF
 from whoosh.util.times import timespan
 from whoosh.writing import AsyncWriter
 
-from documents.models import CustomFieldInstance
-from documents.models import Document
-from documents.models import Note
-from documents.models import User
+from paperless.models import CustomFieldInstance
+from paperless.models import Document
+from paperless.models import Note
+from paperless.models import User
 
 if TYPE_CHECKING:
     from django.db.models import QuerySet
@@ -2,7 +2,7 @@ from django.core.management.base import BaseCommand
 from django.db import connection
 from django.db import models
 
-from documents.models import Document
+from paperless.models import Document
 
 
 class Command(BaseCommand):
@@ -4,8 +4,8 @@ from django.conf import settings
 from django.core.management.base import BaseCommand
 from django.core.management.base import CommandError
 
-from documents.models import Document
 from paperless.db import GnuPG
+from paperless.models import Document
 
 
 class Command(BaseCommand):
@@ -6,10 +6,10 @@ from django import db
 from django.conf import settings
 from django.core.management.base import BaseCommand
 
-from documents.management.commands.mixins import MultiProcessMixin
-from documents.management.commands.mixins import ProgressBarMixin
-from documents.models import Document
-from documents.tasks import update_document_content_maybe_archive_file
+from paperless.management.commands.mixins import MultiProcessMixin
+from paperless.management.commands.mixins import ProgressBarMixin
+from paperless.models import Document
+from paperless.tasks import update_document_content_maybe_archive_file
 
 logger = logging.getLogger("paperless.management.archiver")
@@ -16,12 +16,12 @@ from django.core.management.base import CommandError
 from watchdog.events import FileSystemEventHandler
 from watchdog.observers.polling import PollingObserver
 
-from documents.data_models import ConsumableDocument
-from documents.data_models import DocumentMetadataOverrides
-from documents.data_models import DocumentSource
-from documents.models import Tag
-from documents.parsers import is_file_ext_supported
-from documents.tasks import consume_file
+from paperless.data_models import ConsumableDocument
+from paperless.data_models import DocumentMetadataOverrides
+from paperless.data_models import DocumentSource
+from paperless.models import Tag
+from paperless.parsers import is_file_ext_supported
+from paperless.tasks import consume_file
 
 try:
     from inotifyrecursive import INotify
@@ -1,6 +1,6 @@
 from django.core.management.base import BaseCommand
 
-from documents.tasks import train_classifier
+from paperless.tasks import train_classifier
 
 
 class Command(BaseCommand):
@@ -32,32 +32,32 @@ if TYPE_CHECKING:
 if settings.AUDIT_LOG_ENABLED:
     from auditlog.models import LogEntry
 
-from documents.file_handling import delete_empty_directories
-from documents.file_handling import generate_filename
-from documents.management.commands.mixins import CryptMixin
-from documents.models import Correspondent
-from documents.models import CustomField
-from documents.models import CustomFieldInstance
-from documents.models import Document
-from documents.models import DocumentType
-from documents.models import Note
-from documents.models import SavedView
-from documents.models import SavedViewFilterRule
-from documents.models import StoragePath
-from documents.models import Tag
-from documents.models import UiSettings
-from documents.models import Workflow
-from documents.models import WorkflowAction
-from documents.models import WorkflowActionEmail
-from documents.models import WorkflowActionWebhook
-from documents.models import WorkflowTrigger
-from documents.settings import EXPORTER_ARCHIVE_NAME
-from documents.settings import EXPORTER_FILE_NAME
-from documents.settings import EXPORTER_THUMBNAIL_NAME
-from documents.utils import copy_file_with_basic_stats
 from paperless import version
 from paperless.db import GnuPG
+from paperless.file_handling import delete_empty_directories
+from paperless.file_handling import generate_filename
+from paperless.management.commands.mixins import CryptMixin
+from paperless.models import ApplicationConfiguration
+from paperless.models import Correspondent
+from paperless.models import CustomField
+from paperless.models import CustomFieldInstance
+from paperless.models import Document
+from paperless.models import DocumentType
+from paperless.models import Note
+from paperless.models import SavedView
+from paperless.models import SavedViewFilterRule
+from paperless.models import StoragePath
+from paperless.models import Tag
+from paperless.models import UiSettings
+from paperless.models import Workflow
+from paperless.models import WorkflowAction
+from paperless.models import WorkflowActionEmail
+from paperless.models import WorkflowActionWebhook
+from paperless.models import WorkflowTrigger
+from paperless.settings import EXPORTER_ARCHIVE_NAME
+from paperless.settings import EXPORTER_FILE_NAME
+from paperless.settings import EXPORTER_THUMBNAIL_NAME
+from paperless.utils import copy_file_with_basic_stats
 from paperless_mail.models import MailAccount
 from paperless_mail.models import MailRule
 
@@ -7,9 +7,9 @@ import tqdm
 from django.core.management import BaseCommand
 from django.core.management import CommandError
 
-from documents.management.commands.mixins import MultiProcessMixin
-from documents.management.commands.mixins import ProgressBarMixin
-from documents.models import Document
+from paperless.management.commands.mixins import MultiProcessMixin
+from paperless.management.commands.mixins import ProgressBarMixin
+from paperless.models import Document
 
 
 @dataclasses.dataclass(frozen=True)
@@ -21,24 +21,24 @@ from django.db.models.signals import m2m_changed
 from django.db.models.signals import post_save
 from filelock import FileLock
 
-from documents.file_handling import create_source_path_directory
-from documents.management.commands.mixins import CryptMixin
-from documents.models import Correspondent
-from documents.models import CustomField
-from documents.models import CustomFieldInstance
-from documents.models import Document
-from documents.models import DocumentType
-from documents.models import Note
-from documents.models import Tag
-from documents.parsers import run_convert
-from documents.settings import EXPORTER_ARCHIVE_NAME
-from documents.settings import EXPORTER_CRYPTO_SETTINGS_NAME
-from documents.settings import EXPORTER_FILE_NAME
-from documents.settings import EXPORTER_THUMBNAIL_NAME
-from documents.signals.handlers import check_paths_and_prune_custom_fields
-from documents.signals.handlers import update_filename_and_move_files
-from documents.utils import copy_file_with_basic_stats
 from paperless import version
+from paperless.file_handling import create_source_path_directory
+from paperless.management.commands.mixins import CryptMixin
+from paperless.models import Correspondent
+from paperless.models import CustomField
+from paperless.models import CustomFieldInstance
+from paperless.models import Document
+from paperless.models import DocumentType
+from paperless.models import Note
+from paperless.models import Tag
+from paperless.parsers import run_convert
+from paperless.settings import EXPORTER_ARCHIVE_NAME
+from paperless.settings import EXPORTER_CRYPTO_SETTINGS_NAME
+from paperless.settings import EXPORTER_FILE_NAME
+from paperless.settings import EXPORTER_THUMBNAIL_NAME
+from paperless.signals.handlers import check_paths_and_prune_custom_fields
+from paperless.signals.handlers import update_filename_and_move_files
+from paperless.utils import copy_file_with_basic_stats
 
 if settings.AUDIT_LOG_ENABLED:
     from auditlog.registry import auditlog
@@ -1,9 +1,9 @@
 from django.core.management import BaseCommand
 from django.db import transaction
 
-from documents.management.commands.mixins import ProgressBarMixin
-from documents.tasks import index_optimize
-from documents.tasks import index_reindex
+from paperless.management.commands.mixins import ProgressBarMixin
+from paperless.tasks import index_optimize
+from paperless.tasks import index_reindex
 
 
 class Command(ProgressBarMixin, BaseCommand):
@@ -4,8 +4,8 @@ import tqdm
 from django.core.management.base import BaseCommand
 from django.db.models.signals import post_save
 
-from documents.management.commands.mixins import ProgressBarMixin
-from documents.models import Document
+from paperless.management.commands.mixins import ProgressBarMixin
+from paperless.models import Document
 
 
 class Command(ProgressBarMixin, BaseCommand):
@@ -3,13 +3,13 @@ import logging
 import tqdm
 from django.core.management.base import BaseCommand
 
-from documents.classifier import load_classifier
-from documents.management.commands.mixins import ProgressBarMixin
-from documents.models import Document
-from documents.signals.handlers import set_correspondent
-from documents.signals.handlers import set_document_type
-from documents.signals.handlers import set_storage_path
-from documents.signals.handlers import set_tags
+from paperless.classifier import load_classifier
+from paperless.management.commands.mixins import ProgressBarMixin
+from paperless.models import Document
+from paperless.signals.handlers import set_correspondent
+from paperless.signals.handlers import set_document_type
+from paperless.signals.handlers import set_storage_path
+from paperless.signals.handlers import set_tags
 
 logger = logging.getLogger("paperless.management.retagger")
 
@@ -1,7 +1,7 @@
 from django.core.management.base import BaseCommand
 
-from documents.management.commands.mixins import ProgressBarMixin
-from documents.sanity_checker import check_sanity
+from paperless.management.commands.mixins import ProgressBarMixin
+from paperless.sanity_checker import check_sanity
 
 
 class Command(ProgressBarMixin, BaseCommand):
@@ -6,10 +6,10 @@ import tqdm
 from django import db
 from django.core.management.base import BaseCommand
 
-from documents.management.commands.mixins import MultiProcessMixin
-from documents.management.commands.mixins import ProgressBarMixin
-from documents.models import Document
-from documents.parsers import get_parser_class_for_mime_type
+from paperless.management.commands.mixins import MultiProcessMixin
+from paperless.management.commands.mixins import ProgressBarMixin
+from paperless.models import Document
+from paperless.parsers import get_parser_class_for_mime_type
 
 
 def _process_document(doc_id):
@@ -8,11 +8,11 @@ from cryptography.hazmat.primitives import hashes
 from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
 from django.core.management import CommandError
 
-from documents.settings import EXPORTER_CRYPTO_ALGO_NAME
-from documents.settings import EXPORTER_CRYPTO_KEY_ITERATIONS_NAME
-from documents.settings import EXPORTER_CRYPTO_KEY_SIZE_NAME
-from documents.settings import EXPORTER_CRYPTO_SALT_NAME
-from documents.settings import EXPORTER_CRYPTO_SETTINGS_NAME
+from paperless.settings import EXPORTER_CRYPTO_ALGO_NAME
+from paperless.settings import EXPORTER_CRYPTO_KEY_ITERATIONS_NAME
+from paperless.settings import EXPORTER_CRYPTO_KEY_SIZE_NAME
+from paperless.settings import EXPORTER_CRYPTO_SALT_NAME
+from paperless.settings import EXPORTER_CRYPTO_SETTINGS_NAME
 
 
 class CryptFields(TypedDict):
@@ -3,7 +3,7 @@ from django.core.management.base import BaseCommand
 from django.db import transaction
 from tqdm import tqdm
 
-from documents.management.commands.mixins import ProgressBarMixin
+from paperless.management.commands.mixins import ProgressBarMixin
 
 
 class Command(BaseCommand, ProgressBarMixin):
@@ -5,20 +5,20 @@ import re
 from fnmatch import fnmatch
 from typing import TYPE_CHECKING
 
-from documents.data_models import ConsumableDocument
-from documents.data_models import DocumentSource
-from documents.models import Correspondent
-from documents.models import Document
-from documents.models import DocumentType
-from documents.models import MatchingModel
-from documents.models import StoragePath
-from documents.models import Tag
-from documents.models import Workflow
-from documents.models import WorkflowTrigger
-from documents.permissions import get_objects_for_user_owner_aware
+from paperless.data_models import ConsumableDocument
+from paperless.data_models import DocumentSource
+from paperless.models import Correspondent
+from paperless.models import Document
+from paperless.models import DocumentType
+from paperless.models import MatchingModel
+from paperless.models import StoragePath
+from paperless.models import Tag
+from paperless.models import Workflow
+from paperless.models import WorkflowTrigger
+from paperless.permissions import get_objects_for_user_owner_aware
 
 if TYPE_CHECKING:
-    from documents.classifier import DocumentClassifier
+    from paperless.classifier import DocumentClassifier
 
 logger = logging.getLogger("paperless.matching")
 
File diff suppressed because it is too large
@@ -15,10 +15,10 @@ from typing import TYPE_CHECKING
 from django.conf import settings
 from django.utils import timezone
 
-from documents.loggers import LoggingMixin
-from documents.signals import document_consumer_declaration
-from documents.utils import copy_file_with_basic_stats
-from documents.utils import run_subprocess
+from paperless.loggers import LoggingMixin
+from paperless.signals import document_consumer_declaration
+from paperless.utils import copy_file_with_basic_stats
+from paperless.utils import run_subprocess
 
 if TYPE_CHECKING:
     import datetime
@@ -2,9 +2,9 @@ import abc
 from pathlib import Path
 from typing import Final
 
-from documents.data_models import ConsumableDocument
-from documents.data_models import DocumentMetadataOverrides
-from documents.plugins.helpers import ProgressManager
+from paperless.data_models import ConsumableDocument
+from paperless.data_models import DocumentMetadataOverrides
+from paperless.plugins.helpers import ProgressManager
 
 
 class StopConsumeTaskError(Exception):
(Image diff omitted: 4.8 KiB before and after)
@@ -10,8 +10,8 @@ from django.conf import settings
 from django.utils import timezone
 from tqdm import tqdm
 
-from documents.models import Document
-from documents.models import PaperlessTask
+from paperless.models import Document
+from paperless.models import PaperlessTask
 
 
 class SanityCheckMessages:
File diff suppressed because it is too large
@@ -176,7 +176,7 @@ def _parse_beat_schedule() -> dict:
             "env_key": "PAPERLESS_TRAIN_TASK_CRON",
             # Default hourly at 5 minutes past the hour
             "env_default": "5 */1 * * *",
-            "task": "documents.tasks.train_classifier",
+            "task": "paperless.tasks.train_classifier",
             "options": {
                 # 1 minute before default schedule sends again
                 "expires": 59.0 * 60.0,
@@ -187,7 +187,7 @@ def _parse_beat_schedule() -> dict:
             "env_key": "PAPERLESS_INDEX_TASK_CRON",
             # Default daily at midnight
            "env_default": "0 0 * * *",
-            "task": "documents.tasks.index_optimize",
+            "task": "paperless.tasks.index_optimize",
             "options": {
                 # 1 hour before default schedule sends again
                 "expires": 23.0 * 60.0 * 60.0,
@@ -198,7 +198,7 @@ def _parse_beat_schedule() -> dict:
             "env_key": "PAPERLESS_SANITY_TASK_CRON",
             # Default Sunday at 00:30
             "env_default": "30 0 * * sun",
-            "task": "documents.tasks.sanity_check",
+            "task": "paperless.tasks.sanity_check",
             "options": {
                 # 1 hour before default schedule sends again
                 "expires": ((7.0 * 24.0) - 1.0) * 60.0 * 60.0,
@@ -209,7 +209,7 @@ def _parse_beat_schedule() -> dict:
             "env_key": "PAPERLESS_EMPTY_TRASH_TASK_CRON",
             # Default daily at 01:00
             "env_default": "0 1 * * *",
-            "task": "documents.tasks.empty_trash",
+            "task": "paperless.tasks.empty_trash",
             "options": {
                 # 1 hour before default schedule sends again
                 "expires": 23.0 * 60.0 * 60.0,
@@ -220,7 +220,7 @@ def _parse_beat_schedule() -> dict:
             "env_key": "PAPERLESS_WORKFLOW_SCHEDULED_TASK_CRON",
             # Default hourly at 5 minutes past the hour
             "env_default": "5 */1 * * *",
-            "task": "documents.tasks.check_scheduled_workflows",
+            "task": "paperless.tasks.check_scheduled_workflows",
             "options": {
                 # 1 minute before default schedule sends again
                 "expires": 59.0 * 60.0,
@@ -363,7 +363,7 @@ SPECTACULAR_SETTINGS = {
         "url": "https://docs.paperless-ngx.com/api/",
     },
     "ENUM_NAME_OVERRIDES": {
-        "MatchingAlgorithm": "documents.models.MatchingModel.MATCHING_ALGORITHMS",
+        "MatchingAlgorithm": "paperless.models.MatchingModel.MATCHING_ALGORITHMS",
     },
 }
 
@@ -443,7 +443,7 @@ TEMPLATES = [
                 "django.template.context_processors.request",
                 "django.contrib.auth.context_processors.auth",
                 "django.contrib.messages.context_processors.messages",
-                "documents.context_processors.settings",
+                "paperless.context_processors.settings",
             ],
         },
     },
@@ -565,6 +565,10 @@ if DEBUG:
     # Allow access from the angular development server during debugging
    CORS_ALLOWED_ORIGINS.append("http://localhost:4200")
 
+CORS_EXPOSE_HEADERS = [
+    "Content-Disposition",
+]
+
 ALLOWED_HOSTS = __get_list("PAPERLESS_ALLOWED_HOSTS", ["*"])
 if ALLOWED_HOSTS != ["*"]:
     # always allow localhost. Necessary e.g. for healthcheck in docker.
@@ -1262,3 +1266,15 @@ OUTLOOK_OAUTH_ENABLED = bool(
     and OUTLOOK_OAUTH_CLIENT_ID
     and OUTLOOK_OAUTH_CLIENT_SECRET,
 )
+
+# Defines the names of file/thumbnail for the manifest
+# for exporting/importing commands
+EXPORTER_FILE_NAME = "__exported_file_name__"
+EXPORTER_THUMBNAIL_NAME = "__exported_thumbnail_name__"
+EXPORTER_ARCHIVE_NAME = "__exported_archive_name__"
+
+EXPORTER_CRYPTO_SETTINGS_NAME = "__crypto__"
+EXPORTER_CRYPTO_SALT_NAME = "__salt_hex__"
+EXPORTER_CRYPTO_KEY_ITERATIONS_NAME = "__key_iters__"
+EXPORTER_CRYPTO_KEY_SIZE_NAME = "__key_size__"
+EXPORTER_CRYPTO_ALGO_NAME = "__key_algo__"
@@ -23,35 +23,35 @@ from django.utils import timezone
 from filelock import FileLock
 from guardian.shortcuts import remove_perm
 
-from documents import matching
-from documents.caching import clear_document_caches
-from documents.file_handling import create_source_path_directory
-from documents.file_handling import delete_empty_directories
-from documents.file_handling import generate_unique_filename
-from documents.mail import send_email
-from documents.models import Correspondent
-from documents.models import CustomField
-from documents.models import CustomFieldInstance
-from documents.models import Document
-from documents.models import DocumentType
-from documents.models import MatchingModel
-from documents.models import PaperlessTask
-from documents.models import SavedView
-from documents.models import Tag
-from documents.models import Workflow
-from documents.models import WorkflowAction
-from documents.models import WorkflowRun
-from documents.models import WorkflowTrigger
-from documents.permissions import get_objects_for_user_owner_aware
-from documents.permissions import set_permissions_for_object
-from documents.templating.workflows import parse_w_workflow_placeholders
+from paperless import matching
+from paperless.caching import clear_document_caches
+from paperless.file_handling import create_source_path_directory
+from paperless.file_handling import delete_empty_directories
+from paperless.file_handling import generate_unique_filename
+from paperless.mail import send_email
+from paperless.models import Correspondent
+from paperless.models import CustomField
+from paperless.models import CustomFieldInstance
+from paperless.models import Document
+from paperless.models import DocumentType
+from paperless.models import MatchingModel
+from paperless.models import PaperlessTask
+from paperless.models import SavedView
+from paperless.models import Tag
+from paperless.models import Workflow
+from paperless.models import WorkflowAction
+from paperless.models import WorkflowRun
+from paperless.models import WorkflowTrigger
+from paperless.permissions import get_objects_for_user_owner_aware
+from paperless.permissions import set_permissions_for_object
+from paperless.templating.workflows import parse_w_workflow_placeholders
 
 if TYPE_CHECKING:
     from pathlib import Path
 
-    from documents.classifier import DocumentClassifier
-    from documents.data_models import ConsumableDocument
-    from documents.data_models import DocumentMetadataOverrides
+    from paperless.classifier import DocumentClassifier
+    from paperless.data_models import ConsumableDocument
+    from paperless.data_models import DocumentMetadataOverrides
 
 logger = logging.getLogger("paperless.handlers")
 
@@ -578,7 +578,7 @@ def cleanup_custom_field_deletion(sender, instance: CustomField, **kwargs):
 
 
 def add_to_index(sender, document, **kwargs):
-    from documents import index
+    from paperless import index
 
     index.add_or_update_document(document)
 
@@ -1225,14 +1225,7 @@ def run_workflows(
     document.refresh_from_db()
     doc_tag_ids = list(document.tags.values_list("pk", flat=True))
 
-    # If a workflow is supplied, we don't need to check if it matches
-    matches = (
-        matching.document_matches_workflow(document, workflow, trigger_type)
-        if workflow_to_run is None
-        else True
-    )
-
-    if matches:
+    if matching.document_matches_workflow(document, workflow, trigger_type):
         action: WorkflowAction
         for action in workflow.actions.all():
             message = f"Applying {action} from {workflow}"
@@ -1278,7 +1271,7 @@ def before_task_publish_handler(sender=None, headers=None, body=None, **kwargs):
     https://docs.celeryq.dev/en/stable/internals/protocol.html#version-2
 
     """
-    if "task" not in headers or headers["task"] != "documents.tasks.consume_file":
+    if "task" not in headers or headers["task"] != "paperless.tasks.consume_file":
         # Assumption: this is only ever a v2 message
         return
 
@@ -19,39 +19,39 @@ from django.utils import timezone
 from filelock import FileLock
 from whoosh.writing import AsyncWriter
 
-from documents import index
-from documents import sanity_checker
-from documents.barcodes import BarcodePlugin
-from documents.caching import clear_document_caches
-from documents.classifier import DocumentClassifier
-from documents.classifier import load_classifier
-from documents.consumer import ConsumerPlugin
-from documents.consumer import WorkflowTriggerPlugin
-from documents.data_models import ConsumableDocument
-from documents.data_models import DocumentMetadataOverrides
-from documents.double_sided import CollatePlugin
-from documents.file_handling import create_source_path_directory
-from documents.file_handling import generate_unique_filename
-from documents.models import Correspondent
-from documents.models import CustomFieldInstance
-from documents.models import Document
-from documents.models import DocumentType
-from documents.models import PaperlessTask
-from documents.models import StoragePath
-from documents.models import Tag
-from documents.models import Workflow
-from documents.models import WorkflowRun
-from documents.models import WorkflowTrigger
-from documents.parsers import DocumentParser
-from documents.parsers import get_parser_class_for_mime_type
-from documents.plugins.base import ConsumeTaskPlugin
-from documents.plugins.base import ProgressManager
-from documents.plugins.base import StopConsumeTaskError
-from documents.plugins.helpers import ProgressStatusOptions
-from documents.sanity_checker import SanityCheckFailedException
-from documents.signals import document_updated
-from documents.signals.handlers import cleanup_document_deletion
-from documents.signals.handlers import run_workflows
+from paperless import index
+from paperless import sanity_checker
+from paperless.barcodes import BarcodePlugin
+from paperless.caching import clear_document_caches
+from paperless.classifier import DocumentClassifier
+from paperless.classifier import load_classifier
+from paperless.consumer import ConsumerPlugin
+from paperless.consumer import WorkflowTriggerPlugin
+from paperless.data_models import ConsumableDocument
+from paperless.data_models import DocumentMetadataOverrides
+from paperless.double_sided import CollatePlugin
+from paperless.file_handling import create_source_path_directory
+from paperless.file_handling import generate_unique_filename
+from paperless.models import Correspondent
+from paperless.models import CustomFieldInstance
+from paperless.models import Document
+from paperless.models import DocumentType
+from paperless.models import PaperlessTask
+from paperless.models import StoragePath
+from paperless.models import Tag
+from paperless.models import Workflow
+from paperless.models import WorkflowRun
+from paperless.models import WorkflowTrigger
+from paperless.parsers import DocumentParser
+from paperless.parsers import get_parser_class_for_mime_type
+from paperless.plugins.base import ConsumeTaskPlugin
+from paperless.plugins.base import ProgressManager
+from paperless.plugins.base import StopConsumeTaskError
+from paperless.plugins.helpers import ProgressStatusOptions
+from paperless.sanity_checker import SanityCheckFailedException
+from paperless.signals import document_updated
+from paperless.signals.handlers import cleanup_document_deletion
+from paperless.signals.handlers import run_workflows
 
 if settings.AUDIT_LOG_ENABLED:
     from auditlog.models import LogEntry
Some files were not shown because too many files have changed in this diff