Rewrites the email parsing to be clearer and more concise.
Adds tests that use httpx mocked responses to stand in for a server, even offline.
This commit is contained in:
parent 6e65558ea4
commit 2c1cd25be4
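The rewritten tests below exercise the parser against canned HTTP responses instead of live Tika and Gotenberg servers. A minimal sketch of that pattern, assuming the pytest-httpx plugin supplies the `httpx_mock` fixture (the `HttpxMockMixin` used by the tests wires this onto `self.httpx_mock`, but its definition is outside this commit):

# Sketch only: illustrates the mocking pattern the new tests rely on. Assumes
# pytest-httpx is installed and provides the `httpx_mock` fixture; the
# HttpxMockMixin referenced in the diff is not part of this commit.
import httpx


def test_tika_text_extraction_is_mocked(httpx_mock):
    # Queue a canned Tika-style JSON body; no server needs to be running.
    httpx_mock.add_response(
        json={
            "Content-Type": "text/html",
            "X-TIKA:Parsed-By": [],
            "X-TIKA:content": "Some Text",
        },
    )

    # Any httpx request now receives the queued response, even offline.
    response = httpx.put(
        "http://localhost:9998/tika/text",
        content="<p>Some Text</p>",
    )
    assert response.json()["X-TIKA:content"] == "Some Text"

Because every httpx call is intercepted in-process, the CI workflow below can drop the TIKA_LIVE and GOTENBERG_LIVE end-to-end settings.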
.github/workflows/ci.yml (vendored): 15 changes
@@ -106,15 +106,6 @@ jobs:
       matrix:
         python-version: ['3.8', '3.9', '3.10']
       fail-fast: false
-    env:
-      # Enable Tika end to end testing
-      TIKA_LIVE: 1
-      # Enable paperless_mail testing against real server
-      PAPERLESS_MAIL_TEST_HOST: ${{ secrets.TEST_MAIL_HOST }}
-      PAPERLESS_MAIL_TEST_USER: ${{ secrets.TEST_MAIL_USER }}
-      PAPERLESS_MAIL_TEST_PASSWD: ${{ secrets.TEST_MAIL_PASSWD }}
-      # Enable Gotenberg end to end testing
-      GOTENBERG_LIVE: 1
     steps:
       -
         name: Checkout
@@ -156,6 +147,12 @@ jobs:
           pipenv --python ${{ steps.setup-python.outputs.python-version }} run pip list
       -
         name: Tests
+        env:
+          PAPERLESS_CI_TEST: 1
+          # Enable paperless_mail testing against real server
+          PAPERLESS_MAIL_TEST_HOST: ${{ secrets.TEST_MAIL_HOST }}
+          PAPERLESS_MAIL_TEST_USER: ${{ secrets.TEST_MAIL_USER }}
+          PAPERLESS_MAIL_TEST_PASSWD: ${{ secrets.TEST_MAIL_PASSWD }}
         run: |
           cd src/
           pipenv --python ${{ steps.setup-python.outputs.python-version }} run pytest -ra
Pipfile.lock (generated): 6 changes
@@ -1745,11 +1745,11 @@
        },
        "tika-client": {
            "hashes": [
-                "sha256:6f2afab12eb46cd7b4ed6c34c9c2a1791a45d2f479c0da0076936dc6dbfe8061",
-                "sha256:f2c23cb76677b7b8be70e2d95ac3418ed046b1514bff920f7460beae1ca3342b"
+                "sha256:43b53816b3783c9c77e16df314cad5ad66ab606391c26ad4bc94a784d473a156",
+                "sha256:e1ef3447b4307059e4a836e3786088498637323733f83a2f807b77f998d77610"
            ],
            "index": "pypi",
-            "version": "==0.0.2"
+            "version": "==0.0.3"
        },
        "tornado": {
            "hashes": [
|
@@ -105,6 +105,20 @@ class FileSystemAssertsMixin:
     def assertIsNotDir(self, path: Union[PathLike, str]):
         self.assertFalse(Path(path).resolve().is_dir(), f"Dir does exist: {path}")
 
+    def assertFilesEqual(
+        self,
+        path1: Union[PathLike, str],
+        path2: Union[PathLike, str],
+    ):
+        path1 = Path(path1)
+        path2 = Path(path2)
+        import hashlib
+
+        hash1 = hashlib.sha256(path1.read_bytes()).hexdigest()
+        hash2 = hashlib.sha256(path2.read_bytes()).hexdigest()
+
+        self.assertEqual(hash1, hash2, "File SHA256 mismatch")
+
 
 class ConsumerProgressMixin:
     def setUp(self) -> None:
|
@@ -1,8 +1,7 @@
-import os
 import re
 from html import escape
-from io import BytesIO
-from io import StringIO
+from pathlib import Path
+from typing import List
 
 import httpx
 from bleach import clean
@@ -11,8 +10,9 @@ from django.conf import settings
 from django.utils.timezone import is_naive
 from django.utils.timezone import make_aware
 from humanfriendly import format_size
+from imap_tools import MailAttachment
 from imap_tools import MailMessage
-from tika import parser
+from tika_client import TikaClient
 
 from documents.parsers import DocumentParser
 from documents.parsers import ParseError
@@ -22,33 +22,15 @@ from documents.parsers import make_thumbnail_from_pdf
 class MailDocumentParser(DocumentParser):
     """
     This parser uses imap_tools to parse .eml files, generates pdf using
-    gotenbergs and sends the html part to a local tika server for text extraction.
+    Gotenberg and sends the html part to a Tika server for text extraction.
     """
 
     gotenberg_server = settings.TIKA_GOTENBERG_ENDPOINT
     tika_server = settings.TIKA_ENDPOINT
 
     logging_name = "paperless.parsing.mail"
-    _parsed = None
 
-    def get_parsed(self, document_path) -> MailMessage:
-        if not self._parsed:
-            try:
-                with open(document_path, "rb") as eml:
-                    self._parsed = MailMessage.from_bytes(eml.read())
-            except Exception as err:
-                raise ParseError(
-                    f"Could not parse {document_path}: {err}",
-                ) from err
-            if not self._parsed.from_values:
-                self._parsed = None
-                raise ParseError(
-                    f"Could not parse {document_path}: Missing 'from'",
-                )
-
-        return self._parsed
-
-    def get_thumbnail(self, document_path, mime_type, file_name=None):
+    def get_thumbnail(self, document_path: Path, mime_type: str, file_name=None):
         if not self.archive_path:
             self.archive_path = self.generate_pdf(document_path)
 
@@ -58,11 +40,11 @@ class MailDocumentParser(DocumentParser):
             self.logging_group,
         )
 
-    def extract_metadata(self, document_path, mime_type):
+    def extract_metadata(self, document_path: Path, mime_type: str):
         result = []
 
         try:
-            mail = self.get_parsed(document_path)
+            mail = self.parse_file_to_message(document_path)
         except ParseError as e:
             self.log.warning(
                 f"Error while fetching document metadata for {document_path}: {e}",
@@ -106,101 +88,157 @@ class MailDocumentParser(DocumentParser):
         result.sort(key=lambda item: (item["prefix"], item["key"]))
         return result
 
-    def parse(self, document_path, mime_type, file_name=None):
+    def parse(self, document_path: Path, mime_type: str, file_name=None):
+        """
+        Parses the given .eml into formatted text, based on the decoded email.
+
+        """
+
         def strip_text(text: str):
+            """
+            Reduces the spacing of the given text string
+            """
             text = re.sub(r"\s+", " ", text)
             text = re.sub(r"(\n *)+", "\n", text)
             return text.strip()
 
-        mail = self.get_parsed(document_path)
-
-        self.text = f"Subject: {mail.subject}\n\n"
-        self.text += f"From: {mail.from_values.full}\n\n"
-        self.text += f"To: {', '.join(address.full for address in mail.to_values)}\n\n"
-        if len(mail.cc_values) >= 1:
-            self.text += (
-                f"CC: {', '.join(address.full for address in mail.cc_values)}\n\n"
-            )
-        if len(mail.bcc_values) >= 1:
-            self.text += (
-                f"BCC: {', '.join(address.full for address in mail.bcc_values)}\n\n"
-            )
-        if len(mail.attachments) >= 1:
-            att = []
-            for a in mail.attachments:
-                att.append(f"{a.filename} ({format_size(a.size, binary=True)})")
-
-            self.text += f"Attachments: {', '.join(att)}\n\n"
-
-        if mail.html:
-            self.text += "HTML content: " + strip_text(self.tika_parse(mail.html))
-
-        self.text += f"\n\n{strip_text(mail.text)}"
+        def build_formatted_text(mail_message: MailMessage) -> str:
+            """
+            Constructs a formatted string, based on the given email. Basically tries
+            to get most of the email content, included front matter, into a nice string
+            """
+            fmt_text = f"Subject: {mail_message.subject}\n\n"
+            fmt_text += f"From: {mail_message.from_values.full}\n\n"
+            to_list = [address.full for address in mail_message.to_values]
+            fmt_text += f"To: {', '.join(to_list)}\n\n"
+            if mail_message.cc_values:
+                fmt_text += (
+                    f"CC: {', '.join(address.full for address in mail.cc_values)}\n\n"
+                )
+            if mail_message.bcc_values:
+                fmt_text += (
+                    f"BCC: {', '.join(address.full for address in mail.bcc_values)}\n\n"
+                )
+            if mail_message.attachments:
+                att = []
+                for a in mail.attachments:
+                    att.append(f"{a.filename} ({format_size(a.size, binary=True)})")
+
+                fmt_text += f"Attachments: {', '.join(att)}\n\n"
+
+            if mail.html:
+                fmt_text += "HTML content: " + strip_text(self.tika_parse(mail.html))
+
+            fmt_text += f"\n\n{strip_text(mail.text)}"
+
+            return fmt_text
+
+        self.log.debug(f"Parsing file {document_path.name} into an email")
+        mail = self.parse_file_to_message(document_path)
+
+        self.log.debug("Building formatted text from email")
+        self.text = build_formatted_text(mail)
 
         if is_naive(mail.date):
             self.date = make_aware(mail.date)
         else:
             self.date = mail.date
 
-        self.archive_path = self.generate_pdf(document_path)
+        self.log.debug("Creating a PDF from the email")
+        self.archive_path = self.generate_pdf(mail)
+
+    @staticmethod
+    def parse_file_to_message(filepath: Path) -> MailMessage:
+        """
+        Parses the given .eml file into a MailMessage object
+        """
+        try:
+            with filepath.open("rb") as eml:
+                parsed = MailMessage.from_bytes(eml.read())
+                if parsed.from_values is None:
+                    raise ParseError(
+                        f"Could not parse {filepath}: Missing 'from'",
+                    )
+        except Exception as err:
+            raise ParseError(
+                f"Could not parse {filepath}: {err}",
+            ) from err
+
+        return parsed
 
     def tika_parse(self, html: str):
         self.log.info("Sending content to Tika server")
 
         try:
-            parsed = parser.from_buffer(html, self.tika_server)
+            with TikaClient(tika_url=self.tika_server) as client:
+                parsed = client.tika.as_text.from_buffer(html, "text/html")
+
+                if "X-TIKA:content" in parsed.data:
+                    return parsed.data["X-TIKA:content"].strip()
+                return ""
         except Exception as err:
             raise ParseError(
                 f"Could not parse content with tika server at "
                 f"{self.tika_server}: {err}",
             ) from err
-        if parsed["content"]:
-            return parsed["content"]
-        else:
-            return ""
 
-    def generate_pdf(self, document_path):
-        pdf_collection = []
-        url_merge = self.gotenberg_server + "/forms/pdfengines/merge"
-        pdf_path = os.path.join(self.tempdir, "merged.pdf")
-        mail = self.get_parsed(document_path)
-
-        pdf_collection.append(("1_mail.pdf", self.generate_pdf_from_mail(mail)))
-
-        if not mail.html:
-            with open(pdf_path, "wb") as file:
-                file.write(pdf_collection[0][1])
-                file.close()
-            return pdf_path
-        else:
-            pdf_collection.append(
-                (
-                    "2_html.pdf",
-                    self.generate_pdf_from_html(mail.html, mail.attachments),
-                ),
-            )
-
-        files = {}
-        for name, content in pdf_collection:
-            files[name] = (name, BytesIO(content))
-        headers = {}
-        try:
-            response = httpx.post(url_merge, files=files, headers=headers)
-            response.raise_for_status()  # ensure we notice bad responses
-        except Exception as err:
-            raise ParseError(f"Error while converting document to PDF: {err}") from err
-
-        with open(pdf_path, "wb") as file:
-            file.write(response.content)
-            file.close()
-
-        return pdf_path
-
-    @staticmethod
-    def mail_to_html(mail: MailMessage) -> StringIO:
-        data = {}
-
-        def clean_html(text: str):
+    def generate_pdf(self, mail_message: MailMessage) -> Path:
+        archive_path = Path(self.tempdir) / "merged.pdf"
+
+        mail_pdf_file = self.generate_pdf_from_mail(mail_message)
+
+        # If no HTML content, create the PDF from the message
+        # Otherwise, create 2 PDFs and merge them with Gotenberg
+        if not mail_message.html:
+            archive_path.write_bytes(mail_pdf_file.read_bytes())
+        else:
+            url_merge = self.gotenberg_server + "/forms/pdfengines/merge"
+
+            pdf_of_html_content = self.generate_pdf_from_html(
+                mail_message.html,
+                mail_message.attachments,
+            )
+
+            pdf_collection = {
+                "1_mail.pdf": ("1_mail.pdf", mail_pdf_file, "application/pdf"),
+                "2_html.pdf": ("2_html.pdf", pdf_of_html_content, "application/pdf"),
+            }
+
+            try:
+                # Open a handle to each file, replacing the tuple
+                for filename in pdf_collection:
+                    file_multi_part = pdf_collection[filename]
+                    pdf_collection[filename] = (
+                        file_multi_part[0],
+                        file_multi_part[1].open("rb"),
+                        file_multi_part[2],
+                    )
+
+                response = httpx.post(url_merge, files=pdf_collection)
+                response.raise_for_status()  # ensure we notice bad responses
+
+                archive_path.write_bytes(response.content)
+            except Exception as err:
+                raise ParseError(
+                    f"Error while merging email HTML into PDF: {err}",
+                ) from err
+            finally:
+                for filename in pdf_collection:
+                    file_multi_part_handle = pdf_collection[filename][1]
+                    file_multi_part_handle.close()
+
+        return archive_path
+
+    def mail_to_html(self, mail: MailMessage) -> Path:
+        """
+        Converts the given email into an HTML file, formatted
+        based on the given template
+        """
+
+        def clean_html(text: str) -> str:
+            """
+            Attempts to clean, escape and linkify the given HTML string
+            """
             if isinstance(text, list):
                 text = "\n".join([str(e) for e in text])
             if type(text) != str:
@@ -211,6 +249,8 @@ class MailDocumentParser(DocumentParser):
             text = text.replace("\n", "<br>")
             return text
 
+        data = {}
+
         data["subject"] = clean_html(mail.subject)
         if data["subject"]:
             data["subject_label"] = "Subject"
@@ -237,27 +277,33 @@ class MailDocumentParser(DocumentParser):
         data["date"] = clean_html(mail.date.astimezone().strftime("%Y-%m-%d %H:%M"))
         data["content"] = clean_html(mail.text.strip())
 
-        html = StringIO()
-
         from django.template.loader import render_to_string
 
-        rendered = render_to_string("email_msg_template.html", context=data)
-
-        html.write(rendered)
-        html.seek(0)
-
-        return html
+        html_file = Path(self.tempdir) / "email_as_html.html"
+        html_file.write_text(render_to_string("email_msg_template.html", context=data))
+
+        return html_file
 
-    def generate_pdf_from_mail(self, mail):
+    def generate_pdf_from_mail(self, mail: MailMessage) -> Path:
+        """
+        Creates a PDF based on the given email, using the email's values in a
+        an HTML template
+        """
         url = self.gotenberg_server + "/forms/chromium/convert/html"
         self.log.info("Converting mail to PDF")
 
-        css_file = os.path.join(os.path.dirname(__file__), "templates/output.css")
+        css_file = Path(__file__).parent / "templates" / "output.css"
+        email_html_file = self.mail_to_html(mail)
+
+        print(css_file)
+        print(email_html_file)
 
-        with open(css_file, "rb") as css_handle:
+        with css_file.open("rb") as css_handle, email_html_file.open(
+            "rb",
+        ) as email_html_handle:
             files = {
-                "html": ("index.html", self.mail_to_html(mail)),
-                "css": ("output.css", css_handle),
+                "html": ("index.html", email_html_handle, "text/html"),
+                "css": ("output.css", css_handle, "text/css"),
             }
             headers = {}
             data = {
@@ -289,13 +335,23 @@ class MailDocumentParser(DocumentParser):
             response.raise_for_status()  # ensure we notice bad responses
         except Exception as err:
             raise ParseError(
-                f"Error while converting document to PDF: {err}",
+                f"Error while converting email to PDF: {err}",
             ) from err
 
-        return response.content
+        email_as_pdf_file = Path(self.tempdir) / "email_as_pdf.pdf"
+        email_as_pdf_file.write_bytes(response.content)
+
+        return email_as_pdf_file
+
+    def generate_pdf_from_html(
+        self,
+        orig_html: str,
+        attachments: List[MailAttachment],
+    ) -> Path:
+        """
+        Generates a PDF file based on the HTML and attachments of the email
+        """
 
-    @staticmethod
-    def transform_inline_html(html, attachments):
         def clean_html_script(text: str):
             compiled_open = re.compile(re.escape("<script"), re.IGNORECASE)
             text = compiled_open.sub("<div hidden ", text)
@@ -304,28 +360,36 @@ class MailDocumentParser(DocumentParser):
             text = compiled_close.sub("</div", text)
             return text
 
-        html_clean = clean_html_script(html)
-        files = []
-
-        for a in attachments:
-            name_cid = "cid:" + a.content_id
-            name_clean = "".join(e for e in name_cid if e.isalnum())
-            files.append((name_clean, BytesIO(a.payload)))
-            html_clean = html_clean.replace(name_cid, name_clean)
-
-        files.append(("index.html", StringIO(html_clean)))
-
-        return files
-
-    def generate_pdf_from_html(self, orig_html, attachments):
         url = self.gotenberg_server + "/forms/chromium/convert/html"
         self.log.info("Converting html to PDF")
 
-        files = {}
-        for name, file in self.transform_inline_html(orig_html, attachments):
-            files[name] = (name, file)
+        tempdir = Path(self.tempdir)
+
+        html_clean = clean_html_script(orig_html)
+
+        files = {}
+
+        for attachment in attachments:
+            # Clean the attachment name to be valid
+            name_cid = f"cid:{attachment.content_id}"
+            name_clean = "".join(e for e in name_cid if e.isalnum())
+
+            # Write attachment payload to a temp file
+            temp_file = tempdir / name_clean
+            temp_file.write_bytes(attachment.payload)
+
+            # Store the attachment for upload
+            files[name_clean] = (name_clean, temp_file, attachment.content_type)
+
+            # Replace as needed the name with the clean name
+            html_clean = html_clean.replace(name_cid, name_clean)
+
+        # Now store the cleaned up HTML version
+        html_clean_file = tempdir / "index.html"
+        html_clean_file.write_text(html_clean)
+
+        files["index.html"] = ("index.html", html_clean_file, "text/html")
 
-        headers = {}
         data = {
             "marginTop": "0.1",
             "marginBottom": "0.1",
@@ -336,14 +400,29 @@ class MailDocumentParser(DocumentParser):
             "scale": "1.0",
         }
         try:
+            # Open a handle to each file, replacing the tuple
+            for filename in files:
+                file_multi_part = files[filename]
+                files[filename] = (
+                    file_multi_part[0],
+                    file_multi_part[1].open("rb"),
+                    file_multi_part[2],
+                )
+
             response = httpx.post(
                 url,
                 files=files,
-                headers=headers,
                 data=data,
             )
             response.raise_for_status()  # ensure we notice bad responses
         except Exception as err:
             raise ParseError(f"Error while converting document to PDF: {err}") from err
+        finally:
+            # Ensure all file handles as closed
+            for filename in files:
+                file_multi_part_handle = files[filename][1]
+                file_multi_part_handle.close()
 
-        return response.content
+        html_pdf = tempdir / "html.pdf"
+        html_pdf.write_bytes(response.content)
+
+        return html_pdf
|
@@ -1,24 +1,39 @@
 import datetime
-import os
+from pathlib import Path
 from unittest import mock
 
+import httpx
 from django.test import TestCase
 
 from documents.parsers import ParseError
 from documents.tests.utils import FileSystemAssertsMixin
 from paperless_mail.parsers import MailDocumentParser
+from paperless_tika.tests.utils import HttpxMockMixin
 
 
-class TestParser(FileSystemAssertsMixin, TestCase):
-    SAMPLE_FILES = os.path.join(os.path.dirname(__file__), "samples")
+class BaseMailParserTestCase(TestCase):
+    """
+    Basic setup for the below test cases
+    """
+
+    SAMPLE_DIR = Path(__file__).parent / "samples"
 
     def setUp(self) -> None:
+        super().setUp()
         self.parser = MailDocumentParser(logging_group=None)
 
     def tearDown(self) -> None:
+        super().tearDown()
         self.parser.cleanup()
 
-    def test_get_parsed_missing_file(self):
+
+class TestEmailFileParsing(FileSystemAssertsMixin, BaseMailParserTestCase):
+    """
+    Tests around reading a file and parsing it into a
+    MailMessage
+    """
+
+    def test_parse_error_missing_file(self):
         """
         GIVEN:
             - Fresh parser
@ -28,13 +43,17 @@ class TestParser(FileSystemAssertsMixin, TestCase):
|
|||||||
- An Exception is thrown
|
- An Exception is thrown
|
||||||
"""
|
"""
|
||||||
# Check if exception is raised when parsing fails.
|
# Check if exception is raised when parsing fails.
|
||||||
|
test_file = self.SAMPLE_DIR / "doesntexist.eml"
|
||||||
|
|
||||||
|
self.assertIsNotFile(test_file)
|
||||||
self.assertRaises(
|
self.assertRaises(
|
||||||
ParseError,
|
ParseError,
|
||||||
self.parser.get_parsed,
|
self.parser.parse,
|
||||||
os.path.join(self.SAMPLE_FILES, "na"),
|
test_file,
|
||||||
|
"messages/rfc822",
|
||||||
)
|
)
|
||||||
|
|
||||||
def test_get_parsed_broken_file(self):
|
def test_parse_error_invalid_email(self):
|
||||||
"""
|
"""
|
||||||
GIVEN:
|
GIVEN:
|
||||||
- Fresh parser
|
- Fresh parser
|
||||||
@ -46,11 +65,12 @@ class TestParser(FileSystemAssertsMixin, TestCase):
|
|||||||
# Check if exception is raised when the mail is faulty.
|
# Check if exception is raised when the mail is faulty.
|
||||||
self.assertRaises(
|
self.assertRaises(
|
||||||
ParseError,
|
ParseError,
|
||||||
self.parser.get_parsed,
|
self.parser.parse,
|
||||||
os.path.join(self.SAMPLE_FILES, "broken.eml"),
|
self.SAMPLE_DIR / "broken.eml",
|
||||||
|
"messages/rfc822",
|
||||||
)
|
)
|
||||||
|
|
||||||
def test_get_parsed_simple_text_mail(self):
|
def test_parse_simple_text_email_file(self):
|
||||||
"""
|
"""
|
||||||
GIVEN:
|
GIVEN:
|
||||||
- Fresh parser
|
- Fresh parser
|
||||||
@ -60,8 +80,8 @@ class TestParser(FileSystemAssertsMixin, TestCase):
|
|||||||
- The content of the mail should be available in the parse result.
|
- The content of the mail should be available in the parse result.
|
||||||
"""
|
"""
|
||||||
# Parse Test file and check relevant content
|
# Parse Test file and check relevant content
|
||||||
parsed1 = self.parser.get_parsed(
|
parsed1 = self.parser.parse_file_to_message(
|
||||||
os.path.join(self.SAMPLE_FILES, "simple_text.eml"),
|
self.SAMPLE_DIR / "simple_text.eml",
|
||||||
)
|
)
|
||||||
|
|
||||||
self.assertEqual(parsed1.date.year, 2022)
|
self.assertEqual(parsed1.date.year, 2022)
|
||||||
@ -76,58 +96,11 @@ class TestParser(FileSystemAssertsMixin, TestCase):
|
|||||||
self.assertEqual(parsed1.text, "This is just a simple Text Mail.\n")
|
self.assertEqual(parsed1.text, "This is just a simple Text Mail.\n")
|
||||||
self.assertEqual(parsed1.to, ("some@one.de",))
|
self.assertEqual(parsed1.to, ("some@one.de",))
|
||||||
|
|
||||||
def test_get_parsed_reparse(self):
|
|
||||||
"""
|
|
||||||
GIVEN:
|
|
||||||
- An E-Mail was parsed
|
|
||||||
WHEN:
|
|
||||||
- Another .eml file should be parsed
|
|
||||||
THEN:
|
|
||||||
- The parser should not retry to parse and return the old results
|
|
||||||
"""
|
|
||||||
# Parse Test file and check relevant content
|
|
||||||
parsed1 = self.parser.get_parsed(
|
|
||||||
os.path.join(self.SAMPLE_FILES, "simple_text.eml"),
|
|
||||||
)
|
|
||||||
# Check if same parsed object as before is returned, even if another file is given.
|
|
||||||
parsed2 = self.parser.get_parsed(
|
|
||||||
os.path.join(os.path.join(self.SAMPLE_FILES, "html.eml")),
|
|
||||||
)
|
|
||||||
self.assertEqual(parsed1, parsed2)
|
|
||||||
|
|
||||||
@mock.patch("paperless_mail.parsers.MailDocumentParser.generate_pdf")
|
class TestEmailMetadataExtraction(BaseMailParserTestCase):
|
||||||
@mock.patch("paperless_mail.parsers.make_thumbnail_from_pdf")
|
"""
|
||||||
def test_get_thumbnail(
|
Tests extraction of metadata from an email
|
||||||
self,
|
"""
|
||||||
mock_make_thumbnail_from_pdf: mock.MagicMock,
|
|
||||||
mock_generate_pdf: mock.MagicMock,
|
|
||||||
):
|
|
||||||
"""
|
|
||||||
GIVEN:
|
|
||||||
- An E-Mail was parsed
|
|
||||||
WHEN:
|
|
||||||
- The Thumbnail is requested
|
|
||||||
THEN:
|
|
||||||
- The parser should call the functions which generate the thumbnail
|
|
||||||
"""
|
|
||||||
mocked_return = "Passing the return value through.."
|
|
||||||
mock_make_thumbnail_from_pdf.return_value = mocked_return
|
|
||||||
|
|
||||||
mock_generate_pdf.return_value = "Mocked return value.."
|
|
||||||
|
|
||||||
thumb = self.parser.get_thumbnail(
|
|
||||||
os.path.join(self.SAMPLE_FILES, "simple_text.eml"),
|
|
||||||
"message/rfc822",
|
|
||||||
)
|
|
||||||
self.assertEqual(
|
|
||||||
self.parser.archive_path,
|
|
||||||
mock_make_thumbnail_from_pdf.call_args_list[0].args[0],
|
|
||||||
)
|
|
||||||
self.assertEqual(
|
|
||||||
self.parser.tempdir,
|
|
||||||
mock_make_thumbnail_from_pdf.call_args_list[0].args[1],
|
|
||||||
)
|
|
||||||
self.assertEqual(mocked_return, thumb)
|
|
||||||
|
|
||||||
def test_extract_metadata_fail(self):
|
def test_extract_metadata_fail(self):
|
||||||
"""
|
"""
|
||||||
@ -157,7 +130,7 @@ class TestParser(FileSystemAssertsMixin, TestCase):
|
|||||||
"""
|
"""
|
||||||
# Validate Metadata parsing returns the expected results
|
# Validate Metadata parsing returns the expected results
|
||||||
metadata = self.parser.extract_metadata(
|
metadata = self.parser.extract_metadata(
|
||||||
os.path.join(self.SAMPLE_FILES, "simple_text.eml"),
|
self.SAMPLE_DIR / "simple_text.eml",
|
||||||
"message/rfc822",
|
"message/rfc822",
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -287,90 +260,53 @@ class TestParser(FileSystemAssertsMixin, TestCase):
|
|||||||
metadata,
|
metadata,
|
||||||
)
|
)
|
||||||
|
|
||||||
def test_parse_na(self):
|
|
||||||
|
class TestEmailThumbnailGenerate(BaseMailParserTestCase):
|
||||||
|
"""
|
||||||
|
Tests the correct generation of an thumbnail for an email
|
||||||
|
"""
|
||||||
|
|
||||||
|
@mock.patch("paperless_mail.parsers.MailDocumentParser.generate_pdf")
|
||||||
|
@mock.patch("paperless_mail.parsers.make_thumbnail_from_pdf")
|
||||||
|
def test_get_thumbnail(
|
||||||
|
self,
|
||||||
|
mock_make_thumbnail_from_pdf: mock.MagicMock,
|
||||||
|
mock_generate_pdf: mock.MagicMock,
|
||||||
|
):
|
||||||
"""
|
"""
|
||||||
GIVEN:
|
GIVEN:
|
||||||
- Fresh start
|
- An E-Mail was parsed
|
||||||
WHEN:
|
WHEN:
|
||||||
- parsing is attempted with nonexistent file
|
- The Thumbnail is requested
|
||||||
THEN:
|
THEN:
|
||||||
- Exception is thrown
|
- The parser should call the functions which generate the thumbnail
|
||||||
"""
|
"""
|
||||||
# Check if exception is raised when parsing fails.
|
mocked_return = "Passing the return value through.."
|
||||||
self.assertRaises(
|
mock_make_thumbnail_from_pdf.return_value = mocked_return
|
||||||
ParseError,
|
|
||||||
self.parser.parse,
|
mock_generate_pdf.return_value = "Mocked return value.."
|
||||||
os.path.join(self.SAMPLE_FILES, "na"),
|
|
||||||
|
test_file = self.SAMPLE_DIR / "simple_text.eml"
|
||||||
|
|
||||||
|
thumb = self.parser.get_thumbnail(
|
||||||
|
test_file,
|
||||||
"message/rfc822",
|
"message/rfc822",
|
||||||
)
|
)
|
||||||
|
|
||||||
@mock.patch("paperless_mail.parsers.MailDocumentParser.tika_parse")
|
mock_generate_pdf.assert_called_once_with(
|
||||||
@mock.patch("paperless_mail.parsers.MailDocumentParser.generate_pdf")
|
test_file,
|
||||||
def test_parse_html_eml(self, n, mock_tika_parse: mock.MagicMock):
|
)
|
||||||
"""
|
mock_make_thumbnail_from_pdf.assert_called_once_with(
|
||||||
GIVEN:
|
"Mocked return value..",
|
||||||
- Fresh start
|
self.parser.tempdir,
|
||||||
WHEN:
|
None,
|
||||||
- parsing is done with html mail
|
|
||||||
THEN:
|
|
||||||
- Tika is called, parsed information from non html parts is available
|
|
||||||
"""
|
|
||||||
# Validate parsing returns the expected results
|
|
||||||
text_expected = "Subject: HTML Message\n\nFrom: Name <someone@example.de>\n\nTo: someone@example.de\n\nAttachments: IntM6gnXFm00FEV5.png (6.89 KiB), 600+kbfile.txt (600.24 KiB)\n\nHTML content: tika return\n\nSome Text and an embedded image."
|
|
||||||
mock_tika_parse.return_value = "tika return"
|
|
||||||
|
|
||||||
self.parser.parse(os.path.join(self.SAMPLE_FILES, "html.eml"), "message/rfc822")
|
|
||||||
|
|
||||||
self.assertEqual(text_expected, self.parser.text)
|
|
||||||
self.assertEqual(
|
|
||||||
datetime.datetime(
|
|
||||||
2022,
|
|
||||||
10,
|
|
||||||
15,
|
|
||||||
11,
|
|
||||||
23,
|
|
||||||
19,
|
|
||||||
tzinfo=datetime.timezone(datetime.timedelta(seconds=7200)),
|
|
||||||
),
|
|
||||||
self.parser.date,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
@mock.patch("paperless_mail.parsers.MailDocumentParser.generate_pdf")
|
self.assertEqual(mocked_return, thumb)
|
||||||
def test_parse_simple_eml(self, m: mock.MagicMock):
|
|
||||||
"""
|
|
||||||
GIVEN:
|
|
||||||
- Fresh start
|
|
||||||
WHEN:
|
|
||||||
- parsing is done with non html mail
|
|
||||||
THEN:
|
|
||||||
- parsed information is available
|
|
||||||
"""
|
|
||||||
# Validate parsing returns the expected results
|
|
||||||
|
|
||||||
self.parser.parse(
|
|
||||||
os.path.join(self.SAMPLE_FILES, "simple_text.eml"),
|
|
||||||
"message/rfc822",
|
|
||||||
)
|
|
||||||
text_expected = "Subject: Simple Text Mail\n\nFrom: Some One <mail@someserver.de>\n\nTo: some@one.de\n\nCC: asdasd@æsdasd.de, asdadasdasdasda.asdasd@æsdasd.de\n\nBCC: fdf@fvf.de\n\n\n\nThis is just a simple Text Mail."
|
|
||||||
self.assertEqual(text_expected, self.parser.text)
|
|
||||||
self.assertEqual(
|
|
||||||
datetime.datetime(
|
|
||||||
2022,
|
|
||||||
10,
|
|
||||||
12,
|
|
||||||
21,
|
|
||||||
40,
|
|
||||||
43,
|
|
||||||
tzinfo=datetime.timezone(datetime.timedelta(seconds=7200)),
|
|
||||||
),
|
|
||||||
self.parser.date,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Just check if tried to generate archive, the unittest for generate_pdf() goes deeper.
|
class TestTikaHtmlParse(HttpxMockMixin, BaseMailParserTestCase):
|
||||||
m.assert_called()
|
def test_tika_parse_unsuccessful(self):
|
||||||
|
|
||||||
@mock.patch("paperless_mail.parsers.parser.from_buffer")
|
|
||||||
def test_tika_parse_unsuccessful(self, mock_from_buffer: mock.MagicMock):
|
|
||||||
"""
|
"""
|
||||||
GIVEN:
|
GIVEN:
|
||||||
- Fresh start
|
- Fresh start
|
||||||
@ -380,12 +316,13 @@ class TestParser(FileSystemAssertsMixin, TestCase):
|
|||||||
- the parser should return an empty string
|
- the parser should return an empty string
|
||||||
"""
|
"""
|
||||||
# Check unsuccessful parsing
|
# Check unsuccessful parsing
|
||||||
mock_from_buffer.return_value = {"content": None}
|
self.httpx_mock.add_response(
|
||||||
parsed = self.parser.tika_parse(None)
|
json={"Content-Type": "text/html", "X-TIKA:Parsed-By": []},
|
||||||
|
)
|
||||||
|
parsed = self.parser.tika_parse("None")
|
||||||
self.assertEqual("", parsed)
|
self.assertEqual("", parsed)
|
||||||
|
|
||||||
@mock.patch("paperless_mail.parsers.parser.from_buffer")
|
def test_tika_parse(self):
|
||||||
def test_tika_parse(self, mock_from_buffer: mock.MagicMock):
|
|
||||||
"""
|
"""
|
||||||
GIVEN:
|
GIVEN:
|
||||||
- Fresh start
|
- Fresh start
|
||||||
@ -397,14 +334,18 @@ class TestParser(FileSystemAssertsMixin, TestCase):
|
|||||||
html = '<html><head><meta http-equiv="content-type" content="text/html; charset=UTF-8"></head><body><p>Some Text</p></body></html>'
|
html = '<html><head><meta http-equiv="content-type" content="text/html; charset=UTF-8"></head><body><p>Some Text</p></body></html>'
|
||||||
expected_text = "Some Text"
|
expected_text = "Some Text"
|
||||||
|
|
||||||
# Check successful parsing
|
self.httpx_mock.add_response(
|
||||||
mock_from_buffer.return_value = {"content": expected_text}
|
json={
|
||||||
|
"Content-Type": "text/html",
|
||||||
|
"X-TIKA:Parsed-By": [],
|
||||||
|
"X-TIKA:content": expected_text,
|
||||||
|
},
|
||||||
|
)
|
||||||
parsed = self.parser.tika_parse(html)
|
parsed = self.parser.tika_parse(html)
|
||||||
self.assertEqual(expected_text, parsed.strip())
|
self.assertEqual(expected_text, parsed.strip())
|
||||||
mock_from_buffer.assert_called_with(html, self.parser.tika_server)
|
self.assertIn(self.parser.tika_server, str(self.httpx_mock.get_request().url))
|
||||||
|
|
||||||
@mock.patch("paperless_mail.parsers.parser.from_buffer")
|
def test_tika_parse_exception(self):
|
||||||
def test_tika_parse_exception(self, mock_from_buffer: mock.MagicMock):
|
|
||||||
"""
|
"""
|
||||||
GIVEN:
|
GIVEN:
|
||||||
- Fresh start
|
- Fresh start
|
||||||
@ -415,11 +356,8 @@ class TestParser(FileSystemAssertsMixin, TestCase):
|
|||||||
"""
|
"""
|
||||||
html = '<html><head><meta http-equiv="content-type" content="text/html; charset=UTF-8"></head><body><p>Some Text</p></body></html>'
|
html = '<html><head><meta http-equiv="content-type" content="text/html; charset=UTF-8"></head><body><p>Some Text</p></body></html>'
|
||||||
|
|
||||||
# Check ParseError
|
self.httpx_mock.add_response(status_code=httpx.codes.INTERNAL_SERVER_ERROR)
|
||||||
def my_side_effect():
|
|
||||||
raise Exception("Test")
|
|
||||||
|
|
||||||
mock_from_buffer.side_effect = my_side_effect
|
|
||||||
self.assertRaises(ParseError, self.parser.tika_parse, html)
|
self.assertRaises(ParseError, self.parser.tika_parse, html)
|
||||||
|
|
||||||
def test_tika_parse_unreachable(self):
|
def test_tika_parse_unreachable(self):
|
||||||
@ -437,258 +375,285 @@ class TestParser(FileSystemAssertsMixin, TestCase):
|
|||||||
self.parser.tika_server = ""
|
self.parser.tika_server = ""
|
||||||
self.assertRaises(ParseError, self.parser.tika_parse, html)
|
self.assertRaises(ParseError, self.parser.tika_parse, html)
|
||||||
|
|
||||||
@mock.patch("paperless_mail.parsers.MailDocumentParser.generate_pdf_from_mail")
|
|
||||||
@mock.patch("paperless_mail.parsers.MailDocumentParser.generate_pdf_from_html")
|
class TestParser(FileSystemAssertsMixin, HttpxMockMixin, BaseMailParserTestCase):
|
||||||
def test_generate_pdf_parse_error(self, m: mock.MagicMock, n: mock.MagicMock):
|
def test_parse_no_file(self):
|
||||||
"""
|
"""
|
||||||
GIVEN:
|
GIVEN:
|
||||||
- Fresh start
|
- Fresh start
|
||||||
WHEN:
|
WHEN:
|
||||||
- pdf generation is requested but gotenberg can not be reached
|
- parsing is attempted with nonexistent file
|
||||||
|
THEN:
|
||||||
|
- Exception is thrown
|
||||||
|
"""
|
||||||
|
# Check if exception is raised when parsing fails.
|
||||||
|
self.assertRaises(
|
||||||
|
ParseError,
|
||||||
|
self.parser.parse,
|
||||||
|
self.SAMPLE_DIR / "na.eml",
|
||||||
|
"message/rfc822",
|
||||||
|
)
|
||||||
|
|
||||||
|
@mock.patch("paperless_mail.parsers.MailDocumentParser.generate_pdf")
|
||||||
|
def test_parse_eml_simple(self, mock_generate_pdf: mock.MagicMock):
|
||||||
|
"""
|
||||||
|
GIVEN:
|
||||||
|
- Fresh start
|
||||||
|
WHEN:
|
||||||
|
- parsing is done with non html mail
|
||||||
|
THEN:
|
||||||
|
- parsed information is available
|
||||||
|
"""
|
||||||
|
# Validate parsing returns the expected results
|
||||||
|
|
||||||
|
self.parser.parse(
|
||||||
|
self.SAMPLE_DIR / "simple_text.eml",
|
||||||
|
"message/rfc822",
|
||||||
|
)
|
||||||
|
text_expected = (
|
||||||
|
"Subject: Simple Text Mail\n\n"
|
||||||
|
"From: Some One <mail@someserver.de>\n\n"
|
||||||
|
"To: some@one.de\n\n"
|
||||||
|
"CC: asdasd@æsdasd.de, asdadasdasdasda.asdasd@æsdasd.de\n\n"
|
||||||
|
"BCC: fdf@fvf.de\n\n"
|
||||||
|
"\n\nThis is just a simple Text Mail."
|
||||||
|
)
|
||||||
|
self.assertEqual(text_expected, self.parser.text)
|
||||||
|
self.assertEqual(
|
||||||
|
datetime.datetime(
|
||||||
|
2022,
|
||||||
|
10,
|
||||||
|
12,
|
||||||
|
21,
|
||||||
|
40,
|
||||||
|
43,
|
||||||
|
tzinfo=datetime.timezone(datetime.timedelta(seconds=7200)),
|
||||||
|
),
|
||||||
|
self.parser.date,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Just check if tried to generate archive, the unittest for generate_pdf() goes deeper.
|
||||||
|
mock_generate_pdf.assert_called()
|
||||||
|
|
||||||
|
@mock.patch("paperless_mail.parsers.MailDocumentParser.generate_pdf")
|
||||||
|
def test_parse_eml_html(self, mock_generate_pdf: mock.MagicMock):
|
||||||
|
"""
|
||||||
|
GIVEN:
|
||||||
|
- Fresh start
|
||||||
|
WHEN:
|
||||||
|
- parsing is done with html mail
|
||||||
|
THEN:
|
||||||
|
- Tika is called, parsed information from non html parts is available
|
||||||
|
"""
|
||||||
|
# Validate parsing returns the expected results
|
||||||
|
text_expected = (
|
||||||
|
"Subject: HTML Message\n\n"
|
||||||
|
"From: Name <someone@example.de>\n\n"
|
||||||
|
"To: someone@example.de\n\n"
|
||||||
|
"Attachments: IntM6gnXFm00FEV5.png (6.89 KiB), 600+kbfile.txt (600.24 KiB)\n\n"
|
||||||
|
"HTML content: tika return\n\n"
|
||||||
|
"Some Text and an embedded image."
|
||||||
|
)
|
||||||
|
|
||||||
|
self.httpx_mock.add_response(
|
||||||
|
json={
|
||||||
|
"Content-Type": "text/html",
|
||||||
|
"X-TIKA:Parsed-By": [],
|
||||||
|
"X-TIKA:content": "tika return",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
self.parser.parse(self.SAMPLE_DIR / "html.eml", "message/rfc822")
|
||||||
|
|
||||||
|
mock_generate_pdf.assert_called_once()
|
||||||
|
self.assertEqual(text_expected, self.parser.text)
|
||||||
|
self.assertEqual(
|
||||||
|
datetime.datetime(
|
||||||
|
2022,
|
||||||
|
10,
|
||||||
|
15,
|
||||||
|
11,
|
||||||
|
23,
|
||||||
|
19,
|
||||||
|
tzinfo=datetime.timezone(datetime.timedelta(seconds=7200)),
|
||||||
|
),
|
||||||
|
self.parser.date,
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_generate_pdf_parse_error(self):
|
||||||
|
"""
|
||||||
|
GIVEN:
|
||||||
|
- Fresh start
|
||||||
|
WHEN:
|
||||||
|
- pdf generation is requested but gotenberg fails
|
||||||
THEN:
|
THEN:
|
||||||
- a ParseError Exception is thrown
|
- a ParseError Exception is thrown
|
||||||
"""
|
"""
|
||||||
m.return_value = b""
|
self.httpx_mock.add_response(status_code=httpx.codes.INTERNAL_SERVER_ERROR)
|
||||||
n.return_value = b""
|
|
||||||
|
|
||||||
# Check if exception is raised when the pdf can not be created.
|
|
||||||
self.parser.gotenberg_server = ""
|
|
||||||
self.assertRaises(
|
self.assertRaises(
|
||||||
ParseError,
|
ParseError,
|
||||||
self.parser.generate_pdf,
|
self.parser.parse,
|
||||||
os.path.join(self.SAMPLE_FILES, "html.eml"),
|
self.SAMPLE_DIR / "simple_text.eml",
|
||||||
|
"message/rfc822",
|
||||||
)
|
)
|
||||||
|
|
||||||
def test_generate_pdf_exception(self):
|
def test_generate_pdf_simple_email(self):
|
||||||
"""
|
"""
|
||||||
GIVEN:
|
GIVEN:
|
||||||
- Fresh start
|
- Simple text email with no HTML content
|
||||||
WHEN:
|
WHEN:
|
||||||
- pdf generation is requested but parsing throws an exception
|
- Email is parsed
|
||||||
THEN:
|
THEN:
|
||||||
- a ParseError Exception is thrown
|
- Gotenberg is called to generate a PDF from HTML
|
||||||
|
- Archive file is generated
|
||||||
"""
|
"""
|
||||||
# Check if exception is raised when the mail can not be parsed.
|
|
||||||
self.assertRaises(
|
self.httpx_mock.add_response(
|
||||||
ParseError,
|
url="http://localhost:3000/forms/chromium/convert/html",
|
||||||
self.parser.generate_pdf,
|
method="POST",
|
||||||
os.path.join(self.SAMPLE_FILES, "broken.eml"),
|
content=(self.SAMPLE_DIR / "simple_text.eml.pdf").read_bytes(),
|
||||||
)
|
)
|
||||||
|
|
||||||
@mock.patch("paperless_mail.parsers.requests.post")
|
self.parser.parse(self.SAMPLE_DIR / "simple_text.eml", "message/rfc822")
|
||||||
@mock.patch("paperless_mail.parsers.MailDocumentParser.generate_pdf_from_mail")
|
|
||||||
@mock.patch("paperless_mail.parsers.MailDocumentParser.generate_pdf_from_html")
|
self.assertIsNotNone(self.parser.archive_path)
|
||||||
def test_generate_pdf(
|
|
||||||
self,
|
def test_generate_pdf_html_email(self):
|
||||||
mock_generate_pdf_from_html: mock.MagicMock,
|
|
||||||
mock_generate_pdf_from_mail: mock.MagicMock,
|
|
||||||
mock_post: mock.MagicMock,
|
|
||||||
):
|
|
||||||
"""
|
"""
|
||||||
GIVEN:
|
GIVEN:
|
||||||
- Fresh start
|
- email with HTML content
|
||||||
WHEN:
|
WHEN:
|
||||||
- pdf generation is requested
|
- Email is parsed
|
||||||
THEN:
|
THEN:
|
||||||
- gotenberg is called and the resulting file is returned
|
- Gotenberg is called to generate a PDF from HTML
|
||||||
|
- Gotenberg is used to merge the two PDFs
|
||||||
|
- Archive file is generated
|
||||||
"""
|
"""
|
||||||
mock_generate_pdf_from_mail.return_value = b"Mail Return"
|
self.httpx_mock.add_response(
|
||||||
mock_generate_pdf_from_html.return_value = b"HTML Return"
|
url="http://localhost:9998/tika/text",
|
||||||
|
method="PUT",
|
||||||
|
json={
|
||||||
|
"Content-Type": "text/html",
|
||||||
|
"X-TIKA:Parsed-By": [],
|
||||||
|
"X-TIKA:content": "This is some Tika HTML text",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
self.httpx_mock.add_response(
|
||||||
|
url="http://localhost:3000/forms/chromium/convert/html",
|
||||||
|
method="POST",
|
||||||
|
content=(self.SAMPLE_DIR / "html.eml.pdf").read_bytes(),
|
||||||
|
)
|
||||||
|
self.httpx_mock.add_response(
|
||||||
|
url="http://localhost:3000/forms/pdfengines/merge",
|
||||||
|
method="POST",
|
||||||
|
content=b"Pretend merged PDF content",
|
||||||
|
)
|
||||||
|
self.parser.parse(self.SAMPLE_DIR / "html.eml", "message/rfc822")
|
||||||
|
|
||||||
mock_response = mock.MagicMock()
|
self.assertIsNotNone(self.parser.archive_path)
|
||||||
mock_response.content = b"Content"
|
|
||||||
mock_post.return_value = mock_response
|
|
||||||
pdf_path = self.parser.generate_pdf(os.path.join(self.SAMPLE_FILES, "html.eml"))
|
|
||||||
self.assertIsFile(pdf_path)
|
|
||||||
|
|
||||||
mock_generate_pdf_from_mail.assert_called_once_with(
|
def test_generate_pdf_html_email_html_to_pdf_failure(self):
|
||||||
self.parser.get_parsed(None),
|
"""
|
||||||
|
GIVEN:
|
||||||
|
- email with HTML content
|
||||||
|
WHEN:
|
||||||
|
- Email is parsed
|
||||||
|
- Conversion of email HTML content to PDF fails
|
||||||
|
THEN:
|
||||||
|
- ParseError is raised
|
||||||
|
"""
|
||||||
|
self.httpx_mock.add_response(
|
||||||
|
url="http://localhost:9998/tika/text",
|
||||||
|
method="PUT",
|
||||||
|
json={
|
||||||
|
"Content-Type": "text/html",
|
||||||
|
"X-TIKA:Parsed-By": [],
|
||||||
|
"X-TIKA:content": "This is some Tika HTML text",
|
||||||
|
},
|
||||||
)
|
)
|
||||||
mock_generate_pdf_from_html.assert_called_once_with(
|
self.httpx_mock.add_response(
|
||||||
self.parser.get_parsed(None).html,
|
url="http://localhost:3000/forms/chromium/convert/html",
|
||||||
self.parser.get_parsed(None).attachments,
|
method="POST",
|
||||||
|
content=(self.SAMPLE_DIR / "html.eml.pdf").read_bytes(),
|
||||||
)
|
)
|
||||||
self.assertEqual(
|
self.httpx_mock.add_response(
|
||||||
self.parser.gotenberg_server + "/forms/pdfengines/merge",
|
url="http://localhost:3000/forms/chromium/convert/html",
|
||||||
mock_post.call_args.args[0],
|
method="POST",
|
||||||
)
|
status_code=httpx.codes.INTERNAL_SERVER_ERROR,
|
||||||
self.assertEqual({}, mock_post.call_args.kwargs["headers"])
|
|
||||||
self.assertEqual(
|
|
||||||
b"Mail Return",
|
|
||||||
mock_post.call_args.kwargs["files"]["1_mail.pdf"][1].read(),
|
|
||||||
)
|
|
||||||
self.assertEqual(
|
|
||||||
b"HTML Return",
|
|
||||||
mock_post.call_args.kwargs["files"]["2_html.pdf"][1].read(),
|
|
||||||
)
|
)
|
||||||
|
with self.assertRaises(ParseError):
|
||||||
|
self.parser.parse(self.SAMPLE_DIR / "html.eml", "message/rfc822")
|
||||||
|
|
||||||
mock_response.raise_for_status.assert_called_once()
|
def test_generate_pdf_html_email_merge_failure(self):
|
||||||
|
"""
|
||||||
with open(pdf_path, "rb") as file:
|
GIVEN:
|
||||||
self.assertEqual(b"Content", file.read())
|
- email with HTML content
|
||||||
|
WHEN:
|
||||||
|
- Email is parsed
|
||||||
|
- Merging of PDFs fails
|
||||||
|
THEN:
|
||||||
|
- ParseError is raised
|
||||||
|
"""
|
||||||
|
self.httpx_mock.add_response(
|
||||||
|
url="http://localhost:9998/tika/text",
|
||||||
|
method="PUT",
|
||||||
|
json={
|
||||||
|
"Content-Type": "text/html",
|
||||||
|
"X-TIKA:Parsed-By": [],
|
||||||
|
"X-TIKA:content": "This is some Tika HTML text",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
self.httpx_mock.add_response(
|
||||||
|
url="http://localhost:3000/forms/chromium/convert/html",
|
||||||
|
method="POST",
|
||||||
|
content=(self.SAMPLE_DIR / "html.eml.pdf").read_bytes(),
|
||||||
|
)
|
||||||
|
self.httpx_mock.add_response(
|
||||||
|
url="http://localhost:3000/forms/pdfengines/merge",
|
||||||
|
method="POST",
|
||||||
|
status_code=httpx.codes.INTERNAL_SERVER_ERROR,
|
||||||
|
)
|
||||||
|
with self.assertRaises(ParseError):
|
||||||
|
self.parser.parse(self.SAMPLE_DIR / "html.eml", "message/rfc822")
|
||||||
|
|
||||||
def test_mail_to_html(self):
|
def test_mail_to_html(self):
|
||||||
"""
|
"""
|
||||||
GIVEN:
|
GIVEN:
|
||||||
- Fresh start
|
- Email message with HTML content
|
||||||
WHEN:
|
WHEN:
|
||||||
- conversion from eml to html is requested
|
- Email is parsed
|
||||||
THEN:
|
THEN:
|
||||||
- html should be returned
|
- Resulting HTML is as expected
|
||||||
"""
|
"""
|
||||||
mail = self.parser.get_parsed(os.path.join(self.SAMPLE_FILES, "html.eml"))
|
mail = self.parser.parse_file_to_message(self.SAMPLE_DIR / "html.eml")
|
||||||
html_handle = self.parser.mail_to_html(mail)
|
html_file = self.parser.mail_to_html(mail)
|
||||||
html_received = html_handle.read()
|
expected_html_file = self.SAMPLE_DIR / "html.eml.html"
|
||||||
|
|
||||||
with open(
|
self.assertHTMLEqual(expected_html_file.read_text(), html_file.read_text())
|
||||||
os.path.join(self.SAMPLE_FILES, "html.eml.html"),
|
|
||||||
) as html_expected_handle:
|
|
||||||
html_expected = html_expected_handle.read()
|
|
||||||
|
|
||||||
self.assertHTMLEqual(html_expected, html_received)
|
|
||||||
|
|
||||||
@mock.patch("paperless_mail.parsers.requests.post")
|
|
||||||
@mock.patch("paperless_mail.parsers.MailDocumentParser.mail_to_html")
|
|
||||||
def test_generate_pdf_from_mail(
|
def test_generate_pdf_from_mail(
|
||||||
self,
|
self,
|
||||||
mock_mail_to_html: mock.MagicMock,
|
|
||||||
mock_post: mock.MagicMock,
|
|
||||||
):
|
):
|
||||||
"""
|
"""
|
||||||
GIVEN:
|
GIVEN:
|
||||||
- Fresh start
|
- Email message with HTML content
|
||||||
WHEN:
|
WHEN:
|
||||||
- conversion of PDF from .eml is requested
|
- Email is parsed
|
||||||
THEN:
|
THEN:
|
||||||
- gotenberg should be called with valid intermediary html files, the resulting pdf is returned
|
- Gotenberg is used to convert HTML to PDF
|
||||||
"""
|
"""
|
||||||
mock_response = mock.MagicMock()
|
|
||||||
mock_response.content = b"Content"
|
|
||||||
mock_post.return_value = mock_response
|
|
||||||
|
|
||||||
mock_mail_to_html.return_value = "Testresponse"
|
self.httpx_mock.add_response(content=b"Content")
|
||||||
|
|
||||||
mail = self.parser.get_parsed(os.path.join(self.SAMPLE_FILES, "html.eml"))
|
mail = self.parser.parse_file_to_message(self.SAMPLE_DIR / "html.eml")
|
||||||
|
|
||||||
retval = self.parser.generate_pdf_from_mail(mail)
|
retval = self.parser.generate_pdf_from_mail(mail)
|
||||||
self.assertEqual(b"Content", retval)
|
self.assertEqual(b"Content", retval.read_bytes())
|
||||||
|
|
||||||
|
request = self.httpx_mock.get_request()
|
||||||
|
|
||||||
mock_mail_to_html.assert_called_once_with(mail)
|
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
|
str(request.url),
|
||||||
self.parser.gotenberg_server + "/forms/chromium/convert/html",
|
self.parser.gotenberg_server + "/forms/chromium/convert/html",
|
||||||
mock_post.call_args.args[0],
|
|
||||||
)
|
)
|
||||||
self.assertDictEqual({}, mock_post.call_args.kwargs["headers"])
|
|
||||||
self.assertDictEqual(
|
|
||||||
{
|
|
||||||
"marginTop": "0.1",
|
|
||||||
"marginBottom": "0.1",
|
|
||||||
"marginLeft": "0.1",
|
|
||||||
"marginRight": "0.1",
|
|
||||||
"paperWidth": "8.27",
|
|
||||||
"paperHeight": "11.7",
|
|
||||||
"scale": "1.0",
|
|
||||||
"pdfFormat": "PDF/A-2b",
|
|
||||||
},
|
|
||||||
mock_post.call_args.kwargs["data"],
|
|
||||||
)
|
|
||||||
self.assertEqual(
|
|
||||||
"Testresponse",
|
|
||||||
mock_post.call_args.kwargs["files"]["html"][1],
|
|
-        )
-        self.assertEqual(
-            "output.css",
-            mock_post.call_args.kwargs["files"]["css"][0],
-        )
-
-        mock_response.raise_for_status.assert_called_once()
-
-    def test_transform_inline_html(self):
-        """
-        GIVEN:
-            - Fresh start
-        WHEN:
-            - transforming of html content from an email with an inline image attachment is requested
-        THEN:
-            - html is returned and sanitized
-        """
-
-        class MailAttachmentMock:
-            def __init__(self, payload, content_id):
-                self.payload = payload
-                self.content_id = content_id
-
-        result = None
-
-        with open(os.path.join(self.SAMPLE_FILES, "sample.html")) as html_file:
-            with open(os.path.join(self.SAMPLE_FILES, "sample.png"), "rb") as png_file:
-                html = html_file.read()
-                png = png_file.read()
-                attachments = [
-                    MailAttachmentMock(png, "part1.pNdUSz0s.D3NqVtPg@example.de"),
-                ]
-                result = self.parser.transform_inline_html(html, attachments)
-
-        resulting_html = result[-1][1].read()
-        self.assertTrue(result[-1][0] == "index.html")
-        self.assertIn(result[0][0], resulting_html)
-        self.assertNotIn("<script", resulting_html.lower())
-
-    @mock.patch("paperless_mail.parsers.requests.post")
-    def test_generate_pdf_from_html(self, mock_post: mock.MagicMock):
-        """
-        GIVEN:
-            - Fresh start
-        WHEN:
-            - generating pdf from html with inline attachments is attempted
-        THEN:
-            - gotenberg is called with the correct parameters and the resulting pdf is returned
-        """
-
-        class MailAttachmentMock:
-            def __init__(self, payload, content_id):
-                self.payload = payload
-                self.content_id = content_id
-
-        mock_response = mock.MagicMock()
-        mock_response.content = b"Content"
-        mock_post.return_value = mock_response
-
-        result = None
-
-        with open(os.path.join(self.SAMPLE_FILES, "sample.html")) as html_file:
-            with open(os.path.join(self.SAMPLE_FILES, "sample.png"), "rb") as png_file:
-                html = html_file.read()
-                png = png_file.read()
-                attachments = [
-                    MailAttachmentMock(png, "part1.pNdUSz0s.D3NqVtPg@example.de"),
-                ]
-                result = self.parser.generate_pdf_from_html(html, attachments)
-
-        self.assertEqual(
-            self.parser.gotenberg_server + "/forms/chromium/convert/html",
-            mock_post.call_args.args[0],
-        )
-        self.assertDictEqual({}, mock_post.call_args.kwargs["headers"])
-        self.assertDictEqual(
-            {
-                "marginTop": "0.1",
-                "marginBottom": "0.1",
-                "marginLeft": "0.1",
-                "marginRight": "0.1",
-                "paperWidth": "8.27",
-                "paperHeight": "11.7",
-                "scale": "1.0",
-            },
-            mock_post.call_args.kwargs["data"],
-        )
-
-        # read to assert it is a file like object.
-        mock_post.call_args.kwargs["files"]["cidpart1pNdUSz0sD3NqVtPgexamplede"][
-            1
-        ].read()
-        mock_post.call_args.kwargs["files"]["index.html"][1].read()
-
-        mock_response.raise_for_status.assert_called_once()
-
-        self.assertEqual(b"Content", result)
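For reference only (not part of this diff): the removed test above asserted the form fields and endpoint of a Gotenberg HTML conversion. A request of that shape can be reproduced with httpx, the client this commit standardises on; the server URL and file names below are placeholders, not values from the change.

import httpx

gotenberg_server = "http://localhost:3000"  # placeholder

with open("index.html", "rb") as html_file, open("inline.png", "rb") as png_file:
    response = httpx.post(
        gotenberg_server + "/forms/chromium/convert/html",
        data={
            # Same form fields the removed assertion checked
            "marginTop": "0.1",
            "marginBottom": "0.1",
            "marginLeft": "0.1",
            "marginRight": "0.1",
            "paperWidth": "8.27",
            "paperHeight": "11.7",
            "scale": "1.0",
        },
        files={
            "index.html": ("index.html", html_file),
            "inline.png": ("inline.png", png_file),
        },
        timeout=30.0,
    )
    response.raise_for_status()

pdf_bytes = response.content  # the converted PDF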
@@ -1,29 +1,80 @@
 import os
 import time
 from unittest import mock
-from urllib.error import HTTPError
-from urllib.request import urlopen

+import httpx
 import pytest
 from django.test import TestCase
 from imagehash import average_hash
 from pdfminer.high_level import extract_text
 from PIL import Image

-from documents.parsers import run_convert
 from documents.tests.utils import FileSystemAssertsMixin
-from paperless_mail.parsers import MailDocumentParser
+from paperless_mail.tests.test_parsers import BaseMailParserTestCase


-class TestParserLive(FileSystemAssertsMixin, TestCase):
-    SAMPLE_FILES = os.path.join(os.path.dirname(__file__), "samples")
-
-    def setUp(self) -> None:
-        self.parser = MailDocumentParser(logging_group=None)
-
-    def tearDown(self) -> None:
-        self.parser.cleanup()
+class MailAttachmentMock:
+    def __init__(self, payload, content_id):
+        self.payload = payload
+        self.content_id = content_id
+        self.content_type = "image/png"
+
+
+@pytest.mark.skipif(
+    "PAPERLESS_CI_TEST" not in os.environ,
+    reason="No Gotenberg/Tika servers to test with",
+)
+class TestUrlCanary(TestCase):
+    """
+    Verify certain URLs are still available so testing is valid still
+    """
+
+    def test_online_image_exception_on_not_available(self):
+        """
+        GIVEN:
+            - Fresh start
+        WHEN:
+            - nonexistent image is requested
+        THEN:
+            - An exception shall be thrown
+        """
+        """
+        A public image is used in the html sample file. We have no control
+        whether this image stays online forever, so here we check if we can detect if is not
+        available anymore.
+        """
+        with self.assertRaises(httpx.HTTPStatusError) as cm:
+            resp = httpx.get(
+                "https://upload.wikimedia.org/wikipedia/en/f/f7/nonexistent.png",
+            )
+            resp.raise_for_status()
+
+        self.assertEqual(cm.exception.response.status_code, httpx.codes.NOT_FOUND)
+
+    def test_is_online_image_still_available(self):
+        """
+        GIVEN:
+            - Fresh start
+        WHEN:
+            - A public image used in the html sample file is requested
+        THEN:
+            - No exception shall be thrown
+        """
+        """
+        A public image is used in the html sample file. We have no control
+        whether this image stays online forever, so here we check if it is still there
+        """
+
+        # Now check the URL used in samples/sample.html
+        resp = httpx.get("https://upload.wikimedia.org/wikipedia/en/f/f7/RickRoll.png")
+        resp.raise_for_status()
+
+
+@pytest.mark.skipif(
+    "PAPERLESS_CI_TEST" not in os.environ,
+    reason="No Gotenberg/Tika servers to test with",
+)
+class TestParserLive(FileSystemAssertsMixin, BaseMailParserTestCase):
     @staticmethod
     def imagehash(file, hash_size=18):
         return f"{average_hash(Image.open(file), hash_size)}"

@@ -54,13 +105,18 @@ class TestParserLive(FileSystemAssertsMixin, TestCase):
                 result = method_or_callable(*args)

                 succeeded = True
-            except Exception as e:
+            except httpx.HTTPError as e:
+                raise
+                # Retry on HTTP errors
                 print(f"{e} during try #{retry_count}", flush=True)

                 retry_count = retry_count + 1

                 time.sleep(retry_time)
                 retry_time = retry_time * 2.0
+            except Exception:
+                # Not on other error
+                raise

         self.assertTrue(
             succeeded,

@@ -79,17 +135,14 @@ class TestParserLive(FileSystemAssertsMixin, TestCase):
         THEN:
             - The returned thumbnail image file is as expected
         """
-        mock_generate_pdf.return_value = os.path.join(
-            self.SAMPLE_FILES,
-            "simple_text.eml.pdf",
-        )
+        mock_generate_pdf.return_value = self.SAMPLE_DIR / "simple_text.eml.pdf"
         thumb = self.parser.get_thumbnail(
-            os.path.join(self.SAMPLE_FILES, "simple_text.eml"),
+            self.SAMPLE_DIR / "simple_text.eml",
             "message/rfc822",
         )
         self.assertIsFile(thumb)

-        expected = os.path.join(self.SAMPLE_FILES, "simple_text.eml.pdf.webp")
+        expected = self.SAMPLE_DIR / "simple_text.eml.pdf.webp"

         self.assertEqual(
             self.imagehash(thumb),

@@ -97,10 +150,6 @@ class TestParserLive(FileSystemAssertsMixin, TestCase):
             f"Created Thumbnail {thumb} differs from expected file {expected}",
         )

-    @pytest.mark.skipif(
-        "TIKA_LIVE" not in os.environ,
-        reason="No tika server",
-    )
     def test_tika_parse_successful(self):
         """
         GIVEN:

@@ -117,27 +166,6 @@ class TestParserLive(FileSystemAssertsMixin, TestCase):
         parsed = self.parser.tika_parse(html)
         self.assertEqual(expected_text, parsed.strip())

-    @pytest.mark.skipif(
-        "TIKA_LIVE" not in os.environ,
-        reason="No tika server",
-    )
-    def test_tika_parse_unsuccessful(self):
-        """
-        GIVEN:
-            - Fresh start
-        WHEN:
-            - tika parsing fails
-        THEN:
-            - the parser should return an empty string
-        """
-        # Check unsuccessful parsing
-        parsed = self.parser.tika_parse(None)
-        self.assertEqual("", parsed)
-
-    @pytest.mark.skipif(
-        "GOTENBERG_LIVE" not in os.environ,
-        reason="No gotenberg server",
-    )
     @mock.patch("paperless_mail.parsers.MailDocumentParser.generate_pdf_from_mail")
     @mock.patch("paperless_mail.parsers.MailDocumentParser.generate_pdf_from_html")
     def test_generate_pdf_gotenberg_merging(

@@ -153,15 +181,16 @@ class TestParserLive(FileSystemAssertsMixin, TestCase):
         THEN:
             - gotenberg is called to merge files and the resulting file is returned
         """
-        with open(os.path.join(self.SAMPLE_FILES, "first.pdf"), "rb") as first:
-            mock_generate_pdf_from_mail.return_value = first.read()
+        mock_generate_pdf_from_mail.return_value = self.SAMPLE_DIR / "first.pdf"
+        mock_generate_pdf_from_html.return_value = self.SAMPLE_DIR / "second.pdf"

-        with open(os.path.join(self.SAMPLE_FILES, "second.pdf"), "rb") as second:
-            mock_generate_pdf_from_html.return_value = second.read()
+        msg = self.parser.parse_file_to_message(
+            self.SAMPLE_DIR / "html.eml",
+        )

         pdf_path = self.util_call_with_backoff(
             self.parser.generate_pdf,
-            [os.path.join(self.SAMPLE_FILES, "html.eml")],
+            [msg],
         )
         self.assertIsFile(pdf_path)

@@ -169,38 +198,9 @@ class TestParserLive(FileSystemAssertsMixin, TestCase):
         expected = (
             "first\tPDF\tto\tbe\tmerged.\n\n\x0csecond\tPDF\tto\tbe\tmerged.\n\n\x0c"
         )

         self.assertEqual(expected, extracted)

-    @pytest.mark.skipif(
-        "GOTENBERG_LIVE" not in os.environ,
-        reason="No gotenberg server",
-    )
-    def test_generate_pdf_from_mail_no_convert(self):
-        """
-        GIVEN:
-            - Fresh start
-        WHEN:
-            - pdf generation from simple eml file is requested
-        THEN:
-            - gotenberg is called and the resulting file is returned and contains the expected text.
-        """
-        mail = self.parser.get_parsed(os.path.join(self.SAMPLE_FILES, "html.eml"))
-
-        pdf_path = os.path.join(self.parser.tempdir, "html.eml.pdf")
-
-        with open(pdf_path, "wb") as file:
-            file.write(
-                self.util_call_with_backoff(self.parser.generate_pdf_from_mail, [mail]),
-            )
-
-        extracted = extract_text(pdf_path)
-        expected = extract_text(os.path.join(self.SAMPLE_FILES, "html.eml.pdf"))
-        self.assertEqual(expected, extracted)
-
-    @pytest.mark.skipif(
-        "GOTENBERG_LIVE" not in os.environ,
-        reason="No gotenberg server",
-    )
     def test_generate_pdf_from_mail(self):
         """
         GIVEN:

@@ -210,193 +210,32 @@ class TestParserLive(FileSystemAssertsMixin, TestCase):
         THEN:
             - gotenberg is called and the resulting file is returned and look as expected.
         """
-        mail = self.parser.get_parsed(os.path.join(self.SAMPLE_FILES, "html.eml"))
-
-        pdf_path = os.path.join(self.parser.tempdir, "html.eml.pdf")
-
-        with open(pdf_path, "wb") as file:
-            file.write(
-                self.util_call_with_backoff(self.parser.generate_pdf_from_mail, [mail]),
-            )
-
-        converted = os.path.join(
-            self.parser.tempdir,
-            "html.eml.pdf.webp",
-        )
-        run_convert(
-            density=300,
-            scale="500x5000>",
-            alpha="remove",
-            strip=True,
-            trim=False,
-            auto_orient=True,
-            input_file=f"{pdf_path}",  # Do net define an index to convert all pages.
-            output_file=converted,
-            logging_group=None,
-        )
-        self.assertIsFile(converted)
-        thumb_hash = self.imagehash(converted)
-
-        # The created pdf is not reproducible. But the converted image should always look the same.
-        expected_hash = self.imagehash(
-            os.path.join(self.SAMPLE_FILES, "html.eml.pdf.webp"),
-        )
-        self.assertEqual(
-            thumb_hash,
-            expected_hash,
-            f"PDF looks different. Check if {converted} looks weird.",
-        )
-
-    @pytest.mark.skipif(
-        "GOTENBERG_LIVE" not in os.environ,
-        reason="No gotenberg server",
-    )
-    def test_generate_pdf_from_html_no_convert(self):
-        """
-        GIVEN:
-            - Fresh start
-        WHEN:
-            - pdf generation from html eml file is requested
-        THEN:
-            - gotenberg is called and the resulting file is returned and contains the expected text.
-        """
-
-        class MailAttachmentMock:
-            def __init__(self, payload, content_id):
-                self.payload = payload
-                self.content_id = content_id
-
-        result = None
-
-        with open(os.path.join(self.SAMPLE_FILES, "sample.html")) as html_file:
-            with open(os.path.join(self.SAMPLE_FILES, "sample.png"), "rb") as png_file:
-                html = html_file.read()
-                png = png_file.read()
-                attachments = [
-                    MailAttachmentMock(png, "part1.pNdUSz0s.D3NqVtPg@example.de"),
-                ]
-                result = self.util_call_with_backoff(
-                    self.parser.generate_pdf_from_html,
-                    [html, attachments],
-                )
-
-        pdf_path = os.path.join(self.parser.tempdir, "sample.html.pdf")
-
-        with open(pdf_path, "wb") as file:
-            file.write(result)
-
-        extracted = extract_text(pdf_path)
-        expected = extract_text(os.path.join(self.SAMPLE_FILES, "sample.html.pdf"))
-        self.assertEqual(expected, extracted)
-
-    @pytest.mark.skipif(
-        "GOTENBERG_LIVE" not in os.environ,
-        reason="No gotenberg server",
-    )
-    def test_generate_pdf_from_html(self):
-        """
-        GIVEN:
-            - Fresh start
-        WHEN:
-            - pdf generation from html eml file is requested
-        THEN:
-            - gotenberg is called and the resulting file is returned and look as expected.
-        """
-
-        class MailAttachmentMock:
-            def __init__(self, payload, content_id):
-                self.payload = payload
-                self.content_id = content_id
-
-        result = None
-
-        with open(os.path.join(self.SAMPLE_FILES, "sample.html")) as html_file:
-            with open(os.path.join(self.SAMPLE_FILES, "sample.png"), "rb") as png_file:
-                html = html_file.read()
-                png = png_file.read()
-                attachments = [
-                    MailAttachmentMock(png, "part1.pNdUSz0s.D3NqVtPg@example.de"),
-                ]
-                result = self.util_call_with_backoff(
-                    self.parser.generate_pdf_from_html,
-                    [html, attachments],
-                )
-
-        pdf_path = os.path.join(self.parser.tempdir, "sample.html.pdf")
-
-        with open(pdf_path, "wb") as file:
-            file.write(result)
-
-        converted = os.path.join(self.parser.tempdir, "sample.html.pdf.webp")
-        run_convert(
-            density=300,
-            scale="500x5000>",
-            alpha="remove",
-            strip=True,
-            trim=False,
-            auto_orient=True,
-            input_file=f"{pdf_path}",  # Do net define an index to convert all pages.
-            output_file=converted,
-            logging_group=None,
-        )
-        self.assertIsFile(converted)
-        thumb_hash = self.imagehash(converted)
-
-        # The created pdf is not reproducible. But the converted image should always look the same.
-        expected_hash = self.imagehash(
-            os.path.join(self.SAMPLE_FILES, "sample.html.pdf.webp"),
-        )
-
-        self.assertEqual(
-            thumb_hash,
-            expected_hash,
-            f"PDF looks different. Check if {converted} looks weird. "
-            f"If Rick Astley is shown, Gotenberg loads from web which is bad for Mail content.",
-        )
-
-    @pytest.mark.skipif(
-        "GOTENBERG_LIVE" not in os.environ,
-        reason="No gotenberg server",
-    )
-    def test_online_image_exception_on_not_available(self):
-        """
-        GIVEN:
-            - Fresh start
-        WHEN:
-            - nonexistent image is requested
-        THEN:
-            - An exception shall be thrown
-        """
-        """
-        A public image is used in the html sample file. We have no control
-        whether this image stays online forever, so here we check if we can detect if is not
-        available anymore.
-        """
-
-        # Start by Testing if nonexistent URL really throws an Exception
-        self.assertRaises(
-            HTTPError,
-            urlopen,
-            "https://upload.wikimedia.org/wikipedia/en/f/f7/nonexistent.png",
-        )
-
-    @pytest.mark.skipif(
-        "GOTENBERG_LIVE" not in os.environ,
-        reason="No gotenberg server",
-    )
-    def test_is_online_image_still_available(self):
-        """
-        GIVEN:
-            - Fresh start
-        WHEN:
-            - A public image used in the html sample file is requested
-        THEN:
-            - No exception shall be thrown
-        """
-        """
-        A public image is used in the html sample file. We have no control
-        whether this image stays online forever, so here we check if it is still there
-        """
-
-        # Now check the URL used in samples/sample.html
-        urlopen("https://upload.wikimedia.org/wikipedia/en/f/f7/RickRoll.png")
+        self.util_call_with_backoff(
+            self.parser.parse,
+            [self.SAMPLE_DIR / "html.eml", "message/rfc822"],
+        )
+
+        # Check the archive PDF
+        archive_path = self.parser.get_archive_path()
+        archive_text = extract_text(archive_path)
+        expected_archive_text = extract_text(self.SAMPLE_DIR / "html.eml.pdf")
+
+        # Archive includes the HTML content, so use in
+        self.assertIn(expected_archive_text, archive_text)
+
+        # Check the thumbnail
+        generated_thumbnail = self.parser.get_thumbnail(
+            self.SAMPLE_DIR / "html.eml",
+            "message/rfc822",
+        )
+        generated_thumbnail_hash = self.imagehash(generated_thumbnail)
+
+        # The created pdf is not reproducible. But the converted image should always look the same.
+        expected_hash = self.imagehash(self.SAMPLE_DIR / "html.eml.pdf.webp")
+
+        self.assertEqual(
+            generated_thumbnail_hash,
+            expected_hash,
+            f"PDF looks different. Check if {generated_thumbnail} looks weird.",
+        )
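As context for the hunk above: the live tests wrap flaky Gotenberg/Tika calls in a retry helper that backs off on HTTP failures and re-raises anything else. A condensed, illustrative sketch of that pattern (not the file's exact helper) looks like this:

import time

import httpx


def call_with_backoff(func, args, max_retry_count=3):
    retry_time = 1.0
    for retry_count in range(max_retry_count):
        try:
            return func(*args)
        except httpx.HTTPError as e:
            # Retry on HTTP errors with exponential backoff
            print(f"{e} during try #{retry_count}", flush=True)
            time.sleep(retry_time)
            retry_time *= 2.0
        except Exception:
            # Not on other error: propagate immediately
            raise
    raise RuntimeError(f"Still failing after {max_retry_count} tries")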
@@ -1,10 +1,9 @@
 import os
 from pathlib import Path

-import dateutil.parser
 import httpx
 from django.conf import settings
-from tika import parser
+from tika_client import TikaClient

 from documents.parsers import DocumentParser
 from documents.parsers import ParseError

@@ -29,55 +28,38 @@ class TikaDocumentParser(DocumentParser):
         )

     def extract_metadata(self, document_path, mime_type):
-        tika_server = settings.TIKA_ENDPOINT
-
-        # tika does not support a PathLike, only strings
-        # ensure this is a string
-        document_path = str(document_path)
-
         try:
-            parsed = parser.from_file(document_path, tika_server)
+            with TikaClient(tika_url=settings.TIKA_ENDPOINT) as client:
+                parsed = client.metadata.from_file(document_path, mime_type)
+                return [
+                    {
+                        "namespace": "",
+                        "prefix": "",
+                        "key": key,
+                        "value": parsed.data[key],
+                    }
+                    for key in parsed.data
+                ]
         except Exception as e:
             self.log.warning(
                 f"Error while fetching document metadata for {document_path}: {e}",
             )
             return []

-        return [
-            {
-                "namespace": "",
-                "prefix": "",
-                "key": key,
-                "value": parsed["metadata"][key],
-            }
-            for key in parsed["metadata"]
-        ]
-
-    def parse(self, document_path: Path, mime_type, file_name=None):
+    def parse(self, document_path: Path, mime_type: str, file_name=None):
         self.log.info(f"Sending {document_path} to Tika server")
-        tika_server = settings.TIKA_ENDPOINT
-
-        # tika does not support a PathLike, only strings
-        # ensure this is a string
-        document_path = str(document_path)

         try:
-            parsed = parser.from_file(document_path, tika_server)
+            with TikaClient(tika_url=settings.TIKA_ENDPOINT) as client:
+                parsed = client.tika.as_text.from_file(document_path, mime_type)
         except Exception as err:
             raise ParseError(
                 f"Could not parse {document_path} with tika server at "
-                f"{tika_server}: {err}",
+                f"{settings.TIKA_ENDPOINT}: {err}",
             ) from err

-        self.text = parsed["content"].strip()
-
-        try:
-            self.date = dateutil.parser.isoparse(parsed["metadata"]["Creation-Date"])
-        except Exception as e:
-            self.log.warning(
-                f"Unable to extract date for document {document_path}: {e}",
-            )
+        self.text = parsed.content.strip()
+        self.date = parsed.metadata.created

         self.archive_path = self.convert_to_pdf(document_path, file_name)

     def convert_to_pdf(self, document_path, file_name):
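Condensed from the new parser code above: this is how the tika-client library is driven for text and metadata extraction. The Tika URL and file name here are placeholders; the calls themselves (TikaClient, client.tika.as_text.from_file, client.metadata.from_file, parsed.content, parsed.metadata.created, parsed.data) are the ones introduced by this change.

from tika_client import TikaClient

TIKA_URL = "http://localhost:9998"  # placeholder

with TikaClient(tika_url=TIKA_URL) as client:
    # Plain-text extraction, mirroring parse() above
    parsed = client.tika.as_text.from_file(
        "sample.odt",
        "application/vnd.oasis.opendocument.text",
    )
    text = parsed.content.strip()
    created = parsed.metadata.created

    # Raw metadata keys/values, mirroring extract_metadata() above
    meta = client.metadata.from_file(
        "sample.odt",
        "application/vnd.oasis.opendocument.text",
    )
    for key in meta.data:
        print(key, meta.data[key])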
@@ -9,7 +9,10 @@ from django.test import TestCase
 from paperless_tika.parsers import TikaDocumentParser


-@pytest.mark.skipif("TIKA_LIVE" not in os.environ, reason="No tika server")
+@pytest.mark.skipif(
+    "PAPERLESS_CI_TEST" not in os.environ,
+    reason="No Gotenberg/Tika servers to test with",
+)
 class TestTikaParserAgainstServer(TestCase):
     """
     This test case tests the Tika parsing against a live tika server,

@@ -25,7 +28,7 @@ class TestTikaParserAgainstServer(TestCase):
     def tearDown(self) -> None:
         self.parser.cleanup()

-    def try_parse_with_wait(self, test_file, mime_type):
+    def try_parse_with_wait(self, test_file: Path, mime_type: str):
         """
         For whatever reason, the image started during the test pipeline likes to
         segfault sometimes, when run with the exact files that usually pass.
@@ -5,34 +5,38 @@ from unittest import mock

 from django.test import TestCase
 from django.test import override_settings
-from requests import Response
+from httpx import Request
+from httpx import Response
 from rest_framework import status

 from documents.parsers import ParseError
 from paperless_tika.parsers import TikaDocumentParser
+from paperless_tika.tests.utils import HttpxMockMixin


-class TestTikaParser(TestCase):
+class TestTikaParser(HttpxMockMixin, TestCase):
     def setUp(self) -> None:
         self.parser = TikaDocumentParser(logging_group=None)

     def tearDown(self) -> None:
         self.parser.cleanup()

-    @mock.patch("paperless_tika.parsers.parser.from_file")
-    @mock.patch("paperless_tika.parsers.requests.post")
-    def test_parse(self, post, from_file):
-        from_file.return_value = {
-            "content": "the content",
-            "metadata": {"Creation-Date": "2020-11-21"},
-        }
-        response = Response()
-        response._content = b"PDF document"
-        response.status_code = status.HTTP_200_OK
-        post.return_value = response
-
-        file = os.path.join(self.parser.tempdir, "input.odt")
-        Path(file).touch()
+    def test_parse(self):
+        # Pretend parse response
+        self.httpx_mock.add_response(
+            json={
+                "Content-Type": "application/vnd.oasis.opendocument.text",
+                "X-TIKA:Parsed-By": [],
+                "X-TIKA:content": "the content",
+                "dcterms:created": "2020-11-21T00:00:00",
+            },
+        )
+        # Pretend convert to PDF response
+        self.httpx_mock.add_response(content=b"PDF document")
+
+        file = Path(os.path.join(self.parser.tempdir, "input.odt"))
+        file.touch()
+
         self.parser.parse(file, "application/vnd.oasis.opendocument.text")

         self.assertEqual(self.parser.text, "the content")

@@ -42,26 +46,28 @@ class TestTikaParser(TestCase):

         self.assertEqual(self.parser.date, datetime.datetime(2020, 11, 21))

-    @mock.patch("paperless_tika.parsers.parser.from_file")
-    def test_metadata(self, from_file):
-        from_file.return_value = {
-            "metadata": {"Creation-Date": "2020-11-21", "Some-key": "value"},
-        }
+    def test_metadata(self):
+        self.httpx_mock.add_response(
+            json={
+                "Content-Type": "application/vnd.oasis.opendocument.text",
+                "X-TIKA:Parsed-By": [],
+                "Some-key": "value",
+                "dcterms:created": "2020-11-21T00:00:00",
+            },
+        )

-        file = os.path.join(self.parser.tempdir, "input.odt")
-        Path(file).touch()
+        file = Path(os.path.join(self.parser.tempdir, "input.odt"))
+        file.touch()

         metadata = self.parser.extract_metadata(
             file,
             "application/vnd.oasis.opendocument.text",
         )

-        self.assertTrue("Creation-Date" in [m["key"] for m in metadata])
+        self.assertTrue("dcterms:created" in [m["key"] for m in metadata])
         self.assertTrue("Some-key" in [m["key"] for m in metadata])

-    @mock.patch("paperless_tika.parsers.parser.from_file")
-    @mock.patch("paperless_tika.parsers.requests.post")
-    def test_convert_failure(self, post, from_file):
+    def test_convert_failure(self):
         """
         GIVEN:
             - Document needs to be converted to PDF

@@ -70,22 +76,16 @@ class TestTikaParser(TestCase):
         THEN:
             - Parse error is raised
         """
-        from_file.return_value = {
-            "content": "the content",
-            "metadata": {"Creation-Date": "2020-11-21"},
-        }
-        response = Response()
-        response._content = b"PDF document"
-        response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
-        post.return_value = response
+        # Pretend convert to PDF response
+        self.httpx_mock.add_response(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR)

-        file = os.path.join(self.parser.tempdir, "input.odt")
-        Path(file).touch()
+        file = Path(os.path.join(self.parser.tempdir, "input.odt"))
+        file.touch()

         with self.assertRaises(ParseError):
             self.parser.convert_to_pdf(file, None)

-    @mock.patch("paperless_tika.parsers.requests.post")
+    @mock.patch("paperless_tika.parsers.httpx.post")
     def test_request_pdf_a_format(self, post: mock.Mock):
         """
         GIVEN:

@@ -95,12 +95,11 @@ class TestTikaParser(TestCase):
         THEN:
             - Request to Gotenberg contains the expected PDF/A format string
         """
-        file = os.path.join(self.parser.tempdir, "input.odt")
-        Path(file).touch()
+        file = Path(os.path.join(self.parser.tempdir, "input.odt"))
+        file.touch()

-        response = Response()
-        response._content = b"PDF document"
-        response.status_code = status.HTTP_200_OK
+        response = Response(status_code=status.HTTP_200_OK)
+        response.request = Request("POST", "/somewhere/")
         post.return_value = response

         for setting, expected_key in [
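The HttpxMockMixin imported above comes from paperless_tika.tests.utils, which is not shown in this excerpt. Assuming the pytest-httpx plugin is what provides the mock (an assumption, not confirmed by this diff), a minimal version of such a mixin could look like the sketch below: an autouse fixture hands the plugin's httpx_mock object to unittest-style test methods via self.

import pytest
from pytest_httpx import HTTPXMock


class HttpxMockMixin:
    @pytest.fixture(autouse=True)
    def httpx_mock_auto(self, httpx_mock: HTTPXMock):
        # Expose the pytest-httpx mock as self.httpx_mock for TestCase methods
        self.httpx_mock = httpx_mock
        yield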