From f3c7c95c6968c6085b446cb342776a47fe50d6c6 Mon Sep 17 00:00:00 2001
From: shamoon <4887959+shamoon@users.noreply.github.com>
Date: Sat, 19 Apr 2025 21:01:54 -0700
Subject: [PATCH] Changeup logging

---
 src/documents/ai/client.py         | 17 ++++++++++++++---
 src/documents/ai/llm_classifier.py |  5 +++--
 2 files changed, 17 insertions(+), 5 deletions(-)

diff --git a/src/documents/ai/client.py b/src/documents/ai/client.py
index 4d3b85ec0..13bf680bc 100644
--- a/src/documents/ai/client.py
+++ b/src/documents/ai/client.py
@@ -7,9 +7,20 @@ logger = logging.getLogger("paperless.ai.client")
 
 
 def run_llm_query(prompt: str) -> str:
-    if settings.LLM_BACKEND == "ollama":
-        return _run_ollama_query(prompt)
-    return _run_openai_query(prompt)
+    logger.debug(
+        "Running LLM query against %s with model %s",
+        settings.LLM_BACKEND,
+        settings.LLM_MODEL,
+    )
+    match settings.LLM_BACKEND:
+        case "openai":
+            result = _run_openai_query(prompt)
+        case "ollama":
+            result = _run_ollama_query(prompt)
+        case _:
+            raise ValueError(f"Unsupported LLM backend: {settings.LLM_BACKEND}")
+    logger.debug("LLM query result: %s", result)
+    return result
 
 
 def _run_ollama_query(prompt: str) -> str:
diff --git a/src/documents/ai/llm_classifier.py b/src/documents/ai/llm_classifier.py
index 53fad9148..b4c4db33f 100644
--- a/src/documents/ai/llm_classifier.py
+++ b/src/documents/ai/llm_classifier.py
@@ -15,6 +15,9 @@ def get_ai_document_classification(document: Document) -> dict:
     filename = document.filename or ""
     content = document.content or ""
+    # Limit the content to 10k characters
+    content = content[:10000]
+
     prompt = f"""
 You are a document classification assistant. 
 Based on the content below, return a JSON object suggesting the following classification fields:
 - title: A descriptive title for the document
@@ -33,9 +36,7 @@ def get_ai_document_classification(document: Document) -> dict:
     """
 
     try:
-        logger.debug(f"LLM classification prompt: {prompt}")
         result = run_llm_query(prompt)
-        logger.debug(f"LLM classification result: {result}")
         suggestions = parse_llm_classification_response(result)
         return suggestions or {}
     except Exception: