Compare commits

...

2 Commits

Author SHA1 Message Date
shamoon
94a5af66eb Fix default llama3.1 2026-01-14 15:36:01 -08:00
shamoon
948c664dcf Correct get_tool_calls_from_response signature 2026-01-14 14:55:03 -08:00
2 changed files with 3 additions and 3 deletions

View File

@@ -1873,7 +1873,7 @@ using the OpenAI API. This setting is required to be set to use the AI features.
  #### [`PAPERLESS_AI_LLM_MODEL=<str>`](#PAPERLESS_AI_LLM_MODEL) {#PAPERLESS_AI_LLM_MODEL}
  : The model to use for the AI backend, i.e. "gpt-3.5-turbo", "gpt-4" or any of the models supported by the
-     current backend. If not supplied, defaults to "gpt-3.5-turbo" for OpenAI and "llama3" for Ollama.
+     current backend. If not supplied, defaults to "gpt-3.5-turbo" for OpenAI and "llama3.1" for Ollama.
      Defaults to None.

View File

@@ -23,7 +23,7 @@ class AIClient:
      def get_llm(self) -> Ollama | OpenAI:
          if self.settings.llm_backend == "ollama":
              return Ollama(
-                 model=self.settings.llm_model or "llama3",
+                 model=self.settings.llm_model or "llama3.1",
                  base_url=self.settings.llm_endpoint or "http://localhost:11434",
                  request_timeout=120,
              )
@@ -52,7 +52,7 @@ class AIClient:
          )
          tool_calls = self.llm.get_tool_calls_from_response(
              result,
-             error_on_no_tool_calls=True,
+             error_on_no_tool_call=True,
          )
          logger.debug("LLM query result: %s", tool_calls)
          parsed = DocumentClassifierSchema(**tool_calls[0].tool_kwargs)