Variable refactoring

shamoon
2025-08-08 08:06:25 -04:00
parent 6b99c21710
commit 74c9fedd4c
10 changed files with 35 additions and 35 deletions

View File

@@ -1779,20 +1779,20 @@ password. All of these options come from their similarly-named [Django settings]
 ## AI {#ai}

-#### [`PAPERLESS_ENABLE_AI=<bool>`](#PAPERLESS_ENABLE_AI) {#PAPERLESS_ENABLE_AI}
+#### [`PAPERLESS_AI_ENABLED=<bool>`](#PAPERLESS_AI_ENABLED) {#PAPERLESS_AI_ENABLED}

 : Enables the AI features in Paperless, including AI-based suggestions.
   This setting must be set to true in order to use the AI features.

     Defaults to false.

-#### [`PAPERLESS_LLM_EMBEDDING_BACKEND=<str>`](#PAPERLESS_LLM_EMBEDDING_BACKEND) {#PAPERLESS_LLM_EMBEDDING_BACKEND}
+#### [`PAPERLESS_AI_LLM_EMBEDDING_BACKEND=<str>`](#PAPERLESS_AI_LLM_EMBEDDING_BACKEND) {#PAPERLESS_AI_LLM_EMBEDDING_BACKEND}

 : The embedding backend to use for RAG. This can be either "openai" or "huggingface".

     Defaults to None.

-#### [`PAPERLESS_LLM_EMBEDDING_MODEL=<str>`](#PAPERLESS_LLM_EMBEDDING_MODEL) {#PAPERLESS_LLM_EMBEDDING_MODEL}
+#### [`PAPERLESS_AI_LLM_EMBEDDING_MODEL=<str>`](#PAPERLESS_AI_LLM_EMBEDDING_MODEL) {#PAPERLESS_AI_LLM_EMBEDDING_MODEL}

 : The model to use for the embedding backend for RAG. This can be set to any of the
   embedding models supported by the current embedding backend. If not supplied, defaults
   to "text-embedding-3-small" for OpenAI and "sentence-transformers/all-MiniLM-L6-v2"
   for Huggingface.
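For reference, a minimal RAG/embedding configuration under the new variable names might look like the following environment snippet (the backend choice is illustrative; the model shown is the documented OpenAI default):

    PAPERLESS_AI_ENABLED=true
    PAPERLESS_AI_LLM_EMBEDDING_BACKEND=openai
    PAPERLESS_AI_LLM_EMBEDDING_MODEL=text-embedding-3-small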
@@ -1815,26 +1815,26 @@ using the OpenAI API. This setting is required to be set to use the AI features.
 Refer to the OpenAI terms of service, and use at your own risk.

-#### [`PAPERLESS_LLM_MODEL=<str>`](#PAPERLESS_LLM_MODEL) {#PAPERLESS_LLM_MODEL}
+#### [`PAPERLESS_AI_LLM_MODEL=<str>`](#PAPERLESS_AI_LLM_MODEL) {#PAPERLESS_AI_LLM_MODEL}

 : The model to use for the AI backend, e.g. "gpt-3.5-turbo", "gpt-4" or any of the models
   supported by the current backend. If not supplied, defaults to "gpt-3.5-turbo" for
   OpenAI and "llama3" for Ollama.

     Defaults to None.

-#### [`PAPERLESS_LLM_API_KEY=<str>`](#PAPERLESS_LLM_API_KEY) {#PAPERLESS_LLM_API_KEY}
+#### [`PAPERLESS_AI_LLM_API_KEY=<str>`](#PAPERLESS_AI_LLM_API_KEY) {#PAPERLESS_AI_LLM_API_KEY}

 : The API key to use for the AI backend. This is required for the OpenAI backend only.

     Defaults to None.

-#### [`PAPERLESS_LLM_URL=<str>`](#PAPERLESS_LLM_URL) {#PAPERLESS_LLM_URL}
+#### [`PAPERLESS_AI_LLM_ENDPOINT=<str>`](#PAPERLESS_AI_LLM_ENDPOINT) {#PAPERLESS_AI_LLM_ENDPOINT}

-: The URL to use for the AI backend. This is required for the Ollama backend only.
+: The endpoint / URL to use for the AI backend. This is required for the Ollama backend only.

     Defaults to None.

-#### [`PAPERLESS_LLM_INDEX_TASK_CRON=<cron expression>`](#PAPERLESS_LLM_INDEX_TASK_CRON) {#PAPERLESS_LLM_INDEX_TASK_CRON}
+#### [`PAPERLESS_AI_LLM_INDEX_TASK_CRON=<cron expression>`](#PAPERLESS_AI_LLM_INDEX_TASK_CRON) {#PAPERLESS_AI_LLM_INDEX_TASK_CRON}

 : Configures the schedule to update the AI embeddings of text content and metadata for
   all documents. Only performed if AI is enabled and the LLM embedding backend is set.
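Taken together, an Ollama-backed setup under the renamed variables might look like this (the model and endpoint values shown are the documented fallbacks used when these are unset; adjust for a real deployment):

    PAPERLESS_AI_ENABLED=true
    PAPERLESS_AI_LLM_BACKEND=ollama
    PAPERLESS_AI_LLM_MODEL=llama3
    PAPERLESS_AI_LLM_ENDPOINT=http://localhost:11434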

View File

@@ -284,14 +284,14 @@ export const PaperlessConfigOptions: ConfigOption[] = [
     title: $localize`LLM Embedding Backend`,
     type: ConfigOptionType.Select,
     choices: mapToItems(LLMEmbeddingBackendConfig),
-    config_key: 'PAPERLESS_LLM_EMBEDDING_BACKEND',
+    config_key: 'PAPERLESS_AI_LLM_EMBEDDING_BACKEND',
     category: ConfigCategory.AI,
   },
   {
     key: 'llm_embedding_model',
     title: $localize`LLM Embedding Model`,
     type: ConfigOptionType.String,
-    config_key: 'PAPERLESS_LLM_EMBEDDING_MODEL',
+    config_key: 'PAPERLESS_AI_LLM_EMBEDDING_MODEL',
     category: ConfigCategory.AI,
   },
   {
@@ -299,28 +299,28 @@ export const PaperlessConfigOptions: ConfigOption[] = [
     title: $localize`LLM Backend`,
     type: ConfigOptionType.Select,
     choices: mapToItems(LLMBackendConfig),
-    config_key: 'PAPERLESS_LLM_BACKEND',
+    config_key: 'PAPERLESS_AI_LLM_BACKEND',
     category: ConfigCategory.AI,
   },
   {
     key: 'llm_model',
     title: $localize`LLM Model`,
     type: ConfigOptionType.String,
-    config_key: 'PAPERLESS_LLM_MODEL',
+    config_key: 'PAPERLESS_AI_LLM_MODEL',
     category: ConfigCategory.AI,
   },
   {
     key: 'llm_api_key',
     title: $localize`LLM API Key`,
     type: ConfigOptionType.Password,
-    config_key: 'PAPERLESS_LLM_API_KEY',
+    config_key: 'PAPERLESS_AI_LLM_API_KEY',
     category: ConfigCategory.AI,
   },
   {
-    key: 'llm_url',
-    title: $localize`LLM URL`,
+    key: 'llm_endpoint',
+    title: $localize`LLM Endpoint`,
     type: ConfigOptionType.String,
-    config_key: 'PAPERLESS_LLM_URL',
+    config_key: 'PAPERLESS_AI_LLM_ENDPOINT',
     category: ConfigCategory.AI,
   },
 ]
@@ -358,5 +358,5 @@ export interface PaperlessConfig extends ObjectWithId {
   llm_backend: string
   llm_model: string
   llm_api_key: string
-  llm_url: string
+  llm_endpoint: string
 }

View File

@@ -71,7 +71,7 @@ class TestApiAppConfig(DirectoriesMixin, APITestCase):
"llm_backend": None,
"llm_model": None,
"llm_api_key": None,
"llm_url": None,
"llm_endpoint": None,
},
)

View File

@@ -183,7 +183,7 @@ class AIConfig(BaseConfig):
     llm_backend: str = dataclasses.field(init=False)
     llm_model: str = dataclasses.field(init=False)
     llm_api_key: str = dataclasses.field(init=False)
-    llm_url: str = dataclasses.field(init=False)
+    llm_endpoint: str = dataclasses.field(init=False)

     def __post_init__(self) -> None:
         app_config = self._get_config_instance()
@@ -198,7 +198,7 @@ class AIConfig(BaseConfig):
         self.llm_backend = app_config.llm_backend or settings.LLM_BACKEND
         self.llm_model = app_config.llm_model or settings.LLM_MODEL
         self.llm_api_key = app_config.llm_api_key or settings.LLM_API_KEY
-        self.llm_url = app_config.llm_url or settings.LLM_URL
+        self.llm_endpoint = app_config.llm_endpoint or settings.LLM_ENDPOINT

     def llm_index_enabled(self) -> bool:
         return self.ai_enabled and self.llm_embedding_backend
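Each field above resolves to the database-backed application configuration value when present, otherwise to the environment-derived Django setting. A minimal consumer sketch (assuming an initialized Django/Paperless environment; the local variable name is illustrative):

    from paperless.config import AIConfig

    config = AIConfig()  # __post_init__ applies app-config overrides, then settings fallbacks
    if config.llm_index_enabled():  # true when AI is enabled and an embedding backend is set
        endpoint = config.llm_endpoint  # renamed from llm_url in this commit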

View File

@@ -73,12 +73,12 @@ class Migration(migrations.Migration):
         ),
         migrations.AddField(
             model_name="applicationconfiguration",
-            name="llm_url",
+            name="llm_endpoint",
             field=models.CharField(
                 blank=True,
                 max_length=128,
                 null=True,
-                verbose_name="Sets the LLM URL, optional",
+                verbose_name="Sets the LLM endpoint, optional",
             ),
         ),
     ]

View File

@@ -326,8 +326,8 @@ class ApplicationConfiguration(AbstractSingletonModel):
         max_length=128,
     )

-    llm_url = models.CharField(
-        verbose_name=_("Sets the LLM URL, optional"),
+    llm_endpoint = models.CharField(
+        verbose_name=_("Sets the LLM endpoint, optional"),
         null=True,
         blank=True,
         max_length=128,

View File

@@ -1460,10 +1460,10 @@ OUTLOOK_OAUTH_ENABLED = bool(
 ################################################################################
 AI_ENABLED = __get_boolean("PAPERLESS_AI_ENABLED", "NO")
 LLM_EMBEDDING_BACKEND = os.getenv(
-    "PAPERLESS_LLM_EMBEDDING_BACKEND",
+    "PAPERLESS_AI_LLM_EMBEDDING_BACKEND",
 )  # "huggingface" or "openai"
-LLM_EMBEDDING_MODEL = os.getenv("PAPERLESS_LLM_EMBEDDING_MODEL")
-LLM_BACKEND = os.getenv("PAPERLESS_LLM_BACKEND")  # "ollama" or "openai"
-LLM_MODEL = os.getenv("PAPERLESS_LLM_MODEL")
-LLM_API_KEY = os.getenv("PAPERLESS_LLM_API_KEY")
-LLM_URL = os.getenv("PAPERLESS_LLM_URL")
+LLM_EMBEDDING_MODEL = os.getenv("PAPERLESS_AI_LLM_EMBEDDING_MODEL")
+LLM_BACKEND = os.getenv("PAPERLESS_AI_LLM_BACKEND")  # "ollama" or "openai"
+LLM_MODEL = os.getenv("PAPERLESS_AI_LLM_MODEL")
+LLM_API_KEY = os.getenv("PAPERLESS_AI_LLM_API_KEY")
+LLM_ENDPOINT = os.getenv("PAPERLESS_AI_LLM_ENDPOINT")
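Because the old PAPERLESS_LLM_* names are simply no longer read after this rename, a deployment that still exports them silently loses its AI configuration. A hypothetical startup check (not part of this commit) could surface that; the mapping below is exactly the set of names renamed here:

    import os

    # Old name -> new name, as renamed in this commit (the helper itself is hypothetical)
    RENAMED_AI_VARS = {
        "PAPERLESS_ENABLE_AI": "PAPERLESS_AI_ENABLED",
        "PAPERLESS_LLM_EMBEDDING_BACKEND": "PAPERLESS_AI_LLM_EMBEDDING_BACKEND",
        "PAPERLESS_LLM_EMBEDDING_MODEL": "PAPERLESS_AI_LLM_EMBEDDING_MODEL",
        "PAPERLESS_LLM_BACKEND": "PAPERLESS_AI_LLM_BACKEND",
        "PAPERLESS_LLM_MODEL": "PAPERLESS_AI_LLM_MODEL",
        "PAPERLESS_LLM_API_KEY": "PAPERLESS_AI_LLM_API_KEY",
        "PAPERLESS_LLM_URL": "PAPERLESS_AI_LLM_ENDPOINT",
    }

    for old, new in RENAMED_AI_VARS.items():
        if old in os.environ and new not in os.environ:
            print(f"{old} is no longer read; set {new} instead")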

View File

@@ -6,7 +6,7 @@ from llama_index.llms.ollama import Ollama
 from llama_index.llms.openai import OpenAI

 from paperless.config import AIConfig
-from paperless_ai.tools import DocumentClassifierSchema
+from paperless_ai.base_model import DocumentClassifierSchema

 logger = logging.getLogger("paperless_ai.client")
@@ -24,7 +24,7 @@ class AIClient:
         if self.settings.llm_backend == "ollama":
             return Ollama(
                 model=self.settings.llm_model or "llama3",
-                base_url=self.settings.llm_url or "http://localhost:11434",
+                base_url=self.settings.llm_endpoint or "http://localhost:11434",
                 request_timeout=120,
             )
         elif self.settings.llm_backend == "openai":
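For orientation, the client picks its LLM from the resolved AIConfig: "ollama" builds an Ollama client against llm_endpoint (falling back to http://localhost:11434), while "openai" uses the model and API key. A rough usage sketch (the bare constructor appears in the tests below; anything beyond it is assumed):

    from paperless_ai.client import AIClient

    # Reads AIConfig internally; backend, model and endpoint come from
    # the app config or the PAPERLESS_AI_LLM_* settings shown above.
    client = AIClient()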

View File

@@ -31,7 +31,7 @@ def mock_openai_llm():
 def test_get_llm_ollama(mock_ai_config, mock_ollama_llm):
     mock_ai_config.llm_backend = "ollama"
     mock_ai_config.llm_model = "test_model"
-    mock_ai_config.llm_url = "http://test-url"
+    mock_ai_config.llm_endpoint = "http://test-url"

     client = AIClient()
@@ -67,7 +67,7 @@ def test_get_llm_unsupported_backend(mock_ai_config):
 def test_run_llm_query(mock_ai_config, mock_ollama_llm):
     mock_ai_config.llm_backend = "ollama"
     mock_ai_config.llm_model = "test_model"
-    mock_ai_config.llm_url = "http://test-url"
+    mock_ai_config.llm_endpoint = "http://test-url"

     mock_llm_instance = mock_ollama_llm.return_value
@@ -96,7 +96,7 @@ def test_run_llm_query(mock_ai_config, mock_ollama_llm):
 def test_run_chat(mock_ai_config, mock_ollama_llm):
     mock_ai_config.llm_backend = "ollama"
     mock_ai_config.llm_model = "test_model"
-    mock_ai_config.llm_url = "http://test-url"
+    mock_ai_config.llm_endpoint = "http://test-url"

     mock_llm_instance = mock_ollama_llm.return_value
     mock_llm_instance.chat.return_value = "test_chat_result"