Mirror of https://github.com/paperless-ngx/paperless-ngx.git, synced 2025-08-12 00:19:48 +00:00
Variable refactoring: rename the LLM URL configuration to llm_endpoint (model field, migration, AIConfig, API test), switch the related environment variables from PAPERLESS_LLM_* to PAPERLESS_AI_LLM_* (with PAPERLESS_LLM_URL becoming PAPERLESS_AI_LLM_ENDPOINT), and import DocumentClassifierSchema from paperless_ai.base_model instead of paperless_ai.tools.
@@ -71,7 +71,7 @@ class TestApiAppConfig(DirectoriesMixin, APITestCase):
                 "llm_backend": None,
                 "llm_model": None,
                 "llm_api_key": None,
-                "llm_url": None,
+                "llm_endpoint": None,
             },
         )
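For orientation, a hedged sketch of the AI-related portion of the app-config payload this test now expects; the keys and None defaults are taken from the diff, everything around them is omitted:

# Sketch only: the AI-related keys of the expected configuration payload after the rename.
expected_ai_config = {
    "llm_backend": None,
    "llm_model": None,
    "llm_api_key": None,
    "llm_endpoint": None,  # formerly "llm_url"
}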
@@ -183,7 +183,7 @@ class AIConfig(BaseConfig):
     llm_backend: str = dataclasses.field(init=False)
     llm_model: str = dataclasses.field(init=False)
     llm_api_key: str = dataclasses.field(init=False)
-    llm_url: str = dataclasses.field(init=False)
+    llm_endpoint: str = dataclasses.field(init=False)
 
     def __post_init__(self) -> None:
         app_config = self._get_config_instance()
@@ -198,7 +198,7 @@ class AIConfig(BaseConfig):
         self.llm_backend = app_config.llm_backend or settings.LLM_BACKEND
         self.llm_model = app_config.llm_model or settings.LLM_MODEL
         self.llm_api_key = app_config.llm_api_key or settings.LLM_API_KEY
-        self.llm_url = app_config.llm_url or settings.LLM_URL
+        self.llm_endpoint = app_config.llm_endpoint or settings.LLM_ENDPOINT
 
     def llm_index_enabled(self) -> bool:
         return self.ai_enabled and self.llm_embedding_backend
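As a reading aid, a minimal self-contained sketch of the "app config first, settings second" fallback that __post_init__ applies to the renamed attribute; ExampleAIConfig and the hard-coded stand-in value are illustrative, not part of the codebase:

# Illustrative fallback for the renamed llm_endpoint attribute: prefer the value
# stored in the application configuration, fall back to the environment-backed setting.
import dataclasses
import os


@dataclasses.dataclass
class ExampleAIConfig:
    llm_endpoint: str = dataclasses.field(init=False)

    def __post_init__(self) -> None:
        app_config_value = None  # stand-in for app_config.llm_endpoint
        settings_value = os.getenv("PAPERLESS_AI_LLM_ENDPOINT", "")
        self.llm_endpoint = app_config_value or settings_value


print(ExampleAIConfig().llm_endpoint)  # empty unless PAPERLESS_AI_LLM_ENDPOINT is set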
@@ -73,12 +73,12 @@ class Migration(migrations.Migration):
         ),
         migrations.AddField(
             model_name="applicationconfiguration",
-            name="llm_url",
+            name="llm_endpoint",
             field=models.CharField(
                 blank=True,
                 max_length=128,
                 null=True,
-                verbose_name="Sets the LLM URL, optional",
+                verbose_name="Sets the LLM endpoint, optional",
             ),
         ),
     ]
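Note that the rename is done by editing the existing AddField operation, which is only safe if that migration has not been applied anywhere yet; otherwise the usual alternative is a follow-up RenameField migration. A hedged sketch, with a placeholder app label and dependency that are not taken from the repository:

# Hypothetical follow-up migration expressing the same rename for databases where
# the original AddField had already been applied; the dependency name is a placeholder.
from django.db import migrations


class Migration(migrations.Migration):
    dependencies = [
        ("paperless", "XXXX_previous_migration"),  # placeholder
    ]

    operations = [
        migrations.RenameField(
            model_name="applicationconfiguration",
            old_name="llm_url",
            new_name="llm_endpoint",
        ),
    ]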
@@ -326,8 +326,8 @@ class ApplicationConfiguration(AbstractSingletonModel):
         max_length=128,
     )
 
-    llm_url = models.CharField(
-        verbose_name=_("Sets the LLM URL, optional"),
+    llm_endpoint = models.CharField(
+        verbose_name=_("Sets the LLM endpoint, optional"),
         null=True,
         blank=True,
         max_length=128,
@@ -1460,10 +1460,10 @@ OUTLOOK_OAUTH_ENABLED = bool(
 ################################################################################
 AI_ENABLED = __get_boolean("PAPERLESS_AI_ENABLED", "NO")
 LLM_EMBEDDING_BACKEND = os.getenv(
-    "PAPERLESS_LLM_EMBEDDING_BACKEND",
+    "PAPERLESS_AI_LLM_EMBEDDING_BACKEND",
 ) # "huggingface" or "openai"
-LLM_EMBEDDING_MODEL = os.getenv("PAPERLESS_LLM_EMBEDDING_MODEL")
-LLM_BACKEND = os.getenv("PAPERLESS_LLM_BACKEND") # "ollama" or "openai"
-LLM_MODEL = os.getenv("PAPERLESS_LLM_MODEL")
-LLM_API_KEY = os.getenv("PAPERLESS_LLM_API_KEY")
-LLM_URL = os.getenv("PAPERLESS_LLM_URL")
+LLM_EMBEDDING_MODEL = os.getenv("PAPERLESS_AI_LLM_EMBEDDING_MODEL")
+LLM_BACKEND = os.getenv("PAPERLESS_AI_LLM_BACKEND") # "ollama" or "openai"
+LLM_MODEL = os.getenv("PAPERLESS_AI_LLM_MODEL")
+LLM_API_KEY = os.getenv("PAPERLESS_AI_LLM_API_KEY")
+LLM_ENDPOINT = os.getenv("PAPERLESS_AI_LLM_ENDPOINT")
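For deployments that already set the old variables, the environment keys change as follows; the mapping is read directly off the diff, and note that PAPERLESS_LLM_URL becomes PAPERLESS_AI_LLM_ENDPOINT rather than just gaining the prefix:

# Old -> new environment variable names implied by this settings change.
RENAMED_ENV_VARS = {
    "PAPERLESS_LLM_EMBEDDING_BACKEND": "PAPERLESS_AI_LLM_EMBEDDING_BACKEND",
    "PAPERLESS_LLM_EMBEDDING_MODEL": "PAPERLESS_AI_LLM_EMBEDDING_MODEL",
    "PAPERLESS_LLM_BACKEND": "PAPERLESS_AI_LLM_BACKEND",
    "PAPERLESS_LLM_MODEL": "PAPERLESS_AI_LLM_MODEL",
    "PAPERLESS_LLM_API_KEY": "PAPERLESS_AI_LLM_API_KEY",
    "PAPERLESS_LLM_URL": "PAPERLESS_AI_LLM_ENDPOINT",
}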
@@ -6,7 +6,7 @@ from llama_index.llms.ollama import Ollama
 from llama_index.llms.openai import OpenAI
 
 from paperless.config import AIConfig
-from paperless_ai.tools import DocumentClassifierSchema
+from paperless_ai.base_model import DocumentClassifierSchema
 
 logger = logging.getLogger("paperless_ai.client")
@@ -24,7 +24,7 @@ class AIClient:
         if self.settings.llm_backend == "ollama":
             return Ollama(
                 model=self.settings.llm_model or "llama3",
-                base_url=self.settings.llm_url or "http://localhost:11434",
+                base_url=self.settings.llm_endpoint or "http://localhost:11434",
                 request_timeout=120,
             )
         elif self.settings.llm_backend == "openai":
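A minimal sketch of the Ollama construction after the rename, showing that llm_endpoint is now what feeds base_url; the constructor arguments mirror the diff, and the standalone variable stands in for self.settings.llm_endpoint:

# Illustrative only: the renamed setting supplies base_url, with the same default as before.
from llama_index.llms.ollama import Ollama

llm_endpoint = None  # stand-in for self.settings.llm_endpoint
llm = Ollama(
    model="llama3",
    base_url=llm_endpoint or "http://localhost:11434",
    request_timeout=120,
)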
@@ -31,7 +31,7 @@ def mock_openai_llm():
 def test_get_llm_ollama(mock_ai_config, mock_ollama_llm):
     mock_ai_config.llm_backend = "ollama"
     mock_ai_config.llm_model = "test_model"
-    mock_ai_config.llm_url = "http://test-url"
+    mock_ai_config.llm_endpoint = "http://test-url"
 
     client = AIClient()
@@ -67,7 +67,7 @@ def test_get_llm_unsupported_backend(mock_ai_config):
 def test_run_llm_query(mock_ai_config, mock_ollama_llm):
     mock_ai_config.llm_backend = "ollama"
     mock_ai_config.llm_model = "test_model"
-    mock_ai_config.llm_url = "http://test-url"
+    mock_ai_config.llm_endpoint = "http://test-url"
 
     mock_llm_instance = mock_ollama_llm.return_value
@@ -96,7 +96,7 @@ def test_run_llm_query(mock_ai_config, mock_ollama_llm):
 def test_run_chat(mock_ai_config, mock_ollama_llm):
     mock_ai_config.llm_backend = "ollama"
     mock_ai_config.llm_model = "test_model"
-    mock_ai_config.llm_url = "http://test-url"
+    mock_ai_config.llm_endpoint = "http://test-url"
 
     mock_llm_instance = mock_ollama_llm.return_value
     mock_llm_instance.chat.return_value = "test_chat_result"