Mirror of https://github.com/paperless-ngx/paperless-ngx.git, synced 2025-05-23 12:58:18 -05:00
Fix naming

commit 741e9a5f82
parent e2869e906f
@@ -1718,9 +1718,9 @@ suggestions. This setting is required to be set to true in order to use the AI f

 #### [`PAPERLESS_LLM_EMBEDDING_BACKEND=<str>`](#PAPERLESS_LLM_EMBEDDING_BACKEND) {#PAPERLESS_LLM_EMBEDDING_BACKEND}

-: The embedding backend to use for RAG. This can be either "openai" or "local".
+: The embedding backend to use for RAG. This can be either "openai" or "huggingface".

-    Defaults to "local".
+    Defaults to None.

 #### [`PAPERLESS_LLM_EMBEDDING_MODEL=<str>`](#PAPERLESS_LLM_EMBEDDING_MODEL) {#PAPERLESS_LLM_EMBEDDING_MODEL}
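The documentation now matches the code: the backend is either "openai" or "huggingface", and the setting is unset (None) by default. A standalone sketch of the documented values and the fallback models visible later in this diff (the validate_backend helper is illustrative, not part of paperless-ngx):

```python
# Illustrative only: summarizes the documented choices; validate_backend is a
# hypothetical helper, not paperless-ngx code.
VALID_EMBEDDING_BACKENDS = {"openai", "huggingface"}

DEFAULT_EMBEDDING_MODELS = {
    "openai": "text-embedding-3-small",
    "huggingface": "sentence-transformers/all-MiniLM-L6-v2",
}


def validate_backend(value: str | None) -> str | None:
    # PAPERLESS_LLM_EMBEDDING_BACKEND now defaults to None (unset) rather than "local".
    if value is None or value in VALID_EMBEDDING_BACKENDS:
        return value
    raise ValueError(
        f"PAPERLESS_LLM_EMBEDDING_BACKEND must be one of {sorted(VALID_EMBEDDING_BACKENDS)}",
    )


print(validate_backend("huggingface"), DEFAULT_EMBEDDING_MODELS["huggingface"])
```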
@@ -4,6 +4,7 @@ from llama_index.embeddings.openai import OpenAIEmbedding

 from documents.models import Document
 from documents.models import Note
 from paperless.config import AIConfig
+from paperless.models import LLMEmbeddingBackend

 EMBEDDING_DIMENSIONS = {
     "text-embedding-3-small": 1536,
@@ -15,12 +16,12 @@ def get_embedding_model():
     config = AIConfig()

     match config.llm_embedding_backend:
-        case "openai":
+        case LLMEmbeddingBackend.OPENAI:
             return OpenAIEmbedding(
                 model=config.llm_embedding_model or "text-embedding-3-small",
                 api_key=config.llm_api_key,
             )
-        case "local":
+        case LLMEmbeddingBackend.HUGGINGFACE:
             return HuggingFaceEmbedding(
                 model_name=config.llm_embedding_model
                 or "sentence-transformers/all-MiniLM-L6-v2",
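The renamed match arms can be sketched standalone to show the dispatch this hunk performs; the enum values and fallback model names mirror the diff, but this is a simplified stand-in for the real module, which returns llama-index embedding objects:

```python
from enum import Enum


# Simplified stand-in for the renamed dispatch above; not the paperless-ngx module.
class LLMEmbeddingBackend(str, Enum):
    OPENAI = "openai"
    HUGGINGFACE = "huggingface"


def pick_default_model(backend: str) -> str:
    # The configured value arrives as a plain string, but str-backed enum members
    # compare equal to their values, so the value patterns below still match.
    match backend:
        case LLMEmbeddingBackend.OPENAI:
            return "text-embedding-3-small"
        case LLMEmbeddingBackend.HUGGINGFACE:
            return "sentence-transformers/all-MiniLM-L6-v2"
        case _:
            raise ValueError(f"Unknown embedding backend: {backend}")


print(pick_default_model("huggingface"))  # sentence-transformers/all-MiniLM-L6-v2
```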
@@ -76,7 +76,7 @@ class ColorConvertChoices(models.TextChoices):

 class LLMEmbeddingBackend(models.TextChoices):
     OPENAI = ("openai", _("OpenAI"))
-    LOCAL = ("local", _("Local"))
+    HUGGINGFACE = ("huggingface", _("Huggingface"))


 class LLMBackend(models.TextChoices):
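Renaming LOCAL to HUGGINGFACE only changes the stored string; TextChoices members remain str subclasses, so string comparisons against the configured value keep working. A minimal sketch, assuming Django is installed and with the translated labels simplified to plain strings:

```python
from django.db import models


# Simplified: labels are plain strings here, not gettext_lazy as in paperless-ngx.
class LLMEmbeddingBackend(models.TextChoices):
    OPENAI = "openai", "OpenAI"
    HUGGINGFACE = "huggingface", "Huggingface"


# TextChoices members are str subclasses, so the member and the raw configuration
# string compare equal - e.g. the value read from PAPERLESS_LLM_EMBEDDING_BACKEND.
assert LLMEmbeddingBackend.HUGGINGFACE == "huggingface"
assert LLMEmbeddingBackend.HUGGINGFACE.value == "huggingface"
print(LLMEmbeddingBackend.values)  # ['openai', 'huggingface']
```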
@@ -1284,7 +1284,7 @@ OUTLOOK_OAUTH_ENABLED = bool(
 AI_ENABLED = __get_boolean("PAPERLESS_AI_ENABLED", "NO")
 LLM_EMBEDDING_BACKEND = os.getenv(
     "PAPERLESS_LLM_EMBEDDING_BACKEND",
-)  # "local" or "openai"
+)  # "huggingface" or "openai"
 LLM_EMBEDDING_MODEL = os.getenv("PAPERLESS_LLM_EMBEDDING_MODEL")
 LLM_BACKEND = os.getenv("PAPERLESS_LLM_BACKEND")  # "ollama" or "openai"
 LLM_MODEL = os.getenv("PAPERLESS_LLM_MODEL")
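A minimal sketch of the environment-to-settings flow above, runnable outside Django; the exported values are examples, and leaving the variable unset yields None, matching the documented default:

```python
import os

# Unset -> None, matching "Defaults to None" in the documentation hunk above.
print(os.getenv("PAPERLESS_LLM_EMBEDDING_BACKEND"))  # None if not exported

os.environ["PAPERLESS_LLM_EMBEDDING_BACKEND"] = "huggingface"  # or "openai"
os.environ["PAPERLESS_LLM_EMBEDDING_MODEL"] = "sentence-transformers/all-MiniLM-L6-v2"

LLM_EMBEDDING_BACKEND = os.getenv("PAPERLESS_LLM_EMBEDDING_BACKEND")  # "huggingface" or "openai"
LLM_EMBEDDING_MODEL = os.getenv("PAPERLESS_LLM_EMBEDDING_MODEL")
print(LLM_EMBEDDING_BACKEND, LLM_EMBEDDING_MODEL)
```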
@@ -66,7 +66,7 @@ def test_parse_llm_classification_response_invalid_json():
 @patch("paperless.ai.client.AIClient.run_llm_query")
 @patch("paperless.ai.ai_classifier.build_prompt_with_rag")
 @override_settings(
-    LLM_EMBEDDING_BACKEND="local",
+    LLM_EMBEDDING_BACKEND="huggingface",
     LLM_EMBEDDING_MODEL="some_model",
     LLM_BACKEND="ollama",
     LLM_MODEL="some_model",
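The same override pattern can be exercised in isolation; this is a hedged sketch rather than the real paperless test, and it only checks that the overridden setting carries the new backend name:

```python
import unittest

import django
from django.conf import settings

# Minimal standalone setup so override_settings can be demonstrated; the real
# test suite runs inside the paperless-ngx Django project instead.
if not settings.configured:
    settings.configure(LLM_EMBEDDING_BACKEND=None, LLM_EMBEDDING_MODEL=None)
    django.setup()

from django.test import SimpleTestCase, override_settings


class EmbeddingSettingsTest(SimpleTestCase):
    @override_settings(
        LLM_EMBEDDING_BACKEND="huggingface",  # renamed from "local" in this commit
        LLM_EMBEDDING_MODEL="some_model",
    )
    def test_override_uses_new_backend_name(self):
        self.assertEqual(settings.LLM_EMBEDDING_BACKEND, "huggingface")


if __name__ == "__main__":
    unittest.main()
```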