Fix embedding mock

This commit is contained in:
shamoon
2026-01-14 13:13:05 -08:00
parent af1e7bc557
commit 45b7f9577c

View File

@@ -11,14 +11,12 @@ from paperless_ai.chat import stream_chat_with_documents
@pytest.fixture(autouse=True)
def patch_embed_model():
    """Swap the global llama-index embedding model for a deterministic stub.

    Autouse: applied to every test in this module so no test accidentally
    triggers a real embedding backend. Uses llama-index's own ``MockEmbedding``
    (fixed-size vectors, ``embed_dim=8``) assigned through the public
    ``Settings.embed_model`` property rather than poking the private
    ``_embed_model`` attribute, so the library's normal resolution path is
    exercised. The model is cleared again after each test.
    """
    from llama_index.core import settings as llama_settings
    from llama_index.core.embeddings.utils import MockEmbedding

    mock_embed_model = MockEmbedding(embed_dim=8)
    llama_settings.Settings.embed_model = mock_embed_model
    yield
    # Reset so subsequent tests/modules resolve their own embedding model
    # instead of inheriting this stub.
    llama_settings.Settings.embed_model = None
@pytest.fixture(autouse=True)