Mirror of https://github.com/paperless-ngx/paperless-ngx.git
Chore(deps): Bump the ai-group (#11798)
* Chore(deps): Bump llama-index-core from 0.12.33.post1 to 0.13.0

  Bumps [llama-index-core](https://github.com/run-llama/llama_index) from 0.12.33.post1 to 0.13.0.
  - [Release notes](https://github.com/run-llama/llama_index/releases)
  - [Changelog](https://github.com/run-llama/llama_index/blob/main/CHANGELOG.md)
  - [Commits](https://github.com/run-llama/llama_index/commits/v0.13.0)

  ---
  updated-dependencies:
  - dependency-name: llama-index-core
    dependency-version: 0.13.0
    dependency-type: direct:production
  ...

  Signed-off-by: dependabot[bot] <support@github.com>

* Update llama-index to latest versions

* Fix embedding mock

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: shamoon <4887959+shamoon@users.noreply.github.com>
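A quick local check of the bump (a hedged sketch, not part of the commit; the package name and target version are taken from the message above):

# Sketch: confirm the installed llama-index-core matches the bumped version.
from importlib.metadata import version

assert version("llama-index-core") == "0.13.0"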
@@ -11,14 +11,12 @@ from paperless_ai.chat import stream_chat_with_documents
 @pytest.fixture(autouse=True)
 def patch_embed_model():
     from llama_index.core import settings as llama_settings
+    from llama_index.core.embeddings.mock_embed_model import MockEmbedding
 
-    mock_embed_model = MagicMock()
-    mock_embed_model._get_text_embedding_batch.return_value = [
-        [0.1] * 1536,
-    ]  # 1 vector per input
-    llama_settings.Settings._embed_model = mock_embed_model
+    # Use a real BaseEmbedding subclass to satisfy llama-index 0.14 validation
+    llama_settings.Settings.embed_model = MockEmbedding(embed_dim=1536)
     yield
-    llama_settings.Settings._embed_model = None
+    llama_settings.Settings.embed_model = None
 
 
 @pytest.fixture(autouse=True)
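For context, a minimal standalone sketch (not part of this diff) of what the updated fixture relies on: llama-index's MockEmbedding installed as the global embed model. The import path and embed_dim value mirror the diff above; the claim about the dummy vector length is an assumption about MockEmbedding's behavior rather than something the commit states.

# Standalone sketch, assuming llama-index-core is installed.
from llama_index.core import Settings
from llama_index.core.embeddings.mock_embed_model import MockEmbedding

# Install the mock as the global embed model, as the fixture above does.
Settings.embed_model = MockEmbedding(embed_dim=1536)

# MockEmbedding is a real BaseEmbedding subclass, so it passes llama-index's
# validation while returning dummy vectors of the configured dimension.
vector = Settings.embed_model.get_text_embedding("any text")
assert len(vector) == 1536  # assumption: dummy vectors have length embed_dim

# Reset afterwards, mirroring the fixture's teardown.
Settings.embed_model = None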