1 parent 6906afc commit a829c95
CHANGELOG.md
@@ -1,5 +1,30 @@
# ChangeLog

+## [2025-05-30]
+
+### `llama-index-core` [0.12.39]
+
+- feat: Adding Resource to perform dependency injection in Workflows (docs coming soon!) (#18884)
+- feat: Add `require_tool` param to function calling LLMs (#18654)
+- fix: make prefix and response non-required for hitl events (#18896)
+- fix: SelectionOutputParser when LLM chooses no choices (#18886)
+
+### `llama-index-indices-managed-llama-cloud` [0.7.2]
+
+- feat: add non persisted composite retrieval (#18908)
+
+### `llama-index-llms-bedrock-converse` [0.7.0]
+
+- feat: Update aioboto3 dependency to allow latest version (#18889)
+
+### `llama-index-llms-ollama` [0.6.1]
+
+- Support ollama 0.5.0 SDK, update ollama docs (#18904)
+
+### `llama-index-vector-stores-milvus` [0.8.3]
+
+- feat: Multi language analyzer supported in Milvus (#18901)
+
## [2025-05-28]

### `llama-index-core` [0.12.38]
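The `llama-index-core` 0.12.39 entry above adds `Resource` for dependency injection into workflow steps (#18884); official docs were still pending at release. Below is a rough sketch of the intended usage: the `Resource` import path, its factory-based constructor, and the `Annotated` parameter syntax are assumptions based on the PR title, and `FakeClient`, `get_client`, and `LookupFlow` are hypothetical names.

```python
from typing import Annotated

from llama_index.core.workflow import StartEvent, StopEvent, Workflow, step
from llama_index.core.workflow.resource import Resource  # assumed import path


class FakeClient:
    """Hypothetical shared dependency (e.g. a DB or cache handle)."""

    def lookup(self, key: str) -> str:
        return f"value-for-{key}"


def get_client() -> FakeClient:
    # Factory the workflow calls to build the injected resource.
    return FakeClient()


class LookupFlow(Workflow):
    @step
    async def run_lookup(
        self,
        ev: StartEvent,
        # The step declares the dependency; the workflow supplies it via the
        # Resource factory instead of the caller passing it in (assumed syntax).
        client: Annotated[FakeClient, Resource(get_client)],
    ) -> StopEvent:
        return StopEvent(result=client.lookup("demo"))
```

If the shipped API matches this sketch, `await LookupFlow().run()` returns the lookup result without the caller ever constructing a `FakeClient`.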
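The same release adds a `require_tool` option for function-calling LLMs (#18654). A hedged sketch with the OpenAI integration follows; the keyword name is taken verbatim from the changelog entry, and whether `chat_with_tools` is the surface that accepts it (and the exact flag spelling) is an assumption, as are the model choice and the `add` tool.

```python
from llama_index.core.tools import FunctionTool
from llama_index.llms.openai import OpenAI


def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b


add_tool = FunctionTool.from_defaults(fn=add)
llm = OpenAI(model="gpt-4o-mini")  # any function-calling model; arbitrary choice

# Ask for a tool call and (per the changelog) require that one is produced.
response = llm.chat_with_tools(
    tools=[add_tool],
    user_msg="What is 2 + 3?",
    require_tool=True,  # keyword name from the changelog; may differ in the API
)
tool_calls = llm.get_tool_calls_from_response(response, error_on_no_tool_call=False)
print(tool_calls)
```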
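The `llama-index-llms-ollama` 0.6.1 entry tracks the ollama 0.5.0 Python SDK (#18904). A minimal smoke test is sketched below; the model name and timeout are arbitrary, and a local Ollama server with the model already pulled is assumed.

```python
from llama_index.llms.ollama import Ollama

# Assumes a local Ollama server is running and the model has been pulled.
llm = Ollama(model="llama3.2", request_timeout=120.0)
print(llm.complete("Say hello in one word."))
```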
docs/docs/CHANGELOG.md
docs/mkdocs.yml
@@ -1171,10 +1171,7 @@ nav:
- ./api_reference/llama_deploy/control_plane.md
- ./api_reference/llama_deploy/deploy.md
- ./api_reference/llama_deploy/deployment.md
- - ./api_reference/llama_deploy/message_consumers.md
- - ./api_reference/llama_deploy/message_publishers.md
- ./api_reference/llama_deploy/messages.md
- - ./api_reference/llama_deploy/orchestrators.md
- ./api_reference/llama_deploy/python_sdk.md
- ./api_reference/llama_deploy/services.md
- ./api_reference/llama_deploy/types.md
@@ -1191,7 +1188,6 @@ nav:
- ./api_reference/llama_deploy/message_queues/rabbitmq.md
- ./api_reference/llama_deploy/message_queues/redis.md
- ./api_reference/llama_deploy/message_queues/simple.md
- - ./api_reference/llama_deploy/message_queues/solace.md
- Metadata Extractors:
- ./api_reference/extractors/documentcontext.md
- ./api_reference/extractors/entity.md
llama-index-core/llama_index/core/__init__.py
@@ -1,6 +1,6 @@
"""Top-level imports for LlamaIndex."""
-__version__ = "0.12.38"
+__version__ = "0.12.39"
import logging
from logging import NullHandler
llama-index-core/pyproject.toml
@@ -33,7 +33,7 @@ dev = [
[project]
name = "llama-index-core"
-version = "0.12.38"
+version = "0.12.39"
description = "Interface between LLMs and your data"
authors = [{name = "Jerry Liu", email = "[email protected]"}]
requires-python = ">=3.9,<4.0"
pyproject.toml
@@ -41,11 +41,11 @@ classifiers = [
dependencies = [
"llama-index-agent-openai>=0.4.0,<0.5",
"llama-index-cli>=0.4.1,<0.5",
- "llama-index-core>=0.12.38,<0.13",
+ "llama-index-core>=0.12.39,<0.13",
"llama-index-embeddings-openai>=0.3.0,<0.4",
"llama-index-indices-managed-llama-cloud>=0.4.0",
- "llama-index-llms-openai>=0.3.0,<0.4",
- "llama-index-multi-modal-llms-openai>=0.4.0,<0.5",
+ "llama-index-llms-openai>=0.4.0,<0.5",
+ "llama-index-multi-modal-llms-openai>=0.5.0,<0.6",
"llama-index-program-openai>=0.3.0,<0.4",
"llama-index-question-gen-openai>=0.3.0,<0.4",
"llama-index-readers-file>=0.4.0,<0.5",
@@ -74,7 +74,7 @@ maintainers = [
name = "llama-index"
readme = "README.md"

[project.scripts]
llamaindex-cli = "llama_index.cli.command_line:main"
uv.lock