Skip to content

Commit

Permalink
v0.12.12 (#17561)
Browse files Browse the repository at this point in the history
  • Loading branch information
logan-markewich authored Jan 20, 2025
1 parent 637c79d commit 1183f72
Show file tree
Hide file tree
Showing 11 changed files with 160 additions and 73 deletions.
35 changes: 35 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,40 @@
# ChangeLog

## [2025-01-20]

### `llama-index-core` [0.12.12]

- feat: add AgentWorkflow system to support single and multi-agent workflows (#17237)
- Fix image-path validation in ImageNode (#17558)

### `llama-index-indices-managed-vectara` [0.4.0]

- (breaking change) API Migration (#17545)

### `llama-index-llms-anthropic` [0.6.4]

- feat: support direct PDF handling for Anthropic (#17506)

### `llama-index-llms-fireworks` [0.3.1]

- Deepseek-v3 is now supported by fireworks (#17518)

### `llama-index-llms-stepfun` [1.0.0]

- feat: add stepfun integrations (#17514)

### `llama-index-multi-modal-llms-gemini` [0.5.0]

- refactor: make GeminiMultiModal a thin wrapper around Gemini (#17501)

### `llama-index-postprocessor-longllmlingua` [0.4.0]

- Add longllmlingua2 integration (#17531)

### `llama-index-readers-web` [0.3.4]

- feat: Hyperbrowser Web Reader (#17489)

## [2025-01-15]

### `llama-index-core` [0.12.11]
Expand Down
35 changes: 35 additions & 0 deletions docs/docs/CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,40 @@
# ChangeLog

## [2025-01-20]

### `llama-index-core` [0.12.12]

- feat: add AgentWorkflow system to support single and multi-agent workflows (#17237)
- Fix image-path validation in ImageNode (#17558)

### `llama-index-indices-managed-vectara` [0.4.0]

- (breaking change) API Migration (#17545)

### `llama-index-llms-anthropic` [0.6.4]

- feat: support direct PDF handling for Anthropic (#17506)

### `llama-index-llms-fireworks` [0.3.1]

- Deepseek-v3 is now supported by fireworks (#17518)

### `llama-index-llms-stepfun` [1.0.0]

- feat: add stepfun integrations (#17514)

### `llama-index-multi-modal-llms-gemini` [0.5.0]

- refactor: make GeminiMultiModal a thin wrapper around Gemini (#17501)

### `llama-index-postprocessor-longllmlingua` [0.4.0]

- Add longllmlingua2 integration (#17531)

### `llama-index-readers-web` [0.3.4]

- feat: Hyperbrowser Web Reader (#17489)

## [2025-01-15]

### `llama-index-core` [0.12.11]
Expand Down
2 changes: 1 addition & 1 deletion llama-index-core/llama_index/core/__init__.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
"""Init file of LlamaIndex."""

__version__ = "0.12.11"
__version__ = "0.12.12"

import logging
from logging import NullHandler
Expand Down
2 changes: 1 addition & 1 deletion llama-index-core/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ name = "llama-index-core"
packages = [{include = "llama_index"}]
readme = "README.md"
repository = "https://github.com/run-llama/llama_index"
version = "0.12.11"
version = "0.12.12"

[tool.poetry.dependencies]
SQLAlchemy = {extras = ["asyncio"], version = ">=1.4.49"}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,13 +27,13 @@ exclude = ["**/BUILD"]
license = "MIT"
name = "llama-index-llms-gemini"
readme = "README.md"
version = "0.4.3"
version = "0.4.4"

[tool.poetry.dependencies]
python = ">=3.9,<4.0"
pillow = "^10.2.0"
google-generativeai = ">=0.5.2"
llama-index-core = "^0.12.0"
llama-index-core = "^0.12.12"

[tool.poetry.group.dev.dependencies]
ipython = "8.10.0"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ readme = "README.md"
version = "0.3.1"

[tool.poetry.dependencies]
python = ">=3.10,<4.0"
python = ">=3.10,<3.13"
ibm-watsonx-ai = ">=1.1.24"
pyarrow = "*"
llama-index-core = "^0.12.0"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,13 +27,13 @@ exclude = ["**/BUILD"]
license = "MIT"
name = "llama-index-multi-modal-llms-gemini"
readme = "README.md"
version = "0.4.1"
version = "0.5.0"

[tool.poetry.dependencies]
python = ">=3.9,<4.0"
llama-index-llms-gemini = "^0.4.0"
llama-index-llms-gemini = "^0.4.4"
pillow = "^10.2.0"
llama-index-core = "^0.12.0"
llama-index-core = "^0.12.12"

[tool.poetry.group.dev.dependencies]
ipython = "8.10.0"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@


class BM25Retriever(BaseRetriever):
"""A BM25 retriever that uses the BM25 algorithm to retrieve nodes.
r"""A BM25 retriever that uses the BM25 algorithm to retrieve nodes.
Args:
nodes (List[BaseNode], optional):
Expand All @@ -52,6 +52,10 @@ class BM25Retriever(BaseRetriever):
The objects to retrieve. Defaults to None.
object_map (dict, optional):
A map of object IDs to nodes. Defaults to None.
token_pattern (str, optional):
The token pattern to use. Defaults to (?u)\\b\\w\\w+\\b.
skip_stemming (bool, optional):
Whether to skip stemming. Defaults to False.
verbose (bool, optional):
Whether to show progress. Defaults to False.
"""
Expand All @@ -67,9 +71,13 @@ def __init__(
objects: Optional[List[IndexNode]] = None,
object_map: Optional[dict] = None,
verbose: bool = False,
skip_stemming: bool = False,
token_pattern: str = r"(?u)\b\w\w+\b",
) -> None:
self.stemmer = stemmer or Stemmer.Stemmer("english")
self.similarity_top_k = similarity_top_k
self.token_pattern = token_pattern
self.skip_stemming = skip_stemming

if existing_bm25 is not None:
self.bm25 = existing_bm25
Expand All @@ -83,7 +91,8 @@ def __init__(
corpus_tokens = bm25s.tokenize(
[node.get_content(metadata_mode=MetadataMode.EMBED) for node in nodes],
stopwords=language,
stemmer=self.stemmer,
stemmer=self.stemmer if not skip_stemming else None,
token_pattern=self.token_pattern,
show_progress=verbose,
)
self.bm25 = bm25s.BM25()
Expand All @@ -105,6 +114,8 @@ def from_defaults(
language: str = "en",
similarity_top_k: int = DEFAULT_SIMILARITY_TOP_K,
verbose: bool = False,
skip_stemming: bool = False,
token_pattern: str = r"(?u)\b\w\w+\b",
# deprecated
tokenizer: Optional[Callable[[str], List[str]]] = None,
) -> "BM25Retriever":
Expand Down Expand Up @@ -134,6 +145,8 @@ def from_defaults(
language=language,
similarity_top_k=similarity_top_k,
verbose=verbose,
skip_stemming=skip_stemming,
token_pattern=token_pattern,
)

def get_persist_args(self) -> Dict[str, Any]:
Expand Down Expand Up @@ -161,7 +174,10 @@ def from_persist_dir(cls, path: str, **kwargs: Any) -> "BM25Retriever":
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
query = query_bundle.query_str
tokenized_query = bm25s.tokenize(
query, stemmer=self.stemmer, show_progress=self._verbose
query,
stemmer=self.stemmer if not self.skip_stemming else None,
token_pattern=self.token_pattern,
show_progress=self._verbose,
)
indexes, scores = self.bm25.retrieve(
tokenized_query, k=self.similarity_top_k, show_progress=self._verbose
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
license = "MIT"
name = "llama-index-retrievers-bm25"
readme = "README.md"
version = "0.5.0"
version = "0.5.1"

[tool.poetry.dependencies]
python = ">=3.9,<4.0"
Expand Down
Loading

0 comments on commit 1183f72

Please sign in to comment.