
Commit

cr
jerryjliu committed Feb 2, 2024
1 parent e8c413f commit 884ffc2
Showing 3 changed files with 18 additions and 10 deletions.
16 changes: 12 additions & 4 deletions llama_hub/llama_packs/rag_cli_local/README.md
@@ -25,14 +25,22 @@ from llama_index.llama_pack import download_llama_pack
 
 # download and install dependencies
 download_llama_pack(
-    "LocalRAGCLIPack", "./local_rag_cli_pack"
+    "LocalRAGCLIPack", "./local_rag_cli_pack", skip_load=True
 )
 ```
 
-From here, you can use the pack. The most straightforward way is through the CLI.
+From here, you can use the pack. The most straightforward way is through the CLI. You can directly run base.py, or run the `setup_cli.sh` script.
 
-```python
-TODO
+```bash
+cd local_rag_cli_pack
+
+# option 1
+python base.py rag -h
+
+# option 2 - you may need sudo
+# default name is lcli_local
+sudo sh setup_cli.sh
+lcli_local rag -h
+
 ```
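For context on the README change: the new `skip_load=True` flag makes `download_llama_pack` fetch the pack's source files without importing the pack module, which fits the new workflow of running `base.py` or `setup_cli.sh` straight from disk. A minimal sketch of the download step as the updated README describes it:

```python
# sketch of the README's download step; assumes llama-index >= 0.9.41
from llama_index.llama_pack import download_llama_pack

download_llama_pack(
    "LocalRAGCLIPack",
    "./local_rag_cli_pack",
    skip_load=True,  # fetch files only; the pack is then driven via its CLI scripts
)
```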

10 changes: 5 additions & 5 deletions llama_hub/llama_packs/rag_cli_local/base.py
@@ -42,7 +42,7 @@ def init_local_rag_cli(
     llm = Ollama(model=llm_model_name, request_timeout=30.0)
     print("> LLM initialized")
     embed_model = HuggingFaceEmbedding(model_name=embed_model_name)
-    print('> Embedding model initialized')
+    print("> Embedding model initialized")
 
     ingestion_pipeline = IngestionPipeline(
         transformations=[SentenceSplitter(), embed_model],
Expand Down Expand Up @@ -76,7 +76,7 @@ def init_local_rag_cli(
llm=llm, # optional
persist_dir=persist_dir,
query_pipeline=query_pipeline,
verbose=False
verbose=False,
)
return rag_cli_instance
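For context on the two hunks above, `init_local_rag_cli` wires up the local models and ingestion pipeline roughly as follows (a sketch against the llama-index 0.9 API; the model names are illustrative stand-ins, not necessarily the pack's defaults):

```python
# minimal sketch of the local stack that init_local_rag_cli builds
from llama_index.llms import Ollama
from llama_index.embeddings import HuggingFaceEmbedding
from llama_index.ingestion import IngestionPipeline
from llama_index.node_parser import SentenceSplitter

# requires a running local Ollama server; model name is an assumed example
llm = Ollama(model="mistral", request_timeout=30.0)
# embedding model name is an assumed example
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-base-en-v1.5")

ingestion_pipeline = IngestionPipeline(
    transformations=[SentenceSplitter(), embed_model],
)
```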

@@ -97,10 +97,10 @@ def __init__(
         self.llm_model_name = llm_model_name
         self.embed_model_name = embed_model_name
         self.rag_cli = init_local_rag_cli(
-            persist_dir=self.persist_dir,
+            persist_dir=self.persist_dir,
             verbose=self.verbose,
             llm_model_name=self.llm_model_name,
-            embed_model_name=self.embed_model_name
+            embed_model_name=self.embed_model_name,
         )
 
     def get_modules(self) -> Dict[str, Any]:
@@ -114,4 +114,4 @@ def run(self, *args: Any, **kwargs: Any) -> Any:
 
 if __name__ == "__main__":
     rag_cli_instance = init_local_rag_cli()
-    rag_cli_instance.cli()
\ No newline at end of file
+    rag_cli_instance.cli()
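The unchanged `__main__` block also implies the pack can be driven from Python rather than the shell wrapper. A sketch, with the import path and class name assumed from the pack's id (`LocalRAGCLIPack`) and download directory:

```python
# hypothetical programmatic use, mirroring base.py's __main__ block;
# import path assumes the pack was downloaded to ./local_rag_cli_pack
from local_rag_cli_pack.base import LocalRAGCLIPack

pack = LocalRAGCLIPack(verbose=True)  # verbose kwarg assumed from the __init__ above
pack.rag_cli.cli()  # the rag_cli attribute is set in the __init__ shown above
```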
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -16,7 +16,7 @@ include = [
 [tool.poetry.dependencies]
 # Updated Python version
 python = ">=3.8.1,<3.12"
-llama-index = ">=0.9.39"
+llama-index = ">=0.9.41"
 html2text = "*"
 psutil = "*"
 retrying = "*"
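The only pyproject.toml change raises the llama-index floor from 0.9.39 to 0.9.41. A quick sanity check that an installed environment satisfies the new constraint (a sketch; `packaging` is assumed to be available, as pip itself depends on it):

```python
# verify the installed llama-index meets the new >=0.9.41 floor
from importlib.metadata import version
from packaging.version import Version

assert Version(version("llama-index")) >= Version("0.9.41"), "please upgrade llama-index"
```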