diff --git a/llama_hub/llama_packs/rag_cli_local/README.md b/llama_hub/llama_packs/rag_cli_local/README.md
index c45ee2ab64..e567a4a028 100644
--- a/llama_hub/llama_packs/rag_cli_local/README.md
+++ b/llama_hub/llama_packs/rag_cli_local/README.md
@@ -25,14 +25,22 @@
 from llama_index.llama_pack import download_llama_pack
 
 # download and install dependencies
 download_llama_pack(
-    "LocalRAGCLIPack", "./local_rag_cli_pack"
+    "LocalRAGCLIPack", "./local_rag_cli_pack", skip_load=True
 )
 ```
 
-From here, you can use the pack. The most straightforward way is through the CLI.
+From here, you can use the pack. The most straightforward way is through the CLI. You can directly run base.py, or run the `setup_cli.sh` script.
 
-```python
-TODO
+```bash
+cd local_rag_cli_pack
+
+# option 1
+python base.py rag -h
+
+# option 2 - you may need sudo
+# default name is lcli_local
+sudo sh setup_cli.sh
+lcli_local rag -h
 ```
 
diff --git a/llama_hub/llama_packs/rag_cli_local/base.py b/llama_hub/llama_packs/rag_cli_local/base.py
index 566884a9a7..0c862a6eea 100644
--- a/llama_hub/llama_packs/rag_cli_local/base.py
+++ b/llama_hub/llama_packs/rag_cli_local/base.py
@@ -42,7 +42,7 @@ def init_local_rag_cli(
     llm = Ollama(model=llm_model_name, request_timeout=30.0)
     print("> LLM initialized")
     embed_model = HuggingFaceEmbedding(model_name=embed_model_name)
-    print('> Embedding model initialized')
+    print("> Embedding model initialized")
 
     ingestion_pipeline = IngestionPipeline(
         transformations=[SentenceSplitter(), embed_model],
@@ -76,7 +76,7 @@ def init_local_rag_cli(
         llm=llm,  # optional
         persist_dir=persist_dir,
         query_pipeline=query_pipeline,
-        verbose=False
+        verbose=False,
     )
 
     return rag_cli_instance
@@ -97,10 +97,10 @@ def __init__(
         self.llm_model_name = llm_model_name
         self.embed_model_name = embed_model_name
         self.rag_cli = init_local_rag_cli(
-            persist_dir=self.persist_dir, 
+            persist_dir=self.persist_dir,
             verbose=self.verbose,
             llm_model_name=self.llm_model_name,
-            embed_model_name=self.embed_model_name
+            embed_model_name=self.embed_model_name,
         )
 
     def get_modules(self) -> Dict[str, Any]:
@@ -114,4 +114,4 @@ def run(self, *args: Any, **kwargs: Any) -> Any:
 
 if __name__ == "__main__":
     rag_cli_instance = init_local_rag_cli()
-    rag_cli_instance.cli()
\ No newline at end of file
+    rag_cli_instance.cli()
diff --git a/pyproject.toml b/pyproject.toml
index 6f2e657d42..f4852224b7 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -16,7 +16,7 @@ include = [
 [tool.poetry.dependencies]
 # Updated Python version
 python = ">=3.8.1,<3.12"
-llama-index = ">=0.9.39"
+llama-index = ">=0.9.41"
 html2text = "*"
 psutil = "*"
 retrying = "*"
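
Beyond the shell entry points documented in the README hunk, the `__main__` block in base.py suggests a programmatic route as well. The sketch below is an illustration only and not part of this patch; it assumes the pack was downloaded to `./local_rag_cli_pack` (per the README example) and that this directory is importable, and it uses only names that appear in the diff (`init_local_rag_cli` and the `.cli()` call on the instance it returns).

```python
# Illustrative sketch -- mirrors the `if __name__ == "__main__"` block in base.py.
# Assumption: the pack lives in ./local_rag_cli_pack and base.py is importable
# from there; nothing beyond the two names used below is defined by this patch.
import sys

sys.path.append("./local_rag_cli_pack")  # hypothetical download location from the README

from base import init_local_rag_cli

# Build the local RagCLI instance (Ollama LLM + HuggingFace embeddings, per base.py).
rag_cli_instance = init_local_rag_cli()

# Hand off to the RagCLI command-line entrypoint, as base.py's __main__ block does.
rag_cli_instance.cli()
```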