diff --git a/llama_hub/llama_packs/__init__.py b/llama_hub/llama_packs/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/llama_hub/llama_packs/arize_phoenix_query_engine/README.md b/llama_hub/llama_packs/arize_phoenix_query_engine/README.md
new file mode 100644
index 0000000000..33134c6758
--- /dev/null
+++ b/llama_hub/llama_packs/arize_phoenix_query_engine/README.md
@@ -0,0 +1,92 @@
+# Arize-Phoenix LlamaPack
+
+This LlamaPack instruments your LlamaIndex app for LLM tracing with [Phoenix](https://github.com/Arize-ai/phoenix), an open-source LLM observability library from [Arize AI](https://phoenix.arize.com/).
+
+## Install and Import Dependencies
+
+
+```python
+!pip install "arize-phoenix[llama-index]" llama-hub html2text
+```
+
+
+```python
+import os
+
+from llama_hub.llama_packs.arize_phoenix_query_engine import ArizePhoenixQueryEnginePack
+from llama_index.node_parser import SentenceSplitter
+from llama_index.readers import SimpleWebPageReader
+from tqdm.auto import tqdm
+```
+
+Configure your OpenAI API key.
+
+
+```python
+os.environ["OPENAI_API_KEY"] = "copy-your-openai-api-key-here"
+```
+
+Parse your documents into a list of nodes and pass them to your LlamaPack. This example uses nodes from a Paul Graham essay as input.
+
+
+```python
+documents = SimpleWebPageReader().load_data(
+ [
+ "https://raw.githubusercontent.com/jerryjliu/llama_index/adb054429f642cc7bbfcb66d4c232e072325eeab/examples/paul_graham_essay/data/paul_graham_essay.txt"
+ ]
+)
+parser = SentenceSplitter()
+nodes = parser.get_nodes_from_documents(documents)
+phoenix_pack = ArizePhoenixQueryEnginePack(nodes=nodes)
+```
+
+Run a set of queries via the pack's `run` method, which delegates to the underlying query engine.
+
+
+```python
+queries = [
+ "What did Paul Graham do growing up?",
+ "When and how did Paul Graham's mother die?",
+ "What, in Paul Graham's opinion, is the most distinctive thing about YC?",
+ "When and how did Paul Graham meet Jessica Livingston?",
+ "What is Bel, and when and where was it written?",
+]
+for query in tqdm(queries):
+ print("Query")
+ print("=====")
+ print(query)
+ print()
+ response = phoenix_pack.run(query)
+ print("Response")
+ print("========")
+ print(response)
+ print()
+```
+
+View your trace data in the Phoenix UI.
+
+
+```python
+phoenix_session_url = phoenix_pack.get_modules()["session_url"]
+print(f"Open the Phoenix UI to view your trace data: {phoenix_session_url}")
+```
+
+You can access the internals of the LlamaPack, including your Phoenix session and your query engine, via the `get_modules` method.
+
+
+```python
+phoenix_pack.get_modules()
+```
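+
+For example, you can grab the instrumented query engine out of `get_modules` and use it directly (a brief sketch):
+
+```python
+query_engine = phoenix_pack.get_modules()["query_engine"]
+response = query_engine.query("What is Bel?")
+print(response)
+```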
+
+Check out the [Phoenix documentation](https://docs.arize.com/phoenix/) for more information!
diff --git a/llama_hub/llama_packs/arize_phoenix_query_engine/__init__.py b/llama_hub/llama_packs/arize_phoenix_query_engine/__init__.py
new file mode 100644
index 0000000000..9065b83f52
--- /dev/null
+++ b/llama_hub/llama_packs/arize_phoenix_query_engine/__init__.py
@@ -0,0 +1,3 @@
+from llama_hub.llama_packs.arize_phoenix_query_engine.base import ArizePhoenixQueryEnginePack
+
+__all__ = ["ArizePhoenixQueryEnginePack"]
diff --git a/llama_hub/llama_packs/arize_phoenix_query_engine/arize_phoenix_llama_pack.ipynb b/llama_hub/llama_packs/arize_phoenix_query_engine/arize_phoenix_llama_pack.ipynb
new file mode 100644
index 0000000000..09add376c6
--- /dev/null
+++ b/llama_hub/llama_packs/arize_phoenix_query_engine/arize_phoenix_llama_pack.ipynb
@@ -0,0 +1,186 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "\n",
+ " \n",
+ "
\n",
+ "
\n",
+ " Docs\n",
+ " |\n",
+ " GitHub\n",
+ " |\n",
+ " Community\n",
+ "
\n",
+ "\n",
+ "Arize-Phoenix LlamaPack
\n",
+ "\n",
+ "This LlamaPack instruments your LlamaIndex app for LLM tracing with [Phoenix](https://github.com/Arize-ai/phoenix), an open-source LLM observability library from [Arize AI](https://phoenix.arize.com/)."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Install and Import Dependencies"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "!pip install \"arize-phoenix[llama-index]\" llama-hub html2text"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "\n",
+ "from llama_hub.llama_packs.arize_phoenix_query_engine import ArizePhoenixQueryEnginePack\n",
+ "from llama_index.node_parser import SentenceSplitter\n",
+ "from llama_index.readers import SimpleWebPageReader\n",
+ "from tqdm.auto import tqdm"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Configure your OpenAI API key."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "os.environ[\"OPENAI_API_KEY\"] = \"copy-your-openai-api-key-here\""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Parse your documents into a list of nodes and pass to your LlamaPack. In this example, use nodes from a Paul Graham essay as input."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "documents = SimpleWebPageReader().load_data(\n",
+ " [\n",
+ " \"https://raw.githubusercontent.com/jerryjliu/llama_index/adb054429f642cc7bbfcb66d4c232e072325eeab/examples/paul_graham_essay/data/paul_graham_essay.txt\"\n",
+ " ]\n",
+ ")\n",
+ "parser = SentenceSplitter()\n",
+ "nodes = parser.get_nodes_from_documents(documents)\n",
+ "phoenix_pack = ArizePhoenixQueryEnginePack(nodes=nodes)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Run a set of queries via the pack's `run` method, which delegates to the underlying query engine."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "queries = [\n",
+ " \"What did Paul Graham do growing up?\",\n",
+ " \"When and how did Paul Graham's mother die?\",\n",
+ " \"What, in Paul Graham's opinion, is the most distinctive thing about YC?\",\n",
+ " \"When and how did Paul Graham meet Jessica Livingston?\",\n",
+ " \"What is Bel, and when and where was it written?\",\n",
+ "]\n",
+ "for query in tqdm(queries):\n",
+ " print(\"Query\")\n",
+ " print(\"=====\")\n",
+ " print(query)\n",
+ " print()\n",
+ " response = phoenix_pack.run(query)\n",
+ " print(\"Response\")\n",
+ " print(\"========\")\n",
+ " print(response)\n",
+ " print()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "View your trace data in the Phoenix UI."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "phoenix_session_url = phoenix_pack.get_modules()[\"session_url\"]\n",
+ "print(f\"Open the Phoenix UI to view your trace data: {phoenix_session_url}\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "You can access the internals of the LlamaPack, including your Phoenix session and your query engine, via the `get_modules` method."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "phoenix_pack.get_modules()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Check out the [Phoenix documentation](https://docs.arize.com/phoenix/) for more information!"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "llmapps",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.12"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/llama_hub/llama_packs/arize_phoenix_query_engine/base.py b/llama_hub/llama_packs/arize_phoenix_query_engine/base.py
new file mode 100755
index 0000000000..cd452aec98
--- /dev/null
+++ b/llama_hub/llama_packs/arize_phoenix_query_engine/base.py
@@ -0,0 +1,73 @@
+"""
+Arize-Phoenix LlamaPack.
+"""
+
+from typing import TYPE_CHECKING, Any, Dict, List
+
+from llama_index import set_global_handler
+from llama_index.indices.vector_store import VectorStoreIndex
+from llama_index.llama_pack.base import BaseLlamaPack
+from llama_index.schema import TextNode
+
+if TYPE_CHECKING:
+ from phoenix import Session as PhoenixSession
+
+
+class ArizePhoenixQueryEnginePack(BaseLlamaPack):
+ """
+    The Arize-Phoenix LlamaPack shows how to instrument your LlamaIndex query
+ engine with tracing. It launches Phoenix in the background, builds an index
+ over an input list of nodes, and instantiates and instruments a query engine
+ over that index so that trace data from each query is sent to Phoenix.
+
+ Note: Using this LlamaPack requires that your OpenAI API key is set via the
+ OPENAI_API_KEY environment variable.
+ """
+
+ def __init__(
+ self,
+ nodes: List[TextNode],
+ **kwargs: Any,
+ ) -> None:
+ """
+ Initializes a new instance of ArizePhoenixQueryEnginePack.
+
+ Args:
+ nodes (List[TextNode]): An input list of nodes over which the index
+ will be built.
+ """
+ try:
+ import phoenix as px
+ except ImportError:
+ raise ImportError(
+ "The arize-phoenix package could not be found. "
+ "Please install with `pip install arize-phoenix`."
+ )
+ self._session: "PhoenixSession" = px.launch_app()
+ set_global_handler("arize_phoenix")
+ self._index = VectorStoreIndex(nodes, **kwargs)
+ self._query_engine = self._index.as_query_engine()
+
+ def get_modules(self) -> Dict[str, Any]:
+ """
+ Returns a dictionary containing the internals of the LlamaPack.
+
+ Returns:
+ Dict[str, Any]: A dictionary containing the internals of the
+ LlamaPack.
+ """
+ return {
+ "session": self._session,
+ "session_url": self._session.url,
+ "index": self._index,
+ "query_engine": self._query_engine,
+ }
+
+ def run(self, *args: Any, **kwargs: Any) -> Any:
+ """
+ Runs queries against the index.
+
+ Returns:
+ Any: A response from the query engine.
+ """
+ return self._query_engine.query(*args, **kwargs)
diff --git a/llama_hub/llama_packs/arize_phoenix_query_engine/requirements.txt b/llama_hub/llama_packs/arize_phoenix_query_engine/requirements.txt
new file mode 100644
index 0000000000..ae222faa82
--- /dev/null
+++ b/llama_hub/llama_packs/arize_phoenix_query_engine/requirements.txt
@@ -0,0 +1 @@
+arize-phoenix
diff --git a/llama_hub/llama_packs/chroma_autoretrieval/README.md b/llama_hub/llama_packs/chroma_autoretrieval/README.md
new file mode 100644
index 0000000000..073e2e3ba2
--- /dev/null
+++ b/llama_hub/llama_packs/chroma_autoretrieval/README.md
@@ -0,0 +1,69 @@
+# Chroma AutoRetrieval Pack
+
+This LlamaPack inserts your data into Chroma and instantiates an auto-retriever, which uses the LLM at runtime to set metadata filters, top-k, and the query string.
+
+## Usage
+
+You can download the pack to the `./chroma_pack` directory:
+
+```python
+from llama_index.llama_packs import download_llama_pack
+
+# download and install dependencies
+ChromaAutoretrievalPack = download_llama_pack(
+ "ChromaAutoretrievalPack", "./chroma_pack"
+)
+```
+
+From here, you can use the pack, or inspect and modify the pack in `./chroma_pack`.
+
+Then, you can set up the pack like so:
+
+```python
+# setup pack arguments
+from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo
+
+vector_store_info = VectorStoreInfo(
+ content_info="brief biography of celebrities",
+ metadata_info=[
+ MetadataInfo(
+ name="category",
+ type="str",
+ description=(
+                "Category of the celebrity, one of [Sports, Entertainment, Business, Music]"
+ ),
+ ),
+ ],
+)
+
+import chromadb
+client = chromadb.EphemeralClient()
+
+nodes = [...]
+
+# create the pack
+chroma_pack = ChromaAutoretrievalPack(
+ collection_name="test",
+    vector_store_info=vector_store_info,
+ nodes=nodes,
+ client=client
+)
+```
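+
+For reference, the nodes passed above can be plain `TextNode` objects whose metadata matches the schema declared in `vector_store_info`. The snippet below is purely illustrative; the biographies and category values are made up:
+
+```python
+from llama_index.schema import TextNode
+
+# hypothetical nodes tagged with the "category" metadata field
+nodes = [
+    TextNode(
+        text="Michael Jordan is a retired professional basketball player.",
+        metadata={"category": "Sports"},
+    ),
+    TextNode(
+        text="Rihanna is a singer, songwriter, and businesswoman from Barbados.",
+        metadata={"category": "Music"},
+    ),
+]
+```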
+
+The `run()` function is a light wrapper around `query_engine.query()`.
+
+```python
+response = chroma_pack.run("Tell me about a music celebrity.")
+```
+
+You can also use modules individually.
+
+```python
+# use the retriever
+retriever = chroma_pack.retriever
+nodes = retriever.retrieve("query_str")
+
+# use the query engine
+query_engine = chroma_pack.query_engine
+response = query_engine.query("query_str")
+```
\ No newline at end of file
diff --git a/llama_hub/llama_packs/chroma_autoretrieval/__init__.py b/llama_hub/llama_packs/chroma_autoretrieval/__init__.py
new file mode 100644
index 0000000000..64d641ff3e
--- /dev/null
+++ b/llama_hub/llama_packs/chroma_autoretrieval/__init__.py
@@ -0,0 +1,3 @@
+from llama_hub.llama_packs.chroma_autoretrieval.base import ChromaAutoretrievalPack
+
+__all__ = ["ChromaAutoretrievalPack"]
diff --git a/llama_hub/llama_packs/chroma_autoretrieval/base.py b/llama_hub/llama_packs/chroma_autoretrieval/base.py
new file mode 100755
index 0000000000..878f59f29f
--- /dev/null
+++ b/llama_hub/llama_packs/chroma_autoretrieval/base.py
@@ -0,0 +1,67 @@
+"""Chroma Auto-retrieval Pack."""
+
+
+from typing import Any, Dict, List, Optional
+
+from llama_index.indices.vector_store import VectorStoreIndex
+from llama_index.indices.vector_store.retrievers import (
+ VectorIndexAutoRetriever,
+)
+from llama_index.llama_pack.base import BaseLlamaPack
+from llama_index.query_engine import RetrieverQueryEngine
+from llama_index.schema import TextNode
+from llama_index.storage.storage_context import StorageContext
+from llama_index.vector_stores.chroma import ChromaVectorStore
+from llama_index.vector_stores.types import VectorStoreInfo
+
+
+class ChromaAutoretrievalPack(BaseLlamaPack):
+ """Chroma auto-retrieval pack."""
+
+ def __init__(
+ self,
+ collection_name: str,
+ vector_store_info: VectorStoreInfo,
+ nodes: Optional[List[TextNode]] = None,
+ client: Optional[Any] = None,
+ **kwargs: Any,
+ ) -> None:
+ """Init params."""
+ import chromadb
+
+ chroma_client = client or chromadb.EphemeralClient()
+ chroma_collection = chroma_client.get_or_create_collection(collection_name)
+
+ self._vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
+
+ if nodes is not None:
+ self._storage_context = StorageContext.from_defaults(
+ vector_store=self._vector_store
+ )
+ self._index = VectorStoreIndex(nodes, storage_context=self._storage_context, **kwargs)
+ else:
+ self._index = VectorStoreIndex.from_vector_store(self._vector_store, **kwargs)
+ self._storage_context = self._index.storage_context
+
+ self.retriever = VectorIndexAutoRetriever(
+ self._index, vector_store_info=vector_store_info
+ )
+ self.query_engine = RetrieverQueryEngine(self.retriever)
+
+ def get_modules(self) -> Dict[str, Any]:
+ """Get modules."""
+ return {
+ "vector_store": self._vector_store,
+ "storage_context": self._storage_context,
+ "index": self._index,
+ "retriever": self.retriever,
+ "query_engine": self.query_engine,
+ }
+
+ def retrieve(self, query_str: str) -> Any:
+ """Retrieve."""
+ return self.retriever.retrieve(query_str)
+
+ def run(self, *args: Any, **kwargs: Any) -> Any:
+ """Run the pipeline."""
+ return self.query_engine.query(*args, **kwargs)
diff --git a/llama_hub/llama_packs/chroma_autoretrieval/requirements.txt b/llama_hub/llama_packs/chroma_autoretrieval/requirements.txt
new file mode 100644
index 0000000000..99812b187b
--- /dev/null
+++ b/llama_hub/llama_packs/chroma_autoretrieval/requirements.txt
@@ -0,0 +1 @@
+chromadb
diff --git a/llama_hub/llama_packs/deeplake_deepmemory_retriever/README.md b/llama_hub/llama_packs/deeplake_deepmemory_retriever/README.md
new file mode 100644
index 0000000000..c6a9970395
--- /dev/null
+++ b/llama_hub/llama_packs/deeplake_deepmemory_retriever/README.md
@@ -0,0 +1,52 @@
+# DeepLake DeepMemory Pack
+
+This LlamaPack inserts your data into DeepLake and instantiates a [Deep Memory](https://docs.activeloop.ai/performance-features/deep-memory) retriever, which uses Deep Memory at runtime to improve RAG retrieval accuracy (recall).
+
+## Usage
+
+You can download the pack to a `./deepmemory_pack` directory:
+
+```python
+from llama_hub.llama_packs import download_llama_pack
+
+# download and install dependencies
+DeepMemoryRetriever = download_llama_pack(
+ "DeepMemoryRetrieverPack", "./deepmemory_pack"
+)
+```
+
+From here, you can use the pack, or inspect and modify the pack in `./deepmemory_pack`.
+
+Then, you can set up the pack like so:
+
+```python
+# setup pack arguments
+nodes = [...]
+
+# create the pack
+deepmemory_pack = DeepMemoryRetriever(
+ dataset_path="llama_index",
+ overwrite=False,
+ nodes=nodes,
+)
+```
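+
+If you don't already have nodes, one way to build them is with a reader and a node parser (an illustrative sketch; the `./data` directory is a placeholder):
+
+```python
+from llama_index import SimpleDirectoryReader
+from llama_index.node_parser import SentenceSplitter
+
+documents = SimpleDirectoryReader("./data").load_data()
+nodes = SentenceSplitter().get_nodes_from_documents(documents)
+```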
+
+The `run()` function is a light wrapper around `query_engine.query()`.
+
+```python
+response = deepmemory_pack.run("Tell me about a music celebrity.")
+```
+
+You can also use modules individually.
+
+```python
+# use the retriever
+retriever = deepmemory_pack.retriever
+nodes = retriever.retrieve("query_str")
+
+# use the query engine
+query_engine = deepmemory_pack.query_engine
+response = query_engine.query("query_str")
+```
\ No newline at end of file
diff --git a/llama_hub/llama_packs/deeplake_deepmemory_retriever/__init__.py b/llama_hub/llama_packs/deeplake_deepmemory_retriever/__init__.py
new file mode 100644
index 0000000000..1bb56f37b2
--- /dev/null
+++ b/llama_hub/llama_packs/deeplake_deepmemory_retriever/__init__.py
@@ -0,0 +1,4 @@
+from llama_hub.llama_packs.deeplake_deepmemory_retriever.base import DeepMemoryRetrieverPack
+
+
+__all__ = ["DeepMemoryRetrieverPack"]
diff --git a/llama_hub/llama_packs/deeplake_deepmemory_retriever/base.py b/llama_hub/llama_packs/deeplake_deepmemory_retriever/base.py
new file mode 100644
index 0000000000..fba81b31df
--- /dev/null
+++ b/llama_hub/llama_packs/deeplake_deepmemory_retriever/base.py
@@ -0,0 +1,66 @@
+"""DeepMemory Retrieval Pack."""
+
+
+from typing import Any, Dict, List, Optional
+
+from llama_index.indices.vector_store import VectorStoreIndex
+from llama_index.llama_pack.base import BaseLlamaPack
+from llama_index.query_engine import RetrieverQueryEngine
+from llama_index.schema import TextNode
+from llama_index.storage.storage_context import StorageContext
+from llama_index.vector_stores.deeplake import DeepLakeVectorStore
+
+
+class DeepMemoryRetrieverPack(BaseLlamaPack):
+ """DeepMemory retriever pack."""
+
+ def __init__(
+ self,
+ dataset_path: str = "llama_index",
+ token: Optional[str] = None,
+ read_only: Optional[bool] = False,
+ overwrite: bool = False,
+ verbose: bool = True,
+ nodes: Optional[List[TextNode]] = None,
+ top_k: int = 4,
+ **kwargs: Any,
+ ):
+ self._vector_store = DeepLakeVectorStore(
+ dataset_path=dataset_path,
+ token=token,
+ read_only=read_only,
+ overwrite=overwrite,
+ verbose=verbose,
+ )
+
+ if nodes is not None:
+ self._storage_context = StorageContext.from_defaults(
+ vector_store=self._vector_store
+ )
+ self._index = VectorStoreIndex(nodes, storage_context=self._storage_context, **kwargs)
+ else:
+ self._index = VectorStoreIndex.from_vector_store(self._vector_store, **kwargs)
+ self._storage_context = self._index.storage_context
+
+ self.retriever = self._index.as_retriever(
+ similarity_top_k=top_k, vector_store_kwargs={"deep_memory": True}
+ )
+ self.query_engine = RetrieverQueryEngine.from_args(retriever=self.retriever)
+
+ def get_modules(self) -> Dict[str, Any]:
+ """Get modules."""
+ return {
+ "vector_store": self._vector_store,
+ "storage_context": self._storage_context,
+ "index": self._index,
+ "retriever": self.retriever,
+ "query_engine": self.query_engine,
+ }
+
+ def retrieve(self, query_str: str) -> Any:
+ """Retrieve."""
+ return self.retriever.retrieve(query_str)
+
+ def run(self, *args: Any, **kwargs: Any) -> Any:
+ """Run the pipeline."""
+ return self.query_engine.query(*args, **kwargs)
\ No newline at end of file
diff --git a/llama_hub/llama_packs/deeplake_deepmemory_retriever/requirements.txt b/llama_hub/llama_packs/deeplake_deepmemory_retriever/requirements.txt
new file mode 100644
index 0000000000..bd1ea014dd
--- /dev/null
+++ b/llama_hub/llama_packs/deeplake_deepmemory_retriever/requirements.txt
@@ -0,0 +1 @@
+deeplake
\ No newline at end of file
diff --git a/llama_hub/llama_packs/deeplake_multimodal_retrieval/README.md b/llama_hub/llama_packs/deeplake_multimodal_retrieval/README.md
new file mode 100644
index 0000000000..d790d0b3ca
--- /dev/null
+++ b/llama_hub/llama_packs/deeplake_multimodal_retrieval/README.md
@@ -0,0 +1,53 @@
+# DeepLake Multimodal Retrieval Pack
+
+This LlamaPack inserts your multimodal data (texts and images) into DeepLake and instantiates a DeepLake retriever, which uses CLIP to embed images and GPT-4V at query time.
+
+## Usage
+
+You can download the pack to a `./deeplake_multimodal_pack` directory:
+
+```python
+from llama_hub.llama_packs import download_llama_pack
+
+# download and install dependencies
+DeepLakeMultimodalRetriever = download_llama_pack(
+ "DeepLakeMultimodalRetrieverPack", "./deeplake_multimodal_pack"
+)
+```
+
+From here, you can use the pack, or inspect and modify the pack in `./deeplake_multimodal_pack`.
+
+Then, you can set up the pack like so:
+
+```python
+# setup pack arguments
+
+# collection of image and text nodes
+nodes = [...]
+
+# create the pack
+deeplake_pack = DeepLakeMultimodalRetriever(
+ nodes=nodes,
+ dataset_path="llama_index",
+ overwrite=False
+)
+```
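+
+As a rough illustration of what a mixed collection of nodes can look like (the text and image path below are placeholders):
+
+```python
+from llama_index.schema import ImageNode, TextNode
+
+nodes = [
+    TextNode(text="A caption describing the artwork."),
+    ImageNode(image_path="./images/artwork.jpg"),
+]
+```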
+
+The `run()` function is a light wrapper around `SimpleMultiModalQueryEngine.query()`.
+
+```python
+response = deeplake_pack.run("Tell me about a music celebrity.")
+```
+
+You can also use modules individually.
+
+```python
+# use the retriever
+retriever = deeplake_pack.retriever
+nodes = retriever.retrieve("query_str")
+
+# use the query engine
+query_engine = deeplake_pack.query_engine
+response = query_engine.query("query_str")
+```
\ No newline at end of file
diff --git a/llama_hub/llama_packs/deeplake_multimodal_retrieval/__init__.py b/llama_hub/llama_packs/deeplake_multimodal_retrieval/__init__.py
new file mode 100644
index 0000000000..d3bcd3cafa
--- /dev/null
+++ b/llama_hub/llama_packs/deeplake_multimodal_retrieval/__init__.py
@@ -0,0 +1,4 @@
+from llama_hub.llama_packs.deeplake_multimodal_retrieval.base import DeepLakeMultimodalRetrieverPack
+
+
+__all__ = ["DeepLakeMultimodalRetrieverPack"]
diff --git a/llama_hub/llama_packs/deeplake_multimodal_retrieval/base.py b/llama_hub/llama_packs/deeplake_multimodal_retrieval/base.py
new file mode 100644
index 0000000000..213a14206f
--- /dev/null
+++ b/llama_hub/llama_packs/deeplake_multimodal_retrieval/base.py
@@ -0,0 +1,79 @@
+"""DeepLake multimodal Retrieval Pack."""
+
+
+from typing import Any, Dict, List, Optional
+
+from llama_index.llama_pack.base import BaseLlamaPack
+from llama_index.indices.multi_modal import MultiModalVectorStoreIndex
+from llama_index.query_engine import SimpleMultiModalQueryEngine
+from llama_index.schema import BaseNode
+from llama_index.storage.storage_context import StorageContext
+from llama_index.vector_stores.deeplake import DeepLakeVectorStore
+
+
+class DeepLakeMultimodalRetrieverPack(BaseLlamaPack):
+ """DeepLake Multimodal retriever pack."""
+
+ def __init__(
+ self,
+ dataset_path: str = "llama_index",
+ token: Optional[str] = None,
+ read_only: Optional[bool] = False,
+ overwrite: bool = False,
+ verbose: bool = True,
+ nodes: Optional[List[BaseNode]] = None,
+ top_k: int = 4,
+ **kwargs: Any,
+ ):
+ # text vector store
+ self._text_vectorstore = DeepLakeVectorStore(
+ dataset_path=dataset_path+"_text",
+ token=token,
+ read_only=read_only,
+ overwrite=overwrite,
+ verbose=verbose,
+ )
+
+ # image vector store
+ self._image_vectorstore = DeepLakeVectorStore(
+ dataset_path=dataset_path+"_image",
+ token=token,
+ read_only=read_only,
+ overwrite=overwrite,
+ verbose=verbose,
+ )
+
+ if nodes is not None:
+ self._storage_context = StorageContext.from_defaults(vector_store=self._text_vectorstore)
+ self._index = MultiModalVectorStoreIndex(
+ nodes, storage_context=self._storage_context, image_vector_store=self._image_vectorstore,
+ )
+ else:
+ self._storage_context = StorageContext.from_defaults(vector_store=self._text_vectorstore)
+ self._index = MultiModalVectorStoreIndex.from_vector_store(
+ self._text_vectorstore, image_vector_store=self._image_vectorstore,
+ )
+ self.retriever = self._index.as_retriever(
+ similarity_top_k=top_k, vector_store_kwargs={"deep_memory": True}
+ )
+ self.query_engine = SimpleMultiModalQueryEngine(self.retriever)
+
+ def get_modules(self) -> Dict[str, Any]:
+ """Get modules."""
+ return {
+ "text_vectorstore": self._text_vectorstore,
+ "image_vectorstore": self._image_vectorstore,
+ "storage_context": self._storage_context,
+ "index": self._index,
+ "retriever": self.retriever,
+ "query_engine": self.query_engine,
+ }
+
+ def retrieve(self, query_str: str) -> Any:
+ """Retrieve."""
+ return self.query_engine.retrieve(query_str)
+
+ def run(self, *args: Any, **kwargs: Any) -> Any:
+ """Run the pipeline."""
+ return self.query_engine.query(*args, **kwargs)
diff --git a/llama_hub/llama_packs/deeplake_multimodal_retrieval/requirements.txt b/llama_hub/llama_packs/deeplake_multimodal_retrieval/requirements.txt
new file mode 100644
index 0000000000..0e678cadde
--- /dev/null
+++ b/llama_hub/llama_packs/deeplake_multimodal_retrieval/requirements.txt
@@ -0,0 +1,5 @@
+deeplake
+Pillow
+torch
+torchvision
+clip @ git+https://github.com/openai/CLIP.git
\ No newline at end of file
diff --git a/llama_hub/llama_packs/gmail_openai_agent/README.md b/llama_hub/llama_packs/gmail_openai_agent/README.md
new file mode 100644
index 0000000000..2f829184bb
--- /dev/null
+++ b/llama_hub/llama_packs/gmail_openai_agent/README.md
@@ -0,0 +1,39 @@
+# Gmail OpenAI Agent Pack
+
+Create an OpenAI agent pre-loaded with a tool to interact with Gmail. The tool used is the [Gmail LlamaHub tool](https://llamahub.ai/l/tools-gmail).
+
+## Usage
+
+You can download the pack to the `./gmail_pack` directory:
+
+```python
+from llama_index.llama_packs import download_llama_pack
+
+# download and install dependencies
+GmailOpenAIAgentPack = download_llama_pack(
+ "GmailOpenAIAgentPack", "./gmail_pack"
+)
+
+gmail_agent_pack = GmailOpenAIAgentPack()
+```
+
+From here, you can use the pack, or inspect and modify the pack in `./gmail_pack`.
+
+The `run()` function is a light wrapper around `agent.chat()`.
+
+```python
+response = gmail_agent_pack.run("What is my most recent email?")
+```
+
+You can also use modules individually.
+
+```python
+# Use the agent
+agent = gmail_agent_pack.agent
+response = agent.chat("What is my most recent email?")
+
+# Use the tool spec in another agent
+from llama_index.agent import ReActAgent
+tool_spec = gmail_agent_pack.tool_spec
+agent = ReActAgent.from_tools(tool_spec.to_tool_list())
+```
diff --git a/llama_hub/llama_packs/gmail_openai_agent/__init__.py b/llama_hub/llama_packs/gmail_openai_agent/__init__.py
new file mode 100644
index 0000000000..2112b2d72b
--- /dev/null
+++ b/llama_hub/llama_packs/gmail_openai_agent/__init__.py
@@ -0,0 +1,3 @@
+from llama_hub.llama_packs.gmail_openai_agent.base import GmailOpenAIAgentPack
+
+__all__ = ["GmailOpenAIAgentPack"]
diff --git a/llama_hub/llama_packs/gmail_openai_agent/base.py b/llama_hub/llama_packs/gmail_openai_agent/base.py
new file mode 100755
index 0000000000..702b465df2
--- /dev/null
+++ b/llama_hub/llama_packs/gmail_openai_agent/base.py
@@ -0,0 +1,27 @@
+"""LlamaPack class."""
+
+
+from typing import Any, Dict, Optional
+
+from llama_index.agent.openai_agent import OpenAIAgent
+from llama_index.llama_pack.base import BaseLlamaPack
+
+
+class GmailOpenAIAgentPack(BaseLlamaPack):
+    def __init__(self, gmail_tool_kwargs: Optional[Dict[str, Any]] = None) -> None:
+ """Init params."""
+ try:
+ from llama_hub.tools.gmail.base import GmailToolSpec
+ except ImportError:
+ raise ImportError("llama_hub not installed.")
+
+        self.tool_spec = GmailToolSpec(**(gmail_tool_kwargs or {}))
+ self.agent = OpenAIAgent.from_tools(self.tool_spec.to_tool_list())
+
+ def get_modules(self) -> Dict[str, Any]:
+ """Get modules."""
+ return {"gmail_tool": self.tool_spec, "agent": self.agent}
+
+ def run(self, *args: Any, **kwargs: Any) -> Any:
+ """Run the pipeline."""
+ return self.agent.chat(*args, **kwargs)
diff --git a/llama_hub/llama_packs/gmail_openai_agent/requirements.txt b/llama_hub/llama_packs/gmail_openai_agent/requirements.txt
new file mode 100644
index 0000000000..d82f791c51
--- /dev/null
+++ b/llama_hub/llama_packs/gmail_openai_agent/requirements.txt
@@ -0,0 +1 @@
+llama-hub
diff --git a/llama_hub/llama_packs/library.json b/llama_hub/llama_packs/library.json
new file mode 100644
index 0000000000..c4f4e9093c
--- /dev/null
+++ b/llama_hub/llama_packs/library.json
@@ -0,0 +1,32 @@
+{
+ "GmailOpenAIAgentPack": {
+ "id": "llama_packs/gmail_openai_agent",
+ "author": "logan-markewich",
+ "keywords": ["math", "science", "research"]
+ },
+ "ChromaAutoretrievalPack": {
+ "id": "llama_packs/chroma_autoretrieval",
+ "author": "logan-markewich",
+ "keywords": ["chroma", "retrieval", "vector"]
+ },
+ "ZephyrQueryEnginePack": {
+ "id": "llama_packs/zephyr_query_engine",
+ "author": "logan-markewich",
+ "keywords": ["zephyr", "local", "query", "engine", "index", "huggingface"]
+ },
+ "LlavaCompletionPack": {
+ "id": "llama_packs/llava_completion",
+ "author": "wenqiglantz",
+ "keywords": ["llava", "multimodal", "image"]
+ },
+ "DeepMemoryRetrieverPack": {
+ "id": "llama_packs/deeplake_deepmemory_retriever",
+ "author": "AdkSarsen",
+ "keywords": ["deeplake", "deepmemory", "retriever"]
+ },
+ "DeepLakeMultimodalRetrieverPack": {
+ "id": "llama_packs/deeplake_multimodal_retrieval",
+ "author": "AdkSarsen",
+ "keywords": ["deeplake", "multimodal", "retriever"]
+ }
+}
diff --git a/llama_hub/llama_packs/llava_completion/README.md b/llama_hub/llama_packs/llava_completion/README.md
new file mode 100644
index 0000000000..385e717b3a
--- /dev/null
+++ b/llama_hub/llama_packs/llava_completion/README.md
@@ -0,0 +1,41 @@
+# LLaVA Completion Pack
+
+This LlamaPack creates the LLaVA multimodal model, and runs its `complete` endpoint to execute queries.
+
+## Usage
+
+You can download the pack to a `./llava_pack` directory:
+
+```python
+from llama_index.llama_packs import download_llama_pack
+
+# download and install dependencies
+LlavaCompletionPack = download_llama_pack(
+ "LlavaCompletionPack", "./llava_pack"
+)
+```
+
+From here, you can use the pack, or inspect and modify the pack in `./llava_pack`.
+
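+The pack checks for a Replicate API token at construction time, so make sure it is set in your environment first (the token value below is a placeholder):
+
+```python
+import os
+
+os.environ["REPLICATE_API_TOKEN"] = "<your-replicate-api-token>"
+```
+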
+Then, you can set up the pack like so:
+
+```python
+# create the pack
+llava_pack = LlavaCompletionPack(
+ image_url="./images/image1.jpg"
+)
+```
+
+The `run()` function is a light wrapper around `llm.complete()`.
+
+```python
+response = llava_pack.run("What dinner can I cook based on the picture of the food in the fridge?")
+```
+
+You can also use modules individually.
+
+```python
+# call llm.complete() directly
+llm = llava_pack.llm
+response = llm.complete("query_str")
+```
\ No newline at end of file
diff --git a/llama_hub/llama_packs/llava_completion/__init__.py b/llama_hub/llama_packs/llava_completion/__init__.py
new file mode 100644
index 0000000000..d20a07ce07
--- /dev/null
+++ b/llama_hub/llama_packs/llava_completion/__init__.py
@@ -0,0 +1,3 @@
+from llama_hub.llama_packs.llava_completion.base import LlavaCompletionPack
+
+__all__ = ["LlavaCompletionPack"]
diff --git a/llama_hub/llama_packs/llava_completion/base.py b/llama_hub/llama_packs/llava_completion/base.py
new file mode 100644
index 0000000000..cde63bf43c
--- /dev/null
+++ b/llama_hub/llama_packs/llava_completion/base.py
@@ -0,0 +1,39 @@
+"""Llava Completion Pack."""
+
+
+from typing import Any, Dict
+
+from llama_index.llama_pack.base import BaseLlamaPack
+from llama_index.llms import Replicate
+
+class LlavaCompletionPack(BaseLlamaPack):
+ """Llava Completion pack."""
+
+ def __init__(
+ self,
+ image_url: str,
+ **kwargs: Any,
+ ) -> None:
+ """Init params."""
+ import os
+
+ if not os.environ.get("REPLICATE_API_TOKEN", None):
+ raise ValueError("Replicate API Token is missing or blank.")
+
+ self.image_url = image_url
+
+ self.llm = Replicate(
+ model="yorickvp/llava-13b:2facb4a474a0462c15041b78b1ad70952ea46b5ec6ad29583c0b29dbd4249591",
+ image=self.image_url,
+ )
+
+ def get_modules(self) -> Dict[str, Any]:
+ """Get modules."""
+ return {
+ "llm": self.llm,
+ "image_url": self.image_url,
+ }
+
+ def run(self, *args: Any, **kwargs: Any) -> Any:
+ """Run the pipeline."""
+ return self.llm.complete(*args, **kwargs)
diff --git a/llama_hub/llama_packs/llava_completion/requirements.txt b/llama_hub/llama_packs/llava_completion/requirements.txt
new file mode 100644
index 0000000000..7d1a9c99e7
--- /dev/null
+++ b/llama_hub/llama_packs/llava_completion/requirements.txt
@@ -0,0 +1 @@
+Replicate
diff --git a/llama_hub/llama_packs/zephyr_query_engine/README.md b/llama_hub/llama_packs/zephyr_query_engine/README.md
new file mode 100644
index 0000000000..bd32e748c4
--- /dev/null
+++ b/llama_hub/llama_packs/zephyr_query_engine/README.md
@@ -0,0 +1,40 @@
+# Zephyr Query Engine Pack
+
+Create a query engine using completely local and private models -- `HuggingFaceH4/zephyr-7b-beta` for the LLM and `BAAI/bge-base-en-v1.5` for embeddings.
+
+## Usage
+
+You can download the pack to the `./zephyr_pack` directory:
+
+```python
+from llama_index.llama_packs import download_llama_pack
+
+# download and install dependencies
+ZephyrQueryEnginePack = download_llama_pack(
+ "ZephyrQueryEnginePack", "./zephyr_pack"
+)
+
+# You can use any llama-hub loader to get documents!
+zephyr_pack = ZephyrQueryEnginePack(documents)
+```
+
+From here, you can use the pack, or inspect and modify the pack in `./zephyr_pack`.
+
+The `run()` function is a light wrapper around `index.as_query_engine().query()`.
+
+```python
+response = zephyr_pack.run("What did the author do growing up?", similarity_top_k=2)
+```
+
+You can also use modules individually.
+
+```python
+# Use the llm
+llm = zephyr_pack.llm
+response = llm.complete("What is HuggingFace?")
+
+# Use the index directly
+index = zephyr_pack.index
+query_engine = index.as_query_engine()
+retriever = index.as_retriever()
+```
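+
+For example, the retriever can be used on its own (a brief sketch):
+
+```python
+nodes = retriever.retrieve("What did the author do growing up?")
+print(nodes[0].node.get_content())
+```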
diff --git a/llama_hub/llama_packs/zephyr_query_engine/__init__.py b/llama_hub/llama_packs/zephyr_query_engine/__init__.py
new file mode 100644
index 0000000000..0946148a2b
--- /dev/null
+++ b/llama_hub/llama_packs/zephyr_query_engine/__init__.py
@@ -0,0 +1,3 @@
+from llama_hub.llama_packs.zephyr_query_engine.base import ZephyrQueryEnginePack
+
+__all__ = ["ZephyrQueryEnginePack"]
\ No newline at end of file
diff --git a/llama_hub/llama_packs/zephyr_query_engine/base.py b/llama_hub/llama_packs/zephyr_query_engine/base.py
new file mode 100644
index 0000000000..4cab9c95c2
--- /dev/null
+++ b/llama_hub/llama_packs/zephyr_query_engine/base.py
@@ -0,0 +1,76 @@
+"""LlamaPack class."""
+
+
+from typing import Any, Dict, List
+
+from llama_index import ServiceContext, VectorStoreIndex, set_global_tokenizer
+from llama_index.llama_pack.base import BaseLlamaPack
+from llama_index.llms import HuggingFaceLLM
+from llama_index.prompts import PromptTemplate
+from llama_index.schema import Document
+
+
+class ZephyrQueryEnginePack(BaseLlamaPack):
+ def __init__(self, documents: List[Document]) -> None:
+ """Init params."""
+ try:
+ from transformers import BitsAndBytesConfig
+ import torch
+ except ImportError:
+ raise ImportError(
+ "Dependencies missing, run "
+ "`pip install torch transformers accelerate bitsandbytes`"
+ )
+
+ quantization_config = BitsAndBytesConfig(
+ load_in_4bit=True,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_quant_type="nf4",
+ bnb_4bit_use_double_quant=True,
+ )
+
+ try:
+ llm = HuggingFaceLLM(
+ model_name="HuggingFaceH4/zephyr-7b-beta",
+ tokenizer_name="HuggingFaceH4/zephyr-7b-beta",
+                query_wrapper_prompt=PromptTemplate("<|system|>\n</s>\n<|user|>\n{query_str}</s>\n<|assistant|>\n"),
+ context_window=3900,
+ max_new_tokens=256,
+ model_kwargs={"quantization_config": quantization_config},
+ generate_kwargs={"do_sample": True, "temperature": 0.7, "top_k": 50, "top_p": 0.95},
+ device_map="auto",
+ )
+ except Exception:
+ print(
+ "Failed to load and quantize model, likely due to CUDA being missing. "
+ "Loading full precision model instead."
+ )
+ llm = HuggingFaceLLM(
+ model_name="HuggingFaceH4/zephyr-7b-beta",
+ tokenizer_name="HuggingFaceH4/zephyr-7b-beta",
+                query_wrapper_prompt=PromptTemplate("<|system|>\n</s>\n<|user|>\n{query_str}</s>\n<|assistant|>\n"),
+ context_window=3900,
+ max_new_tokens=256,
+ generate_kwargs={"do_sample": True, "temperature": 0.7, "top_k": 50, "top_p": 0.95},
+ device_map="auto",
+ )
+
+ # set tokenizer for proper token counting
+ from transformers import AutoTokenizer
+ tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
+ set_global_tokenizer(tokenizer.encode)
+
+ service_context = ServiceContext.from_defaults(llm=llm, embed_model="local:BAAI/bge-base-en-v1.5")
+
+ self.llm = llm
+ self.index = VectorStoreIndex.from_documents(documents, service_context=service_context)
+
+
+ def get_modules(self) -> Dict[str, Any]:
+ """Get modules."""
+ return {"llm": self.llm, "index": self.index}
+
+ def run(self, query_str: str, **kwargs: Any) -> Any:
+ """Run the pipeline."""
+ query_engine = self.index.as_query_engine(**kwargs)
+ return query_engine.query(query_str)
diff --git a/llama_hub/llama_packs/zephyr_query_engine/requirements.txt b/llama_hub/llama_packs/zephyr_query_engine/requirements.txt
new file mode 100644
index 0000000000..c48cb6244d
--- /dev/null
+++ b/llama_hub/llama_packs/zephyr_query_engine/requirements.txt
@@ -0,0 +1,4 @@
+torch
+transformers
+accelerate
+bitsandbytes
\ No newline at end of file
diff --git a/llama_hub/tools/__init__.py b/llama_hub/tools/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/test_library.py b/tests/test_library.py
index d8c137383a..291228a203 100644
--- a/tests/test_library.py
+++ b/tests/test_library.py
@@ -70,3 +70,35 @@ def test_tools_library_matches() -> None:
# make sure the specified class is in the loader file
assert hasattr(module, k)
+
+
+def test_llama_packs_library_matches() -> None:
+ """Check that library.json corresponds to valid files."""
+ hub_dir = Path(__file__).parent.parent / "llama_hub"
+ library_path = hub_dir / "llama_packs" / "library.json"
+ library_dict = json.load(open(library_path, "r"))
+ for k, entry in library_dict.items():
+ # make sure every entry has an "id" field
+ assert "id" in entry
+ entry_id = entry["id"]
+
+        # make sure the pack directory exists
+ entry_dir = hub_dir / entry_id
+ assert entry_dir.exists()
+
+        # make sure that the pack file exists
+ entry_file = entry_dir / "base.py"
+ assert entry_file.exists()
+
+ # make sure that the README file exists
+ readme_file = entry_dir / "README.md"
+ assert readme_file.exists()
+
+ spec = util.spec_from_file_location("custom_llama_pack", location=str(entry_file))
+ if spec is None:
+ raise ValueError(f"Could not find file: {str(entry_file)}.")
+ module = util.module_from_spec(spec)
+ spec.loader.exec_module(module) # type: ignore
+
+ # make sure the specified class is in the loader file
+ assert hasattr(module, k)