Fix dependency issues in the unit2 llama-index notebooks
unit2/llama-index/agents.ipynb
CHANGED
@@ -21,11 +21,11 @@
    },
    {
     "cell_type": "code",
-    "execution_count":
+    "execution_count": null,
     "metadata": {},
     "outputs": [],
     "source": [
-     "!pip install llama-index
+     "!pip install llama-index llama-index-vector-stores-chroma llama-index-llms-huggingface-api llama-index-embeddings-huggingface -U -q"
     ]
    },
    {
@@ -167,7 +167,7 @@
    },
    {
     "cell_type": "code",
-    "execution_count":
+    "execution_count": null,
     "metadata": {},
     "outputs": [],
     "source": [
@@ -175,7 +175,7 @@
      "\n",
      "from llama_index.core import VectorStoreIndex\n",
      "from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI\n",
-     "from llama_index.embeddings.
+     "from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n",
      "from llama_index.core.tools import QueryEngineTool\n",
      "from llama_index.vector_stores.chroma import ChromaVectorStore\n",
      "\n",
@@ -185,7 +185,7 @@
      "vector_store = ChromaVectorStore(chroma_collection=chroma_collection)\n",
      "\n",
      "# Create a query engine\n",
-     "embed_model =
+     "embed_model = HuggingFaceEmbedding(model_name=\"BAAI/bge-small-en-v1.5\")\n",
      "llm = HuggingFaceInferenceAPI(model_name=\"Qwen/Qwen2.5-Coder-32B-Instruct\")\n",
      "index = VectorStoreIndex.from_vector_store(\n",
      "    vector_store=vector_store, embed_model=embed_model\n",
unit2/llama-index/components.ipynb
CHANGED
@@ -23,7 +23,7 @@
     "metadata": {},
     "outputs": [],
     "source": [
-     "!pip install llama-index datasets llama-index-callbacks-arize-phoenix llama-index-vector-stores-chroma llama-index-llms-huggingface-api -U -q"
+     "!pip install llama-index datasets llama-index-callbacks-arize-phoenix arize-phoenix llama-index-vector-stores-chroma llama-index-llms-huggingface-api llama-index-embeddings-huggingface -U -q"
     ]
    },
    {
@@ -113,12 +113,12 @@
     "cell_type": "markdown",
     "metadata": {},
     "source": [
-     "Now we have a list of `Document` objects, we can use the `IngestionPipeline` to create nodes from the documents and prepare them for the `QueryEngine`. We will use the `SentenceSplitter` to split the documents into smaller chunks and the `
+     "Now we have a list of `Document` objects, we can use the `IngestionPipeline` to create nodes from the documents and prepare them for the `QueryEngine`. We will use the `SentenceSplitter` to split the documents into smaller chunks and the `HuggingFaceEmbedding` to embed the chunks."
     ]
    },
    {
     "cell_type": "code",
-    "execution_count":
+    "execution_count": null,
     "metadata": {},
     "outputs": [
      {
@@ -142,7 +142,7 @@
      }
     ],
     "source": [
-     "from llama_index.embeddings.
+     "from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n",
      "from llama_index.core.node_parser import SentenceSplitter\n",
      "from llama_index.core.ingestion import IngestionPipeline\n",
      "\n",
@@ -150,7 +150,7 @@
      "pipeline = IngestionPipeline(\n",
      "    transformations=[\n",
      "        SentenceSplitter(),\n",
-     "
+     "        HuggingFaceEmbedding(model_name=\"BAAI/bge-small-en-v1.5\"),\n",
      "    ]\n",
      ")\n",
      "\n",
@@ -175,7 +175,7 @@
    },
    {
     "cell_type": "code",
-    "execution_count":
+    "execution_count": null,
     "metadata": {},
     "outputs": [
      {
@@ -200,7 +200,7 @@
      "pipeline = IngestionPipeline(\n",
      "    transformations=[\n",
      "        SentenceSplitter(),\n",
-     "
+     "        HuggingFaceEmbedding(model_name=\"BAAI/bge-small-en-v1.5\"),\n",
      "    ],\n",
      "    vector_store=vector_store,\n",
      ")\n",
@@ -218,14 +218,14 @@
    },
    {
     "cell_type": "code",
-    "execution_count":
+    "execution_count": null,
     "metadata": {},
     "outputs": [],
     "source": [
      "from llama_index.core import VectorStoreIndex\n",
-     "from llama_index.embeddings.
+     "from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n",
      "\n",
-     "embed_model =
+     "embed_model = HuggingFaceEmbedding(model_name=\"BAAI/bge-small-en-v1.5\")\n",
      "index = VectorStoreIndex.from_vector_store(\n",
      "    vector_store=vector_store, embed_model=embed_model\n",
      ")"
unit2/llama-index/tools.ipynb
CHANGED
@@ -18,11 +18,11 @@
    },
    {
     "cell_type": "code",
-    "execution_count":
+    "execution_count": null,
     "metadata": {},
     "outputs": [],
     "source": [
-     "!pip install llama-index
+     "!pip install llama-index llama-index-vector-stores-chroma llama-index-llms-huggingface-api llama-index-embeddings-huggingface llama-index-tools-google -U -q"
     ]
    },
    {
@@ -86,7 +86,7 @@
    },
    {
     "cell_type": "code",
-    "execution_count":
+    "execution_count": null,
     "metadata": {},
     "outputs": [
      {
@@ -105,14 +105,14 @@
      "\n",
      "from llama_index.core import VectorStoreIndex\n",
      "from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI\n",
-     "from llama_index.embeddings.
+     "from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n",
      "from llama_index.core.tools import QueryEngineTool\n",
      "from llama_index.vector_stores.chroma import ChromaVectorStore\n",
      "\n",
      "db = chromadb.PersistentClient(path=\"./alfred_chroma_db\")\n",
      "chroma_collection = db.get_or_create_collection(\"alfred\")\n",
      "vector_store = ChromaVectorStore(chroma_collection=chroma_collection)\n",
-     "embed_model =
+     "embed_model = HuggingFaceEmbedding(model_name=\"BAAI/bge-small-en-v1.5\")\n",
      "llm = HuggingFaceInferenceAPI(model_name=\"meta-llama/Llama-3.2-3B-Instruct\")\n",
      "index = VectorStoreIndex.from_vector_store(\n",
      "    vector_store=vector_store, embed_model=embed_model\n",
unit2/llama-index/workflows.ipynb
CHANGED
@@ -18,11 +18,11 @@
    },
    {
     "cell_type": "code",
-    "execution_count":
+    "execution_count": null,
     "metadata": {},
     "outputs": [],
     "source": [
-     "!pip install llama-index
+     "!pip install llama-index llama-index-vector-stores-chroma llama-index-utils-workflow llama-index-llms-huggingface-api pyvis -U -q"
     ]
    },
    {