Update app.py
app.py CHANGED
@@ -1,5 +1,5 @@
 import streamlit as st
-from llama_index.core import VectorStoreIndex
+from llama_index.core import VectorStoreIndex
 from llama_index.embeddings.huggingface import HuggingFaceEmbedding
 from llama_index.core.node_parser import SentenceSplitter
 from llama_index.core.ingestion import IngestionPipeline
@@ -7,18 +7,18 @@ import chromadb
 from llama_index.vector_stores.chroma import ChromaVectorStore
 from llama_index.llms.ollama import Ollama
 
+from llama_index.llms.huggingface import HuggingFaceLLM
+
+from llama_index.core import Settings
 
 # Page settings
 st.title("Aplikacja z LlamaIndex")
 
-
-
-db = chromadb.PersistentClient(path="./data")
-chroma_collection = db.get_or_create_collection("zalacznik_nr12")
+db = chromadb.PersistentClient(path="./abc")
+chroma_collection = db.get_or_create_collection("pomoc_ukrainie")
 vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
 embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
 
-
 # Create the document-processing pipeline
 pipeline = IngestionPipeline(
     transformations=[
@@ -28,12 +28,6 @@ pipeline = IngestionPipeline(
     vector_store=vector_store
 )
 
-pipeline.arun(
-    documents=documents,
-    num_workers=4,  # adjust to the number of CPU cores
-    show_progress=True
-)
-
 # Create the index
 index = VectorStoreIndex.from_vector_store(vector_store, embed_model=embed_model)
 
@@ -75,3 +69,4 @@ if st.session_state.messages[-1]["role"] != "assistant":
     message = {"role": "assistant", "content": content}  # Save the full content in the message
     st.session_state.messages.append(message)
 
+
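Note: the deleted ingestion call was also broken as written. `IngestionPipeline.arun` is a coroutine, so calling it without `await` never actually runs, and `documents` is never defined anywhere in this script. If ingestion needs to come back, here is a minimal sketch using the synchronous `run` method, with a hypothetical `SimpleDirectoryReader` source directory (the commit does not show where the documents come from):

```python
from llama_index.core import SimpleDirectoryReader

# Hypothetical corpus location; the commit does not say where documents live.
documents = SimpleDirectoryReader("./docs").load_data()

# Synchronous counterpart of the removed pipeline.arun(...) call:
# splits the documents, embeds the nodes, and writes them into the Chroma collection.
pipeline.run(
    documents=documents,
    num_workers=4,      # adjust to the number of CPU cores
    show_progress=True,
)
```

After this change the app instead assumes the `pomoc_ukrainie` collection is already populated, and only rebuilds an index view over it with `VectorStoreIndex.from_vector_store`.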
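The newly imported `HuggingFaceLLM` and `Settings` are not used in any of the hunks shown above. A minimal sketch of how `Settings` is typically wired up in LlamaIndex so that engines built from the index pick up the models, assuming an Ollama model name (`llama3`) that this diff does not specify:

```python
# Route every LlamaIndex component through one global configuration.
Settings.embed_model = embed_model   # reuse the BGE embedding model defined above
Settings.llm = Ollama(
    model="llama3",        # assumed model name; the diff does not pin one
    request_timeout=120.0, # local models can be slow to produce the first token
)

# Engines created from the index now use Settings.llm implicitly.
chat_engine = index.as_chat_engine()
```

Setting these globally avoids passing `llm=` and `embed_model=` to every engine the Streamlit chat loop creates.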