Update app.py
app.py CHANGED
@@ -20,29 +20,33 @@ vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
 embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5", device="cpu")
 
 # Create the document-processing pipeline
-pipeline = IngestionPipeline(
-    transformations=[
-        SentenceSplitter(),
-        embed_model,
-    ],
-    vector_store=vector_store
-)
+# pipeline = IngestionPipeline(
+#     transformations=[
+#         SentenceSplitter(),
+#         embed_model,
+#     ],
+#     vector_store=vector_store
+# )
 
 # Create the index
 index = VectorStoreIndex.from_vector_store(vector_store, embed_model=embed_model)
 
 # Create the query engine
-from transformers import AutoTokenizer
-
-# Load the correct tokenizer for Qwen/Qwen2-7B-Instruct
-tokeni = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B")
 
-
+# Load the correct tokenizer and LLM
+from transformers import AutoTokenizer
+tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
+llm = HuggingFaceLLM(model_name="HuggingFaceH4/zephyr-7b-beta", tokenizer=tokenizer)
 
+# Query engine
 query_engine = index.as_query_engine(
     llm=llm,
     response_mode='compact')
 
+
+
+
+# App GUI
 # Store LLM generated responses
 if "messages" not in st.session_state.keys():
     st.session_state.messages = [{"role": "assistant", "content": "Zadaj mi pytanie..."}]
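For context, here is a minimal sketch of how the pieces touched by this commit could fit together in a complete app.py. Everything outside the hunk is an assumption: the Chroma client setup (the `./chroma_db` path and the `docs` collection name are placeholders), the import paths (written for llama-index 0.10+ with the `llama-index-vector-stores-chroma`, `llama-index-embeddings-huggingface`, and `llama-index-llms-huggingface` packages), and the Streamlit chat loop implied by the `st.session_state.messages` block.

```python
# Hypothetical reconstruction of app.py after this commit -- not the author's
# exact file. Import paths assume llama-index >= 0.10.
import chromadb
import streamlit as st
from transformers import AutoTokenizer
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.huggingface import HuggingFaceLLM

# Connect to an already-populated Chroma collection
# (the path and collection name here are placeholders).
chroma_client = chromadb.PersistentClient(path="./chroma_db")
chroma_collection = chroma_client.get_or_create_collection("docs")
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)

# Embedding model used to embed queries against the stored vectors.
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5", device="cpu")

# Build the index straight from the vector store; the IngestionPipeline is
# commented out in the commit, so the documents are assumed to be embedded already.
index = VectorStoreIndex.from_vector_store(vector_store, embed_model=embed_model)

# Tokenizer and LLM introduced by this commit.
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
llm = HuggingFaceLLM(model_name="HuggingFaceH4/zephyr-7b-beta", tokenizer=tokenizer)

# Query engine over the index.
query_engine = index.as_query_engine(llm=llm, response_mode="compact")

# App GUI: a standard Streamlit chat loop around the query engine.
if "messages" not in st.session_state.keys():
    st.session_state.messages = [{"role": "assistant", "content": "Zadaj mi pytanie..."}]

# Replay the conversation so far.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.write(message["content"])

# On new input, query the index and append both turns to the history.
if prompt := st.chat_input():
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.write(prompt)
    with st.chat_message("assistant"):
        answer = str(query_engine.query(prompt))
        st.write(answer)
        st.session_state.messages.append({"role": "assistant", "content": answer})
```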
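The substance of the change: the old code loaded a tokenizer under the name `tokeni` from `Qwen/Qwen2-0.5B` while its own comment referred to `Qwen/Qwen2-7B-Instruct`; the new code loads both the tokenizer and the `HuggingFaceLLM` from the same `HuggingFaceH4/zephyr-7b-beta` checkpoint and binds the result to the `llm` that `as_query_engine` consumes. The `IngestionPipeline` is commented out rather than deleted, consistent with the index now being built directly from the pre-populated vector store.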