clui committed on
Commit
b106d86
1 Parent(s): e165d9b

add app.py

Files changed (1)
  1. app.py +137 -0
app.py ADDED
@@ -0,0 +1,137 @@
+ # import streamlit as st
+ # from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
+ # from llama_index.embeddings.huggingface import HuggingFaceEmbedding
+ # from llama_index.core.node_parser import SentenceSplitter
+ # from llama_index.core.ingestion import IngestionPipeline
+ # import chromadb
+ # from llama_index.vector_stores.chroma import ChromaVectorStore
+ # from llama_index.llms.ollama import Ollama
+
+ # # Page setup
+ # st.title("Aplikacja z LlamaIndex")
+
+ # db = chromadb.PersistentClient(path="./zakazenia")
+ # chroma_collection = db.get_or_create_collection("zalacznik_nr12")
+ # vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
+ # embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
+
+ # # Create the document-processing pipeline
+ # pipeline = IngestionPipeline(
+ #     transformations=[
+ #         SentenceSplitter(),
+ #         embed_model,
+ #     ],
+ #     vector_store=vector_store
+ # )
+
+ # # Create the index
+ # index = VectorStoreIndex.from_vector_store(vector_store, embed_model=embed_model)
+
+ # # Create the query engine
+ # llm = Ollama(model="qwen2:7b")
+ # query_engine = index.as_query_engine(
+ #     llm=llm,
+ #     response_mode='compact')
+
+ # # Store LLM generated responses
+ # if "messages" not in st.session_state.keys():
+ #     st.session_state.messages = [{"role": "assistant", "content": "Zadaj mi pytanie..."}]
+
+ # # Display chat messages
+ # for message in st.session_state.messages:
+ #     with st.chat_message(message["role"]):
+ #         st.write(message["content"])
+
+ # # User-provided prompt
+ # if prompt := st.chat_input():
+ #     st.session_state.messages.append({"role": "user", "content": prompt})
+ #     with st.chat_message("user"):
+ #         st.write(prompt)
+
+ # # Generate a new response if last message is not from assistant
+ # if st.session_state.messages[-1]["role"] != "assistant":
+ #     with st.chat_message("assistant"):
+ #         with st.spinner("Czekaj, odpowiedź jest generowana..."):
+ #             response = query_engine.query(prompt)
+
+ #             st.write(response.response)
+ #             for node in response.source_nodes:
+ #                 st.write(node.score)
+
+ #             message = {"role": "assistant", "content": response}
+ #             st.session_state.messages.append(message)
+
+ import streamlit as st
+ from llama_index.core import VectorStoreIndex
+ from llama_index.embeddings.huggingface import HuggingFaceEmbedding
+ from llama_index.core.node_parser import SentenceSplitter
+ from llama_index.core.ingestion import IngestionPipeline
+ import chromadb
+ from llama_index.vector_stores.chroma import ChromaVectorStore
+ from llama_index.llms.ollama import Ollama
+
+ # Page setup
+ st.title("Aplikacja z LlamaIndex")
+
+ # Open the persistent Chroma collection and the embedding model used at query time
+ db = chromadb.PersistentClient(path="./zakazenia")
+ chroma_collection = db.get_or_create_collection("zalacznik_nr12")
+ vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
+ embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
+
+ # Create the document-processing pipeline
+ pipeline = IngestionPipeline(
+     transformations=[
+         SentenceSplitter(),
+         embed_model,
+     ],
+     vector_store=vector_store
+ )
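+
+ # NOTE: app.py itself never runs this pipeline; it assumes the ./zakazenia collection
+ # was populated beforehand. A minimal one-off ingestion sketch (the ./dokumenty source
+ # folder below is a hypothetical example) could look like:
+ #
+ #     from llama_index.core import SimpleDirectoryReader
+ #     documents = SimpleDirectoryReader("./dokumenty").load_data()
+ #     pipeline.run(documents=documents)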
+
+ # Create the index on top of the existing vector store
+ index = VectorStoreIndex.from_vector_store(vector_store, embed_model=embed_model)
+
+ # Create the query engine
+ llm = Ollama(model="qwen2:7b")
+ query_engine = index.as_query_engine(
+     llm=llm,
+     response_mode='compact')
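+ # response_mode='compact' concatenates the retrieved chunks into as few LLM prompts
+ # as fit in the context window, rather than issuing one call per chunk.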
+
+ # Store LLM generated responses
+ if "messages" not in st.session_state.keys():
+     st.session_state.messages = [{"role": "assistant", "content": "Zadaj mi pytanie..."}]
+
+ # Display chat messages
+ for message in st.session_state.messages:
+     with st.chat_message(message["role"]):
+         st.write(message["content"])
+
+ # User-provided prompt
+ if prompt := st.chat_input():
+     st.session_state.messages.append({"role": "user", "content": prompt})
+     with st.chat_message("user"):
+         st.write(prompt)
+
+ # Generate a new response if the last message is not from the assistant
+ if st.session_state.messages[-1]["role"] != "assistant":
+     with st.chat_message("assistant"):
+         with st.spinner("Czekaj, odpowiedź jest generowana..."):
+             response = query_engine.query(prompt)
+
+             # Build the message content from the response text and the retrieval score
+             content = str(response.response)  # make sure the content is a plain string
+             # Append the score of the top source node, if one was retrieved and it has a score
+             if hasattr(response, 'source_nodes') and response.source_nodes and response.source_nodes[0].score is not None:
+                 content += f"\nScore: {response.source_nodes[0].score:.4f}"
+
+             st.write(content)  # display the full content in Streamlit
+
+             message = {"role": "assistant", "content": content}  # keep the full content in the stored message
+             st.session_state.messages.append(message)
+