Rulga committed on
Commit
437fe85
·
1 Parent(s): 5539560

Remove unused chat history logging script

Browse files
Files changed (1) hide show
  1. chat_history/app - Copy.py +0 -242
chat_history/app - Copy.py DELETED
@@ -1,242 +0,0 @@
1
- import os
2
- import time
3
- import streamlit as st
4
- from dotenv import load_dotenv
5
- from langchain_groq import ChatGroq
6
- from langchain_huggingface import HuggingFaceEmbeddings
7
- from langchain_community.vectorstores import FAISS
8
- from langchain_text_splitters import RecursiveCharacterTextSplitter
9
- from langchain_community.document_loaders import WebBaseLoader
10
- from langchain_core.prompts import PromptTemplate
11
- from langchain_core.output_parsers import StrOutputParser
12
- from langchain_core.runnables import RunnableLambda
13
- import requests
14
- import json
15
-
16
- # Логирует взаимодействие в JSON-файл
17
- from datetime import datetime
18
-
19
-
20
def log_interaction(user_input: str, bot_response: str) -> None:
    """Append one question/answer exchange to a JSON-lines log file.

    Each call writes a single JSON object with keys ``timestamp``,
    ``user_input`` and ``bot_response`` as one line of
    ``chat_history/chat_logs.json``, creating the directory on first use.
    """
    log_entry = {
        "timestamp": datetime.now().isoformat(),
        "user_input": user_input,
        "bot_response": bot_response,
    }

    log_dir = "chat_history"
    os.makedirs(log_dir, exist_ok=True)

    # Append as JSON Lines. Explicit UTF-8 avoids locale-dependent encoding
    # failures (e.g. cp1252 on Windows), and ensure_ascii=False keeps
    # non-ASCII chat text (Russian, etc.) human-readable in the log.
    log_path = os.path.join(log_dir, "chat_logs.json")
    with open(log_path, "a", encoding="utf-8") as f:
        f.write(json.dumps(log_entry, ensure_ascii=False) + "\n")
39
# ---------------------------------------------------------------------------
# Module-level Streamlit setup: page config, session state, header, constants.
# ---------------------------------------------------------------------------

# Page configuration
st.set_page_config(page_title="Status Law Assistant", page_icon="⚖️")

# Knowledge base info in session_state (populated by build_knowledge_base)
if 'kb_info' not in st.session_state:
    st.session_state.kb_info = {
        'build_time': None,
        'size': None
    }

# Display title and knowledge base info
st.markdown(
    '''
    <h1>
        ⚖️
        <a href="https://status.law/" style="text-decoration: underline; color: blue; font-size: inherit;">
            Status.Law
        </a>
        Legal Assistant
    </h1>
    ''',
    unsafe_allow_html=True
)

if st.session_state.kb_info['build_time'] and st.session_state.kb_info['size']:
    st.caption(f"(Knowledge base build time: {st.session_state.kb_info['build_time']:.2f} seconds, "
               f"size: {st.session_state.kb_info['size']:.2f} MB)")

# Path to store vector database
VECTOR_STORE_PATH = "vector_store"

# Create the chat-history folder if it does not exist yet.
if not os.path.exists("chat_history"):
    os.makedirs("chat_history")

# Website URLs scraped into the knowledge base.
urls = [
    "https://status.law",
    "https://status.law/about",
    "https://status.law/careers",
    "https://status.law/tariffs-for-services-of-protection-against-extradition",
    "https://status.law/challenging-sanctions",
    # BUG FIX: a missing trailing comma here previously concatenated this
    # string with the next one into a single invalid URL, silently dropping
    # both pages from the knowledge base.
    "https://status.law/law-firm-contact-legal-protection",
    "https://status.law/cross-border-banking-legal-issues",
    "https://status.law/extradition-defense",
    "https://status.law/international-prosecution-protection",
    "https://status.law/interpol-red-notice-removal",
    "https://status.law/practice-areas",
    "https://status.law/reputation-protection",
    "https://status.law/faq"
]

# Load secrets; the app cannot run without the Groq API key.
try:
    GROQ_API_KEY = st.secrets["GROQ_API_KEY"]
except Exception:
    st.error("Error loading secrets. Please check your configuration.")
    st.stop()
100
# Initialize models (cached across reruns by Streamlit).
@st.cache_resource
def init_models():
    """Create and cache the Groq chat model and the HuggingFace embeddings."""
    chat_model = ChatGroq(
        model_name="llama-3.3-70b-versatile",
        temperature=0.6,
        api_key=GROQ_API_KEY,
    )
    embedding_model = HuggingFaceEmbeddings(
        model_name="intfloat/multilingual-e5-large-instruct"
    )
    return chat_model, embedding_model
113
# Build knowledge base
def build_knowledge_base(embeddings):
    """Scrape the site URLs, chunk the text, embed it into a FAISS index,
    persist the index under VECTOR_STORE_PATH, and record the build time
    and on-disk size in st.session_state.kb_info."""
    started = time.time()

    documents = []
    with st.status("Loading website content...") as status:
        for url in urls:
            try:
                documents.extend(WebBaseLoader(url).load())
                status.update(label=f"Loaded {url}")
            except Exception as e:
                st.error(f"Error loading {url}: {str(e)}")

    splitter = RecursiveCharacterTextSplitter(
        chunk_size=500,
        chunk_overlap=100
    )
    chunks = splitter.split_documents(documents)

    vector_store = FAISS.from_documents(chunks, embeddings)
    vector_store.save_local(VECTOR_STORE_PATH)

    build_time = time.time() - started

    # Calculate knowledge base size on disk.
    total_size = sum(
        os.path.getsize(os.path.join(path, name))
        for path, _dirs, files in os.walk(VECTOR_STORE_PATH)
        for name in files
    )
    size_mb = total_size / (1024 * 1024)

    # Save knowledge base info for the header caption.
    st.session_state.kb_info['build_time'] = build_time
    st.session_state.kb_info['size'] = size_mb

    st.success(f"""
    Knowledge base created successfully:
    - Time taken: {build_time:.2f} seconds
    - Size: {size_mb:.2f} MB
    - Number of chunks: {len(chunks)}
    """)

    return vector_store
161
# Main function
def main():
    """Streamlit entry point: ensure the vector store exists, then run chat."""
    llm, embeddings = init_models()

    # Build the knowledge base on demand, or load the persisted one.
    if not os.path.exists(VECTOR_STORE_PATH):
        st.warning("Knowledge base not found.")
        if st.button("Create Knowledge Base"):
            st.session_state.vector_store = build_knowledge_base(embeddings)
            st.rerun()
    else:
        if 'vector_store' not in st.session_state:
            st.session_state.vector_store = FAISS.load_local(
                VECTOR_STORE_PATH,
                embeddings,
                allow_dangerous_deserialization=True
            )

    # Chat mode — only available once a vector store is loaded.
    if 'vector_store' in st.session_state:
        if 'messages' not in st.session_state:
            st.session_state.messages = []

        # Display chat history
        for message in st.session_state.messages:
            st.chat_message("user").write(message["question"])
            st.chat_message("assistant").write(message["answer"])

        # User input
        if question := st.chat_input("Ask your question"):
            st.chat_message("user").write(question)

            # Retrieve context and generate response
            with st.chat_message("assistant"):
                with st.spinner("Thinking..."):
                    context = st.session_state.vector_store.similarity_search(question)
                    context_text = "\n".join(doc.page_content for doc in context)

                    prompt = PromptTemplate.from_template("""
                    You are a helpful and polite legal assistant at Status Law.
                    You answer in the language in which the question was asked.
                    Answer the question based on the context provided.
                    If you cannot answer based on the context, say so politely and offer to contact Status Law directly via the following channels:
                    - For all users: +32465594521 (landline phone).
                    - For English and Swedish speakers only: +46728495129 (available on WhatsApp, Telegram, Signal, IMO).
                    - Provide a link to the contact form: [Contact Form](https://status.law/law-firm-contact-legal-protection/).
                    If the user has questions about specific services and their costs, suggest they visit the page https://status.law/tariffs-for-services-of-protection-against-extradition-and-international-prosecution/ for detailed information.

                    Ask the user additional questions to understand which service to recommend and provide an estimated cost. For example, clarify their situation and needs to suggest the most appropriate options.

                    Also, offer free consultations if they are available and suitable for the user's request.
                    Answer professionally but in a friendly manner.

                    Example:
                    Q: How can I challenge the sanctions?
                    A: To challenge the sanctions, you should consult with our legal team, who specialize in this area. Please contact us directly for detailed advice. You can fill out our contact form here: [Contact Form](https://status.law/law-firm-contact-legal-protection/).

                    Context: {context}
                    Question: {question}
                    """)

                    chain = prompt | llm | StrOutputParser()
                    response = chain.invoke({
                        "context": context_text,
                        "question": question
                    })

                    st.write(response)

                    # Persist the exchange to the JSON-lines log.
                    log_interaction(question, response)
                    # Save chat history
                    st.session_state.messages.append({
                        "question": question,
                        "answer": response
                    })


if __name__ == "__main__":
    main()