Spaces:
Sleeping
Sleeping
Upload 3 files
Browse files- app.py +70 -0
- dataset_loader.py +20 -0
- requirements.txt +11 -0
app.py
ADDED
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
import uuid
from typing import List, Dict  # NOTE(review): List/Dict look unused in this file — confirm before removing
import os

from dataset_loader import load_dataset

# Bootstrap: clone the private dataset repo on first boot. The directory
# doubles as an "already downloaded" marker, so Streamlit reruns and
# restarts with a warm disk skip the network call.
if not os.path.isdir("r_logic_data"):
    load_dataset()

# Imported *after* the download on purpose: r_logic_data is the cloned
# dataset repo itself, so this import can only succeed once the clone
# above has materialized the package on disk.
from r_logic_data.agent import Agent
|
11 |
+
|
12 |
+
|
13 |
+
class StreamlitChatbot:
    """Streamlit chat front-end wired to the project's Agent backend.

    Per-browser-session state (transcript + anonymous recipient id) lives in
    ``st.session_state``; the Agent instance is shared by the app process.
    """

    def __init__(self):
        # Backend Q&A engine; constructed once per app process.
        self.agent = Agent()

    def initialize_session_state(self):
        """Seed the session transcript and a stable anonymous user id."""
        if "messages" not in st.session_state:
            st.session_state["messages"] = []
        if "recipient_id" not in st.session_state:
            # Random UUID stands in for a real user identity for this session.
            st.session_state["recipient_id"] = str(uuid.uuid4())

    def display_chat_history(self):
        """Re-render every prior turn (Streamlit redraws the page each run)."""
        for entry in st.session_state.messages:
            with st.chat_message(entry["role"]):
                st.markdown(entry["content"])

    def get_chatbot_response(self, user_input: str) -> str:
        """Send one user question to the Agent and return the answer text."""
        turn_index = len(st.session_state.messages)
        payload = {
            "content": user_input,
            "recipient_id": st.session_state.recipient_id,
            "sender_id": "chatbot",
            # Synthetic ids derived from the transcript length for this turn.
            "history_id": f"history_{turn_index}",
            "qa_id": f"qa_{turn_index}",
        }

        result = self.agent.answer(
            question=user_input,
            recipient_id=st.session_state.recipient_id,
            sender_id="chatbot",
            item=payload,
            mode="chat",
            platform=1,  # Assuming 1 is for Streamlit
            history=st.session_state.messages,
        )

        # Agent.answer returns a sequence; the first element is the answer text.
        return result[0]

    def _record_turn(self, role: str, text: str) -> None:
        """Append one turn to the transcript and draw it in the chat pane."""
        st.session_state.messages.append({"role": role, "content": text})
        with st.chat_message(role):
            st.markdown(text)

    def run(self):
        """Entry point: draw the page, then handle at most one new turn."""
        st.title("R-Logic Computer Repair Chatbot")
        self.initialize_session_state()
        self.display_chat_history()

        user_input = st.chat_input(
            "Ask about your repair status or any questions..."
        )
        if not user_input:
            return

        # Show the user's message first, then the agent's reply.
        self._record_turn("user", user_input)
        self._record_turn("assistant", self.get_chatbot_response(user_input))
|
66 |
+
|
67 |
+
|
68 |
+
# Script entry point: build the app and hand control to Streamlit's run loop.
if __name__ == "__main__":
    StreamlitChatbot().run()
|
dataset_loader.py
ADDED
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os

from dotenv import load_dotenv
from huggingface_hub import Repository, login, snapshot_download
5 |
+
|
6 |
+
load_dotenv()
|
7 |
+
|
8 |
+
|
9 |
+
def load_dataset() -> None:
    """Download (or refresh) the private r_logic_dataset into ./r_logic_data.

    Authenticates with the HUB_TOKEN env var (loaded from .env at import
    time), then materializes the dataset repo on disk. app.py checks for the
    "r_logic_data" directory before importing the agent package from it, so
    the local_dir name must not change.

    Raises:
        huggingface_hub errors (e.g. RepositoryNotFoundError, HTTPError) if
        the token is missing/invalid or the download fails.
    """
    token = os.getenv("HUB_TOKEN")
    login(token=token)

    # huggingface_hub.Repository (the previous implementation) is deprecated
    # and removed in current huggingface_hub releases; snapshot_download is
    # the supported replacement and performs the same clone-or-refresh.
    snapshot_download(
        repo_id="dahreply/r_logic_dataset",
        repo_type="dataset",
        local_dir="r_logic_data",
        token=token,
    )
|
requirements.txt
ADDED
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
huggingface_hub
|
3 |
+
pandas
|
4 |
+
ipykernel
|
5 |
+
pandasql
|
6 |
+
requests
|
7 |
+
python-dotenv
|
8 |
+
icecream
|
9 |
+
boto3
|
10 |
+
pymysql
|
11 |
+
streamlit
|