Update app.py
app.py CHANGED
@@ -51,12 +51,12 @@ def ask_openai_gpt4(question):
     return response.choices[0].message.content
 
 def chatbot(user_input):
-    log_output = StringIO()
+    log_output = StringIO()
 
     faiss_index, question_embeddings = load_embeddings_and_faiss()
     embedding_model = OpenAIEmbeddings(openai_api_key=openai.api_key)
 
-    start_time = time.time()
+    start_time = time.time()
 
     log_output.write("Retrieving answer from FAISS...\n")
     response_text = retrieve_answer(user_input, faiss_index, embedding_model, answers, log_output, threshold=0.3)
@@ -65,29 +65,24 @@ def chatbot(user_input):
     log_output.write("No good match found in dataset. Using GPT-4o-mini to generate an answer.\n")
     response_text = ask_openai_gpt4(user_input)
 
-    end_time = time.time()
-    response_time = end_time - start_time
+    end_time = time.time()
+    response_time = end_time - start_time
 
-    # Log the final response time
-
-    # Return the chatbot response, response time, and log
     return response_text, f"Response time: {response_time:.4f} seconds", log_output.getvalue()
 
-# Simplified Gradio interface with response, response time, and logs
 demo = gr.Interface(
-    fn=chatbot,
-    inputs="text",
+    fn=chatbot,
+    inputs="text",
     outputs=[
-        gr.Textbox(label="Chatbot Response"),
-        gr.Textbox(label="Response Time"),
-        gr.Textbox(label="Logs")
+        gr.Textbox(label="Chatbot Response"),
+        gr.Textbox(label="Response Time"),
+        gr.Textbox(label="Logs")
     ],
     title="Medical Chatbot with Custom Knowledge About Medical FAQ",
     description="A chatbot with custom knowledge using FAISS for quick responses or fallback to GPT-4o-mini when no relevant answer is found. Response time is also tracked."
 )
 
 if __name__ == "__main__":
-    # Load dataset
     df = pd.read_csv("medquad.csv")
     questions = df['question'].tolist()
     answers = df['answer'].tolist()
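The hunks above call three helpers that are defined elsewhere in app.py and are untouched by this commit: load_embeddings_and_faiss, retrieve_answer, and ask_openai_gpt4. For orientation only, the retrieval side might look roughly like the sketch below; it assumes the MedQuAD question embeddings are cached in a file named question_embeddings.npy (a hypothetical name) and indexed with a flat L2 FAISS index, neither of which is confirmed by this diff.

# Hypothetical sketch, not part of the commit: one plausible shape for the two
# retrieval helpers used by chatbot(); the real app.py may differ.
import numpy as np
import faiss

def load_embeddings_and_faiss(path="question_embeddings.npy"):
    # Load precomputed question embeddings and wrap them in a flat L2 FAISS index.
    question_embeddings = np.load(path).astype("float32")
    faiss_index = faiss.IndexFlatL2(question_embeddings.shape[1])
    faiss_index.add(question_embeddings)
    return faiss_index, question_embeddings

def retrieve_answer(user_input, faiss_index, embedding_model, answers, log_output, threshold=0.3):
    # Embed the query, look up the nearest stored question, and accept it only
    # if the L2 distance is within the threshold; otherwise report no match.
    query = np.array([embedding_model.embed_query(user_input)], dtype="float32")
    distances, indices = faiss_index.search(query, 1)
    log_output.write(f"Nearest-neighbour distance: {distances[0][0]:.4f}\n")
    if distances[0][0] <= threshold:
        return answers[indices[0][0]]
    return None  # caller falls back to GPT-4o-mini

Under that assumption, threshold=0.3 in the diff is simply a distance cut-off: anything farther than 0.3 from the nearest stored question is treated as "no good match found in dataset", and chatbot() falls back to ask_openai_gpt4.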