Update app.py
app.py CHANGED
@@ -108,7 +108,7 @@ def get_top_chunks(query, chunk_embeddings, text_chunks):
 
 client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")
 
-def
+def respond(message, history, mom_type):
     top_cool_results = get_top_chunks(message, cool_chunk_embeddings, cleaned_cool_chunks) # Complete this line
     #str_chunks = "\n".join(best_chunks)
 
@@ -130,51 +130,51 @@ def respond_cool(message, history, mom_type):
     )
     return response['choices'][0]['message']['content'].strip()
 
-chatbot = gr.ChatInterface(
+chatbot = gr.ChatInterface(respond_cool, type="messages")
 
-def respond_tutor(message, history, mom_type):
-    top_tutor_results = get_top_chunks(message, tutor_chunk_embeddings, cleaned_tutor_chunks)
-    #str_chunks = "\n".join(best_chunks)
+# def respond_tutor(message, history, mom_type):
+#     top_tutor_results = get_top_chunks(message, tutor_chunk_embeddings, cleaned_tutor_chunks)
+#     #str_chunks = "\n".join(best_chunks)
 
-    messages = [{"role": "system", "content": f"You are chatbot that plays the role of the user's extremely studious, tutor-like mom. Respond in full sentences, don't cut yourself off. Base your response on the provided context: {mom_type}"},
-                {"role": "user",
-                 "content": (
-                     f"Context:\n{top_tutor_results}\n\n"
-                     f"Question{message}"
-                 )}]
+#     messages = [{"role": "system", "content": f"You are chatbot that plays the role of the user's extremely studious, tutor-like mom. Respond in full sentences, don't cut yourself off. Base your response on the provided context: {mom_type}"},
+#                 {"role": "user",
+#                  "content": (
+#                      f"Context:\n{top_tutor_results}\n\n"
+#                      f"Question{message}"
+#                  )}]
 
-    if history:
-        messages.extend(history)
+#     if history:
+#         messages.extend(history)
 
-    messages.append({"role": "user", "content": message})
+#     messages.append({"role": "user", "content": message})
 
-    response = client.chat_completion(
-        messages,
-        temperature = 0.2
-    )
-    return response['choices'][0]['message']['content'].strip()
+#     response = client.chat_completion(
+#         messages,
+#         temperature = 0.2
+#     )
+#     return response['choices'][0]['message']['content'].strip()
 
-def respond_strict(message, history):
-    top_strict_results = get_top_chunks(message, strict_chunk_embeddings, cleaned_strict_chunks)
-    #str_chunks = "\n".join(best_chunks)
+# def respond_strict(message, history):
+#     top_strict_results = get_top_chunks(message, strict_chunk_embeddings, cleaned_strict_chunks)
+#     #str_chunks = "\n".join(best_chunks)
 
-    messages = [{"role": "system", "content": f"You are chatbot that plays the role of the user's extremely strict mom. Respond in full sentences, don't cut yourself off. Base your response on the provided context: {top_strict_results}"},
-                {"role": "user",
-                 "content": (
-                     f"Context:\n{top_strict_results}\n\n"
-                     f"Question{message}"
-                 )}]
+#     messages = [{"role": "system", "content": f"You are chatbot that plays the role of the user's extremely strict mom. Respond in full sentences, don't cut yourself off. Base your response on the provided context: {top_strict_results}"},
+#                 {"role": "user",
+#                  "content": (
+#                      f"Context:\n{top_strict_results}\n\n"
+#                      f"Question{message}"
+#                  )}]
 
-    if history:
-        messages.extend(history)
+#     if history:
+#         messages.extend(history)
 
-    messages.append({"role": "user", "content": message})
+#     messages.append({"role": "user", "content": message})
 
-    response = client.chat_completion(
-        messages,
-        temperature = 0.2
-    )
-    return response['choices'][0]['message']['content'].strip()
+#     response = client.chat_completion(
+#         messages,
+#         temperature = 0.2
+#     )
+#     return response['choices'][0]['message']['content'].strip()
 
 with gr.Blocks() as chatbot:
 
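For orientation, here is a minimal, hypothetical sketch of how the pieces this commit touches could fit together end to end. The diff shows only call sites, so the embedding model, the placeholder corpus, and the cosine-similarity retrieval below are assumptions; only InferenceClient("Qwen/Qwen2.5-72B-Instruct"), chat_completion(..., temperature=0.2), and gr.ChatInterface(respond_cool, type="messages") come from the diff itself.

import numpy as np
import gradio as gr
from huggingface_hub import InferenceClient
from sentence_transformers import SentenceTransformer

# Hypothetical reconstruction: inferred from call sites in the diff, not the Space's actual code.
embedder = SentenceTransformer("all-MiniLM-L6-v2")     # assumed embedding model (not shown in the diff)
client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")  # taken from the diff

# Placeholder corpus standing in for cleaned_cool_chunks / cool_chunk_embeddings.
cleaned_cool_chunks = ["example chunk 1", "example chunk 2", "example chunk 3"]
cool_chunk_embeddings = embedder.encode(cleaned_cool_chunks)

def get_top_chunks(query, chunk_embeddings, text_chunks, k=2):
    # Assumed retrieval step: rank chunks by cosine similarity to the query embedding.
    q = embedder.encode(query)
    sims = chunk_embeddings @ q / (
        np.linalg.norm(chunk_embeddings, axis=1) * np.linalg.norm(q) + 1e-9
    )
    return [text_chunks[i] for i in np.argsort(sims)[::-1][:k]]

def respond_cool(message, history, mom_type):
    # Same shape as the commented-out respond_tutor/respond_strict in the diff:
    # retrieve context, build an OpenAI-style message list, call the model.
    top_cool_results = get_top_chunks(message, cool_chunk_embeddings, cleaned_cool_chunks)
    messages = [{"role": "system",
                 "content": f"You play the user's laid-back mom. Persona notes: {mom_type}\n"
                            "Context:\n" + "\n".join(top_cool_results)}]
    if history:  # with type="messages", history is a list of role/content dicts
        messages.extend({"role": m["role"], "content": m["content"]} for m in history)
    messages.append({"role": "user", "content": message})
    response = client.chat_completion(messages, temperature=0.2)
    return response["choices"][0]["message"]["content"].strip()

chatbot = gr.ChatInterface(
    respond_cool,
    type="messages",
    additional_inputs=gr.Textbox(label="mom_type"),  # needed so the extra mom_type arg gets a value
)

if __name__ == "__main__":
    chatbot.launch()

With type="messages", Gradio passes history as OpenAI-style role/content dicts, which is why the now commented-out functions could feed it to chat_completion via messages.extend(history). Note that since respond_cool takes a third mom_type parameter, ChatInterface needs an additional_inputs component to supply it; the committed line does not yet include one.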