soyleyicicem committed: Update app.py
app.py CHANGED
@@ -166,28 +166,12 @@ def chat_gpt_nofn(prompt=None, history=[], model=model, client=client):
     history.append({"role": "user", "content": f"{prompt}"})
     messages = history

-
-
-
-
-
-
-        model=model,
-        messages=messages,
-        stream=True
-    )
-
-    full_response = ""
-    for chunk in completion:
-        try:
-            if chunk.choices[0].delta.content is not None:
-                content = chunk.choices[0].delta.content
-                full_response += content
-                yield content
-        except:
-            pass
-
-    return full_response
+    completion = client.chat.completions.create(
+        model=model,
+        messages=messages,
+        stream=True)
+
+    return completion

 def format_chat_prompt(chat_history):
     prompt = []
@@ -331,26 +315,12 @@ def chat(question, manual, history, liked):
     Ref-2:
     ...
     """
-    #
-    #
-    #
-
-    # history.append((question, response))
-    final_response = ""
-    for chunk in chat_gpt_nofn(prompt=prompt, history=conv):
-        final_response += chunk
-        yield "", history + [(question, final_response)]
-
-    last_interaction.value = {
-        "question": question,
-        "response": final_response,
-        "manual": manual,
-        "point_id": uuid.uuid4().hex
-    }
+    #final_response = chat_gpt_nofn(prompt=prompt, history=conv)
+    #response = final_response.choices[-1].message.content
+    #conv.append(final_response.choices[-1].message)

-
-
-    print("Answer:--- %s seconds ---" % (time.time() - start_time))
+    #history.append((question, response))
+    #print("Answer:--- %s seconds ---" % (time.time() - start_time))
     # Store the last interaction without saving to the database yet
     #last_interaction.value = {
     #    "question": question,
@@ -361,6 +331,31 @@ def chat(question, manual, history, liked):

     #return '', history

+    final_response = chat_gpt_nofn(prompt=prompt, history=conv)
+
+    partial_response = ""
+    for chunk in final_response:
+        if chunk.choices[0].delta.content is not None:
+            partial_response += chunk.choices[0].delta.content
+            yield partial_response, history + [(question, partial_response)]
+
+    response = partial_response
+    conv.append({"role": "user", "content": prompt})
+    conv.append({"role": "assistant", "content": response})
+
+    history.append((question, response))
+    print("Answer:--- %s seconds ---" % (time.time() - start_time))
+
+    # Store the last interaction without saving to the database yet
+    last_interaction.value = {
+        "question": question,
+        "response": response,
+        "manual": manual,
+        "point_id": uuid.uuid4().hex
+    }
+
+    yield response, history
+
 def save_last_interaction(feedback):
     if last_interaction.value:
         DatabaseOperations.save_user_history_demo(
@@ -393,9 +388,8 @@ with gr.Blocks() as demo:
         save_last_interaction("N/A") # Save previous interaction before starting a new one
         return chat(question, manual, history, liked_state.value)

-
-    textbox.submit(gradio_chat, [textbox, manual, chatbot], [textbox, chatbot], api_name="chat")
+    textbox.submit(gradio_chat, [textbox, manual, chatbot], [textbox, chatbot])
     chatbot.like(handle_like, None, None)
-
+demo.queue()
 demo.launch()

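Taken together, the commit replaces the chunk-yielding generator inside chat_gpt_nofn with a thin wrapper that returns the raw streaming iterator, moves chunk handling into chat() (which now yields the partial answer to Gradio as it grows), and adds demo.queue() so the generator handler can stream. The following is a minimal, self-contained sketch of that pattern, not the Space's actual code: it assumes an OpenAI-compatible client, MODEL_NAME and the system prompt are placeholders, and the Space's retrieval, manual selection, and database logic are omitted.

# Minimal sketch of the streaming pattern adopted in this commit (see assumptions above).
import gradio as gr
from openai import OpenAI

client = OpenAI()           # assumes OPENAI_API_KEY is set in the environment
MODEL_NAME = "gpt-4o-mini"  # placeholder model id, not the Space's actual model

def chat_gpt_nofn(prompt, history):
    # Mirrors the refactor: append the user turn, then return the raw
    # streaming iterator instead of yielding chunks from inside this function.
    history.append({"role": "user", "content": prompt})
    return client.chat.completions.create(
        model=MODEL_NAME,
        messages=history,
        stream=True,
    )

def chat(question, history):
    conv = [{"role": "system", "content": "You are a helpful assistant."}]  # placeholder
    completion = chat_gpt_nofn(question, conv)

    partial = ""
    for chunk in completion:
        # Each streamed chunk carries an incremental delta; content can be None.
        if chunk.choices[0].delta.content is not None:
            partial += chunk.choices[0].delta.content
            # Yield the growing answer so the Chatbot re-renders live.
            yield "", history + [(question, partial)]

    # Final yield with the completed answer.
    yield "", history + [(question, partial)]

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(value=[])
    textbox = gr.Textbox()
    textbox.submit(chat, [textbox, chatbot], [textbox, chatbot])

demo.queue()   # needed so the generator handler can stream partial outputs
demo.launch()

Returning the iterator keeps chat_gpt_nofn a thin wrapper and lets the caller decide how to surface partial text, which is what the hunks above do with conv, history, and last_interaction.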