MegaTronX committed
Commit 0ac0a2e · verified · 1 Parent(s): cf2c79e

Update app.py

Files changed (1)
  1. app.py +8 -1
app.py CHANGED
@@ -64,6 +64,12 @@ def predict(message, history, system_prompt, temperature, max_new_tokens, top_k,
         yield "".join(outputs)
 
 
+def handle_retry(history, retry_data: gr.RetryData):
+    new_history = history[:retry_data.index]
+    previous_prompt = history[retry_data.index]['content']
+    yield from respond(previous_prompt, new_history)
+
+
 # Load model
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
@@ -121,5 +127,6 @@ gr.ChatInterface(
     chatbot=gr.Chatbot(
         scale=1,
         show_copy_button=True
-    )
+    ),
+    chatbot.retry(handle_retry, chatbot, [chatbot])
 ).queue().launch()
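
The added handle_retry slices the history at retry_data.index, dropping the retried user message and everything after it, then re-sends that message's content as the prompt. Below is a minimal, self-contained sketch of the same wiring; it assumes a recent Gradio release that provides gr.RetryData and the Chatbot .retry() event, uses messages-format history, and the respond generator here is a hypothetical stand-in for this app's predict function (model loading and prompt formatting omitted).

import gradio as gr

def respond(message, history):
    # Hypothetical stand-in for the app's predict() generator:
    # stream an assistant reply for `message` as a growing string.
    reply = "You said: " + message
    for i in range(1, len(reply) + 1):
        yield reply[:i]

def handle_retry(history, retry_data: gr.RetryData):
    # Keep everything before the retried user message, then regenerate
    # the assistant turn from that same prompt.
    new_history = history[:retry_data.index]
    previous_prompt = history[retry_data.index]["content"]
    for partial in respond(previous_prompt, new_history):
        yield new_history + [
            {"role": "user", "content": previous_prompt},
            {"role": "assistant", "content": partial},
        ]

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages", show_copy_button=True)
    msg = gr.Textbox()

    def submit(message, history):
        # Append the user turn, then stream the assistant turn into history.
        history = history + [{"role": "user", "content": message}]
        for partial in respond(message, history):
            yield "", history + [{"role": "assistant", "content": partial}]

    msg.submit(submit, [msg, chatbot], [msg, chatbot])
    # Wire the chatbot's retry event to the handler.
    chatbot.retry(handle_retry, chatbot, [chatbot])

demo.queue().launch()

To reuse the handler with gr.ChatInterface as in this commit, the gr.Chatbot can be constructed up front and passed in via the chatbot= argument, with the retry listener attached to that same instance inside the interface's Blocks context; the Blocks version above just keeps the event wiring explicit.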