TornadoAI committed on
Commit
2dc42b7
·
verified ·
1 Parent(s): 19626ed

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -8
app.py CHANGED
@@ -51,16 +51,17 @@ def predict(
51
 
52
  json_response = response.json()
53
  if 'choices' in json_response and len(json_response['choices']) > 0:
54
- assistant_content = json_response['choices'][0]['message']['content']
55
  chat_history.append({"role": "assistant", "content": assistant_content})
 
56
  else:
57
  chat_history.append({"role": "assistant", "content": "Error: No response from assistant."})
58
 
59
- return chat_history
60
 
61
  except Exception as e:
62
  chat_history.append({"role": "assistant", "content": f"Error: {str(e)}"})
63
- return chat_history
64
 
65
  css = """
66
  .gradio-container {
@@ -126,8 +127,8 @@ footer {
126
  with gr.Blocks(
127
  theme=gr.themes.Soft(
128
  primary_hue="orange",
129
- secondary_hue="gray",
130
- neutral_hue="slate",
131
  spacing_size="sm",
132
  radius_size="lg",
133
  font=["Inter", "ui-sans-serif", "system-ui"]
@@ -196,6 +197,7 @@ with gr.Blocks(
196
  step=0.05,
197
  label="Top-p"
198
  )
 
199
 
200
  def show_chat():
201
  return gr.update(visible=False), gr.update(visible=True)
@@ -203,7 +205,7 @@ with gr.Blocks(
203
  msg.submit(
204
  predict,
205
  [msg, chatbot, temperature, top_p],
206
- chatbot
207
  ).then(
208
  lambda: "",
209
  None,
@@ -213,7 +215,7 @@ with gr.Blocks(
213
  submit.click(
214
  predict,
215
  [msg, chatbot, temperature, top_p],
216
- chatbot
217
  ).then(
218
  lambda: "",
219
  None,
@@ -231,4 +233,4 @@ if __name__ == "__main__":
231
  server_name="0.0.0.0",
232
  server_port=7860,
233
  share=True
234
- )
 
51
 
52
  json_response = response.json()
53
  if 'choices' in json_response and len(json_response['choices']) > 0:
54
+ assistant_content = json_response['choices'][0]['message']['content']
55
  chat_history.append({"role": "assistant", "content": assistant_content})
56
+ stats_content = f"*Powered by Oxygen, Generation time: {json_response["usage"]["metrics"]["inference_time_ms"]} ms , Tokens per second: {json_response["usage"]["metrics"]["tokens_per_second"]} , Generation cost: {round(json_response["usage"]["cost"]["total"],10)} EUR*"
57
  else:
58
  chat_history.append({"role": "assistant", "content": "Error: No response from assistant."})
59
 
60
+ return chat_history, stats_content
61
 
62
  except Exception as e:
63
  chat_history.append({"role": "assistant", "content": f"Error: {str(e)}"})
64
+ return chat_history, "*Generation error..*"
65
 
66
  css = """
67
  .gradio-container {
 
127
  with gr.Blocks(
128
  theme=gr.themes.Soft(
129
  primary_hue="orange",
130
+ secondary_hue="zinc",
131
+ neutral_hue="zinc",
132
  spacing_size="sm",
133
  radius_size="lg",
134
  font=["Inter", "ui-sans-serif", "system-ui"]
 
197
  step=0.05,
198
  label="Top-p"
199
  )
200
+ stats_display = gr.Markdown()
201
 
202
  def show_chat():
203
  return gr.update(visible=False), gr.update(visible=True)
 
205
  msg.submit(
206
  predict,
207
  [msg, chatbot, temperature, top_p],
208
+ [chatbot, stats_display]
209
  ).then(
210
  lambda: "",
211
  None,
 
215
  submit.click(
216
  predict,
217
  [msg, chatbot, temperature, top_p],
218
+ [chatbot, stats_display]
219
  ).then(
220
  lambda: "",
221
  None,
 
233
  server_name="0.0.0.0",
234
  server_port=7860,
235
  share=True
236
+ )