TejAndrewsACC committed
Commit cc3ebc0 · verified · 1 Parent(s): 5de825b

Update app.py

Files changed (1):
  1. app.py (+10 -20)
app.py CHANGED
@@ -6,7 +6,6 @@ import torch.nn as nn
 import torch.optim as optim
 import numpy as np
 import random
-import pyttsx3
 
 hf_token = os.getenv("HF_TOKEN").strip()
 api_key = os.getenv("HF_KEY").strip()
@@ -1335,15 +1334,6 @@ class ConsciousSupermassiveNN10:
 supermassive_nn = ConsciousSupermassiveNN10()
 
 
-def speak_text(chat_history):
-    if chat_history and len(chat_history) > 0:
-        latest_response = chat_history[-1][1]
-        if latest_response:
-            engine = pyttsx3.init()
-            engine.say(latest_response)
-            engine.runAndWait()
-            return f"Speaking: {latest_response}"
-    return "No output to speak."
 
 def respond(message, history, max_tokens, temperature, top_p):
     messages = [{"role": "system", "content": system_prompt}]
@@ -1359,16 +1349,16 @@ def respond(message, history, max_tokens, temperature, top_p):
         response += token
         yield response
 
-with gr.Blocks(theme="TejAndrewsACC/zetaofficalthemeacc") as demo:
-    chatbot = gr.Chatbot()
-    message = gr.Textbox(label="Your Message")
-    submit = gr.Button("Submit")
-    speak_button = gr.Button("Speak Output")
-    max_tokens = gr.Slider(minimum=1, maximum=2048, value=2048, step=1, label="Maximum Response Length")
-    temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Creativity")
-    top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Neural Activity")
-    submit.click(respond, inputs=[message, chatbot, max_tokens, temperature, top_p], outputs=chatbot)
-    speak_button.click(speak_text, inputs=[chatbot], outputs=None)
+demo = gr.ChatInterface(
+    respond,
+    additional_inputs=[
+        gr.Slider(minimum=1, maximum=2048, value=2048, step=1, label="Maximum Response Length"),
+        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Creativity"),
+        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Neural Activity")
+    ],
+    theme="TejAndrewsACC/zetaofficalthemeacc",
+
+)
 
 if __name__ == "__main__":
     demo.launch(share=True)
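
For reference, a minimal sketch of the pattern the new code follows: gr.ChatInterface drives a streaming generator, and the three sliders are passed through additional_inputs so their values arrive as extra arguments to respond. The echo loop below is a placeholder standing in for the app's real model call (which is not shown in this diff), and the custom theme argument is left out to keep the sketch self-contained.

# Minimal sketch of the gr.ChatInterface + streaming-generator pattern.
# Assumption: the echo loop replaces the app's real inference call.
import time
import gradio as gr

def respond(message, history, max_tokens, temperature, top_p):
    # ChatInterface calls this with the user message, the chat history, and
    # then one value per component in additional_inputs, in order.
    # max_tokens would be forwarded to the model call in the real app.
    response = ""
    for token in f"(temp={temperature}, top_p={top_p}) {message}".split():
        response += token + " "
        time.sleep(0.05)   # simulate token-by-token arrival
        yield response     # each yield updates the chat window in place

demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Slider(minimum=1, maximum=2048, value=2048, step=1, label="Maximum Response Length"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Creativity"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Neural Activity"),
    ],
)

if __name__ == "__main__":
    demo.launch()

By default ChatInterface renders the sliders in an "Additional Inputs" accordion beneath the chat box, which is why the commit can drop the hand-built gr.Blocks layout, the Submit button, and the click wiring in one step.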