SalexAI committed on
Commit
eb0ccd7
·
verified ·
1 Parent(s): 7a2e8bf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +59 -28
app.py CHANGED
@@ -57,14 +57,15 @@ PROMPTS = {
57
  )
58
  }
59
 
60
- def respond(message, history, character):
 
 
 
61
  system_message = PROMPTS.get(character, "")
 
62
  messages = [{"role": "system", "content": system_message}]
63
- for user_msg, bot_msg in history:
64
- if user_msg:
65
- messages.append({"role": "user", "content": user_msg})
66
- if bot_msg:
67
- messages.append({"role": "assistant", "content": bot_msg})
68
  messages.append({"role": "user", "content": message})
69
 
70
  payload = {
@@ -81,22 +82,55 @@ def respond(message, history, character):
81
  content = response.json()["choices"][0]["message"]["content"]
82
 
83
  stream_response = ""
 
84
  for token in content.split():
85
  stream_response += token + " "
86
  time.sleep(0.02)
87
- # Yield each update as a message dictionary.
88
- yield {"role": "assistant", "content": stream_response.strip()}
89
  except Exception as e:
90
- yield {"role": "assistant", "content": f"Error: {str(e)}"}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
91
 
92
  with gr.Blocks(css=css) as demo:
93
  # Header with QClone Public label.
94
  gr.HTML("<h1>QClone <span style='background-color:#3b82f6;color:white;font-size:0.75em;padding:2px 6px;border-radius:5px;margin-left:8px;'>Public</span></h1>")
95
 
96
  with gr.Row():
97
- with gr.Column():
98
- # Native dropdown with emojis and descriptions.
99
- character = gr.Dropdown(
100
  choices=[
101
  "Elon Ma (Official) 🟡 - Broken English salesman",
102
  "Cole (Community) 🔵 - Gen Z slang troll",
@@ -105,22 +139,19 @@ with gr.Blocks(css=css) as demo:
105
  value="Elon Ma (Official) 🟡 - Broken English salesman",
106
  label="Model"
107
  )
 
 
 
108
 
109
- # Helper to clean up the selected choice.
110
- def clean_choice(choice):
111
- if "Elon" in choice:
112
- return "Elon Ma (Official)"
113
- if "Cole" in choice:
114
- return "Cole (Community)"
115
- if "Shortreed" in choice:
116
- return "Mr. Shortreed (Official)"
117
- return "Elon Ma (Official)"
118
-
119
- # ChatInterface using OpenAI-style messages.
120
- chatbot = gr.ChatInterface(
121
- lambda msg, hist, char: respond(msg, hist, clean_choice(char)),
122
- additional_inputs=[character],
123
- type="messages"
124
- )
125
 
126
  demo.launch(share=True)
 
57
  )
58
  }
59
 
60
+ def stream_response(message, history, character):
61
+ """
62
+ Calls the API and yields partial responses for streaming.
63
+ """
64
  system_message = PROMPTS.get(character, "")
65
+ # Build messages using a list of dictionaries.
66
  messages = [{"role": "system", "content": system_message}]
67
+ for msg in history:
68
+ messages.append(msg)
 
 
 
69
  messages.append({"role": "user", "content": message})
70
 
71
  payload = {
 
82
  content = response.json()["choices"][0]["message"]["content"]
83
 
84
  stream_response = ""
85
+ # Simulate streaming by yielding token-by-token.
86
  for token in content.split():
87
  stream_response += token + " "
88
  time.sleep(0.02)
89
+ yield stream_response.strip()
 
90
  except Exception as e:
91
+ yield f"Error: {str(e)}"
92
+
93
def chat(user_message, history, character):
    """
    Append the user message to the conversation history, then stream the
    assistant's reply.

    The conversation history is a list of dicts with keys "role" and
    "content" (OpenAI-style messages).

    Yields:
        (messages, messages) pairs — the same updated conversation twice,
        because the `msg.submit` wiring declares two outputs
        (`outputs=[chatbot, state]`) and Gradio expects one value per output.

    Note: `stream_response` appends the new user message to its payload
    itself, so we pass it the history *before* the user turn was added —
    the original code passed the already-augmented history, duplicating
    the user message in the API request.
    """
    # Ensure history is a list and never mutate the caller's object.
    history = (history or []).copy()
    # Snapshot of the conversation BEFORE this user turn (see note above).
    prior = history.copy()
    # Append user message for display.
    history.append({"role": "user", "content": user_message})

    # Stream partial assistant responses; each yield updates chatbot AND state.
    full_response = ""
    for partial in stream_response(user_message, prior, character):
        full_response = partial
        updated = history + [{"role": "assistant", "content": full_response}]
        yield updated, updated

    # Persist the final assistant message in the conversation.
    history.append({"role": "assistant", "content": full_response})
    yield history, history
113
+
114
def clean_choice(choice):
    """
    Map a dropdown label (with emoji/description suffix) to its PROMPTS key.

    Falls back to "Elon Ma (Official)" when no known name appears in *choice*.
    """
    # (marker substring, canonical PROMPTS key), checked in order.
    dispatch = (
        ("Elon", "Elon Ma (Official)"),
        ("Cole", "Cole (Community)"),
        ("Shortreed", "Mr. Shortreed (Official)"),
    )
    for marker, key in dispatch:
        if marker in choice:
            return key
    return "Elon Ma (Official)"
125
 
126
  with gr.Blocks(css=css) as demo:
127
  # Header with QClone Public label.
128
  gr.HTML("<h1>QClone <span style='background-color:#3b82f6;color:white;font-size:0.75em;padding:2px 6px;border-radius:5px;margin-left:8px;'>Public</span></h1>")
129
 
130
  with gr.Row():
131
+ with gr.Column(scale=1):
132
+ # Dropdown for model selection (smaller width).
133
+ model_dropdown = gr.Dropdown(
134
  choices=[
135
  "Elon Ma (Official) 🟡 - Broken English salesman",
136
  "Cole (Community) 🔵 - Gen Z slang troll",
 
139
  value="Elon Ma (Official) 🟡 - Broken English salesman",
140
  label="Model"
141
  )
142
+ with gr.Column(scale=3):
143
+ # Chatbot component to display conversation.
144
+ chatbot = gr.Chatbot(label="QClone Chat")
145
 
146
+ # Textbox for user input.
147
+ msg = gr.Textbox(label="Your Message", placeholder="Type your message here...")
148
+ # State to hold conversation history.
149
+ state = gr.State([])
150
+
151
+ # When user submits text (via button or Enter), update chat.
152
+ msg.submit(fn=lambda user_message, history, choice: chat(user_message, history, clean_choice(choice)),
153
+ inputs=[msg, state, model_dropdown],
154
+ outputs=[chatbot, state],
155
+ show_progress=True)
 
 
 
 
 
 
156
 
157
  demo.launch(share=True)