SalexAI committed on
Commit
c8c1fbb
·
verified ·
1 Parent(s): 67340b5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +113 -24
app.py CHANGED
@@ -4,7 +4,33 @@ import requests
4
  import time
5
 
6
  css = """
7
- /* Your existing CSS */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
  """
9
 
10
  ACCESS_TOKEN = os.getenv("HF_TOKEN")
@@ -15,36 +41,98 @@ HEADERS = {
15
  "Authorization": f"Bearer {ACCESS_TOKEN}",
16
  }
17
 
 
18
  PROMPTS = {
19
  "Elon Ma (Official)": (
20
  "You are Elon Ma, a Chinese car salesman selling the Edision Model S.\n"
21
- "Respond in broken English..."
22
  ),
23
  "Cole (Community)": (
24
- "You are Cole, a Gen Z troll selling..."
 
25
  ),
26
  "Mr. Shortreed (Official)": (
27
- "You are Mr. Shortreed, a teacher explaining..."
28
- ),
 
29
  }
30
 
31
  def stream_response(message, history, character):
32
- # [ Your exact streaming code ]
33
- yield "..."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
 
35
  def chat(user_message, history, character):
36
- # [ Your code that updates `history` and calls `stream_response` ]
37
- pass
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
 
39
  def clean_choice(choice):
40
- # [ Your code to parse the dropdown label into the key for PROMPTS ]
41
- pass
 
 
 
 
 
 
 
 
42
 
43
  with gr.Blocks(css=css) as demo:
44
- gr.HTML("<h1>QClone Public</h1>")
 
45
 
46
  with gr.Row():
47
- with gr.Column():
 
48
  model_dropdown = gr.Dropdown(
49
  choices=[
50
  "Elon Ma (Official) 🟡 - Broken English salesman",
@@ -54,20 +142,21 @@ with gr.Blocks(css=css) as demo:
54
  value="Elon Ma (Official) 🟡 - Broken English salesman",
55
  label="Model"
56
  )
57
- with gr.Column():
58
- chatbot = gr.Chatbot()
59
-
60
- msg = gr.Textbox(label="Your Message")
 
 
 
61
  state = gr.State([])
62
 
 
63
  msg.submit(
64
  fn=lambda user_message, history, choice: chat(user_message, history, clean_choice(choice)),
65
  inputs=[msg, state, model_dropdown],
66
- outputs=[chatbot, state]
 
67
  )
68
-
69
- # Here's the important part:
70
- demo.launch(
71
- show_api=False, # <--- disable Gradio's internal API docs
72
- # share=False, # HF Spaces doesn't support share=True anyway
73
- )
 
4
  import time
5
 
6
# Custom CSS injected into the Gradio app: dark theme, centered 800px-wide
# layout, hidden Gradio footer, and dark-styled native <select>/<option>
# controls for the model dropdown.
css = """
.gradio-container {
    background-color: #1e1e2f;
    color: white;
    max-width: 800px !important;
    margin: auto;
    padding-top: 50px;
}
h1 {
    text-align: center;
    font-size: 2em;
    margin-bottom: 20px;
}
footer {
    visibility: hidden;
}
select {
    background-color: #2a2a40;
    color: white;
    padding: 6px 10px;
    border-radius: 8px;
    border: 1px solid #444;
    width: 300px;
}
option {
    background-color: #2a2a40;
    color: white;
}
"""
35
 
36
  ACCESS_TOKEN = os.getenv("HF_TOKEN")
 
41
  "Authorization": f"Bearer {ACCESS_TOKEN}",
42
  }
43
 
44
# System prompts defining each selectable persona. Keys must match the
# strings returned by clean_choice().
# NOTE(review): the "Edision" spelling is consistent across all three
# prompts — presumably an intentional fictional brand; confirm before "fixing".
PROMPTS = {
    "Elon Ma (Official)": (
        "You are Elon Ma, a Chinese car salesman selling the Edision Model S.\n"
        "Respond in broken English, overhyping the car, never mentioning Tesla."
    ),
    "Cole (Community)": (
        "You are Cole, a Gen Z troll who sells Edision Model S cars.\n"
        "You type like you're on TikTok, casually roasting the user."
    ),
    "Mr. Shortreed (Official)": (
        "You are Mr. Shortreed, a serious teacher explaining the Edision Model S.\n"
        "You use formal, educational language."
    )
}
59
 
60
def stream_response(message, history, character):
    """
    Query the chat-completions API once and yield progressively longer
    prefixes of the reply to simulate token-by-token streaming.

    Args:
        message: The new user message to send.
        history: Prior conversation turns as {"role", "content"} dicts.
        character: Key into PROMPTS selecting the system persona.

    Yields:
        Growing partial-response strings, or a single "Error: ..." string
        if the request fails or the payload is malformed.
    """
    system_message = PROMPTS.get(character, "")
    # Build the OpenAI-style message list: system prompt, prior turns,
    # then the new user message.
    messages = [{"role": "system", "content": system_message}]
    messages.extend(history)
    messages.append({"role": "user", "content": message})

    payload = {
        "model": "mistralai/Mistral-Small-24B-Instruct-2501",
        "messages": messages,
        "max_tokens": 512,
        "temperature": 0.7,
        "top_p": 0.95,
    }

    try:
        # timeout= keeps a stalled API from hanging the UI forever
        # (the original call had no timeout).
        response = requests.post(API_URL, headers=HEADERS, json=payload, timeout=60)
        response.raise_for_status()
        data = response.json()
        if "choices" not in data:
            # Surface the raw payload so API errors are debuggable from the UI.
            yield f"Error: API returned an unexpected response: {data}"
            return
        content = data["choices"][0]["message"]["content"]

        # Accumulator renamed from `stream_response`, which shadowed this
        # function's own name inside its body.
        partial_text = ""
        # Simulate streaming by yielding the reply word-by-word.
        for token in content.split():
            partial_text += token + " "
            time.sleep(0.02)
            yield partial_text.strip()
    except Exception as e:
        yield f"Error: {str(e)}"
97
 
98
def chat(user_message, history, character):
    """
    Append the user's message to the conversation and stream the reply.

    Args:
        user_message: Text the user just submitted.
        history: Conversation so far as {"role", "content"} dicts (or None).
        character: Key into PROMPTS selecting the persona.

    Yields:
        (messages, messages) pairs — one value for the Chatbot display and
        one for the session state, matching the handler's two outputs.
    """
    # Ensure history is a list and never mutate the caller's copy.
    history = (history or []).copy()
    # Keep the pre-message history for the API call: stream_response appends
    # `user_message` itself, so passing the updated history would send the
    # user's turn to the API twice (bug in the original).
    prior_turns = history.copy()
    history.append({"role": "user", "content": user_message})

    full_response = ""
    for partial in stream_response(user_message, prior_turns, character):
        full_response = partial
        updated = history + [{"role": "assistant", "content": full_response}]
        # Yield a pair: msg.submit declares outputs=[chatbot, state], so a
        # single value would not satisfy both outputs.
        yield updated, updated

    # Record the final assistant message in the local history.
    history.append({"role": "assistant", "content": full_response})
116
 
117
def clean_choice(choice):
    """
    Map a dropdown label onto its corresponding PROMPTS key.

    Checks for persona names in priority order and falls back to
    "Elon Ma (Official)" when no known name appears in the label.
    """
    label_map = (
        ("Elon", "Elon Ma (Official)"),
        ("Cole", "Cole (Community)"),
        ("Shortreed", "Mr. Shortreed (Official)"),
    )
    for needle, prompt_key in label_map:
        if needle in choice:
            return prompt_key
    return "Elon Ma (Official)"
128
 
129
  with gr.Blocks(css=css) as demo:
130
+ # Header with QClone Public label.
131
+ gr.HTML("<h1>QClone <span style='background-color:#3b82f6;color:white;font-size:0.75em;padding:2px 6px;border-radius:5px;margin-left:8px;'>Public</span></h1>")
132
 
133
  with gr.Row():
134
+ with gr.Column(scale=1):
135
+ # Dropdown for model selection (smaller width).
136
  model_dropdown = gr.Dropdown(
137
  choices=[
138
  "Elon Ma (Official) 🟡 - Broken English salesman",
 
142
  value="Elon Ma (Official) 🟡 - Broken English salesman",
143
  label="Model"
144
  )
145
+ with gr.Column(scale=3):
146
+ # Chatbot component to display conversation.
147
+ chatbot = gr.Chatbot(label="QClone Chat")
148
+
149
+ # Textbox for user input.
150
+ msg = gr.Textbox(label="Your Message", placeholder="Type your message here...")
151
+ # State to hold conversation history.
152
  state = gr.State([])
153
 
154
+ # When user submits text, update chat.
155
  msg.submit(
156
  fn=lambda user_message, history, choice: chat(user_message, history, clean_choice(choice)),
157
  inputs=[msg, state, model_dropdown],
158
+ outputs=[chatbot, state],
159
+ show_progress=True
160
  )
161
+
162
+ demo.launch(show_api=False)