SalexAI committed (verified)
Commit ca8ce4a · 1 Parent(s): 112778b

Update app.py

Files changed (1): app.py (+29 -36)
app.py CHANGED
@@ -19,22 +19,6 @@ h1 {
 footer {
     visibility: hidden;
 }
-.badge {
-    background-color: #3b82f6;
-    color: white;
-    font-size: 0.75em;
-    padding: 2px 6px;
-    border-radius: 5px;
-    margin-left: 8px;
-}
-.badge-official {
-    background-color: #ffd700;
-    color: black;
-}
-.description {
-    color: #aaa;
-    font-size: 0.85em;
-}
 select {
     background-color: #2a2a40;
     color: white;
@@ -57,19 +41,24 @@ HEADERS = {
     "Authorization": f"Bearer {ACCESS_TOKEN}",
 }
 
+# Define our system prompts
 PROMPTS = {
-    "Elon Ma (Official)": """You are Elon Ma, a Chinese car salesman selling the Edision Model S.
-Respond in broken English, overhyping the car, never mentioning Tesla.""",
-    "Cole (Community)": """You are Cole, a Gen Z troll who sells Edision Model S cars.
-You type like you're on TikTok, casually roasting the user.""",
-    "Mr. Shortreed (Official)": """You are Mr. Shortreed, a serious teacher explaining the Edision Model S.
-You use formal, educational language."""
+    "Elon Ma (Official)": (
+        "You are Elon Ma, a Chinese car salesman selling the Edision Model S.\n"
+        "Respond in broken English, overhyping the car, never mentioning Tesla."
+    ),
+    "Cole (Community)": (
+        "You are Cole, a Gen Z troll who sells Edision Model S cars.\n"
+        "You type like you're on TikTok, casually roasting the user."
+    ),
+    "Mr. Shortreed (Official)": (
+        "You are Mr. Shortreed, a serious teacher explaining the Edision Model S.\n"
+        "You use formal, educational language."
+    )
 }
 
-
 def respond(message, history, character):
     system_message = PROMPTS.get(character, "")
-
     messages = [{"role": "system", "content": system_message}]
     for user_msg, bot_msg in history:
         if user_msg:
@@ -77,7 +66,7 @@ def respond(message, history, character):
         if bot_msg:
             messages.append({"role": "assistant", "content": bot_msg})
     messages.append({"role": "user", "content": message})
-
+
     payload = {
         "model": "mistralai/Mistral-Small-24B-Instruct-2501",
         "messages": messages,
@@ -85,28 +74,29 @@ def respond(message, history, character):
         "temperature": 0.7,
         "top_p": 0.95,
     }
-
+
     try:
         response = requests.post(API_URL, headers=HEADERS, json=payload)
         response.raise_for_status()
         content = response.json()["choices"][0]["message"]["content"]
 
-        # Streaming effect
         stream_response = ""
         for token in content.split():
            stream_response += token + " "
            time.sleep(0.02)
-            yield stream_response.strip()
+            # Yield a dictionary so that ChatInterface's streaming works correctly
+            yield {"role": "assistant", "content": stream_response.strip()}
     except Exception as e:
-        yield f"Error: {str(e)}"
-
+        yield {"role": "assistant", "content": f"Error: {str(e)}"}
 
 with gr.Blocks(css=css) as demo:
-    gr.HTML("<h1>QClone <span class='badge'>Public</span></h1>")
-
+    # Header with QClone Public label
+    gr.HTML("<h1>QClone <span style='background-color:#3b82f6;color:white;font-size:0.75em;padding:2px 6px;border-radius:5px;margin-left:8px;'>Public</span></h1>")
+
+    # Create a dropdown with rich labels in the text.
+    # The dropdown options include emojis and descriptions.
     with gr.Row():
         with gr.Column():
-            gr.HTML("<div style='margin-bottom:10px;'>Select Model</div>")
             character = gr.Dropdown(
                 choices=[
                     "Elon Ma (Official) 🟡 - Broken English salesman",
@@ -116,7 +106,8 @@ with gr.Blocks(css=css) as demo:
                 value="Elon Ma (Official) 🟡 - Broken English salesman",
                 label="Model"
             )
-
+
+            # Helper function to extract the clean key for our prompts.
             def clean_choice(choice):
                 if "Elon" in choice:
                     return "Elon Ma (Official)"
@@ -125,10 +116,12 @@ with gr.Blocks(css=css) as demo:
                 if "Shortreed" in choice:
                     return "Mr. Shortreed (Official)"
                 return "Elon Ma (Official)"
-
+
+            # ChatInterface with streaming enabled
             chatbot = gr.ChatInterface(
                 lambda msg, hist, char: respond(msg, hist, clean_choice(char)),
                 additional_inputs=[character],
+                type="messages"  # use openai-style message format
             )
-
+
 demo.launch(share=True)
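
For reference, a minimal standalone sketch of the pattern this commit moves to: the chat function yields OpenAI-style message dicts and ChatInterface is created with type="messages". This assumes a recent Gradio release that supports the messages format; the echo reply below is purely illustrative and stands in for this app's API call.

import time
import gradio as gr

def respond(message, history):
    # With type="messages", `history` arrives as a list of
    # {"role": ..., "content": ...} dicts rather than (user, bot) tuples.
    reply = f"You said: {message}"
    streamed = ""
    for token in reply.split():
        streamed += token + " "
        time.sleep(0.02)
        # Yielding a message dict streams it as the assistant turn.
        yield {"role": "assistant", "content": streamed.strip()}

demo = gr.ChatInterface(respond, type="messages")
demo.launch()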