Tijmen2 committed on
Commit
abe401d
·
verified ·
1 Parent(s): 6a2645a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -20
app.py CHANGED
@@ -29,26 +29,20 @@ GREETING_MESSAGES = [
29
  "The universe awaits! I'm AstroSage. What astronomical wonders shall we discuss?",
30
  ]
31
 
32
- def get_random_greeting():
33
- return random.choice(GREETING_MESSAGES)
34
-
35
  def respond_stream(message, history):
36
- if not message: # Handle empty messages
37
  return
38
 
39
- system_message = "You are AstroSage, a highly knowledgeable AI assistant..." # ... (your system message)
40
  messages = [{"role": "system", "content": system_message}]
41
-
42
- # Format history correctly (especially important if you use clear)
43
  for user, assistant in history:
44
  messages.append({"role": "user", "content": user})
45
- if assistant: # Check if assistant message exists
46
  messages.append({"role": "assistant", "content": assistant})
47
-
48
  messages.append({"role": "user", "content": message})
49
 
50
  try:
51
- response_content = ""
52
  for chunk in llm.create_chat_completion(
53
  messages=messages,
54
  max_tokens=512,
@@ -57,20 +51,24 @@ def respond_stream(message, history):
57
  stream=True
58
  ):
59
  delta = chunk["choices"][0]["delta"]
60
- if "content" in delta: # check if content exists in delta
61
- response_content += delta["content"]
62
- yield response_content # yield inside the loop for streaming
 
63
  except Exception as e:
64
  yield f"Error during generation: {e}"
65
 
66
-
67
- # Display the welcome message as the first assistant message
68
  initial_message = random.choice(GREETING_MESSAGES)
69
- chatbot = gr.Chatbot(value=[[None, initial_message]]) # Set initial value here
70
 
71
  with gr.Blocks() as demo:
72
- chatbot.render()
73
- clear = gr.Button("Clear")
74
- clear.click(lambda: None, None, chatbot, fn=lambda: [])
 
 
 
 
 
75
 
76
- demo.queue().launch()
 
29
  "The universe awaits! I'm AstroSage. What astronomical wonders shall we discuss?",
30
  ]
31
 
 
 
 
32
  def respond_stream(message, history):
33
+ if not message:
34
  return
35
 
36
+ system_message = "Assume the role of AstroSage, a helpful chatbot designed to answer user queries about astronomy, astrophysics, and cosmology."
37
  messages = [{"role": "system", "content": system_message}]
 
 
38
  for user, assistant in history:
39
  messages.append({"role": "user", "content": user})
40
+ if assistant:
41
  messages.append({"role": "assistant", "content": assistant})
 
42
  messages.append({"role": "user", "content": message})
43
 
44
  try:
45
+ past_tokens = "" # Accumulate and yield all tokens so far
46
  for chunk in llm.create_chat_completion(
47
  messages=messages,
48
  max_tokens=512,
 
51
  stream=True
52
  ):
53
  delta = chunk["choices"][0]["delta"]
54
+ if "content" in delta:
55
+ new_tokens = delta["content"]
56
+ past_tokens += new_tokens
57
+ yield past_tokens # Yield the accumulated response to allow streaming
58
  except Exception as e:
59
  yield f"Error during generation: {e}"
60
 
 
 
61
# --- UI wiring (module level, runs on import) -------------------------------
# Seed the chat with a random greeting so the assistant speaks first.
initial_message = random.choice(GREETING_MESSAGES)

# FIX: `.style(height=750)` was deprecated in Gradio 3.x and removed in 4.x
# (calling it raises AttributeError). `height` is a constructor parameter of
# gr.Chatbot, which is the supported way to size the component.
chatbot = gr.Chatbot([[None, initial_message]], height=750)

with gr.Blocks() as demo:
    with gr.Row():
        # Wide column for the conversation, narrow one for controls.
        with gr.Column(scale=0.8):
            chatbot.render()

        with gr.Column(scale=0.2):
            clear = gr.Button("Clear")

    # Reset the chat history; queue=False so the clear happens immediately
    # instead of waiting behind queued generation requests.
    clear.click(lambda: [], None, chatbot, queue=False)

demo.queue().launch()