ZeusCabanas committed on
Commit
18fbc58
·
1 Parent(s): dd825f3
Files changed (1) hide show
  1. app.py +8 -4
app.py CHANGED
@@ -15,8 +15,7 @@ def format_messages(history: List[Tuple[str, str]], system_message: str, user_me
15
  messages.append({"role": "user", "content": str(user_message)}) # Convert user_message to string
16
  return messages
17
 
18
- def respond(message: str, history: List[Tuple[str, str]]) -> str:
19
- # Default values for parameters
20
  system_message = "You are a helpful AI assistant."
21
  max_tokens = 1000
22
  temperature = 0.7
@@ -26,7 +25,7 @@ def respond(message: str, history: List[Tuple[str, str]]) -> str:
26
  response = ""
27
 
28
  try:
29
- for msg in client.chat_completion(
30
  messages,
31
  max_tokens=max_tokens,
32
  stream=True,
@@ -38,9 +37,14 @@ def respond(message: str, history: List[Tuple[str, str]]) -> str:
38
  if token is not None:
39
  response += token
40
  yield response
 
 
 
 
41
  except Exception as e:
42
- return f"Error: {str(e)}"
43
 
 
44
  demo = gr.ChatInterface(
45
  fn=respond,
46
  title="Demo GPT-BI instruct",
 
15
  messages.append({"role": "user", "content": str(user_message)}) # Convert user_message to string
16
  return messages
17
 
18
+ async def respond(message: str, history: List[Tuple[str, str]]) -> str:
 
19
  system_message = "You are a helpful AI assistant."
20
  max_tokens = 1000
21
  temperature = 0.7
 
25
  response = ""
26
 
27
  try:
28
+ async for msg in client.chat_completion(
29
  messages,
30
  max_tokens=max_tokens,
31
  stream=True,
 
37
  if token is not None:
38
  response += token
39
  yield response
40
+
41
+ if not response: # Handle empty response case
42
+ yield "No response generated."
43
+
44
  except Exception as e:
45
+ yield f"Error: {str(e)}"
46
 
47
+ # Update the ChatInterface to use async function
48
  demo = gr.ChatInterface(
49
  fn=respond,
50
  title="Demo GPT-BI instruct",