Tonic committed on
Commit
7f4b3a6
·
unverified ·
1 Parent(s): a3f1b61

fix error content

Browse files
Files changed (1) hide show
  1. app.py +10 -4
app.py CHANGED
@@ -31,7 +31,7 @@ hf_client = InferenceClient(api_key=HF_TOKEN)
31
  # Function to process message and get response
32
  async def get_ai_response(message_content):
33
  try:
34
- messages = [{ "role": "system", content: "tu es \"orion\" une ia crée par ethan " },{"role": "user", "content": message_content}]
35
  response = ""
36
  stream = hf_client.chat.completions.create(
37
  model="Qwen/Qwen2.5-72B-Instruct",
@@ -42,11 +42,17 @@ async def get_ai_response(message_content):
42
  stream=True
43
  )
44
  for chunk in stream:
45
- content = chunk.choices[0].delta.content
46
- if content:
47
- response += content
 
 
 
 
 
48
  return response if response else "I couldn't generate a response."
49
  except Exception as e:
 
50
  return f"An error occurred: {str(e)}"
51
 
52
  @client.event
 
31
  # Function to process message and get response
32
  async def get_ai_response(message_content):
33
  try:
34
+ messages = [{"role": "user", "content": message_content}]
35
  response = ""
36
  stream = hf_client.chat.completions.create(
37
  model="Qwen/Qwen2.5-72B-Instruct",
 
42
  stream=True
43
  )
44
  for chunk in stream:
45
+ # Safely handle the chunk content
46
+ try:
47
+ delta_content = chunk.choices[0].delta.content
48
+ if delta_content is not None: # Only append if content exists
49
+ response += delta_content
50
+ except (AttributeError, IndexError) as e:
51
+ logging.warning(f"Skipping invalid chunk: {e}")
52
+ continue
53
  return response if response else "I couldn't generate a response."
54
  except Exception as e:
55
+ logging.error(f"Error in get_ai_response: {e}")
56
  return f"An error occurred: {str(e)}"
57
 
58
  @client.event