z00logist committed on
Commit 3f73eba · verified · 1 Parent(s): 8a86c13

use chat completion

Files changed (1)
  1. app.py +8 -7
app.py CHANGED
@@ -65,15 +65,16 @@ def format_messages(user_text: str, level: int) -> t.Mapping[str, str]:
 
 
 def simplify_text(user_text: str, level: int) -> str:
-    client = InferenceClient(token=HF_API_TOKEN)
+    client = InferenceClient(model=MODEL_REPO_ID, token=HF_API_TOKEN)
     messages = format_messages(user_text, level)
 
-    response = client.text_generation(
-        model=MODEL_REPO_ID,
-        inputs={"messages": messages},
-        parameters={"max_new_tokens": 512, "temperature": 0.7, "top_p": 0.9},
-    )
-    return response
+    response = client.chat_completion(messages, max_tokens=512)
+    if response is not None and response.choices is not None:
+        content = response.choices[0].message.content
+        return content
+    else:
+        return "Error: No valid response generated."
+
 
 
 with gr.Blocks() as demo:
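
For reference, a minimal sketch of how the new chat-completion path can be exercised on its own, assuming huggingface_hub's InferenceClient and OpenAI-style role/content messages like those presumably built by format_messages; the repo id, token, and example messages below are placeholders, not values from this repo:

from huggingface_hub import InferenceClient

# Placeholder configuration (assumptions, not the app's real values).
MODEL_REPO_ID = "meta-llama/Llama-3.1-8B-Instruct"  # any chat-capable model repo
HF_API_TOKEN = "hf_..."  # in app.py this would come from the Space's secrets

client = InferenceClient(model=MODEL_REPO_ID, token=HF_API_TOKEN)

# chat_completion expects a list of {"role": ..., "content": ...} messages.
messages = [
    {"role": "system", "content": "Simplify the user's text to reading level 2."},
    {"role": "user", "content": "Photosynthesis converts light energy into chemical energy."},
]

response = client.chat_completion(messages, max_tokens=512)
print(response.choices[0].message.content)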