Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -98,6 +98,17 @@ def run_huggingface_model(model, messages, max_tokens, temperature, top_p):
|
|
98 |
result = response.content
|
99 |
return result
|
100 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
101 |
def run_together_model(model, messages, max_tokens, temperature, top_p):
|
102 |
client = Together()
|
103 |
response = client.chat.completions.create(
|
@@ -193,7 +204,7 @@ with demo:
|
|
193 |
)
|
194 |
|
195 |
huggingface_run_button.click(
|
196 |
-
|
197 |
inputs=[huggingface_model_input, messages_state, max_tokens_slider, temperature_slider, top_p_slider],
|
198 |
outputs=[response_output],
|
199 |
)
|
|
|
98 |
result = response.content
|
99 |
return result
|
100 |
|
101 |
+
def run_huggingface_model_alt(model, messages, max_tokens, temperature, top_p):
    """Run a chat completion against a Hugging Face model and return the reply text.

    Parameters:
        model: Model identifier handed directly to ``InferenceClient``.
        messages: Chat history forwarded unchanged to ``chat_completion``.
        max_tokens: Generation cap forwarded as ``max_tokens``.
        temperature: Sampling temperature forwarded as-is.
        top_p: Nucleus-sampling parameter forwarded as-is.

    Returns:
        The content string of the first choice's message.

    NOTE(review): assumes ``InferenceClient`` (huggingface_hub) is imported at
    module level — the import is outside this diff view; confirm.
    """
    client = InferenceClient(model)
    # Non-streaming request: a single fully-formed response object comes back.
    response = client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=False,
        temperature=temperature,
        top_p=top_p,
    )
    return response.choices[0].message.content
|
111 |
+
|
112 |
def run_together_model(model, messages, max_tokens, temperature, top_p):
|
113 |
client = Together()
|
114 |
response = client.chat.completions.create(
|
|
|
204 |
)
|
205 |
|
206 |
huggingface_run_button.click(
|
207 |
+
run_huggingface_model_alt,
|
208 |
inputs=[huggingface_model_input, messages_state, max_tokens_slider, temperature_slider, top_p_slider],
|
209 |
outputs=[response_output],
|
210 |
)
|