Spaces:
Running
Running
Create app.py
Browse files
app.py
CHANGED
@@ -1,25 +1,20 @@
|
|
1 |
-
import os
|
2 |
-
import torch
|
3 |
-
import pandas as pd
|
4 |
-
import streamlit as st
|
5 |
-
from transformers import pipeline
|
6 |
from huggingface_hub import InferenceClient
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
|
|
|
|
|
|
|
12 |
]
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
)
|
23 |
-
out = pipe(chat, max_new_tokens=128)
|
24 |
-
st.subheader('Suggestion:')
|
25 |
-
st.write(out[0]['generated_text'][-1]['content'])
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Minimal example: stream a chat completion from the Hugging Face Inference API."""
import os

from huggingface_hub import InferenceClient

# SECURITY: never hardcode API tokens in source. The original embedded a
# literal "hf_..." key — any real token committed this way must be rotated.
# Read it from the environment instead (HF_TOKEN is the conventional name).
client = InferenceClient(api_key=os.environ.get("HF_TOKEN"))

messages = [
    {
        "role": "user",
        "content": "What is the capital of France?"
    }
]

# stream=True yields the reply token-by-token instead of one final blob.
stream = client.chat.completions.create(
    model="HuggingFaceTB/SmolLM2-1.7B-Instruct",
    messages=messages,
    max_tokens=500,
    stream=True
)

for chunk in stream:
    # The terminal chunk of a stream may carry delta.content=None;
    # printing it unguarded would emit the literal text "None".
    content = chunk.choices[0].delta.content
    if content:
        print(content, end="")