"""
@author: idoia lerchundi
"""
import os
import streamlit as st
from huggingface_hub import InferenceClient
# Load the API token from an environment variable
api_key = os.getenv("HF_TOKEN")
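
# Optional guard (a minimal sketch): stop early with a clear message if the
# token is missing, since the inference request would otherwise fail at runtime.
if not api_key:
    st.error("HF_TOKEN environment variable is not set.")
    st.stop()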
# Instantiate the InferenceClient
client = InferenceClient(api_key=api_key)
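# Note: InferenceClient exposes an OpenAI-compatible chat interface
# (client.chat.completions.create), which the streaming call below relies on.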
# Streamlit app title
st.title("Serverless Inference API")
# Ensure the full_text key is initialized in session state
if "full_text" not in st.session_state:
    st.session_state["full_text"] = ""
# Create a text input area for user prompts
with st.form("my_form"):
    text = st.text_area("Enter text:", "Tell me a short joke to make me laugh.")
    submitted = st.form_submit_button("Submit")
# Initialize the full_text variable
full_text = " "
if submitted:
    messages = [
        {"role": "user", "content": text}
    ]

    # Create a new stream for each submission
    stream = client.chat.completions.create(
        model="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
        messages=messages,
        temperature=0.5,
        max_tokens=300,
        top_p=0.7,
        stream=True
    )

    # Concatenate chunks to form the full response
    for chunk in stream:
        content = chunk.choices[0].delta.content
        if content is not None:  # some chunks (e.g. the final one) carry no text delta
            full_text += content

    # Update session state with the full response
    st.session_state["full_text"] = full_text
# Display the full response
if st.session_state["full_text"]:
    st.info(st.session_state["full_text"])
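
# To run locally (assuming this file is saved as app.py and a valid token is
# available in the environment):
#   HF_TOKEN=<your token> streamlit run app.py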