"""
@author: idoia lerchundi
"""
import os
import random

import streamlit as st
from huggingface_hub import InferenceClient
# Load the API token from an environment variable
api_key = os.getenv("HF_TOKEN")
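# NOTE: HF_TOKEN must be set in the environment (for example as a Space secret);
# without it the InferenceClient cannot authenticate against the Inference API.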
# Instantiate the InferenceClient
client = InferenceClient(api_key=api_key)
# Streamlit app title
st.title("Serverless Inference API")
# Ensure the full_text key is initialized in session state
if "full_text" not in st.session_state:
st.session_state["full_text"] = ""
# Create a text input area for user prompts
with st.form("my_form"):
    text = st.text_area(
        "Enter text (using model TinyLlama/TinyLlama-1.1B-Chat-v1.0):",
        "Tell me a 4 sentence joke to make me laugh. A short joke, not a long one. "
        "With a random subject. You can not repeat the subject or the joke, so be creative.",
    )
    submitted = st.form_submit_button("Submit")
# Initialize the accumulator for the streamed response
full_text = ""
# Vary top_p (and temperature below) so repeated prompts produce different jokes
top_p_init = 0.7

# Generate a random temperature between 0.5 and 1.0
temperature = random.uniform(0.5, 1.0)
if submitted:
    # Nudge top_p upward for a little extra variety on this run
    top_p_init += 0.2

    messages = [
        {"role": "user", "content": text}
    ]

    # Create a new streaming chat completion for each submission
    stream = client.chat.completions.create(
        model="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
        messages=messages,
        temperature=temperature,  # use the randomized temperature instead of a fixed value
        max_tokens=300,
        top_p=top_p_init,
        stream=True,
    )
    # Concatenate streamed chunks to form the full response (some deltas may be None)
    for chunk in stream:
        full_text += chunk.choices[0].delta.content or ""

    # Update session state with the full response
    st.session_state["full_text"] = full_text
# Display the full response
if st.session_state["full_text"]:
    st.info(st.session_state["full_text"])
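# To try the app locally (a sketch, assuming this file is saved as app.py and
# streamlit is installed):
#   export HF_TOKEN=<your Hugging Face token>
#   streamlit run app.py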