|
from transformers import pipeline |
|
from huggingface_hub import login as hf_login |
|
import streamlit as st |
|
|
|
|
|
# Hugging Face access token read from Streamlit secrets
# (the [default] section of .streamlit/secrets.toml).
# NOTE(review): presumably the model repo is private/gated and needs
# authentication — confirm; public repos would not need a token.
hf_token = st.secrets["default"]["hf_token"]
|
|
|
@st.cache_resource
def load_model():
    """Load and cache the text-generation pipeline.

    Decorated with ``st.cache_resource`` so the model is downloaded and
    instantiated once per server process instead of on every Streamlit
    script rerun.

    Returns:
        transformers.Pipeline: a ``text-generation`` pipeline for the
        ``MSey/tiny_CaLL_r10_O1_f10_LT_c1022`` checkpoint.
    """
    # `use_auth_token` is deprecated in recent transformers releases
    # (removed going forward); `token` is the supported keyword for
    # authenticated Hub downloads.
    return pipeline(
        "text-generation",
        model="MSey/tiny_CaLL_r10_O1_f10_LT_c1022",
        token=hf_token,
    )
|
|
|
# Build the cached pipeline once; subsequent reruns reuse it.
model = load_model()

# The header previously read "...f10_c1022", which did not match the
# loaded checkpoint id; keep the displayed name in sync with the model.
st.header("Test Environment for tiny_CaLL_r10_O1_f10_LT_c1022")

user_input = st.text_input("Enter your Prompt here:", "")

# Only generate when the user has typed something (empty string is falsy).
if user_input:
    with st.spinner('Generating response...'):
        # NOTE(review): `max_length` counts the prompt tokens too, so a
        # long prompt leaves little room for new text — consider
        # `max_new_tokens` if output length should be independent of the
        # prompt length.
        response = model(user_input, max_length=50, num_return_sequences=1)
        # Pipeline returns a list of dicts; take the single sequence.
        generated_text = response[0]['generated_text']
        st.write("Generated Text:")
        st.write(generated_text)