# NOTE(review): the lines "Spaces: / Sleeping / Sleeping" were Hugging Face
# Spaces page chrome captured when this file was exported — not Python code.
# Converted to a comment so the module parses.
# --- Disabled earlier version (kept for reference): Llama-2 text-generation UI ---
# Trailing "| |" extraction residue stripped from every line.
# try:
#     import streamlit as st
#     from transformers import AutoTokenizer, pipeline
#     model_name = "NousResearch/Llama-2-7b-chat-hf"
#     print('tokenizer_loading')
#     tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
#     tokenizer.pad_token = tokenizer.eos_token
#     tokenizer.padding_side = "right"
#     print('tokenizer_loaded')
#     model = "Hardik1234/llama-finetune-reactjs"
#     print('loading_model')
#     pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=2048)
#     print('model_loaded')
#     prompt = st.text_area('Enter prompt: ')
#     if prompt:
#         print('taking prompt')
#         result = pipe(f"<s> [INST] {prompt} [/INST] ")
#         print('generating output')
#         st.json(result[0]['generated_text'])
# except Exception as e:
#     print(e)
"""Streamlit app: run a Hugging Face sentiment-analysis pipeline on user text.

Fixes applied:
- Removed the trailing " | |" table-extraction residue that made every line
  a syntax error.
- Dropped the unused ``AutoTokenizer`` import (only referenced by the
  commented-out Llama-2 variant above).
"""
import streamlit as st
from transformers import pipeline

# Default sentiment-analysis checkpoint (distilbert-base-uncased-finetuned-sst-2);
# downloaded and cached on first run.
pipe = pipeline('sentiment-analysis')

# Text-area widget returns '' until the user enters something, so the
# truthiness check below skips inference for empty input.
text = st.text_area('Enter text:')
if text:
    # pipeline output is a list of {'label': ..., 'score': ...} dicts;
    # render it as JSON in the app.
    st.json(pipe(text))