# NOTE(review): removed web-scrape artifacts that preceded this line
# (GitHub blob-view residue: "File size: 1,057 Bytes", commit hashes
# 01c85e9 / de2bacd / 5d7aa6a, and a spilled line-number column 1-32).
# None of it was Python source; with it present the file could not be imported.
# try:
    
        
#     import streamlit as st
#     from transformers import AutoTokenizer,pipeline

#     model_name ="NousResearch/Llama-2-7b-chat-hf"
#     print('tokenizer_loading')
#     tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
#     tokenizer.pad_token = tokenizer.eos_token
#     tokenizer.padding_side = "right" 
#     print('tokenizer_loaded')
    
#     model = "Hardik1234/llama-finetune-reactjs"
#     print('loading_model')
#     pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=2048)
#     print('model_loaded')
#     prompt = st.text_area('Enter prompt: ')
#     if prompt:
#         print('taking prompt')
#         result = pipe(f"<s> [INST] {prompt} [/INST] ")
#         print('generating output')
#         st.json(result[0]['generated_text'])
# except Exception as e:
#     print(e)

import streamlit as st
from transformers import AutoTokenizer,pipeline

# Minimal Streamlit demo: run a default sentiment-analysis pipeline over
# user-entered text and render the raw pipeline output as JSON.
# (AutoTokenizer is kept imported for the commented-out Llama-2 variant above.)
sentiment_pipe = pipeline('sentiment-analysis')

user_text = st.text_area('Enter text:')
if user_text:
    # pipeline() returns a list of {label, score} dicts; show it verbatim.
    st.json(sentiment_pipe(user_text))