Hardik1234 committed on
Commit
01c85e9
·
verified ·
1 Parent(s): 95ecca0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -27
app.py CHANGED
@@ -1,32 +1,32 @@
1
- try:
2
 
3
 
4
- import streamlit as st
5
- from transformers import AutoTokenizer,pipeline
6
 
7
- model_name ="NousResearch/Llama-2-7b-chat-hf"
8
- print('tokenizer_loading')
9
- tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
10
- tokenizer.pad_token = tokenizer.eos_token
11
- tokenizer.padding_side = "right"
12
- print('tokenizer_loaded')
13
 
14
- model = "Hardik1234/llama-finetune-reactjs"
15
- print('loading_model')
16
- pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=2048)
17
- print('model_loaded')
18
- prompt = st.text_area('Enter prompt: ')
19
- if prompt:
20
- print('taking prompt')
21
- result = pipe(f"<s> [INST] {prompt} [/INST] ")
22
- print('generating output')
23
- st.json(result[0]['generated_text'])
24
- except Exception as e:
25
- print(e)
26
 
27
- # import streamlit as st
28
- # from transformers import AutoTokenizer,pipeline
29
- # pipe = pipeline('sentiment-analysis')
30
- # text = st.text_area('Enter text:')
31
- # if text:
32
- # st.json(pipe(text))
 
1
"""Minimal Streamlit sentiment-analysis demo.

Renders a text area; when the user enters text, runs it through the
Hugging Face `transformers` sentiment-analysis pipeline and displays the
raw result (a list of {label, score} dicts) as JSON.

NOTE(review): the previous Llama-2 text-generation version of this app was
removed in this commit; recover it from git history (parent 95ecca0) if
needed, rather than keeping it as commented-out dead code here.
"""
import streamlit as st
# AutoTokenizer was imported but never used in this version — dropped.
from transformers import pipeline

# Built at module import time. Streamlit re-executes this script on every
# user interaction, so the default sentiment model is reloaded each run;
# wrapping this in @st.cache_resource would avoid that — TODO confirm the
# deployed Streamlit version supports it.
sentiment = pipeline('sentiment-analysis')

text = st.text_area('Enter text:')
if text:
    # pipeline(...) returns e.g. [{"label": "POSITIVE", "score": 0.99}]
    st.json(sentiment(text))