Hardik1234 committed on
Commit
de2bacd
·
verified ·
1 Parent(s): 95518d6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -14
app.py CHANGED
@@ -1,14 +1,24 @@
1
- import streamlit as st
2
- from transformers import AutoTokenizer
3
- model_name ="NousResearch/Llama-2-7b-chat-hf"
4
- tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
5
- tokenizer.pad_token = tokenizer.eos_token
6
- tokenizer.padding_side = "right"
7
-
8
-
9
- model = "Hardik1234/llama-finetune-reactjs"
10
- pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=2048)
11
- prompt = st.text_area('Enter prompt: ')
12
- if prompt:
13
- result = pipe(f"<s> [INST] {prompt} [/INST] ")
14
- st.json(result[0]['generated_text'])
 
 
 
 
 
 
 
 
 
 
 
1
+ try:
2
+
3
+
4
+ import streamlit as st
5
+ from transformers import AutoTokenizer
6
+ model_name ="NousResearch/Llama-2-7b-chat-hf"
7
+ print('tokenizer_loading')
8
+ tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
9
+ tokenizer.pad_token = tokenizer.eos_token
10
+ tokenizer.padding_side = "right"
11
+ print('tokenizer_loaded')
12
+
13
+ model = "Hardik1234/llama-finetune-reactjs"
14
+ print('loading_model')
15
+ pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=2048)
16
+ print('model_loaded')
17
+ prompt = st.text_area('Enter prompt: ')
18
+ if prompt:
19
+ print('taking prompt')
20
+ result = pipe(f"<s> [INST] {prompt} [/INST] ")
21
+ print('generating output')
22
+ st.json(result[0]['generated_text'])
23
+ except error:
24
+ print(error)