shujjat committed
Commit df48231 · verified · 1 Parent(s): 3e50f51

Update app.py

Files changed (1): app.py (+23 -6)
app.py CHANGED
@@ -1,7 +1,12 @@
  import streamlit as st
  import os
+ import logging
  from transformers import AutoModelForCausalLM, AutoTokenizer
 
+ # Set up logging
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
  st.title("Meta LLaMA Text Generation")
 
  @st.cache_resource
@@ -9,16 +14,28 @@ def load_model():
      model_name = "meta-llama/Meta-Llama-3-8B"
      access_token = os.getenv('hf')
 
+     if not access_token:
+         st.error("Hugging Face access token is not set. Please set the environment variable 'hf'.")
+         return None, None
+
+     logger.info("Loading tokenizer and model...")
      tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=access_token)
      model = AutoModelForCausalLM.from_pretrained(model_name, use_auth_token=access_token)
      return tokenizer, model
 
  tokenizer, model = load_model()
 
- prompt = st.text_input("Enter a prompt:", "Once upon a time")
+ if tokenizer is not None and model is not None:
+     prompt = st.text_input("Enter a prompt:", "Once upon a time")
 
- if st.button("Generate Text"):
-     inputs = tokenizer(prompt, return_tensors="pt")
-     outputs = model.generate(**inputs, max_length=50)
-     generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
-     st.write(generated_text)
+     if st.button("Generate Text"):
+         try:
+             inputs = tokenizer(prompt, return_tensors="pt")
+             outputs = model.generate(**inputs, max_length=50)
+             generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+             st.write(generated_text)
+         except Exception as e:
+             st.error(f"An error occurred: {e}")
+             logger.error(f"An error occurred during text generation: {e}")
+ else:
+     st.error("Failed to load the model. Check the logs for more details.")
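Note: newer transformers releases deprecate the use_auth_token argument of from_pretrained in favor of token. A minimal sketch of the equivalent loading calls, assuming a current transformers version and the same model_name and access_token values as in the app:

    # Assumption: a transformers version that accepts token= instead of the deprecated use_auth_token=
    tokenizer = AutoTokenizer.from_pretrained(model_name, token=access_token)
    model = AutoModelForCausalLM.from_pretrained(model_name, token=access_token)

For the guard added in load_model() to pass, the access token must be available as the environment variable 'hf', for example as a Space secret or exported in the shell before running "streamlit run app.py".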