Alaaeldin commited on
Commit
6bb7d92
·
verified ·
1 Parent(s): 19894a8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -7
app.py CHANGED
@@ -4,23 +4,34 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
4
  from peft import PeftModel, PeftConfig
5
  from huggingface_hub import login
6
 
 
 
 
 
 
 
7
  # Set your HuggingFace token
8
- hf_token = st.secrets["HF_TOKEN25"] # Using Streamlit secrets
9
  try:
10
  login(token=hf_token)
11
- st.success("Successfully logged in to Hugging Face!")
12
  except Exception as e:
13
- st.error(f"Error logging in to Hugging Face: {str(e)}")
14
 
15
- st.title("LLaMA Chatbot")
16
 
17
  @st.cache_resource
18
  def load_model():
19
  try:
20
  model_path = "Alaaeldin/llama2-app"
21
- st.info("Loading model... This might take a minute.")
22
 
 
 
23
  tokenizer = AutoTokenizer.from_pretrained(model_path, token=hf_token)
 
 
 
 
24
  model = AutoModelForCausalLM.from_pretrained(
25
  model_path,
26
  torch_dtype=torch.float16,
@@ -28,10 +39,13 @@ def load_model():
28
  load_in_8bit=True,
29
  token=hf_token
30
  )
31
- st.success("✅ Model loaded successfully!")
 
 
 
32
  return model, tokenizer
33
  except Exception as e:
34
- st.error(f"❌ Error loading model: {str(e)}")
35
  return None, None
36
 
37
  model, tokenizer = load_model()
 
4
  from peft import PeftModel, PeftConfig
5
  from huggingface_hub import login
6
 
7
+ # Set page config for better display
8
+ st.set_page_config(page_title="LLaMA Chatbot", page_icon="🦙")
9
+
10
+ # Create a status placeholder
11
+ status_placeholder = st.empty()
12
+
13
  # Set your HuggingFace token
14
+ hf_token = st.secrets["HF_TOKEN25"]
15
  try:
16
  login(token=hf_token)
17
+ status_placeholder.success("🔑 Successfully logged in to Hugging Face!")
18
  except Exception as e:
19
+ status_placeholder.error(f"🚫 Error logging in to Hugging Face: {str(e)}")
20
 
21
+ st.title("🦙 LLaMA Chatbot")
22
 
23
  @st.cache_resource
24
  def load_model():
25
  try:
26
  model_path = "Alaaeldin/llama2-app"
 
27
 
28
+ # Update status for tokenizer loading
29
+ status_placeholder.info("🔄 Loading tokenizer...")
30
  tokenizer = AutoTokenizer.from_pretrained(model_path, token=hf_token)
31
+ status_placeholder.success("✅ Tokenizer loaded successfully!")
32
+
33
+ # Update status for model loading
34
+ status_placeholder.info("🔄 Loading model... This might take a minute.")
35
  model = AutoModelForCausalLM.from_pretrained(
36
  model_path,
37
  torch_dtype=torch.float16,
 
39
  load_in_8bit=True,
40
  token=hf_token
41
  )
42
+ status_placeholder.success("✅ Model loaded successfully!")
43
+
44
+ # Final success message
45
+ st.success("✨ System is ready! You can now start chatting with the model.")
46
  return model, tokenizer
47
  except Exception as e:
48
+ status_placeholder.error(f"❌ Error loading model: {str(e)}")
49
  return None, None
50
 
51
  model, tokenizer = load_model()