Alaaeldin commited on
Commit
70157ba
·
verified ·
1 Parent(s): af8b2d6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +56 -8
app.py CHANGED
@@ -9,21 +9,69 @@ from huggingface_hub import login
9
  st.set_page_config(page_title="LLaMA Chatbot", page_icon="πŸ¦™")
10
  status_placeholder = st.empty()
11
 
12
- # Try to get the token from public variable
13
  try:
14
- # Note we changed HF_TOKEN25 to HF_TOKEN to match your variable name
15
  hf_token = os.environ.get("HF_TOKEN")
16
-
17
  if not hf_token:
18
  raise ValueError("Token not found")
19
-
20
- status_placeholder.success("πŸ”‘ Successfully found HF token!")
21
  login(token=hf_token)
22
  status_placeholder.success("πŸ”‘ Successfully logged in to Hugging Face!")
23
-
24
  except Exception as e:
25
  status_placeholder.error(f"🚫 Error with HF token: {str(e)}")
26
- st.error("Please ensure the token is properly set as a public variable")
27
  st.stop()
28
 
29
- st.title("πŸ¦™ LLaMA Chatbot")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
# Page chrome and a single placeholder reused for auth status messages.
# Fix: the emoji in the UI strings were mojibake ("πŸ¦™", "πŸ”‘") from a
# mis-decoded UTF-8 round trip; restored to the intended 🦙 / 🔑.
st.set_page_config(page_title="LLaMA Chatbot", page_icon="🦙")
status_placeholder = st.empty()

# Authentication (keeping the working code).
# Reads HF_TOKEN from the environment and logs in to the Hugging Face Hub;
# any failure (missing token, bad credentials) halts the app immediately.
try:
    hf_token = os.environ.get("HF_TOKEN")
    if not hf_token:
        raise ValueError("Token not found")
    login(token=hf_token)
    status_placeholder.success("🔑 Successfully logged in to Hugging Face!")
except Exception as e:
    status_placeholder.error(f"🚫 Error with HF token: {str(e)}")
    st.stop()  # nothing below can work without a valid token

st.title("🦙 LLaMA Chatbot")
24
+
25
# Model loading with detailed status updates
@st.cache_resource
def load_model():
    """Load the chat model and tokenizer from the Hugging Face Hub.

    Cached by Streamlit (`st.cache_resource`) so the heavy download/load
    happens once per server process, not on every rerun.

    Returns:
        tuple: ``(model, tokenizer)`` on success, ``(None, None)`` on any
        failure — callers must check for ``None`` before use.
    """
    try:
        model_path = "Alaaeldin/llama2-app"

        with st.spinner("🔄 Loading tokenizer..."):
            tokenizer = AutoTokenizer.from_pretrained(
                model_path,
                token=hf_token,  # module-level token, validated by login() above
                trust_remote_code=True,
            )
        st.success("✅ Tokenizer loaded!")

        with st.spinner("🔄 Loading model... This might take a few minutes..."):
            # NOTE(review): the bare `load_in_8bit=True` kwarg is deprecated in
            # recent transformers — prefer
            # `quantization_config=BitsAndBytesConfig(load_in_8bit=True)`;
            # kept as-is because the file's import block is not visible here.
            # 8-bit loading also requires `bitsandbytes` and a CUDA device —
            # TODO confirm the deployment environment provides both.
            model = AutoModelForCausalLM.from_pretrained(
                model_path,
                torch_dtype=torch.float16,  # dtype for the non-quantized modules
                device_map="auto",
                load_in_8bit=True,
                token=hf_token,
                trust_remote_code=True,
            )
        st.success("✅ Model loaded!")

        return model, tokenizer
    except Exception as e:
        # Best-effort: surface the error in the UI and let the caller
        # degrade gracefully instead of crashing the whole app.
        st.error(f"❌ Error loading model: {str(e)}")
        return None, None
# Conversation state: one {"role": ..., "content": ...} dict per turn,
# persisted across reruns in the Streamlit session.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Load model (cached — cheap after the first call).
model, tokenizer = load_model()

# Only expose the chat UI when both the model and tokenizer loaded.
if model and tokenizer:
    st.success("✨ Ready to chat! Enter your message below.")

    # Replay the conversation so far.
    for msg in st.session_state.messages:
        with st.chat_message(msg["role"]):
            st.markdown(msg["content"])

    # Read the next user turn; falsy when no input was submitted this rerun.
    prompt = st.chat_input("Your message")
    if prompt:
        # Record the turn, then echo it back in the chat pane.
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.markdown(prompt)