lambdabrendan committed
Commit 2929ed7 · verified · Parent: 24a4729

Update src/app.py

Files changed (1): src/app.py (+1 -4)
src/app.py CHANGED

@@ -34,9 +34,6 @@ model_list = [
     "tiiuae/falcon-40B-Instruct",
     "tiiuae/falcon-180B",
     "tiiuae/falcon-180B-Chat",
-    "google/gemma-7b",
-    "google/gemma-2b",
-    "abacusai/TheProfessor-155b",
 ]
 st.title("Lambda LLM VRAM Calculator")
 
@@ -124,7 +121,7 @@ with col.expander("Detailed Information", expanded=False):
     st.markdown("""- For LoRa Fine-tuning, This assumes a **16-bit** dtype of trainable parameters. The formula (in terms of GB) is""")
     st.latex(r"\text{Memory}_\text{LoRa} \approx \text{Model Size} + \left(\text{ \# trainable Params}_\text{Billions}\times\frac{16}{8} \times 4\right) \times 1.2")
 
-access_token = st.sidebar.text_input("Access token")
+# access_token = st.sidebar.text_input("Access token")
 
 # if access_token:
 #     login(token=access_token)
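The LoRA memory estimate rendered by the st.latex line in the second hunk can be sanity-checked numerically. Below is a minimal sketch of that formula as plain Python; the function name lora_vram_gb and the example inputs (a ~140 GB 16-bit base model with 0.5B trainable LoRA parameters) are illustrative assumptions, not values from the app.

    def lora_vram_gb(model_size_gb: float, trainable_params_billions: float) -> float:
        """Estimate LoRA fine-tuning VRAM in GB, following the formula in src/app.py:

        Memory_LoRA ~= Model Size + (# trainable params_B * 16/8 * 4) * 1.2
        """
        # 16/8 = bytes per 16-bit trainable parameter; x4 covers gradients and
        # optimizer state; x1.2 adds the app's 20% overhead margin.
        return model_size_gb + (trainable_params_billions * (16 / 8) * 4) * 1.2

    # Hypothetical example: ~140 GB 16-bit base model, 0.5B trainable params.
    print(round(lora_vram_gb(140.0, 0.5), 1))  # -> 144.8

As the example shows, the trainable LoRA parameters add only a few GB on top of the base model weights, which is why the formula is dominated by the Model Size term.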