lambdabrendan committed
Commit 9ad9810 · verified · 1 Parent(s): 3685967

Update src/app.py


Adding more models

Files changed (1)
  1. src/app.py +6 -3
src/app.py CHANGED

@@ -34,6 +34,9 @@ model_list = [
     "tiiuae/falcon-40B-Instruct",
     "tiiuae/falcon-180B",
     "tiiuae/falcon-180B-Chat",
+    "google/gemma-7b",
+    "google/gemma-2b",
+    "abacusai/TheProfessor-155b",
 ]
 st.title("Lambda LLM VRAM Calculator")
 
@@ -121,10 +124,10 @@ with col.expander("Detailed Information", expanded=False):
     st.markdown("""- For LoRa Fine-tuning, This assumes a **16-bit** dtype of trainable parameters. The formula (in terms of GB) is""")
     st.latex(r"\text{Memory}_\text{LoRa} \approx \text{Model Size} + \left(\text{ \# trainable Params}_\text{Billions}\times\frac{16}{8} \times 4\right) \times 1.2")
 
-#access_token = st.sidebar.text_input("Access token")
+access_token = st.sidebar.text_input("Access token")
 
-#if access_token:
-#    login(token=access_token)
+if access_token:
+    login(token=access_token)
 
 #model_name = st.sidebar.text_input("Model name", value="mistralai/Mistral-7B-v0.1")
 with st.sidebar.container():
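
For readers trying the re-enabled token flow outside this file, a minimal sketch is below. It assumes `login` comes from `huggingface_hub` (the import is not visible in this hunk) and otherwise mirrors the added lines:

```python
import streamlit as st
from huggingface_hub import login  # assumed import; not shown in this diff hunk

# Sidebar field for a Hugging Face access token, matching the re-enabled lines.
access_token = st.sidebar.text_input("Access token")

# Only authenticate when a token is actually supplied; this is what lets the
# app read gated checkpoints such as the newly added google/gemma models.
if access_token:
    login(token=access_token)
```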
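
The LoRA estimate quoted in the context lines can also be sanity-checked numerically. A small sketch of that arithmetic follows; the helper name, the `model_size_gb` input, and the example numbers are illustrative assumptions rather than values taken from the app:

```python
def lora_memory_gb(model_size_gb: float, trainable_params_billions: float) -> float:
    """Follow the formula shown in app.py:
    Memory_LoRa ≈ Model Size + (# trainable params [billions] * 16/8 * 4) * 1.2
    16/8 converts the 16-bit dtype to bytes; the *4 and *1.2 factors are the
    app's fixed multipliers (presumably optimizer/gradient state and overhead)."""
    return model_size_gb + (trainable_params_billions * (16 / 8) * 4) * 1.2

# Example: a base model occupying ~14 GB with ~0.04B (40M) trainable LoRA params.
print(round(lora_memory_gb(14.0, 0.04), 2))  # -> 14.38
```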