lambdabrendan committed
Commit bceca00 · verified · 1 Parent(s): 6826b0f

update app.py

Files changed (1):
  src/app.py +2 -2
src/app.py CHANGED
@@ -7,7 +7,7 @@ import numpy as np
 import gc
 from huggingface_hub import login
 
-st.set_page_config(page_title='Can you run it? LLM version', layout="wide", initial_sidebar_state="expanded")
+st.set_page_config(page_title='Lambda LLM Calculator', layout="wide", initial_sidebar_state="expanded")
 
 model_list = [
 "mistralai/Mistral-7B-v0.1",
@@ -114,7 +114,7 @@ with col.expander("Information", expanded=True):
 where is estimated as """)
 
 st.latex(r"""\text{Memory}_\text{Inference} \approx \text{Model Size} \times 1.2""")
-st.markdown("""- For LoRa Fine-tuning, I'm asuming a **16-bit** dtype of trainable parameters. The formula (in terms of GB) is""")
+st.markdown("""- For LoRa Fine-tuning, This assumes a **16-bit** dtype of trainable parameters. The formula (in terms of GB) is""")
 st.latex(r"\text{Memory}_\text{LoRa} \approx \text{Model Size} + \left(\text{ \# trainable Params}_\text{Billions}\times\frac{16}{8} \times 4\right) \times 1.2")
 
 access_token = st.sidebar.text_input("Access token")
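For context, the two st.latex lines in this hunk encode the app's memory estimates: inference memory is roughly model size times 1.2, and LoRA fine-tuning memory adds the 16-bit trainable parameters (16/8 = 2 bytes each) times a factor of 4, with the same ~20% overhead. A minimal sketch of those formulas as plain Python follows; the function and variable names are illustrative, not taken from app.py:

def inference_memory_gb(model_size_gb: float) -> float:
    """Inference memory ~= model size plus ~20% overhead (the x1.2 factor)."""
    return model_size_gb * 1.2

def lora_memory_gb(model_size_gb: float, trainable_params_billions: float) -> float:
    """LoRA fine-tuning memory, assuming 16-bit trainable parameters.

    16/8 = 2 bytes per trainable parameter; the x4 factor presumably covers
    the parameter copy plus gradients and optimizer states, and the trailing
    x1.2 is the same ~20% overhead used for inference.
    """
    return model_size_gb + (trainable_params_billions * (16 / 8) * 4) * 1.2

# Example: a 7B model stored in 16-bit (~14 GB) with 0.02B LoRA parameters.
print(f"{inference_memory_gb(14.0):.1f} GB")   # ~16.8 GB
print(f"{lora_memory_gb(14.0, 0.02):.1f} GB")  # ~14.2 GB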