update app.py
src/app.py (+2 −2)
@@ -7,7 +7,7 @@ import numpy as np
 import gc
 from huggingface_hub import login
 
-st.set_page_config(page_title='
+st.set_page_config(page_title='Lambda LLM Calculator', layout="wide", initial_sidebar_state="expanded")
 
 model_list = [
     "mistralai/Mistral-7B-v0.1",
@@ -114,7 +114,7 @@ with col.expander("Information", expanded=True):
     where is estimated as """)
 
     st.latex(r"""\text{Memory}_\text{Inference} \approx \text{Model Size} \times 1.2""")
-    st.markdown("""- For LoRa Fine-tuning,
+    st.markdown("""- For LoRa Fine-tuning, This assumes a **16-bit** dtype of trainable parameters. The formula (in terms of GB) is""")
     st.latex(r"\text{Memory}_\text{LoRa} \approx \text{Model Size} + \left(\text{ \# trainable Params}_\text{Billions}\times\frac{16}{8} \times 4\right) \times 1.2")
 
access_token = st.sidebar.text_input("Access token")
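Since the two st.latex lines above carry the calculator's actual memory estimates, here is a minimal Python sketch of the same arithmetic as a worked example. The function names and the sample figures (a 7B model at roughly 14 GB of 16-bit weights, 0.02 B trainable LoRA parameters) are illustrative assumptions, not values taken from the app:

# Worked example of the app's two memory estimates (names and sample
# numbers are illustrative assumptions, not part of this commit).

def inference_memory_gb(model_size_gb):
    # Memory_Inference ≈ Model Size × 1.2 (≈20% runtime overhead)
    return model_size_gb * 1.2

def lora_memory_gb(model_size_gb, trainable_params_billions):
    # Memory_LoRa ≈ Model Size + (# trainable params (B) × 16/8 × 4) × 1.2
    # 16/8 = 2 bytes per parameter at a 16-bit dtype; ×4 roughly covers
    # gradients plus optimizer state; ×1.2 adds the same 20% overhead.
    return model_size_gb + (trainable_params_billions * (16 / 8) * 4) * 1.2

# e.g. a 7B model stored in 16-bit ≈ 14 GB of weights, 0.02 B LoRA params:
print(inference_memory_gb(14.0))   # ≈ 16.8 GB
print(lora_memory_gb(14.0, 0.02))  # ≈ 14.19 GB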