tomaszki committed on
Commit
39de0ac
·
verified ·
1 Parent(s): abdf1f2

Switch model from llama to a small 1.5B Qwen

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -4,7 +4,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
4
  import plotly.express as px
5
 
6
 
7
- model_name = 'meta-llama/Llama-2-7b-hf'
8
  device = 'cuda' if torch.cuda.is_available() else 'cpu'
9
 
10
  @st.cache_resource
 
4
  import plotly.express as px
5
 
6
 
7
+ model_name = 'Qwen/Qwen2-1.5B'
8
  device = 'cuda' if torch.cuda.is_available() else 'cpu'
9
 
10
  @st.cache_resource