Warlord-K committed on
Commit
19b4eba
·
1 Parent(s): 01df155

Change LLaMA2-70B to Sheared 1.3B

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -34,7 +34,7 @@ torch_device = "cuda" if torch.cuda.is_available() else "cpu"
34
  print("Running on device:", torch_device)
35
  print("CPU threads:", torch.get_num_threads())
36
 
37
- model_id = "meta-llama/Llama-2-70b-chat-hf"
38
  biencoder = SentenceTransformer("intfloat/e5-large-v2", device=torch_device)
39
  cross_encoder = CrossEncoder("cross-encoder/ms-marco-MiniLM-L-12-v2", max_length=512, device=torch_device)
40
 
 
34
  print("Running on device:", torch_device)
35
  print("CPU threads:", torch.get_num_threads())
36
 
37
+ model_id = "princeton-nlp/Sheared-LLaMA-1.3B"
38
  biencoder = SentenceTransformer("intfloat/e5-large-v2", device=torch_device)
39
  cross_encoder = CrossEncoder("cross-encoder/ms-marco-MiniLM-L-12-v2", max_length=512, device=torch_device)
40