schlenker committed on
Commit
d38687b
·
1 Parent(s): e75a56a

model bug fix

Browse files
Files changed (1) hide show
  1. app.py +5 -4
app.py CHANGED
@@ -28,10 +28,11 @@ def get_models(llama=False):
28
  access_token = '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'
29
  llama_tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=access_token, use_fast=True)#, use_fast=True)
30
  llama_model = AutoModelForCausalLM.from_pretrained(model_name, use_auth_token=access_token, device_map={'':0})#, load_in_4bit=True)
31
-
32
- st.write("The assistant is loaded and ready to use!")
33
-
34
- return model, tokenizer, llama_model, llama_tokenizer if llama else model, tokenizer
 
35
 
36
  model, tokenizer = get_models()
37
 
 
28
  access_token = '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'
29
  llama_tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=access_token, use_fast=True)#, use_fast=True)
30
  llama_model = AutoModelForCausalLM.from_pretrained(model_name, use_auth_token=access_token, device_map={'':0})#, load_in_4bit=True)
31
+ st.write("The assistant is loaded and ready to use!")
32
+ return model, tokenizer, llama_model, llama_tokenizer
33
+
34
+ else:
35
+ return model, tokenizer
36
 
37
  model, tokenizer = get_models()
38