Vivien Chappelier committed
Commit ad681ae · 1 Parent(s): 1dc9286
Files changed (1)
  1. app.py +3 -3
app.py CHANGED
@@ -8,7 +8,7 @@ import time
 import gradio as gr
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-hf_token = os.getenv('HF_TOKEN')
+#hf_token = os.getenv('HF_TOKEN')
 
 device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
 
@@ -26,9 +26,9 @@ PAYLOAD_BITS = 2
 device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
 
 
-model = AutoModelForCausalLM.from_pretrained(args.model, use_auth_token=hf_token, torch_dtype=torch.float16,
+model = AutoModelForCausalLM.from_pretrained(args.model, torch_dtype=torch.float16,
                                              device_map='auto').to(device)
-tokenizer = AutoTokenizer.from_pretrained(args.model, use_auth_token=hf_token)
+tokenizer = AutoTokenizer.from_pretrained(args.model)
 tokenizer.pad_token = tokenizer.eos_token
 
 DEFAULT_SYSTEM_PROMPT = """\
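
The commit comments out the `HF_TOKEN` environment lookup and drops `use_auth_token` from both `from_pretrained` calls, so the app now loads the checkpoint without authentication. Below is a minimal sketch of the resulting loading path, assuming a public model; `"gpt2"` is only a placeholder for `args.model`, which app.py parses elsewhere.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# "gpt2" stands in for args.model (parsed elsewhere in app.py); any public
# checkpoint works here since no HF_TOKEN / use_auth_token is passed anymore.
model_name = "gpt2"

device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')

# device_map='auto' requires the accelerate package; with a single GPU it
# already places the weights on that device, so the trailing .to(device)
# simply mirrors app.py's call.
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16,
                                             device_map='auto').to(device)
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token  # reuse EOS as the padding token, as in app.py
```

If the checkpoint were gated or private, authentication would still be needed, e.g. via `huggingface-cli login` or by passing `token=` (the replacement for the deprecated `use_auth_token=` in recent transformers releases).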