loubnabnl HF staff committed on
Commit
6e3059f
·
1 Parent(s): 439be7a
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -16,10 +16,10 @@ example = [
16
  [EXAMPLE_3, 11, 0.2, 42],
17
  ]
18
 
19
- # change model to the finetuned one
20
  tokenizer = AutoTokenizer.from_pretrained("loubnabnl/santacoder-code-to-text")
21
- model = AutoModelForCausalLM.from_pretrained("loubnabnl/santacoder-code-to-text", trust_remote_code=True)
22
- pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
23
 
24
def make_doctring(gen_prompt):
    """Append a docstring-opening explanation prompt to *gen_prompt*.

    Returns the prompt followed by a blank line, an opening triple quote,
    and an ``Explanation:`` header, cueing the model to generate a docstring.
    NOTE(review): function name has a typo ("doctring") but is kept for
    caller compatibility.
    """
    # Plain string literal: the original used an f-string with no placeholders.
    return gen_prompt + "\n\n\"\"\"\nExplanation:"
 
16
  [EXAMPLE_3, 11, 0.2, 42],
17
  ]
18
 
19
+ device="cuda:0"
20
  tokenizer = AutoTokenizer.from_pretrained("loubnabnl/santacoder-code-to-text")
21
+ model = AutoModelForCausalLM.from_pretrained("loubnabnl/santacoder-code-to-text", trust_remote_code=True).to(device)
22
+ pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, device=device)
23
 
24
def make_doctring(gen_prompt):
    """Append a docstring-opening explanation prompt to *gen_prompt*.

    Returns the prompt followed by a blank line, an opening triple quote,
    and an ``Explanation:`` header, cueing the model to generate a docstring.
    NOTE(review): function name has a typo ("doctring") but is kept for
    caller compatibility.
    """
    # Plain string literal: the original used an f-string with no placeholders.
    return gen_prompt + "\n\n\"\"\"\nExplanation:"