giseldo committed on
Commit e630882 · 1 Parent(s): 2b827cf

inclusion of the hf_token

Files changed (1)
  1. app.py +4 -2
app.py CHANGED
@@ -1,12 +1,14 @@
 import gradio as gr
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 import torch
+import os

 # Load the model and tokenizer locally
 model_name = "google/gemma-3-1b-it"  # Replace with the local path if already downloaded
+hf_token = os.getenv("HF_TOKEN")

-tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")
+tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=hf_token)
+model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto", use_auth_token=hf_token)

 pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=150, temperature=0.7)
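A note on the API used in this commit: recent transformers releases deprecate the use_auth_token argument of from_pretrained in favor of token. A forward-compatible sketch of the changed lines, assuming the same HF_TOKEN environment variable (e.g. set as a Space secret):

import os
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "google/gemma-3-1b-it"
hf_token = os.getenv("HF_TOKEN")  # None falls back to anonymous access

# `token` supersedes the deprecated `use_auth_token` in recent transformers releases
tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)
model = AutoModelForCausalLM.from_pretrained(
    model_name, torch_dtype=torch.float16, device_map="auto", token=hf_token
)

Either spelling authenticates the download of the gated Gemma weights; the token itself stays out of the source code because it is read from the environment.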