AlexWortega committed
Commit be72e85 · verified · 1 Parent(s): 8ce1638

Update README.md

Files changed (1)
  1. README.md +2 -6
README.md CHANGED
@@ -52,18 +52,14 @@ def generate(model, tokenizer, prompt, generation_config):
     output = tokenizer.decode(output_ids, skip_special_tokens=True)
     return output.strip()
 
-config = PeftConfig.from_pretrained(MODEL_NAME)
+#config = PeftConfig.from_pretrained(MODEL_NAME)
 model = AutoModelForCausalLM.from_pretrained(
     config.base_model_name_or_path,
     load_in_8bit=True,
     torch_dtype=torch.float16,
     device_map="auto"
 )
-model = PeftModel.from_pretrained(
-    model,
-    MODEL_NAME,
-    torch_dtype=torch.float16
-)
+#model = PeftModel.from_pretrained( model, MODEL_NAME, torch_dtype=torch.float16)
 model.eval()
 
 tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_fast=False)
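
For reference, below is a minimal sketch of the loading code as it stands after this change, assuming MODEL_NAME now refers to a standalone (merged) checkpoint rather than a LoRA adapter. With the PeftConfig call commented out, `config.base_model_name_or_path` is left undefined in the README snippet, so the sketch loads directly from MODEL_NAME; the MODEL_NAME value is defined earlier in the README and shown here only as a placeholder.

```python
# Minimal sketch, assuming MODEL_NAME is a standalone (merged) checkpoint.
# With PeftConfig commented out, config.base_model_name_or_path is undefined,
# so this loads directly from MODEL_NAME instead.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "..."  # placeholder; the actual repo id is defined earlier in the README

model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    load_in_8bit=True,          # 8-bit weights via bitsandbytes, as in the README
    torch_dtype=torch.float16,
    device_map="auto",
)
model.eval()

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_fast=False)
```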