Bsbell21 committed on
Commit
3705913
·
verified ·
1 Parent(s): c7a748f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -5
app.py CHANGED
@@ -3,12 +3,10 @@ from peft import PeftModel, PeftConfig
3
  from transformers import AutoModelForCausalLM, AutoTokenizer
4
 
5
  peft_model_id = f"Bsbell21/llm_instruction_generator"
6
- config = PeftConfig.from_pretrained(peft_model_id)
7
- model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path, return_dict=True, load_in_8bit=True, device_map='auto')
8
- tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
9
-
10
  # Load the Lora model
11
- model = PeftModel.from_pretrained(model, peft_model_id)
12
 
13
  def input_from_text(text):
14
  return "<s>[INST]Use the provided input to create an instruction that could have been used to generate the response with an LLM.\n" + text + "[/INST]"
 
3
  from transformers import AutoModelForCausalLM, AutoTokenizer
4
 
5
  peft_model_id = f"Bsbell21/llm_instruction_generator"
6
+ model = AutoModelForCausalLM.from_pretrained(peft_model_id, return_dict=True, load_in_8bit=True, device_map='auto')
7
+ tokenizer = AutoTokenizer.from_pretrained(peft_model_id)
 
 
8
  # Load the Lora model
9
+ # model = PeftModel.from_pretrained(model, peft_model_id)
10
 
11
  def input_from_text(text):
12
  return "<s>[INST]Use the provided input to create an instruction that could have been used to generate the response with an LLM.\n" + text + "[/INST]"