Bsbell21 commited on
Commit
5e52278
·
verified ·
1 Parent(s): cb73784

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -8
app.py CHANGED
@@ -14,7 +14,7 @@ tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
14
 
15
  # Load the Lora model
16
  model = PeftModel.from_pretrained(model, peft_model_id)
17
-
18
  def make_inference(product, description):
19
  batch = tokenizer(f"### INSTRUCTION\nBelow is a product and description, please write a marketing email for this product.\n\n### Product:\n{product}\n### Description:\n{description}\n\n### Marketing Email:\n", return_tensors='pt')
20
 
@@ -23,19 +23,16 @@ def make_inference(product, description):
23
  return tokenizer.decode(output_tokens[0], skip_special_tokens=True)
24
  '''
25
  def make_inference(product_name, product_description):
26
- batch = tokenizer(
27
- f"### Product and Description:\n{product_name}: {product_description}\n\n### Ad:",
28
- return_tensors="pt",
29
- )
30
-
31
  batch = {key: value.to('cuda:0') for key, value in batch.items()}
32
 
33
  with torch.cuda.amp.autocast():
34
- output_tokens = model.generate(**batch, max_new_tokens=50)
35
 
36
  return tokenizer.decode(output_tokens[0], skip_special_tokens=True)
37
 
38
- '''
39
 
40
  # Load the Lora model
41
  model = PeftModel.from_pretrained(model, peft_model_id)
 
14
 
15
  # Load the Lora model
16
  model = PeftModel.from_pretrained(model, peft_model_id)
17
+ '''
18
  def make_inference(product, description):
19
  batch = tokenizer(f"### INSTRUCTION\nBelow is a product and description, please write a marketing email for this product.\n\n### Product:\n{product}\n### Description:\n{description}\n\n### Marketing Email:\n", return_tensors='pt')
20
 
 
23
  return tokenizer.decode(output_tokens[0], skip_special_tokens=True)
24
  '''
25
def make_inference(product_name, product_description):
    """Generate a marketing email for a product with the LoRA-adapted model.

    Args:
        product_name: Name of the product to advertise.
        product_description: Short free-text description of the product.

    Returns:
        str: The decoded model output (the prompt followed by the generated
        marketing email), with special tokens stripped.
    """
    # BUG FIX: the original f-string interpolated `{product}` and
    # `{description}`, which are undefined in this scope — the parameters are
    # `product_name` / `product_description`, so every call raised NameError.
    prompt = (
        "### INSTRUCTION\nBelow is a product and description, please write a "
        "marketing email for this product.\n\n"
        f"### Product:\n{product_name}\n"
        f"### Description:\n{product_description}\n\n"
        "### Marketing Email:\n"
    )
    batch = tokenizer(prompt, return_tensors='pt')

    # Move every input tensor to the first GPU.
    # NOTE(review): assumes CUDA is available at inference time — confirm
    # against the deployment environment.
    batch = {key: value.to('cuda:0') for key, value in batch.items()}

    # Mixed-precision generation; `model` and `tokenizer` are module-level
    # globals loaded elsewhere in app.py.
    with torch.cuda.amp.autocast():
        output_tokens = model.generate(**batch, max_new_tokens=200)

    return tokenizer.decode(output_tokens[0], skip_special_tokens=True)
34
 
35
+
36
 
37
  # Load the Lora model
38
  model = PeftModel.from_pretrained(model, peft_model_id)