Tonic committed on
Commit
b70cd82
·
verified ·
1 Parent(s): f792496

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -8
app.py CHANGED
@@ -19,17 +19,18 @@ model = AutoModelForCausalLM.from_pretrained(model_path, device_map='cuda', quan
19
 
20
  @spaces.GPU
21
  def generate_text(usertitle, content, max_new_tokens=512,model=model, tokenizer=tokenizer, temperature=0.7):
22
- msg = [{
23
- 'title': title,
24
- 'content': content
25
- }]
26
- inputs = tokenizer.apply_chat_template(msg, return_tensors='pt').cuda()
27
- generated_ids = model.generate(inputs['input_ids'], max_new_tokens=max_new_tokens, temperature=temperature, pad_token_id=tokenizer.eos_token_id)
28
- generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
 
 
29
 
30
  return generated_text
31
 
32
-
33
  def gradio_app():
34
  with gr.Blocks() as demo:
35
  gr.Markdown(title)
 
19
 
20
  @spaces.GPU
21
  def generate_text(usertitle, content, max_new_tokens=512,model=model, tokenizer=tokenizer, temperature=0.7):
22
+ # msg = [{
23
+ # 'title': title,
24
+ # 'content': content
25
+ # }]
26
+ input_text = f"[[[Title:]]]{usertitle}\n[[[Content:]]]{content}\n"
27
+ inputs = tokenizer(input_text, return_tensors='pt').to('cuda')
28
+
29
+ generated_ids = model.generate(inputs['input_ids'], max_new_tokens=max_length, temperature=temperature, pad_token_id=tokenizer.eos_token_id)
30
+ generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=False)
31
 
32
  return generated_text
33
 
 
34
  def gradio_app():
35
  with gr.Blocks() as demo:
36
  gr.Markdown(title)