prabinpanta0 committed on
Commit
dc068d2
·
verified ·
1 Parent(s): 12add7b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -3
app.py CHANGED
@@ -3,7 +3,7 @@ import gradio as gr
3
  import torch
4
  from transformers import AutoTokenizer, AutoModelForCausalLM
5
 
6
- tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2-medium")
7
  model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2-medium")
8
 
9
  system_prompt = """
@@ -23,8 +23,10 @@ output: Potential widening of the achievement gap if data is not used equitably.
23
  def generate(text):
24
  try:
25
  prompt = system_prompt + f"\ninput: {text}\noutput:"
26
- inputs = tokenizer.encode(prompt, return_tensors="pt", return_attention_mask=True)
27
- outputs = model.generate(inputs["input_ids"], attention_mask=inputs["attention_mask"], max_length=256)
 
 
28
  response_text = tokenizer.decode(outputs[0], skip_special_tokens=True).split("output:")[-1].strip()
29
  return response_text if response_text else "No valid response generated."
30
 
 
3
  import torch
4
  from transformers import AutoTokenizer, AutoModelForCausalLM
5
 
6
+ tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2-medium", clean_up_tokenization_spaces=True)
7
  model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2-medium")
8
 
9
  system_prompt = """
 
23
  def generate(text):
24
  try:
25
  prompt = system_prompt + f"\ninput: {text}\noutput:"
26
+ inputs = tokenizer(prompt, return_tensors="pt", return_attention_mask=True)
27
+ input_ids = inputs["input_ids"].unsqueeze(0)
28
+ attention_mask = inputs["attention_mask"].unsqueeze(0)
29
+ outputs = model.generate(input_ids, attention_mask=attention_mask, max_length=256)
30
  response_text = tokenizer.decode(outputs[0], skip_special_tokens=True).split("output:")[-1].strip()
31
  return response_text if response_text else "No valid response generated."
32