NaimaAqeel committed on
Commit
8889a56
·
verified ·
1 Parent(s): 965462a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -52,13 +52,13 @@ def chunk_text(text, chunk_size=500, overlap=100):
52
  chunks.append(current_chunk.strip())
53
  return chunks
54
 
55
- def generate_with_gpt(prompt, max_length=150):
56
  """Generate text with GPT model"""
57
- inputs = gpt_tokenizer(prompt, return_tensors="pt")
58
  with torch.no_grad():
59
  outputs = gpt_model.generate(
60
  inputs.input_ids,
61
- max_length=max_length,
62
  num_return_sequences=1,
63
  no_repeat_ngram_size=2,
64
  do_sample=True,
@@ -76,7 +76,7 @@ def refine_answer_with_gpt(context, question, answer):
76
  f"Answer: {answer}\n\n"
77
  f"Please provide a clearer and more complete answer in simple language."
78
  )
79
- return generate_with_gpt(prompt, max_length=120)
80
 
81
  def extract_direct_definition(text, term):
82
  """Find a direct definition of a term in the text"""
@@ -169,4 +169,4 @@ with gr.Blocks() as demo:
169
  [question, chatbot]
170
  )
171
 
172
- demo.launch()
 
52
  chunks.append(current_chunk.strip())
53
  return chunks
54
 
55
+ def generate_with_gpt(prompt, max_new_tokens=100):
56
  """Generate text with GPT model"""
57
+ inputs = gpt_tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512)
58
  with torch.no_grad():
59
  outputs = gpt_model.generate(
60
  inputs.input_ids,
61
+ max_new_tokens=max_new_tokens, # FIXED
62
  num_return_sequences=1,
63
  no_repeat_ngram_size=2,
64
  do_sample=True,
 
76
  f"Answer: {answer}\n\n"
77
  f"Please provide a clearer and more complete answer in simple language."
78
  )
79
+ return generate_with_gpt(prompt, max_new_tokens=120)
80
 
81
  def extract_direct_definition(text, term):
82
  """Find a direct definition of a term in the text"""
 
169
  [question, chatbot]
170
  )
171
 
172
+ demo.launch()