mohamedemam committed on
Commit
65cae45
·
1 Parent(s): 86f4489

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -20,11 +20,11 @@ model.eval()
20
 
21
  # Function to generate questions and answers with configurable parameters
22
  def generate_qa(context, temperature, top_p,num_seq,l_p, num_b):
23
- input_text = context
24
  input_ids = tokenizer(text=input_text, return_tensors='pt')
25
 
26
  # Generate with configurable parameters
27
- output = model.generate(**input_ids,
28
  temperature=temperature,
29
  top_p=top_p,
30
  num_return_sequences=num_seq,
@@ -42,7 +42,7 @@ def generate_qa(context, temperature, top_p,num_seq,l_p, num_b):
42
  return formatted_output
43
  iface = gr.Interface(
44
  fn=generate_qa,
45
- inputs=[
46
  gr.inputs.Slider(minimum=0.0, maximum=5, default=2.1, step=0.01, label="Temperature"),
47
  gr.inputs.Slider(minimum=0.0, maximum=1, default=0.5, step=0.01, label="Top-p"),
48
  gr.inputs.Slider(minimum=1, maximum=20, default=3, step=1, label="num of sequance"),
 
20
 
21
  # Function to generate questions and answers with configurable parameters
22
  def generate_qa(context, temperature, top_p,num_seq,l_p, num_b):
23
+ input_text = context+f"\n\nSummarize the previous text in three sentences in arabic:\n\n"
24
  input_ids = tokenizer(text=input_text, return_tensors='pt')
25
 
26
  # Generate with configurable parameters
27
+ output = model.generate(input_ids['input_ids'],
28
  temperature=temperature,
29
  top_p=top_p,
30
  num_return_sequences=num_seq,
 
42
  return formatted_output
43
  iface = gr.Interface(
44
  fn=generate_qa,
45
+ inputs=["text",
46
  gr.inputs.Slider(minimum=0.0, maximum=5, default=2.1, step=0.01, label="Temperature"),
47
  gr.inputs.Slider(minimum=0.0, maximum=1, default=0.5, step=0.01, label="Top-p"),
48
  gr.inputs.Slider(minimum=1, maximum=20, default=3, step=1, label="num of sequance"),