import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import re

# Load the tokenizer and model
model_name = "mohamedemam/QA_GeneraToR"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# Recommended words for users to choose from
recommended_words = [
    "which", "how", "when", "where", "who", "whom", "whose", "why",
    "can", "could", "may", "might", "will", "would", "shall", "should",
    "do", "does", "did", "is", "are", "am", "was", "were", "be", "being", "been",
    "have", "has", "had", "if", "must",
]

# Example contexts
example_contexts = [
    "when: Lionel Andrés Messi...",
    "where: Lionel Andrés Messi...",
    "how: Lionel Andrés Messi...",
    "what: Lionel Andrés Messi...",
    "where: Egypt...",
    "where: There is evidence..."
    # Add more examples here
]
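
# Note: each example above already embeds a prompt word (e.g. "when: ..."), and
# generate_qa below prepends the chosen recommended word as well, so the final
# prompt contains both prefixes.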

# Function to generate questions and answers with configurable parameters
def generate_qa(context, recommended_word, temperature, top_p, num_seq):
    input_text = f"{recommended_word}: {context}"
    input_text = re.sub(r"\n", " ", input_text).lower()
    inputs = tokenizer(input_text, return_tensors="pt")

    # Generate with configurable sampling parameters.
    # num_return_sequences must not exceed num_beams, so the UI slider is capped at 6.
    output = model.generate(
        **inputs,
        temperature=temperature,
        top_p=top_p,
        num_return_sequences=num_seq,
        do_sample=True,
        max_length=100,
        num_beams=6,
        length_penalty=1.4,
        top_k=0,  # 0 disables top-k filtering, leaving top-p (nucleus) sampling in control
    )

    # Decode all returned sequences at once
    generated_text = tokenizer.batch_decode(output, skip_special_tokens=True)

    formatted_output = "\n\n".join([f"Original Context: {context}", "Generated Sentences:"] + generated_text)
    return formatted_output
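
# Illustrative direct call, kept commented out so the script only serves the UI.
# The context string below is an assumption for demonstration, not part of the app:
#
#   print(generate_qa(
#       "The Eiffel Tower was completed in 1889.",
#       "when", temperature=1.0, top_p=0.9, num_seq=2,
#   ))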

# Create the Gradio interface with sliders for temperature and top-p
iface = gr.Interface(
    fn=generate_qa,
    inputs=[
        gr.Dropdown(example_contexts, label="Choose an Example"),
        gr.Radio(recommended_words, label="Choose a Recommended Word"),
        gr.Slider(minimum=0.0, maximum=2.0, value=1.0, step=0.01, label="Temperature"),
        gr.Slider(minimum=0.0, maximum=1.0, value=0.5, step=0.01, label="Top-p"),
        # Capped at 6 so num_return_sequences never exceeds num_beams in model.generate
        gr.Slider(minimum=1, maximum=6, value=3, step=1, label="Number of Sequences"),
    ],
    outputs=gr.Textbox(label="Generated Output"),
    title="Question Generation and Answering",
    description="Select an example context, choose a recommended word, and adjust the temperature, top-p, and number of sequences. The model will generate questions and answers.",
)

# Launch the interface
iface.launch()
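
# To expose the demo with a temporary public URL (a standard Gradio option),
# launch with: iface.launch(share=True)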