File size: 2,175 Bytes
5e20e58
 
 
 
 
 
 
 
35e7167
 
 
 
 
 
 
 
 
 
 
5e20e58
a643b2a
7948429
35e7167
5e20e58
 
 
af3b18b
5e20e58
a643b2a
5e20e58
 
212a5f9
5e20e58
 
 
 
 
35e7167
 
a643b2a
 
 
 
 
35e7167
5e20e58
 
35e7167
5e20e58
 
a643b2a
5e20e58
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the tokenizer and model once at import time (downloads from the
# HuggingFace Hub on first run, then uses the local cache).
# NOTE: this is a module-level side effect — the app cannot start offline
# unless the model is already cached.
model_name = "mohamedemam/QA_GeneraTor"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# Recommended prompt words for users to choose from.
# The original list contained many duplicates ("which", "who", "may", "is",
# "do", "can", ...), which showed up as repeated choices in the Radio widget;
# deduplicated here while preserving first-occurrence order.
recommended_words = list(dict.fromkeys([
    "which", "how", "when", "where", "who", "whom", "whose", "why",
    "whereas",
    "can", "could", "may", "might", "will", "would", "shall", "should",
    "do", "does", "did", "is", "are", "am", "was", "were", "be", "being", "been",
    "have", "has", "had", "if", "must",
]))

# Function to generate questions and answers with configurable parameters.
def generate_qa(context, recommended_word, temperature, top_p, numofseq):
    """Generate question/answer text from *context* using the seq2seq model.

    Args:
        context: Free-form passage to generate a question/answer from.
        recommended_word: Prompt word prepended as "<word>: <context>".
        temperature: Sampling temperature (must be > 0).
        top_p: Nucleus-sampling probability mass.
        numofseq: Number of sequences to return.

    Returns:
        The decoded generations joined by blank lines (a single string,
        matching the Gradio "text" output component).
    """
    input_text = f"{recommended_word}: {context.lower()}"
    input_ids = tokenizer(input_text, max_length=400, truncation=True, padding="max_length", return_tensors='pt')

    # do_sample=True is required: without it generate() uses greedy decoding,
    # silently ignoring temperature/top_p, and num_return_sequences > 1
    # raises a ValueError.
    output = model.generate(
        **input_ids,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        num_return_sequences=int(numofseq),
    )

    generated_text = tokenizer.batch_decode(output, skip_special_tokens=True)
    # Join into one string: the "text" output component expects a str,
    # not a list (a list would be rendered via str()).
    return "\n\n".join(generated_text)

# Create the Gradio interface with sliders for temperature and top-p.
# Uses the current component API (gr.Radio / gr.Slider with `value=`);
# the old gr.inputs.* namespace and `default=` kwarg were removed in
# Gradio 3.x/4.x.
iface = gr.Interface(
    fn=generate_qa,
    inputs=[
        gr.Textbox(label="Context"),
        gr.Radio(recommended_words, label="Choose a Recommended Word"),
        # Minimum raised from 0 to 0.05: temperature=0 is invalid when
        # sampling (logits are divided by the temperature).
        gr.Slider(minimum=0.05, maximum=4, value=1, step=0.05, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1, value=0.2, step=0.05, label="Top-p"),
        gr.Slider(minimum=1, maximum=50, value=1, step=1, label="num_return_sequences"),
    ],
    outputs="text",
    title="Question Generation and Answering",
    description="Enter a context, choose a recommended word, and adjust temperature and top-p. The model will generate a question and answer.",
)


# Launch the interface (blocks and serves the app locally).
iface.launch()