import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the tokenizer and model
model_name = "mohamedemam/QA_GeneraTor"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# Recommended question/auxiliary words for users to choose from
# (duplicates from the original list removed)
recommended_words = [
    "which", "how", "when", "where", "who", "whom", "whose", "why",
    "whereas", "can", "could", "may", "might", "will", "would",
    "shall", "should", "do", "does", "did", "is", "are", "am",
    "was", "were", "be", "being", "been", "have", "has", "had",
    "if", "must",
]

# Generate questions and answers with configurable sampling parameters
def generate_qa(context, temperature, top_p, num_of_seq):
    input_text = context.lower()
    inputs = tokenizer(
        input_text,
        max_length=400,
        truncation=True,
        padding="max_length",
        return_tensors="pt",
    )
    # do_sample=True is required for temperature and top_p to take effect
    output = model.generate(
        **inputs,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        num_return_sequences=num_of_seq,
    )
    generated_text = tokenizer.batch_decode(output, skip_special_tokens=True)
    # Join with real newlines (the original used r"\n", which inserts the
    # literal characters backslash-n instead of a line break)
    return "\n".join(generated_text)

# Create the Gradio interface with sliders for temperature, top-p, and the
# number of returned sequences (gr.inputs.Slider/default= is the deprecated
# pre-3.x API; current Gradio uses gr.Slider with value=)
iface = gr.Interface(
    fn=generate_qa,
    inputs=[
        gr.Textbox(label="Context"),
        # Temperature must be > 0 when sampling, so the slider starts at 0.05
        gr.Slider(minimum=0.05, maximum=4, value=1, step=0.05, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1, value=0.2, step=0.05, label="Top-p"),
        gr.Slider(minimum=1, maximum=50, value=1, step=1, label="num_return_sequences"),
    ],
    outputs="text",
    title="Question Generation and Answering",
    description=(
        "Enter a context and adjust temperature, top-p, and the number of "
        "returned sequences. The model will generate question-answer pairs. "
        "Recommended question words: " + ", ".join(recommended_words)
    ),
)

# Launch the interface
iface.launch()
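# A minimal sketch of calling generate_qa directly, bypassing the UI.
# The context string below is purely illustrative, not from the model card.
#
#   print(generate_qa(
#       "The Nile is the longest river in Africa.",
#       temperature=1.0, top_p=0.9, num_of_seq=2,
#   ))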