from transformers import MBartForConditionalGeneration, MBart50Tokenizer
import gradio as gr

# Load the model and tokenizer
model_name = "LocalDoc/mbart_large_qa_azerbaijan"
tokenizer = MBart50Tokenizer.from_pretrained(model_name, src_lang="en_XX", tgt_lang="az_AZ")
model = MBartForConditionalGeneration.from_pretrained(model_name)

def answer_question(context, question):
    # Combine the context and question into the format the model expects
    input_text = f"context: {context} question: {question}"
    inputs = tokenizer(
        input_text,
        return_tensors="pt",
        max_length=1024,  # mBART supports at most 1024 input positions
        truncation=True,
        padding="max_length",
    )

    # Generate an answer with beam search
    outputs = model.generate(
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_length=128,  # cap the answer length
        num_beams=5,
        early_stopping=True,
    )

    # Decode the generated token IDs into a string
    answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return answer

demo = gr.Interface(
    fn=answer_question,
    inputs=[gr.Textbox(label="Context", lines=5), gr.Textbox(label="Question")],
    outputs=gr.Textbox(label="Answer"),
)
demo.launch()
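
# A minimal sanity check, assuming the model follows the "context: ... question: ..."
# prompt format used above; the sample context and question are made-up placeholders.
# Run this instead of demo.launch() (which blocks) to test without the UI:
#
#   print(answer_question(
#       "Baku is the capital city of Azerbaijan.",
#       "What is the capital of Azerbaijan?",
#   ))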