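"""Gradio demo: question answering with a FLAN-T5-large bottleneck adapter
fine-tuned on cpgQA."""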
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the fine-tuned model and tokenizer
model_name = "legacy107/flan-t5-large-bottleneck-adapter-cpgQA-unique"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name, device_map="auto")
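# Note: set_active_adapters comes from the adapter-transformers / adapters
# package, not vanilla transformers; the checkpoint is assumed to ship a
# bottleneck adapter named "question_answering" (the name used here).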
model.set_active_adapters("question_answering")
max_length = 512
max_target_length = 128

# Generate an answer for a given question and supporting context
def generate_answer(question, context):
    # Combine question and context into the T5-style QA input format
    input_text = f"question: {question} context: {context}"
    # Tokenize, truncating overly long contexts to max_length tokens, and move
    # the tensors to the same device as the model
    inputs = tokenizer(
        input_text,
        return_tensors="pt",
        truncation=True,
        max_length=max_length,
    ).to(model.device)
    # Generate the answer without tracking gradients
    with torch.no_grad():
        generated_ids = model.generate(**inputs, max_new_tokens=max_target_length)
    # Decode and return the generated answer
    generated_answer = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
    return generated_answer
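
# Optional smoke test for generate_answer; the question/context below are
# made-up placeholders, not examples from the cpgQA dataset.
# print(generate_answer(
#     "What does the guideline recommend for adults?",
#     "The guideline recommends annual screening for all adults over 50.",
# ))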

# Create a Gradio interface
iface = gr.Interface(
    fn=generate_answer,
    inputs=[
        gr.Textbox(label="Question"),
        gr.Textbox(label="Context"),
    ],
    outputs=gr.Textbox(label="Generated Answer"),
)
# Launch the Gradio interface
iface.launch()
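# Note: iface.launch(share=True) would also create a temporary public URL when
# running locally; a hosted Space serves the app without it.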