"""Question answering with a fine-tuned T5 model (Hugging Face transformers).

Loads the "miiiciiii/I-Comprehend_ag" checkpoint and generates an answer to a
question from a supplied context passage.
"""
from transformers import T5ForConditionalGeneration, T5Tokenizer
import torch
# --- Model setup -------------------------------------------------------------
# Fetch the fine-tuned answer-generation checkpoint and its matching tokenizer
# from the Hugging Face hub (downloaded on first run, cached afterwards).
_CHECKPOINT = "miiiciiii/I-Comprehend_ag"
t5ag_tokenizer = T5Tokenizer.from_pretrained(_CHECKPOINT)
t5ag_model = T5ForConditionalGeneration.from_pretrained(_CHECKPOINT)
def answer_question(question, context):
    """Generate an answer for a given question and context.

    Args:
        question: Natural-language question string.
        context: Passage of text expected to contain the answer.

    Returns:
        The model's decoded answer as a plain string (special tokens removed).
    """
    # T5-style QA models expect a single "question: ... context: ..." prompt.
    input_text = f"question: {question} context: {context}"
    # Truncate long contexts to the model's 512-token input window.
    input_ids = t5ag_tokenizer.encode(input_text, return_tensors="pt", max_length=512, truncation=True)
    # Inference only — disable autograd to save memory and compute.
    with torch.no_grad():
        # Fix: the original passed both max_length=512 and max_new_tokens=200,
        # which conflict (transformers warns and max_new_tokens takes
        # precedence). Keep only max_new_tokens to bound the answer length
        # explicitly. num_return_sequences=1 is the default and was dropped.
        output = t5ag_model.generate(input_ids, max_new_tokens=200)
    return t5ag_tokenizer.decode(output[0], skip_special_tokens=True)
# --- Example usage -----------------------------------------------------------
# Smoke test: ask where the Eiffel Tower is, given a one-sentence context.
question = "What is the location of the Eiffel Tower?"
context = "The Eiffel Tower is located in Paris and is one of the most famous landmarks in the world."

# Run the QA pipeline and show the result on stdout.
answer = answer_question(question, context)
print("Generated Answer:", answer)