|
from transformers import T5ForConditionalGeneration, T5Tokenizer |
|
import torch |
|
|
|
|
|
# Hugging Face checkpoint for the fine-tuned answer-generation model.
_AG_CHECKPOINT = "miiiciiii/I-Comprehend_ag"

# Load the seq2seq model and its matching tokenizer once at import time.
t5ag_model = T5ForConditionalGeneration.from_pretrained(_AG_CHECKPOINT)
t5ag_tokenizer = T5Tokenizer.from_pretrained(_AG_CHECKPOINT)
|
|
|
def answer_question(question, context):
    """Generate an answer for a given question and context.

    Args:
        question: Natural-language question string.
        context: Passage of text the answer should be drawn from.

    Returns:
        The model's decoded answer string (special tokens stripped).
    """
    # T5 QA convention: prefix the inputs with "question:" / "context:" markers.
    input_text = f"question: {question} context: {context}"

    # Tokenize via the tokenizer call (not .encode) so we also get the
    # attention mask; truncate to the model's 512-token input limit.
    inputs = t5ag_tokenizer(
        input_text, return_tensors="pt", max_length=512, truncation=True
    )

    # Inference only — disable autograd bookkeeping.
    with torch.no_grad():
        # Fix: the original passed both max_length=512 and max_new_tokens=200,
        # which conflict (transformers warns and max_new_tokens wins).
        # num_return_sequences=1 is the default and is left implicit.
        output = t5ag_model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            max_new_tokens=200,
        )

    return t5ag_tokenizer.decode(output[0], skip_special_tokens=True)
|
|
|
|
|
# Demo: only run inference when executed as a script, not on import.
if __name__ == "__main__":
    question = "What is the location of the Eiffel Tower?"
    context = "The Eiffel Tower is located in Paris and is one of the most famous landmarks in the world."

    answer = answer_question(question, context)
    print("Generated Answer:", answer)