# TAPAS_WTQ_Chunking / tapas_utils.py
from transformers import AutoTokenizer, AutoModelForTableQuestionAnswering
import pandas as pd
from io import StringIO
def initialize_tapas():
    """Load the WTQ-finetuned TAPAS tokenizer and model pair."""
    tokenizer = AutoTokenizer.from_pretrained("google/tapas-large-finetuned-wtq")
    model = AutoModelForTableQuestionAnswering.from_pretrained("google/tapas-large-finetuned-wtq")
    return tokenizer, model
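
# Usage (a minimal sketch): the checkpoint above is large, so load the
# tokenizer/model pair once at startup and reuse it for every question:
#   tokenizer, model = initialize_tapas()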
def ask_llm_chunk(tokenizer, model, chunk, questions):
    # ... [same as in your code]
    ...
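
# A hedged sketch of what ask_llm_chunk's elided body might look like,
# assuming `chunk` is a pandas DataFrame and that the standard TAPAS
# inference recipe from transformers is intended. The helper name
# ask_llm_chunk_sketch and the cell-joining strategy are illustrative
# assumptions, not the original logic.
import torch

def ask_llm_chunk_sketch(tokenizer, model, chunk, questions):
    chunk = chunk.astype(str)  # TAPAS requires every table cell to be a string
    inputs = tokenizer(
        table=chunk,
        queries=questions,
        padding="max_length",
        truncation=True,
        return_tensors="pt",
    )
    with torch.no_grad():
        outputs = model(**inputs)
    # Map token-level logits back to table coordinates and aggregation ops.
    coords, _agg_indices = tokenizer.convert_logits_to_predictions(
        inputs,
        outputs.logits.detach(),
        outputs.logits_aggregation.detach(),
    )
    # Join the selected cells for each question into one answer string.
    return [
        ", ".join(chunk.iat[row, col] for row, col in coordinates)
        for coordinates in coords
    ]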
def summarize_map_reduce(tokenizer, model, data, questions):
    # ... [same as in your code]
    ...
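
# A hedged sketch of summarize_map_reduce's elided body, assuming `data` is
# a CSV string (which would explain the StringIO import), that the table is
# split into fixed-size row chunks (map), and that per-chunk answers are
# regrouped per question (reduce). The chunk_rows parameter and the return
# shape are assumptions for illustration; ask_llm_chunk_sketch stands in
# for the elided ask_llm_chunk above.
def summarize_map_reduce_sketch(tokenizer, model, data, questions, chunk_rows=50):
    df = pd.read_csv(StringIO(data))
    # Map: answer the questions against each row-chunk independently,
    # keeping each chunk small enough for TAPAS's 512-token input limit.
    per_chunk = []
    for start in range(0, len(df), chunk_rows):
        chunk = df.iloc[start:start + chunk_rows].reset_index(drop=True)
        per_chunk.append(ask_llm_chunk_sketch(tokenizer, model, chunk, questions))
    # Reduce: gather every chunk's answer for each question.
    return [
        [answers[i] for answers in per_chunk]
        for i in range(len(questions))
    ]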