Spaces: Build error

Commit 0e0ce94 · Parent(s): ed7f9aa

Update tapas_utils.py

Browse files

tapas_utils.py CHANGED (+59 -2)
@@ -7,8 +7,65 @@ def initialize_tapas():
     model = AutoModelForTableQuestionAnswering.from_pretrained("google/tapas-large-finetuned-wtq")
     return tokenizer, model
 
-
+
 # ... [same as in your code]
 
-
+
 # ... [same as in your code]
+def ask_llm_chunk(tokenizer, model, chunk, questions):
+    chunk = chunk.astype(str)  # TAPAS expects every table cell as a string
+    try:
+        inputs = tokenizer(table=chunk, queries=questions, padding="max_length", truncation=True, return_tensors="pt")
+    except Exception as e:
+        log_debug_info(f"Tokenization error: {e}")
+        st.write(f"An error occurred: {e}")
+        return ["Error occurred while tokenizing"] * len(questions)
+
+    if inputs["input_ids"].shape[1] > 512:  # TAPAS sequences are capped at 512 tokens
+        log_debug_info("Token limit exceeded for chunk")
+        st.warning("Token limit exceeded for chunk")
+        return ["Token limit exceeded for chunk"] * len(questions)
+
+    outputs = model(**inputs)
+    predicted_answer_coordinates, predicted_aggregation_indices = tokenizer.convert_logits_to_predictions(
+        inputs,
+        outputs.logits.detach(),
+        outputs.logits_aggregation.detach()
+    )
+
+    answers = []
+    for coordinates in predicted_answer_coordinates:
+        if len(coordinates) == 1:  # single-cell answer
+            row, col = coordinates[0]
+            try:
+                value = chunk.iloc[row, col]
+                log_debug_info(f"Accessed value for row {row}, col {col}: {value}")
+                answers.append(value)
+            except Exception as e:
+                log_debug_info(f"Error accessing value for row {row}, col {col}: {e}")
+                st.write(f"An error occurred: {e}")
+        else:  # multi-cell answer: join the cell values
+            cell_values = []
+            for coordinate in coordinates:
+                row, col = coordinate
+                try:
+                    value = chunk.iloc[row, col]
+                    cell_values.append(value)
+                except Exception as e:
+                    log_debug_info(f"Error accessing value for row {row}, col {col}: {e}")
+                    st.write(f"An error occurred: {e}")
+            answers.append(", ".join(map(str, cell_values)))
+
+    return answers
+
+MAX_ROWS_PER_CHUNK = 200
+
+def summarize_map_reduce(tokenizer, model, data, questions):
+    dataframe = pd.read_csv(StringIO(data))
+    num_chunks = len(dataframe) // MAX_ROWS_PER_CHUNK + 1
+    dataframe_chunks = [deepcopy(chunk) for chunk in np.array_split(dataframe, num_chunks)]
+    all_answers = []
+    for chunk in dataframe_chunks:
+        chunk_answers = ask_llm_chunk(tokenizer, model, chunk, questions)  # tokenizer and model must be passed through
+        all_answers.extend(chunk_answers)
+    return all_answers
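
The added code references several names the hunk never defines: st, pd, np, StringIO, deepcopy, and log_debug_info. They presumably come from the module's import block and helpers elsewhere in the Space. A minimal sketch of that assumed context, with log_debug_info as a hypothetical stand-in and initialize_tapas reconstructed to mirror the model line visible in the hunk context:

from io import StringIO
from copy import deepcopy

import numpy as np
import pandas as pd
import streamlit as st
from transformers import AutoModelForTableQuestionAnswering, AutoTokenizer

def log_debug_info(message):
    # Hypothetical stand-in for the Space's real debug logger.
    print(f"[DEBUG] {message}")

def initialize_tapas():
    # Assumed: the tokenizer line mirrors the model line shown in the hunk context.
    tokenizer = AutoTokenizer.from_pretrained("google/tapas-large-finetuned-wtq")
    model = AutoModelForTableQuestionAnswering.from_pretrained("google/tapas-large-finetuned-wtq")
    return tokenizer, model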
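For reference, a hypothetical end-to-end call using a toy CSV string (not from the commit). Note that summarize_map_reduce concatenates the per-chunk answers, so a table longer than MAX_ROWS_PER_CHUNK rows returns one answer per question per chunk rather than a single merged answer, and any chunk that still exceeds 512 tokens yields the warning string instead:

csv_text = "product,revenue\nWidget,120\nGadget,80\n"  # toy input for illustration
questions = ["What is the total revenue?"]
tokenizer, model = initialize_tapas()
answers = summarize_map_reduce(tokenizer, model, csv_text, questions)
print(answers)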
|