Update README.md
Browse files
README.md
CHANGED
@@ -1,3 +1,96 @@
|
|
1 |
---
|
|
|
|
|
|
|
|
|
2 |
license: mit
|
|
|
3 |
---
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
---
|
2 |
+
language: en
|
3 |
+
tags:
|
4 |
+
- multitabqa
|
5 |
+
- multi-table-question-answering
|
6 |
license: mit
|
7 |
+
pipeline_tag: table-question-answering
|
8 |
---
|
9 |
+
|
10 |
+
# MultiTabQA (base-sized model)
|
11 |
+
|
12 |
+
MultiTabQA was proposed in [MultiTabQA: Generating Tabular Answers for Multi-Table Question Answering](https://arxiv.org/abs/2305.12820) by Vaishali Pal, Andrew Yates, Evangelos Kanoulas, Maarten de Rijke. The original repo can be found [here](https://github.com/kolk/MultiTabQA).
|
13 |
+
|
14 |
+
## Model description
|
15 |
+
|
16 |
+
MultiTabQA is a tableQA model which generates the answer table from multiple input tables. It can handle multi-table operators such as UNION, INTERSECT, EXCEPT, JOINS, etc.
|
17 |
+
|
18 |
+
MultiTabQA is based on the TAPEX (BART) architecture, which has a bidirectional (BERT-like) encoder and an autoregressive (GPT-like) decoder.
|
19 |
+
|
20 |
+
## Intended Uses
|
21 |
+
|
22 |
+
This pre-trained model can be used on SQL queries over multiple input tables.
|
23 |
+
|
24 |
+
### How to Use
|
25 |
+
|
26 |
+
Here is how to use this model in transformers:
|
27 |
+
|
28 |
+
```python
|
29 |
+
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
|
30 |
+
import pandas as pd
|
31 |
+
|
32 |
+
tokenizer = AutoTokenizer.from_pretrained("vaishali/multitabqa-base-sql")
|
33 |
+
model = AutoModelForSeq2SeqLM.from_pretrained("vaishali/multitabqa-base-sql")
|
34 |
+
|
35 |
+
query = "select count(*) from department where department_id not in (select department_id from management)"
|
36 |
+
table_names = ['department', 'management']
|
37 |
+
tables=[{"columns":["Department_ID","Name","Creation","Ranking","Budget_in_Billions","Num_Employees"],
|
38 |
+
"index":[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],
|
39 |
+
"data":[
|
40 |
+
[1,"State","1789",1,9.96,30266.0],
|
41 |
+
[2,"Treasury","1789",2,11.1,115897.0],
|
42 |
+
[3,"Defense","1947",3,439.3,3000000.0],
|
43 |
+
[4,"Justice","1870",4,23.4,112557.0],
|
44 |
+
[5,"Interior","1849",5,10.7,71436.0],
|
45 |
+
[6,"Agriculture","1889",6,77.6,109832.0],
|
46 |
+
[7,"Commerce","1903",7,6.2,36000.0],
|
47 |
+
[8,"Labor","1913",8,59.7,17347.0],
|
48 |
+
[9,"Health and Human Services","1953",9,543.2,67000.0],
|
49 |
+
[10,"Housing and Urban Development","1965",10,46.2,10600.0],
|
50 |
+
[11,"Transportation","1966",11,58.0,58622.0],
|
51 |
+
[12,"Energy","1977",12,21.5,116100.0],
|
52 |
+
[13,"Education","1979",13,62.8,4487.0],
|
53 |
+
[14,"Veterans Affairs","1989",14,73.2,235000.0],
|
54 |
+
[15,"Homeland Security","2002",15,44.6,208000.0]
|
55 |
+
]
|
56 |
+
},
|
57 |
+
{"columns":["department_ID","head_ID","temporary_acting"],
|
58 |
+
"index":[0,1,2,3,4],
|
59 |
+
"data":[
|
60 |
+
[2,5,"Yes"],
|
61 |
+
[15,4,"Yes"],
|
62 |
+
[2,6,"Yes"],
|
63 |
+
[7,3,"No"],
|
64 |
+
[11,10,"No"]
|
65 |
+
]
|
66 |
+
}]
|
67 |
+
|
68 |
+
input_tables = [pd.DataFrame(data=table["data"], columns=table["columns"], index=table["index"]) for table in tables]
|
69 |
+
|
70 |
+
# flatten the model inputs in the format: query + " " + <table_name> : table_name1 + flattened_table1 + <table_name> : table_name2 + flattened_table2 + ...
|
71 |
+
# flattened_input = query + " " + " ".join(f"<table_name> : {table_name} {linearize_table(table)}" for table_name, table in zip(table_names, input_tables))
|
72 |
+
model_input_string = """select count(*) from department where department_id not in (select department_id from management) <table_name> : department col : Department_ID | Name | Creation | Ranking | Budget_in_Billions | Num_Employees row 1 : 1 | State | 1789 | 1 | 9.96 | 30266 row 2 : 2 | Treasury | 1789 | 2 | 11.1 | 115897 row 3 : 3 | Defense | 1947 | 3 | 439.3 | 3000000 row 4 : 4 | Justice | 1870 | 4 | 23.4 | 112557 row 5 : 5 | Interior | 1849 | 5 | 10.7 | 71436 row 6 : 6 | Agriculture | 1889 | 6 | 77.6 | 109832 row 7 : 7 | Commerce | 1903 | 7 | 6.2 | 36000 row 8 : 8 | Labor | 1913 | 8 | 59.7 | 17347 row 9 : 9 | Health and Human Services | 1953 | 9 | 543.2 | 67000 row 10 : 10 | Housing and Urban Development | 1965 | 10 | 46.2 | 10600 row 11 : 11 | Transportation | 1966 | 11 | 58.0 | 58622 row 12 : 12 | Energy | 1977 | 12 | 21.5 | 116100 row 13 : 13 | Education | 1979 | 13 | 62.8 | 4487 row 14 : 14 | Veterans Affairs | 1989 | 14 | 73.2 | 235000 row 15 : 15 | Homeland Security | 2002 | 15 | 44.6 | 208000 <table_name> : management col : department_ID | head_ID | temporary_acting row 1 : 2 | 5 | Yes row 2 : 15 | 4 | Yes row 3 : 2 | 6 | Yes row 4 : 7 | 3 | No row 5 : 11 | 10 | No"""
|
73 |
+
inputs = tokenizer(model_input_string, return_tensors="pt")
|
74 |
+
|
75 |
+
outputs = model.generate(**inputs)
|
76 |
+
|
77 |
+
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
|
78 |
+
# 'col : count(*) row 1 : 11'
|
79 |
+
```
|
80 |
+
|
81 |
+
### How to Fine-tune
|
82 |
+
|
83 |
+
Please find the fine-tuning script [here](https://github.com/kolk/MultiTabQA).
|
84 |
+
|
85 |
+
### BibTeX entry and citation info
|
86 |
+
|
87 |
+
```bibtex
|
88 |
+
@misc{pal2023multitabqa,
|
89 |
+
title={MultiTabQA: Generating Tabular Answers for Multi-Table Question Answering},
|
90 |
+
author={Vaishali Pal and Andrew Yates and Evangelos Kanoulas and Maarten de Rijke},
|
91 |
+
year={2023},
|
92 |
+
eprint={2305.12820},
|
93 |
+
archivePrefix={arXiv},
|
94 |
+
primaryClass={cs.CL}
|
95 |
+
}
|
96 |
+
```
|