Commit de70fb3 (verified) · 1 Parent(s): dbacd1c
idoco committed

Upload script to reproduce dataset

relation_templates/relation_templates_brands.csv ADDED
@@ -0,0 +1,11 @@
+ uri,relation,template
+ P17,country,In what country is [subj] located?
+ P112,founded by,Who founded [subj]?
+ P169,chief executive officer,Who is the CEO of [subj]?
+ P127,owned by,Who is the owner of [subj]?
+ P452,industry,In what industry is [subj]?
+ P138,named after,Who was [subj] named after?
+ P571,inception,In what year was [subj] founded?
+ P159,headquarters location,In what country is the headquarters of [subj]?
+ P822,mascot,What or who is the mascot of [subj]?
+ P1056,product or material produced,What is the product or material produced by [subj]?
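
Each template row pairs a Wikidata property URI with a human-readable relation label and a question template; the [subj] placeholder is later filled with the entity name (or a typed phrase such as "the brand <name>", see subj_substitute in scripts/dataset_utils.py). A minimal rendering sketch, using a hypothetical brand and assuming the script is run from the repository root:

    import pandas as pd

    # Hypothetical example: render the CEO template for a made-up brand.
    templates = pd.read_csv("relation_templates/relation_templates_brands.csv")
    row = templates[templates["uri"] == "P169"].iloc[0]              # chief executive officer
    question = row["template"].replace("[subj]", "the brand Acme")   # placeholder subject
    print(question)  # -> Who is the CEO of the brand Acme?
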
relation_templates/relation_templates_celebs.csv ADDED
@@ -0,0 +1,40 @@
+ uri,relation,template
+ P569,date of birth,In what year was [subj] born?
+ P54,member of sports team,What sports team does [subj] play for?
+ P106,occupation,What is the occupation of [subj]?
+ P27,country of citizenship,What is the country of citizenship of [subj]?
+ P19,place of birth,In what [obj_type] was [subj] born?
+ P641,sport,What sport does [subj] play?
+ P1412,"languages spoken, written or signed",What language does [subj] speak?
+ P413,position played on team / speciality,What sports position does [subj] play?
+ P50,author,Who is the author of [subj]?
+ P20,place of death,In what [obj_type] did [subj] die?
+ P495,country of origin,What is the country of origin of [subj]?
+ P69,educated at,What is the alma mater of [subj]?
+ P407,language of work or name,What is the language of [subj]?
+ P1598,consecrator,Who is the consecrator of [subj]?
+ P123,publisher,Who is the publisher of [subj]?
+ P40,child,Who is the child of [subj]?
+ P140,religion or worldview,What is the religion of [subj]?
+ P57,director,Who was the director of [subj]?
+ P264,record label,What is the record label of [subj]?
+ P58,screenwriter,Who was the screenwriter for [subj]?
+ P103,native language,What is the native language of [subj]?
+ P1532,country for sport,What country does [subj] play for?
+ P22,father,Who is the father of [subj]?
+ P86,composer,Who was the composer of [subj]?
+ P175,performer,Who is the performer of [subj]?
+ P118,league,What sports league does [subj] play in?
+ P26,spouse,Who is the spouse of [subj]?
+ P162,producer,Who was the producer of [subj]?
+ P3373,sibling,Who is the sibling of [subj]?
+ P98,editor,Who is the editor of [subj]?
+ P25,mother,Who is the mother of [subj]?
+ P676,lyrics by,Who wrote the lyrics of [subj]?
+ P166,award received,What award did [subj] receive?
+ P451,unmarried partner,Who is the partner of [subj]?
+ P800,notable work,What notable [obj_type] did [subj] create?
+ P1303,instrument,What instrument does [subj] play?
+ P39,position held,What position is held by [subj]?
+ P101,field of work,What is the field of work of [subj]?
+ P102,member of political party,Which political party is [subj] affiliated with?
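
Several of the templates above (place of birth, place of death, notable work) also use an [obj_type] placeholder. At build time it is replaced with the most specific type of the answer entity, as chosen by best_obj_type in scripts/dataset_utils.py. An illustrative sketch with invented type lists (importing dataset_utils assumes the DATA_DIR environment variable is set):

    from dataset_utils import best_obj_type

    # Invented type lists, for illustration only.
    best_obj_type(["big city", "city", "municipality"])       # -> "city" (exact match in the priority list)
    best_obj_type(["human settlement", "university town"])    # -> "university" (substring fallback)
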
relation_templates/relation_templates_landmarks.csv ADDED
@@ -0,0 +1,12 @@
+ uri,relation,template
+ P17,country,In what country is [subj] located?
+ P112,founded by,Who founded [subj]?
+ P127,owned by,Who is the owner of [subj]?
+ P149,architectural style,What is the architectural style of [subj]?
+ P571,inception,In what year was [subj] created?
+ P84,architect,Who was the architect of [subj]?
+ P580,start time,In what year did [subj] get its heritage designation?
+ P2048,height,How high is [subj]?
+ P131,located in the administrative territorial entity,In what administrative territorial entity is [subj] located?
+ P669,located on street,On what street is [subj] located?
+ P1435,heritage designation,What is the heritage designation of [subj]?
relation_templates/relation_templates_paintings.csv ADDED
@@ -0,0 +1,13 @@
+ uri,relation,template
+ P127,owned by,Who is the owner of [subj]?
+ P170,creator,Who painted [subj]?
+ P186,made from material,From what material is [subj] made?
+ P195,collection,What collection is [subj] a part of?
+ P2348,time period,From what artistic time period is [subj]?
+ P547,commemorates,What does [subj] commemorate?
+ P138,named after,Who was [subj] named after?
+ P571,inception,In what year was [subj] painted?
+ P17,country,In what country is [subj] located?
+ P495,country of origin,In what country was [subj] created?
+ P88,commissioned by,Who commissioned [subj]?
+
scripts/build_dataset.py ADDED
@@ -0,0 +1,91 @@
+ from dotenv import load_dotenv
+ load_dotenv()
+ from dataset_utils import *
+ import argparse
+ 
+ '''
+ This script builds (or expands) the PopVQA dataset.
+ Before running it, make sure your directory contains the following:
+ 1. A CSV file with the base dataframe, containing the columns 's_uri' (the Wikidata URI) and 'type' (matched against the relation templates).
+ 2. A directory named 'relation_templates' containing a CSV file with the relation templates for each type. Each template is a triplet 'uri' - 'relation' - 'template'.
+ See the existing files for reference.
+ Run the script with the following command:
+ python build_dataset.py --base-df <path_to_base_df> --start <start_step> --end <end_step>
+ '''
+ 
+ def main(args):
+     dir_name, file_name = os.path.split(args.base_df)
+     base_name, _ = os.path.splitext(file_name)
+     base_df = pd.read_csv(args.base_df).drop_duplicates('s_uri')
+     assert 'type' in base_df.columns, "The base dataframe must contain a 'type' column."
+ 
+     types = base_df['type'].unique()
+ 
+     # Verify that every entity type has a matching templates file before doing any work.
+     for entity_type in types:
+         template_path = os.path.join(dir_name, "relation_templates", f"relation_templates_{entity_type}.csv")
+         assert os.path.isfile(template_path), f"Missing relation template for type '{entity_type}' at: {template_path}"
+ 
+     all_question_dfs = []
+ 
+     for entity_type in types:
+         type_df = base_df[base_df['type'] == entity_type].copy()
+         type_dir = os.path.join(dir_name, entity_type)
+         os.makedirs(type_dir, exist_ok=True)
+ 
+         template_path = os.path.join(dir_name, "relation_templates", f"relation_templates_{entity_type}.csv")
+         templates = pd.read_csv(template_path)
+ 
+         print(f"Processing type: {entity_type}")
+         # Step 0: fetch all (subject, relation, attribute) triples from Wikidata and keep the templated relations.
+         if args.start <= 0:
+             subject_to_relation = get_all_properties(type_df)
+             subject_to_relation = subject_to_relation[subject_to_relation['r_uri'].isin(templates['uri'])]
+             subject_to_relation.to_csv(os.path.join(type_dir, f"{base_name}_subject_to_relation.csv"), index=False)
+ 
+         # Step 1: fetch entity aliases (used to build the sets of acceptable answers).
+         if args.start <= 1 and args.end >= 1:
+             if args.start == 1:
+                 subject_to_relation = pd.read_csv(os.path.join(type_dir, f"{base_name}_subject_to_relation.csv"))
+             aliases = get_aliases(subject_to_relation)
+             aliases.to_csv(os.path.join(type_dir, f"{base_name}_all_aliases.csv"), index=False)
+ 
+         # Step 2: fetch the Wikidata type (P31) of each answer entity.
+         if args.start <= 2 and args.end >= 2:
+             if args.start == 2:
+                 subject_to_relation = pd.read_csv(os.path.join(type_dir, f"{base_name}_subject_to_relation.csv"))
+                 aliases = pd.read_csv(os.path.join(type_dir, f"{base_name}_all_aliases.csv"))
+             a_types = attribute_type(subject_to_relation)
+             a_types.to_csv(os.path.join(type_dir, f"{base_name}_complete_attribute_types.csv"), index=False)
+ 
+         # Step 3: aggregate everything into question triplets with possible answers.
+         if args.start <= 3 and args.end >= 3:
+             if args.start == 3:
+                 subject_to_relation = pd.read_csv(os.path.join(type_dir, f"{base_name}_subject_to_relation.csv"))
+                 aliases = pd.read_csv(os.path.join(type_dir, f"{base_name}_all_aliases.csv"))
+                 a_types = pd.read_csv(os.path.join(type_dir, f"{base_name}_complete_attribute_types.csv"))
+             triplets = aggregate_triplets(type_df, aliases, subject_to_relation, a_types, add_unesco=False)
+             triplets.to_csv(os.path.join(type_dir, f"{base_name}_question_triplets.csv"), index=False)
+ 
+         # Step 4: render the templates into textual and image-referring questions.
+         if args.start <= 4 and args.end >= 4:
+             if args.start == 4:
+                 triplets = pd.read_csv(os.path.join(type_dir, f"{base_name}_question_triplets.csv"))
+             triplets = build_prompts(type_df, triplets, templates)
+             triplets['type'] = entity_type
+             triplets.to_csv(os.path.join(type_dir, f"{base_name}_questions.csv"), index=False)
+             all_question_dfs.append(triplets)
+ 
+     # Combine all question files and write to the top-level directory.
+     if all_question_dfs:
+         combined_df = pd.concat(all_question_dfs, ignore_index=True)
+         combined_df.to_csv(os.path.join(dir_name, f"{base_name}_all_questions.csv"), index=False)
+         print(f"Combined questions file saved to {os.path.join(dir_name, f'{base_name}_all_questions.csv')}")
+ 
+ 
+ def get_exp_parser():
+     parser = argparse.ArgumentParser(add_help=False)
+     parser.add_argument('--base-df', type=str, required=True, help="Path to the base dataframe CSV.")
+     parser.add_argument('--start', type=int, default=0, help="Start step for building the dataset.")
+     parser.add_argument('--end', type=int, default=4, help="End step for building the dataset.")
+     return parser
+ 
+ 
+ if __name__ == "__main__":
+     parser = get_exp_parser()
+     args = parser.parse_args()
+     main(args)
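
The docstring above lists 's_uri' and 'type' as the required base columns; build_prompts in dataset_utils.py also selects a 'subject' column, so the base CSV appears to need it as well. A minimal, hypothetical example of preparing a base dataframe and running the full pipeline (file name and QIDs are placeholders):

    import pandas as pd

    # Placeholder rows; real rows need valid Wikidata QIDs in 's_uri'.
    base = pd.DataFrame([
        {"s_uri": "Q123", "subject": "Some Brand", "type": "brands"},
        {"s_uri": "Q456", "subject": "Some Person", "type": "celebs"},
    ])
    base.to_csv("entities.csv", index=False)

    # Each 'type' must have a matching relation_templates_<type>.csv, then:
    #   python build_dataset.py --base-df entities.csv --start 0 --end 4
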
scripts/dataset_utils.py ADDED
@@ -0,0 +1,343 @@
+ import os
+ import re
+ from ast import literal_eval
+ from urllib.parse import urlparse
+ 
+ import numpy as np
+ import pandas as pd
+ import requests
+ from SPARQLWrapper import SPARQLWrapper, JSON
+ from tqdm import tqdm
+ 
+ tqdm.pandas()
+ 
+ from dotenv import load_dotenv
+ load_dotenv()
+ 
+ DATA_DIR = os.environ['DATA_DIR']
+ 
+ # Phrases substituted for [subj] when the question refers to an image of the entity.
+ replacements = {"celebs": "the subject of this image",
+                 "brands": "the brand in this image",
+                 "landmarks": "the place in this image",
+                 "paintings": "the painting in this image",
+                 }
+ 
+ 
+ def best_obj_type(obj_types):
+     # Pick the most informative type label to substitute for [obj_type] in a template.
+     if type(obj_types) == str:
+         obj_types = literal_eval(obj_types)
+     prioritized_obj_types = ["city", "capital city", 'metropolis', 'country', 'occupation', 'language', 'type of sport', 'music genre']  # 'cinematic technique', 'team sport'
+     for ot in prioritized_obj_types:
+         if ot in obj_types:
+             return ot
+     for ot_ in obj_types:
+         if "university" in ot_:
+             return "university"
+         if "city" in ot_:
+             return "city"
+     return obj_types[0]
+ 
+ 
+ def replace_for_image(row):
+     replace_with = replacements[row['type']]
+     return row["template"].replace("[subj]", replace_with)
+ 
+ 
+ class SPARQL:
+     def __init__(self):
+         self.agent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36"
+         self.sparql = SPARQLWrapper("https://query.wikidata.org/sparql", agent=self.agent)
+         self.sparql.setReturnFormat(JSON)
+ 
+     def parse_value(self, value):
+         # Reduce entity URIs to their trailing identifier (e.g. '.../entity/Q42' -> 'Q42'); leave literals untouched.
+         parsed_uri = urlparse(value)
+         if all([parsed_uri.scheme, parsed_uri.netloc]):
+             return parsed_uri.path.split('/')[-1]
+         return value
+ 
+     def execute(self, query):
+         records = []
+         try:
+             self.sparql.setQuery(query)
+             responses = self.sparql.query().convert()
+             for response in responses['results']['bindings']:
+                 record = {}
+                 for key in response:
+                     record[key] = self.parse_value(response[key]['value'])
+                 records.append(record)
+             if not records:
+                 print("request failed")
+         except Exception as e:
+             print(e)
+         return pd.DataFrame(records)
+ 
+ 
+ def add_aliases(df):
+     def _query(uris):
+         return f'''
+         SELECT ?s_uri ?alias
+         WHERE {{
+             {{VALUES ?s_uri {{ {" ".join([f"wd:{uri}" for uri in uris])} }} }}
+             ?s_uri skos:altLabel ?alias.
+             FILTER(LANG(?alias) = "en")
+         }}
+         '''
+     sparql = SPARQL()
+ 
+     uris = list(set(df["s_uri"].tolist()))
+     uri_chunks = [uris[i:i+100] for i in range(0, len(uris), 100)]
+ 
+     aliases = pd.concat([sparql.execute(_query(chunk)) for chunk in tqdm(uri_chunks)])
+     aliases = aliases.groupby("s_uri")["alias"].agg(list).reset_index(name="aliases")
+     res = pd.merge(df, aliases, how='left', on='s_uri')
+     res['aliases'] = res['aliases'].fillna('[]')
+     return res
+ 
+ 
+ def get_aliases(df):
+     def _query(uris):
+         return f'''
+         SELECT ?uri ?alias
+         WHERE {{
+             {{VALUES ?uri {{ {" ".join([f"wd:{uri}" for uri in uris])} }} }}
+             ?uri skos:altLabel ?alias.
+             FILTER(LANG(?alias) = "en")
+         }}
+         '''
+     sparql = SPARQL()
+ 
+     uris = list(set(df["s_uri"].tolist()))  # + df["a_uri"].tolist()))
+     uri_chunks = [uris[i:i+100] for i in range(0, len(uris), 100)]
+ 
+     aliases = pd.concat([sparql.execute(_query(chunk)) for chunk in tqdm(uri_chunks)])
+     aliases = aliases.groupby("uri")["alias"].agg(list).reset_index(name="aliases")
+     return aliases
+ 
+ 
+ def add_images(df):
+     def _query(uris):
+         return f'''
+         SELECT ?s_uri ?image
+         WHERE {{
+             {{VALUES ?s_uri {{ {" ".join([f"wd:{uri}" for uri in uris])} }} }}
+             ?s_uri wdt:P18 ?image .
+         }}
+         '''
+     sparql = SPARQL()
+ 
+     uris = list(set(df["s_uri"].tolist()))
+     uri_chunks = [uris[i:i+100] for i in range(0, len(uris), 100)]
+ 
+     images = pd.concat([sparql.execute(_query(chunk)) for chunk in tqdm(uri_chunks)])
+     images['image'] = 'http://commons.wikimedia.org/wiki/Special:FilePath/' + images['image']
+     res = pd.merge(df, images, how='inner', on='s_uri')
+     return res
+ 
+ 
+ def get_attribute(df, attribute_name, attribute_id):
+     def _query(uris):
+         return f'''
+         SELECT ?s_uri ?{attribute_name}
+         WHERE {{
+             {{VALUES ?s_uri {{ {" ".join([f"wd:{uri}" for uri in uris])} }} }}
+             ?s_uri wdt:{attribute_id} ?{attribute_name} .
+         }}
+         '''
+     sparql = SPARQL()
+ 
+     uris = list(set(df["s_uri"].tolist()))
+     uri_chunks = [uris[i:i+100] for i in range(0, len(uris), 100)]
+ 
+     attributes = pd.concat([sparql.execute(_query(chunk)) for chunk in tqdm(uri_chunks)])
+     attributes = attributes.groupby("s_uri")[attribute_name].agg(list).reset_index(name=attribute_name)
+ 
+     res = pd.merge(df, attributes, how='inner', on='s_uri')
+     return res
+ 
+ 
+ def extract_year(timestamp):
+     # Extract the year from a Wikidata timestamp such as '1879-03-14T00:00:00Z' (or '-0044-...' for BCE dates).
+     parts = timestamp.split('-')
+     neg = False
+     if parts[0] == '':
+         year = parts[1]
+         neg = True
+     else:
+         year = parts[0]
+     if year.isdigit():
+         return str(-int(year)) if neg else str(int(year))
+     return np.nan
+ 
+ 
+ def get_all_properties(df):
+     def _query(relation_ids):
+         return f'''
+         SELECT ?item ?itemLabel ?wd ?wdLabel ?ps_ ?ps_Label WHERE {{
+           VALUES ?item {{
+             {" ".join([f"wd:{id}" for id in relation_ids])}
+           }}
+           ?item ?p ?statement .
+           ?statement ?ps ?ps_ .
+           ?wd wikibase:claim ?p .
+           ?wd wikibase:statementProperty ?ps .
+ 
+           SERVICE wikibase:label {{ bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }}
+         }}
+         '''
+     sparql = SPARQL()
+ 
+     subjects = df["s_uri"].to_list()
+     subject_chunks = [subjects[i:i+20] for i in range(0, len(subjects), 20)]
+ 
+     df = pd.concat([sparql.execute(_query(chunk)) for chunk in tqdm(subject_chunks)])
+     # Drop administrative and identifier-like properties.
+     df = df[~df["wdLabel"].str.contains(r"ID|category|template|username|instance of|gallery|article|handle|url|wiki|copyright|classification|website|described|tag|archive|reddit|profile|image|list|file", case=False, na=False)]
+     # Date of birth (P569) and inception (P571) values are reduced to a year and given the sentinel URI 'Q000'.
+     tmp = df[(df['wd'] == 'P569') | (df['wd'] == 'P571')].copy()
+     tmp['ps_Label'] = tmp['ps_Label'].apply(extract_year)
+     tmp.dropna(subset=['ps_Label'], inplace=True)
+     tmp['ps_'] = 'Q000'
+     df = df[~((df['wd'] == 'P569') | (df['wd'] == 'P571'))]
+     df = df[~df["ps_Label"].str.contains(r'\d', na=False)]
+     df = df[df["ps_"].apply(lambda s: bool(re.fullmatch(r"Q\d+", s)))]
+     df = pd.concat([df, tmp])
+     df = df[["item", "itemLabel", "wd", "wdLabel", "ps_", "ps_Label"]]
+     df = df.rename(
+         columns={
+             "item": "s_uri",
+             "itemLabel": "subject",
+             "wd": "r_uri",
+             "wdLabel": "relation",
+             "ps_": "a_uri",
+             "ps_Label": "attribute",
+         }
+     )
+     return df
+ 
+ 
+ def attribute_type(df):
+     def _query(uris):
+         return f'''
+         SELECT ?uri ?typeLabel
+         WHERE {{
+             {{VALUES ?uri {{ {" ".join([f"wd:{uri}" for uri in uris])} }} }}
+             ?uri wdt:P31 ?type.
+             SERVICE wikibase:label {{ bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }}
+         }}
+         '''
+     sparql = SPARQL()
+ 
+     uris = df["a_uri"].drop_duplicates().to_list()
+     uri_chunks = [uris[i:i+100] for i in range(0, len(uris), 100)]
+     a_types = pd.concat([sparql.execute(_query(chunk)) for chunk in tqdm(uri_chunks)])
+     a_types = a_types.groupby("uri")["typeLabel"].agg(list).reset_index(name="a_type")
+     a_types['a_type'] = a_types['a_type'].apply(lambda x: x if type(x) == list else [])
+     # The sentinel 'Q000' (year-valued attributes) gets the type 'year'.
+     a_types = pd.concat([a_types, pd.DataFrame([{'uri': 'Q000', 'a_type': str(['year'])}])])
+     return a_types
+ 
+ 
+ def get_wikidata_id(name):
+     url = "https://www.wikidata.org/w/api.php"
+     params = {
+         "action": "wbsearchentities",
+         "format": "json",
+         "language": "en",
+         "search": name
+     }
+     response = requests.get(url, params=params).json()
+     if 'search' in response and response['search']:
+         return response['search'][0]['id']
+     return None
+ 
+ 
+ def add_wikidata_ids(df, name_col="subject"):
+     df["wikidata_id"] = df[name_col].apply(get_wikidata_id)
+     return df
+ 
+ 
+ def add_unesco_question(base_df):
+     def _query(qids):
+         return f"""
+         SELECT ?item ?itemLabel ?startTime WHERE {{
+           VALUES ?item {{{' '.join(f'wd:{qid}' for qid in qids)}}}
+           ?item p:P1435 ?heritageStatement.
+           ?heritageStatement ps:P1435 wd:Q9259.
+           OPTIONAL {{
+             ?heritageStatement pq:P580 ?startTime.
+           }}
+           SERVICE wikibase:label {{ bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }}
+         }}
+         """
+     sparql = SPARQL()
+ 
+     df = base_df[base_df['type'] == 'landmarks']
+     subjects = df["s_uri"].to_list()
+     subject_chunks = [subjects[i:i+20] for i in range(0, len(subjects), 20)]
+ 
+     df = pd.concat([sparql.execute(_query(chunk)) for chunk in tqdm(subject_chunks)])
+     df.dropna(subset=['startTime'], inplace=True)
+     df['startTime'] = df['startTime'].apply(extract_year)
+     df = df.rename(
+         columns={
+             "item": "s_uri",
+             "startTime": "attribute",
+             "itemLabel": "subject",
+         }
+     )
+     df['possible_answers'] = df['attribute'].apply(lambda x: str([x]))
+     df['r_uri'] = 'P580'
+     df['relation'] = 'start time'
+     df['a_uri'] = 'P580'
+     df['a_type'] = str(['year'])
+     return df
+ 
+ 
+ def aggregate_triplets(base, aliases, relations, attributes, add_unesco=False):
+     subjects = base[['s_uri']]
+     relations = relations.merge(subjects, on="s_uri")
+     if type(aliases.iloc[0]['aliases']) == str:
+         aliases["aliases"] = aliases["aliases"].apply(lambda x: literal_eval(x))
+     if type(attributes.iloc[0]['a_type']) == str:
+         attributes["a_type"] = attributes["a_type"].apply(lambda x: literal_eval(x))
+ 
+     relations = relations.merge(aliases, left_on="a_uri", right_on="uri", how="left")
+     relations = relations.drop(columns=["uri"])
+     # possible_answers = aliases of the answer entity plus its canonical label.
+     relations["possible_answers"] = relations['aliases'].apply(lambda x: x if type(x) == list else [])
+     relations["possible_answers"] = relations.progress_apply(lambda x: x["possible_answers"] + [x["attribute"]], axis=1)
+ 
+     # Collapse multiple values of the same (subject, relation) pair into one row, concatenating the answers.
+     agg_funcs = {col: 'first' for col in relations.columns if col not in ['s_uri', 'r_uri', 'possible_answers']}
+     agg_funcs['possible_answers'] = sum
+     relations = relations.groupby(['s_uri', 'r_uri'], as_index=False).agg(agg_funcs)
+ 
+     relations = relations.drop(columns=["aliases"])
+     relations = relations.merge(attributes, left_on="a_uri", right_on="uri", how="left")
+     relations = relations.drop(columns=["uri"])
+ 
+     if add_unesco:
+         unesco = add_unesco_question(base)
+         relations = pd.concat([relations, unesco])
+ 
+     return relations
+ 
+ 
+ def subj_substitute(row):
+     if row['type'] == 'brands':
+         return f"the brand {row['subject']}"
+     if row['type'] == 'paintings':
+         return f"the painting {row['subject']}"
+     return row['subject']
+ 
+ 
+ def build_prompts(base_df, triplets, templates):
+     subjects = base_df[["s_uri", "subject"]]
+     base_df = base_df[["s_uri", "type"]]
+     triplets = triplets.drop("subject", axis=1)
+     triplets = triplets.merge(subjects, on=["s_uri"])
+     triplets = triplets.merge(base_df, on=["s_uri"], how='left')
+     triplets = triplets.merge(templates[["uri", "template"]], left_on="r_uri", right_on="uri")
+     triplets = triplets.drop(columns=["uri"])
+     triplets = triplets.dropna()
+ 
+     # Keep only subjects that have more than one answerable relation.
+     query_counts = triplets.drop_duplicates(["s_uri", "r_uri"]).groupby(["s_uri"])["r_uri"].count().reset_index(name="count")
+     triplets = triplets.merge(query_counts[query_counts["count"] > 1][["s_uri"]], on="s_uri")
+ 
+     triplets["question_for_image"] = triplets.progress_apply(replace_for_image, axis=1)
+     triplets["question_for_image"] = triplets.progress_apply(lambda row: row["question_for_image"].replace("[obj_type]", best_obj_type(row["a_type"])) if len(row["a_type"]) > 0 else row["question_for_image"], axis=1)
+     triplets["question"] = triplets.progress_apply(lambda row: row["template"].replace("[subj]", subj_substitute(row)), axis=1)
+     triplets["question"] = triplets.progress_apply(lambda row: row["question"].replace("[obj_type]", best_obj_type(row["a_type"])) if len(row["a_type"]) > 0 else row["question"], axis=1)
+     triplets = triplets.drop(columns=["template"])
+     triplets = triplets[['type', 'subject', 'question_for_image', 'question', 'possible_answers', 'relation', 's_uri', 'r_uri', 'a_uri', 'attribute', 'a_type']]
+     return triplets
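
The SPARQL helper above batches queries against the Wikidata endpoint and flattens the JSON bindings into a DataFrame, reducing entity URIs to bare QIDs via parse_value. A small usage sketch, assuming DATA_DIR is set so the module imports cleanly; the QID is an illustrative example, not part of the dataset:

    from dataset_utils import SPARQL

    sparql = SPARQL()
    df = sparql.execute('''
        SELECT ?s_uri ?alias WHERE {
            VALUES ?s_uri { wd:Q937 }   # Q937 (Albert Einstein), used here only as an example
            ?s_uri skos:altLabel ?alias.
            FILTER(LANG(?alias) = "en")
        }
    ''')
    # df has columns ['s_uri', 'alias']; s_uri values are bare QIDs such as 'Q937'.
    print(df.head())
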