Lancelot53 committed d830889 (verified) · 1 parent: 56c51f9

Update README.md

Files changed (1): README.md (+14 -54)

README.md (as updated):
size_categories:
- n<1K
---

# IllusionVQA: Optical Illusion Dataset

Paper Link: <br>
GitHub Link: <br>

## TL;DR
IllusionVQA is a dataset of optical illusions and hard-to-interpret scenes designed to test the capability of Vision Language Models in comprehension and soft localization tasks. GPT-4V achieved 62.99% accuracy on comprehension and 49.7% on localization, while humans achieved 91.03% and 100%, respectively.

## Usage
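Each row carries the fields used by the snippet below: a `question`, a list of `options`, the correct `answer`, and a PIL `image`. A quick way to peek at one row (a minimal sketch; the field and split names are taken from the code below):

```python
from datasets import load_dataset

ds = load_dataset("csebuetnlp/illusionVQA-Soft-Localization")
row = ds["train"][0]
print(row["question"])
print(row["options"], "->", row["answer"])
row["image"]  # a PIL image; call .show() to display it locally
```

The full few-shot evaluation flow against the OpenAI API: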
```python
from datasets import load_dataset
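
# The diff elides the next few lines (imports and helpers). A minimal sketch of
# what they must provide, inferred from the calls below; encode_image's name is
# given by the code, but its body here is an assumption:
import base64
import io
import os

from openai import OpenAI

def encode_image(pil_image):
    # Serialize a PIL image to a base64-encoded JPEG string for the
    # data-URL image payload used in add_row below.
    buffer = io.BytesIO()
    pil_image.convert("RGB").save(buffer, format="JPEG")
    return base64.b64encode(buffer.getvalue()).decode("utf-8")
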
def construct_mcq(options, correct_option):
    correct_option_letter = None
    i = "a"
    mcq = ""
    for option in options:
        if option == correct_option:
            correct_option_letter = i
        mcq += f"{i}. {option}\n"
        i = chr(ord(i) + 1)
    mcq = mcq[:-1]  # drop the trailing newline
    return mcq, correct_option_letter

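# For example, with hypothetical option strings:
#   construct_mcq(["two", "three", "four"], "three")
#   returns ("a. two\nb. three\nc. four", "b")
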
def add_row(content, data, i, with_answer=False):
    mcq, correct_option_letter = construct_mcq(data["options"], data["answer"])
    content.append({"type": "text",
                    "text": "Image " + str(i) + ": " + data["question"] + "\n" + mcq})
    content.append({"type": "image_url",
                    "image_url": {"url": f"data:image/jpeg;base64,{encode_image(data['image'])}",
                                  "detail": "low"}})
    if with_answer:
        content.append({"type": "text", "text": "Answer {}: ".format(i) + correct_option_letter})
    else:
        content.append({"type": "text", "text": "Answer {}: ".format(i)})
    return content

dataset = load_dataset("csebuetnlp/illusionVQA-Soft-Localization")
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

content = [{
    "type": "text",
    "text": "You'll be given an image, an instruction and some choices. You have to select the correct one. Do not explain your reasoning. Answer with the option's letter from the given choices directly. Here are a few examples:",
}]

### Add the few examples
i = 1
for data in dataset["train"]:
    content = add_row(content, data, i, with_answer=True)
    i += 1

content.append({"type": "text", "text": "Now you try it!"})

next_idx = i

### Add the test data
# (elided in the diff: how test_data is selected from the held-out split)
content_t = add_row(content.copy(), test_data, next_idx, with_answer=False)

### Get the answer from GPT-4
response = client.chat.completions.create(
    model="gpt-4-vision-preview",
    messages=[{"role": "user", "content": content_t}],
    max_tokens=5,
)
gpt4_answer = response.choices[0].message.content
```
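
Two details worth noting in the snippet: `"detail": "low"` keeps the per-image token cost down, and `max_tokens=5` is enough because the prompt instructs the model to reply with just an option letter.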

The dataset creator makes no representations or warranties regarding the copyright [...]

You agree to the terms and conditions specified in this license by downloading or using this dataset. If you do not agree with these terms, do not download or use the dataset.

### Citation