---
dataset_info:
- config_name: multi_image_datasets_AESOP(m4-instruct,llava_format)
  features:
  - name: id
    dtype: string
  - name: image_1
    dtype: image
  - name: image_2
    dtype: image
  - name: image_3
    dtype: image
  - name: image_4
    dtype: image
  - name: image_5
    dtype: image
  - name: conversations
    list:
    - name: from
      dtype: string
    - name: value
      dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 6143751.0
    num_examples: 40
  download_size: 5873935
  dataset_size: 6143751.0
- config_name: multi_image_datasets_DocVQA(m4-instruct,llava_format)
  features:
  - name: id
    dtype: string
  - name: image_1
    dtype: image
  - name: image_2
    dtype: image
  - name: image_3
    dtype: image
  - name: image_4
    dtype: image
  - name: image_5
    dtype: image
  - name: conversations
    list:
    - name: from
      dtype: string
    - name: value
      dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 50860263.0
    num_examples: 40
  download_size: 49023061
  dataset_size: 50860263.0
- config_name: multi_image_datasets_Flintstones(m4-instruct,llava_format)
  features:
  - name: id
    dtype: string
  - name: image_1
    dtype: image
  - name: image_2
    dtype: image
  - name: image_3
    dtype: image
  - name: image_4
    dtype: image
  - name: image_5
    dtype: image
  - name: conversations
    list:
    - name: from
      dtype: string
    - name: value
      dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 5959276.0
    num_examples: 40
  download_size: 5950206
  dataset_size: 5959276.0
- config_name: multi_image_datasets_IEdit(m4-instruct,llava_format)
  features:
  - name: id
    dtype: string
  - name: image_1
    dtype: image
  - name: image_2
    dtype: image
  - name: image_3
    dtype: image
  - name: image_4
    dtype: image
  - name: image_5
    dtype: image
  - name: conversations
    list:
    - name: from
      dtype: string
    - name: value
      dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 3154768.0
    num_examples: 40
  download_size: 3149748
  dataset_size: 3154768.0
- config_name: multi_image_datasets_MIT-States_PropertyCoherence(m4-instruct,llava_format)
  features:
  - name: id
    dtype: string
  - name: image_1
    dtype: image
  - name: image_2
    dtype: image
  - name: image_3
    dtype: image
  - name: image_4
    dtype: image
  - name: image_5
    dtype: image
  - name: conversations
    list:
    - name: from
      dtype: string
    - name: value
      dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 1478441.0
    num_examples: 40
  download_size: 1466262
  dataset_size: 1478441.0
- config_name: multi_image_datasets_OCR-VQA(m4-instruct,llava_format)
  features:
  - name: id
    dtype: string
  - name: image_1
    dtype: image
  - name: image_2
    dtype: image
  - name: image_3
    dtype: image
  - name: image_4
    dtype: image
  - name: image_5
    dtype: image
  - name: conversations
    list:
    - name: from
      dtype: string
    - name: value
      dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 4492663.0
    num_examples: 40
  download_size: 4484114
  dataset_size: 4492663.0
- config_name: multi_image_datasets_PororoSV(m4-instruct,llava_format)
  features:
  - name: id
    dtype: string
  - name: image_1
    dtype: image
  - name: image_2
    dtype: image
  - name: image_3
    dtype: image
  - name: image_4
    dtype: image
  - name: image_5
    dtype: image
  - name: conversations
    list:
    - name: from
      dtype: string
    - name: value
      dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 5440149.0
    num_examples: 40
  download_size: 5443514
  dataset_size: 5440149.0
- config_name: multi_image_datasets_RecipeQA_VisualCloze(m4-instruct,llava_format)
  features:
  - name: id
    dtype: string
  - name: image_1
    dtype: image
  - name: image_2
    dtype: image
  - name: image_3
    dtype: image
  - name: image_4
    dtype: image
  - name: image_5
    dtype: image
  - name: conversations
    list:
    - name: from
      dtype: string
    - name: value
      dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 3246251.0
    num_examples: 40
  download_size: 3222977
  dataset_size: 3246251.0
- config_name: multi_image_datasets_TQA(m4-instruct,llava_format)
  features:
  - name: id
    dtype: string
  - name: image_1
    dtype: image
  - name: image_2
    dtype: image
  - name: image_3
    dtype: image
  - name: image_4
    dtype: image
  - name: image_5
    dtype: image
  - name: conversations
    list:
    - name: from
      dtype: string
    - name: value
      dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 46775870.0
    num_examples: 40
  download_size: 30682883
  dataset_size: 46775870.0
- config_name: multi_image_datasets_alfred(m4-instruct,llava_format)
  features:
  - name: id
    dtype: string
  - name: image_1
    dtype: image
  - name: image_2
    dtype: image
  - name: image_3
    dtype: image
  - name: image_4
    dtype: image
  - name: image_5
    dtype: image
  - name: conversations
    list:
    - name: from
      dtype: string
    - name: value
      dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 2272866.0
    num_examples: 40
  download_size: 2264739
  dataset_size: 2272866.0
- config_name: multi_image_datasets_birds-to-words(m4-instruct,llava_format)
  features:
  - name: id
    dtype: string
  - name: image_1
    dtype: image
  - name: image_2
    dtype: image
  - name: image_3
    dtype: image
  - name: image_4
    dtype: image
  - name: image_5
    dtype: image
  - name: conversations
    list:
    - name: from
      dtype: string
    - name: value
      dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 6879966.0
    num_examples: 40
  download_size: 6289105
  dataset_size: 6879966.0
- config_name: multi_image_datasets_webqa(m4-instruct,llava_format)
  features:
  - name: id
    dtype: string
  - name: image_1
    dtype: image
  - name: image_2
    dtype: image
  - name: image_3
    dtype: image
  - name: image_4
    dtype: image
  - name: image_5
    dtype: image
  - name: conversations
    list:
    - name: from
      dtype: string
    - name: value
      dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 11166036.0
    num_examples: 40
  download_size: 11155037
  dataset_size: 11166036.0
- config_name: wit
  features:
  - name: id
    dtype: string
  - name: image
    dtype: image
  - name: conversations
    list:
    - name: from
      dtype: string
    - name: value
      dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 100886747.0
    num_examples: 40
  download_size: 100875512
  dataset_size: 100886747.0
configs:
- config_name: multi_image_datasets_AESOP(m4-instruct,llava_format)
  data_files:
  - split: train
    path: multi_image_datasets_AESOP(m4-instruct,llava_format)/train-*
- config_name: multi_image_datasets_DocVQA(m4-instruct,llava_format)
  data_files:
  - split: train
    path: multi_image_datasets_DocVQA(m4-instruct,llava_format)/train-*
- config_name: multi_image_datasets_Flintstones(m4-instruct,llava_format)
  data_files:
  - split: train
    path: multi_image_datasets_Flintstones(m4-instruct,llava_format)/train-*
- config_name: multi_image_datasets_IEdit(m4-instruct,llava_format)
  data_files:
  - split: train
    path: multi_image_datasets_IEdit(m4-instruct,llava_format)/train-*
- config_name: multi_image_datasets_MIT-States_PropertyCoherence(m4-instruct,llava_format)
  data_files:
  - split: train
    path: multi_image_datasets_MIT-States_PropertyCoherence(m4-instruct,llava_format)/train-*
- config_name: multi_image_datasets_OCR-VQA(m4-instruct,llava_format)
  data_files:
  - split: train
    path: multi_image_datasets_OCR-VQA(m4-instruct,llava_format)/train-*
- config_name: multi_image_datasets_PororoSV(m4-instruct,llava_format)
  data_files:
  - split: train
    path: multi_image_datasets_PororoSV(m4-instruct,llava_format)/train-*
- config_name: multi_image_datasets_RecipeQA_VisualCloze(m4-instruct,llava_format)
  data_files:
  - split: train
    path: multi_image_datasets_RecipeQA_VisualCloze(m4-instruct,llava_format)/train-*
- config_name: multi_image_datasets_TQA(m4-instruct,llava_format)
  data_files:
  - split: train
    path: multi_image_datasets_TQA(m4-instruct,llava_format)/train-*
- config_name: multi_image_datasets_alfred(m4-instruct,llava_format)
  data_files:
  - split: train
    path: multi_image_datasets_alfred(m4-instruct,llava_format)/train-*
- config_name: multi_image_datasets_birds-to-words(m4-instruct,llava_format)
  data_files:
  - split: train
    path: multi_image_datasets_birds-to-words(m4-instruct,llava_format)/train-*
- config_name: multi_image_datasets_webqa(m4-instruct,llava_format)
  data_files:
  - split: train
    path: multi_image_datasets_webqa(m4-instruct,llava_format)/train-*
- config_name: wit
  data_files:
  - split: train
    path: wit/train-*
---