Leyo committed on
Commit
e7fbbd2
·
1 Parent(s): b9d0029

create ActivityNet_captions

Browse files
Files changed (1) hide show
  1. ActivityNet_Captions +121 -0
ActivityNet_Captions ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Lint as: python3
"""ActivityNet Captions: Dense-Captioning Events in Videos.

Loading script for the ActivityNet Captions dataset (Krishna et al., ICCV 2017),
which pairs ~20k YouTube videos with temporally localized sentence annotations.
"""
# NOTE(review): the original docstring described the TGIF dataset — a
# copy-paste leftover; everything below is ActivityNet Captions.

import os
import json
import datasets

# BibTeX entry for the dataset paper.
_CITATION = """
@inproceedings{krishna2017dense,
    title={Dense-Captioning Events in Videos},
    author={Krishna, Ranjay and Hata, Kenji and Ren, Frederic and Fei-Fei, Li and Niebles, Juan Carlos},
    booktitle={International Conference on Computer Vision (ICCV)},
    year={2017}
}
"""

_DESCRIPTION = """\
The ActivityNet Captions dataset connects videos to a series of temporally annotated sentence descriptions.
Each sentence covers an unique segment of the video, describing multiple events that occur. These events
may occur over very long or short periods of time and are not limited in any capacity, allowing them to
co-occur. On average, each of the 20k videos contains 3.65 temporally localized sentences, resulting in
a total of 100k sentences. We find that the number of sentences per video follows a relatively normal
distribution. Furthermore, as the video duration increases, the number of sentences also increases.
Each sentence has an average length of 13.48 words, which is also normally distributed. You can find more
details of the dataset under the ActivityNet Captions Dataset section, and under supplementary materials
in the paper.
"""

# Root URL hosting the annotation archive (captions.zip).
_URL_BASE = "https://cs.stanford.edu/people/ranjaykrishna/densevid/"
class ActivityNetConfig(datasets.BuilderConfig):
    """Configuration for the ActivityNet Captions dataset builder.

    Pins the dataset version and forwards every other option unchanged
    to ``datasets.BuilderConfig``.
    """

    def __init__(self, **kwargs):
        super().__init__(version=datasets.Version("2.1.0", ""), **kwargs)
class ActivityNet(datasets.GeneratorBasedBuilder):
    """Builder for ActivityNet Captions: videos paired with temporally
    localized caption segments (start/end times plus one sentence each)."""

    DEFAULT_CONFIG_NAME = "all"
    BUILDER_CONFIGS = [
        ActivityNetConfig(
            name="all", description="All the ActivityNet Captions dataset"),
    ]

    def _info(self):
        """Declare the feature schema, homepage and citation for the dataset."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "video_id": datasets.Value("string"),
                    "path": datasets.Value("string"),
                    "duration": datasets.Value("float32"),
                    "starts": datasets.features.Sequence(datasets.Value("float32")),
                    "ends": datasets.features.Sequence(datasets.Value("float32")),
                    "captions": datasets.features.Sequence(datasets.Value("string"))
                }
            ),
            supervised_keys=None,
            homepage=_URL_BASE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the annotation archive and map its files onto splits.

        The official release provides train.json plus two validation files;
        val_1.json serves as validation and val_2.json as the test split.
        """
        archive_path = dl_manager.download_and_extract(
            _URL_BASE + "captions.zip")

        train_splits = [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "ids_file": os.path.join(archive_path, "train_ids.json"),
                    "infos_file": os.path.join(archive_path, "train.json")
                },
            )
        ]
        dev_splits = [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "ids_file": os.path.join(archive_path, "val_ids.json"),
                    "infos_file": os.path.join(archive_path, "val_1.json")
                },
            )
        ]
        test_splits = [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "ids_file": os.path.join(archive_path, "test_ids.json"),
                    "infos_file": os.path.join(archive_path, "val_2.json")
                },
            )
        ]
        return train_splits + dev_splits + test_splits

    def _generate_examples(self, ids_file, infos_file):
        """Yield (index, example) pairs for one split.

        Args:
            ids_file: JSON list of video ids belonging to this split.
            infos_file: JSON mapping video id -> {"duration", "timestamps",
                "sentences"} annotations.

        BUG FIX: the original yielded dict repeated the "video_id" and "path"
        keys with feature specs (datasets.Value("string")) pasted in from
        _info(); the later duplicates silently overwrote the real values, so
        every example carried type objects instead of the id and URL.
        """
        with open(infos_file, encoding="utf-8") as json_file:
            infos = json.load(json_file)

        with open(ids_file, encoding="utf-8") as json_file:
            ids = json.load(json_file)

        for idx, video_id in enumerate(ids):
            # Ids carry a "v_" prefix; strip it to build the YouTube URL.
            path = "https://www.youtube.com/watch?v=" + video_id[2:]
            timestamps = infos[video_id]["timestamps"]
            starts = [timestamp[0] for timestamp in timestamps]
            ends = [timestamp[1] for timestamp in timestamps]
            yield idx, {
                "video_id": video_id,
                "path": path,
                "duration": infos[video_id]["duration"],
                "starts": starts,
                "ends": ends,
                "captions": infos[video_id]["sentences"],
            }