from __future__ import annotations

import csv
import json
import uuid
from collections import OrderedDict
from pathlib import Path
from typing import Any, Sequence

import filelock
import huggingface_hub
from gradio_client import utils as client_utils
from gradio_client.documentation import document

import gradio as gr
from gradio import utils
from gradio.components import Component
from gradio.flagging import FlaggingCallback


@document()
class HuggingFaceDatasetSaver(FlaggingCallback):
    """
    A callback that saves each flagged sample (both the input and output data) to a HuggingFace dataset.

    Example:
        import gradio as gr
        hf_writer = gr.HuggingFaceDatasetSaver(HF_API_TOKEN, "image-classification-mistakes")
        def image_classifier(inp):
            return {'cat': 0.3, 'dog': 0.7}
        demo = gr.Interface(fn=image_classifier, inputs="image", outputs="label",
                            allow_flagging="manual", flagging_callback=hf_writer)
    Guides: using-flagging
    """

    def __init__(
        self,
        hf_token: str,
        dataset_name: str,
        private: bool = False,
        info_filename: str = "dataset_info.json",
        separate_dirs: bool = False,
    ):
        """
        Parameters:
            hf_token: The HuggingFace token used to create the dataset repo and to upload flagged samples to it. It must have write access to the target namespace.
            dataset_name: The repo_id of the dataset to save the data to, e.g. "image-classifier-1" or "username/image-classifier-1".
            private: Whether the dataset should be private (defaults to False).
            info_filename: The name of the file to save the dataset info (defaults to "dataset_info.json").
            separate_dirs: If True, each flagged item will be saved in a separate directory. This makes the flagging more robust to concurrent editing, but may be less convenient to use.
        """
        self.hf_token = hf_token
        self.dataset_id = dataset_name  # TODO: rename parameter (but ensure backward compatibility somehow)
        self.dataset_private = private
        self.info_filename = info_filename
        self.separate_dirs = separate_dirs

    def setup(self, components: Sequence[Component], flagging_dir: str):
        """
        Params:
        flagging_dir (str): local directory where the dataset is cloned,
        updated, and pushed from.
        """
        # Setup dataset on the Hub
        self.dataset_id = huggingface_hub.create_repo(
            repo_id=self.dataset_id,
            token=self.hf_token,
            private=self.dataset_private,
            repo_type="dataset",
            exist_ok=True,
        ).repo_id
        path_glob = "**/*.jsonl" if self.separate_dirs else "data.csv"
        huggingface_hub.metadata_update(
            repo_id=self.dataset_id,
            repo_type="dataset",
            metadata={
                "configs": [
                    {
                        "config_name": "default",
                        "data_files": [{"split": "train", "path": path_glob}],
                    }
                ]
            },
            overwrite=True,
            token=self.hf_token,
        )
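
        # Note (sketch, not verified verbatim): the metadata_update call above
        # stores dataset-viewer config in the repo's README front matter, roughly:
        #   configs:
        #   - config_name: default
        #     data_files:
        #     - split: train
        #       path: data.csv   # or "**/*.jsonl" when separate_dirs=True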

        # Setup flagging dir
        self.components = components
        self.dataset_dir = (
            Path(flagging_dir).absolute() / self.dataset_id.split("/")[-1]
        )
        self.dataset_dir.mkdir(parents=True, exist_ok=True)
        self.infos_file = self.dataset_dir / self.info_filename

        # Download remote files to local
        remote_files = [self.info_filename]
        if not self.separate_dirs:
            # Without separate dirs, all samples are appended to a single CSV file => download it to get its current content
            remote_files.append("data.csv")

        for filename in remote_files:
            try:
                huggingface_hub.hf_hub_download(
                    repo_id=self.dataset_id,
                    repo_type="dataset",
                    filename=filename,
                    local_dir=self.dataset_dir,
                    token=self.hf_token,
                )
            except huggingface_hub.utils.EntryNotFoundError:
                pass

    def flag(
        self,
        flag_data: list[Any],
        flag_option: str = "",
        username: str | None = None,
    ) -> int:
        if self.separate_dirs:
            # JSONL files to support dataset preview on the Hub
            unique_id = str(uuid.uuid4())
            components_dir = self.dataset_dir / unique_id
            data_file = components_dir / "metadata.jsonl"
            path_in_repo = unique_id  # upload in sub folder (safer for concurrency)
        else:
            # Unique CSV file
            components_dir = self.dataset_dir
            data_file = components_dir / "data.csv"
            path_in_repo = None  # upload at root level

        return self._flag_in_dir(
            data_file=data_file,
            components_dir=components_dir,
            path_in_repo=path_in_repo,
            flag_data=flag_data,
            flag_option=flag_option,
            username=username or "",
        )

    def _flag_in_dir(
        self,
        data_file: Path,
        components_dir: Path,
        path_in_repo: str | None,
        flag_data: list[Any],
        flag_option: str = "",
        username: str = "",
    ) -> int:
        # Deserialize components (write images/audio to files)
        features, row = self._deserialize_components(
            components_dir, flag_data, flag_option, username
        )

        # Write generic info to the dataset info file (e.g. "dataset_info.json") + upload it
        with filelock.FileLock(str(self.infos_file) + ".lock"):
            if not self.infos_file.exists():
                self.infos_file.write_text(
                    json.dumps({"flagged": {"features": features}})
                )

                huggingface_hub.upload_file(
                    repo_id=self.dataset_id,
                    repo_type="dataset",
                    token=self.hf_token,
                    path_in_repo=self.infos_file.name,
                    path_or_fileobj=self.infos_file,
                )

        headers = list(features.keys())

        if not self.separate_dirs:
            with filelock.FileLock(components_dir / ".lock"):
                sample_nb = self._save_as_csv(data_file, headers=headers, row=row)
                sample_name = str(sample_nb)
                huggingface_hub.upload_folder(
                    repo_id=self.dataset_id,
                    repo_type="dataset",
                    commit_message=f"Flagged sample #{sample_name}",
                    path_in_repo=path_in_repo,
                    ignore_patterns="*.lock",
                    folder_path=components_dir,
                    token=self.hf_token,
                )
        else:
            sample_name = self._save_as_jsonl(data_file, headers=headers, row=row)
            sample_nb = len(
                [path for path in self.dataset_dir.iterdir() if path.is_dir()]
            )
            huggingface_hub.upload_folder(
                repo_id=self.dataset_id,
                repo_type="dataset",
                commit_message=f"Flagged sample #{sample_name}",
                path_in_repo=path_in_repo,
                ignore_patterns="*.lock",
                folder_path=components_dir,
                token=self.hf_token,
            )

        return sample_nb

    @staticmethod
    def _save_as_csv(data_file: Path, headers: list[str], row: list[Any]) -> int:
        """Save data as CSV and return the sample name (row number)."""
        is_new = not data_file.exists()

        with data_file.open("a", newline="", encoding="utf-8") as csvfile:
            writer = csv.writer(csvfile)

            # Write CSV headers if new file
            if is_new:
                writer.writerow(utils.sanitize_list_for_csv(headers))

            # Write CSV row for flagged sample
            writer.writerow(utils.sanitize_list_for_csv(row))

        with data_file.open(encoding="utf-8") as csvfile:
            return sum(1 for _ in csv.reader(csvfile)) - 1

    @staticmethod
    def _save_as_jsonl(data_file: Path, headers: list[str], row: list[Any]) -> str:
        """Save data as JSONL and return the sample name (uuid)."""
        data_file.parent.mkdir(parents=True, exist_ok=True)
        with data_file.open("w", encoding="utf-8") as f:
            json.dump(dict(zip(headers, row)), f)
        return data_file.parent.name

    def _deserialize_components(
        self,
        data_dir: Path,
        flag_data: list[Any],
        flag_option: str = "",
        username: str = "",
    ) -> tuple[dict[Any, Any], list[Any]]:
        """Deserialize components and return the corresponding row for the flagged sample.

        Images/audio are saved to disk as individual files.
        """
        # Components that can have a preview on dataset repos
        file_preview_types = {gr.Audio: "Audio", gr.Image: "Image"}

        # Generate the row corresponding to the flagged sample
        features = OrderedDict()
        row = []
        for component, sample in zip(self.components, flag_data):
            # Get deserialized object (saves the sample to disk if applicable: file, audio, image, ...)
            label = component.label or ""
            save_dir = data_dir / client_utils.strip_invalid_filename_characters(label)
            save_dir.mkdir(exist_ok=True, parents=True)
            deserialized = utils.simplify_file_data_in_str(
                component.flag(sample, save_dir)
            )

            # Add deserialized object to row
            features[label] = {"dtype": "string", "_type": "Value"}
            try:
                deserialized_path = Path(deserialized)
                if not deserialized_path.exists():
                    raise FileNotFoundError(f"File {deserialized} not found")
                row.append(str(deserialized_path.relative_to(self.dataset_dir)))
            except (FileNotFoundError, TypeError, ValueError, OSError):
                deserialized = "" if deserialized is None else str(deserialized)
                row.append(deserialized)

            # If component is eligible for a preview, add the URL of the file
            # Be mindful that images and audio can be None
            if isinstance(component, tuple(file_preview_types)):  # type: ignore
                for _component, _type in file_preview_types.items():
                    if isinstance(component, _component):
                        features[label + " file"] = {"_type": _type}
                        break
                if deserialized:
                    path_in_repo = str(  # returned filepath is absolute, we want it relative to compute URL
                        Path(deserialized).relative_to(self.dataset_dir)
                    ).replace("\\", "/")
                    row.append(
                        huggingface_hub.hf_hub_url(
                            repo_id=self.dataset_id,
                            filename=path_in_repo,
                            repo_type="dataset",
                        )
                    )
                else:
                    row.append("")
        features["flag"] = {"dtype": "string", "_type": "Value"}
        features["username"] = {"dtype": "string", "_type": "Value"}
        row.append(flag_option)
        row.append(username)
        return features, row


class myHuggingFaceDatasetSaver(HuggingFaceDatasetSaver):
    """
    Custom HuggingFaceDatasetSaver to save images/audio to disk.
    Gradio's implementation seems to have a bug.
    """

    def _deserialize_components(
        self,
        data_dir: Path,
        flag_data: list[Any],
        flag_option: str = "",
        username: str = "",
    ) -> tuple[dict[Any, Any], list[Any]]:
        """Deserialize components and return the corresponding row for the flagged sample.

        Images/audio are saved to disk as individual files.
        """
        # Components that can have a preview on dataset repos
        file_preview_types = {gr.Audio: "Audio", gr.Image: "Image"}

        # Generate the row corresponding to the flagged sample
        features = OrderedDict()
        row = []
        for component, sample in zip(self.components, flag_data):
            # Get deserialized object (saves the sample to disk if applicable: file, audio, image, ...)
            label = component.label or ""
            save_dir = data_dir / client_utils.strip_invalid_filename_characters(label)
            save_dir.mkdir(exist_ok=True, parents=True)
            deserialized = component.flag(sample, save_dir)
            if isinstance(component, gr.Image) and isinstance(sample, dict):
                # Workaround: flag() returns a JSON-serialized FileData payload
                # here rather than a plain filepath; extract the saved file's path.
                deserialized = json.loads(deserialized)["path"]

            # Add deserialized object to row
            features[label] = {"dtype": "string", "_type": "Value"}
            try:
                deserialized_path = Path(deserialized)
                if not deserialized_path.exists():
                    raise FileNotFoundError(f"File {deserialized} not found")
                row.append(str(deserialized_path.relative_to(self.dataset_dir)))
            except (FileNotFoundError, TypeError, ValueError, OSError):
                deserialized = "" if deserialized is None else str(deserialized)
                row.append(deserialized)

            # If component is eligible for a preview, add the URL of the file
            # Be mindful that images and audio can be None
            if isinstance(component, tuple(file_preview_types)):  # type: ignore
                for _component, _type in file_preview_types.items():
                    if isinstance(component, _component):
                        features[label + " file"] = {"_type": _type}
                        break
                if deserialized:
                    path_in_repo = str(
                        # returned filepath is absolute, we want it relative to compute URL
                        Path(deserialized).relative_to(self.dataset_dir)
                    ).replace("\\", "/")
                    row.append(
                        huggingface_hub.hf_hub_url(
                            repo_id=self.dataset_id,
                            filename=path_in_repo,
                            repo_type="dataset",
                        )
                    )
                else:
                    row.append("")
        features["flag"] = {"dtype": "string", "_type": "Value"}
        features["username"] = {"dtype": "string", "_type": "Value"}
        row.append(flag_option)
        row.append(username)
        return features, row
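

# --- Usage sketch (not part of the original module; names below are placeholders) ---
# Minimal example wiring the custom saver into a gr.Interface so that flagged
# input/output pairs are pushed to a Hub dataset. "hf_..." stands in for a real
# token with write access, and the repo name is hypothetical.
if __name__ == "__main__":
    hf_writer = myHuggingFaceDatasetSaver(
        hf_token="hf_...",  # placeholder: use a token with write access
        dataset_name="username/image-classification-flags",  # hypothetical repo
        separate_dirs=True,  # one subdirectory + metadata.jsonl per flagged sample
    )

    def image_classifier(inp):
        # Toy classifier used only to demo the flagging flow
        return {"cat": 0.3, "dog": 0.7}

    demo = gr.Interface(
        fn=image_classifier,
        inputs="image",
        outputs="label",
        allow_flagging="manual",
        flagging_callback=hf_writer,  # setup()/flag() are invoked by Gradio
    )
    demo.launch()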