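"""Shared helpers for the synthetic dataset generator Gradio apps.

Covers validating Hugging Face and Argilla targets, pushing generated datasets and
pipeline code to the Hub, rendering success messages and dataset viewer iframes,
and loading or chunking input documents into pandas DataFrames.
"""
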
import io
import uuid
from typing import Union

import argilla as rg
import gradio as gr
import pandas as pd
from datasets import Dataset, concatenate_datasets, get_dataset_config_names, get_dataset_split_names, load_dataset
from gradio import OAuthToken
from huggingface_hub import HfApi, repo_exists, upload_file
from tqdm import tqdm
from unstructured.chunking.title import chunk_by_title
from unstructured.partition.auto import partition

from synthetic_dataset_generator.constants import MAX_NUM_ROWS, SAVE_LOCAL_DIR
from synthetic_dataset_generator.utils import get_argilla_client

if SAVE_LOCAL_DIR is not None:
    import os

    os.makedirs(SAVE_LOCAL_DIR, exist_ok=True)


def validate_argilla_user_workspace_dataset(
    dataset_name: str,
    add_to_existing_dataset: bool = True,
    oauth_token: Union[OAuthToken, None] = None,
    progress=gr.Progress(),
) -> str:
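    """Validate that the Hugging Face user, an Argilla workspace, and the target dataset can be used.

    Creates the Argilla user and workspace when they do not exist yet and raises a
    Gradio error if the dataset already exists and `add_to_existing_dataset` is False.
    """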
    progress(0.1, desc="Validating dataset configuration")
    hf_user = HfApi().whoami(token=oauth_token.token)["name"]
    client = get_argilla_client()
    if dataset_name is None or dataset_name == "":
        raise gr.Error("Dataset name is required")
    # Create user if it doesn't exist
    rg_user = client.users(username=hf_user)
    if rg_user is None:
        rg_user = client.users.add(
            rg.User(username=hf_user, role="admin", password=str(uuid.uuid4()))
        )
    # Create workspace if it doesn't exist
    workspace = client.workspaces(name=hf_user)
    if workspace is None:
        workspace = client.workspaces.add(rg.Workspace(name=hf_user))
        workspace.add_user(hf_user)
    # Check if dataset exists
    dataset = client.datasets(name=dataset_name, workspace=hf_user)
    if dataset and not add_to_existing_dataset:
        raise gr.Error(f"Dataset {dataset_name} already exists")
    progress(1.0, desc="Dataset configuration validated")
    return ""


def push_pipeline_code_to_hub(
    pipeline_code: str,
    org_name: str,
    repo_name: str,
    oauth_token: Union[OAuthToken, None] = None,
    progress=gr.Progress(),
):
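    """Upload the generated pipeline script as `pipeline.py` to the dataset repository on the Hub."""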
    repo_id: str | None = validate_push_to_hub(org_name, repo_name)
    progress(0.1, desc="Uploading pipeline code")
    with io.BytesIO(pipeline_code.encode("utf-8")) as f:
        upload_file(
            path_or_fileobj=f,
            path_in_repo="pipeline.py",
            repo_id=repo_id,
            repo_type="dataset",
            token=oauth_token.token,
            commit_message="Include pipeline script",
            create_pr=False,
        )
    progress(1.0, desc="Pipeline code uploaded")


def validate_push_to_hub(org_name: str, repo_name: str):
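    """Return the `org_name/repo_name` repo id, raising a Gradio error when one of the parts is empty."""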
    repo_id = (
        f"{org_name}/{repo_name}"
        if repo_name is not None and org_name is not None
        else None
    )
    if repo_id is not None:
        if not all([repo_id, org_name, repo_name]):
            raise gr.Error(
                "Please provide a `repo_name` and `org_name` to push the dataset to."
            )
    return repo_id


def combine_datasets(
    repo_id: str, dataset: Dataset, oauth_token: Union[OAuthToken, None]
) -> Dataset:
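    """Concatenate `dataset` with the existing `train` split of `repo_id`.

    Falls back to returning `dataset` unchanged when the remote split cannot be loaded.
    """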
    try:
        new_dataset = load_dataset(
            repo_id,
            split="train",
            download_mode="force_redownload",
            token=oauth_token.token,
        )
        return concatenate_datasets([dataset, new_dataset])
    except Exception:
        return dataset


def show_success_message(org_name: str, repo_name: str) -> gr.Markdown:
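    """Return the post-publication success message.

    Links to the dataset on the Hub and, when an Argilla client is configured, to the Argilla UI.
    """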
    client = get_argilla_client()
    if client is None:
        return gr.Markdown(
            value=f"""
<div style="padding: 1em; background-color: var(--block-background-fill); border-color: var(--border-color-primary); border-width: 1px; border-radius: 5px;">
<h3 style="color: #2e7d32; margin: 0;">Dataset Published Successfully!</h3>
<p style="margin-top: 0.5em;">
The generated dataset is in the right format for fine-tuning with TRL, AutoTrain, or other frameworks.
<div style="display: flex; gap: 10px;">
<a href="https://huggingface.co/datasets/{org_name}/{repo_name}" target="_blank" class="lg primary svelte-1137axg" style="color: white !important; margin-top: 0.5em; text-decoration: none;">
Open in Hugging Face
</a>
</div>
</p>
<p style="margin-top: 1em; color: var(--block-title-text-color)">
By configuring an `ARGILLA_API_URL` and `ARGILLA_API_KEY` you can curate the dataset in Argilla.
Unfamiliar with Argilla? Here are some docs to help you get started:
<br>• <a href="https://docs.argilla.io/latest/getting_started/quickstart/" target="_blank">How to get started with Argilla</a>
<br>• <a href="https://docs.argilla.io/latest/how_to_guides/annotate/" target="_blank">How to curate data in Argilla</a>
<br>• <a href="https://docs.argilla.io/latest/how_to_guides/import_export/" target="_blank">How to export data once you have reviewed the dataset</a>
</p>
</div>
""",
visible=True,
height=None,
min_height=None,
max_height=None,
)
    argilla_api_url = client.api_url
    # Transform Docker internal URL to localhost if needed
    if "argilla:" in argilla_api_url:
        argilla_api_url = argilla_api_url.replace("argilla:", "127.0.0.1:")
    return gr.Markdown(
        value=f"""
<div style="padding: 1em; background-color: var(--block-background-fill); border-color: var(--border-color-primary); border-width: 1px; border-radius: 5px;">
<h3 style="color: #2e7d32; margin: 0;">Dataset Published Successfully!</h3>
<p style="margin-top: 0.5em;">
The generated dataset is <a href="https://huggingface.co/datasets/{org_name}/{repo_name}" target="_blank">available in the Hub</a>. It is in the right format for fine-tuning with TRL, AutoTrain, or other frameworks.
<div style="display: flex; gap: 10px;">
<a href="{argilla_api_url}" target="_blank" class="lg primary svelte-1137axg" style="color: white !important; margin-top: 0.5em; text-decoration: none;">
Open in Argilla
</a>
</div>
</p>
<p style="margin-top: 1em; color: var(--block-title-text-color)">
Unfamiliar with Argilla? Here are some docs to help you get started:
<br>• <a href="https://docs.argilla.io/latest/how_to_guides/annotate/" target="_blank">How to curate data in Argilla</a>
<br>• <a href="https://docs.argilla.io/latest/how_to_guides/import_export/" target="_blank">How to export data once you have reviewed the dataset</a>
</p>
</div>
""",
visible=True,
height=None,
min_height=None,
max_height=None,
)
def hide_success_message() -> gr.Markdown:
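    """Return an empty Markdown component used to clear the success message."""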
    return gr.Markdown(value="", visible=True, height=100)


def test_max_num_rows(num_rows: int) -> int:
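    """Clamp the requested number of rows to `MAX_NUM_ROWS`, notifying the user when the value is reduced."""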
    if num_rows > MAX_NUM_ROWS:
        num_rows = MAX_NUM_ROWS
        gr.Info(
            f"Number of rows is larger than the configured maximum. Setting number of rows to {MAX_NUM_ROWS}. Set environment variable `MAX_NUM_ROWS` to change this behavior."
        )
    return num_rows


def get_iframe(hub_repo_id: str) -> str:
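    """Return an iframe embedding the Hub dataset viewer, or an empty string if the repo does not exist."""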
    if not hub_repo_id:
        return ""
    if not repo_exists(repo_id=hub_repo_id, repo_type="dataset"):
        return ""
    url = f"https://huggingface.co/datasets/{hub_repo_id}/embed/viewer"
    iframe = f"""
    <iframe
        src="{url}"
        frameborder="0"
        width="100%"
        height="600px"
    ></iframe>
    """
    return iframe


def _get_valid_columns(dataframe: pd.DataFrame):
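    """Return the columns whose first value is a string and can therefore be used as document columns."""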
    doc_valid_columns = []
    for col in dataframe.columns:
        sample_val = dataframe[col].iloc[0]
        if isinstance(sample_val, str):
            doc_valid_columns.append(col)
    return doc_valid_columns


def load_dataset_from_hub(
    repo_id: str,
    num_rows: int = 10,
    token: Union[OAuthToken, None] = None,
    progress=gr.Progress(track_tqdm=True),
):
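    """Stream up to `num_rows` rows from the first config and split of a Hub dataset.

    Returns the rows as a pandas DataFrame together with a dropdown of candidate
    document columns.
    """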
    if not repo_id:
        raise gr.Error("Please provide a Hub repo ID")
    subsets = get_dataset_config_names(repo_id, token=token)
    splits = get_dataset_split_names(repo_id, subsets[0], token=token)
    ds = load_dataset(repo_id, subsets[0], split=splits[0], token=token, streaming=True)
    rows = []
    for idx, row in enumerate(tqdm(ds, desc="Loading the dataset", total=num_rows)):
        rows.append(row)
        # Stop once exactly `num_rows` rows have been collected
        if idx == num_rows - 1:
            break
    ds = Dataset.from_list(rows)
    dataframe = ds.to_pandas()
    doc_valid_columns = _get_valid_columns(dataframe)
    col_doc = doc_valid_columns[0] if doc_valid_columns else ""
    return (
        dataframe,
        gr.Dropdown(
            choices=doc_valid_columns,
            label="Documents column",
            value=col_doc,
            interactive=(False if col_doc == "" else True),
            multiselect=False,
        ),
    )


def preprocess_input_data(
    file_paths: list[str], num_rows: int, progress=gr.Progress(track_tqdm=True)
):
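    """Partition and chunk the uploaded files into a `filename`/`chunks` DataFrame.

    Stops once at least `num_rows` chunks have been collected and returns the DataFrame
    together with a dropdown preselecting the `chunks` column.
    """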
    if not file_paths:
        raise gr.Error("Please provide an input file")
    data = {}
    total_chunks = 0
    for file_path in tqdm(file_paths, desc="Processing files", total=len(file_paths)):
        partitioned_file = partition(filename=file_path)
        chunks = [str(chunk) for chunk in chunk_by_title(partitioned_file)]
        data[file_path] = chunks
        total_chunks += len(chunks)
        if total_chunks >= num_rows:
            break
    dataframe = pd.DataFrame.from_records(
        [(k, v) for k, values in data.items() for v in values],
        columns=["filename", "chunks"],
    )
    col_doc = "chunks"
    return (
        dataframe,
        gr.Dropdown(
            choices=["chunks"],
            label="Documents column",
            value=col_doc,
            interactive=(False if col_doc == "" else True),
            multiselect=False,
        ),
    )