# data_uploader.py
import streamlit as st
import pandas as pd
import numpy as np
from PIL import Image
import torch
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader
from datasets import Dataset as HFDataset, DatasetDict
from huggingface_hub import HfApi # For Hugging Face Hub interaction
import os
# Hugging Face Hub credentials
HF_TOKEN = os.getenv("HF_TOKEN")
REPO_ID = "louiecerv/american_sign_language" # Replace with your dataset repo name
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
st.write(f"Enabled GPU = {torch.cuda.is_available()}")
class MyDataset(Dataset):
    def __init__(self, x_df, y_df):
        self.xs = torch.tensor(x_df, dtype=torch.float32).to(device)  # Explicitly set dtype
        self.ys = torch.tensor(y_df, dtype=torch.long).to(device)  # Explicitly set dtype

    def __getitem__(self, idx):
        x = self.xs[idx]
        y = self.ys[idx]
        return x, y

    def __len__(self):
        return len(self.xs)
# Load the dataset and convert to Hugging Face Dataset
def load_and_convert_to_hf_dataset(x, y, split="train"):
    df = pd.DataFrame({"image": list(x), "label": y})  # Create a DataFrame
    hf_dataset = HFDataset.from_pandas(df)

    # Preprocess images (important for Hugging Face)
    def preprocess_function(examples):
        images = [np.array(img).reshape(28, 28) for img in examples["image"]]  # Reshape each flat row to 28x28

        # Convert to PIL images and apply transformations
        transform = transforms.Compose([
            transforms.Grayscale(num_output_channels=1),
            transforms.Resize((28, 28)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5], std=[0.5])
        ])
        transformed_images = []
        for image in images:
            image = Image.fromarray(image.astype('uint8'))
            transformed_images.append(transform(image))

        examples["pixel_values"] = torch.stack(transformed_images)  # Stack the images into a tensor
        return examples

    hf_dataset = hf_dataset.map(preprocess_function, batched=True, remove_columns=["image"])
    hf_dataset.set_format("torch")  # Set format to PyTorch
    return hf_dataset
def upload_dataset_to_hub(dataset, repo_id):
    api = HfApi(token=HF_TOKEN)
    api.create_repo(repo_id, repo_type="dataset", exist_ok=True)  # Create repo if it doesn't exist
    dataset.push_to_hub(repo_id, token=HF_TOKEN)  # Pass the token explicitly rather than relying on a cached login
    print(f"Dataset uploaded to {repo_id}")
def main():
    st.title("American Sign Language Dataset Uploader")

    about = """
## About This App
This app is designed to load, preprocess, and upload datasets to the Hugging Face Hub. The main functionalities are encapsulated in the following components:
### Custom Dataset Class
The `MyDataset` class inherits from `torch.utils.data.Dataset` and wraps the feature and label arrays as device-resident PyTorch tensors.
- **Initialization (`__init__`)**:
- Converts input dataframes `x_df` and `y_df` to PyTorch tensors with explicit data types (`float32` for features and `long` for labels).
- Moves the tensors to the specified device (e.g., GPU).
- **Get Item (`__getitem__`)**:
- Retrieves the feature (`x`) and label (`y`) tensors at a given index `idx`.
- **Length (`__len__`)**:
- Returns the length of the dataset.
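A minimal usage sketch (the arrays here are hypothetical stand-ins for the real CSV data):

```python
import numpy as np
from torch.utils.data import DataLoader

x = np.random.rand(100, 784).astype("float32")  # 100 flattened 28x28 images
y = np.random.randint(0, 24, size=100)          # integer class labels

dataset = MyDataset(x, y)
loader = DataLoader(dataset, batch_size=32, shuffle=True)
batch_x, batch_y = next(iter(loader))
print(batch_x.shape, batch_y.shape)  # torch.Size([32, 784]) torch.Size([32])
```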
### Load and Convert to Hugging Face Dataset
The `load_and_convert_to_hf_dataset` function converts input data into a Hugging Face dataset.
- **DataFrame Creation**:
- Creates a Pandas DataFrame from the input features (`x`) and labels (`y`).
- **Preprocessing Function**:
- Reshapes images to 28x28 pixels.
- Converts images to PIL format and applies transformations (grayscale, resize, tensor conversion, and normalization).
- Stacks the transformed images into a tensor.
- **Dataset Mapping**:
- Applies the preprocessing function to the dataset.
- Sets the dataset format to PyTorch.
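For example, a small sketch with synthetic data (the arrays are hypothetical; real rows come from the CSVs):

```python
import numpy as np

x = np.random.randint(0, 256, size=(10, 784))  # 10 flattened 28x28 images
y = np.random.randint(0, 24, size=10)

ds = load_and_convert_to_hf_dataset(x, y, split="train")
print(ds[0]["pixel_values"].shape)  # torch.Size([1, 28, 28])
```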
### Data Loading and Conversion
The app loads training and validation data from CSV files and converts them into Hugging Face datasets.
- **Training Data**:
- Loads data from `sign_mnist_train.csv`.
- Separates features and labels.
- Converts to a Hugging Face dataset.
- **Validation Data**:
- Loads data from `sign_mnist_valid.csv`.
- Separates features and labels.
- Converts to a Hugging Face dataset.
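The CSVs are assumed to follow the Sign Language MNIST layout: a `label` column followed by 784 pixel columns. The split step looks like this:

```python
import pandas as pd

train_df = pd.read_csv("data/asl_data/sign_mnist_train.csv")
y_train = train_df.pop('label').values  # 1-D label array
x_train = train_df.values               # remaining 784 pixel columns
print(x_train.shape, y_train.shape)     # e.g. (27455, 784) (27455,)
```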
### Upload Dataset to Hugging Face Hub
The `upload_dataset_to_hub` function uploads the dataset to the Hugging Face Hub.
- **Repository Creation**:
- Creates a repository if it doesn't exist.
- **Dataset Upload**:
- Pushes the dataset to the specified repository.
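A sketch of a call (the repo id is illustrative; `HF_TOKEN` must be set in the environment):

```python
upload_dataset_to_hub(full_dataset, "your-username/american_sign_language")
```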
### Main Function
The `main` function orchestrates the entire process.
- Loads and preprocesses training and validation data.
- Creates a `DatasetDict` containing both datasets.
- Uploads the dataset to the Hugging Face Hub.
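Once pushed, the dataset can be loaded back from the Hub with `datasets.load_dataset`:

```python
from datasets import load_dataset

ds = load_dataset("louiecerv/american_sign_language")
print(ds)  # DatasetDict with "train" and "validation" splits
```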
### Execution
The script is executed by calling the `main` function if the script is run as the main module.
```python
if __name__ == "__main__":
    main()
```
"""
    with st.expander("About", expanded=True):
        st.write(about)

    st.write("## Instructions")
    st.write("Do not run this code on Hugging Face. Download the code and run it on your local machine.")
    st.write("Make sure you have the required files in the data/asl_data folder.")
    st.stop()  # Halts the script here; remove this line to run the upload steps below locally
    try:
        # Load and convert dataframes to Hugging Face datasets
        train_df = pd.read_csv("data/asl_data/sign_mnist_train.csv")
        y_train = train_df.pop('label').values
        x_train = train_df.values

        valid_df = pd.read_csv("data/asl_data/sign_mnist_valid.csv")
        y_valid = valid_df.pop('label').values
        x_valid = valid_df.values

        train_dataset = load_and_convert_to_hf_dataset(x_train, y_train, "train")
        valid_dataset = load_and_convert_to_hf_dataset(x_valid, y_valid, "validation")

        # Create a DatasetDict
        full_dataset = DatasetDict({
            "train": train_dataset,
            "validation": valid_dataset
        })

        upload_dataset_to_hub(full_dataset, REPO_ID)  # Upload the DatasetDict
        st.write("Data upload complete.")
    except Exception as e:
        st.error(f"An error occurred: {e}")
if __name__ == "__main__":
    main()