xixu-me committed on
Commit
26af90a
·
verified ·
1 Parent(s): f7aaf2b

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +3 -85
README.md CHANGED
@@ -261,9 +261,6 @@ with tempfile.TemporaryDirectory() as temp_dir:
261
  from collections import Counter
262
  import matplotlib.pyplot as plt
263
 
264
- # Load dataset
265
- dataset = load_dataset("xixu-me/fsl-product-classification")["train"]
266
-
267
  # Analyze class distribution
268
  class_counts = Counter(dataset['class_name'])
269
  print(f"Number of classes: {len(class_counts)}")
@@ -314,7 +311,6 @@ import random
314
  from collections import defaultdict
315
  import torch
316
  from torch.utils.data import DataLoader
317
- from datasets import load_dataset
318
 
319
  def create_few_shot_split(dataset, n_way=5, k_shot=5, n_query=15, seed=None):
320
  """
@@ -369,9 +365,6 @@ def create_few_shot_split(dataset, n_way=5, k_shot=5, n_query=15, seed=None):
369
 
370
  return support_set, query_set
371
 
372
- # Example usage
373
- dataset = load_dataset("xixu-me/fsl-product-classification")["train"]
374
-
375
  # Create a 5-way 5-shot episode
376
  support_set, query_set = create_few_shot_split(dataset, n_way=5, k_shot=5, n_query=15)
377
 
@@ -480,8 +473,7 @@ transform = transforms.Compose([
480
  ])
481
 
482
  # Load dataset
483
- hf_dataset = load_dataset("xixu-me/fsl-product-classification")["train"]
484
- fsl_dataset = FSLProductDataset(hf_dataset, transform=transform)
485
 
486
  # Create episode data
487
  support_data, query_data = fsl_dataset.create_episode_dataloader(
@@ -752,7 +744,6 @@ def create_cross_domain_split(dataset, train_ratio=0.6, val_ratio=0.2, test_rati
752
  }
753
 
754
  # Create cross-domain splits
755
- dataset = load_dataset("xixu-me/fsl-product-classification")["train"]
756
  splits = create_cross_domain_split(dataset)
757
 
758
  print(f"Train classes: {len(splits['train']['classes'])}")
@@ -988,60 +979,6 @@ def create_data_splits(dataset, split_ratios={'train': 0.8, 'test': 0.2},
988
  print(f"{split_name.capitalize()} split: {n_samples} samples, {n_classes} classes")
989
 
990
  return splits
991
-
992
- # Example usage of utility functions
993
- def analyze_dataset(dataset_path="data.tzst"):
994
- """
995
- Complete dataset analysis workflow
996
- """
997
- print("🔍 Analyzing FSL Product Classification Dataset")
998
- print("=" * 50)
999
-
1000
- # 1. Verify dataset integrity
1001
- print("\n1. Verifying dataset integrity...")
1002
- is_valid = verify_dataset_integrity(dataset_path)
1003
-
1004
- if not is_valid:
1005
- return
1006
-
1007
- # 2. Load dataset
1008
- print("\n2. Loading dataset...")
1009
- try:
1010
- dataset = load_dataset("xixu-me/fsl-product-classification")["train"]
1011
- print(f"✅ Successfully loaded dataset with {len(dataset)} samples")
1012
- except Exception as e:
1013
- print(f"❌ Error loading dataset: {e}")
1014
- return
1015
-
1016
- # 3. Generate statistics
1017
- print("\n3. Generating statistics...")
1018
- stats = dataset_statistics(dataset)
1019
-
1020
- print(f"📊 Dataset Statistics:")
1021
- print(f" Total samples: {stats['total_samples']:,}")
1022
- print(f" Total classes: {stats['total_classes']:,}")
1023
- print(f" Avg samples per class: {stats['avg_samples_per_class']:.1f}")
1024
- print(f" Min samples per class: {stats['min_samples_per_class']}")
1025
- print(f" Max samples per class: {stats['max_samples_per_class']}")
1026
- print(f" Std samples per class: {stats['std_samples_per_class']:.1f}")
1027
-
1028
- # 4. Plot distributions
1029
- print("\n4. Plotting class distribution...")
1030
- plot_class_distribution(dataset, top_k=30)
1031
-
1032
- # 5. Save dataset info
1033
- print("\n5. Saving dataset information...")
1034
- save_dataset_info(dataset)
1035
-
1036
- # 6. Create splits
1037
- print("\n6. Creating data splits...")
1038
- splits = create_data_splits(dataset, strategy='stratified')
1039
-
1040
- print("\n✅ Dataset analysis complete!")
1041
- return dataset, stats, splits
1042
-
1043
- # Run analysis
1044
- # dataset, stats, splits = analyze_dataset()
1045
  ```
1046
 
1047
  ## Troubleshooting
@@ -1069,26 +1006,7 @@ from tzst import extract_archive
1069
  extract_archive("data.tzst", "output/", streaming=True)
1070
  ```
1071
 
1072
- #### 2. Memory Issues with Large Dataset
1073
-
1074
- **Problem**: Out of memory when loading the full dataset
1075
-
1076
- **Solution**:
1077
-
1078
- ```python
1079
- # Use streaming dataset
1080
- from datasets import load_dataset
1081
- dataset = load_dataset("xixu-me/fsl-product-classification", streaming=True)
1082
-
1083
- # Or load in chunks
1084
- def load_dataset_chunked(chunk_size=1000):
1085
- dataset = load_dataset("xixu-me/fsl-product-classification")["train"]
1086
- for i in range(0, len(dataset), chunk_size):
1087
- chunk = dataset.select(range(i, min(i + chunk_size, len(dataset))))
1088
- yield chunk
1089
- ```
1090
-
1091
- #### 3. Non-continuous Class Labels
1092
 
1093
  **Problem**: Class labels are not continuous (0, 1, 2, ...)
1094
 
@@ -1107,7 +1025,7 @@ def map_labels(example):
1107
  dataset = dataset.map(map_labels)
1108
  ```
1109
 
1110
- #### 4. CUDA/GPU Issues
1111
 
1112
  **Problem**: CUDA out of memory during training
1113
 
 
261
  from collections import Counter
262
  import matplotlib.pyplot as plt
263
 
 
 
 
264
  # Analyze class distribution
265
  class_counts = Counter(dataset['class_name'])
266
  print(f"Number of classes: {len(class_counts)}")
 
311
  from collections import defaultdict
312
  import torch
313
  from torch.utils.data import DataLoader
 
314
 
315
  def create_few_shot_split(dataset, n_way=5, k_shot=5, n_query=15, seed=None):
316
  """
 
365
 
366
  return support_set, query_set
367
 
 
 
 
368
  # Create a 5-way 5-shot episode
369
  support_set, query_set = create_few_shot_split(dataset, n_way=5, k_shot=5, n_query=15)
370
 
 
473
  ])
474
 
475
  # Load dataset
476
+ fsl_dataset = FSLProductDataset(dataset, transform=transform)
 
477
 
478
  # Create episode data
479
  support_data, query_data = fsl_dataset.create_episode_dataloader(
 
744
  }
745
 
746
  # Create cross-domain splits
 
747
  splits = create_cross_domain_split(dataset)
748
 
749
  print(f"Train classes: {len(splits['train']['classes'])}")
 
979
  print(f"{split_name.capitalize()} split: {n_samples} samples, {n_classes} classes")
980
 
981
  return splits
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
982
  ```
983
 
984
  ## Troubleshooting
 
1006
  extract_archive("data.tzst", "output/", streaming=True)
1007
  ```
1008
 
1009
+ #### 2. Non-continuous Class Labels
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1010
 
1011
  **Problem**: Class labels are not continuous (0, 1, 2, ...)
1012
 
 
1025
  dataset = dataset.map(map_labels)
1026
  ```
1027
 
1028
+ #### 3. CUDA/GPU Issues
1029
 
1030
  **Problem**: CUDA out of memory during training
1031