pavan-naik committed
Commit 6ee4163 · verified · 1 Parent(s): e3488eb

Update dataset usage

Files changed (1)
  1. README.md +16 -6
README.md CHANGED
@@ -54,10 +54,22 @@ While `kn` translations are available in the NLLB dataset, their quality is poor
 ```python
 import pandas as pd
 
-# Load individual splits
-train_df = pd.read_csv('train.csv')
-test_df = pd.read_csv('test.csv')
-valid_df = pd.read_csv('validation.csv')
+splits = {
+    'train': 'data/train-00000-of-00001.parquet',
+    'validation': 'data/validation-00000-of-00001.parquet',
+    'test': 'data/test-00000-of-00001.parquet'
+}
+
+# Load all splits into DataFrames
+dataframes = {}
+for split, path in splits.items():
+    dataframes[split] = pd.read_parquet(f"hf://datasets/pavan-naik/mt-nllb-en-kn/{path}")
+
+# Access individual splits
+train_data = dataframes['train']
+test_data = dataframes['test']
+validation_data = dataframes['validation']
+
 ```
 
 ### Using HuggingFace 🤗 Datasets
@@ -72,8 +84,6 @@ train_data = dataset["train"]
 test_data = dataset["test"]
 validation_data = dataset["validation"]
 
-# Example usage
-print(train_data[0]) # Print first example
 ```
 
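The `load_dataset` call that produces `dataset` in the "Using HuggingFace 🤗 Datasets" section lies outside the second hunk's context, so it is not visible in this diff. A minimal sketch of that section's presumed usage, assuming the repo id `pavan-naik/mt-nllb-en-kn` inferred from the `hf://` parquet paths in the first hunk:

```python
from datasets import load_dataset

# Load every split of the English-Kannada dataset from the Hub
# (repo id inferred from the parquet paths in the first hunk)
dataset = load_dataset("pavan-naik/mt-nllb-en-kn")

# Mirrors the context lines shown in the second hunk
train_data = dataset["train"]
test_data = dataset["test"]
validation_data = dataset["validation"]
```

Note that the pandas snippet's `hf://` paths resolve through the Hub's fsspec filesystem integration, which generally requires the `huggingface_hub` package to be installed alongside pandas and pyarrow.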