system (HF staff) committed
Commit 1b91f37 · 1 Parent(s): f83c122

Update files from the datasets library (from 1.7.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.7.0

Files changed (2)
  1. README.md +5 -4
  2. senti_lex.py +3 -3
README.md CHANGED
@@ -60,7 +60,7 @@ languages:
  - mt
  - nl
  - nn
- - "no"
+ - 'no'
  - pl
  - pt
  - rm
@@ -264,6 +264,7 @@ task_categories:
  - text-classification
  task_ids:
  - sentiment-classification
+ paperswithcode_id: null
  ---
 
  # Dataset Card for SentiWS
@@ -271,12 +272,12 @@ task_ids:
  ## Table of Contents
  - [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
- - [Supported Tasks](#supported-tasks-and-leaderboards)
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
  - [Languages](#languages)
  - [Dataset Structure](#dataset-structure)
  - [Data Instances](#data-instances)
- - [Data Fields](#data-instances)
- - [Data Splits](#data-instances)
+ - [Data Fields](#data-fields)
+ - [Data Splits](#data-splits)
  - [Dataset Creation](#dataset-creation)
  - [Curation Rationale](#curation-rationale)
  - [Source Data](#source-data)
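Note (my reading, not stated in the commit): the only content change in the YAML header is the quote style around the Norwegian language code, which has to stay quoted in either style because YAML 1.1 parsers such as PyYAML read a bare `no` as the boolean `False`. A minimal illustration, assuming PyYAML is installed:

```python
import yaml  # PyYAML

# Unquoted `no` is resolved as a YAML 1.1 boolean, not the language code.
print(yaml.safe_load("langs: [mt, nl, nn, no]"))    # {'langs': ['mt', 'nl', 'nn', False]}
print(yaml.safe_load("langs: [mt, nl, nn, 'no']"))  # {'langs': ['mt', 'nl', 'nn', 'no']}
```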
senti_lex.py CHANGED
@@ -204,19 +204,19 @@ class SentiLex(datasets.GeneratorBasedBuilder):
             os.path.join(data_dir, "sentiment-lexicons", "positive_words_" + self.config.name + ".txt"),
         ]
 
-        for filepath in filepaths:
+        for file_idx, filepath in enumerate(filepaths):
 
             with open(filepath, encoding="utf-8") as f:
 
                 for id_, line in enumerate(f):
 
                     if "negative" in filepath:
-                        yield id_, {
+                        yield f"{file_idx}_{id_}", {
                             "word": line.strip(" \n"),
                             "sentiment": "negative",
                         }
                     elif "positive" in filepath:
-                        yield id_, {
+                        yield f"{file_idx}_{id_}", {
                             "word": line.strip(" \n"),
                             "sentiment": "positive",
                         }
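For context (my reading of the change, not stated in the commit): prefixing the key with the file index keeps the keys yielded by `_generate_examples` unique across the negative and positive lexicon files, where the bare line index `id_` would otherwise repeat in both files. A minimal, self-contained sketch of the keying pattern, using made-up word lists instead of the real lexicon `.txt` files:

```python
# Hypothetical stand-in data; the actual script reads the lexicon .txt files.
lexicons = [
    ("negative", ["bad", "awful"]),
    ("positive", ["good", "great"]),
]

def generate_examples(lexicons):
    for file_idx, (sentiment, words) in enumerate(lexicons):
        for id_, word in enumerate(words):
            # Composite keys "0_0", "0_1", "1_0", "1_1" never collide across files,
            # whereas the bare line index would repeat (0, 1, 0, 1).
            yield f"{file_idx}_{id_}", {"word": word, "sentiment": sentiment}

for key, example in generate_examples(lexicons):
    print(key, example)
```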