chrisjay committed
Commit 587a066 · 1 Parent(s): b6c30f8

Update README.md

Files changed (1): README.md (+9 -9)
README.md CHANGED
@@ -22,9 +22,9 @@ model-index:
   type: automatic-speech-recognition

   dataset:
-  -name: fon
-  -type: fon_dataset
-  -args: fon
+  - name: fon
+  - type: fon_dataset
+  - args: fon

   metrics:
   - name: Test WER
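Note on this hunk: YAML only treats `- name: fon` (dash followed by a space) as a list item; the old `-name: fon` parses as a mapping key literally called `-name`, so the Hub's model-index metadata for the dataset could not be read as a list. The `+` lines restore proper block-sequence syntax.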
@@ -139,7 +139,7 @@ for root, dirs, files in os.walk(test_path):
   test_dataset = load_dataset("json", data_files=[os.path.join(root,i) for i in files],split="train")

  #Remove unnecessary chars
- chars_to_ignore_regex = '[\\,\\?\\.\\!\\-\\;\\:\\"\\“\\%\\‘\\”\\�]'
+ chars_to_ignore_regex = '[\\\\,\\\\?\\\\.\\\\!\\\\-\\\\;\\\\:\\\\"\\\\“\\\\%\\\\‘\\\\”\\\\�]'
  def remove_special_characters(batch):
    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower() + " "
    return batch
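The `-`/`+` lines here only change backslash doubling in the card's markdown source; whether written as `\,` or `\\,`, the compiled pattern strips the same punctuation, since those escapes are redundant inside a character class. A minimal sketch of the cleanup step as it runs (the sample sentence is illustrative, not taken from the Fon dataset):

```python
import re

# Punctuation to strip before scoring; the escapes inside the
# character class are redundant but harmless.
chars_to_ignore_regex = '[\\,\\?\\.\\!\\-\\;\\:\\"\\“\\%\\‘\\”\\�]'

def remove_special_characters(batch):
    # Drop punctuation, lowercase, and append a trailing space,
    # mirroring the function in the diff.
    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower() + " "
    return batch

print(remove_special_characters({"sentence": "Kú àbɔ̀, mǐ fɔ́n!"}))
# -> {'sentence': 'kú àbɔ̀ mǐ fɔ́n '}
```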
@@ -155,15 +155,15 @@ model = Wav2Vec2ForCTC.from_pretrained("chrisjay/wav2vec2-large-xlsr-53-fon")
  # Preprocessing the datasets.
  # We need to read the audio files as arrays
  def speech_file_to_array_fn(batch):
- \tspeech_array, sampling_rate = torchaudio.load(batch["path"])
- \tbatch["speech"]=speech_array.squeeze().numpy()
- \treturn batch
+ \\tspeech_array, sampling_rate = torchaudio.load(batch["path"])
+ \\tbatch["speech"]=speech_array.squeeze().numpy()
+ \\treturn batch

  test_dataset = test_dataset.map(speech_file_to_array_fn)
  inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)

  with torch.no_grad():
- \tlogits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
+ \\tlogits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits

  predicted_ids = torch.argmax(logits, dim=-1)
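The literal `\t` (old) and `\\t` (new) sequences in this hunk are markdown-escaping artifacts in the card, not intended code. A minimal runnable sketch of what the snippet does, with real indentation; the model id comes from the hunk header, while loading the processor from the same repo and reusing the `test_dataset` built earlier are assumptions:

```python
import torch
import torchaudio
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

# Assumption: the processor lives in the same repo as the model.
processor = Wav2Vec2Processor.from_pretrained("chrisjay/wav2vec2-large-xlsr-53-fon")
model = Wav2Vec2ForCTC.from_pretrained("chrisjay/wav2vec2-large-xlsr-53-fon")

def speech_file_to_array_fn(batch):
    # Read the audio file for this example and keep the raw waveform
    # as a 1-D numpy array.
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = speech_array.squeeze().numpy()
    return batch

# `test_dataset` is the JSON dataset loaded in the earlier hunk.
test_dataset = test_dataset.map(speech_file_to_array_fn)

# Batch the first two utterances; the model expects 16 kHz audio.
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000,
                   return_tensors="pt", padding=True)

with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits

predicted_ids = torch.argmax(logits, dim=-1)
print("Prediction:", processor.batch_decode(predicted_ids))
```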
@@ -186,7 +186,7 @@ import re
  for root, dirs, files in os.walk(test_path):
    test_dataset = load_dataset("json", data_files=[os.path.join(root,i) for i in files],split="train")

- chars_to_ignore_regex = '[\\,\\?\\.\\!\\-\\;\\:\\"\\“\\%\\‘\\”\\�]'
+ chars_to_ignore_regex = '[\\\\,\\\\?\\\\.\\\\!\\\\-\\\\;\\\\:\\\\"\\\\“\\\\%\\\\‘\\\\”\\\\�]'
  def remove_special_characters(batch):
    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower() + " "
    return batch
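This hunk makes the same escaping change inside the card's WER evaluation section. For context, a sketch of how the cleaned sentences typically feed a WER score in these XLSR cards; the `evaluate` library is a stand-in assumption (the card itself may use `datasets.load_metric`), and the names reused here come from the sketches above:

```python
import evaluate  # assumption: pip install evaluate jiwer

wer_metric = evaluate.load("wer")

# Clean the reference transcripts with the same regex used above,
# then score the greedy decodes against them.
test_dataset = test_dataset.map(remove_special_characters)
predictions = processor.batch_decode(predicted_ids)
references = test_dataset["sentence"][:2]

print("WER: {:.2f}%".format(
    100 * wer_metric.compute(predictions=predictions, references=references)))
```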
 