anishka committed on
Commit
ddd15c1
1 Parent(s): 1174de2

Upload CodeSwitching-TE-EN.py

Browse files
Files changed (1) hide show
  1. CodeSwitching-TE-EN.py +55 -36
CodeSwitching-TE-EN.py CHANGED
@@ -48,18 +48,33 @@ class TeEnCodeSwitch(datasets.GeneratorBasedBuilder):
48
  "ner_tags": datasets.Sequence(
49
  datasets.features.ClassLabel(
50
  names=[
51
- "B-LOC",
52
- "B-MISC",
53
- "B-ORG",
54
- "B-PER",
55
- "I-LOC",
56
- "I-MISC",
57
- "I-ORG",
58
- "I-PER",
59
- "O"
 
 
 
 
 
 
 
 
 
60
  ]
61
  )
62
  ),
 
 
 
 
 
 
63
  }
64
  ),
65
  supervised_keys=None,
@@ -85,30 +100,34 @@ class TeEnCodeSwitch(datasets.GeneratorBasedBuilder):
85
  ]
86
 
87
  def _generate_examples(self, filepath):
88
- logger.info("⏳ Generating examples from = %s", filepath)
89
- with open(filepath, encoding="utf-8") as f:
90
- guid = 0
91
- tokens = []
92
- ner_tags = []
93
- for line in f:
94
- if line.startswith("-DOCSTART-") or line == "" or line == "\n":
95
- if tokens:
96
- yield guid, {
97
- "id": str(guid),
98
- "tokens": tokens,
99
- "ner_tags": ner_tags,
100
- }
101
- guid += 1
102
- tokens = []
103
- ner_tags = []
104
- else:
105
- # TeEnCodeSwitch tokens are space separated
106
- splits = line.split('\t')
107
- tokens.append(splits[0])
108
- ner_tags.append(splits[1].rstrip())
109
- # last example
110
- yield guid, {
111
- "id": str(guid),
112
- "tokens": tokens,
113
- "ner_tags": ner_tags,
114
- }
 
 
 
 
 
48
  "ner_tags": datasets.Sequence(
49
  datasets.features.ClassLabel(
50
  names=[
51
+ "NOUN",
52
+ "PUNCT",
53
+ "ADP",
54
+ "NUM",
55
+ "SYM",
56
+ "SCONJ",
57
+ "ADJ",
58
+ "PART",
59
+ "DET",
60
+ "CCONJ",
61
+ "PROPN",
62
+ "PRON",
63
+ "X",
64
+ "_",
65
+ "ADV",
66
+ "INTJ",
67
+ "VERB",
68
+ "AUX",
69
  ]
70
  )
71
  ),
72
+ "xpos": datasets.Sequence(datasets.Value("string")),
73
+ "feats": datasets.Sequence(datasets.Value("string")),
74
+ "head": datasets.Sequence(datasets.Value("string")),
75
+ "deprel": datasets.Sequence(datasets.Value("string")),
76
+ "deps": datasets.Sequence(datasets.Value("string")),
77
+ "misc": datasets.Sequence(datasets.Value("string")),
78
  }
79
  ),
80
  supervised_keys=None,
 
100
  ]
101
 
102
  def _generate_examples(self, filepath):
103
+ id = 0
104
+ for path in filepath:
105
+ with open(path, "r", encoding="utf-8") as data_file:
106
+ tokenlist = list(conllu.parse_incr(data_file))
107
+ for sent in tokenlist:
108
+ if "sent_id" in sent.metadata:
109
+ idx = sent.metadata["sent_id"]
110
+ else:
111
+ idx = id
112
+
113
+ tokens = [token["form"] for token in sent]
114
+
115
+ if "text" in sent.metadata:
116
+ txt = sent.metadata["text"]
117
+ else:
118
+ txt = " ".join(tokens)
119
+
120
+ yield id, {
121
+ "idx": str(idx),
122
+ "text": txt,
123
+ "tokens": [token["form"] for token in sent],
124
+ "lemmas": [token["lemma"] for token in sent],
125
+ "upos": [token["upos"] for token in sent],
126
+ "xpos": [token["xpos"] for token in sent],
127
+ "feats": [str(token["feats"]) for token in sent],
128
+ "head": [str(token["head"]) for token in sent],
129
+ "deprel": [str(token["deprel"]) for token in sent],
130
+ "deps": [str(token["deps"]) for token in sent],
131
+ "misc": [str(token["misc"]) for token in sent],
132
+ }
133
+ id += 1