asahi417 committed
Commit f88c143 · 1 Parent(s): 50f0349
experiments/huggingface_ops.py CHANGED
@@ -2,8 +2,10 @@ from huggingface_hub import HfApi, ModelFilter
 from pprint import pprint
 
 api = HfApi()
-models = api.list_models(filter=ModelFilter(author='tweettemposhift'))
-models_filtered = [i.modelId for i in models if 'topic-' in i.modelId]
-
-pprint(sorted([i for i in models_filtered if i.endswith('twitter-roberta-base-2019-90m')]))
-pprint(sorted([i for i in models_filtered if i.endswith('twitter-roberta-base-dec2020')]))
+models = api.list_models(filter=ModelFilter(author='vocabtrimmer'))
+models_filtered = [i.modelId for i in models if 'qg-' in i.modelId]
+pprint(sorted(models_filtered))
+# models = api.list_models(filter=ModelFilter(author='tweettemposhift'))
+# models_filtered = [i.modelId for i in models if 'topic-' in i.modelId]
+# pprint(sorted([i for i in models_filtered if i.endswith('twitter-roberta-base-2019-90m')]))
+# pprint(sorted([i for i in models_filtered if i.endswith('twitter-roberta-base-dec2020')]))
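Note: the script lists every model owned by a Hub author and keeps the ids containing a marker substring. On recent huggingface_hub releases ModelFilter is deprecated and HfApi.list_models() accepts author= directly; a minimal sketch of the same listing under that assumption (ModelInfo.id supersedes the older modelId attribute):

from pprint import pprint
from huggingface_hub import HfApi

api = HfApi()
# Iterate every repo owned by the author, then keep the question-generation ids.
models = api.list_models(author='vocabtrimmer')
models_filtered = [m.id for m in models if 'qg-' in m.id]
pprint(sorted(models_filtered))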
experiments/model_finetuning_ner.py CHANGED
@@ -84,22 +84,6 @@ def main(
 
     tokenizer = AutoTokenizer.from_pretrained(model, add_prefix_space=True, use_fast=True)
 
-    def align_labels_with_tokens(labels, word_ids):
-        new_labels = []
-        current_word = None
-        for word_id in word_ids:
-            if word_id is None:
-                label = -100
-            elif word_id != current_word:
-                current_word = word_id
-                label = labels[word_id]
-            else:
-                label = labels[word_id]
-                if ID2LABEL[label].startswith("B-"):  # If the label is B-XXX we change it to I-XXX
-                    label = LABEL2ID[ID2LABEL[label].replace("B-", "I-")]
-            new_labels.append(label)
-        return new_labels
-
     def tokenize_and_align_labels(examples):
         tokens = [[preprocess(model, w) for w in t] for t in examples["text_tokenized"]]
         tokenized_inputs = tokenizer(
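The helper deleted above is the standard fast-tokenizer alignment: word_ids() maps every sub-token back to its source word, the first sub-token of a word keeps the word-level label, and continuation sub-tokens have a B-XXX label rewritten to its I-XXX counterpart. A self-contained illustration of that technique; the checkpoint and label maps here are toy stand-ins, not the script's:

from transformers import AutoTokenizer

ID2LABEL = {0: "O", 1: "B-LOC", 2: "I-LOC"}  # toy label maps for illustration
LABEL2ID = {v: k for k, v in ID2LABEL.items()}

tokenizer = AutoTokenizer.from_pretrained("roberta-base", add_prefix_space=True, use_fast=True)
words, word_labels = ["Kathmandu", "valley"], [1, 2]  # B-LOC, I-LOC

encoding = tokenizer(words, is_split_into_words=True)
aligned, current_word = [], None
for word_id in encoding.word_ids():
    if word_id is None:                      # special tokens get the ignore index
        aligned.append(-100)
    elif word_id != current_word:            # first sub-token keeps the word's label
        current_word = word_id
        aligned.append(word_labels[word_id])
    else:                                    # later sub-tokens: B-XXX becomes I-XXX
        label = word_labels[word_id]
        if ID2LABEL[label].startswith("B-"):
            label = LABEL2ID[ID2LABEL[label].replace("B-", "I-")]
        aligned.append(label)
print(aligned)  # e.g. [-100, 1, 2, 2, -100], depending on how the words split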
@@ -110,11 +94,6 @@ def main(
             max_length=128)
         all_labels = examples["gold_label_sequence"]
         new_labels = []
-        # if tokenizer.is_fast:
-        #     for ind, labels in enumerate(all_labels):
-        #         word_ids = tokenized_inputs.word_ids(ind)
-        #         new_labels.append(align_labels_with_tokens(labels, word_ids))
-        # else:
         for token, label in zip(tokens, all_labels):
             tmp_labels = [-100]
             for to, la in zip(token, label):
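What the commit keeps is the manual alignment path: each pre-processed word is re-tokenized on its own and its label repeated over that word's sub-tokens, with -100 on the special positions. The hunk cuts off before the loop body, so the body below is an assumed reconstruction for illustration rather than the script's actual code:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("roberta-base", add_prefix_space=True, use_fast=True)
token, label = ["Kathmandu", "valley"], [1, 2]  # one pre-processed sentence

tmp_labels = [-100]                              # BOS token
for to, la in zip(token, label):
    # Assumption: repeat the word-level label across the word's sub-tokens.
    tmp_labels += [la] * len(tokenizer.tokenize(to))
tmp_labels.append(-100)                          # EOS token
print(tmp_labels)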
@@ -138,8 +117,8 @@
         predictions = np.argmax(logits, axis=-1)
         labels_new, predictions_new = [], []
         for label, prediction in zip(labels, predictions):
-            label = [la for la in label if la != -100]
             prediction = [p for la, p in zip(label, prediction) if la != -100]
+            label = [la for la in label if la != -100]
             labels_new.append([ID2LABEL[la] for la in label])
             predictions_new.append([ID2LABEL[p] for p in prediction])
         return seqeval.compute(predictions=predictions_new, references=labels_new)
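The last hunk fixes an ordering bug in the metric computation: the old code shortened label before zipping it with prediction, so the zip paired filtered labels against unfiltered predictions and kept the wrong entries. A toy check of the two orderings:

label = [-100, 1, 2, -100]   # -100 marks special tokens / continuation sub-tokens
prediction = [0, 1, 2, 0]

# Old order: label is filtered first, so zip() pairs it against the wrong predictions.
old_label = [la for la in label if la != -100]                               # [1, 2]
old_prediction = [p for la, p in zip(old_label, prediction) if la != -100]  # [0, 1] -- wrong
# New order: filter predictions against the unfiltered labels, then filter labels.
new_prediction = [p for la, p in zip(label, prediction) if la != -100]      # [1, 2]
new_label = [la for la in label if la != -100]                              # [1, 2]

assert new_prediction == [1, 2] and old_prediction == [0, 1]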