Josh98 committed
Commit d488842
1 Parent(s): fba9e95

revert back to nl2bash

Files changed (1):
  nl2bash_m.py (+24, −30)
nl2bash_m.py CHANGED
@@ -107,45 +107,39 @@ class nl2bash_m(evaluate.Metric):
         predictions = np.char.translate(predictions, table=repl_table)
         references = np.char.translate(references, table=repl_table)
 
-
+
         final_score = 0
-        for pred, refs in zip(predictions, references):
+        for pred, refs in zip(predictions, references):
             best_score = 0
-            for ref in refs:
-                if pred == ref:
-                    best_score = 1
-            final_score += best_score
-
-            # if len(pred) == 0 and min([len(ref) for ref in refs]) == 0:
-            #     score = 1
-            # elif len(pred) == 0 or min([len(ref) for ref in refs]) == 0:
-            #     score = 0
-            # else:
-            #     best_score = 0
-            #     for ref in refs:
-            #         pred_words, ref_words = pred.split(), ref.split()
+            if len(pred) == 0 and min([len(ref) for ref in refs]) == 0:
+                best_score = 1
+            elif len(pred) == 0 or min([len(ref) for ref in refs]) == 0:
+                best_score = 0
+            else:
+                for ref in refs:
+                    pred_words, ref_words = pred.split(), ref.split()
 
 
-            #         # Get the cmd of predicted and ref
-            #         cmd_corr = 1 if pred_words.pop(0)==ref_words.pop(0) else 0
+                    # Get the cmd of predicted and ref
+                    cmd_corr = 1 if pred_words.pop(0)==ref_words.pop(0) else 0
 
-            #         # Get the option of predicted and ref
-            #         pred_option = [ x for x in pred_words if x[0] == '-']
-            #         ref_option = [ x for x in ref_words if x[0] == '-']
+                    # Get the option of predicted and ref
+                    pred_option = [ x for x in pred_words if x[0] == '-']
+                    ref_option = [ x for x in ref_words if x[0] == '-']
 
-            #         # Get the arguments of predicted and ref
-            #         pred_args = [ x for x in pred_words if x[0] != '-']
-            #         ref_args = [ x for x in ref_words if x[0] != '-']
+                    # Get the arguments of predicted and ref
+                    pred_args = [ x for x in pred_words if x[0] != '-']
+                    ref_args = [ x for x in ref_words if x[0] != '-']
 
-            #         # Calculate scores
-            #         cmd_score = cmd_weight * cmd_corr
-            #         opt_score = opt_weight * self.get_score(pred_option, ref_option)
-            #         arg_score = arg_weight * self.get_score(pred_args, ref_args)
+                    # Calculate scores
+                    cmd_score = cmd_weight * cmd_corr
+                    opt_score = opt_weight * self.get_score(pred_option, ref_option)
+                    arg_score = arg_weight * self.get_score(pred_args, ref_args)
 
-            #         score = cmd_score + opt_score + arg_score
-            #         best_score = max(best_score, score)
+                    score = cmd_score + opt_score + arg_score
+                    best_score = max(best_score, score)
 
-            # final_score += best_score
+            final_score += best_score
 
         final_score = final_score/len(predictions)
 
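The commit drops the temporary exact-match scoring and restores the nl2bash-style weighted score over command, options, and arguments. As a minimal sketch, the snippet below mirrors the restored control flow outside the evaluate.Metric class; the weight values and the get_score helper are assumptions, since the hunk references cmd_weight, opt_weight, arg_weight, and self.get_score without showing their definitions (a plausible get_score is the fraction of reference tokens recovered).

# Minimal sketch of the restored nl2bash-style scoring. The control flow
# mirrors the added lines of the hunk; the weights and get_score are assumed
# stand-ins for definitions that live elsewhere in nl2bash_m.py.

cmd_weight, opt_weight, arg_weight = 0.4, 0.3, 0.3  # assumed split, sums to 1


def get_score(pred_items, ref_items):
    # Assumed helper: fraction of reference tokens that the prediction recovers.
    if not ref_items:
        return 1.0 if not pred_items else 0.0
    return len(set(pred_items) & set(ref_items)) / len(set(ref_items))


def nl2bash_score(pred, refs):
    # An empty prediction only matches an empty reference.
    if len(pred) == 0 and min(len(ref) for ref in refs) == 0:
        return 1.0
    if len(pred) == 0 or min(len(ref) for ref in refs) == 0:
        return 0.0
    best_score = 0.0
    for ref in refs:
        pred_words, ref_words = pred.split(), ref.split()
        # The first token is the command itself; the rest are options/arguments.
        cmd_corr = 1 if pred_words.pop(0) == ref_words.pop(0) else 0
        pred_option = [x for x in pred_words if x[0] == '-']
        ref_option = [x for x in ref_words if x[0] == '-']
        pred_args = [x for x in pred_words if x[0] != '-']
        ref_args = [x for x in ref_words if x[0] != '-']
        score = (cmd_weight * cmd_corr
                 + opt_weight * get_score(pred_option, ref_option)
                 + arg_weight * get_score(pred_args, ref_args))
        best_score = max(best_score, score)
    return best_score


print(nl2bash_score("ls -la /tmp", ["ls -la /tmp"]))  # 1.0: cmd, options, args all match
print(nl2bash_score("ls -l /tmp", ["ls -la /tmp"]))   # ≈0.7: cmd/arg credit, no option overlap

Note that pred_words.pop(0) compares only the leading token for command credit, and under the set-based helper assumed here the option and argument terms are order-insensitive.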