Josh98 committed on
Commit fba9e95
1 Parent(s): 34bee80

temp update to exact_match

Files changed (1)
  1. nl2bash_m.py +30 -25
nl2bash_m.py CHANGED
@@ -91,8 +91,8 @@ class nl2bash_m(evaluate.Metric):
         cmd_weight = 0.65,
         opt_weight = 0.25,
         arg_weight = 0.15,
-        ignore_case=False,
-        ignore_numbers=False,
+        ignore_case=True,
+        ignore_numbers=True,
     ):
 
         predictions = np.asarray(predictions)
@@ -110,37 +110,42 @@ class nl2bash_m(evaluate.Metric):
 
         final_score = 0
         for pred, refs in zip(predictions, references):
+            best_score = 0
+            for ref in refs:
+                if pred == ref:
+                    best_score = 1
+            final_score += best_score
 
-            if len(pred) == 0 and min([len(ref) for ref in refs]) == 0:
-                score = 1
-            elif len(pred) == 0 or min([len(ref) for ref in refs]) == 0:
-                score = 0
-            else:
-                best_score = 0
-                for ref in refs:
-                    pred_words, ref_words = pred.split(), ref.split()
+            # if len(pred) == 0 and min([len(ref) for ref in refs]) == 0:
+            #     score = 1
+            # elif len(pred) == 0 or min([len(ref) for ref in refs]) == 0:
+            #     score = 0
+            # else:
+            #     best_score = 0
+            #     for ref in refs:
+            #         pred_words, ref_words = pred.split(), ref.split()
 
-                    # Get the cmd of predicted and ref
-                    cmd_corr = 1 if pred_words.pop(0)==ref_words.pop(0) else 0
+            #         # Get the cmd of predicted and ref
+            #         cmd_corr = 1 if pred_words.pop(0)==ref_words.pop(0) else 0
 
-                    # Get the option of predicted and ref
-                    pred_option = [ x for x in pred_words if x[0] == '-']
-                    ref_option = [ x for x in ref_words if x[0] == '-']
+            #         # Get the option of predicted and ref
+            #         pred_option = [ x for x in pred_words if x[0] == '-']
+            #         ref_option = [ x for x in ref_words if x[0] == '-']
 
-                    # Get the arguments of predicted and ref
-                    pred_args = [ x for x in pred_words if x[0] != '-']
-                    ref_args = [ x for x in ref_words if x[0] != '-']
+            #         # Get the arguments of predicted and ref
+            #         pred_args = [ x for x in pred_words if x[0] != '-']
+            #         ref_args = [ x for x in ref_words if x[0] != '-']
 
-                    # Calculate scores
-                    cmd_score = cmd_weight * cmd_corr
-                    opt_score = opt_weight * self.get_score(pred_option, ref_option)
-                    arg_score = arg_weight * self.get_score(pred_args, ref_args)
+            #         # Calculate scores
+            #         cmd_score = cmd_weight * cmd_corr
+            #         opt_score = opt_weight * self.get_score(pred_option, ref_option)
+            #         arg_score = arg_weight * self.get_score(pred_args, ref_args)
 
-                    score = cmd_score + opt_score + arg_score
-                    best_score = max(best_score, score)
+            #         score = cmd_score + opt_score + arg_score
+            #         best_score = max(best_score, score)
 
-            final_score += best_score
+            #     final_score += best_score
 
         final_score = final_score/len(predictions)
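The net effect of this commit is that nl2bash_m temporarily scores by exact string match instead of the weighted cmd/option/argument comparison, which is kept in place but commented out. Below is a minimal standalone sketch of the new scoring loop; the predictions and references shown are hypothetical examples, not data from the repo:

# Minimal sketch of the exact-match scoring introduced by this commit.
# The example inputs below are hypothetical, for illustration only.
predictions = ["ls -la", "grep -r foo ."]
references = [["ls -la", "ls -al"], ["grep -rn foo ."]]

final_score = 0
for pred, refs in zip(predictions, references):
    best_score = 0
    for ref in refs:
        if pred == ref:      # exact string equality against any reference
            best_score = 1
    final_score += best_score

final_score = final_score / len(predictions)
print(final_score)  # 0.5 -- only the first prediction matches one of its references exactly

Note that the new loop compares pred == ref directly; whether the ignore_case and ignore_numbers flags (whose defaults flip to True in this commit) normalize the strings earlier in the method is outside these hunks. As an aside, the commented-out weighted path uses weights 0.65 + 0.25 + 0.15 = 1.05, so a perfect prediction could score slightly above 1 there.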