import collections
import fnmatch
import gc
import logging
from typing import Iterator, List, Literal, Union

import torch
import transformers

# Module-level logger used by MultiChoice below.
eval_logger = logging.getLogger(__name__)

def chunks(iterable, n):
    # Reconstructed enclosing generator; the source preserved only its tail.
    # Batches items from `iterable` into lists of up to `n` elements.
    arr = []
    for x in iterable:
        arr.append(x)
        if len(arr) == n:
            yield arr
            arr = []
    if arr:  # flush the final, partially filled batch
        yield arr

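# Illustrative usage sketch (not from the source): the last chunk may be
# shorter than `n`.
def _demo_chunks() -> None:
    assert list(chunks(range(5), 2)) == [[0, 1], [2, 3], [4]]
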
class MultiChoice:
    """Validates comma-separated task names against a list of known choices,
    with fnmatch-style wildcard support (e.g. "arc_*")."""

    def __init__(self, choices) -> None:
        self.choices = choices

    # Membership test for a comma-separated string of (possibly wildcarded)
    # task names; logs the available choices before rejecting an unknown name.
    def __contains__(self, values) -> bool:
        for value in values.split(","):
            if len(fnmatch.filter(self.choices, value)) == 0:
                eval_logger.info("Available tasks to choose:")
                for choice in self.choices:
                    eval_logger.info(f"  - {choice}")
                raise ValueError(f"'{value}' is not in task list")
        return True

    def __iter__(self) -> Iterator:
        for choice in self.choices:
            yield choice

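# Illustrative usage sketch (not from the source): MultiChoice works as the
# ``choices`` argument to argparse, which only needs ``in`` and iteration.
# The task names here are assumptions for the example.
def _demo_multichoice() -> None:
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--tasks", choices=MultiChoice(["hellaswag", "arc_easy", "arc_challenge"]))
    args = parser.parse_args(["--tasks", "hellaswag,arc_*"])  # wildcard matches both arc tasks
    print(args.tasks)
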
class Grouper:
    """Groups the elements of ``arr`` by the key ``fn(x)`` and can later
    scatter grouped results back into the original ordering of ``arr``."""

    def __init__(self, arr, fn) -> None:
        self.size = len(arr)
        # Tag each element with its original index so ordering can be restored.
        arr = list(enumerate(arr))

        def group_return_dict(arr, fn):
            res = collections.defaultdict(list)
            for ob in arr:
                res[fn(ob)].append(ob)
            return res

        # Group (index, value) pairs by the key of the value.
        arr = group_return_dict(arr, lambda x: fn(x[1]))
        self.arr = arr
        self._grouped = None

    def get_grouped(self):
        # Return grouped values with indices stripped; cached after first call.
        if self._grouped:
            return self._grouped
        grouped = {}
        for key in self.arr.keys():
            grouped[key] = [y[1] for y in self.arr[key]]
        self._grouped = grouped
        return grouped

    def get_original(self, grouped_dict):
        # Scatter per-group results back into their original positions.
        res = [None] * self.size
        cov = [False] * self.size
        assert grouped_dict.keys() == self.arr.keys()
        for key in grouped_dict.keys():
            for (ind, _), v in zip(self.arr[key], grouped_dict[key]):
                res[ind] = v
                cov[ind] = True
        assert all(cov), "some original positions were never covered"
        return res

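# Illustrative usage sketch (not from the source): group values, transform
# each group, then restore the transformed results to the original order.
def _demo_grouper() -> None:
    g = Grouper([3, 1, 4, 1, 5], fn=lambda x: x % 2)  # group by parity
    grouped = g.get_grouped()  # {1: [3, 1, 1, 5], 0: [4]}
    doubled = {k: [v * 2 for v in vs] for k, vs in grouped.items()}
    assert g.get_original(doubled) == [6, 2, 8, 2, 10]
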
def pad_and_concat(max_length: int, tensors: List[torch.Tensor], padding_side: Literal["right", "left"] = "right") -> torch.Tensor:
    """Zero-pad each tensor to ``max_length`` on ``padding_side`` and stack
    them into a single [batch, max_length] tensor."""
    assert padding_side == "left" or padding_side == "right", f"Unrecognized padding type: '{padding_side}' not 'left' or 'right'"
    for i, tensor in enumerate(tensors):
        if len(tensor.shape) == 2:
            tensor = tensor.squeeze(0)  # accept [1, seq] inputs as well as [seq]
        tensor_len = tensor.shape[0]
        if tensor_len < max_length:
            if padding_side == "right":
                # Pad on the right with zeros.
                tensors[i] = torch.cat([tensor, torch.zeros(max_length - tensor_len, dtype=torch.long, device=tensor.device)], dim=0).unsqueeze(0)
            else:
                # Pad on the left with zeros.
                tensors[i] = torch.cat([torch.zeros(max_length - tensor_len, dtype=torch.long, device=tensor.device), tensor], dim=0).unsqueeze(0)
        else:
            tensors[i] = tensor.unsqueeze(0)
    return torch.cat(tensors, dim=0)

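# Illustrative usage sketch (not from the source): left-pad two token-id
# sequences of different lengths into one batch tensor.
def _demo_pad_and_concat() -> None:
    a = torch.tensor([1, 2, 3], dtype=torch.long)
    b = torch.tensor([4, 5], dtype=torch.long)
    batch = pad_and_concat(4, [a, b], padding_side="left")
    assert batch.tolist() == [[0, 1, 2, 3], [0, 0, 4, 5]]
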
def clear_torch_cache() -> None:
    # Drop Python-side references first, then free cached CUDA memory.
    gc.collect()
    torch.cuda.empty_cache()

def get_dtype(dtype: Union[str, torch.dtype]) -> torch.dtype:
    """Convert a dtype name such as "float16" to a ``torch.dtype``;
    ``torch.dtype`` values and the string "auto" pass through unchanged."""
    if isinstance(dtype, str) and dtype != "auto":
        # e.g. "float16" -> torch.float16
        _torch_dtype = getattr(torch, dtype)
    else:
        _torch_dtype = dtype
    return _torch_dtype

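# Illustrative usage sketch (not from the source):
def _demo_get_dtype() -> None:
    assert get_dtype("float16") is torch.float16
    assert get_dtype(torch.bfloat16) is torch.bfloat16
    assert get_dtype("auto") == "auto"  # "auto" is passed through as a string
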
class MultiTokenEOSCriteria(transformers.StoppingCriteria):
    """Stopping criterion that halts generation once every sequence in the
    batch has produced the (possibly multi-token) stop string ``sequence``."""

    def __init__(self, sequence: str, tokenizer: transformers.PreTrainedTokenizer, initial_decoder_input_length: int, batch_size: int) -> None:
        self.initial_decoder_input_length = initial_decoder_input_length
        self.done_tracker = [False] * batch_size
        self.sequence = sequence
        self.sequence_ids = tokenizer.encode(sequence, add_special_tokens=False)
        # Look back two extra tokens, since the stop string may tokenize
        # differently depending on the tokens that precede it.
        self.sequence_id_len = len(self.sequence_ids) + 2
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs) -> bool:
        # Only inspect tokens generated after the initial prompt.
        lookback_ids_batch = input_ids[:, self.initial_decoder_input_length:]
        lookback_ids_batch = lookback_ids_batch[:, -self.sequence_id_len:]
        lookback_tokens_batch = self.tokenizer.batch_decode(lookback_ids_batch)
        for i, done in enumerate(self.done_tracker):
            if not done:
                self.done_tracker[i] = self.sequence in lookback_tokens_batch[i]
        # Stop only once every sequence in the batch has hit the stop string.
        return False not in self.done_tracker

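# Illustrative usage sketch (not from the source): wrap the criterion in a
# ``StoppingCriteriaList`` and pass it to ``generate``. The model, prompt, and
# stop string below are assumptions for the example.
def _demo_stopping_criteria(model, tokenizer) -> None:
    context = tokenizer("Q: 2+2?\nA:", return_tensors="pt")
    criteria = transformers.StoppingCriteriaList(
        [MultiTokenEOSCriteria("\n\n", tokenizer, context["input_ids"].shape[1], batch_size=1)]
    )
    model.generate(**context, stopping_criteria=criteria, max_new_tokens=32)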