text
stringlengths
0
15.3k
return False not in self.done_tracker
def stop_sequences_criteria(tokenizer: transformers.PreTrainedTokenizer, stop_sequences: List[str], initial_decoder_input_length: int, batch_size: int) -> transformers.StoppingCriteriaList:
    """Build a StoppingCriteriaList with one MultiTokenEOSCriteria per stop sequence.

    Each criterion watches the generated suffix (everything after
    ``initial_decoder_input_length``) of every sequence in the batch for its
    stop string.
    """
    criteria = [
        MultiTokenEOSCriteria(sequence, tokenizer, initial_decoder_input_length, batch_size)
        for sequence in stop_sequences
    ]
    return transformers.StoppingCriteriaList(criteria)
def undistribute(iterable):
    """Re-interleave round-robin-distributed sublists back into one flat list.

    Given ``iterable`` of sublists produced by dealing items out round-robin
    (e.g. ``[[1, 4], [2, 5], [3]]``), returns the original flat order
    (``[1, 2, 3, 4, 5]``-style interleaving): the first element of every
    sublist, then the second of every sublist, and so on, skipping sublists
    that have run out.

    Fix over the previous version: a private sentinel is used as the
    ``zip_longest`` fill value, so genuine ``None`` elements in the data are
    preserved instead of being silently dropped by the old
    ``if x is not None`` filter.
    """
    _FILL = object()  # unique sentinel; cannot collide with any real element
    return [
        x
        for x in itertools.chain.from_iterable(
            itertools.zip_longest(*[list(sub) for sub in iterable], fillvalue=_FILL)
        )
        if x is not _FILL
    ]
def retry_on_specific_exceptions(on_exceptions: List[Type[Exception]], max_retries: Optional[int]=None, backoff_time: float=3.0, backoff_multiplier: float=1.5, on_exception_callback: Optional[Callable[[Exception, float], Any]]=None):
    """Decorator factory that retries a function on the given exception types.

    Args:
        on_exceptions: exception classes that trigger a retry; anything else
            propagates immediately.
        max_retries: total number of attempts allowed; ``None`` retries forever.
        backoff_time: seconds to sleep before the first retry.
        backoff_multiplier: factor applied to the sleep time after each retry.
        on_exception_callback: optional hook called with ``(exception,
            sleep_time)`` before each retry sleep.

    Fixes over the previous version:
    - When ``max_retries`` is exhausted, the last exception is re-raised
      instead of the wrapper silently returning ``None``.
    - No pointless sleep (or callback) after the final failed attempt.
    - The exception-type tuple is built once, not on every iteration.
    """
    def decorator(func: Callable):
        exc_types = tuple(on_exceptions)  # hoisted: invariant across attempts

        @wraps(func)
        def wrapper(*args, **kwargs):
            sleep_time = backoff_time
            attempt = 0
            while True:
                try:
                    return func(*args, **kwargs)
                except exc_types as e:
                    attempt += 1
                    if max_retries is not None and attempt >= max_retries:
                        raise  # retries exhausted: surface the real failure
                    if on_exception_callback is not None:
                        on_exception_callback(e, sleep_time)
                    time.sleep(sleep_time)
                    sleep_time *= backoff_multiplier
        return wrapper
    return decorator
class Collator:
def __init__(self, arr: List, sort_fn: Callable=lambda x: x, group_fn: Callable=lambda x: x[1], group_by: Union[Literal['gen_kwargs', 'contexts'], None]=None) -> None:
    """Index ``arr`` for reorder/group/restore bookkeeping.

    Entries are stored as ``(original_index, value)`` pairs so the original
    order can be reconstructed later; the user-supplied ``sort_fn`` and
    ``group_fn`` only ever see the value half of each pair.
    """
    self._group_by = group_by
    # Adapt the user callbacks to operate on (index, value) pairs.
    self._sort_fn = lambda pair: sort_fn(pair[1])
    self._group_fn = lambda pair: group_fn(pair[1])
    self._reorder_indices: List = []
    self._size = len(arr)
    self._arr_with_indices: Union[Dict, Tuple[Tuple[int, Any], ...]] = tuple(enumerate(arr))
    if group_by == 'contexts':
        self._group_by_context()
    elif group_by == 'gen_kwargs':
        self._group_by_index()
def _group_by_index(self) -> None:
    """Bucket the stored (index, value) pairs by generation kwargs."""
    grouped = self.group(self._arr_with_indices, fn=self._group_fn, group_by='gen_kwargs')
    self._arr_with_indices = grouped
def _group_by_context(self) -> None:
    """Bucket the stored (index, value) pairs by their context key."""
    grouped = self.group(self._arr_with_indices, fn=self._group_fn, group_by='contexts')
    self._arr_with_indices = grouped
def get_batched(self, n: int=1, batch_fn: Optional[Callable]=None) -> Iterator:
    """Yield batches of at most ``n`` values in sorted order.

    - ``'gen_kwargs'`` grouping: each kwargs group is reordered and chunked
      independently, so a batch never mixes groups.
    - ``'contexts'`` grouping: only the first (representative) request of
      each context group is batched; the rest are served from cache later.
    - no grouping: a single reorder-and-chunk pass over everything.

    ``batch_fn``, when given, is forwarded to ``get_chunks`` to pick a
    dynamic chunk size.
    """
    if self._group_by == 'gen_kwargs':
        for group_values in self._arr_with_indices.values():
            yield from self.get_chunks(self._reorder(group_values), n=n, fn=batch_fn)
    elif self._group_by == 'contexts':
        representatives = [group[0] for group in self._arr_with_indices.values()]
        yield from self.get_chunks(self._reorder(representatives), n=n, fn=batch_fn)
    else:
        yield from self.get_chunks(self._reorder(self._arr_with_indices), n=n, fn=batch_fn)
def get_cache(self, req_str: Optional[Tuple[str, str]]=None, cxt_toks: Optional[List[int]]=None, cont_toks: Optional[List[int]]=None, logits: Optional[torch.Tensor]=None) -> Iterator[Tuple[Tuple[str, str], List[int], torch.Tensor]]:
    """Expand one computed logits result to every request that shares its context.

    Under ``group_by='contexts'`` several requests were deduplicated into a
    single model call; this pops the whole group keyed by
    ``context + continuation[:-1]`` tokens and yields one
    ``(request_string, continuation_tokens, logits)`` triple per original
    request, recording each request's original index for later reordering.
    Without context grouping it is a pass-through that yields the inputs
    unchanged.

    NOTE(review): under 'contexts' grouping, cxt_toks/cont_toks/logits are
    presumably always provided by the caller — None defaults would raise here.
    """
    if self._group_by == 'contexts':
        # Pop (not get): each cache key is consumed exactly once.
        cache_hit: List[Tuple[int, Tuple[Tuple[str, str], List[int], List[int]]]] = self._arr_with_indices.pop(tuple(cxt_toks + cont_toks[:-1]))
        if (cache_size := len(cache_hit)) == 1:
            # Sole member of its group: yield the inputs as-is.
            self._reorder_indices.extend((x[0] for x in cache_hit))
            yield (req_str, cont_toks, logits)
        else:
            # Share the one computed logits tensor across the whole group:
            # expand along a new leading dim, then chunk back into per-request views.
            multilogits = logits.expand(cache_size, -1, -1).chunk(cache_size)
            # Transpose the cached (index, (req_str, cxt, cont)) records into
            # parallel tuples of indices, request strings, and continuations.
            (indices, req_str, cont_toks) = zip(*[(x[0], x[1][0], x[-1][-1]) for x in cache_hit])
            self._reorder_indices.extend(indices)
            for (c_key, cont_tok, logit) in zip(req_str, cont_toks, multilogits):
                yield (c_key, cont_tok, logit)
    else:
        # No context grouping: nothing cached, pass the inputs straight through.
        yield (req_str, cont_toks, logits)
def _reorder(self, arr: Union[List, Tuple[Tuple[int, Any], ...]]) -> Iterator:
    """Sort (index, value) pairs with the user's sort key and yield the values.

    The original indices of the sorted order are appended to
    ``_reorder_indices`` so ``get_original`` can invert the permutation —
    except under 'contexts' grouping, where ``get_cache`` records them instead.
    """
    ordered = sorted(arr, key=self._sort_fn)
    if self._group_by != 'contexts':
        self._reorder_indices.extend(pair[0] for pair in ordered)
    yield from (pair[1] for pair in ordered)
def get_original(self, newarr: List) -> List:
    """Undo the reordering: place results back at their original positions.

    ``newarr`` must align one-to-one with ``_reorder_indices``; the assert
    verifies every original slot was covered exactly once.
    """
    restored = [None] * self._size
    covered = [False] * self._size
    for original_index, result in zip(self._reorder_indices, newarr):
        restored[original_index] = result
        covered[original_index] = True
    assert all(covered)
    return restored
def __len__(self):
    """Return the number of items originally passed to the constructor."""
    return self._size
@staticmethod
def group(arr: Iterable, fn: Callable, group_by: Literal['gen_kwargs', 'contexts']='gen_kwargs') -> dict:
res = collections.defaultdict(list)
for ob in arr: