text_prompt | code_prompt
---|---|
<SYSTEM_TASK:>
Utility function for processing arguments that are singletons or lists.
<END_TASK>
<USER_TASK:>
Description:
def _maybe_repeat(self, x):
"""Utility function for processing arguments that are singletons or lists.
Args:
x: either a list of self.n elements, or not a list.
Returns:
a list of self.n elements.
""" |
if isinstance(x, list):
assert len(x) == self.n
return x
else:
return [x] * self.n |
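A minimal standalone sketch of the same singleton-or-list logic; the class context is dropped and `self.n` becomes an explicit `n` argument (an assumption for illustration):

def maybe_repeat(x, n):
  # Lists are validated against the expected length; singletons are repeated.
  if isinstance(x, list):
    assert len(x) == n
    return x
  return [x] * n

assert maybe_repeat(7, 3) == [7, 7, 7]
assert maybe_repeat([1, 2, 3], 3) == [1, 2, 3]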
<SYSTEM_TASK:>
Remove padding from the given tensor.
<END_TASK>
<USER_TASK:>
Description:
def remove(self, x):
"""Remove padding from the given tensor.
Args:
x (tf.Tensor): of shape [dim_origin,...]
Returns:
a tensor of shape [dim_compressed,...] with dim_compressed <= dim_origin
""" |
with tf.name_scope("pad_reduce/remove"):
x_shape = x.get_shape().as_list()
x = tf.gather_nd(
x,
indices=self.nonpad_ids,
)
if not tf.executing_eagerly():
# This is a hack but for some reason, gather_nd return a tensor of
# undefined shape, so the shape is set up manually
x.set_shape([None] + x_shape[1:])
return x |
<SYSTEM_TASK:>
Add padding back to the given tensor.
<END_TASK>
<USER_TASK:>
Description:
def restore(self, x):
"""Add padding back to the given tensor.
Args:
x (tf.Tensor): of shape [dim_compressed,...]
Returns:
a tensor of shape [dim_origin,...] with dim_origin >= dim_compressed. The
dim is restored from the original reference tensor.
""" |
with tf.name_scope("pad_reduce/restore"):
x = tf.scatter_nd(
indices=self.nonpad_ids,
updates=x,
shape=tf.concat([self.dim_origin, tf.shape(x)[1:]], axis=0),
)
return x |
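A hedged round-trip sketch of the remove/restore pair built from the same gather_nd/scatter_nd ops (assumes eager execution; `nonpad_ids` stands in for `self.nonpad_ids`):

import tensorflow as tf

x = tf.constant([[1., 1.], [0., 0.], [2., 2.], [0., 0.]])  # rows 1 and 3 are padding
nonpad_ids = tf.constant([[0], [2]])  # indices of the non-padding rows
compressed = tf.gather_nd(x, indices=nonpad_ids)  # shape [2, 2]
restored = tf.scatter_nd(
    indices=nonpad_ids,
    updates=compressed,
    shape=tf.concat([tf.shape(x)[0:1], tf.shape(compressed)[1:]], axis=0))
# restored equals x: the padding rows come back as zeros.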
<SYSTEM_TASK:>
Sum together the expert output, weighted by the gates.
<END_TASK>
<USER_TASK:>
Description:
def combine(self, expert_out, multiply_by_gates=True):
"""Sum together the expert output, weighted by the gates.
The slice corresponding to a particular batch element `b` is computed
as the sum over all experts `i` of the expert output, weighted by the
corresponding gate values. If `multiply_by_gates` is set to False, the
gate values are ignored.
Args:
expert_out: a list of `num_experts` `Tensor`s, each with shape
`[expert_batch_size_i, <extra_output_dims>]`.
multiply_by_gates: a boolean
Returns:
a `Tensor` with shape `[batch_size, <extra_output_dims>]`.
""" |
# see comments on convert_gradient_to_tensor
stitched = common_layers.convert_gradient_to_tensor(
tf.concat(expert_out, 0))
if multiply_by_gates:
stitched *= tf.expand_dims(self._nonzero_gates, 1)
combined = tf.unsorted_segment_sum(stitched, self._batch_index,
tf.shape(self._gates)[0])
return combined |
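A toy eager-mode sketch of this gate-weighted recombination; the expert outputs, gates, and batch indices below are made up for illustration:

import tensorflow as tf

expert_out = [tf.constant([[1., 1.]]),            # expert 0 handled example 0
              tf.constant([[2., 2.], [3., 3.]])]  # expert 1 handled examples 0 and 1
nonzero_gates = tf.constant([0.5, 0.5, 1.0])      # aligned with the concat order
batch_index = tf.constant([0, 0, 1])              # batch row of each expert output
stitched = tf.concat(expert_out, 0) * tf.expand_dims(nonzero_gates, 1)
combined = tf.math.unsorted_segment_sum(stitched, batch_index, num_segments=2)
# combined == [[1.5, 1.5], [3., 3.]]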
<SYSTEM_TASK:>
Batch indices corresponding to the examples in the per-expert `Tensor`s.
<END_TASK>
<USER_TASK:>
Description:
def expert_to_batch_indices(self):
"""Batch indices corresponding to the examples in the per-expert `Tensor`s.
Returns:
a list of `num_experts` one-dimensional `Tensor`s with type `tf.int64`
and shapes `[expert_batch_size_i]`
""" |
return tf.split(
self._batch_index, self._part_sizes_tensor, 0, num=self._num_experts) |
<SYSTEM_TASK:>
Sum together the expert output, multiplied by the corresponding gates.
<END_TASK>
<USER_TASK:>
Description:
def combine(self, expert_out, multiply_by_gates=True):
"""Sum together the expert output, multiplied by the corresponding gates.
Args:
expert_out: a list of `num_experts` `Tensor`s, each with shape
`[expert_batch_size_i, <extra_output_dims>]`.
multiply_by_gates: a boolean.
Returns:
a list of num_datashards `Tensor`s with shapes
`[batch_size[d], <extra_output_dims>]`.
""" |
expert_part_sizes = tf.unstack(
tf.stack([d.part_sizes for d in self._dispatchers]),
num=self._ep.n,
axis=1)
# list of lists of shape [num_experts][num_datashards]
expert_output_parts = self._ep(tf.split, expert_out, expert_part_sizes)
expert_output_parts_t = transpose_list_of_lists(expert_output_parts)
def my_combine(dispatcher, parts):
return dispatcher.combine(
common_layers.convert_gradient_to_tensor(tf.concat(parts, 0)),
multiply_by_gates=multiply_by_gates)
return self._dp(my_combine, self._dispatchers, expert_output_parts_t) |
<SYSTEM_TASK:>
Send the inputs to the experts.
<END_TASK>
<USER_TASK:>
Description:
def dispatch(self, inp):
"""Send the inputs to the experts.
Args:
inp: a `Tensor` of shape `[batch, length, depth]`
Returns:
a tensor with shape [batch, num_experts, expert_capacity, depth]
""" |
inp = tf.reshape(inp, [self._batch * self._length, -1])
# [batch, num_experts, expert_capacity, depth]
ret = tf.gather(inp, self._flat_indices)
return ret |
<SYSTEM_TASK:>
Return the output from the experts.
<END_TASK>
<USER_TASK:>
Description:
def combine(self, x):
"""Return the output from the experts.
When one example goes to multiple experts, the outputs are summed.
Args:
x: a Tensor with shape [batch, num_experts, expert_capacity, depth]
Returns:
a `Tensor` with shape `[batch, length, depth]`
""" |
depth = tf.shape(x)[-1]
x *= tf.expand_dims(self._nonpadding, -1)
ret = tf.unsorted_segment_sum(
x, self._flat_indices, num_segments=self._batch * self._length)
ret = tf.reshape(ret, [self._batch, self._length, depth])
return ret |
<SYSTEM_TASK:>
Collects frames from real env for random starts of simulated env.
<END_TASK>
<USER_TASK:>
Description:
def collect_frames_for_random_starts(
storage_env, stacked_env, agent, frame_stack_size, random_starts_step_limit,
log_every_steps=None
):
"""Collects frames from real env for random starts of simulated env.""" |
del frame_stack_size
storage_env.start_new_epoch(0)
tf.logging.info(
"Collecting %d frames for random starts.", random_starts_step_limit
)
rl_utils.run_rollouts(
stacked_env, agent, stacked_env.reset(),
step_limit=random_starts_step_limit,
many_rollouts_from_each_env=True,
log_every_steps=log_every_steps,
)
# Save unfinished rollouts to history.
stacked_env.reset() |
<SYSTEM_TASK:>
Creates an Agent from hparams.
<END_TASK>
<USER_TASK:>
Description:
def make_agent_from_hparams(
agent_type, base_env, stacked_env, loop_hparams, policy_hparams,
planner_hparams, model_dir, policy_dir, sampling_temp, video_writers=()
):
"""Creates an Agent from hparams.""" |
def sim_env_kwargs_fn():
return rl.make_simulated_env_kwargs(
base_env, loop_hparams, batch_size=planner_hparams.batch_size,
model_dir=model_dir
)
planner_kwargs = planner_hparams.values()
planner_kwargs.pop("batch_size")
planner_kwargs.pop("rollout_agent_type")
planner_kwargs.pop("env_type")
return make_agent(
agent_type, stacked_env, policy_hparams, policy_dir, sampling_temp,
sim_env_kwargs_fn, loop_hparams.frame_stack_size,
planner_hparams.rollout_agent_type,
inner_batch_size=planner_hparams.batch_size,
env_type=planner_hparams.env_type,
video_writers=video_writers, **planner_kwargs
) |
<SYSTEM_TASK:>
Given a representation of the board, returns a list of open spaces.
<END_TASK>
<USER_TASK:>
Description:
def get_open_spaces(board):
"""Given a representation of the board, returns a list of open spaces.""" |
open_spaces = []
for i in range(3):
for j in range(3):
if board[i][j] == 0:
open_spaces.append(encode_pos(i, j))
return open_spaces |
<SYSTEM_TASK:>
Given a representation of the board, returns reward and done.
<END_TASK>
<USER_TASK:>
Description:
def get_reward_and_done(board):
"""Given a representation of the board, returns reward and done.""" |
# Returns (reward, done) where:
# reward: -1 means lost, +1 means win, 0 means draw or continuing.
# done: True if the game is over, i.e. someone won or it is a draw.
# Sum all rows ...
all_sums = [np.sum(board[i, :]) for i in range(3)]
# ... all columns
all_sums.extend([np.sum(board[:, i]) for i in range(3)])
# and both diagonals.
all_sums.append(np.sum([board[i, i] for i in range(3)]))
all_sums.append(np.sum([board[i, 2 - i] for i in range(3)]))
if -3 in all_sums:
return -1, True
if 3 in all_sums:
return 1, True
done = True
if get_open_spaces(board):
done = False
return 0, done |
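A worked example, under the assumed encoding +1 = one player's mark, -1 = the other's, 0 = empty:

import numpy as np

board = np.array([[1, 1, 1],
                  [-1, -1, 0],
                  [0, 0, 0]])
# The top row sums to 3, so get_reward_and_done(board) returns (1, True).
# An empty board has open spaces and no completed line: (0, False).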
<SYSTEM_TASK:>
Hyperparameters for decoding.
<END_TASK>
<USER_TASK:>
Description:
def decode_hparams(overrides=""):
"""Hyperparameters for decoding.""" |
hp = hparam.HParams(
save_images=False,
log_results=True,
extra_length=100,
min_length_ratio=0.0,
batch_size=0,
beam_size=4,
alpha=0.6,
eos_penalty=0.0,
block_size=0,
guess_and_check_top_k=0,
guess_and_check_epsilon=-1,
insertion_parallel=False,
return_beams=False,
write_beam_scores=False,
max_input_size=-1,
identity_output=False,
num_samples=-1, # Number of examples to decode.
delimiter="\n",
decode_to_file="", # str. Prefix for filename to write decodings to.
decode_reference="", # str. Filename to read references from.
decode_in_memory=False,
# How long (in minutes) decoding should wait for the next checkpoint.
decode_timeout_mins=240,
summaries_log_dir="decode", # Directory to write hook summaries.
shards=1, # How many shards of data to decode (treating 1 as None).
shard_id=0, # Which shard are we decoding if more than 1 above.
shards_start_offset=0, # Number of the first shard to decode.
shard_google_format=False, # If True use Google shard naming format.
num_decodes=1, # Number of times to go over the dataset.
force_decode_length=False,
display_decoded_images=False,
# Multi-problem decoding task id.
multiproblem_task_id=-1,
# Used for video decoding.
frames_per_second=10,
skip_eos_postprocess=False,
# Creates a blue/red border covering border_percent of the frame.
border_percent=2,
# Maximum number of videos displayed.
# number of videos displayed = max_display_outputs * max_display_decodes
max_display_outputs=10,
max_display_decodes=5,
# Used in computation of VGG feature based video metrics.
# Set this to be the path to a trained VGG ckpt to output
# useful metrics.
vgg_ckpt_path="",
# Used for MLPerf compliance logging.
mlperf_decode_step=0.0,
mlperf_threshold=25.0,
mlperf_success=False)
hp.parse(overrides)
return hp |
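Typical usage is to override a few fields with a comma-separated string; a quick sketch:

hp = decode_hparams("beam_size=1,alpha=0.0,num_samples=10")
assert hp.beam_size == 1 and hp.num_samples == 10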
<SYSTEM_TASK:>
Perform decoding from dataset.
<END_TASK>
<USER_TASK:>
Description:
def decode_from_dataset(estimator,
problem_name,
hparams,
decode_hp,
decode_to_file=None,
dataset_split=None,
checkpoint_path=None):
"""Perform decoding from dataset.""" |
tf.logging.info("Performing local inference from dataset for %s.",
str(problem_name))
# We assume that worker_id corresponds to shard number.
shard = decode_hp.shard_id if decode_hp.shards > 1 else None
# Setup output directory for any artifacts that may be written out.
output_dir = os.path.join(estimator.model_dir, "decode")
tf.gfile.MakeDirs(output_dir)
# If decode_hp.batch_size is specified, use a fixed batch size
if decode_hp.batch_size:
hparams.batch_size = decode_hp.batch_size
hparams.use_fixed_batch_size = True
dataset_kwargs = {
"shard": shard,
"dataset_split": dataset_split,
"max_records": decode_hp.num_samples
}
# Build the inference input function
problem = hparams.problem
infer_input_fn = problem.make_estimator_input_fn(
tf.estimator.ModeKeys.PREDICT, hparams, dataset_kwargs=dataset_kwargs)
predictions, output_dirs = [], []
for decode_id in range(decode_hp.num_decodes):
tf.logging.info("Decoding {}".format(decode_id))
# Create decode directory if not in-memory decoding.
if not decode_hp.decode_in_memory:
output_dir = os.path.join(estimator.model_dir, "decode_%05d" % decode_id)
tf.gfile.MakeDirs(output_dir)
output_dirs.append(output_dir)
result = decode_once(estimator,
problem_name,
hparams,
infer_input_fn,
decode_hp,
decode_to_file,
output_dir,
log_results=decode_hp.log_results,
checkpoint_path=checkpoint_path)
if decode_hp.decode_in_memory:
output_dirs = [output_dir]
predictions.append(result)
if decode_hp.decode_to_file:
decode_hp.decode_to_file = _decode_filename(
decode_hp.decode_to_file, problem_name, decode_hp)
run_postdecode_hooks(DecodeHookArgs(
estimator=estimator,
problem=problem,
output_dirs=output_dirs,
hparams=hparams,
decode_hparams=decode_hp,
predictions=predictions
), dataset_split)
return predictions |
<SYSTEM_TASK:>
Generates decode filename.
<END_TASK>
<USER_TASK:>
Description:
def _decode_filename(base_filename, problem_name, decode_hp):
"""Generates decode filename.
Args:
base_filename: A string, base of the decode filename.
problem_name: A string, name of the problem.
decode_hp: HParams for decoding.
Returns:
A string, produced decode filename.
""" |
if decode_hp.shards > 1:
base_filename = _add_shard_to_filename(base_filename, decode_hp)
if ("beam{beam}.alpha{alpha}.decodes".format(
beam=str(decode_hp.beam_size), alpha=str(decode_hp.alpha))
in base_filename):
return base_filename
else:
return (
"{base}.{model}.{hp}.{problem}.beam{beam}.alpha{alpha}.decodes".format(
base=base_filename,
model=FLAGS.model,
hp=FLAGS.hparams_set,
problem=problem_name,
beam=str(decode_hp.beam_size),
alpha=str(decode_hp.alpha))) |
<SYSTEM_TASK:>
Use py_func to yield elements from the given generator.
<END_TASK>
<USER_TASK:>
Description:
def make_input_fn_from_generator(gen):
"""Use py_func to yield elements from the given generator.""" |
first_ex = six.next(gen)
flattened = tf.contrib.framework.nest.flatten(first_ex)
types = [t.dtype for t in flattened]
shapes = [[None] * len(t.shape) for t in flattened]
first_ex_list = [first_ex]
def py_func():
if first_ex_list:
example = first_ex_list.pop()
else:
example = six.next(gen)
return tf.contrib.framework.nest.flatten(example)
def input_fn():
flat_example = tf.py_func(py_func, [], types)
_ = [t.set_shape(shape) for t, shape in zip(flat_example, shapes)]
example = tf.contrib.framework.nest.pack_sequence_as(first_ex, flat_example)
return example
return input_fn |
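A hedged usage sketch (TF1 graph mode; the feature name and shapes are invented for illustration):

import numpy as np

def gen():
  i = 0
  while True:
    i += 1
    yield {"inputs": np.array([i, i + 1], dtype=np.int32)}

input_fn = make_input_fn_from_generator(gen())
features = input_fn()  # dict of tensors backed by tf.py_func
# Each session.run(features) pulls the next element from the generator.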
<SYSTEM_TASK:>
Generator to produce batches of inputs.
<END_TASK>
<USER_TASK:>
Description:
def _decode_batch_input_fn(num_decode_batches, sorted_inputs, vocabulary,
batch_size, max_input_size,
task_id=-1, has_input=True):
"""Generator to produce batches of inputs.""" |
tf.logging.info(" batch %d" % num_decode_batches)
for b in range(num_decode_batches):
tf.logging.info("Decoding batch %d" % b)
batch_length = 0
batch_inputs = []
for inputs in sorted_inputs[b * batch_size:(b + 1) * batch_size]:
input_ids = vocabulary.encode(inputs)
if max_input_size > 0:
# Subtract 1 for the EOS_ID.
input_ids = input_ids[:max_input_size - 1]
if has_input or task_id > -1: # Do not append EOS for pure LM tasks.
final_id = text_encoder.EOS_ID if task_id < 0 else task_id
input_ids.append(final_id)
batch_inputs.append(input_ids)
if len(input_ids) > batch_length:
batch_length = len(input_ids)
final_batch_inputs = []
for input_ids in batch_inputs:
assert len(input_ids) <= batch_length
x = input_ids + [0] * (batch_length - len(input_ids))
final_batch_inputs.append(x)
yield {
"inputs": np.array(final_batch_inputs).astype(np.int32),
} |
<SYSTEM_TASK:>
Generator that reads from the terminal and yields "interactive inputs".
<END_TASK>
<USER_TASK:>
Description:
def _interactive_input_fn(hparams, decode_hp):
"""Generator that reads from the terminal and yields "interactive inputs".
Due to temporary limitations in tf.learn, if we don't want to reload the
whole graph, then we are stuck encoding all of the input as one fixed-size
numpy array.
We yield int32 arrays with shape [const_array_size]. The format is:
[num_samples, decode_length, len(input_ids), <input_ids>, <padding>]
Args:
hparams: model hparams
decode_hp: decode hparams
Yields:
numpy arrays
Raises:
Exception: when `input_type` is invalid.
""" |
num_samples = decode_hp.num_samples if decode_hp.num_samples > 0 else 1
decode_length = decode_hp.extra_length
input_type = "text"
p_hparams = hparams.problem_hparams
has_input = "inputs" in p_hparams.modality
vocabulary = p_hparams.vocabulary["inputs" if has_input else "targets"]
# This should be longer than the longest input.
const_array_size = 10000
# Import readline if available for command line editing and recall.
try:
import readline # pylint: disable=g-import-not-at-top,unused-variable
except ImportError:
pass
while True:
prompt = ("INTERACTIVE MODE num_samples=%d decode_length=%d \n"
" it=<input_type> ('text' or 'image' or 'label', default: "
"text)\n"
" ns=<num_samples> (changes number of samples, default: 1)\n"
" dl=<decode_length> (changes decode length, default: 100)\n"
" <%s> (decode)\n"
" q (quit)\n"
">" % (num_samples, decode_length,
"source_string" if has_input else "target_prefix"))
input_string = input(prompt)
if input_string == "q":
return
elif input_string[:3] == "ns=":
num_samples = int(input_string[3:])
elif input_string[:3] == "dl=":
decode_length = int(input_string[3:])
elif input_string[:3] == "it=":
input_type = input_string[3:]
else:
if input_type == "text":
input_ids = vocabulary.encode(input_string)
if has_input:
input_ids.append(text_encoder.EOS_ID)
x = [num_samples, decode_length, len(input_ids)] + input_ids
assert len(x) < const_array_size
x += [0] * (const_array_size - len(x))
features = {
"inputs": np.array(x).astype(np.int32),
}
elif input_type == "image":
input_path = input_string
img = vocabulary.encode(input_path)
features = {
"inputs": img.astype(np.int32),
}
elif input_type == "label":
input_ids = [int(input_string)]
x = [num_samples, decode_length, len(input_ids)] + input_ids
features = {
"inputs": np.array(x).astype(np.int32),
}
else:
raise Exception("Unsupported input type.")
for k, v in six.iteritems(
problem_lib.problem_hparams_to_features(p_hparams)):
features[k] = np.array(v).astype(np.int32)
yield features |
<SYSTEM_TASK:>
Save frames of the videos into files.
<END_TASK>
<USER_TASK:>
Description:
def save_video(video, save_path_template):
"""Save frames of the videos into files.""" |
try:
from PIL import Image # pylint: disable=g-import-not-at-top
except ImportError as e:
tf.logging.warning(
"Showing and saving an image requires PIL library to be "
"installed: %s", e)
raise NotImplementedError("Image display and save not implemented.")
for i, frame in enumerate(video):
save_path = save_path_template.format(i)
with tf.gfile.Open(save_path, "wb") as sp:
Image.fromarray(np.uint8(frame)).save(sp) |
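A usage sketch; the template path is hypothetical and must contain a format slot for the frame index:

import numpy as np

video = np.zeros((3, 64, 64, 3), dtype=np.uint8)  # three black 64x64 RGB frames
save_video(video, "/tmp/frame_{:05d}.png")  # writes frame_00000.png, frame_00001.png, ...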
<SYSTEM_TASK:>
Shows an image using matplotlib and saves it.
<END_TASK>
<USER_TASK:>
Description:
def show_and_save_image(img, save_path):
"""Shows an image using matplotlib and saves it.""" |
try:
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
except ImportError as e:
tf.logging.warning(
"Showing and saving an image requires matplotlib to be "
"installed: %s", e)
raise NotImplementedError("Image display and save not implemented.")
plt.imshow(img)
with tf.gfile.Open(save_path, "wb") as sp:
plt.savefig(sp) |
<SYSTEM_TASK:>
Read a file of partial texts to continue.
<END_TASK>
<USER_TASK:>
Description:
def _get_language_modeling_inputs(filename,
delimiter="\n",
repeat=1,
append_space_to_final_punctionation=True):
"""Read a file of partial texts to continue.
The flag append_space_to_final_punctionation exists because SubwordTokenizer
groups punctuation and the ensuing space into the same token; appending a
space causes the final token to be completed.
Args:
filename: a string
delimiter: a string
repeat: an integer - we repeat the entire file that many times.
append_space_to_final_punctionation: a boolean
Returns:
a list of strings
""" |
with tf.gfile.Open(filename) as f:
text = f.read()
inputs = text.split(delimiter)
if not inputs[-1]:
inputs.pop()
inputs *= repeat
if append_space_to_final_punctionation:
inputs = [
s + " " if s and s[-1] in string.punctuation else s for s in inputs]
return inputs |
<SYSTEM_TASK:>
Returning inputs sorted according to decreasing length.
<END_TASK>
<USER_TASK:>
Description:
def _get_sorted_inputs(filename, delimiter="\n"):
"""Returning inputs sorted according to decreasing length.
This causes inputs of similar lengths to be processed in the same batch,
facilitating early stopping for short sequences.
Longer sequences are sorted first so that if you're going to get OOMs,
you'll see it in the first batch.
Args:
filename: path to file with inputs, 1 per line.
delimiter: str, delimits records in the file.
Returns:
a sorted list of inputs, and a dict mapping each input's original
position to its position in the sorted list.
""" |
tf.logging.info("Getting sorted inputs")
with tf.gfile.Open(filename) as f:
text = f.read()
records = text.split(delimiter)
inputs = [record.strip() for record in records]
# Strip the last empty line.
if not inputs[-1]:
inputs.pop()
input_lens = [(i, -len(line.split())) for i, line in enumerate(inputs)]
sorted_input_lens = sorted(input_lens, key=operator.itemgetter(1))
# We'll need the keys to rearrange the inputs back into their original order
sorted_keys = {}
sorted_inputs = []
for i, (index, _) in enumerate(sorted_input_lens):
sorted_inputs.append(inputs[index])
sorted_keys[index] = i
return sorted_inputs, sorted_keys |
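The returned sorted_keys mapping lets callers restore the original order after decoding; a sketch in which decode_fn is a hypothetical per-input decoder:

sorted_inputs, sorted_keys = _get_sorted_inputs("inputs.txt")
decodes = [decode_fn(s) for s in sorted_inputs]
# sorted_keys[i] is the position of original input i within sorted_inputs.
original_order = [decodes[sorted_keys[i]] for i in range(len(decodes))]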
<SYSTEM_TASK:>
Run hooks after decodes have run.
<END_TASK>
<USER_TASK:>
Description:
def run_postdecode_hooks(decode_hook_args, dataset_split):
"""Run hooks after decodes have run.""" |
hooks = decode_hook_args.problem.decode_hooks
if not hooks:
return
global_step = latest_checkpoint_step(decode_hook_args.estimator.model_dir)
if global_step is None:
tf.logging.info(
"Skipping decode hooks because no checkpoint yet available.")
return
tf.logging.info("Running decode hooks.")
parent_dir = os.path.join(decode_hook_args.output_dirs[0], os.pardir)
child_dir = decode_hook_args.decode_hparams.summaries_log_dir
if dataset_split is not None:
child_dir += "_{}".format(dataset_split)
final_dir = os.path.join(parent_dir, child_dir)
summary_writer = tf.summary.FileWriter(final_dir)
for hook in hooks:
# Isolate each hook in case it creates TF ops
with tf.Graph().as_default():
summaries = hook(decode_hook_args)
if summaries:
summary = tf.Summary(value=list(summaries))
summary_writer.add_summary(summary, global_step)
summary_writer.close()
tf.logging.info("Decode hooks done.") |
<SYSTEM_TASK:>
Splits of data to produce and number of output shards for each.
<END_TASK>
<USER_TASK:>
Description:
def dataset_splits(self):
"""Splits of data to produce and number of output shards for each.""" |
return [{
"split": problem.DatasetSplit.TRAIN,
"shards": _TRAIN_SHARDS,
}, {
"split": problem.DatasetSplit.EVAL,
"shards": _DEV_SHARDS,
}] |
<SYSTEM_TASK:>
Image Transformer decoder with local1D spatial layers.
<END_TASK>
<USER_TASK:>
Description:
def local_attention1d_spatial_decoder(x, kv_dim, heads_dim,
feedforward_dim, hparams):
"""Image Transformer decoder with local1D spatial layers.""" |
batch_dim, length_dim, model_dim = x.shape.dims
blocks_w_dim = mtf.Dimension("blocksw", hparams.block_length)
num_w_blocks_dim = mtf.Dimension("num_wblocks",
length_dim.size // blocks_w_dim.size)
x = mtf.reshape(
x, mtf.Shape([batch_dim, num_w_blocks_dim, blocks_w_dim, model_dim]))
# [ self attention - ffn - residual + dropout] x n
for layer in range(hparams.num_decoder_layers):
layer_name = "decoder_layer_%d" % layer
with tf.variable_scope(layer_name):
# Self attention layer
x += layer_prepostprocess_dropout(
mtf.layers.local_self_attention_spatial_blocks(
mtf.layers.layer_norm(x, model_dim, name="layer_norm_att"),
kv_dim,
heads_dim,
memory_w_dim=blocks_w_dim,
mask_right=True,
name="self_att"), hparams)
# ffn layer
x += layer_prepostprocess_dropout(
mtf.layers.dense_relu_dense(
mtf.layers.layer_norm(x, model_dim, name="layer_norm_ffn"),
feedforward_dim,
hparams.dropout,
dropout_broadcast_dims=[length_dim]), hparams)
output = mtf.layers.layer_norm(x, model_dim, name="final_layer_norm")
return output |
<SYSTEM_TASK:>
Image Transformer decoder with local2D spatial layers.
<END_TASK>
<USER_TASK:>
Description:
def local_attention2d_spatial_decoder(x, kv_dim, heads_dim,
feedforward_dim, hparams):
"""Image Transformer decoder with local2D spatial layers.""" |
batch_dim, length_dim, model_dim = x.shape.dims
blocks_h_dim = mtf.Dimension("blocksh", hparams.block_height)
blocks_w_dim = mtf.Dimension("blocksw", hparams.block_width)
num_h_blocks_dim = mtf.Dimension("num_h_blocks",
hparams.img_len // hparams.block_height)
num_w_blocks_dim = mtf.Dimension(
"num_w_blocks",
hparams.img_len * hparams.num_channels // hparams.block_width)
x = mtf.transpose(
mtf.reshape(
x,
mtf.Shape([
batch_dim, num_h_blocks_dim, blocks_h_dim,
num_w_blocks_dim, blocks_w_dim, model_dim
])),
mtf.Shape([
batch_dim, num_h_blocks_dim, num_w_blocks_dim,
blocks_h_dim, blocks_w_dim, model_dim
]))
# Image Transformer Decoder
# [ self attention - ffn - residual + dropout] x n
for layer in range(hparams.num_decoder_layers):
layer_name = "decoder_layer_%d" % layer
with tf.variable_scope(layer_name):
# Self attention layer
x += layer_prepostprocess_dropout(
mtf.layers.local_2d_self_attention_spatial_blocks(
mtf.layers.layer_norm(x, model_dim, name="layer_norm_att"),
kv_dim,
heads_dim,
memory_h_dim=num_h_blocks_dim,
memory_w_dim=num_w_blocks_dim,
name="self_att"), hparams)
# ffn layer
x += layer_prepostprocess_dropout(
mtf.layers.dense_relu_dense(
mtf.layers.layer_norm(x, model_dim, name="layer_norm_ffn"),
feedforward_dim,
hparams.dropout,
dropout_broadcast_dims=[length_dim]), hparams)
output = mtf.layers.layer_norm(x, model_dim, name="final_layer_norm")
return output |
<SYSTEM_TASK:>
Image Transformer decoder with local1D masked layers.
<END_TASK>
<USER_TASK:>
Description:
def local_attention1d_masked_decoder(x, kv_dim, heads_dim,
feedforward_dim, hparams):
"""Image Transformer decoder with local1D masked layers.""" |
_, length_dim, model_dim = x.shape.dims
for layer in range(hparams.num_decoder_layers):
layer_name = "decoder_layer_%d" % layer
with tf.variable_scope(layer_name):
# Self attention layer
length_per_split = mtf.tensor_dim_to_size_per_split(
hparams.layout, hparams.mesh_shape, length_dim)
x += layer_prepostprocess_dropout(
mtf.layers.masked_local_attention_1d(
mtf.layers.layer_norm(x, model_dim, name="layer_norm_att"),
kv_dim,
heads_dim,
window_size=hparams.block_length,
length_per_split=length_per_split,
name="self_att"), hparams)
# ffn layer
x += layer_prepostprocess_dropout(
mtf.layers.dense_relu_dense(
mtf.layers.layer_norm(x, model_dim, name="layer_norm_ffn"),
feedforward_dim,
hparams.dropout,
dropout_broadcast_dims=[length_dim]), hparams)
output = mtf.layers.layer_norm(x, model_dim, name="final_layer_norm")
return output |
<SYSTEM_TASK:>
Returns a list of degree vectors, one for each input and hidden layer.
<END_TASK>
<USER_TASK:>
Description:
def create_degrees(input_dim,
hidden_dims,
input_order='left-to-right',
hidden_order='left-to-right'):
"""Returns a list of degree vectors, one for each input and hidden layer.
A unit with degree d can only receive input from units with degree < d. Output
units always have the same degree as their associated input unit.
Args:
input_dim: Number of inputs.
hidden_dims: list with the number of hidden units per layer. It does not
include the output layer. Each hidden layer must have at least
input_dim - 1 units (otherwise autoregressivity is not possible).
input_order: Order of degrees to the input units: 'random', 'left-to-right',
'right-to-left', or an array of an explicit order. For example,
'left-to-right' builds an autoregressive model
p(x) = p(x1) p(x2 | x1) ... p(xD | x<D).
hidden_order: Order of degrees to the hidden units: 'random',
'left-to-right'. If 'left-to-right', hidden units are allocated equally
(up to a remainder term) to each degree.
""" |
if (isinstance(input_order, str) and
input_order not in ('random', 'left-to-right', 'right-to-left')):
raise ValueError('Input order is not valid.')
if hidden_order not in ('random', 'left-to-right'):
raise ValueError('Hidden order is not valid.')
degrees = []
if isinstance(input_order, str):
input_degrees = np.arange(1, input_dim + 1)
if input_order == 'right-to-left':
input_degrees = np.flip(input_degrees, 0)
elif input_order == 'random':
np.random.shuffle(input_degrees)
else:
input_order = np.array(input_order)
if np.any(np.sort(input_order) != np.arange(1, input_dim + 1)):
raise ValueError('invalid input order')
input_degrees = input_order
degrees.append(input_degrees)
for units in hidden_dims:
if hidden_order == 'random':
min_prev_degree = min(np.min(degrees[-1]), input_dim - 1)
hidden_degrees = np.random.randint(
low=min_prev_degree, high=input_dim, size=units)
elif hidden_order == 'left-to-right':
hidden_degrees = (np.arange(units) % max(1, input_dim - 1) +
min(1, input_dim - 1))
degrees.append(hidden_degrees)
return degrees |
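A worked example with input_dim=3 and one hidden layer of 4 units under the default left-to-right orders:

degrees = create_degrees(3, [4])
# degrees[0] == array([1, 2, 3])     input degrees
# degrees[1] == array([1, 2, 1, 2])  hidden degrees cycle over 1..input_dim-1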
<SYSTEM_TASK:>
Returns a list of binary mask matrices respecting autoregressive ordering.
<END_TASK>
<USER_TASK:>
Description:
def create_masks(input_dim,
hidden_dims,
input_order='left-to-right',
hidden_order='left-to-right'):
"""Returns a list of binary mask matrices respecting autoregressive ordering.
Args:
input_dim: Number of inputs.
hidden_dims: list with the number of hidden units per layer. It does not
include the output layer; that number of units will always be set to
input_dim downstream. Each hidden layer must have at least input_dim - 1
units (otherwise autoregressivity is not possible).
input_order: Order of degrees to the input units: 'random', 'left-to-right',
'right-to-left', or an array of an explicit order. For example,
'left-to-right' builds an autoregressive model
p(x) = p(x1) p(x2 | x1) ... p(xD | x<D).
hidden_order: Order of degrees to the hidden units: 'random',
'left-to-right'. If 'left-to-right', hidden units are allocated equally
(up to a remainder term) to each degree.
""" |
degrees = create_degrees(input_dim, hidden_dims, input_order, hidden_order)
masks = []
# Create input-to-hidden and hidden-to-hidden masks.
for input_degrees, output_degrees in zip(degrees[:-1], degrees[1:]):
mask = tf.cast(input_degrees[:, np.newaxis] <= output_degrees, tf.float32)
masks.append(mask)
# Create hidden-to-output mask.
mask = tf.cast(degrees[-1][:, np.newaxis] < degrees[0], tf.float32)
masks.append(mask)
return masks |
<SYSTEM_TASK:>
Performs incomplete Sinkhorn normalization to inputs.
<END_TASK>
<USER_TASK:>
Description:
def sinkhorn(inputs, n_iters=20):
"""Performs incomplete Sinkhorn normalization to inputs.
By a theorem by Sinkhorn and Knopp [1], a sufficiently well-behaved matrix
with positive entries can be turned into a doubly-stochastic matrix
(i.e. its rows and columns add up to one) via successive row and column
normalization.
- To ensure positivity, the effective input to sinkhorn has to be
exp(inputs) (elementwise).
- However, for stability, sinkhorn works in the log-space. It is only at
return time that entries are exponentiated.
Code is adapted from Mena et al. [2].
[1] Richard Sinkhorn and Paul Knopp. Concerning nonnegative matrices and
doubly stochastic matrices. Pacific Journal of Mathematics, 1967.
[2] Gonzalo Mena, David Belanger, Scott Linderman, Jasper Snoek.
Learning latent permutations with Gumbel-Sinkhorn networks. International
Conference on Learning Representations, 2018.
Args:
inputs: A `Tensor` with shape `[..., vocab_size, vocab_size]`.
n_iters: Number of sinkhorn iterations (in practice, as few as 20
iterations are needed to achieve decent convergence for `vocab_size` ~100).
Returns:
outputs: A `Tensor` of close-to-doubly-stochastic matrices with shape
`[:, vocab_size, vocab_size]`.
""" |
vocab_size = tf.shape(inputs)[-1]
log_alpha = tf.reshape(inputs, [-1, vocab_size, vocab_size])
for _ in range(n_iters):
log_alpha -= tf.reshape(tf.reduce_logsumexp(log_alpha, axis=2),
[-1, vocab_size, 1])
log_alpha -= tf.reshape(tf.reduce_logsumexp(log_alpha, axis=1),
[-1, 1, vocab_size])
outputs = tf.exp(log_alpha)
return outputs |
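A quick sanity check (assumes eager execution; shapes are arbitrary): after 20 iterations, both row and column sums should be close to 1:

import tensorflow as tf

logits = tf.random.normal([2, 4, 4])
ds = sinkhorn(logits, n_iters=20)
row_sums = tf.reduce_sum(ds, axis=-1)  # entries approximately 1.0
col_sums = tf.reduce_sum(ds, axis=-2)  # entries approximately 1.0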
<SYSTEM_TASK:>
Slice encoder hidden state into block_dim.
<END_TASK>
<USER_TASK:>
Description:
def slice_hidden(self, x):
"""Slice encoder hidden state into block_dim.
Args:
x: Encoder hidden state of shape [-1, hidden_size].
Returns:
Sliced states of shape [-1, num_blocks, block_dim].
""" |
x_sliced = tf.reshape(
x, shape=[-1, self.hparams.num_blocks, self.hparams.block_dim])
return x_sliced |
<SYSTEM_TASK:>
Compute nearest neighbors and loss for training the embeddings.
<END_TASK>
<USER_TASK:>
Description:
def embedding_lookup(self, x, means):
"""Compute nearest neighbors and loss for training the embeddings.
Args:
x: Batch of encoder continuous latent states sliced/projected into shape
[-1, num_blocks, block_dim].
means: Embedding means.
Returns:
The nearest neighbor in one-hot form, the nearest neighbor itself, the
commitment loss, and the embedding training loss.
""" |
x_means_hot = self.nearest_neighbor(x, means)
x_means_hot_flat = tf.reshape(
x_means_hot, [-1, self.hparams.num_blocks, self.hparams.block_v_size])
x_means = tf.matmul(tf.transpose(x_means_hot_flat, perm=[1, 0, 2]), means)
x_means = tf.transpose(x_means, [1, 0, 2])
q_loss = tf.reduce_mean(
tf.squared_difference(tf.stop_gradient(x), x_means))
e_loss = tf.reduce_mean(
tf.squared_difference(x, tf.stop_gradient(x_means)))
return x_means_hot, x_means, q_loss, e_loss |
<SYSTEM_TASK:>
Discretization bottleneck for latent variables.
<END_TASK>
<USER_TASK:>
Description:
def discrete_bottleneck(self, x):
"""Discretization bottleneck for latent variables.
Args:
x: Input to the discretization bottleneck.
Returns:
Embedding to pass to the decoder, discrete latent, loss, and the
embedding function.
Raises:
ValueError: If projection_tensors is None for reshape_method project,
or ema_count or ema_means is None if we are using ema, or unknown args.
""" |
x_reshaped = self.slice_hidden(x)
loss = 0
x_means_hot, x_means, q_loss, e_loss = self.embedding_lookup(
x_reshaped, self.means)
if self.hparams.ema:
tf.logging.info("Using EMA with beta = {}".format(self.hparams.beta))
updated_ema_count = \
moving_averages.assign_moving_average(
self.ema_count,
tf.reduce_sum(
tf.reshape(
x_means_hot,
shape=[-1, self.hparams.num_blocks,
self.hparams.block_v_size]),
axis=0),
self.hparams.decay,
zero_debias=False)
dw = tf.matmul(
tf.transpose(x_means_hot, perm=[1, 2, 0]),
tf.transpose(x_reshaped, perm=[1, 0, 2]))
updated_ema_means = \
moving_averages.assign_moving_average(
self.ema_means, dw, self.hparams.decay,
zero_debias=False)
n = tf.reduce_sum(updated_ema_count, axis=-1, keep_dims=True)
updated_ema_count = ((updated_ema_count + self.hparams.epsilon) / (
n + 2**self.hparams.z_size * self.hparams.epsilon) * n)
updated_ema_means = updated_ema_means / tf.expand_dims(
updated_ema_count, axis=-1)
with tf.control_dependencies([e_loss]):
update_means = tf.assign(self.means, updated_ema_means)
with tf.control_dependencies([update_means]):
loss += self.hparams.beta * e_loss
else:
# Use a gradient based loss for learning the cluster centers
loss += q_loss + self.hparams.beta * e_loss
# Get the discrete latent representation
x_means_idx = tf.argmax(x_means_hot, axis=-1)
# Get the binary representation
num_bits = int(self.hparams.z_size // self.hparams.num_blocks)
x_means_bits = self.int_to_bit(x_means_idx, num_bits=num_bits, base=2)
x_discrete = self.bit_to_int(
tf.to_int32(x_means_bits), num_bits=self.hparams.z_size, base=2)
# Reshape x_discrete
shape_x = common_layers.shape_list(x)
shape_discrete = shape_x[:-1]
x_discrete = tf.reshape(x_discrete, shape_discrete)
x_means = tf.reshape(x_means, shape=shape_x)
h1 = x + tf.stop_gradient(x_means - x)
h2 = tf.layers.dense(tf.nn.relu(h1), self.hparams.filter_size, name="vch2")
res = tf.layers.dense(
tf.nn.relu(h2), self.hparams.hidden_size, name="vcfin")
embed_fn = partial(self.embed)
return {
"dense": res,
"discrete": x_discrete,
"loss": loss,
"embed": embed_fn
} |
<SYSTEM_TASK:>
Switch from Adam to Adafactor, approximating the behavior of Adam.
<END_TASK>
<USER_TASK:>
Description:
def mimic_adam_with_adafactor(hparams):
"""Switch from Adam to Adafactor, approximating the behavior of Adam.
Some minor things may be different, like epsilon and beta1 correction.
Args:
hparams: model hyperparameters where "adam" in hparams.optimizer
""" |
assert "adam" in hparams.optimizer
hparams.optimizer = "adafactor"
hparams.optimizer_adafactor_beta1 = hparams.optimizer_adam_beta1
hparams.optimizer_adafactor_beta2 = hparams.optimizer_adam_beta2
hparams.optimizer_adafactor_multiply_by_parameter_scale = False
hparams.optimizer_adafactor_factored = False
hparams.optimizer_adafactor_clipping_threshold = None
hparams.optimizer_adafactor_decay_type = "adam" |
<SYSTEM_TASK:>
Adafactor with recommended learning rate schedule.
<END_TASK>
<USER_TASK:>
Description:
def afx_adafactor():
"""Adafactor with recommended learning rate schedule.""" |
hparams = afx_adam()
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "rsqrt_decay"
hparams.learning_rate_warmup_steps = 10000
return hparams |
<SYSTEM_TASK:>
Small transformer model with small batch size for fast step times.
<END_TASK>
<USER_TASK:>
Description:
def afx_small():
"""Small transformer model with small batch size for fast step times.""" |
hparams = transformer.transformer_tpu()
hparams.filter_size = 1024
hparams.num_heads = 4
hparams.num_hidden_layers = 3
hparams.batch_size = 512
return hparams |
<SYSTEM_TASK:>
Convert a file to examples.
<END_TASK>
<USER_TASK:>
Description:
def main(_):
"""Convert a file to examples.""" |
if FLAGS.subword_text_encoder_filename:
encoder = text_encoder.SubwordTextEncoder(
FLAGS.subword_text_encoder_filename)
elif FLAGS.token_text_encoder_filename:
encoder = text_encoder.TokenTextEncoder(FLAGS.token_text_encoder_filename)
elif FLAGS.byte_text_encoder:
encoder = text_encoder.ByteTextEncoder()
else:
encoder = None
reader = tf.python_io.tf_record_iterator(FLAGS.input_filename)
total_sequences = 0
total_input_tokens = 0
total_target_tokens = 0
nonpadding_input_tokens = 0
nonpadding_target_tokens = 0
max_input_length = 0
max_target_length = 0
for record in reader:
x = tf.train.Example()
x.ParseFromString(record)
inputs = [int(i) for i in x.features.feature["inputs"].int64_list.value]
targets = [int(i) for i in x.features.feature["targets"].int64_list.value]
if FLAGS.print_inputs:
print("INPUTS:\n" + encoder.decode(inputs) if encoder else inputs)
if FLAGS.print_targets:
print("TARGETS:\n" + encoder.decode(targets) if encoder else targets)
nonpadding_input_tokens += len(inputs) - inputs.count(0)
nonpadding_target_tokens += len(targets) - targets.count(0)
total_input_tokens += len(inputs)
total_target_tokens += len(targets)
total_sequences += 1
max_input_length = max(max_input_length, len(inputs))
max_target_length = max(max_target_length, len(targets))
if FLAGS.print_all:
for k, v in six.iteritems(x.features.feature):
print("%s: %s" % (k, v.int64_list.value))
print("total_sequences: %d" % total_sequences)
print("total_input_tokens: %d" % total_input_tokens)
print("total_target_tokens: %d" % total_target_tokens)
print("nonpadding_input_tokens: %d" % nonpadding_input_tokens)
print("nonpadding_target_tokens: %d" % nonpadding_target_tokens)
print("max_input_length: %d" % max_input_length)
print("max_target_length: %d" % max_target_length) |
<SYSTEM_TASK:>
Return a mix of env and video data fields and decoders.
<END_TASK>
<USER_TASK:>
Description:
def example_reading_spec(self):
"""Return a mix of env and video data fields and decoders.""" |
video_fields, video_decoders = (
video_utils.VideoProblem.example_reading_spec(self))
env_fields, env_decoders = env_problem.EnvProblem.example_reading_spec(self)
# Remove raw observations field since we want to capture them as videos.
env_fields.pop(env_problem.OBSERVATION_FIELD)
env_decoders.pop(env_problem.OBSERVATION_FIELD)
# Add frame number spec and decoder.
env_fields[_FRAME_NUMBER_FIELD] = tf.FixedLenFeature((1,), tf.int64)
env_decoders[
_FRAME_NUMBER_FIELD] = tf.contrib.slim.tfexample_decoder.Tensor(
_FRAME_NUMBER_FIELD)
# Add video fields and decoders
env_fields.update(video_fields)
env_decoders.update(video_decoders)
return env_fields, env_decoders |
<SYSTEM_TASK:>
Transforms time step observations to frames of a video.
<END_TASK>
<USER_TASK:>
Description:
def _generate_time_steps(self, trajectory_list):
"""Transforms time step observations to frames of a video.""" |
for time_step in env_problem.EnvProblem._generate_time_steps(
self, trajectory_list):
# Convert the rendered observations from numpy to png format.
frame_np = np.array(time_step.pop(env_problem.OBSERVATION_FIELD))
frame_np = frame_np.reshape(
[self.frame_height, self.frame_width, self.num_channels])
# TODO(msaffar) Add support for non RGB rendered environments
frame = png.from_array(frame_np, "RGB", info={"bitdepth": 8})
frame_buffer = six.BytesIO()
frame.save(frame_buffer)
# Put the encoded frame back.
time_step[_IMAGE_ENCODED_FIELD] = [frame_buffer.getvalue()]
time_step[_IMAGE_FORMAT_FIELD] = [_FORMAT]
time_step[_IMAGE_HEIGHT_FIELD] = [self.frame_height]
time_step[_IMAGE_WIDTH_FIELD] = [self.frame_width]
# Add the frame number
time_step[_FRAME_NUMBER_FIELD] = time_step[env_problem.TIMESTEP_FIELD]
yield time_step |
<SYSTEM_TASK:>
Yield dicts for Text2ClassProblem.generate_samples from lines of files.
<END_TASK>
<USER_TASK:>
Description:
def text2class_txt_iterator(source_txt_path, label_txt_path, class_strs=None):
"""Yield dicts for Text2ClassProblem.generate_samples from lines of files.
Args:
source_txt_path: txt file with record per line.
label_txt_path: txt file with label per line, either as int or str. If
string, must provide class_strs.
class_strs: list<str> of class label names. Must be in correct order (i.e.
["a", "b", "c"] means that "a" will get class ID 0, "b" ID 1, etc.).
Yields:
{"inputs": inputs, "label": label}
""" |
if class_strs:
class_strs = dict([(s, i) for i, s in enumerate(class_strs)])
for inputs, label in zip(
txt_line_iterator(source_txt_path), txt_line_iterator(label_txt_path)):
label = label.strip()
if class_strs:
label = class_strs[label]
else:
label = int(label)
yield {"inputs": inputs, "label": label} |
<SYSTEM_TASK:>
Yield dicts for Text2TextProblem.generate_samples from lines of txt_path.
<END_TASK>
<USER_TASK:>
Description:
def text2text_txt_tab_iterator(txt_path):
"""Yield dicts for Text2TextProblem.generate_samples from lines of txt_path.
Args:
txt_path: path to txt file with a record per line, source and target
are tab-separated.
Yields:
{"inputs": inputs, "targets": targets}
""" |
for line in txt_line_iterator(txt_path):
if line and "\t" in line:
parts = line.split("\t", 1)
inputs, targets = parts[:2]
yield {"inputs": inputs.strip(), "targets": targets.strip()} |
<SYSTEM_TASK:>
Encode Text2Text samples from the generator with the vocab.
<END_TASK>
<USER_TASK:>
Description:
def text2text_generate_encoded(sample_generator,
vocab,
targets_vocab=None,
has_inputs=True,
inputs_prefix="",
targets_prefix=""):
"""Encode Text2Text samples from the generator with the vocab.""" |
targets_vocab = targets_vocab or vocab
for sample in sample_generator:
if has_inputs:
sample["inputs"] = vocab.encode(inputs_prefix + sample["inputs"])
sample["inputs"].append(text_encoder.EOS_ID)
sample["targets"] = targets_vocab.encode(targets_prefix + sample["targets"])
sample["targets"].append(text_encoder.EOS_ID)
yield sample |
<SYSTEM_TASK:>
For packed datasets, returns a function to pack examples.
<END_TASK>
<USER_TASK:>
Description:
def _pack_fn(self):
"""For packed datasets, returns a function to pack examples.
Returns:
None or a function from list of TFRecords to list of TFRecords
""" |
if not self.packed_length:
return None
def my_fn(records):
"""Function from list of TFRecords to list of TFRecords."""
examples = []
for record in records:
x = tf.train.Example()
x.ParseFromString(record)
example_dict = {}
if self.has_inputs:
example_dict["inputs"] = [
int(i) for i in x.features.feature["inputs"].int64_list.value]
example_dict["targets"] = [
int(i) for i in x.features.feature["targets"].int64_list.value]
examples.append(example_dict)
examples = list(self._maybe_pack_examples(examples))
return [
generator_utils.to_example(x).SerializeToString() for x in examples]
return my_fn |
<SYSTEM_TASK:>
Wraps generator with packer if self.packed_length.
<END_TASK>
<USER_TASK:>
Description:
def _maybe_pack_examples(self, generator):
"""Wraps generator with packer if self.packed_length.""" |
if not self.packed_length:
return generator
return generator_utils.pack_examples(
generator,
self.has_inputs,
self.packed_length,
spacing=self.packed_spacing,
chop_long_sequences=not self.has_inputs) |
<SYSTEM_TASK:>
List of input filepaths for a particular training or dev shard.
<END_TASK>
<USER_TASK:>
Description:
def text_filepaths_for_task(self, tmp_dir, task_id):
"""List of input filepaths for a particular training or dev shard.
Args:
tmp_dir: a string
task_id: an integer less than self.num_train_shards + self.num_dev_shards
Returns:
a list of filepath strings
""" |
assert task_id >= 0
assert task_id < self.num_train_shards + self.num_dev_shards
if task_id < self.num_train_shards:
return [
f for i, f in enumerate(self.train_text_filepaths(tmp_dir))
if i % self.num_train_shards == task_id
]
else:
return [
f for i, f in enumerate(self.dev_text_filepaths(tmp_dir))
if i % self.num_dev_shards == task_id - self.num_train_shards
] |
<SYSTEM_TASK:>
Read text out of an input file.
<END_TASK>
<USER_TASK:>
Description:
def filepath_to_unicode_strings(self, filepath):
"""Read text out of an input file.
The default just reads the text, converts to unicode and yields one
unicode string.
Subclasses can override this function in order to preprocess, and can
yield any number of strings.
Args:
filepath: a string
Yields:
unicode strings.
""" |
f = tf.gfile.Open(filepath)
b = f.read()
yield text_encoder.to_unicode_ignore_errors(b) |
<SYSTEM_TASK:>
Read complete text of input files and yield unicode strings.
<END_TASK>
<USER_TASK:>
Description:
def file_generator(self,
filepaths,
max_chars_per_file=None,
max_chars_total=None):
"""Read complete text of input files and yield unicode strings.
By default, one unicode string is produced per file, but this is
not guaranteed, since subclasses can override
filepath_to_unicode_strings().
max_chars_per_file and max_chars_total can also be specified, in which
case some strings may be truncated or dropped to limit the total
amount of output.
Args:
filepaths: a list of strings
max_chars_per_file: an optional integer
max_chars_total: an optional integer
Yields:
unicode strings
""" |
chars_total = 0
for fname in filepaths:
chars_this_file = 0
tf.logging.info("reading file %s" % fname)
for text in self.filepath_to_unicode_strings(fname):
if (max_chars_per_file and
chars_this_file + len(text) > max_chars_per_file):
text = text[:max_chars_per_file - chars_this_file]
if max_chars_total and chars_total + len(text) > max_chars_total:
text = text[:max_chars_total - chars_total]
chars_total += len(text)
chars_this_file += len(text)
if text:
yield text
if max_chars_total and chars_total >= max_chars_total:
return
if max_chars_per_file and chars_this_file >= max_chars_per_file:
break |
<SYSTEM_TASK:>
Generator for examples.
<END_TASK>
<USER_TASK:>
Description:
def example_generator(self, encoder, tmp_dir, task_id):
"""Generator for examples.
Args:
encoder: a TextEncoder
tmp_dir: a string
task_id: an integer
Yields:
feature dictionaries
""" |
filepaths = self.text_filepaths_for_task(tmp_dir, task_id)
if task_id >= self.num_train_shards:
# this is dev data - limit the total length.
max_chars_per_file = self.max_dev_chars // (
self.num_dev_shards * len(filepaths))
else:
max_chars_per_file = None
tokens = []
for ftext in self.file_generator(
filepaths, max_chars_per_file=max_chars_per_file):
tokens.extend(encoder.encode(ftext))
pos = 0
while pos + self.sequence_length <= len(tokens):
yield {"targets": tokens[pos:pos + self.sequence_length]}
pos += self.sequence_length
if pos > 0:
tokens = tokens[pos:]
if self.remainder_policy == "pad":
if tokens:
targets = tokens + [0] * (self.sequence_length - len(tokens))
yield {"targets": targets}
else:
assert self.remainder_policy == "drop" |
<SYSTEM_TASK:>
Make sure that the data is prepared and the vocab is generated.
<END_TASK>
<USER_TASK:>
Description:
def prepare_to_generate(self, data_dir, tmp_dir):
"""Make sure that the data is prepared and the vocab is generated.""" |
self.get_or_create_vocab(data_dir, tmp_dir)
self.train_text_filepaths(tmp_dir)
self.dev_text_filepaths(tmp_dir) |
<SYSTEM_TASK:>
Builds a traditional GRU cell with dense internal transformations.
<END_TASK>
<USER_TASK:>
Description:
def GRUCell(units):
"""Builds a traditional GRU cell with dense internal transformations.
Gated Recurrent Unit paper: https://arxiv.org/abs/1412.3555
Args:
units: Number of hidden units.
Returns:
A Stax model representing a traditional GRU RNN cell.
""" |
return GeneralGRUCell(
candidate_transform=lambda: core.Dense(units=units),
memory_transform=combinators.Identity,
gate_nonlinearity=core.Sigmoid,
candidate_nonlinearity=core.Tanh) |
<SYSTEM_TASK:>
Create an attention mask to hide padding and future words.
<END_TASK>
<USER_TASK:>
Description:
def MakeTargetMask(target, pad=0):
"""Create an attention mask to hide padding and future words.""" |
target_mask = (target != pad)[:, np.newaxis, :]
target_dtype = target_mask.dtype
causal_mask = onp.tril(onp.ones((1, target.shape[-1], target.shape[-1]),
dtype=target_dtype), k=0)
target_mask = target_mask & causal_mask
return np.expand_dims(target_mask, axis=1) |
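A tiny worked example (pad = 0; `onp` is plain NumPy, as in the function above):

target = onp.array([[5, 6, 0]])
mask = MakeTargetMask(target)
# mask.shape == (1, 1, 3, 3). Position 2 is padding and is masked out
# everywhere; position 0 cannot attend to future positions 1 and 2.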
<SYSTEM_TASK:>
Build masks for this batch.
<END_TASK>
<USER_TASK:>
Description:
def PreparePairedSequenceBatch(source, target_in, pad=0):
"""Build masks for this batch.
Args:
source: (batch, source_len) array of integer-coded symbols for inputs
target_in: (batch, target_len) array of integer-coded symbols for targets
pad: int: the padding symbol used to pad the above
Returns:
Prepared batch of tuple of arrays: source, input-target, shifted-target,
source mask, target mask, source-target "memory" mask, minibatch token count
""" |
target = target_in[:, :-1]
target_y = target_in[:, 1:]
source_mask = np.reshape(source != pad,
(source.shape[0], 1, 1, source.shape[-1]))
target_mask = MakeTargetMask(target, pad)
memory_mask = (
np.reshape(np.arange(target.shape[-1]) < source.shape[-1], [-1, 1]))
ntokens = np.sum(target_y != pad)
return (source, target, target_y,
source_mask, target_mask, memory_mask, ntokens) |
<SYSTEM_TASK:>
Implements bare positional encoding.
<END_TASK>
<USER_TASK:>
Description:
def PositionalEncoding(x, params, **unused_kwargs):
"""Implements bare positional encoding.""" |
if not isinstance(x, (list, tuple)): # non-chunked inputs
symbol_size = np.shape(x)[1]
return x + params[:, :symbol_size, :]
# Chunked case: apply to all chunks selecting as much as needed.
offset = 0
results = []
for chunk in x:
symbol_size = np.shape(chunk)[1]
results.append(chunk + params[:, offset:offset + symbol_size, :])
offset += symbol_size
return results |
<SYSTEM_TASK:>
Core dot product self-attention.
<END_TASK>
<USER_TASK:>
Description:
def DotProductAttention(query, key, value, mask, dropout, mode, rng):
"""Core dot product self-attention.
Args:
query: array of representations
key: array of representations
value: array of representations
mask: attention-mask, gates attention
dropout: float: dropout rate
mode: 'eval' or 'train': whether to use dropout
rng: JAX PRNGKey: subkey for disposable use
Returns:
Self attention for q, k, v arrays.
""" |
depth = np.shape(query)[-1]
dots = np.matmul(query, np.swapaxes(key, -1, -2)) / np.sqrt(depth)
if mask is not None:
dots = np.where(mask, dots, -1e9)
# Softmax.
dots = np.exp(dots - backend.logsumexp(dots, axis=-1, keepdims=True))
if dropout >= 1.0:
raise ValueError('Dropout rates must be lower than 1.')
if dropout is not None and dropout > 0.0 and mode == 'train':
keep = backend.random.bernoulli(rng, 1.0 - dropout, dots.shape)
dots = np.where(keep, dots / (1.0 - dropout), 0)
out = np.matmul(dots, value)
return out |
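The same softmax(QK^T / sqrt(depth)) V computation in plain NumPy, as a sketch that omits the mask and dropout branches:

import numpy as onp

q = onp.random.randn(1, 4, 8)
k = onp.random.randn(1, 4, 8)
v = onp.random.randn(1, 4, 8)
dots = q @ onp.swapaxes(k, -1, -2) / onp.sqrt(8)
weights = onp.exp(dots - dots.max(axis=-1, keepdims=True))
weights /= weights.sum(axis=-1, keepdims=True)
out = weights @ v  # shape (1, 4, 8)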
<SYSTEM_TASK:>
Pure single-headed self-attention.
<END_TASK>
<USER_TASK:>
Description:
def PureDotProductAttention(dropout=0.0, mode='train'):
"""Pure single-headed self-attention.
Args:
dropout: float: dropout rate
mode: str: 'train' or 'eval'
Returns:
Pure single-headed attention layer. (No Dense transforms on input.)
""" |
def init_fun(_, input_shapes): # pylint: disable=invalid-name
q_shape, _, v_shape, _ = input_shapes
output_shape = q_shape[:-1] + (v_shape[-1],)
return output_shape, ()
def apply_fun(params, inputs, **kwargs): # pylint: disable=invalid-name
del params
q, k, v, mask = inputs
rng = kwargs.get('rng', None)
return DotProductAttention(q, k, v, mask,
dropout=dropout, mode=mode, rng=rng)
return init_fun, apply_fun |
<SYSTEM_TASK:>
Pure transformer-style multi-headed attention.
<END_TASK>
<USER_TASK:>
Description:
def PureMultiHeadedAttention(x, params, num_heads=8, dropout=0.0,
mode='train', **kwargs):
"""Pure transformer-style multi-headed attention.
Args:
x: inputs ((q, k, v), mask)
params: parameters (none)
num_heads: int: number of attention heads
dropout: float: dropout rate
mode: str: 'train' or 'eval'
**kwargs: other arguments including the rng
Returns:
Pure Multi-headed attention layer (no Dense transforms on input).
""" |
del params
rng = kwargs.get('rng', None)
(q, k, v), mask = x
feature_depth = q.shape[-1]
assert feature_depth % num_heads == 0
head_depth = feature_depth // num_heads
nbatch = np.shape(q)[0]
# nbatch, seqlen, feature_depth --> nbatch, num_heads, seqlen, head_depth
def SplitHeads(x):  # pylint: disable=invalid-name
return np.transpose(
np.reshape(x, (nbatch, -1, num_heads, head_depth)), (0, 2, 1, 3))
# nbatch, num_heads, seqlen, head_depth --> nbatch, seqlen, feature_depth
def JoinHeads(x): # pylint: disable=invalid-name
return np.reshape(
np.transpose(x, (0, 2, 1, 3)), (nbatch, -1, num_heads*head_depth))
# Split heads, dot-product attention, rejoin heads.
return JoinHeads(
DotProductAttention(
SplitHeads(q), SplitHeads(k), SplitHeads(v), mask,
dropout=dropout, mode=mode, rng=rng)) |
<SYSTEM_TASK:>
Select which chunks to attend to in chunked attention.
<END_TASK>
<USER_TASK:>
Description:
def ChunkedAttentionSelector(x, params, selector=None, **kwargs):
"""Select which chunks to attend to in chunked attention.
Args:
x: inputs, a list of elements of the form (q, k, v), mask for each chunk.
params: parameters (unused).
selector: a function from chunk_number -> list of chunk numbers that says
which other chunks should be appended to the given one (previous if None).
**kwargs: unused other arguments.
Returns:
a list of elements of the form (q, k', v'), mask' where k', v' and mask' are
concatenations of k, v and identity-extended masks from selected chunks.
""" |
del params, kwargs
selector = selector or (lambda x: [] if x < 1 else [x-1])
triples, masks = zip(*x)
(queries, keys, values) = zip(*triples)
result = []
for i in range(len(x)):
selected = selector(i)
# Since keys and values are [batch, length, depth] we concatenate on axis=1.
# We also always include the current key or value at the end.
new_key_list = [keys[j] for j in selected]
new_key = np.concatenate(new_key_list + [keys[i]], axis=1)
new_value = np.concatenate(
[values[j] for j in selected] + [values[i]], axis=1)
# Masks are (1, query-len, key-len) so we concatenate on axis=2.
new_mask_shapes = [(1, queries[i].shape[1], key.shape[1])
for key in new_key_list]
cur_mask = masks[i]
# Masks are all-1 for the added chunks (no masking).
new_mask_list = [np.ones(s, dtype=cur_mask.dtype) for s in new_mask_shapes]
# We still use the current (often causal) mask for the final chunk.
new_mask = np.concatenate(new_mask_list + [cur_mask], axis=2)
result.append(((queries[i], new_key, new_value), new_mask))
return tuple(result) |
<SYSTEM_TASK:>
Transformer-style causal multi-headed attention operating on chunks.
<END_TASK>
<USER_TASK:>
Description:
def ChunkedCausalMultiHeadedAttention(
feature_depth, num_heads=8, dropout=0.0, chunk_selector=None, mode='train'):
"""Transformer-style causal multi-headed attention operating on chunks.
Accepts inputs that are a list of chunks and applies causal attention.
Args:
feature_depth: int: depth of embedding
num_heads: int: number of attention heads
dropout: float: dropout rate
chunk_selector: a function from chunk number to list of chunks to attend.
mode: str: 'train' or 'eval'
Returns:
Multi-headed self-attention layer.
""" |
prepare_attention_input = combinators.Serial(
combinators.Branch(),
combinators.Parallel(
combinators.Branch(num_branches=3), # q = k = v = first input
CausalMask(axis=-2), # pylint: disable=no-value-for-parameter
),
combinators.Parallel(
combinators.Parallel(
core.Dense(feature_depth),
core.Dense(feature_depth),
core.Dense(feature_depth),
),
combinators.Identity()
)
)
return combinators.Serial(
combinators.Map(prepare_attention_input),
ChunkedAttentionSelector(selector=chunk_selector), # pylint: disable=no-value-for-parameter
combinators.Map(PureMultiHeadedAttention( # pylint: disable=no-value-for-parameter
feature_depth=feature_depth, num_heads=num_heads,
dropout=dropout, mode=mode), check_shapes=False),
combinators.Map(core.Dense(feature_depth))
) |
<SYSTEM_TASK:>
Layer to shift the tensor to the right by padding on axis 1.
<END_TASK>
<USER_TASK:>
Description:
def ShiftRight(x, **unused_kwargs):
"""Layer to shift the tensor to the right by padding on axis 1.""" |
if not isinstance(x, (list, tuple)): # non-chunked inputs
pad_widths = [(0, 0), (1, 0)]
padded = np.pad(x, pad_widths, mode='constant')
return padded[:, :-1]
# Handling chunked inputs. Recall that the list of chunks represents a big
# sequence (the concatenation of the chunks). We want to shift that sequence,
# so we put a 0 in the beginning of the first chunk and the last element of
# that chunk is used as the new first element of the next chunk, and so on.
padded = []
last_value = np.zeros_like(x[0][:, -1])
for chunk in x:
padded_chunk = np.concatenate([last_value[:, np.newaxis], chunk], axis=1)
last_value = chunk[:, -1]
padded.append(padded_chunk[:, :-1])
return padded |
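A quick sketch of both paths, calling the plain function with made-up inputs:
import numpy as np

# Non-chunked: a single batch of length 4 is shifted right by one.
print(ShiftRight(np.array([[1, 2, 3, 4]])))  # [[0 1 2 3]]

# Chunked: the same sequence split into two chunks. The shift crosses the
# chunk boundary, so chunk 0's last element becomes chunk 1's first element.
shifted = ShiftRight([np.array([[1, 2]]), np.array([[3, 4]])])
print(shifted)  # [array([[0, 1]]), array([[2, 3]])]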
<SYSTEM_TASK:>
Generator for the reversing nlp-like task on sequences of symbols.
<END_TASK>
<USER_TASK:>
Description:
def reverse_generator_nlplike(nbr_symbols,
max_length,
nbr_cases,
scale_std_dev=100,
alpha=1.5):
"""Generator for the reversing nlp-like task on sequences of symbols.
  The length of each sequence is drawn from a Gaussian (normal) distribution
  centered at max_length / 2 with a standard deviation of
  max_length / scale_std_dev (1% of max_length by default); symbols are then
  drawn from Zipf's law at random from [0, nbr_symbols) until nbr_cases
  sequences have been produced.
Args:
nbr_symbols: integer, number of symbols.
max_length: integer, maximum length of sequences to generate.
nbr_cases: the number of cases to generate.
scale_std_dev: float, Normal distribution's standard deviation scale factor
used to draw the length of sequence. Default = 1% of the max_length.
alpha: float, Zipf's Law Distribution parameter. Default = 1.5.
      For modelling natural text, the parameter usually lies in
      the range [1.1, 1.6].
Yields:
A dictionary {"inputs": input-list, "targets": target-list} where
target-list is input-list reversed.
""" |
std_dev = max_length / scale_std_dev
distr_map = zipf_distribution(nbr_symbols, alpha)
for _ in range(nbr_cases):
l = int(abs(np.random.normal(loc=max_length / 2, scale=std_dev)) + 1)
inputs = zipf_random_sample(distr_map, l)
yield {"inputs": inputs, "targets": list(reversed(inputs))} |
<SYSTEM_TASK:>
Wait for SSH to be available at given IP address.
<END_TASK>
<USER_TASK:>
Description:
def wait_for_ssh(ip):
"""Wait for SSH to be available at given IP address.""" |
for _ in range(12):
with safe_socket() as s:
try:
s.connect((ip, 22))
return True
except socket.timeout:
pass
time.sleep(10)
return False |
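The helper `safe_socket` is not shown above; a compatible implementation might look like the following sketch (an assumption, not the project's actual code): a TCP socket with a short timeout that is always closed on exit. With the constants above, `wait_for_ssh` polls port 22 for roughly two minutes before giving up.
import contextlib
import socket

@contextlib.contextmanager
def safe_socket(timeout=2):
    # Hypothetical helper: yields a TCP socket with a timeout and
    # guarantees it is closed when the block exits.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(timeout)
    try:
        yield s
    finally:
        s.close()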
<SYSTEM_TASK:>
Add attend-to-encoder layers to cache.
<END_TASK>
<USER_TASK:>
Description:
def _add_attend_to_encoder_cache(cache, attention_name, hparams, num_layers,
key_channels, value_channels,
vars_3d_num_heads, scope_prefix,
encoder_output):
"""Add attend-to-encoder layers to cache.""" |
for layer in range(num_layers):
layer_name = "layer_%d" % layer
with tf.variable_scope("%sdecoder/%s/%s/multihead_attention" %
(scope_prefix, layer_name, attention_name)):
k_encdec = common_attention.compute_attention_component(
encoder_output,
key_channels,
name="k",
vars_3d_num_heads=vars_3d_num_heads)
k_encdec = common_attention.split_heads(k_encdec, hparams.num_heads)
v_encdec = common_attention.compute_attention_component(
encoder_output,
value_channels,
name="v",
vars_3d_num_heads=vars_3d_num_heads)
v_encdec = common_attention.split_heads(v_encdec, hparams.num_heads)
cache[layer_name][attention_name] = {
"k_encdec": k_encdec,
"v_encdec": v_encdec
}
return cache |
<SYSTEM_TASK:>
Create the initial cache for Evolved Transformer fast decoding.
<END_TASK>
<USER_TASK:>
Description:
def _init_evolved_transformer_cache(cache, hparams, batch_size,
attention_init_length, encoder_output,
encoder_decoder_attention_bias,
scope_prefix):
"""Create the initial cache for Evolved Transformer fast decoding.""" |
key_channels = hparams.attention_key_channels or hparams.hidden_size
value_channels = hparams.attention_value_channels or hparams.hidden_size
num_layers = hparams.num_decoder_layers or hparams.num_hidden_layers
vars_3d_num_heads = (
hparams.num_heads if hparams.get("attention_variables_3d") else 0)
# Add self-attentions.
if cache is None:
cache = {}
cache.update({
"layer_%d" % layer: { # pylint: disable=g-complex-comprehension
_SIXTEEN_HEAD_ATTENTION_NAME: {
"k":
common_attention.split_heads(
tf.zeros(
[batch_size, attention_init_length, key_channels]),
_capped_double_heads(hparams.num_heads)),
"v":
common_attention.split_heads(
tf.zeros(
[batch_size, attention_init_length, value_channels]),
_capped_double_heads(hparams.num_heads)),
},
_VANILLA_ATTENTION_NAME: {
"k":
common_attention.split_heads(
tf.zeros(
[batch_size, attention_init_length, key_channels]),
hparams.num_heads),
"v":
common_attention.split_heads(
tf.zeros(
[batch_size, attention_init_length, value_channels]),
hparams.num_heads),
}
} for layer in range(num_layers)
})
# Add branched layers. Pad with additional zeros for causal convolution.
for layer in range(num_layers):
cache["layer_%d" % layer][_CONV_BRANCHES_FIRST_LAYER_NAME] = tf.zeros([
batch_size, attention_init_length + _DECODER_LEFT_CONV_PADDING,
hparams.hidden_size
])
cache["layer_%d" % layer][_CONV_BRANCHES_SECOND_LAYER_NAME] = tf.zeros([
batch_size, attention_init_length + _DECODER_FINAL_CONV_PADDING,
hparams.hidden_size * 2
])
# Add encoder embedding attentions.
if encoder_output is not None:
cache = _add_attend_to_encoder_cache(
cache=cache,
attention_name=_FIRST_ATTEND_TO_ENCODER_NAME,
hparams=hparams,
num_layers=num_layers,
key_channels=key_channels,
value_channels=value_channels,
vars_3d_num_heads=vars_3d_num_heads,
scope_prefix=scope_prefix,
encoder_output=encoder_output)
cache = _add_attend_to_encoder_cache(
cache=cache,
attention_name=_SECOND_ATTEND_TO_ENCODER_NAME,
hparams=hparams,
num_layers=num_layers,
key_channels=key_channels,
value_channels=value_channels,
vars_3d_num_heads=vars_3d_num_heads,
scope_prefix=scope_prefix,
encoder_output=encoder_output)
cache["encoder_output"] = encoder_output
cache["encoder_decoder_attention_bias"] = encoder_decoder_attention_bias
return cache |
<SYSTEM_TASK:>
Add Evolved Transformer hparams.
<END_TASK>
<USER_TASK:>
Description:
def add_evolved_transformer_hparams(hparams):
"""Add Evolved Transformer hparams.
Note: These are for the Adam optimizer, not the Adafactor optimizer used in
the paper.
Args:
hparams: Current hparams.
Returns:
hparams updated with Evolved Transformer values.
""" |
# Evolved Transformer "layers" are twice as deep as Transformer, so roughly
# halve the number that we use. These numbers are taken from
# arxiv.org/abs/1901.11117 .
hparams.num_encoder_layers = 3
hparams.num_decoder_layers = 4
# Learning rate and decay scheme that mimics the transformer Adam config,
# but with cosine decay instead of rsqrt.
hparams.learning_rate_constant /= hparams.learning_rate_warmup_steps ** 0.5
hparams.learning_rate_schedule = (
"constant*linear_warmup*single_cycle_cos_decay*rsqrt_hidden_size")
# The current infrastructure does not support exposing
# `train_steps` to the decay functions, and so we are hard coding the decay
# steps here to match the default number of train steps used in `t2t_trainer`.
# TODO(davidso): Thread `train_steps` through to decay functions so we do not
# have to worry about a `learning_rate_decay_steps` mismatch.
hparams.learning_rate_decay_steps = 250000
return hparams |
<SYSTEM_TASK:>
Base parameters for Evolved Transformer model on TPU.
<END_TASK>
<USER_TASK:>
Description:
def evolved_transformer_base_tpu():
"""Base parameters for Evolved Transformer model on TPU.""" |
hparams = add_evolved_transformer_hparams(transformer.transformer_tpu())
hparams.learning_rate_constant = 1 / hparams.learning_rate_warmup_steps ** 0.5
hparams.learning_rate_schedule = (
"constant*single_cycle_cos_decay")
return hparams |
<SYSTEM_TASK:>
Add necessary hyperparameters for mixture-of-experts.
<END_TASK>
<USER_TASK:>
Description:
def set_default_moe_hparams(hparams):
"""Add necessary hyperparameters for mixture-of-experts.""" |
hparams.moe_num_experts = 16
hparams.moe_loss_coef = 1e-2
hparams.add_hparam("moe_gating", "top_2")
# Experts have fixed capacity per batch. We need some extra capacity
# in case gating is not perfectly balanced.
# moe_capacity_factor_* should be set to a value >=1.
hparams.add_hparam("moe_capacity_factor_train", 1.25)
hparams.add_hparam("moe_capacity_factor_eval", 2.0)
hparams.add_hparam("moe_capacity_factor_second_level", 1.0)
# Each expert has a hidden layer with this size.
hparams.add_hparam("moe_hidden_size", 4096)
# For gating, divide inputs into groups of this size before gating.
# Each group sends the same number of inputs to each expert.
# Ideally, the group size would be the whole batch, but this is expensive
# due to our use of matrix multiplication for reordering.
hparams.add_hparam("moe_group_size", 1024)
# For top_2 gating, whether to impose an additional loss in order to make
# the experts equally used as the second-place expert.
hparams.add_hparam("moe_use_second_place_loss", 0)
# In top_2 gating, policy for whether to use a second-place expert.
# Legal values are:
# "all": always
# "none": never
# "threshold": if gate value > the given threshold
# "random": if gate value > threshold*random_uniform(0,1)
hparams.add_hparam("moe_second_policy_train", "random")
hparams.add_hparam("moe_second_policy_eval", "random")
hparams.add_hparam("moe_second_threshold_train", 0.2)
hparams.add_hparam("moe_second_threshold_eval", 0.2) |
<SYSTEM_TASK:>
Helper function for figuring out how to split a dimension into groups.
<END_TASK>
<USER_TASK:>
Description:
def _split_into_groups(n, max_group_size, mesh_dim_size):
"""Helper function for figuring out how to split a dimensino into groups.
We have a dimension with size n and we want to split it into
two dimensions: n = num_groups * group_size
group_size should be the largest possible value meeting the constraints:
group_size <= max_group_size
(num_groups = n/group_size) is a multiple of mesh_dim_size
Args:
n: an integer
max_group_size: an integer
mesh_dim_size: an integer
Returns:
num_groups: an integer
group_size: an integer
Raises:
ValueError: if n is not a multiple of mesh_dim_size
""" |
if n % mesh_dim_size != 0:
raise ValueError(
"n=%d is not a multiple of mesh_dim_size=%d" % (n, mesh_dim_size))
num_groups = max(1, n // max_group_size)
while (num_groups % mesh_dim_size != 0 or n % num_groups != 0):
num_groups += 1
group_size = n // num_groups
tf.logging.info(
"_split_into_groups(n=%d, max_group_size=%d, mesh_dim_size=%d)"
" = (num_groups=%d group_size=%d)" %
(n, max_group_size, mesh_dim_size, num_groups, group_size))
return num_groups, group_size |
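A quick worked example of the search (assuming the module's tensorflow import is available for the logging call): with n=512, max_group_size=100 and mesh_dim_size=4, the initial guess is num_groups = 512 // 100 = 5, and the loop advances until it finds a multiple of 4 that also divides 512.
num_groups, group_size = _split_into_groups(
    n=512, max_group_size=100, mesh_dim_size=4)
assert (num_groups, group_size) == (8, 64)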
<SYSTEM_TASK:>
Makes validator for function to ensure it takes nargs args.
<END_TASK>
<USER_TASK:>
Description:
def _nargs_validator(nargs, message):
"""Makes validator for function to ensure it takes nargs args.""" |
if message is None:
message = "Registered function must take exactly %d arguments" % nargs
def f(key, value):
del key
spec = inspect.getfullargspec(value)
if (len(spec.args) != nargs or spec.varargs is not None or
spec.varkw is not None):
raise ValueError(message)
return f |
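A small sketch of the validator in use: it receives a registry key and a candidate value, and raises if the value's signature does not match.
validate = _nargs_validator(2, None)

def good_fn(x, y):
    return x + y

def bad_fn(x, y, z):
    return x

validate("some_key", good_fn)  # passes silently
try:
    validate("some_key", bad_fn)
except ValueError as e:
    print(e)  # Registered function must take exactly 2 arguments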
<SYSTEM_TASK:>
Get pre-registered optimizer keyed by name.
<END_TASK>
<USER_TASK:>
Description:
def optimizer(name):
"""Get pre-registered optimizer keyed by name.
`name` should be snake case, though SGD -> sgd, RMSProp -> rms_prop and
  UpperCamelCase -> snake_case conversions are included for legacy support.
Args:
name: name of optimizer used in registration. This should be a snake case
identifier, though others supported for legacy reasons.
Returns:
optimizer
""" |
warn_msg = ("Please update `registry.optimizer` callsite "
"(likely due to a `HParams.optimizer` value)")
if name == "SGD":
name = "sgd"
tf.logging.warning("'SGD' optimizer now keyed by 'sgd'. %s" % warn_msg)
elif name == "RMSProp":
name = "rms_prop"
tf.logging.warning(
"'RMSProp' optimizer now keyed by 'rms_prop'. %s" % warn_msg)
else:
snake_name = misc_utils.camelcase_to_snakecase(name)
if name != snake_name:
tf.logging.warning(
"optimizer names now keyed by snake_case names. %s" % warn_msg)
name = snake_name
return Registries.optimizers[name] |
<SYSTEM_TASK:>
Get and initialize the `EnvProblem` with the given name and batch size.
<END_TASK>
<USER_TASK:>
Description:
def env_problem(env_problem_name, **kwargs):
"""Get and initialize the `EnvProblem` with the given name and batch size.
Args:
env_problem_name: string name of the registered env problem.
**kwargs: forwarded to env problem's initialize method.
Returns:
an initialized EnvProblem with the given batch size.
""" |
ep_cls = Registries.env_problems[env_problem_name]
ep = ep_cls()
ep.initialize(**kwargs)
return ep |
<SYSTEM_TASK:>
Creates a help string for names_list grouped by prefix.
<END_TASK>
<USER_TASK:>
Description:
def display_list_by_prefix(names_list, starting_spaces=0):
"""Creates a help string for names_list grouped by prefix.""" |
cur_prefix, result_lines = None, []
space = " " * starting_spaces
for name in sorted(names_list):
split = name.split("_", 1)
prefix = split[0]
if cur_prefix != prefix:
result_lines.append(space + prefix + ":")
cur_prefix = prefix
result_lines.append(space + " * " + name)
return "\n".join(result_lines) |
<SYSTEM_TASK:>
Generate help string with contents of registry.
<END_TASK>
<USER_TASK:>
Description:
def help_string():
"""Generate help string with contents of registry.""" |
help_str = """
Registry contents:
------------------
Models:
%s
HParams:
%s
RangedHParams:
%s
Problems:
%s
Optimizers:
%s
Attacks:
%s
Attack HParams:
%s
Pruning HParams:
%s
Pruning Strategies:
%s
Env Problems:
%s
"""
lists = tuple(
display_list_by_prefix(entries, starting_spaces=4) for entries in [ # pylint: disable=g-complex-comprehension
list_models(),
list_hparams(),
list_ranged_hparams(),
list_base_problems(),
list_optimizers(),
list_attacks(),
list_attack_params(),
list_pruning_params(),
list_pruning_strategies(),
list_env_problems(),
])
return help_str % lists |
<SYSTEM_TASK:>
Decorator to register a function, or registration itself.
<END_TASK>
<USER_TASK:>
Description:
def register(self, key_or_value=None):
"""Decorator to register a function, or registration itself.
This is primarily intended for use as a decorator, either with or without
a key/parentheses.
```python
@my_registry.register('key1')
def value_fn(x, y, z):
pass
@my_registry.register()
def another_fn(x, y):
pass
@my_registry.register
def third_func():
pass
```
Note if key_or_value is provided as a non-callable, registration only
occurs once the returned callback is called with a callable as its only
argument.
```python
callback = my_registry.register('different_key')
'different_key' in my_registry # False
  callback(lambda x, y: x + y)
'different_key' in my_registry # True
```
Args:
key_or_value (optional): key to access the registered value with, or the
function itself. If `None` (default), `self.default_key` will be called
on `value` once the returned callback is called with `value` as the only
arg. If `key_or_value` is itself callable, it is assumed to be the value
      and the key is given by `self.default_key(key_or_value)`.
Returns:
decorated callback, or callback generated a decorated function.
""" |
def decorator(value, key):
self[key] = value
return value
# Handle if decorator was used without parens
if callable(key_or_value):
return decorator(value=key_or_value, key=None)
else:
return lambda value: decorator(value, key=key_or_value) |
<SYSTEM_TASK:>
Check the dynamic symbol versions.
<END_TASK>
<USER_TASK:>
Description:
def check_dependicies(objdump_string):
"""Check the dynamic symbol versions.
Parameters
----------
objdump_string : string
The dynamic symbol table entries of the file (result of `objdump -T` command).
""" |
GLIBC_version = re.compile(r'0{16}[ \t]+GLIBC_(\d{1,2})[.](\d{1,3})[.]?\d{,3}[ \t]+')
versions = GLIBC_version.findall(objdump_string)
assert len(versions) > 1
for major, minor in versions:
assert int(major) <= 2
assert int(minor) <= 14
GLIBCXX_version = re.compile(r'0{16}[ \t]+GLIBCXX_(\d{1,2})[.](\d{1,2})[.]?(\d{,3})[ \t]+')
versions = GLIBCXX_version.findall(objdump_string)
assert len(versions) > 1
for major, minor, patch in versions:
assert int(major) == 3
assert int(minor) == 4
assert patch == '' or int(patch) <= 19
GOMP_version = re.compile(r'0{16}[ \t]+G?OMP_(\d{1,2})[.](\d{1,2})[.]?\d{,3}[ \t]+')
versions = GOMP_version.findall(objdump_string)
assert len(versions) > 1
for major, minor in versions:
assert int(major) == 1
assert int(minor) == 0 |
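A hedged usage sketch: dump the dynamic symbol table of the compiled shared library with `objdump -T` and feed the text to the checker. The library path is an illustrative assumption.
import subprocess

objdump_output = subprocess.run(
    ["objdump", "-T", "lib_lightgbm.so"],
    capture_output=True, text=True, check=True).stdout
# Raises AssertionError if a symbol requires GLIBC > 2.14, GLIBCXX > 3.4.19
# or (G)OMP > 1.0.
check_dependicies(objdump_output)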
<SYSTEM_TASK:>
Decorate an objective function.
<END_TASK>
<USER_TASK:>
Description:
def _objective_function_wrapper(func):
"""Decorate an objective function.
Note
----
    For multi-class task, y_pred is grouped by class_id first, then by row_id.
    If you want to get the i-th row's y_pred in the j-th class, access it as y_pred[j * num_data + i],
    and you should group grad and hess in the same way.
Parameters
----------
func : callable
        Expects a callable with signature ``func(y_true, y_pred)`` or ``func(y_true, y_pred, group)``:
y_true : array-like of shape = [n_samples]
The target values.
y_pred : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The predicted values.
group : array-like
Group/query data, used for ranking task.
Returns
-------
new_func : callable
The new objective function as expected by ``lightgbm.engine.train``.
The signature is ``new_func(preds, dataset)``:
preds : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The predicted values.
dataset : Dataset
The training set from which the labels will be extracted using ``dataset.get_label()``.
""" |
def inner(preds, dataset):
"""Call passed function with appropriate arguments."""
labels = dataset.get_label()
argc = argc_(func)
if argc == 2:
grad, hess = func(labels, preds)
elif argc == 3:
grad, hess = func(labels, preds, dataset.get_group())
else:
raise TypeError("Self-defined objective function should have 2 or 3 arguments, got %d" % argc)
"""weighted for objective"""
weight = dataset.get_weight()
if weight is not None:
"""only one class"""
if len(weight) == len(grad):
grad = np.multiply(grad, weight)
hess = np.multiply(hess, weight)
else:
num_data = len(weight)
num_class = len(grad) // num_data
if num_class * num_data != len(grad):
raise ValueError("Length of grad and hess should equal to num_class * num_data")
for k in range_(num_class):
for i in range_(num_data):
idx = k * num_data + i
grad[idx] *= weight[i]
hess[idx] *= weight[i]
return grad, hess
return inner |
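As an example of a user function the wrapper accepts, here is a sketch of a binary logistic objective returning the gradient and hessian of the loss with respect to the raw scores (illustrative, not library code):
import numpy as np

def logistic_obj(y_true, y_pred):
    prob = 1.0 / (1.0 + np.exp(-y_pred))
    grad = prob - y_true          # first derivative of the log loss
    hess = prob * (1.0 - prob)    # second derivative
    return grad, hess

# `wrapped(preds, dataset)` is the signature lightgbm.engine.train expects.
wrapped = _objective_function_wrapper(logistic_obj)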
<SYSTEM_TASK:>
Decorate an eval function.
<END_TASK>
<USER_TASK:>
Description:
def _eval_function_wrapper(func):
"""Decorate an eval function.
Note
----
    For multi-class task, y_pred is grouped by class_id first, then by row_id.
    If you want to get the i-th row's y_pred in the j-th class, access it as y_pred[j * num_data + i].
Parameters
----------
func : callable
Expects a callable with following signatures:
``func(y_true, y_pred)``,
``func(y_true, y_pred, weight)``
or ``func(y_true, y_pred, weight, group)``
and returns (eval_name->string, eval_result->float, is_bigger_better->bool):
y_true : array-like of shape = [n_samples]
The target values.
y_pred : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The predicted values.
weight : array-like of shape = [n_samples]
The weight of samples.
group : array-like
Group/query data, used for ranking task.
Returns
-------
new_func : callable
The new eval function as expected by ``lightgbm.engine.train``.
The signature is ``new_func(preds, dataset)``:
preds : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The predicted values.
dataset : Dataset
The training set from which the labels will be extracted using ``dataset.get_label()``.
""" |
def inner(preds, dataset):
"""Call passed function with appropriate arguments."""
labels = dataset.get_label()
argc = argc_(func)
if argc == 2:
return func(labels, preds)
elif argc == 3:
return func(labels, preds, dataset.get_weight())
elif argc == 4:
return func(labels, preds, dataset.get_weight(), dataset.get_group())
else:
raise TypeError("Self-defined eval function should have 2, 3 or 4 arguments, got %d" % argc)
return inner |
<SYSTEM_TASK:>
Return the predicted value for each sample.
<END_TASK>
<USER_TASK:>
Description:
def predict(self, X, raw_score=False, num_iteration=None,
pred_leaf=False, pred_contrib=False, **kwargs):
"""Return the predicted value for each sample.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
Input features matrix.
raw_score : bool, optional (default=False)
Whether to predict raw scores.
num_iteration : int or None, optional (default=None)
Limit number of iterations in the prediction.
If None, if the best iteration exists, it is used; otherwise, all trees are used.
If <= 0, all trees are used (no limits).
pred_leaf : bool, optional (default=False)
Whether to predict leaf index.
pred_contrib : bool, optional (default=False)
Whether to predict feature contributions.
Note
----
If you want to get more explanations for your model's predictions using SHAP values,
like SHAP interaction values,
you can install the shap package (https://github.com/slundberg/shap).
Note that unlike the shap package, with ``pred_contrib`` we return a matrix with an extra
column, where the last column is the expected value.
**kwargs
Other parameters for the prediction.
Returns
-------
predicted_result : array-like of shape = [n_samples] or shape = [n_samples, n_classes]
The predicted values.
X_leaves : array-like of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]
If ``pred_leaf=True``, the predicted leaf of every tree for each sample.
X_SHAP_values : array-like of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes]
If ``pred_contrib=True``, the feature contributions for each sample.
""" |
if self._n_features is None:
raise LGBMNotFittedError("Estimator not fitted, call `fit` before exploiting the model.")
if not isinstance(X, (DataFrame, DataTable)):
X = _LGBMCheckArray(X, accept_sparse=True, force_all_finite=False)
n_features = X.shape[1]
if self._n_features != n_features:
raise ValueError("Number of features of the model must "
"match the input. Model n_features_ is %s and "
"input n_features is %s "
% (self._n_features, n_features))
return self.booster_.predict(X, raw_score=raw_score, num_iteration=num_iteration,
pred_leaf=pred_leaf, pred_contrib=pred_contrib, **kwargs) |
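A minimal usage sketch with synthetic data and default hyperparameters (the shapes in the comments follow the docstring above):
import numpy as np
from lightgbm import LGBMRegressor

X = np.random.rand(100, 5)
y = X[:, 0] + np.random.normal(scale=0.1, size=100)
model = LGBMRegressor(n_estimators=10).fit(X, y)
preds = model.predict(X)                   # shape (100,)
leaves = model.predict(X, pred_leaf=True)  # shape (100, 10), one leaf per tree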
<SYSTEM_TASK:>
Return the predicted probability for each class for each sample.
<END_TASK>
<USER_TASK:>
Description:
def predict_proba(self, X, raw_score=False, num_iteration=None,
pred_leaf=False, pred_contrib=False, **kwargs):
"""Return the predicted probability for each class for each sample.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
Input features matrix.
raw_score : bool, optional (default=False)
Whether to predict raw scores.
num_iteration : int or None, optional (default=None)
Limit number of iterations in the prediction.
If None, if the best iteration exists, it is used; otherwise, all trees are used.
If <= 0, all trees are used (no limits).
pred_leaf : bool, optional (default=False)
Whether to predict leaf index.
pred_contrib : bool, optional (default=False)
Whether to predict feature contributions.
Note
----
If you want to get more explanations for your model's predictions using SHAP values,
like SHAP interaction values,
you can install the shap package (https://github.com/slundberg/shap).
Note that unlike the shap package, with ``pred_contrib`` we return a matrix with an extra
column, where the last column is the expected value.
**kwargs
Other parameters for the prediction.
Returns
-------
predicted_probability : array-like of shape = [n_samples, n_classes]
The predicted probability for each class for each sample.
X_leaves : array-like of shape = [n_samples, n_trees * n_classes]
If ``pred_leaf=True``, the predicted leaf of every tree for each sample.
X_SHAP_values : array-like of shape = [n_samples, (n_features + 1) * n_classes]
If ``pred_contrib=True``, the feature contributions for each sample.
""" |
result = super(LGBMClassifier, self).predict(X, raw_score, num_iteration,
pred_leaf, pred_contrib, **kwargs)
if self._n_classes > 2 or raw_score or pred_leaf or pred_contrib:
return result
else:
return np.vstack((1. - result, result)).transpose() |
<SYSTEM_TASK:>
Parse config header file.
<END_TASK>
<USER_TASK:>
Description:
def get_parameter_infos(config_hpp):
"""Parse config header file.
Parameters
----------
config_hpp : string
Path to the config header file.
Returns
-------
infos : tuple
Tuple with names and content of sections.
""" |
is_inparameter = False
parameter_group = None
cur_key = None
cur_info = {}
keys = []
member_infos = []
with open(config_hpp) as config_hpp_file:
for line in config_hpp_file:
if "#pragma region Parameters" in line:
is_inparameter = True
elif "#pragma region" in line and "Parameters" in line:
cur_key = line.split("region")[1].strip()
keys.append(cur_key)
member_infos.append([])
elif '#pragma endregion' in line:
if cur_key is not None:
cur_key = None
elif is_inparameter:
is_inparameter = False
elif cur_key is not None:
line = line.strip()
if line.startswith("//"):
key, _, val = line[2:].partition("=")
key = key.strip()
val = val.strip()
if key not in cur_info:
if key == "descl2" and "desc" not in cur_info:
cur_info["desc"] = []
elif key != "descl2":
cur_info[key] = []
if key == "desc":
cur_info["desc"].append(("l1", val))
elif key == "descl2":
cur_info["desc"].append(("l2", val))
else:
cur_info[key].append(val)
elif line:
has_eqsgn = False
tokens = line.split("=")
if len(tokens) == 2:
if "default" not in cur_info:
cur_info["default"] = [tokens[1][:-1].strip()]
has_eqsgn = True
tokens = line.split()
cur_info["inner_type"] = [tokens[0].strip()]
if "name" not in cur_info:
if has_eqsgn:
cur_info["name"] = [tokens[1].strip()]
else:
cur_info["name"] = [tokens[1][:-1].strip()]
member_infos[-1].append(cur_info)
cur_info = {}
return keys, member_infos |
<SYSTEM_TASK:>
Get names of all parameters.
<END_TASK>
<USER_TASK:>
Description:
def get_names(infos):
"""Get names of all parameters.
Parameters
----------
infos : list
Content of the config header file.
Returns
-------
names : list
Names of all parameters.
""" |
names = []
for x in infos:
for y in x:
names.append(y["name"][0])
return names |
<SYSTEM_TASK:>
Get aliases of all parameters.
<END_TASK>
<USER_TASK:>
Description:
def get_alias(infos):
"""Get aliases of all parameters.
Parameters
----------
infos : list
Content of the config header file.
Returns
-------
pairs : list
List of tuples (param alias, param name).
""" |
pairs = []
for x in infos:
for y in x:
if "alias" in y:
name = y["name"][0]
alias = y["alias"][0].split(',')
for name2 in alias:
pairs.append((name2.strip(), name))
return pairs |
<SYSTEM_TASK:>
Construct code for auto config file for one param value.
<END_TASK>
<USER_TASK:>
Description:
def set_one_var_from_string(name, param_type, checks):
"""Construct code for auto config file for one param value.
Parameters
----------
name : string
Name of the parameter.
param_type : string
Type of the parameter.
checks : list
Constraints of the parameter.
Returns
-------
ret : string
Lines of auto config file with getting and checks of one parameter value.
""" |
ret = ""
univar_mapper = {"int": "GetInt", "double": "GetDouble", "bool": "GetBool", "std::string": "GetString"}
if "vector" not in param_type:
ret += " %s(params, \"%s\", &%s);\n" % (univar_mapper[param_type], name, name)
if len(checks) > 0:
for check in checks:
ret += " CHECK(%s %s);\n" % (name, check)
ret += "\n"
else:
ret += " if (GetString(params, \"%s\", &tmp_str)) {\n" % (name)
type2 = param_type.split("<")[1][:-1]
if type2 == "std::string":
ret += " %s = Common::Split(tmp_str.c_str(), ',');\n" % (name)
else:
ret += " %s = Common::StringToArray<%s>(tmp_str, ',');\n" % (name, type2)
ret += " }\n\n"
return ret |
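For instance, a scalar double parameter with one constraint generates the following C++ lines:
print(set_one_var_from_string("learning_rate", "double", [">0.0"]))
#   GetDouble(params, "learning_rate", &learning_rate);
#   CHECK(learning_rate >0.0);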
<SYSTEM_TASK:>
Generate auto config file.
<END_TASK>
<USER_TASK:>
Description:
def gen_parameter_code(config_hpp, config_out_cpp):
"""Generate auto config file.
Parameters
----------
config_hpp : string
Path to the config header file.
config_out_cpp : string
Path to the auto config file.
Returns
-------
infos : tuple
Tuple with names and content of sections.
""" |
keys, infos = get_parameter_infos(config_hpp)
names = get_names(infos)
alias = get_alias(infos)
str_to_write = r"""/*!
* Copyright (c) 2018 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*
* \note
* This file is auto generated by LightGBM\helpers\parameter_generator.py from LightGBM\include\LightGBM\config.h file.
*/
"""
str_to_write += "#include<LightGBM/config.h>\nnamespace LightGBM {\n"
# alias table
str_to_write += "std::unordered_map<std::string, std::string> Config::alias_table({\n"
for pair in alias:
str_to_write += " {\"%s\", \"%s\"},\n" % (pair[0], pair[1])
str_to_write += "});\n\n"
# names
str_to_write += "std::unordered_set<std::string> Config::parameter_set({\n"
for name in names:
str_to_write += " \"%s\",\n" % (name)
str_to_write += "});\n\n"
# from strings
str_to_write += "void Config::GetMembersFromString(const std::unordered_map<std::string, std::string>& params) {\n"
str_to_write += " std::string tmp_str = \"\";\n"
for x in infos:
for y in x:
if "[doc-only]" in y:
continue
param_type = y["inner_type"][0]
name = y["name"][0]
checks = []
if "check" in y:
checks = y["check"]
tmp = set_one_var_from_string(name, param_type, checks)
str_to_write += tmp
# tails
str_to_write += "}\n\n"
str_to_write += "std::string Config::SaveMembersToString() const {\n"
str_to_write += " std::stringstream str_buf;\n"
for x in infos:
for y in x:
if "[doc-only]" in y:
continue
param_type = y["inner_type"][0]
name = y["name"][0]
if "vector" in param_type:
if "int8" in param_type:
str_to_write += " str_buf << \"[%s: \" << Common::Join(Common::ArrayCast<int8_t, int>(%s), \",\") << \"]\\n\";\n" % (name, name)
else:
str_to_write += " str_buf << \"[%s: \" << Common::Join(%s, \",\") << \"]\\n\";\n" % (name, name)
else:
str_to_write += " str_buf << \"[%s: \" << %s << \"]\\n\";\n" % (name, name)
# tails
str_to_write += " return str_buf.str();\n"
str_to_write += "}\n\n"
str_to_write += "} // namespace LightGBM\n"
with open(config_out_cpp, "w") as config_out_cpp_file:
config_out_cpp_file.write(str_to_write)
return keys, infos |
<SYSTEM_TASK:>
Convert a ctypes float pointer array to a numpy array.
<END_TASK>
<USER_TASK:>
Description:
def cfloat32_array_to_numpy(cptr, length):
"""Convert a ctypes float pointer array to a numpy array.""" |
if isinstance(cptr, ctypes.POINTER(ctypes.c_float)):
return np.fromiter(cptr, dtype=np.float32, count=length)
else:
raise RuntimeError('Expected float pointer') |
<SYSTEM_TASK:>
Convert a ctypes double pointer array to a numpy array.
<END_TASK>
<USER_TASK:>
Description:
def cfloat64_array_to_numpy(cptr, length):
"""Convert a ctypes double pointer array to a numpy array.""" |
if isinstance(cptr, ctypes.POINTER(ctypes.c_double)):
return np.fromiter(cptr, dtype=np.float64, count=length)
else:
raise RuntimeError('Expected double pointer') |
<SYSTEM_TASK:>
Convert Python dictionary to string, which is passed to C API.
<END_TASK>
<USER_TASK:>
Description:
def param_dict_to_str(data):
"""Convert Python dictionary to string, which is passed to C API.""" |
if data is None or not data:
return ""
pairs = []
for key, val in data.items():
if isinstance(val, (list, tuple, set)) or is_numpy_1d_array(val):
pairs.append(str(key) + '=' + ','.join(map(str, val)))
elif isinstance(val, string_type) or isinstance(val, numeric_types) or is_numeric(val):
pairs.append(str(key) + '=' + str(val))
elif val is not None:
raise TypeError('Unknown type of parameter:%s, got:%s'
% (key, type(val).__name__))
return ' '.join(pairs) |
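For example (assuming the module's compat helpers such as `is_numpy_1d_array` are in scope); note that `None`-valued entries are silently dropped:
params = {"objective": "binary", "num_leaves": 31,
          "ignore_column": [0, 3], "verbosity": None}
print(param_dict_to_str(params))
# objective=binary num_leaves=31 ignore_column=0,3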
<SYSTEM_TASK:>
Fix the memory of multi-dimensional sliced object.
<END_TASK>
<USER_TASK:>
Description:
def convert_from_sliced_object(data):
"""Fix the memory of multi-dimensional sliced object.""" |
if data.base is not None and isinstance(data, np.ndarray) and isinstance(data.base, np.ndarray):
if not data.flags.c_contiguous:
warnings.warn("Usage of np.ndarray subset (sliced data) is not recommended "
"due to it will double the peak memory cost in LightGBM.")
return np.copy(data)
return data |
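A quick demonstration of when the copy happens (the array values are arbitrary):
import numpy as np

base = np.arange(12, dtype=np.float64).reshape(3, 4)
view = base[:, 1:3]  # multi-dimensional slice: a non-contiguous view
fixed = convert_from_sliced_object(view)
print(view.flags.c_contiguous)   # False
print(fixed.flags.c_contiguous)  # True: a contiguous copy was made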
<SYSTEM_TASK:>
Predict logic.
<END_TASK>
<USER_TASK:>
Description:
def predict(self, data, num_iteration=-1,
raw_score=False, pred_leaf=False, pred_contrib=False, data_has_header=False,
is_reshape=True):
"""Predict logic.
Parameters
----------
data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse
Data source for prediction.
When data type is string, it represents the path of txt file.
num_iteration : int, optional (default=-1)
Iteration used for prediction.
raw_score : bool, optional (default=False)
Whether to predict raw scores.
pred_leaf : bool, optional (default=False)
Whether to predict leaf index.
pred_contrib : bool, optional (default=False)
Whether to predict feature contributions.
data_has_header : bool, optional (default=False)
Whether data has header.
Used only for txt data.
is_reshape : bool, optional (default=True)
Whether to reshape to (nrow, ncol).
Returns
-------
result : numpy array
Prediction result.
""" |
if isinstance(data, Dataset):
raise TypeError("Cannot use Dataset instance for prediction, please use raw data instead")
data = _data_from_pandas(data, None, None, self.pandas_categorical)[0]
predict_type = C_API_PREDICT_NORMAL
if raw_score:
predict_type = C_API_PREDICT_RAW_SCORE
if pred_leaf:
predict_type = C_API_PREDICT_LEAF_INDEX
if pred_contrib:
predict_type = C_API_PREDICT_CONTRIB
int_data_has_header = 1 if data_has_header else 0
if num_iteration > self.num_total_iteration:
num_iteration = self.num_total_iteration
if isinstance(data, string_type):
with _TempFile() as f:
_safe_call(_LIB.LGBM_BoosterPredictForFile(
self.handle,
c_str(data),
ctypes.c_int(int_data_has_header),
ctypes.c_int(predict_type),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
c_str(f.name)))
lines = f.readlines()
nrow = len(lines)
preds = [float(token) for line in lines for token in line.split('\t')]
preds = np.array(preds, dtype=np.float64, copy=False)
elif isinstance(data, scipy.sparse.csr_matrix):
preds, nrow = self.__pred_for_csr(data, num_iteration, predict_type)
elif isinstance(data, scipy.sparse.csc_matrix):
preds, nrow = self.__pred_for_csc(data, num_iteration, predict_type)
elif isinstance(data, np.ndarray):
preds, nrow = self.__pred_for_np2d(data, num_iteration, predict_type)
elif isinstance(data, list):
try:
data = np.array(data)
except BaseException:
raise ValueError('Cannot convert data list to numpy array.')
preds, nrow = self.__pred_for_np2d(data, num_iteration, predict_type)
elif isinstance(data, DataTable):
preds, nrow = self.__pred_for_np2d(data.to_numpy(), num_iteration, predict_type)
else:
try:
warnings.warn('Converting data to scipy sparse matrix.')
csr = scipy.sparse.csr_matrix(data)
except BaseException:
raise TypeError('Cannot predict data for type {}'.format(type(data).__name__))
preds, nrow = self.__pred_for_csr(csr, num_iteration, predict_type)
if pred_leaf:
preds = preds.astype(np.int32)
if is_reshape and preds.size != nrow:
if preds.size % nrow == 0:
preds = preds.reshape(nrow, -1)
else:
        raise ValueError('Length of predict result (%d) cannot be divided by nrow (%d)'
% (preds.size, nrow))
return preds |
<SYSTEM_TASK:>
Get size of prediction result.
<END_TASK>
<USER_TASK:>
Description:
def __get_num_preds(self, num_iteration, nrow, predict_type):
"""Get size of prediction result.""" |
if nrow > MAX_INT32:
            raise LightGBMError('LightGBM cannot perform prediction for data '
                                'with number of rows greater than MAX_INT32 (%d).\n'
                                'You can split your data into chunks '
                                'and then concatenate predictions for them.' % MAX_INT32)
n_preds = ctypes.c_int64(0)
_safe_call(_LIB.LGBM_BoosterCalcNumPredict(
self.handle,
ctypes.c_int(nrow),
ctypes.c_int(predict_type),
ctypes.c_int(num_iteration),
ctypes.byref(n_preds)))
return n_preds.value |
<SYSTEM_TASK:>
Predict for a 2-D numpy matrix.
<END_TASK>
<USER_TASK:>
Description:
def __pred_for_np2d(self, mat, num_iteration, predict_type):
"""Predict for a 2-D numpy matrix.""" |
if len(mat.shape) != 2:
raise ValueError('Input numpy.ndarray or list must be 2 dimensional')
def inner_predict(mat, num_iteration, predict_type, preds=None):
if mat.dtype == np.float32 or mat.dtype == np.float64:
data = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
else:
"""change non-float data to float data, need to copy"""
data = np.array(mat.reshape(mat.size), dtype=np.float32)
ptr_data, type_ptr_data, _ = c_float_array(data)
n_preds = self.__get_num_preds(num_iteration, mat.shape[0], predict_type)
if preds is None:
preds = np.zeros(n_preds, dtype=np.float64)
elif len(preds.shape) != 1 or len(preds) != n_preds:
raise ValueError("Wrong length of pre-allocated predict array")
out_num_preds = ctypes.c_int64(0)
_safe_call(_LIB.LGBM_BoosterPredictForMat(
self.handle,
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int(mat.shape[0]),
ctypes.c_int(mat.shape[1]),
ctypes.c_int(C_API_IS_ROW_MAJOR),
ctypes.c_int(predict_type),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.byref(out_num_preds),
preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if n_preds != out_num_preds.value:
raise ValueError("Wrong length for predict results")
return preds, mat.shape[0]
nrow = mat.shape[0]
if nrow > MAX_INT32:
sections = np.arange(start=MAX_INT32, stop=nrow, step=MAX_INT32)
# __get_num_preds() cannot work with nrow > MAX_INT32, so calculate overall number of predictions piecemeal
n_preds = [self.__get_num_preds(num_iteration, i, predict_type) for i in np.diff([0] + list(sections) + [nrow])]
n_preds_sections = np.array([0] + n_preds, dtype=np.intp).cumsum()
preds = np.zeros(sum(n_preds), dtype=np.float64)
for chunk, (start_idx_pred, end_idx_pred) in zip_(np.array_split(mat, sections),
zip_(n_preds_sections, n_preds_sections[1:])):
# avoid memory consumption by arrays concatenation operations
inner_predict(chunk, num_iteration, predict_type, preds[start_idx_pred:end_idx_pred])
return preds, nrow
else:
return inner_predict(mat, num_iteration, predict_type) |
<SYSTEM_TASK:>
Initialize data from a list of 2-D numpy matrices.
<END_TASK>
<USER_TASK:>
Description:
def __init_from_list_np2d(self, mats, params_str, ref_dataset):
"""Initialize data from a list of 2-D numpy matrices.""" |
ncol = mats[0].shape[1]
nrow = np.zeros((len(mats),), np.int32)
if mats[0].dtype == np.float64:
ptr_data = (ctypes.POINTER(ctypes.c_double) * len(mats))()
else:
ptr_data = (ctypes.POINTER(ctypes.c_float) * len(mats))()
holders = []
type_ptr_data = None
for i, mat in enumerate(mats):
if len(mat.shape) != 2:
raise ValueError('Input numpy.ndarray must be 2 dimensional')
if mat.shape[1] != ncol:
raise ValueError('Input arrays must have same number of columns')
nrow[i] = mat.shape[0]
if mat.dtype == np.float32 or mat.dtype == np.float64:
mats[i] = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
else:
# change non-float data to float data, need to copy
mats[i] = np.array(mat.reshape(mat.size), dtype=np.float32)
chunk_ptr_data, chunk_type_ptr_data, holder = c_float_array(mats[i])
if type_ptr_data is not None and chunk_type_ptr_data != type_ptr_data:
raise ValueError('Input chunks must have same type')
ptr_data[i] = chunk_ptr_data
type_ptr_data = chunk_type_ptr_data
holders.append(holder)
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_DatasetCreateFromMats(
ctypes.c_int(len(mats)),
ctypes.cast(ptr_data, ctypes.POINTER(ctypes.POINTER(ctypes.c_double))),
ctypes.c_int(type_ptr_data),
nrow.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ctypes.c_int(ncol),
ctypes.c_int(C_API_IS_ROW_MAJOR),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
return self |
<SYSTEM_TASK:>
Create validation data align with current Dataset.
<END_TASK>
<USER_TASK:>
Description:
def create_valid(self, data, label=None, weight=None, group=None,
init_score=None, silent=False, params=None):
"""Create validation data align with current Dataset.
Parameters
----------
data : string, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse or list of numpy arrays
Data source of Dataset.
If string, it represents the path to txt file.
label : list, numpy 1-D array, pandas Series / one-column DataFrame or None, optional (default=None)
Label of the data.
weight : list, numpy 1-D array, pandas Series or None, optional (default=None)
Weight for each instance.
group : list, numpy 1-D array, pandas Series or None, optional (default=None)
Group/query size for Dataset.
init_score : list, numpy 1-D array, pandas Series or None, optional (default=None)
Init score for Dataset.
silent : bool, optional (default=False)
Whether to print messages during construction.
params : dict or None, optional (default=None)
Other parameters for validation Dataset.
Returns
-------
valid : Dataset
Validation Dataset with reference to self.
""" |
ret = Dataset(data, label=label, reference=self,
weight=weight, group=group, init_score=init_score,
silent=silent, params=params, free_raw_data=self.free_raw_data)
ret._predictor = self._predictor
ret.pandas_categorical = self.pandas_categorical
return ret |
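A minimal sketch with synthetic data: creating the validation set from the training Dataset ensures both share the same bin mappers.
import numpy as np
import lightgbm as lgb

X_train, y_train = np.random.rand(100, 4), np.random.rand(100)
X_valid, y_valid = np.random.rand(20, 4), np.random.rand(20)
train_set = lgb.Dataset(X_train, label=y_train)
valid_set = train_set.create_valid(X_valid, label=y_valid)
booster = lgb.train({"objective": "regression"}, train_set,
                    num_boost_round=5, valid_sets=[valid_set])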
<SYSTEM_TASK:>
Get subset of current Dataset.
<END_TASK>
<USER_TASK:>
Description:
def subset(self, used_indices, params=None):
"""Get subset of current Dataset.
Parameters
----------
used_indices : list of int
Indices used to create the subset.
params : dict or None, optional (default=None)
These parameters will be passed to Dataset constructor.
Returns
-------
subset : Dataset
Subset of the current Dataset.
""" |
if params is None:
params = self.params
ret = Dataset(None, reference=self, feature_name=self.feature_name,
categorical_feature=self.categorical_feature, params=params,
free_raw_data=self.free_raw_data)
ret._predictor = self._predictor
ret.pandas_categorical = self.pandas_categorical
ret.used_indices = used_indices
return ret |
<SYSTEM_TASK:>
Set property into the Dataset.
<END_TASK>
<USER_TASK:>
Description:
def set_field(self, field_name, data):
"""Set property into the Dataset.
Parameters
----------
field_name : string
The field name of the information.
data : list, numpy 1-D array, pandas Series or None
The array of data to be set.
Returns
-------
self : Dataset
Dataset with set property.
""" |
if self.handle is None:
raise Exception("Cannot set %s before construct dataset" % field_name)
if data is None:
# set to None
_safe_call(_LIB.LGBM_DatasetSetField(
self.handle,
c_str(field_name),
None,
ctypes.c_int(0),
ctypes.c_int(FIELD_TYPE_MAPPER[field_name])))
return self
dtype = np.float32
if field_name == 'group':
dtype = np.int32
elif field_name == 'init_score':
dtype = np.float64
data = list_to_1d_numpy(data, dtype, name=field_name)
if data.dtype == np.float32 or data.dtype == np.float64:
ptr_data, type_data, _ = c_float_array(data)
elif data.dtype == np.int32:
ptr_data, type_data, _ = c_int_array(data)
else:
raise TypeError("Excepted np.float32/64 or np.int32, meet type({})".format(data.dtype))
if type_data != FIELD_TYPE_MAPPER[field_name]:
raise TypeError("Input type error for set_field")
_safe_call(_LIB.LGBM_DatasetSetField(
self.handle,
c_str(field_name),
ptr_data,
ctypes.c_int(len(data)),
ctypes.c_int(type_data)))
return self |
<SYSTEM_TASK:>
Get property from the Dataset.
<END_TASK>
<USER_TASK:>
Description:
def get_field(self, field_name):
"""Get property from the Dataset.
Parameters
----------
field_name : string
The field name of the information.
Returns
-------
info : numpy array
A numpy array with information from the Dataset.
""" |
if self.handle is None:
raise Exception("Cannot get %s before construct Dataset" % field_name)
tmp_out_len = ctypes.c_int()
out_type = ctypes.c_int()
ret = ctypes.POINTER(ctypes.c_void_p)()
_safe_call(_LIB.LGBM_DatasetGetField(
self.handle,
c_str(field_name),
ctypes.byref(tmp_out_len),
ctypes.byref(ret),
ctypes.byref(out_type)))
if out_type.value != FIELD_TYPE_MAPPER[field_name]:
raise TypeError("Return type error for get_field")
if tmp_out_len.value == 0:
return None
if out_type.value == C_API_DTYPE_INT32:
return cint32_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_int32)), tmp_out_len.value)
elif out_type.value == C_API_DTYPE_FLOAT32:
return cfloat32_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_float)), tmp_out_len.value)
elif out_type.value == C_API_DTYPE_FLOAT64:
return cfloat64_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_double)), tmp_out_len.value)
elif out_type.value == C_API_DTYPE_INT8:
return cint8_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_int8)), tmp_out_len.value)
else:
raise TypeError("Unknown type") |
<SYSTEM_TASK:>
Set categorical features.
<END_TASK>
<USER_TASK:>
Description:
def set_categorical_feature(self, categorical_feature):
"""Set categorical features.
Parameters
----------
categorical_feature : list of int or strings
Names or indices of categorical features.
Returns
-------
self : Dataset
Dataset with set categorical features.
""" |
if self.categorical_feature == categorical_feature:
return self
if self.data is not None:
if self.categorical_feature is None:
self.categorical_feature = categorical_feature
return self._free_handle()
elif categorical_feature == 'auto':
warnings.warn('Using categorical_feature in Dataset.')
return self
else:
warnings.warn('categorical_feature in Dataset is overridden.\n'
'New categorical_feature is {}'.format(sorted(list(categorical_feature))))
self.categorical_feature = categorical_feature
return self._free_handle()
else:
raise LightGBMError("Cannot set categorical feature after freed raw data, "
"set free_raw_data=False when construct Dataset to avoid this.") |
<SYSTEM_TASK:>
Set predictor for continued training.
<END_TASK>
<USER_TASK:>
Description:
def _set_predictor(self, predictor):
"""Set predictor for continued training.
It is not recommended for user to call this function.
Please use init_model argument in engine.train() or engine.cv() instead.
""" |
if predictor is self._predictor:
return self
if self.data is not None:
self._predictor = predictor
return self._free_handle()
else:
raise LightGBMError("Cannot set predictor after freed raw data, "
"set free_raw_data=False when construct Dataset to avoid this.") |
<SYSTEM_TASK:>
Set reference Dataset.
<END_TASK>
<USER_TASK:>
Description:
def set_reference(self, reference):
"""Set reference Dataset.
Parameters
----------
reference : Dataset
Reference that is used as a template to construct the current Dataset.
Returns
-------
self : Dataset
Dataset with set reference.
""" |
self.set_categorical_feature(reference.categorical_feature) \
.set_feature_name(reference.feature_name) \
._set_predictor(reference._predictor)
        # we're done if self and reference share a common upstream reference
if self.get_ref_chain().intersection(reference.get_ref_chain()):
return self
if self.data is not None:
self.reference = reference
return self._free_handle()
else:
raise LightGBMError("Cannot set reference after freed raw data, "
"set free_raw_data=False when construct Dataset to avoid this.") |
<SYSTEM_TASK:>
Set weight of each instance.
<END_TASK>
<USER_TASK:>
Description:
def set_weight(self, weight):
"""Set weight of each instance.
Parameters
----------
weight : list, numpy 1-D array, pandas Series or None
Weight to be set for each data point.
Returns
-------
self : Dataset
Dataset with set weight.
""" |
if weight is not None and np.all(weight == 1):
weight = None
self.weight = weight
if self.handle is not None and weight is not None:
weight = list_to_1d_numpy(weight, name='weight')
self.set_field('weight', weight)
return self |
<SYSTEM_TASK:>
Set init score of Booster to start from.
<END_TASK>
<USER_TASK:>
Description:
def set_init_score(self, init_score):
"""Set init score of Booster to start from.
Parameters
----------
init_score : list, numpy 1-D array, pandas Series or None
Init score for Booster.
Returns
-------
self : Dataset
Dataset with set init score.
""" |
self.init_score = init_score
if self.handle is not None and init_score is not None:
init_score = list_to_1d_numpy(init_score, np.float64, name='init_score')
self.set_field('init_score', init_score)
return self |