docstring | function | __index_level_0__
---|---|---|
Lower the ReshapeOperation.
Reshaping can require collective communication between processors.
We haven't yet implemented all possible reshapes. We try to handle the
common cases here - otherwise we raise a NotImplementedError.
Args:
lowering: a Lowering
Raises:
NotImplementedError: if we haven't covered this case | def lower(self, lowering):
old_shape = self.inputs[0].shape
new_shape = self.outputs[0].shape
mesh_impl = lowering.mesh_impl(self)
slices = lowering.tensors[self.inputs[0]]
mesh_axis_to_cumprod_old = mesh_impl.mesh_axis_to_cumprod(old_shape)
mesh_axis_to_cumprod_new = mesh_impl.mesh_axis_to_cumprod(new_shape)
# Figure out what needs to be done for different mesh-axes
mesh_axes_allsplit = []
mesh_axes_allconcat = []
mesh_axes_alltoall = []
for mesh_axis, (old_cumprod, new_cumprod) in enumerate(
zip(mesh_axis_to_cumprod_old, mesh_axis_to_cumprod_new)):
if new_cumprod != old_cumprod:
if old_cumprod is None:
# split in new layout but not in old layout - we need an allsplit
mesh_axes_allsplit.append(mesh_axis)
elif new_cumprod is None:
# split in old layout but not in new layout - we need an allconcat
mesh_axes_allconcat.append(mesh_axis)
else:
# split differently in old and new layouts - we need an alltoall
mesh_axes_alltoall.append(mesh_axis)
laid_out_size = mesh_impl.laid_out_size(old_shape)
for mesh_axis in mesh_axes_allsplit:
tensor_axis = old_shape.cumprod_to_tensor_axis(
mesh_axis_to_cumprod_new[mesh_axis])
if tensor_axis is None:
# TODO(noam): try to handle this case
raise NotImplementedError(
"Try first reshaping to insert a new tf dimension,"
" then changing layout. input_shape=%s output_shape=%s"
% (self.inputs[0].shape, self.outputs[0].shape))
slices = mesh_impl.allsplit(slices, mesh_axis, tensor_axis)
laid_out_size //= mesh_impl.shape[mesh_axis].size
for mesh_axis in mesh_axes_alltoall:
split_tensor_axis = old_shape.cumprod_to_tensor_axis(
mesh_axis_to_cumprod_new[mesh_axis])
if split_tensor_axis is None:
# TODO(noam): try to handle this case
raise NotImplementedError(
"Try first reshaping to insert a new tf dimension,"
" then changing layout. input_shape=%s output_shape=%s"
% (self.inputs[0].shape, self.outputs[0].shape))
concat_tensor_axis = old_shape.cumprod_to_tensor_axis(
mesh_axis_to_cumprod_old[mesh_axis])
assert concat_tensor_axis is not None
slices = mesh_impl.alltoall(
slices, mesh_axis, split_tensor_axis, concat_tensor_axis)
lowering.add_counter(
"alltoall/%s/reshape_op" % mesh_axis, laid_out_size)
for mesh_axis in mesh_axes_allconcat:
tensor_axis = old_shape.cumprod_to_tensor_axis(
mesh_axis_to_cumprod_old[mesh_axis])
assert tensor_axis is not None
slices = mesh_impl.allconcat(slices, mesh_axis, tensor_axis)
laid_out_size *= mesh_impl.shape[mesh_axis].size
lowering.add_counter(
"allconcat/%s/reshape_op" % mesh_axis, laid_out_size)
# now reshape the slices
old_slice_shape = mesh_impl.slice_shape(old_shape)
new_slice_shape = mesh_impl.slice_shape(new_shape)
if new_slice_shape != old_slice_shape:
def reshape_fn(x):
return tf.reshape(x, new_slice_shape)
slices = mesh_impl.slicewise(reshape_fn, slices)
lowering.set_tensor_lowering(self.outputs[0], slices) | 213,822 |
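The classification of mesh axes above (allsplit / allconcat / alltoall) can be illustrated without any mesh at all. Below is a minimal plain-Python sketch; the cumprod lists are made-up placeholders, where `None` means the tensor is not split along that mesh axis.

```python
# Hypothetical cumprod layouts: None = not split on this mesh axis.
old = [None, 4, 2]   # layout of the input shape
new = [4,    4, 8]   # layout of the output shape
allsplit, allconcat, alltoall = [], [], []
for mesh_axis, (o, n) in enumerate(zip(old, new)):
    if o == n:
        continue                      # same split on both sides - nothing to do
    if o is None:
        allsplit.append(mesh_axis)    # newly split -> allsplit
    elif n is None:
        allconcat.append(mesh_axis)   # no longer split -> allconcat
    else:
        alltoall.append(mesh_axis)    # split over a different tensor dim -> alltoall
assert (allsplit, allconcat, alltoall) == ([0], [], [2])
```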
Datatypes to use for the run.
Args:
master_dtype: string, datatype for checkpoints
keep this the same between training and eval/inference
slice_dtype: string, datatype for variables in memory
must be tf.float32 for training
activation_dtype: string, datatype for activations
less memory usage if tf.bfloat16 but possible numerical issues
Returns:
a mtf.VariableDType | def get_variable_dtype(
master_dtype=tf.bfloat16,
slice_dtype=tf.float32,
activation_dtype=tf.float32):
return mtf.VariableDType(
master_dtype=tf.as_dtype(master_dtype),
slice_dtype=tf.as_dtype(slice_dtype),
activation_dtype=tf.as_dtype(activation_dtype)) | 213,829 |
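A minimal usage sketch, assuming `tf` (TensorFlow 1.x style API) and `mtf` are imported as in the code above; the dtype choices simply restate the documented defaults and recommendations rather than new advice.

```python
# Typical TPU setup: bfloat16 checkpoints, float32 slices, bfloat16 activations.
var_dtype = get_variable_dtype(
    master_dtype=tf.bfloat16,      # keep identical between training and eval
    slice_dtype=tf.float32,        # must be float32 for training
    activation_dtype=tf.bfloat16)  # saves memory, watch for numerical issues
print(var_dtype)
```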
Decode from a text file.
Args:
estimator: a TPUEstimator
vocabulary: a mtf.transformer.vocabulary.Vocabulary
model_type: a string
batch_size: an integer
sequence_length: an integer (maximum decode length)
checkpoint_path: an optional string
input_filename: a string
output_filename: a string
eos_id: EOS id | def decode_from_file(estimator,
vocabulary,
model_type,
batch_size,
sequence_length,
checkpoint_path="",
input_filename=gin.REQUIRED,
output_filename=gin.REQUIRED,
eos_id=1):
with tf.gfile.Open(input_filename) as f:
text = f.read()
records = text.split("\n")
inputs = [record.strip() for record in records]
# Strip the last empty line.
if not inputs[-1]:
inputs.pop()
n = len(inputs)
# encode all inputs
all_input_ids = []
for line in inputs:
ids = inputs_vocabulary(vocabulary).encode(line.strip())
if model_type != "lm":
# for text2self problems, the inputs represent a partial sequence
# to be continued, and should not be terminated by EOS.
# for sequence-to-sequence problems, the input needs to be EOS-terminated
ids += [eos_id]
if len(ids) > sequence_length:
ids = ids[:sequence_length]
else:
ids.extend([0] * (sequence_length - len(ids)))
all_input_ids.append(ids)
# pad to make an integral number of batches
all_input_ids.extend([all_input_ids[0]] * (-n % batch_size))
padded_n = len(all_input_ids)
all_input_ids = np.array(all_input_ids, dtype=np.int32)
def input_fn(params):
del params
dataset = tf.data.Dataset.from_tensor_slices({"inputs": all_input_ids})
dataset = dataset.batch(batch_size, drop_remainder=True)
return dataset
result_iter = estimator.predict(input_fn, checkpoint_path=checkpoint_path)
vocab_size = targets_vocabulary(vocabulary).vocab_size
decodes = []
for i, result in enumerate(result_iter):
output_ids = clean_decodes(list(result["outputs"]), vocab_size)
output_string = targets_vocabulary(vocabulary).decode(
[int(x) for x in output_ids])
decodes.append(output_string)
if (i & (i - 1)) == 0:
if i < len(inputs):
# LOG every power of 2, don't log if it's padded input i >= len(inputs)
tf.logging.info("decode %d input = %s" % (i, inputs[i]))
tf.logging.info(" output = %s" % output_string)
# BUG WORKAROUND - on TF1.13 and earlier, the output for each batch is
# repeated a number of times equal to the number of cores.
if len(decodes) == padded_n:
tf.logging.info("number of decodes matches number of inputs")
elif len(decodes) % padded_n == 0:
num_cores = len(decodes) // padded_n
tf.logging.info("output is repeated num_cores times - removing extras")
def keep(i):
return i % (batch_size * num_cores) < batch_size
decodes = [d for i, d in enumerate(decodes) if keep(i)]
else:
raise ValueError("unexpected number of outputs")
output_file = tf.gfile.Open(output_filename, "w")
decodes = decodes[:n]
for d in decodes:
output_file.write(d)
output_file.write("\n")
output_file.close() | 213,832 |
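The line `all_input_ids.extend([all_input_ids[0]] * (-n % batch_size))` pads the inputs to a whole number of batches by repeating the first example; `-n % batch_size` is exactly the number of filler rows needed. A standalone illustration of that arithmetic (plain Python, nothing TensorFlow-specific):

```python
batch_size = 8
n = 21                    # number of real inputs
pad = -n % batch_size     # 3 filler rows bring the total to 24, a multiple of 8
assert pad == 3 and (n + pad) % batch_size == 0
```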
Stop at EOS or padding or OOV.
Args:
ids: a list of integers
vocab_size: an integer
eos_id: EOS id
Returns:
a list of integers | def clean_decodes(ids, vocab_size, eos_id=1):
ret = []
for i in ids:
if i == eos_id:
break
if i >= vocab_size:
break
ret.append(int(i))
return ret | 213,833 |
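A quick behavioural sketch of `clean_decodes` (the token ids below are arbitrary):

```python
# Decoding stops at the first EOS (id 1) ...
assert clean_decodes([5, 7, 3, 1, 9, 9], vocab_size=32000) == [5, 7, 3]
# ... or at the first out-of-vocabulary id.
assert clean_decodes([5, 7, 40000, 3], vocab_size=32000) == [5, 7]
```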
Automatically compute batch size.
Args:
sequence_length: an integer
mesh_shape: an input to mtf.convert_to_shape()
layout_rules: an input to mtf.convert_to_layout_rules()
tokens_per_split: an integer
Returns:
an integer | def auto_batch_size(sequence_length,
mesh_shape,
layout_rules,
tokens_per_split=2048):
num_splits = mtf.tensor_dim_to_mesh_dim_size(
layout_rules, mesh_shape, mtf.Dimension("batch", 0))
ret = max(1, tokens_per_split // sequence_length) * num_splits
tf.logging.info(
"AUTO_BATCH_SIZE tokens_per_split=%s num_splits=%s"
" sequence_length=%s batch_size=%s"
% (tokens_per_split, num_splits, sequence_length, ret))
return ret | 213,834 |
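The batch-size arithmetic can be checked by hand. In the real function `num_splits` is derived from the mesh shape and layout rules; here it is simply assumed to be 8 for illustration:

```python
tokens_per_split, sequence_length, num_splits = 2048, 512, 8
batch_size = max(1, tokens_per_split // sequence_length) * num_splits
assert batch_size == 32   # 4 sequences per split times 8 splits
```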
Ring-order of a mxn mesh.
Args:
m: an integer
n: an integer
Returns:
a list of mxn pairs | def _ring_2d(m, n):
if m == 1:
return [(0, i) for i in range(n)]
if n == 1:
return [(i, 0) for i in range(m)]
if m % 2 != 0:
tf.logging.warning("Odd dimension")
return [(i % m, i // m) for i in range(n * m)]
ret = [(0, 0)]
for i in range(m // 2):
for j in range(1, n):
ret.append((2 * i, j))
for j in range(n-1, 0, -1):
ret.append((2 * i + 1, j))
for i in range(m-1, 0, -1):
ret.append((i, 0))
return ret | 213,837 |
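For a small even-by-odd mesh the ring order is easy to verify by eye: every coordinate appears exactly once and consecutive entries (including last back to first) are mesh neighbours.

```python
assert _ring_2d(2, 3) == [(0, 0), (0, 1), (0, 2), (1, 2), (1, 1), (1, 0)]
```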
Grouped allreduce (summed across the given mesh axes).
Args:
x: a LaidOutTensor
mesh_axes: a list of integers
reduction_fn_string: "SUM"
Returns:
a LaidOutTensor
Raises:
ValueError: if the reduction is not yet implemented. | def allreduce(self, x, mesh_axes, reduction_fn_string):
if not mesh_axes:
return x
x = x.to_laid_out_tensor()
if reduction_fn_string == "SUM":
group_assignment = self._create_group_assignment(mesh_axes)
group_size = len(group_assignment[0])
tf_in = x.one_slice
dtype = tf_in.dtype
if dtype == tf.float32:
cast_to_float32 = False
elif dtype == tf.bfloat16:
cast_to_float32 = (
group_size > self._allreduce_in_bfloat16_max_group_size)
else:
tf.logging.info("Casting %s to float32 for allreduce" % tf_in.dtype)
cast_to_float32 = True
if cast_to_float32:
tf_in = tf.cast(tf_in, tf.float32)
tf_out = tpu_ops.cross_replica_sum(tf_in, group_assignment)
if cast_to_float32:
tf_out = tf.cast(tf_out, dtype)
return self.LaidOutTensor([tf_out])
else:
for axis in mesh_axes:
x = self.allconcat(x, axis, 0, stack=True)
x = self.LaidOutTensor(
[mtf.reduction_fn(reduction_fn_string)(x.one_slice, 0)])
return x | 213,842 |
Grouped allconcat (like MPI allgather followed by concat).
TODO(noam): inefficient - replace with a XLA allconcat when available
Args:
x: a LaidOutTensor
mesh_axis: an integer - the mesh axis along which to group
concat_axis: an integer (the Tensor axis along which to concatenate)
stack: a boolean - whether to stack instead of concat
Returns:
a LaidOutTensor | def allconcat(self, x, mesh_axis, concat_axis, stack=False):
x = x.to_laid_out_tensor()
coord = self.laid_out_pcoord(mesh_axis)
t = x.one_slice
old_shape = t.shape.as_list()
num_parts = self.shape[mesh_axis].size
t = tf.expand_dims(t, concat_axis)
t *= tf.reshape(
tf.one_hot(coord.one_slice, num_parts, dtype=t.dtype),
[num_parts if i == concat_axis else 1
for i in xrange(len(old_shape) + 1)])
if not stack:
new_shape = old_shape[:]
new_shape[concat_axis] *= num_parts
t = tf.reshape(t, new_shape)
return self.allreduce(self.LaidOutTensor([t]), [mesh_axis], "SUM") | 213,843 |
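The implementation above builds allconcat out of allreduce: each core expands its slice along the concat axis, zeroes out every position except the one matching its own mesh coordinate via a one-hot mask, and a grouped SUM-allreduce then fills in the slices from the other cores. A single-process NumPy sketch of that trick (hypothetical, no TPU or mesh involved):

```python
import numpy as np

num_parts, concat_axis = 4, 0
# slices[p] plays the role of core p's local (2, 3) slice.
slices = [np.full((2, 3), p, dtype=np.float32) for p in range(num_parts)]

masked = []
for coord, t in enumerate(slices):
    t = np.expand_dims(t, concat_axis)                  # (1, 2, 3)
    one_hot = np.zeros((num_parts, 1, 1), dtype=t.dtype)
    one_hot[coord] = 1.0                                # select this core's position
    masked.append(t * one_hot)                          # (4, 2, 3), zero elsewhere

result = sum(masked)                                    # stands in for the SUM-allreduce
result = result.reshape(num_parts * 2, 3)               # concat along axis 0 (stack=False)
assert result.shape == (8, 3)
assert np.all(result[2:4] == 1.0)                       # rows 2-3 hold core 1's slice
```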
Grouped alltoall (like MPI alltoall with splitting and concatenation).
Args:
x: a LaidOutTensor
mesh_axis: an integer - the mesh axis along which to group
split_axis: an integer (the Tensor axis along which to split)
concat_axis: an integer (the Tensor axis along which to concatenate)
Returns:
a LaidOutTensor | def alltoall(self, x, mesh_axis, split_axis, concat_axis):
x = x.to_laid_out_tensor()
t = x.one_slice
group_assignment = self._create_group_assignment([mesh_axis])
dtype = t.dtype
if dtype == tf.float32:
# There seems to be a bug with float32 alltoall.
# Do it in bfloat16 until the bug is fixed.
# TODO(noam): file a bug
t = tf.to_bfloat16(t)
t = tpu_ops.all_to_all(
t,
concat_dimension=concat_axis,
split_dimension=split_axis,
split_count=len(group_assignment[0]),
group_assignment=group_assignment)
t = tf.cast(t, dtype)
x = self.LaidOutTensor([t])
return x | 213,844 |
Execute a function in parallel on all slices.
Args:
fn: a function from tf.Tensors to tf.Tensor or a tuple of tf.Tensors.
*inputs: a list of inputs. Each input is either a LaidOutTensor or
is convertible to a tf.Tensor.
Returns:
a LaidOutTensor, or a tuple of LaidOutTensors if fn returns a tuple. | def slicewise(self, fn, *inputs):
if fn == tf.add:
assert len(inputs) == 2
if isinstance(inputs[0], mtf.LazyAllreduceSum):
# sum of LazyAllreduceSum (keep delaying the allreduce)
return inputs[0] + inputs[1]
# convert all inputs to LaidOutTensor where possible
inputs = mtf.convert_args_to_laid_out_tensors(inputs)
ret = fn(*[
x.one_slice if isinstance(x, self.LaidOutTensor) else x
for x in inputs])
if isinstance(ret, tuple):
return tuple([self.LaidOutTensor([t]) for t in ret])
else:
return self.LaidOutTensor([ret]) | 213,847 |
Call a random tf operation (e.g. random_uniform).
Args:
shape: a Shape
tf_fn: a function such as tf.random.uniform
kwargs: kwargs to pass to tf_fn, except for seed
Returns:
a LaidOutTensor | def random(self, shape, tf_fn, kwargs):
# TODO(noam): can we make things better with stateless_random?
slice_shape = self.slice_shape(shape)
x = tf_fn(slice_shape, **kwargs)
# TPU does not have seeds enabled. Sync up the
# random choices by zeroing out all but the first core per group of
# identical slices, then allreducing by group.
layout = self.tensor_layout(shape)
# we need to sync across these axes.
mesh_axes = [i for i in xrange(self.ndims)
if i not in layout.tensor_axis_to_mesh_axis]
multiplier = 1.0
for axis in mesh_axes:
multiplier *= tf.cast(
tf.equal(self.laid_out_pcoord(axis).one_slice, 0), x.dtype)
x *= multiplier
x = self.LaidOutTensor([x])
x = self.allreduce(x, mesh_axes, "SUM")
return x | 213,848 |
Turn a Tensor into a tf.Tensor.
Args:
x: a Tensor
laid_out_x: a LaidOutTensor
Returns:
a tf.Tensor | def export_to_tf_tensor(self, x, laid_out_x):
tensor_layout = self.tensor_layout(x.shape)
if not tensor_layout.is_fully_replicated:
raise NotImplementedError(
"SimdMeshImpl only supports export_to_tf_tensor of fully-replicated "
"Tensors. Try reshaping to new dimension names. "
" x.shape = %s tensor_layout=%s"
% (x.shape, tensor_layout))
return laid_out_x.one_slice | 213,849 |
Predict with the model. Returns posterior means and standard deviations at X. Note that this differs from GPy, which returns the variances.
Parameters:
X (np.ndarray) - points to run the prediction for.
with_noise (bool) - whether to add noise to the prediction. Default is True. | def predict(self, X, with_noise=True):
m, v = self._predict(X, False, with_noise)
# We can take the square root because v is just a diagonal matrix of variances
return m, np.sqrt(v) | 214,905 |
Predicts the covariance matrix for points in X.
Parameters:
X (np.ndarray) - points to run the prediction for.
with_noise (bool) - whether to add noise to the prediction. Default is True. | def predict_covariance(self, X, with_noise=True):
_, v = self._predict(X, True, with_noise)
return v | 214,906 |
Generates samples.
Parameters:
n_samples - number of samples to generate
log_p_function - a function that returns log density for a specific sample
burn_in_steps - number of burn-in steps for sampling
Returns a tuple of two arrays: (samples, log_p_function values for the samples) | def get_samples(self, n_samples, log_p_function, burn_in_steps=50):
restarts = initial_design('random', self.space, n_samples)
sampler = emcee.EnsembleSampler(n_samples, self.space.input_dim(), log_p_function)
samples, samples_log, _ = sampler.run_mcmc(restarts, burn_in_steps)
# make sure we have an array of shape (n samples, space input dim)
if len(samples.shape) == 1:
samples = samples.reshape(-1, 1)
samples_log = samples_log.reshape(-1, 1)
return samples, samples_log | 214,937 |
Decorator routes Alexa SessionEndedRequest to the wrapped view function to end the skill.
@ask.session_ended
def session_ended():
return "{}", 200
The wrapped function is registered as the session_ended view function
and renders the response for requests to the end of the session.
Arguments:
f {function} -- session_ended view function | def session_ended(self, f):
self._session_ended_view_func = f
@wraps(f)
def wrapper(*args, **kw):
self._flask_view_func(*args, **kw)
return f | 215,244 |
Decorator routes Alexa Display.ElementSelected request to the wrapped view function.
@ask.display_element_selected
def eval_element():
return "", 200
The wrapped function is registered as the display_element_selected view function
and renders the response for requests.
Arguments:
f {function} -- display_element_selected view function | def display_element_selected(self, f):
self._display_element_selected_func = f
@wraps(f)
def wrapper(*args, **kw):
self._flask_view_func(*args, **kw)
return f | 215,247 |
Returns a string that is valid JSON or YAML and contains all the
variables in every extra_vars_opt inside of extra_vars_list.
Args:
parse_kv (bool): whether to allow key=value syntax.
force_json (bool): if True, always output json. | def process_extra_vars(extra_vars_list, force_json=True):
# Read from all the different sources and put into dictionary
extra_vars = {}
extra_vars_yaml = ""
for extra_vars_opt in extra_vars_list:
# Load file content if necessary
if extra_vars_opt.startswith("@"):
with open(extra_vars_opt[1:], 'r') as f:
extra_vars_opt = f.read()
# Convert text markup to a dictionary conservatively
opt_dict = string_to_dict(extra_vars_opt, allow_kv=False)
else:
# Convert text markup to a dictionary liberally
opt_dict = string_to_dict(extra_vars_opt, allow_kv=True)
# Rolling YAML-based string combination
if any(line.startswith("#") for line in extra_vars_opt.split('\n')):
extra_vars_yaml += extra_vars_opt + "\n"
elif extra_vars_opt != "":
extra_vars_yaml += yaml.dump(
opt_dict, default_flow_style=False) + "\n"
# Combine dictionary with cumulative dictionary
extra_vars.update(opt_dict)
# Return contents in form of a string
if not force_json:
try:
# Conditions to verify it is safe to return rolling YAML string
try_dict = yaml.load(extra_vars_yaml, Loader=yaml.SafeLoader)
assert type(try_dict) is dict
debug.log('Using unprocessed YAML', header='decision', nl=2)
return extra_vars_yaml.rstrip()
except Exception:
debug.log('Failed YAML parsing, defaulting to JSON',
header='decision', nl=2)
if extra_vars == {}:
return ""
return json.dumps(extra_vars, ensure_ascii=False) | 216,641 |
Expand PyYAML's built-in dumper to support serializing OrderedDict. Return
a string as the dump result of the original data structure, which may include
OrderedDict.
Args:
data: the data structure to be dumped (serialized), which may
contain OrderedDict.
Dumper: the yaml serializer to be expanded and used.
kws: extra key-value arguments to be passed to yaml.dump. | def ordered_dump(data, Dumper=yaml.Dumper, **kws):
class OrderedDumper(Dumper):
pass
def _dict_representer(dumper, data):
return dumper.represent_mapping(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
data.items())
OrderedDumper.add_representer(OrderedDict,
_dict_representer)
return yaml.dump(data, None, OrderedDumper, **kws) | 216,642 |
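A usage sketch (the data below is made up): with a plain `yaml.dump`, an `OrderedDict` is either rejected by the SafeDumper or serialized with a Python-specific tag, whereas `ordered_dump` emits it as an ordinary mapping and preserves key order.

```python
from collections import OrderedDict
import yaml

data = OrderedDict([("name", "job1"),
                    ("vars", OrderedDict([("a", 1), ("b", 2)]))])
print(ordered_dump(data, Dumper=yaml.SafeDumper, default_flow_style=False))
# name: job1
# vars:
#   a: 1
#   b: 2
```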
Extract a tarfile described by a file object to a specified path.
Args:
fileobj (file): File object wrapping the target tarfile.
dest_path (str): Path to extract the contents of the tarfile to. | def tarfile_extract(fileobj, dest_path):
# Though this method doesn't fit cleanly into the TarPartition object,
# tarballs are only ever extracted for partitions so the logic jives
# for the most part.
tar = tarfile.open(mode='r|', fileobj=fileobj,
bufsize=pipebuf.PIPE_BUF_BYTES)
# canonicalize dest_path so the prefix check below works
dest_path = os.path.realpath(dest_path)
# list of files that need fsyncing
extracted_files = []
# Iterate through each member of the tarfile individually. We must
# approach it this way because we are dealing with a pipe and the
# getmembers() method will consume it before we extract any data.
for member in tar:
assert not member.name.startswith('/')
relpath = os.path.join(dest_path, member.name)
# Workaround issue with tar handling of symlink, see:
# https://bugs.python.org/issue12800
if member.issym():
target_path = os.path.join(dest_path, member.name)
try:
os.symlink(member.linkname, target_path)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(target_path)
os.symlink(member.linkname, target_path)
else:
raise
continue
if member.isreg() and member.size >= pipebuf.PIPE_BUF_BYTES:
cat_extract(tar, member, relpath)
else:
tar.extract(member, path=dest_path)
filename = os.path.realpath(relpath)
extracted_files.append(filename)
# avoid accumulating an unbounded list of strings which
# could be quite large for a large database
if len(extracted_files) > 1000:
_fsync_files(extracted_files)
del extracted_files[:]
tar.close()
_fsync_files(extracted_files) | 216,785 |
Return Blobstore instance for a given storage layout
Args:
layout (StorageLayout): Target storage layout. | def get_blobstore(layout):
if layout.is_s3:
from wal_e.blobstore import s3
blobstore = s3
elif layout.is_wabs:
from wal_e.blobstore import wabs
blobstore = wabs
elif layout.is_swift:
from wal_e.blobstore import swift
blobstore = swift
elif layout.is_gs:
from wal_e.blobstore import gs
blobstore = gs
elif layout.is_file:
from wal_e.blobstore import file
blobstore = file
return blobstore | 216,832 |
All functions are replaced with the same `new` function.
Args:
vroot (:obj:`Variable`): NNabla Variable
entry_variables (:obj:`Variable`): Entry variable from which the conversion starts. | def convert(self, vroot, entry_variables):
self.graph_info = GraphInfo(vroot)
self.entry_variables = entry_variables
cnt = 0
with nn.parameter_scope(self.name):
# Function loop in the forward order
for t, func in enumerate(self.graph_info.funcs):
if func.name == "BatchNormalization":
bn_func = func
# TODO: should deal with both?
if bn_func.info.args["batch_stat"] == False:
o = self._bn_linear_conversion(bn_func, cnt)
cnt += 1
continue
# Identity conversion
o = self._identity_conversion(func)
self.end_variable = o
return self.end_variable | 217,044 |
All functions are replaced with the same `new` function.
Args:
vroot (:obj:`Variable`): NNabla Variable
entry_variables (:obj:`Variable`): Entry variable from which the conversion starts. | def convert(self, vroot, entry_variables):
self.graph_info = GraphInfo(vroot)
self.entry_variables = entry_variables
with nn.parameter_scope(self.name):
# Function loop in the forward order
for func in self.graph_info.funcs:
o = self._identity_conversion(func)
self.end_variable = o
return self.end_variable | 217,049 |
Remove and get parameter by key.
Args:
key(str): Key of parameter.
Returns: ~nnabla.Variable
Parameter if key found, otherwise None. | def pop_parameter(key):
names = key.split('/')
if len(names) > 1:
with parameter_scope(names[0]):
return pop_parameter('/'.join(names[1:]))
global current_scope
param = current_scope.get(key, None)
if param is not None:
del current_scope[key]
return param | 217,063 |
Get parameter Variables under the current parameter scope.
Args:
params (dict): Internal use. User doesn't set it manually.
path (str): Internal use. User doesn't set it manually.
grad_only (bool): Retrieve all parameters under the current scope if
False, while only parameters with need_grad=True are retrieved
if True.
Returns:
dict: {:obj:`str` : :obj:`~nnabla.Variable`} | def get_parameters(params=None, path='', grad_only=True):
global current_scope
if params is None:
params = OrderedDict()
for k, v in iteritems(current_scope):
if isinstance(v, dict):
with parameter_scope(k):
params = get_parameters(
params, '/'.join([path, k]) if path else k, grad_only=grad_only)
else:
assert isinstance(v, nn.Variable)
if not grad_only or v.need_grad:
params['/'.join([path, k]) if path else k] = v
return params | 217,066 |
Load parameters from a file with the specified format.
Args:
path : path or file object | def load_parameters(path, proto=None, needs_proto=False):
_, ext = os.path.splitext(path)
if ext == '.h5':
# TODO temporary work around to suppress FutureWarning message.
import warnings
warnings.simplefilter('ignore', category=FutureWarning)
import h5py
with h5py.File(path, 'r') as hd:
keys = []
def _get_keys(name):
ds = hd[name]
if not isinstance(ds, h5py.Dataset):
# Group
return
# To preserve order of parameters
keys.append((ds.attrs.get('index', None), name))
hd.visit(_get_keys)
for _, key in sorted(keys):
ds = hd[key]
var = get_parameter_or_create(
key, ds.shape, need_grad=ds.attrs['need_grad'])
var.data.cast(ds.dtype)[...] = ds[...]
if needs_proto:
if proto is None:
proto = nnabla_pb2.NNablaProtoBuf()
parameter = proto.parameter.add()
parameter.variable_name = key
parameter.shape.dim.extend(ds.shape)
parameter.data.extend(
numpy.array(ds[...]).flatten().tolist())
parameter.need_grad = False
if ds.attrs['need_grad']:
parameter.need_grad = True
else:
if proto is None:
proto = nnabla_pb2.NNablaProtoBuf()
if ext == '.protobuf':
with open(path, 'rb') as f:
proto.MergeFromString(f.read())
set_parameter_from_proto(proto)
elif ext == '.nntxt' or ext == '.prototxt':
with open(path, 'r') as f:
text_format.Merge(f.read(), proto)
set_parameter_from_proto(proto)
elif ext == '.nnp':
try:
tmpdir = tempfile.mkdtemp()
with zipfile.ZipFile(path, 'r') as nnp:
for name in nnp.namelist():
nnp.extract(name, tmpdir)
_, ext = os.path.splitext(name)
if ext in ['.protobuf', '.h5']:
proto = load_parameters(os.path.join(
tmpdir, name), proto, needs_proto)
finally:
shutil.rmtree(tmpdir)
logger.info("Parameter load ({}): {}".format(format, path))
else:
pass # TODO: Unknown extension.
return proto | 217,068 |
Save all parameters into a file with the specified format.
Currently hdf5 and protobuf formats are supported.
Args:
path : path or file object
params (dict, optional): Parameters to be saved. Dictionary is of a parameter name (:obj:`str`) to :obj:`~nnabla.Variable`. | def save_parameters(path, params=None):
_, ext = os.path.splitext(path)
params = get_parameters(grad_only=False) if params is None else params
if ext == '.h5':
# TODO temporary work around to suppress FutureWarning message.
import warnings
warnings.simplefilter('ignore', category=FutureWarning)
import h5py
with h5py.File(path, 'w') as hd:
for i, (k, v) in enumerate(iteritems(params)):
hd[k] = v.d
hd[k].attrs['need_grad'] = v.need_grad
# To preserve order of parameters
hd[k].attrs['index'] = i
elif ext == '.protobuf':
proto = nnabla_pb2.NNablaProtoBuf()
for variable_name, variable in params.items():
parameter = proto.parameter.add()
parameter.variable_name = variable_name
parameter.shape.dim.extend(variable.shape)
parameter.data.extend(numpy.array(variable.d).flatten().tolist())
parameter.need_grad = variable.need_grad
with open(path, "wb") as f:
f.write(proto.SerializeToString())
else:
logger.critical('Only supported hdf5 or protobuf.')
assert False
logger.info("Parameter save ({}): {}".format(ext, path)) | 217,069 |
All functions are replaced with the same `new` function.
Args:
vroot (:obj:`Variable`): NNabla Variable
entry_variables (:obj:`Variable`): Entry variable from which the conversion starts. | def convert(self, vroot, entry_variables):
self.graph_info = GraphInfo(vroot)
self.entry_variables = entry_variables
with nn.parameter_scope(self.name):
# Function loop in the forward order
for t, func in enumerate(self.graph_info.funcs):
if func.name in self.inner_prod_functions:
inner_prod_func = func
o = self._fixed_point_weight_conversion(inner_prod_func)
continue
# Identity conversion
o = self._identity_conversion(func)
self.end_variable = o
if self.call_forward:
o.forward(clear_buffer=True)
return self.end_variable | 217,255 |
load
Load network information from files.
Args:
filenames (list): List of filenames.
Returns:
dict: Network information. | def load(filenames, prepare_data_iterator=True, batch_size=None, exclude_parameter=False, parameter_only=False):
class Info:
pass
info = Info()
proto = nnabla_pb2.NNablaProtoBuf()
for filename in filenames:
_, ext = os.path.splitext(filename)
# TODO: Here are some known problems.
# - Even when a protobuf file includes the network structure,
# it will not be loaded.
# - Even when a prototxt file includes parameters,
# they will not be loaded.
if ext in ['.nntxt', '.prototxt']:
if not parameter_only:
with open(filename, 'rt') as f:
try:
text_format.Merge(f.read(), proto)
except:
logger.critical('Failed to read {}.'.format(filename))
logger.critical(
'2 byte characters may be used for file name or folder name.')
raise
if len(proto.parameter) > 0:
if not exclude_parameter:
nn.load_parameters(filename)
elif ext in ['.protobuf', '.h5']:
if not exclude_parameter:
nn.load_parameters(filename)
else:
logger.info('Skip loading parameter.')
elif ext == '.nnp':
try:
tmpdir = tempfile.mkdtemp()
with zipfile.ZipFile(filename, 'r') as nnp:
for name in nnp.namelist():
_, ext = os.path.splitext(name)
if name == 'nnp_version.txt':
nnp.extract(name, tmpdir)
with open(os.path.join(tmpdir, name), 'rt') as f:
pass # TODO currently do nothing with version.
elif ext in ['.nntxt', '.prototxt']:
nnp.extract(name, tmpdir)
if not parameter_only:
with open(os.path.join(tmpdir, name), 'rt') as f:
text_format.Merge(f.read(), proto)
if len(proto.parameter) > 0:
if not exclude_parameter:
nn.load_parameters(
os.path.join(tmpdir, name))
elif ext in ['.protobuf', '.h5']:
nnp.extract(name, tmpdir)
if not exclude_parameter:
nn.load_parameters(os.path.join(tmpdir, name))
else:
logger.info('Skip loading parameter.')
finally:
shutil.rmtree(tmpdir)
default_context = None
if proto.HasField('global_config'):
info.global_config = _global_config(proto)
default_context = info.global_config.default_context
if 'cuda' in default_context.backend:
import nnabla_ext.cudnn
elif 'cuda:float' in default_context.backend:
try:
import nnabla_ext.cudnn
except:
pass
else:
import nnabla_ext.cpu
default_context = nnabla_ext.cpu.context()
comm = current_communicator()
if comm:
default_context.device_id = str(comm.rank)
if proto.HasField('training_config'):
info.training_config = _training_config(proto)
info.datasets = _datasets(
proto, prepare_data_iterator if prepare_data_iterator is not None else info.training_config.max_epoch > 0)
info.networks = _networks(proto, default_context, batch_size)
info.optimizers = _optimizers(
proto, default_context, info.networks, info.datasets)
info.monitors = _monitors(
proto, default_context, info.networks, info.datasets)
info.executors = _executors(proto, info.networks)
return info | 217,274 |
Convert a given graph.
Convert a given graph using the `converters` in the order of registration, i.e., sequentially.
Args:
vroot (:obj:`Variable`): NNabla Variable
entry_variables (:obj:`Variable`): Entry variable from which the conversion starts. | def convert(self, vroot, entry_variables):
for converter in self.converters:
vroot = converter.convert(vroot, entry_variables)
return vroot | 217,336 |
Get a unique function name.
Args:
function_type(str): Name of Function. Ex) Convolution, Affine
functions (OrderedDict of (str, Function))
Returns: str
A unique function name | def _get_unique_function_name(function_type, functions):
function_name = function_name_base = function_type
count = 2
while function_name in functions:
function_name = '{}_{}'.format(function_name_base, count)
count += 1
return function_name | 217,375 |
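Behaviour sketch: the first clash gets the suffix `_2`, the next `_3`, and names without a clash are returned unchanged.

```python
existing = {"Convolution": None, "Convolution_2": None}
assert _get_unique_function_name("Convolution", existing) == "Convolution_3"
assert _get_unique_function_name("Affine", existing) == "Affine"
```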
Get a unique variable name.
Args:
vname(str): A candidate name.
variables (OrderedDict of str and Variable)
Returns: str
A unique variable name | def _get_unique_variable_name(vname, variables):
count = 2
vname_base = vname
while vname in variables:
vname = '{}_{}'.format(vname_base, count)
count += 1
return vname | 217,376 |
Reduction along axes with sum operation.
Args:
x (Variable): An input variable.
axis (None, int or tuple of ints): Axis or axes along which the sum is
calculated. Passing the default value `None` will reduce all dimensions.
keepdims (bool): Flag whether the reduced axes are kept as a dimension with 1 element.
Returns:
~nnabla.Variable: N-D array. | def sum(x, axis=None, keepdims=False):
from .function_bases import sum as sum_base
if axis is None:
axis = range(x.ndim)
elif not hasattr(axis, '__iter__'):
axis = [axis]
return sum_base(x, axis, keepdims) | 217,402 |
Reduction along axes with mean operation.
Args:
x (Variable): An input variable.
axis (None, int or tuple of ints): Axis or axes along which mean is
calculated. Passing the default value `None` will reduce all dimensions.
keepdims (bool): Flag whether the reduced axes are kept as a dimension with 1 element.
Returns:
~nnabla.Variable: N-D array. | def mean(x, axis=None, keepdims=False):
from .function_bases import mean as mean_base
if axis is None:
axis = range(x.ndim)
elif not hasattr(axis, '__iter__'):
axis = [axis]
return mean_base(x, axis, keepdims) | 217,403 |
Reduction along axes with product operation.
Args:
x (Variable): An input variable.
axis (None, int or tuple of ints): Axis or axes along which product is
calculated. Passing the default value `None` will reduce all dimensions.
keepdims (bool): Flag whether the reduced axes are kept as a dimension with 1 element.
Returns:
~nnabla.Variable: N-D array.
Note:
Backward computation is not accurate for inputs that contain zero values. | def prod(x, axis=None, keepdims=False):
from .function_bases import prod as prod_base
if axis is None:
axis = range(x.ndim)
elif not hasattr(axis, '__iter__'):
axis = [axis]
return prod_base(x, axis, keepdims) | 217,406 |
Reduction function with given operation.
Args:
x (Variable): An input.
op (str): 'sum' or 'mean'.
Note:
This is deprecated. Use ``mean`` or ``sum`` instead. | def reduce(x, op='sum'):
import warnings
warnings.warn(
"Deprecated API. Use ``sum`` or ``mean`` instead.", DeprecationWarning)
from .function_bases import reduce_sum, reduce_mean
if op == 'sum':
return reduce_sum(x)
elif op == 'mean':
return reduce_mean(x)
raise ValueError() | 217,407 |
Split arrays at the specified axis.
It returns a number of :obj:`~nnabla.Variable` s equal to the size of the
given axis (i.e. ``x.shape[axis]``).
Args:
x(~nnabla.Variable): N-D array
axis(int): Axis
Returns: A :obj:`tuple` of :obj:`~nnabla.Variable` s
See Also:
:func:`nnabla.function_bases.split`. | def split(x, axis=0):
from .function_bases import split as split_base
return split_base(x, axis, x.shape[axis]) | 217,408 |
Download a file from URL.
Args:
url (str): URL.
output_file (str, optional): If given, the downloaded file is written to the given path.
open_file (bool): If True, it returns an opened file stream of the downloaded file.
allow_overwrite (bool): If True, it overwrites an existing file.
Returns:
Returns file object if open_file is True, otherwise None. | def download(url, output_file=None, open_file=True, allow_overwrite=False):
filename = url.split('/')[-1]
if output_file is None:
cache = os.path.join(get_data_home(), filename)
else:
cache = output_file
if os.path.exists(cache) and not allow_overwrite:
logger.info("> {} already exists.".format(cache))
logger.info("> If you have any issue when using this file, ")
logger.info("> manually remove the file and try download again.")
else:
r = request.urlopen(url)
try:
if six.PY2:
content_length = int(r.info().dict['content-length'])
elif six.PY3:
content_length = int(r.info()['Content-Length'])
except:
content_length = 0
unit = 1000000
content = b''
with tqdm(total=content_length, desc=filename, unit='B', unit_scale=True, unit_divisor=1024) as t:
while True:
data = r.read(unit)
l = len(data)
t.update(l)
if l == 0:
break
content += data
with open(cache, 'wb') as f:
f.write(content)
if not open_file:
return
return open(cache, 'rb') | 217,430 |
Get learning rate with polynomial decay based on current iteration.
Args:
iter (int): current iteration (starting with 0).
Returns:
float: Learning rate | def get_learning_rate(self, iter):
return self.init_lr * ((1.0 - iter * 1.0 / self.max_iter) ** self.power) | 217,479 |
Get learning rate with cosine decay based on current iteration.
Args:
iter (int): Current iteration (starting with 0).
Returns:
float: Learning rate | def get_learning_rate(self, iter):
return self.init_lr * ((math.cos(iter * 1.0 / (self.max_iter) * math.pi) + 1.0) * 0.5) | 217,480 |
Get learning rate with exponential decay based on current iteration.
Args:
iter (int): Current iteration (starting with 0).
Returns:
float: Learning rate | def get_learning_rate(self, iter):
return self.init_lr * (self.gamma ** (iter // self.iter_interval)) | 217,482 |
Get learning rate with multi-step decay based on current iteration.
Args:
iter (int): Current iteration (starting with 0).
Returns:
float: Learning rate | def get_learning_rate(self, iter):
lr = self.init_lr
for iter_step in self.iter_steps:
if iter >= iter_step:
lr *= self.gamma
return lr | 217,484 |
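A worked numeric example of the multi-step decay above (the values are made up): with `init_lr=0.1`, `gamma=0.1` and `iter_steps=[1000, 2000]`, the rate is 0.1 before iteration 1000, 0.01 from 1000 to 1999, and 0.001 from iteration 2000 on.

```python
init_lr, gamma, iter_steps = 0.1, 0.1, [1000, 2000]

def lr_at(iteration):
    lr = init_lr
    for step in iter_steps:
        if iteration >= step:
            lr *= gamma
    return lr

assert abs(lr_at(500) - 0.1) < 1e-12
assert abs(lr_at(1500) - 0.01) < 1e-12
assert abs(lr_at(2500) - 0.001) < 1e-12
```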
Get learning rate with linear warmup applied to the wrapped scheduler, based on current iteration.
Args:
iter (int): Current iteration (starting with 0).
Returns:
float: Learning rate | def get_learning_rate(self, iter):
lr = self.scheduler.get_learning_rate(iter)
if iter < self.warmup_iter:
lr *= (iter + 1) * 1.0 / self.warmup_iter
return lr | 217,486 |
Create input :obj:`nnabla.Variable` from :obj:`Inspec`.
Args:
inspecs (:obj:`list` of :obj:`Inspec`): A list of ``Inspec``.
Returns:
:obj:`list` of :obj:`nnabla.Variable`: Input variables. | def create_inputs(inspecs):
ret = []
for i in inspecs:
v = nn.Variable(i.shape, need_grad=i.need_grad)
v.d = i.init(v.shape)
ret.append(v)
return ret | 217,515 |
Write a single function benchmark.
Args:
fb (FunctionBenchmark): FunctionBenchmark class instance.
Before passing to this, you should call ``fb.benchmark()``. | def write(self, fb):
print('[{}.{}]'.format(fb.module, fb.func.__name__), file=self.file)
print('class = {}'.format(fb.func_ins.name), file=self.file)
print('inspecs = {}'.format(repr(fb.inspecs)), file=self.file)
print('func_args = {}'.format(repr(fb.func_args)), file=self.file)
print('func_kwargs = {}'.format(repr(fb.func_kwargs)), file=self.file)
print('ext = ({}, {})'.format(
repr(fb.ext), repr(fb.ext_kwargs)), file=self.file)
if self.setup_stat is not None:
self._write_a_stat('setup', self.setup_stat)
if self.forward_stat is not None:
self._write_a_stat('forward', self.forward_stat)
if self.backward_stat is not None:
self._write_a_stat('backward', self.backward_stat) | 217,520 |
Create a function instance and execute setup.
Args:
delete (bool): Delete buffered variables. | def _setup(self, delete=True):
if delete:
self.clear()
with nn.context_scope(self.ctx):
outputs = self.func(
*(self.inputs_f + self.func_args), **self.func_kwargs)
if not hasattr(outputs, '__iter__'):
self.outputs = [outputs]
else:
self.outputs = outputs
self.func_ins = self.outputs[0].parent
self.inputs = self.func_ins.inputs | 217,525 |
Convert an array with shape of (B, C, H, W) into a tiled image.
Args:
data (~numpy.ndarray): An array with shape of (B, C, H, W).
padsize (int): Each tile has padding with this size.
padval (float): Padding pixels are filled with this value.
Returns:
tile_image (~numpy.ndarray): A tile image. | def tile_images(data, padsize=1, padval=0):
assert(data.ndim == 4)
data = data.transpose(0, 2, 3, 1)
# force the number of filters to be square
n = int(np.ceil(np.sqrt(data.shape[0])))
padding = (
(0, n ** 2 - data.shape[0]),
(0, padsize),
(0, padsize)
) + ((0, 0),) * (data.ndim - 3)
data = np.pad(
data, padding, mode='constant', constant_values=(padval, padval))
data = data.reshape(
(n, n)
+ data.shape[1:]
).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
data = data.reshape(
(n * data.shape[1], n * data.shape[3]) + data.shape[4:])
if data.shape[2] == 1:
# Return as (H, W)
return data.reshape(data.shape[:2])
return data | 217,578 |
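A shape walkthrough for the tiling above (random data, purely illustrative): 6 RGB maps of 4x4 are laid out on a ceil(sqrt(6)) = 3x3 grid; with `padsize=1` each tile occupies 5x5 pixels, so the mosaic is 15x15x3.

```python
import numpy as np

data = np.random.rand(6, 3, 4, 4)      # (B, C, H, W)
tiles = tile_images(data)
assert tiles.shape == (15, 15, 3)
```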
Plot series data from MonitorSeries output text file.
Args:
filename (str): Path to *.series.txt file produced by :obj:`~nnabla.MonitorSeries` class.
plot_kwargs (dict, optional):
Keyword arguments passed to :func:`matplotlib.pyplot.plot`.
Note:
matplotlib package is required. | def plot_series(filename, plot_kwargs=None):
import matplotlib.pyplot as plt
if plot_kwargs is None:
plot_kwargs = {}
data = np.genfromtxt(filename, dtype='i8,f4', names=['k', 'v'])
index = data['k']
values = data['v']
plt.plot(index, values, **plot_kwargs) | 217,579 |
Plot series data from MonitorTimeElapsed output text file.
Args:
filename (str): Path to *.series.txt file produced by :obj:`~nnabla.MonitorSeries` class.
elapsed (bool): If ``True``, it plots the total elapsed time.
unit (str):
Time unit chosen from ``'s'``, ``'m'``, ``'h'``, or ``'d'``.
plot_kwargs (dict, optional):
Keyword arguments passed to :func:`matplotlib.pyplot.plot`.
Note:
matplotlib package is required. | def plot_time_elapsed(filename, elapsed=False, unit='s', plot_kwargs=None):
import matplotlib.pyplot as plt
if plot_kwargs is None:
plot_kwargs = {}
data_column = 3 if elapsed else 1
data = np.genfromtxt(filename, dtype='i8,f4',
usecols=(0, data_column), names=['k', 'v'])
index = data['k']
values = data['v']
if unit == 's':
pass
elif unit == 'm':
values /= 60
elif unit == 'h':
values /= 3600
elif unit == 'd':
values /= 3600 * 24
else:
raise ValueError('The argument `unit` must be chosen from {s|m|h|d}.')
plt.plot(index, values, **plot_kwargs) | 217,580 |
Add a value to the series.
Args:
index (int): Index.
value (float): Value. | def add(self, index, value):
self.buf.append(value)
if (index - self.flush_at) < self.interval:
return
value = np.mean(self.buf)
if self.verbose:
logger.info("iter={} {{{}}}={}".format(index, self.name, value))
if self.fd is not None:
print("{} {:g}".format(index, value), file=self.fd)
self.flush_at = index
self.buf = [] | 217,583 |
Calculate the time elapsed since this method was last called
(or since this object was created) until now.
Args:
index (int): Index to be displayed, and be used to take intervals. | def add(self, index):
if (index - self.flush_at) < self.interval:
return
now = time.time()
elapsed = now - self.lap
elapsed_total = now - self.start
it = index - self.flush_at
self.lap = now
if self.verbose:
logger.info("iter={} {{{}}}={}[sec/{}iter] {}[sec]".format(
index, self.name, elapsed, it, elapsed_total))
if self.fd is not None:
print("{} {} {} {}".format(index, elapsed,
it, elapsed_total), file=self.fd)
self.flush_at = index | 217,585 |
Add a minibatch of images to the monitor.
Args:
index (int): Index.
var (:obj:`~nnabla.Variable`, :obj:`~nnabla.NdArray`, or :obj:`~numpy.ndarray`):
A minibatch of images with ``(N, ..., C, H, W)`` format.
If C == 2, blue channel is appended with ones. If C > 3,
the array will be sliced to remove C > 3 sub-array. | def add(self, index, var):
import nnabla as nn
from nnabla.utils.image_utils import imsave
if index != 0 and (index + 1) % self.interval != 0:
return
if isinstance(var, nn.Variable):
data = var.d.copy()
elif isinstance(var, nn.NdArray):
data = var.data.copy()
else:
assert isinstance(var, np.ndarray)
data = var.copy()
assert data.ndim > 2
channels = data.shape[-3]
data = data.reshape(-1, *data.shape[-3:])
data = data[:min(data.shape[0], self.num_images)]
data = self.normalize_method(data)
if channels > 3:
data = data[:, :3]
elif channels == 2:
data = np.concatenate(
[data, np.ones((data.shape[0], 1) + data.shape[-2:])], axis=1)
path_tmpl = os.path.join(self.save_dir, '{:06d}-{}.png')
for j in range(min(self.num_images, data.shape[0])):
img = data[j].transpose(1, 2, 0)
if img.shape[-1] == 1:
img = img[..., 0]
path = path_tmpl.format(index, '{:03d}'.format(j))
imsave(path, img)
if self.verbose:
logger.info("iter={} {{{}}} are written to {}.".format(
index, self.name, path_tmpl.format(index, '*'))) | 217,588 |
All functions are replaced with the same `new` function.
Args:
vroot (:obj:`Variable`): NNabla Variable
entry_variables (:obj:`Variable`): Entry variable from which the conversion starts. | def convert(self, vroot, entry_variables):
self.graph_info = GraphInfo(vroot)
self.entry_variables = entry_variables
with nn.parameter_scope(self.name):
# Function loop in the forward order
for t, func in enumerate(self.graph_info.funcs):
# Activation check
if func.name in self.activation_functions:
activation_func = func
o = self._fixed_point_activation_conversion(
activation_func)
continue
# Identity conversion
o = self._identity_conversion(func)
self.end_variable = o
return self.end_variable | 217,626 |
Context for dynamic graph execution mode.
Args:
auto (bool): Whether forward computation is executed during a
computation graph construction.
Returns: bool | def auto_forward(auto=True):
global __auto_forward_state
prev = __auto_forward_state
__auto_forward_state = auto
yield
__auto_forward_state = prev | 217,636 |
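A usage sketch, assuming this generator is wrapped as a context manager the way NNabla exposes it (`nn.auto_forward()`): inside the block the graph is executed eagerly while it is built, and the previous mode is restored on exit.

```python
import nnabla as nn
import nnabla.functions as F

x = nn.Variable.from_numpy_array([[1.0, -2.0]])
with nn.auto_forward():
    y = F.relu(x)          # forward runs immediately, no explicit y.forward() needed
print(y.d)                 # [[1. 0.]]
```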
Manually print profiling result.
Args:
reset (bool): If False is specified, the profiling statistics collected so
far are maintained. If ``True`` (default),
:obj:`~reset_stats`
is called to reset the profiling statistics. | def print_stats(self, reset=True):
if not self.ncalls:
return
stats = self.stats
code = self.fn.__code__
print('--- Function Profiling ---')
print('File "{}", line {}, function {}'.format(
code.co_filename,
code.co_firstlineno,
self.fn.__name__))
stats.sort_stats(*self.sort_keys)
stats.print_stats(*self.print_restrictions)
print('--------------------------')
if reset:
self.reset_stats() | 217,640 |
Save the graph to a given file path.
Args:
vleaf (`nnabla.Variable`): End variable. All variables and functions which can be traversed from this variable are shown in the result.
fpath (`str`): The file path used to save.
cleanup (`bool`): Clean up the source file after rendering. Default is False.
format (str):
Force overwrite ``format`` (``'pdf'``, ``'png'``, ...) configuration. | def save(self, vleaf, fpath, cleanup=False, format=None):
graph = self.create_graphviz_digraph(vleaf, format=format)
graph.render(fpath, cleanup=cleanup) | 217,656 |
View the graph.
Args:
vleaf (`nnabla.Variable`): End variable. All variables and functions which can be traversed from this variable are shown in the result.
fpath (`str`): The file path used to save.
cleanup (`bool`): Clean up the source file after rendering. Default is True.
format (str):
Force overwrite ``format`` (``'pdf'``, ``'png'``, ...) configuration. | def view(self, vleaf, fpath=None, cleanup=True, format=None):
graph = self.create_graphviz_digraph(vleaf, format=format)
graph.view(fpath, cleanup=cleanup) | 217,657 |
Get parameters.
Args:
grad_only (bool, optional): Return parameters with `need_grad` option as `True`.
If you set this option to `False`, all parameters are returned. Default is `True`.
Returns:
dict: The dictionary of parameter name (`str`) to Variable (:obj:`~nnabla.Variable`). | def get_parameters(self, grad_only=True):
params = OrderedDict()
for v in self.get_modules():
if not isinstance(v, tuple):
continue
prefix, module = v
for k, v in module.__dict__.items():
if not isinstance(v, nn.Variable):
continue
pname = k
name = "{}/{}".format(prefix, pname)
if grad_only and v.need_grad == False:
continue
params[name] = v
return params | 217,661 |
Save all parameters into a file with the specified format.
Currently hdf5 and protobuf formats are supported.
Args:
path : path or file object
grad_only (bool, optional): Return parameters with `need_grad` option as `True`. | def save_parameters(self, path, grad_only=False):
params = self.get_parameters(grad_only=grad_only)
nn.save_parameters(path, params) | 217,663 |
Load parameters from a file with the specified format.
Args:
path : path or file object | def load_parameters(self, path):
nn.load_parameters(path)
for v in self.get_modules():
if not isinstance(v, tuple):
continue
prefix, module = v
for k, v in module.__dict__.items():
if not isinstance(v, nn.Variable):
continue
pname = k
name = "{}/{}".format(prefix, pname)
# Substitute
param0 = v
param1 = nn.parameter.pop_parameter(name)
if param1 is None:
raise ValueError(
"Model does not have {} parameter.".format(name))
param0.d = param1.d.copy()
nn.logger.info("`{}` loaded.".format(name)) | 217,664 |
All functions are replaced with the same `new` function.
Args:
vroot (:obj:`Variable`): NNabla Variable
entry_variables (:obj:`Variable`): Entry variable from which the conversion starts. | def convert(self, vroot, entry_variables):
self.graph_info = GraphInfo(vroot)
self.entry_variables = entry_variables
with nn.parameter_scope(self.name):
# Function loop in the forward order
for t, func in enumerate(self.graph_info.funcs):
# TODO: error check
# Batch normalization check, then skip
if func.name == "BatchNormalization":
i0 = func.inputs[0]
bn_func = func
# Test mode check
if bn_func.info.args["batch_stat"] == False:
# `Target Func -> BN` check from BN
if i0.parent.info.type_name in self.inner_prod_functions:
nn.logger.info("{} is skipped.".format(func.name))
continue
# `Target Func -> BN` conversion
if func.name in self.inner_prod_functions:
inner_prod_func = func
o0 = inner_prod_func.outputs[0]
fs = self.graph_info.variable_to_funcs[o0]
# No branch check #TODO: branching check (really needed?)
if fs is not None and len(fs) == 1:
# `Target Func -> BN` check
bn_func = fs[0]
if bn_func.name == "BatchNormalization":
# Test mode check
if bn_func.info.args["batch_stat"] == False:
# Perform `Target Func -> BN` conversion
nn.logger.info("BatchNormalization parameters are folded to "
"the preceding convolution.")
o = self._inner_prod_bn_conversion(
inner_prod_func, bn_func)
continue
# Identity conversion
o = self._identity_conversion(func)
self.end_variable = o
return self.end_variable | 217,666 |
Calculate length of a string for a given built-in font.
Args:
fontname: name of the font.
fontsize: size of font in points.
encoding: encoding to use (0=Latin, 1=Greek, 2=Cyrillic).
Returns:
(float) length of text. | def getTextlength(text, fontname="helv", fontsize=11, encoding=0):
fontname = fontname.lower()
basename = Base14_fontdict.get(fontname, None)
glyphs = None
if basename == "Symbol":
glyphs = symbol_glyphs
if basename == "ZapfDingbats":
glyphs = zapf_glyphs
if glyphs is not None:
w = sum([glyphs[ord(c)][1] if ord(c)<256 else glyphs[183][1] for c in text])
return w * fontsize
if fontname in Base14_fontdict.keys():
return TOOLS.measure_string(text, Base14_fontdict[fontname], fontsize, encoding)
if fontname in ["china-t", "china-s",
"china-ts", "china-ss",
"japan", "japan-s",
"korea", "korea-s"]:
return len(text) * fontsize
raise ValueError("Font '%s' is unsupported" % fontname) | 218,122 |
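A usage sketch: measure the width (in points) of a string in 11 pt Helvetica, e.g. to check whether it fits a given box before inserting the text.

```python
width = getTextlength("Hello, world!", fontname="helv", fontsize=11)
print(round(width, 2), "points")
```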
Returns the parsed table of a page in a PDF / (open) XPS / EPUB document.
Parameters:
page: fitz.Page object
bbox: containing rectangle, list of numbers [xmin, ymin, xmax, ymax]
columns: optional list of column coordinates. If None, columns are generated
Returns the parsed table as a list of lists of strings.
The number of rows is determined automatically
from parsing the specified rectangle. | def ParseTab(page, bbox, columns = None):
tab_rect = fitz.Rect(bbox).irect
xmin, ymin, xmax, ymax = tuple(tab_rect)
if tab_rect.isEmpty or tab_rect.isInfinite:
print("Warning: incorrect rectangle coordinates!")
return []
if type(columns) is not list or columns == []:
coltab = [tab_rect.x0, tab_rect.x1]
else:
coltab = sorted(columns)
if xmin < min(coltab):
coltab.insert(0, xmin)
if xmax > coltab[-1]:
coltab.append(xmax)
words = page.getTextWords()
if words == []:
print("Warning: page contains no text")
return []
alltxt = []
# get words contained in table rectangle and distribute them into columns
for w in words:
ir = fitz.Rect(w[:4]).irect # word rectangle
if ir in tab_rect:
cnr = 0 # column index
for i in range(1, len(coltab)): # loop over column coordinates
if ir.x0 < coltab[i]: # word start left of column border
cnr = i - 1
break
alltxt.append([ir.x0, ir.y0, ir.x1, cnr, w[4]])
if alltxt == []:
print("Warning: no text found in rectangle!")
return []
alltxt.sort(key = itemgetter(1)) # sort words vertically
# create the table / matrix
spantab = [] # the output matrix
for y, zeile in groupby(alltxt, itemgetter(1)):
schema = [""] * (len(coltab) - 1)
for c, words in groupby(zeile, itemgetter(3)):
entry = " ".join([w[4] for w in words])
schema[c] = entry
spantab.append(schema)
return spantab | 218,367 |
Show page number 'pno' of PDF 'src' in rectangle 'rect'.
Args:
rect: (rect-like) where to place the source image
src: (document) source PDF
pno: (int) source page number
overlay: (bool) put in foreground
keep_proportion: (bool) do not change width-height-ratio
rotate: (int) degrees (multiple of 90)
clip: (rect-like) part of source page rectangle
Returns:
xref of inserted object (for reuse) | def showPDFpage(
page,
rect,
src,
pno=0,
overlay=True,
keep_proportion=True,
rotate=0,
reuse_xref=0,
clip = None,
):
def calc_matrix(sr, tr, keep=True, rotate=0):
# calc center point of source rect
smp = Point((sr.x1 + sr.x0) / 2., (sr.y1 + sr.y0) / 2.)
# calc center point of target rect
tmp = Point((tr.x1 + tr.x0) / 2., (tr.y1 + tr.y0) / 2.)
rot = Matrix(rotate) # rotation matrix
# m moves to (0, 0), then rotates
m = Matrix(1, 0, 0, 1, -smp.x, -smp.y) * rot
sr1 = sr * m # resulting source rect to calculate scale factors
fw = tr.width / sr1.width # scale the width
fh = tr.height / sr1.height # scale the height
if keep:
fw = fh = min(fw, fh) # take min if keeping aspect ratio
m *= Matrix(fw, fh) # concat scale matrix
m *= Matrix(1, 0, 0, 1, tmp.x, tmp.y) # concat move to target center
return m
CheckParent(page)
doc = page.parent
if not doc.isPDF or not src.isPDF:
raise ValueError("not a PDF")
rect = page.rect & rect # intersect with page rectangle
if rect.isEmpty or rect.isInfinite:
raise ValueError("rect must be finite and not empty")
if reuse_xref > 0:
warnings.warn("ignoring 'reuse_xref'", DeprecationWarning)
while pno < 0: # support negative page numbers
pno += len(src)
src_page = src[pno] # load source page
if len(src_page._getContents()) == 0:
raise ValueError("nothing to show - source page empty")
tar_rect = rect * ~page._getTransformation() # target rect in PDF coordinates
src_rect = src_page.rect if not clip else src_page.rect & clip # source rect
if src_rect.isEmpty or src_rect.isInfinite:
raise ValueError("clip must be finite and not empty")
src_rect = src_rect * ~src_page._getTransformation() # ... in PDF coord
matrix = calc_matrix(src_rect, tar_rect, keep=keep_proportion, rotate=rotate)
# list of existing /Form /XObjects
ilst = [i[1] for i in doc._getPageInfo(page.number, 3)]
# create a name that is not in that list
n = "fzFrm"
i = 0
_imgname = n + "0"
while _imgname in ilst:
i += 1
_imgname = n + str(i)
isrc = src._graft_id # used as key for graftmaps
if doc._graft_id == isrc:
raise ValueError("source document must not equal target")
# check if we have already copied objects from this source doc
if isrc in doc.Graftmaps: # yes: use the old graftmap
gmap = doc.Graftmaps[isrc]
else: # no: make a new graftmap
gmap = Graftmap(doc)
doc.Graftmaps[isrc] = gmap
# take note of generated xref for automatic reuse
pno_id = (isrc, pno) # id of src[pno]
xref = doc.ShownPages.get(pno_id, 0)
xref = page._showPDFpage(
src_page,
overlay=overlay,
matrix=matrix,
xref=xref,
clip=src_rect,
graftmap=gmap,
_imgname=_imgname,
)
doc.ShownPages[pno_id] = xref
return xref | 218,404 |
Insert an image in a rectangle on the current page.
Notes:
Exactly one of filename, pixmap or stream must be provided.
Args:
rect: (rect-like) where to place the source image
filename: (str) name of an image file
pixmap: (obj) a Pixmap object
stream: (bytes) an image in memory
rotate: (int) degrees (multiple of 90)
keep_proportion: (bool) whether to maintain aspect ratio
overlay: (bool) put in foreground | def insertImage(page, rect, filename=None, pixmap=None, stream=None, rotate=0,
keep_proportion = True,
overlay=True):
def calc_matrix(fw, fh, tr, rotate=0):
# center point of target rect
tmp = Point((tr.x1 + tr.x0) / 2., (tr.y1 + tr.y0) / 2.)
rot = Matrix(rotate) # rotation matrix
# matrix m moves image center to (0, 0), then rotates
m = Matrix(1, 0, 0, 1, -0.5, -0.5) * rot
#sr1 = sr * m # resulting image rect
# --------------------------------------------------------------------
# calculate the scale matrix
# --------------------------------------------------------------------
small = min(fw, fh) # factor of the smaller side
if rotate not in (0, 180):
fw, fh = fh, fw # width / height exchange their roles
if fw < 1: # portrait
if (float(tr.width) / fw) > (float(tr.height) / fh):
w = tr.height * small
h = tr.height
else:
w = tr.width
h = tr.width / small
elif fw != fh: # landscape
if (float(tr.width) / fw) > (float(tr.height) / fh):
w = tr.height / small
h = tr.height
else:
w = tr.width
h = tr.width * small
else: # (treated as) equal sided
w = tr.width
h = tr.height
m *= Matrix(w, h) # concat scale matrix
m *= Matrix(1, 0, 0, 1, tmp.x, tmp.y) # concat move to target center
return m
# -------------------------------------------------------------------------
CheckParent(page)
doc = page.parent
if not doc.isPDF:
raise ValueError("not a PDF")
if bool(filename) + bool(stream) + bool(pixmap) != 1:
raise ValueError("need exactly one of filename, pixmap, stream")
if filename and not os.path.exists(filename):
raise FileNotFoundError("No such file: '%s'" % filename)
elif stream and type(stream) not in (bytes, bytearray, io.BytesIO):
raise ValueError("stream must be bytes-like or BytesIO")
elif pixmap and type(pixmap) is not Pixmap:
raise ValueError("pixmap must be a Pixmap")
while rotate < 0:
rotate += 360
while rotate > 360:
rotate -= 360
if rotate not in (0, 90, 180, 270):
raise ValueError("bad rotate value")
r = page.rect & rect
if r.isEmpty or r.isInfinite:
raise ValueError("rect must be finite and not empty")
_imgpointer = None
if keep_proportion is True: # for this we need the image dimension
if pixmap: # this is the easy case
w = pixmap.width
h = pixmap.height
elif stream: # use tool to access the information
# we also pass through the generated fz_image address
img_size = TOOLS.image_size(stream, keep_image=True)
w, h = img_size[:2]
stream = None # make sure this arg is NOT used
_imgpointer = img_size[-1] # pointer to fz_image
else: # worst case, we need to read the file ourselves
img = open(filename, "rb")
stream = img.read()
img_size = TOOLS.image_size(stream, keep_image=True)
w, h = img_size[:2]
_imgpointer = img_size[-1] # pointer to fz_image
stream = None # make sure this arg is NOT used
filename = None # make sure this arg is NOT used
img.close() # close image file
maxf = max(w, h).__float__()
fw = w / maxf
fh = h / maxf
else:
fw = fh = 1.0
clip = r * ~page._getTransformation() # target rect in PDF coordinates
matrix = calc_matrix(fw, fh, clip, rotate=rotate)
ilst = [i[7] for i in doc.getPageImageList(page.number)]
n = "fzImg"
i = 0
_imgname = n + "0"
while _imgname in ilst:
i += 1
_imgname = n + str(i)
page._insertImage(
filename=filename, # image in file
pixmap=pixmap, # image in pixmap
stream=stream, # image in memory
matrix=matrix, # generated matrix
overlay=overlay,
_imgname=_imgname, # generated PDF resource name
_imgpointer=_imgpointer, # address of fz_image
) | 218,405 |
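A minimal usage sketch for the entry above, assuming the helper is bound as Page.insertImage in the usual PyMuPDF (fitz) fashion; the file names and rectangle are hypothetical.

import fitz  # PyMuPDF

doc = fitz.open("input.pdf")                 # hypothetical source document
page = doc[0]
rect = fitz.Rect(50, 50, 250, 250)           # target rectangle in page coordinates
# exactly one of filename / pixmap / stream may be supplied
page.insertImage(rect, filename="logo.png", rotate=0, keep_proportion=True)
doc.save("output.pdf")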
Search for a string on a page.
Args:
text: string to be searched for
hit_max: maximum hits
quads: return quads instead of rectangles
Returns:
a list of rectangles or quads, each containing one occurrence. | def searchFor(page, text, hit_max = 16, quads = False):
CheckParent(page)
dl = page.getDisplayList() # create DisplayList
tp = dl.getTextPage() # create TextPage
# return list of hit rectangles (or quads)
rlist = tp.search(text, hit_max = hit_max, quads = quads)
dl = None
tp = None
return rlist | 218,406 |
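A short usage sketch under the usual PyMuPDF bindings; addHighlightAnnot is the companion annotation helper and the file names are hypothetical.

import fitz

doc = fitz.open("input.pdf")                      # hypothetical file
page = doc[0]
for r in page.searchFor("invoice", hit_max=16):   # list of fitz.Rect objects
    page.addHighlightAnnot(r)                     # mark each occurrence
doc.save("highlighted.pdf")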
Search for a string on a page.
Args:
pno: page number
text: string to be searched for
hit_max: maximum hits
quads: return quads instead of rectangles
Returns:
a list of rectangles or quads, each containing an occurrence. | def searchPageFor(doc, pno, text, hit_max=16, quads=False):
return doc[pno].searchFor(text, hit_max = hit_max, quads = quads) | 218,407 |
Return the text blocks on a page.
Notes:
Lines in a block are concatenated with line breaks.
Args:
images: (bool) also return meta data of any images.
Image data are never returned with this method.
Returns:
A list of blocks. Each item contains the block's rectangle coordinates, concatenated
text lines, block type and running block number. | def getTextBlocks(page, images=False):
CheckParent(page)
dl = page.getDisplayList()
flags = TEXT_PRESERVE_LIGATURES | TEXT_PRESERVE_WHITESPACE
if images:
flags |= TEXT_PRESERVE_IMAGES
tp = dl.getTextPage(flags)
l = tp._extractTextBlocks_AsList()
del tp
del dl
return l | 218,408 |
Extract a document page's text.
Args:
output: (str) text, html, dict, json, rawdict, xhtml or xml.
Returns:
the output of TextPage methods extractText, extractHTML, extractDICT, extractJSON, extractRAWDICT, extractXHTML or extractXML, respectively. Unrecognized or misspelled output values fall back to "text". | def getText(page, output = "text"):
CheckParent(page)
dl = page.getDisplayList()
# available output types
formats = ("text", "html", "json", "xml", "xhtml", "dict", "rawdict")
# choose which of them also include images in the TextPage
images = (0, 1, 1, 0, 1, 1, 1) # controls image inclusion in text page
try:
f = formats.index(output.lower())
except ValueError:
f = 0
flags = TEXT_PRESERVE_LIGATURES | TEXT_PRESERVE_WHITESPACE
if images[f] :
flags |= TEXT_PRESERVE_IMAGES
tp = dl.getTextPage(flags) # TextPage with / without images
t = tp._extractText(f)
del dl
del tp
return t | 218,410 |
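A brief sketch of the different output flavors, under the usual PyMuPDF bindings; the file name is hypothetical.

import fitz

doc = fitz.open("input.pdf")        # hypothetical file
page = doc[0]
plain = page.getText()              # default "text" output
html = page.getText("html")         # formats with image inclusion enabled embed image data
info = page.getText("dict")         # nested dict of blocks / lines / spans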
Create pixmap of page.
Args:
matrix: Matrix for transformation (default: Identity).
colorspace: (str/Colorspace) rgb, cmyk or gray - case ignored, default csRGB.
clip: (irect-like) restrict rendering to this area.
alpha: (bool) include alpha channel | def getPixmap(page, matrix = None, colorspace = csRGB, clip = None,
alpha = True):
CheckParent(page)
# determine required colorspace
cs = colorspace
if type(colorspace) is str:
if colorspace.upper() == "GRAY":
cs = csGRAY
elif colorspace.upper() == "CMYK":
cs = csCMYK
else:
cs = csRGB
if cs.n not in (1,3,4):
raise ValueError("unsupported colorspace")
dl = page.getDisplayList() # create DisplayList
if clip:
scissor = Rect(clip)
else:
scissor = None
pix = dl.getPixmap(matrix = matrix,
colorspace = cs,
alpha = alpha,
clip = scissor)
del dl
return pix | 218,411 |
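A rendering sketch under the usual PyMuPDF bindings; writePNG was the pre-1.18 name of the PNG writer, and the file names are hypothetical.

import fitz

doc = fitz.open("input.pdf")                      # hypothetical file
page = doc[0]
mat = fitz.Matrix(2, 2)                           # zoom factor 2 in x and y
pix = page.getPixmap(matrix=mat, colorspace="gray", alpha=False)
pix.writePNG("page0.png")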
Create pixmap of document page by page number.
Notes:
Convenience function calling page.getPixmap.
Args:
pno: (int) page number
matrix: Matrix for transformation (default: Identity).
colorspace: (str/Colorspace) rgb, cmyk or gray - case ignored, default csRGB.
clip: (irect-like) restrict rendering to this area.
alpha: (bool) include alpha channel | def getPagePixmap(doc, pno, matrix = None, colorspace = csRGB,
clip = None, alpha = True):
return doc[pno].getPixmap(matrix = matrix, colorspace = colorspace,
clip = clip, alpha = alpha) | 218,412 |
Create a table of contents.
Args:
simple: a bool to control output. Returns a list, where each entry consists of outline level, title, page number and link destination (if simple = False). For details see PyMuPDF's documentation. | def getToC(doc, simple = True):
def recurse(olItem, liste, lvl):
while olItem:
if olItem.title:
title = olItem.title
else:
title = " "
if not olItem.isExternal:
if olItem.uri:
page = olItem.page + 1
else:
page = -1
else:
page = -1
if not simple:
link = getLinkDict(olItem)
liste.append([lvl, title, page, link])
else:
liste.append([lvl, title, page])
if olItem.down:
liste = recurse(olItem.down, liste, lvl+1)
olItem = olItem.next
return liste
# check if document is open and not encrypted
if doc.isClosed:
raise ValueError("illegal operation on closed document")
olItem = doc.outline
if not olItem: return []
lvl = 1
liste = []
return recurse(olItem, liste, lvl) | 218,415 |
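A small sketch printing the simple form of the table of contents; the file name is hypothetical.

import fitz

doc = fitz.open("input.pdf")        # hypothetical file
for lvl, title, page in doc.getToC(simple=True):
    print("%s%s (page %d)" % ("  " * (lvl - 1), title, page))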
Draw a circle sector given circle center, one arc end point and the angle of the arc.
Parameters:
center -- center of circle
point -- arc end point
beta -- angle of arc (degrees)
fullSector -- connect arc ends with center | def drawSector(page, center, point, beta, color=None, fill=None,
dashes=None, fullSector=True, morph=None,
width=1, closePath=False, roundCap=False, overlay=True):
img = page.newShape()
Q = img.drawSector(Point(center), Point(point), beta, fullSector=fullSector)
img.finish(color=color, fill=fill, dashes=dashes, width=width,
roundCap=roundCap, morph=morph, closePath=closePath)
img.commit(overlay)
return Q | 218,434 |
Set a float option.
Args:
option (str): name of option.
value (float): value of the option.
Raises:
TypeError: Value must be a float. | def set_float(self, option, value):
if not isinstance(value, float):
raise TypeError("Value must be a float")
self.options[option] = value | 218,558 |
Set an integer option.
Args:
option (str): name of option.
value (int): value of the option.
Raises:
ValueError: Value must be an integer. | def set_integer(self, option, value):
try:
int_value = int(value)
except (TypeError, ValueError):
raise ValueError("%s must be an integer" % option)
self.options[option] = int_value | 218,559
Set a boolean option.
Args:
option (str): name of option.
value (bool): value of the option.
Raises:
TypeError: Value must be a boolean. | def set_boolean(self, option, value):
if not isinstance(value, bool):
raise TypeError("%s must be a boolean" % option)
self.options[option] = str(value).lower() | 218,560 |
Set a string option.
Args:
option (str): name of option.
value (str): value of the option.
Raises:
TypeError: Value must be a string. | def set_string(self, option, value):
if not isinstance(value, str):
raise TypeError("%s must be a string" % option)
self.options[option] = value | 218,561 |
Set the MetricsGraphics chart type.
Allowed charts are: line, histogram, point, and bar
Args:
value (str): chart type.
Raises:
ValueError: Not a valid chart type. | def chart_type(self, value):
if value not in self._allowed_charts:
raise ValueError("Not a valid chart type")
self.options["chart_type"] = value | 218,562 |
Set the custom line color map.
Args:
values (list): list of colors.
Raises:
TypeError: Custom line color map must be a list. | def custom_line_color_map(self, values):
if not isinstance(values, list):
raise TypeError("custom_line_color_map must be a list")
self.options["custom_line_color_map"] = values | 218,563 |
Set the legend labels.
Args:
values (list): list of labels.
Raises:
TypeError: legend must be a list of labels. | def legend(self, values):
if not isinstance(values, list):
raise TypeError("legend must be a list of labels")
self.options["legend"] = values | 218,564 |
Set the markers.
Args:
values (list): list of marker objects.
Raises:
TypeError: Markers must be a list of objects. | def markers(self, values):
if not isinstance(values, list):
raise TypeError("Markers must be a list of objects")
self.options["markers"] = values | 218,565 |
Show confidence band?
See metricsgraphics documentation
Args:
value (list): list of strings
Raises:
TypeError: show_confidence_band must be a list of strings. | def show_confidence_band(self, value):
if not isinstance(value, list):
raise TypeError("show_confidence_band must be a list of strings")
self.options["show_confidence_band"] = value | 218,566
Set margin of the chart.
Args:
top (int): size of top margin in pixels.
bottom (int): size of bottom margin in pixels.
left (int): size of left margin in pixels.
right (int): size of right margin in pixels.
buffer_size (int): buffer size in pixels between the chart and margins. | def set_margin(self, top=40, bottom=30, left=50, right=10, buffer_size=8):
self.set_integer("top", top)
self.set_integer("bottom", bottom)
self.set_integer("left", left)
self.set_integer("right", right)
self.set_integer("buffer", buffer_size) | 218,588 |
Set the size of the chart.
Args:
height (int): height in pixels.
width (int): width in pixels.
height_threshold (int): height threshold in pixels
width_threshold (int): width threshold in pixels | def set_size(self, height=220, width=350,
height_threshold=120,
width_threshold=160):
self.set_integer("height", height)
self.set_integer("width", width)
self.set_integer("small_height_threshold", height_threshold)
self.set_integer("small_width_threshold", width_threshold) | 218,589 |
Formats props for the React template.
Args:
props (dict): properties to be written to the template.
Returns:
Two strings: one containing the JavaScript variable definitions and the other
containing the props to be fed to the React template. | def format_props(props, prop_template="{{k}} = { {{v}} }", delim="\n"):
vars_ = []
props_ = []
for k, v in list(props.items()):
vars_.append(Template("var {{k}} = {{v}};").render(k=k,v=json.dumps(v)))
props_.append(Template(prop_template).render(k=k, v=k))
return "\n".join(vars_), delim.join(props_) | 218,591 |
register UILayout with the flask app
create a function that will send props for each UILayout
Args:
layouts (dict): dict of UILayout objects by name
app (object): flask app
url (string): address of props; default is /api/props/ | def register_layouts(layouts, app, url="/api/props/", brand="Pyxley"):
def props(name):
if name not in layouts:
# cast as list for python3
name = list(layouts.keys())[0]
return jsonify({"layouts": layouts[name]["layout"]})
def apps():
paths = []
for i, k in enumerate(layouts.keys()):
if i == 0:
paths.append({
"path": "/",
"label": layouts[k].get("title", k)
})
paths.append({
"path": "/"+k,
"label": layouts[k].get("title", k)
})
return jsonify({"brand": brand, "navlinks": paths})
app.add_url_rule(url+"<string:name>/", view_func=props)
app.add_url_rule(url, view_func=apps) | 218,605 |
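A wiring sketch assuming register_layouts is in scope; the layout dicts are placeholders.

from flask import Flask

app = Flask(__name__)
layouts = {
    "steps": {"title": "Steps", "layout": {"rows": []}},   # placeholder layout
    "sleep": {"title": "Sleep", "layout": {"rows": []}},   # placeholder layout
}
register_layouts(layouts, app, url="/api/props/", brand="Demo")
# GET /api/props/        -> {"brand": "Demo", "navlinks": [...]}
# GET /api/props/steps/  -> {"layouts": {"rows": []}}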
create a mg line plot
Args:
df (pandas.DataFrame): data to plot | def create_line_plot(df):
fig = Figure("/mg/line_plot/", "mg_line_plot")
fig.graphics.transition_on_update(True)
fig.graphics.animate_on_load()
fig.layout.set_size(width=450, height=200)
fig.layout.set_margin(left=40, right=40)
return LineChart(df, fig, "Date", ["value"],
init_params={"Data": "Steps"}, timeseries=True) | 218,630 |
create a mg histogram
Args:
df (pandas.DataFrame): data to plot | def create_histogram(df):
fig = Figure("/mg/histogram/", "mg_histogram")
fig.layout.set_size(width=450, height=200)
fig.layout.set_margin(left=40, right=40)
fig.graphics.animate_on_load()
# Make a histogram with 20 bins
return Histogram(df, fig, "value", 20, init_params={"Data": "Steps"}) | 218,631 |
create a mg scatter plot
Args:
df (pandas.DataFrame): data to plot | def create_scatterplot(df):
fig = Figure("/mg/scatter/", "mg_scatter")
fig.layout.set_size(width=450, height=200)
fig.layout.set_margin(left=40, right=40)
fig.graphics.animate_on_load()
init_params = {"Data": "Steps"}
def get_data():
y = request.args.get("Data", "Steps")
return jsonify(ScatterPlot.to_json(df, "Steps", y))
# build the scatter plot; the y column is chosen via the request args
return ScatterPlot(df, fig, "Steps", "Distance",
init_params=init_params, route_func=get_data) | 218,632
Set x-axis limits.
Accepts a two-element list to set the x-axis limits.
Args:
xlim (list): lower and upper bounds
Raises:
ValueError: xlim must contain two elements
ValueError: Min must be less than max | def set_xlim(self, xlim):
if len(xlim) != 2:
raise ValueError("xlim must contain two elements")
if xlim[1] < xlim[0]:
raise ValueError("Min must be less than Max")
self.options["min_x"] = xlim[0]
self.options["max_x"] = xlim[1] | 218,635 |
Set y-axis limits.
Accepts a two-element list to set the y-axis limits.
Args:
ylim (list): lower and upper bounds
Raises:
ValueError: ylim must contain two elements
ValueError: Min must be less than max | def set_ylim(self, ylim):
if len(ylim) != 2:
raise ValueError("ylim must contain two elements")
if ylim[1] < ylim[0]:
raise ValueError("Min must be less than Max")
self.options["min_y"] = ylim[0]
self.options["max_y"] = ylim[1] | 218,636 |
basic line plot
dataframe to json for a line plot
Args:
df (pandas.DataFrame): input dataframe
xypairs (list): list of tuples containing column names
mode (str): plotly.js mode (e.g. lines)
layout (dict): layout parameters
config (dict): config parameters | def line_plot(df, xypairs, mode, layout={}, config=_BASE_CONFIG):
if df.empty:
return {
"x": [],
"y": [],
"mode": mode
}
_data = []
for x, y in xypairs:
if (x in df.columns) and (y in df.columns):
_data.append(
{
"x": df[x].values.tolist(),
"y": df[y].values.tolist(),
"mode": mode
}
)
return {
"data": _data,
"layout": layout,
"config": config
} | 218,639 |
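A small sketch of the payload produced for a non-empty frame; the column names are hypothetical.

import pandas as pd

df = pd.DataFrame({"x": [0, 1, 2, 3], "y": [1.0, 0.5, 2.0, 1.5]})
payload = line_plot(df, [("x", "y")], mode="lines", layout={"title": "demo"})
# payload["data"] -> [{"x": [0, 1, 2, 3], "y": [1.0, 0.5, 2.0, 1.5], "mode": "lines"}]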
Return the decoded result of a function call.
Args:
function_name (str): One of the existing functions described in the
contract interface.
data (bin): The encoded result from calling `function_name`.
Return:
List[object]: The values returned by the call to `function_name`. | def decode_function_result(self, function_name, data):
description = self.function_data[function_name]
arguments = decode_abi(description['decode_types'], data)
return arguments | 219,309 |
Return a dictionary representation the log.
Note:
This function won't work with anonymous events.
Args:
log_topics (List[bin]): The log's indexed arguments.
log_data (bin): The encoded non-indexed arguments. | def decode_event(self, log_topics, log_data):
# https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI#function-selector-and-argument-encoding
# topics[0]: keccak(EVENT_NAME+"("+EVENT_ARGS.map(canonical_type_of).join(",")+")")
# If the event is declared as anonymous the topics[0] is not generated;
if not len(log_topics) or log_topics[0] not in self.event_data:
raise ValueError('Unknown log type')
event_id_ = log_topics[0]
event = self.event_data[event_id_]
# data: abi_serialise(EVENT_NON_INDEXED_ARGS)
# EVENT_NON_INDEXED_ARGS is the series of EVENT_ARGS that are not
# indexed, abi_serialise is the ABI serialisation function used for
# returning a series of typed values from a function.
unindexed_types = [
type_
for type_, indexed in zip(event['types'], event['indexed'])
if not indexed
]
unindexed_args = decode_abi(unindexed_types, log_data)
# topics[n]: EVENT_INDEXED_ARGS[n - 1]
# EVENT_INDEXED_ARGS is the series of EVENT_ARGS that are indexed
indexed_count = 1 # skip topics[0]
result = {}
for name, type_, indexed in zip(
event['names'], event['types'], event['indexed']):
if indexed:
topic_bytes = utils.zpad(
utils.encode_int(log_topics[indexed_count]),
32,
)
indexed_count += 1
value = decode_single(process_type(type_), topic_bytes)
else:
value = unindexed_args.pop(0)
result[name] = value
result['_event_type'] = utils.to_string(event['name'])
return result | 219,311 |
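A hedged sketch of how topics[0] relates to the event signature in pyethereum, where both the hash and the log topics are handled as integers; the event name is hypothetical.

from ethereum import utils

signature = b"Transfer(address,address,uint256)"           # hypothetical event
event_id = utils.big_endian_to_int(utils.sha3(signature))
# a log whose topics[0] equals event_id would then be decoded with
#   translator.decode_event(log.topics, log.data)
# non-indexed arguments come from log.data, indexed ones from topics[1:]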
Return a dictionary representation of the Log instance.
Note:
This function won't work with anonymous events.
Args:
log (processblock.Log): The Log instance that needs to be parsed.
noprint (bool): Flag to turn off printing of the decoded log instance. | def listen(self, log, noprint=True):
try:
result = self.decode_event(log.topics, log.data)
except ValueError:
return # api compatibility
if not noprint:
print(result)
return result | 219,312 |
Return the compiled contract code.
Args:
filepath (str): The path to the contract source code.
libraries (dict): A dictionary mapping library name to its address.
combined (str): The argument for solc's --combined-json.
optimize (bool): Enable/disables compiler optimization.
Returns:
dict: A mapping from the contract name to its binary. | def compile_file(filepath, libraries=None, combined='bin,abi',
optimize=True, extra_args=None):
workdir, filename = os.path.split(filepath)
args = solc_arguments(
libraries=libraries,
combined=combined,
optimize=optimize,
extra_args=extra_args)
args.insert(0, get_compiler_path())
args.append(filename)
output = subprocess.check_output(args, cwd=workdir)
return solc_parse_output(output) | 219,642 |
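A usage sketch; the file path and contract name are hypothetical, and the exact key format of the returned mapping depends on the solc version in use.

contracts = compile_file("contracts/Token.sol", optimize=True)
token = contracts["Token"]     # hypothetical name; newer solc may key it as "Token.sol:Token"
binary = token["bin"]          # fields follow the --combined-json selection
abi = token["abi"]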
gpp -- model for the graph partitioning problem
Parameters:
- V: set/list of nodes in the graph
- E: set/list of edges in the graph
Returns a model, ready to be solved. | def gpp(V,E):
model = Model("gpp")
x = {}
y = {}
for i in V:
x[i] = model.addVar(vtype="B", name="x(%s)"%i)
for (i,j) in E:
y[i,j] = model.addVar(vtype="B", name="y(%s,%s)"%(i,j))
model.addCons(quicksum(x[i] for i in V) == len(V)/2, "Partition")
for (i,j) in E:
model.addCons(x[i] - x[j] <= y[i,j], "Edge(%s,%s)"%(i,j))
model.addCons(x[j] - x[i] <= y[i,j], "Edge(%s,%s)"%(j,i))
model.setObjective(quicksum(y[i,j] for (i,j) in E), "minimize")
model.data = x
return model | 220,064 |
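A driver sketch assuming the gpp and make_data helpers from this section are in scope; optimize, getVal and getObjVal are standard PySCIPOpt model calls.

import random

random.seed(1)
V, E = make_data(10, 0.5)       # even number of vertices keeps the partition constraint integral
model = gpp(V, E)
model.optimize()
x = model.data
left = [i for i in V if model.getVal(x[i]) > 0.5]
print("cut size:", model.getObjVal(), "one side of the partition:", left)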
gpp -- model for the graph partitioning problem in soco
Parameters:
- V: set/list of nodes in the graph
- E: set/list of edges in the graph
Returns a model, ready to be solved. | def gpp_soco(V,E):
model = Model("gpp model -- soco")
x,s,z = {},{},{}
for i in V:
x[i] = model.addVar(vtype="B", name="x(%s)"%i)
for (i,j) in E:
s[i,j] = model.addVar(vtype="C", name="s(%s,%s)"%(i,j))
z[i,j] = model.addVar(vtype="C", name="z(%s,%s)"%(i,j))
model.addCons(quicksum(x[i] for i in V) == len(V)/2, "Partition")
for (i,j) in E:
model.addCons((x[i] + x[j] -1)*(x[i] + x[j] -1) <= s[i,j], "S(%s,%s)"%(i,j))
model.addCons((x[j] - x[i])*(x[j] - x[i]) <= z[i,j], "Z(%s,%s)"%(i,j))
model.addCons(s[i,j] + z[i,j] == 1, "P(%s,%s)"%(i,j))
# # triangle inequalities (seem to make model slower)
# for i in V:
# for j in V:
# for k in V:
# if (i,j) in E and (j,k) in E and (i,k) in E:
# print("\t***",(i,j,k)
# model.addCons(z[i,j] + z[j,k] + z[i,k] <= 2, "T1(%s,%s,%s)"%(i,j,k))
# model.addCons(z[i,j] + s[j,k] + s[i,k] <= 2, "T2(%s,%s,%s)"%(i,j,k))
# model.addCons(s[i,j] + s[j,k] + z[i,k] <= 2, "T3(%s,%s,%s)"%(i,j,k))
# model.addCons(s[i,j] + z[j,k] + s[i,k] <= 2, "T4(%s,%s,%s)"%(i,j,k))
model.setObjective(quicksum(z[i,j] for (i,j) in E), "minimize")
model.data = x,s,z
return model | 220,065 |
make_data: prepare data for a random graph
Parameters:
- n: number of vertices
- prob: probability of existence of an edge, for each pair of vertices
Returns a tuple with a list of vertices and a list of edges. | def make_data(n,prob):
V = range(1,n+1)
E = [(i,j) for i in V for j in V if i < j and random.random() < prob]
return V,E | 220,066 |
maxflow: maximize flow from source to sink, taking into account arc capacities M
Parameters:
- V: set of vertices
- M[i,j]: dictionary of arc capacities, keyed by (i,j)
- source: flow origin
- sink: flow target
Returns a model, ready to be solved. | def maxflow(V,M,source,sink):
# create max-flow underlying model, on which to find cuts
model = Model("maxflow")
f = {} # flow variable
for (i,j) in M:
f[i,j] = model.addVar(lb=-M[i,j], ub=M[i,j], name="flow(%s,%s)"%(i,j))
cons = {}
for i in V:
if i != source and i != sink:
cons[i] = model.addCons(
quicksum(f[i,j] for j in V if i<j and (i,j) in M) - \
quicksum(f[j,i] for j in V if i>j and (j,i) in M) == 0,
"FlowCons(%s)"%i)
model.setObjective(quicksum(f[i,j] for (i,j) in M if i==source), "maximize")
# model.write("tmp.lp")
model.data = f,cons
return model | 220,067 |
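A tiny sketch on a hand-made instance; the vertices, capacities and source/sink choice are arbitrary.

V = [1, 2, 3, 4]
M = {(1, 2): 4, (1, 3): 2, (2, 3): 1, (2, 4): 3, (3, 4): 5}
model = maxflow(V, M, source=1, sink=4)
model.optimize()
f, cons = model.data
print("max flow value:", model.getObjVal())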