Unnamed: 0 (int64, 0–389k) | code (stringlengths 26–79.6k) | docstring (stringlengths 1–46.9k) |
---|---|---|
25,600 |
def unicode2encode(text, charmap):
if isinstance(text, (list, tuple)):
unitxt = ''
for line in text:
for val,key in charmap.items():
if key in line:
line = line.replace(key, val)
unitxt += line
return unitxt
elif isinstance(text, str):
for val,key in charmap.items():
if key in text:
text = text.replace(key, val)
return text
|
charmap : dictionary with the encoded character as key and the unicode character as value
|
25,601 |
def set(self, name: str, value: Union[str, List[str]]) -> None:
self._headers[name] = value
|
Set a header.
|
25,602 |
def radiance_to_bt(arr, wc_, a__, b__):
return a__ + b__ * (C2 * wc_ / (da.log(1 + (C1 * (wc_ ** 3) / arr))))
|
Convert to BT.
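Written out, the body is the inverse Planck relation with a linear calibration on top (assuming C1 and C2 are the module-level radiation constants and wc_ the central wavenumber):
BT = a__ + b__ * (C2 * wc_ / ln(1 + C1 * wc_**3 / arr))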
|
25,603 |
def detach(self):
log.debug("Removing %s from server sockets" % self)
if self.sessid in self.server.sockets:
self.server.sockets.pop(self.sessid)
|
Detach this socket from the server. This should be done in
conjunction with kill(), once all the jobs are dead, detach the
socket for garbage collection.
|
25,604 |
def function(self, x, y, theta_E, gamma, e1, e2, center_x=0, center_y=0):
phi_G, q = param_util.ellipticity2phi_q(e1, e2)
gamma, q = self._param_bounds(gamma, q)
theta_E *= q
x_shift = x - center_x
y_shift = y - center_y
E = theta_E / (((3 - gamma) / 2.) ** (1. / (1 - gamma)) * np.sqrt(q))
eta = -gamma+3
xt1 = np.cos(phi_G)*x_shift+np.sin(phi_G)*y_shift
xt2 = -np.sin(phi_G)*x_shift+np.cos(phi_G)*y_shift
p2 = xt1**2+xt2**2/q**2
s2 = 0.
return 2 * E**2/eta**2 * ((p2 + s2)/E**2)**(eta/2)
|
:param x: set of x-coordinates
:type x: array of size (n)
:param theta_E: Einstein radius of lens
:type theta_E: float.
:param gamma: power law slope of mass profile
:type gamma: <2 float
:param q: Axis ratio
:type q: 0<q<1
:param phi_G: position angle of SES
:type phi_G: 0<phi_G<pi/2
:returns: function
:raises: AttributeError, KeyError
|
25,605 |
def match(self, pattern, context=None):
matches = []
regex = pattern
if regex == '':
regex = '.*'  # an empty pattern matches every choice
regex = re.compile(regex)
for choice in self.choices(context):
if regex.search(choice):
matches.append(choice)
return matches
|
This method returns a (possibly empty) list of strings that
match the regular expression ``pattern`` provided. You can
also provide a ``context`` as described above.
This method calls ``choices`` to get a list of all possible
choices and then filters the list by performing a regular
expression search on each choice using the supplied ``pattern``.
|
25,606 |
def strip_prefix(s, prefix, strict=False):
if s.startswith(prefix):
return s[len(prefix) :]
elif strict:
raise WimpyError("string doesn't start with prefix")
return s
|
Removes the prefix, if it's there, otherwise returns input string unchanged.
If strict is True, also ensures the prefix was present
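A doctest-style sketch of the non-strict behaviour:
>>> strip_prefix('wimpy_text', 'wimpy_')
'text'
>>> strip_prefix('other_text', 'wimpy_')
'other_text'
With strict=True the second call raises WimpyError instead of returning the input unchanged.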
|
25,607 |
def _encode_bool(name, value, dummy0, dummy1):
return b"\x08" + name + (value and b"\x01" or b"\x00")
|
Encode a python boolean (True/False).
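A quick illustration (assuming ``name`` is the usual null-terminated key bytes):
>>> _encode_bool(b'flag\x00', True, None, None)
b'\x08flag\x00\x01'
>>> _encode_bool(b'flag\x00', False, None, None)
b'\x08flag\x00\x00'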
|
25,608 |
def print_param_defaults(self_):
cls = self_.cls
for key,val in cls.__dict__.items():
if isinstance(val,Parameter):
print(cls.__name__ + '.' + key + ' = ' + repr(val.default))
|
Print the default values of all cls's Parameters.
|
25,609 |
def ephem(self, *args, **kwargs):
return self.__class__(self.ephemeris(*args, **kwargs))
|
Create an Ephem object which is a subset of this one.
Takes the same keyword arguments as :py:meth:`ephemeris`.
Return:
    Ephem:
|
25,610 |
def _attach_original_exception(self, exc):
original_exception = sys.exc_info()
if original_exception[0] is not None:
exc.original_exception = original_exception
|
Often, a retry will be raised inside an "except" block.
This keeps track of the first exception for debugging purposes.
|
25,611 |
def register_target(self, target: Target):
if target.name in self.targets:
first = self.targets[target.name]
raise NameError(
'Target {} (module {}) already registered by {} (module {})'.format(
target, split_build_module(target.name),
first, split_build_module(first.name)))
self.targets[target.name] = target
self.targets_by_module[split_build_module(target.name)].add(
target.name)
|
Register a `target` instance in this build context.
A registered target is saved in the `targets` map and in the
`targets_by_module` map, but is not added to the target graph until
target extraction is completed (thread safety considerations).
|
25,612 |
def score_samples(self, X, lengths=None):
check_is_fitted(self, "startprob_")
self._check()
X = check_array(X)
n_samples = X.shape[0]
logprob = 0
posteriors = np.zeros((n_samples, self.n_components))
for i, j in iter_from_X_lengths(X, lengths):
framelogprob = self._compute_log_likelihood(X[i:j])
logprobij, fwdlattice = self._do_forward_pass(framelogprob)
logprob += logprobij
bwdlattice = self._do_backward_pass(framelogprob)
posteriors[i:j] = self._compute_posteriors(fwdlattice, bwdlattice)
return logprob, posteriors
|
Compute the log probability under the model and compute posteriors.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, ), optional
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
Returns
-------
logprob : float
Log likelihood of ``X``.
posteriors : array, shape (n_samples, n_components)
State-membership probabilities for each sample in ``X``.
See Also
--------
score : Compute the log probability under the model.
decode : Find most likely state sequence corresponding to ``X``.
|
25,613 |
def getPrevUrl(self, url, data):
prevUrl = None
if self.prevSearch:
try:
prevUrl = self.fetchUrl(url, data, self.prevSearch)
except ValueError as msg:
out.warn(u"%s Assuming no previous comic strips exist." % msg)
else:
prevUrl = self.prevUrlModifier(prevUrl)
out.debug(u"Found previous URL %s" % prevUrl)
getHandler().comicPageLink(self.getName(), url, prevUrl)
return prevUrl
|
Find previous URL.
|
25,614 |
def get_diskinfo(opts, show_all=False, local_only=False):
disks = []
outunit = opts.outunit
label_map = get_label_map(opts)
try:
with open(mntfname) as infile:
lines = infile.readlines()
lines.sort()
except IOError:
return None
for i, line in enumerate(lines):
device, mntp, fmt, mntops, *_ = line.split()
if device in (,):
continue
disk = DiskInfo()
dev = basename(device)
disk.isnet = in device
if local_only and disk.isnet:
continue
disk.isimg = is_img = dev.startswith()
is_tmpfs = (device == )
for selector in selectors:
if selector in device:
if show_all:
if is_tmpfs:
disk.isram = True
else:
if (is_img or
is_tmpfs or
mntp == ):
continue
break
else:
continue
disk.dev = dev
disk.fmt = fmt
disk.mntp = mntp = decode_mntp(mntp) if in mntp else mntp
disk.ismntd = bool(mntp)
disk.isopt = check_optical(disk)
if device[0] == :
disk.isrem = check_removable(dev, opts)
disk.label = label_map.get(device)
stat = os.statvfs(mntp)
disk.ocap = stat.f_frsize * stat.f_blocks
disk.cap = disk.ocap / outunit
disk.free = stat.f_frsize * stat.f_bavail / outunit
disk.oused = stat.f_frsize * (stat.f_blocks - stat.f_bfree)
disk.used = disk.oused / outunit
disk.pcnt = disk.oused / disk.ocap * 100
if mntops.startswith():
disk.rw = True
elif mntops.startswith():
disk.rw = False
else:
disk.rw = not bool(stat.f_flag & os.ST_RDONLY)
disks.append(disk)
if show_all:
for devname in label_map:
dev = basename(devname)
exists = [ disk for disk in disks if disk.dev == dev ]
if not exists:
disk = DiskInfo(
cap=0, free=0, ocap=0, pcnt=0, used=0,
dev = dev,
ismntd = False, mntp = ,
isnet = False,
isopt = check_optical(DiskInfo(dev=dev, fmt=None)),
isram = False,
isrem = check_removable(dev, opts),
label = label_map[devname],
rw = None,
)
disks.append(disk)
disks.sort(key=lambda disk: disk.dev)
if opts.debug:
print()
for disk in disks:
print(disk.dev, disk)
print()
return disks
|
Returns a list holding the current disk info.
Stats are divided by the output unit.
|
25,615 |
def unpack_unordered_pairs(self, pairs):
items = [(False, k, v) for k, v in pairs]
result = []
max_loop = 2
while items and max_loop:
next_items = []
for key_unpacked, key_data, value_data in items:
if key_unpacked:
key = key_data
else:
blob = self._begin()
try:
key = self.unpack_data(key_data)
self._commit(blob)
except DelayPacking:
self._rollback(blob)
next_items.append((False, key_data, value_data))
continue
blob = self._begin()
try:
value = self.unpack_data(value_data)
self._commit(blob)
except DelayPacking:
self._rollback(blob)
next_items.append((True, key, value_data))
continue
result.append((key, value))
items = next_items
max_loop -= 1
if items:
raise DelayPacking()
return result
|
Unpack an unordered list of value pairs, taking DelayPacking
exceptions into account to resolve circular references.
Used to unpack dictionary items when the order is not guaranteed
by the serializer. When item order changes between packing
and unpacking, references are not guaranteed to appear before
dereferences anymore. So if unpacking an item fails because
of an unknown dereference, we must keep it aside, continue unpacking
the other items, and try again later.
|
25,616 |
def relcurveto(self, h1x, h1y, h2x, h2y, x, y):
if self._path is None:
raise ShoebotError(_("No current path. Use beginpath() first."))
self._path.relcurveto(h1x, h1y, h2x, h2y, x, y)
|
Draws a curve relatively to the last point.
|
25,617 |
def next_doc_with_tag(self, doc_tag):
while True:
try:
doc = next(self)
if doc.tag == doc_tag:
return doc
except StopIteration:
raise
|
Returns the next document with the specified tag. Empty string if no doc is found.
|
25,618 |
def surrounding_nodes(self, position):
n_node_index, n_node_position, n_node_error = self.nearest_node(position)
if n_node_error == 0.0:
index_mod = []
for i in range(len(n_node_index)):
new_point = np.asarray(n_node_position)
new_point[i] += 1.e-5*np.abs(new_point[i])
try:
self.nearest_node(tuple(new_point))
index_mod.append(-1)
except ValueError:
index_mod.append(1)
else:
index_mod = []
for i in range(len(n_node_index)):
if n_node_position[i] > position[i]:
index_mod.append(-1)
else:
index_mod.append(1)
return tuple(n_node_index), tuple(index_mod)
|
Returns nearest node indices and direction of opposite node.
:param position: Position inside the mesh to search nearest node for as (x,y,z)
:return: Nearest node indices and direction of opposite node.
|
25,619 |
def get_maxloss_rupture(dstore, loss_type):
lti = dstore[].lti[loss_type]
ridx = dstore.get_attr(, )[lti]
[rgetter] = gen_rupture_getters(dstore, slice(ridx, ridx + 1))
[ebr] = rgetter.get_ruptures()
return ebr
|
:param dstore: a DataStore instance
:param loss_type: a loss type string
:returns:
EBRupture instance corresponding to the maximum loss for the
given loss type
|
25,620 |
def bit_reversal(qubits: List[int]) -> Program:
p = Program()
n = len(qubits)
for i in range(int(n / 2)):
p.inst(SWAP(qubits[i], qubits[-i - 1]))
return p
|
Generate a circuit to do bit reversal.
:param qubits: Qubits to do bit reversal with.
:return: A program to do bit reversal.
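For ``qubits=[0, 1, 2, 3]`` the loop emits SWAP(0, 3) and SWAP(1, 2), i.e. qubit i is swapped with qubit n-1-i over the first half of the register. A small sketch (assuming pyquil's Program API as imported by the module):
>>> p = bit_reversal([0, 1, 2, 3])
>>> len(p.instructions)  # two SWAP gates
2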
|
25,621 |
def tangent_bundle(self):
if self.ndim == 0:
return ProductSpace(field=self.field)
else:
return ProductSpace(self, self.ndim)
|
The tangent bundle associated with `domain` using `partition`.
The tangent bundle of a space ``X`` of functions ``R^d --> F`` can be
interpreted as the space of vector-valued functions ``R^d --> F^d``.
This space can be identified with the power space ``X^d`` as used
in this implementation.
|
25,622 |
def temporary(self, path):
if path is None:
raise ValueError()
prior = self._root_dir
self._root_dir = path
try:
yield
finally:
self._root_dir = prior
|
Establishes a temporary build root, restoring the prior build root on exit.
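A usage sketch (``builder`` is a hypothetical object exposing this method; in the real source the generator is presumably wrapped as a context manager):
>>> with builder.temporary('/tmp/scratch-root'):
...     pass  # work done here sees '/tmp/scratch-root' as the build root
>>> # on exit the previous build root is restored, even if an exception was raised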
|
25,623 |
def copy_non_ecf(props, target):
target.update(
{key: value for key, value in props.items() if key not in ECFPROPNAMES}
)
return target
|
Copies non-ECF properties from ``props`` to ``target``
:param props: An input dictionary
:param target: The dictionary to copy non-ECF properties to
:return: The ``target`` dictionary
|
25,624 |
def derive_fields(self):
if self.fields:
return self.fields
else:
fields = []
for field in self.object_list.model._meta.fields:
if field.name != 'id':
fields.append(field.name)
return fields
|
Derives our fields.
|
25,625 |
def get_contigs(self):
contigs = {}
pyfastaq.tasks.file_to_dict(self.contigs_fasta, contigs)
return contigs
|
Returns a dictionary of contig_name -> pyfastaq.Sequences.Fasta object
|
25,626 |
def cleanup(self):
for key in self._find_keys(identity=):
image_file = self._get(key)
if image_file and not image_file.exists():
self.delete(image_file)
for key in self._find_keys(identity=):
image_file = self._get(key)
if image_file:
thumbnail_keys = self._get(key, identity=) or []
thumbnail_keys_set = set(thumbnail_keys)
for thumbnail_key in thumbnail_keys:
if not self._get(thumbnail_key):
thumbnail_keys_set.remove(thumbnail_key)
thumbnail_keys = list(thumbnail_keys_set)
if thumbnail_keys:
self._set(key, thumbnail_keys, identity=)
continue
|
Cleans up the key value store. In detail:
1. Deletes all key store references for image_files that do not exist
and all key references for its thumbnails *and* their image_files.
2. Deletes or updates all invalid thumbnail keys
|
25,627 |
def check_index(i):
i = asarray(i)
if (i.ndim > 1) or (size(i) < 1):
raise Exception("Index must be one-dimensional and non-singleton")
return i
|
Checks and parses an index spec, must be a one-dimensional array [i0, i1, ...]
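A doctest-style sketch (assuming ``asarray`` and ``size`` are the NumPy functions imported by the module):
>>> check_index([0, 2, 5])
array([0, 2, 5])
>>> check_index([[0, 1]])
Traceback (most recent call last):
    ...
Exception: Index must be one-dimensional and non-singleton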
|
25,628 |
def _get(self, operation, field):
self._check_exists()
query = {Mark.FLD_OP: operation.name,
Mark.FLD_MARK + "." + field: {"$exists": True}}
return self._track.find_one(query)
|
Get tracked position for a given operation and field.
|
25,629 |
def read_data(self, scaling_factor=1E-9, strain_headers=None):
if strain_headers:
self.strain.data_variables = strain_headers
else:
self.strain.data_variables = STRAIN_VARIABLES
datafile = open(self.filename, )
reader = csv.DictReader(datafile)
self.strain.data = dict([(name, []) for name in reader.fieldnames])
for row in reader:
for name in row.keys():
if in name.lower():
self.strain.data[name].append(row[name])
elif name in self.strain.data_variables:
self.strain.data[name].append(
scaling_factor * float(row[name]))
else:
self.strain.data[name].append(float(row[name]))
for key in self.strain.data.keys():
if in key:
self.strain.data[key] = np.array(self.strain.data[key],
dtype=)
else:
self.strain.data[key] = np.array(self.strain.data[key])
self._check_invalid_longitudes()
if not in self.strain.data:
print()
self.strain.data_variables = self.strain.data.keys()
self.strain.get_secondary_strain_data()
return self.strain
|
Reads the data from the csv file
:param float scaling_factor:
Scaling factor used for all strain values (default 1E-9 for
nanostrain)
:param list strain_headers:
List of the variables in the file that correspond to strain
parameters
:returns:
strain - Strain model as an instance of the :class:
openquake.hmtk.strain.geodetic_strain.GeodeticStrain
|
25,630 |
def from_stream(cls, stream, marker_code, offset):
if JPEG_MARKER_CODE.is_standalone(marker_code):
segment_length = 0
else:
segment_length = stream.read_short(offset)
return cls(marker_code, offset, segment_length)
|
Return a generic |_Marker| instance for the marker at *offset* in
*stream* having *marker_code*.
|
25,631 |
def get_call_repr(func, *args, **kwargs):
if ismethod(func) or isfunction(func) or isbuiltin(func):
func_repr = .format(func.__module__, func.__qualname__)
elif not isclass(func) and hasattr(func, ):
func_repr = .format(func.__module__, func.__class__.__name__)
else:
func_repr = repr(func)
args_reprs = [repr(arg) for arg in args]
kwargs_reprs = [k + + repr(v) for k, v in sorted(kwargs.items())]
return .format(func_repr, .join(args_reprs + kwargs_reprs))
|
Return the string representation of the function call.
:param func: A callable (e.g. function, method).
:type func: callable
:param args: Positional arguments for the callable.
:param kwargs: Keyword arguments for the callable.
:return: String representation of the function call.
:rtype: str
|
25,632 |
def transition(self):
instruction = self.program[self.program_counter]
if isinstance(instruction, Gate):
if instruction.name in self.defined_gates:
self.wf_simulator.do_gate_matrix(matrix=self.defined_gates[instruction.name],
qubits=[q.index for q in instruction.qubits])
else:
self.wf_simulator.do_gate(gate=instruction)
for noise_type, noise_prob in self.post_gate_noise_probabilities.items():
self.wf_simulator.do_post_gate_noise(noise_type, noise_prob,
qubits=[q.index for q in instruction.qubits])
self.program_counter += 1
elif isinstance(instruction, Measurement):
measured_val = self.wf_simulator.do_measurement(qubit=instruction.qubit.index)
x = instruction.classical_reg
self.ram[x.name][x.offset] = measured_val
self.program_counter += 1
elif isinstance(instruction, Declare):
if instruction.shared_region is not None:
raise NotImplementedError("SHARING is not (yet) implemented.")
self.ram[instruction.name] = np.zeros(instruction.memory_size,
dtype=QUIL_TO_NUMPY_DTYPE[
instruction.memory_type])
self.program_counter += 1
elif isinstance(instruction, Pragma):
self.program_counter += 1
elif isinstance(instruction, Jump):
self.program_counter = self.find_label(instruction.target)
elif isinstance(instruction, JumpTarget):
self.program_counter += 1
elif isinstance(instruction, JumpConditional):
x = instruction.condition
cond = self.ram[x.name][x.offset]
if not isinstance(cond, (bool, np.bool, np.int8)):
raise ValueError("{} requires a data type of BIT; not {}"
.format(instruction.op, type(cond)))
dest_index = self.find_label(instruction.target)
if isinstance(instruction, JumpWhen):
jump_if_cond = True
elif isinstance(instruction, JumpUnless):
jump_if_cond = False
else:
raise TypeError("Invalid JumpConditional")
if not (cond ^ jump_if_cond):
self.program_counter = dest_index
else:
self.program_counter += 1
elif isinstance(instruction, UnaryClassicalInstruction):
target = instruction.target
old = self.ram[target.name][target.offset]
if isinstance(instruction, ClassicalNeg):
if not isinstance(old, (int, float, np.int, np.float)):
raise ValueError("NEG requires a data type of REAL or INTEGER; not {}"
.format(type(old)))
self.ram[target.name][target.offset] *= -1
elif isinstance(instruction, ClassicalNot):
if not isinstance(old, (bool, np.bool)):
raise ValueError("NOT requires a data type of BIT; not {}"
.format(type(old)))
self.ram[target.name][target.offset] = not old
else:
raise TypeError("Invalid UnaryClassicalInstruction")
self.program_counter += 1
elif isinstance(instruction, (LogicalBinaryOp, ArithmeticBinaryOp, ClassicalMove)):
left_ind = instruction.left
left_val = self.ram[left_ind.name][left_ind.offset]
if isinstance(instruction.right, MemoryReference):
right_ind = instruction.right
right_val = self.ram[right_ind.name][right_ind.offset]
else:
right_val = instruction.right
if isinstance(instruction, ClassicalAnd):
new_val = left_val & right_val
elif isinstance(instruction, ClassicalInclusiveOr):
new_val = left_val | right_val
elif isinstance(instruction, ClassicalExclusiveOr):
new_val = left_val ^ right_val
elif isinstance(instruction, ClassicalAdd):
new_val = left_val + right_val
elif isinstance(instruction, ClassicalSub):
new_val = left_val - right_val
elif isinstance(instruction, ClassicalMul):
new_val = left_val * right_val
elif isinstance(instruction, ClassicalDiv):
new_val = left_val / right_val
elif isinstance(instruction, ClassicalMove):
new_val = right_val
else:
raise ValueError("Unknown BinaryOp {}".format(type(instruction)))
self.ram[left_ind.name][left_ind.offset] = new_val
self.program_counter += 1
elif isinstance(instruction, ClassicalExchange):
left_ind = instruction.left
right_ind = instruction.right
tmp = self.ram[left_ind.name][left_ind.offset]
self.ram[left_ind.name][left_ind.offset] = self.ram[right_ind.name][right_ind.offset]
self.ram[right_ind.name][right_ind.offset] = tmp
self.program_counter += 1
elif isinstance(instruction, Reset):
self.wf_simulator.reset()
self.program_counter += 1
elif isinstance(instruction, ResetQubit):
raise NotImplementedError("Need to implement in wf simulator")
self.program_counter += 1
elif isinstance(instruction, Wait):
warnings.warn("WAIT does nothing for a noiseless simulator")
self.program_counter += 1
elif isinstance(instruction, Nop):
self.program_counter += 1
elif isinstance(instruction, DefGate):
if instruction.parameters is not None and len(instruction.parameters) > 0:
raise NotImplementedError("PyQVM does not support parameterized DEFGATEs")
self.defined_gates[instruction.name] = instruction.name
self.program_counter += 1
elif isinstance(instruction, RawInstr):
raise NotImplementedError("PyQVM does not support raw instructions. "
"Parse your program")
elif isinstance(instruction, Halt):
return True
else:
raise ValueError("Unsupported instruction type: {}".format(instruction))
return self.program_counter == len(self.program)
|
Implements a QAM-like transition.
This function assumes ``program`` and ``program_counter`` instance variables are set
appropriately, and that the wavefunction simulator and classical memory ``ram`` instance
variables are in the desired QAM input state.
:return: whether the QAM should halt after this transition.
|
25,633 |
def _merge_args(qCmd, parsed_args, _extra_values, value_specs):
temp_values = _extra_values.copy()
for key, value in six.iteritems(temp_values):
if hasattr(parsed_args, key):
arg_value = getattr(parsed_args, key)
if arg_value is not None and value is not None:
if isinstance(arg_value, list):
if value and isinstance(value, list):
if (not arg_value or
isinstance(arg_value[0], type(value[0]))):
arg_value.extend(value)
_extra_values.pop(key)
|
Merge arguments from _extra_values into parsed_args.
If an argument value is provided in both and it is a list,
the values in _extra_values will be merged into parsed_args.
@param parsed_args: the parsed args from known options
@param _extra_values: the other parsed arguments in unknown parts
@param value_specs: the unparsed unknown parts
|
25,634 |
def data(ctx, path):
_rws = partial(rws_call, ctx)
if len(path) == 0:
_rws(ClinicalStudiesRequest(), default_attr=)
elif len(path) == 1:
_rws(StudySubjectsRequest(path[0], ), default_attr=)
elif len(path) == 2:
_rws(StudySubjectsRequest(path[0], path[1]), default_attr=)
elif len(path) == 3:
try:
click.echo(get_data(ctx, path[0], path[1], path[2]))
except RWSException as e:
click.echo(str(e))
except requests.exceptions.HTTPError as e:
click.echo(str(e))
else:
click.echo()
|
List EDC data for [STUDY] [ENV] [SUBJECT]
|
25,635 |
def find_window_id(pattern, method=, error=):
import utool as ut
winid_candidates = XCtrl.findall_window_ids(pattern)
if len(winid_candidates) == 0:
if error == :
available_windows = ut.cmd2()[]
msg = % (pattern,)
msg += % (available_windows,)
print(msg)
raise Exception(msg)
win_id = None
elif len(winid_candidates) == 1:
win_id = winid_candidates[0]
else:
win_id = XCtrl.sort_window_ids(winid_candidates, method)[0]
return win_id
|
xprop -id 0x00a00007 | grep "WM_CLASS(STRING)"
|
25,636 |
def classify(label_dict,image_fname=None,image_label=None):
min_acceptable_match = 80
if image_fname:
label_info = info_for_tags(image_fname,[(0x8,0x103e)])
image_label = label_info[(0x8,0x103e)]
flat_dict = [i for j in [[(b,x) for x in label_dict[b]] for b in label_dict] for i in j]
best_match = process.extractOne(image_label,[x[1] for x in flat_dict])
if best_match[1]<min_acceptable_match:
return None
else:
return [x[0] for x in flat_dict if x[1]==best_match[0]][0]
|
tries to classify a DICOM image based on known string patterns (with fuzzy matching)
Takes the label from the DICOM header and compares to the entries in ``label_dict``. If it finds something close
it will return the image type, otherwise it will return ``None``. Alternatively, you can supply your own string, ``image_label``,
and it will try to match that.
``label_dict`` is a dictionary where the keys are dataset types and the values are lists of strings that match that type.
For example::
{
'anatomy': ['SPGR','MPRAGE','anat','anatomy'],
'dti': ['DTI'],
'field_map': ['fieldmap','TE7','B0']
}
|
25,637 |
def diff_encode(line, transform):
coords = [transform(x, y) for (x, y) in line.coords]
pairs = zip(coords[:], coords[1:])
diffs = [(x2 - x1, y2 - y1) for ((x1, y1), (x2, y2)) in pairs]
return coords[:1] + [(x, y) for (x, y) in diffs if (x, y) != (0, 0)]
|
Differentially encode a shapely linestring or ring.
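A doctest-style sketch (shapely geometry plus an integer-snapping transform):
>>> from shapely.geometry import LineString
>>> line = LineString([(0, 0), (1, 0), (1, 1)])
>>> diff_encode(line, lambda x, y: (int(x * 10), int(y * 10)))
[(0, 0), (10, 0), (0, 10)]
The first element is the transformed start point; the remaining elements are the non-zero deltas between consecutive points.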
|
25,638 |
def set_board_options(self, options, team_context, id):
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values[] = self._serialize.url(, project, )
if team is not None:
route_values[] = self._serialize.url(, team, )
if id is not None:
route_values[] = self._serialize.url(, id, )
content = self._serialize.body(options, )
response = self._send(http_method=,
location_id=,
version=,
route_values=route_values,
content=content)
return self._deserialize(, self._unwrap_collection(response))
|
SetBoardOptions.
Update board options
:param {str} options: options to update
:param :class:`<TeamContext> <azure.devops.v5_0.work.models.TeamContext>` team_context: The team context for the operation
:param str id: identifier for board, either category plural name (Eg:"Stories") or guid
:rtype: {str}
|
25,639 |
async def remove_all_pumps_async(self, reason):
pump_tasks = [self.remove_pump_async(p_id, reason) for p_id in self.partition_pumps]
await asyncio.gather(*pump_tasks)
return True
|
Stops all partition pumps
(Note this might be wrong and need to await all tasks before returning done).
:param reason: A reason for closing.
:type reason: str
:rtype: bool
|
25,640 |
def parse_info(raw_info, apply_tag=None):
parse_releases(raw_info)
parse_packages(raw_info, apply_tag=apply_tag)
return raw_info
|
Parse raw rdoinfo metadata inplace.
:param raw_info: raw info to parse
:param apply_tag: tag to apply
:returns: dictionary containing all packages in rdoinfo
|
25,641 |
def action(self, *args, **kwargs):
try:
return self._action(*args, **kwargs)
except KeyError, x:
self.config.logger.debug(
,
to_str(self.__class__),
x,
)
except Exception, x:
self.config.logger.debug(
,
to_str(self.__class__),
x,
exc_info=True
)
return False
|
the default action for Support Classifiers invokes any derived
_action function, trapping any exceptions raised in the process. We
are obligated to catch these exceptions to give subsequent rules the
opportunity to act and perhaps mitigate the error. An error during the
action application is a failure of the rule, not a failure of the
classification system itself.
|
25,642 |
def _aggregate_on_chunks(x, f_agg, chunk_len):
return [getattr(x[i * chunk_len: (i + 1) * chunk_len], f_agg)() for i in range(int(np.ceil(len(x) / chunk_len)))]
|
Takes the time series x and constructs a lower sampled version of it by applying the aggregation function f_agg on
consecutive chunks of length chunk_len
:param x: the time series to calculate the aggregation of
:type x: numpy.ndarray
:param f_agg: The name of the aggregation function that should be an attribute of the pandas.Series
:type f_agg: str
:param chunk_len: The size of the chunks where to aggregate the time series
:type chunk_len: int
:return: A list of the aggregation function over the chunks
:return type: list
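For example, with a pandas Series input (``f_agg`` is looked up as a Series method), ``_aggregate_on_chunks(pd.Series([1, 2, 3, 4, 5]), "max", 2)`` splits the series into the chunks [1, 2], [3, 4], [5] and returns [2, 4, 5].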
|
25,643 |
def get_parent_data(tree, node, current):
if not current:
current = []
parent = tree.parent(node.identifier)
if parent.is_root():
return current
current.insert(0, (parent.tag, parent.data))
return PlateManager.get_parent_data(tree, parent, current)
|
Recurse up the tree getting parent data
:param tree: The tree
:param node: The current node
:param current: The current list
:return: The hierarchical dictionary
|
25,644 |
def gradient_rgb(
self, text=None, fore=None, back=None, style=None,
start=None, stop=None, step=1, linemode=True, movefactor=0):
gradargs = {
: step,
: fore,
: back,
: style,
}
start = start or (0, 0, 0)
stop = stop or (255, 255, 255)
if linemode:
method = self._gradient_rgb_lines
gradargs[] = movefactor
else:
method = self._gradient_rgb_line
if text:
return self.__class__(
.join((
self.data or ,
method(
text,
start,
stop,
**gradargs
),
))
)
return self.__class__(
method(
self.stripped(),
start,
stop,
**gradargs
)
)
|
Return a black and white gradient.
Arguments:
text : String to colorize.
fore : Foreground color, background will be gradient.
back : Background color, foreground will be gradient.
style : Name of style to use for the gradient.
start : Starting rgb value.
stop : Stopping rgb value.
step : Number of characters to colorize per color.
This allows a "wider" gradient.
This will always be greater than 0.
linemode : Colorize each line in the input.
Default: True
movefactor : Amount to shift gradient for each line when
`linemode` is set.
|
25,645 |
def read_backend(self, client=None):
session = self.session
if session:
return session.model(self).read_backend
|
The read :class:`stdnet.BackendDataServer` for this instance.
It can be ``None``.
|
25,646 |
def average_over_area(q, x, y):
area = (np.max(x) - np.min(x))*(np.max(y) - np.min(y))
integral = np.trapz(np.trapz(q, y, axis=0), x)
return integral/area
|
Averages a quantity `q` over a rectangular area given a 2D array and
the x and y vectors for sample locations, using the trapezoidal rule
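A quick sanity check (a constant field should average to itself; note ``q`` is indexed as (len(y), len(x)) since the inner trapz integrates over ``y`` along axis 0):
>>> import numpy as np
>>> x = np.array([0.0, 1.0, 2.0])
>>> y = np.array([0.0, 1.0])
>>> average_over_area(np.ones((2, 3)), x, y)  # -> 1.0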
|
25,647 |
def create_api_vlan(self):
return ApiVlan(
self.networkapi_url,
self.user,
self.password,
self.user_ldap)
|
Get an instance of Api Vlan services facade.
|
25,648 |
def get_valid_filename(s, max_length=FILENAME_MAX_LENGTH):
s = str(s).strip().replace(' -- @', '_')
s = re.sub(r'(?u)[^-\w]', '_', s).strip()
return s[:max_length]
|
Returns the given string converted to a string that can be used for a clean filename.
Removes leading and trailing spaces; converts anything that is not an alphanumeric,
dash or underscore to underscore; converts behave examples separator ` -- @` to underscore.
It also cuts the resulting name to `max_length`.
@see https://github.com/django/django/blob/master/django/utils/text.py
|
25,649 |
def get_absolute_url_link(self, text=None, cls=None, icon_class=None,
**attrs):
if text is None:
text = self.get_link_text()
return build_link(href=self.get_absolute_url(),
text=text,
cls=cls,
icon_class=icon_class,
**attrs)
|
Gets the html link for the object.
|
25,650 |
def FanOut(self, obj, parent=None):
if parent and obj == parent:
results = [utils.SmartUnicode(obj).strip()]
elif isinstance(obj, (string_types, rdf_structs.EnumNamedValue)):
results = [utils.SmartUnicode(obj).strip()]
elif isinstance(obj, rdf_protodict.DataBlob):
results = self.FanOut(obj.GetValue())
elif isinstance(obj, (collections.Mapping, rdf_protodict.Dict)):
results = []
for k, v in iteritems(obj):
expanded_v = [utils.SmartUnicode(r) for r in self.FanOut(v)]
results.append("%s:%s" % (utils.SmartUnicode(k), ",".join(expanded_v)))
elif isinstance(obj, (collections.Iterable,
rdf_structs.RepeatedFieldHelper)):
results = []
for rslt in [self.FanOut(o, obj) for o in obj]:
results.extend(rslt)
else:
results = [utils.SmartUnicode(obj).strip()]
return results
|
Expand values from various attribute types.
Strings are returned as is.
Dictionaries are returned with a key string, and an expanded set of values.
Other iterables are expanded until they flatten out.
Other items are returned in string format.
Args:
obj: The object to expand out.
parent: The parent object: Used to short-circuit infinite recursion.
Returns:
a list of expanded values as strings.
|
25,651 |
def get_old_filename(diff_part):
regexps = (
r,
r,
)
for regexp in regexps:
r = re.compile(regexp, re.MULTILINE)
match = r.search(diff_part)
if match is not None:
return match.groups()[0]
raise MalformedGitDiff("No old filename in diff part found. "
"Examined diff part: {}".format(diff_part))
|
Returns the filename for the original file that was changed in a diff part.
|
25,652 |
def multi_zset(self, name, **kvs):
for k,v in kvs.items():
kvs[k] = get_integer(k, int(v))
return self.execute_command('multi_zset', name, *dict_to_list(kvs))
|
Set multiple key/score pairs in the zset ``name``
:param string name: the zset name
:param dict kvs: the key/score pairs to set
:return: the number of keys newly created
:rtype: int
>>> ssdb.multi_zset('zset_4', a=100, b=80, c=90, d=70)
4
>>> ssdb.multi_zset('zset_4', a=100, b=80, c=90, d=70)
0
>>> ssdb.multi_zset('zset_4', a=100, b=80, c=90, d=70, e=60)
1
|
25,653 |
def load_learner(path:PathOrStr, file:PathLikeOrBinaryStream='export.pkl', test:ItemList=None, **db_kwargs):
"Load a `Learner` object saved with `export_state` in `path/file` with empty data, optionally add `test` and load on `cpu`. `file` can be file-like (file or buffer)"
source = Path(path)/file if is_pathlike(file) else file
state = torch.load(source, map_location='cpu') if defaults.device == torch.device('cpu') else torch.load(source)
model = state.pop('model')
src = LabelLists.load_state(path, state.pop('data'))
if test is not None: src.add_test(test)
data = src.databunch(**db_kwargs)
cb_state = state.pop('cb_state')
clas_func = state.pop('cls_func')
res = clas_func(data, model, **state)
res.callback_fns = state['callback_fns']
res.callbacks = [load_callback(c,s, res) for c,s in cb_state.items()]
return res
|
Load a `Learner` object saved with `export_state` in `path/file` with empty data, optionally add `test` and load on `cpu`. `file` can be file-like (file or buffer)
|
25,654 |
def verify_signature(args):
key_data = args.keyfile.read()
if b"-BEGIN EC PRIVATE KEY" in key_data:
sk = ecdsa.SigningKey.from_pem(key_data)
vk = sk.get_verifying_key()
elif b"-BEGIN PUBLIC KEY" in key_data:
vk = ecdsa.VerifyingKey.from_pem(key_data)
elif len(key_data) == 64:
vk = ecdsa.VerifyingKey.from_string(key_data,
curve=ecdsa.NIST256p)
else:
raise esptool.FatalError("Verification key does not appear to be an EC key in PEM format or binary EC public key data. Unsupported")
if vk.curve != ecdsa.NIST256p:
raise esptool.FatalError("Public key uses incorrect curve. ESP32 Secure Boot only supports NIST256p (openssl calls this curve 'prime256v1")
binary_content = args.datafile.read()
data = binary_content[0:-68]
sig_version, signature = struct.unpack("I64s", binary_content[-68:])
if sig_version != 0:
raise esptool.FatalError("Signature block has version %d. This version of espsecure only supports version 0." % sig_version)
print("Verifying %d bytes of data" % len(data))
try:
if vk.verify(signature, data, hashlib.sha256):
print("Signature is valid")
else:
raise esptool.FatalError("Signature is not valid")
except ecdsa.keys.BadSignatureError:
raise esptool.FatalError("Signature is not valid")
|
Verify a previously signed binary image, using the ECDSA public key
|
25,655 |
def parse_model_file(path):
context = FilePathContext(path)
format = resolve_format(None, context.filepath)
if format == :
logger.debug(.format(context.filepath))
with context.open() as f:
for reaction_id in parse_model_table_file(context, f):
yield reaction_id
elif format == :
logger.debug(.format(context.filepath))
with context.open() as f:
for reaction_id in parse_model_yaml_file(context, f):
yield reaction_id
|
Parse a file as a list of model reactions
The file format is detected and the file is parsed accordingly. The file is
specified as a file path that will be opened for reading. Path can be given
as a string or a context.
|
25,656 |
def _busy_wait_ms(self, ms):
start = time.time()
delta = ms/1000.0
while (time.time() - start) <= delta:
pass
|
Busy wait for the specified number of milliseconds.
|
25,657 |
def open_recruitment(self, n=1):
logger.info("Opening HotAir recruitment for {} participants".format(n))
recruitments = self.recruit(n)
message = "Recruitment requests will open browser windows automatically."
return {"items": recruitments, "message": message}
|
Return initial experiment URL list, plus instructions
for finding subsequent recruitment events in experiment logs.
|
25,658 |
def AddOperands(self, lhs, rhs):
if isinstance(lhs, Expression) and isinstance(rhs, Expression):
self.args = [lhs, rhs]
else:
raise errors.ParseError(
.format(
lhs, self.operator, rhs))
|
Add an operand.
|
25,659 |
def get_batch_unlock_gain(
channel_state: NettingChannelState,
) -> UnlockGain:
gain_from_partner_locks = TokenAmount(sum(
unlock.lock.amount
for unlock in channel_state.partner_state.secrethashes_to_onchain_unlockedlocks.values()
))
our_locked_locks_amount = sum(
lock.amount
for lock in channel_state.our_state.secrethashes_to_lockedlocks.values()
)
our_unclaimed_locks_amount = sum(
lock.amount for lock in channel_state.our_state.secrethashes_to_unlockedlocks.values()
)
gain_from_our_locks = TokenAmount(our_locked_locks_amount + our_unclaimed_locks_amount)
return UnlockGain(
from_partner_locks=gain_from_partner_locks,
from_our_locks=gain_from_our_locks,
)
|
Collect amounts for unlocked/unclaimed locks and onchain unlocked locks.
Note: this function does not check expiry, so the values make only sense during settlement.
Returns:
gain_from_partner_locks: locks amount received and unlocked on-chain
gain_from_our_locks: locks amount which are unlocked or unclaimed
|
25,660 |
def copy_contents(self, fileinstance, progress_callback=None,
chunk_size=None, **kwargs):
if not fileinstance.readable:
raise ValueError()
if not self.size == 0:
raise ValueError()
self.set_uri(
*self.storage(**kwargs).copy(
fileinstance.storage(**kwargs),
chunk_size=chunk_size,
progress_callback=progress_callback))
|
Copy this file instance into another file instance.
|
25,661 |
def _item_sources(self):
return [self.data_vars, self.coords, {d: self[d] for d in self.dims},
LevelCoordinatesSource(self)]
|
List of places to look-up items for key-completion
|
25,662 |
def evaluate(self, dataset, metric=, missing_value_action=):
_raise_error_evaluation_metric_is_valid(
metric, [, , ])
return super(RandomForestRegression, self).evaluate(dataset,
missing_value_action=missing_value_action,
metric=metric)
|
Evaluate the model on the given dataset.
Parameters
----------
dataset : SFrame
Dataset in the same format used for training. The columns names and
types of the dataset must be the same as that used in training.
metric : str, optional
Name of the evaluation metric. Possible values are:
'auto' : Compute all metrics.
'rmse' : Rooted mean squared error.
'max_error' : Maximum error.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : dict
A dictionary containing the evaluation result.
See Also
--------
create, predict
Examples
--------
.. sourcecode:: python
>>> results = model.evaluate(test_data, 'rmse')
|
25,663 |
def _index2word(self):
compute_index2word = lambda: self.keys()
try:
self._index2word_cache
except AttributeError:
self._index2word_cache = compute_index2word()
if len(self._index2word_cache) != len(self):
self._index2word_cache = compute_index2word()
return self._index2word_cache
|
Mapping from indices to words.
WARNING: this may go out-of-date, because it is a copy, not a view into the Vocab.
:return: a list of strings
|
25,664 |
def is_likely_link(text):
text = text.lower()
if (
text.startswith()
or text.startswith()
or text.startswith()
or text.startswith()
or text.startswith()
or text.endswith()
or text.startswith()
):
return True
dummy, dot, file_extension = text.rpartition()
if dot and file_extension and len(file_extension) <= 4:
file_extension_set = frozenset(file_extension)
if file_extension_set \
and file_extension_set <= ALPHANUMERIC_CHARS \
and not file_extension_set <= NUMERIC_CHARS:
if file_extension in COMMON_TLD:
return False
file_type = mimetypes.guess_type(text, strict=False)[0]
if file_type:
return True
else:
return False
|
Return whether the text is likely to be a link.
This function assumes that leading/trailing whitespace has already been
removed.
Returns:
bool
|
25,665 |
def forward(self, agent_qs, states):
bs = agent_qs.size(0)
states = states.reshape(-1, self.state_dim)
agent_qs = agent_qs.view(-1, 1, self.n_agents)
w1 = th.abs(self.hyper_w_1(states))
b1 = self.hyper_b_1(states)
w1 = w1.view(-1, self.n_agents, self.embed_dim)
b1 = b1.view(-1, 1, self.embed_dim)
hidden = F.elu(th.bmm(agent_qs, w1) + b1)
w_final = th.abs(self.hyper_w_final(states))
w_final = w_final.view(-1, self.embed_dim, 1)
v = self.V(states).view(-1, 1, 1)
y = th.bmm(hidden, w_final) + v
q_tot = y.view(bs, -1, 1)
return q_tot
|
Forward pass for the mixer.
Arguments:
agent_qs: Tensor of shape [B, T, n_agents, n_actions]
states: Tensor of shape [B, T, state_dim]
|
25,666 |
def _get_table_info(self):
self.fields = []
self.field_info = {}
self.cursor.execute( %self.name)
for field_info in self.cursor.fetchall():
fname = field_info[1].encode()
self.fields.append(fname)
ftype = field_info[2].encode()
info = {:ftype}
info[] = field_info[3] != 0
default = field_info[4]
if isinstance(default,unicode):
default = guess_default_fmt(default)
info[] = default
self.field_info[fname] = info
|
Inspect the base to get field names
|
25,667 |
def requirements(collector):
out = sys.stdout
artifact = collector.configuration[].artifact
if artifact not in (None, "", NotSpecified):
if isinstance(artifact, six.string_types):
out = open(artifact, )
else:
out = artifact
for active in collector.configuration[].values():
for requirement in active.requirements():
out.write("{0}\n".format(requirement))
|
Just print out the requirements
|
25,668 |
async def create(
cls, node: Union[Node, str],
interface_type: InterfaceType = InterfaceType.PHYSICAL, *,
name: str = None, mac_address: str = None,
tags: Iterable[str] = None, vlan: Union[Vlan, int] = None,
parent: Union[Interface, int] = None,
parents: Iterable[Union[Interface, int]] = None, mtu: int = None,
accept_ra: bool = None, autoconf: bool = None,
bond_mode: str = None, bond_miimon: int = None,
bond_downdelay: int = None, bond_updelay: int = None,
bond_lacp_rate: str = None, bond_xmit_hash_policy: str = None,
bridge_stp: bool = None, bridge_fd: int = None):
params = {}
if isinstance(node, str):
params[] = node
elif isinstance(node, Node):
params[] = node.system_id
else:
raise TypeError(
% (
type(node).__name__))
if name is not None:
params[] = name
if tags is not None:
params[] = tags
if mtu is not None:
params[] = mtu
if vlan is not None:
if isinstance(vlan, Vlan):
params[] = vlan.id
elif isinstance(vlan, int):
params[] = vlan
else:
raise TypeError(
% (
type(vlan).__name__))
if accept_ra is not None:
params[] = accept_ra
if autoconf is not None:
params[] = autoconf
handler = None
if not isinstance(interface_type, InterfaceType):
raise TypeError(
% (
type(interface_type).__name__))
if interface_type == InterfaceType.PHYSICAL:
handler = cls._handler.create_physical
if mac_address:
params[] = mac_address
else:
raise ValueError(
)
elif interface_type == InterfaceType.BOND:
handler = cls._handler.create_bond
if parent is not None:
raise ValueError("use parents not parent for bond interface")
if not isinstance(parents, Iterable):
raise TypeError(
% (
type(parents).__name__))
if len(parents) == 0:
raise ValueError(
)
params[] = list(gen_parents(parents))
if not name:
raise ValueError()
if mac_address is not None:
params[] = mac_address
if bond_mode is not None:
params[] = bond_mode
if bond_miimon is not None:
params[] = bond_miimon
if bond_downdelay is not None:
params[] = bond_downdelay
if bond_updelay is not None:
params[] = bond_updelay
if bond_lacp_rate is not None:
params[] = bond_lacp_rate
if bond_xmit_hash_policy is not None:
params[] = bond_xmit_hash_policy
elif interface_type == InterfaceType.VLAN:
handler = cls._handler.create_vlan
if parents is not None:
raise ValueError("use parent not parents for VLAN interface")
if parent is None:
raise ValueError("parent is required for VLAN interface")
params[] = get_parent(parent)
if vlan is None:
raise ValueError("vlan is required for VLAN interface")
elif interface_type == InterfaceType.BRIDGE:
handler = cls._handler.create_bridge
if parents is not None:
raise ValueError("use parent not parents for bridge interface")
if parent is None:
raise ValueError("parent is required for bridge interface")
params[] = get_parent(parent)
if not name:
raise ValueError()
if mac_address is not None:
params[] = mac_address
if bridge_stp is not None:
params[] = bridge_stp
if bridge_fd is not None:
params[] = bridge_fd
else:
raise ValueError(
"cannot create an interface of type: %s" % interface_type)
return cls._object(await handler(**params))
|
Create a `Interface` in MAAS.
:param node: Node to create the interface on.
:type node: `Node` or `str`
:param interface_type: Type of interface to create (optional).
:type interface_type: `InterfaceType`
:param name: The name for the interface (optional).
:type name: `str`
:param tags: List of tags to add to the interface.
:type tags: sequence of `str`
:param mtu: The MTU for the interface (optional).
:type mtu: `int`
:param vlan: VLAN the interface is connected to (optional).
:type vlan: `Vlan` or `int`
:param accept_ra: True if the interface should accept router
advertisements (optional).
:type accept_ra: `bool`
:param autoconf: True if the interface should auto configure.
:type autoconf: `bool`
Following parameters specific to physical interface.
:param mac_address: The MAC address for the interface.
:type mac_address: `str`
Following parameters specific to a bond interface.
:param parents: Parent interfaces that make up the bond.
:type parents: sequence of `Interface` or `int`
:param mac_address: MAC address to use for the bond (optional).
:type mac_address: `str`
:param bond_mode: The operating mode of the bond (optional).
:type bond_mode: `str`
:param bond_miimon: The link monitoring frequency in
milliseconds (optional).
:type bond_miimon: `int`
:param bond_downdelay: Specifies the time, in milliseconds, to wait
before disabling a slave after a link failure has been detected
(optional).
:type bond_downdelay: `int`
:param bond_updelay: Specifies the time, in milliseconds, to wait
before enabling a slave after a link recovery has been detected.
:type bond_updelay: `int`
:param bond_lacp_rate: Option specifying the rate in which we'll ask
our link partner to transmit LACPDU packets in 802.3ad
mode (optional).
:type bond_lacp_rate: `str`
:param bond_xmit_hash_policy: The transmit hash policy to use for
slave selection in balance-xor, 802.3ad, and tlb modes(optional).
:type bond_xmit_hash_policy: `str`
Following parameters specific to a VLAN interface.
:param parent: Parent interface for this VLAN interface.
:type parent: `Interface` or `int`
Following parameters specific to a Bridge interface.
:param parent: Parent interface for this bridge interface.
:type parent: `Interface` or `int`
:param mac_address: The MAC address for the interface (optional).
:type mac_address: `str`
:param bridge_stp: Turn spanning tree protocol on or off (optional).
:type bridge_stp: `bool`
:param bridge_fd: Set bridge forward delay to time seconds (optional).
:type bridge_fd: `int`
:returns: The created Interface.
:rtype: `Interface`
|
25,669 |
def format_help(help):
help = help.replace("Options:", str(crayons.normal("Options:", bold=True)))
help = help.replace(
"Usage: pipenv", str("Usage: {0}".format(crayons.normal("pipenv", bold=True)))
)
help = help.replace(" check", str(crayons.red(" check", bold=True)))
help = help.replace(" clean", str(crayons.red(" clean", bold=True)))
help = help.replace(" graph", str(crayons.red(" graph", bold=True)))
help = help.replace(" install", str(crayons.magenta(" install", bold=True)))
help = help.replace(" lock", str(crayons.green(" lock", bold=True)))
help = help.replace(" open", str(crayons.red(" open", bold=True)))
help = help.replace(" run", str(crayons.yellow(" run", bold=True)))
help = help.replace(" shell", str(crayons.yellow(" shell", bold=True)))
help = help.replace(" sync", str(crayons.green(" sync", bold=True)))
help = help.replace(" uninstall", str(crayons.magenta(" uninstall", bold=True)))
help = help.replace(" update", str(crayons.green(" update", bold=True)))
additional_help = .format(
crayons.red("pipenv --three"),
crayons.red("pipenv --python 3.7"),
crayons.red("pipenv install --dev"),
crayons.red("pipenv lock"),
crayons.red("pipenv graph"),
crayons.red("pipenv install -e ."),
crayons.red("pipenv lock --pre"),
crayons.red("pipenv check"),
crayons.red("pipenv run pip freeze"),
crayons.red("pipenv --rm"),
)
help = help.replace("Commands:", additional_help)
return help
|
Formats the help string.
|
25,670 |
def __if_not_basestring(text_object):
converted_str = text_object
if not isinstance(text_object, str):
converted_str = str(text_object)
return converted_str
|
Convert to str
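A trivial doctest-style sketch (assuming the helper is reachable under this name at module level):
>>> __if_not_basestring(3.14)
'3.14'
>>> __if_not_basestring('already a str')
'already a str'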
|
25,671 |
def _ReadMemberFooter(self, file_object):
file_offset = file_object.get_offset()
member_footer = self._ReadStructure(
file_object, file_offset, self._MEMBER_FOOTER_SIZE,
self._MEMBER_FOOTER, )
self.uncompressed_data_size = member_footer.uncompressed_data_size
|
Reads a member footer.
Args:
file_object (FileIO): file-like object to read from.
Raises:
FileFormatError: if the member footer cannot be read.
|
25,672 |
def characters (self, data):
data = data.encode(self.encoding, "ignore")
self.fd.write(data)
|
Print characters.
@param data: the character data
@type data: string
@return: None
|
25,673 |
def load_modes(self, input_modes=None):
_modes = [
GameMode(
mode="normal", priority=2, digits=4, digit_type=DigitWord.DIGIT, guesses_allowed=10
),
GameMode(
mode="easy", priority=1, digits=3, digit_type=DigitWord.DIGIT, guesses_allowed=6
),
GameMode(
mode="hard", priority=3, digits=6, digit_type=DigitWord.DIGIT, guesses_allowed=6
),
GameMode(
mode="hex", priority=4, digits=4, digit_type=DigitWord.HEXDIGIT, guesses_allowed=10
)
]
if input_modes is not None:
if not isinstance(input_modes, list):
raise TypeError("Expected list of input_modes")
for mode in input_modes:
if not isinstance(mode, GameMode):
raise TypeError("Expected list to contain only GameMode objects")
_modes.append(mode)
self._game_modes = copy.deepcopy(_modes)
|
Loads modes (GameMode objects) to be supported by the game object. Four default
modes are provided (normal, easy, hard, and hex) but others could be provided
either by calling load_modes directly or passing a list of GameMode objects to
the instantiation call.
:param input_modes: A list of GameMode objects; nb: even if only one new GameMode
object is provided, it MUST be passed as a list - for example, passing GameMode gm1
would require passing [gm1] NOT gm1.
:return: A list of GameMode objects (both defaults and any added).
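A usage sketch (``game`` is a hypothetical instance of the hosting class; GameMode and DigitWord are the same classes used in the defaults above):
>>> gm1 = GameMode(mode="crazy", priority=5, digits=10,
...                digit_type=DigitWord.DIGIT, guesses_allowed=3)
>>> game.load_modes(input_modes=[gm1])  # must be a list, even for a single mode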
|
25,674 |
def multi_subplots_time(DataArray, SubSampleN=1, units=, xlim=None, ylim=None, LabelArray=[], show_fig=True):
unit_prefix = units[:-1]
NumDataSets = len(DataArray)
if LabelArray == []:
LabelArray = ["DataSet {}".format(i)
for i in _np.arange(0, len(DataArray), 1)]
fig, axs = _plt.subplots(NumDataSets, 1)
for i, data in enumerate(DataArray):
axs[i].plot(unit_conversion(data.time.get_array()[::SubSampleN], unit_prefix), data.voltage[::SubSampleN],
alpha=0.8, label=LabelArray[i])
axs[i].set_xlabel("time ({})".format(units))
axs[i].grid(which="major")
axs[i].legend(loc="best")
axs[i].set_ylabel("voltage (V)")
if xlim != None:
axs[i].set_xlim(xlim)
if ylim != None:
axs[i].set_ylim(ylim)
if show_fig == True:
_plt.show()
return fig, axs
|
plot the time trace on multiple axes
Parameters
----------
DataArray : array-like
array of DataObject instances for which to plot the PSDs
SubSampleN : int, optional
Number of intervals between points to remove (to sub-sample data so
that you effectively have lower sample rate to make plotting easier
and quicker.
xlim : array-like, optional
2 element array specifying the lower and upper x limit for which to
plot the time signal
LabelArray : array-like, optional
array of labels for each data-set to be plotted
show_fig : bool, optional
If True runs plt.show() before returning figure
if False it just returns the figure object.
(the default is True, it shows the figure)
Returns
-------
fig : matplotlib.figure.Figure object
The figure object created
axs : list of matplotlib.axes.Axes objects
The list of axes object created
|
25,675 |
def logout(self):
try:
self.browser.quit()
except Exception:
raise exceptions.BrowserException(self.brow_name, "not started")
return False
self.vbro.stop()
logger.info("logged out")
return True
|
logout func (quit browser)
|
25,676 |
def auth_user(self, username, password):
password_hash = hashlib.sha512(password.encode("utf-8")).hexdigest()
user = self._database.users.find_one(
{"username": username, "password": password_hash, "activate": {"$exists": False}})
return user if user is not None and self.connect_user(username, user["realname"], user["email"], user["language"]) else None
|
Authenticate the user in database
:param username: Username/Login
:param password: User password
:return: Returns a dict representing the user
|
25,677 |
def make_source_mask(data, snr, npixels, mask=None, mask_value=None,
filter_fwhm=None, filter_size=3, filter_kernel=None,
sigclip_sigma=3.0, sigclip_iters=5, dilate_size=11):
from scipy import ndimage
threshold = detect_threshold(data, snr, background=None, error=None,
mask=mask, mask_value=None,
sigclip_sigma=sigclip_sigma,
sigclip_iters=sigclip_iters)
kernel = None
if filter_kernel is not None:
kernel = filter_kernel
if filter_fwhm is not None:
sigma = filter_fwhm * gaussian_fwhm_to_sigma
kernel = Gaussian2DKernel(sigma, x_size=filter_size,
y_size=filter_size)
if kernel is not None:
kernel.normalize()
segm = detect_sources(data, threshold, npixels, filter_kernel=kernel)
selem = np.ones((dilate_size, dilate_size))
return ndimage.binary_dilation(segm.data.astype(np.bool), selem)
|
Make a source mask using source segmentation and binary dilation.
Parameters
----------
data : array_like
The 2D array of the image.
snr : float
The signal-to-noise ratio per pixel above the ``background`` for
which to consider a pixel as possibly being part of a source.
npixels : int
The number of connected pixels, each greater than ``threshold``,
that an object must have to be detected. ``npixels`` must be a
positive integer.
mask : array_like, bool, optional
A boolean mask with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Masked pixels are ignored when computing the image background
statistics.
mask_value : float, optional
An image data value (e.g., ``0.0``) that is ignored when
computing the image background statistics. ``mask_value`` will
be ignored if ``mask`` is input.
filter_fwhm : float, optional
The full-width at half-maximum (FWHM) of the Gaussian kernel to
filter the image before thresholding. ``filter_fwhm`` and
``filter_size`` are ignored if ``filter_kernel`` is defined.
filter_size : float, optional
The size of the square Gaussian kernel image. Used only if
``filter_fwhm`` is defined. ``filter_fwhm`` and ``filter_size``
are ignored if ``filter_kernel`` is defined.
filter_kernel : array-like (2D) or `~astropy.convolution.Kernel2D`, optional
The 2D array of the kernel used to filter the image before
thresholding. Filtering the image will smooth the noise and
maximize detectability of objects with a shape similar to the
kernel. ``filter_kernel`` overrides ``filter_fwhm`` and
``filter_size``.
sigclip_sigma : float, optional
The number of standard deviations to use as the clipping limit
when calculating the image background statistics.
sigclip_iters : int, optional
The number of iterations to perform sigma clipping, or `None` to
clip until convergence is achieved (i.e., continue until the last
iteration clips nothing) when calculating the image background
statistics.
dilate_size : int, optional
The size of the square array used to dilate the segmentation
image.
Returns
-------
mask : 2D `~numpy.ndarray`, bool
A 2D boolean image containing the source mask.
|
25,678 |
def start_drag(self, sprite, cursor_x = None, cursor_y = None):
cursor_x, cursor_y = cursor_x or sprite.x, cursor_y or sprite.y
self._mouse_down_sprite = self._drag_sprite = sprite
sprite.drag_x, sprite.drag_y = self._drag_sprite.x, self._drag_sprite.y
self.__drag_start_x, self.__drag_start_y = cursor_x, cursor_y
self.__drag_started = True
|
start dragging given sprite
|
25,679 |
def stellar_luminosity2(self, steps=10000):
msg = ": ADW 2017-09-20"%self.__class__.__name__
DeprecationWarning(msg)
mass_init, mass_pdf, mass_act, mag_1, mag_2 = self.sample(mass_steps=steps)
luminosity_interpolation = scipy.interpolate.interp1d(self.mass_init, self.luminosity,fill_value=0,bounds_error=False)
luminosity = luminosity_interpolation(mass_init)
return np.sum(luminosity * mass_pdf)
|
DEPRECATED: ADW 2017-09-20
Compute the stellar luminosity (L_Sol; average per star).
Uses "sample" to generate mass sample and pdf. The range of
integration only covers the input isochrone data (no
extrapolation used), but this seems like a sub-percent effect
if the isochrone goes to 0.15 Msun for the old and metal-poor
stellar populations of interest.
Note that the stellar luminosity is very sensitive to the
post-AGB population.
|
25,680 |
def design(self, max_stimuli=-1, max_inhibitors=-1, max_experiments=10, relax=False, configure=None):
self.designs = []
        # NOTE: the original clingo option strings and encoding key were lost in
        # extraction; the constants and the 'design' key below are assumptions.
        args = ['-c maxstimuli=%s' % max_stimuli, '-c maxinhibitors=%s' % max_inhibitors]
        clingo = gringo.Control(args)
        clingo.conf.solve.opt_mode = 'optN'  # enumerate all optimal models
        if configure is not None:
            configure(clingo.conf)
        clingo.add("base", [], self.instance)
        clingo.load(self.encodings['design'])
clingo.ground([("base", [])])
if relax:
parts = [("step", [step]) for step in xrange(1, max_experiments+1)]
parts.append(("diff", [max_experiments + 1]))
clingo.ground(parts)
ret = clingo.solve(on_model=self.__save__)
else:
step, ret = 0, gringo.SolveResult.UNKNOWN
while step <= max_experiments and ret != gringo.SolveResult.SAT:
parts = []
parts.append(("check", [step]))
if step > 0:
clingo.release_external(gringo.Fun("query", [step-1]))
parts.append(("step", [step]))
clingo.cleanup_domains()
clingo.ground(parts)
clingo.assign_external(gringo.Fun("query", [step]), True)
ret, step = clingo.solve(on_model=self.__save__), step + 1
        # The stats keys below are assumed; the original string literals were lost.
        self.stats['time_optimum'] = clingo.stats['time_solve']
        self.stats['time_enumeration'] = clingo.stats['time_total']
        self._logger.info("%s optimal experimental designs found in %.4fs",
                          len(self.designs), self.stats['time_enumeration'])
|
Finds all optimal experimental designs using up to :attr:`max_experiments` experiments, such that each experiment has
up to :attr:`max_stimuli` stimuli and :attr:`max_inhibitors` inhibitors. Each optimal experimental design is appended in the
attribute :attr:`designs` as an instance of :class:`caspo.core.clamping.ClampingList`.
Example::
>>> from caspo import core, design
>>> networks = core.LogicalNetworkList.from_csv('behaviors.csv')
>>> setup = core.Setup.from_json('setup.json')
>>> designer = design.Designer(networks, setup)
>>> designer.design(3, 2)
>>> for i,d in enumerate(designer.designs):
... f = 'design-%s' % i
    ...     d.to_csv(f, stimuli=setup.stimuli, inhibitors=setup.inhibitors)
Parameters
----------
max_stimuli : int
Maximum number of stimuli per experiment
max_inhibitors : int
Maximum number of inhibitors per experiment
max_experiments : int
Maximum number of experiments per design
relax : boolean
Whether to relax the full-pairwise networks discrimination (True) or not (False).
If relax equals True, the number of experiments per design is fixed to :attr:`max_experiments`
configure : callable
Callable object responsible of setting clingo configuration
|
25,681 |
def getAsKmlGridAnimation(self, tableName, timeStampedRasters=[], rasterIdFieldName=, rasterFieldName=,
documentName=, alpha=1.0, noDataValue=0, discreet=False):
if not (alpha >= 0 and alpha <= 1.0):
raise ValueError("RASTER CONVERSION ERROR: alpha must be between 0.0 and 1.0.")
rasterIds = []
for timeStampedRaster in timeStampedRasters:
            if 'rasterId' not in timeStampedRaster:
                raise ValueError('Each timeStampedRaster must define a "rasterId" key.')
            elif 'dateTime' not in timeStampedRaster:
                raise ValueError('Each timeStampedRaster must define a "dateTime" key.')
            rasterIds.append(str(timeStampedRaster['rasterId']))
minValue, maxValue = self.getMinMaxOfRasters(session=self._session,
table=tableName,
rasterIds=rasterIds,
rasterIdField=rasterIdFieldName,
rasterField=rasterFieldName,
noDataValue=noDataValue)
mappedColorRamp = ColorRampGenerator.mapColorRampToValues(colorRamp=self._colorRamp,
minValue=minValue,
maxValue=maxValue,
alpha=alpha)
deltaTime = None
        time1 = timeStampedRasters[0]['dateTime']
        if len(timeStampedRasters) >= 2:
            time2 = timeStampedRasters[1]['dateTime']
deltaTime = time2 - time1
kml = ET.Element(, xmlns=)
document = ET.SubElement(kml, )
docName = ET.SubElement(document, )
docName.text = documentName
if not discreet:
document.append(ET.fromstring(mappedColorRamp.getColorMapAsContinuousSLD()))
else:
values = []
document.append(ET.fromstring(mappedColorRamp.getColorMapAsDiscreetSLD(values)))
style = ET.SubElement(document, , id=)
listStyle = ET.SubElement(style, )
listItemType = ET.SubElement(listStyle, )
listItemType.text =
styleUrl = ET.SubElement(document, )
styleUrl.text =
uniqueValues = []
for timeStampedRaster in timeStampedRasters:
            rasterId = timeStampedRaster['rasterId']
            if deltaTime:
                dateTime = timeStampedRaster['dateTime']
prevDateTime = dateTime - deltaTime
statement = .format(rasterFieldName, tableName, rasterIdFieldName, rasterId)
result = self._session.execute(statement)
groupValue = -9999999.0
for row in result:
if row.val:
value = float(row.val)
else:
value = None
polygonString = row.polygon
i = int(row.x)
j = int(row.y)
if value:
if value not in uniqueValues:
uniqueValues.append(value)
if value != groupValue:
placemark = ET.SubElement(document, )
placemarkName = ET.SubElement(placemark, )
placemarkName.text = str(value)
style = ET.SubElement(placemark, )
lineStyle = ET.SubElement(style, )
lineColor = ET.SubElement(lineStyle, )
lineColor.text = self.LINE_COLOR
lineWidth = ET.SubElement(lineStyle, )
lineWidth.text = str(self.LINE_WIDTH)
polyStyle = ET.SubElement(style, )
polyColor = ET.SubElement(polyStyle, )
integerAlpha = mappedColorRamp.getAlphaAsInteger()
integerRGB = mappedColorRamp.getColorForValue(value)
hexABGR = % (integerAlpha,
integerRGB[mappedColorRamp.B],
integerRGB[mappedColorRamp.G],
integerRGB[mappedColorRamp.R])
polyColor.text = hexABGR
if deltaTime:
timeSpan = ET.SubElement(placemark, )
begin = ET.SubElement(timeSpan, )
begin.text = prevDateTime.strftime()
end = ET.SubElement(timeSpan, )
end.text = dateTime.strftime()
multigeometry = ET.SubElement(placemark, )
extendedData = ET.SubElement(placemark, )
valueData = ET.SubElement(extendedData, , name=)
valueValue = ET.SubElement(valueData, )
valueValue.text = str(value)
iData = ET.SubElement(extendedData, , name=)
valueI = ET.SubElement(iData, )
valueI.text = str(i)
jData = ET.SubElement(extendedData, , name=)
valueJ = ET.SubElement(jData, )
valueJ.text = str(j)
if deltaTime:
tData = ET.SubElement(extendedData, , name=)
valueT = ET.SubElement(tData, )
valueT.text = dateTime.strftime()
groupValue = value
polygon = ET.fromstring(polygonString)
multigeometry.append(polygon)
if not discreet:
document.append(ET.fromstring(mappedColorRamp.getColorMapAsContinuousSLD()))
else:
uniqueValues.sort()
document.append(ET.fromstring(mappedColorRamp.getColorMapAsDiscreetSLD(uniqueValues)))
return ET.tostring(kml)
|
Return a sequence of rasters with timestamps as a kml with time markers for animation.
:param tableName: Name of the table to extract rasters from
:param timeStampedRasters: List of dictionaries with keys: rasterId, dateTime
rasterId = a unique integer identifier used to locate the raster (usually value of primary key column)
dateTime = a datetime object representing the time the raster occurs
e.g:
timeStampedRasters = [{ 'rasterId': 1, 'dateTime': datetime(1970, 1, 1)},
{ 'rasterId': 2, 'dateTime': datetime(1970, 1, 2)},
{ 'rasterId': 3, 'dateTime': datetime(1970, 1, 3)}]
:param rasterIdFieldName: Name of the id field for rasters (usually the primary key field)
:param rasterFieldName: Name of the field where rasters are stored (of type raster)
:param documentName: The name to give to the KML document (will be listed in legend under this name)
:param alpha: The transparency to apply to each raster cell
:param noDataValue: The value to be used as the no data value (default is 0)
:rtype : string
|
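A usage sketch for the animation export above. The `converter` instance, table name, and column names are placeholders; only the `timeStampedRasters` structure comes from the docstring.

# Hypothetical usage sketch: `converter`, the table name, and the column names
# are placeholders; the timeStampedRasters structure follows the docstring.
from datetime import datetime, timedelta

start = datetime(1970, 1, 1)
timeStampedRasters = [{'rasterId': i + 1, 'dateTime': start + timedelta(days=i)}
                      for i in range(3)]

kml_bytes = converter.getAsKmlGridAnimation(tableName='raster_maps',
                                            timeStampedRasters=timeStampedRasters,
                                            rasterIdFieldName='id',
                                            rasterFieldName='raster',
                                            documentName='Depth Animation',
                                            alpha=0.8,
                                            noDataValue=0)

with open('animation.kml', 'wb') as f:   # ET.tostring returns bytes
    f.write(kml_bytes)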
25,682 |
def _evolve(self, state, qargs=None):
if qargs is not None:
return SuperOp(self)._evolve(state, qargs)
state = self._format_state(state)
if state.shape[0] != self._input_dim:
raise QiskitError(
"QuantumChannel input dimension is not equal to state dimension."
)
if state.ndim == 1 and self._data[1] is None and len(
self._data[0]) == 1:
return np.dot(self._data[0][0], state)
state = self._format_state(state, density_matrix=True)
kraus_l, kraus_r = self._data
if kraus_r is None:
kraus_r = kraus_l
        return np.einsum('AiB,BC,AjC->ij', kraus_l, state,
                         np.conjugate(kraus_r))
|
Evolve a quantum state by the QuantumChannel.
Args:
state (QuantumState): The input statevector or density matrix.
qargs (list): a list of QuantumState subsystem positions to apply
the operator on.
Returns:
QuantumState: the output quantum state.
Raises:
QiskitError: if the operator dimension does not match the
specified QuantumState subsystem dimensions.
|
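The density-matrix branch above implements the Kraus map rho' = sum_A K_A rho K_A^dagger. Below is a small self-check of that contraction against an explicit loop, assuming the 'AiB,BC,AjC->ij' einsum subscripts shown in the code.

# Sketch: verify the einsum contraction against the explicit Kraus sum for a
# single-qubit bit-flip channel; the subscript string is assumed as shown above.
import numpy as np

p = 0.1
kraus = np.array([np.sqrt(1 - p) * np.eye(2),
                  np.sqrt(p) * np.array([[0.0, 1.0], [1.0, 0.0]])])
rho = np.array([[0.75, 0.25], [0.25, 0.25]], dtype=complex)

explicit = sum(k @ rho @ k.conj().T for k in kraus)
contracted = np.einsum('AiB,BC,AjC->ij', kraus, rho, np.conjugate(kraus))
assert np.allclose(explicit, contracted)
print(np.trace(contracted).real)  # trace is preserved (1.0)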
25,683 |
def Run(self):
if data_store.RelationalDBEnabled():
data_store.REL_DB.RegisterMessageHandler(
self._ProcessMessageHandlerRequests,
self.well_known_flow_lease_time,
limit=100)
data_store.REL_DB.RegisterFlowProcessingHandler(self.ProcessFlow)
try:
while 1:
processed = self.RunOnce()
if processed == 0:
if time.time() - self.last_active > self.SHORT_POLL_TIME:
interval = self.POLLING_INTERVAL
else:
interval = self.SHORT_POLLING_INTERVAL
time.sleep(interval)
else:
self.last_active = time.time()
except KeyboardInterrupt:
logging.info("Caught interrupt, exiting.")
self.thread_pool.Join()
|
Event loop.
|
25,684 |
def update_headers(self, headers: Optional[LooseHeaders]) -> None:
self.headers = CIMultiDict()
netloc = cast(str, self.url.raw_host)
if helpers.is_ipv6_address(netloc):
            netloc = '[{}]'.format(netloc)  # bracket IPv6 literals
if not self.url.is_default_port():
            netloc += ':' + str(self.url.port)
self.headers[hdrs.HOST] = netloc
if headers:
if isinstance(headers, (dict, MultiDictProxy, MultiDict)):
headers = headers.items()
for key, value in headers:
                if key.lower() == 'host':
self.headers[key] = value
else:
self.headers.add(key, value)
|
Update request headers.
|
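The Host value built above follows a simple rule: bracket IPv6 literals and append the port only when it is non-default. A standalone sketch of that rule, without aiohttp's URL and helper objects:

# Standalone sketch of the Host-header rule used above; the helper and its
# arguments are illustrative and not part of aiohttp's API.
def host_header(raw_host: str, port: int, is_default_port: bool, is_ipv6: bool) -> str:
    netloc = '[{}]'.format(raw_host) if is_ipv6 else raw_host
    if not is_default_port:
        netloc += ':' + str(port)
    return netloc

print(host_header('example.com', 443, True, False))    # example.com
print(host_header('example.com', 8443, False, False))  # example.com:8443
print(host_header('::1', 8080, False, True))           # [::1]:8080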
25,685 |
def get_bucket_website_config(self, bucket):
details = self._details(
method=b"GET",
            url_context=self._url_context(bucket=bucket, object_name='?website'),  # the "?website" subresource
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_website_config)
return d
|
Get the website configuration of a bucket.
@param bucket: The name of the bucket.
@return: A C{Deferred} that will fire with the bucket's website
configuration.
|
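Since the method returns a Deferred, results arrive via callbacks. A usage sketch in Twisted style; the `client` instance stands in for an object of the class defining the method:

# Hypothetical usage sketch: `client` is a placeholder instance; only the
# Deferred callback chaining is shown.
def on_config(config):
    print("website config:", config)

def on_error(failure):
    failure.printTraceback()

d = client.get_bucket_website_config('my-bucket')
d.addCallbacks(on_config, on_error)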
25,686 |
def modify(self, modification, parameters):
if modification not in self.MODIFICATIONS:
            raise ValueError('Modification %s is not supported by %s' %
                             (modification, type(self).__name__))
        meth = getattr(self, 'modify_%s' % modification)  # dispatch to modify_<name>
meth(**parameters)
self.check_constraints()
|
Apply a single modification to an MFD parameters.
Reflects the modification method and calls it passing ``parameters``
as keyword arguments. See also :attr:`MODIFICATIONS`.
Modifications can be applied one on top of another. The logic
of stacking modifications is up to a specific MFD implementation.
:param modification:
String name representing the type of modification.
:param parameters:
Dictionary of parameters needed for modification.
:raises ValueError:
If ``modification`` is missing from :attr:`MODIFICATIONS`.
|
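The method dispatches by reflection to a `modify_<name>` method and then re-checks constraints. A minimal self-contained sketch of the same pattern with a made-up MFD-like class (not openquake's implementation):

# Minimal sketch of the reflection-based dispatch described above; ToyMFD and
# its single modification are made up for illustration.
class ToyMFD(object):
    MODIFICATIONS = {'set_max_mag'}

    def __init__(self, max_mag):
        self.max_mag = max_mag

    def check_constraints(self):
        if self.max_mag <= 0:
            raise ValueError('max_mag must be positive')

    def modify_set_max_mag(self, value):
        self.max_mag = value

    def modify(self, modification, parameters):
        if modification not in self.MODIFICATIONS:
            raise ValueError('Modification %s is not supported by %s'
                             % (modification, type(self).__name__))
        meth = getattr(self, 'modify_%s' % modification)
        meth(**parameters)
        self.check_constraints()

mfd = ToyMFD(max_mag=7.0)
mfd.modify('set_max_mag', {'value': 7.5})
print(mfd.max_mag)  # 7.5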
25,687 |
def show_lbaas_pool(self, lbaas_pool, **_params):
return self.get(self.lbaas_pool_path % (lbaas_pool),
params=_params)
|
Fetches information for a lbaas_pool.
|
25,688 |
def tagsInString_process(self, d_DICOM, astr, *args, **kwargs):
        b_tagsFound = False
        str_replace = ''
        l_tags = []
        l_tagsToSub = []
        l_funcTag = []
        l_args = []
        func = ''
        tag = ''
        chars = ''
        if '%' in astr:
            l_tags = astr.split('%')[1:]
l_tagsToSub = [i for i in d_DICOM[] if any(i in b for b in l_tags)]
l_tagsToSubSort = sorted(
l_tagsToSub,
key = lambda x: [i for i, s in enumerate(l_tags) if x in s][0]
)
for tag, func in zip(l_tagsToSubSort, l_tags):
b_tagsFound = True
str_replace = d_DICOM[][tag]
                # delimiters below ('_' and '|') follow the %_md5|4_Tag format in the docstring
                if 'md5' in func:
                    str_replace = hashlib.md5(str_replace.encode('utf-8')).hexdigest()
                    l_funcTag = func.split('_')[1:]
                    func = l_funcTag[0]
                    l_args = func.split('|')
                    if len(l_args) > 1:
                        chars = l_args[1]
                        str_replace = str_replace[0:int(chars)]
                    astr = astr.replace('_%s_' % func, '')
if in func:
l_funcTag = func.split()[1:]
func = l_funcTag[0]
str_msk = func.split()[1]
l_n = []
for i, j in zip(list(str_replace), list(str_msk)):
if j == : l_n.append(i)
else: l_n.append(j)
str_replace = .join(l_n)
astr = astr.replace( % func, )
if in func:
l_funcTag = func.split()[1:]
func = l_funcTag[0]
l_args = func.split()
str_char =
if len(l_args) > 1:
str_char = l_args[1]
str_replace = re.sub(r, , str_replace)
str_replace = str_char.join(str_replace.split())
astr = astr.replace( % func, )
                astr = astr.replace('%' + tag, str_replace)
return {
: True,
: b_tagsFound,
: astr
}
|
This method substitutes DICOM tags that are '%'-tagged
in a string template with the actual tag lookup.
For example, an output filename that is specified as the
following string:
%PatientAge-%PatientID-output.txt
will be parsed to
006Y-4412364-output.txt
It is also possible to apply certain permutations/functions
to a tag. For example, a function is identified by an underscore
prefixed and suffixed string as part of the DICOM tag. If
found, this function is applied to the tag value. For example,
%PatientAge-%_md5|4_PatientID-output.txt
will apply an md5 hash to the PatientID and use the first 4
characters:
006Y-7f38-output.txt
|
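A standalone sketch of the '%'-tag substitution with the md5-and-truncate function described above (e.g. %_md5|4_PatientID). The regex-based parsing here is a simplification, not the module's exact logic.

# Simplified, standalone sketch of "%Tag" and "%_md5|<n>_Tag" substitution;
# the parsing below is illustrative only.
import hashlib
import re

def substitute_tags(template, tags):
    pattern = re.compile(r'%(?:_(?P<funcs>[^_]+)_)?(?P<tag>[A-Za-z]+)')

    def repl(match):
        funcs, name = match.group('funcs'), match.group('tag')
        value = str(tags[name])
        if funcs and funcs.startswith('md5'):
            value = hashlib.md5(value.encode('utf-8')).hexdigest()
            parts = funcs.split('|')
            if len(parts) > 1:
                value = value[:int(parts[1])]   # keep first <n> hex characters
        return value

    return pattern.sub(repl, template)

tags = {'PatientAge': '006Y', 'PatientID': '4412364'}
print(substitute_tags('%PatientAge-%PatientID-output.txt', tags))
print(substitute_tags('%PatientAge-%_md5|4_PatientID-output.txt', tags))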
25,689 |
def add_clause(self, clause):
if clause.query_loc == MongoClause.LOC_MAIN:
self._main.append(clause)
elif clause.query_loc == MongoClause.LOC_MAIN2:
self._main2.append(clause)
elif clause.query_loc == MongoClause.LOC_WHERE:
self._where.append(clause)
else:
            raise RuntimeError('Unrecognized query location: {}'.format(clause.query_loc))
|
Add a new clause to the existing query.
:param clause: The clause to add
:type clause: MongoClause
:return: None
|
25,690 |
def inspect_file(self, commit, path):
req = proto.InspectFileRequest(file=proto.File(commit=commit_from(commit), path=path))
res = self.stub.InspectFile(req, metadata=self.metadata)
return res
|
Returns info about a specific file.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: Path to file.
|
25,691 |
def create_marginalized_hist(ax, values, label, percentiles=None,
                             color='k', fillcolor='gray', linecolor='navy',
                             linestyle='-',
                             title=True, expected_value=None,
                             expected_color='red', rotated=False,
                             plot_min=None, plot_max=None):
if fillcolor is None:
htype =
else:
htype =
if rotated:
orientation =
else:
orientation =
ax.hist(values, bins=50, histtype=htype, orientation=orientation,
facecolor=fillcolor, edgecolor=color, ls=linestyle, lw=2,
density=True)
if percentiles is None:
percentiles = [5., 50., 95.]
if len(percentiles) > 0:
plotp = numpy.percentile(values, percentiles)
else:
plotp = []
for val in plotp:
if rotated:
            ax.axhline(y=val, ls='dashed', color=linecolor, lw=2, zorder=3)
else:
            ax.axvline(x=val, ls='dashed', color=linecolor, lw=2, zorder=3)
if expected_value is not None:
if rotated:
ax.axhline(expected_value, color=expected_color, lw=1.5, zorder=2)
else:
ax.axvline(expected_value, color=expected_color, lw=1.5, zorder=2)
if title:
if len(percentiles) > 0:
minp = min(percentiles)
maxp = max(percentiles)
medp = (maxp + minp) / 2.
else:
minp = 5
medp = 50
maxp = 95
values_min = numpy.percentile(values, minp)
values_med = numpy.percentile(values, medp)
values_max = numpy.percentile(values, maxp)
negerror = values_med - values_min
poserror = values_max - values_med
        # the exact title format string was lost in extraction; '{}' is a minimal stand-in
        fmt = '{}'.format(str_utils.format_value(
            values_med, negerror, plus_error=poserror))
if rotated:
ax.yaxis.set_label_position("right")
set_marginal_histogram_title(ax, fmt, color,
label=label, rotated=rotated)
ax.set_xticks([])
        ax.set_xlabel('')
ymin, ymax = ax.get_ylim()
if plot_min is not None:
ymin = plot_min
if plot_max is not None:
ymax = plot_max
ax.set_ylim(ymin, ymax)
else:
set_marginal_histogram_title(ax, fmt, color, label=label)
ax.set_yticks([])
        ax.set_ylabel('')
xmin, xmax = ax.get_xlim()
if plot_min is not None:
xmin = plot_min
if plot_max is not None:
xmax = plot_max
ax.set_xlim(xmin, xmax)
|
Plots a 1D marginalized histogram of the given param from the given
samples.
Parameters
----------
ax : pyplot.Axes
The axes on which to draw the plot.
values : array
The parameter values to plot.
label : str
A label to use for the title.
percentiles : {None, float or array}
What percentiles to draw lines at. If None, will draw lines at
`[5, 50, 95]` (i.e., the median and the bounds of the central 90%
credible interval).
color : {'k', string}
What color to make the histogram; default is black.
fillcolor : {'gray', string, or None}
What color to fill the histogram with. Set to None to not fill the
histogram. Default is 'gray'.
linestyle : str, optional
What line style to use for the histogram. Default is '-'.
linecolor : {'navy', string}
What color to use for the percentile lines. Default is 'navy'.
title : bool, optional
Add a title with a estimated value +/- uncertainty. The estimated value
is the percentile halfway between the max/min of ``percentiles``, while
the uncertainty is given by the max/min of the ``percentiles``. If no
percentiles are specified, defaults to quoting the median +/- 95/5
percentiles.
rotated : {False, bool}
Plot the histogram on the y-axis instead of the x. Default is False.
plot_min : {None, float}
The minimum value to plot. If None, will default to whatever `pyplot`
creates.
plot_max : {None, float}
The maximum value to plot. If None, will default to whatever `pyplot`
creates.
scalefac : {1., float}
Factor to scale the default font sizes by. Default is 1 (no scaling).
|
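A usage sketch with synthetic samples on a plain matplotlib axes. `title=False` is used so the module-level helpers (`str_utils`, `set_marginal_histogram_title`) referenced inside the function are not needed.

# Usage sketch with synthetic samples; title=False sidesteps the module-level
# title helpers referenced inside the function.
import numpy
import matplotlib.pyplot as plt

samples = numpy.random.normal(loc=1.0, scale=0.3, size=5000)
fig, ax = plt.subplots()
create_marginalized_hist(ax, samples, label='x',
                         percentiles=[5., 50., 95.],
                         color='k', fillcolor='gray', linecolor='navy',
                         title=False, expected_value=1.0,
                         expected_color='red', rotated=False)
fig.savefig('marginal_x.png')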
25,692 |
def _calculate_num_queries(self):
request_totals = self._totals("request")
response_totals = self._totals("response")
return request_totals[2] + response_totals[2]
|
Calculate the total number of request and response queries.
Used for count header and count table.
|
25,693 |
def admin_url_params(request, params=None):
params = params or {}
if popup_status(request):
        params[IS_POPUP_VAR] = '1'
pick_type = popup_pick_type(request)
if pick_type:
params[] = pick_type
return params
|
given a request, looks at GET and POST values to determine which params
should be added. Is used to keep the context of popup and picker mode.
|
25,694 |
async def on_raw_762(self, message):
if not self._metadata_queue:
return
nickname = self._metadata_queue.pop()
future = self._pending[].pop(nickname)
future.set_result(self._metadata_info.pop(nickname))
|
End of metadata.
|
25,695 |
def config_generator(search_space, max_search, rng, shuffle=True):
def dict_product(d):
keys = d.keys()
for element in product(*d.values()):
yield dict(zip(keys, element))
def range_param_func(v):
scale = v.get("scale", "linear")
mini = min(v["range"])
maxi = max(v["range"])
if scale == "linear":
func = lambda rand: mini + (maxi - mini) * rand
elif scale == "log":
mini = np.log(mini)
maxi = np.log(maxi)
func = lambda rand: np.exp(mini + (maxi - mini) * rand)
else:
            raise ValueError(f"Unrecognized scale '{scale}' for parameter {k}")
return func
discretes = {}
ranges = {}
for k, v in search_space.items():
if isinstance(v, dict):
ranges[k] = range_param_func(v)
elif isinstance(v, list):
discretes[k] = v
else:
discretes[k] = [v]
discrete_configs = list(dict_product(discretes))
if shuffle:
rng.shuffle(discrete_configs)
if ranges and max_search:
discrete_configs = cycle(discrete_configs)
for i, config in enumerate(discrete_configs):
config = config.copy()
if max_search and i == max_search:
break
for k, v in ranges.items():
config[k] = float(v(rng.random()))
yield config
|
Generates config dicts from the given search space
Args:
search_space: (dict) A dictionary of parameters to search over.
See note below for more details.
max_search: (int) The maximum number of configurations to search.
If max_search is None, do a full grid search of all discrete
parameters, filling in range parameters as needed.
Otherwise, do a full grid search of all discrete
parameters and then cycle through again filling in new
range parameters values; if there are no range parameters,
stop after yielding the full cross product of parameters
once.
shuffle: (bool) If True, shuffle the order of generated configs
Yields:
configs: each config is a dict of parameter values based on the
provided search space
The search_space dictionary may consist of two types of parameters:
--discrete: a discrete parameter is either a single value or a
list of values. Use single values, for example, to override
a default model parameter or set a flag such as 'verbose'=True.
--range: a range parameter is a dict of the form:
{'range': [<min>, <max>], 'scale': <scale>}
where <min> and <max> are the min/max values to search between
and scale is one of ['linear', 'log'] (defaulting to 'linear')
representing the scale to use when searching the given range
Example:
search_space = {
'verbose': True, # discrete
'n_epochs': 100, # discrete
'momentum': [0.0, 0.9, 0.99], # discrete
'l2': {'range': [0.0001, 10]} # linear range
'lr': {'range': [0.001, 1], 'scale': 'log'}, # log range
}
If max_search is None, this will return 3 configurations (enough to
just cover the full cross-product of discrete values, filled
in with sampled range values)
Otherwise, this will return max_search configurations
(cycling through the discrete value combinations multiple
time if necessary)
|
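A short driver showing how the generator is consumed. Any `rng` exposing `random()` and `shuffle()` works; Python's `random.Random` is used here for simplicity.

# Driver sketch: iterate a few sampled configurations from a small search space.
import random

search_space = {
    'n_epochs': 100,                               # fixed discrete value
    'momentum': [0.0, 0.9, 0.99],                  # discrete choices
    'lr': {'range': [0.001, 1], 'scale': 'log'},   # log-scale range parameter
}

rng = random.Random(0)
for config in config_generator(search_space, max_search=5, rng=rng, shuffle=True):
    print(config)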
25,696 |
def message_loop(self, t_q, r_q):
t_msg = {}
while t_msg.get("state", "") != "__DIE__":
try:
t_msg = t_q.get(True, self.cycle_sleep)
self.task = t_msg.get("task", "")
if self.task != "":
self.task.task_start = time.time()
self.r_q_send(
{"w_id": self.w_id, "task": self.task, "state": "__ACK__"}
)
self.cycle_sleep = self.task.worker_loop_delay
self.task.result = self.task.run()
self.task.task_stop = time.time()
self.r_q_send(
{"w_id": self.w_id, "task": self.task, "state": "__FINISHED__"}
)
self.task = None
except Empty:
pass
except Full:
time.sleep(0.1)
except:
if self.task is not None:
self.task.task_stop = time.time()
tb_str = "".join(tb.format_exception(*(sys.exc_info())))
self.r_q_send(
{
"w_id": self.w_id,
"task": self.task,
"error": tb_str,
"state": "__ERROR__",
}
)
return
|
Loop through messages and execute tasks
|
25,697 |
def _interpolate_doy_calendar(source, doy_max):
    if 'dayofyear' not in source.coords.keys():
        raise AttributeError("source should have dayofyear coordinates.")
    doy_max_source = source.dayofyear.max()
    tmp = source.interpolate_na(dim='dayofyear')
    tmp.coords['dayofyear'] = np.linspace(start=1, stop=doy_max, num=doy_max_source)
return tmp.interp(dayofyear=range(1, doy_max + 1))
|
Interpolate from one `dayofyear` range to another.
Interpolate an array defined over a `dayofyear` range (say 1 to 360) to another `dayofyear` range (say 1
to 365).
Parameters
----------
source : xarray.DataArray
Array with `dayofyear` coordinates.
doy_max : int
Largest day of the year allowed by calendar.
Returns
-------
xarray.DataArray
Interpolated source array over coordinates spanning the target `dayofyear` range.
|
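A usage sketch remapping a toy 360-day climatology onto a 365-day `dayofyear` axis; assumes xarray is available alongside the snippet above.

# Usage sketch: interpolate a 360-day dayofyear series onto 365 days.
import numpy as np
import xarray as xr

doy_360 = xr.DataArray(np.sin(2 * np.pi * np.arange(1, 361) / 360.0),
                       dims='dayofyear',
                       coords={'dayofyear': np.arange(1, 361)})
doy_365 = _interpolate_doy_calendar(doy_360, doy_max=365)
print(int(doy_365.dayofyear.min()), int(doy_365.dayofyear.max()))  # 1 365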
25,698 |
def get_discovery_doc(self, services, hostname=None):
if not isinstance(services, (tuple, list)):
services = [services]
        util.check_list_type(services, remote._ServiceClass, 'services',
                             allow_none=False)
return self.__discovery_doc_descriptor(services, hostname=hostname)
|
JSON dict description of a protorpc.remote.Service in discovery format.
Args:
services: Either a single protorpc.remote.Service or a list of them
that implements an api/version.
hostname: string, Hostname of the API, to override the value set on the
current service. Defaults to None.
Returns:
dict, The discovery document as a JSON dict.
|
25,699 |
def deepest_common_ancestor(goterms, godag):
return max(common_parent_go_ids(goterms, godag), key=lambda t: godag[t].depth)
|
This function gets the nearest common ancestor
using the above function.
Only returns single most specific - assumes unique exists.
|
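A tiny self-contained sketch of the max-by-depth selection, with a stand-in common-parent lookup and a toy DAG instead of goatools objects:

# Self-contained sketch of "deepest common ancestor = max common parent by depth";
# the toy DAG and toy_common_parents replace the goatools structures.
class _Term(object):
    def __init__(self, depth, ancestors):
        self.depth = depth
        self.ancestors = ancestors

toy_godag = {
    'GO:root': _Term(0, set()),
    'GO:mid':  _Term(1, {'GO:root'}),
    'GO:a':    _Term(2, {'GO:root', 'GO:mid'}),
    'GO:b':    _Term(2, {'GO:root', 'GO:mid'}),
}

def toy_common_parents(goterms, godag):
    common = None
    for term in goterms:
        ancestors = godag[term].ancestors | {term}   # a term counts as its own ancestor
        common = ancestors if common is None else common & ancestors
    return common

def toy_deepest_common_ancestor(goterms, godag):
    return max(toy_common_parents(goterms, godag), key=lambda t: godag[t].depth)

print(toy_deepest_common_ancestor(['GO:a', 'GO:b'], toy_godag))  # GO:mid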