Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k)
---|---|---
4,200 | def get_url_from_model_core(request, obj):
    from is_core.site import get_model_core
    model_core = get_model_core(obj.__class__)
    if model_core and hasattr(model_core, 'ui_patterns'):
        edit_pattern = model_core.ui_patterns.get('edit')
        return (
            edit_pattern.get_url_string(request, obj=obj)
            if edit_pattern and edit_pattern.has_permission('get', request, obj=obj) else None
        )
    else:
        return None | Returns object URL from model core. |
4,201 | def from_code(cls, co):
co_code = co.co_code
labels = dict((addr, Label()) for addr in findlabels(co_code))
linestarts = dict(cls._findlinestarts(co))
cellfree = co.co_cellvars + co.co_freevars
code = CodeList()
n = len(co_code)
i = 0
extended_arg = 0
while i < n:
op = Opcode(ord(co_code[i]))
if i in labels:
code.append((labels[i], None))
if i in linestarts:
code.append((SetLineno, linestarts[i]))
i += 1
if op in hascode:
lastop, lastarg = code[-1]
if lastop != LOAD_CONST:
raise ValueError(
"%s should be preceded by LOAD_CONST code" % op)
code[-1] = (LOAD_CONST, Code.from_code(lastarg))
if op not in hasarg:
code.append((op, None))
else:
arg = ord(co_code[i]) + ord(co_code[i+1])*256 + extended_arg
extended_arg = 0
i += 2
if op == opcode.EXTENDED_ARG:
extended_arg = arg << 16
elif op in hasconst:
code.append((op, co.co_consts[arg]))
elif op in hasname:
code.append((op, co.co_names[arg]))
elif op in hasjabs:
code.append((op, labels[arg]))
elif op in hasjrel:
code.append((op, labels[i + arg]))
elif op in haslocal:
code.append((op, co.co_varnames[arg]))
elif op in hascompare:
code.append((op, cmp_op[arg]))
elif op in hasfree:
code.append((op, cellfree[arg]))
else:
code.append((op, arg))
varargs = bool(co.co_flags & CO_VARARGS)
varkwargs = bool(co.co_flags & CO_VARKEYWORDS)
newlocals = bool(co.co_flags & CO_NEWLOCALS)
args = co.co_varnames[:co.co_argcount + varargs + varkwargs]
if co.co_consts and isinstance(co.co_consts[0], basestring):
docstring = co.co_consts[0]
else:
docstring = None
return cls(code = code,
freevars = co.co_freevars,
args = args,
varargs = varargs,
varkwargs = varkwargs,
newlocals = newlocals,
name = co.co_name,
filename = co.co_filename,
firstlineno = co.co_firstlineno,
docstring = docstring,
) | Disassemble a Python code object into a Code object. |
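from_code() above walks raw bytecode with the findlabels/findlinestarts helpers. The standard-library dis module exposes the same two helpers, so a small self-contained Python 3 sketch (no byteplay-style Code class required) shows what they yield for a toy function:
import dis

def sample(x):
    total = 0
    for i in range(x):
        if i % 2:
            total += i
    return total

co = sample.__code__
print(dis.findlabels(co.co_code))    # byte offsets that are jump targets (the disassembler's labels)
print(list(dis.findlinestarts(co)))  # (offset, line number) pairs (the disassembler's SetLineno entries)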
4,202 | def save_photon_hdf5(self, identity=None, overwrite=True, path=None):
filepath = self.filepath
if path is not None:
filepath = Path(path, filepath.name)
self.merge_da()
data = self._make_photon_hdf5(identity=identity)
phc.hdf5.save_photon_hdf5(data, h5_fname=str(filepath),
overwrite=overwrite) | Create a smFRET Photon-HDF5 file with current timestamps. |
4,203 | def quote_datetime(self, value):
if value:
if isinstance(value, type_check):
self._quote_datetime = parse(value)
elif isinstance(value, datetime.datetime):
self._quote_datetime = value | Force the quote_datetime to always be a datetime
:param value:
:return: |
4,204 | def parse_ndxlist(output):
m = NDXLIST.search(output)
grouplist = m.group()
return parse_groups(grouplist) | Parse output from make_ndx to build list of index groups::
groups = parse_ndxlist(output)
output should be the standard output from ``make_ndx``, e.g.::
rc,output,junk = gromacs.make_ndx(..., input=('', 'q'), stdout=False, stderr=True)
(or simply use
rc,output,junk = cbook.make_ndx_captured(...)
which presets input, stdout and stderr; of course input can be overridden.)
:Returns:
The function returns a list of dicts (``groups``) with fields
name
name of the groups
nr
number of the group (starts at 0)
natoms
number of atoms in the group |
4,205 | def getBucketIndices(self, input):
if input == SENTINEL_VALUE_FOR_MISSING_DATA:
return [None] * len(self.encoders)
else:
assert isinstance(input, datetime.datetime)
scalars = self.getScalars(input)
result = []
for i in xrange(len(self.encoders)):
(name, encoder, offset) = self.encoders[i]
result.extend(encoder.getBucketIndices(scalars[i]))
return result | See method description in base.py |
4,206 | def contents(self, from_date=DEFAULT_DATETIME,
offset=None, max_contents=MAX_CONTENTS):
    resource = self.RCONTENTS + '/' + self.MSEARCH
    date = from_date.strftime("%Y-%m-%d %H:%M")
    cql = self.VCQL % {'date': date}
    params = {
        self.PCQL: cql,
        self.PLIMIT: max_contents,
        self.PEXPAND: self.PANCESTORS
    }
    if offset:
        params[self.PSTART] = offset
    for response in self._call(resource, params):
        yield response | Get the contents of a repository.
yield response | Get the contents of a repository.
This method returns an iterator that manages the pagination
over contents. Take into account that the seconds of `from_date`
parameter will be ignored because the API only works with
hours and minutes.
:param from_date: fetch the contents updated since this date
:param offset: fetch the contents starting from this offset
:param max_contents: maximum number of contents to fetch per request |
4,207 | def apply_backspaces_and_linefeeds(text):
    orig_lines = text.split('\n')
    orig_lines_len = len(orig_lines)
    new_lines = []
    for orig_line_idx, orig_line in enumerate(orig_lines):
        chars, cursor = [], 0
        orig_line_len = len(orig_line)
        for orig_char_idx, orig_char in enumerate(orig_line):
            if orig_char == '\r' and (orig_char_idx != orig_line_len - 1 or
                                      orig_line_idx != orig_lines_len - 1):
                cursor = 0
            elif orig_char == '\b':
                cursor = max(0, cursor - 1)
            else:
                if (orig_char == '\r' and
                        orig_char_idx == orig_line_len - 1 and
                        orig_line_idx == orig_lines_len - 1):
                    cursor = len(chars)
                if cursor == len(chars):
                    chars.append(orig_char)
                else:
                    chars[cursor] = orig_char
                cursor += 1
        new_lines.append(''.join(chars))
    return '\n'.join(new_lines) | Interpret backspaces and linefeeds in text like a terminal would.
Interpret text like a terminal by removing backspace and linefeed
characters and applying them line by line.
If the final line ends with a carriage return, it is kept so that the result can be
concatenated with the next output chunk. |
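A brief usage sketch for apply_backspaces_and_linefeeds, assuming the '\r', '\b' and '\n' control characters reconstructed above: carriage returns rewind the cursor to the start of the line and backspaces step it back one position, so overwritten progress output collapses to its final state.
raw = "progress: 10%\rprogress: 50%\rprogress: 100%\nabc\b\bxy\n"
print(repr(apply_backspaces_and_linefeeds(raw)))
# 'progress: 100%\naxy\n'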
4,208 | def set_color_zones(self, start_index, end_index, color, duration=0, apply=1, callb=None, rapid=False):
if len(color) == 4:
args = {
"start_index": start_index,
"end_index": end_index,
"color": color,
"duration": duration,
"apply": apply,
}
mypartial=partial(self.resp_set_multizonemultizone, args=args)
if callb:
mycallb=lambda x,y:(mypartial(y),callb(x,y))
else:
mycallb=lambda x,y:mypartial(y)
if rapid:
self.fire_and_forget(MultiZoneSetColorZones, args, num_repeats=1)
mycallb(self, None)
else:
self.req_with_ack(MultiZoneSetColorZones, args, callb=mycallb) | Convenience method to set the colour status zone of the device
This method will send a MultiZoneSetColorZones message to the device, and request callb be executed
when an ACK is received. The default callback will simply cache the value.
:param start_index: Index of the start of the zone of interest
:type start_index: int
:param end_index: Index of the end of the zone of interest. By default start_index+7
:type end_index: int
:param apply: Indicates if the colour change is to be applied or memorized. Default: 1
:type apply: int
:param value: The new state, a dictionary of int with 4 keys Hue, Saturation, Brightness, Kelvin
:type value: dict
:param duration: The duration, in seconds, of the power state transition.
:type duration: int
:param callb: Callable to be used when the response is received. If not set,
self.resp_set_label will be used.
:type callb: callable
:param rapid: Whether to ask for ack (False) or not (True). Default False
:type rapid: bool
:returns: None
:rtype: None |
4,209 | def latencies(self):
return [(shard_id, shard.ws.latency) for shard_id, shard in self.shards.items()] | List[Tuple[:class:`int`, :class:`float`]]: A list of latencies between a HEARTBEAT and a HEARTBEAT_ACK in seconds.
This returns a list of tuples with elements ``(shard_id, latency)``. |
4,210 | def getargnames(argspecs, with_unbox=False):
    args = argspecs.args
    vargs = argspecs.varargs
    try:
        kw = argspecs.keywords
    except AttributeError:
        kw = argspecs.varkw
    try:
        kwonly = argspecs.kwonlyargs
    except AttributeError:
        kwonly = None
    res = []
    if args is not None:
        res.extend(args)
    if vargs is not None:
        res.append('*' + vargs if with_unbox else vargs)
    if kwonly is not None:
        res.extend(kwonly)
    if kw is not None:
        res.append('**' + kw if with_unbox else kw)
    return res | Resembles list of arg-names as would be seen in a function signature, including
var-args, var-keywords and keyword-only args. |
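A short usage sketch for getargnames, assuming the '*'/'**' prefixes restored above; the .keywords/.varkw fallback in the body is exactly the difference between the old getargspec result and inspect.getfullargspec:
import inspect

def f(a, b, *args, c=1, **kw):
    return a

spec = inspect.getfullargspec(f)
print(getargnames(spec))                    # ['a', 'b', 'args', 'c', 'kw']
print(getargnames(spec, with_unbox=True))   # ['a', 'b', '*args', 'c', '**kw']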
4,211 | def context(fname, node):
try:
yield node
except Exception:
etype, exc, tb = sys.exc_info()
msg = % (
striptag(node.tag), exc, getattr(node, , ), fname)
raise_(etype, msg, tb) | Context manager managing exceptions and adding line number of the
current node and name of the current file to the error message.
:param fname: the current file being processed
:param node: the current node being processed |
4,212 | def check_X_y(X, y):
    if len(X) != len(y):
        raise ValueError('Inconsistent input and output data shapes. '
                         'found X: {} and y: {}'.format(X.shape, y.shape)) | tool to ensure input and output data have the same number of samples
Parameters
----------
X : array-like
y : array-like
Returns
-------
None |
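The check compares only the leading (sample) dimension; a minimal sketch of the two outcomes:
import numpy as np

X = np.arange(10).reshape(5, 2)
y = np.arange(5)
check_X_y(X, y)             # 5 samples on both sides: passes silently
try:
    check_X_y(X, y[:4])     # 5 vs 4 samples
except ValueError as err:
    print(err)              # message reports the mismatched shapes (5, 2) vs (4,)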
4,213 | def filterOverlappingAlignments(alignments):
l = []
alignments = alignments[:]
sortAlignments(alignments)
alignments.reverse()
for pA1 in alignments:
for pA2 in l:
if pA1.contig1 == pA2.contig1 and getPositiveCoordinateRangeOverlap(pA1.start1+1, pA1.end1, pA2.start1+1, pA2.end1) is not None:
break
if pA1.contig2 == pA2.contig2 and getPositiveCoordinateRangeOverlap(pA1.start2+1, pA1.end2, pA2.start2+1, pA2.end2) is not None:
break
if pA1.contig2 == pA2.contig1 and getPositiveCoordinateRangeOverlap(pA1.start2+1, pA1.end2, pA2.start1+1, pA2.end1) is not None:
break
if pA1.contig1 == pA2.contig2 and getPositiveCoordinateRangeOverlap(pA1.start1+1, pA1.end1, pA2.start2+1, pA2.end2) is not None:
break
else:
l.append(pA1)
l.reverse()
return l | Filter alignments to be non-overlapping. |
4,214 | def _gridmake2(x1, x2):
if x1.ndim == 1 and x2.ndim == 1:
return np.column_stack([np.tile(x1, x2.shape[0]),
np.repeat(x2, x1.shape[0])])
elif x1.ndim > 1 and x2.ndim == 1:
first = np.tile(x1, (x2.shape[0], 1))
second = np.repeat(x2, x1.shape[0])
return np.column_stack([first, second])
else:
raise NotImplementedError("Come back here") | Expands two vectors (or matrices) into a matrix where rows span the
cartesian product of combinations of the input arrays. Each column of the
input arrays will correspond to one column of the output matrix.
Parameters
----------
x1 : np.ndarray
First vector to be expanded.
x2 : np.ndarray
Second vector to be expanded.
Returns
-------
out : np.ndarray
The cartesian product of combinations of the input arrays.
Notes
-----
Based on the original function ``gridmake2`` in the CompEcon toolbox by
Miranda and Fackler.
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational Economics
and Finance, MIT Press, 2002. |
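A stand-alone illustration of the expansion recipe used above (the same tile/repeat/column_stack combination, not a call into the library itself):
import numpy as np

x1 = np.array([1, 2, 3])
x2 = np.array([10, 20])
grid = np.column_stack([np.tile(x1, x2.shape[0]), np.repeat(x2, x1.shape[0])])
print(grid)
# [[ 1 10]
#  [ 2 10]
#  [ 3 10]
#  [ 1 20]
#  [ 2 20]
#  [ 3 20]]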
4,215 | def ensure_path(path):
if isinstance(path, vistir.compat.Path):
return path
path = vistir.compat.Path(os.path.expandvars(path))
return path.absolute() | Given a path (either a string or a Path object), expand variables and return a Path object.
:param path: A string or a :class:`~pathlib.Path` object.
:type path: str or :class:`~pathlib.Path`
:return: A fully expanded Path object.
:rtype: :class:`~pathlib.Path` |
4,216 | def load(self, dump_fn=, prep_only=0, force_upload=0, from_local=0, name=None, site=None, dest_dir=None, force_host=None):
r = self.database_renderer(name=name, site=site)
r.env.dump_fn = self.get_default_db_fn(fn_template=dump_fn, dest_dir=dest_dir)
from_local = int(from_local)
prep_only = int(prep_only)
missing_local_dump_error = r.format(
"Database dump file {dump_fn} does not exist."
)
if self.is_local:
r.env.remote_dump_fn = dump_fn
else:
r.env.remote_dump_fn = + os.path.split(r.env.dump_fn)[-1]
if not prep_only and not self.is_local:
if not self.dryrun:
assert os.path.isfile(r.env.dump_fn), missing_local_dump_error
self.upload_snapshot(name=name, site=site, local_dump_fn=r.env.dump_fn, remote_dump_fn=r.env.remote_dump_fn)
if self.is_local and not prep_only and not self.dryrun:
assert os.path.isfile(r.env.dump_fn), missing_local_dump_error
if force_host:
r.env.db_host = force_host
with settings(warn_only=True):
r.sudo(, user=r.env.postgres_user)
r.sudo(, user=r.env.postgres_user)
with settings(warn_only=True):
if r.env.engine == POSTGIS:
r.sudo(,
user=r.env.postgres_user)
r.sudo(,
user=r.env.postgres_user)
with settings(warn_only=True):
r.sudo(, user=r.env.postgres_user)
with settings(warn_only=True):
r.sudo(, user=r.env.postgres_user)
r.sudo(
{db_password}\
, user=r.env.postgres_user)
for createlang in r.env.createlangs:
r.env.createlang = createlang
r.sudo(, user=r.env.postgres_user)
if not prep_only:
with settings(warn_only=True):
r.sudo(r.env.load_command, user=r.env.postgres_user) | Restores a database snapshot onto the target database server.
If prep_only=1, commands for preparing the load will be generated,
but not the command to finally load the snapshot. |
4,217 | def _query(self, *criterion):
return self.session.query(
self.model_class
).filter(
*criterion
) | Construct a query for the model. |
4,218 | def p_arguments(self, p):
if len(p) == 4:
p[0] = self.asttypes.Arguments(p[2])
else:
p[0] = self.asttypes.Arguments([])
p[0].setpos(p) | arguments : LPAREN RPAREN
| LPAREN argument_list RPAREN |
4,219 | def destroy(self, request, *args, **kwargs):
return super(PriceListItemViewSet, self).destroy(request, *args, **kwargs) | Run **DELETE** request against */api/price-list-items/<uuid>/* to delete price list item.
Only customer owner and staff can delete price items. |
4,220 | def update_all(self, criteria: Q, *args, **kwargs):
items = self._filter(criteria, self.conn[][self.schema_name])
update_count = 0
for key in items:
item = items[key]
item.update(*args)
item.update(kwargs)
self.conn[][self.schema_name][key] = item
update_count += 1
return update_count | Update all objects satisfying the criteria |
4,221 | def decode(self, descriptor):
i = iter(descriptor)
n = len(self._schema)
schema = self._schema + (,)
tuple_gen = (tuple(itertools.islice(i, n)) + (d, )
for d in self._dimensions)
return [{ k: v for k, v in zip(schema, t) } for t in tuple_gen] | Produce a list of dictionaries for each dimension in this transcoder |
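decode() relies on a chunking trick: repeatedly islice-ing the same iterator pulls consecutive n-sized groups out of a flat descriptor, one group per dimension. A stand-alone illustration follows; the schema field names and the trailing 'dim' key are made up for the example, since the original literal was elided.
import itertools

schema = ('lower', 'upper', 'name')       # hypothetical schema fields
dimensions = ('time', 'baseline')
descriptor = [0, 10, 'ntime', 0, 5, 'nbl']

i = iter(descriptor)
n = len(schema)
rows = [dict(zip(schema + ('dim',), tuple(itertools.islice(i, n)) + (d,)))
        for d in dimensions]
print(rows)
# [{'lower': 0, 'upper': 10, 'name': 'ntime', 'dim': 'time'},
#  {'lower': 0, 'upper': 5, 'name': 'nbl', 'dim': 'baseline'}]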
4,222 | def write(self, process_tile, data):
if data is None or len(data) == 0:
return
if not isinstance(data, (list, types.GeneratorType)):
raise TypeError(
"GeoJSON driver data has to be a list or generator of GeoJSON objects"
)
data = list(data)
if not len(data):
logger.debug("no features to write")
else:
bucket_resource = get_boto3_bucket(self._bucket) if self._bucket else None
for tile in self.pyramid.intersecting(process_tile):
out_path = self.get_path(tile)
self.prepare_path(tile)
out_tile = BufferedTile(tile, self.pixelbuffer)
write_vector_window(
in_data=data,
out_schema=self.output_params["schema"],
out_tile=out_tile,
out_path=out_path,
bucket_resource=bucket_resource
) | Write data from process tiles into GeoJSON file(s).
Parameters
----------
process_tile : ``BufferedTile``
must be member of process ``TilePyramid`` |
4,223 | def get_comparable_values_for_ordering(self):
return (0 if self.position >= 0 else 1, int(self.position), str(self.name), str(self.description)) | Return a tuple of values representing the unicity of the object |
4,224 | def get_releasenotes(repo_path, from_commit=None, bugtracker_url=):
repo = dulwich.repo.Repo(repo_path)
tags = get_tags(repo)
refs = get_refs(repo)
maj_version = 0
feat_version = 0
fix_version = 0
start_including = False
release_notes_per_major = OrderedDict()
cur_line =
if from_commit is None:
start_including = True
prev_version = (maj_version, feat_version, fix_version)
prev_version_str = % prev_version
bugs = []
features = []
api_break_changes = []
for commit_sha, children in reversed(
get_children_per_first_parent(repo_path).items()
):
commit = get_repo_object(repo, commit_sha)
maj_version, feat_version, fix_version = get_version(
commit=commit,
tags=tags,
maj_version=maj_version,
feat_version=feat_version,
fix_version=fix_version,
children=children,
)
version = (maj_version, feat_version, fix_version)
version_str = % version
if (
start_including or commit_sha.startswith(from_commit) or
fuzzy_matches_refs(from_commit, refs.get(commit_sha, []))
):
start_including = True
parent_commit_type = get_commit_type(
commit=commit,
children=children,
tags=tags,
prev_version=prev_version,
)
cur_line = pretty_commit(
commit=commit,
version=version_str,
bugtracker_url=bugtracker_url,
commit_type=parent_commit_type,
)
for child in children:
commit_type = get_commit_type(
commit=commit,
tags=tags,
prev_version=prev_version,
)
cur_line += pretty_commit(
commit=child,
version=None,
commit_type=commit_type,
bugtracker_url=bugtracker_url,
)
if parent_commit_type == :
release_notes_per_major[prev_version_str] = (
api_break_changes,
features,
bugs,
)
bugs, features, api_break_changes = [], [], []
api_break_changes.append(cur_line)
elif parent_commit_type == :
features.append(cur_line)
else:
bugs.append(cur_line)
prev_version = version
prev_version_str = version_str
release_notes_per_major[prev_version_str] = (
api_break_changes,
features,
bugs,
)
releasenotes =
for major_version, lines in reversed(release_notes_per_major.items()):
api_break_changes, features, bugs = lines
releasenotes += u % (
major_version,
(
.join(reversed(api_break_changes)) or
),
.join(reversed(features)) or ,
.join(reversed(bugs)) or ,
)
return releasenotes.strip() | Given a repo and optionally a base revision to start from, will return
a text suitable for the release notes announcement, grouping the bugs, the
features and the api-breaking changes.
Args:
repo_path(str): Path to the code git repository.
from_commit(str): Refspec of the commit to start aggregating the
authors from.
bugtracker_url(str): URL to be prepended to any bug ids found in the
commits.
Returns:
str: Release notes text. |
4,225 | def copyFile(src, dest):
try:
if os.path.isfile(src):
dpath, dfile = os.path.split(dest)
if not os.path.isdir(dpath):
os.makedirs(dpath)
if not os.path.exists(dest):
touch(dest)
try:
shutil.copy2(src, dest)
except shutil.Error as e:
logging.exception( % e)
| Copies a source file to a destination whose path may not yet exist.
Keyword arguments:
src -- Source path to a file (string)
dest -- Path for destination file (also a string) |
4,226 | def cmd_example(self, args):
if len(args) == 0:
print(self.usage())
elif args[0] == "status":
print(self.status())
elif args[0] == "set":
self.example_settings.command(args[1:])
else:
print(self.usage()) | control behaviour of the module |
4,227 | def __process_node(self, node: yaml.Node,
expected_type: Type) -> yaml.Node:
logger.info(.format(
node, expected_type))
recognized_types, message = self.__recognizer.recognize(
node, expected_type)
if len(recognized_types) != 1:
raise RecognitionError(message)
recognized_type = recognized_types[0]
logger.debug(.format(node))
if recognized_type in self._registered_classes.values():
node = self.__savorize(node, recognized_type)
logger.debug(.format(node))
logger.debug()
if is_generic_list(recognized_type):
if node.tag != :
raise RecognitionError(.format(
node.start_mark, os.linesep,
type_to_desc(expected_type)))
for item in node.value:
self.__process_node(item,
generic_type_args(recognized_type)[0])
elif is_generic_dict(recognized_type):
if node.tag != :
raise RecognitionError(.format(
node.start_mark, os.linesep,
type_to_desc(expected_type)))
for _, value_node in node.value:
self.__process_node(value_node,
generic_type_args(recognized_type)[1])
elif recognized_type in self._registered_classes.values():
if (not issubclass(recognized_type, enum.Enum)
and not issubclass(recognized_type, str)
and not issubclass(recognized_type, UserString)):
for attr_name, type_, _ in class_subobjects(recognized_type):
cnode = Node(node)
if cnode.has_attribute(attr_name):
subnode = cnode.get_attribute(attr_name)
new_subnode = self.__process_node(
subnode.yaml_node, type_)
cnode.set_attribute(attr_name, new_subnode)
else:
logger.debug(
)
node.tag = self.__type_to_tag(recognized_type)
logger.debug(.format(node))
return node | Processes a node.
This is the main function that implements yatiml's functionality. It figures
out how to interpret this node (recognition), then applies syntactic sugar,
and finally recurses to the subnodes, if any.
Args:
node: The node to process.
expected_type: The type we expect this node to be.
Returns:
The transformed node, or a transformed copy. |
4,228 | def astype(self, dtype):
if dtype not in _supported_dtypes:
raise ValueError( % (dtype, _supported_dtypes))
pixeltype = _npy_to_itk_map[dtype]
return self.clone(pixeltype) | Cast & clone an ANTsImage to a given numpy datatype.
Map:
uint8 : unsigned char
uint32 : unsigned int
float32 : float
float64 : double |
4,229 | def _parse(self):
try:
self.vars[] = self.json[]
except (KeyError, ValueError, TypeError):
pass
for v in [, ]:
try:
self.vars[v] = self.summarize_notices(self.json[v])
except (KeyError, ValueError, TypeError):
pass
try:
self.vars[] = self.summarize_links(self.json[])
except (KeyError, ValueError, TypeError):
pass
try:
self.vars[] = self.summarize_events(self.json[])
except (KeyError, ValueError, TypeError):
pass | The function for parsing the JSON response to the vars dictionary. |
4,230 | def to_schema(self):
    if not self.name or not self.process:
        raise ValueError("field is not registered with process")
    schema = {
        'name': self.name,
        'type': self.get_field_type(),
    }
    if self.required is not None:
        schema['required'] = self.required
    if self.label is not None:
        schema['label'] = self.label
    if self.description is not None:
        schema['description'] = self.description
    if self.default is not None:
        schema['default'] = self.default
    if self.hidden is not None:
        schema['hidden'] = self.hidden
    if self.choices is not None:
        for choice, label in self.choices:
            schema.setdefault('choices', []).append({
                'label': label,
                'value': choice,
            })
    return schema | Return field schema for this field. |
4,231 | def retinotopy_comparison(arg1, arg2, arg3=None,
eccentricity_range=None, polar_angle_range=None, visual_area_mask=None,
weight=Ellipsis, weight_min=None, visual_area=Ellipsis,
method=, distance=, gold=None):
if arg3 is not None: (obj, dsets) = (arg1, [retinotopy_data(arg1, aa) for aa in (arg2,arg3)])
else: (obj, dsets) = (None, [arg1, arg2])
(gi,gold) = (None,False) if not gold else (gold-1,True)
wgt = np.array(wgt)
if weight_min is not None: wgt[wgt < weight_min] = 0
lbl = None if lbl is None else np.array(lbl)
if lbl is not None and visual_area_mask is not None:
if pimms.is_int(visual_area_mask): visual_area_mask = [visual_area_mask]
oomask = (0 == np.sum([lbl == va for va in visual_area_mask], axis=0))
wgt[oomask] = 0
lbl[oomask] = 0
if lbl is not None: result[] = lbl
if eccentricity_range is not None:
er = eccentricity_range
if pimms.is_real(er): er = (0,er)
if gold: wgt[(es[gi] < er[0]) | (es[gi] > er[1])] = 0
else: wgt[(es[0] < er[0]) | (es[0] > er[1]) | (es[1] < er[0]) | (es[1] > er[1])] = 0
if polar_angle_range is not None:
pr = polar_angle_range
if pimms.is_str(pr):
pr = pr.lower()
if pr in [, ]: pr = ( 0, 180)
elif pr in [, ]: pr = (-180, 0)
else: raise ValueError( % pr)
if gold: wgt[(ps[gi] < pr[0]) | (ps[gi] > pr[1])] = 0
else: wgt[(ps[0] < pr[0]) | (ps[0] > pr[1]) | (ps[1] < pr[0]) | (ps[1] > pr[1])] = 0
result[] = wgt * zinv(np.sum(wgt))
gsecc = es[gi] if gold else np.mean(es, axis=0)
gsang = ps[gi] if gold else np.mean(ps, axis=0)
gsrad = rs[gi] if gold else rs[0] if rs[1] is None else rs[1] if rs[0] is None else \
np.mean(rs, axis=0)
gsecc_inv = zinv(gsecc)
gsrad_inv = None if gsrad is None else zinv(gsrad)
for (tag,resprop) in [(, ), (, ),
(, ), (, ), (, )]:
serr = result[tag + ] - result[tag + ]
aerr = np.abs(serr)
result[resprop + ] = serr
result[resprop + ] = aerr
result[resprop + ] = aerr * gsecc_inv
if gsrad_inv is not None:
result[resprop + ] = aerr * gsrad_inv
return pimms.itable(result) | retinotopy_comparison(dataset1, dataset2) yields a pimms itable comparing the two retinotopy
datasets.
retinotopy_error(obj, dataset1, dataset2) is equivalent to retinotopy_comparison(x, y) where x
and y are retinotopy(obj, dataset1) and retinotopy_data(obj, dataset2).
The datasets may be specified in a number of ways, some of which may be incompatible with
certain options. The simplest way to specify a dataset is as a vector of complex numbers, which
are taken to represent positions in the visual field with (a + bi) corresponding to the
coordinate (a deg, b deg) in the visual field. Alternately, an n x 2 or 2 x n matrix will be
interpreted as (polar angle, eccentricity) coordinates, in terms of visual degrees (see the
as_retinotopy function: as_retinotopy(arg, 'visual') yields this input format). Alternately,
the datasets may be mappings such as those retuend by the retinotopy_data function; in this case
as_retinotopy is used to extract the visual coordinates (so they need not be specified in visual
coordinates specifically in this case). In this last case, additional properties such as the
variance explained and pRF size can be returned, making it valuable for more sophisticated error
methods or distance metrics.
The returned dataset will always have a row for each row in the two datasets (which must have
the same number of rows). However, many rows may have a weight of 0 even if no weights were
specified in the options; this is because other limitations may have been specified (such as
in the eccentricity_range or visual_areas). The returned dataset will always contain the
following columns:
* 'weight' gives the weight assigned to this particular vertex; the weights will always sum to
1 unless all vertices have 0 weight.
* 'polar_angle_1' and 'polar_angle_2', 'eccentricity_1', 'eccentricity_2', 'x_1', 'x_2', 'y_1',
'y_2', 'z_1', and 'z_2' all give the visual field coordinates in degrees; the z values give
complex numbers equivalent to the x/y values.
* 'radius_1' and 'radius_2' give the radii (sigma parameters) of the pRF gaussians.
* 'polar_angle_error', 'eccentricity_error', and 'center_error' all give the difference
between the visual field points in the two datasets; note that polar_angle_error in
particular is an error measure of rotations around the visual field and not of visual field
position. The 'center_error' is the distance between the centers of the visual field, in
degrees. The 'radius_error' value is also given.
* 'visual_area_1' and 'visual_area_2' specify the visual areas of the individual datasets; if
either of the datasets did not have a visual area, it will be omitted. Additionally, the
property 'visual_area' specifies the visual area suggested for use in analyses; this is
chosen based on the following: (1) if there is a gold standard dataset specified that has
a visual area, use it; (2) if only one of the datasets has a visual area, use it; (3) if
both have a visual area, then use the (varea1 == varea2) * varea1 (the areas that agree are
kept and all others are set to 0); (4) if neither has a visual area, then this property is
omitted. In all cases where a 'visual_area' property is included, those vertices that do not
overlap with the given visual_area_option option will be set to 0 along with the
corresponding weights.
* A variety of other lazily-calculated error metrics are included.
The following options are accepted:
* eccentricity_range (default: None) specifies the range of eccentricity to include in the
calculation (in degrees). This may be specified as emax or (emin, emax).
* polar_angle_range (default: None) specifies the range of polar angles to include in the
calculation. Like eccentricity range it may be specified as (amin, amax) but amax alone is
not allowed. Additionally the strings 'lh' and 'rvf' are equivalent to (0,180) and the
strings 'rh' and 'lvf' are equivalent to (-180,0).
* weight (default: Ellipsis) specifies the weights to be used in the calculation. This may be
None to specify that no weights should be used, or a property name or an array of weight
values. Alternately, it may be a tuple (w1, w2) of the weights for datasets 1 and 2. If the
argument is Ellipsis, then it will use weights if they are found in the retinotopy dataset;
both datasets may contain weights in which the product is used.
* weight_min (default: None) specifies the minimum weight a vertex must have to be included in
the calculation.
* visual_area (default: Ellipsis) specifies the visual area labels to be used in the
calculation. This may be None to specify that no labels should be used, or a property name
or an array of labels. Alternately, it may be a tuple (l1, l2) of the labels for datasets 1
and 2. If the argument is Ellipsis, then it will use labels if they are found in the
retinotopy dataset; both datasets may contain labels in which the gold standard's labels are
used if there is a gold standard and the overlapping labels are used otherwise.
* visual_area_mask (default: None) specifies a list of visual areas included in the
calculation; this is applied to all datasets with a visual_area key; see the 'visual_area'
columns above and the visual_area option. If None, then no visual areas are filtered;
otherwise, arguments should like (1,2,3), which would usually specify that areas V1, V2, and
V3, be included.
* gold (default: None) specifies which dataset should be considered the gold standard; this
should be either 1 or 2. If a gold-standard dataset is specified, then it is used in certain
calculations; for example, when scaling an error by eccentricity, the gold-standard's
eccentricity will be used unless there is no gold standard, in which case the mean of the
two values are used. |
4,232 | def get_neuroml_from_sonata(sonata_filename, id, generate_lems = True, format=):
from neuroml.hdf5.NetworkBuilder import NetworkBuilder
neuroml_handler = NetworkBuilder()
sr = SonataReader(filename=sonata_filename, id=id)
sr.parse(neuroml_handler)
nml_doc = neuroml_handler.get_nml_doc()
sr.add_neuroml_components(nml_doc)
if format == :
nml_file_name = %id
from neuroml.writers import NeuroMLWriter
NeuroMLWriter.write(nml_doc, nml_file_name)
elif format == :
nml_file_name = %id
from neuroml.writers import NeuroMLHdf5Writer
NeuroMLHdf5Writer.write(nml_doc, nml_file_name)
print_v(%nml_file_name)
if generate_lems:
lems_file_name = sr.generate_lems_file(nml_file_name, nml_doc)
return sr, lems_file_name, nml_file_name, nml_doc
return nml_doc | Return a NeuroMLDocument with (most of) the contents of the Sonata model |
4,233 | def attr_subresource(raml_resource, route_name):
static_parent = get_static_parent(raml_resource, method=)
if static_parent is None:
return False
schema = resource_schema(static_parent) or {}
properties = schema.get(, {})
if route_name in properties:
db_settings = properties[route_name].get(, {})
return db_settings.get() in (, )
return False | Determine if :raml_resource: is an attribute subresource.
:param raml_resource: Instance of ramlfications.raml.ResourceNode.
:param route_name: Name of the :raml_resource:. |
4,234 | def root_sync(args, l, config):
from requests.exceptions import ConnectionError
all_remote_names = [ r.short_name for r in l.remotes ]
if args.all:
remotes = all_remote_names
else:
remotes = args.refs
prt("Sync with {} remotes or bundles ".format(len(remotes)))
if not remotes:
return
for ref in remotes:
l.commit()
try:
if ref in all_remote_names:
l.checkin_remote_bundle(ref)
except NotFoundError as e:
warn(e)
continue
except ConnectionError as e:
warn(e)
continue | Sync with the remote. For more options, use library sync |
4,235 | def has_minimum_version(raises=True):
    if get_version() < LooseVersion(TMUX_MIN_VERSION):
        if raises:
            raise exc.VersionTooLow(
                'libtmux only supports tmux %s and greater. This system '
                'has %s installed. Upgrade your tmux to use libtmux.'
                % (TMUX_MIN_VERSION, get_version())
            )
        else:
            return False
    return True | Return if tmux meets version requirement. Version >1.8 or above.
Parameters
----------
raises : bool
raise exception if below minimum version requirement
Returns
-------
bool
True if tmux meets minimum required version.
Raises
------
libtmux.exc.VersionTooLow
tmux version below minimum required for libtmux
Notes
-----
.. versionchanged:: 0.7.0
No longer returns version, returns True or False
.. versionchanged:: 0.1.7
Versions will now remove trailing letters per `Issue 55`_.
.. _Issue 55: https://github.com/tmux-python/tmuxp/issues/55. |
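A hedged usage sketch with libtmux installed (import paths as published by libtmux; the exception text depends on the tmux found on the system):
from libtmux.common import has_minimum_version
from libtmux import exc

try:
    has_minimum_version()                  # raises VersionTooLow if tmux is too old
    print("tmux version is sufficient")
except exc.VersionTooLow as err:
    print("tmux too old:", err)

print(has_minimum_version(raises=False))   # True/False, never raises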
4,236 | def fingerprint(self):
if self._fingerprint is None:
params = self[][]
key = self[].parsed
if self.algorithm == :
to_hash = % (
key[].native,
key[].native,
)
elif self.algorithm == :
public_key = self.public_key
to_hash = % (
params[].native,
params[].native,
params[].native,
public_key.native,
)
elif self.algorithm == :
public_key = key[].native
if public_key is None:
public_key = self.public_key.native
if params.name == :
to_hash = % params.chosen.native
to_hash = to_hash.encode()
to_hash += public_key
elif params.name == :
to_hash = public_key
elif params.name == :
to_hash = % params.chosen[][].native
to_hash = to_hash.encode()
to_hash += b + params.chosen[][].native
to_hash += b + params.chosen[][].native
to_hash += public_key
if isinstance(to_hash, str_cls):
to_hash = to_hash.encode()
self._fingerprint = hashlib.sha256(to_hash).digest()
return self._fingerprint | Creates a fingerprint that can be compared with a public key to see if
the two form a pair.
This fingerprint is not compatible with fingerprints generated by any
other software.
:return:
A byte string that is a sha256 hash of selected components (based
on the key type) |
4,237 | def genlmsg_parse(nlh, hdrlen, tb, maxtype, policy):
if not genlmsg_valid_hdr(nlh, hdrlen):
return -NLE_MSG_TOOSHORT
ghdr = genlmsghdr(nlmsg_data(nlh))
return int(nla_parse(tb, maxtype, genlmsg_attrdata(ghdr, hdrlen), genlmsg_attrlen(ghdr, hdrlen), policy)) | Parse Generic Netlink message including attributes.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/genl.c#L191
Verifies the validity of the Netlink and Generic Netlink headers using genlmsg_valid_hdr() and calls nla_parse() on
the message payload to parse eventual attributes.
Positional arguments:
nlh -- Netlink message header (nlmsghdr class instance).
hdrlen -- length of user header (integer).
tb -- empty dict, to be updated with nlattr class instances to store parsed attributes.
maxtype -- maximum attribute id expected (integer).
policy -- dictionary of nla_policy class instances as values, with nla types as keys.
Returns:
0 on success or a negative error code. |
4,238 | def draw_heading(self, writer):
if self.dirty == self.STATE_REFRESH:
writer(u.join(
(self.term.home, self.term.clear,
self.screen.msg_intro, ,
self.screen.header, ,)))
return True | Conditionally redraw screen when ``dirty`` attribute is valued REFRESH.
When Pager attribute ``dirty`` is ``STATE_REFRESH``, cursor is moved
to (0,0), screen is cleared, and heading is displayed.
:param writer: callable writes to output stream, receiving unicode.
:returns: True if class attribute ``dirty`` is ``STATE_REFRESH``. |
4,239 | def get_background_rms(self):
if self._rms is None:
data = numpy.extract(self.hdu.data > -9999999, self.hdu.data)
p25 = scipy.stats.scoreatpercentile(data, 25)
p75 = scipy.stats.scoreatpercentile(data, 75)
iqr = p75 - p25
self._rms = iqr / 1.34896
return self._rms | Calculate the rms of the image. The rms is calculated from the interquartile range (IQR), to
reduce bias from source pixels.
Returns
-------
rms : float
The image rms.
Notes
-----
The rms value is cached after first calculation. |
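A stand-alone illustration of the estimator used above: for Gaussian noise the interquartile range divided by 1.34896 approximates the standard deviation, and it is insensitive to a handful of bright source pixels.
import numpy
import scipy.stats

rng = numpy.random.default_rng(0)
data = rng.normal(loc=0.0, scale=2.0, size=100_000)
p25 = scipy.stats.scoreatpercentile(data, 25)
p75 = scipy.stats.scoreatpercentile(data, 75)
print((p75 - p25) / 1.34896)   # close to the true sigma of 2.0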
4,240 | def top_referrers(self, domain_only=True):
referrer = self._referrer_clause(domain_only)
return (self.get_query()
.select(referrer, fn.Count(PageView.id))
.group_by(referrer)
.order_by(fn.Count(PageView.id).desc())
.tuples()) | What domains send us the most traffic? |
4,241 | def export_recordeddata_to_file(time_min=None, time_max=None, filename=None, active_vars=None, file_extension=None,
append_to_file=False, no_mean_value=False, mean_value_period=5.0,
backgroundprocess_id=None, export_task_id=None, **kwargs):
if backgroundprocess_id is not None:
tp = BackgroundProcess.objects.get(id=backgroundprocess_id)
tp.message =
tp.last_update = now()
tp.save()
else:
tp = None
if isinstance(time_max, string_types):
time_max = mktime(datetime.strptime(time_max, "%d-%b-%Y %H:%M:%S").timetuple())
if isinstance(time_min, string_types):
time_min = mktime(datetime.strptime(time_min, "%d-%b-%Y %H:%M:%S").timetuple())
if time_max is None:
time_max = time()
if time_min is None:
time_min = time() - 24 * 60 * 60
if file_extension is None and filename is None:
file_extension =
elif filename is not None:
file_extension = + filename.split()[-1]
if file_extension not in [, , ]:
if tp is not None:
tp.last_update = now()
tp.message =
tp.failed = 1
tp.save()
return
if filename is None:
if hasattr(settings, ):
if in settings.PYSCADA_EXPORT:
backup_file_path = os.path.expanduser(settings.PYSCADA_EXPORT[])
else:
backup_file_path = os.path.expanduser()
else:
backup_file_path = os.path.expanduser()
backup_file_name =
if hasattr(settings, ):
if in settings.PYSCADA_EXPORT:
backup_file_name = settings.PYSCADA_EXPORT[] + backup_file_name
if not os.path.exists(backup_file_path):
os.mkdir(backup_file_path)
db_time_min = RecordedData.objects.first()
if not db_time_min:
if tp is not None:
tp.last_update = now()
tp.message =
tp.failed = 1
tp.save()
return
time_min = max(db_time_min.time_value(), time_min)
db_time_max = RecordedData.objects.last()
if not db_time_max:
if tp is not None:
tp.last_update = now()
tp.message =
tp.failed = 1
tp.save()
return
time_max = min(db_time_max.time_value(), time_max)
cdstr_from = datetime.fromtimestamp(time_min).strftime("%Y_%m_%d_%H%M")
cdstr_to = datetime.fromtimestamp(time_max).strftime("%Y_%m_%d_%H%M")
if in kwargs:
filename = os.path.join(backup_file_path,
backup_file_name + + cdstr_from + + cdstr_to + + kwargs[
])
else:
filename = os.path.join(backup_file_path, backup_file_name + + cdstr_from + + cdstr_to)
if os.path.exists(filename + file_extension) and not append_to_file:
count = 0
filename_old = filename
while os.path.exists(filename + file_extension):
filename = filename_old + % count
count += 1
filename = filename + file_extension
if export_task_id is not None:
job = ExportTask.objects.filter(pk=export_task_id).first()
if job:
job.filename = filename
job.save()
if active_vars is None:
active_vars = Variable.objects.filter(active=1, device__active=1)
else:
if type(active_vars) is str:
if active_vars == :
active_vars = Variable.objects.all()
else:
active_vars = Variable.objects.filter(active=1, device__active=1)
else:
active_vars = Variable.objects.filter(pk__in=active_vars, active=1, device__active=1)
if mean_value_period == 0:
no_mean_value = True
mean_value_period = 5.0
timevalues = arange(math.ceil(time_min / mean_value_period) * mean_value_period,
math.floor(time_max / mean_value_period) * mean_value_period, mean_value_period)
if hasattr(settings, ):
if in settings.PYSCADA_META:
description = settings.PYSCADA_META[]
else:
description =
if in settings.PYSCADA_META:
name = settings.PYSCADA_META[]
else:
name =
else:
description =
name =
if file_extension in [, ]:
bf = MatCompatibleH5(filename, version=, description=description, name=name,
creation_date=strftime())
out_timevalues = [unix_time_stamp_to_matlab_datenum(element) for element in timevalues]
elif file_extension in []:
bf = ExcelCompatibleCSV(filename, version=, description=description, name=name,
creation_date=strftime())
out_timevalues = [unix_time_stamp_to_excel_datenum(element) for element in timevalues]
else:
return
bf.write_data(, float64(out_timevalues),
id=0,
description="global time vector",
value_class=validate_value_class(),
unit="Days since 0000-1-1 00:00:00",
color=,
short_name=,
chart_line_thickness=3
)
for var_idx in range(0, active_vars.count(), 10):
if tp is not None:
tp.last_update = now()
tp.message = % var_idx
tp.save()
var_slice = active_vars[var_idx:var_idx + 10]
data = RecordedData.objects.get_values_in_time_range(
variable_id__in=list(var_slice.values_list(, flat=True)),
time_min=time_min,
time_max=time_max,
query_first_value=True)
for var in var_slice:
if tp is not None:
tp.last_update = now()
tp.message = % (var.name, var.pk)
tp.save()
if var.scaling is None or var.value_class.upper() in [, ]:
value_class = var.value_class
else:
value_class =
if hasattr(var.unit, ):
udunit = var.unit.udunit
else:
udunit =
if var.pk not in data:
bf.write_data(var.name, _cast_value([0] * len(timevalues), validate_value_class(value_class)),
id=var.pk,
description=var.description,
value_class=validate_value_class(value_class),
unit=udunit,
color=var.chart_line_color_code(),
short_name=var.short_name,
chart_line_thickness=var.chart_line_thickness)
continue
out_data = np.zeros(len(timevalues))
ii = 0
last_value = None
max_ii = len(data[var.pk]) - 1
for i in range(len(timevalues)):
if ii >= max_ii + 1:
if last_value is not None:
out_data[i] = last_value
continue
tmp = 0.0
tmp_i = 0.0
if data[var.pk][ii][0] < timevalues[i]:
while data[var.pk][ii][0] < timevalues[i] and ii < max_ii:
last_value = data[var.pk][ii][1]
ii += 1
if ii >= max_ii:
if last_value is not None:
out_data[i] = last_value
continue
if timevalues[i] <= data[var.pk][ii][0] < timevalues[i] + mean_value_period:
while timevalues[i] <= data[var.pk][ii][0] < timevalues[i] + mean_value_period and ii < max_ii:
if no_mean_value:
tmp = data[var.pk][ii][1]
tmp_i = 1
else:
tmp += data[var.pk][ii][1]
tmp_i += 1
last_value = data[var.pk][ii][1]
ii += 1
if tmp_i > 0:
out_data[i] = tmp / tmp_i
else:
out_data[i] = data[var.pk][ii][1]
last_value = data[var.pk][ii][1]
else:
if last_value is not None:
out_data[i] = last_value
bf.write_data(var.name, _cast_value(out_data, validate_value_class(value_class)),
id=var.pk,
description=var.description,
value_class=validate_value_class(value_class),
unit=udunit,
color=var.chart_line_color_code(),
short_name=var.short_name,
chart_line_thickness=var.chart_line_thickness)
bf.close_file()
if tp is not None:
tp.last_update = now()
tp.message =
tp.done = True
tp.save() | read all data |
4,242 | def get_closest(self, lon, lat, depth=0):
xyz = spherical_to_cartesian(lon, lat, depth)
min_dist, idx = self.kdtree.query(xyz)
return self.objects[idx], min_dist | Get the closest object to the given longitude and latitude
and its distance.
:param lon: longitude in degrees
:param lat: latitude in degrees
:param depth: depth in km (default 0)
:returns: (object, distance) |
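A minimal sketch of the same nearest-neighbour pattern with scipy's cKDTree; spherical_to_cartesian here is a hypothetical stand-in for the helper used above, converting lon/lat/depth to a point on (or below) a sphere of Earth radius:
import numpy as np
from scipy.spatial import cKDTree

def spherical_to_cartesian(lon, lat, depth=0.0, radius=6371.0):
    # hypothetical stand-in for the library helper
    lon, lat = np.radians(lon), np.radians(lat)
    r = radius - depth
    return np.array([r * np.cos(lat) * np.cos(lon),
                     r * np.cos(lat) * np.sin(lon),
                     r * np.sin(lat)])

objects = [(0.0, 0.0), (10.0, 10.0), (20.0, 0.0)]          # stored lon/lat pairs
tree = cKDTree(np.array([spherical_to_cartesian(lo, la) for lo, la in objects]))
dist, idx = tree.query(spherical_to_cartesian(9.0, 9.5))
print(objects[idx], dist)   # closest stored point and its chord distance in km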
4,243 | def add_forwarding_rules(self, forwarding_rules):
rules_dict = [rule.__dict__ for rule in forwarding_rules]
return self.get_data(
"load_balancers/%s/forwarding_rules/" % self.id,
type=POST,
params={"forwarding_rules": rules_dict}
) | Adds new forwarding rules to a LoadBalancer.
Args:
forwarding_rules (obj:`list`): A list of `ForwardingRules` objects |
4,244 | def move_item(self, item, origin, destination):
if self.equality:
item_index = 0
for i, element in enumerate(origin):
if self.equality(element, item):
item_index = i
break
else:
item_index = origin.index(item)
destination.append(origin.pop(item_index)) | Moves an item from one cluster to another cluster.
:param item: the item to be moved.
:param origin: the originating cluster.
:param destination: the target cluster. |
4,245 | def saveSession(self):
if self.cookies_file:
self.r.cookies.save(ignore_discard=True)
with open(self.token_file, ) as f:
f.write( % (self.token_type, self.access_token)) | Save cookies/session. |
4,246 | def edit_message_media(
self,
chat_id: Union[int, str],
message_id: int,
media: InputMedia,
reply_markup: "pyrogram.InlineKeyboardMarkup" = None
) -> "pyrogram.Message":
style = self.html if media.parse_mode.lower() == "html" else self.markdown
caption = media.caption
if isinstance(media, InputMediaPhoto):
if os.path.exists(media.media):
media = self.send(
functions.messages.UploadMedia(
peer=self.resolve_peer(chat_id),
media=types.InputMediaUploadedPhoto(
file=self.save_file(media.media)
)
)
)
media = types.InputMediaPhoto(
id=types.InputPhoto(
id=media.photo.id,
access_hash=media.photo.access_hash,
file_reference=b""
)
)
elif media.media.startswith("http"):
media = types.InputMediaPhotoExternal(
url=media.media
)
else:
try:
decoded = utils.decode(media.media)
fmt = "<iiqqqqi" if len(decoded) > 24 else "<iiqq"
unpacked = struct.unpack(fmt, decoded)
except (AssertionError, binascii.Error, struct.error):
raise FileIdInvalid from None
else:
if unpacked[0] != 2:
media_type = BaseClient.MEDIA_TYPE_ID.get(unpacked[0], None)
if media_type:
raise FileIdInvalid("The file_id belongs to a {}".format(media_type))
else:
raise FileIdInvalid("Unknown media type: {}".format(unpacked[0]))
media = types.InputMediaPhoto(
id=types.InputPhoto(
id=unpacked[2],
access_hash=unpacked[3],
file_reference=b""
)
)
if isinstance(media, InputMediaVideo):
if os.path.exists(media.media):
media = self.send(
functions.messages.UploadMedia(
peer=self.resolve_peer(chat_id),
media=types.InputMediaUploadedDocument(
mime_type=self.guess_mime_type(media.media) or "video/mp4",
thumb=None if media.thumb is None else self.save_file(media.thumb),
file=self.save_file(media.media),
attributes=[
types.DocumentAttributeVideo(
supports_streaming=media.supports_streaming or None,
duration=media.duration,
w=media.width,
h=media.height
),
types.DocumentAttributeFilename(
file_name=os.path.basename(media.media)
)
]
)
)
)
media = types.InputMediaDocument(
id=types.InputDocument(
id=media.document.id,
access_hash=media.document.access_hash,
file_reference=b""
)
)
elif media.media.startswith("http"):
media = types.InputMediaDocumentExternal(
url=media.media
)
else:
try:
decoded = utils.decode(media.media)
fmt = "<iiqqqqi" if len(decoded) > 24 else "<iiqq"
unpacked = struct.unpack(fmt, decoded)
except (AssertionError, binascii.Error, struct.error):
raise FileIdInvalid from None
else:
if unpacked[0] != 4:
media_type = BaseClient.MEDIA_TYPE_ID.get(unpacked[0], None)
if media_type:
raise FileIdInvalid("The file_id belongs to a {}".format(media_type))
else:
raise FileIdInvalid("Unknown media type: {}".format(unpacked[0]))
media = types.InputMediaDocument(
id=types.InputDocument(
id=unpacked[2],
access_hash=unpacked[3],
file_reference=b""
)
)
if isinstance(media, InputMediaAudio):
if os.path.exists(media.media):
media = self.send(
functions.messages.UploadMedia(
peer=self.resolve_peer(chat_id),
media=types.InputMediaUploadedDocument(
mime_type=self.guess_mime_type(media.media) or "audio/mpeg",
thumb=None if media.thumb is None else self.save_file(media.thumb),
file=self.save_file(media.media),
attributes=[
types.DocumentAttributeAudio(
duration=media.duration,
performer=media.performer,
title=media.title
),
types.DocumentAttributeFilename(
file_name=os.path.basename(media.media)
)
]
)
)
)
media = types.InputMediaDocument(
id=types.InputDocument(
id=media.document.id,
access_hash=media.document.access_hash,
file_reference=b""
)
)
elif media.media.startswith("http"):
media = types.InputMediaDocumentExternal(
url=media.media
)
else:
try:
decoded = utils.decode(media.media)
fmt = "<iiqqqqi" if len(decoded) > 24 else "<iiqq"
unpacked = struct.unpack(fmt, decoded)
except (AssertionError, binascii.Error, struct.error):
raise FileIdInvalid from None
else:
if unpacked[0] != 9:
media_type = BaseClient.MEDIA_TYPE_ID.get(unpacked[0], None)
if media_type:
raise FileIdInvalid("The file_id belongs to a {}".format(media_type))
else:
raise FileIdInvalid("Unknown media type: {}".format(unpacked[0]))
media = types.InputMediaDocument(
id=types.InputDocument(
id=unpacked[2],
access_hash=unpacked[3],
file_reference=b""
)
)
if isinstance(media, InputMediaAnimation):
if os.path.exists(media.media):
media = self.send(
functions.messages.UploadMedia(
peer=self.resolve_peer(chat_id),
media=types.InputMediaUploadedDocument(
mime_type=self.guess_mime_type(media.media) or "video/mp4",
thumb=None if media.thumb is None else self.save_file(media.thumb),
file=self.save_file(media.media),
attributes=[
types.DocumentAttributeVideo(
supports_streaming=True,
duration=media.duration,
w=media.width,
h=media.height
),
types.DocumentAttributeFilename(
file_name=os.path.basename(media.media)
),
types.DocumentAttributeAnimated()
]
)
)
)
media = types.InputMediaDocument(
id=types.InputDocument(
id=media.document.id,
access_hash=media.document.access_hash,
file_reference=b""
)
)
elif media.media.startswith("http"):
media = types.InputMediaDocumentExternal(
url=media.media
)
else:
try:
decoded = utils.decode(media.media)
fmt = "<iiqqqqi" if len(decoded) > 24 else "<iiqq"
unpacked = struct.unpack(fmt, decoded)
except (AssertionError, binascii.Error, struct.error):
raise FileIdInvalid from None
else:
if unpacked[0] != 10:
media_type = BaseClient.MEDIA_TYPE_ID.get(unpacked[0], None)
if media_type:
raise FileIdInvalid("The file_id belongs to a {}".format(media_type))
else:
raise FileIdInvalid("Unknown media type: {}".format(unpacked[0]))
media = types.InputMediaDocument(
id=types.InputDocument(
id=unpacked[2],
access_hash=unpacked[3],
file_reference=b""
)
)
if isinstance(media, InputMediaDocument):
if os.path.exists(media.media):
media = self.send(
functions.messages.UploadMedia(
peer=self.resolve_peer(chat_id),
media=types.InputMediaUploadedDocument(
mime_type=self.guess_mime_type(media.media) or "application/zip",
thumb=None if media.thumb is None else self.save_file(media.thumb),
file=self.save_file(media.media),
attributes=[
types.DocumentAttributeFilename(
file_name=os.path.basename(media.media)
)
]
)
)
)
media = types.InputMediaDocument(
id=types.InputDocument(
id=media.document.id,
access_hash=media.document.access_hash,
file_reference=b""
)
)
elif media.media.startswith("http"):
media = types.InputMediaDocumentExternal(
url=media.media
)
else:
try:
decoded = utils.decode(media.media)
fmt = "<iiqqqqi" if len(decoded) > 24 else "<iiqq"
unpacked = struct.unpack(fmt, decoded)
except (AssertionError, binascii.Error, struct.error):
raise FileIdInvalid from None
else:
if unpacked[0] not in (5, 10):
media_type = BaseClient.MEDIA_TYPE_ID.get(unpacked[0], None)
if media_type:
raise FileIdInvalid("The file_id belongs to a {}".format(media_type))
else:
raise FileIdInvalid("Unknown media type: {}".format(unpacked[0]))
media = types.InputMediaDocument(
id=types.InputDocument(
id=unpacked[2],
access_hash=unpacked[3],
file_reference=b""
)
)
r = self.send(
functions.messages.EditMessage(
peer=self.resolve_peer(chat_id),
id=message_id,
reply_markup=reply_markup.write() if reply_markup else None,
media=media,
**style.parse(caption)
)
)
for i in r.updates:
if isinstance(i, (types.UpdateEditMessage, types.UpdateEditChannelMessage)):
return pyrogram.Message._parse(
self, i.message,
{i.id: i for i in r.users},
{i.id: i for i in r.chats}
) | Use this method to edit audio, document, photo, or video messages.
If a message is a part of a message album, then it can be edited only to a photo or a video. Otherwise,
message type can be changed arbitrarily. When inline message is edited, new file can't be uploaded.
Use previously uploaded file via its file_id or specify a URL. On success, if the edited message was sent
by the bot, the edited Message is returned, otherwise True is returned.
Args:
chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
For your personal cloud (Saved Messages) you can simply use "me" or "self".
For a contact that exists in your Telegram address book you can use his phone number (str).
message_id (``int``):
Message identifier in the chat specified in chat_id.
media (:obj:`InputMedia`)
One of the InputMedia objects describing an animation, audio, document, photo or video.
reply_markup (:obj:`InlineKeyboardMarkup`, *optional*):
An InlineKeyboardMarkup object.
Returns:
On success, the edited :obj:`Message <pyrogram.Message>` is returned.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. |
4,247 | def addsystemhook(self, url):
data = {"url": url}
request = requests.post(
self.hook_url, headers=self.headers, data=data,
verify=self.verify_ssl, auth=self.auth, timeout=self.timeout)
if request.status_code == 201:
return True
else:
return False | Add a system hook
:param url: url of the hook
:return: True if success |
4,248 | def set(self, data, start=None, count=None, stride=None):
try:
sds_name, rank, dim_sizes, data_type, n_attrs = self.info()
if isinstance(dim_sizes, type(1)):
dim_sizes = [dim_sizes]
except HDF4Error:
raise HDF4Error()
if start is None:
start = [0] * rank
elif isinstance(start, type(1)):
start = [start]
if count is None:
count = dim_sizes
if count[0] == 0:
count[0] = 1
elif isinstance(count, type(1)):
count = [count]
if stride is None:
stride = [1] * rank
elif isinstance(stride, type(1)):
stride = [stride]
if len(start) != rank or len(count) != rank or len(stride) != rank:
raise HDF4Error(\
)
unlimited = self.isrecord()
for n in range(rank):
ok = 1
if start[n] < 0:
ok = 0
elif n > 0 or not unlimited:
if start[n] + (abs(count[n]) - 1) * stride[n] >= dim_sizes[n]:
ok = 0
if not ok:
raise HDF4Error(\
\
% (dim_sizes[n], n))
if not data_type in SDC.equivNumericTypes:
raise HDF4Error(\
)
_C._SDwritedata_0(self._id, data_type, start, count, data, stride) | Write data to the dataset.
Args::
data : array of data to write; can be given as a numpy
array, or as Python sequence (whose elements can be
imbricated sequences)
start : indices where to start writing in the dataset;
default to 0 on all dimensions
count : number of values to write along each dimension;
default to the current length of dataset dimensions
stride : sampling interval along each dimension;
default to 1 on all dimensions
For n-dimensional datasets, those 3 parameters are entered
using lists. For one-dimensional datasets, integers
can also be used.
Note that, to write the whole dataset at once, one has simply
to call the method with the dataset values in parameter
'data', omitting all other parameters.
Returns::
None.
C library equivalent : SDwritedata
The dataset can also be written using the familiar indexing and
slicing notation, like ordinary python sequences.
See "High level variable access". |
4,249 | def format_help(self, description):
for bold in ("``", "*"):
parts = []
if description is None:
description = ""
for i, s in enumerate(description.split(bold)):
parts.append(s if i % 2 == 0 else "<b>%s</b>" % s)
description = "".join(parts)
description = urlize(description, autoescape=False)
return mark_safe(description.replace("\n", "<br>")) | Format the setting's description into HTML. |
4,250 | def mkdir(self, name=None, folder_id=):
return self( , method=, encode=,
data=dict(name=name, parent=dict(id=folder_id)) ) | Create a folder with a specified "name" attribute.
folder_id allows to specify a parent folder. |
4,251 | def explode_line(argument_line: str) -> typing.Tuple[str, str]:
parts = tuple(argument_line.split(, 1)[-1].split(, 1))
return parts if len(parts) > 1 else (parts[0], ) | Returns a tuple containing the parameter name and the description parsed
from the given argument line |
4,252 | def obtain_licenses():
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute()
licenses = {r[0]: r[1] for r in cursor.fetchall()}
return licenses | Obtain the licenses in a dictionary form, keyed by url. |
4,253 | def get_key_codes(keys):
    keys = keys.strip().upper().split('+')
    codes = list()
    for key in keys:
        code = ks_settings.KEY_CODES.get(key.strip())
        if code:
            codes.append(code)
    return codes | Calculates the list of key codes from a string with key combinations.
Ex: 'CTRL+A' will produce the output (17, 65) |
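A hedged sketch of the same lookup logic with a tiny stand-in key-code table (KEY_CODES below is hypothetical; the real mapping lives in ks_settings):
KEY_CODES = {'CTRL': 17, 'A': 65}   # hypothetical subset of the real table

def to_key_codes(keys):
    codes = []
    for key in keys.strip().upper().split('+'):
        code = KEY_CODES.get(key.strip())
        if code:
            codes.append(code)
    return codes

print(to_key_codes('ctrl+a'))   # [17, 65]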
4,254 | def predict(self, Xnew=None, filteronly=False, include_likelihood=True, balance=None, **kw):
if balance is None:
p_balance = self.balance
else:
p_balance = balance
(m, V) = self._raw_predict(Xnew,filteronly=filteronly, p_balance=p_balance)
if include_likelihood:
V += float(self.likelihood.variance)
return m, V | Inputs:
------------------
balance: bool
Whether to balance or not the model as a whole |
4,255 | def with_(self, replacement):
ensure_string(replacement)
if is_mapping(self._replacements):
raise ReplacementError("string replacements already provided")
self._replacements = dict.fromkeys(self._replacements, replacement)
return self | Provide replacement for string "needles".
:param replacement: Target replacement for needles given in constructor
:return: The :class:`Replacement` object
:raise TypeError: If ``replacement`` is not a string
:raise ReplacementError: If replacement has been already given |
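A self-contained illustration of the behaviour with_() sets up: every needle from the constructor is mapped to the same replacement string (the same dict.fromkeys trick used above) before substitution is applied. The helper name is illustrative, not part of the library.

def apply_replacements(text, needles, replacement):
    # Map every needle to the single replacement, then substitute each one.
    replacements = dict.fromkeys(needles, replacement)
    for needle, target in replacements.items():
        text = text.replace(needle, target)
    return text

assert apply_replacements('foo bar foo', ('foo', 'bar'), 'baz') == 'baz baz baz'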
4,256 | def process_summary(article):
summary = article.summary
summary_parsed = BeautifulSoup(summary, 'html.parser')
math = summary_parsed.find_all(class_='math')
if len(math) > 0:
last_math_text = math[-1].get_text()
if len(last_math_text) > 3 and last_math_text[-3:] == '...':
content_parsed = BeautifulSoup(article._content, 'html.parser')
full_text = content_parsed.find_all(class_='math')[len(math)-1].get_text()
math[-1].string = "%s ..." % full_text
summary = summary_parsed.decode()
import functools
if isinstance(article.get_summary, functools.partial):
memoize_instance = article.get_summary.func.__self__
memoize_instance.cache.clear()
article._summary = "%s<script type='text/javascript'>%s</script>" % (summary, process_summary.mathjax_script) | Ensures summaries are not cut off. Also inserts
mathjax script so that math will be rendered |
4,257 | def add_untagged_ok(self, text: MaybeBytes,
code: Optional[ResponseCode] = None) -> None:
response = ResponseOk(b'*', text, code)
self.add_untagged(response) | Add an untagged ``OK`` response.
See Also:
:meth:`.add_untagged`, :class:`ResponseOk`
Args:
text: The response text.
code: Optional response code. |
4,258 | def disco_loop_asm_format(opc, version, co, real_out,
fn_name_map, all_fns):
if version < 3.0:
co = code2compat(co)
else:
co = code3compat(co)
co_name = co.co_name
mapped_name = fn_name_map.get(co_name, co_name)
new_consts = []
for c in co.co_consts:
if iscode(c):
if version < 3.0:
c_compat = code2compat(c)
else:
c_compat = code3compat(c)
disco_loop_asm_format(opc, version, c_compat, real_out,
fn_name_map, all_fns)
m = re.match(".* object <(.+)> at", str(c))
if m:
basename = m.group(1)
if basename != 'module':
mapped_name = code_uniquify(basename, c.co_code)
c_compat.co_name = mapped_name
c_compat.freeze()
new_consts.append(c_compat)
else:
new_consts.append(c)
pass
co.co_consts = new_consts
m = re.match("^<(.+)>$", co.co_name)
if m or co_name in all_fns:
if co_name in all_fns:
basename = co_name
else:
basename = m.group(1)
if basename != 'module':
mapped_name = code_uniquify(basename, co.co_code)
co_name = mapped_name
assert mapped_name not in fn_name_map
fn_name_map[mapped_name] = basename
co.co_name = mapped_name
pass
elif co_name in fn_name_map:
mapped_name = code_uniquify(co_name, co.co_code)
fn_name_map[mapped_name] = co_name
co.co_name = mapped_name
pass
co = co.freeze()
all_fns.add(co_name)
if co.co_name != '<module>' or co.co_filename:
real_out.write("\n" + format_code_info(co, version, mapped_name) + "\n")
bytecode = Bytecode(co, opc, dup_lines=True)
real_out.write(bytecode.dis(asm_format=True) + "\n") | Produces disassembly in a format more conducive to
automatic assembly by producing inner modules before they are
used by outer ones. Since this is recusive, we'll
use more stack space at runtime. |
4,259 | def segments(self, **kwargs):
segmentBase = self.segmentBase or self.walk_back_get_attr("segmentBase")
segmentLists = self.segmentList or self.walk_back_get_attr("segmentList")
segmentTemplate = self.segmentTemplate or self.walk_back_get_attr("segmentTemplate")
if segmentTemplate:
for segment in segmentTemplate.segments(RepresentationID=self.id,
Bandwidth=int(self.bandwidth * 1000),
**kwargs):
if segment.init:
yield segment
else:
yield segment
elif segmentLists:
for segmentList in segmentLists:
for segment in segmentList.segments:
yield segment
else:
yield Segment(self.base_url, 0, True, True) | Segments are yielded when they are available
Segments appear on a time line, for dynamic content they are only available at a certain time
and sometimes for a limited time. For static content they are all available at the same time.
:param kwargs: extra args to pass to the segment template
:return: yields Segments |
4,260 | def dump(obj, fp, **kw):
xml = dumps(obj, **kw)
if isinstance(fp, basestring):
with open(fp, 'w') as fobj:
fobj.write(xml)
else:
fp.write(xml) | r"""Dump python object to file.
>>> import lazyxml
>>> data = {'demo': {'foo': 1, 'bar': 2}}
>>> lazyxml.dump(data, 'dump.xml')
>>> with open('dump-fp.xml', 'w') as fp:
>>> lazyxml.dump(data, fp)
>>> from cStringIO import StringIO
>>> data = {'demo': {'foo': 1, 'bar': 2}}
>>> buffer = StringIO()
>>> lazyxml.dump(data, buffer)
>>> buffer.getvalue()
<?xml version="1.0" encoding="utf-8"?><demo><foo><![CDATA[1]]></foo><bar><![CDATA[2]]></bar></demo>
>>> buffer.close()
.. note::
``kw`` argument have the same meaning as in :func:`dumps`
:param obj: data for dump to xml.
:param fp: a filename or a file or file-like object that support ``.write()`` to write the xml content
.. versionchanged:: 1.2
The `fp` is a filename of string before this. It can now be a file or file-like object that support ``.write()`` to write the xml content. |
4,261 | def estimate_gas(
self,
block_identifier,
function: str,
*args,
**kwargs,
) -> typing.Optional[int]:
fn = getattr(self.contract.functions, function)
address = to_checksum_address(self.jsonrpc_client.address)
if self.jsonrpc_client.eth_node is constants.EthClient.GETH:
block_identifier = None
try:
return fn(*args, **kwargs).estimateGas(
transaction={'from': address},
block_identifier=block_identifier,
)
except ValueError as err:
action = inspect_client_error(err, self.jsonrpc_client.eth_node)
will_fail = action in (
ClientErrorInspectResult.INSUFFICIENT_FUNDS,
ClientErrorInspectResult.ALWAYS_FAIL,
)
if will_fail:
return None
raise err | Returns a gas estimate for the function with the given arguments or
None if the function call will fail due to Insufficient funds or
the logic in the called function. |
4,262 | def handle_resourceset(ltext, **kwargs):
fullprop=kwargs.get('fullprop')
rid=kwargs.get('rid')
base=kwargs.get('base', VERSA_BASEIRI)
model=kwargs.get('model')
iris = ltext.strip().split()
for i in iris:
model.add(rid, fullprop, I(iri.absolutize(i, base)))
return None | A helper that converts sets of resources from a textual format such as Markdown, including absolutizing relative IRIs |
4,263 | def is_lazy_user(user):
if user.is_anonymous:
return False
backend = getattr(user, 'backend', None)
if backend == 'lazysignup.backends.LazySignupBackend':
return True
from lazysignup.models import LazyUser
return bool(LazyUser.objects.filter(user=user).count() > 0) | Return True if the passed user is a lazy user. |
4,264 | def str_to_datetime(self,format="%Y-%m-%dT%H:%M:%S%ZP"):
if(self.dtype != str):
raise TypeError("str_to_datetime expects SArray of str as input SArray")
with cython_context():
return SArray(_proxy=self.__proxy__.str_to_datetime(format)) | Create a new SArray with all the values cast to datetime. The string format is
specified by the 'format' parameter.
Parameters
----------
format : str
The string format of the input SArray. Default format is "%Y-%m-%dT%H:%M:%S%ZP".
If format is "ISO", the the format is "%Y%m%dT%H%M%S%F%q"
Returns
-------
out : SArray[datetime.datetime]
The SArray converted to the type 'datetime'.
Examples
--------
>>> sa = turicreate.SArray(["20-Oct-2011 09:30:10 GMT-05:30"])
>>> sa.str_to_datetime("%d-%b-%Y %H:%M:%S %ZP")
dtype: datetime
Rows: 1
datetime.datetime(2011, 10, 20, 9, 30, 10, tzinfo=GMT(-5.5))
See Also
----------
datetime_to_str
References
----------
[1] boost date time to string conversion guide (http://www.boost.org/doc/libs/1_48_0/doc/html/date_time/date_time_io.html) |
4,265 | def roi_pooling(input, rois, pool_height, pool_width):
out = roi_pooling_module.roi_pooling(input, rois, pool_height=pool_height, pool_width=pool_width)
output, argmax_output = out[0], out[1]
return output | returns a tensorflow operation for computing the Region of Interest Pooling
@arg input: feature maps on which to perform the pooling operation
@arg rois: list of regions of interest in the format (feature map index, upper left, bottom right)
@arg pool_width: size of the pooling sections |
4,266 | def calculate_size(name, sequence):
data_size = 0
data_size += calculate_size_str(name)
data_size += LONG_SIZE_IN_BYTES
return data_size | Calculates the request payload size |
4,267 | def system_find_databases(input_params={}, always_retry=True, **kwargs):
return DXHTTPRequest('/system/findDatabases', input_params, always_retry=always_retry, **kwargs) | Invokes the /system/findDatabases API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Search#API-method%3A-%2Fsystem%2FfindDatabases |
4,268 | def remove(parent, idx):
if isinstance(parent, dict):
del parent[idx]
elif isinstance(parent, list):
del parent[int(idx)]
else:
raise JSONPathError("Invalid path for operation") | Remove a value from a dict. |
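A self-contained sketch of the type dispatch used by remove() above; ValueError stands in for JSONPathError so the snippet runs on its own, and the sample data is illustrative.

def remove_at(parent, idx):
    if isinstance(parent, dict):
        del parent[idx]
    elif isinstance(parent, list):
        del parent[int(idx)]          # JSON-path indices arrive as strings
    else:
        raise ValueError("Invalid path for operation")

data = {'users': ['alice', 'bob', 'carol']}
remove_at(data['users'], '1')         # remove a list element by string index
remove_at(data, 'users')              # then drop the key itself
assert data == {}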
4,269 | def modified(self):
try:
dt = datetime.datetime.fromtimestamp(self.stats.st_mtime)
return dt.strftime('%Y-%m-%d %H:%M:%S')
except OSError:
return None | Get human-readable last modification date-time.
:returns: iso9008-like date-time string (without timezone)
:rtype: str |
4,270 | def _init_create_child(self):
if self._requires_pty():
self.create_child = mitogen.parent.hybrid_tty_create_child
else:
self.create_child = mitogen.parent.create_child
self.create_child_args = {
'stderr_pipe': True,
} | Initialize the base class :attr:`create_child` and
:attr:`create_child_args` according to whether we need a PTY or not. |
4,271 | def _save_state_and_schedule_next(self, shard_state, tstate, task_directive):
spec = tstate.mapreduce_spec
if task_directive == self._TASK_DIRECTIVE.DROP_TASK:
return
if task_directive in (self._TASK_DIRECTIVE.RETRY_SLICE,
self._TASK_DIRECTIVE.RETRY_TASK):
return self.retry_task()
elif task_directive == self._TASK_DIRECTIVE.ABORT_SHARD:
logging.info("Aborting shard %d of job ",
shard_state.shard_number, shard_state.mapreduce_id)
task = None
elif task_directive == self._TASK_DIRECTIVE.FAIL_TASK:
logging.critical("Shard %s failed permanently.", shard_state.shard_id)
task = None
elif task_directive == self._TASK_DIRECTIVE.RETRY_SHARD:
logging.warning("Shard %s is going to be attempted for the %s time.",
shard_state.shard_id,
shard_state.retries + 1)
task = self._state_to_task(tstate, shard_state)
elif task_directive == self._TASK_DIRECTIVE.RECOVER_SLICE:
logging.warning("Shard %s slice %s is being recovered.",
shard_state.shard_id,
shard_state.slice_id)
task = self._state_to_task(tstate, shard_state)
else:
assert task_directive == self._TASK_DIRECTIVE.PROCEED_TASK
countdown = self._get_countdown_for_next_slice(spec)
task = self._state_to_task(tstate, shard_state, countdown=countdown)
queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME",
"default")
config = util.create_datastore_write_config(spec)
@db.transactional(retries=5)
def _tx():
fresh_shard_state = model.ShardState.get_by_shard_id(tstate.shard_id)
if not fresh_shard_state:
raise db.Rollback()
if (not fresh_shard_state.active or
"worker_active_state_collision" in _TEST_INJECTED_FAULTS):
logging.warning("Shard %s is not active. Possible spurious task "
"execution. Dropping this task.", tstate.shard_id)
logging.warning("Datastores %s", str(shard_state))
return
fresh_shard_state.copy_from(shard_state)
fresh_shard_state.put(config=config)
if task:
self._add_task(task, spec, queue_name)
try:
_tx()
except (datastore_errors.Error, taskqueue.Error,
runtime.DeadlineExceededError, apiproxy_errors.Error) as e:
logging.warning(
"Will retry slice %s %s for the %s time.",
tstate.shard_id,
tstate.slice_id,
self.task_retry_count() + 1)
self._try_free_lease(shard_state)
raise e | Save state and schedule task.
Save shard state to datastore.
Schedule next slice if needed.
Set HTTP response code.
No modification to any shard_state or tstate.
Args:
shard_state: model.ShardState for current shard.
tstate: model.TransientShardState for current shard.
task_directive: enum _TASK_DIRECTIVE.
Returns:
The task to retry if applicable. |
4,272 | def _configure_context(ctx, opts, skip=()):
for oper in opts:
if oper in skip:
continue
if isinstance(oper,chartype):
op = oper.encode("ascii")
else:
op = oper
if isinstance(opts[oper],chartype):
value = opts[oper].encode("ascii")
elif isinstance(opts[oper],bintype):
value = opts[oper]
else:
if pyver == 2:
value = str(opts[oper])
else:
value = str(opts[oper]).encode()
ret = libcrypto.EVP_PKEY_CTX_ctrl_str(ctx, op, value)
if ret == -2:
raise PKeyError("Parameter %s is not supported by key" % oper)
if ret < 1:
raise PKeyError("Error setting parameter %s" % oper) | Configures context of public key operations
@param ctx - context to configure
@param opts - dictionary of options (from kwargs of calling
function)
@param skip - list of options which shouldn't be passed to
context |
4,273 | def _lookup_vpc_count_min_max(session=None, **bfilter):
if session is None:
session = bc.get_reader_session()
try:
res = session.query(
func.count(nexus_models_v2.NexusVPCAlloc.vpc_id),
func.min(nexus_models_v2.NexusVPCAlloc.vpc_id),
func.max(nexus_models_v2.NexusVPCAlloc.vpc_id),
).filter(nexus_models_v2.NexusVPCAlloc.switch_ip ==
bfilter['switch_ip']).one()
count = res[0]
sw_min = res[1]
sw_max = res[2]
return count, sw_min, sw_max
except sa_exc.NoResultFound:
pass
raise c_exc.NexusVPCAllocNotFound(**bfilter) | Look up count/min/max Nexus VPC Allocs for given switch.
:param session: db session
:param bfilter: filter for mappings query
:returns: number of VPCs and min value if query gave a result,
else raise NexusVPCAllocNotFound. |
4,274 | def numDomtblout(domtblout, numHits, evalueT, bitT, sort):
if sort is True:
for hit in numDomtblout_sort(domtblout, numHits, evalueT, bitT):
yield hit
return
header = [, , ,
, , ,
, , ,
, ,
, , , ,
, , , , , ,
, ]
yield header
prev, hits = None, []
for line in domtblout:
if line.startswith('#'):
continue
line = line.strip().split()
desc = ' '.join(line[18:])
line = line[0:18]
line.append(desc)
ID = line[0] + line[9]
line[11], line[13] = float(line[11]), float(line[13])
evalue, bitscore = line[11], line[13]
line[11], line[13] = evalue, bitscore
if ID != prev:
if len(hits) > 0:
for hit in top_hits(hits, numHits, 13, True):
yield hit
hits = []
if evalueT == False and bitT == False:
hits.append(line)
elif evalue <= evalueT and bitT == False:
hits.append(line)
elif evalue <= evalueT and bitscore >= bitT:
hits.append(line)
elif evalueT == False and bitscore >= bitT:
hits.append(line)
prev = ID
for hit in top_hits(hits, numHits, 13, True):
yield hit | parse hmm domain table output
this version is faster but does not work unless the table is sorted |
4,275 | def _get_config_instance(group_or_term, session, **kwargs):
path = group_or_term._get_path()
cached = group_or_term._top._cached_configs.get(path)
if cached:
config = cached
created = False
else:
config, created = get_or_create(session, Config, **kwargs)
return config, created | Finds appropriate config instance and returns it.
Args:
group_or_term (Group or Term):
session (Sqlalchemy session):
kwargs (dict): kwargs to pass to get_or_create.
Returns:
tuple of (Config, bool): |
4,276 | def register_master():
tango_db = Database()
device = "sip_sdp/elt/master"
device_info = DbDevInfo()
device_info._class = "SDPMasterDevice"
device_info.server = "sdp_master_ds/1"
device_info.name = device
devices = tango_db.get_device_name(device_info.server, device_info._class)
if device not in devices:
LOG.info('Registering device: %s (server: %s)',
device_info.name, device_info.server)
tango_db.add_device(device_info) | Register the SDP Master device. |
4,277 | def prepare_batch(self):
for _, metadata in self.certificates_to_issue.items():
self.certificate_handler.validate_certificate(metadata)
with FinalizableSigner(self.secret_manager) as signer:
for _, metadata in self.certificates_to_issue.items():
self.certificate_handler.sign_certificate(signer, metadata)
self.merkle_tree.populate(self.get_certificate_generator())
logging.info('merkle root: %s', b2h(self.merkle_tree.get_blockchain_data()))
return self.merkle_tree.get_blockchain_data() | Propagates exception on failure
:return: byte array to put on the blockchain |
4,278 | def P_conditional(self, i, li, j, lj, y):
Z = np.sum([self._P(i, _li, j, lj, y) for _li in range(self.k + 1)])
return self._P(i, li, j, lj, y) / Z | Compute the conditional probability
P_\theta(li | lj, y)
=
Z^{-1} exp(
theta_{i|y} \indpm{ \lambda_i = Y }
+ \theta_{i,j} \indpm{ \lambda_i = \lambda_j }
)
In other words, compute the conditional probability that LF i outputs
li given that LF j output lj, and Y = y, parameterized by
- a class-conditional LF accuracy parameter \theta_{i|y}
- a symmetric LF correlation paramter \theta_{i,j} |
4,279 | def encoder_data(self, data):
prev_val = self.digital_response_table[data[self.RESPONSE_TABLE_MODE]][self.RESPONSE_TABLE_PIN_DATA_VALUE]
val = int((data[self.MSB] << 7) + data[self.LSB])
if val > 8192:
val -= 16384
pin = data[0]
with self.pymata.data_lock:
self.digital_response_table[data[self.RESPONSE_TABLE_MODE]][self.RESPONSE_TABLE_PIN_DATA_VALUE] = val
if prev_val != val:
callback = self.digital_response_table[pin][self.RESPONSE_TABLE_CALLBACK]
if callback is not None:
callback([self.pymata.ENCODER, pin,
self.digital_response_table[pin][self.RESPONSE_TABLE_PIN_DATA_VALUE]]) | This method handles the incoming encoder data message and stores
the data in the digital response table.
:param data: Message data from Firmata
:return: No return value. |
4,280 | def process_user_record(cls, info):
keys = list(info.keys())
for k in keys:
v = info[k]
if v in ('N/A', 'no_information'):
info[k] = None
elif v == 'false':
info[k] = False
elif v == 'true':
info[k] = True
for p, t in cls.list_sub_objects:
obj = dict([(k[len(p):], info.pop(k))
for k in keys if k.startswith(p)])
if obj.get('active', False):
info.setdefault(t, []).append(obj)
return info | Type convert the csv record, modifies in place. |
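A self-contained sketch of the flattening scheme this method expects: string booleans and "not available" markers are type-converted, and prefixed columns are folded into sub-objects. The column names and the 'access_key_1_' prefix are illustrative stand-ins, not the library's actual field list.

def convert_row(info, list_sub_objects=(('access_key_1_', 'access_keys'),)):
    keys = list(info.keys())
    for k in keys:
        v = info[k]
        if v in ('N/A', 'no_information'):
            info[k] = None
        elif v in ('true', 'false'):
            info[k] = (v == 'true')
    for prefix, target in list_sub_objects:
        # Fold 'access_key_1_*' columns into one nested object.
        obj = {k[len(prefix):]: info.pop(k) for k in keys if k.startswith(prefix)}
        if obj.get('active', False):
            info.setdefault(target, []).append(obj)
    return info

row = {'user': 'alice', 'mfa_active': 'true',
       'access_key_1_active': 'true', 'access_key_1_last_rotated': 'N/A'}
assert convert_row(dict(row))['access_keys'] == [{'active': True, 'last_rotated': None}]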
4,281 | def monkeypatch_method(cls, patch_name):
def decorator(func):
fname = func.__name__
old_func = getattr(cls, fname, None)
if old_func is not None:
old_ref = "_old_%s_%s" % (patch_name, fname)
old_attr = getattr(cls, old_ref, None)
if old_attr is None:
setattr(cls, old_ref, old_func)
else:
raise KeyError("%s.%s already exists."
% (cls.__name__, old_ref))
setattr(cls, fname, func)
return func
return decorator | Add the decorated method to the given class; replace as needed.
If the named method already exists on the given class, it will
be replaced, and a reference to the old method is created as
cls._old<patch_name><name>. If the "_old_<patch_name>_<name>" attribute
already exists, KeyError is raised. |
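A usage sketch for the decorator above, assuming monkeypatch_method is importable from the module that defines it; it patches a method while keeping the original reachable under the _old_<patch_name>_<name> attribute described in the docstring.

class Greeter:
    def greet(self):
        return 'hello'

@monkeypatch_method(Greeter, 'demo')
def greet(self):
    # Call through to the preserved original method.
    return 'patched ' + self._old_demo_greet()

assert Greeter().greet() == 'patched hello'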
4,282 | def to_python(self, omobj):
if omobj.__class__ in self._omclass_to_py:
return self._omclass_to_py[omobj.__class__](omobj)
elif isinstance(omobj, om.OMSymbol):
return self._lookup_to_python(omobj.cdbase, omobj.cd, omobj.name)
elif isinstance(omobj, om.OMApplication):
elem = self.to_python(omobj.elem)
arguments = [self.to_python(x) for x in omobj.arguments]
return elem(*arguments)
raise ValueError('Cannot convert object of type %s to Python' % omobj.__class__.__name__) | Convert OpenMath object to Python |
4,283 | def conv2d(self, filter_size, output_channels, stride=1, padding='SAME', bn=True, activation_fn=tf.nn.relu,
b_value=0.0, s_value=1.0, trainable=True):
self.count['conv'] += 1
scope = 'conv_' + str(self.count['conv'])
with tf.variable_scope(scope):
input_channels = self.input.get_shape()[3]
if filter_size == 0:
filter_size = self.input.get_shape()[2]
padding = 'VALID'
output_shape = [filter_size, filter_size, input_channels, output_channels]
w = self.weight_variable(name='weights', shape=output_shape, trainable=trainable)
self.input = tf.nn.conv2d(self.input, w, strides=[1, stride, stride, 1], padding=padding)
if bn is True:
self.input = self.batch_norm(self.input)
if b_value is not None:
b = self.const_variable(name='bias', shape=[output_channels], value=b_value, trainable=trainable)
self.input = tf.add(self.input, b)
if s_value is not None:
s = self.const_variable(name='scale', shape=[output_channels], value=s_value, trainable=trainable)
self.input = tf.multiply(self.input, s)
if activation_fn is not None:
self.input = activation_fn(self.input)
print(scope + ' output: ' + str(self.input.get_shape())) | 2D Convolutional Layer.
:param filter_size: int. assumes square filter
:param output_channels: int
:param stride: int
:param padding: 'VALID' or 'SAME'
:param activation_fn: tf.nn function
:param b_value: float
:param s_value: float |
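As a side note on the padding parameter above, a small self-contained helper showing how 'SAME' and 'VALID' affect spatial output size under standard TensorFlow convolution semantics; the helper is illustrative and not part of the layer class.

import math

def conv_output_size(n, filter_size, stride, padding):
    # 'SAME' keeps ceil(n / stride) pixels; 'VALID' keeps ceil((n - f + 1) / stride).
    if padding == 'SAME':
        return math.ceil(n / stride)
    return math.ceil((n - filter_size + 1) / stride)

assert conv_output_size(32, 3, 1, 'SAME') == 32
assert conv_output_size(32, 3, 2, 'VALID') == 15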
4,284 | def parse_message(self, msg, msg_signature, timestamp, nonce):
content = self.crypto.decrypt_message(msg, msg_signature, timestamp, nonce)
message = xmltodict.parse(to_text(content))['xml']
message_type = message['InfoType'].lower()
message_class = COMPONENT_MESSAGE_TYPES.get(message_type, ComponentUnknownMessage)
msg = message_class(message)
if msg.type == 'component_verify_ticket':
self.session.set(msg.type, msg.verify_ticket)
elif msg.type in ('authorized', 'updateauthorized'):
msg.query_auth_result = self.query_auth(msg.authorization_code)
return msg | Handle messages pushed by the WeChat server.
:params msg: encrypted message content
:params msg_signature: message signature
:params timestamp: timestamp
:params nonce: nonce (random string) |
4,285 | def html_visit_inheritance_diagram(
self: NodeVisitor, node: inheritance_diagram
) -> None:
inheritance_graph = node["graph"]
urls = build_urls(self, node)
graphviz_graph = inheritance_graph.build_graph(urls)
dot_code = format(graphviz_graph, "graphviz")
aspect_ratio = inheritance_graph.aspect_ratio
if aspect_ratio:
aspect_ratio = math.ceil(math.sqrt(aspect_ratio[1] / aspect_ratio[0]))
if aspect_ratio > 1:
process = subprocess.Popen(
["unflatten", "-l", str(aspect_ratio), "-c", str(aspect_ratio), "-f"],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = process.communicate(dot_code.encode())
dot_code = stdout.decode()
render_dot_html(self, node, dot_code, {}, "inheritance", "inheritance")
raise SkipNode | Builds HTML output from an :py:class:`~uqbar.sphinx.inheritance.inheritance_diagram` node. |
4,286 | def split_fixed_pattern(path):
_first_pattern_pos = path.find('*')
_path_separator_pos = path.rfind('/', 0, _first_pattern_pos) + 1
_path_fixed = path[:_path_separator_pos]
_path_pattern = path[_path_separator_pos:]
return _path_fixed, _path_pattern | Split path into fixed and masked parts
:param path: e.g
https://repo.example.com/artifactory/libs-cpp-release.snapshot/boost/1.60-pm/*.*.*/vc110/x86/win/boost.*.*.*.tar.gz
:return:
_path_fixed: https://repo.example.com/artifactory/libs-cpp-release.snapshot/boost/1.60-pm/
_path_pattern: *.*.*/vc110/x86/win/boost.*.*.*.tar.gz |
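A usage sketch mirroring the docstring example, assuming split_fixed_pattern above is in scope and that the split happens at the last '/' before the first '*' wildcard.

url = ('https://repo.example.com/artifactory/libs-cpp-release.snapshot/'
       'boost/1.60-pm/*.*.*/vc110/x86/win/boost.*.*.*.tar.gz')
fixed, pattern = split_fixed_pattern(url)
assert fixed.endswith('/1.60-pm/')                       # fixed prefix up to the last '/'
assert pattern == '*.*.*/vc110/x86/win/boost.*.*.*.tar.gz'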
4,287 | def merge_errors(self, errors_local, errors_remote):
for prop in errors_remote:
return errors_local | Merge errors
Recursively traverses error graph to merge remote errors into local
errors to return a new joined graph.
:param errors_local: dict, local errors, will be updated
:param errors_remote: dict, remote errors, provides updates
:return: dict |
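The loop body above looks truncated in this dump, so here is a minimal sketch of a recursive merge consistent with the docstring (nested dicts merged, other values collected into lists). This is an assumption about the intended behaviour, not the library's actual implementation.

def merge_error_graphs(local, remote):
    merged = dict(local)
    for prop, remote_val in remote.items():
        if prop not in merged:
            merged[prop] = remote_val
        elif isinstance(merged[prop], dict) and isinstance(remote_val, dict):
            merged[prop] = merge_error_graphs(merged[prop], remote_val)  # recurse into nested errors
        else:
            local_list = merged[prop] if isinstance(merged[prop], list) else [merged[prop]]
            remote_list = remote_val if isinstance(remote_val, list) else [remote_val]
            merged[prop] = local_list + remote_list
    return merged

assert merge_error_graphs({'name': ['required']}, {'name': ['too short']}) == \
    {'name': ['required', 'too short']}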
4,288 | def read_fastq(filename):
if not filename:
return itertools.cycle((None,))
if filename == "-":
filename_fh = sys.stdin
elif filename.endswith('gz'):
if is_python3:
filename_fh = gzip.open(filename, mode='rt')
else:
filename_fh = BufferedReader(gzip.open(filename, mode='rb'))
else:
filename_fh = open(filename)
return stream_fastq(filename_fh) | return a stream of FASTQ entries, handling gzipped and empty files |
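A self-contained sketch of the file-handling dispatch above, useful as a reference when adapting it. It targets Python 3 only, and the actual FASTQ parser (stream_fastq) is not reproduced here.

import gzip
import itertools
import sys

def open_fastq(filename):
    if not filename:
        return itertools.cycle((None,))   # lets callers zip single-end reads with None
    if filename == '-':
        return sys.stdin
    if filename.endswith('gz'):
        return gzip.open(filename, mode='rt')  # decompress to text lines
    return open(filename)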
4,289 | def imprints2marc(self, key, value):
return {
'a': value.get('place'),
'b': value.get('publisher'),
'c': value.get('date'),
} | Populate the ``260`` MARC field. |
4,290 | def preprocess(self, dataset, mode, hparams, interleave=True):
def _preprocess(example):
examples = self.preprocess_example(example, mode, hparams)
if not isinstance(examples, tf.data.Dataset):
examples = tf.data.Dataset.from_tensors(examples)
return examples
if interleave:
dataset = dataset.apply(
tf.data.experimental.parallel_interleave(
_preprocess, sloppy=True, cycle_length=8))
else:
dataset = dataset.flat_map(_preprocess)
return dataset | Runtime preprocessing on the whole dataset.
Return a tf.data.Datset -- the preprocessed version of the given one.
By default this function calls preprocess_example.
Args:
dataset: the Dataset of already decoded but not yet preprocessed features.
mode: tf.estimator.ModeKeys
hparams: HParams, model hyperparameters
interleave: bool, whether to use parallel_interleave, which is faster
but will alter the order of samples non-deterministically, or flat_map,
which is slower but will preserve the sample order.
Returns:
a Dataset |
4,291 | def record_participation(self, client, dt=None):
if dt is None:
date = datetime.now()
else:
date = dt
experiment_key = self.experiment.name
pipe = self.redis.pipeline()
pipe.sadd(_key("p:{0}:years".format(experiment_key)), date.strftime())
pipe.sadd(_key("p:{0}:months".format(experiment_key)), date.strftime())
pipe.sadd(_key("p:{0}:days".format(experiment_key)), date.strftime())
pipe.execute()
keys = [
_key("p:{0}:_all:all".format(experiment_key)),
_key("p:{0}:_all:{1}".format(experiment_key, date.strftime())),
_key("p:{0}:_all:{1}".format(experiment_key, date.strftime())),
_key("p:{0}:_all:{1}".format(experiment_key, date.strftime())),
_key("p:{0}:{1}:all".format(experiment_key, self.name)),
_key("p:{0}:{1}:{2}".format(experiment_key, self.name, date.strftime())),
_key("p:{0}:{1}:{2}".format(experiment_key, self.name, date.strftime())),
_key("p:{0}:{1}:{2}".format(experiment_key, self.name, date.strftime())),
]
msetbit(keys=keys, args=([self.experiment.sequential_id(client), 1] * len(keys))) | Record a user's participation in a test along with a given variation |
4,292 | def _parse_members(self, contents, anexec, params, mode="insert"):
members = self.vparser.parse(contents, anexec)
for param in list(params):
lparam = param.lower()
if lparam in members:
if mode == "insert" and not lparam in anexec.parameters:
anexec.add_parameter(members[lparam])
elif mode == "delete":
anexec.remove_parameter(members[lparam])
for key in members:
if mode == "insert":
if not key.lower() in anexec.parameters:
anexec.members[key] = members[key]
elif mode == "delete" and key in anexec.members:
del anexec.members[key]
if mode == "insert":
memdocs = self.docparser.parse_docs(contents, anexec)
if anexec.name in memdocs:
docs = self.docparser.to_doc(memdocs[anexec.name][0], anexec.name)
self.docparser.process_memberdocs(docs, anexec)
self.docparser.process_embedded(memdocs, anexec) | Parses the local variables for the contents of the specified executable. |
4,293 | def neg_int(i):
try:
if isinstance(i, string_types):
i = int(i)
if not isinstance(i, int) or i > 0:
raise Exception()
except:
raise ValueError("Not a negative integer")
return i | Simple negative integer validation. |
4,294 | def opener_from_zipfile(zipfile):
def opener(filename):
inner_file = zipfile.open(filename)
if PY3:
from io import TextIOWrapper
return TextIOWrapper(inner_file)
else:
return inner_file
return opener | Returns a function that will open a file in a zipfile by name.
For Python3 compatibility, the raw file will be converted to text. |
4,295 | def get_letters( word ):
ta_letters = list()
not_empty = False
WLEN,idx = len(word),0
while (idx < WLEN):
c = word[idx]
if c in uyir_letter_set or c == ayudha_letter:
ta_letters.append(c)
not_empty = True
elif c in grantha_agaram_set:
ta_letters.append(c)
not_empty = True
elif c in accent_symbol_set:
if not not_empty:
ta_letters.append(c)
not_empty = True
else:
ta_letters[-1] += c
else:
if ord(c) < 256 or not (is_tamil_unicode(c)):
ta_letters.append( c )
else:
if not_empty:
ta_letters[-1]+= c
else:
ta_letters.append(c)
not_empty = True
idx = idx + 1
return ta_letters | splits the word into a character-list of tamil/english
characters present in the stream |
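A usage sketch for the splitter above, assuming it is available as tamil.utf8.get_letters from the open-tamil package; the sample word is illustrative.

# A Tamil word splits into grapheme-like letters, while ASCII characters
# pass through one by one.
letters = get_letters(u'தமிழ்123')
# expected: ['த', 'மி', 'ழ்', '1', '2', '3']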
4,296 | def dzip(items1, items2, cls=dict):
try:
len(items1)
except TypeError:
items1 = list(items1)
try:
len(items2)
except TypeError:
items2 = list(items2)
if len(items1) == 0 and len(items2) == 1:
items2 = []
if len(items2) == 1 and len(items1) > 1:
items2 = items2 * len(items1)
if len(items1) != len(items2):
raise ValueError('unable to zip sequences of different lengths: %d != %d' % (
len(items1), len(items2)))
return cls(zip(items1, items2)) | Zips elementwise pairs between items1 and items2 into a dictionary. Values
from items2 can be broadcast onto items1.
Args:
items1 (Iterable): full sequence
items2 (Iterable): can either be a sequence of one item or a sequence
of equal length to `items1`
cls (Type[dict]): dictionary type to use. Defaults to dict, but could
be ordered dict instead.
Returns:
dict: similar to dict(zip(items1, items2))
Example:
>>> assert dzip([1, 2, 3], [4]) == {1: 4, 2: 4, 3: 4}
>>> assert dzip([1, 2, 3], [4, 4, 4]) == {1: 4, 2: 4, 3: 4}
>>> assert dzip([], [4]) == {} |
4,297 | def render(self, name, value, attrs=None, renderer=None):
location = getattr(value, 'name', '')
if location and not hasattr(value, 'url'):
value.url = ''
if hasattr(self, ):
self.template_with_initial = (
)
attrs = attrs or {}
attrs.update({'data-upload-url': self.url})
hidden_name = self.get_hidden_name(name)
kwargs = {}
if django_version >= (1, 11):
kwargs[] = renderer
parent = super(StickyUploadWidget, self).render(name, value, attrs=attrs, **kwargs)
hidden = forms.HiddenInput().render(hidden_name, location, **kwargs)
return mark_safe(parent + ' ' + hidden) | Include a hidden input to store the serialized upload value. |
4,298 | def parse_markdown(markdown_content, site_settings):
markdown_extensions = set_markdown_extensions(site_settings)
html_content = markdown.markdown(
markdown_content,
extensions=markdown_extensions,
)
return html_content | Parse markdown text to html.
:param markdown_content: Markdown text lists #TODO# |
4,299 | def _to_autoassign(self):
autoassign_str = "#Index\t\t{}\t\tIntensity\t\tWorkbook\n".format(
"\t\t".join([str(i + 1) + "Dim" for i in range(len(self.labels))]))
for peak_idx, peak in enumerate(self):
dimensions_str = "\t\t".join([str(chemshift) for chemshift in peak.chemshifts_list])
autoassign_str += "{}\t\t{}\t\t{}\t\t{}\n".format(peak_idx+1, dimensions_str, 0, self.spectrum_name)
return autoassign_str | Save :class:`~nmrstarlib.plsimulator.PeakList` into AutoAssign-formatted string.
:return: Peak list representation in AutoAssign format.
:rtype: :py:class:`str` |