Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k) |
---|---|---|
19,600 | def greater_than_obs_constraints(self):
obs = self.observation_data
gt_obs = obs.loc[obs.apply(lambda x: self._is_greater_const(x.obgnme) \
and x.weight != 0.0,axis=1),"obsnme"]
return gt_obs | get the names of the observations that
are listed as greater than inequality constraints. Zero-
weighted obs are skipped
Returns
-------
pandas.Series : obsnme of observations that are non-zero weighted
greater than constraints |
19,601 | def knn_impute_with_argpartition(
X,
missing_mask,
k,
verbose=False,
print_interval=100):
start_t = time.time()
n_rows, n_cols = X.shape
neighbor_indices = array([
neighbor_index
for neighbor_index in neighbor_indices
if d_copy[neighbor_index] < effective_infinity
])
n_current_neighbors = len(neighbor_indices)
if n_current_neighbors > 0:
neighbor_weights = inv_d[neighbor_indices]
X_row_major[i, j] = (
dot(X[:, j][neighbor_indices], neighbor_weights) /
neighbor_weights.sum()
)
return X_row_major | Fill in the given incomplete matrix using k-nearest neighbor imputation.
This version is a simpler algorithm meant primarily for testing but
surprisingly it's faster for many (but not all) dataset sizes, particularly
when most of the columns are missing in any given row. The crucial
bottleneck is the call to numpy.argpartition for every missing element
in the array.
Parameters
----------
X : np.ndarray
Matrix to fill of shape (n_samples, n_features)
missing_mask : np.ndarray
Boolean array of same shape as X
k : int
verbose : bool
Returns a row-major copy of X with imputed values. |
19,602 | def IsEquivalent(self, other):
if self.name and other.name:
return self.name == other.name
if self.name:
self_family, self_version_tuple = self._FAMILY_AND_VERSION_PER_NAME.get(
self.name, self._DEFAULT_FAMILY_AND_VERSION)
return (
self_family == other.family and
self_version_tuple == other.version_tuple)
if self.family and self.version:
if other.name:
other_family, other_version_tuple = (
self._FAMILY_AND_VERSION_PER_NAME.get(
other.name, self._DEFAULT_FAMILY_AND_VERSION))
else:
other_family = other.family
other_version_tuple = other.version_tuple
return (
self.family == other_family and
self.version_tuple == other_version_tuple)
if self.family:
if other.name:
other_family, _ = self._FAMILY_AND_VERSION_PER_NAME.get(
other.name, self._DEFAULT_FAMILY_AND_VERSION)
else:
other_family = other.family
return self.family == other_family
return False | Determines if 2 operating system artifacts are equivalent.
This function compares the operating systems based on, in order of:
* name derived from product
* family and version
* family
Args:
other (OperatingSystemArtifact): operating system artifact attribute
container to compare with.
Returns:
bool: True if the operating systems are considered equivalent, False if
the most specific criteria do not match, or no criteria are available. |
19,603 | def _get_group_dataframes(self):
if isinstance(self.data, GroupedDataFrame):
grouper = self.data.groupby()
return (gdf for _, gdf in grouper if not gdf.empty)
else:
return (self.data, ) | Get group dataframes
Returns
-------
out : tuple or generator
Group dataframes |
19,604 | def matches():
wvw_matches = get_cached("wvw/matches.json", False).get("wvw_matches")
for match in wvw_matches:
match["start_time"] = parse_datetime(match["start_time"])
match["end_time"] = parse_datetime(match["end_time"])
return wvw_matches | This resource returns a list of the currently running WvW matches, with
the participating worlds included in the result. Further details about a
match can be requested using the ``match_details`` function.
The response is a list of match objects, each of which contains the
following properties:
wvw_match_id (string):
The WvW match id.
red_world_id (number):
The world id of the red world.
blue_world_id (number):
The world id of the blue world.
green_world_id (number):
The world id of the green world.
start_time (datetime):
A timestamp of when the match started.
end_time (datetime):
A timestamp of when the match ends. |
19,605 | def create(self, container, instances=None, map_name=None, **kwargs):
return self.run_actions('create', container, instances=instances, map_name=map_name, **kwargs) | Creates container instances for a container configuration.
:param container: Container name.
:type container: unicode | str
:param instances: Instance name to create. If not specified, will create all instances as specified in the
configuration (or just one default instance).
:type instances: tuple | list
:param map_name: Container map name. Optional - if not provided the default map is used.
:type map_name: unicode | str
:param kwargs: Additional kwargs. If multiple actions are resulting from this, they will only be applied to
the main container creation.
:return: Return values of created containers.
:rtype: list[dockermap.map.runner.ActionOutput] |
19,606 | def images_grouped_by_type(self):
type = -1
images = []
for wc in self:
if wc.type != type:
if images:
yield (type, images)
type = wc.type
images = []
images.append(wc.image)
if images:
yield (type, images) | :return: A generator yielding 2-tuples of (type, [images]) where
adjacent images that share the same type are grouped together. |
19,607 | def process(self):
for tag_proc in self.tag_procs:
before_count = self.entry_count
self.run_tag_processor(tag_proc)
after_count = self.entry_count
if self.verbose:
print( % (
after_count - before_count, tag_proc), file=sys.stderr)
if self.verbose:
print( % self.entry_count,
file=sys.stderr) | Run all tag processors. |
19,608 | def replace_first_key_in_makefile(buf, key, replacement, outfile=None):
regexp = re.compile(r\S+.format(key), re.VERBOSE)
matches = regexp.findall(buf)
if not matches:
msg = "Could not find key = {0} in the provided buffer. "\
"Pattern used = {1}".format(key, regexp.pattern)
raise ValueError(msg)
newbuf = regexp.sub(replacement, buf, count=1)
if outfile is not None:
write_text_file(outfile, newbuf)
return newbuf | Replaces first line in 'buf' matching 'key' with 'replacement'.
Optionally, writes out this new buffer into 'outfile'.
Returns: Buffer after replacement has been done |
19,609 | def otp(password, seed, sequence):
if len(password) not in list(range(4, 64)):
raise ValueError()
if len(seed) not in list(range(1, 17)):
raise ValueError()
for x in seed:
if not x in _VALIDSEEDCHARACTERS:
raise ValueError()
if sequence < 0:
raise ValueError()
seed = seed.encode()
password = password.encode()
thehash = MD4.new(seed + password).digest()
thehash = _fold_md4_or_md5(thehash)
for i in range(0, sequence):
thehash = _fold_md4_or_md5(MD4.new(thehash).digest())
return _sixword_from_raw(thehash) | Calculates a one-time password hash using the given password, seed, and
sequence number and returns it.
Uses the MD4/sixword algorithm as supported by TACACS+ servers.
:type password: str
:param password: A password.
:type seed: str
:param seed: A cryptographic seed.
:type sequence: int
:param sequence: A sequence number.
:rtype: string
:return: A hash. |
19,610 | def not_unless(*desired_flags):
def _decorator(func):
action_id = _action_id(func)
short_action_id = _short_action_id(func)
@wraps(func)
def _wrapped(*args, **kwargs):
active_flags = get_flags()
missing_flags = [flag for flag in desired_flags if flag not in active_flags]
if missing_flags:
hookenv.log('%s called before flag%s: %s' % (
short_action_id,
's' if len(missing_flags) > 1 else '',
', '.join(missing_flags)), hookenv.WARNING)
return func(*args, **kwargs)
_wrapped._action_id = action_id
_wrapped._short_action_id = short_action_id
return _wrapped
return _decorator | Assert that the decorated function can only be called if the desired_flags
are active.
Note that, unlike :func:`when`, this does **not** trigger the decorated
function if the flags match. It **only** raises an exception if the
function is called when the flags do not match.
This is primarily for informational purposes and as a guard clause. |
19,611 | def _convert(self, desired_type: Type[T], obj: S, logger: Logger, options: Dict[str, Dict[str, Any]]) -> T:
for converter in self._converters_list[:-1]:
obj = converter.convert(converter.to_type, obj, logger, options)
return self._converters_list[-1].convert(desired_type, obj, logger, options) | Apply the converters of the chain in order to produce the desired result. Only the last converter will see the
'desired type', the others will be asked to produce their declared to_type.
:param desired_type:
:param obj:
:param logger:
:param options:
:return: |
19,612 | def default_setup():
service = LXCService
lxc_types = dict(LXC=LXC, LXCWithOverlays=LXCWithOverlays,
__default__=UnmanagedLXC)
loader = LXCLoader(lxc_types, service)
manager = LXCManager(loader, service)
return LXCAPI(manager=manager, service=service) | The default API setup for lxc4u
This is the API that you access globally from lxc4u. |
19,613 | def itemcounts(self, **kwargs):
return {k: len(v) for k, v in self._dict.items()} | Returns a dict where the keys are the keys of the container.
The values are the *lengths* of the value sequences stored
in this container. |
19,614 | def handle_message(self, msg):
self.messages.append(
{
"type": msg.category,
"module": msg.module,
"obj": msg.obj,
"line": msg.line,
"column": msg.column,
"path": msg.path,
"symbol": msg.symbol,
"message": html.escape(msg.msg or "", quote=False),
"message-id": msg.msg_id,
}
) | Manage message of different type and in the context of path. |
19,615 | def checker(location, receiver):
path = filepath.FilePath(location)
files = set()
filesContents = {}
def _check(path):
currentFiles = set(fname for fname in os.listdir(location)
if not fname.endswith())
removed = files - currentFiles
added = currentFiles - files
for fname in added:
contents = path.child(fname).getContent()
filesContents[fname] = contents
receiver.add(fname, contents)
for fname in removed:
receiver.remove(fname)
same = currentFiles & files
for fname in same:
newContents = path.child(fname).getContent()
oldContents = filesContents[fname]
if newContents == oldContents:
continue
receiver.remove(fname)
filesContents[fname] = newContents
receiver.add(fname, newContents)
files.clear()
files.update(currentFiles)
return functools.partial(_check, path) | Construct a function that checks a directory for process configuration
The function checks for additions or removals
of JSON process configuration files and calls the appropriate receiver
methods.
:param location: string, the directory to monitor
:param receiver: IEventReceiver
:returns: a function with no parameters |
19,616 | def _mainthread_poll_readable(self):
events = self._recv_selector.select(self.block_time)
for key, mask in events:
if mask == selectors.EVENT_READ:
self._recv_selector.unregister(key.fileobj)
self._threads_limiter.start_thread(target=self._subthread_handle_readable,
args=(key.fileobj,)) | Searches for readable client sockets. These sockets are then put in a subthread
to be handled by _handle_readable |
19,617 | def monitor(args):
r = fapi.list_submissions(args.project, args.workspace)
fapi._check_response_code(r, 200)
statuses = sorted(r.json(), key=lambda k: k['submissionDate'], reverse=True)
header = '\t'.join(list(statuses[0].keys()))
expander = lambda v: '{0}'.format(v)
def expander(thing):
if isinstance(thing, dict):
entityType = thing.get("entityType", None)
if entityType:
return "{0}:{1}".format(entityType, thing['entityName'])
return "{0}".format(thing)
return [header] + ['\t'.join(map(expander, v.values())) for v in statuses] | Retrieve status of jobs submitted from a given workspace, as a list
of TSV lines sorted by descending order of job submission date |
19,618 | def unicode2auto(unicode_text, encode_text):
_all_unique_encodes_, _all_common_encodes_ = _get_unique_common_encodes()
unique_chars = _get_unique_ch(encode_text, _all_common_encodes_)
clen = len(_all_common_encodes_)
msg = "Sorry, couldnNeed more words to find unique encode out side of %d common compound characters' | This function will convert unicode (first argument) text into other
encodings by automatically finding the encoding (from the available encodings) using the
sample encoded text given as the second argument of this function.
unicode_text : Pass the unicode string which has to be converted into another encoding.
encode_text : Pass a sample encoded string used to identify a suitable encoding for it.
This function tries to identify the encoding among the available encodings.
If it finds one, then it will convert unicode_text into that encoding.
Author : Arulalan.T
08.08.2014 |
19,619 | def _append_utc_datetime(self, tag, format, ts, precision, header):
if ts is None:
t = datetime.datetime.utcnow()
elif type(ts) is float:
t = datetime.datetime.utcfromtimestamp(ts)
else:
t = ts
s = t.strftime(format)
if precision == 3:
s += ".%03d" % (t.microsecond / 1000)
elif precision == 6:
s += ".%06d" % t.microsecond
elif precision != 0:
raise ValueError("Precision should be one of 0, 3 or 6 digits")
return self.append_pair(tag, s, header=header) | (Internal) Append formatted datetime. |
19,620 | def savecsv(filename, datadict, mode="w"):
if mode == "a" :
header = False
else:
header = True
with open(filename, mode) as f:
_pd.DataFrame(datadict).to_csv(f, index=False, header=header) | Save a dictionary of data to CSV. |
19,621 | def put(self, resource_id):
resource = self.__model__.query.get(resource_id)
if resource:
error_message = is_valid_method(self.__model__, resource)
if error_message:
raise BadRequestException(error_message)
resource.update(request.json)
db.session().merge(resource)
db.session().commit()
return jsonify(resource)
resource = self.__model__(**request.json)
error_message = is_valid_method(self.__model__, resource)
if error_message:
raise BadRequestException(error_message)
db.session().add(resource)
db.session().commit()
return self._created_response(resource) | Return the JSON representation of a new resource created or updated
through an HTTP PUT call.
If resource_id is not provided, it is assumed the primary key field is
included and a totally new resource is created. Otherwise, the existing
resource referred to by *resource_id* is updated with the provided JSON
data. This method is idempotent.
:returns: ``HTTP 201`` if a new resource is created
:returns: ``HTTP 200`` if a resource is updated
:returns: ``HTTP 400`` if the request is malformed or missing data |
19,622 | def setup_menu_actions(self):
self.recent_notebook_menu.clear()
self.recent_notebooks_actions = []
if self.recent_notebooks:
for notebook in self.recent_notebooks:
name = notebook
action = \
create_action(self,
name,
icon=ima.icon(),
triggered=lambda v,
path=notebook:
self.create_new_client(filename=path))
self.recent_notebooks_actions.append(action)
self.recent_notebooks_actions += \
[None, self.clear_recent_notebooks_action]
else:
self.recent_notebooks_actions = \
[self.clear_recent_notebooks_action]
add_actions(self.recent_notebook_menu, self.recent_notebooks_actions)
self.update_notebook_actions() | Setup and update the menu actions. |
19,623 | def _MergeDifferentId(self):
for a in self._GetIter(self.feed_merger.a_schedule):
for b in self._GetIter(self.feed_merger.b_schedule):
try:
self._Add(a, b, self._MergeEntities(a, b))
self._num_merged += 1
except MergeError:
continue
for a in self._GetIter(self.feed_merger.a_schedule):
if a not in self.feed_merger.a_merge_map:
self._num_not_merged_a += 1
newid = self._HasId(self.feed_merger.b_schedule, self._GetId(a))
self._Add(a, None,
self._Migrate(a, self.feed_merger.a_schedule, newid))
for b in self._GetIter(self.feed_merger.b_schedule):
if b not in self.feed_merger.b_merge_map:
self._num_not_merged_b += 1
newid = self._HasId(self.feed_merger.a_schedule, self._GetId(b))
self._Add(None, b,
self._Migrate(b, self.feed_merger.b_schedule, newid))
return self._num_merged | Tries to merge all possible combinations of entities.
This tries to merge every entity in the old schedule with every entity in
the new schedule. Unlike _MergeSameId, the ids do not need to match.
However, _MergeDifferentId is much slower than _MergeSameId.
This method makes use of various methods like _Merge and _Migrate which
are not implemented in the abstract DataSetMerger class. These methods
should be overwritten in a subclass to allow _MergeSameId to work with
different entity types.
Returns:
The number of merged entities. |
19,624 | def cudnnSetTensor4dDescriptor(tensorDesc, format, dataType, n, c, h, w):
status = _libcudnn.cudnnSetTensor4dDescriptor(tensorDesc, format, dataType,
n, c, h, w)
cudnnCheckStatus(status) | Initialize a previously created Tensor 4D object.
This function initializes a previously created Tensor4D descriptor object. The strides of
the four dimensions are inferred from the format parameter and set in such a way that
the data is contiguous in memory with no padding between dimensions.
Parameters
----------
tensorDesc : cudnnTensorDescriptor
Handle to a previously created tensor descriptor.
format : cudnnTensorFormat
Type of format.
dataType : cudnnDataType
Data type.
n : int
Number of images.
c : int
Number of feature maps per image.
h : int
Height of each feature map.
w : int
Width of each feature map. |
19,625 | def _group_matching(tlist, cls):
opens = []
tidx_offset = 0
for idx, token in enumerate(list(tlist)):
tidx = idx - tidx_offset
if token.is_whitespace:
continue
if token.is_group and not isinstance(token, cls):
_group_matching(token, cls)
continue
if token.match(*cls.M_OPEN):
opens.append(tidx)
elif token.match(*cls.M_CLOSE):
try:
open_idx = opens.pop()
except IndexError:
continue
close_idx = tidx
tlist.group_tokens(cls, open_idx, close_idx)
tidx_offset += close_idx - open_idx | Groups Tokens that have beginning and end. |
19,626 | def all_address_target_pairs(cls, address_families):
addr_tgt_pairs = []
for af in address_families:
addr_tgt_pairs.extend(af.addressables.items())
return addr_tgt_pairs | Implementation of `address_target_pairs_from_address_families()` which does no filtering. |
19,627 | def load_json_file(filepath):
securesystemslib.formats.PATH_SCHEMA.check_match(filepath)
deserialized_object = None
if filepath.endswith('.gz'):
logger.debug('Reading gzip-compressed file ' + str(filepath) + '.')
fileobject = six.StringIO(gzip.open(filepath).read().decode())
else:
logger.debug('Reading file ' + str(filepath) + '.')
fileobject = open(filepath)
try:
deserialized_object = json.load(fileobject)
except (ValueError, TypeError) as e:
raise securesystemslib.exceptions.Error(
'Cannot deserialize to a Python object: ' + repr(filepath))
else:
fileobject.close()
return deserialized_object
finally:
fileobject.close() | <Purpose>
Deserialize a JSON object from a file containing the object.
<Arguments>
filepath:
Absolute path of JSON file.
<Exceptions>
securesystemslib.exceptions.FormatError: If 'filepath' is improperly
formatted.
securesystemslib.exceptions.Error: If 'filepath' cannot be deserialized to
a Python object.
IOError in case of runtime IO exceptions.
<Side Effects>
None.
<Return>
Deserialized object. For example, a dictionary. |
19,628 | def _create_ring(self, nodes):
_weight_sum = 0
for node_conf in self._nodes.values():
_weight_sum += node_conf['weight']
self._weight_sum = _weight_sum
_distribution = Counter()
_keys = []
_ring = {}
for node_name, node_conf in self._nodes.items():
for h in self._hashi_weight_generator(node_name, node_conf):
_ring[h] = node_name
insort(_keys, h)
_distribution[node_name] += 1
self._distribution = _distribution
self._keys = _keys
self._ring = _ring | Generate a ketama compatible continuum/ring. |
19,629 | def _cmdline(argv=None):
parser = ArgumentParser()
parser.add_argument("--checkout", default="HEAD",
help="branch, tag, or commit to use [HEAD]")
parser.add_argument("--name", default=_NAME,
help="application name [{:s}]".format(_NAME))
parser.add_argument("--repo", default=_REPO,
help="source repo [{:s}]".format(_REPO))
parser.add_argument("--test", action="store_true",
help="run test suite after installation")
parser.add_argument("root", help="installation root")
return parser.parse_args(argv) | Parse command line arguments.
By default, sys.argv is parsed. |
19,630 | def get_fpath(self, cachedir=None, cfgstr=None, ext=None):
_dpath = self.get_cachedir(cachedir)
_fname = self.get_prefix()
_cfgstr = self.get_cfgstr() if cfgstr is None else cfgstr
_ext = self.ext if ext is None else ext
fpath = _args2_fpath(_dpath, _fname, _cfgstr, _ext)
return fpath | Ignore:
fname = _fname
cfgstr = _cfgstr |
19,631 | async def build_get_revoc_reg_request(submitter_did: Optional[str],
revoc_reg_def_id: str,
timestamp: int) -> str:
logger = logging.getLogger(__name__)
logger.debug("build_get_revoc_reg_request: >>> submitter_did: %r, revoc_reg_def_id: %r, timestamp: %r",
submitter_did, revoc_reg_def_id, timestamp)
if not hasattr(build_get_revoc_reg_request, "cb"):
logger.debug("build_get_revoc_reg_request: Creating callback")
build_get_revoc_reg_request.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
c_submitter_did = c_char_p(submitter_did.encode()) if submitter_did is not None else None
c_revoc_reg_def_id = c_char_p(revoc_reg_def_id.encode())
c_timestamp = c_int64(timestamp)
request_json = await do_call('indy_build_get_revoc_reg_request',
c_submitter_did,
c_revoc_reg_def_id,
c_timestamp,
build_get_revoc_reg_request.cb)
res = request_json.decode()
logger.debug("build_get_revoc_reg_request: <<< res: %r", res)
return res | Builds a GET_REVOC_REG request. Request to get the accumulated state of the Revocation Registry
by ID. The state is defined by the given timestamp.
:param submitter_did: (Optional) DID of the read request sender (if not provided then default Libindy DID will be used).
:param revoc_reg_def_id: ID of the corresponding Revocation Registry Definition in ledger.
:param timestamp: Requested time represented as a total number of seconds from Unix Epoch
:return: Request result as json. |
19,632 | def get_device_name_list():
dev_names = ctypes.create_string_buffer(1024)
pydaq.DAQmxGetSysDevNames(dev_names, len(dev_names))
return dev_names.value.split(', ') | Returns a list of device names installed. |
19,633 | def register_schemas_dir(self, directory):
for root, dirs, files in os.walk(directory):
dir_path = os.path.relpath(root, directory)
if dir_path == '.':
dir_path = ''
for file_ in files:
if file_.lower().endswith(('.json',)):
schema_name = os.path.join(dir_path, file_)
if schema_name in self.schemas:
raise JSONSchemaDuplicate(
schema_name,
self.schemas[schema_name],
directory
)
self.schemas[schema_name] = os.path.abspath(directory) | Recursively register all json-schemas in a directory.
:param directory: directory path. |
19,634 | def copy(self, source, destination, recursive=False, use_sudo=False):
func = use_sudo and run_as_root or self.run
options = '-r' if recursive else ''
func('cp {0} {1} {2}'.format(options, quote(source), quote(destination))) | Copy a file or directory |
19,635 | def load_fs_mrf_to_syntax_mrf_translation_rules( rulesFile ):
rules = {}
in_f = codecs.open(rulesFile, mode='r', encoding='utf-8')
for line in in_f:
line = line.rstrip()
if line.startswith('¤'):
continue
parts = line.split('@')
if len(parts) < 4:
raise Exception('Unexpected format of the rule line: ', line)
if parts[1] not in rules:
rules[parts[1]] = []
rules[parts[1]].append( parts[3] )
in_f.close()
return rules | Loads rules that can be used to convert from Filosoft's mrf format to
syntactic analyzer's format. Returns a dict containing rules.
Expects that each line in the input file contains a single rule, and that
different parts of the rule separated by @ symbols, e.g.
1@_S_ ?@Substantiiv apellatiiv@_S_ com @Noun common@Nc@NCSX@kesk-
32@_H_ ?@Substantiiv prooprium@_S_ prop @Noun proper@Np@NPCSX@Kesk-
313@_A_@Adjektiiv positiiv@_A_ pos@Adjective positive@A-p@ASX@salkus
Only 2nd element and 4th element are extracted from each line; 2nd element
will be the key of the dict entry, and 4th element will be added to the
value of the dict entry (the value is a list of strings);
A list is used for storing values because one Filosoft's analysis could
be mapped to multiple syntactic analyzer's analyses;
Lines that have ¤ in the beginning of the line will be skipped; |
19,636 | def pip_install_package(source_name, pip_version=None, python_version=None,
mode=InstallMode.min_deps, release=False):
installed_variants = []
skipped_variants = []
pip_exe, context = find_pip(pip_version, python_version)
packages_path = (config.release_packages_path if release
else config.local_packages_path)
tmpdir = mkdtemp(suffix="-rez", prefix="pip-")
stagingdir = os.path.join(tmpdir, "rez_staging")
stagingsep = "".join([os.path.sep, "rez_staging", os.path.sep])
destpath = os.path.join(stagingdir, "python")
binpath = os.path.join(stagingdir, "bin")
incpath = os.path.join(stagingdir, "include")
datapath = stagingdir
if context and config.debug("package_release"):
buf = StringIO()
print >> buf, "\n\npackage download environment:"
context.print_info(buf)
_log(buf.getvalue())
cmd = [pip_exe, "install",
"--install-option=--install-lib=%s" % destpath,
"--install-option=--install-scripts=%s" % binpath,
"--install-option=--install-headers=%s" % incpath,
"--install-option=--install-data=%s" % datapath]
if mode == InstallMode.no_deps:
cmd.append("--no-deps")
cmd.append(source_name)
_cmd(context=context, command=cmd)
_system = System()
distribution_path = DistributionPath([destpath], include_egg=True)
distributions = [d for d in distribution_path.get_distributions()]
for distribution in distribution_path.get_distributions():
requirements = []
if distribution.metadata.run_requires:
for requirement in distribution.metadata.run_requires:
if "environment" in requirement:
if interpret(requirement["environment"]):
requirements.extend(_get_dependencies(requirement, distributions))
elif "extra" in requirement:
pass
else:
requirements.extend(_get_dependencies(requirement, distributions))
tools = []
src_dst_lut = {}
for installed_file in distribution.list_installed_files(allow_fail=True):
source_file = os.path.normpath(os.path.join(destpath, installed_file[0]))
if os.path.exists(source_file):
destination_file = installed_file[0].split(stagingsep)[1]
exe = False
if is_exe(source_file) and \
destination_file.startswith("%s%s" % ("bin", os.path.sep)):
_, _file = os.path.split(destination_file)
tools.append(_file)
exe = True
data = [destination_file, exe]
src_dst_lut[source_file] = data
else:
_log("Source file does not exist: " + source_file + "!")
def make_root(variant, path):
for source_file, data in src_dst_lut.items():
destination_file, exe = data
destination_file = os.path.normpath(os.path.join(path, destination_file))
if not os.path.exists(os.path.dirname(destination_file)):
os.makedirs(os.path.dirname(destination_file))
shutil.copyfile(source_file, destination_file)
if exe:
shutil.copystat(source_file, destination_file)
variant_reqs = []
variant_reqs.append("platform-%s" % _system.platform)
variant_reqs.append("arch-%s" % _system.arch)
variant_reqs.append("os-%s" % _system.os)
if context is None:
py_ver = '.'.join(map(str, sys.version_info[:2]))
else:
python_variant = context.get_resolved_package("python")
py_ver = python_variant.version.trim(2)
variant_reqs.append("python-%s" % py_ver)
name, _ = parse_name_and_version(distribution.name_and_version)
name = distribution.name[0:len(name)].replace("-", "_")
with make_package(name, packages_path, make_root=make_root) as pkg:
pkg.version = distribution.version
if distribution.metadata.summary:
pkg.description = distribution.metadata.summary
pkg.variants = [variant_reqs]
if requirements:
pkg.requires = requirements
commands = []
commands.append("env.PYTHONPATH.append()")
if tools:
pkg.tools = tools
commands.append("env.PATH.append()")
pkg.commands = .join(commands)
installed_variants.extend(pkg.installed_variants or [])
skipped_variants.extend(pkg.skipped_variants or [])
shutil.rmtree(tmpdir)
return installed_variants, skipped_variants | Install a pip-compatible python package as a rez package.
Args:
source_name (str): Name of package or archive/url containing the pip
package source. This is the same as the arg you would pass to
the 'pip install' command.
pip_version (str or `Version`): Version of pip to use to perform the
install, uses latest if None.
python_version (str or `Version`): Python version to use to perform the
install, and subsequently have the resulting rez package depend on.
mode (`InstallMode`): Installation mode, determines how dependencies are
managed.
release (bool): If True, install as a released package; otherwise, it
will be installed as a local package.
Returns:
2-tuple:
List of `Variant`: Installed variants;
List of `Variant`: Skipped variants (already installed). |
19,637 | def extents(triangles, areas=None):
triangles = np.asanyarray(triangles, dtype=np.float64)
if not util.is_shape(triangles, (-1, 3, 3)):
raise ValueError('triangles must be (n, 3, 3)!')
if areas is None:
areas = area(triangles=triangles,
sum=False)
a = triangles[:, 1] - triangles[:, 0]
b = triangles[:, 2] - triangles[:, 0]
length_a = (a**2).sum(axis=1)**.5
length_b = (b**2).sum(axis=1)**.5
nonzero_a = length_a > tol.merge
nonzero_b = length_b > tol.merge
box = np.zeros((len(triangles), 2), dtype=np.float64)
box[:, 0][nonzero_a] = (areas[nonzero_a] * 2) / length_a[nonzero_a]
box[:, 1][nonzero_b] = (areas[nonzero_b] * 2) / length_b[nonzero_b]
return box | Return the 2D bounding box size of each triangle.
Parameters
----------
triangles : (n, 3, 3) float
Triangles in space
areas : (n,) float
Optional area of input triangles
Returns
----------
box : (n, 2) float
The size of each triangle's 2D oriented bounding box |
19,638 | def pset(self, n):
s = set()
for ns in n.nsprefixes.items():
if self.permit(ns):
s.add(ns[1])
return s | Convert the nodes nsprefixes into a set.
@param n: A node.
@type n: L{Element}
@return: A set of namespaces.
@rtype: set |
19,639 | def run_blast_commands(ncbicommandline_method, **keywords):
blast_out_tmp = tempfile.NamedTemporaryFile(mode="w+",delete=False)
keywords['out'] = blast_out_tmp.name
query_file_object_tmp = keywords['query']
keywords['query'] = query_file_object_tmp.name
stderr = ''
error_string = ''
try:
blastplusx_cline = ncbicommandline_method(**keywords)
stdout, stderr = blastplusx_cline()
except ApplicationError as e:
error_string = "Runtime error: " + stderr + "\n" + e.cmd
os.unlink(query_file_object_tmp.name)
return blast_out_tmp, error_string | Runs blastplus/tblastn search, collects result and pass as a xml temporary file. |
19,640 | def get_resolve_diff(self, other):
if self.package_paths != other.package_paths:
from difflib import ndiff
diff = ndiff(self.package_paths, other.package_paths)
raise ResolvedContextError("Cannot diff resolves, package search "
"paths differ:\n%s" % .join(diff))
d = {}
self_pkgs_ = set(x.parent for x in self._resolved_packages)
other_pkgs_ = set(x.parent for x in other._resolved_packages)
self_pkgs = self_pkgs_ - other_pkgs_
other_pkgs = other_pkgs_ - self_pkgs_
if not (self_pkgs or other_pkgs):
return d
self_fams = dict((x.name, x) for x in self_pkgs)
other_fams = dict((x.name, x) for x in other_pkgs)
newer_packages = {}
older_packages = {}
added_packages = set()
removed_packages = set()
for pkg in self_pkgs:
if pkg.name not in other_fams:
removed_packages.add(pkg)
else:
other_pkg = other_fams[pkg.name]
if other_pkg.version > pkg.version:
r = VersionRange.as_span(lower_version=pkg.version,
upper_version=other_pkg.version)
it = iter_packages(pkg.name, range_=r)
pkgs = sorted(it, key=lambda x: x.version)
newer_packages[pkg.name] = pkgs
elif other_pkg.version < pkg.version:
r = VersionRange.as_span(lower_version=other_pkg.version,
upper_version=pkg.version)
it = iter_packages(pkg.name, range_=r)
pkgs = sorted(it, key=lambda x: x.version, reverse=True)
older_packages[pkg.name] = pkgs
for pkg in other_pkgs:
if pkg.name not in self_fams:
added_packages.add(pkg)
if newer_packages:
d["newer_packages"] = newer_packages
if older_packages:
d["older_packages"] = older_packages
if added_packages:
d["added_packages"] = added_packages
if removed_packages:
d["removed_packages"] = removed_packages
return d | Get the difference between the resolve in this context and another.
The difference is described from the point of view of the current context
- a newer package means that the package in `other` is newer than the
package in `self`.
Diffs can only be compared if their package search paths match, an error
is raised otherwise.
The diff is expressed in packages, not variants - the specific variant
of a package is ignored.
Returns:
A dict containing:
- 'newer_packages': A dict containing items:
- package name (str);
- List of `Package` objects. These are the packages up to and
including the newer package in `self`, in ascending order.
- 'older_packages': A dict containing:
- package name (str);
- List of `Package` objects. These are the packages down to and
including the older package in `self`, in descending order.
- 'added_packages': Set of `Package` objects present in `self` but
not in `other`;
- 'removed_packages': Set of `Package` objects present in `other`,
but not in `self`.
If any item ('added_packages' etc) is empty, it is not added to the
resulting dict. Thus, an empty dict is returned if there is no
difference between contexts. |
19,641 | def register_xml_mapping(self, clsdict):
member_to_xml = self._get_or_add_member_to_xml(clsdict)
member_to_xml[self.value] = self.xml_value
xml_to_member = self._get_or_add_xml_to_member(clsdict)
xml_to_member[self.xml_value] = self.value | Add XML mappings to the enumeration class state for this member. |
19,642 | def random(self, length=22):
random_num = int(binascii.b2a_hex(os.urandom(length)), 16)
return self._num_to_string(random_num, pad_to_length=length)[:length] | Generate and return a cryptographically-secure short random string
of the specified length. |
19,643 | def initialize(self):
self._lb = [b[0] for b in self.bounds]
self._ub = [b[1] for b in self.bounds] | initialize in base class |
19,644 | def cross_lists(*sets):
wheels = [iter(_) for _ in sets]
digits = [next(it) for it in wheels]
while True:
yield digits[:]
for i in range(len(digits)-1, -1, -1):
try:
digits[i] = next(wheels[i])
break
except StopIteration:
wheels[i] = iter(sets[i])
digits[i] = next(wheels[i])
else:
break | Return the cross product of the arguments |
19,645 | def read(self, vals):
i = 0
if len(vals[i]) == 0:
self.year = None
else:
self.year = vals[i]
i += 1
if len(vals[i]) == 0:
self.month = None
else:
self.month = vals[i]
i += 1
if len(vals[i]) == 0:
self.day = None
else:
self.day = vals[i]
i += 1
if len(vals[i]) == 0:
self.hour = None
else:
self.hour = vals[i]
i += 1
if len(vals[i]) == 0:
self.minute = None
else:
self.minute = vals[i]
i += 1
if len(vals[i]) == 0:
self.data_source_and_uncertainty_flags = None
else:
self.data_source_and_uncertainty_flags = vals[i]
i += 1
if len(vals[i]) == 0:
self.dry_bulb_temperature = None
else:
self.dry_bulb_temperature = vals[i]
i += 1
if len(vals[i]) == 0:
self.dew_point_temperature = None
else:
self.dew_point_temperature = vals[i]
i += 1
if len(vals[i]) == 0:
self.relative_humidity = None
else:
self.relative_humidity = vals[i]
i += 1
if len(vals[i]) == 0:
self.atmospheric_station_pressure = None
else:
self.atmospheric_station_pressure = vals[i]
i += 1
if len(vals[i]) == 0:
self.extraterrestrial_horizontal_radiation = None
else:
self.extraterrestrial_horizontal_radiation = vals[i]
i += 1
if len(vals[i]) == 0:
self.extraterrestrial_direct_normal_radiation = None
else:
self.extraterrestrial_direct_normal_radiation = vals[i]
i += 1
if len(vals[i]) == 0:
self.horizontal_infrared_radiation_intensity = None
else:
self.horizontal_infrared_radiation_intensity = vals[i]
i += 1
if len(vals[i]) == 0:
self.global_horizontal_radiation = None
else:
self.global_horizontal_radiation = vals[i]
i += 1
if len(vals[i]) == 0:
self.direct_normal_radiation = None
else:
self.direct_normal_radiation = vals[i]
i += 1
if len(vals[i]) == 0:
self.diffuse_horizontal_radiation = None
else:
self.diffuse_horizontal_radiation = vals[i]
i += 1
if len(vals[i]) == 0:
self.global_horizontal_illuminance = None
else:
self.global_horizontal_illuminance = vals[i]
i += 1
if len(vals[i]) == 0:
self.direct_normal_illuminance = None
else:
self.direct_normal_illuminance = vals[i]
i += 1
if len(vals[i]) == 0:
self.diffuse_horizontal_illuminance = None
else:
self.diffuse_horizontal_illuminance = vals[i]
i += 1
if len(vals[i]) == 0:
self.zenith_luminance = None
else:
self.zenith_luminance = vals[i]
i += 1
if len(vals[i]) == 0:
self.wind_direction = None
else:
self.wind_direction = vals[i]
i += 1
if len(vals[i]) == 0:
self.wind_speed = None
else:
self.wind_speed = vals[i]
i += 1
if len(vals[i]) == 0:
self.total_sky_cover = None
else:
self.total_sky_cover = vals[i]
i += 1
if len(vals[i]) == 0:
self.opaque_sky_cover = None
else:
self.opaque_sky_cover = vals[i]
i += 1
if len(vals[i]) == 0:
self.visibility = None
else:
self.visibility = vals[i]
i += 1
if len(vals[i]) == 0:
self.ceiling_height = None
else:
self.ceiling_height = vals[i]
i += 1
if len(vals[i]) == 0:
self.present_weather_observation = None
else:
self.present_weather_observation = vals[i]
i += 1
if len(vals[i]) == 0:
self.present_weather_codes = None
else:
self.present_weather_codes = vals[i]
i += 1
if len(vals[i]) == 0:
self.precipitable_water = None
else:
self.precipitable_water = vals[i]
i += 1
if len(vals[i]) == 0:
self.aerosol_optical_depth = None
else:
self.aerosol_optical_depth = vals[i]
i += 1
if len(vals[i]) == 0:
self.snow_depth = None
else:
self.snow_depth = vals[i]
i += 1
if len(vals[i]) == 0:
self.days_since_last_snowfall = None
else:
self.days_since_last_snowfall = vals[i]
i += 1
if len(vals[i]) == 0:
self.albedo = None
else:
self.albedo = vals[i]
i += 1
if len(vals[i]) == 0:
self.liquid_precipitation_depth = None
else:
self.liquid_precipitation_depth = vals[i]
i += 1
if len(vals[i]) == 0:
self.liquid_precipitation_quantity = None
else:
self.liquid_precipitation_quantity = vals[i]
i += 1 | Read values.
Args:
vals (list): list of strings representing values |
19,646 | def resolve_parameter_refs(self, input):
return self._traverse(input, self.parameters, self._try_resolve_parameter_refs) | Resolves references to parameters within the given dictionary recursively. Other intrinsic functions such as
!GetAtt, !Sub or !Ref to non-parameters will be left untouched.
Result is a dictionary where parameter values are inlined. Don't pass this dictionary directly into
transform's output because it changes the template structure by inlining parameter values.
:param input: Any primitive type (dict, array, string etc) whose values might contain intrinsic functions
:return: A copy of a dictionary with parameter references replaced by actual value. |
19,647 | def _setup_log_prefix(self, plugin_id=):
self._logger_console_fmtter.prefix = % plugin_id
self._logger_console_fmtter.plugin_id = plugin_id
self._logger_file_fmtter.prefix =
self._logger_file_fmtter.plugin_id = % plugin_id | Setup custom warning notification. |
19,648 | def fit_delta_ts(data, livetime, fit_background=True):
data = data / livetime
start = -(data.shape[1] - 1) / 2
end = -start + 1
xs = np.arange(start, end)
rates = []
sigmas = []
means = []
popts = []
pcovs = []
for combination in data:
mean0 = np.argmax(combination) + start
try:
if fit_background:
popt, pcov = optimize.curve_fit(
gaussian,
xs,
combination,
p0=[mean0, 4., 5., 0.1],
bounds=([start, 0, 0, 0], [end, 10, 10, 1])
)
else:
popt, pcov = optimize.curve_fit(
gaussian_wo_offset,
xs,
combination,
p0=[mean0, 4., 5.],
bounds=([start, 0, 0], [end, 10, 10])
)
except RuntimeError:
popt = (0, 0, 0, 0)
rates.append(popt[2])
means.append(popt[0])
sigmas.append(popt[1])
popts.append(popt)
pcovs.append(pcov)
return (
np.array(rates), np.array(means), np.array(sigmas), np.array(popts),
np.array(pcovs)
) | Fits gaussians to delta t for each PMT pair.
Parameters
----------
data: 2d np.array: x = PMT combinations (465), y = time, entry = frequency
livetime: length of data taking in seconds
fit_background: if True: fits gaussian with offset, else without offset
Returns
-------
numpy arrays with rates and means for all PMT combinations |
19,649 | def _save_account(self, account, username):
default_project_name = self._null_project
if account.default_project is not None:
default_project_name = account.default_project.pid
ds_user = self.get_user(username)
if account.date_deleted is None:
logger.debug("account is active")
if ds_user is None:
self._call([
"add", "user",
"accounts=%s" % default_project_name,
"defaultaccount=%s" % default_project_name,
"name=%s" % username])
else:
self._call([
"modify", "user",
"set", "defaultaccount=%s" % default_project_name,
"where", "name=%s" % username])
slurm_projects = self.get_projects_in_user(username)
slurm_projects = [project.lower() for project in slurm_projects]
slurm_projects = set(slurm_projects)
for project in account.person.projects.all():
if project.pid.lower() not in slurm_projects:
self._call([
"add", "user",
"name=%s" % username,
"accounts=%s" % project.pid])
else:
logger.debug("account is not active")
self._delete_account(username)
return | Called when account is created/updated. With username override. |
19,650 | def output(id, url):
try:
experiment = ExperimentClient().get(normalize_job_name(id))
except FloydException:
experiment = ExperimentClient().get(id)
output_dir_url = "%s/%s/files" % (floyd.floyd_web_host, experiment.name)
if url:
floyd_logger.info(output_dir_url)
else:
floyd_logger.info("Opening output path in your browser ...")
webbrowser.open(output_dir_url) | View the files from a job. |
19,651 | def fill_zeros(result, x, y, name, fill):
if fill is None or is_float_dtype(result):
return result
if name.startswith(('r', '__r')):
x, y = y, x
is_variable_type = (hasattr(y, 'dtype') or hasattr(y, 'type'))
is_scalar_type = is_scalar(y)
if not is_variable_type and not is_scalar_type:
return result
if is_scalar_type:
y = np.array(y)
if is_integer_dtype(y):
if (y == 0).any():
mask = ((y == 0) & ~np.isnan(result)).ravel()
shape = result.shape
result = result.astype('float64', copy=False).ravel()
np.putmask(result, mask, fill)
if np.isinf(fill):
signs = y if name.startswith(('r', '__r')) else x
signs = np.sign(signs.astype('float64', copy=False))
negative_inf_mask = (signs.ravel() < 0) & mask
np.putmask(result, negative_inf_mask, -fill)
if "floordiv" in name:
nan_mask = ((y == 0) & (x == 0)).ravel()
np.putmask(result, nan_mask, np.nan)
result = result.reshape(shape)
return result | If this is a reversed op, then flip x,y
If we have an integer value (or array in y)
and we have 0's, fill them with the fill,
return the result.
Mask the nan's from x. |
19,652 | def set_itunes_subtitle(self):
try:
self.itunes_subtitle = self.soup.find('itunes:subtitle').string
except AttributeError:
self.itunes_subtitle = None | Parses subtitle from itunes tags and sets value |
19,653 | def stisObsCount(input):
count = 0
toclose = False
if isinstance(input, str):
input = fits.open(input)
toclose = True
for ext in input:
if 'extname' in ext.header:
if (ext.header['extname'].upper() == 'SCI'):
count += 1
if toclose:
input.close()
return count | Input: A stis multiextension file
Output: Number of stis science extensions in input |
19,654 | def validate_email(email):
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
try:
validate_email(email)
return True
except ValidationError:
return False | Validates an email address
Source: Himanshu Shankar (https://github.com/iamhssingh)
Parameters
----------
email: str
Returns
-------
bool |
19,655 | def fix_journal_name(journal, knowledge_base):
if not journal:
return '', ''
if not knowledge_base:
return journal, ''
if len(journal) < 2:
return journal, ''
volume = ''
if (journal[-1] <= 'Z' and journal[-1] >= 'A') \
and (journal[-2] == '.' or journal[-2] == ' '):
volume += journal[-1]
journal = journal[:-1]
journal = journal.strip()
if journal.upper() in knowledge_base:
journal = knowledge_base[journal.upper()].strip()
elif journal in knowledge_base:
journal = knowledge_base[journal].strip()
elif '.' in journal:
journalnodots = journal.replace('. ', ' ')
journalnodots = journalnodots.replace('.', ' ').strip().upper()
if journalnodots in knowledge_base:
journal = knowledge_base[journalnodots].strip()
journal = journal.replace('. ', '.')
return journal, volume | Convert journal name to Inspire's short form. |
19,656 | def guest_live_resize_cpus(self, userid, cpu_cnt):
action = "live resize guest to have virtual cpus" % (userid,
cpu_cnt)
LOG.info("Begin to %s" % action)
with zvmutils.log_and_reraise_sdkbase_error(action):
self._vmops.live_resize_cpus(userid, cpu_cnt)
LOG.info("%s successfully." % action) | Live resize virtual cpus of guests.
:param userid: (str) the userid of the guest to be live resized
:param cpu_cnt: (int) The number of virtual cpus that the guest should
have in active state after live resize. The value should be an
integer between 1 and 64. |
19,657 | def seek(self, offset, whence=os.SEEK_SET):
if not self._is_open:
raise IOError('Not opened.')
if self._current_offset < 0:
raise IOError(
'Invalid current offset: {0:d} value less than zero.'.format(
self._current_offset))
if whence == os.SEEK_CUR:
offset += self._current_offset
elif whence == os.SEEK_END:
offset += self._range_size
elif whence != os.SEEK_SET:
raise IOError('Unsupported whence.')
if offset < 0:
raise IOError('Invalid offset value less than zero.')
self._current_offset = offset | Seeks to an offset within the file-like object.
Args:
offset (int): offset to seek to.
whence (Optional(int)): value that indicates whether offset is an absolute
or relative position within the file.
Raises:
IOError: if the seek failed.
OSError: if the seek failed. |
19,658 | def form_invalid(self, form, context=None, **kwargs):
if not context:
context = {}
context['errors'] = form.errors
return super(ApiFormView, self).render_to_response(context=context,
status=400) | This will return the request with form errors as well as any
additional context. |
19,659 | def restore(self, key, ttl, value):
return self.execute(b'RESTORE', key, ttl, value) | Creates a key associated with a value that is obtained via DUMP. |
19,660 | def get_node_by_name(self, nodename):
nodes = dict((n.name, n) for n in self.get_all_nodes())
try:
return nodes[nodename]
except KeyError:
raise NodeNotFound("Node %s not found" % nodename) | Return the node corresponding with name `nodename`
:params nodename: Name of the node
:type nodename: str |
19,661 | def __split_nonleaf_node(self, node):
[farthest_node1, farthest_node2] = node.get_farthest_successors(self.__type_measurement);
new_node1 = non_leaf_node(farthest_node1.feature, node.parent, [ farthest_node1 ], None);
new_node2 = non_leaf_node(farthest_node2.feature, node.parent, [ farthest_node2 ], None);
farthest_node1.parent = new_node1;
farthest_node2.parent = new_node2;
for successor in node.successors:
if ( (successor is not farthest_node1) and (successor is not farthest_node2) ):
distance1 = new_node1.get_distance(successor, self.__type_measurement);
distance2 = new_node2.get_distance(successor, self.__type_measurement);
if (distance1 < distance2):
new_node1.insert_successor(successor);
else:
new_node2.insert_successor(successor);
return [new_node1, new_node2]; | !
@brief Performs splitting of the specified non-leaf node.
@param[in] node (non_leaf_node): Non-leaf node that should be splitted.
@return (list) New pair of non-leaf nodes [non_leaf_node1, non_leaf_node2]. |
19,662 | def make_blastcmd_builder(
mode, outdir, format_exe=None, blast_exe=None, prefix="ANIBLAST"
):
if mode == "ANIb":
blastcmds = BLASTcmds(
BLASTfunctions(construct_makeblastdb_cmd, construct_blastn_cmdline),
BLASTexes(
format_exe or pyani_config.MAKEBLASTDB_DEFAULT,
blast_exe or pyani_config.BLASTN_DEFAULT,
),
prefix,
outdir,
)
else:
blastcmds = BLASTcmds(
BLASTfunctions(construct_formatdb_cmd, construct_blastall_cmdline),
BLASTexes(
format_exe or pyani_config.FORMATDB_DEFAULT,
blast_exe or pyani_config.BLASTALL_DEFAULT,
),
prefix,
outdir,
)
return blastcmds | Returns BLASTcmds object for construction of BLAST commands. |
19,663 | def annot_boxplot(ax,dmetrics,xoffwithin=0.85,xoff=1.6,
yoff=0,annotby=,
test=False):
xlabel=ax.get_xlabel()
ylabel=ax.get_ylabel()
if test:
dmetrics.index.name=
dmetrics.columns.name=
dm=dmetrics.melt()
dm[]=1
ax=sns.boxplot(data=dm,x=,y=)
for huei,hue in enumerate(dmetrics.index):
for xi,x in enumerate(dmetrics.columns):
if not pd.isnull(dmetrics.loc[hue,x]):
xco=xi+(huei*xoffwithin/len(dmetrics.index)+(xoff/len(dmetrics.index)))
yco=ax.get_ylim()[1]+yoff
if annotby==:
xco,yco=yco,xco
ax.text(xco,yco,dmetrics.loc[hue,x],ha=)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return ax | :param dmetrics: hue in index, x in columns
#todos
#x|y off in %
xmin,xmax=ax.get_xlim()
(xmax-xmin)+(xmax-xmin)*0.35+xmin |
19,664 | def gatherInput(**Config):
_type = Config.get('type')
while True:
try:
got = raw_input('%s: ' % getLabel(Config))
except EOFError:
got = None
if not got and 'default' in Config:
return Config['default']
try:
return _type(got) if _type else got
except ValueError as e:
err(str(e) or )
except TypeError:
err(str(e) or ) | r"""Helps to interactively get user input. |
19,665 | def preamble(self, lenient=False):
self.validate_signature()
while True:
if not self.atchunk:
self.atchunk = self._chunk_len_type()
if self.atchunk is None:
raise FormatError('This PNG file has no IDAT chunks.')
if self.atchunk[1] == b'IDAT':
return
self.process_chunk(lenient=lenient) | Extract the image metadata by reading
the initial part of the PNG file up to
the start of the ``IDAT`` chunk.
All the chunks that precede the ``IDAT`` chunk are
read and either processed for metadata or discarded.
If the optional `lenient` argument evaluates to `True`,
checksum failures will raise warnings rather than exceptions. |
19,666 | def _handle_fetch_response(self, responses):
self.retry_delay = self.retry_init_delay
self._fetch_attempt_count = 1
if self._msg_block_d:
self._request_d = None
messages = []
try:
for resp in responses:
if resp.partition != self.partition:
log.warning(
"%r: Got response with partition: %r not our own: %r",
self, resp.partition, self.partition)
continue
for message in resp.messages:
if message.offset < self._fetch_offset:
log.debug(
,
message.offset, self._fetch_offset)
continue
messages.append(
SourcedMessage(
message=message.message,
offset=message.offset, topic=self.topic,
partition=self.partition))
self._fetch_offset = message.offset + 1
except ConsumerFetchSizeTooSmall:
factor = 2
if self.buffer_size <= 2**20:
factor = 16
if self.max_buffer_size is None:
self.buffer_size *= factor
elif (self.max_buffer_size is not None and
self.buffer_size < self.max_buffer_size):
self.buffer_size = min(
self.buffer_size * factor, self.max_buffer_size)
else:
log.error("Max fetch size %d too small", self.max_buffer_size)
failure = Failure(
ConsumerFetchSizeTooSmall(
"Max buffer size:%d too small for message",
self.max_buffer_size))
self._start_d.errback(failure)
return
log.debug(
"Next message larger than fetch size, increasing "
"to %d (~2x) and retrying", self.buffer_size)
finally:
if messages:
self._msg_block_d = Deferred()
self._process_messages(messages)
self._retry_fetch(0) | The callback handling the successful response from the fetch request
Delivers the message list to the processor, handles per-message errors
(ConsumerFetchSizeTooSmall), triggers another fetch request
If the processor is still processing the last batch of messages, we
defer this processing until it's done. Otherwise, we start another
fetch request and submit the messages to the processor |
19,667 | def resolve_ports(self, ports):
if not ports:
return None
hwaddr_to_nic = {}
hwaddr_to_ip = {}
for nic in list_nics():
if not is_phy_iface(nic):
continue
_nic = get_bond_master(nic)
if _nic:
log("Replacing iface with bond master " % (nic, _nic),
level=DEBUG)
nic = _nic
hwaddr = get_nic_hwaddr(nic)
hwaddr_to_nic[hwaddr] = nic
addresses = get_ipv4_addr(nic, fatal=False)
addresses += get_ipv6_addr(iface=nic, fatal=False)
hwaddr_to_ip[hwaddr] = addresses
resolved = []
mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
for entry in ports:
if re.match(mac_regex, entry):
if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]:
resolved.append(hwaddr_to_nic[entry])
else:
resolved.append(entry)
return list(set(resolved)) | Resolve NICs not yet bound to bridge(s)
If hwaddress provided then returns resolved hwaddress otherwise NIC. |
19,668 | def decrypt(self):
if not self._crypter:
return b''
try:
plaintext = self._crypter.decrypt(self._ciphertext, **self._decrypt_params)
return plaintext
except Exception as e:
exc_info = sys.exc_info()
six.reraise(
ValueError('could not decrypt %r: %s' % (self._ciphertext, e)),
None,
exc_info[2]
) | Decrypt decrypts the secret and returns the plaintext.
Calling decrypt() may incur side effects such as a call to a remote service for decryption. |
19,669 | def get_portchannel_info_by_intf_output_lacp_actor_max_deskew(self, **kwargs):
config = ET.Element("config")
get_portchannel_info_by_intf = ET.Element("get_portchannel_info_by_intf")
config = get_portchannel_info_by_intf
output = ET.SubElement(get_portchannel_info_by_intf, "output")
lacp = ET.SubElement(output, "lacp")
actor_max_deskew = ET.SubElement(lacp, "actor-max-deskew")
actor_max_deskew.text = kwargs.pop('actor_max_deskew')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
19,670 | def run_console_rules(self, options_bootstrapper, goals, target_roots):
subject = target_roots.specs
console = Console()
for goal in goals:
goal_product = self.goal_map[goal]
params = Params(subject, options_bootstrapper, console)
logger.debug(.format(goal_product, goal))
try:
exit_code = self.scheduler_session.run_console_rule(goal_product, params)
finally:
console.flush()
if exit_code != PANTS_SUCCEEDED_EXIT_CODE:
return exit_code
return PANTS_SUCCEEDED_EXIT_CODE | Runs @console_rules sequentially and interactively by requesting their implicit Goal products.
For retryable failures, raises scheduler.ExecutionError.
:param list goals: The list of requested goal names as passed on the commandline.
:param TargetRoots target_roots: The targets root of the request.
:returns: An exit code. |
19,671 | def smsc(self, smscNumber):
if smscNumber != self._smscNumber:
if self.alive:
self.write('AT+CSCA="{0}"'.format(smscNumber))
self._smscNumber = smscNumber | Set the default SMSC number to use when sending SMS messages |
19,672 | def write(self, out):
if not self.rawtagtable:
self.rawtagtable = self.rawtagdict.items()
tags = tagblock(self.rawtagtable)
self.writeHeader(out, 128 + len(tags))
out.write(tags)
out.flush()
return self | Write ICC Profile to the file. |
19,673 | def list_xattrs(self, path, **kwargs):
return simplejson.loads(_json(self._get(path, 'LISTXATTRS', **kwargs))['XAttrNames']) | Get all of the xattr names for a file or directory.
:rtype: list |
19,674 | def parse_logs(log_list, date, machine_name, log_type):
output = []
count = fail = skip = updated = 0
try:
machine = Machine.objects.get(name=machine_name)
except Machine.DoesNotExist:
return "ERROR: Couldnuserprojectjobidjobnamecpu_usagecoresact_wall_timeest_wall_timememvmemlist_pmemlist_memlist_pvmemctimeqtimeetimestartusert find user account - Assign to user None
output.append(
"line %d: Couldnuseruserprojectprojectt find specified project %s"
% (line_no, data[]))
fail += 1
continue
if machine.mem_per_core:
avail_mem_per_core = machine.mem_per_core * 1024
avail_mem_for_job = avail_mem_per_core * data[]
if data[] * data[] > data[]:
memory_used_per_core = data[]
memory_used_for_job = data[] * data[]
else:
memory_used_per_core = data[] / data[]
memory_used_for_job = data[]
if memory_used_for_job > avail_mem_for_job:
data[] = ceil(
memory_used_per_core / avail_mem_per_core
* data[]
* data[])
data[] = data[] * machine.scaling_factor
queue, created = Queue.objects.get_or_create(name=data[])
try:
cpujob, created = CPUJob.objects.get_or_create(jobid=data[])
cpujob.account = account
cpujob.username = data[]
cpujob.project = project
cpujob.machine = machine
cpujob.date = date
cpujob.queue = queue
cpujob.cpu_usage = data[]
cpujob.est_wall_time = data[]
cpujob.act_wall_time = data[]
cpujob.mem = data[]
cpujob.vmem = data[]
cpujob.ctime = data[]
cpujob.qtime = data[]
cpujob.etime = data[]
cpujob.start = data[]
cpujob.cores = data[]
cpujob.exit_status = data[]
cpujob.jobname = data[]
cpujob.list_mem = data[]
cpujob.list_vmem = data[]
cpujob.list_pmem = data[]
cpujob.list_pvmem = data[]
cpujob.save()
except Exception as e:
output.append(
"line %d: Failed to insert a line - %s" % (line_no, e))
fail += 1
continue
if created:
count += 1
else:
updated += 1
summary = (
% (count, updated, fail, skip)
)
logger.debug( % count)
logger.debug( % updated)
logger.debug( % fail)
logger.debug( % skip)
return summary, output | Parse log file lines in log_type format. |
19,675 | def vector(x, y=None, z=0.0):
if y is None:
return np.array(x, dtype=np.float64)
return np.array([x, y, z], dtype=np.float64) | Return a 3D numpy array representing a vector (of type `numpy.float64`).
If `y` is ``None``, assume input is already in the form `[x,y,z]`. |
19,676 | def parse(cls, url_path):
if not url_path:
return cls()
nodes = []
for node in url_path.rstrip('/').split('/'):
if in node or in node:
m = PATH_NODE_RE.match(node)
if not m:
raise ValueError("Invalid path param: {}".format(node))
name, param_type, param_arg = m.groups()
try:
type_ = Type[param_type]
except KeyError:
if param_type is not None:
raise ValueError("Unknown param type `{}` in: {}".format(param_type, node))
type_ = Type.Integer
nodes.append(PathParam(name, type_, param_arg))
else:
nodes.append(node)
        return cls(*nodes) | Parse a string into a URL path (simple, e.g. does not support typing of URL parameters). |
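Illustrative call for record 19,676; the owning class name, the `{name:Type}` parameter syntax and the `Type` enum member are assumptions inferred from the regex branch above.
    path = UrlPath.parse('/users/{user_id:Integer}/posts')   # UrlPath is a hypothetical owner class
    # nodes would be roughly ['', 'users', PathParam('user_id', Type.Integer, None), 'posts']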
19,677 | def get_agent_requirement_line(check, version):
package_name = get_package_name(check)
if check in (, ):
        return '{}=={}'.format(package_name, version)
m = load_manifest(check)
    platforms = sorted(m.get('supported_os', []))
if platforms == ALL_PLATFORMS:
        return '{}=={}'.format(package_name, version)
elif len(platforms) == 1:
        return "{}=={}; sys_platform == '{}'".format(package_name, version, PLATFORMS_TO_PY.get(platforms[0]))
elif platforms:
        # platform names and sys_platform values below are reconstructed assumptions
        if 'windows' not in platforms:
            return "{}=={}; sys_platform != 'win32'".format(package_name, version)
        elif 'mac_os' not in platforms:
            return "{}=={}; sys_platform != 'darwin'".format(package_name, version)
        elif 'linux' not in platforms:
            return "{}=={}; sys_platform != 'linux2'".format(package_name, version)
raise ManifestError("Can't parse the `supported_os` list for the check {}: {}".format(check, platforms)) | Compose a text line to be used in a requirements.txt file to install a check
pinned to a specific version. |
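Illustrative outputs for record 19,677; the package-name prefix and platform markers depend on elided literals, so treat every value below as an assumption.
    get_agent_requirement_line('disk', '2.0.1')
    # all platforms supported    -> 'datadog-disk==2.0.1'
    # single supported platform  -> "datadog-disk==2.0.1; sys_platform == 'linux2'"
    # all but windows supported  -> "datadog-disk==2.0.1; sys_platform != 'win32'"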
19,678 | def start_list(self):
self._ordered = False
self.start_container(List)
self.set_next_paragraph_style(
if self._item_level <= 0
else ) | Start a list. |
19,679 | def call_env_doctree_read(cls, kb_app, sphinx_app: Sphinx,
doctree: doctree):
for callback in EventAction.get_callbacks(kb_app,
SphinxEvent.DREAD):
callback(kb_app, sphinx_app, doctree) | On doctree-read, do callbacks |
19,680 | def get_comparable_values(self):
        return (str(self.name), str(self.description), str(self.type), bool(self.optional), str(self.constraints) if isinstance(self, Constraintable) else "") | Return a tuple of values representing the uniqueness of the object |
19,681 | def get_thumbnail_of_file(image_name, width):
    hdr = {'User-Agent': 'Mozilla/5.0'}  # header reconstructed; a User-Agent is typically needed to avoid 403s
url = make_thumb_url(image_name, width)
req = urllib2.Request(url, headers=hdr)
try:
logging.debug("Retrieving %s", url)
opened = urllib2.urlopen(req)
extension = opened.headers.subtype
return opened.read(), make_thumbnail_name(image_name, extension)
except urllib2.HTTPError, e:
message = e.fp.read()
raise get_exception_based_on_api_message(message, image_name) | Return the file contents of the thumbnail of the given file. |
19,682 | def _handle_actionpush(self, length):
init_pos = self._src.tell()
while self._src.tell() < init_pos + length:
obj = _make_object("ActionPush")
obj.Type = unpack_ui8(self._src)
push_types = {
0: ("String", self._get_struct_string),
1: ("Float", lambda: unpack_float(self._src)),
2: ("Null", lambda: None),
4: ("RegisterNumber", lambda: unpack_ui8(self._src)),
5: ("Boolean", lambda: unpack_ui8(self._src)),
6: ("Double", lambda: unpack_double(self._src)),
7: ("Integer", lambda: unpack_ui32(self._src)),
8: ("Constant8", lambda: unpack_ui8(self._src)),
9: ("Constant16", lambda: unpack_ui16(self._src)),
}
name, func = push_types[obj.Type]
setattr(obj, name, func())
yield obj | Handle the ActionPush action. |
19,683 | def enable_root_user(self):
uri = "/instances/%s/root" % self.id
resp, body = self.manager.api.method_post(uri)
return body["user"]["password"] | Enables login from any host for the root user and provides
the user with a generated root password. |
19,684 | def package_version():
version_path = os.path.join(os.path.dirname(__file__), )
version = read_version(version_path)
write_version(version_path, version)
return version | Get the package version via Git Tag. |
19,685 | def add_bucket_key_data(self, bucket, key, data, bucket_type=None):
        if self._input_mode == 'query':
            raise ValueError('Already added a query, can\'t add an object.')
else:
if isinstance(key, Iterable) and \
not isinstance(key, string_types):
if bucket_type is not None:
for k in key:
self._inputs.append([bucket, k, data, bucket_type])
else:
for k in key:
self._inputs.append([bucket, k, data])
else:
if bucket_type is not None:
self._inputs.append([bucket, key, data, bucket_type])
else:
self._inputs.append([bucket, key, data])
return self | Adds a bucket/key/keydata triple to the inputs.
:param bucket: the bucket
:type bucket: string
:param key: the key or list of keys
:type key: string
:param data: the key-specific data
:type data: string, list, dict, None
:param bucket_type: Optional name of a bucket type
:type bucket_type: string, None
:rtype: :class:`RiakMapReduce` |
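Chained-usage sketch for record 19,685; the `client` object and the bucket/key names are assumptions.
    mr = RiakMapReduce(client)
    mr.add_bucket_key_data('logs', 'entry-1', None)                        # one key
    mr.add_bucket_key_data('logs', ['entry-2', 'entry-3'], {'weight': 2})  # a list of keys fans out
    # mr._inputs -> [['logs', 'entry-1', None], ['logs', 'entry-2', {...}], ['logs', 'entry-3', {...}]]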
19,686 | def restore_package_version_from_recycle_bin(self, package_version_details, feed_id, package_name, package_version):
route_values = {}
if feed_id is not None:
            route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_name is not None:
            route_values['packageName'] = self._serialize.url('package_name', package_name, 'str')
if package_version is not None:
            route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
        content = self._serialize.body(package_version_details, 'PyPiRecycleBinPackageVersionDetails')
self._send(http_method=,
location_id=,
version=,
route_values=route_values,
content=content) | RestorePackageVersionFromRecycleBin.
[Preview API] Restore a package version from the recycle bin to its associated feed.
:param :class:`<PyPiRecycleBinPackageVersionDetails> <azure.devops.v5_0.py_pi_api.models.PyPiRecycleBinPackageVersionDetails>` package_version_details: Set the 'Deleted' state to 'false' to restore the package to its feed.
:param str feed_id: Name or ID of the feed.
:param str package_name: Name of the package.
:param str package_version: Version of the package. |
19,687 | def create(tournament, name, **params):
params.update({"name": name})
return api.fetch_and_parse(
"POST",
"tournaments/%s/participants" % tournament,
"participant",
**params) | Add a participant to a tournament. |
19,688 | def make_step_rcont (transition):
if not np.isfinite (transition):
    raise ValueError ('transition point %r must be finite' % transition)  # message wording reconstructed
def step_rcont (x):
x = np.asarray (x)
x1 = np.atleast_1d (x)
r = (x1 >= transition).astype (x.dtype)
if x.ndim == 0:
return np.asscalar (r)
return r
    step_rcont.__doc__ = (
        'A step function, right-continuous: returns 1 if x >= %g, 0 otherwise.'
    ) % (transition,)
return step_rcont | Return a ufunc-like step function that is right-continuous. Returns 1 if
x >= transition, 0 otherwise. |
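Worked example for record 19,688, using numpy as in the record itself.
    import numpy as np

    step = make_step_rcont(2.0)
    step(1.9)                          # -> 0.0
    step(2.0)                          # -> 1.0 (right-continuous at the transition)
    step(np.array([1.0, 2.0, 3.0]))    # -> array([0., 1., 1.])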
19,689 | def boolean(cls, true_code, false_code=None):
def func(response):
if response is not None:
status_code = response.status
if status_code == true_code:
return True
if false_code is not None and status_code == false_code:
return False
raise error_for(response)
return func | Callback to validate a response code.
The returned callback checks whether a given response has a
``status_code`` that is considered good (``true_code``) and
raise an appropriate error if not.
The optional ``false_code`` allows for a non-successful status
code to return False instead of throwing an error. This is used,
for example in relationship mutation to indicate that the
relationship was not modified.
Args:
true_code(int): The http status code to consider as a success
Keyword Args:
false_code(int): The http status code to consider a failure
Returns:
A function that given a response returns ``True`` if the
response's status code matches the given code. Raises
a :class:`HeliumError` if the response code does not
match. |
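Usage sketch for record 19,689; the response object is a stand-in exposing only the `.status` attribute the callback reads, and the owner class name is hypothetical.
    class FakeResponse:
        def __init__(self, status):
            self.status = status

    is_deleted = SomeResource.boolean(204, false_code=404)   # SomeResource is hypothetical
    is_deleted(FakeResponse(204))    # -> True
    is_deleted(FakeResponse(404))    # -> False
    is_deleted(FakeResponse(500))    # raises whatever error_for(response) builds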
19,690 | def delete_namespaced_daemon_set(self, name, namespace, **kwargs):
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
return self.delete_namespaced_daemon_set_with_http_info(name, namespace, **kwargs)
else:
(data) = self.delete_namespaced_daemon_set_with_http_info(name, namespace, **kwargs)
return data | delete_namespaced_daemon_set # noqa: E501
delete a DaemonSet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_daemon_set(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the DaemonSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:return: V1Status
If the method is called asynchronously,
returns the request thread. |
19,691 | def set_maxdays(name, days):
    minutes = days * 24 * 60
    _set_account_policy(
        name, 'maxMinutesUntilChangePassword={0}'.format(minutes))
return get_maxdays(name) == days | Set the maximum age of the password in days
:param str name: The username of the account
:param int days: The maximum age of the account in days
:return: True if successful, False if not
:rtype: bool
:raises: CommandExecutionError on user not found or any other unknown error
CLI Example:
.. code-block:: bash
salt '*' shadow.set_maxdays admin 90 |
19,692 | def extract_assets(dstore, what):
qdict = parse(what)
dic = {}
    dic1, dic2 = dstore['assetcol/tagcol'].__toh5__()
dic.update(dic1)
dic.update(dic2)
    arr = dstore['assetcol/array'].value
for tag, vals in qdict.items():
cond = numpy.zeros(len(arr), bool)
for val in vals:
tagidx, = numpy.where(dic[tag] == val)
cond |= arr[tag] == tagidx
arr = arr[cond]
return ArrayWrapper(arr, dic) | Extract an array of assets, optionally filtered by tag.
Use it as /extract/assets?taxonomy=RC&taxonomy=MSBC&occupancy=RES |
19,693 | def loop_misc(self):
if self._sock is None and self._ssl is None:
return MQTT_ERR_NO_CONN
now = time.time()
self._check_keepalive()
if self._last_retry_check+1 < now:
self._message_retry_check()
self._last_retry_check = now
if self._ping_t > 0 and now - self._ping_t >= self._keepalive:
if self._ssl:
self._ssl.close()
self._ssl = None
elif self._sock:
self._sock.close()
self._sock = None
self._callback_mutex.acquire()
if self._state == mqtt_cs_disconnecting:
rc = MQTT_ERR_SUCCESS
else:
rc = 1
if self.on_disconnect:
self._in_callback = True
self.on_disconnect(self, self._userdata, rc)
self._in_callback = False
self._callback_mutex.release()
return MQTT_ERR_CONN_LOST
return MQTT_ERR_SUCCESS | Process miscellaneous network events. Use in place of calling loop() if you
wish to call select() or equivalent on the client socket yourself.
Do not use if you are using the threaded interface loop_start(). |
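Sketch of the external event loop the docstring of record 19,693 alludes to; it assumes the paho-style socket()/want_write()/loop_read()/loop_write() companions of the same client API.
    import select

    while True:
        sock = client.socket()
        wlist = [sock] if client.want_write() else []
        readable, writable, _ = select.select([sock], wlist, [], 1.0)
        if readable:
            client.loop_read()
        if writable:
            client.loop_write()
        client.loop_misc()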
19,694 | def _find_short_paths(self, paths):
path_parts_s = [path.split(os.path.sep) for path in paths]
        root_node = {}
        # trie construction reconstructed (lost in extraction): insert longer paths
        # first, then clear children at each full path so shorter paths shadow
        # their longer descendants
        for path_parts in sorted(path_parts_s, key=len, reverse=True):
            node = root_node
            for path_part in path_parts:
                node = node.setdefault(path_part, {})
            node.clear()
short_path_s = set()
self._collect_leaf_paths(
node=root_node,
path_parts=(),
leaf_paths=short_path_s,
)
return short_path_s | Find short paths of given paths.
E.g. if both `/home` and `/home/aoik` exist, only keep `/home`.
:param paths:
Paths.
:return:
Set of short paths. |
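Expected behaviour for record 19,694, matching its docstring example; `finder` stands in for an instance of the owning class.
    finder._find_short_paths({'/home', '/home/aoik', '/srv/data'})
    # -> {'/home', '/srv/data'}   ('/home/aoik' is shadowed by '/home')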
19,695 | def changeLane(self, vehID, laneIndex, duration):
self._connection._beginMessage(
tc.CMD_SET_VEHICLE_VARIABLE, tc.CMD_CHANGELANE, vehID, 1 + 4 + 1 + 1 + 1 + 4)
self._connection._string += struct.pack(
"!BiBBBi", tc.TYPE_COMPOUND, 2, tc.TYPE_BYTE, laneIndex, tc.TYPE_INTEGER, duration)
self._connection._sendExact() | changeLane(string, int, int) -> None
Forces a lane change to the lane with the given index; if successful,
the lane will be chosen for the given amount of time (in ms). |
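Typical use of record 19,695 through TraCI's vehicle domain; the vehicle ID and timing values are illustrative.
    import traci

    traci.vehicle.changeLane('veh0', 1, 5000)   # move to lane 1 and hold it for 5000 ms if the change succeeds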
19,696 | def element_css_attribute_should_be(self, locator, prop, expected):
        self._info("Verifying element '%s' has css attribute '%s' with a value of '%s'" % (locator, prop, expected))
self._check_element_css_value(locator, prop, expected) | Verifies the element identified by `locator` has the expected
value for the targeted `prop`.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| prop | targeted css attribute | background-color |
| expected | expected value | rgba(0, 128, 0, 1) | |
19,697 | def set_logger(name=None, filename=None, mode=, level=,
fmt=
,
backup_count=5, limit=20480, when=None, with_filehandler=True):
    level = level.split(':')  # separator reconstructed; a 'STREAM:FILE' level pair is assumed
if len(level) == 1:
s_level = f_level = level[0]
else:
s_level = level[0]
f_level = level[1]
init_logger(name=name)
add_streamhandler(s_level, fmt)
if with_filehandler:
add_filehandler(f_level, fmt, filename, mode, backup_count, limit, when)
import_log_funcs() | Configure the global logger. |
19,698 | def createSimulate (netParams=None, simConfig=None, output=False):
from .. import sim
(pops, cells, conns, stims, rxd, simData) = sim.create(netParams, simConfig, output=True)
sim.simulate()
if output: return (pops, cells, conns, stims, simData) | Sequence of commands create, simulate and analyse network |
19,699 | def set_emissions(self, scenario):
for section in emissions:
for source in emissions[section]:
if source not in scenario.columns:
continue
self._set_timed_array(
section, source, list(scenario.index), list(scenario[source])
) | Set emissions from Pandas DataFrame. |