Unnamed: 0 (int64, 0–389k) | code (string, 26–79.6k chars) | docstring (string, 1–46.9k chars)
---|---|---|
26,100 |
def add_config_path(path):
if not os.path.isfile(path):
warnings.warn("Config file does not exist: {path}".format(path=path))
return False
_base, ext = os.path.splitext(path)
if ext and ext[1:] in PARSERS:
parser = ext[1:]
else:
parser = PARSER
parser_class = PARSERS[parser]
_check_parser(parser_class, parser)
if parser != PARSER:
msg = (
"Config for {added} parser added, but used {used} parser. "
"Set up right parser via env var: "
"export LUIGI_CONFIG_PARSER={added}"
)
warnings.warn(msg.format(added=parser, used=PARSER))
parser_class.add_config_path(path)
return True
|
Select config parser by file extension and add path into parser.
|
26,101 |
def generate_look_up_table():
poly = 0xA001
table = []
for index in range(256):
data = index << 1
crc = 0
for _ in range(8, 0, -1):
data >>= 1
if (data ^ crc) & 0x0001:
crc = (crc >> 1) ^ poly
else:
crc >>= 1
table.append(crc)
return table
|
Generate the CRC-16 look-up table (polynomial 0xA001).
:return: List of 256 precomputed CRC values
|
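The table above is the reflected CRC-16 table for polynomial 0xA001 (the Modbus RTU polynomial). A minimal sketch of how such a table is typically consumed, assuming the Modbus initial value 0xFFFF (the initial value and sample payload are assumptions, not taken from the source):
def crc16(data, table):
    # Table-driven CRC-16: fold in one byte per step via the 256-entry table.
    crc = 0xFFFF  # assumed Modbus RTU initial value
    for byte in data:
        crc = (crc >> 8) ^ table[(crc ^ byte) & 0xFF]
    return crc

table = generate_look_up_table()
print(hex(crc16(b"\x01\x04\x00\x00\x00\x02", table)))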
26,102 |
def _get_envs_from_ref_paths(self, refs):
    def _check_ref(env_set, rname):
        if rname in self.saltenv_revmap:
            env_set.update(self.saltenv_revmap[rname])
        else:
            if rname == self.base:
                env_set.add('base')
            elif not self.disable_saltenv_mapping:
                env_set.add(rname)
    use_branches = 'branch' in self.ref_types
    use_tags = 'tag' in self.ref_types
    ret = set()
    if salt.utils.stringutils.is_hex(self.base):
        # base is a commit ID, so it maps directly to the 'base' environment
        ret.add('base')
    for ref in salt.utils.data.decode(refs):
        if ref.startswith('refs/'):
            ref = ref[5:]
        rtype, rname = ref.split('/', 1)
        if rtype == 'heads' and use_branches:
            parted = rname.partition('/')
            rname = parted[2] if parted[2] else parted[0]
            _check_ref(ret, rname)
        elif rtype == 'tags' and use_tags:
            _check_ref(ret, rname)
    return ret
|
Return the names of remote refs (stripped of the remote name) and tags
which map to the branches and tags.
|
26,103 |
def make_route_refresh_request(self, peer_ip, *route_families):
    LOG.debug('Route refresh requested for peer %s and route families %s',
              peer_ip, route_families)
    if not SUPPORTED_GLOBAL_RF.intersection(route_families):
        raise ValueError('Given route family(s) %s is not supported.' %
                         route_families)
    peer_list = []
    # Route-refresh requested for all peers.
    if peer_ip == 'all':
        peer_list.extend(self.get_peers_in_established())
    else:
        given_peer = self._peers.get(peer_ip)
        if not given_peer:
            raise ValueError('Invalid/unrecognized peer %s' % peer_ip)
        if not given_peer.in_established:
            raise ValueError(
                'Peer does not currently have an established session.')
        peer_list.append(given_peer)
for peer in peer_list:
peer.request_route_refresh(*route_families)
return True
|
Request route-refresh for peer with `peer_ip` for given
`route_families`.
Will make route-refresh request for a given `route_family` only if such
capability is supported and if peer is in ESTABLISHED state. Else, such
requests are ignored. Raises appropriate error in other cases. If
`peer_ip` is equal to 'all' makes refresh request to all valid peers.
|
26,104 |
def _parse_coords(coord_lines):
paras = {}
var_pattern = re.compile(r"^([A-Za-z]+\S*)[\s=,]+([\d\-\.]+)$")
for l in coord_lines:
m = var_pattern.match(l.strip())
if m:
paras[m.group(1).strip("=")] = float(m.group(2))
species = []
coords = []
zmode = False
for l in coord_lines:
l = l.strip()
if not l:
break
if (not zmode) and GaussianInput._xyz_patt.match(l):
m = GaussianInput._xyz_patt.match(l)
species.append(m.group(1))
toks = re.split(r"[,\s]+", l.strip())
if len(toks) > 4:
coords.append([float(i) for i in toks[2:5]])
else:
coords.append([float(i) for i in toks[1:4]])
elif GaussianInput._zmat_patt.match(l):
zmode = True
toks = re.split(r"[,\s]+", l.strip())
species.append(toks[0])
toks.pop(0)
if len(toks) == 0:
coords.append(np.array([0, 0, 0]))
else:
nn = []
parameters = []
while len(toks) > 1:
ind = toks.pop(0)
data = toks.pop(0)
try:
nn.append(int(ind))
except ValueError:
nn.append(species.index(ind) + 1)
try:
val = float(data)
parameters.append(val)
except ValueError:
if data.startswith("-"):
parameters.append(-paras[data[1:]])
else:
parameters.append(paras[data])
if len(nn) == 1:
coords.append(np.array([0, 0, parameters[0]]))
elif len(nn) == 2:
coords1 = coords[nn[0] - 1]
coords2 = coords[nn[1] - 1]
bl = parameters[0]
angle = parameters[1]
axis = [0, 1, 0]
op = SymmOp.from_origin_axis_angle(coords1, axis,
angle, False)
coord = op.operate(coords2)
vec = coord - coords1
coord = vec * bl / np.linalg.norm(vec) + coords1
coords.append(coord)
elif len(nn) == 3:
coords1 = coords[nn[0] - 1]
coords2 = coords[nn[1] - 1]
coords3 = coords[nn[2] - 1]
bl = parameters[0]
angle = parameters[1]
dih = parameters[2]
v1 = coords3 - coords2
v2 = coords1 - coords2
axis = np.cross(v1, v2)
op = SymmOp.from_origin_axis_angle(
coords1, axis, angle, False)
coord = op.operate(coords2)
v1 = coord - coords1
v2 = coords1 - coords2
v3 = np.cross(v1, v2)
adj = get_angle(v3, axis)
axis = coords1 - coords2
op = SymmOp.from_origin_axis_angle(
coords1, axis, dih - adj, False)
coord = op.operate(coord)
vec = coord - coords1
coord = vec * bl / np.linalg.norm(vec) + coords1
coords.append(coord)
def _parse_species(sp_str):
try:
return int(sp_str)
except ValueError:
sp = re.sub(r"\d", "", sp_str)
return sp.capitalize()
species = [_parse_species(sp) for sp in species]
return Molecule(species, coords)
|
Helper method to parse coordinates.
|
26,105 |
def get_popular_aliases(self, *args, **kwargs):
aliases_count_total = defaultdict(int)
aliases_counts = self._timesheets_callback()(*args, **kwargs)
for aliases_count in aliases_counts:
for alias, count in aliases_count:
aliases_count_total[alias] += count
sorted_aliases_count_total = sorted(aliases_count_total.items(), key=lambda item: item[1], reverse=True)
return sorted_aliases_count_total
|
Return the aggregated results of :meth:`Timesheet.get_popular_aliases`.
|
26,106 |
def process(in_path, out_file, n_jobs, framesync):
if os.path.isfile(in_path):
file_struct = msaf.io.FileStruct(in_path)
file_struct.features_file = out_file
compute_all_features(file_struct, framesync)
else:
file_structs = msaf.io.get_dataset_files(in_path)
return Parallel(n_jobs=n_jobs)(delayed(compute_all_features)(
file_struct, framesync) for file_struct in file_structs)
|
Computes the features for the selected dataset or file.
|
26,107 |
def _update_project(self, request, data):
    domain_id = identity.get_domain_id_for_operation(request)
    try:
        project_id = data['project_id']
        if keystone.VERSIONS.active >= 3:
            EXTRA_INFO = getattr(settings, 'PROJECT_TABLE_EXTRA_INFO', {})
            kwargs = dict((key, data.get(key)) for key in EXTRA_INFO)
        else:
            kwargs = {}
        return api.keystone.tenant_update(
            request,
            project_id,
            name=data['name'],
            description=data['description'],
            enabled=data['enabled'],
            domain=domain_id,
            **kwargs)
    except exceptions.Conflict:
        msg = _('Project name "%s" is already used.') % data['name']
        self.failure_message = msg
        return
    except Exception as e:
        LOG.debug('Project update failed: %s', e)
        exceptions.handle(request, ignore=True)
        return
|
Update project info
|
26,108 |
def readtxt(filepath):
    with open(filepath, 'r') as f:
        lines = f.readlines()
    return ''.join(lines)
|
read file as is
|
26,109 |
def run_id(self):
    # CamelCase class name -> snake_case run id
    s1 = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', self.__class__.__name__)
    return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
|
Run name without whitespace
|
26,110 |
def dicom_to_nifti(dicom_input, output_file=None):
    assert common.is_siemens(dicom_input)
    if _is_4d(dicom_input):
        logger.info('Found sequence type: MOSAIC 4D')
        return _mosaic_4d_to_nifti(dicom_input, output_file)
    grouped_dicoms = _classic_get_grouped_dicoms(dicom_input)
    if _is_classic_4d(grouped_dicoms):
        logger.info('Found sequence type: CLASSIC 4D')
        return _classic_4d_to_nifti(grouped_dicoms, output_file)
    logger.info('Assuming anatomical data')
    return convert_generic.dicom_to_nifti(dicom_input, output_file)
|
This is the main dicom to nifti conversion function for Siemens images.
As input Siemens images are required. It will then determine the type of images and do the correct conversion
:param output_file: filepath to the output nifti
:param dicom_input: directory with dicom files for 1 scan
|
26,111 |
def calculate_mrcas(self, c1 : ClassId, c2 : ClassId) -> Set[ClassId]:
G = self.G
ancs1 = self._ancestors(c1) | {c1}
ancs2 = self._ancestors(c2) | {c2}
common_ancestors = ancs1 & ancs2
redundant = set()
for a in common_ancestors:
redundant = redundant | nx.ancestors(G, a)
return common_ancestors - redundant
|
Calculate the MRCA for a class pair
|
26,112 |
def update_to_v24(self):
self.__update_common()
try:
date = text_type(self.get("TYER", ""))
if date.strip(u"\x00"):
self.pop("TYER")
dat = text_type(self.get("TDAT", ""))
if dat.strip("\x00"):
self.pop("TDAT")
date = "%s-%s-%s" % (date, dat[2:], dat[:2])
time = text_type(self.get("TIME", ""))
if time.strip("\x00"):
self.pop("TIME")
date += "T%s:%s:00" % (time[:2], time[2:])
if "TDRC" not in self:
self.add(TDRC(encoding=0, text=date))
except UnicodeDecodeError:
pass
if "TORY" in self:
f = self.pop("TORY")
if "TDOR" not in self:
try:
self.add(TDOR(encoding=0, text=str(f)))
except UnicodeDecodeError:
pass
if "IPLS" in self:
f = self.pop("IPLS")
if "TIPL" not in self:
self.add(TIPL(encoding=f.encoding, people=f.people))
for key in ["RVAD", "EQUA", "TRDA", "TSIZ", "TDAT", "TIME"]:
if key in self:
del(self[key])
for f in self.getall("CHAP"):
f.sub_frames.update_to_v24()
for f in self.getall("CTOC"):
f.sub_frames.update_to_v24()
|
Convert older tags into an ID3v2.4 tag.
This updates old ID3v2 frames to ID3v2.4 ones (e.g. TYER to
TDRC). If you intend to save tags, you must call this function
at some point; it is called by default when loading the tag.
|
26,113 |
def add(self, *constraints: Tuple[Bool]) -> None:
raw_constraints = [
c.raw for c in cast(Tuple[Bool], constraints)
]
self.constraints.extend(raw_constraints)
|
Adds the constraints to this solver.
:param constraints: constraints to add
|
26,114 |
def _cast_inplace(terms, acceptable_dtypes, dtype):
dt = np.dtype(dtype)
for term in terms:
if term.type in acceptable_dtypes:
continue
try:
new_value = term.value.astype(dt)
except AttributeError:
new_value = dt.type(term.value)
term.update(new_value)
|
Cast an expression inplace.
Parameters
----------
terms : Op
The expression that should be cast.
acceptable_dtypes : list of acceptable numpy.dtype
Will not cast if term's dtype in this list.
.. versionadded:: 0.19.0
dtype : str or numpy.dtype
The dtype to cast to.
|
26,115 |
def setup_stream_handlers(conf):
class StdoutFilter(logging.Filter):
def filter(self, record):
return record.levelno in (logging.DEBUG, logging.INFO)
log.handlers = []
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setLevel(logging.WARNING)
stdout_handler.addFilter(StdoutFilter())
if conf.debug:
stdout_handler.setLevel(logging.DEBUG)
elif conf.verbose:
stdout_handler.setLevel(logging.INFO)
else:
stdout_handler.setLevel(logging.WARNING)
log.addHandler(stdout_handler)
stderr_handler = logging.StreamHandler(sys.stderr)
msg_format = "%(levelname)s: %(message)s"
stderr_handler.setFormatter(logging.Formatter(fmt=msg_format))
stderr_handler.setLevel(logging.WARNING)
log.addHandler(stderr_handler)
|
Setup logging stream handlers according to the options.
|
26,116 |
def create_network(name, router_ext=None, admin_state_up=True, network_type=None, physical_network=None, segmentation_id=None, shared=None, profile=None):
conn = _auth(profile)
return conn.create_network(name, admin_state_up, router_ext, network_type, physical_network, segmentation_id, shared)
|
Creates a new network
CLI Example:
.. code-block:: bash
salt '*' neutron.create_network network-name
salt '*' neutron.create_network network-name profile=openstack1
:param name: Name of network to create
:param admin_state_up: should the state of the network be up?
default: True (Optional)
:param router_ext: If True, create an external network (Optional)
:param network_type: the type of provider network, such as GRE, VXLAN, VLAN, FLAT, or LOCAL (Optional)
:param physical_network: the name of the physical network as neutron knows it (Optional)
:param segmentation_id: the vlan id or GRE id (Optional)
:param shared: is the network shared or not (Optional)
:param profile: Profile to build on (Optional)
:return: Created network information
|
26,117 |
def transform(self, data):
    with timer('Transforming %s' % self.name, logging.DEBUG):
transformed = super(Token, self).transform(self.tokenize(data))
return transformed.reshape((len(data), self.sequence_length))
|
:param data: DataFrame with column to encode
:return: encoded Series
|
26,118 |
def p_review_comment_1(self, p):
    try:
        if six.PY2:
            value = p[2].decode(encoding='utf-8')
        else:
            value = p[2]
        self.builder.add_review_comment(self.document, value)
    except CardinalityError:
        self.more_than_one_error('ReviewComment', p.lineno(1))
    except OrderError:
        self.order_error('ReviewComment', 'Reviewer', p.lineno(1))
|
review_comment : REVIEW_COMMENT TEXT
|
26,119 |
def sdk_version(self):
if self.__sdk == 0:
try:
self.__sdk = int(self.adb.cmd("shell", "getprop", "ro.build.version.sdk").communicate()[0].decode("utf-8").strip())
except:
pass
return self.__sdk
|
sdk version of connected device.
|
26,120 |
def appendOps(self, ops, append_to=None):
if isinstance(ops, list):
self.ops.extend(ops)
else:
self.ops.append(ops)
parent = self.parent
if parent:
parent._set_require_reconstruction()
|
Append op(s) to the transaction builder
:param list ops: One or a list of operations
|
26,121 |
def create_sample_file(ip, op, num_lines):
with open(ip, "rb") as f:
with open(op, "wb") as fout:
for _ in range(num_lines):
fout.write(f.readline() )
|
make a short version of an RDF file
|
26,122 |
def child_folder(self, fragment):
return Folder(os.path.join(self.path, Folder(fragment).path))
|
Returns a folder object by combining the fragment with this folder's path
|
26,123 |
def init_app(self, app):
app.url_rule_class = partial(NavigationRule, copilot=self)
app.context_processor(self.inject_context)
|
Register the extension with the application.
Args:
app (flask.Flask): The application to register with.
|
26,124 |
def push_json_file(json_file,
url,
dry_run=False,
batch_size=100,
anonymize_fields=[],
remove_fields=[],
rename_fields=[]):
batch = []
json_data = json.loads(json_file.read())
if isinstance(json_data, list):
for item in json_data:
for field_name in anonymize_fields:
if field_name in item:
item[field_name] = md5sum(item[field_name])
for field_name in remove_fields:
if field_name in item:
del item[field_name]
for (field_name, new_field_name) in rename_fields:
if field_name in item:
item[new_field_name] = item[field_name]
del item[field_name]
batch.append(item)
if len(batch) >= batch_size:
post(batch,
url,
dry_run=dry_run)
batch = []
if len(batch) > 0:
post(batch,
url,
dry_run=dry_run)
else:
post(json_data,
url,
dry_run=dry_run)
|
Read the JSON file provided and POST its contents to the specified url
in batches no larger than the specified batch_size.
|
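A hypothetical call illustrating the batching and field transformations described above; the file name, URL and field names are placeholders, not values from the source:
with open("events.json") as fp:                   # placeholder path
    push_json_file(
        fp,
        "https://example.org/ingest",             # placeholder URL
        batch_size=50,                            # POST at most 50 items per request
        anonymize_fields=["email"],               # value replaced by its md5 digest
        remove_fields=["password"],               # field dropped entirely
        rename_fields=[("user", "user_id")],      # value copied to the new key, old key deleted
    )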
26,125 |
def get_phi_subvariables(self, var):
if not self.is_phi_variable(var):
return set()
return self._phi_variables[var]
|
Get sub-variables that phi variable `var` represents.
:param SimVariable var: The variable instance.
:return: A set of sub-variables, or an empty set if `var` is not a phi variable.
:rtype: set
|
26,126 |
def _rotate(coordinates, theta, around):
around = np.asarray(around).reshape(3)
if np.array_equal(around, np.zeros(3)):
raise ValueError()
return Rotation(theta, around).apply_to(coordinates)
|
Rotate a set of coordinates around an arbitrary vector.
Parameters
----------
coordinates : np.ndarray, shape=(n,3), dtype=float
The coordinates being rotated.
theta : float
The angle by which to rotate the coordinates, in radians.
around : np.ndarray, shape=(3,), dtype=float
The vector about which to rotate the coordinates.
|
26,127 |
def GetWindowsEventMessage(self, log_source, message_identifier):
database_reader = self._GetWinevtRcDatabaseReader()
if not database_reader:
return None
if self._lcid != self.DEFAULT_LCID:
message_string = database_reader.GetMessage(
log_source, self.lcid, message_identifier)
if message_string:
return message_string
return database_reader.GetMessage(
log_source, self.DEFAULT_LCID, message_identifier)
|
Retrieves the message string for a specific Windows Event Log source.
Args:
log_source (str): Event Log source, such as "Application Error".
message_identifier (int): message identifier.
Returns:
str: message string or None if not available.
|
26,128 |
def run_vardict(align_bams, items, ref_file, assoc_files, region=None,
out_file=None):
items = shared.add_highdepth_genome_exclusion(items)
if vcfutils.is_paired_analysis(align_bams, items):
call_file = _run_vardict_paired(align_bams, items, ref_file,
assoc_files, region, out_file)
else:
vcfutils.check_paired_problems(items)
call_file = _run_vardict_caller(align_bams, items, ref_file,
assoc_files, region, out_file)
return call_file
|
Run VarDict variant calling.
|
26,129 |
def process_request(self, request):
celery_task = getattr(request, , False)
    if not request.method in ('GET', 'HEAD'):
request._cache_update_cache = False
return None
request._cache_update_cache = False
return response
|
Checks whether the page is already cached and returns the cached
version if available.
|
26,130 |
def probable_languages(
self,
text: str,
max_languages: int = 3) -> Tuple[str, ...]:
scores = self.scores(text)
sorted_scores = sorted(scores.items(), key=itemgetter(1), reverse=True)
languages, probabilities = list(zip(*sorted_scores))
rescaled_probabilities = [log(proba) for proba in probabilities]
distances = [
rescaled_probabilities[pos] - rescaled_probabilities[pos+1]
for pos in range(len(rescaled_probabilities)-1)]
max_distance_pos = max(enumerate(distances, 1), key=itemgetter(1))[0]
limit = min(max_distance_pos, max_languages)
return languages[:limit]
|
List of most probable programming languages,
the list is ordered from the most probable to the least probable one.
:param text: source code.
:param max_languages: maximum number of listed languages.
:return: languages list
|
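The cutoff above is the position of the largest gap between consecutive log-probabilities. A self-contained sketch of that heuristic with made-up scores:
from math import log
from operator import itemgetter

scores = {"Python": 0.8, "C": 0.1, "Go": 0.06, "Lua": 0.04}   # made-up probabilities
ranked = sorted(scores.items(), key=itemgetter(1), reverse=True)
langs, probs = zip(*ranked)
logs = [log(p) for p in probs]
gaps = [logs[i] - logs[i + 1] for i in range(len(logs) - 1)]
cut = max(enumerate(gaps, 1), key=itemgetter(1))[0]   # index just after the biggest drop
print(langs[:min(cut, 3)])                            # ('Python',) -- the first gap dominates here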
26,131 |
def get_running_time(self):
if win32.PROCESS_ALL_ACCESS == win32.PROCESS_ALL_ACCESS_VISTA:
dwAccess = win32.PROCESS_QUERY_LIMITED_INFORMATION
else:
dwAccess = win32.PROCESS_QUERY_INFORMATION
hProcess = self.get_handle(dwAccess)
(CreationTime, ExitTime, _, _) = win32.GetProcessTimes(hProcess)
if self.is_alive():
ExitTime = win32.GetSystemTimeAsFileTime()
CreationTime = CreationTime.dwLowDateTime + (CreationTime.dwHighDateTime << 32)
ExitTime = ExitTime.dwLowDateTime + ( ExitTime.dwHighDateTime << 32)
RunningTime = ExitTime - CreationTime
return RunningTime / 10000
|
Determines how long has this process been running.
@rtype: long
@return: Process running time in milliseconds.
|
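GetProcessTimes reports FILETIME values counted in 100-nanosecond intervals, which is why the result is divided by 10,000 to obtain milliseconds. A tiny worked example with hypothetical FILETIME halves:
low, high = 0x2D05E000, 0x1          # hypothetical dwLowDateTime / dwHighDateTime
intervals = low + (high << 32)       # combine into a 64-bit count of 100 ns intervals
milliseconds = intervals // 10000    # 10,000 x 100 ns = 1 ms
print(intervals, milliseconds)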
26,132 |
def score(self, model):
score = 0
for node in model.nodes():
score += self.local_score(node, model.predecessors(node))
score += self.structure_prior(model)
return score
|
Computes a score to measure how well the given `BayesianModel` fits to the data set.
(This method relies on the `local_score`-method that is implemented in each subclass.)
Parameters
----------
model: `BayesianModel` instance
The Bayesian network that is to be scored. Nodes of the BayesianModel need to coincide
with column names of data set.
Returns
-------
score: float
A number indicating the degree of fit between data and model
Examples
-------
>>> import pandas as pd
>>> import numpy as np
>>> from pgmpy.estimators import K2Score
>>> # create random data sample with 3 variables, where B and C are identical:
>>> data = pd.DataFrame(np.random.randint(0, 5, size=(5000, 2)), columns=list('AB'))
>>> data['C'] = data['B']
>>> K2Score(data).score(BayesianModel([['A','B'], ['A','C']]))
-24242.367348745247
>>> K2Score(data).score(BayesianModel([['A','B'], ['B','C']]))
-16273.793897051042
|
26,133 |
def list_aliases(self):
return aliases
|
List aliases linked to the index
|
26,134 |
def onchain_exchange(self, withdraw_crypto, withdraw_address, value, unit=):
self.onchain_rate = get_onchain_exchange_rates(
self.crypto, withdraw_crypto, best=True, verbose=self.verbose
)
exchange_rate = float(self.onchain_rate[])
result = self.onchain_rate[].get_onchain_exchange_address(
self.crypto, withdraw_crypto, withdraw_address
)
address = result[]
value_satoshi = self.from_unit_to_satoshi(value, unit)
if self.verbose:
print("Adding output of: %s satoshi (%.8f) via onchain exchange, converting to %s %s" % (
value_satoshi, (value_satoshi / 1e8),
exchange_rate * value_satoshi / 1e8, withdraw_crypto.upper()
))
self.outs.append({
: address,
: value_satoshi
})
|
This method is like `add_output` but it sends the funds to another
cryptocurrency via an on-chain exchange service.
|
26,135 |
def _serve_dir(self, abspath, params):
relpath = os.path.relpath(abspath, self._root)
breadcrumbs = self._create_breadcrumbs(relpath)
entries = [{: os.path.join(relpath, e), : e} for e in os.listdir(abspath)]
args = self._default_template_args()
args.update({: os.path.dirname(self._root),
: breadcrumbs,
: entries,
: params})
content = self._renderer.render_name(, args).encode("utf-8")
self._send_content(content, )
|
Show a directory listing.
|
26,136 |
def get_community_names():
ret = dict()
if __utils__[](_HKEY, _COMMUNITIES_GPO_KEY):
_LOG.debug()
current_values = __utils__[](
_HKEY, _COMMUNITIES_GPO_KEY, include_default=False)
if isinstance(current_values, list):
for current_value in current_values:
if not isinstance(current_value, dict):
continue
ret[current_value[]] =
if not ret:
_LOG.debug()
current_values = __utils__[](
_HKEY, _COMMUNITIES_KEY, include_default=False)
if isinstance(current_values, list):
for current_value in current_values:
if not isinstance(current_value, dict):
continue
permissions = six.text_type()
for permission_name in _PERMISSION_TYPES:
if current_value[] == _PERMISSION_TYPES[permission_name]:
permissions = permission_name
break
ret[current_value[]] = permissions
if not ret:
_LOG.debug()
return ret
|
Get the current accepted SNMP community names and their permissions.
If community names are being managed by Group Policy, those values will be
returned instead like this:
.. code-block:: bash
TestCommunity:
Managed by GPO
Community names managed normally will denote the permission instead:
.. code-block:: bash
TestCommunity:
Read Only
Returns:
dict: A dictionary of community names and permissions.
CLI Example:
.. code-block:: bash
salt '*' win_snmp.get_community_names
|
26,137 |
def _urlopen_as_json(self, url, headers=None):
req = Request(url, headers=headers)
return json.loads(urlopen(req).read())
|
Shortcut to return the contents as JSON
|
26,138 |
def log_parameters(self):
arg_params, aux_params = self.module.get_params()
total_parameters = 0
fixed_parameters = 0
learned_parameters = 0
info = []
for name, array in sorted(arg_params.items()):
info.append("%s: %s" % (name, array.shape))
num_parameters = reduce(lambda x, y: x * y, array.shape)
total_parameters += num_parameters
if name in self.module._fixed_param_names:
fixed_parameters += num_parameters
else:
learned_parameters += num_parameters
percent_fixed = 100 * (fixed_parameters / max(1, total_parameters))
percent_learned = 100 * (learned_parameters / max(1, total_parameters))
logger.info("Model parameters: %s", ", ".join(info))
logger.info("Fixed model parameters: %s", ", ".join(self.module._fixed_param_names))
logger.info("Fixing %d parameters (%0.2f%%)", fixed_parameters, percent_fixed)
logger.info("Learning %d parameters (%0.2f%%)", learned_parameters, percent_learned)
logger.info("Total
|
Logs information about model parameters.
|
26,139 |
def open(self, data_source, *args, **kwargs):
if self.sources[data_source]._meta.data_reader.is_file_reader:
        filename = kwargs.get('filename')
        path = kwargs.get('path', '')
        rel_path = kwargs.get('rel_path', '')
        if len(args) > 0:
            filename = args[0]
        if len(args) > 1:
            path = args[1]
        if len(args) > 2:
            rel_path = args[2]
        args = ()
        kwargs = {'filename': os.path.join(rel_path, path, filename)}
        LOGGER.debug('filename: %s', kwargs['filename'])
self.objects[data_source] = self.sources[data_source](*args, **kwargs)
data_src_obj = self.objects[data_source]
meta = [getattr(data_src_obj, m) for m in self.reg.meta_names]
self.reg.register(data_src_obj.data, *meta)
|
Open filename to get data for data_source.
:param data_source: Data source for which the file contains data.
:type data_source: str
Positional and keyword arguments can contain either the data to use for
the data source or the full path of the file which contains data for the
data source.
|
26,140 |
def _minimum_coloring_qubo(x_vars, chi_lb, chi_ub, magnitude=1.):
if chi_lb == chi_ub:
return {}
scaling = magnitude / (chi_ub - chi_lb)
Q = {}
for v in x_vars:
for f, color in enumerate(range(chi_lb, chi_ub)):
idx = x_vars[v][color]
Q[(idx, idx)] = (f + 1) * scaling
return Q
|
We want to disincentivize unneeded colors. Generates the QUBO
that does that.
|
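A small worked example of the scaling, assuming chi_lb=2, chi_ub=4 and magnitude=1.0: the scaling factor is 0.5, so the two "extra" colors 2 and 3 receive diagonal penalties 0.5 and 1.0. The x_vars layout (node -> color -> variable index) is assumed for illustration:
x_vars = {"a": {2: 0, 3: 1}, "b": {2: 2, 3: 3}}   # hypothetical variable indices
chi_lb, chi_ub, magnitude = 2, 4, 1.0
scaling = magnitude / (chi_ub - chi_lb)
Q = {}
for v in x_vars:
    for f, color in enumerate(range(chi_lb, chi_ub)):
        Q[(x_vars[v][color], x_vars[v][color])] = (f + 1) * scaling   # higher colors cost more
print(Q)   # {(0, 0): 0.5, (1, 1): 1.0, (2, 2): 0.5, (3, 3): 1.0}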
26,141 |
def generate(cls, strategy, **kwargs):
assert strategy in (enums.STUB_STRATEGY, enums.BUILD_STRATEGY, enums.CREATE_STRATEGY)
action = getattr(cls, strategy)
return action(**kwargs)
|
Generate a new instance.
The instance will be created with the given strategy (one of
BUILD_STRATEGY, CREATE_STRATEGY, STUB_STRATEGY).
Args:
strategy (str): the strategy to use for generating the instance.
Returns:
object: the generated instance
|
26,142 |
def _make_connection(self, addr, port):
"make our proxy connection"
sender = self._create_connection(addr, port)
self._sender = sender
self._when_done.fire(sender)
|
make our proxy connection
|
26,143 |
def delete_blobs(self, blobs, on_error=None, client=None):
for blob in blobs:
try:
blob_name = blob
if not isinstance(blob_name, six.string_types):
blob_name = blob.name
self.delete_blob(blob_name, client=client)
except NotFound:
if on_error is not None:
on_error(blob)
else:
raise
|
Deletes a list of blobs from the current bucket.
Uses :meth:`delete_blob` to delete each individual blob.
If :attr:`user_project` is set, bills the API request to that project.
:type blobs: list
:param blobs: A list of :class:`~google.cloud.storage.blob.Blob`-s or
blob names to delete.
:type on_error: callable
:param on_error: (Optional) Takes single argument: ``blob``. Called
                 once for each blob raising
:class:`~google.cloud.exceptions.NotFound`;
otherwise, the exception is propagated.
:type client: :class:`~google.cloud.storage.client.Client`
:param client: (Optional) The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:raises: :class:`~google.cloud.exceptions.NotFound` (if
`on_error` is not passed).
|
26,144 |
def before(self, idx):
if not isinstance(idx, datetime):
        raise TypeError("%s is not %s" % (idx, datetime))
day = min(idx.date(), self._hi_limit - DAY)
while day >= self._lo_limit:
if day < self._rd_cache.lo or day >= self._rd_cache.hi:
self._load(self._rd_cache, day)
self._rd_cache.set_ptr(idx)
if self._rd_cache.ptr > 0:
            return self._rd_cache.data[self._rd_cache.ptr - 1]['idx']
day = self._rd_cache.lo - DAY
return None
|
Return datetime of newest existing data record whose
datetime is < idx.
Might not even be in the same year! If no such record exists,
return None.
|
26,145 |
def update_status(self, *args, **kwargs):
    post_data = {}
    media_ids = kwargs.pop('media_ids', None)
    if media_ids is not None:
        post_data['media_ids'] = list_to_csv(media_ids)
    return bind_api(
        api=self,
        path='/statuses/update.json',
        method='POST',
        payload_type='status',
        allowed_param=['status', 'in_reply_to_status_id',
                       'in_reply_to_status_id_str',
                       'auto_populate_reply_metadata', 'lat', 'long',
                       'source', 'place_id', 'display_coordinates'],
        require_auth=True
    )(post_data=post_data, *args, **kwargs)
|
:reference: https://dev.twitter.com/rest/reference/post/statuses/update
:allowed_param:'status', 'in_reply_to_status_id', 'in_reply_to_status_id_str', 'auto_populate_reply_metadata', 'lat', 'long', 'source', 'place_id', 'display_coordinates', 'media_ids'
|
26,146 |
def determine_paths(self, package_name=None, create_package_dir=False, dry_run=False):
self.project_dir = Path(os.getenv() or os.getcwd())
distribution = self.get_distribution()
if distribution:
self.project_name = distribution.get_name()
else:
self.project_name = self.project_dir.name
if os.path.isdir(self.project_dir / "src"):
package_search_dir = self.project_dir / "src"
else:
package_search_dir = self.project_dir
created_package_dir = False
if not package_name:
package_name = self.project_name.replace("-", "_")
def get_matches(name):
possibles = [n for n in os.listdir(package_search_dir) if os.path.isdir(package_search_dir / n)]
return difflib.get_close_matches(name, possibles, n=1, cutoff=0.8)
close = get_matches(package_name)
if not close and "_" in package_name:
short_package_name = "_".join(package_name.split("_")[1:])
close = get_matches(short_package_name)
if not close:
if create_package_dir:
package_dir = package_search_dir / package_name
created_package_dir = True
if not dry_run:
print("Creating package directory at %s" % package_dir)
os.mkdir(package_dir)
else:
print("Would have created package directory at %s" % package_dir)
else:
raise CommandError("Could not guess the package name. Specify it using --name.")
else:
package_name = close[0]
self.package_name = package_name
self.package_dir = package_search_dir / package_name
if not os.path.exists(self.package_dir) and not created_package_dir:
raise CommandError("Package directory did not exist at %s. Perhaps specify it using --name" % self.package_dir)
|
Determine paths automatically and a little intelligently
|
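The package-name guess above leans on difflib.get_close_matches; a quick illustration with made-up directory names:
import difflib

candidates = ["my_package", "tests", "docs"]      # hypothetical directory listing
guess = difflib.get_close_matches("my-package".replace("-", "_"), candidates, n=1, cutoff=0.8)
print(guess)   # ['my_package']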
26,147 |
def match_list(lst, pattern, group_names=[]):
filtfn = re.compile(pattern).match
filtlst = filter_list(lst, filtfn)
if not group_names:
return [m.string for m in filtlst]
else:
return [m.group(group_names) for m in filtlst]
|
Parameters
----------
lst: list of str
regex: string
group_names: list of strings
See re.MatchObject group docstring
Returns
-------
list of strings
Filtered list, with the strings that match the pattern
|
26,148 |
def unindex_item(self, item):
name_property = getattr(self.__class__, "name_property", None)
if name_property is None:
return
name = getattr(item, name_property, None)
if name is None:
return
self.name_to_item.pop(name, None)
|
Un-index an item from our name_to_item dict.
:param item: the item to un-index
:type item: alignak.objects.item.Item
:return: None
|
26,149 |
def x_fit(self, test_length):
if (self.x + test_length) >= self.xmax:
return False
else:
return True
|
Test whether the line has enough space for the given length.
|
26,150 |
def guard_multi_verify(analysis):
remaining_verifications = analysis.getNumberOfRemainingVerifications()
if remaining_verifications <= 1:
return False
if was_submitted_by_current_user(analysis):
if not analysis.isSelfVerificationEnabled():
return False
if was_verified_by_current_user(analysis):
if not is_multi_verification_allowed(analysis):
return False
if current_user_was_last_verifier(analysis):
if not is_consecutive_multi_verification_allowed(analysis):
return False
for dependency in analysis.getDependencies():
if not is_verified_or_verifiable(dependency):
return False
return True
|
Return whether the transition "multi_verify" can be performed or not
The transition multi_verify will only take place if multi-verification of
results is enabled.
|
26,151 |
def check_image_is_3d(img):
if len(img.shape) < 3:
raise ValueError()
elif len(img.shape) == 3:
for dim_size in img.shape:
if dim_size < 1:
raise ValueError()
elif len(img.shape) == 4:
if img.shape[3] != 1:
raise ValueError()
else:
img = np.squeeze(img, axis=3)
elif len(img.shape) > 4:
        raise ValueError('Invalid shape for image: {}'.format(img.shape))
return img
|
Ensures the image loaded is 3d and nothing else.
|
26,152 |
def compile_file(self, filename, encoding="utf-8", bare=False):
if isinstance(filename, _BaseString):
filename = [filename]
scripts = []
for f in filename:
with io.open(f, encoding=encoding) as fp:
scripts.append(fp.read())
    return self.compile('\n'.join(scripts), bare=bare)
|
compile a CoffeeScript script file to a JavaScript code.
filename can be a list or tuple of filenames,
then contents of files are concatenated with line feeds.
if bare is True, then compile the JavaScript without the top-level
function safety wrapper (like the coffee command).
|
26,153 |
def parseDateText(self, dateString, sourceTime=None):
if sourceTime is None:
yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()
else:
yr, mth, dy, hr, mn, sec, wd, yd, isdst = sourceTime
currentMth = mth
currentDy = dy
accuracy = []
    debug and log.debug('parseDateText currentMth %s currentDy %s',
                        mth, dy)
    s = dateString.lower()
    m = self.ptc.CRE_DATE3.search(s)
    mth = m.group('mthname')
    mth = self.ptc.MonthOffsets[mth]
    accuracy.append('month')
    if m.group('day') is not None:
        dy = int(m.group('day'))
        accuracy.append('day')
    else:
        dy = 1
    if m.group('year') is not None:
        yr = int(m.group('year'))
        accuracy.append('year')
if yr < self.ptc.BirthdayEpoch:
yr += 2000
elif yr < 100:
yr += 1900
elif (mth < currentMth) or (mth == currentMth and dy < currentDy):
yr += self.ptc.YearParseStyle
with self.context() as ctx:
if dy > 0 and dy <= self.ptc.daysInMonth(mth, yr):
sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
ctx.updateAccuracy(*accuracy)
else:
sourceTime = time.localtime()
        debug and log.debug(
            'parseDateText mth %s dy %s yr %s sourceTime %s',
            mth, dy, yr, sourceTime)
return sourceTime
|
Parse long-form date strings::
'May 31st, 2006'
'Jan 1st'
'July 2006'
@type dateString: string
@param dateString: text to convert to a datetime
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: struct_time
@return: calculated C{struct_time} value of dateString
|
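For context, this method sits behind parsedatetime's public Calendar API; typical usage looks like the following (results for partial dates depend on the current date):
import parsedatetime

cal = parsedatetime.Calendar()
time_struct, parse_status = cal.parse("May 31st, 2006")
print(time_struct.tm_year, time_struct.tm_mon, time_struct.tm_mday, parse_status)
# 2006 5 31 1   (a parse_status of 1 means a date was recognised)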
26,154 |
def connect(self, uri, link_quality_callback, link_error_callback):
if not re.search(, uri):
raise WrongUriType()
if not re.search(,
uri):
raise WrongUriType()
uri_data = re.search(,
uri)
self.uri = uri
if self.cfusb is None:
self.cfusb = CfUsb(devid=int(uri_data.group(1)))
if self.cfusb.dev:
self.cfusb.set_crtp_to_usb(True)
else:
self.cfusb = None
raise Exception(.format(self.uri))
else:
raise Exception()
self.in_queue = queue.Queue()
self.out_queue = queue.Queue(50)
self._thread = _UsbReceiveThread(self.cfusb, self.in_queue,
link_quality_callback,
link_error_callback)
self._thread.start()
self.link_error_callback = link_error_callback
|
Connect the link driver to a specified URI of the format:
radio://<dongle nbr>/<radio channel>/[250K,1M,2M]
The callback for linkQuality can be called at any moment from the
driver to report back the link quality in percentage. The
callback from linkError will be called when an error occurs with
an error message.
|
26,155 |
def register(CommandSubClass):
name = CommandSubClass.name()
if name in Command._all_commands:
raise ValueError("Command already exists: " + name)
Command._all_commands[name] = CommandSubClass
return CommandSubClass
|
A class decorator for Command classes to register in the default set.
|
26,156 |
def __get_dbms_version(self, make_connection=True):
major, minor, _, _ = self.get_server_version(make_connection=make_connection)
    return '{}.{}'.format(major, minor)
|
Returns the 'DBMS Version' string
|
26,157 |
def merge(self, dataset):
def merge_data(source, dest):
for key, value in source.items():
if isinstance(value, dict):
merge_data(value, dest.setdefault(key, {}))
else:
dest[key] = value
return dest
merge_data(dataset.data, self._data)
for h in dataset.task_history:
if h not in self._task_history:
self._task_history.append(h)
|
Merge the specified dataset on top of the existing data.
This replaces all values in the existing dataset with the values from the
given dataset.
Args:
dataset (TaskData): A reference to the TaskData object that should be merged
on top of the existing object.
|
26,158 |
def set_dft_grid(self, radical_points=128, angular_points=302,
grid_type="Lebedev"):
available_lebedev_angular_points = {6, 18, 26, 38, 50, 74, 86, 110, 146,
170, 194, 230, 266, 302, 350, 434,
590, 770, 974, 1202, 1454, 1730,
2030, 2354, 2702, 3074, 3470, 3890,
4334, 4802, 5294}
if grid_type.lower() == "sg-0":
self.params["rem"]["xc_grid"] = 0
elif grid_type.lower() == "sg-1":
self.params["rem"]["xc_grid"] = 1
elif grid_type.lower() == "lebedev":
if angular_points not in available_lebedev_angular_points:
raise ValueError(str(angular_points) + " is not a valid "
"Lebedev angular points number")
self.params["rem"]["xc_grid"] = "{rp:06d}{ap:06d}".format(
rp=radical_points, ap=angular_points)
elif grid_type.lower() == "gauss-legendre":
self.params["rem"]["xc_grid"] = "-{rp:06d}{ap:06d}".format(
rp=radical_points, ap=angular_points)
else:
raise ValueError("Grid type " + grid_type + " is not supported "
"currently")
|
Set the grid for DFT numerical integrations.
Args:
radical_points: Radical points. (Integer)
angular_points: Angular points. (Integer)
grid_type: The type of the grid. There are two standard grids:
SG-1 and SG-0. The other two supported grids are "Lebedev" and
"Gauss-Legendre"
|
26,159 |
def _migration_required(connection):
stored_version = get_stored_version(connection)
actual_version = SCHEMA_VERSION
assert isinstance(stored_version, int)
assert isinstance(actual_version, int)
    assert stored_version <= actual_version
return stored_version < actual_version
|
Returns True if ambry models do not match to db tables. Otherwise returns False.
|
26,160 |
def _discover_cover_image(zf, opf_xmldoc, opf_filepath):
content = None
filepath = None
extension = None
tag = find_tag(opf_xmldoc, , , )
if tag and in tag.attributes.keys():
item_id = tag.attributes[].value
if item_id:
filepath, extension = find_img_tag(opf_xmldoc, , , item_id)
if not filepath:
filepath, extension = find_img_tag(opf_xmldoc, , , )
if not filepath:
filepath, extension = find_img_tag(opf_xmldoc, , , )
if filepath:
base_dir = os.path.dirname(opf_filepath)
coverpath = os.path.normpath(os.path.join(base_dir, filepath))
content = zf.read(coverpath)
content = base64.b64encode(content)
return content, extension
|
Find the cover image path in the OPF file.
Returns a tuple: (image content in base64, file extension)
|
26,161 |
def Max(self, k):
cdf = self.MakeCdf()
cdf.ps = [p ** k for p in cdf.ps]
return cdf
|
Computes the CDF of the maximum of k selections from this dist.
k: int
returns: new Cdf
|
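This works because the maximum of k independent draws satisfies P(max <= x) = F(x)^k, so raising every CDF probability to the k-th power yields exactly the CDF of the maximum. A plain-list check using a fair six-sided die:
ps = [i / 6 for i in range(1, 7)]     # F(1)..F(6) for one roll
ps_max3 = [p ** 3 for p in ps]        # P(max of 3 rolls <= x) = F(x)^3
print(ps_max3[5])                     # 1.0 -- the max is certainly <= 6
print(round(ps_max3[4], 3))           # 0.579 -- (5/6)**3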
26,162 |
def handle_document(self, item_session: ItemSession, filename: str) -> Actions:
self._waiter.reset()
action = self.handle_response(item_session)
if action == Actions.NORMAL:
self._statistics.increment(item_session.response.body.size())
item_session.set_status(Status.done, filename=filename)
return action
|
Process a successful document response.
Returns:
A value from :class:`.hook.Actions`.
|
26,163 |
def bezier_real_minmax(p):
local_extremizers = [0, 1]
if len(p) == 4:
a = [p.real for p in p]
denom = a[0] - 3*a[1] + 3*a[2] - a[3]
if denom != 0:
delta = a[1]**2 - (a[0] + a[1])*a[2] + a[2]**2 + (a[0] - a[1])*a[3]
if delta >= 0:
sqdelta = sqrt(delta)
tau = a[0] - 2*a[1] + a[2]
r1 = (tau + sqdelta)/denom
r2 = (tau - sqdelta)/denom
if 0 < r1 < 1:
local_extremizers.append(r1)
if 0 < r2 < 1:
local_extremizers.append(r2)
local_extrema = [bezier_point(a, t) for t in local_extremizers]
return min(local_extrema), max(local_extrema)
dcoeffs = bezier2polynomial(a, return_poly1d=True).deriv().coeffs
local_extremizers += polyroots01(dcoeffs)
local_extrema = [bezier_point(a, t) for t in local_extremizers]
return min(local_extrema), max(local_extrema)
|
returns the minimum and maximum for any real cubic bezier
|
26,164 |
def experience(
self, agent_indices, observ, action, reward, unused_done, unused_nextob):
with tf.name_scope():
return tf.cond(
self._is_training,
lambda: self._define_experience(
agent_indices, observ, action, reward), str)
|
Process the transition tuple of the current step.
When training, add the current transition tuple to the memory and update
the streaming statistics for observations and rewards. A summary string is
returned if requested at this step.
Args:
agent_indices: Tensor containing current batch indices.
observ: Batch tensor of observations.
action: Batch tensor of actions.
reward: Batch tensor of rewards.
unused_done: Batch tensor of done flags.
unused_nextob: Batch tensor of successor observations.
Returns:
Summary tensor.
|
26,165 |
def add_comment(node, text, location='above'):
    anno.setanno(node, 'comment', dict(location=location, text=text), safe=False)
return node
|
Add a comment to the given node.
If the `SourceWithCommentGenerator` class is used these comments will be
output as part of the source code.
Note that a node can only contain one comment. Subsequent calls to
`add_comment` will override the existing comments.
Args:
node: The AST node whose containing statement will be commented.
text: A comment string.
location: Where the comment should appear. Valid values are 'above',
'below' and 'right'
Returns:
The node with the comment stored as an annotation.
|
26,166 |
def _get_all_objs(
self, server_instance, regexes=None, include_only_marked=False, tags=None, use_guest_hostname=False
):
start = time.time()
if tags is None:
tags = []
obj_list = defaultdict(list)
all_objects = self._collect_mors_and_attributes(server_instance)
rootFolder = server_instance.content.rootFolder
all_objects[rootFolder] = {"name": rootFolder.name, "parent": None}
for obj, properties in all_objects.items():
instance_tags = []
if not self._is_excluded(obj, properties, regexes, include_only_marked) and any(
isinstance(obj, vimtype) for vimtype in RESOURCE_TYPE_METRICS
):
if use_guest_hostname:
hostname = properties.get("guest.hostName", properties.get("name", "unknown"))
else:
hostname = properties.get("name", "unknown")
if properties.get("parent"):
instance_tags += self._get_parent_tags(obj, all_objects)
if isinstance(obj, vim.VirtualMachine):
                vsphere_type = 'vsphere_type:vm'
vimtype = vim.VirtualMachine
mor_type = "vm"
power_state = properties.get("runtime.powerState")
if power_state != vim.VirtualMachinePowerState.poweredOn:
self.log.debug("Skipping VM in state {}".format(ensure_unicode(power_state)))
continue
host_mor = properties.get("runtime.host")
host = "unknown"
if host_mor:
host = ensure_unicode(all_objects.get(host_mor, {}).get("name", "unknown"))
                instance_tags.append('vsphere_host:{}'.format(ensure_unicode(host)))
elif isinstance(obj, vim.HostSystem):
                vsphere_type = 'vsphere_type:host'
vimtype = vim.HostSystem
mor_type = "host"
elif isinstance(obj, vim.Datastore):
                vsphere_type = 'vsphere_type:datastore'
                instance_tags.append(
                    'vsphere_datastore:{}'.format(ensure_unicode(properties.get("name", "unknown")))
                )
hostname = None
vimtype = vim.Datastore
mor_type = "datastore"
elif isinstance(obj, vim.Datacenter):
                vsphere_type = 'vsphere_type:datacenter'
instance_tags.append(
"vsphere_datacenter:{}".format(ensure_unicode(properties.get("name", "unknown")))
)
hostname = None
vimtype = vim.Datacenter
mor_type = "datacenter"
elif isinstance(obj, vim.ClusterComputeResource):
                vsphere_type = 'vsphere_type:cluster'
instance_tags.append("vsphere_cluster:{}".format(ensure_unicode(properties.get("name", "unknown"))))
hostname = None
vimtype = vim.ClusterComputeResource
mor_type = "cluster"
else:
vsphere_type = None
if vsphere_type:
instance_tags.append(vsphere_type)
obj_list[vimtype].append(
{"mor_type": mor_type, "mor": obj, "hostname": hostname, "tags": tags + instance_tags}
)
self.log.debug("All objects with attributes cached in {} seconds.".format(time.time() - start))
return obj_list
|
Explore vCenter infrastructure to discover hosts, virtual machines, etc.
and compute their associated tags.
Start at the vCenter `rootFolder`, so as to collect every object.
Example topology:
```
rootFolder
- datacenter1
- compute_resource1 == cluster
- host1
- host2
- host3
- compute_resource2
- host5
- vm1
- vm2
```
If it's a node we want to query metric for, it will be enqueued at the
instance level and will be processed by a subsequent job.
|
26,167 |
def get_version(self, dependency):
logger.debug("getting installed version for %s", dependency)
stdout = helpers.logged_exec([self.pip_exe, "show", str(dependency)])
    version = [line for line in stdout if line.startswith('Version:')]
if len(version) == 1:
version = version[0].strip().split()[1]
logger.debug("Installed version of %s is: %s", dependency, version)
return version
else:
        logger.error(
            "Could not determine the installed version of %s from 'pip show' output", dependency)
        return
|
Return the installed version parsing the output of 'pip show'.
|
26,168 |
def p_expression_sla(self, p):
p[0] = Sll(p[1], p[3], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1))
|
expression : expression LSHIFTA expression
|
26,169 |
def set(self, dic, val=None, force=False):
if val is not None:
dic = {dic:val}
for key_original, val in list(dict(dic).items()):
key = self.corrected_key(key_original)
if not self._lock_setting or \
key in CMAOptions.versatile_options():
self[key] = val
else:
_print_warning( + str(key_original) +
,
, )
return self
|
set can assign versatile options from
`CMAOptions.versatile_options()` with a new value, use `init()`
for the others.
Arguments
---------
`dic`
either a dictionary or a key. In the latter
case, `val` must be provided
`val`
value for `key`, approximate match is sufficient
`force`
force setting of non-versatile options, use with caution
This method will be most probably used with the ``opts`` attribute of
a `CMAEvolutionStrategy` instance.
|
26,170 |
def write(self, data, sections=None):
if self.error[0]:
self.status = self.error[0]
data = b(self.error[1])
if not self.headers_sent:
self.send_headers(data, sections)
        if self.request_method != 'HEAD':
            try:
                if self.chunked:
                    self.conn.sendall(b('%x\r\n%s\r\n' % (len(data), data)))
else:
self.conn.sendall(data)
except socket.timeout:
self.closeConnection = True
except socket.error:
self.closeConnection = True
|
Write the data to the output socket.
|
26,171 |
def workflow_close(object_id, input_params={}, always_retry=True, **kwargs):
    return DXHTTPRequest('/%s/close' % object_id, input_params, always_retry=always_retry, **kwargs)
|
Invokes the /workflow-xxxx/close API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Data-Object-Lifecycle#API-method%3A-%2Fclass-xxxx%2Fclose
|
26,172 |
def save_pid(name):
    if os.environ.get('MITOGEN_SAVE_PIDS'):
        with open('.ansible-%s.pid' % (name,), 'w') as fp:
            fp.write(str(os.getpid()))
|
When debugging and profiling, it is very annoying to poke through the
process list to discover the currently running Ansible and MuxProcess IDs,
especially when trying to catch an issue during early startup. So here, if
a magic environment variable is set, stash them in hidden files in the CWD::
alias muxpid="cat .ansible-mux.pid"
alias anspid="cat .ansible-controller.pid"
gdb -p $(muxpid)
perf top -p $(anspid)
|
26,173 |
def expand_factor_conditions(s, env):
try:
        factor, value = re.split(r':\s*', s)
except ValueError:
return s
if matches_factor_conditions(factor, env):
return value
else:
        return ''
|
If env matches the expanded factor then return value else return ''.
Example
-------
>>> s = 'py{33,34}: docformatter'
>>> expand_factor_conditions(s, Env(name="py34", ...))
"docformatter"
>>> expand_factor_conditions(s, Env(name="py26", ...))
""
|
26,174 |
def get_platforms_set():
platforms = set([x.lower() for x in platform._supported_dists])
platforms |= set([, , , ])
return platforms
|
Returns set of all possible platforms
|
26,175 |
def filter(self, *args, **kwargs):
if args or kwargs:
self.q_filters = Q(self.q_filters & Q(*args, **kwargs))
return self
|
Apply filters to the existing nodes in the set.
:param kwargs: filter parameters
Filters mimic Django's syntax with the double '__' to separate field and operators.
e.g `.filter(salary__gt=20000)` results in `salary > 20000`.
The following operators are available:
* 'lt': less than
* 'gt': greater than
* 'lte': less than or equal to
* 'gte': greater than or equal to
* 'ne': not equal to
* 'in': matches one of list (or tuple)
* 'isnull': is null
* 'regex': matches supplied regex (neo4j regex format)
* 'exact': exactly match string (just '=')
* 'iexact': case insensitive match string
* 'contains': contains string
* 'icontains': case insensitive contains
* 'startswith': string starts with
* 'istartswith': case insensitive string starts with
* 'endswith': string ends with
* 'iendswith': case insensitive string ends with
:return: self
|
26,176 |
def decode(self, msgbuf):
if msgbuf[0] != PROTOCOL_MARKER_V1:
headerlen = 10
try:
magic, mlen, incompat_flags, compat_flags, seq, srcSystem, srcComponent, msgIdlow, msgIdhigh = struct.unpack(, msgbuf[:headerlen])
except struct.error as emsg:
raise MAVError( % emsg)
msgId = msgIdlow | (msgIdhigh<<16)
mapkey = msgId
else:
headerlen = 6
try:
magic, mlen, seq, srcSystem, srcComponent, msgId = struct.unpack(, msgbuf[:headerlen])
incompat_flags = 0
compat_flags = 0
except struct.error as emsg:
raise MAVError( % emsg)
mapkey = msgId
if (incompat_flags & MAVLINK_IFLAG_SIGNED) != 0:
signature_len = MAVLINK_SIGNATURE_BLOCK_LEN
else:
signature_len = 0
if ord(magic) != PROTOCOL_MARKER_V1 and ord(magic) != PROTOCOL_MARKER_V2:
raise MAVError("invalid MAVLink prefix " % magic)
if mlen != len(msgbuf)-(headerlen+2+signature_len):
raise MAVError( % (len(msgbuf)-(headerlen+2+signature_len), mlen, msgId, headerlen))
if not mapkey in mavlink_map:
raise MAVError( % str(mapkey))
type = mavlink_map[mapkey]
fmt = type.format
order_map = type.orders
len_map = type.lengths
crc_extra = type.crc_extra
try:
crc, = struct.unpack(, msgbuf[-(2+signature_len):][:2])
except struct.error as emsg:
raise MAVError( % emsg)
crcbuf = msgbuf[1:-(2+signature_len)]
if True:
crcbuf.append(crc_extra)
crc2 = x25crc(crcbuf)
if crc != crc2.crc:
raise MAVError( % (msgId, crc, crc2.crc))
sig_ok = False
if self.signing.secret_key is not None:
accept_signature = False
if signature_len == MAVLINK_SIGNATURE_BLOCK_LEN:
sig_ok = self.check_signature(msgbuf, srcSystem, srcComponent)
accept_signature = sig_ok
if sig_ok:
self.signing.goodsig_count += 1
else:
self.signing.badsig_count += 1
if not accept_signature and self.signing.allow_unsigned_callback is not None:
accept_signature = self.signing.allow_unsigned_callback(self, msgId)
if accept_signature:
self.signing.unsigned_count += 1
else:
self.signing.reject_count += 1
elif self.signing.allow_unsigned_callback is not None:
accept_signature = self.signing.allow_unsigned_callback(self, msgId)
if accept_signature:
self.signing.unsigned_count += 1
else:
self.signing.reject_count += 1
if not accept_signature:
raise MAVError()
csize = struct.calcsize(fmt)
mbuf = msgbuf[headerlen:-(2+signature_len)]
if len(mbuf) < csize:
mbuf.extend([0]*(csize - len(mbuf)))
if len(mbuf) < csize:
raise MAVError( % (
type, len(mbuf), csize))
mbuf = mbuf[:csize]
try:
t = struct.unpack(fmt, mbuf)
except struct.error as emsg:
raise MAVError( % (
type, fmt, len(mbuf), emsg))
tlist = list(t)
if True:
t = tlist[:]
if sum(len_map) == len(len_map):
for i in range(0, len(tlist)):
tlist[i] = t[order_map[i]]
else:
tlist = []
for i in range(0, len(order_map)):
order = order_map[i]
L = len_map[order]
tip = sum(len_map[:order])
field = t[tip]
if L == 1 or isinstance(field, str):
tlist.append(field)
else:
tlist.append(t[tip:(tip + L)])
for i in range(0, len(tlist)):
if isinstance(tlist[i], str):
tlist[i] = str(MAVString(tlist[i]))
t = tuple(tlist)
try:
m = type(*t)
except Exception as emsg:
raise MAVError( % (type, emsg))
m._signed = sig_ok
if m._signed:
m._link_id = msgbuf[-13]
m._msgbuf = msgbuf
m._payload = msgbuf[6:-(2+signature_len)]
m._crc = crc
m._header = MAVLink_header(msgId, incompat_flags, compat_flags, mlen, seq, srcSystem, srcComponent)
return m
|
decode a buffer as a MAVLink message
|
26,177 |
def get_queryset(self, value, row, *args, **kwargs):
return self.model.objects.all()
|
Returns a queryset of all objects for this Model.
Overwrite this method if you want to limit the pool of objects from
which the related object is retrieved.
:param value: The field's value in the datasource.
:param row: The datasource's current row.
As an example; if you'd like to have ForeignKeyWidget look up a Person
by their pre- **and** lastname column, you could subclass the widget
like so::
class FullNameForeignKeyWidget(ForeignKeyWidget):
def get_queryset(self, value, row):
return self.model.objects.filter(
first_name__iexact=row["first_name"],
last_name__iexact=row["last_name"]
)
|
26,178 |
def _init_client(self, from_archive=False):
return BugzillaClient(self.url, user=self.user, password=self.password,
max_bugs_csv=self.max_bugs_csv,
archive=self.archive, from_archive=from_archive)
|
Init client
|
26,179 |
def _try_resolve_sam_resource_refs(self, input, supported_resource_refs):
if not self._is_intrinsic_dict(input):
return input
function_type = list(input.keys())[0]
return self.supported_intrinsics[function_type].resolve_resource_refs(input, supported_resource_refs)
|
Try to resolve SAM resource references on the given template. If the given object looks like one of the
supported intrinsics, it calls the appropriate resolution on it. If not, this method returns the original input
unmodified.
:param dict input: Dictionary that may represent an intrinsic function
:param SupportedResourceReferences supported_resource_refs: Object containing information about available
resource references and the values they resolve to.
:return: Modified input dictionary with references resolved
|
26,180 |
def _postprocess_for_cut(fac, bins, retbins, x_is_series,
series_index, name, dtype):
if x_is_series:
fac = Series(fac, index=series_index, name=name)
if not retbins:
return fac
bins = _convert_bin_to_datelike_type(bins, dtype)
return fac, bins
|
handles post processing for the cut method where
we combine the index information if the originally passed
datatype was a series
|
26,181 |
def endpoint_catalog(catalog=None):
if connexion.request.is_json:
catalog = UserAuth.from_dict(connexion.request.get_json())
if (not hasAccess()):
return redirectUnauthorized()
driver = LoadedDrivers.getDefaultDriver()
auth = None
if (catalog):
auth = catalog
return Response(status=200, body=driver.getCatalog(auth))
|
Retrieve the endpoint catalog
Retrieve the endpoint catalog # noqa: E501
:param catalog: The data needed to get a catalog
:type catalog: dict | bytes
:rtype: Response
|
26,182 |
def compute_threat_list_diff(
self,
threat_type,
constraints,
version_token=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
if "compute_threat_list_diff" not in self._inner_api_calls:
self._inner_api_calls[
"compute_threat_list_diff"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.compute_threat_list_diff,
default_retry=self._method_configs["ComputeThreatListDiff"].retry,
default_timeout=self._method_configs["ComputeThreatListDiff"].timeout,
client_info=self._client_info,
)
request = webrisk_pb2.ComputeThreatListDiffRequest(
threat_type=threat_type,
constraints=constraints,
version_token=version_token,
)
return self._inner_api_calls["compute_threat_list_diff"](
request, retry=retry, timeout=timeout, metadata=metadata
)
|
Gets the most recent threat list diffs.
Example:
>>> from google.cloud import webrisk_v1beta1
>>> from google.cloud.webrisk_v1beta1 import enums
>>>
>>> client = webrisk_v1beta1.WebRiskServiceV1Beta1Client()
>>>
>>> # TODO: Initialize `threat_type`:
>>> threat_type = enums.ThreatType.THREAT_TYPE_UNSPECIFIED
>>>
>>> # TODO: Initialize `constraints`:
>>> constraints = {}
>>>
>>> response = client.compute_threat_list_diff(threat_type, constraints)
Args:
threat_type (~google.cloud.webrisk_v1beta1.types.ThreatType): Required. The ThreatList to update.
constraints (Union[dict, ~google.cloud.webrisk_v1beta1.types.Constraints]): The constraints associated with this request.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.webrisk_v1beta1.types.Constraints`
version_token (bytes): The current version token of the client for the requested list (the
client version that was received from the last successful diff).
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.webrisk_v1beta1.types.ComputeThreatListDiffResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
|
26,183 |
def CrearLiquidacion(self, tipo_cbte, pto_vta, nro_cbte, fecha, periodo,
iibb_adquirente=None, domicilio_sede=None,
inscripcion_registro_publico=None,
datos_adicionales=None, alicuota_iva=None, **kwargs):
"Inicializa internamente los datos de una liquidación para autorizar"
liq = {: tipo_cbte, : pto_vta,
: nro_cbte, : fecha,
: periodo, : iibb_adquirente,
: domicilio_sede,
: inscripcion_registro_publico,
: datos_adicionales,
: alicuota_iva,
}
liq["condicionVenta"] = []
self.solicitud = dict(liquidacion=liq,
bonificacionPenalizacion=[],
otroImpuesto=[],
remito=[]
)
return True
|
Internally initializes the data of a settlement (liquidación) to be authorized
|
26,184 |
def bishop88_i_from_v(voltage, photocurrent, saturation_current,
resistance_series, resistance_shunt, nNsVth,
                       method='newton'):
args = (photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth)
def fv(x, v, *a):
return bishop88(x, *a)[1] - v
    if method.lower() == 'brentq':
voc_est = estimate_voc(photocurrent, saturation_current, nNsVth)
def vd_from_brent(voc, v, iph, isat, rs, rsh, gamma):
return brentq(fv, 0.0, voc, args=(v, iph, isat, rs, rsh, gamma))
vd_from_brent_vectorized = np.vectorize(vd_from_brent)
vd = vd_from_brent_vectorized(voc_est, voltage, *args)
    elif method.lower() == 'newton':
args, v0 = _prepare_newton_inputs((voltage,), args, voltage)
vd = newton(func=lambda x, *a: fv(x, voltage, *a), x0=v0,
fprime=lambda x, *a: bishop88(x, *a, gradients=True)[4],
args=args)
else:
        raise NotImplementedError("Method '%s' isn't implemented" % method)
return bishop88(vd, *args)[0]
|
Find current given any voltage.
Parameters
----------
voltage : numeric
voltage (V) in volts [V]
photocurrent : numeric
photogenerated current (Iph or IL) in amperes [A]
saturation_current : numeric
diode dark or saturation current (Io or Isat) in amperes [A]
resistance_series : numeric
series resistance (Rs) in ohms
resistance_shunt : numeric
shunt resistance (Rsh) in ohms
nNsVth : numeric
product of diode ideality factor (n), number of series cells (Ns), and
thermal voltage (Vth = k_b * T / q_e) in volts [V]
method : str
one of two optional search methods: either ``'brentq'``, a reliable and
bounded method or ``'newton'`` which is the default.
Returns
-------
current : numeric
current (I) at the specified voltage (V) in amperes [A]
|
26,185 |
def _unpack_result(klass, result):
result = result.unpack()
if len(result) == 1:
result = result[0]
elif len(result) == 0:
result = None
return result
|
Convert a D-BUS return variant into an appropriate return value
|
26,186 |
def find_all(self, *args, **kwargs):
    op = operator.methodcaller('find_all', *args, **kwargs)
return self._wrap_multi(op)
|
Like :meth:`find`, but selects all matches (not just the first one).
Returns a :class:`Collection`.
If no elements match, this returns a Collection with no items.
|
26,187 |
def get_lock_requests(self):
d = defaultdict(list)
if self._context:
for variant in self._context.resolved_packages:
name = variant.name
version = variant.version
lock = self.patch_locks.get(name)
if lock is None:
lock = self.default_patch_lock
request = get_lock_request(name, version, lock)
if request is not None:
d[lock].append(request)
return d
|
Take the current context, and the current patch locks, and determine
the effective requests that will be added to the main request.
Returns:
A dict of (PatchLock, [Requirement]) tuples. Each requirement will be
a weak package reference. If there is no current context, an empty
dict will be returned.
|
26,188 |
def calls(self, call):
exp = self._get_current_call()
exp.call_replacement = call
return self
|
Redefine a call.
The fake method will execute your function. I.E.::
>>> f = Fake().provides('hello').calls(lambda: 'Why, hello there')
>>> f.hello()
'Why, hello there'
|
26,189 |
def subscriptions_list(**kwargs):
    result = {}
    subconn = __utils__['azurearm.get_client']('subscription', **kwargs)
    try:
        subs = __utils__['azurearm.paged_object_to_list'](subconn.subscriptions.list())

        for sub in subs:
            result[sub['subscription_id']] = sub
    except CloudError as exc:
        __utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
        result = {'error': str(exc)}

    return result
|
.. versionadded:: 2019.2.0
List all subscriptions for a tenant.
CLI Example:
.. code-block:: bash
salt-call azurearm_resource.subscriptions_list
|
26,190 |
def set_group(self, group):
_check_call(_LIB.XGDMatrixSetGroup(self.handle,
c_array(ctypes.c_uint, group),
c_bst_ulong(len(group))))
|
Set group size of DMatrix (used for ranking).
Parameters
----------
group : array like
Group size of each group
|
26,191 |
def _group(self, rdd):
return rdd.reduceByKey(lambda x, y: x.append(y))
|
Group together the values with the same key.
|
26,192 |
def _advapi32_generate_pair(algorithm, bit_size=None):
    if algorithm == 'rsa':
provider = Advapi32Const.MS_ENH_RSA_AES_PROV
algorithm_id = Advapi32Const.CALG_RSA_SIGN
        struct_type = 'RSABLOBHEADER'
else:
provider = Advapi32Const.MS_ENH_DSS_DH_PROV
algorithm_id = Advapi32Const.CALG_DSS_SIGN
        struct_type = 'DSSBLOBHEADER'
context_handle = None
key_handle = None
try:
context_handle = open_context_handle(provider, verify_only=False)
        key_handle_pointer = new(advapi32, 'HCRYPTKEY *')
flags = (bit_size << 16) | Advapi32Const.CRYPT_EXPORTABLE
res = advapi32.CryptGenKey(context_handle, algorithm_id, flags, key_handle_pointer)
handle_error(res)
key_handle = unwrap(key_handle_pointer)
        out_len = new(advapi32, 'DWORD *')
res = advapi32.CryptExportKey(
key_handle,
null(),
Advapi32Const.PRIVATEKEYBLOB,
0,
null(),
out_len
)
handle_error(res)
buffer_length = deref(out_len)
buffer_ = buffer_from_bytes(buffer_length)
res = advapi32.CryptExportKey(
key_handle,
null(),
Advapi32Const.PRIVATEKEYBLOB,
0,
buffer_,
out_len
)
handle_error(res)
blob_struct_pointer = struct_from_buffer(advapi32, struct_type, buffer_)
blob_struct = unwrap(blob_struct_pointer)
struct_size = sizeof(advapi32, blob_struct)
private_blob = bytes_from_buffer(buffer_, buffer_length)[struct_size:]
        if algorithm == 'rsa':
public_info, private_info = _advapi32_interpret_rsa_key_blob(bit_size, blob_struct, private_blob)
else:
            public_out_len = new(advapi32, 'DWORD *')
res = advapi32.CryptExportKey(
key_handle,
null(),
Advapi32Const.PUBLICKEYBLOB,
0,
null(),
public_out_len
)
handle_error(res)
public_buffer_length = deref(public_out_len)
public_buffer = buffer_from_bytes(public_buffer_length)
res = advapi32.CryptExportKey(
key_handle,
null(),
Advapi32Const.PUBLICKEYBLOB,
0,
public_buffer,
public_out_len
)
handle_error(res)
public_blob = bytes_from_buffer(public_buffer, public_buffer_length)[struct_size:]
public_info, private_info = _advapi32_interpret_dsa_key_blob(bit_size, public_blob, private_blob)
return (load_public_key(public_info), load_private_key(private_info))
finally:
if context_handle:
close_context_handle(context_handle)
if key_handle:
advapi32.CryptDestroyKey(key_handle)
|
Generates a public/private key pair using CryptoAPI
:param algorithm:
The key algorithm - "rsa" or "dsa"
:param bit_size:
An integer - used for "rsa" and "dsa". For "rsa" the value may be 1024,
2048, 3072 or 4096. For "dsa" the value may be 1024.
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A 2-element tuple of (PublicKey, PrivateKey). The contents of each key
may be saved by calling .asn1.dump().
|
26,193 |
def normalize_partial_name(decl):
if decl.cache.normalized_partial_name is None:
decl.cache.normalized_partial_name = normalize(decl.partial_name)
return decl.cache.normalized_partial_name
|
Cached variant of normalize
Args:
decl (declaration.declaration_t): the declaration
Returns:
str: normalized name
|
26,194 |
def f_translate_key(self, key):
if isinstance(key, int):
if key == 0:
key = self.v_name
else:
                key = self.v_name + '_%d' % key
return key
|
Translates integer indices into the appropriate names
|
26,195 |
def add_step(step_name, func):
if isinstance(func, Callable):
        logger.debug('registering step {}'.format(step_name))
_STEPS[step_name] = _StepFuncWrapper(step_name, func)
else:
        raise TypeError('func must be a callable')
|
Add a step function to Orca.
The function's argument names and keyword argument values
will be matched to registered variables when the function
needs to be evaluated by Orca.
The argument name "iter_var" may be used to have the current
iteration variable injected.
Parameters
----------
step_name : str
func : callable
|
26,196 |
def zoom(params, factor):
params.zoom /= factor
n_x = params.mb_cx / params.zoom
n_y = params.mb_cy / params.zoom
params.plane_x0 = int((n_x + 1.0) * params.plane_w / (2.0 * params.plane_ratio)) - params.plane_w // 2
params.plane_y0 = int((n_y + 1.0) * params.plane_h / 2.0) - params.plane_h // 2
|
Applies a zoom on the current parameters.
Computes the top-left plane-space coordinates from the Mandelbrot-space coordinates.
:param params: Current application parameters.
:param factor: Zoom factor by which the zoom ratio is divided (bigger factor, more zoom)
|
26,197 |
def read_until_done(self, command, timeout=None):
message = self.read_message(timeout)
    while message.command != 'DONE':
message.assert_command_is(command)
yield message
message = self.read_message(timeout)
|
Yield messages read until we receive a 'DONE' command.
Read messages of the given command until we receive a 'DONE' command. If a
command different than the requested one is received, an AdbProtocolError
is raised.
Args:
command: The command to expect, like 'DENT' or 'DATA'.
timeout: The timeouts.PolledTimeout to use for this operation.
Yields:
Messages read, of type self.RECV_MSG_TYPE, see read_message().
Raises:
AdbProtocolError: If an unexpected command is read.
AdbRemoteError: If a 'FAIL' message is read.
|
26,198 |
def clean_up(self):
if self.selected_col:
col_label_value = self.grid.GetColLabelValue(self.selected_col)
col_label_value = col_label_value.strip()
self.grid.SetColLabelValue(self.selected_col, col_label_value)
for row in range(self.grid.GetNumberRows()):
                self.grid.SetCellBackgroundColour(row, self.selected_col, 'white')
self.grid.ForceRefresh()
|
de-select grid cols, refresh grid
|
26,199 |
def lint(fix_imports):
from glob import glob
from subprocess import call
skip = [
,
,
,
,
,
]
    root_files = glob('*.py')
    root_dirs = [name for name in next(os.walk('.'))[1]
                 if not name.startswith('.')]
files_and_dirs = [x for x in root_files + root_dirs if x not in skip]
def execute_tool(desc, *args):
command = list(args) + files_and_dirs
click.echo(f"{desc}: {.join(command)}")
ret = call(command)
if ret != 0:
exit(ret)
if fix_imports:
        execute_tool('Fix imports', 'isort', '-rc')
    execute_tool('Checking code style', 'flake8')
|
Run flake8.
|