Unnamed: 0
int64 0
389k
| code
stringlengths 26
79.6k
| docstring
stringlengths 1
46.9k
|
---|---|---|
19,900 | def set_shell(self, svc_ref):
if svc_ref is None:
return
with self._lock:
self._shell_ref = svc_ref
self._shell = self._context.get_service(self._shell_ref)
if readline is not None:
readline.set_completer(self.readline_completer)
self._shell_event.set() | Binds the given shell service.
:param svc_ref: A service reference |
19,901 | def extract_keys(self,key_list):
if isinstance(key_list,basestring):
key_list = key_list.split()
return type(self)([ (k,self[k]) for k in key_list if k in self ]) | >>> d = {'a':1,'b':2,'c':3}
>>> print d.extract_keys('b,c,d')
>>> {'b':2,'c':3}
>>> print d.extract_keys(['b','c','d'])
>>> {'b':2,'c':3} |
19,902 | def _validate_action_parameters(func, params):
if params is not None:
valid_fields = [getattr(fields, f) for f in dir(fields) \
if f.startswith("FIELD_")]
for param in params:
param_name, field_type = param[], param[]
if param_name not in func.__code__.co_varnames:
raise AssertionError("Unknown parameter name {0} specified for"\
" action {1}".format(
param_name, func.__name__))
if field_type not in valid_fields:
raise AssertionError("Unknown field type {0} specified for"\
" action {1} param {2}".format(
field_type, func.__name__, param_name)) | Verifies that the parameters specified are actual parameters for the
function `func`, and that the field types are FIELD_* types in fields. |
19,903 | def _migrate_subresource(subresource, parent, migrations):
for key, doc in getattr(parent, subresource.parent_key, {}).items():
for migration in migrations[]:
instance = migration(subresource(id=key, **doc))
parent._resource[] = unicode(migration.version)
instance = _migrate_subresources(
instance,
migrations[]
)
doc = instance._resource
doc.pop(, None)
doc.pop(instance.resource_type + , None)
getattr(parent, subresource.parent_key)[key] = doc
return parent | Migrate a resource's subresource
:param subresource: the perch.SubResource instance
:param parent: the parent perch.Document instance
:param migrations: the migrations for a resource |
19,904 | def _make_request_with_auth_fallback(self, url, headers=None, params=None):
self.log.debug("Request URL and Params: %s, %s", url, params)
try:
resp = requests.get(
url,
headers=headers,
verify=self._ssl_verify,
params=params,
timeout=DEFAULT_API_REQUEST_TIMEOUT,
proxies=self.proxy_config,
)
resp.raise_for_status()
except requests.exceptions.HTTPError as e:
self.log.debug("Error contacting openstack endpoint: %s", e)
if resp.status_code == 401:
self.log.info()
self.delete_current_scope()
elif resp.status_code == 409:
raise InstancePowerOffFailure()
elif resp.status_code == 404:
raise e
else:
raise
return resp.json() | Generic request handler for OpenStack API requests
Raises specialized Exceptions for commonly encountered error codes |
19,905 | def _pretend_to_run(self, migration, method):
for query in self._get_queries(migration, method):
name = migration.__class__.__name__
self._note( % (name, query)) | Pretend to run the migration.
:param migration: The migration
:type migration: eloquent.migrations.migration.Migration
:param method: The method to execute
:type method: str |
19,906 | def send_command(self, command, as_list=False):
action = actions.Action({: command, : },
as_list=as_list)
return self.send_action(action) | Send a :class:`~panoramisk.actions.Command` to the server::
manager = Manager()
resp = manager.send_command('http show status')
Return a response :class:`~panoramisk.message.Message`.
See https://wiki.asterisk.org/wiki/display/AST/ManagerAction_Command |
19,907 | def port_bindings(val, **kwargs):
validate_ip_addrs = kwargs.get(, True)
if not isinstance(val, dict):
if not isinstance(val, list):
try:
val = helpers.split(val)
except AttributeError:
val = helpers.split(six.text_type(val))
for idx in range(len(val)):
if not isinstance(val[idx], six.string_types):
val[idx] = six.text_type(val[idx])
def _format_port(port_num, proto):
return six.text_type(port_num) + if proto.lower() == else port_num
bindings = {}
for binding in val:
bind_parts = helpers.split(binding, )
num_bind_parts = len(bind_parts)
if num_bind_parts == 1:
container_port = six.text_type(bind_parts[0])
if container_port == :
raise SaltInvocationError(
)
container_port, _, proto = container_port.partition()
try:
start, end = helpers.get_port_range(container_port)
except ValueError as exc:
raise SaltInvocationError(exc.__str__())
bind_vals = [
(_format_port(port_num, proto), None)
for port_num in range(start, end + 1)
]
elif num_bind_parts == 2:
if bind_parts[0] == :
raise SaltInvocationError(
{0}\.format(binding)
)
if bind_parts[1] == :
raise SaltInvocationError(
{0}\.format(binding)
)
container_port, _, proto = bind_parts[1].partition()
try:
cport_start, cport_end = \
helpers.get_port_range(container_port)
hport_start, hport_end = \
helpers.get_port_range(bind_parts[0])
except ValueError as exc:
raise SaltInvocationError(exc.__str__())
if (hport_end - hport_start) != (cport_end - cport_start):
raise SaltInvocationError(
.format(bind_parts[0], container_port)
)
cport_list = list(range(cport_start, cport_end + 1))
hport_list = list(range(hport_start, hport_end + 1))
bind_vals = [
(_format_port(cport_list[x], proto), hport_list[x])
for x in range(len(cport_list))
]
elif num_bind_parts == 3:
host_ip, host_port = bind_parts[0:2]
if validate_ip_addrs:
helpers.validate_ip(host_ip)
container_port, _, proto = bind_parts[2].partition()
try:
cport_start, cport_end = \
helpers.get_port_range(container_port)
except ValueError as exc:
raise SaltInvocationError(exc.__str__())
cport_list = list(range(cport_start, cport_end + 1))
if host_port == :
hport_list = [None] * len(cport_list)
else:
try:
hport_start, hport_end = \
helpers.get_port_range(host_port)
except ValueError as exc:
raise SaltInvocationError(exc.__str__())
hport_list = list(range(hport_start, hport_end + 1))
if (hport_end - hport_start) != (cport_end - cport_start):
raise SaltInvocationError(
.format(host_port, container_port)
)
bind_vals = [(
_format_port(val, proto),
(host_ip,) if hport_list[idx] is None
else (host_ip, hport_list[idx])
) for idx, val in enumerate(cport_list)]
else:
raise SaltInvocationError(
{0}\
.format(
binding, num_bind_parts
)
)
for cport, bind_def in bind_vals:
if cport not in bindings:
bindings[cport] = bind_def
else:
if isinstance(bindings[cport], list):
bindings[cport].append(bind_def)
else:
bindings[cport] = [bindings[cport], bind_def]
for idx in range(len(bindings[cport])):
if bindings[cport][idx] is None:
try:
bindings[cport][idx] = int(cport.split()[0])
except AttributeError:
bindings[cport][idx] = cport
val = bindings
return val | On the CLI, these are passed as multiple instances of a given CLI option.
In Salt, we accept these as a comma-delimited list but the API expects a
Python dictionary mapping ports to their bindings. The format the API
expects is complicated depending on whether or not the external port maps
to a different internal port, or if the port binding is for UDP instead of
TCP (the default). For reference, see the "Port bindings" section in the
docker-py documentation at the following URL:
http://docker-py.readthedocs.io/en/stable/api.html |
19,908 | def run_command(self, cmd, new_prompt=True):
if cmd == :
self.exit_flag = True
self.write()
return
special_pattern = r"^%s (?:r\)?\"?\run^([a-zA-Z0-9_\.]+)\?$?([a-zA-Z0-9_ \.]+)", cmd)
if help_match:
cmd = % help_match.group(1)
elif run_match:
filename = guess_filename(run_match.groups()[0])
cmd = "runfile(, args=None)" % remove_backslashes(filename)
elif cd_match:
cmd = % cd_match.groups()[0].strip()
elif clear_match:
varnames = clear_match.groups()[0].replace(, ).split()
for varname in varnames:
try:
self.namespace.pop(varname)
except KeyError:
pass
elif cmd.startswith():
pipe = programs.run_shell_command(cmd[1:])
txt_out = encoding.transcode( pipe.stdout.read().decode() )
txt_err = encoding.transcode( pipe.stderr.read().decode().rstrip() )
if txt_err:
self.stderr_write.write(txt_err)
if txt_out:
self.stdout_write.write(txt_out)
self.stdout_write.write()
self.more = False
else:
self.more = self.push(cmd)
if new_prompt:
self.widget_proxy.new_prompt(self.p2 if self.more else self.p1)
if not self.more:
self.resetbuffer() | Run command in interpreter |
19,909 | def wrap_as_node(self, func):
name = self.get_name(func)
@wraps(func)
def wrapped(*args, **kwargs):
message = self.get_message_from_call(*args, **kwargs)
self.logger.info(, name, message)
result = func(message)
if isinstance(result, GeneratorType):
results = [
self.wrap_result(name, item)
for item in result
if item is not NoResult
]
self.logger.debug(
, func, len(results)
)
[self.route(name, item) for item in results]
return tuple(results)
else:
if result is NoResult:
return result
result = self.wrap_result(name, result)
self.logger.debug(
, func, result
)
self.route(name, result)
return result
return wrapped | wrap a function as a node |
19,910 | def iso8601_datetime(d):
if d == values.unset:
return d
elif isinstance(d, datetime.datetime) or isinstance(d, datetime.date):
return d.strftime()
elif isinstance(d, str):
return d | Return a string representation of a date that the Twilio API understands
Format is YYYY-MM-DD. Returns None if d is not a string, datetime, or date |
19,911 | def __get_sigmas(self):
stack_sigma = {}
_stack = self.stack
_file_path = os.path.abspath(os.path.dirname(__file__))
_database_folder = os.path.join(_file_path, , self.database)
_list_compounds = _stack.keys()
for _compound in _list_compounds:
_list_element = _stack[_compound][]
stack_sigma[_compound] = {}
for _element in _list_element:
stack_sigma[_compound][_element] = {}
_list_isotopes = _stack[_compound][_element][][]
_list_file_names = _stack[_compound][_element][][]
_list_isotopic_ratio = _stack[_compound][_element][][]
_iso_file_ratio = zip(_list_isotopes, _list_file_names, _list_isotopic_ratio)
stack_sigma[_compound][_element][] = _list_isotopic_ratio
_sigma_all_isotopes = 0
_energy_all_isotpes = 0
for _iso, _file, _ratio in _iso_file_ratio:
stack_sigma[_compound][_element][_iso] = {}
_file = os.path.join(_database_folder, _file)
_dict = _utilities.get_sigma(database_file_name=_file,
e_min=self.energy_min,
e_max=self.energy_max,
e_step=self.energy_step)
stack_sigma[_compound][_element][_iso][] = _dict[]
stack_sigma[_compound][_element][_iso][] = _dict[] * _ratio
stack_sigma[_compound][_element][_iso][] = _dict[]
_sigma_all_isotopes += _dict[] * _ratio
_energy_all_isotpes += _dict[]
_mean_energy_all_isotopes = _energy_all_isotpes / len(_list_isotopes)
stack_sigma[_compound][_element][] = _mean_energy_all_isotopes
stack_sigma[_compound][_element][] = _sigma_all_isotopes
self.stack_sigma = stack_sigma | will populate the stack_sigma dictionary with the energy and sigma array
for all the compound/element and isotopes |
19,912 | def pagination_calc(items_count, page_size, cur_page=1, nearby=2):
if type(cur_page) == str:
cur_page = int(cur_page) if cur_page.isdigit() else 1
elif type(cur_page) == int:
if cur_page <= 0:
cur_page = 1
else:
cur_page = 1
page_count = 1 if page_size == -1 else int(math.ceil(items_count / page_size))
items_length = nearby * 2 + 1
first_page = None
last_page = None
prev_page = cur_page - 1 if cur_page != 1 else None
next_page = cur_page + 1 if cur_page != page_count else None
if page_count <= items_length:
items = range(1, page_count + 1)
elif cur_page <= nearby:
items = range(1, items_length + 1)
last_page = True
elif cur_page >= page_count - nearby:
items = range(page_count - items_length + 1, page_count + 1)
first_page = True
else:
items = range(cur_page - nearby, cur_page + nearby + 1)
first_page, last_page = True, True
if first_page:
first_page = 1
if last_page:
last_page = page_count
return {
: cur_page,
: prev_page,
: next_page,
: first_page,
: last_page,
: list(items),
: {
: page_size,
: page_count,
: items_count,
}
} | :param nearby:
:param items_count: count of all items
:param page_size: size of one page
:param cur_page: current page number, accept string digit
:return: num of pages, an iterator |
19,913 | def _array(group_idx, a, size, fill_value, dtype=None):
if fill_value is not None and not (np.isscalar(fill_value) or
len(fill_value) == 0):
raise ValueError("fill_value must be None, a scalar or an empty "
"sequence")
order_group_idx = np.argsort(group_idx, kind=)
counts = np.bincount(group_idx, minlength=size)
ret = np.split(a[order_group_idx], np.cumsum(counts)[:-1])
ret = np.asanyarray(ret)
if fill_value is None or np.isscalar(fill_value):
_fill_untouched(group_idx, ret, fill_value)
return ret | groups a into separate arrays, keeping the order intact. |
19,914 | def process(self):
self.modules.sort(key=lambda x: x.priority)
for module in self.modules:
transforms = module.transform(self.data)
transforms.sort(key=lambda x: x.linenum, reverse=True)
for transform in transforms:
linenum = transform.linenum
if isinstance(transform.data, basestring):
transform.data = [transform.data]
if transform.oper == "prepend":
self.data[linenum:linenum] = transform.data
elif transform.oper == "append":
self.data[linenum+1:linenum+1] = transform.data
elif transform.oper == "swap":
self.data[linenum:linenum+1] = transform.data
elif transform.oper == "drop":
self.data[linenum:linenum+1] = []
elif transform.oper == "noop":
pass | This method handles the actual processing of Modules and Transforms |
19,915 | def shared(self, value, name=None):
if type(value) == int:
final_value = np.array(value, dtype="int32")
elif type(value) == float:
final_value = np.array(value, dtype=env.FLOATX)
else:
final_value = value
return theano.shared(final_value, name=name) | Create a shared theano scalar value. |
19,916 | def get_most_distinct_words(vocab, topic_word_distrib, doc_topic_distrib, doc_lengths, n=None):
return _words_by_distinctiveness_score(vocab, topic_word_distrib, doc_topic_distrib, doc_lengths, n) | Order the words from `vocab` by "distinctiveness score" (Chuang et al. 2012) from most to least distinctive.
Optionally only return the `n` most distinctive words.
J. Chuang, C. Manning, J. Heer 2012: "Termite: Visualization Techniques for Assessing Textual Topic Models" |
19,917 | def add_portal(self, origin, destination, symmetrical=False, **kwargs):
if isinstance(origin, Node):
origin = origin.name
if isinstance(destination, Node):
destination = destination.name
super().add_edge(origin, destination, **kwargs)
if symmetrical:
self.add_portal(destination, origin, is_mirror=True) | Connect the origin to the destination with a :class:`Portal`.
Keyword arguments are the :class:`Portal`'s
attributes. Exception: if keyword ``symmetrical`` == ``True``,
a mirror-:class:`Portal` will be placed in the opposite
direction between the same nodes. It will always appear to
have the placed :class:`Portal`'s stats, and any change to the
mirror :class:`Portal`'s stats will affect the placed
:class:`Portal`. |
19,918 | def mark_dead(self, proxy, _time=None):
if proxy not in self.proxies:
logger.warn("Proxy <%s> was not found in proxies list" % proxy)
return
if proxy in self.good:
logger.debug("GOOD proxy became DEAD: <%s>" % proxy)
else:
logger.debug("Proxy <%s> is DEAD" % proxy)
self.unchecked.discard(proxy)
self.good.discard(proxy)
self.dead.add(proxy)
now = _time or time.time()
state = self.proxies[proxy]
state.backoff_time = self.backoff(state.failed_attempts)
state.next_check = now + state.backoff_time
state.failed_attempts += 1 | Mark a proxy as dead |
19,919 | def get_group_hidden(self):
for element in self.group_list:
if element.form.view_type != :
return False
return False
return True | Determine if the entire group of elements is hidden
(decide whether to hide the entire group). |
19,920 | def execute(self, resource, **kw):
params = kw.pop(, {})
json = kw.pop(, None)
task = self.make_request(
TaskRunFailed,
method=,
params=params,
json=json,
resource=resource)
timeout = kw.pop(, 5)
wait_for_finish = kw.pop(, True)
return TaskOperationPoller(
task=task, timeout=timeout,
wait_for_finish=wait_for_finish,
**kw) | Execute the task and return a TaskOperationPoller.
:rtype: TaskOperationPoller |
19,921 | def api_reference(root_url, service, version):
root_url = root_url.rstrip()
if root_url == OLD_ROOT_URL:
return .format(service, version)
else:
return .format(root_url, service, version) | Generate URL for a Taskcluster api reference. |
19,922 | def get_oauth_access_token(url_base, client_id, client_secret, company_id, user_id, user_type):
SAPSuccessFactorsGlobalConfiguration = apps.get_model(
,
)
global_sap_config = SAPSuccessFactorsGlobalConfiguration.current()
url = url_base + global_sap_config.oauth_api_path
response = requests.post(
url,
json={
: ,
: {
: user_id,
: company_id,
: user_type,
: ,
}
},
auth=(client_id, client_secret),
headers={: }
)
response.raise_for_status()
data = response.json()
try:
return data[], datetime.datetime.utcfromtimestamp(data[] + int(time.time()))
except KeyError:
raise requests.RequestException(response=response) | Retrieves OAuth 2.0 access token using the client credentials grant.
Args:
url_base (str): Oauth2 access token endpoint
client_id (str): client ID
client_secret (str): client secret
company_id (str): SAP company ID
user_id (str): SAP user ID
user_type (str): type of SAP user (admin or user)
Returns:
tuple: Tuple containing access token string and expiration datetime.
Raises:
HTTPError: If we received a failure response code from SAP SuccessFactors.
RequestException: If an unexpected response format was received that we could not parse. |
19,923 | def print_clusters(fastas, info, ANI):
header = [, , , , , , \
, , ]
yield header
in_cluster = []
for cluster_num, cluster in enumerate(connected_components(ANI)):
cluster = sorted([genome_info(genome, info[genome]) \
for genome in cluster], \
key = lambda x: x[0:], reverse = True)
rep = cluster[0][-1]
cluster = [i[-1] for i in cluster]
size = len(cluster)
for genome in cluster:
in_cluster.append(genome)
try:
stats = [size, rep, genome, \
info[genome][], info[genome][], \
info[genome][], info[genome][], cluster]
except:
stats = [size, rep, genome, \
, , \
info[genome][], info[genome][], cluster]
if rep == genome:
stats = [ % (cluster_num)] + stats
else:
stats = [cluster_num] + stats
yield stats
try:
start = cluster_num + 1
except:
start = 0
fastas = set([i.rsplit(, 1)[0].rsplit(, 1)[-1].rsplit()[0] for i in fastas])
for cluster_num, genome in \
enumerate(fastas.difference(set(in_cluster)), start):
try:
stats = [ % (cluster_num), 1, genome, genome, \
info[genome][], info[genome][], \
info[genome][], info[genome][], [genome]]
except:
stats = [ % (cluster_num), 1, genome, genome, \
, , \
info[genome][], info[genome][], [genome]]
yield stats | choose represenative genome and
print cluster information
*if ggKbase table is provided, use SCG info to choose best genome |
19,924 | def _populate_unknown_statuses(set_tasks):
visited = set()
for task in set_tasks["still_pending_not_ext"]:
_depth_first_search(set_tasks, task, visited) | Add the "upstream_*" and "not_run" statuses my mutating set_tasks. |
19,925 | def get_request_params(self) -> List[ExtensionParameter]:
return _build_parameters(
self.server_no_context_takeover,
self.client_no_context_takeover,
self.server_max_window_bits,
self.client_max_window_bits,
) | Build request parameters. |
19,926 | def load(self, key, noexpire=None):
with self.load_fd(key, noexpire=noexpire) as fd:
return fd.read() | Lookup an item in the cache and return the raw content of
the file as a string. |
19,927 | def get_indexed_slices(self, column_parent, index_clause, column_predicate, consistency_level):
self._seqid += 1
d = self._reqs[self._seqid] = defer.Deferred()
self.send_get_indexed_slices(column_parent, index_clause, column_predicate, consistency_level)
return d | Returns the subset of columns specified in SlicePredicate for the rows matching the IndexClause
@deprecated use get_range_slices instead with range.row_filter specified
Parameters:
- column_parent
- index_clause
- column_predicate
- consistency_level |
19,928 | def inactive_response(self, request):
inactive_url = getattr(settings, , )
if inactive_url:
return HttpResponseRedirect(inactive_url)
else:
return self.error_to_response(request, {: _("This user account is marked as inactive.")}) | Return an inactive message. |
19,929 | def _construct_state_machines(self):
state_machines = dict()
for state_machine in [StateMachineRecomputing(self.logger, self),
StateMachineContinuous(self.logger, self),
StateMachineDiscrete(self.logger, self),
StateMachineFreerun(self.logger)]:
state_machines[state_machine.name] = state_machine
return state_machines | :return: dict in format <state_machine_common_name: instance_of_the_state_machine> |
19,930 | def apply(self, df):
if hasattr(self.definition, ):
r = self.definition(df)
elif self.definition in df.columns:
r = df[self.definition]
elif not isinstance(self.definition, string_types):
r = pd.Series(self.definition, index=df.index)
else:
raise ValueError("Invalid column definition: %s" % str(self.definition))
return r.astype(self.astype) if self.astype else r | Takes a pd.DataFrame and returns the newly defined column, i.e.
a pd.Series that has the same index as `df`. |
19,931 | def unique(iterable, key=identity):
seen = set()
for item in iterable:
item_key = key(item)
if item_key not in seen:
seen.add(item_key)
yield item | Yields all the unique values in an iterable maintaining order |
19,932 | def calendar(self, val):
self._calendar = val
if val is not None and not val.empty:
self._calendar_i = self._calendar.set_index("service_id")
else:
self._calendar_i = None | Update ``self._calendar_i``if ``self.calendar`` changes. |
19,933 | def load_from_db(self, cache=False):
a = {}
db_prefs = {p.preference.identifier(): p for p in self.queryset}
for preference in self.registry.preferences():
try:
db_pref = db_prefs[preference.identifier()]
except KeyError:
db_pref = self.create_db_pref(
section=preference.section.name,
name=preference.name,
value=preference.get())
else:
if cache:
self.to_cache(db_pref)
a[preference.identifier()] = db_pref.value
return a | Return a dictionary of preferences by section directly from DB |
19,934 | def get_all_targets(self):
result = []
for batch in self.batches:
result.extend(batch.targets)
return result | Returns all targets for all batches of this Executor. |
19,935 | def put(self, url: StrOrURL,
*, data: Any=None, **kwargs: Any) -> :
return _RequestContextManager(
self._request(hdrs.METH_PUT, url,
data=data,
**kwargs)) | Perform HTTP PUT request. |
19,936 | def tag_array(events):
all_tags = sorted(set(tag for event in events for tag in event.tags))
array = np.zeros((len(events), len(all_tags)))
for row, event in enumerate(events):
for tag in event.tags:
array[row, all_tags.index(tag)] = 1
return array | Return a numpy array mapping events to tags
- Rows corresponds to events
- Columns correspond to tags |
19,937 | def _neg_bounded_fun(fun, bounds, x, args=()):
if _check_bounds(x, bounds):
return -fun(x, *args)
else:
return np.inf | Wrapper for bounding and taking the negative of `fun` for the
Nelder-Mead algorithm. JIT-compiled in `nopython` mode using Numba.
Parameters
----------
fun : callable
The objective function to be minimized.
`fun(x, *args) -> float`
where x is an 1-D array with shape (n,) and args is a tuple of the
fixed parameters needed to completely specify the function. This
function must be JIT-compiled in `nopython` mode using Numba.
bounds: ndarray(float, ndim=2)
Sequence of (min, max) pairs for each element in x.
x : ndarray(float, ndim=1)
1-D array with shape (n,) of independent variables at which `fun` is
to be evaluated.
args : tuple, optional
Extra arguments passed to the objective function.
Returns
----------
scalar
`-fun(x, *args)` if x is within `bounds`, `np.inf` otherwise. |
19,938 | def load_metadata_csv(input_filepath):
with open(input_filepath) as f:
csv_in = csv.reader(f)
header = next(csv_in)
if in header:
tags_idx = header.index()
else:
raise ValueError()
if header[0] == :
if header[1] == :
metadata = load_metadata_csv_multi_user(csv_in, header,
tags_idx)
else:
raise ValueError()
elif header[0] == :
metadata = load_metadata_csv_single_user(csv_in, header, tags_idx)
else:
raise ValueError( +
+
+
+
)
return metadata | Return dict of metadata.
Format is either dict (filenames are keys) or dict-of-dicts (project member
IDs as top level keys, then filenames as keys).
:param input_filepath: This field is the filepath of the csv file. |
19,939 | def _validate(self, validator, data, key, position=None, includes=None):
errors = []
if position:
position = % (position, key)
else:
position = key
try:
data_item = util.get_value(data, key)
except KeyError:
return errors
return self._validate_item(validator, data_item, position, includes) | Run through a schema and a data structure,
validating along the way.
Ignores fields that are in the data structure, but not in the schema.
Returns an array of errors. |
19,940 | def __start_waiting_for_events(self):
d rather stay on the
safe side, as my experience of threading in Python is limited.
Starting ioloop...ioloop is owned by connection %s...re now open for events.
self.thread.tell_publisher_to_stop_waiting_for_thread_to_accept_events()
self.thread.continue_gently_closing_if_applicable()
self.thread._connection.ioloop.start()
except PIDServerException as e:
raise e
except Exception as e:
time_passed = datetime.datetime.now() - self.__start_connect_time
time_passed_seconds = time_passed.total_seconds()
if isinstance(e, pika.exceptions.ProbableAuthenticationError):
errorname = self.__make_error_name(e, )
elif isinstance(e, pika.exceptions.ProbableAccessDeniedError):
errorname = self.__make_error_name(e, )
elif isinstance(e, pika.exceptions.IncompatibleProtocolError):
errorname = self.__make_error_name(e, )
else:
errorname = self.__make_error_name(e)
logdebug(LOGGER, s lifetime (after %s seconds): %sm quite sure that this cannot happen, as the connection object
logdebug(LOGGER, )
logerror(LOGGER, ) | This waits until the whole chain of callback methods triggered by
"trigger_connection_to_rabbit_etc()" has finished, and then starts
waiting for publications.
This is done by starting the ioloop.
Note: In the pika usage example, these things are both called inside the run()
method, so I wonder if this check-and-wait here is necessary. Maybe not.
But the usage example does not implement a Thread, so it probably blocks during
the opening of the connection. Here, as it is a different thread, the run()
might get called before the __init__ has finished? I'd rather stay on the
safe side, as my experience of threading in Python is limited. |
19,941 | async def shutdown(self, container, force=False):
p = self._connpool
self._connpool = []
self._shutdown = True
if self._defaultconn:
p.append(self._defaultconn)
self._defaultconn = None
if self._subscribeconn:
p.append(self._subscribeconn)
self._subscribeconn = None
await container.execute_all([self._shutdown_conn(container, o, force)
for o in p]) | Shutdown all connections. Exclusive connections created by get_connection will shutdown after release() |
19,942 | def toIndex(self, value):
if self._isIrNull(value):
ret = IR_NULL_STR
else:
ret = self._toIndex(value)
if self.isIndexHashed is False:
return ret
return md5(tobytes(ret)).hexdigest() | toIndex - An optional method which will return the value prepped for index.
By default, "toStorage" will be called. If you provide "hashIndex=True" on the constructor,
the field will be md5summed for indexing purposes. This is useful for large strings, etc. |
19,943 | def status_for_all_orders_in_a_stock(self, stock):
url_fragment = .format(
stock=stock,
venue=self.venue,
account=self.account,
)
url = urljoin(self.base_url, url_fragment)
return self.session.get(url).json() | Status for all orders in a stock
https://starfighter.readme.io/docs/status-for-all-orders-in-a-stock |
19,944 | def predict(self, choosers, alternatives, debug=False):
self.assert_fitted()
logger.debug(.format(self.name))
choosers, alternatives = self.apply_predict_filters(
choosers, alternatives)
if len(choosers) == 0:
return pd.Series()
if len(alternatives) == 0:
return pd.Series(index=choosers.index)
probabilities = self.probabilities(
choosers, alternatives, filter_tables=False)
if debug:
self.sim_pdf = probabilities
if self.choice_mode == :
choices = unit_choice(
choosers.index.values,
probabilities.index.get_level_values().values,
probabilities.values)
elif self.choice_mode == :
def mkchoice(probs):
probs.reset_index(0, drop=True, inplace=True)
return np.random.choice(
probs.index.values, p=probs.values / probs.sum())
choices = probabilities.groupby(level=, sort=False)\
.apply(mkchoice)
else:
raise ValueError(
.format(self.choice_mode))
logger.debug(.format(self.name))
return choices | Choose from among alternatives for a group of agents.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
debug : bool
If debug is set to true, will set the variable "sim_pdf" on
the object to store the probabilities for mapping of the
outcome.
Returns
-------
choices : pandas.Series
Mapping of chooser ID to alternative ID. Some choosers
will map to a nan value when there are not enough alternatives
for all the choosers. |
19,945 | def _get_or_create_uaa(self, uaa):
if isinstance(uaa, predix.admin.uaa.UserAccountAuthentication):
return uaa
logging.debug("Initializing a new UAA")
return predix.admin.uaa.UserAccountAuthentication() | Returns a valid UAA instance for performing administrative functions
on services. |
19,946 | def ccor(alt, r, h1, zh):
e = (alt - zh) / h1
if(e>70.0):
return 1.0
elif (e < -70.0):
return exp(r)
ex = exp(e)
e = r / (1.0 + ex)
return exp(e) | /* CHEMISTRY/DISSOCIATION CORRECTION FOR MSIS MODELS
* ALT - altitude
* R - target ratio
* H1 - transition scale length
* ZH - altitude of 1/2 R
*/ |
19,947 | def get_default_config_file(rootdir=None):
if rootdir is None:
return DEFAULT_CONFIG_FILE
for path in CONFIG_FILES:
path = os.path.join(rootdir, path)
if os.path.isfile(path) and os.access(path, os.R_OK):
return path | Search for configuration file. |
19,948 | def _learn(
permanences, rng,
activeCells, activeInput, growthCandidateInput,
sampleSize, initialPermanence, permanenceIncrement,
permanenceDecrement, connectedPermanence):
permanences.incrementNonZerosOnOuter(
activeCells, activeInput, permanenceIncrement)
permanences.incrementNonZerosOnRowsExcludingCols(
activeCells, activeInput, -permanenceDecrement)
permanences.clipRowsBelowAndAbove(
activeCells, 0.0, 1.0)
if sampleSize == -1:
permanences.setZerosOnOuter(
activeCells, activeInput, initialPermanence)
else:
existingSynapseCounts = permanences.nNonZerosPerRowOnCols(
activeCells, activeInput)
maxNewByCell = numpy.empty(len(activeCells), dtype="int32")
numpy.subtract(sampleSize, existingSynapseCounts, out=maxNewByCell)
permanences.setRandomZerosOnOuter(
activeCells, growthCandidateInput, maxNewByCell, initialPermanence, rng) | For each active cell, reinforce active synapses, punish inactive synapses,
and grow new synapses to a subset of the active input bits that the cell
isn't already connected to.
Parameters:
----------------------------
@param permanences (SparseMatrix)
Matrix of permanences, with cells as rows and inputs as columns
@param rng (Random)
Random number generator
@param activeCells (sorted sequence)
Sorted list of the cells that are learning
@param activeInput (sorted sequence)
Sorted list of active bits in the input
@param growthCandidateInput (sorted sequence)
Sorted list of active bits in the input that the activeCells may
grow new synapses to
For remaining parameters, see the __init__ docstring. |
19,949 | def dereference(self, data, host=None):
return self.deep_decode(self.deep_encode(data, host), deref=True) | Dereferences RefObjects stuck in the hierarchy. This is a bit
of an ugly hack. |
19,950 | def _apply_replace_backrefs(m, repl=None, flags=0):
if m is None:
raise ValueError("Match is None!")
else:
if isinstance(repl, ReplaceTemplate):
return repl.expand(m)
elif isinstance(repl, (str, bytes)):
return _bregex_parse._ReplaceParser().parse(m.re, repl, bool(flags & FORMAT)).expand(m) | Expand with either the `ReplaceTemplate` or compile on the fly, or return None. |
19,951 | def get_lm_challenge_response(self):
if self._negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_EXTENDED_SESSIONSECURITY and self._ntlm_compatibility < 3:
response = ComputeResponse._get_LMv1_with_session_security_response(self._client_challenge)
elif 0 <= self._ntlm_compatibility <= 1:
response = ComputeResponse._get_LMv1_response(self._password, self._server_challenge)
elif self._ntlm_compatibility == 2:
return response | [MS-NLMP] v28.0 2016-07-14
3.3.1 - NTLM v1 Authentication
3.3.2 - NTLM v2 Authentication
This method returns the LmChallengeResponse key based on the ntlm_compatibility chosen
and the target_info supplied by the CHALLENGE_MESSAGE. It is quite different from what
is set in the document as it combines the NTLMv1, NTLM2 and NTLMv2 methods into one
and calls separate methods based on the ntlm_compatibility flag chosen.
:return: response (LmChallengeResponse) - The LM response to the server challenge. Computed by the client |
19,952 | def load_and_assign_npz_dict(name=, sess=None):
if sess is None:
raise ValueError("session is None.")
if not os.path.exists(name):
logging.error("file {} doesn't exist.".format(name))
return False
params = np.load(name)
if len(params.keys()) != len(set(params.keys())):
raise Exception("Duplication in model npz_dict %s" % name)
ops = list()
for key in params.keys():
try:
varlist = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=key)
if len(varlist) > 1:
raise Exception("[!] Multiple candidate variables to be assigned for name %s" % key)
elif len(varlist) == 0:
raise KeyError
else:
ops.append(varlist[0].assign(params[key]))
logging.info("[*] params restored: %s" % key)
except KeyError:
logging.info("[!] Warning: Tensor named %s not found in network." % key)
sess.run(ops)
logging.info("[*] Model restored from npz_dict %s" % name) | Restore the parameters saved by ``tl.files.save_npz_dict()``.
Parameters
----------
name : str
The name of the `.npz` file.
sess : Session
TensorFlow Session. |
19,953 | def rename(name, new_name, root=None):
*
if info(new_name, root=root):
raise CommandExecutionError({0}\.format(new_name))
return _chattrib(name, , new_name, , root=root) | Change the username for a named user
name
User to modify
new_name
New value of the login name
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' user.rename name new_name |
19,954 | def map(self, func):
return dict((key, func(value)) for key, value in self.iteritems()) | Return a dictionary of the results of func applied to each
of the segmentlist objects in self.
Example:
>>> x = segmentlistdict()
>>> x["H1"] = segmentlist([segment(0, 10)])
>>> x["H2"] = segmentlist([segment(5, 15)])
>>> x.map(lambda l: 12 in l)
{'H2': True, 'H1': False} |
19,955 | def list_tables(self, limit=None, start_table=None):
result = self.layer1.list_tables(limit, start_table)
return result[] | Return a list of the names of all Tables associated with the
current account and region.
TODO - Layer2 should probably automatically handle pagination.
:type limit: int
:param limit: The maximum number of tables to return.
:type start_table: str
:param limit: The name of the table that starts the
list. If you ran a previous list_tables and not
all results were returned, the response dict would
include a LastEvaluatedTableName attribute. Use
that value here to continue the listing. |
19,956 | def create(self, serviceBinding):
if not isinstance(serviceBinding, ServiceBindingCreateRequest):
if serviceBinding["type"] == "cloudant":
serviceBinding = CloudantServiceBindingCreateRequest(**serviceBinding)
elif serviceBinding["type"] == "eventstreams":
serviceBinding = EventStreamsServiceBindingCreateRequest(**serviceBinding)
else:
raise Exception("Unsupported service binding type")
url = "api/v0002/s2s/services"
r = self._apiClient.post(url, data=serviceBinding)
if r.status_code == 201:
return ServiceBinding(**r.json())
else:
raise ApiException(r) | Create a new external service.
The service must include all of the details required to connect
and authenticate to the external service in the credentials property.
Parameters:
- serviceName (string) - Name of the service
- serviceType (string) - must be either eventstreams or cloudant
- credentials (json object) - Should have a valid structure for the service type.
- description (string) - description of the service
Throws APIException on failure |
19,957 | def getDynDnsClientForConfig(config, plugins=None):
initparams = {}
if "interval" in config:
initparams["detect_interval"] = config["interval"]
if plugins is not None:
initparams["plugins"] = plugins
if "updater" in config:
for updater_name, updater_options in config["updater"]:
initparams["updater"] = get_updater_class(updater_name)(**updater_options)
if "detector" in config:
detector_name, detector_opts = config["detector"][-1]
try:
klass = get_detector_class(detector_name)
except KeyError as exc:
LOG.warning("Invalid change detector configuration: ",
detector_name, exc_info=exc)
return None
thedetector = klass(**detector_opts)
initparams["detector"] = thedetector
return DynDnsClient(**initparams) | Instantiate and return a complete and working dyndns client.
:param config: a dictionary with configuration keys
:param plugins: an object that implements PluginManager |
19,958 | def loaded_ret(ret, loaded, test, debug, compliance_report=False, opts=None):
changes = {}
ret[] = loaded[]
if in loaded:
changes[] = loaded[]
if in loaded:
changes[] = loaded[]
if in loaded:
if compliance_report:
changes[] = loaded[]
if debug and in loaded:
changes[] = loaded[]
if changes.get():
ret[] = .format(comment_base=ret[],
diff=changes[])
if changes.get():
ret[] = .format(
comment_base=ret[],
loaded_cfg=changes[])
if changes.get():
ret[] = .format(
comment_base=ret[],
compliance=salt.output.string_format(changes[], , opts=opts))
if not loaded.get(, False):
return ret
if not loaded.get(, True):
})
return ret | Return the final state output.
ret
The initial state output structure.
loaded
The loaded dictionary. |
19,959 | def get_root_families(self):
if self._catalog_session is not None:
return self._catalog_session.get_root_catalogs()
return FamilyLookupSession(
self._proxy,
self._runtime).get_families_by_ids(list(self.get_root_family_ids())) | Gets the root families in the family hierarchy.
A node with no parents is an orphan. While all family ``Ids``
are known to the hierarchy, an orphan does not appear in the
hierarchy unless explicitly added as a root node or child of
another node.
return: (osid.relationship.FamilyList) - the root families
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method is must be implemented.* |
19,960 | def get_flux(self, reaction):
return self._prob.result.get_value(self._v(reaction)) | Get resulting flux value for reaction. |
19,961 | def create_powerflow_problem(timerange, components):
network, snapshots = init_pypsa_network(timerange)
for component in components.keys():
network.import_components_from_dataframe(components[component],
component)
return network, snapshots | Create PyPSA network object and fill with data
Parameters
----------
timerange: Pandas DatetimeIndex
Time range to be analyzed by PF
components: dict
Returns
-------
network: PyPSA powerflow problem object |
19,962 | def process_amqp_msgs(self):
LOG.info()
while True:
(mtd_fr, hdr_fr, body) = (None, None, None)
try:
if self.consume_channel:
(mtd_fr, hdr_fr, body) = self.consume_channel.basic_get(
self._dcnm_queue_name)
if mtd_fr:
LOG.info(, body)
self._cb_dcnm_msg(mtd_fr, body)
self.consume_channel.basic_ack(mtd_fr.delivery_tag)
else:
try:
self._conn.sleep(1)
except AttributeError:
time.sleep(1)
except Exception:
exc_type, exc_value, exc_tb = sys.exc_info()
tb_str = traceback.format_exception(exc_type,
exc_value, exc_tb)
LOG.exception("Failed to read from queue: %(queue)s "
"%(exc_type)s, %(exc_value)s, %(exc_tb)s.", {
: self._dcnm_queue_name,
: exc_type,
: exc_value,
: tb_str}) | Process AMQP queue messages.
It connects to AMQP server and calls callbacks to process DCNM events,
i.e. routing key containing '.cisco.dcnm.', once they arrive in the
queue. |
19,963 | def make_rendition(self, width, height):
s aspect ratio
width x height -> will make an image potentialy cropped
%dx%d%s/%s_%s%s' % (
IMAGE_DIRECTORY,
filename,
rendition_key,
ext
)
fd = BytesIO()
image.save(fd, format)
default_storage.save(rendition_name, fd)
self.renditions[rendition_key] = rendition_name
self.save()
return rendition_name
return self.master.name | build a rendition
0 x 0 -> will give master URL
only width -> will make a renditions with master's aspect ratio
width x height -> will make an image potentialy cropped |
19,964 | def add_precip_file(self, precip_file_path, interpolation_type=None):
self._update_card(, precip_file_path, True)
if interpolation_type is None:
if not self.project_manager.getCard() \
and not self.project_manager.getCard():
self._update_card(, )
else:
if interpolation_type.upper() not in self.PRECIP_INTERP_TYPES:
raise IndexError("Invalid interpolation_type {0}".format(interpolation_type))
interpolation_type = interpolation_type.upper()
if interpolation_type == "INV_DISTANCE":
self._update_card(, )
self.project_manager.deleteCard(, self.db_session)
else:
self._update_card(, )
self.project_manager.deleteCard(, self.db_session) | Adds a precip file to project with interpolation_type |
19,965 | def info():
try:
platform_info = {"system": platform.system(), "release": platform.release()}
except IOError:
platform_info = {"system": "Unknown", "release": "Unknown"}
implementation = platform.python_implementation()
if implementation == "CPython":
implementation_version = platform.python_version()
elif implementation == "PyPy":
implementation_version = "%s.%s.%s" % (
sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro,
)
if sys.pypy_version_info.releaselevel != "final":
implementation_version = "".join(
[implementation_version, sys.pypy_version_info.releaselevel]
)
else:
implementation_version = "Unknown"
return {
"platform": platform_info,
"implementation": {"name": implementation, "version": implementation_version},
"cryptography": {"version": getattr(cryptography, "__version__", "")},
"pyjwt": {"version": pyjwt_version},
} | Generate information for a bug report.
Based on the requests package help utility module. |
19,966 | def _check_uuid_fmt(self):
if self.uuid_fmt not in UUIDField.FORMATS:
raise FieldValueRangeException(
"Unsupported uuid_fmt ({})".format(self.uuid_fmt)) | Checks .uuid_fmt, and raises an exception if it is not valid. |
19,967 | def release(ctx, version):
invoke.run("git tag -s {0} -m ".format(version))
invoke.run("git push --tags")
invoke.run("python setup.py sdist")
invoke.run("twine upload -s dist/PyNaCl-{0}* ".format(version))
session = requests.Session()
token = getpass.getpass("Input the Jenkins token: ")
response = session.post(
"{0}/build".format(JENKINS_URL),
params={
"cause": "Building wheels for {0}".format(version),
"token": token
}
)
response.raise_for_status()
wait_for_build_completed(session)
paths = download_artifacts(session)
invoke.run("twine upload {0}".format(" ".join(paths))) | ``version`` should be a string like '0.4' or '1.0'. |
19,968 | def fromCSV(csvfile,out=None,fieldnames=None,fmtparams=None,conv_func={},
empty_to_None=[]):
import csv
import time
import datetime
if out is None:
out = os.path.splitext(csvfile)[0]+".pdl"
if fieldnames is None:
reader = csv.reader(open(csvfile))
fieldnames = reader.next()
reader = csv.DictReader(open(csvfile),fieldnames,fmtparams)
reader.next()
db = PyDbLite.Base(out)
conv_func.update({"__id__":int})
auto_id = not "__id__" in fieldnames
fieldnames = [ f for f in fieldnames if not f in ("__id__") ]
kw = {"mode":"override"}
db.create(*fieldnames,**kw)
print db.fields
next_id = 0
records = {}
while True:
try:
record = reader.next()
except StopIteration:
break
if auto_id:
record["__id__"] = next_id
next_id += 1
for field in empty_to_None:
if not record[field]:
record[field] = None
for field in conv_func:
if not isinstance(conv_func[field],(tuple,list)):
record[field] = conv_func[field](record[field])
else:
date_class,date_fmt = conv_func[field]
if not record[field]:
record[field] = None
else:
time_tuple = time.strptime(record[field],date_fmt)
if date_class is datetime.date:
time_tuple = time_tuple[:3]
record[field] = date_class(*time_tuple)
records[record["__id__"]] = record
db.records = records
db.commit()
print len(db)
return db | Conversion from CSV to PyDbLite
csvfile : name of the CSV file in the file system
out : path for the new PyDbLite base in the file system
fieldnames : list of field names. If set to None, the field names must
be present in the first line of the CSV file
fmtparams : the format parameters for the CSV file, as described in
the csv module of the standard distribution
conv_func is a dictionary mapping a field name to the function used to
convert the string read in the CSV to the appropriate Python type. For
instance if field "age" must be converted to an integer :
conv_func["age"] = int
empty_to_None is a list of the fields such that when the value read in
the CSV file is the empty string, the field value is set to None |
19,969 | def index():
accessible_institutes = current_user.institutes
if not in current_user.roles:
accessible_institutes = current_user.institutes
if not accessible_institutes:
flash()
return redirect(url_for())
LOG.debug(.format(accessible_institutes))
institutes = [inst for inst in store.institutes(accessible_institutes)]
institutes.insert(0, {: None, : })
institute_id = None
slice_query = None
panel=1
if request.method==:
institute_id = request.form.get()
slice_query = request.form.get()
panel=request.form.get()
elif request.method==:
institute_id = request.args.get()
slice_query = request.args.get()
| Display the Scout dashboard. |
19,970 | def refresh(self, only_closed=False):
if only_closed:
opened = filter(self.__check_port, self.__closed)
self.__closed = self.__closed.difference(opened)
self.__ports = self.__ports.union(opened)
else:
ports = self.__closed.union(self.__ports)
self.__ports = set(filter(self.__check_port, ports))
self.__closed = ports.difference(self.__ports) | refresh ports status
Args:
only_closed - check status only for closed ports |
19,971 | def _summarize_combined(samples, vkey):
validate_dir = utils.safe_makedir(os.path.join(samples[0]["dirs"]["work"], vkey))
combined, _ = _group_validate_samples(samples, vkey, [["metadata", "validate_combine"]])
for vname, vitems in combined.items():
if vname:
cur_combined = collections.defaultdict(int)
for data in sorted(vitems, key=lambda x: x.get("lane", dd.get_sample_name(x))):
validations = [variant.get(vkey) for variant in data.get("variants", [])]
validations = [v for v in validations if v]
if len(validations) == 0 and vkey in data:
validations = [data.get(vkey)]
for validate in validations:
with open(validate["summary"]) as in_handle:
reader = csv.reader(in_handle)
next(reader)
for _, caller, vtype, metric, value in reader:
cur_combined[(caller, vtype, metric)] += int(value)
out_csv = os.path.join(validate_dir, "grading-summary-%s.csv" % vname)
with open(out_csv, "w") as out_handle:
writer = csv.writer(out_handle)
header = ["sample", "caller", "vtype", "metric", "value"]
writer.writerow(header)
for (caller, variant_type, category), val in cur_combined.items():
writer.writerow(["combined-%s" % vname, caller, variant_type, category, val])
plots = validateplot.classifyplot_from_valfile(out_csv) | Prepare summarized CSV and plot files for samples to combine together.
Helps handle cases where we want to summarize over multiple samples. |
19,972 | def cleanup(self):
current = self.join()
if not os.path.exists(current):
LOGGER.debug(, current)
os.unlink(self.join())
self.current = None
try:
self._update_current()
except PrefixNotFound:
if not os.listdir(self.path):
LOGGER.debug(, self.path)
os.rmdir(self.path)
else:
raise MalformedWorkdir(
(
).format(self.path)
) | Attempt to set a new current symlink if it is broken. If no other
prefixes exist and the workdir is empty, try to delete the entire
workdir.
Raises:
:exc:`~MalformedWorkdir`: if no prefixes were found, but the
workdir is not empty. |
19,973 | def upgrade(*pkgs):
libxslt-1.1.0libxslt-1.1.10**
cmd = _quietnix()
cmd.append()
cmd.extend(pkgs)
out = _run(cmd)
upgrades = [_format_upgrade(s.split(maxsplit=1)[1])
for s in out[].splitlines()
if s.startswith()]
return [[_strip_quotes(s_) for s_ in s]
for s in upgrades] | Runs an update operation on the specified packages, or all packages if none is specified.
:type pkgs: list(str)
:param pkgs:
List of packages to update
:return: The upgraded packages. Example element: ``['libxslt-1.1.0', 'libxslt-1.1.10']``
:rtype: list(tuple(str, str))
.. code-block:: bash
salt '*' nix.update
salt '*' nix.update pkgs=one,two |
19,974 | def interface_lookup(interfaces, hwaddr, address_type):
for interface in interfaces.values():
if interface.get() == hwaddr:
for address in interface.get():
if address.get() == address_type:
return address.get() | Search the address within the interface list. |
19,975 | def _check_perpendicular_r2_axis(self, axis):
min_set = self._get_smallest_set_not_on_axis(axis)
for s1, s2 in itertools.combinations(min_set, 2):
test_axis = np.cross(s1.coords - s2.coords, axis)
if np.linalg.norm(test_axis) > self.tol:
op = SymmOp.from_axis_angle_and_translation(test_axis, 180)
r2present = self.is_valid_op(op)
if r2present:
self.symmops.append(op)
self.rot_sym.append((test_axis, 2))
return True | Checks for R2 axes perpendicular to unique axis. For handling
symmetric top molecules. |
19,976 | def call_runtime(self):
cache = self.gather_cache()
chunks = self.get_chunks()
interval = self.opts[]
recompile = self.opts.get(, 300)
r_start = time.time()
while True:
events = self.get_events()
if not events:
time.sleep(interval)
continue
start = time.time()
self.state.inject_globals[] = events
self.state.call_chunks(chunks)
elapsed = time.time() - start
left = interval - elapsed
if left > 0:
time.sleep(left)
self.state.reset_run_num()
if (start - r_start) > recompile:
cache = self.gather_cache()
chunks = self.get_chunks()
if self.reg_ret is not None:
self.returners[.format(self.reg_ret)](chunks)
r_start = time.time() | Execute the runtime |
19,977 | def dataframe(self, spark, group_by=, limit=None, sample=1, seed=42, decode=None, summaries=None, schema=None, table_name=None):
rdd = self.records(spark.sparkContext, group_by, limit, sample, seed, decode, summaries)
if not schema:
df = rdd.map(lambda d: Row(**d)).toDF()
else:
df = spark.createDataFrame(rdd, schema=schema)
if table_name:
df.createOrReplaceTempView(table_name)
return df | Convert RDD returned from records function to a dataframe
:param spark: a SparkSession object
:param group_by: specifies a paritition strategy for the objects
:param limit: maximum number of objects to retrieve
:param decode: an optional transformation to apply to the objects retrieved
:param sample: percentage of results to return. Useful to return a sample
of the dataset. This parameter is ignored when 'limit' is set.
:param seed: initialize internal state of the random number generator (42 by default).
This is used to make the dataset sampling reproducible. It an be set to None to obtain
different samples.
:param summaries: an iterable containing the summary for each item in the dataset. If None, it
will compute calling the summaries dataset.
:param schema: a Spark schema that overrides automatic conversion to a dataframe
:param table_name: allows resulting dataframe to easily be queried using SparkSQL
:return: a Spark DataFrame |
19,978 | def _validate_type(cls, typeobj):
if not (hasattr(typeobj, "convert") or hasattr(typeobj, "convert_binary")):
raise ArgumentError("type is invalid, does not have convert or convert_binary function", type=typeobj, methods=dir(typeobj))
if not hasattr(typeobj, "default_formatter"):
raise ArgumentError("type is invalid, does not have default_formatter function", type=typeobj, methods=dir(typeobj)) | Validate that all required type methods are implemented.
At minimum a type must have:
- a convert() or convert_binary() function
- a default_formatter() function
Raises an ArgumentError if the type is not valid |
19,979 | def _create_event(instance, action):
user = None
user_repr = repr(user)
if CUSER:
user = CuserMiddleware.get_user()
user_repr = repr(user)
if user is not None and user.is_anonymous:
user = None
return TrackingEvent.objects.create(
action=action,
object=instance,
object_repr=repr(instance),
user=user,
user_repr=user_repr,
) | Create a new event, getting the use if django-cuser is available. |
19,980 | def decode_body(cls, header, f):
assert header.packet_type == MqttControlPacketType.pingresp
if header.remaining_len != 0:
raise DecodeError()
return 0, MqttPingresp() | Generates a `MqttPingresp` packet given a
`MqttFixedHeader`. This method asserts that header.packet_type
is `pingresp`.
Parameters
----------
header: MqttFixedHeader
f: file
Object with a read method.
Raises
------
DecodeError
When there are extra bytes at the end of the packet.
Returns
-------
int
Number of bytes consumed from ``f``.
MqttPingresp
Object extracted from ``f``. |
19,981 | def validate_token(self, request, consumer, token):
oauth_server, oauth_request = oauth_provider.utils.initialize_server_request(request)
oauth_server.verify_request(oauth_request, consumer, token) | Check the token and raise an `oauth.Error` exception if invalid. |
19,982 | def temp_url(self, duration=120):
return self.bucket._boto_s3.meta.client.generate_presigned_url(
,
Params={: self.bucket.name, : self.name},
ExpiresIn=duration
) | Returns a temporary URL for the given key. |
19,983 | def _init(self):
read_values = []
read = self._file.read
last = read(1)
current = read(1)
while last != b and current != b and not \
(last == b and current == b):
read_values.append(last)
last = current
current = read(1)
if current == b and last != b:
read_values.append(last)
self._bytes = b.join(read_values) | Read the b"\\r\\n" at the end of the message. |
19,984 | def searchFilesIndex(self, nameData, fileData, fileIndex, searchString, category="", math=False, game=False, extension=""):
try:
fileFile = open(fileIndex, )
except IOError:
self.repo.printd("Error: Unable to read index file " + self.fileIndex)
return None, None
count = 1
for line in fileFile:
count += 1
try:
if nameData[count] != None:
if category in line:
fileData[count] = line[:len(line) - 1]
else:
nameData[count] = None
fileData[count] = None
if extension in line:
fileData[count] = line[:len(line) - 1]
else:
nameData[count] = None
fileData[count] = None
if (game and math):
if ("/games/" in line or "/math/" in line or "/science" in line):
nameData[count] = line[:len(line) - 1]
else:
nameData[count] = None
elif game:
if "/games/" in line:
fileData[count] = line[:len(line) - 1]
else:
nameData[count] = None
fileData[count] = None
elif math:
if ("/math/" in line or "/science/" in line):
fileData[count] = line[:len(line) - 1]
else:
nameData[count] = None
fileData[count] = None
except:
pass
fileFile.close()
return fileData, nameData | Search the files index using the namedata and returns the filedata |
19,985 | def getAsKmlGridAnimation(self, session, projectFile=None, path=None, documentName=None, colorRamp=None, alpha=1.0, noDataValue=0.0):
timeStampedRasters = self._assembleRasterParams(projectFile, self.rasters)
converter = RasterConverter(sqlAlchemyEngineOrSession=session)
if isinstance(colorRamp, dict):
converter.setCustomColorRamp(colorRamp[], colorRamp[])
else:
converter.setDefaultColorRamp(colorRamp)
if documentName is None:
documentName = self.fileExtension
kmlString = converter.getAsKmlGridAnimation(tableName=WMSDatasetRaster.tableName,
timeStampedRasters=timeStampedRasters,
rasterIdFieldName=,
rasterFieldName=,
documentName=documentName,
alpha=alpha,
noDataValue=noDataValue)
if path:
with open(path, ) as f:
f.write(kmlString)
return kmlString | Retrieve the WMS dataset as a gridded time stamped KML string.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database.
projectFile(:class:`gsshapy.orm.ProjectFile`): Project file object for the GSSHA project to which the WMS dataset belongs.
path (str, optional): Path to file where KML file will be written. Defaults to None.
documentName (str, optional): Name of the KML document. This will be the name that appears in the legend.
Defaults to 'Stream Network'.
colorRamp (:mod:`mapkit.ColorRampGenerator.ColorRampEnum` or dict, optional): Use ColorRampEnum to select a
default color ramp or a dictionary with keys 'colors' and 'interpolatedPoints' to specify a custom color
ramp. The 'colors' key must be a list of RGB integer tuples (e.g.: (255, 0, 0)) and the
'interpolatedPoints' must be an integer representing the number of points to interpolate between each
color given in the colors list.
alpha (float, optional): Set transparency of visualization. Value between 0.0 and 1.0 where 1.0 is 100%
opaque and 0.0 is 100% transparent. Defaults to 1.0.
noDataValue (float, optional): The value to treat as no data when generating visualizations of rasters.
Defaults to 0.0.
Returns:
str: KML string |
19,986 | def disable_snapshots(self, volume_id, schedule_type):
return self.client.call(, ,
schedule_type, id=volume_id) | Disables snapshots for a specific block volume at a given schedule
:param integer volume_id: The id of the volume
:param string schedule_type: 'HOURLY'|'DAILY'|'WEEKLY'
:return: Returns whether successfully disabled or not |
19,987 | def normalize_weekly(data):
if "tblMenu" not in data["result_data"]["Document"]:
data["result_data"]["Document"]["tblMenu"] = []
if isinstance(data["result_data"]["Document"]["tblMenu"], dict):
data["result_data"]["Document"]["tblMenu"] = [data["result_data"]["Document"]["tblMenu"]]
for day in data["result_data"]["Document"]["tblMenu"]:
if "tblDayPart" not in day:
continue
if isinstance(day["tblDayPart"], dict):
day["tblDayPart"] = [day["tblDayPart"]]
for meal in day["tblDayPart"]:
if isinstance(meal["tblStation"], dict):
meal["tblStation"] = [meal["tblStation"]]
for station in meal["tblStation"]:
if isinstance(station["tblItem"], dict):
station["tblItem"] = [station["tblItem"]]
return data | Normalization for dining menu data |
19,988 | def normalize_name(name):
if not name or not isinstance(name, basestring):
raise ValueError(+ repr(name))
if len(name) > 1:
name = name.lower()
if name != and in name:
name = name.replace(, )
return canonical_names.get(name, name) | Given a key name (e.g. "LEFT CONTROL"), clean up the string and convert to
the canonical representation (e.g. "left ctrl") if one is known. |
19,989 | def nCr(n, r):
f = math.factorial
return int(f(n) / f(r) / f(n-r)) | Calculates nCr.
Args:
n (int): total number of items.
r (int): items to choose
Returns:
nCr. |
19,990 | def repl_update(self, config):
cfg = config.copy()
cfg[] += 1
try:
result = self.run_command("replSetReconfig", cfg)
if int(result.get(, 0)) != 1:
return False
except pymongo.errors.AutoReconnect:
self.update_server_map(cfg)
self.waiting_member_state()
self.waiting_config_state()
return self.connection() and True | Reconfig Replicaset with new config |
19,991 | def _mean_prediction(self, mu, Y, h, t_z):
Y_exp = Y.copy()
for t in range(0,h):
if self.ar != 0:
Y_exp_normalized = (Y_exp[-self.ar:][::-1] - self._norm_mean) / self._norm_std
new_value = self.predict_new(np.append(1.0, Y_exp_normalized), self.latent_variables.get_z_values())
else:
new_value = self.predict_new(np.array([1.0]), self.latent_variables.get_z_values())
Y_exp = np.append(Y_exp, [self.link(new_value)])
return Y_exp | Creates a h-step ahead mean prediction
Parameters
----------
mu : np.ndarray
The past predicted values
Y : np.ndarray
The past data
h : int
How many steps ahead for the prediction
t_z : np.ndarray
A vector of (transformed) latent variables
Returns
----------
h-length vector of mean predictions |
19,992 | def get_authn_contexts(self):
authn_context_nodes = self.__query_assertion()
return [OneLogin_Saml2_Utils.element_text(node) for node in authn_context_nodes] | Gets the authentication contexts
:returns: The authentication classes for the SAML Response
:rtype: list |
19,993 | def _set_bgp_state(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u: {: 6}, u: {: 4}, u: {: 0}, u: {: 1}, u: {: 3}, u: {: 2}, u: {: 5}},), is_leaf=True, yang_name="bgp-state", rest_name="bgp-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace=, defining_module=, yang_type=, is_config=False)
except (TypeError, ValueError):
raise ValueError({
: ,
: "brocade-bgp-operational:bgp-states",
: ,
})
self.__bgp_state = t
if hasattr(self, ):
self._set() | Setter method for bgp_state, mapped from YANG variable /bgp_state/neighbor/evpn/bgp_state (bgp-states)
If this variable is read-only (config: false) in the
source YANG file, then _set_bgp_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_bgp_state() directly.
YANG Description: BGP state |
19,994 | def validateDocument(self, ctxt):
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.xmlValidateDocument(ctxt__o, self._o)
return ret | Try to validate the document instance basically it does
the all the checks described by the XML Rec i.e. validates
the internal and external subset (if present) and validate
the document tree. |
19,995 | def put_file(client, source_file, destination_file):
try:
sftp_client = client.open_sftp()
sftp_client.put(source_file, destination_file)
except Exception as error:
raise IpaUtilsException(
.format(error)
)
finally:
with ignored(Exception):
sftp_client.close() | Copy file to instance using Paramiko client connection. |
19,996 | def generate_string_to_sign(date, region, canonical_request):
formatted_date_time = date.strftime("%Y%m%dT%H%M%SZ")
canonical_request_hasher = hashlib.sha256()
canonical_request_hasher.update(canonical_request.encode())
canonical_request_sha256 = canonical_request_hasher.hexdigest()
scope = generate_scope_string(date, region)
return .join([_SIGN_V4_ALGORITHM,
formatted_date_time,
scope,
canonical_request_sha256]) | Generate string to sign.
:param date: Date is input from :meth:`datetime.datetime`
:param region: Region should be set to bucket region.
:param canonical_request: Canonical request generated previously. |
19,997 | def _get_ids_from_hostname(self, hostname):
results = self.list_instances(hostname=hostname, mask="id")
return [result[] for result in results] | List VS ids which match the given hostname. |
19,998 | def _init_map(self):
QuestionTextFormRecord._init_map(self)
QuestionFilesFormRecord._init_map(self)
super(QuestionTextAndFilesMixin, self)._init_map() | stub |
19,999 | def _check_import_source():
path_rel =
path = os.path.expanduser(path_rel)
if not os.path.isfile(path):
try:
corpus_importer = CorpusImporter()
corpus_importer.import_corpus()
except Exception as exc:
logger.error(, exc)
raise | Check if tlgu imported, if not import it. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.