Unnamed: 0 (int64, 0–389k) | code (string, 26–79.6k chars) | docstring (string, 1–46.9k chars)
---|---|---|
23,300 | def convolve2d_disk(fn, r, sig, nstep=200):
r = np.array(r, ndmin=1)
sig = np.array(sig, ndmin=1)
rmin = r - sig
rmax = r + sig
rmin[rmin < 0] = 0
delta = (rmax - rmin) / nstep
redge = rmin[..., np.newaxis] + \
delta[..., np.newaxis] * np.linspace(0, nstep, nstep + 1)
rp = 0.5 * (redge[..., 1:] + redge[..., :-1])
dr = redge[..., 1:] - redge[..., :-1]
fnv = fn(rp)
r = r.reshape(r.shape + (1,))
cphi = -np.ones(dr.shape)
m = ((rp + r) / sig < 1) | (r == 0)
rrp = r * rp
sx = r ** 2 + rp ** 2 - sig ** 2
cphi[~m] = sx[~m] / (2 * rrp[~m])
dphi = 2 * np.arccos(cphi)
v = rp * fnv * dphi * dr / (np.pi * sig * sig)
s = np.sum(v, axis=-1)
return s | Evaluate the convolution f'(r) = f(r) * g(r), where f(r) is an
azimuthally symmetric function in two dimensions and g is a
step function given by:
g(r) = H(1 - r/s)
Parameters
----------
fn : function
Input function that takes a single radial coordinate parameter.
r : `~numpy.ndarray`
Array of points at which the convolution is to be evaluated.
sig : float
Radius parameter of the step function.
nstep : int
Number of sampling points for numeric integration. |
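A minimal usage sketch for the row above (the profile function and parameter values here are illustrative, not from the source): convolve a Gaussian radial profile with a disk of radius 0.3.
import numpy as np
fn = lambda r: np.exp(-r**2 / 2.0)        # any azimuthally symmetric profile
r_eval = np.array([0.0, 0.5, 1.0])        # radii at which to evaluate f'(r)
smoothed = convolve2d_disk(fn, r_eval, sig=0.3)
print(smoothed.shape)                     # (3,)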
23,301 | def delete(self, route: str(), callback: object()):
self.__set_route('delete', {route: callback})  # route-table key assumed: 'delete'
return RouteMapping | Binds a DELETE route with the given callback
:rtype: object |
23,302 | def request_connect(self, act, coro):
"Requests a connect for `coro` corutine with parameters and completion \
passed via `act`"
result = self.try_run_act(act, perform_connect)
if result:
return result, coro
else:
self.add_token(act, coro, perform_connect) | Requests a connect for `coro` coroutine with parameters and completion \
passed via `act` |
23,303 | def has_connection(self, i, j):
if ( (self._ccore_network_pointer is not None) and (self._osc_conn is None) ):
self._osc_conn = wrapper.sync_connectivity_matrix(self._ccore_network_pointer);
return super().has_connection(i, j); | !
@brief Returns True if there is connection between i and j oscillators and False - if connection doesn't exist.
@param[in] i (uint): index of an oscillator in the network.
@param[in] j (uint): index of an oscillator in the network. |
23,304 | def _get_jamo_short_name(jamo):
if not _is_jamo(jamo):
raise ValueError("Value 0x%0.4x passed in does not represent a Jamo!" % jamo)
if not _jamo_short_names:
_load_jamo_short_names()
return _jamo_short_names[jamo] | Function for taking a Unicode scalar value representing a Jamo and determining the correct value for its
Jamo_Short_Name property. For more information on the Jamo_Short_Name property see the Unicode Standard,
ch. 03, section 3.12, Conjoining Jamo Behavior.
https://www.unicode.org/versions/latest/ch03.pdf
:param jamo: Unicode scalar value representing a Jamo
:return: Returns a string representing its Jamo_Short_Name property |
23,305 | def p_elision(self, p):
if len(p) == 2:
p[0] = [ast.Elision(p[1])]
else:
p[1].append(ast.Elision(p[2]))
p[0] = p[1] | elision : COMMA
| elision COMMA |
23,306 | def create(self, basedir, outdir, name, prefix=None):
zippath = os.path.join(outdir, '{}.{}'.format(name, self.extension))  # filename pattern assumed
with open_zip(zippath, 'w', compression=self.compression) as zip:
for root, _, files in safe_walk(basedir, followlinks=True):
root = ensure_text(root)
for file in files:
file = ensure_text(file)
full_path = os.path.join(root, file)
relpath = os.path.relpath(full_path, basedir)
if prefix:
relpath = os.path.join(ensure_text(prefix), relpath)
zip.write(full_path, relpath)
return zippath | :API: public |
23,307 | def _job_statistics(self):
statistics = self._properties.get("statistics", {})
return statistics.get(self._JOB_TYPE, {}) | Helper for job-type specific statistics-based properties. |
23,308 | def _add(self, codeobj):
assert isinstance(codeobj, CodeExpression.TYPES)
self.value = codeobj | Add a child (value) to this object. |
23,309 | def _get_exception_class_from_status_code(status_code):
if status_code == '100':  # status code literal elided; '100' assumed to be the non-error code
return None
exc_class = STATUS_CODE_MAPPING.get(status_code)
if not exc_class:
raise ValueError('Unknown status code: %s' % status_code)  # recovery branch assumed
else:
return exc_class | Utility function that accepts a status code, and spits out a reference
to the correct exception class to raise.
:param str status_code: The status code to return an exception class for.
:rtype: PetfinderAPIError or None
:returns: The appropriate PetfinderAPIError subclass. If the status code
is not an error, return ``None``. |
23,310 | def get_all_adv_settings() -> Dict[str, Dict[str, Union[str, bool, None]]]:
settings_file = CONFIG['feature_flags_file']  # config key name assumed
values, _ = _read_settings_file(settings_file)
return {
key: {**settings_by_id[key].__dict__,
'value': value}
for key, value in values.items()
} | :return: a dict of settings keyed by setting ID, where each value is a
dict with keys "id", "title", "description", and "value" |
23,311 | def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"additional_condition": self._additional_condition,
"symmetry_measure_type": self.symmetry_measure_type,
"nb_set_weights": [nb_set_weight.as_dict() for nb_set_weight in self.nb_set_weights],
"ce_estimator": self.ce_estimator,
} | Bson-serializable dict representation of the WeightedNbSetChemenvStrategy object.
:return: Bson-serializable dict representation of the WeightedNbSetChemenvStrategy object. |
23,312 | def get_bac(age, weight, height, sex, volume, percent):
return gramm_to_promille(
calculate_alcohol(volume, percent),
age, weight, height, sex
) | Returns the *Blood Alcohol Content* (in promille) for a person (described by
the given attributes) after a drink containing *volume* ml of alcohol with
the given *percent* (vol/vol). |
23,313 | def get_full_angles(self):
if (self.sun_azi is not None and self.sun_zen is not None and
self.sat_azi is not None and self.sat_zen is not None):
return self.sun_azi, self.sun_zen, self.sat_azi, self.sat_zen
self.sun_azi, self.sun_zen, self.sat_azi, self.sat_zen = self._get_full_angles()
self.sun_azi = da.from_delayed(self.sun_azi, dtype=self["ANGULAR_RELATIONS"].dtype,
shape=(self.scanlines, self.pixels))
self.sun_zen = da.from_delayed(self.sun_zen, dtype=self["ANGULAR_RELATIONS"].dtype,
shape=(self.scanlines, self.pixels))
self.sat_azi = da.from_delayed(self.sat_azi, dtype=self["ANGULAR_RELATIONS"].dtype,
shape=(self.scanlines, self.pixels))
self.sat_zen = da.from_delayed(self.sat_zen, dtype=self["ANGULAR_RELATIONS"].dtype,
shape=(self.scanlines, self.pixels))
return self.sun_azi, self.sun_zen, self.sat_azi, self.sat_zen | Get the interpolated sun and satellite angles. |
23,314 | def save_thumbnail(self, thumbnail):
filename = thumbnail.name
try:
self.thumbnail_storage.delete(filename)
except Exception:
pass
self.thumbnail_storage.save(filename, thumbnail)
thumb_cache = self.get_thumbnail_cache(
thumbnail.name, create=True, update=True)
if settings.THUMBNAIL_CACHE_DIMENSIONS:
dimensions_cache, created = (
models.ThumbnailDimensions.objects.get_or_create(
thumbnail=thumb_cache,
defaults={'width': thumbnail.width,
'height': thumbnail.height}))
if not created:
dimensions_cache.width = thumbnail.width
dimensions_cache.height = thumbnail.height
dimensions_cache.save()
signals.thumbnail_created.send(sender=thumbnail) | Save a thumbnail to the thumbnail_storage.
Also triggers the ``thumbnail_created`` signal and caches the
thumbnail values and dimensions for future lookups. |
23,315 | def set_portfast(self, name, value=None, default=False, disable=False):
if value is False:
disable = True
string = 'spanning-tree portfast'  # CLI command string assumed
cmds = self.command_builder(string, value=value, default=default,
disable=disable)
return self.configure_interface(name, cmds) | Configures the portfast value for the specified interface
Args:
name (string): The interface identifier to configure. The name
must be the full interface name (eg Ethernet1, not Et1)
value (bool): True if portfast is enabled otherwise False
default (bool): Configures the portfast parameter to its default
value using the EOS CLI default config command
disable (bool): Negates the portfast parameter using the EOS
CLI no config command
Returns:
True if the command succeeds, otherwise False
Raises:
ValueError: Raised if an invalid interface name is specified
TypeError: Raised if the value keyword argument does not evaluate
to a valid boolean |
23,316 | def _make_weirdness_regex():
groups = []
# the five original pattern strings were elided in extraction; left as gaps
groups.append(r'')
groups.append(r'')
groups.append(r'')
groups.append(r'')
groups.append(r'')
return re.compile('|'.join(groups)) | Creates a list of regexes that match 'weird' character sequences.
The more matches there are, the weirder the text is. |
23,317 | def _notebook_model_from_db(self, record, content):
path = to_api_path(record['parent_name'] + record['name'])  # record/model keys in this block assumed
model = base_model(path)
model['type'] = 'notebook'
model['last_modified'] = model['created'] = record['created_at']
if content:
content = reads_base64(record['content'])
self.mark_trusted_cells(content, path)
model['content'] = content
model['format'] = 'json'
self.validate_notebook_model(model)
return model | Build a notebook model from database record. |
23,318 | def list_parameters(self, parameter_type=None, page_size=None):
params = {'details': True}  # query parameter names in this block assumed
if parameter_type is not None:
params['type'] = parameter_type
if page_size is not None:
params['limit'] = page_size
return pagination.Iterator(
client=self._client,
path='/mdb/{}/parameters'.format(self._instance),  # endpoint path assumed
params=params,
response_class=mdb_pb2.ListParametersResponse,
items_key='parameters',  # response field name assumed
item_mapper=Parameter,
) | Lists the parameters visible to this client.
Parameters are returned in lexicographical order.
:param str parameter_type: The type of parameter
:rtype: :class:`.Parameter` iterator |
23,319 | def _platform_patterns(self, platform='generic', compiled=False):  # default platform name assumed
patterns = self._dict_compiled.get(platform, None) if compiled else self._dict_text.get(platform, None)
if patterns is None:
raise KeyError("Unknown platform: {}".format(platform))
return patterns | Return all the patterns for specific platform. |
23,320 | def delete_maintenance_window(self, id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_maintenance_window_with_http_info(id, **kwargs)
else:
(data) = self.delete_maintenance_window_with_http_info(id, **kwargs)
return data | Delete a specific maintenance window # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_maintenance_window(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainerMaintenanceWindow
If the method is called asynchronously,
returns the request thread. |
23,321 | def _tryConnect(src, unit, intfName):
try:
dst = getattr(unit, intfName)
except AttributeError:
return
if not dst._sig.drivers:
connect(src, dst) | Try connect src to interface of specified name on unit.
Ignore if interface is not present or if it already has driver. |
23,322 | def _find_known(row):
out = []
clinvar_no = set(["unknown", "untested", "non-pathogenic", "probable-non-pathogenic",
"uncertain_significance", "uncertain_significance", "not_provided",
"benign", "likely_benign"])
if row["cosmic_ids"] or row["cosmic_id"]:
out.append("cosmic")
if row["clinvar_sig"] and not row["clinvar_sig"].lower() in clinvar_no:
out.append("clinvar")
return out | Find variant present in known pathogenic databases. |
23,323 | def _add_zone(self, zone, name='', status=Zone.CLEAR, expander=False):
if not zone in self._zones:
self._zones[zone] = Zone(zone=zone, name=name, status=None, expander=expander)
self._update_zone(zone, status=status) | Adds a zone to the internal zone list.
:param zone: zone number
:type zone: int
:param name: human readable zone name
:type name: string
:param status: zone status
:type status: int |
23,324 | def bin_remove(self):
packages = self.args[1:]
options = [
"-r",
"--removepkg"
]
additional_options = [
"--deps",
"--check-deps",
"--tag",
"--checklist"
]
flag, extra = "", []
flags = [
"-warn",
"-preserve",
"-copy",
"-keep"
]
if (additional_options[1] in self.args and
additional_options[0] not in self.args):
self.args.append(additional_options[0])
if len(self.args) > 1 and self.args[0] in options:
for additional in additional_options:
if additional in self.args:
extra.append(additional)
self.args.remove(additional)
packages = self.args[1:]
for fl in flags:
if fl in self.args:
flag = self.args[1]
packages = self.args[2:]
PackageManager(packages).remove(flag, extra)
else:
usage("") | Remove Slackware packages |
23,325 | def roles_remove(user, role):
user, role = _datastore._prepare_role_modify_args(user, role)
if user is None:
raise click.UsageError('User not found.')  # message assumed
if role is None:
raise click.UsageError('Role not found.')  # message assumed
if _datastore.remove_role_from_user(user, role):
click.secho(
'Role "{0}" removed from user "{1}" successfully.'.format(role, user), fg='green')  # text and color assumed
else:
raise click.UsageError('Cannot remove role from user.') | Remove user from role. |
23,326 | def url_defaults(self, func: Callable, name: AppOrBlueprintKey=None) -> Callable:
self.url_default_functions[name].append(func)
return func | Add a url default preprocessor.
This is designed to be used as a decorator. An example usage,
.. code-block:: python
@app.url_defaults
def default(endpoint, values):
... |
23,327 | def get_mac_acl_for_intf_input_interface_name(self, **kwargs):
config = ET.Element("config")
get_mac_acl_for_intf = ET.Element("get_mac_acl_for_intf")
config = get_mac_acl_for_intf
input = ET.SubElement(get_mac_acl_for_intf, "input")
interface_name = ET.SubElement(input, "interface-name")
interface_name.text = kwargs.pop('interface_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
23,328 | def _prepare_nameparser_constants():
constants = Constants()
roman_numeral_suffixes = [u'I', u'II', u'III', u'IV', u'V', u'VI',
u'VII', u'VIII', u'IX', u'X']  # exact suffix strings assumed (ten were elided in extraction)
titles = []  # the thirteen original title strings were elided in extraction; left as a gap
constants.titles.remove(*constants.titles).add(*titles)
constants.suffix_not_acronyms.add(*roman_numeral_suffixes)
return constants | Prepare nameparser Constants.
Remove nameparser's titles and use our own and add as suffixes the roman numerals.
Configuration is the same for all names (i.e. instances). |
23,329 | def run_sequential(self):
try:
result = self.empty_result(*self.context)
for obj in self.iterable:
r = self.compute(obj, *self.context)
result = self.process_result(r, result)
self.progress.update(1)
if self.done:
break
except Exception as e:
raise e
finally:
self.progress.close()
return result | Perform the computation sequentially, only holding two computed
objects in memory at a time. |
23,330 | def merge_plugin_from_baseline(baseline_plugins, args):
def _remove_key(d, key):
r = dict(d)
r.pop(key)
return r
baseline_plugins_dict = {
vars(plugin)["name"]: _remove_key(vars(plugin), "name")
for plugin in baseline_plugins
}
if args.use_all_plugins:
plugins_dict = dict(args.plugins)
for plugin_name, param_name, param_value in _get_prioritized_parameters(
baseline_plugins_dict,
args.is_using_default_value,
prefer_default=True,
):
try:
plugins_dict[plugin_name][param_name] = param_value
except KeyError:
log.warning(
'Baseline contains plugin %s which is not a valid plugin; skipping.'  # message assumed
% (plugin_name),
)
return from_parser_builder(
plugins_dict,
exclude_lines_regex=args.exclude_lines,
)
disabled_plugins = PluginOptions.get_disabled_plugins(args)
plugins_dict = {
plugin_name: plugin_params
for plugin_name, plugin_params in baseline_plugins_dict.items()
if plugin_name not in disabled_plugins
}
input_plugins_dict = dict(args.plugins)
for plugin_name, param_name, param_value in _get_prioritized_parameters(
input_plugins_dict,
args.is_using_default_value,
prefer_default=False,
):
try:
plugins_dict[plugin_name][param_name] = param_value
except KeyError:
log.warning(
'%s specified, but %s not configured! Ignoring...'  # message assumed
% ("".join(["--", param_name.replace("_", "-")]), plugin_name),
)
return from_parser_builder(
plugins_dict,
exclude_lines_regex=args.exclude_lines,
) | :type baseline_plugins: tuple of BasePlugin
:param baseline_plugins: BasePlugin instances from baseline file
:type args: dict
:param args: dictionary of arguments parsed from usage
param priority: input param > baseline param > default
:Returns tuple of initialized plugins |
23,331 | def _get_consecutive_portions_of_front(front):
last_f = None
ls = []
for f, s in front:
if last_f is not None and f != last_f + 1:
yield ls
ls = []
ls.append((f, s))
last_f = f
yield ls | Yields lists of the form [(f, s), (f, s)], one at a time from the given front (which is a list of the same form),
such that each list yielded is consecutive in frequency. |
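A quick illustration of the generator above (values made up): runs that are consecutive in frequency come out as one list, and a gap starts a new one.
front = [(3, 'a'), (4, 'b'), (7, 'c'), (8, 'd')]
print(list(_get_consecutive_portions_of_front(front)))
# -> [[(3, 'a'), (4, 'b')], [(7, 'c'), (8, 'd')]]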
23,332 | def batch_per(hyps: Sequence[Sequence[T]],
refs: Sequence[Sequence[T]]) -> float:
macro_per = 0.0
for i in range(len(hyps)):
ref = [phn_i for phn_i in refs[i] if phn_i != 0]
hyp = [phn_i for phn_i in hyps[i] if phn_i != 0]
macro_per += distance.edit_distance(ref, hyp)/len(ref)
return macro_per/len(hyps) | Calculates the phoneme error rate of a batch. |
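A worked call for the row above, assuming the module-level `distance.edit_distance` helper is importable; 0 entries are treated as padding and stripped.
hyps = [[1, 2, 3, 0], [5, 6]]
refs = [[1, 2, 4], [5, 6, 0]]
# edit distances: 1 and 0; reference lengths: 3 and 2
# PER = (1/3 + 0/2) / 2 ~= 0.1667
print(batch_per(hyps, refs))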
23,333 | def list_migration_issues_accounts(self, account_id, content_migration_id):
path = {}
data = {}
params = {}
path["account_id"] = account_id
path["content_migration_id"] = content_migration_id
self.logger.debug("GET /api/v1/accounts/{account_id}/content_migrations/{content_migration_id}/migration_issues with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/accounts/{account_id}/content_migrations/{content_migration_id}/migration_issues".format(**path), data=data, params=params, all_pages=True) | List migration issues.
Returns paginated migration issues |
23,334 | def read(self, size=None):
while size is None or len(self.buffer) < size:
try:
self.buffer += next(self.data_stream)
except StopIteration:
break
sized_chunk = self.buffer[:size]
if size is None:
self.buffer = ""
else:
self.buffer = self.buffer[size:]
return sized_chunk | Read bytes from an iterator. |
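Usage sketch for the buffered `read` above; the wrapper class name is hypothetical, it just needs to be constructed with `data_stream` set to an iterator of strings.
reader = IterStreamReader(iter(["hel", "lo ", "world"]))  # class name hypothetical
print(reader.read(5))   # 'hello'
print(reader.read())    # ' world' (size=None drains the stream)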
23,335 | def number(type=None, length=None, prefixes=None):
if type and type in CARDS:
card = type
else:
card = random.choice(list(CARDS.keys()))
if not length:
length = CARDS[card]['length']  # key name assumed
prefix = random.choice(prefixes or CARDS[card]['prefixes'])  # missing prefix selection reconstructed; keys assumed
result = str(prefix)
for d in range(length - len(str(prefix))):
result += str(basic.number())
last_digit = check_digit(int(result))
return int(result[:-1] + str(last_digit)) | Return a random credit card number.
:param type: credit card type. Defaults to a random selection.
:param length: length of the credit card number.
Defaults to the length for the selected card type.
:param prefixes: allowed prefixes for the card number.
Defaults to prefixes for the selected card type.
:return: credit card randomly generated number (int) |
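Hypothetical calls to the generator above; the available type names depend on the module's CARDS table, which this row does not show.
print(number())                         # random type, e.g. a 16-digit int
print(number(type='visa'))              # assumes 'visa' is a key in CARDS
print(number(length=16, prefixes=[4]))  # force a Visa-style leading 4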
23,336 | def record_exists(self, table, keys):
keyspace = self.keyspace
if '.' in table:
keyspace, table = table.split('.', 1)
ks = " AND ".join("{}=%({})s".format(key, key) for key in keys.keys())
cql = "SELECT * FROM {keyspace}.{table} WHERE {keys}".format(
keyspace=keyspace, table=table, keys=ks)
try:
rs = self.get_conn().execute(cql, keys)
return rs.one() is not None
except Exception:
return False | Checks if a record exists in Cassandra
:param table: Target Cassandra table.
Use dot notation to target a specific keyspace.
:type table: str
:param keys: The keys and their values to check the existence.
:type keys: dict |
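Usage sketch for the hook method above; this looks like Airflow 1.x's Cassandra hook, so the import path and connection id below are assumptions.
from airflow.contrib.hooks.cassandra_hook import CassandraHook  # path assumed
hook = CassandraHook(cassandra_conn_id='cassandra_default')
print(hook.record_exists('mykeyspace.users', {'id': 123}))   # dot notation picks the keyspace
print(hook.record_exists('users', {'id': 123}))              # falls back to self.keyspace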
23,337 | def iget(self, irods_path, attempts=1, pause=15):
if attempts > 1:
cmd = 'for i in $(seq 1 {0}); do iget {1} && break; sleep {2}; done'  # shell retry template assumed; original elided
cmd = lstrip(cmd)
cmd = cmd.format(attempts, irods_path, pause)
self.add(cmd)
else:
self.add('iget {}'.format(irods_path)) | Add an iget command to retrieve a file from iRODS.
Parameters
----------
irods_path: str
Filepath which should be fetched using iget
attempts: int (default: 1)
Number of retries, if iRODS access fails
pause: int (default: 15)
Pause between two access attempts in seconds |
23,338 | def openRtpPort(self):
self.rtpSocket.settimeout(0.5)
try:
self.rtpSocket.bind((self.serverAddr,self.rtpPort))
print("Bind RtpPort Success")
except:
tkinter.messagebox.showwarning('Unable to Bind', 'Unable to bind PORT=%d' % self.rtpPort) | Open an RTP socket bound to the specified port. |
23,339 | def ClearAllVar(self):
self.value = None
_ = [option.OnClearAllVar() for option in self.options] | Clear this Value. |
23,340 | def fetchall(self):
result = self.query.result()
return [row.values() for row in result] | Fetch all rows. |
23,341 | def GaussianCdfInverse(p, mu=0, sigma=1):
x = ROOT2 * erfinv(2 * p - 1)
return mu + x * sigma | Evaluates the inverse CDF of the gaussian distribution.
See http://en.wikipedia.org/wiki/Normal_distribution#Quantile_function
Args:
p: float
mu: mean parameter
sigma: standard deviation parameter
Returns:
float |
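Two spot checks for the function above (it relies on the module's `ROOT2` and `erfinv`): the median maps back to the mean, and p = 0.975 gives the familiar 1.96 for a standard normal.
print(GaussianCdfInverse(0.5, mu=10, sigma=2))   # 10.0, since erfinv(0) == 0
print(GaussianCdfInverse(0.975))                 # ~1.96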
23,342 | def get_index_translog_disable_flush(self):
disabled = {}
settings = self.get()
setting_getters = [
lambda s: s['index.translog.disable_flush'],  # flat key form assumed
lambda s: s['index']['translog']['disable_flush']]  # nested key form assumed
for idx in settings:
idx_settings = settings[idx]['settings']  # key assumed from the ES settings API shape
for getter in setting_getters:
try:
disabled[idx] = booleanise(getter(idx_settings))
except KeyError as e:
pass
if idx not in disabled:
disabled[idx] = 'unknown'
return disabled | Return a dictionary showing the position of the
'translog.disable_flush' knob for each index in the cluster.
The dictionary will look like this:
{
"index1": True, # Autoflushing DISABLED
"index2": False, # Autoflushing ENABLED
"index3": "unknown", # Using default setting (probably enabled)
...
} |
23,343 | def kakwani(values, ineq_axis, weights = None):
from scipy.integrate import simps
if weights is None:
weights = ones(len(values))
PLCx, PLCy = pseudo_lorenz(values, ineq_axis, weights)
LCx, LCy = lorenz(ineq_axis, weights)
del PLCx
return simps((LCy - PLCy), LCx) | Computes the Kakwani index |
23,344 | def dequeue(self, destination):
if not self.has_frames(destination):
return None
message_id = self.queue_metadata[destination]['frames'].pop()  # metadata key assumed
self.queue_metadata[destination]['dequeued'] += 1  # counter key assumed
frame = self.frame_store[message_id]
del self.frame_store[message_id]
self._opcount += 1
self._sync()
return frame | Removes and returns an item from the queue (or C{None} if no items in queue).
@param destination: The queue name (destination).
@type destination: C{str}
@return: The first frame in the specified queue, or C{None} if there are none.
@rtype: C{stompclient.frame.Frame} |
23,345 | def determine_extra_packages(self, packages):
args = [
"pip",
"freeze",
]
installed = subprocess.check_output(args, universal_newlines=True)
installed_list = set()
lines = installed.strip().split('\n')
for (package, version) in self._parse_requirements(lines):
installed_list.add(package)
package_list = set()
for (package, version) in self._parse_requirements(packages.readlines()):
package_list.add(package)
removal_list = installed_list - package_list
return tuple(removal_list) | Return all packages that are installed, but missing from "packages".
Return value is a tuple of the package names |
23,346 | def watch(static_root, watch_paths=None, on_reload=None, host='localhost', port=5555, server_base_path="/",
watcher_interval=1.0, recursive=True, open_browser=True, open_browser_delay=1.0):
server = httpwatcher.HttpWatcherServer(
static_root,
watch_paths=watch_paths,
on_reload=on_reload,
host=host,
port=port,
server_base_path=server_base_path,
watcher_interval=watcher_interval,
recursive=recursive,
open_browser=open_browser,
open_browser_delay=open_browser_delay
)
server.listen()
try:
tornado.ioloop.IOLoop.current().start()
except KeyboardInterrupt:
server.shutdown() | Initialises an HttpWatcherServer to watch the given path for changes. Watches until the IO loop
is terminated, or a keyboard interrupt is intercepted.
Args:
static_root: The path whose contents are to be served and watched.
watch_paths: The paths to be watched for changes. If not supplied, this defaults to the static root.
on_reload: An optional callback to pass to the watcher server that will be executed just before the
server triggers a reload in connected clients.
host: The host to which to bind our server.
port: The port to which to bind our server.
server_base_path: If the content is to be served from a non-standard base path, specify it here.
watcher_interval: The maximum refresh rate of the watcher server.
recursive: Whether to monitor the watch path recursively.
open_browser: Whether or not to automatically attempt to open the user's browser at the root URL of
the project (default: True).
open_browser_delay: The number of seconds to wait before attempting to open the user's browser. |
23,347 | def later(timeout, f, *args, **kwargs):
# runs in a fresh greenlet ("thread"); original decorator/marker elided in extraction
def wrap(*args, **kwargs):
sleep(timeout)
return f(*args, **kwargs)
return spawn(wrap, *args, **kwargs) | Sets a timer that will call the *f* function past *timeout* seconds.
See example in :ref:`sample_inter`
:return: :class:`Greenlet` new 'thread' which will perform the call
when specified. |
23,348 | def _flow_check_handler_internal(self):
integ_flow = self.integ_br_obj.dump_flows_for(
in_port=self.int_peer_port_num)
ext_flow = self.ext_br_obj.dump_flows_for(
in_port=self.phy_peer_port_num)
for net_uuid, lvm in six.iteritems(self.local_vlan_map):
vdp_vlan = lvm.any_consistent_vlan()
flow_required = False
if not (vdp_vlan and ovs_lib.is_valid_vlan_tag(vdp_vlan)):
return
if not self._check_bridge_flow(integ_flow, vdp_vlan, lvm.lvid):
LOG.error("Flow for VDP Vlan %(vdp_vlan)s, Local vlan "
"%(lvid)s not present on Integ bridge",
{'vdp_vlan': vdp_vlan, 'lvid': lvm.lvid})
flow_required = True
if not self._check_bridge_flow(ext_flow, lvm.lvid, vdp_vlan):
LOG.error("Flow for VDP Vlan %(vdp_vlan)s, Local vlan "
"%(lvid)s not present on External bridge",
{'vdp_vlan': vdp_vlan, 'lvid': lvm.lvid})
flow_required = True
if flow_required:
LOG.info("Programming flows for lvid %(lvid)s vdp vlan"
" %(vdp)s",
{'lvid': lvm.lvid, 'vdp': vdp_vlan})
self.program_vm_ovs_flows(lvm.lvid, 0, vdp_vlan) | Periodic handler to check if installed flows are present.
This handler runs periodically to check if installed flows are present.
This function cannot detect and delete the stale flows, if present.
It requires more complexity to delete stale flows. Generally, stale
flows are not present. So, that logic is not put here. |
23,349 | def render(self, template, **data):
dct = self.global_data.copy()
dct.update(data)
try:
html = self.env.get_template(template).render(**dct)
except TemplateNotFound:
raise JinjaTemplateNotFound
return html | Render data with template, return html unicodes.
parameters
template str the template's filename
data dict the data to render |
23,350 | def delete_audio_mp3_profile(apps, schema_editor):
Profile = apps.get_model('edxval', 'Profile')  # app label and model name assumed
Profile.objects.filter(profile_name=AUDIO_MP3_PROFILE).delete() | Delete audio_mp3 profile |
23,351 | def delete(self, moveFixIssuesTo=None, moveAffectedIssuesTo=None):
params = {}
if moveFixIssuesTo is not None:
params['moveFixIssuesTo'] = moveFixIssuesTo
if moveAffectedIssuesTo is not None:
params['moveAffectedIssuesTo'] = moveAffectedIssuesTo
return super(Version, self).delete(params) | Delete this project version from the server.
If neither of the arguments are specified, the version is
removed from all issues it is attached to.
:param moveFixIssuesTo: in issues for which this version is a fix
version, add this argument version to the fix version list
:param moveAffectedIssuesTo: in issues for which this version is an
affected version, add this argument version to the affected version list |
23,352 | def _init_properties(self):
super(BaseCRUDView, self)._init_properties()
self.related_views = self.related_views or []
self._related_views = self._related_views or []
self.description_columns = self.description_columns or {}
self.validators_columns = self.validators_columns or {}
self.formatters_columns = self.formatters_columns or {}
self.add_form_extra_fields = self.add_form_extra_fields or {}
self.edit_form_extra_fields = self.edit_form_extra_fields or {}
self.show_exclude_columns = self.show_exclude_columns or []
self.add_exclude_columns = self.add_exclude_columns or []
self.edit_exclude_columns = self.edit_exclude_columns or []
list_cols = self.datamodel.get_user_columns_list()
self.list_columns = self.list_columns or [list_cols[0]]
self._gen_labels_columns(self.list_columns)
self.order_columns = (
self.order_columns or
self.datamodel.get_order_columns_list(list_columns=self.list_columns)
)
if self.show_fieldsets:
self.show_columns = []
for fieldset_item in self.show_fieldsets:
self.show_columns = self.show_columns + list(
fieldset_item[1].get("fields")
)
else:
if not self.show_columns:
self.show_columns = [
x for x in list_cols if x not in self.show_exclude_columns
]
if self.add_fieldsets:
self.add_columns = []
for fieldset_item in self.add_fieldsets:
self.add_columns = self.add_columns + list(
fieldset_item[1].get("fields")
)
else:
if not self.add_columns:
self.add_columns = [
x for x in list_cols if x not in self.add_exclude_columns
]
if self.edit_fieldsets:
self.edit_columns = []
for fieldset_item in self.edit_fieldsets:
self.edit_columns = self.edit_columns + list(
fieldset_item[1].get("fields")
)
else:
if not self.edit_columns:
self.edit_columns = [
x for x in list_cols if x not in self.edit_exclude_columns
] | Init Properties |
23,353 | def crystal(positions, molecules, group,
cellpar=[1.0, 1.0, 1.0, 90, 90, 90], repetitions=[1, 1, 1]):
sp = Spacegroup(group)
sites, kind = sp.equivalent_sites(positions)
nx, ny, nz = repetitions
reptot = nx*ny*nz
a,b,c = cellpar_to_cell(cellpar)
cry = System()
i = 0
with cry.batch() as batch:
for x in range(nx):
for y in range(ny):
for z in range(nz):
for s, ki in zip(sites, kind):
tpl = molecules[ki]
tpl.move_to(s[0]*a +s[1]*b + s[2]*c + a*x + b*y + c*z)
batch.append(tpl.copy())
cry.box_vectors = np.array([a*nx, b*ny, c*nz])
return cry | Build a crystal from atomic positions, space group and cell
parameters.
**Parameters**
positions: list of coordinates
A list of the atomic positions
molecules: list of Molecule
The molecules corresponding to the positions, the molecule will be
translated in all the equivalent positions.
group: int | str
Space group given either as its number in International Tables
or as its Hermann-Mauguin symbol.
repetitions:
Repetition of the unit cell in each direction
cellpar:
Unit cell parameters
This function was taken and adapted from the *spacegroup* module
found in `ASE <https://wiki.fysik.dtu.dk/ase/>`_.
The *spacegroup* module was originally developed by Jesper
Friis. |
23,354 | def _Open(self, path_spec, mode='rb'):
if not path_spec.HasParent():
raise errors.PathSpecError(
'Unsupported path specification without parent.')  # message assumed
file_object = resolver.Resolver.OpenFileObject(
path_spec.parent, resolver_context=self._resolver_context)
try:
zip_file = zipfile.ZipFile(file_object, 'r')  # read mode assumed
except:
file_object.close()
raise
self._file_object = file_object
self._zip_file = zip_file | Opens the file system object defined by path specification.
Args:
path_spec (PathSpec): path specification of the file system.
mode (Optional[str]): file access mode. The default is 'rb' which
represents read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid. |
23,355 | def analyze_xml(xml):
f = StringIO(xml)
try:
xml = packtools.XMLValidator.parse(f, sps_version='sps-1.1')  # version string elided; value assumed
except packtools.exceptions.PacktoolsError as e:
logger.exception(e)
summary = {}
# original summary key names were elided in extraction; placeholders assumed:
summary['parsed'] = False
summary['dtd_valid'] = False
summary['style_valid'] = False
summary['is_broken'] = True
summary['parsing_errors'] = []
summary['style_errors'] = []
return summary
except XMLSyntaxError as e:
logger.exception(e)
summary = {}
summary['parsed'] = False          # same assumed keys as above
summary['dtd_valid'] = False
summary['style_valid'] = False
summary['is_broken'] = True
summary['parsing_errors'] = [e.message]
summary['style_errors'] = []
return summary
else:
summary = summarize(xml)
return summary | Analyzes `file` against packtools' XMLValidator. |
23,356 | def revoke(self, only_access=False):
if only_access or self.refresh_token is None:
super(Authorizer, self).revoke()
else:
self._authenticator.revoke_token(
self.refresh_token, "refresh_token"
)
self._clear_access_token()
self.refresh_token = None | Revoke the current Authorization.
:param only_access: (Optional) When explicitly set to True, do not
evict the refresh token if one is set.
Revoking a refresh token will in-turn revoke all access tokens
associated with that authorization. |
23,357 | def parse_quadrant_measurement(quad_azimuth):
def rotation_direction(first, second):
return np.cross(_azimuth2vec(first), _azimuth2vec(second))
quad_azimuth = quad_azimuth.strip()
try:
first_dir = quadrantletter_to_azimuth(quad_azimuth[0].upper())
sec_dir = quadrantletter_to_azimuth(quad_azimuth[-1].upper())
except KeyError:
raise ValueError('{} is not a valid quadrant measurement'.format(quad_azimuth))  # message assumed
angle = float(quad_azimuth[1:-1])
direc = rotation_direction(first_dir, sec_dir)
azi = first_dir + direc * angle
if abs(direc) < 0.9:
raise ValueError('{} is an ambiguous quadrant measurement'.format(quad_azimuth))  # message assumed
if azi < 0:
azi += 360
elif azi > 360:
azi -= 360
return azi | Parses a quadrant measurement of the form "AxxB", where A and B are cardinal
directions and xx is an angle measured relative to those directions.
In other words, it converts a measurement such as E30N into an azimuth of
60 degrees, or W10S into an azimuth of 260 degrees.
For ambiguous quadrant measurements such as "N30S", a ValueError is raised.
Parameters
-----------
quad_azimuth : string
An azimuth measurement in quadrant form.
Returns
-------
azi : float
An azimuth in degrees clockwise from north.
See Also
--------
parse_azimuth |
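The docstring's own examples, as runnable checks:
print(parse_quadrant_measurement('E30N'))   # 60.0
print(parse_quadrant_measurement('W10S'))   # 260.0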
23,358 | def _run_tRNA_scan(fasta_file):
out_file = fasta_file + "_trnascan"
se_file = fasta_file + "_second_str"
cmd = "tRNAscan-SE -q -o {out_file} -f {se_file} {fasta_file}"
run(cmd.format(**locals()))
return out_file, se_file | Run tRNA-scan-SE to predict tRNA |
23,359 | def register(
model,
app=None,
manager_name="history",
records_class=None,
table_name=None,
**records_config
):
from . import models
if records_class is None:
records_class = models.HistoricalRecords
records = records_class(**records_config)
records.manager_name = manager_name
records.table_name = table_name
records.module = app and ("%s.models" % app) or model.__module__
records.cls = model
records.add_extra_methods(model)
records.finalize(model) | Create historical model for `model` and attach history manager to `model`.
Keyword arguments:
app -- App to install historical model into (defaults to model.__module__)
manager_name -- class attribute name to use for historical manager
records_class -- class to use for history relation (defaults to
HistoricalRecords)
table_name -- Custom name for history table (defaults to
'APPNAME_historicalMODELNAME')
This method should be used as an alternative to attaching an
`HistoricalManager` instance directly to `model`. |
23,360 | def _getZoomLevelRange(self, resolution, unit='meters'):  # default unit assumed
"Return lower and higher zoom level given a resolution"
assert unit in ('meters', 'degrees')  # unit tokens assumed
if unit == 'meters' and self.unit == 'degrees':
resolution = resolution / self.metersPerUnit
elif unit == 'degrees' and self.unit == 'meters':
resolution = resolution * EPSG4326_METERS_PER_UNIT
lo = 0
hi = len(self.RESOLUTIONS)
while lo < hi:
mid = (lo + hi) // 2
if resolution > self.RESOLUTIONS[mid]:
hi = mid
else:
lo = mid + 1
return lo, hi | Return lower and higher zoom level given a resolution |
23,361 | def ReadUserDefinedFunction(self, udf_link, options=None):
if options is None:
options = {}
path = base.GetPathFromLink(udf_link)
udf_id = base.GetResourceIdOrFullNameFromLink(udf_link)
return self.Read(path, 'udfs', udf_id, None, options) | Reads a user defined function.
:param str udf_link:
The link to the user defined function.
:param dict options:
The request options for the request.
:return:
The read UDF.
:rtype:
dict |
23,362 | async def rt_connect(self, loop):
if self.sub_manager is not None:
return
self.sub_manager = SubscriptionManager(
loop, "token={}".format(self._access_token), SUB_ENDPOINT
)
self.sub_manager.start() | Start subscription manager for real time data. |
23,363 | def iteration(self, node_status=True):
self.clean_initial_status(self.available_statuses.values())
actual_status = {node: nstatus for node, nstatus in future.utils.iteritems(self.status)}
if self.actual_iteration == 0:
self.actual_iteration += 1
delta, node_count, status_delta = self.status_delta(actual_status)
if node_status:
return {"iteration": 0, "status": actual_status.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": 0, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
for u in self.graph.nodes():
if actual_status[u] == 1:
continue
neighbors = list(self.graph.neighbors(u))
if isinstance(self.graph, nx.DiGraph):
neighbors = list(self.graph.predecessors(u))
infected = 0
for v in neighbors:
infected += self.status[v]
if len(neighbors) > 0:
infected_ratio = float(infected)/len(neighbors)
if infected_ratio >= self.params['nodes']['threshold'][u]:
actual_status[u] = 1
delta, node_count, status_delta = self.status_delta(actual_status)
self.status = actual_status
self.actual_iteration += 1
if node_status:
return {"iteration": self.actual_iteration - 1, "status": delta.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": self.actual_iteration - 1, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()} | Execute a single model iteration
:return: Iteration_id, Incremental node status (dictionary node->status) |
23,364 | def on_connection_close(self) -> None:
if _has_stream_request_body(self.__class__):
if not self.request._body_future.done():
self.request._body_future.set_exception(iostream.StreamClosedError())
self.request._body_future.exception() | Called in async handlers if the client closed the connection.
Override this to clean up resources associated with
long-lived connections. Note that this method is called only if
the connection was closed during asynchronous processing; if you
need to do cleanup after every request override `on_finish`
instead.
Proxies may keep a connection open for a time (perhaps
indefinitely) after the client has gone away, so this method
may not be called promptly after the end user closes their
connection. |
23,365 | def split_input(cls, mapper_spec, _reader=blobstore.BlobReader):
params = _get_params(mapper_spec)
blob_keys = params[cls.BLOB_KEYS_PARAM]
if isinstance(blob_keys, basestring):
blob_keys = blob_keys.split(",")
blob_files = {}
total_size = 0
for blob_key in blob_keys:
zip_input = zipfile.ZipFile(_reader(blob_key))
blob_files[blob_key] = zip_input.infolist()
total_size += sum(x.file_size for x in blob_files[blob_key])
shard_count = min(cls._MAX_SHARD_COUNT, mapper_spec.shard_count)
size_per_shard = total_size // shard_count
readers = []
for blob_key in blob_keys:
bfiles = blob_files[blob_key]
current_shard_size = 0
start_file_index = 0
next_file_index = 0
for fileinfo in bfiles:
next_file_index += 1
current_shard_size += fileinfo.file_size
if current_shard_size >= size_per_shard:
readers.append(cls(blob_key, start_file_index, next_file_index, 0,
_reader))
current_shard_size = 0
start_file_index = next_file_index
if current_shard_size != 0:
readers.append(cls(blob_key, start_file_index, next_file_index, 0,
_reader))
return readers | Returns a list of input readers for the input spec.
Args:
mapper_spec: The MapperSpec for this InputReader. Must contain
'blob_keys' parameter with one or more blob keys.
_reader: a callable that returns a file-like object for reading blobs.
Used for dependency injection.
Returns:
A list of InputReaders spanning the subfiles within the blobs.
There will be at least one reader per blob, but it will otherwise
attempt to keep the expanded size even. |
23,366 | def closeEvent(self, event):
self.hide()
if self.data["state"]["is_closing"]:
self.info(self.tr("Cleaning up models.."))
for v in self.data["views"].values():
v.model().deleteLater()
v.setModel(None)
self.info(self.tr("Cleaning up terminal.."))
for item in self.data["models"]["terminal"].items:
del(item)
self.info(self.tr("Cleaning up controller.."))
self.controller.cleanup()
self.info(self.tr("All clean!"))
self.info(self.tr("Good bye"))
return super(Window, self).closeEvent(event)
self.info(self.tr("Closing.."))
def on_problem():
self.heads_up("Warning", "Had trouble closing down. "
"Please tell someone and try again.")
self.show()
if self.controller.is_running:
self.info(self.tr("..as soon as processing is finished.."))
self.controller.is_running = False
self.finished.connect(self.close)
util.defer(2000, on_problem)
return event.ignore()
self.data["state"]["is_closing"] = True
util.defer(200, self.close)
return event.ignore() | Perform post-flight checks before closing
Make sure processing of any kind is wrapped up before closing |
23,367 | def _setLocation(self, path):
model = self._filesystemWidget.model()
if not path.startswith(model.root.path):
raise ValueError('Path must be the root or under the root.')  # message assumed
segments = self._segmentPath(path)
for segment in reversed(segments):
pathIndex = model.pathIndex(segment)
model.fetchMore(pathIndex)
self._filesystemWidget.setRootIndex(model.pathIndex(path))
self._locationWidget.clear()
for segment in segments:
index = model.pathIndex(segment)
if not index.isValid():
icon = model.iconFactory.icon(
riffle.icon_factory.IconType.Computer
)
self._locationWidget.addItem(
icon, model.root.path or model.root.name, model.root.path
)
else:
icon = model.icon(index)
self._locationWidget.addItem(icon, segment, segment)
if self._locationWidget.count() > 1:
self._upButton.setEnabled(True)
self._upShortcut.setEnabled(True)
else:
self._upButton.setEnabled(False)
self._upShortcut.setEnabled(False) | Set current location to *path*.
*path* must be the same as root or under the root.
.. note::
Comparisons are case-sensitive. If you set the root as 'D:/' then
location can be set as 'D:/folder' *not* 'd:/folder'. |
23,368 | def do_it(self, dbg):
try:
var_objects = []
for variable in self.vars:
variable = variable.strip()
if len(variable) > 0:
if '\t' in variable:  # tab separator assumed
scope, attrs = variable.split('\t', 1)
name = attrs[0]
else:
scope, attrs = (variable, None)
name = scope
var_obj = pydevd_vars.getVariable(dbg, self.thread_id, self.frame_id, scope, attrs)
var_objects.append((var_obj, name))
t = GetValueAsyncThreadDebug(dbg, self.sequence, var_objects)
t.start()
except:
exc = get_exception_traceback_str()
sys.stderr.write('Error evaluating variable: %s\n' % (exc,))  # message assumed
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating variable %s " % exc)
dbg.writer.add_command(cmd) | Starts a thread that will load values asynchronously |
23,369 | def parse_uint(self, buff, start, end):
return struct.unpack_from(self.ustructmap[end - start], buff, start)[0] | parse an integer from the buffer given the interval of bytes
:param buff:
:param start:
:param end: |
23,370 | def average_dtu_configurations(list_of_objects):
result = DtuConfiguration()
if len(list_of_objects) == 0:
return result
list_of_members = result.__dict__.keys()
for member in list_of_members:
result.__dict__[member] = np.mean(
[tmp_dtu.__dict__[member] for tmp_dtu in list_of_objects]
)
return result | Return DtuConfiguration instance with averaged values.
Parameters
----------
list_of_objects : python list
List of DtuConfiguration instances to be averaged.
Returns
-------
result : DtuConfiguration instance
Object with averaged values. |
23,371 | def getCachedOrUpdatedValue(self, key, channel=None):
if channel:
return self._hmchannels[channel].getCachedOrUpdatedValue(key)
try:
return self._VALUES[key]
except KeyError:
value = self._VALUES[key] = self.getValue(key)
return value | Gets the channel's value with the given key.
If the key is not found in the cache, the value is queried from the host.
If 'channel' is given, the respective channel's value is returned. |
23,372 | def get_iso_packet_buffer_list(transfer_p):
transfer = transfer_p.contents
offset = 0
result = []
append = result.append
for iso_transfer in _get_iso_packet_list(transfer):
length = iso_transfer.length
append(_get_iso_packet_buffer(transfer, offset, length))
offset += length
return result | Python-specific helper extracting a list of iso packet buffers. |
23,373 | def fit(self, X=None, u=None):
X = X.copy()
if self.mode == 'parallel':  # mode name assumed
Xall = X.copy()
X = np.reshape(Xall.copy(), (-1, Xall.shape[-1]))
if ((u is None)):
nmin = min([X.shape[0], X.shape[1]])
nmin = np.minimum(nmin-1, self.nPC)
u,sv,v = svdecon(np.float64(X), k=nmin)
u = u * sv
NN, self.nPC = u.shape
self.u = u
U = self._map(u.copy(), self.n_components, self.n_X, u.copy())
return self | Fit X into an embedded space.
Inputs
----------
X : array, shape (n_samples, n_features)
u,s,v : svd decomposition of X (optional)
Assigns
----------
embedding : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
u,sv,v : singular value decomposition of data S, potentially with smoothing
isort1 : sorting along first dimension of matrix
isort2 : sorting along second dimension of matrix (if n_Y > 0)
cmap: correlation of each item with all locations in the embedding map (before upsampling)
A: PC coefficients of each Fourier mode |
23,374 | def cells_from_defaults(clz, jsonobj):
if isinstance(jsonobj, (str, unicode)):
jsonobj = json.loads(jsonobj)
assert 'cells' in jsonobj, "No cells in object"
domain = TaxonomyCell.get_domain()
cells = []
for num, cell_dna in enumerate(jsonobj['cells']):
assert 'type' in cell_dna, "No type definition"
classgenerator = domain.node[cell_dna['type']]['class']  # node attribute key assumed
cell = classgenerator()
cell['num'].merge(num)  # field name assumed
for attr, val in cell_dna.items():
if not attr in ['type']:
cell[attr].merge(val)
cells.append(cell)
return cells | Creates a referent instance of type `json.kind` and
initializes it to default values. |
23,375 | def histogram(values, num_bins, bounds, normalized=True, plot=False, color='b'):  # default color assumed
hist, bins = np.histogram(values, bins=num_bins, range=bounds)
width = (bins[1] - bins[0])
if normalized:
if np.sum(hist) > 0:
hist = hist.astype(np.float32) / np.sum(hist)
if plot:
import matplotlib.pyplot as plt
plt.bar(bins[:-1], hist, width=width, color=color)
return hist, bins | Generate a histogram plot.
Parameters
----------
values : :obj:`numpy.ndarray`
An array of values to put in the histogram.
num_bins : int
The number equal-width bins in the histogram.
bounds : :obj:`tuple` of float
Two floats - a min and a max - that define the lower and upper
ranges of the histogram, respectively.
normalized : bool
If True, the bins will show the percentage of elements they contain
rather than raw counts.
plot : bool
If True, this function uses pyplot to plot the histogram.
color : :obj:`str`
The color identifier for the plotted bins.
Returns
-------
:obj:`tuple of `:obj:`numpy.ndarray`
The values of the histogram and the bin edges as ndarrays. |
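A short usage sketch for the row above (sample data made up):
import numpy as np
values = np.random.normal(loc=0.0, scale=1.0, size=1000)
hist, bins = histogram(values, num_bins=20, bounds=(-4.0, 4.0))
print(hist.sum())   # ~1.0 because normalized=True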
23,376 | def encode_aes256(key, plaintext):
if len(key) != 64:
raise TypeError("encode_aes256() expects a 256 bit key encoded as a 64 hex character string")
iv = os.urandom(AES.block_size)
cipher = AES.new(binascii.unhexlify(key.encode()), mode=AES.MODE_CBC, IV=iv)
ciphertext = cipher.encrypt(ensure_bytes(pad_aes256(plaintext)))
iv_plus_encrypted = binascii.hexlify(iv) + binascii.hexlify(ciphertext)
return iv_plus_encrypted | Utility method to encode some given plaintext with the given key. Important thing to note:
This is not a general purpose encryption method - it has specific semantics (see below for
details).
Takes the given hex string key and converts it to a 256 bit binary blob. Then pads the given
plaintext to AES block size which is always 16 bytes, regardless of AES key size. Then
encrypts using AES-256-CBC using a random IV. Then converts both the IV and the ciphertext
to hex. Finally returns the IV appended by the ciphertext.
:param key: string, 64 hex chars long
:param plaintext: string, any amount of data |
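Usage sketch for the function above; note the return value is hex bytes with the hex IV prepended.
import binascii, os
key = binascii.hexlify(os.urandom(32)).decode()   # 64 hex chars == 256-bit key
token = encode_aes256(key, 'attack at dawn')
iv_hex, ct_hex = token[:32], token[32:]           # 16-byte IV == 32 hex chars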
23,377 | def build_package_from_pr_number(gh_token, sdk_id, pr_number, output_folder, *, with_comment=False):
con = Github(gh_token)
repo = con.get_repo(sdk_id)
sdk_pr = repo.get_pull(pr_number)
package_names = {f.filename.split('/')[0] for f in sdk_pr.get_files() if f.filename.startswith("azure")}
absolute_output_folder = Path(output_folder).resolve()
with tempfile.TemporaryDirectory() as temp_dir, \
manage_git_folder(gh_token, Path(temp_dir) / Path("sdk"), sdk_id, pr_number=pr_number) as sdk_folder:
for package_name in package_names:
_LOGGER.debug("Build {}".format(package_name))
execute_simple_command(
["python", "./build_package.py", "--dest", str(absolute_output_folder), package_name],
cwd=sdk_folder
)
_LOGGER.debug("Build finished: {}".format(package_name))
if with_comment:
files = [f.name for f in absolute_output_folder.iterdir()]
comment_message = None
dashboard = DashboardCommentableObject(sdk_pr, "(message created by the CI based on PR content)")
try:
installation_message = build_installation_message(sdk_pr)
download_message = build_download_message(sdk_pr, files)
comment_message = installation_message + "\n\n" + download_message
dashboard.create_comment(comment_message)
except Exception:
_LOGGER.critical("Unable to do PR comment:\n%s", comment_message) | Will clone the given PR branch and build the package with the given name. |
23,378 | def get_bgcolor(self, index):
value = self.get_value(index)
if index.column() < 3:
color = ReadOnlyCollectionsModel.get_bgcolor(self, index)
else:
if self.remote:
color_name = value['color']  # key name assumed
else:
color_name = get_color_name(value)
color = QColor(color_name)
color.setAlphaF(.2)
return color | Background color depending on value |
23,379 | def configure_roles_on_host(api, host):
for role_ref in host.roleRefs:
if role_ref.get('roleName') is None:
continue
role = api.get_cluster(role_ref['clusterName'])\
.get_service(role_ref['serviceName'])\
.get_role(role_ref['roleName'])
LOG.debug("Evaluating %s (%s)" % (role.name, host.hostname))
config = None
if role.type == 'DATANODE':
config = DATANODE_CONF
elif role.type == 'TASKTRACKER':
config = TASKTRACKER_CONF
elif role.type == 'REGIONSERVER':
config = REGIONSERVER_CONF
else:
continue
LOG.info("Configuring %s (%s)" % (role.name, host.hostname))
role.update_config(config) | Go through all the roles on this host, and configure them if they
match the role types that we care about. |
23,380 | def get_data(__pkg: str, __name: str) -> str:
for dname in get_data_dirs(__pkg):
test_path = path.join(dname, __name)
if path.exists(test_path):
return test_path
raise FileNotFoundError('No data file {!r} for package {!r}'.format(__name, __pkg))  # message assumed
Args:
__pkg: Package name
__name: Data file name |
23,381 | def _wrap_OCLArray(cls):
def prepare(arr):
return np.require(arr, None, "C")
@classmethod
def from_array(cls, arr, *args, **kwargs):
queue = get_device().queue
return cl_array.to_device(queue, prepare(arr), *args, **kwargs)
@classmethod
def empty(cls, shape, dtype=np.float32):
queue = get_device().queue
return cl_array.empty(queue, shape, dtype)
@classmethod
def empty_like(cls, arr):
return cls.empty(arr.shape, arr.dtype)
@classmethod
def zeros(cls, shape, dtype=np.float32):
queue = get_device().queue
return cl_array.zeros(queue, shape, dtype)
@classmethod
def zeros_like(cls, arr):
queue = get_device().queue
return cl_array.zeros_like(queue, arr)
def copy_buffer(self, buf, **kwargs):
queue = get_device().queue
return cl.enqueue_copy(queue, self.data, buf.data,
**kwargs)
def write_array(self, data, **kwargs):
queue = get_device().queue
return cl.enqueue_copy(queue, self.data, prepare(data),
**kwargs)
def copy_image(self, img, **kwargs):
queue = get_device().queue
return cl.enqueue_copy(queue, self.data, img, offset=0,
origin=(0,)*len(img.shape), region=img.shape,
**kwargs)
def copy_image_resampled(self, img, **kwargs):
if self.dtype.type == np.float32:
type_str = "float"
elif self.dtype.type == np.complex64:
type_str = "complex"
else:
raise NotImplementedError("only resampling of float32 and complex64 arrays possible ")
kern_str = "img%dd_to_buf_%s" % (len(img.shape), type_str)
OCLArray._resample_prog.run_kernel(kern_str,
self.shape[::-1], None,
img, self.data)
def wrap_module_func(mod, f):
def func(self, *args, **kwargs):
return getattr(mod, f)(self, *args, **kwargs)
return func
cls.from_array = from_array
cls.empty = empty
cls.empty_like = empty_like
cls.zeros = zeros
cls.zeros_like = zeros_like
cls.copy_buffer = copy_buffer
cls.copy_image = copy_image
cls.copy_image_resampled = copy_image_resampled
cls.write_array = write_array
cls._resample_prog = OCLProgram(abspath("kernels/copy_resampled.cl"))
for f in ["sum", "max", "min", "dot", "vdot"]:
setattr(cls, f, wrap_module_func(cl_array, f))
for f in dir(cl_math):
if isinstance(getattr(cl_math, f), collections.Callable):
setattr(cls, f, wrap_module_func(cl_math, f))
cls.__name__ = str("OCLArray")
return cls | WRAPPER |
23,382 | def rollback(self):
self._check_thread()
if self.state not in (_STATE_ACTIVE, _STATE_PARTIAL_COMMIT):
raise TransactionError("Transaction is not active.")
try:
if self.state != _STATE_PARTIAL_COMMIT:
request = transaction_rollback_codec.encode_request(self.id, self.thread_id)
self.client.invoker.invoke_on_connection(request, self.connection).result()
self.state = _STATE_ROLLED_BACK
finally:
self._locals.transaction_exists = False | Rollback of this current transaction. |
23,383 | def parse_domains(self, domain, params):
domain_id = self.get_non_aws_id(domain[])
domain[] = domain.pop()
self.domains[domain_id] = domain | Parse a single Route53Domains domain |
23,384 | def make_default_docstr(func, with_args=True, with_ret=True,
with_commandline=True, with_example=True,
with_header=False, with_debug=False):
import utool as ut
funcinfo = ut.util_inspect.infer_function_info(func)
argname_list = funcinfo.argname_list
argtype_list = funcinfo.argtype_list
argdesc_list = funcinfo.argdesc_list
return_header = funcinfo.return_header
return_type = funcinfo.return_type
return_name = funcinfo.return_name
return_desc = funcinfo.return_desc
funcname = funcinfo.funcname
modname = funcinfo.modname
defaults = funcinfo.defaults
num_indent = funcinfo.num_indent
needs_surround = funcinfo.needs_surround
funcname = funcinfo.funcname
ismethod = funcinfo.ismethod
va_name = funcinfo.va_name
kw_name = funcinfo.kw_name
kw_keys = funcinfo.kw_keys
docstr_parts = []
if with_header:
header_block = funcname
docstr_parts.append(header_block)
if with_args and len(argname_list) > 0:
argheader = 'Args'
arg_docstr = make_args_docstr(argname_list, argtype_list, argdesc_list,
ismethod, va_name, kw_name, kw_keys)
argsblock = make_docstr_block(argheader, arg_docstr)
docstr_parts.append(argsblock)
if with_ret and return_header is not None:
if return_header is not None:
return_doctr = make_returns_or_yeilds_docstr(return_type, return_name, return_desc)
returnblock = make_docstr_block(return_header, return_doctr)
docstr_parts.append(returnblock)
if with_commandline:
cmdlineheader = 'CommandLine'
cmdlinecode = make_cmdline_docstr(funcname, modname)
cmdlineblock = make_docstr_block(cmdlineheader, cmdlinecode)
docstr_parts.append(cmdlineblock)
if with_example:
exampleheader = 'Example'
examplecode = make_example_docstr(funcname, modname, argname_list,
defaults, return_type, return_name,
ismethod)
examplecode_ = ut.indent(examplecode, '>>> ')  # doctest prefix assumed
exampleblock = make_docstr_block(exampleheader, examplecode_)
docstr_parts.append(exampleblock)
if with_debug:
debugheader = 'Debug'
debugblock = ut.codeblock(
''  # original template string elided in extraction; left as a gap
).format(num_indent=num_indent)
debugblock = make_docstr_block(debugheader, debugblock)
docstr_parts.append(debugblock)
if needs_surround:
docstr_parts = ['r"""'] + docstr_parts + ['"""']  # surround tokens assumed
default_docstr = '\n'.join(docstr_parts)
else:
default_docstr = '\n'.join(docstr_parts)
docstr_indent = ' ' * (num_indent + 4)
default_docstr = ut.indent(default_docstr, docstr_indent)
return default_docstr | r"""
Tries to make a sensible default docstr so the user
can fill things in without typing too much
# TODO: Interleave old documentation with new documentation
Args:
func (function): live python function
with_args (bool):
with_ret (bool): (Defaults to True)
with_commandline (bool): (Defaults to True)
with_example (bool): (Defaults to True)
with_header (bool): (Defaults to False)
with_debug (bool): (Defaults to False)
Returns:
tuple: (argname, val)
Ignore:
pass
CommandLine:
python -m utool.util_autogen --exec-make_default_docstr --show
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_autogen import * # NOQA
>>> import utool as ut
>>> func = ut.make_default_docstr
>>> #func = ut.make_args_docstr
>>> #func = PythonStatement
>>> func = auto_docstr
>>> default_docstr = make_default_docstr(func)
>>> result = str(default_docstr)
>>> print(result) |
23,385 | def add_at_risk_counts(*fitters, **kwargs):
from matplotlib import pyplot as plt
ax = kwargs.get("ax", None)
if ax is None:
ax = plt.gca()
fig = kwargs.get("fig", None)
if fig is None:
fig = plt.gcf()
if "labels" not in kwargs:
labels = [f._label for f in fitters]
else:
labels = kwargs["labels"]
if labels is None:
labels = [None] * len(fitters)
ax2 = plt.twiny(ax=ax)
ax2_ypos = -0.15 * 6.0 / fig.get_figheight()
move_spines(ax2, ["bottom"], [ax2_ypos])
remove_spines(ax2, ["top", "right", "bottom", "left"])
ax2.xaxis.tick_bottom()
ax2.set_xlim(ax.get_xlim())
ax2.set_xticks(ax.get_xticks())
remove_ticks(ax2, x=True, y=True)
ticklabels = []
for tick in ax2.get_xticks():
lbl = ""
for f, l in zip(fitters, labels):
if tick == ax2.get_xticks()[0] and l is not None:
if is_latex_enabled():
s = "\n{}\\quad".format(l) + "{}"
else:
s = "\n{} ".format(l) + "{}"
else:
s = "\n{}"
lbl += s.format(f.durations[f.durations >= tick].shape[0])
ticklabels.append(lbl.strip())
ax2.set_xticklabels(ticklabels, ha="right")
ax2.xaxis.set_label_coords(0, ax2_ypos)
ax2.set_xlabel("At risk")
plt.tight_layout()
return ax | Add counts showing how many individuals were at risk at each time point in
survival/hazard plots.
Parameters
----------
fitters:
One or several fitters, for example KaplanMeierFitter,
NelsonAalenFitter, etc...
Returns
--------
ax: The axes which was used.
Examples
--------
>>> # First train some fitters and plot them
>>> fig = plt.figure()
>>> ax = plt.subplot(111)
>>>
>>> f1 = KaplanMeierFitter()
>>> f1.fit(data)
>>> f1.plot(ax=ax)
>>>
>>> f2 = KaplanMeierFitter()
>>> f2.fit(data)
>>> f2.plot(ax=ax)
>>>
>>> # These are equivalent
>>> add_at_risk_counts(f1, f2)
>>> add_at_risk_counts(f1, f2, ax=ax, fig=fig)
>>>
>>> # This overrides the labels
>>> add_at_risk_counts(f1, f2, labels=['fitter one', 'fitter two'])
>>>
>>> # This hides the labels
>>> add_at_risk_counts(f1, f2, labels=None) |
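add_at_risk_counts relies on move_spines, remove_spines, and remove_ticks defined elsewhere in the plotting module; a minimal sketch of what they plausibly do (the matplotlib calls are real, the bodies are assumptions):

def move_spines(ax, sides, dists):
    # shift the named spines ('bottom', 'left', ...) outward in axes coordinates
    for side, dist in zip(sides, dists):
        ax.spines[side].set_position(('axes', dist))
    return ax

def remove_spines(ax, sides):
    # hide the named spines entirely
    for side in sides:
        ax.spines[side].set_visible(False)
    return ax

def remove_ticks(ax, x=False, y=False):
    # drop tick marks; the tick labels are managed separately above
    if x:
        ax.xaxis.set_ticks_position('none')
    if y:
        ax.yaxis.set_ticks_position('none')
    return ax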
23,386 | def popen(self, stdout, stderr):
self.logger.info('Running: %s', self.command_str)  # log-message literal was stripped in extraction; placeholder text
return subprocess.Popen([self._executor_script], stdout=stdout, stderr=stderr) | Build popen object to run
:rtype: subprocess.Popen |
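A hedged usage sketch for the popen wrapper above ('runner' and the log-file names are hypothetical):

with open('run.log', 'wb') as out, open('run.err', 'wb') as err:
    proc = runner.popen(stdout=out, stderr=err)  # returns a subprocess.Popen
    returncode = proc.wait()  # block until the executor script exits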
23,387 | def perform(cls, entity_cls, usecase_cls, request_object_cls,
payload: dict, raise_error=False):
use_case = usecase_cls()
payload.update({'entity_cls': entity_cls})  # key literal was stripped; name inferred from the parameter it carries
request_object = request_object_cls.from_dict(payload)
resp = use_case.execute(request_object)
if raise_error and isinstance(resp, ResponseFailure):
raise UsecaseExecutionError(
(resp.code, resp.value),
orig_exc=getattr(resp, 'exc', None),  # attribute-name literal stripped; 'exc' is an inferred placeholder
orig_trace=getattr(resp, 'trace', None)  # likewise, 'trace' is inferred
)
return resp | This method bundles all essential artifacts and initiates usecase
execution.
:param entity_cls: The entity class to be used for running the usecase
:param usecase_cls: The usecase class that will be executed by
the tasklet.
:param request_object_cls: The request object to be used as input to the
use case
:type request_object_cls: protean.core.Request
:param payload: The payload to be passed to the request object
:type payload: dict
:param raise_error: Raise error when a failure response is generated
:type raise_error: bool |
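A hedged usage sketch of the classmethod above; the Tasklet, Dog, ShowUseCase, and ShowRequestObject names are illustrative, not taken from the source:

response = Tasklet.perform(
    Dog, ShowUseCase, ShowRequestObject,
    payload={'identifier': 5},
    raise_error=True,  # surface failures as UsecaseExecutionError instead of a response object
)
print(response.value)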
23,388 | def airspeed_ratio(VFR_HUD):
from math import sqrt  # explicit import added; the original module star-imports math
import mavutil
mav = mavutil.mavfile_global
airspeed_pressure = (VFR_HUD.airspeed**2) / ratio  # 'ratio' resolves at module scope in the original source
airspeed = sqrt(airspeed_pressure * ratio)
return airspeed | recompute airspeed with a different ARSPD_RATIO |
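Algebraically, dividing by the old ratio recovers the (scaled) dynamic pressure and multiplying by a new ratio rescales it, so a standalone version with both ratios passed explicitly would look like this (function and parameter names are mine):

from math import sqrt

def recompute_airspeed(airspeed, old_ratio, new_ratio):
    airspeed_pressure = airspeed ** 2 / old_ratio  # dynamic pressure implied by the logged value
    return sqrt(airspeed_pressure * new_ratio)     # airspeed under the new ARSPD_RATIO

print(recompute_airspeed(25.0, 2.0, 1.8))  # ~23.72 m/s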
23,389 | def get_last_doc(self):
try:
result = self.elastic.search(
index=self.meta_index_name,
body={
"query": {"match_all": {}},
"sort": [{"_ts": "desc"}],
},
size=1
)["hits"]["hits"]
for r in result:
    r['_source']['_id'] = r['_id']  # key literals were stripped; restored from the upstream mongo-connector source
    return r['_source']
except es_exceptions.RequestError:
return None | Get the most recently modified document from Elasticsearch.
This method is used to help define a time window within which documents
may be in conflict after a MongoDB rollback. |
23,390 | def recurse(self, factory_meta, extras):
return self.__class__(factory_meta, extras, strategy=self.strategy) | Recurse into a sub-factory call. |
23,391 | def on_menu_criteria_file(self, event):
    if self.data_model == 3:
        default_file = "criteria.txt"
    else:
        default_file = "pmag_criteria.txt"
    read_success = False
    dlg = wx.FileDialog(
        self, message="choose pmag criteria file",
        defaultDir=self.WD,
        defaultFile=default_file,
        style=wx.FD_OPEN | wx.FD_CHANGE_DIR
    )
    if self.show_dlg(dlg) == wx.ID_OK:
        criteria_file = dlg.GetPath()
        print("-I- Read new criteria file: %s" % criteria_file)
        try:
            mag_meas_data, file_type = pmag.magic_read(criteria_file)
        except Exception:
            err_dlg = wx.MessageDialog(
                self, caption="Error", message="not a valid pmag_criteria file", style=wx.OK)
            self.show_dlg(err_dlg)
            err_dlg.Destroy()  # the original destroyed the same dialog twice
            dlg.Destroy()
            return
        self.acceptance_criteria = self.read_criteria_file(criteria_file)
        read_success = True
    dlg.Destroy()
    if read_success:
        self.on_menu_change_criteria(None) | read pmag_criteria.txt file
and open changecriteria dialog |
23,392 | def has_return_exprs(self, node):
results = {}
if self.return_expr.match(node, results):
return True
for child in node.children:
if child.type not in (syms.funcdef, syms.classdef):
if self.has_return_exprs(child):
return True
return False | Traverse the tree below node looking for 'return expr'.
Return True if at least one 'return expr' is found, False if not.
(If both 'return' and 'return expr' are found, return True.) |
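For comparison, a self-contained equivalent using the stdlib ast module instead of the lib2to3 pattern above (function name is mine):

import ast

def has_return_exprs_ast(node):
    # Mirror of the method above: look for 'return <expr>' below node,
    # without descending into nested function or class definitions.
    for child in ast.iter_child_nodes(node):
        if isinstance(child, ast.Return) and child.value is not None:
            return True
        if isinstance(child, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
            continue
        if has_return_exprs_ast(child):
            return True
    return False

tree = ast.parse("def f(x):\n    if x:\n        return x * 2\n")
print(has_return_exprs_ast(tree.body[0]))  # True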
23,393 | def check_dependency(self, operation, dependency):
if isinstance(dependency[1], SQLBlob):
return dependency[3] == operation
return super(MigrationAutodetector, self).check_dependency(operation, dependency) | Enhances default behavior of method by checking dependency for matching operation. |
23,394 | def load(self):
    publish = self._get_publish()
    # the key literals below were stripped in extraction; names are inferred from the aptly REST API
    self.architectures = publish['Architectures']
    for source in publish['Sources']:
        component = source['Component']
        snapshot = source['Name']
        self.publish_snapshots.append({
            'Component': component,
            'Name': snapshot
        })
        snapshot_remote = self._find_snapshot(snapshot)
        for source in self._get_source_snapshots(snapshot_remote, fallback_self=True):
            self.add(source, component) | Load publish info from remote |
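A hedged sketch of the publish payload shape load() appears to expect; field names match the aptly REST API as best I can tell, values are illustrative:

publish = {
    "Architectures": ["amd64", "i386"],
    "Sources": [
        {"Component": "main", "Name": "wheezy-main-20240101"},
    ],
}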
23,395 | def single_gene_deletion(model, gene_list=None, method="fba", solution=None,
processes=None, **kwargs):
    return _multi_deletion(
        model, 'gene', element_lists=_element_lists(model.genes, gene_list),
        method=method, solution=solution, processes=processes, **kwargs) | Knock out each gene from a given list.
Parameters
----------
model : cobra.Model
The metabolic model to perform deletions in.
gene_list : iterable
``cobra.Gene``s to be deleted. If not passed,
all the genes from the model are used.
method: {"fba", "moma", "linear moma", "room", "linear room"}, optional
Method used to predict the growth rate.
solution : cobra.Solution, optional
A previous solution to use as a reference for (linear) MOMA or ROOM.
processes : int, optional
The number of parallel processes to run. Can speed up the computations
if the number of knockouts to perform is large. If not passed,
will be set to the number of CPUs found.
kwargs :
Keyword arguments are passed on to underlying simulation functions
such as ``add_room``.
Returns
-------
pandas.DataFrame
A representation of all single gene deletions. The columns are
'growth' and 'status', where
index : frozenset([str])
The gene identifier that was knocked out.
growth : float
The growth rate of the adjusted model.
status : str
The solution's status. |
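A minimal usage sketch, assuming a model is available (the textbook model shown here is cobrapy's bundled example; loading details may differ by version):

from cobra.io import load_model

model = load_model("textbook")           # any cobra.Model works here
results = single_gene_deletion(model, model.genes[:5])
print(results[["growth", "status"]])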
23,396 | def parse(content):
    import re
    import shlex  # imports added here; the original module imports these at top level
    values = {}
    for line in content.splitlines():
        lexer = shlex.shlex(line, posix=True)
        tokens = list(lexer)
        if len(tokens) < 3:
            continue
        name, op = tokens[:2]
        value = ' '.join(tokens[2:])
        if op != '=':
            continue
        if not re.match(r'[A-Za-z_][A-Za-z_0-9]*', name):  # identifier pattern inferred; literal was stripped
            continue
        value = value.replace(r'\n', '\n')  # expand literal escape sequences; literals inferred
        value = value.replace(r'\t', '\t')
        values[name] = value
    return values | Parse the content of a .env file (a line-delimited KEY=value format) into a
dictionary mapping keys to values. |
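For example, given the reconstruction above:

content = 'FOO=bar\nGREETING="hello world"\n# comments and blank lines are skipped\n'
print(parse(content))  # {'FOO': 'bar', 'GREETING': 'hello world'}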
23,397 | def ensure_unicoded_and_unique(args_list, application):
unicoded_args = []
for argument in args_list:
argument = (six.u(argument)
if not isinstance(argument, six.text_type) else argument)
if argument not in unicoded_args or argument == application:
unicoded_args.append(argument)
return unicoded_args | Iterate over args_list, make it unicode if needed and ensure that there
are no duplicates.
Returns list of unicoded arguments in the same order. |
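For example, the application name is kept even when repeated, while other duplicates are dropped:

args = ['notify', 'hello', 'hello', 'notify']
print(ensure_unicoded_and_unique(args, 'notify'))
# ['notify', 'hello', 'notify']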
23,398 | def add_category(self, category):
self._categories = self._ensure_append(category, self._categories) | Add a category assigned to this message
:rtype: Category |
23,399 | def extract_request_details(request_object, session_object=None):
    request_details = {
        # the string keys/values below were stripped in extraction; names are
        # inferred from how each slot is used later in the function, except the
        # first two entries, which are unrecoverable placeholders
        'field_1': '',
        'field_2': '',
        'code': 200,
        'method': request_object.method,
        'headers': {},
        'root': request_object.url_root,
        'route': request_object.path,
        'session': {},
        'params': {},
        'form': {},
        'json': {},
        'data': ''
    }
    request_details['headers'].update(**request_object.headers)
    for key in request_object.args.keys():
        request_details['params'][key] = request_object.args.get(key)
    if session_object:
        request_details['session'].update(**session_object)
    if request_object.is_json:
        try:
            json_details = request_object.get_json(silent=True)
            if isinstance(json_details, dict):
                request_details['json'] = json_details
        except Exception:
            pass
    else:
        try:
            from base64 import b64encode
            request_details['data'] = b64encode(request_object.data).decode()
        except Exception:
            pass
    try:
        for key, value in request_object.form.items():
            request_details['form'][key] = value
    except Exception:
        pass
return request_details | a method for extracting request details from request and session objects
NOTE: method is also a placeholder funnel for future validation
processes, request logging, request context building and
counter-measures for the nasty web
:param request_object: request object generated by flask from request route
:param session_object: session object generated by flask from client cookie
:return: dictionary with request details |
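A hedged usage sketch inside a Flask route (route and app names are illustrative):

from flask import Flask, request, session, jsonify

app = Flask(__name__)

@app.route('/echo', methods=['GET', 'POST'])
def echo():
    details = extract_request_details(request, dict(session))
    return jsonify(details)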