Unnamed: 0 (int64) | code (string) | docstring (string) |
---|---|---|
384,200 | def _validate_contains(self, expected_values, field, value):
if not isinstance(value, Iterable):
return
if not isinstance(expected_values, Iterable) or isinstance(
expected_values, _str_type
):
expected_values = set((expected_values,))
else:
expected_values = set(expected_values)
missing_values = expected_values - set(value)
if missing_values:
self._error(field, errors.MISSING_MEMBERS, missing_values) | {'empty': False } |
384,201 | def add_to_group(server_context, user_ids, group_id, container_path=None):
return __make_security_group_api_request(server_context, , user_ids, group_id, container_path) | Add user to group
:param server_context: A LabKey server context. See utils.create_server_context.
:param user_ids: users to add
:param group_id: to add to
:param container_path:
:return: |
384,202 | def unsubscribe_from_data(
self,
subscriber: Callable[[bytes], bool],
) -> None:
self._data_subscribers.remove(subscriber) | Not thread-safe. |
384,203 | def import_string(import_name, silent=False):
import_name = str(import_name).replace(':', '.')
try:
try:
__import__(import_name)
except ImportError:
if '.' not in import_name:
raise
else:
return sys.modules[import_name]
module_name, obj_name = import_name.rsplit('.', 1)
try:
module = __import__(module_name, None, None, [obj_name])
except ImportError:
module = import_string(module_name)
try:
return getattr(module, obj_name)
except AttributeError as e:
raise ImportError(e)
except ImportError as e:
if not silent:
raise e | Imports an object based on a string. This is useful if you want to
use import paths as endpoints or something similar. An import path can
be specified either in dotted notation (``xml.sax.saxutils.escape``)
or with a colon as object delimiter (``xml.sax.saxutils:escape``).
If `silent` is True the return value will be `None` if the import fails.
:param import_name: the dotted name for the object to import.
:param silent: if set to `True` import errors are ignored and
`None` is returned instead.
:return: imported object |
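A minimal usage sketch of the `import_string` row above, assuming the function is exposed by the surrounding module; both notations come straight from its docstring.

```python
# Illustrative only; `import_string` is assumed importable from the module shown above.
from xml.sax.saxutils import escape as expected

escaped = import_string("xml.sax.saxutils.escape")   # dotted notation
escaped2 = import_string("xml.sax.saxutils:escape")  # colon as object delimiter
assert escaped is expected and escaped2 is expected

# With silent=True a failed import returns None instead of raising ImportError.
assert import_string("no.such.module:thing", silent=True) is None
```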
384,204 | def create_new_file(help_string=NO_HELP, default=NO_DEFAULT, suffixes=None):
return ParamFilename(
help_string=help_string,
default=default,
type_name="new_file",
suffixes=suffixes,
) | Create a new file parameter
:param help_string:
:param default:
:param suffixes:
:return: |
384,205 | def setup_auditlog_catalog(portal):
logger.info("*** Setup Audit Log Catalog ***")
catalog_id = auditlog_catalog.CATALOG_AUDITLOG
catalog = api.get_tool(catalog_id)
for name, meta_type in auditlog_catalog._indexes.iteritems():
indexes = catalog.indexes()
if name in indexes:
logger.info("*** Index already in Catalog [SKIP]" % name)
continue
logger.info("*** Adding Index for field to catalog ..."
% (meta_type, name))
catalog.addIndex(name, meta_type)
if meta_type == "TextIndexNG3":
index = catalog._catalog.getIndex(name)
index.index.default_encoding = "utf-8"
index.index.query_parser = "txng.parsers.en"
index.index.autoexpand = "always"
index.index.autoexpand_limit = 3
logger.info("*** Added Index for field to catalog [DONE]"
% (meta_type, name))
at = api.get_tool("archetype_tool")
pt = api.get_tool("portal_types")
for portal_type in pt.listContentTypes():
catalogs = at.getCatalogsByType(portal_type)
if catalog not in catalogs:
new_catalogs = map(lambda c: c.getId(), catalogs) + [catalog_id]
at.setCatalogsByType(portal_type, new_catalogs)
logger.info("*** Adding catalog for ".format(
catalog_id, portal_type)) | Setup auditlog catalog |
384,206 | def query_saved_guest_screen_info(self, screen_id):
if not isinstance(screen_id, baseinteger):
raise TypeError("screen_id can only be an instance of type baseinteger")
(origin_x, origin_y, width, height, enabled) = self._call("querySavedGuestScreenInfo",
in_p=[screen_id])
return (origin_x, origin_y, width, height, enabled) | Returns the guest dimensions from the saved state.
in screen_id of type int
Saved guest screen to query info from.
out origin_x of type int
The X position of the guest monitor top left corner.
out origin_y of type int
The Y position of the guest monitor top left corner.
out width of type int
Guest width at the time the saved state was taken.
out height of type int
Guest height at the time the saved state was taken.
out enabled of type bool
Whether the monitor is enabled in the guest. |
384,207 | def validate_wavelengths(wavelengths):
if isinstance(wavelengths, u.Quantity):
units.validate_wave_unit(wavelengths.unit)
wave = wavelengths.value
else:
wave = wavelengths
if np.isscalar(wave):
wave = [wave]
wave = np.asarray(wave)
if np.any(wave <= 0):
raise exceptions.ZeroWavelength(
'Negative or zero wavelength occurs in wavelength array',
rows=np.where(wave <= 0)[0])
sorted_wave = np.sort(wave)
if not np.alltrue(sorted_wave == wave):
if np.alltrue(sorted_wave[::-1] == wave):
pass
else:
raise exceptions.UnsortedWavelength(
'Wavelength array is not monotonic',
rows=np.where(sorted_wave != wave)[0])
if wave.size > 1:
dw = sorted_wave[1:] - sorted_wave[:-1]
if np.any(dw == 0):
raise exceptions.DuplicateWavelength(
'Wavelength array contains duplicate entries',
rows=np.where(dw == 0)[0]) | Check wavelengths for ``synphot`` compatibility.
Wavelengths must satisfy these conditions:
* valid unit type, if given
* no zeroes
* monotonic ascending or descending
* no duplicate values
Parameters
----------
wavelengths : array-like or `~astropy.units.quantity.Quantity`
Wavelength values.
Raises
------
synphot.exceptions.SynphotError
Wavelengths unit type is invalid.
synphot.exceptions.DuplicateWavelength
Wavelength array contains duplicate entries.
synphot.exceptions.UnsortedWavelength
Wavelength array is not monotonic.
synphot.exceptions.ZeroWavelength
Negative or zero wavelength occurs in wavelength array. |
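A hedged usage sketch of the wavelength validator above; the `synphot` import paths are assumptions based on the docstring, and the exception behaviour shown is the one documented in the Raises section.

```python
# Illustrative only; assumes the synphot package layout implied by the docstring.
import numpy as np
from astropy import units as u
from synphot import exceptions
from synphot.utils import validate_wavelengths  # assumed import path

validate_wavelengths([1000.0, 2000.0, 3000.0] * u.AA)  # monotonic ascending: OK
validate_wavelengths([3000.0, 2000.0, 1000.0])         # descending is also accepted

try:
    validate_wavelengths([1000.0, 0.0, 2000.0])
except exceptions.ZeroWavelength as exc:
    print(exc)  # zero or negative values are rejected, with offending row indices
```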
384,208 | def friends(self, delegate, params={}, extra_args=None):
return self.__get(, delegate, params,
txml.Statuses, extra_args=extra_args) | Get updates from friends.
Calls the delegate once for each status object received. |
384,209 | def offset_data(data_section, offset, readable = False, wraparound = False):
for pos in range(0, len(data_section)/2):
data_section = offset_byte_in_data(data_section, offset, pos, readable, wraparound)
return data_section | Offset the whole data section.
see offset_byte_in_data for more information
Returns: the entire data section + offset on each byte |
384,210 | def map(self, func):
self._data = xmap(func, self._data)
return self | A lazy way to apply the given function to each element in the stream.
Useful for type casting, like:
>>> from audiolazy import count
>>> count().take(5)
[0, 1, 2, 3, 4]
>>> my_stream = count().map(float)
>>> my_stream.take(5) # A float counter
[0.0, 1.0, 2.0, 3.0, 4.0] |
384,211 | def class_variables(self):
p = lambda o: isinstance(o, Variable) and self.module._docfilter(o)
return filter(p, self.doc.values()) | Returns all documented class variables in the class, sorted
alphabetically as a list of `pydoc.Variable`. |
384,212 | def GetSources(self, event):
if self.DATA_TYPE != event.data_type:
raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
event.data_type))
source_long = getattr(event, 'source_long', 'UNKNOWN')
source_append = getattr(event, 'source_append', None)
if source_append:
source_long = '{0:s} {1:s}'.format(source_long, source_append)
return self.SOURCE_SHORT, source_long | Determines the short and long source for an event object.
Args:
event (EventObject): event.
Returns:
tuple(str, str): short and long source string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter. |
384,213 | def usearch(query, db, type, out, threads = , evalue = , alignment = , max_hits = 100, cluster = False):
if in os.environ:
usearch_loc = os.environ[]
else:
usearch_loc =
if os.path.exists(out) is False:
db = usearchdb(db, alignment, usearch_loc)
print( % (query, db), file=sys.stderr)
if type == :
strand =
else:
strand =
if alignment == and cluster is False:
os.system( \
% (usearch_loc, query, db, out, evalue, threads, strand, max_hits))
elif alignment == and cluster is False:
os.system( \
% (usearch_loc, query, db, out, threads, strand))
elif alignment == and cluster is True:
qsub =
os.system( \
% (usearch_loc, query, db, out, evalue, threads, strand, max_hits, qsub))
else:
print(, file=sys.stderr)
exit()
else:
print( % (query, db), file=sys.stderr) | run usearch |
384,214 | def _reregister_types(self):
for _type in self._register_types:
psycopg2.extensions.register_type(psycopg2.extensions.new_type(*_type)) | Registers existing types for a new connection |
384,215 | def unfreeze_extensions(self):
output_path = os.path.join(_registry_folder(), )
if not os.path.isfile(output_path):
raise ExternalError("There is no frozen extension list")
os.remove(output_path)
ComponentRegistry._frozen_extensions = None | Remove a previously frozen list of extensions. |
384,216 | async def run_asgi(self):
try:
result = await self.app(self.scope, self.asgi_receive, self.asgi_send)
except BaseException as exc:
self.closed_event.set()
msg = "Exception in ASGI application\n"
self.logger.error(msg, exc_info=exc)
if not self.handshake_started_event.is_set():
self.send_500_response()
else:
await self.handshake_completed_event.wait()
self.transport.close()
else:
self.closed_event.set()
if not self.handshake_started_event.is_set():
msg = "ASGI callable returned without sending handshake."
self.logger.error(msg)
self.send_500_response()
self.transport.close()
elif result is not None:
msg = "ASGI callable should return None, but returned ."
self.logger.error(msg, result)
await self.handshake_completed_event.wait()
self.transport.close() | Wrapper around the ASGI callable, handling exceptions and unexpected
termination states. |
384,217 | def read_config(args):
configfile = os.path.expanduser('~/.shakedown')
if os.path.isfile(configfile):
with open(configfile, 'r') as f:
config = toml.loads(f.read())
for key in config:
param = key.replace('-', '_')
if param not in args or args[param] in [False, None]:
args[param] = config[key]
return args | Read configuration options from ~/.shakedown (if exists)
:param args: a dict of arguments
:type args: dict
:return: a dict of arguments
:rtype: dict |
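A small sketch of the lookup `read_config` performs, assuming the stripped `replace()` call maps hyphenated TOML keys to underscored argument names (that hyphen-to-underscore mapping is an assumption, not something the row states).

```python
# Illustrative only: a TOML key fills an argument only if it is missing, False, or None.
args = {"dcos_url": None, "stdout": True}
config = {"dcos-url": "https://dcos.example.com", "stdout": False}

for key, value in config.items():
    param = key.replace("-", "_")  # assumed key-to-argument mapping
    if param not in args or args[param] in [False, None]:
        args[param] = value

print(args)  # {'dcos_url': 'https://dcos.example.com', 'stdout': True}
```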
384,218 | def _register_client(self, client, region_name):
for item in client.meta.method_to_api_mapping:
method = getattr(client, item)
wrapped_method = functools.partial(self._wrap_client, region_name, method)
setattr(client, item, wrapped_method) | Uses functools.partial to wrap all methods on a client with the self._wrap_client method
:param botocore.client.BaseClient client: the client to proxy
:param str region_name: AWS Region ID (ex: us-east-1) |
384,219 | def _create_dmnd_database(self, unaligned_sequences_path, daa_output):
logging.debug("Building diamond database")
cmd = "diamond makedb --in -d " % (unaligned_sequences_path, daa_output)
extern.run(cmd) | Build a diamond database using diamond makedb
Parameters
----------
unaligned_sequences_path: str
path to a FASTA file containing unaligned sequences
daa_output: str
Name of output database. |
384,220 | def _ClientPathToString(client_path, prefix=""):
return os.path.join(prefix, client_path.client_id, client_path.vfs_path) | Returns a path-like String of client_path with optional prefix. |
384,221 | def get_composition_smart_repository_session(self, repository_id, proxy):
if repository_id is None:
raise NullArgument()
if not self.supports_composition_smart_repository():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise
proxy = self._convert_proxy(proxy)
try:
session = sessions.CompositionSmartRepositorySession(repository_id, proxy, runtime=self._runtime)
except AttributeError:
raise
return session | Gets a composition smart repository session for the given
repository.
arg: repository_id (osid.id.Id): the Id of the repository
arg proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.CompositionSmartRepositorySession) - a
CompositionSmartRepositorySession
raise: NotFound - repository_id not found
raise: NullArgument - repository_id is null
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_composition_smart_repository()
false
compliance: optional - This method must be implemented if
supports_composition_smart_repository() is true. |
384,222 | def op_token(self, display_name, opt):
args = {
: opt.lease,
: display_name,
: token_meta(opt)
}
try:
token = self.create_token(**args)
except (hvac.exceptions.InvalidRequest,
hvac.exceptions.Forbidden) as vault_exception:
if vault_exception.errors[0] == :
emsg = "Permission denied creating operational token"
raise aomi.exceptions.AomiCredentials(emsg)
else:
raise
LOG.debug("Created operational token with lease of %s", opt.lease)
return token[][] | Return a properly annotated token for our use. This
token will be revoked at the end of the session. The token
will have some decent amounts of metadata tho. |
384,223 | def escape_dictionary(dictionary, datetime_format=):
for k, v in dictionary.iteritems():
if isinstance(v, datetime.datetime):
v = v.strftime(datetime_format)
if isinstance(v, basestring):
v = CoyoteDb.db_escape(str(v))
v = .format(v)
if v is True:
v = 1
if v is False:
v = 0
if v is None:
v =
dictionary[k] = v | Escape dictionary values with keys as column names and values column values
@type dictionary: dict
@param dictionary: Key-values |
384,224 | def genargs() -> ArgumentParser:
parser = ArgumentParser()
parser.add_argument("infile", help="Input ShExC specification")
parser.add_argument("-nj", "--nojson", help="Do not produce json output", action="store_true")
parser.add_argument("-nr", "--nordf", help="Do not produce rdf output", action="store_true")
parser.add_argument("-j", "--jsonfile", help="Output ShExJ file (Default: {infile}.json)")
parser.add_argument("-r", "--rdffile", help="Output ShExR file (Default: {infile}.{fmt suffix})")
parser.add_argument("--context", help="Alternative @context")
parser.add_argument("-f", "--format",
choices=list(set(x.name for x in rdflib_plugins(None, rdflib_Serializer)
if not in str(x.name))),
help="Output format (Default: turtle)", default="turtle")
return parser | Create a command line parser
:return: parser |
384,225 | def spam(self, msg, *args, **kw):
if self.isEnabledFor(SPAM):
self._log(SPAM, msg, args, **kw) | Log a message with level :data:`SPAM`. The arguments are interpreted as for :func:`logging.debug()`. |
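The `spam` helper above presupposes a custom SPAM level below DEBUG. A minimal sketch of how such a level is typically wired up; the numeric value 5 and the method attachment are assumptions (libraries such as verboselogs provide this out of the box).

```python
import logging

SPAM = 5  # assumed numeric value, below DEBUG (10)
logging.addLevelName(SPAM, "SPAM")

def spam(self, msg, *args, **kw):
    # Mirrors the row above: only emit when the SPAM level is enabled.
    if self.isEnabledFor(SPAM):
        self._log(SPAM, msg, args, **kw)

logging.Logger.spam = spam

logging.basicConfig(level=SPAM)
logging.getLogger(__name__).spam("very chatty diagnostic: %s", {"x": 1})
```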
384,226 | def request_start(self):
self._queue.put(command_packet(CMD_START_STREAM))
_LOGGER.info()
self._source.run() | Indicate readiness to receive stream.
This is a blocking call. |
384,227 | def text(self, x, y, txt=):
"Output a string"
txt = self.normalize_text(txt)
if (self.unifontsubset):
txt2 = self._escape(UTF8ToUTF16BE(txt, False))
for uni in UTF8StringToArray(txt):
self.current_font[].append(uni)
else:
txt2 = self._escape(txt)
s=sprintf(,x*self.k,(self.h-y)*self.k, txt2)
if(self.underline and txt!=):
s+=+self._dounderline(x,y,txt)
if(self.color_flag):
s=+self.text_color++s+
self._out(s) | Output a string |
384,228 | def parse_params(self, core_params):
params = []
for core_param in core_params:
params.append(self.parse_param(core_param))
return params | Goes through a set of parameters, extracting information about each.
:param core_params: The collection of parameters
:type core_params: A collection of ``<botocore.parameters.Parameter>``
subclasses
:returns: A list of dictionaries |
384,229 | def serialize_to_file(
root_processor,
value,
xml_file_path,
encoding=,
indent=None
):
serialized_value = serialize_to_string(root_processor, value, indent)
with open(xml_file_path, , encoding=encoding) as xml_file:
xml_file.write(serialized_value) | Serialize the value to an XML file using the root processor.
:param root_processor: Root processor of the XML document.
:param value: Value to serialize.
:param xml_file_path: Path to the XML file to which the serialized value will be written.
:param encoding: Encoding of the file.
:param indent: If specified, then the XML will be formatted with the specified indentation. |
384,230 | def step_a_new_working_directory(context):
command_util.ensure_context_attribute_exists(context, "workdir", None)
command_util.ensure_workdir_exists(context)
shutil.rmtree(context.workdir, ignore_errors=True) | Creates a new, empty working directory |
384,231 | def add_var_arg(self, arg):
self.__args.append(arg)
self.__job.add_var_arg(self.__arg_index)
self.__arg_index += 1 | Add a variable (or macro) argument to the condor job. The argument is
added to the submit file and a different value of the argument can be set
for each node in the DAG.
@param arg: name of option to add. |
384,232 | def Storage_clearDataForOrigin(self, origin, storageTypes):
assert isinstance(origin, (str,)
), "Argument 'origin' must be of type 'str'. Received type: '%s'" % type(
origin)
assert isinstance(storageTypes, (str,)
), "Argument 'storageTypes' must be of type 'str'. Received type: '%s'" % type(
storageTypes)
subdom_funcs = self.synchronous_command('Storage.clearDataForOrigin',
origin=origin, storageTypes=storageTypes)
return subdom_funcs | Function path: Storage.clearDataForOrigin
Domain: Storage
Method name: clearDataForOrigin
Parameters:
Required arguments:
'origin' (type: string) -> Security origin.
'storageTypes' (type: string) -> Comma separated origin names.
No return value.
Description: Clears storage for origin. |
384,233 | def are_forms_valid(self, forms):
for form in six.itervalues(forms):
if not form.is_valid():
return False
return True | Check if all forms defined in `form_classes` are valid. |
384,234 | def render_css_classes(self):
ret = ['dashboard-module']
if not self.enabled:
ret.append('disabled')
if self.draggable:
ret.append('draggable')
if self.collapsible:
ret.append('collapsible')
if self.deletable:
ret.append('deletable')
ret += self.css_classes
return ' '.join(ret) | Return a string containing the css classes for the module.
>>> mod = DashboardModule(enabled=False, draggable=True,
... collapsible=True, deletable=True)
>>> mod.render_css_classes()
'dashboard-module disabled draggable collapsible deletable'
>>> mod.css_classes.append('foo')
>>> mod.render_css_classes()
'dashboard-module disabled draggable collapsible deletable foo'
>>> mod.enabled = True
>>> mod.render_css_classes()
'dashboard-module draggable collapsible deletable foo' |
384,235 | def clearness_index_zenith_independent(clearness_index, airmass,
max_clearness_index=2.0):
kt_prime = clearness_index / _kt_kt_prime_factor(airmass)
kt_prime = np.maximum(kt_prime, 0)
kt_prime = np.minimum(kt_prime, max_clearness_index)
return kt_prime | Calculate the zenith angle independent clearness index.
Parameters
----------
clearness_index : numeric
Ratio of global to extraterrestrial irradiance on a horizontal
plane
airmass : numeric
Airmass
max_clearness_index : numeric, default 2.0
Maximum value of the clearness index. The default, 2.0, allows
for over-irradiance events typically seen in sub-hourly data.
NREL's SRRL Fortran code used 0.82 for hourly data.
Returns
-------
kt_prime : numeric
Zenith independent clearness index
References
----------
.. [1] Perez, R., P. Ineichen, E. Maxwell, R. Seals and A. Zelenka,
(1992). "Dynamic Global-to-Direct Irradiance Conversion Models".
ASHRAE Transactions-Research Series, pp. 354-369 |
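A small numeric sketch of the zenith-independent clearness index above. The `pvlib.irradiance` import path is an assumption about where this function lives; the clipping behaviour follows the row's code.

```python
# Illustrative only; assumes the function is available as shown.
import numpy as np
import pvlib

kt = np.array([0.42, 0.75, 1.10])      # global / extraterrestrial irradiance ratio
airmass = np.array([1.2, 1.5, 3.0])    # relative airmass

kt_prime = pvlib.irradiance.clearness_index_zenith_independent(kt, airmass)
print(kt_prime)  # clipped to [0, max_clearness_index]; default upper bound is 2.0
```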
384,236 | def remove(self, recursive=True, ignore_error=True):
try:
if recursive or self._cleanup == :
shutil.rmtree(self.path)
else:
os.rmdir(self.path)
except Exception as e:
if not ignore_error:
raise e | Remove the directory. |
384,237 | def network_create(provider, names, **kwargs):
salt192.168.100.0/24
client = _get_client()
return client.extra_action(provider=provider, names=names, action=, **kwargs) | Create private network
CLI Example:
.. code-block:: bash
salt minionname cloud.network_create my-nova names=['salt'] cidr='192.168.100.0/24' |
384,238 | def get_points_within_r(center_points, target_points, r):
r
tree = cKDTree(target_points)
indices = tree.query_ball_point(center_points, r)
return tree.data[indices].T | r"""Get all target_points within a specified radius of a center point.
All data must be in same coordinate system, or you will get undetermined results.
Parameters
----------
center_points: (X, Y) ndarray
location from which to grab surrounding points within r
target_points: (X, Y) ndarray
points from which to return if they are within r of center_points
r: integer
search radius around center_points to grab target_points
Returns
-------
matches: (X, Y) ndarray
A list of points within r distance of, and in the same
order as, center_points |
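The row above is a thin wrapper around SciPy's KD-tree ball query; a short standalone sketch of the same lookup, with shapes following the docstring (one center point plus candidate target points).

```python
import numpy as np
from scipy.spatial import cKDTree

center = np.array([0.0, 0.0])
targets = np.array([[0.5, 0.5], [3.0, 3.0], [-0.2, 0.1]])

tree = cKDTree(targets)
idx = tree.query_ball_point(center, r=1.0)
print(tree.data[idx])  # the two points within radius 1.0 of the center
```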
384,239 | def request(self, *args, **kwargs) -> XMLResponse:
r = super(XMLSession, self).request(*args, **kwargs)
return XMLResponse._from_response(r) | Makes an HTTP request, with mocked User-Agent headers.
Returns a :class:`HTTPResponse <HTTPResponse>`. |
384,240 | def _print_topics(self, header: str, cmds: List[str], verbose: bool) -> None:
import io
if cmds:
if not verbose:
self.print_topics(header, cmds, 15, 80)
else:
self.stdout.write(.format(str(header)))
widest = 0
for command in cmds:
width = utils.ansi_safe_wcswidth(command)
if width > widest:
widest = width
widest += 4
if widest < 20:
widest = 20
if self.ruler:
self.stdout.write(.format(, ruler=self.ruler, width=80))
topics = self.get_help_topics()
for command in cmds:
cmd_func = self.cmd_func(command)
if not hasattr(cmd_func, ) and command in topics:
help_func = getattr(self, HELP_FUNC_PREFIX + command)
result = io.StringIO()
with redirect_stdout(result):
stdout_orig = self.stdout
try:
self.stdout = result
help_func()
finally:
self.stdout = stdout_orig
doc = result.getvalue()
else:
doc = cmd_func.__doc__
if not doc:
doc_block = []
else:
doc_block = []
found_first = False
for doc_line in doc.splitlines():
stripped_line = doc_line.strip()
self.stdout.write("\n") | Customized version of print_topics that can switch between verbose or traditional output |
384,241 | def getList(self, aspList):
objects = self._elements(self.SIG_OBJECTS, self.N, [0])
houses = self._elements(self.SIG_HOUSES, self.N, [0])
angles = self._elements(self.SIG_ANGLES, self.N, [0])
significators = objects + houses + angles
objects = self._elements(self.SIG_OBJECTS, self.N, aspList)
terms = self._terms()
antiscias = self._elements(self.SIG_OBJECTS, self.A, [0])
cantiscias = self._elements(self.SIG_OBJECTS, self.C, [0])
promissors = objects + terms + antiscias + cantiscias
res = []
for prom in promissors:
for sig in significators:
if (prom[] == sig[]):
continue
arcs = self._arc(prom, sig)
for (x,y) in [(, ), (, )]:
arc = arcs[x]
if 0 < arc < self.MAX_ARC:
res.append([
arcs[x],
prom[],
sig[],
y,
])
return sorted(res) | Returns a sorted list with all
primary directions. |
384,242 | def createPenStyleCti(nodeName, defaultData=0, includeNone=False):
displayValues=PEN_STYLE_DISPLAY_VALUES
configValues=PEN_STYLE_CONFIG_VALUES
if includeNone:
displayValues = [] + list(displayValues)
configValues = [None] + list(configValues)
return ChoiceCti(nodeName, defaultData,
displayValues=displayValues, configValues=configValues) | Creates a ChoiceCti with Qt PenStyles.
If includeEmtpy is True, the first option will be None. |
384,243 | def identity(obj):
if hasattr(obj, ):
return mark_safe("{0},{1}".format(unlocalize(obj.pk),
get_revision_of_object(obj)))
else:
return mark_safe(unlocalize(obj.pk)) | returns a string representing "<pk>,<version>" of the passed object |
384,244 | def getEyeToHeadTransform(self, eEye):
fn = self.function_table.getEyeToHeadTransform
result = fn(eEye)
return result | Returns the transform from eye space to the head space. Eye space is the per-eye flavor of head
space that provides stereo disparity. Instead of Model * View * Projection the sequence is Model * View * Eye^-1 * Projection.
Normally View and Eye^-1 will be multiplied together and treated as View in your application. |
384,245 | def placebo_session(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
session_kwargs = {
: os.environ.get(, )
}
profile_name = os.environ.get('PLACEBO_PROFILE', None)
if profile_name:
session_kwargs['profile_name'] = profile_name
session = boto3.Session(**session_kwargs)
self = args[0]
prefix = self.__class__.__name__ + '.' + function.__name__
base_dir = os.environ.get(
"PLACEBO_DIR", os.path.join(os.getcwd(), "placebo"))
record_dir = os.path.join(base_dir, prefix)
record_format = os.environ.get(, Format.DEFAULT)
if not os.path.exists(record_dir):
os.makedirs(record_dir)
pill = placebo.attach(session, data_path=record_dir,
record_format=record_format)
if os.environ.get('PLACEBO_MODE') == 'record':
pill.record()
else:
pill.playback()
kwargs['session'] = session
return function(*args, **kwargs)
return wrapper | Decorator to help do testing with placebo.
Simply wrap the function you want to test and make sure to add
a "session" argument so the decorator can pass the placebo session.
Accepts the following environment variables to configure placebo:
PLACEBO_MODE: set to "record" to record AWS calls and save them
PLACEBO_PROFILE: optionally set an AWS credential profile to record with
PLACEBO_DIR: set the directory to record to / read from |
384,246 | def get_instance(self, payload):
return CredentialListInstance(self._version, payload, account_sid=self._solution['account_sid'], ) | Build an instance of CredentialListInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.sip.credential_list.CredentialListInstance
:rtype: twilio.rest.api.v2010.account.sip.credential_list.CredentialListInstance |
384,247 | def _bss_decomp_mtifilt(reference_sources, estimated_source, j, C, Cj):
filters_len = Cj.shape[-2]
s_true = _zeropad(reference_sources[j], filters_len - 1, axis=0)
e_spat = _project(reference_sources[j], Cj) - s_true
e_interf = _project(reference_sources, C) - s_true - e_spat
e_artif = - s_true - e_spat - e_interf
e_artif[:estimated_source.shape[0], :] += estimated_source
return (s_true, e_spat, e_interf, e_artif) | Decomposition of an estimated source image into four components
representing respectively the true source image, spatial (or filtering)
distortion, interference and artifacts, derived from the true source
images using multichannel time-invariant filters. |
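By construction, the four components returned above satisfy the standard BSS-eval identity (notation follows the docstring: true source image, spatial distortion, interference, artifacts):

```latex
\hat{s}_j(t) = s_{\mathrm{true}}(t) + e_{\mathrm{spat}}(t) + e_{\mathrm{interf}}(t) + e_{\mathrm{artif}}(t)
```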
384,248 | def tabulate_state_blocks(x, states, pos=None):
x = asarray_ndim(x, 1)
check_integer_dtype(x)
x = memoryview_safe(x)
switch_points, transitions, observations = state_transitions(x, states)
t = transitions[1:, 0]
o = observations[1:]
s1 = switch_points[:-1]
s2 = switch_points[1:]
is_marginal = (s1[:, 0] < 0) | (s2[:, 1] < 0)
size_min = s2[:, 0] - s1[:, 1] + 1
size_max = s2[:, 1] - s1[:, 0] - 1
size_max[is_marginal] = -1
items = [
('state', t),
('support', o),
('start_lidx', s1[:, 0]),
('start_ridx', s1[:, 1]),
('stop_lidx', s2[:, 0]),
('stop_ridx', s2[:, 1]),
('size_min', size_min),
('size_max', size_max),
('is_marginal', is_marginal)
]
if pos is not None:
pos = asarray_ndim(pos, 1)
check_dim0_aligned(x, pos)
check_integer_dtype(pos)
switch_positions = np.take(pos, switch_points)
switch_positions[0, 0] = -1
switch_positions[-1, 1] = -1
p1 = switch_positions[:-1]
p2 = switch_positions[1:]
length_min = p2[:, 0] - p1[:, 1] + 1
length_max = p2[:, 1] - p1[:, 0] - 1
length_max[is_marginal] = -1
items += [
('start_lpos', p1[:, 0]),
('start_rpos', p1[:, 1]),
('stop_lpos', p2[:, 0]),
('stop_rpos', p2[:, 1]),
('length_min', length_min),
('length_max', length_max),
]
import pandas
return pandas.DataFrame.from_dict(OrderedDict(items)) | Construct a dataframe where each row provides information about continuous state blocks.
Parameters
----------
x : array_like, int
1-dimensional array of state values.
states : set
Set of states of interest. Any state value not in this set will be ignored.
pos : array_like, int, optional
Array of positions corresponding to values in `x`.
Returns
-------
df : DataFrame
Examples
--------
>>> import allel
>>> x = [1, 1, 0, 1, 1, 2, 2, 0, 2, 1, 1]
>>> df = allel.tabulate_state_blocks(x, states={1, 2})
>>> df
state support start_lidx ... size_min size_max is_marginal
0 1 4 -1 ... 5 -1 True
1 2 3 4 ... 4 4 False
2 1 2 8 ... 2 -1 True
[3 rows x 9 columns]
>>> pos = [2, 4, 7, 8, 10, 14, 19, 23, 28, 30, 31]
>>> df = allel.tabulate_state_blocks(x, states={1, 2}, pos=pos)
>>> df
state support start_lidx ... stop_rpos length_min length_max
0 1 4 -1 ... 14 9 -1
1 2 3 4 ... 30 15 19
2 1 2 8 ... -1 2 -1
[3 rows x 15 columns] |
384,249 | def rename(old_name, new_name):
with Session() as session:
try:
session.VFolder(old_name).rename(new_name)
print_done()
except Exception as e:
print_error(e)
sys.exit(1) | Rename the given virtual folder. This operation is irreversible!
You cannot change the vfolders that are shared by other users,
and the new name must be unique among all your accessible vfolders
including the shared ones.
OLD_NAME: The current name of a virtual folder.
NEW_NAME: The new name of a virtual folder. |
384,250 | def select_ipam_strategy(self, network_id, network_strategy, **kwargs):
LOG.info("Selecting IPAM strategy for network_id:%s "
"network_strategy:%s" % (network_id, network_strategy))
net_type = "tenant"
if STRATEGY.is_provider_network(network_id):
net_type = "provider"
strategy = self._ipam_strategies.get(net_type, {})
default = strategy.get("default")
overrides = strategy.get("overrides", {})
if network_strategy in overrides:
LOG.info("Selected overridden IPAM strategy: %s"
% (overrides[network_strategy]))
return overrides[network_strategy]
if default:
LOG.info("Selected default IPAM strategy for tenant "
"network: %s" % (default))
return default
LOG.info("Selected network strategy for tenant "
"network: %s" % (network_strategy))
return network_strategy | Return relevant IPAM strategy name.
:param network_id: neutron network id.
:param network_strategy: default strategy for the network.
NOTE(morgabra) This feels like a hack but I can't think of a better
idea. The root problem is we can now attach ports to networks with
a different backend driver/ipam strategy than the network speficies.
We handle the the backend driver part with allowing network_plugin to
be specified for port objects. This works pretty well because nova or
whatever knows when we are hooking up an Ironic node so it can pass
along that key during port_create().
IPAM is a little trickier, especially in Ironic's case, because we
*must* use a specific IPAM for provider networks. There isn't really
much of an option other than involve the backend driver when selecting
the IPAM strategy. |
384,251 | def inet_pton(address_family, ip_string):
global __inet_pton
if __inet_pton is None:
if hasattr(socket, ):
__inet_pton = socket.inet_pton
else:
from ospd import win_socket
__inet_pton = win_socket.inet_pton
return __inet_pton(address_family, ip_string) | A platform independent version of inet_pton |
384,252 | def prepare_framework_container_def(model, instance_type, s3_operations):
deploy_image = model.image
if not deploy_image:
region_name = model.sagemaker_session.boto_session.region_name
deploy_image = fw_utils.create_image_uri(
region_name, model.__framework_name__, instance_type, model.framework_version, model.py_version)
base_name = utils.base_name_from_image(deploy_image)
model.name = model.name or utils.name_from_base(base_name)
bucket = model.bucket or model.sagemaker_session._default_bucket
script = os.path.basename(model.entry_point)
key = .format(model.name)
if model.source_dir and model.source_dir.lower().startswith():
code_dir = model.source_dir
model.uploaded_code = fw_utils.UploadedCode(s3_prefix=code_dir, script_name=script)
else:
code_dir = .format(bucket, key)
model.uploaded_code = fw_utils.UploadedCode(s3_prefix=code_dir, script_name=script)
s3_operations[] = [{
: model.source_dir or script,
: bucket,
: key,
: True
}]
deploy_env = dict(model.env)
deploy_env.update(model._framework_env_vars())
try:
if model.model_server_workers:
deploy_env[sagemaker.model.MODEL_SERVER_WORKERS_PARAM_NAME.upper()] = str(model.model_server_workers)
except AttributeError:
pass
return sagemaker.container_def(deploy_image, model.model_data, deploy_env) | Prepare the framework model container information. Specify related S3 operations for Airflow to perform.
(Upload `source_dir`)
Args:
model (sagemaker.model.FrameworkModel): The framework model
instance_type (str): The EC2 instance type to deploy this Model to. For example, 'ml.p2.xlarge'.
s3_operations (dict): The dict to specify S3 operations (upload `source_dir`).
Returns:
dict: The container information of this framework model. |
384,253 | def post_info(self, name, message):
self.post_command(OPERATIONS.CMD_POST_MESSAGE,
_create_message(name, states.INFO_LEVEL, message)) | Asynchronously post a user facing info message about a service.
Args:
name (string): The name of the service
message (string): The user facing info message that will be stored
for the service and can be queried later. |
384,254 | def new_driver(browser_name, *args, **kwargs):
if browser_name == FIREFOX:
return webdriver.Firefox(*args, **kwargs)
elif browser_name == PHANTOMJS:
executable_path = os.path.join(os.path.dirname(__file__), )
driver = webdriver.PhantomJS(executable_path=executable_path, **kwargs)
driver.set_window_size(1280, 800)
return driver
else:
driver = webdriver.Remote(*args, **kwargs)
return driver | Instantiates a new WebDriver instance, determining class by environment variables |
384,255 | def get_message_content(self):
body = self.doc.find(
".//{http://salmon-protocol.org/ns/magic-env}data").text
body = urlsafe_b64decode(body.encode("ascii"))
logger.debug("diaspora.protocol.get_message_content: %s", body)
return body | Given the Slap XML, extract out the payload. |
384,256 | def add_node(self, node):
new = ClusterNode.from_uri(node["addr"])
cluster_member = self.nodes[0]
check_new_nodes([new], [cluster_member])
new.meet(cluster_member.host, cluster_member.port)
self.nodes.append(new)
self.wait()
if node["role"] != "slave":
return
if "master" in node:
target = self.get_node(node["master"])
if not target:
raise NodeNotFound(node["master"])
else:
masters = sorted(self.masters, key=lambda x: len(x.slaves(x.name)))
target = masters[0]
new.replicate(target.name)
new.flush_cache()
target.flush_cache() | Add a node to cluster.
:param node: should be formated like this
`{"addr": "", "role": "slave", "master": "master_node_id"} |
384,257 | def _cache_is_expired():
now = timezone.now()
timediff = TransCache.SINGLETON_CREATION_DATETIME - now
return (timediff.total_seconds() > TransCache.SINGLETON_EXPIRATION_MAX_SECONDS) | Indicates whether the cache has expired |
384,258 | def set_features(self, filter_type):
elements_to_split = {: self.allpsms, : self.allpeps}
self.features = self.splitfunc(elements_to_split, self.ns, filter_type) | Calls splitter to split percolator output into target/decoy
elements.
Writes two new xml files with features. Currently only psms and
peptides. Proteins not here, since one cannot do protein inference
before having merged and remapped multifraction data anyway. |
384,259 | def _unescape_str(value):
if isinstance(value, int):
return "%d" % value
value = value.replace(r"\\", "\\")
for i, j in ts3_escape.items():
value = value.replace(j, i)
return value | Unescape a TS3 compatible string into a normal string
@param value: Value
@type value: string/int |
384,260 | def agent_version(self):
version = self.safe_data[][]
if version:
return client.Number.from_json(version)
else:
return None | Get the version of the Juju machine agent.
May return None if the agent is not yet available. |
384,261 | def status(self, build_record_id, **kwargs):
kwargs[] = True
if kwargs.get():
return self.status_with_http_info(build_record_id, **kwargs)
else:
(data) = self.status_with_http_info(build_record_id, **kwargs)
return data | Latest push result of BuildRecord.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.status(build_record_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int build_record_id: Build Record id (required)
:return: BuildRecordPushResultRest
If the method is called asynchronously,
returns the request thread. |
384,262 | def build_defaults(self):
defaults = {}
for arg in self.args:
if not isinstance(arg, _BaseOpt):
raise errors.InvalidSchemeError()
if not isinstance(arg.default, NoDefault):
defaults[arg.name] = arg.default
if isinstance(arg, DictOption):
if arg.scheme:
b = arg.scheme.build_defaults()
if b:
defaults[arg.name] = b
return defaults | Build a dictionary of default values from the `Scheme`.
Returns:
dict: The default configurations as set by the `Scheme`.
Raises:
errors.InvalidSchemeError: The `Scheme` does not contain
valid options. |
384,263 | def guess_payload_class(self, payload):
plen = len(payload)
if plen > _NTP_AUTH_MD5_TAIL_SIZE:
return NTPExtensions
elif plen == _NTP_AUTH_MD5_TAIL_SIZE:
return NTPAuthenticator
return Packet.guess_payload_class(self, payload) | Handles NTPv4 extensions and MAC part (when authentication is used.) |
384,264 | async def close(self):
if self.server:
self.server.close()
await self.server.wait_closed()
self.server = None | Close the listening socket. This does not close any ServerSession
objects created to handle incoming connections. |
384,265 | def start(self, poll_period=None):
logger.info("Incoming ports bound")
if poll_period is None:
poll_period = self.poll_period
start = time.time()
count = 0
self._kill_event = threading.Event()
self._task_puller_thread = threading.Thread(target=self.migrate_tasks_to_internal,
args=(self._kill_event,))
self._task_puller_thread.start()
self._command_thread = threading.Thread(target=self._command_server,
args=(self._kill_event,))
self._command_thread.start()
poller = zmq.Poller()
poller.register(self.task_outgoing, zmq.POLLIN)
poller.register(self.results_incoming, zmq.POLLIN)
interesting_managers = set()
while not self._kill_event.is_set():
self.socks = dict(poller.poll(timeout=poll_period))
if self.task_outgoing in self.socks and self.socks[self.task_outgoing] == zmq.POLLIN:
logger.debug("[MAIN] starting task_outgoing section")
message = self.task_outgoing.recv_multipart()
manager = message[0]
if manager not in self._ready_manager_queue:
reg_flag = False
try:
msg = json.loads(message[1].decode())
reg_flag = True
except Exception:
logger.warning("[MAIN] Got a non-json registration message from manager:{}".format(
manager))
logger.debug("[MAIN] Message :\n{}\n".format(message[0]))
self._ready_manager_queue[manager] = {: time.time(),
: 0,
: None,
: 0,
: True,
: []}
if reg_flag is True:
interesting_managers.add(manager)
logger.info("[MAIN] Adding manager: {} to ready queue".format(manager))
self._ready_manager_queue[manager].update(msg)
logger.info("[MAIN] Registration info for manager {}: {}".format(manager, msg))
if (msg[].rsplit(".", 1)[0] != self.current_platform[].rsplit(".", 1)[0] or
msg[] != self.current_platform[]):
logger.warn("[MAIN] Manager {} has incompatible version info with the interchange".format(manager))
if self.suppress_failure is False:
logger.debug("Setting kill event")
self._kill_event.set()
e = ManagerLost(manager)
result_package = {: -1, : serialize_object(e)}
pkl_package = pickle.dumps(result_package)
self.results_outgoing.send(pkl_package)
logger.warning("[MAIN] Sent failure reports, unregistering manager")
else:
logger.debug("[MAIN] Suppressing shutdown due to version incompatibility")
else:
logger.info("[MAIN] Manager {} has compatible Parsl version {}".format(manager, msg[]))
logger.info("[MAIN] Manager {} has compatible Python version {}".format(manager,
msg[].rsplit(".", 1)[0]))
else:
if self.suppress_failure is False:
self._kill_event.set()
e = BadRegistration(manager, critical=True)
result_package = {: -1, : serialize_object(e)}
pkl_package = pickle.dumps(result_package)
self.results_outgoing.send(pkl_package)
else:
logger.debug("[MAIN] Suppressing bad registration from manager:{}".format(
manager))
else:
tasks_requested = int.from_bytes(message[1], "little")
self._ready_manager_queue[manager][] = time.time()
if tasks_requested == HEARTBEAT_CODE:
logger.debug("[MAIN] Manager {} sent heartbeat".format(manager))
self.task_outgoing.send_multipart([manager, b, PKL_HEARTBEAT_CODE])
else:
logger.debug("[MAIN] Manager {} requested {} tasks".format(manager, tasks_requested))
self._ready_manager_queue[manager][] = tasks_requested
interesting_managers.add(manager)
logger.debug("[MAIN] leaving task_outgoing section")
logger.debug("Managers count (total/interesting): {}/{}".format(len(self._ready_manager_queue),
len(interesting_managers)))
if interesting_managers and not self.pending_task_queue.empty():
shuffled_managers = list(interesting_managers)
random.shuffle(shuffled_managers)
while shuffled_managers and not self.pending_task_queue.empty():
manager = shuffled_managers.pop()
tasks_inflight = len(self._ready_manager_queue[manager][])
real_capacity = min(self._ready_manager_queue[manager][],
self._ready_manager_queue[manager][] - tasks_inflight)
if (real_capacity and self._ready_manager_queue[manager][]):
tasks = self.get_tasks(real_capacity)
if tasks:
self.task_outgoing.send_multipart([manager, b, pickle.dumps(tasks)])
task_count = len(tasks)
count += task_count
tids = [t[] for t in tasks]
self._ready_manager_queue[manager][] -= task_count
self._ready_manager_queue[manager][].extend(tids)
logger.debug("[MAIN] Sent tasks: {} to manager {}".format(tids, manager))
if self._ready_manager_queue[manager][] > 0:
logger.debug("[MAIN] Manager {} has free_capacity {}".format(manager, self._ready_manager_queue[manager][]))
else:
logger.debug("[MAIN] Manager {} is now saturated".format(manager))
interesting_managers.remove(manager)
else:
interesting_managers.remove(manager)
logger.debug("[MAIN] leaving _ready_manager_queue section, with {} managers still interesting".format(len(interesting_managers)))
else:
logger.debug("[MAIN] either no interesting managers or no tasks, so skipping manager pass")
if self.results_incoming in self.socks and self.socks[self.results_incoming] == zmq.POLLIN:
logger.debug("[MAIN] entering results_incoming section")
manager, *b_messages = self.results_incoming.recv_multipart()
if manager not in self._ready_manager_queue:
logger.warning("[MAIN] Received a result from a un-registered manager: {}".format(manager))
else:
logger.debug("[MAIN] Got {} result items in batch".format(len(b_messages)))
for b_message in b_messages:
r = pickle.loads(b_message)
self._ready_manager_queue[manager][].remove(r[])
self.results_outgoing.send_multipart(b_messages)
logger.debug("[MAIN] Current tasks: {}".format(self._ready_manager_queue[manager][]))
logger.debug("[MAIN] leaving results_incoming section")
logger.debug("[MAIN] entering bad_managers section")
bad_managers = [manager for manager in self._ready_manager_queue if
time.time() - self._ready_manager_queue[manager][] > self.heartbeat_threshold]
for manager in bad_managers:
logger.debug("[MAIN] Last: {} Current: {}".format(self._ready_manager_queue[manager][], time.time()))
logger.warning("[MAIN] Too many heartbeats missed for manager {}".format(manager))
for tid in self._ready_manager_queue[manager][]:
try:
raise ManagerLost(manager)
except Exception:
result_package = {: tid, : serialize_object(RemoteExceptionWrapper(*sys.exc_info()))}
pkl_package = pickle.dumps(result_package)
self.results_outgoing.send(pkl_package)
logger.warning("[MAIN] Sent failure reports, unregistering manager")
self._ready_manager_queue.pop(manager, )
logger.debug("[MAIN] leaving bad_managers section")
logger.debug("[MAIN] ending one main loop iteration")
delta = time.time() - start
logger.info("Processed {} tasks in {} seconds".format(count, delta))
logger.warning("Exiting") | Start the NeedNameQeueu
Parameters:
----------
TODO: Move task receiving to a thread |
384,266 | def sensitivity(imgs, bg=None):
bg = getBackground(bg)
for n, i in enumerate(imgs):
i = imread(i, dtype=float)
i -= bg
smooth = fastMean(median_filter(i, 3))
i /= smooth
if n == 0:
out = i
else:
out += i
out /= (n + 1)
return out | Extract pixel sensitivity from a set of homogeneously illuminated images
This method is detailed in Section 5 of:
---
K.Bedrich, M.Bokalic et al.:
ELECTROLUMINESCENCE IMAGING OF PV DEVICES:
ADVANCED FLAT FIELD CALIBRATION,2017
--- |
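In formula form, the per-pixel sensitivity estimated above is the average over the N input images of each background-subtracted image divided by its own smoothed version (3x3 median filter followed by a fast mean):

```latex
S = \frac{1}{N}\sum_{n=1}^{N} \frac{I_n - B}{\operatorname{smooth}(I_n - B)}
```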
384,267 | def block(self, mcs):
self.oracle.add_clause([self.sels[cl_id - 1] for cl_id in mcs]) | Block a (previously computed) MCS. The MCS should be given as an
iterable of integers. Note that this method is not automatically
invoked from :func:`enumerate` because a user may want to block
some of the MCSes conditionally depending on the needs. For
example, one may want to compute disjoint MCSes only in which case
this standard blocking is not appropriate.
:param mcs: an MCS to block
:type mcs: iterable(int) |
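The docstring above describes an explicit enumerate/block loop. A hedged sketch with PySAT's MCSls enumerator; the class and methods are standard PySAT, but the tiny formula is purely illustrative.

```python
from pysat.examples.mcsls import MCSls
from pysat.formula import WCNF

wcnf = WCNF()
wcnf.append([-1, -2])        # hard clause: not both x1 and x2
wcnf.append([1], weight=1)   # soft clause x1
wcnf.append([2], weight=1)   # soft clause x2

mcsls = MCSls(wcnf, use_cld=True)
for mcs in mcsls.enumerate():
    print(mcs)               # e.g. [1] then [2]
    mcsls.block(mcs)         # blocking is the caller's responsibility, as documented
mcsls.delete()
```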
384,268 | def get_composition_repository_assignment_session(self, proxy):
if not self.supports_composition_repository_assignment():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise
proxy = self._convert_proxy(proxy)
try:
session = sessions.CompositionRepositoryAssignmentSession(proxy, runtime=self._runtime)
except AttributeError:
raise
return session | Gets the session for assigning composition to repository
mappings.
arg proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.CompositionRepositoryAssignmentSession)
- a CompositionRepositoryAssignmentSession
raise: OperationFailed - unable to complete request
raise: Unimplemented -
supports_composition_repository_assignment() is false
compliance: optional - This method must be implemented if
supports_composition_repository_assignment() is
true. |
384,269 | def listen(self, topic, timeout=1, limit=1):
if not self._subscribed:
logger.warn()
return []
if topic not in self._messages:
logger.warn( % topic)
return []
if limit != 0 and len(self._messages[topic]) >= limit:
messages = self._messages[topic][:]
self._messages[topic] = []
return messages[-limit:] if limit != 0 else messages | Listen to a topic and return a list of message payloads received
within the specified time. Requires an async Subscribe to have been called previously.
`topic` topic to listen to
`timeout` duration to listen
`limit` the max number of payloads that will be returned. Specify 0
for no limit
Examples:
Listen and get a list of all messages received within 5 seconds
| ${messages}= | Listen | test/test | timeout=5 | limit=0 |
Listen and get 1st message received within 60 seconds
| @{messages}= | Listen | test/test | timeout=60 | limit=1 |
| Length should be | ${messages} | 1 | |
384,270 | def profile(func):
def inner(*args, **kwargs):
pr = cProfile.Profile()
pr.enable()
res = func(*args, **kwargs)
pr.disable()
s = io.StringIO()
ps = pstats.Stats(pr, stream=s).sort_stats()
ps.print_stats()
print(s.getvalue())
return res
return inner | Decorator to profile functions with cProfile
Args:
func: python function
Returns:
profile report
References:
https://osf.io/upav8/ |
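Typical use of the decorator above: wrap any function and the cProfile/pstats report is printed when it returns. The decorated function below is made up for illustration.

```python
@profile
def busy_work(n=100_000):
    return sum(i * i for i in range(n))

busy_work()  # prints the pstats report, then the original return value is passed through
```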
384,271 | def _at_dump_context(self, calculator, rule, scope, block):
sys.stderr.write("%s\n" % repr(rule.namespace._variables)) | Implements @dump_context |
384,272 | def is_child_of_objective_bank(self, id_, objective_bank_id):
if self._catalog_session is not None:
return self._catalog_session.is_child_of_catalog(id_=id_, catalog_id=objective_bank_id)
return self._hierarchy_session.is_child(id_=objective_bank_id, child_id=id_) | Tests if an objective bank is a direct child of another.
arg: id (osid.id.Id): an ``Id``
arg: objective_bank_id (osid.id.Id): the ``Id`` of an
objective bank
return: (boolean) - ``true`` if the ``id`` is a child of
``objective_bank_id,`` ``false`` otherwise
raise: NotFound - ``objective_bank_id`` is not found
raise: NullArgument - ``id`` or ``objective_bank_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``. |
384,273 | def _prfx_getattr_(obj, item):
if item.startswith() or item.startswith():
return getattr(obj, item[2:])
raise AttributeError( % (obj.__class__.__name__, item)) | Replacement of __getattr__ |
384,274 | def rename(idf, objkey, objname, newname):
refnames = getrefnames(idf, objkey)
for refname in refnames:
objlists = getallobjlists(idf, refname)
for refname in refnames:
for robjkey, refname, fieldindexlist in objlists:
idfobjects = idf.idfobjects[robjkey]
for idfobject in idfobjects:
for findex in fieldindexlist:
if idfobject[idfobject.objls[findex]] == objname:
idfobject[idfobject.objls[findex]] = newname
theobject = idf.getobject(objkey, objname)
fieldname = [item for item in theobject.objls if item.endswith('Name')][0]
theobject[fieldname] = newname
return theobject | rename all the references to this objname |
384,275 | def _buildTime(self, source, quantity, modifier, units):
if _debug:
print('_buildTime: [%s][%s][%s]' % (quantity, modifier, units))
if source is None:
source = time.localtime()
if quantity is None:
quantity = ''
else:
quantity = quantity.strip()
if len(quantity) == 0:
qty = 1
else:
try:
qty = int(quantity)
except ValueError:
qty = 0
if modifier in self.ptc.Modifiers:
qty = qty * self.ptc.Modifiers[modifier]
if units is None or units == '':
units = 'dy'
start = datetime.datetime(*source[:6])
target = start + datetime.timedelta(weeks=qty)
self.dateFlag = 1
return target.timetuple() | Take C{quantity}, C{modifier} and C{unit} strings and convert them into values.
After converting, calcuate the time and return the adjusted sourceTime.
@type source: time
@param source: time to use as the base (or source)
@type quantity: string
@param quantity: quantity string
@type modifier: string
@param modifier: how quantity and units modify the source time
@type units: string
@param units: unit of the quantity (i.e. hours, days, months, etc)
@rtype: struct_time
@return: C{struct_time} of the calculated time |
384,276 | def activate():
parent = lib.parent()
try:
cmd = lib.cmd(parent)
except SystemError as exc:
lib.echo(exc)
sys.exit(lib.PROGRAM_ERROR)
context = lib.context(root=_extern.cwd())
context["BE_SHELL"] = parent
if lib.platform() == "unix":
context["BE_TABCOMPLETION"] = os.path.join(
os.path.dirname(__file__), "_autocomplete.sh").replace("\\", "/")
context.pop("BE_ACTIVE", None)
sys.exit(subprocess.call(cmd, env=context)) | Enter into an environment with support for tab-completion
This command drops you into a subshell, similar to the one
generated via `be in ...`, except no topic is present and
instead it enables tab-completion for supported shells.
See documentation for further information.
https://github.com/mottosso/be/wiki/cli |
384,277 | def interface(iface):
iface_info, error = _get_iface_info(iface)
if error is False:
return iface_info.get(iface, {}).get(, )
else:
return error | Return the details of `iface` or an error if it does not exist |
384,278 | def parseArguments(argv=None):
store_opt = StoreOpt()
parser = argparse.ArgumentParser(
prog=,
usage=,
add_help=False,
description=dedent(
.rstrip()),
epilog=dedent(
.rstrip()),
formatter_class=argparse.RawDescriptionHelpFormatter)
target_args = parser.add_argument_group("Target Specification")
target_args.add_argument(, action=, nargs=,
metavar=,
help=(),
default=argparse.SUPPRESS)
concurrency_args = parser.add_argument_group("Concurrency Options")
store_opt(
concurrency_args.add_argument(, , action=,
type=int, metavar=,
help="Number of processes to use to run tests. Note that your "
"tests need to be written to avoid using the same resources (temp "
"files, sockets, ports, etc.) for the multi-process mode to work "
"well (--initializer and --finalizer can help provision "
"per-process resources). Default is to run the same number of "
"processes as your machine has logical CPUs. Note that for a "
"small number of trivial tests, running everything in a single "
"process may be faster than the overhead of initializing all the "
"processes.",
default=argparse.SUPPRESS))
store_opt(
concurrency_args.add_argument(, , action=,
metavar=,
help="Python function to run inside of a single worker process "
"before it starts running tests. This is the way to provision "
"external resources that each concurrent worker process needs to "
"have exclusive access to. Specify the function in dotted "
"notation in a way that will be importable from the location you "
"are running green from.",
default=argparse.SUPPRESS))
store_opt(
concurrency_args.add_argument(, , action=,
metavar=,
help="Same as --initializer, only run at the end of a worker "
"process-t--termcolorstore_true-T--notermcolorstore_true-W--disable-windowsstore_true-a--allow-stdoutstore_true-q--quiet-stdoutstore_true-k--no-skip-reportstore_truet print the report of skipped tests "
"after testing is done. Skips will still show up in the progress "
"report and summary count."),
default=argparse.SUPPRESS))
store_opt(out_args.add_argument(, ,
action=, help=("Don-h--helpstore_true-V--versionstore_true-l--loggingstore_truet configure the root logger to redirect to /dev/null, "
"enabling internal debugging output, as well as any output test (or "
"tested) code may be sending via the root logger.",
default=argparse.SUPPRESS))
store_opt(out_args.add_argument(, , action=,
help=("Enable internal debugging statements. Implies --logging. Can "
"be specified up to three times for more debug output."),
default=argparse.SUPPRESS))
store_opt(out_args.add_argument(, , action=,
help=("Verbose. Can be specified up to three times for more "
"verbosity. Recommended levels are -v and -vv."),
default=argparse.SUPPRESS))
store_opt(out_args.add_argument(, , action=,
help=("Disable unidecode which converts test output from unicode to"
"ascii by default on Windows to avoid hard-to-debug crashes."),
default=argparse.SUPPRESS))
other_args = parser.add_argument_group("Other Options")
store_opt(other_args.add_argument(, , action=,
help=("Stop execution at the first test that fails or errors."),
default=argparse.SUPPRESS))
store_opt(other_args.add_argument(, , action=,
metavar=, help="Use this config file to override any values from "
"the config file specified by environment variable GREEN_CONFIG, "
"~/.green, and .green in the current working directory.",
default=argparse.SUPPRESS))
store_opt(other_args.add_argument(, , action=,
metavar=,
help="Pattern to match test files. Default is test*.py",
default=argparse.SUPPRESS))
store_opt(other_args.add_argument(, , action=,
metavar=, help="Pattern to match test method names after "
". Default is , meaning match methods named .",
default=argparse.SUPPRESS))
store_opt(other_args.add_argument(, ,
action=,
metavar="FILENAME",
help=("Generate a JUnit XML report."),
default=argparse.SUPPRESS))
cov_args = parser.add_argument_group(
"Coverage Options ({})".format(coverage_version))
store_opt(cov_args.add_argument(, , action=,
help=("Produce coverage output."), default=argparse.SUPPRESS))
store_opt(cov_args.add_argument(, ,
action=, metavar=, help=("Specify a coverage config file. "
"Implies --run-coverage See the coverage documentation "
"at https://coverage.readthedocs.io/en/v4.5.x/config.html "
"for coverage config file syntax. The [run] and [report] sections "
"are most relevant."),
default=argparse.SUPPRESS)),
store_opt(cov_args.add_argument(, , action=,
help=("Do not print coverage report to stdout (coverage files will "
"still be created). Implies --run-coverage"),
default=argparse.SUPPRESS))
store_opt(cov_args.add_argument(, , action=,
help=("Green tries really hard to set up a good list of patterns of "
"files to omit from coverage reports. If the default list "
"catches files that you DO want to cover you can specify this "
"flag to leave the default list empty to start with. You can "
"then add patterns back in with --omit-add. The default list is "
"something like -- only longer."),
default=argparse.SUPPRESS))
store_opt(cov_args.add_argument(, , action=,
metavar=,
help=("Comma-separated file-patterns to include in coverage. This "
"implies that anything that does not match the include pattern is "
"omitted from coverage reporting."),
default=argparse.SUPPRESS))
store_opt(cov_args.add_argument(, , action=,
metavar=,
help=("Comma-separated file-patterns to omit from coverage. For "
"example, if coverage reported a file mypackage/foo/bar you could "
"omit it from coverage with , , or "),
default=argparse.SUPPRESS))
store_opt(cov_args.add_argument(, , action=,
metavar=, type=int, help=("Integer. A minimum coverage value. If "
"not met, then we will print a message and exit with a nonzero "
"status. Implies --run-coverage"),
default=argparse.SUPPRESS))
integration_args = parser.add_argument_group("Integration Options")
store_opt(integration_args.add_argument(,
action=, help=("Location of the bash- and zsh-completion "
"file. To enable bash- or zsh-completion, see ENABLING SHELL "
"COMPLETION below."), default=argparse.SUPPRESS))
store_opt(integration_args.add_argument(,
action=,
help=("Output possible completions of the given target. Used by "
"bash- and zsh-completion."), default=argparse.SUPPRESS))
store_opt(integration_args.add_argument(, action=,
help="Output all options. Used by bash- and zsh-completion.",
default=argparse.SUPPRESS))
args = parser.parse_args(argv)
args.parser = parser
args.store_opt = store_opt
return args | I parse arguments in sys.argv and return the args object. The parser
itself is available as args.parser.
Adds the following members to args:
parser = the parser object
store_opt = the StoreOpt object |
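A minimal, self-contained sketch (not the actual entry point above, whose name is not shown here) of why every option passes default=argparse.SUPPRESS: options the user did not supply never appear on the resulting namespace, so later config-file merging can tell "unset" apart from "set to a falsy value".

import argparse

# Two representative options built the same way as above.
parser = argparse.ArgumentParser()
parser.add_argument('-r', '--run-coverage', action='store_true',
                    default=argparse.SUPPRESS)
parser.add_argument('-m', '--minimum-coverage', action='store', type=int,
                    default=argparse.SUPPRESS)

args = parser.parse_args(['-r'])
print(vars(args))                          # {'run_coverage': True}
print(hasattr(args, 'minimum_coverage'))   # False -- unset options are simply absent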
384,279 | def to_dict(self):
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
else:
result[attr] = value
return result | Returns the model properties as a dict |
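A hedged sketch of the kind of swagger-codegen style model this to_dict() lives on; the Tag/Pet classes and their fields are invented for illustration, but the swagger_types mapping and the list/nested-model handling mirror the method above.

# Hypothetical model classes; names are placeholders.
class Tag(object):
    swagger_types = {'label': 'str'}
    def __init__(self, label):
        self.label = label
    def to_dict(self):
        return {'label': self.label}

class Pet(object):
    swagger_types = {'name': 'str', 'tags': 'list[Tag]'}
    def __init__(self, name, tags):
        self.name = name
        self.tags = tags
    def to_dict(self):                      # same walk as the method above
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [v.to_dict() if hasattr(v, 'to_dict') else v for v in value]
            elif hasattr(value, 'to_dict'):
                result[attr] = value.to_dict()
            else:
                result[attr] = value
        return result

print(Pet('rex', [Tag('friendly')]).to_dict())
# {'name': 'rex', 'tags': [{'label': 'friendly'}]}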
384,280 | def process_transport_command(self, header, message):
if not isinstance(message, dict):
return
relevant = False
if "host" in message:
if message["host"] != self.__hostid:
return
relevant = True
if "service" in message:
if message["service"] != self._service_class_name:
return
relevant = True
if not relevant:
return
if message.get("command"):
self.log.info(
"Received command via transport layer", message["command"]
)
if message["command"] == "shutdown":
self.shutdown = True
else:
self.log.warning("Received invalid transport command message") | Parse a command coming in through the transport command subscription |
384,281 | def on_tool_finish(self, tool):
with self._lock:
if tool in self.current_tools:
self.current_tools.remove(tool)
self.completed_tools.append(tool) | Called when an individual tool completes execution.
:param tool: the name of the tool that completed
:type tool: str |
384,282 | def _load_mapping(self, mapping):
mapping["oid_as_pk"] = bool(mapping.get("fields", {}).get("Id"))
job_id, local_ids_for_batch = self._create_job(mapping)
result = self._wait_for_job(job_id)
self._store_inserted_ids(mapping, job_id, local_ids_for_batch)
return result | Load data for a single step. |
384,283 | def append(self, name, data, start):
for throttle in self.throttles.values():
getattr(throttle, name).append(data, start) | Update timeout for all throttles
:param name: name of throttle to append to ("read" or "write")
:type name: :py:class:`str`
:param data: bytes of data for count
:type data: :py:class:`bytes`
:param start: start of read/write time from
:py:meth:`asyncio.BaseEventLoop.time`
:type start: :py:class:`float` |
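A small sketch of the fan-out this performs; the Throttle/RateTracker classes below are invented stand-ins, and only the attribute-per-direction shape ("read"/"write") follows the code above.

import time

class RateTracker:
    def __init__(self):
        self.samples = []
    def append(self, data, start):
        self.samples.append((len(data), start))

class Throttle:
    def __init__(self):
        self.read = RateTracker()
        self.write = RateTracker()

throttles = {'conn-1': Throttle(), 'conn-2': Throttle()}
start = time.monotonic()
for throttle in throttles.values():             # same loop as append('read', data, start)
    getattr(throttle, 'read').append(b'payload', start)
print(len(throttles['conn-2'].read.samples))    # 1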
384,284 | def find_tf_idf(file_names=[],prev_file_path=None, dump_path=None):
tf_idf = []
df = defaultdict(int)
if prev_file_path:
print(TAG, 'loading previous tf-idf data from', prev_file_path)  # message text reconstructed
df,tf_idf = pickle.load(open(prev_file_path,'rb'))
prev_doc_count = len(df)
prev_corpus_length = len(tf_idf)
for f in file_names:
with open(f,'r') as file1:
for line in file1:
wdict = defaultdict(int)
for word in set(line.split()):
df[word] +=1
for word in line.split():
wdict[word] += 1
tf_idf.append(wdict)
for doc in tf_idf:
for key in doc:
true_idf = math.log(len(tf_idf)/df[key])
true_tf = doc[key]/float(len(doc))
doc[key] = true_tf * true_idf
print(TAG, 'unique words in corpus:', len(df), '('+paint('+'+str(len(df)-prev_doc_count),'green')+')' if prev_file_path else '')  # message text and colour name reconstructed
print(TAG, 'documents in corpus:', len(tf_idf), '('+paint('+'+str(len(tf_idf)-prev_corpus_length),'green')+')' if prev_file_path else '')
if dump_path:
if dump_path[-8:] == 'tfidfpkl':
pickle.dump((df,tf_idf),open(dump_path,'wb'),protocol=pickle.HIGHEST_PROTOCOL)
print(TAG, 'dumped tf-idf data to', dump_path)
return df,tf_idf | Function to create a TF-IDF list of dictionaries for a corpus of docs.
If you opt to dump the data, provide a file path with the .tfidfpkl extension (a convention adopted for clarity).
You can also regenerate a tf-idf list on top of an old one by passing the old file's path.
@Args:
--
file_names : paths of the files to process; these files are created using the twitter_streaming module.
prev_file_path : path of an old .tfidfpkl file, if available. (default=None)
dump_path : path of the .tfidfpkl file to dump the generated data to. (default=None)
@returns:
--
df : a dict of unique words in corpus,with their document frequency as values.
tf_idf : the generated tf-idf list of dictionaries for mentioned docs. |
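A hedged usage sketch; the file names and the dump path are placeholders, and the input files are expected to contain one document per line as produced by the twitter_streaming module.

# First run over two capture files, dumping the result for later reuse.
df, tf_idf = find_tf_idf(
    file_names=['tweets_day1.txt', 'tweets_day2.txt'],
    dump_path='corpus.tfidfpkl',        # the 'tfidfpkl' suffix enables the dump
)

# Later, extend the same corpus incrementally from the previous dump.
df, tf_idf = find_tf_idf(
    file_names=['tweets_day3.txt'],
    prev_file_path='corpus.tfidfpkl',
    dump_path='corpus.tfidfpkl',
)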
384,285 | def add_dependency(self, name, obj):
if name in self._deps:
if self._deps[name] is obj:
return
raise ValueError(
"There exists a different dep with the same name : %r" % name)
self._deps[name] = obj | Add a code dependency so it gets inserted into globals |
384,286 | def get_auth_token_login_url(
self,
auth_token_ticket,
authenticator,
private_key,
service_url,
username,
):
auth_token, auth_token_signature = self._build_auth_token_data(
auth_token_ticket,
authenticator,
private_key,
username=username,
)
logging.debug('Auth token: {}'.format(auth_token))  # log message text reconstructed
url = self._get_auth_token_login_url(
auth_token=auth_token,
auth_token_signature=auth_token_signature,
service_url=service_url,
)
logging.debug('Auth token login URL: {}'.format(url))  # log message text reconstructed
return url | Build an auth token login URL.
See https://github.com/rbCAS/CASino/wiki/Auth-Token-Login for details. |
384,287 | def search(self, value, createIndex=None):
if createIndex:
self._createIndex = createIndex
self._search = True
self.filter(QueryExpression({'$text': {'$search': value}}))  # MongoDB text-search operators assumed
return self | Full-text search support; make sure a text index already exists on the collection. Raises IndexNotFound if no text index exists.
**Examples**: ``query.search('pecel lele', createIndex=['FullName', 'Username'])``
384,288 | def get_graphs_by_ids(self, network_ids: Iterable[int]) -> List[BELGraph]:
rv = [
self.get_graph_by_id(network_id)
for network_id in network_ids
]
log.debug('getting networks by identifiers: %s', network_ids)
return rv | Get a list of networks with the given identifiers and convert them to BEL graphs.
384,289 | def _retry_deliveries(self):
self.logger.debug("Begin messages delivery retries")
tasks = []
for message in itertools.chain(self.session.inflight_in.values(), self.session.inflight_out.values()):
tasks.append(asyncio.wait_for(self._handle_message_flow(message), 10, loop=self._loop))
if tasks:
done, pending = yield from asyncio.wait(tasks, loop=self._loop)
self.logger.debug("%d messages redelivered" % len(done))
self.logger.debug("%d messages not redelivered due to timeout" % len(pending))
self.logger.debug("End messages delivery retries") | Handle [MQTT-4.4.0-1] by resending PUBLISH and PUBREL messages for pending out messages
:return: |
384,290 | def choose(msg, items, attr):
if len(items) == 1:
return items[0]
print()
for index, i in enumerate(items):
name = attr(i) if callable(attr) else getattr(i, attr)
print('%s: %s' % (index, name))
print()
while True:
try:
inp = input('%s: ' % msg)  # prompt and separator literals reconstructed
if any(s in inp for s in (':', '::')):  # slice-style selection such as "1:3"
idx = slice(*map(lambda x: int(x.strip()) if x.strip() else None, inp.split(':')))
return items[idx]
else:
return items[int(inp)]
except (ValueError, IndexError):
pass | Command line helper to display a list of choices, asking the
user to choose one of the options. |
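A usage sketch with plain objects; interactive input such as "1" picks a single item, while slice-style input such as "0:2" returns a sub-list.

from collections import namedtuple

Show = namedtuple('Show', 'name year')
shows = [Show('Archer', 2009), Show('Fargo', 2014), Show('Dark', 2017)]

# Displays a numbered menu and blocks on input().
picked = choose('Pick a show', shows, 'name')

# A callable attribute getter works as well:
picked = choose('Pick a show', shows, lambda s: '%s (%d)' % (s.name, s.year))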
384,291 | def sub_channel(self):
if self._sub_channel is None:
self._sub_channel = self.sub_channel_class(self.context,
self.session,
(self.ip, self.iopub_port))
return self._sub_channel | Get the SUB socket channel object. |
384,292 | def per(arga, argb, prec=10):
if not isinstance(prec, int):
raise RuntimeError("Argument `prec` is not valid")
a_type = 1 * _isreal(arga) + 2 * (isiterable(arga) and not isinstance(arga, str))
b_type = 1 * _isreal(argb) + 2 * (isiterable(argb) and not isinstance(argb, str))
if not a_type:
raise RuntimeError("Argument `arga` is not valid")
if not b_type:
raise RuntimeError("Argument `argb` is not valid")
if a_type != b_type:
raise TypeError("Arguments are not of the same type")
if a_type == 1:
arga, argb = float(arga), float(argb)
num_min, num_max = min(arga, argb), max(arga, argb)
return (
0
if _isclose(arga, argb)
else (
sys.float_info.max
if _isclose(num_min, 0.0)
else round((num_max / num_min) - 1, prec)
)
)
ret = copy.copy(arga)
for num, (x, y) in enumerate(zip(arga, argb)):
if not _isreal(x):
raise RuntimeError("Argument `arga` is not valid")
if not _isreal(y):
raise RuntimeError("Argument `argb` is not valid")
x, y = float(x), float(y)
ret[num] = (
0
if _isclose(x, y)
else (
sys.float_info.max
if _isclose(x, 0.0) or _isclose(y, 0)
else (round((max(x, y) / min(x, y)) - 1, prec))
)
)
return ret | Calculate percentage difference between numbers.
If only two numbers are given, the percentage difference between them is
computed. If two sequences of numbers are given (either two lists of
numbers or Numpy vectors), the element-wise percentage difference is
computed. If any of the numbers in the arguments is zero the value returned
is the maximum floating-point number supported by the Python interpreter.
:param arga: First number, list of numbers or Numpy vector
:type arga: float, integer, list of floats or integers, or Numpy vector
of floats or integers
:param argb: Second number, list of numbers or Numpy vector
:type argb: float, integer, list of floats or integers, or Numpy vector
of floats or integers
:param prec: Maximum length of the fractional part of the result
:type prec: integer
:rtype: Float, list of floats or Numpy vector, depending on the arguments
type
:raises:
* RuntimeError (Argument \`arga\` is not valid)
* RuntimeError (Argument \`argb\` is not valid)
* RuntimeError (Argument \`prec\` is not valid)
* TypeError (Arguments are not of the same type) |
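Worked examples, assuming per and its private helpers (_isreal, _isclose, isiterable) are imported from the module above:

per(10, 12.5)     # -> 0.25            (12.5/10 - 1, a 25% difference)
per(3, 3)         # -> 0               (values compare as close)
per([1.0, 2.0, 0.0], [1.1, 2.0, 5.0])
# -> [0.1, 0, 1.7976931348623157e+308]  (a zero in either element saturates to sys.float_info.max)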
384,293 | def generalized_lsp_value_withtau(times, mags, errs, omega):
one_over_errs2 = 1.0/(errs*errs)
W = npsum(one_over_errs2)
wi = one_over_errs2/W
sin_omegat = npsin(omega*times)
cos_omegat = npcos(omega*times)
sin2_omegat = sin_omegat*sin_omegat
cos2_omegat = cos_omegat*cos_omegat
sincos_omegat = sin_omegat*cos_omegat
Y = npsum( wi*mags )
C = npsum( wi*cos_omegat )
S = npsum( wi*sin_omegat )
CpS = npsum( wi*sincos_omegat )
CpC = npsum( wi*cos2_omegat )
CS = CpS - C*S
CC = CpC - C*C
SS = 1 - CpC - S*S
tan_omega_tau_top = 2.0*CS
tan_omega_tau_bottom = CC - SS
tan_omega_tau = tan_omega_tau_top/tan_omega_tau_bottom
tau = nparctan(tan_omega_tau)/(2.0*omega)
sin_omega_tau = npsin(omega*(times - tau))
cos_omega_tau = npcos(omega*(times - tau))
sin2_omega_tau = sin_omega_tau*sin_omega_tau
cos2_omega_tau = cos_omega_tau*cos_omega_tau
sincos_omega_tau = sin_omega_tau*cos_omega_tau
C_tau = npsum(wi*cos_omega_tau)
S_tau = npsum(wi*sin_omega_tau)
CpS_tau = npsum( wi*sincos_omega_tau )
CpC_tau = npsum( wi*cos2_omega_tau )
CS_tau = CpS_tau - C_tau*S_tau
CC_tau = CpC_tau - C_tau*C_tau
SS_tau = 1 - CpC_tau - S_tau*S_tau
YpY = npsum( wi*mags*mags)
YpC_tau = npsum( wi*mags*cos_omega_tau )
YpS_tau = npsum( wi*mags*sin_omega_tau )
YY = YpY - Y*Y
YC_tau = YpC_tau - Y*C_tau
YS_tau = YpS_tau - Y*S_tau
periodogramvalue = (YC_tau*YC_tau/CC_tau + YS_tau*YS_tau/SS_tau)/YY
return periodogramvalue | Generalized LSP value for a single omega.
This uses tau to provide an arbitrary time-reference point.
The relations used are::
P(w) = (1/YY) * (YC*YC/CC + YS*YS/SS)
where: YC, YS, CC, and SS are all calculated at T
and where: tan 2omegaT = 2*CS/(CC - SS)
and where:
Y = sum( w_i*y_i )
C = sum( w_i*cos(wT_i) )
S = sum( w_i*sin(wT_i) )
YY = sum( w_i*y_i*y_i ) - Y*Y
YC = sum( w_i*y_i*cos(wT_i) ) - Y*C
YS = sum( w_i*y_i*sin(wT_i) ) - Y*S
CpC = sum( w_i*cos(w_T_i)*cos(w_T_i) )
CC = CpC - C*C
SS = (1 - CpC) - S*S
CS = sum( w_i*cos(w_T_i)*sin(w_T_i) ) - C*S
Parameters
----------
times,mags,errs : np.array
The time-series to calculate the periodogram value for.
omega : float
The frequency to calculate the periodogram value at.
Returns
-------
periodogramvalue : float
The normalized periodogram at the specified test frequency `omega`. |
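A hedged, self-contained way to exercise this on synthetic data; the np* names used in the body are assumed to be plain numpy aliases (npsin = np.sin and so on) and are bound here so the call resolves when the function above is pasted into the same module.

import numpy as np

npsin, npcos, npsum, nparctan = np.sin, np.cos, np.sum, np.arctan

rng = np.random.RandomState(42)
times = np.sort(rng.uniform(0.0, 30.0, 500))
true_period = 3.7
mags = 12.0 + 0.05 * np.sin(2.0 * np.pi * times / true_period)
mags += rng.normal(0.0, 0.01, times.size)
errs = np.full_like(times, 0.01)

omega = 2.0 * np.pi / true_period
value = generalized_lsp_value_withtau(times, mags, errs, omega)
# For a strong sinusoid at this omega the normalized value approaches 1.0;
# at an unrelated frequency it drops towards 0.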
384,294 | def load_configuration(conf_path):
with open(conf_path) as f:
conf_dict = yaml.load(f)
validate_config(conf_dict)
return conf_dict | Load and validate test configuration.
:param conf_path: path to YAML configuration file.
:return: configuration as dict. |
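A short sketch; the keys written below are placeholders and would have to match whatever schema validate_config() (defined elsewhere in the module) actually enforces.

import yaml

with open('test_conf.yaml', 'w') as f:
    yaml.dump({'suite': 'smoke', 'retries': 2}, f)

conf = load_configuration('test_conf.yaml')
print(conf['retries'])   # 2, assuming the placeholder keys pass validation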
384,295 | def show_firmware_version_output_show_firmware_version_node_info_firmware_version_info_application_name(self, **kwargs):
config = ET.Element("config")
show_firmware_version = ET.Element("show_firmware_version")
config = show_firmware_version
output = ET.SubElement(show_firmware_version, "output")
show_firmware_version = ET.SubElement(output, "show-firmware-version")
node_info = ET.SubElement(show_firmware_version, "node-info")
firmware_version_info = ET.SubElement(node_info, "firmware-version-info")
application_name = ET.SubElement(firmware_version_info, "application-name")
application_name.text = kwargs.pop('application_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
384,296 | def get(self, requirement):
if isinstance(requirement, basestring):
requirement = Requirement.parse(requirement)
return sorted(p for p in self.packages
if requirement.name == p.name and requirement.match(p)) | Find packages matching ``requirement``.
:param requirement: Requirement to match against repository packages.
:type requirement: `str` or :class:`.Requirement`
:returns: :func:`list` of matching :class:`.Package` objects. |
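A hedged usage sketch; repository stands for an instance of the class that owns get(), and the package name is invented. Requirement.parse() is the pkg_resources-style parser the code above already relies on.

# Requirement given as a string:
for pkg in repository.get('mypackage>=1.2,<2.0'):
    print(pkg.name)

# Or as an already-parsed Requirement object:
req = Requirement.parse('mypackage==1.4.0')
exact_matches = repository.get(req)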
384,297 | def format_op_row(ipFile, totLines, totWords, uniqueWords):
txt = os.path.basename(ipFile).ljust(36) + ' '  # separator literals assumed to be single spaces
txt += str(totLines).rjust(7) + ' '
txt += str(totWords).rjust(7) + ' '
txt += str(len(uniqueWords)).rjust(7) + ' '
return txt | Format the output row with stats |
384,298 | def create(cls, cli, management_address,
local_username=None, local_password=None,
remote_username=None, remote_password=None,
connection_type=None):
req_body = cli.make_body(
managementAddress=management_address, localUsername=local_username,
localPassword=local_password, remoteUsername=remote_username,
remotePassword=remote_password, connectionType=connection_type)
resp = cli.post(cls().resource_class, **req_body)
resp.raise_if_err()
return cls.get(cli, resp.resource_id) | Configures a remote system for remote replication.
:param cls: this class.
:param cli: the rest client.
:param management_address: the management IP address of the remote
system.
:param local_username: administrative username of local system.
:param local_password: administrative password of local system.
:param remote_username: administrative username of remote system.
:param remote_password: administrative password of remote system.
:param connection_type: `ReplicationCapabilityEnum`. Replication
connection type to the remote system.
:return: the newly created remote system. |
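A hedged call sketch; the class name RemoteSystem, the address, and the credentials are all placeholders, and cli stands for an already-constructed REST client of the kind the SDK passes around.

remote = RemoteSystem.create(
    cli,
    management_address='10.0.0.42',
    local_username='admin', local_password='local-secret',
    remote_username='admin', remote_password='remote-secret',
    connection_type=None,   # or a ReplicationCapabilityEnum member
)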
384,299 | def _initialize_likelihood_prior(self, positions, log_likelihoods, log_priors):
func = SimpleCLFunction.from_string( + str(self._nmr_params) + , dependencies=[self._get_log_prior_cl_func(), self._get_log_likelihood_cl_func()])
kernel_data = {
: Array(positions, , mode=, ensure_zero_copy=True),
: Array(log_likelihoods, , mode=, ensure_zero_copy=True),
: Array(log_priors, , mode=, ensure_zero_copy=True),
: LocalMemory(, self._nmr_params),
: self._data
}
func.evaluate(kernel_data, self._nmr_problems,
use_local_reduction=all(env.is_gpu for env in self._cl_runtime_info.cl_environments),
cl_runtime_info=self._cl_runtime_info) | Initialize the likelihood and the prior using the given positions.
This is a general method for computing the log likelihoods and log priors for given positions.
Subclasses can use this to instantiate secondary chains as well. |