text (string, lengths 89 to 104k) | code_tokens (list) | avg_line_len (float64, 7.91 to 980) | score (float64, 0 to 630) |
---|---|---|---|
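The `avg_line_len` column can plausibly be recomputed from the `text` column. The sketch below is a minimal, hypothetical helper assuming `avg_line_len` is simply the mean number of characters per line of a snippet (the meaning of the `score` column is not documented here, so it is not reproduced):

```python
def avg_line_len(text: str) -> float:
    """Mean characters per line of a code snippet (assumed definition)."""
    lines = text.splitlines()
    return sum(len(line) for line in lines) / len(lines) if lines else 0.0
```

Applied to the first row below, this lands near its listed value of 38.666667 (the exact figure depends on the original whitespace).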
def surface_ras_shift(self):
    """Freesurfer uses two coordinate systems: one for volumes ("RAS") and
    one for surfaces ("tkReg", "tkRAS", and "Surface RAS").
    To get from surface to volume coordinates, add these numbers.
    To get from volume to surface coordinates, subtract these numbers.
    """
    T1_path = self.dir / 'mri' / 'T1.mgz'
    assert T1_path.exists()
    T1 = nload(str(T1_path))
    return T1.header['Pxyz_c']
|
[
"def",
"surface_ras_shift",
"(",
"self",
")",
":",
"T1_path",
"=",
"self",
".",
"dir",
"/",
"'mri'",
"/",
"'T1.mgz'",
"assert",
"T1_path",
".",
"exists",
"(",
")",
"T1",
"=",
"nload",
"(",
"str",
"(",
"T1_path",
")",
")",
"return",
"T1",
".",
"header",
"[",
"'Pxyz_c'",
"]"
] | 38.666667 | 17.083333 |
def AppendToFile(filename, contents, eol_style=EOL_STYLE_NATIVE, encoding=None, binary=False):
    '''
    Appends content to a local file.
    :param unicode filename:
    :param unicode contents:
    :type eol_style: EOL_STYLE_XXX constant
    :param eol_style:
        Replaces the EOL by the appropriate EOL depending on the eol_style value.
        Considers that all content is using only "\n" as EOL.
    :param unicode encoding:
        Target file's content encoding.
        Defaults to sys.getfilesystemencoding()
    :param bool binary:
        If True, content is appended in binary mode. In this case, `contents` must be `bytes` and not
        `unicode`
    :raises NotImplementedForRemotePathError:
        If trying to modify a non-local path
    :raises ValueError:
        If trying to mix unicode `contents` without `encoding`, or `encoding` without
        unicode `contents`
    '''
    _AssertIsLocal(filename)
    assert isinstance(contents, six.text_type) ^ binary, 'Must always receive unicode contents, unless binary=True'
    if not binary:
        # Replaces eol on each line by the given eol_style.
        contents = _HandleContentsEol(contents, eol_style)
        # Handle encoding here, and always write in binary mode. We can't use io.open because it
        # tries to do its own line ending handling.
        contents = contents.encode(encoding or sys.getfilesystemencoding())
    oss = open(filename, 'ab')
    try:
        oss.write(contents)
    finally:
        oss.close()
|
[
"def",
"AppendToFile",
"(",
"filename",
",",
"contents",
",",
"eol_style",
"=",
"EOL_STYLE_NATIVE",
",",
"encoding",
"=",
"None",
",",
"binary",
"=",
"False",
")",
":",
"_AssertIsLocal",
"(",
"filename",
")",
"assert",
"isinstance",
"(",
"contents",
",",
"six",
".",
"text_type",
")",
"^",
"binary",
",",
"'Must always receive unicode contents, unless binary=True'",
"if",
"not",
"binary",
":",
"# Replaces eol on each line by the given eol_style.",
"contents",
"=",
"_HandleContentsEol",
"(",
"contents",
",",
"eol_style",
")",
"# Handle encoding here, and always write in binary mode. We can't use io.open because it",
"# tries to do its own line ending handling.",
"contents",
"=",
"contents",
".",
"encode",
"(",
"encoding",
"or",
"sys",
".",
"getfilesystemencoding",
"(",
")",
")",
"oss",
"=",
"open",
"(",
"filename",
",",
"'ab'",
")",
"try",
":",
"oss",
".",
"write",
"(",
"contents",
")",
"finally",
":",
"oss",
".",
"close",
"(",
")"
] | 32.888889 | 27.333333 |
def to_dict(self):
    """
    Convert this VirtualIP to a dict representation for passing
    to the API.
    """
    if self.id:
        return {"id": self.id}
    return {"type": self.type, "ipVersion": self.ip_version}
|
[
"def",
"to_dict",
"(",
"self",
")",
":",
"if",
"self",
".",
"id",
":",
"return",
"{",
"\"id\"",
":",
"self",
".",
"id",
"}",
"return",
"{",
"\"type\"",
":",
"self",
".",
"type",
",",
"\"ipVersion\"",
":",
"self",
".",
"ip_version",
"}"
] | 30.375 | 15.125 |
async def on_raw_329(self, message):
    """ Channel creation time. """
    target, channel, timestamp = message.params
    if not self.in_channel(channel):
        return
    self.channels[channel]['created'] = datetime.datetime.fromtimestamp(int(timestamp))
|
[
"async",
"def",
"on_raw_329",
"(",
"self",
",",
"message",
")",
":",
"target",
",",
"channel",
",",
"timestamp",
"=",
"message",
".",
"params",
"if",
"not",
"self",
".",
"in_channel",
"(",
"channel",
")",
":",
"return",
"self",
".",
"channels",
"[",
"channel",
"]",
"[",
"'created'",
"]",
"=",
"datetime",
".",
"datetime",
".",
"fromtimestamp",
"(",
"int",
"(",
"timestamp",
")",
")"
] | 39.142857 | 18.285714 |
def auth_proxy(self, method):
    """Authentication proxy for API requests.
    This is required because the API objects are naive of ``HelpScout``,
    so they would otherwise be unauthenticated.
    Args:
        method (callable): A method call that should be authenticated. It
            should accept a ``requests.Session`` as its first parameter,
            which should be used for the actual API call.
    Returns:
        mixed: The results of the authenticated callable.
    """
    def _proxy(*args, **kwargs):
        """The actual proxy, which instantiates and authenticates the API.
        Args:
            *args (mixed): Args to send to class instantiation.
            **kwargs (mixed): Kwargs to send to class instantiation.
        Returns:
            mixed: The result of the authenticated callable.
        """
        return method(self.session, *args, **kwargs)
    return _proxy
|
[
"def",
"auth_proxy",
"(",
"self",
",",
"method",
")",
":",
"def",
"_proxy",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"The actual proxy, which instantiates and authenticates the API.\n\n Args:\n *args (mixed): Args to send to class instantiation.\n **kwargs (mixed): Kwargs to send to class instantiation.\n\n Returns:\n mixed: The result of the authenticated callable.\n \"\"\"",
"return",
"method",
"(",
"self",
".",
"session",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"_proxy"
] | 35.555556 | 23.074074 |
def reset(self):
    """Reset the value to the default"""
    if self.resetable:
        for i in range(len(self)):
            self[i] = self.default
|
[
"def",
"reset",
"(",
"self",
")",
":",
"if",
"self",
".",
"resetable",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
")",
")",
":",
"self",
"[",
"i",
"]",
"=",
"self",
".",
"default"
] | 32.4 | 8.4 |
def middleware(self, *args, **kwargs):
    """
    A decorator that can be used to implement a Middleware plugin to
    all of the Blueprints that belongs to this specific Blueprint Group.
    In case of nested Blueprint Groups, the same middleware is applied
    across each of the Blueprints recursively.
    :param args: Optional positional Parameters to be use middleware
    :param kwargs: Optional Keyword arg to use with Middleware
    :return: Partial function to apply the middleware
    """
    kwargs["bp_group"] = True
    def register_middleware_for_blueprints(fn):
        for blueprint in self.blueprints:
            blueprint.middleware(fn, *args, **kwargs)
    return register_middleware_for_blueprints
|
[
"def",
"middleware",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"\"bp_group\"",
"]",
"=",
"True",
"def",
"register_middleware_for_blueprints",
"(",
"fn",
")",
":",
"for",
"blueprint",
"in",
"self",
".",
"blueprints",
":",
"blueprint",
".",
"middleware",
"(",
"fn",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"register_middleware_for_blueprints"
] | 40.105263 | 20.947368 |
def store_file(self, filename, file_content, content_type):
    """
    Store a small file in an Upload Area
    :param str area_uuid: A RFC4122-compliant ID for the upload area
    :param str filename: The name the file will have in the Upload Area
    :param str file_content: The contents of the file
    :param str content_type: The MIME-type for the file
    :return: information about the stored file (similar to that returned by files_info)
    :rtype: dict
    :raises UploadApiException: if file could not be stored
    """
    return self.upload_service.api_client.store_file(area_uuid=self.uuid,
                                                     filename=filename,
                                                     file_content=file_content,
                                                     content_type=content_type)
|
[
"def",
"store_file",
"(",
"self",
",",
"filename",
",",
"file_content",
",",
"content_type",
")",
":",
"return",
"self",
".",
"upload_service",
".",
"api_client",
".",
"store_file",
"(",
"area_uuid",
"=",
"self",
".",
"uuid",
",",
"filename",
"=",
"filename",
",",
"file_content",
"=",
"file_content",
",",
"content_type",
"=",
"content_type",
")"
] | 51.764706 | 26.941176 |
def delete_saml_provider(name, region=None, key=None, keyid=None, profile=None):
    '''
    Delete SAML provider
    CLI Example:
    .. code-block:: bash
        salt myminion boto_iam.delete_saml_provider my_saml_provider_name
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        saml_provider_arn = get_saml_provider_arn(name, region=region, key=key, keyid=keyid, profile=profile)
        if not saml_provider_arn:
            log.info('SAML provider %s not found.', name)
            return True
        conn.delete_saml_provider(saml_provider_arn)
        log.info('Successfully deleted SAML provider %s.', name)
        return True
    except boto.exception.BotoServerError as e:
        aws = __utils__['boto.get_error'](e)
        log.debug(aws)
        log.error('Failed to delete SAML provider %s.', name)
        return False
|
[
"def",
"delete_saml_provider",
"(",
"name",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"try",
":",
"saml_provider_arn",
"=",
"get_saml_provider_arn",
"(",
"name",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"if",
"not",
"saml_provider_arn",
":",
"log",
".",
"info",
"(",
"'SAML provider %s not found.'",
",",
"name",
")",
"return",
"True",
"conn",
".",
"delete_saml_provider",
"(",
"saml_provider_arn",
")",
"log",
".",
"info",
"(",
"'Successfully deleted SAML provider %s.'",
",",
"name",
")",
"return",
"True",
"except",
"boto",
".",
"exception",
".",
"BotoServerError",
"as",
"e",
":",
"aws",
"=",
"__utils__",
"[",
"'boto.get_error'",
"]",
"(",
"e",
")",
"log",
".",
"debug",
"(",
"aws",
")",
"log",
".",
"error",
"(",
"'Failed to delete SAML provider %s.'",
",",
"name",
")",
"return",
"False"
] | 36 | 25.75 |
def list(gandi, state, id, limit, datacenter):
    """List virtual machines."""
    options = {
        'items_per_page': limit,
    }
    if state:
        options['state'] = state
    if datacenter:
        options['datacenter_id'] = gandi.datacenter.usable_id(datacenter)
    output_keys = ['hostname', 'state']
    if id:
        output_keys.append('id')
    result = gandi.iaas.list(options)
    for num, vm in enumerate(result):
        if num:
            gandi.separator_line()
        output_vm(gandi, vm, [], output_keys)
    return result
|
[
"def",
"list",
"(",
"gandi",
",",
"state",
",",
"id",
",",
"limit",
",",
"datacenter",
")",
":",
"options",
"=",
"{",
"'items_per_page'",
":",
"limit",
",",
"}",
"if",
"state",
":",
"options",
"[",
"'state'",
"]",
"=",
"state",
"if",
"datacenter",
":",
"options",
"[",
"'datacenter_id'",
"]",
"=",
"gandi",
".",
"datacenter",
".",
"usable_id",
"(",
"datacenter",
")",
"output_keys",
"=",
"[",
"'hostname'",
",",
"'state'",
"]",
"if",
"id",
":",
"output_keys",
".",
"append",
"(",
"'id'",
")",
"result",
"=",
"gandi",
".",
"iaas",
".",
"list",
"(",
"options",
")",
"for",
"num",
",",
"vm",
"in",
"enumerate",
"(",
"result",
")",
":",
"if",
"num",
":",
"gandi",
".",
"separator_line",
"(",
")",
"output_vm",
"(",
"gandi",
",",
"vm",
",",
"[",
"]",
",",
"output_keys",
")",
"return",
"result"
] | 25.333333 | 18.47619 |
def reduce_filename(f):
    r'''
    Expects something like /tmp/tmpAjry4Gdsbench/test.weights.e5.XXX.YYY.pb
    Where XXX is a variation on the model size for example
    And where YYY is a const related to the training dataset
    '''
    f = os.path.basename(f).split('.')
    return keep_only_digits(f[-3])
|
[
"def",
"reduce_filename",
"(",
"f",
")",
":",
"f",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"f",
")",
".",
"split",
"(",
"'.'",
")",
"return",
"keep_only_digits",
"(",
"f",
"[",
"-",
"3",
"]",
")"
] | 33.666667 | 22.555556 |
def _processSegmentUpdates(self, activeColumns):
    """
    Go through the list of accumulated segment updates and process them
    as follows:
    if the segment update is too old, remove the update
    else if the cell received bottom-up, update its permanences
    else if it's still being predicted, leave it in the queue
    else remove it.
    :param activeColumns TODO: document
    """
    # The segmentUpdates dict has keys which are the column,cellIdx of the
    # owner cell. The values are lists of segment updates for that cell
    removeKeys = []
    trimSegments = []
    for key, updateList in self.segmentUpdates.iteritems():
        # Get the column number and cell index of the owner cell
        c, i = key[0], key[1]
        # If the cell received bottom-up, update its segments
        if c in activeColumns:
            action = 'update'
        # If not, either keep it around if it's still predicted, or remove it
        else:
            # If it is still predicted, and we are pooling, keep it around
            if self.doPooling and self.lrnPredictedState['t'][c, i] == 1:
                action = 'keep'
            else:
                action = 'remove'
        # Process each segment for this cell. Each segment entry contains
        # [creationDate, SegmentInfo]
        updateListKeep = []
        if action != 'remove':
            for (createDate, segUpdate) in updateList:
                if self.verbosity >= 4:
                    print "_nLrnIterations =", self.lrnIterationIdx,
                    print segUpdate
                # If this segment has expired. Ignore this update (and hence remove it
                # from list)
                if self.lrnIterationIdx - createDate > self.segUpdateValidDuration:
                    continue
                if action == 'update':
                    trimSegment = self._adaptSegment(segUpdate)
                    if trimSegment:
                        trimSegments.append((segUpdate.columnIdx, segUpdate.cellIdx,
                                             segUpdate.segment))
                else:
                    # Keep segments that haven't expired yet (the cell is still being
                    # predicted)
                    updateListKeep.append((createDate, segUpdate))
        self.segmentUpdates[key] = updateListKeep
        if len(updateListKeep) == 0:
            removeKeys.append(key)
    # Clean out empty segment updates
    for key in removeKeys:
        self.segmentUpdates.pop(key)
    # Trim segments that had synapses go to 0
    for (c, i, segment) in trimSegments:
        self._trimSegmentsInCell(c, i, [segment], minPermanence = 0.00001,
                                 minNumSyns = 0)
|
[
"def",
"_processSegmentUpdates",
"(",
"self",
",",
"activeColumns",
")",
":",
"# The segmentUpdates dict has keys which are the column,cellIdx of the",
"# owner cell. The values are lists of segment updates for that cell",
"removeKeys",
"=",
"[",
"]",
"trimSegments",
"=",
"[",
"]",
"for",
"key",
",",
"updateList",
"in",
"self",
".",
"segmentUpdates",
".",
"iteritems",
"(",
")",
":",
"# Get the column number and cell index of the owner cell",
"c",
",",
"i",
"=",
"key",
"[",
"0",
"]",
",",
"key",
"[",
"1",
"]",
"# If the cell received bottom-up, update its segments",
"if",
"c",
"in",
"activeColumns",
":",
"action",
"=",
"'update'",
"# If not, either keep it around if it's still predicted, or remove it",
"else",
":",
"# If it is still predicted, and we are pooling, keep it around",
"if",
"self",
".",
"doPooling",
"and",
"self",
".",
"lrnPredictedState",
"[",
"'t'",
"]",
"[",
"c",
",",
"i",
"]",
"==",
"1",
":",
"action",
"=",
"'keep'",
"else",
":",
"action",
"=",
"'remove'",
"# Process each segment for this cell. Each segment entry contains",
"# [creationDate, SegmentInfo]",
"updateListKeep",
"=",
"[",
"]",
"if",
"action",
"!=",
"'remove'",
":",
"for",
"(",
"createDate",
",",
"segUpdate",
")",
"in",
"updateList",
":",
"if",
"self",
".",
"verbosity",
">=",
"4",
":",
"print",
"\"_nLrnIterations =\"",
",",
"self",
".",
"lrnIterationIdx",
",",
"print",
"segUpdate",
"# If this segment has expired. Ignore this update (and hence remove it",
"# from list)",
"if",
"self",
".",
"lrnIterationIdx",
"-",
"createDate",
">",
"self",
".",
"segUpdateValidDuration",
":",
"continue",
"if",
"action",
"==",
"'update'",
":",
"trimSegment",
"=",
"self",
".",
"_adaptSegment",
"(",
"segUpdate",
")",
"if",
"trimSegment",
":",
"trimSegments",
".",
"append",
"(",
"(",
"segUpdate",
".",
"columnIdx",
",",
"segUpdate",
".",
"cellIdx",
",",
"segUpdate",
".",
"segment",
")",
")",
"else",
":",
"# Keep segments that haven't expired yet (the cell is still being",
"# predicted)",
"updateListKeep",
".",
"append",
"(",
"(",
"createDate",
",",
"segUpdate",
")",
")",
"self",
".",
"segmentUpdates",
"[",
"key",
"]",
"=",
"updateListKeep",
"if",
"len",
"(",
"updateListKeep",
")",
"==",
"0",
":",
"removeKeys",
".",
"append",
"(",
"key",
")",
"# Clean out empty segment updates",
"for",
"key",
"in",
"removeKeys",
":",
"self",
".",
"segmentUpdates",
".",
"pop",
"(",
"key",
")",
"# Trim segments that had synapses go to 0",
"for",
"(",
"c",
",",
"i",
",",
"segment",
")",
"in",
"trimSegments",
":",
"self",
".",
"_trimSegmentsInCell",
"(",
"c",
",",
"i",
",",
"[",
"segment",
"]",
",",
"minPermanence",
"=",
"0.00001",
",",
"minNumSyns",
"=",
"0",
")"
] | 35.457143 | 21.542857 |
def dict_of_sets_add(dictionary, key, value):
    # type: (DictUpperBound, Any, Any) -> None
    """Add value to a set in a dictionary by key
    Args:
        dictionary (DictUpperBound): Dictionary to which to add values
        key (Any): Key within dictionary
        value (Any): Value to add to set in dictionary
    Returns:
        None
    """
    set_objs = dictionary.get(key, set())
    set_objs.add(value)
    dictionary[key] = set_objs
|
[
"def",
"dict_of_sets_add",
"(",
"dictionary",
",",
"key",
",",
"value",
")",
":",
"# type: (DictUpperBound, Any, Any) -> None",
"set_objs",
"=",
"dictionary",
".",
"get",
"(",
"key",
",",
"set",
"(",
")",
")",
"set_objs",
".",
"add",
"(",
"value",
")",
"dictionary",
"[",
"key",
"]",
"=",
"set_objs"
] | 27.3125 | 18.125 |
def _query_iterator(self, result, chunksize, columns, coerce_float=True,
                    parse_dates=None):
    """Return generator through chunked result set."""
    while True:
        data = result.fetchmany(chunksize)
        if not data:
            break
        else:
            self.frame = DataFrame.from_records(
                data, columns=columns, coerce_float=coerce_float)
            self._harmonize_columns(parse_dates=parse_dates)
            if self.index is not None:
                self.frame.set_index(self.index, inplace=True)
            yield self.frame
|
[
"def",
"_query_iterator",
"(",
"self",
",",
"result",
",",
"chunksize",
",",
"columns",
",",
"coerce_float",
"=",
"True",
",",
"parse_dates",
"=",
"None",
")",
":",
"while",
"True",
":",
"data",
"=",
"result",
".",
"fetchmany",
"(",
"chunksize",
")",
"if",
"not",
"data",
":",
"break",
"else",
":",
"self",
".",
"frame",
"=",
"DataFrame",
".",
"from_records",
"(",
"data",
",",
"columns",
"=",
"columns",
",",
"coerce_float",
"=",
"coerce_float",
")",
"self",
".",
"_harmonize_columns",
"(",
"parse_dates",
"=",
"parse_dates",
")",
"if",
"self",
".",
"index",
"is",
"not",
"None",
":",
"self",
".",
"frame",
".",
"set_index",
"(",
"self",
".",
"index",
",",
"inplace",
"=",
"True",
")",
"yield",
"self",
".",
"frame"
] | 34.666667 | 21.111111 |
def get_vlan_from_query_reply(self, reply, vsiid, mac):
    """Parse the query reply from VDP daemon to get the VLAN value. """
    hints_ret, fail_reason = self.check_hints(reply)
    if not hints_ret:
        LOG.error("Incorrect hints found %s", reply)
        return constants.INVALID_VLAN, fail_reason
    check_filter, fail_reason = self.check_filter_validity(reply, "filter")
    if not check_filter:
        return constants.INVALID_VLAN, fail_reason
    try:
        verify_flag, fail_reason = self.crosscheck_query_vsiid_mac(
            reply, vsiid, mac)
        if not verify_flag:
            return constants.INVALID_VLAN, fail_reason
        filter_val = reply.partition("filter")[2]
        len_fil = len(filter_val)
        vlan_val = filter_val[4:len_fil].split('-')[0]
        vlan = int(vlan_val)
    except ValueError:
        fail_reason = vdp_const.format_failure_reason % (reply)
        LOG.error("%s", fail_reason)
        return constants.INVALID_VLAN, fail_reason
    return vlan, None
|
[
"def",
"get_vlan_from_query_reply",
"(",
"self",
",",
"reply",
",",
"vsiid",
",",
"mac",
")",
":",
"hints_ret",
",",
"fail_reason",
"=",
"self",
".",
"check_hints",
"(",
"reply",
")",
"if",
"not",
"hints_ret",
":",
"LOG",
".",
"error",
"(",
"\"Incorrect hints found %s\"",
",",
"reply",
")",
"return",
"constants",
".",
"INVALID_VLAN",
",",
"fail_reason",
"check_filter",
",",
"fail_reason",
"=",
"self",
".",
"check_filter_validity",
"(",
"reply",
",",
"\"filter\"",
")",
"if",
"not",
"check_filter",
":",
"return",
"constants",
".",
"INVALID_VLAN",
",",
"fail_reason",
"try",
":",
"verify_flag",
",",
"fail_reason",
"=",
"self",
".",
"crosscheck_query_vsiid_mac",
"(",
"reply",
",",
"vsiid",
",",
"mac",
")",
"if",
"not",
"verify_flag",
":",
"return",
"constants",
".",
"INVALID_VLAN",
",",
"fail_reason",
"filter_val",
"=",
"reply",
".",
"partition",
"(",
"\"filter\"",
")",
"[",
"2",
"]",
"len_fil",
"=",
"len",
"(",
"filter_val",
")",
"vlan_val",
"=",
"filter_val",
"[",
"4",
":",
"len_fil",
"]",
".",
"split",
"(",
"'-'",
")",
"[",
"0",
"]",
"vlan",
"=",
"int",
"(",
"vlan_val",
")",
"except",
"ValueError",
":",
"fail_reason",
"=",
"vdp_const",
".",
"format_failure_reason",
"%",
"(",
"reply",
")",
"LOG",
".",
"error",
"(",
"\"%s\"",
",",
"fail_reason",
")",
"return",
"constants",
".",
"INVALID_VLAN",
",",
"fail_reason",
"return",
"vlan",
",",
"None"
] | 46.956522 | 15 |
def to_lal_type_str(pytype):
    """Convert the input python type to a LAL type string
    Examples
    --------
    To convert a python type:
    >>> from gwpy.utils.lal import to_lal_type_str
    >>> to_lal_type_str(float)
    'REAL8'
    To convert a `numpy.dtype`:
    >>> import numpy
    >>> to_lal_type_str(numpy.dtype('uint32'))
    'UINT4'
    To convert a LAL type code:
    >>> to_lal_type_str(11)
    'REAL8'
    Raises
    ------
    KeyError
        if the input doesn't map to a LAL type string
    """
    # noop
    if pytype in LAL_TYPE_FROM_STR:
        return pytype
    # convert type code
    if pytype in LAL_TYPE_STR:
        return LAL_TYPE_STR[pytype]
    # convert python type
    try:
        dtype = numpy.dtype(pytype)
        return LAL_TYPE_STR_FROM_NUMPY[dtype.type]
    except (TypeError, KeyError):
        raise ValueError("Failed to map {!r} to LAL type string")
|
[
"def",
"to_lal_type_str",
"(",
"pytype",
")",
":",
"# noop",
"if",
"pytype",
"in",
"LAL_TYPE_FROM_STR",
":",
"return",
"pytype",
"# convert type code",
"if",
"pytype",
"in",
"LAL_TYPE_STR",
":",
"return",
"LAL_TYPE_STR",
"[",
"pytype",
"]",
"# convert python type",
"try",
":",
"dtype",
"=",
"numpy",
".",
"dtype",
"(",
"pytype",
")",
"return",
"LAL_TYPE_STR_FROM_NUMPY",
"[",
"dtype",
".",
"type",
"]",
"except",
"(",
"TypeError",
",",
"KeyError",
")",
":",
"raise",
"ValueError",
"(",
"\"Failed to map {!r} to LAL type string\"",
")"
] | 21.170732 | 21.560976 |
def irecarray_to_py(a):
    """Slow conversion of a recarray into a list of records with python types.
    Get the field names from :attr:`a.dtype.names`.
    :Returns: iterator so that one can handle big input arrays
    """
    pytypes = [pyify(typestr) for name,typestr in a.dtype.descr]
    def convert_record(r):
        return tuple([converter(value) for converter, value in zip(pytypes,r)])
    return (convert_record(r) for r in a)
|
[
"def",
"irecarray_to_py",
"(",
"a",
")",
":",
"pytypes",
"=",
"[",
"pyify",
"(",
"typestr",
")",
"for",
"name",
",",
"typestr",
"in",
"a",
".",
"dtype",
".",
"descr",
"]",
"def",
"convert_record",
"(",
"r",
")",
":",
"return",
"tuple",
"(",
"[",
"converter",
"(",
"value",
")",
"for",
"converter",
",",
"value",
"in",
"zip",
"(",
"pytypes",
",",
"r",
")",
"]",
")",
"return",
"(",
"convert_record",
"(",
"r",
")",
"for",
"r",
"in",
"a",
")"
] | 39.181818 | 18.909091 |
def set_params(self, params):
    """Sets multiple GO-PCA Server parameters using a dictionary.
    Parameters
    ----------
    params: dict
        Dictionary containing the parameter values.
    Returns
    -------
    None
    """
    for k,v in params.iteritems():
        self.set_param(k,v)
|
[
"def",
"set_params",
"(",
"self",
",",
"params",
")",
":",
"for",
"k",
",",
"v",
"in",
"params",
".",
"iteritems",
"(",
")",
":",
"self",
".",
"set_param",
"(",
"k",
",",
"v",
")"
] | 23.642857 | 18.5 |
def _rightleft(self, direction):
    """Provides curses horizontal padding"""
    if direction == "left" and self.padding != 0:
        self.padding -= 1
    if direction == "right" and \
            self.screen.getmaxyx()[1] + self.padding < self.max_width:
        self.padding += 1
|
[
"def",
"_rightleft",
"(",
"self",
",",
"direction",
")",
":",
"if",
"direction",
"==",
"\"left\"",
"and",
"self",
".",
"padding",
"!=",
"0",
":",
"self",
".",
"padding",
"-=",
"1",
"if",
"direction",
"==",
"\"right\"",
"and",
"self",
".",
"screen",
".",
"getmaxyx",
"(",
")",
"[",
"1",
"]",
"+",
"self",
".",
"padding",
"<",
"self",
".",
"max_width",
":",
"self",
".",
"padding",
"+=",
"1"
] | 33.555556 | 17.777778 |
def find_revision_number(self, revision=None):
    """Find the local revision number of the given revision."""
    # Make sure the local repository exists.
    self.create()
    # Try to find the revision number of the specified revision.
    revision = revision or self.default_revision
    output = self.context.capture('hg', 'id', '--rev=%s' % revision, '--num').rstrip('+')
    # Validate the `hg id --num' output.
    if not output.isdigit():
        msg = "Failed to find local revision number! ('hg id --num' gave unexpected output)"
        raise EnvironmentError(msg)
    return int(output)
|
[
"def",
"find_revision_number",
"(",
"self",
",",
"revision",
"=",
"None",
")",
":",
"# Make sure the local repository exists.",
"self",
".",
"create",
"(",
")",
"# Try to find the revision number of the specified revision.",
"revision",
"=",
"revision",
"or",
"self",
".",
"default_revision",
"output",
"=",
"self",
".",
"context",
".",
"capture",
"(",
"'hg'",
",",
"'id'",
",",
"'--rev=%s'",
"%",
"revision",
",",
"'--num'",
")",
".",
"rstrip",
"(",
"'+'",
")",
"# Validate the `hg id --num' output.",
"if",
"not",
"output",
".",
"isdigit",
"(",
")",
":",
"msg",
"=",
"\"Failed to find local revision number! ('hg id --num' gave unexpected output)\"",
"raise",
"EnvironmentError",
"(",
"msg",
")",
"return",
"int",
"(",
"output",
")"
] | 52.666667 | 17.416667 |
def __annotate_sequence_with_quality(seq, q_line_parts):
    """Extract meta data from pre-tokenized maf q-line and populate sequence.
    q -- quality information about an aligned base in a species. Two fields after
         the 'q': the source name and a single digit for each nucleotide in its
         sequence (0-9 or F, or - to indicate a gap).
    """
    if q_line_parts[1] != seq.name:
        raise MAFError("trying to populate meta data for sequence " + seq.name +
                       " with q-line information for " +
                       str(q_line_parts[1]) + "; maflormed MAF file?")
    if len(q_line_parts[2]) != len(seq):
        raise MAFError("trying to populate quality meta data for sequence with " +
                       "length " + str(len(seq)) + " using quality line with " +
                       "length " + str(len(q_line_parts[2])) + "; malformed " +
                       "MAF file?")
    seq.meta_data[QUALITY_META_KEY] = q_line_parts[2]
|
[
"def",
"__annotate_sequence_with_quality",
"(",
"seq",
",",
"q_line_parts",
")",
":",
"if",
"q_line_parts",
"[",
"1",
"]",
"!=",
"seq",
".",
"name",
":",
"raise",
"MAFError",
"(",
"\"trying to populate meta data for sequence \"",
"+",
"seq",
".",
"name",
"+",
"\" with q-line information for \"",
"+",
"str",
"(",
"q_line_parts",
"[",
"1",
"]",
")",
"+",
"\"; maflormed MAF file?\"",
")",
"if",
"len",
"(",
"q_line_parts",
"[",
"2",
"]",
")",
"!=",
"len",
"(",
"seq",
")",
":",
"raise",
"MAFError",
"(",
"\"trying to populate quality meta data for sequence with \"",
"+",
"\"length \"",
"+",
"str",
"(",
"len",
"(",
"seq",
")",
")",
"+",
"\" using quality line with \"",
"+",
"\"length \"",
"+",
"str",
"(",
"len",
"(",
"q_line_parts",
"[",
"2",
"]",
")",
")",
"+",
"\"; malformed \"",
"+",
"\"MAF file?\"",
")",
"seq",
".",
"meta_data",
"[",
"QUALITY_META_KEY",
"]",
"=",
"q_line_parts",
"[",
"2",
"]"
] | 54.058824 | 20.882353 |
def doFeedback(self, item_id, use_comment_template, buyer_id, comment, comment_type, op):
    """http://allegro.pl/webapi/documentation.php/show/id,42"""
    return self.__ask__('doFeedback',
                        feItemId=item_id,
                        feUseCommentTemplate=use_comment_template,
                        feToUserId=buyer_id,
                        feComment=comment,
                        feCommentType=comment_type,
                        feOp=op)['feedbackId']
|
[
"def",
"doFeedback",
"(",
"self",
",",
"item_id",
",",
"use_comment_template",
",",
"buyer_id",
",",
"comment",
",",
"comment_type",
",",
"op",
")",
":",
"return",
"self",
".",
"__ask__",
"(",
"'doFeedback'",
",",
"feItemId",
"=",
"item_id",
",",
"feUseCommentTemplate",
"=",
"use_comment_template",
",",
"feToUserId",
"=",
"buyer_id",
",",
"feComment",
"=",
"comment",
",",
"feCommentType",
"=",
"comment_type",
",",
"feOp",
"=",
"op",
")",
"[",
"'feedbackId'",
"]"
] | 56.777778 | 13.777778 |
def _construct_module(info, target):
    """Build a module from templates and user supplied information"""
    for path in paths:
        real_path = os.path.abspath(os.path.join(target, path.format(**info)))
        log("Making directory '%s'" % real_path)
        os.makedirs(real_path)
    # pprint(info)
    for item in templates.values():
        source = os.path.join('dev/templates', item[0])
        filename = os.path.abspath(
            os.path.join(target, item[1].format(**info)))
        log("Creating file from template '%s'" % filename,
            emitter='MANAGE')
        write_template_file(source, filename, info)
|
[
"def",
"_construct_module",
"(",
"info",
",",
"target",
")",
":",
"for",
"path",
"in",
"paths",
":",
"real_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"target",
",",
"path",
".",
"format",
"(",
"*",
"*",
"info",
")",
")",
")",
"log",
"(",
"\"Making directory '%s'\"",
"%",
"real_path",
")",
"os",
".",
"makedirs",
"(",
"real_path",
")",
"# pprint(info)",
"for",
"item",
"in",
"templates",
".",
"values",
"(",
")",
":",
"source",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'dev/templates'",
",",
"item",
"[",
"0",
"]",
")",
"filename",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"target",
",",
"item",
"[",
"1",
"]",
".",
"format",
"(",
"*",
"*",
"info",
")",
")",
")",
"log",
"(",
"\"Creating file from template '%s'\"",
"%",
"filename",
",",
"emitter",
"=",
"'MANAGE'",
")",
"write_template_file",
"(",
"source",
",",
"filename",
",",
"info",
")"
] | 38.8125 | 16.375 |
def match_tweet(self, tweet, user_stream):
    """
    Check if a tweet matches the defined criteria
    :param tweet: The tweet in question
    :type tweet: :class:`~responsebot.models.Tweet`
    :return: True if matched, False otherwise
    """
    if user_stream:
        if len(self.track) > 0:
            return self.is_tweet_match_track(tweet)
        return True
    return self.is_tweet_match_track(tweet) or self.is_tweet_match_follow(tweet)
|
[
"def",
"match_tweet",
"(",
"self",
",",
"tweet",
",",
"user_stream",
")",
":",
"if",
"user_stream",
":",
"if",
"len",
"(",
"self",
".",
"track",
")",
">",
"0",
":",
"return",
"self",
".",
"is_tweet_match_track",
"(",
"tweet",
")",
"return",
"True",
"return",
"self",
".",
"is_tweet_match_track",
"(",
"tweet",
")",
"or",
"self",
".",
"is_tweet_match_follow",
"(",
"tweet",
")"
] | 32.266667 | 17.333333 |
def _get_alpha(self, C, vs30, pga_rock):
    """
    Returns the alpha, the linearised functional relationship between the
    site amplification and the PGA on rock. Equation 31.
    """
    alpha = np.zeros(len(pga_rock))
    idx = vs30 < C["k1"]
    if np.any(idx):
        af1 = pga_rock[idx] +\
            self.CONSTS["c"] * ((vs30[idx] / C["k1"]) ** self.CONSTS["n"])
        af2 = pga_rock[idx] + self.CONSTS["c"]
        alpha[idx] = C["k2"] * pga_rock[idx] * ((1.0 / af1) - (1.0 / af2))
    return alpha
|
[
"def",
"_get_alpha",
"(",
"self",
",",
"C",
",",
"vs30",
",",
"pga_rock",
")",
":",
"alpha",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"pga_rock",
")",
")",
"idx",
"=",
"vs30",
"<",
"C",
"[",
"\"k1\"",
"]",
"if",
"np",
".",
"any",
"(",
"idx",
")",
":",
"af1",
"=",
"pga_rock",
"[",
"idx",
"]",
"+",
"self",
".",
"CONSTS",
"[",
"\"c\"",
"]",
"*",
"(",
"(",
"vs30",
"[",
"idx",
"]",
"/",
"C",
"[",
"\"k1\"",
"]",
")",
"**",
"self",
".",
"CONSTS",
"[",
"\"n\"",
"]",
")",
"af2",
"=",
"pga_rock",
"[",
"idx",
"]",
"+",
"self",
".",
"CONSTS",
"[",
"\"c\"",
"]",
"alpha",
"[",
"idx",
"]",
"=",
"C",
"[",
"\"k2\"",
"]",
"*",
"pga_rock",
"[",
"idx",
"]",
"*",
"(",
"(",
"1.0",
"/",
"af1",
")",
"-",
"(",
"1.0",
"/",
"af2",
")",
")",
"return",
"alpha"
] | 42.230769 | 15.307692 |
def google_maps_geoloc_link(data):
    """
    Get a link to google maps pointing on this IP's geolocation.
    Args:
        data (str/tuple): IP address or (latitude, longitude).
    Returns:
        str: a link to google maps pointing on this IP's geolocation.
    """
    if isinstance(data, str):
        lat_lon = ip_geoloc(data)
        if lat_lon is None:
            return ''
        lat, lon = lat_lon
    else:
        lat, lon = data
    loc = '%s,%s' % (lat, lon)
    return 'https://www.google.com/maps/place/@%s,17z/' \
           'data=!3m1!4b1!4m5!3m4!1s0x0:0x0!8m2!3d%s!4d%s' % (
               loc, lat, lon)
|
[
"def",
"google_maps_geoloc_link",
"(",
"data",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"str",
")",
":",
"lat_lon",
"=",
"ip_geoloc",
"(",
"data",
")",
"if",
"lat_lon",
"is",
"None",
":",
"return",
"''",
"lat",
",",
"lon",
"=",
"lat_lon",
"else",
":",
"lat",
",",
"lon",
"=",
"data",
"loc",
"=",
"'%s,%s'",
"%",
"(",
"lat",
",",
"lon",
")",
"return",
"'https://www.google.com/maps/place/@%s,17z/'",
"'data=!3m1!4b1!4m5!3m4!1s0x0:0x0!8m2!3d%s!4d%s'",
"%",
"(",
"loc",
",",
"lat",
",",
"lon",
")"
] | 28.904762 | 18.809524 |
def report_view(self, request, key, period):
    """
    Processes the reporting action.
    """
    if not self.has_change_permission(request, None):
        raise PermissionDenied
    reporters = self.get_reporters()
    try:
        reporter = reporters[key]
    except KeyError:
        return self.render_report_error(request, _('Report not found'), 404)
    allowed_periods = [k for (k, v) in self.get_period_options()]
    if period == 'A':
        period = ''
    if period and period not in allowed_periods:
        return self.render_report_error(request, _('Invalid report type'), 400)
    try:
        return reporter.process(request, self.get_period_queryset(request, period), period)
    except:
        logger.exception('Tracking Reports could not generate the report due to an internal error')
        return self.render_report_error(request, _('An unexpected error has occurred'), 500)
|
[
"def",
"report_view",
"(",
"self",
",",
"request",
",",
"key",
",",
"period",
")",
":",
"if",
"not",
"self",
".",
"has_change_permission",
"(",
"request",
",",
"None",
")",
":",
"raise",
"PermissionDenied",
"reporters",
"=",
"self",
".",
"get_reporters",
"(",
")",
"try",
":",
"reporter",
"=",
"reporters",
"[",
"key",
"]",
"except",
"KeyError",
":",
"return",
"self",
".",
"render_report_error",
"(",
"request",
",",
"_",
"(",
"'Report not found'",
")",
",",
"404",
")",
"allowed_periods",
"=",
"[",
"k",
"for",
"(",
"k",
",",
"v",
")",
"in",
"self",
".",
"get_period_options",
"(",
")",
"]",
"if",
"period",
"==",
"'A'",
":",
"period",
"=",
"''",
"if",
"period",
"and",
"period",
"not",
"in",
"allowed_periods",
":",
"return",
"self",
".",
"render_report_error",
"(",
"request",
",",
"_",
"(",
"'Invalid report type'",
")",
",",
"400",
")",
"try",
":",
"return",
"reporter",
".",
"process",
"(",
"request",
",",
"self",
".",
"get_period_queryset",
"(",
"request",
",",
"period",
")",
",",
"period",
")",
"except",
":",
"logger",
".",
"exception",
"(",
"'Tracking Reports could not generate the report due to an internal error'",
")",
"return",
"self",
".",
"render_report_error",
"(",
"request",
",",
"_",
"(",
"'An unexpected error has occurred'",
")",
",",
"500",
")"
] | 37 | 25.307692 |
def score(self, query, normalized=True, synonimizer=None, return_suffix_scores=False):
    """
    Matches the string against the GAST using
    the algorithm described in [Chernyak, sections 1.3 & 1.4].
    Expects the input string to consist of
    alphabet letters only (no whitespaces etc.)
    Returns the score (a float in [0, 1]).
    query -- Unicode
    """
    query = query.replace(" ", "")
    result = 0
    suffix_scores = {}
    # For each suffix of the string:
    for suffix_start in xrange(len(query)):
        suffix = query[suffix_start:]
        suffix_score = 0
        suffix_result = 0
        matched_chars = 0
        nodes_matched = 0
        child_node = self.root.chose_arc(suffix)
        while child_node:
            nodes_matched += 1
            (str_ind, substr_start, substr_end) = child_node.arc()
            match = utils.match_strings(
                suffix, self.strings_collection[str_ind][substr_start:substr_end])
            suffix_score += child_node.conditional_probability()
            matched_chars += match
            suffix = suffix[match:]
            if suffix and match == substr_end - substr_start:
                child_node = child_node.chose_arc(suffix)
            else:
                break
        if matched_chars:
            suffix_result = (suffix_score + matched_chars - nodes_matched)
            if normalized:
                suffix_result /= matched_chars
        result += suffix_result
        suffix_scores[query[suffix_start:]] = suffix_result
    result /= len(query)
    if return_suffix_scores:
        result = result, suffix_scores
    return result
|
[
"def",
"score",
"(",
"self",
",",
"query",
",",
"normalized",
"=",
"True",
",",
"synonimizer",
"=",
"None",
",",
"return_suffix_scores",
"=",
"False",
")",
":",
"query",
"=",
"query",
".",
"replace",
"(",
"\" \"",
",",
"\"\"",
")",
"result",
"=",
"0",
"suffix_scores",
"=",
"{",
"}",
"# For each suffix of the string:",
"for",
"suffix_start",
"in",
"xrange",
"(",
"len",
"(",
"query",
")",
")",
":",
"suffix",
"=",
"query",
"[",
"suffix_start",
":",
"]",
"suffix_score",
"=",
"0",
"suffix_result",
"=",
"0",
"matched_chars",
"=",
"0",
"nodes_matched",
"=",
"0",
"child_node",
"=",
"self",
".",
"root",
".",
"chose_arc",
"(",
"suffix",
")",
"while",
"child_node",
":",
"nodes_matched",
"+=",
"1",
"(",
"str_ind",
",",
"substr_start",
",",
"substr_end",
")",
"=",
"child_node",
".",
"arc",
"(",
")",
"match",
"=",
"utils",
".",
"match_strings",
"(",
"suffix",
",",
"self",
".",
"strings_collection",
"[",
"str_ind",
"]",
"[",
"substr_start",
":",
"substr_end",
"]",
")",
"suffix_score",
"+=",
"child_node",
".",
"conditional_probability",
"(",
")",
"matched_chars",
"+=",
"match",
"suffix",
"=",
"suffix",
"[",
"match",
":",
"]",
"if",
"suffix",
"and",
"match",
"==",
"substr_end",
"-",
"substr_start",
":",
"child_node",
"=",
"child_node",
".",
"chose_arc",
"(",
"suffix",
")",
"else",
":",
"break",
"if",
"matched_chars",
":",
"suffix_result",
"=",
"(",
"suffix_score",
"+",
"matched_chars",
"-",
"nodes_matched",
")",
"if",
"normalized",
":",
"suffix_result",
"/=",
"matched_chars",
"result",
"+=",
"suffix_result",
"suffix_scores",
"[",
"query",
"[",
"suffix_start",
":",
"]",
"]",
"=",
"suffix_result",
"result",
"/=",
"len",
"(",
"query",
")",
"if",
"return_suffix_scores",
":",
"result",
"=",
"result",
",",
"suffix_scores",
"return",
"result"
] | 34.090909 | 17.909091 |
def delete(self, path, data={}):
    '''Perform DELETE Request'''
    if len(data) != 0:
        parameter_string = ''
        for k,v in data.items():
            parameter_string += '{}={}'.format(k,v)
            parameter_string += '&'
        path += '?' + parameter_string
    response = requests.delete(API_URL + path, headers=self._set_headers())
    return self._check_response(response, self.delete, path, data)
|
[
"def",
"delete",
"(",
"self",
",",
"path",
",",
"data",
"=",
"{",
"}",
")",
":",
"if",
"len",
"(",
"data",
")",
"!=",
"0",
":",
"parameter_string",
"=",
"''",
"for",
"k",
",",
"v",
"in",
"data",
".",
"items",
"(",
")",
":",
"parameter_string",
"+=",
"'{}={}'",
".",
"format",
"(",
"k",
",",
"v",
")",
"parameter_string",
"+=",
"'&'",
"path",
"+=",
"'?'",
"+",
"parameter_string",
"response",
"=",
"requests",
".",
"delete",
"(",
"API_URL",
"+",
"path",
",",
"headers",
"=",
"self",
".",
"_set_headers",
"(",
")",
")",
"return",
"self",
".",
"_check_response",
"(",
"response",
",",
"self",
".",
"delete",
",",
"path",
",",
"data",
")"
] | 40.727273 | 14.909091 |
def _find_matcher(self, alias):
    """ Finds a matcher based on the given alias or raises an error if no
    matcher could be found.
    """
    matcher = lookup(alias)
    if not matcher:
        msg = 'Matcher "%s" not found' % alias
        # Try to find similarly named matchers to help the user
        similar = suggest(alias, max=3, cutoff=0.5)
        if len(similar) > 1:
            last = similar.pop()
            msg += '. Perhaps you meant to use %s or %s?' % (', '.join(similar), last)
        elif len(similar) > 0:
            msg += '. Perhaps you meant to use %s?' % similar.pop()
        raise KeyError(msg)
    return matcher
|
[
"def",
"_find_matcher",
"(",
"self",
",",
"alias",
")",
":",
"matcher",
"=",
"lookup",
"(",
"alias",
")",
"if",
"not",
"matcher",
":",
"msg",
"=",
"'Matcher \"%s\" not found'",
"%",
"alias",
"# Try to find similarly named matchers to help the user",
"similar",
"=",
"suggest",
"(",
"alias",
",",
"max",
"=",
"3",
",",
"cutoff",
"=",
"0.5",
")",
"if",
"len",
"(",
"similar",
")",
">",
"1",
":",
"last",
"=",
"similar",
".",
"pop",
"(",
")",
"msg",
"+=",
"'. Perhaps you meant to use %s or %s?'",
"%",
"(",
"', '",
".",
"join",
"(",
"similar",
")",
",",
"last",
")",
"elif",
"len",
"(",
"similar",
")",
">",
"0",
":",
"msg",
"+=",
"'. Perhaps you meant to use %s?'",
"%",
"similar",
".",
"pop",
"(",
")",
"raise",
"KeyError",
"(",
"msg",
")",
"return",
"matcher"
] | 36.631579 | 17.789474 |
def detect(self, G):
    """Detect a single core-periphery pair.
    Parameters
    ----------
    G : NetworkX graph object
    Examples
    --------
    >>> import networkx as nx
    >>> import cpalgorithm as cpa
    >>> G = nx.karate_club_graph() # load the karate club network.
    >>> lrc = cp.LowRankCore()
    >>> lrc.detect(G)
    """
    self.c_, self.x_ = self._low_rank_core(G)
    self.Q_ = self._score(G, self.c_, self.x_)
    self.qs_ = self.Q_
|
[
"def",
"detect",
"(",
"self",
",",
"G",
")",
":",
"self",
".",
"c_",
",",
"self",
".",
"x_",
"=",
"self",
".",
"_low_rank_core",
"(",
"G",
")",
"self",
".",
"Q_",
"=",
"self",
".",
"_score",
"(",
"G",
",",
"self",
".",
"c_",
",",
"self",
".",
"x_",
")",
"self",
".",
"qs_",
"=",
"self",
".",
"Q_"
] | 20.142857 | 21.47619 |
def makeLys(segID, N, CA, C, O, geo):
    '''Creates a Lysine residue'''
    ##R-Group
    CA_CB_length=geo.CA_CB_length
    C_CA_CB_angle=geo.C_CA_CB_angle
    N_C_CA_CB_diangle=geo.N_C_CA_CB_diangle
    CB_CG_length=geo.CB_CG_length
    CA_CB_CG_angle=geo.CA_CB_CG_angle
    N_CA_CB_CG_diangle=geo.N_CA_CB_CG_diangle
    CG_CD_length=geo.CG_CD_length
    CB_CG_CD_angle=geo.CB_CG_CD_angle
    CA_CB_CG_CD_diangle=geo.CA_CB_CG_CD_diangle
    CD_CE_length=geo.CD_CE_length
    CG_CD_CE_angle=geo.CG_CD_CE_angle
    CB_CG_CD_CE_diangle=geo.CB_CG_CD_CE_diangle
    CE_NZ_length=geo.CE_NZ_length
    CD_CE_NZ_angle=geo.CD_CE_NZ_angle
    CG_CD_CE_NZ_diangle=geo.CG_CD_CE_NZ_diangle
    carbon_b= calculateCoordinates(N, C, CA, CA_CB_length, C_CA_CB_angle, N_C_CA_CB_diangle)
    CB= Atom("CB", carbon_b, 0.0 , 1.0, " "," CB", 0,"C")
    carbon_g= calculateCoordinates(N, CA, CB, CB_CG_length, CA_CB_CG_angle, N_CA_CB_CG_diangle)
    CG= Atom("CG", carbon_g, 0.0, 1.0, " ", " CG", 0, "C")
    carbon_d= calculateCoordinates(CA, CB, CG, CG_CD_length, CB_CG_CD_angle, CA_CB_CG_CD_diangle)
    CD= Atom("CD", carbon_d, 0.0, 1.0, " ", " CD", 0, "C")
    carbon_e= calculateCoordinates(CB, CG, CD, CD_CE_length, CG_CD_CE_angle, CB_CG_CD_CE_diangle)
    CE= Atom("CE", carbon_e, 0.0, 1.0, " ", " CE", 0, "C")
    nitrogen_z= calculateCoordinates(CG, CD, CE, CE_NZ_length, CD_CE_NZ_angle, CG_CD_CE_NZ_diangle)
    NZ= Atom("NZ", nitrogen_z, 0.0, 1.0, " ", " NZ", 0, "N")
    ##Create Residue Data Structure
    res= Residue((' ', segID, ' '), "LYS", ' ')
    res.add(N)
    res.add(CA)
    res.add(C)
    res.add(O)
    res.add(CB)
    res.add(CG)
    res.add(CD)
    res.add(CE)
    res.add(NZ)
    return res
|
[
"def",
"makeLys",
"(",
"segID",
",",
"N",
",",
"CA",
",",
"C",
",",
"O",
",",
"geo",
")",
":",
"##R-Group",
"CA_CB_length",
"=",
"geo",
".",
"CA_CB_length",
"C_CA_CB_angle",
"=",
"geo",
".",
"C_CA_CB_angle",
"N_C_CA_CB_diangle",
"=",
"geo",
".",
"N_C_CA_CB_diangle",
"CB_CG_length",
"=",
"geo",
".",
"CB_CG_length",
"CA_CB_CG_angle",
"=",
"geo",
".",
"CA_CB_CG_angle",
"N_CA_CB_CG_diangle",
"=",
"geo",
".",
"N_CA_CB_CG_diangle",
"CG_CD_length",
"=",
"geo",
".",
"CG_CD_length",
"CB_CG_CD_angle",
"=",
"geo",
".",
"CB_CG_CD_angle",
"CA_CB_CG_CD_diangle",
"=",
"geo",
".",
"CA_CB_CG_CD_diangle",
"CD_CE_length",
"=",
"geo",
".",
"CD_CE_length",
"CG_CD_CE_angle",
"=",
"geo",
".",
"CG_CD_CE_angle",
"CB_CG_CD_CE_diangle",
"=",
"geo",
".",
"CB_CG_CD_CE_diangle",
"CE_NZ_length",
"=",
"geo",
".",
"CE_NZ_length",
"CD_CE_NZ_angle",
"=",
"geo",
".",
"CD_CE_NZ_angle",
"CG_CD_CE_NZ_diangle",
"=",
"geo",
".",
"CG_CD_CE_NZ_diangle",
"carbon_b",
"=",
"calculateCoordinates",
"(",
"N",
",",
"C",
",",
"CA",
",",
"CA_CB_length",
",",
"C_CA_CB_angle",
",",
"N_C_CA_CB_diangle",
")",
"CB",
"=",
"Atom",
"(",
"\"CB\"",
",",
"carbon_b",
",",
"0.0",
",",
"1.0",
",",
"\" \"",
",",
"\" CB\"",
",",
"0",
",",
"\"C\"",
")",
"carbon_g",
"=",
"calculateCoordinates",
"(",
"N",
",",
"CA",
",",
"CB",
",",
"CB_CG_length",
",",
"CA_CB_CG_angle",
",",
"N_CA_CB_CG_diangle",
")",
"CG",
"=",
"Atom",
"(",
"\"CG\"",
",",
"carbon_g",
",",
"0.0",
",",
"1.0",
",",
"\" \"",
",",
"\" CG\"",
",",
"0",
",",
"\"C\"",
")",
"carbon_d",
"=",
"calculateCoordinates",
"(",
"CA",
",",
"CB",
",",
"CG",
",",
"CG_CD_length",
",",
"CB_CG_CD_angle",
",",
"CA_CB_CG_CD_diangle",
")",
"CD",
"=",
"Atom",
"(",
"\"CD\"",
",",
"carbon_d",
",",
"0.0",
",",
"1.0",
",",
"\" \"",
",",
"\" CD\"",
",",
"0",
",",
"\"C\"",
")",
"carbon_e",
"=",
"calculateCoordinates",
"(",
"CB",
",",
"CG",
",",
"CD",
",",
"CD_CE_length",
",",
"CG_CD_CE_angle",
",",
"CB_CG_CD_CE_diangle",
")",
"CE",
"=",
"Atom",
"(",
"\"CE\"",
",",
"carbon_e",
",",
"0.0",
",",
"1.0",
",",
"\" \"",
",",
"\" CE\"",
",",
"0",
",",
"\"C\"",
")",
"nitrogen_z",
"=",
"calculateCoordinates",
"(",
"CG",
",",
"CD",
",",
"CE",
",",
"CE_NZ_length",
",",
"CD_CE_NZ_angle",
",",
"CG_CD_CE_NZ_diangle",
")",
"NZ",
"=",
"Atom",
"(",
"\"NZ\"",
",",
"nitrogen_z",
",",
"0.0",
",",
"1.0",
",",
"\" \"",
",",
"\" NZ\"",
",",
"0",
",",
"\"N\"",
")",
"##Create Residue Data Structure",
"res",
"=",
"Residue",
"(",
"(",
"' '",
",",
"segID",
",",
"' '",
")",
",",
"\"LYS\"",
",",
"' '",
")",
"res",
".",
"add",
"(",
"N",
")",
"res",
".",
"add",
"(",
"CA",
")",
"res",
".",
"add",
"(",
"C",
")",
"res",
".",
"add",
"(",
"O",
")",
"res",
".",
"add",
"(",
"CB",
")",
"res",
".",
"add",
"(",
"CG",
")",
"res",
".",
"add",
"(",
"CD",
")",
"res",
".",
"add",
"(",
"CE",
")",
"res",
".",
"add",
"(",
"NZ",
")",
"return",
"res"
] | 36.23913 | 21.586957 |
def move_item_into_viewport(self, item):
    """Causes the `item` to be moved into the viewport
    The zoom factor and the position of the viewport are updated to move the `item` into the viewport. If `item`
    is not a `StateView`, the parental `StateView` is moved into the viewport.
    :param StateView | ConnectionView | PortView item: The item to be moved into the viewport
    """
    if not item:
        return
    HORIZONTAL = 0
    VERTICAL = 1
    if not isinstance(item, Item):
        state_v = item.parent
    elif not isinstance(item, StateView):
        state_v = self.canvas.get_parent(item)
    else:
        state_v = item
    viewport_size = self.view.editor.get_allocation().width, self.view.editor.get_allocation().height
    state_size = self.view.editor.get_matrix_i2v(state_v).transform_distance(state_v.width, state_v.height)
    min_relative_size = min(viewport_size[i] / state_size[i] for i in [HORIZONTAL, VERTICAL])
    if min_relative_size != 1:
        # Allow margin around state
        margin_relative = 1. / gui_constants.BORDER_WIDTH_STATE_SIZE_FACTOR
        zoom_factor = min_relative_size * (1 - margin_relative)
        if zoom_factor > 1:
            zoom_base = 4
            zoom_factor = max(1, math.log(zoom_factor*zoom_base, zoom_base))
        self.view.editor.zoom(zoom_factor)
        # The zoom operation must be performed before the pan operation to work on updated GtkAdjustments (scroll
        # bars)
        self.canvas.wait_for_update()
    state_pos = self.view.editor.get_matrix_i2v(state_v).transform_point(0, 0)
    state_size = self.view.editor.get_matrix_i2v(state_v).transform_distance(state_v.width, state_v.height)
    viewport_size = self.view.editor.get_allocation().width, self.view.editor.get_allocation().height
    # Calculate offset around state so that the state is centered in the viewport
    padding_offset_horizontal = (viewport_size[HORIZONTAL] - state_size[HORIZONTAL]) / 2.
    padding_offset_vertical = (viewport_size[VERTICAL] - state_size[VERTICAL]) / 2.
    self.view.editor.hadjustment.set_value(state_pos[HORIZONTAL] - padding_offset_horizontal)
    self.view.editor.vadjustment.set_value(state_pos[VERTICAL] - padding_offset_vertical)
|
[
"def",
"move_item_into_viewport",
"(",
"self",
",",
"item",
")",
":",
"if",
"not",
"item",
":",
"return",
"HORIZONTAL",
"=",
"0",
"VERTICAL",
"=",
"1",
"if",
"not",
"isinstance",
"(",
"item",
",",
"Item",
")",
":",
"state_v",
"=",
"item",
".",
"parent",
"elif",
"not",
"isinstance",
"(",
"item",
",",
"StateView",
")",
":",
"state_v",
"=",
"self",
".",
"canvas",
".",
"get_parent",
"(",
"item",
")",
"else",
":",
"state_v",
"=",
"item",
"viewport_size",
"=",
"self",
".",
"view",
".",
"editor",
".",
"get_allocation",
"(",
")",
".",
"width",
",",
"self",
".",
"view",
".",
"editor",
".",
"get_allocation",
"(",
")",
".",
"height",
"state_size",
"=",
"self",
".",
"view",
".",
"editor",
".",
"get_matrix_i2v",
"(",
"state_v",
")",
".",
"transform_distance",
"(",
"state_v",
".",
"width",
",",
"state_v",
".",
"height",
")",
"min_relative_size",
"=",
"min",
"(",
"viewport_size",
"[",
"i",
"]",
"/",
"state_size",
"[",
"i",
"]",
"for",
"i",
"in",
"[",
"HORIZONTAL",
",",
"VERTICAL",
"]",
")",
"if",
"min_relative_size",
"!=",
"1",
":",
"# Allow margin around state",
"margin_relative",
"=",
"1.",
"/",
"gui_constants",
".",
"BORDER_WIDTH_STATE_SIZE_FACTOR",
"zoom_factor",
"=",
"min_relative_size",
"*",
"(",
"1",
"-",
"margin_relative",
")",
"if",
"zoom_factor",
">",
"1",
":",
"zoom_base",
"=",
"4",
"zoom_factor",
"=",
"max",
"(",
"1",
",",
"math",
".",
"log",
"(",
"zoom_factor",
"*",
"zoom_base",
",",
"zoom_base",
")",
")",
"self",
".",
"view",
".",
"editor",
".",
"zoom",
"(",
"zoom_factor",
")",
"# The zoom operation must be performed before the pan operation to work on updated GtkAdjustments (scroll",
"# bars)",
"self",
".",
"canvas",
".",
"wait_for_update",
"(",
")",
"state_pos",
"=",
"self",
".",
"view",
".",
"editor",
".",
"get_matrix_i2v",
"(",
"state_v",
")",
".",
"transform_point",
"(",
"0",
",",
"0",
")",
"state_size",
"=",
"self",
".",
"view",
".",
"editor",
".",
"get_matrix_i2v",
"(",
"state_v",
")",
".",
"transform_distance",
"(",
"state_v",
".",
"width",
",",
"state_v",
".",
"height",
")",
"viewport_size",
"=",
"self",
".",
"view",
".",
"editor",
".",
"get_allocation",
"(",
")",
".",
"width",
",",
"self",
".",
"view",
".",
"editor",
".",
"get_allocation",
"(",
")",
".",
"height",
"# Calculate offset around state so that the state is centered in the viewport",
"padding_offset_horizontal",
"=",
"(",
"viewport_size",
"[",
"HORIZONTAL",
"]",
"-",
"state_size",
"[",
"HORIZONTAL",
"]",
")",
"/",
"2.",
"padding_offset_vertical",
"=",
"(",
"viewport_size",
"[",
"VERTICAL",
"]",
"-",
"state_size",
"[",
"VERTICAL",
"]",
")",
"/",
"2.",
"self",
".",
"view",
".",
"editor",
".",
"hadjustment",
".",
"set_value",
"(",
"state_pos",
"[",
"HORIZONTAL",
"]",
"-",
"padding_offset_horizontal",
")",
"self",
".",
"view",
".",
"editor",
".",
"vadjustment",
".",
"set_value",
"(",
"state_pos",
"[",
"VERTICAL",
"]",
"-",
"padding_offset_vertical",
")"
] | 54.348837 | 32.186047 |
def create(self, name, serviceId, timezone, description, enabled):
    """
    Create a connector for the organization in the Watson IoT Platform.
    The connector must reference the target service that the Watson IoT Platform will store the IoT data in.
    Parameters:
        - name (string) - Name of the service
        - serviceId (string) - must be either eventstreams or cloudant
        - timezone (string) -
        - description (string) - description of the service
        - enabled (boolean) - enabled
    Throws APIException on failure
    """
    connector = {
        "name": name,
        "description": description,
        "serviceId": serviceId,
        "timezone": timezone,
        "enabled": enabled,
    }
    url = "api/v0002/historianconnectors"
    r = self._apiClient.post(url, data=connector)
    if r.status_code == 201:
        return Connector(apiClient=self._apiClient, **r.json())
    else:
        raise ApiException(r)
|
[
"def",
"create",
"(",
"self",
",",
"name",
",",
"serviceId",
",",
"timezone",
",",
"description",
",",
"enabled",
")",
":",
"connector",
"=",
"{",
"\"name\"",
":",
"name",
",",
"\"description\"",
":",
"description",
",",
"\"serviceId\"",
":",
"serviceId",
",",
"\"timezone\"",
":",
"timezone",
",",
"\"enabled\"",
":",
"enabled",
",",
"}",
"url",
"=",
"\"api/v0002/historianconnectors\"",
"r",
"=",
"self",
".",
"_apiClient",
".",
"post",
"(",
"url",
",",
"data",
"=",
"connector",
")",
"if",
"r",
".",
"status_code",
"==",
"201",
":",
"return",
"Connector",
"(",
"apiClient",
"=",
"self",
".",
"_apiClient",
",",
"*",
"*",
"r",
".",
"json",
"(",
")",
")",
"else",
":",
"raise",
"ApiException",
"(",
"r",
")"
] | 36.785714 | 18.714286 |
def get_id(date=None, project: str = 'sip',
           instance_id: int = None) -> str:
    """Get a SBI Identifier.
    Args:
        date (str or datetime.datetime, optional): UTC date of the SBI
        project (str, optional ): Project Name
        instance_id (int, optional): SBI instance identifier
    Returns:
        str, Scheduling Block Instance (SBI) ID.
    """
    if date is None:
        date = datetime.datetime.utcnow()
    if isinstance(date, datetime.datetime):
        date = date.strftime('%Y%m%d')
    if instance_id is None:
        instance_id = randint(0, 9999)
    return 'SBI-{}-{}-{:04d}'.format(date, project, instance_id)
|
[
"def",
"get_id",
"(",
"date",
"=",
"None",
",",
"project",
":",
"str",
"=",
"'sip'",
",",
"instance_id",
":",
"int",
"=",
"None",
")",
"->",
"str",
":",
"if",
"date",
"is",
"None",
":",
"date",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"if",
"isinstance",
"(",
"date",
",",
"datetime",
".",
"datetime",
")",
":",
"date",
"=",
"date",
".",
"strftime",
"(",
"'%Y%m%d'",
")",
"if",
"instance_id",
"is",
"None",
":",
"instance_id",
"=",
"randint",
"(",
"0",
",",
"9999",
")",
"return",
"'SBI-{}-{}-{:04d}'",
".",
"format",
"(",
"date",
",",
"project",
",",
"instance_id",
")"
] | 30.478261 | 19.565217 |
def is_group_valid(self, groupID):
    """
    Check if this group ID is valid.
    """
    cur = self.conn.cursor()
    cur.execute('SELECT * FROM groups WHERE id=? LIMIT 1', [groupID])
    results = cur.fetchall()
    cur.close()
    logging.debug('is_group_valid(groupID={}) => {}'.format(groupID, True if len(results) else False))
    return len(results) > 0
|
[
"def",
"is_group_valid",
"(",
"self",
",",
"groupID",
")",
":",
"cur",
"=",
"self",
".",
"conn",
".",
"cursor",
"(",
")",
"cur",
".",
"execute",
"(",
"'SELECT * FROM groups WHERE id=? LIMIT 1'",
",",
"[",
"groupID",
"]",
")",
"results",
"=",
"cur",
".",
"fetchall",
"(",
")",
"cur",
".",
"close",
"(",
")",
"logging",
".",
"debug",
"(",
"'is_group_valid(groupID={}) => {}'",
".",
"format",
"(",
"groupID",
",",
"True",
"if",
"len",
"(",
"results",
")",
"else",
"False",
")",
")",
"return",
"len",
"(",
"results",
")",
">",
"0"
] | 35.363636 | 17.363636 |
def clean_title(self, title):
    """Clean title with the use of og:site_name
    in this case try to get rid of site name
    and use TITLE_SPLITTERS to reformat title
    """
    # check if we have the site name in opengraph data
    if "site_name" in self.article.opengraph.keys():
        site_name = self.article.opengraph['site_name']
        # remove the site name from title
        title = title.replace(site_name, '').strip()
    # try to remove the domain from url
    if self.article.domain:
        pattern = re.compile(self.article.domain, re.IGNORECASE)
        title = pattern.sub("", title).strip()
    # split the title in words
    # TechCrunch | my wonderfull article
    # my wonderfull article | TechCrunch
    title_words = title.split()
    # check for an empty title
    # so that we don't get an IndexError below
    if len(title_words) == 0:
        return u""
    # check if first letter is in TITLE_SPLITTERS
    # if so remove it
    if title_words[0] in TITLE_SPLITTERS:
        title_words.pop(0)
    # check if last letter is in TITLE_SPLITTERS
    # if so remove it
    if title_words[-1] in TITLE_SPLITTERS:
        title_words.pop(-1)
    # rebuild the title
    title = u" ".join(title_words).strip()
    return title
|
[
"def",
"clean_title",
"(",
"self",
",",
"title",
")",
":",
"# check if we have the site name in opengraph data",
"if",
"\"site_name\"",
"in",
"self",
".",
"article",
".",
"opengraph",
".",
"keys",
"(",
")",
":",
"site_name",
"=",
"self",
".",
"article",
".",
"opengraph",
"[",
"'site_name'",
"]",
"# remove the site name from title",
"title",
"=",
"title",
".",
"replace",
"(",
"site_name",
",",
"''",
")",
".",
"strip",
"(",
")",
"# try to remove the domain from url",
"if",
"self",
".",
"article",
".",
"domain",
":",
"pattern",
"=",
"re",
".",
"compile",
"(",
"self",
".",
"article",
".",
"domain",
",",
"re",
".",
"IGNORECASE",
")",
"title",
"=",
"pattern",
".",
"sub",
"(",
"\"\"",
",",
"title",
")",
".",
"strip",
"(",
")",
"# split the title in words",
"# TechCrunch | my wonderfull article",
"# my wonderfull article | TechCrunch",
"title_words",
"=",
"title",
".",
"split",
"(",
")",
"# check for an empty title",
"# so that we don't get an IndexError below",
"if",
"len",
"(",
"title_words",
")",
"==",
"0",
":",
"return",
"u\"\"",
"# check if first letter is in TITLE_SPLITTERS",
"# if so remove it",
"if",
"title_words",
"[",
"0",
"]",
"in",
"TITLE_SPLITTERS",
":",
"title_words",
".",
"pop",
"(",
"0",
")",
"# check if last letter is in TITLE_SPLITTERS",
"# if so remove it",
"if",
"title_words",
"[",
"-",
"1",
"]",
"in",
"TITLE_SPLITTERS",
":",
"title_words",
".",
"pop",
"(",
"-",
"1",
")",
"# rebuild the title",
"title",
"=",
"u\" \"",
".",
"join",
"(",
"title_words",
")",
".",
"strip",
"(",
")",
"return",
"title"
] | 33.75 | 15.4 |
def processWhileRunning(self):
    """
    Run tasks until stopService is called.
    """
    work = self.step()
    for result, more in work:
        yield result
        if not self.running:
            break
        if more:
            delay = 0.1
        else:
            delay = 10.0
        yield task.deferLater(reactor, delay, lambda: None)
|
[
"def",
"processWhileRunning",
"(",
"self",
")",
":",
"work",
"=",
"self",
".",
"step",
"(",
")",
"for",
"result",
",",
"more",
"in",
"work",
":",
"yield",
"result",
"if",
"not",
"self",
".",
"running",
":",
"break",
"if",
"more",
":",
"delay",
"=",
"0.1",
"else",
":",
"delay",
"=",
"10.0",
"yield",
"task",
".",
"deferLater",
"(",
"reactor",
",",
"delay",
",",
"lambda",
":",
"None",
")"
] | 27.785714 | 12.214286 |
def _get_aux_specs(self):
    """Get and pre-process all of the non-core specifications."""
    # Drop the "core" specifications, which are handled separately.
    specs = self._specs_in.copy()
    [specs.pop(core) for core in self._CORE_SPEC_NAMES]
    specs[_REGIONS_STR] = self._get_regions()
    specs[_VARIABLES_STR] = self._get_variables()
    specs['date_ranges'] = self._get_date_ranges()
    specs['output_time_regional_reductions'] = self._get_time_reg_reducts()
    return specs
|
[
"def",
"_get_aux_specs",
"(",
"self",
")",
":",
"# Drop the \"core\" specifications, which are handled separately.",
"specs",
"=",
"self",
".",
"_specs_in",
".",
"copy",
"(",
")",
"[",
"specs",
".",
"pop",
"(",
"core",
")",
"for",
"core",
"in",
"self",
".",
"_CORE_SPEC_NAMES",
"]",
"specs",
"[",
"_REGIONS_STR",
"]",
"=",
"self",
".",
"_get_regions",
"(",
")",
"specs",
"[",
"_VARIABLES_STR",
"]",
"=",
"self",
".",
"_get_variables",
"(",
")",
"specs",
"[",
"'date_ranges'",
"]",
"=",
"self",
".",
"_get_date_ranges",
"(",
")",
"specs",
"[",
"'output_time_regional_reductions'",
"]",
"=",
"self",
".",
"_get_time_reg_reducts",
"(",
")",
"return",
"specs"
] | 43 | 20.25 |
def evaluate(ref_intervals_hier, ref_labels_hier,
est_intervals_hier, est_labels_hier, **kwargs):
'''Compute all hierarchical structure metrics for the given reference and
estimated annotations.
Examples
--------
A toy example with two two-layer annotations
>>> ref_i = [[[0, 30], [30, 60]], [[0, 15], [15, 30], [30, 45], [45, 60]]]
>>> est_i = [[[0, 45], [45, 60]], [[0, 15], [15, 30], [30, 45], [45, 60]]]
>>> ref_l = [ ['A', 'B'], ['a', 'b', 'a', 'c'] ]
>>> est_l = [ ['A', 'B'], ['a', 'a', 'b', 'b'] ]
>>> scores = mir_eval.hierarchy.evaluate(ref_i, ref_l, est_i, est_l)
>>> dict(scores)
{'T-Measure full': 0.94822745804853459,
'T-Measure reduced': 0.8732458222764804,
'T-Precision full': 0.96569179094693058,
'T-Precision reduced': 0.89939075137018787,
'T-Recall full': 0.93138358189386117,
'T-Recall reduced': 0.84857799953694923}
A more realistic example, using SALAMI pre-parsed annotations
>>> def load_salami(filename):
... "load SALAMI event format as labeled intervals"
... events, labels = mir_eval.io.load_labeled_events(filename)
... intervals = mir_eval.util.boundaries_to_intervals(events)[0]
... return intervals, labels[:len(intervals)]
>>> ref_files = ['data/10/parsed/textfile1_uppercase.txt',
... 'data/10/parsed/textfile1_lowercase.txt']
>>> est_files = ['data/10/parsed/textfile2_uppercase.txt',
... 'data/10/parsed/textfile2_lowercase.txt']
>>> ref = [load_salami(fname) for fname in ref_files]
>>> ref_int = [seg[0] for seg in ref]
>>> ref_lab = [seg[1] for seg in ref]
>>> est = [load_salami(fname) for fname in est_files]
>>> est_int = [seg[0] for seg in est]
>>> est_lab = [seg[1] for seg in est]
    >>> scores = mir_eval.hierarchy.evaluate(ref_int, ref_lab,
    ...                                      est_int, est_lab)
>>> dict(scores)
{'T-Measure full': 0.66029225561405358,
'T-Measure reduced': 0.62001868041578034,
'T-Precision full': 0.66844764668949885,
'T-Precision reduced': 0.63252297209957919,
'T-Recall full': 0.6523334654992341,
'T-Recall reduced': 0.60799919710921635}
Parameters
----------
ref_intervals_hier : list of list-like
ref_labels_hier : list of list of str
est_intervals_hier : list of list-like
est_labels_hier : list of list of str
Hierarchical annotations are encoded as an ordered list
of segmentations. Each segmentation itself is a list (or list-like)
of intervals (\*_intervals_hier) and a list of lists of labels
(\*_labels_hier).
kwargs
additional keyword arguments to the evaluation metrics.
Returns
-------
scores : OrderedDict
Dictionary of scores, where the key is the metric name (str) and
the value is the (float) score achieved.
T-measures are computed in both the "full" (``transitive=True``) and
"reduced" (``transitive=False``) modes.
Raises
------
ValueError
Thrown when the provided annotations are not valid.
'''
# First, find the maximum length of the reference
_, t_end = _hierarchy_bounds(ref_intervals_hier)
# Pre-process the intervals to match the range of the reference,
# and start at 0
ref_intervals_hier, ref_labels_hier = _align_intervals(ref_intervals_hier,
ref_labels_hier,
t_min=0.0,
t_max=None)
est_intervals_hier, est_labels_hier = _align_intervals(est_intervals_hier,
est_labels_hier,
t_min=0.0,
t_max=t_end)
scores = collections.OrderedDict()
# Force the transitivity setting
kwargs['transitive'] = False
(scores['T-Precision reduced'],
scores['T-Recall reduced'],
scores['T-Measure reduced']) = util.filter_kwargs(tmeasure,
ref_intervals_hier,
est_intervals_hier,
**kwargs)
kwargs['transitive'] = True
(scores['T-Precision full'],
scores['T-Recall full'],
scores['T-Measure full']) = util.filter_kwargs(tmeasure,
ref_intervals_hier,
est_intervals_hier,
**kwargs)
(scores['L-Precision'],
scores['L-Recall'],
scores['L-Measure']) = util.filter_kwargs(lmeasure,
ref_intervals_hier,
ref_labels_hier,
est_intervals_hier,
est_labels_hier,
**kwargs)
return scores
|
[
"def",
"evaluate",
"(",
"ref_intervals_hier",
",",
"ref_labels_hier",
",",
"est_intervals_hier",
",",
"est_labels_hier",
",",
"*",
"*",
"kwargs",
")",
":",
"# First, find the maximum length of the reference",
"_",
",",
"t_end",
"=",
"_hierarchy_bounds",
"(",
"ref_intervals_hier",
")",
"# Pre-process the intervals to match the range of the reference,",
"# and start at 0",
"ref_intervals_hier",
",",
"ref_labels_hier",
"=",
"_align_intervals",
"(",
"ref_intervals_hier",
",",
"ref_labels_hier",
",",
"t_min",
"=",
"0.0",
",",
"t_max",
"=",
"None",
")",
"est_intervals_hier",
",",
"est_labels_hier",
"=",
"_align_intervals",
"(",
"est_intervals_hier",
",",
"est_labels_hier",
",",
"t_min",
"=",
"0.0",
",",
"t_max",
"=",
"t_end",
")",
"scores",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"# Force the transitivity setting",
"kwargs",
"[",
"'transitive'",
"]",
"=",
"False",
"(",
"scores",
"[",
"'T-Precision reduced'",
"]",
",",
"scores",
"[",
"'T-Recall reduced'",
"]",
",",
"scores",
"[",
"'T-Measure reduced'",
"]",
")",
"=",
"util",
".",
"filter_kwargs",
"(",
"tmeasure",
",",
"ref_intervals_hier",
",",
"est_intervals_hier",
",",
"*",
"*",
"kwargs",
")",
"kwargs",
"[",
"'transitive'",
"]",
"=",
"True",
"(",
"scores",
"[",
"'T-Precision full'",
"]",
",",
"scores",
"[",
"'T-Recall full'",
"]",
",",
"scores",
"[",
"'T-Measure full'",
"]",
")",
"=",
"util",
".",
"filter_kwargs",
"(",
"tmeasure",
",",
"ref_intervals_hier",
",",
"est_intervals_hier",
",",
"*",
"*",
"kwargs",
")",
"(",
"scores",
"[",
"'L-Precision'",
"]",
",",
"scores",
"[",
"'L-Recall'",
"]",
",",
"scores",
"[",
"'L-Measure'",
"]",
")",
"=",
"util",
".",
"filter_kwargs",
"(",
"lmeasure",
",",
"ref_intervals_hier",
",",
"ref_labels_hier",
",",
"est_intervals_hier",
",",
"est_labels_hier",
",",
"*",
"*",
"kwargs",
")",
"return",
"scores"
] | 41.868852 | 21.737705 |
def set_attrs(self, **attrs):
"""Set the given attributes on *all* commands in collection."""
commands = tuple(self.values())
for name, value in attrs.items():
for command in commands:
setattr(command, name, value)
|
[
"def",
"set_attrs",
"(",
"self",
",",
"*",
"*",
"attrs",
")",
":",
"commands",
"=",
"tuple",
"(",
"self",
".",
"values",
"(",
")",
")",
"for",
"name",
",",
"value",
"in",
"attrs",
".",
"items",
"(",
")",
":",
"for",
"command",
"in",
"commands",
":",
"setattr",
"(",
"command",
",",
"name",
",",
"value",
")"
] | 43.5 | 3.666667 |
def to_hex(x):
"""
Converts input to the hex string
:param x:
:return:
"""
if isinstance(x, bytearray):
x = bytes(x)
elif isinstance(x, (list, tuple)):
x = bytes(bytearray(x))
if isinstance(x, basestring):
return base64.b16encode(x).decode('ascii')
else:
raise ValueError('Unknown input argument type')
|
[
"def",
"to_hex",
"(",
"x",
")",
":",
"if",
"isinstance",
"(",
"x",
",",
"bytearray",
")",
":",
"x",
"=",
"bytes",
"(",
"x",
")",
"elif",
"isinstance",
"(",
"x",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"x",
"=",
"bytes",
"(",
"bytearray",
"(",
"x",
")",
")",
"if",
"isinstance",
"(",
"x",
",",
"basestring",
")",
":",
"return",
"base64",
".",
"b16encode",
"(",
"x",
")",
".",
"decode",
"(",
"'ascii'",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unknown input argument type'",
")"
] | 25.5 | 13.357143 |
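A short usage sketch for to_hex; note that base64.b16encode produces uppercase hex digits, and the basestring check implies a Python 2-style compatibility shim is in scope for the function above.

print(to_hex(b"\x01\xab\xff"))          # '01ABFF'
print(to_hex(bytearray([0, 16, 255])))  # '0010FF'
print(to_hex([222, 173, 190, 239]))     # 'DEADBEEF'
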
def write_collection_from_tmpfile(self, collection_id, tmpfi, parent_sha, auth_info, commit_msg=''):
"""Given a collection_id, temporary filename of content, branch and auth_info
"""
return self.write_doc_from_tmpfile(collection_id,
tmpfi,
parent_sha,
auth_info,
commit_msg,
doctype_display_name="collection")
|
[
"def",
"write_collection_from_tmpfile",
"(",
"self",
",",
"collection_id",
",",
"tmpfi",
",",
"parent_sha",
",",
"auth_info",
",",
"commit_msg",
"=",
"''",
")",
":",
"return",
"self",
".",
"write_doc_from_tmpfile",
"(",
"collection_id",
",",
"tmpfi",
",",
"parent_sha",
",",
"auth_info",
",",
"commit_msg",
",",
"doctype_display_name",
"=",
"\"collection\"",
")"
] | 60 | 18.222222 |
def my_psd(x,NFFT=2**10,Fs=1):
"""
    A local version of Matplotlib's PSD function that returns the plot arrays.
A mlab.psd wrapper function that returns two ndarrays;
makes no attempt to auto plot anything.
Parameters
----------
x : ndarray input signal
NFFT : a power of two, e.g., 2**10 = 1024
Fs : the sampling rate in Hz
Returns
-------
Px : ndarray of the power spectrum estimate
f : ndarray of frequency values
Notes
-----
This function makes it easier to overlay spectrum plots because
you have better control over the axis scaling than when using psd()
in the autoscale mode.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm import digitalcom as dc
>>> from numpy import log10
>>> x,b, data = dc.NRZ_bits(10000,10)
>>> Px,f = dc.my_psd(x,2**10,10)
>>> plt.plot(f, 10*log10(Px))
>>> plt.show()
"""
Px,f = pylab.mlab.psd(x,NFFT,Fs)
return Px.flatten(), f
|
[
"def",
"my_psd",
"(",
"x",
",",
"NFFT",
"=",
"2",
"**",
"10",
",",
"Fs",
"=",
"1",
")",
":",
"Px",
",",
"f",
"=",
"pylab",
".",
"mlab",
".",
"psd",
"(",
"x",
",",
"NFFT",
",",
"Fs",
")",
"return",
"Px",
".",
"flatten",
"(",
")",
",",
"f"
] | 27.138889 | 18.416667 |
def send_message(self, message, room_id, **kwargs):
"""
Send a message to a given room
"""
return SendMessage(settings=self.settings, **kwargs).call(
message=message,
room_id=room_id,
**kwargs
)
|
[
"def",
"send_message",
"(",
"self",
",",
"message",
",",
"room_id",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"SendMessage",
"(",
"settings",
"=",
"self",
".",
"settings",
",",
"*",
"*",
"kwargs",
")",
".",
"call",
"(",
"message",
"=",
"message",
",",
"room_id",
"=",
"room_id",
",",
"*",
"*",
"kwargs",
")"
] | 29.111111 | 12.666667 |
def add_subgroups(self, subgroups):
"""
Add a list of SubGroupDefinition objects to this composite.
Note that in contrast to :meth:`BaseTrack`, which takes a single
dictionary indicating the particular subgroups for the track, this
method takes a list of :class:`SubGroupDefinition` objects representing
the allowed subgroups for the composite.
:param subgroups:
List of SubGroupDefinition objects.
"""
if subgroups is None:
subgroups = {}
_subgroups = {}
for sg in subgroups:
assert isinstance(sg, SubGroupDefinition)
_subgroups[sg.name] = sg
self.subgroups = _subgroups
|
[
"def",
"add_subgroups",
"(",
"self",
",",
"subgroups",
")",
":",
"if",
"subgroups",
"is",
"None",
":",
"subgroups",
"=",
"{",
"}",
"_subgroups",
"=",
"{",
"}",
"for",
"sg",
"in",
"subgroups",
":",
"assert",
"isinstance",
"(",
"sg",
",",
"SubGroupDefinition",
")",
"_subgroups",
"[",
"sg",
".",
"name",
"]",
"=",
"sg",
"self",
".",
"subgroups",
"=",
"_subgroups"
] | 36.789474 | 17 |
def containerFor(self, entry):
"""
        Returns a container for the given entry widget.
:param entry | <XOrbQueryEntryWidget>
:return <XOrbQueryContainer> || None
"""
try:
index = self._compoundStack.index(entry)
except ValueError:
return None
return self.widget(index + 1)
|
[
"def",
"containerFor",
"(",
"self",
",",
"entry",
")",
":",
"try",
":",
"index",
"=",
"self",
".",
"_compoundStack",
".",
"index",
"(",
"entry",
")",
"except",
"ValueError",
":",
"return",
"None",
"return",
"self",
".",
"widget",
"(",
"index",
"+",
"1",
")"
] | 28.142857 | 15.142857 |
def class_name(self):
"""str: class name of the key or None if not available."""
if not self._registry_key and self._registry:
self._GetKeyFromRegistry()
if not self._registry_key:
return None
return self._registry_key.class_name
|
[
"def",
"class_name",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_registry_key",
"and",
"self",
".",
"_registry",
":",
"self",
".",
"_GetKeyFromRegistry",
"(",
")",
"if",
"not",
"self",
".",
"_registry_key",
":",
"return",
"None",
"return",
"self",
".",
"_registry_key",
".",
"class_name"
] | 27.888889 | 16.555556 |
def _transform_col(self, x, i):
"""Encode one categorical column into sparse matrix with one-hot-encoding.
Args:
x (pandas.Series): a categorical column to encode
i (int): column index
Returns:
X (scipy.sparse.coo_matrix): sparse matrix encoding a categorical
variable into dummy variables
"""
labels = self.label_encoder._transform_col(x, i)
label_max = self.label_encoder.label_maxes[i]
# build row and column index for non-zero values of a sparse matrix
index = np.array(range(len(labels)))
i = index[labels > 0]
j = labels[labels > 0] - 1 # column index starts from 0
if len(i) > 0:
return sparse.coo_matrix((np.ones_like(i), (i, j)),
shape=(x.shape[0], label_max))
else:
# if there is no non-zero value, return no matrix
return None
|
[
"def",
"_transform_col",
"(",
"self",
",",
"x",
",",
"i",
")",
":",
"labels",
"=",
"self",
".",
"label_encoder",
".",
"_transform_col",
"(",
"x",
",",
"i",
")",
"label_max",
"=",
"self",
".",
"label_encoder",
".",
"label_maxes",
"[",
"i",
"]",
"# build row and column index for non-zero values of a sparse matrix",
"index",
"=",
"np",
".",
"array",
"(",
"range",
"(",
"len",
"(",
"labels",
")",
")",
")",
"i",
"=",
"index",
"[",
"labels",
">",
"0",
"]",
"j",
"=",
"labels",
"[",
"labels",
">",
"0",
"]",
"-",
"1",
"# column index starts from 0",
"if",
"len",
"(",
"i",
")",
">",
"0",
":",
"return",
"sparse",
".",
"coo_matrix",
"(",
"(",
"np",
".",
"ones_like",
"(",
"i",
")",
",",
"(",
"i",
",",
"j",
")",
")",
",",
"shape",
"=",
"(",
"x",
".",
"shape",
"[",
"0",
"]",
",",
"label_max",
")",
")",
"else",
":",
"# if there is no non-zero value, return no matrix",
"return",
"None"
] | 37.076923 | 22.730769 |
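A self-contained illustration of the sparse construction used above: a label of 0 marks a row with no known category (the row stays all-zero), while labels 1..label_max map to columns 0..label_max-1.

import numpy as np
from scipy import sparse

labels = np.array([2, 0, 1, 3, 1])    # example label-encoded column (0 = unseen category)
label_max = 3

index = np.arange(len(labels))
i = index[labels > 0]                 # row indices that receive a 1
j = labels[labels > 0] - 1            # column index starts from 0
X = sparse.coo_matrix((np.ones_like(i), (i, j)), shape=(len(labels), label_max))

print(X.toarray())
# [[0 1 0]
#  [0 0 0]
#  [1 0 0]
#  [0 0 1]
#  [1 0 0]]
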
def get_subdomain_ops_at_txid(txid, proxy=None, hostport=None):
"""
Get the list of subdomain operations added by a txid
Returns the list of operations ([{...}]) on success
Returns {'error': ...} on failure
"""
assert proxy or hostport, 'Need proxy or hostport'
if proxy is None:
proxy = connect_hostport(hostport)
subdomain_ops_schema = {
'type': 'object',
'properties': {
'subdomain_ops': {
'type': 'array',
'items': {
'type': 'object',
'properties': OP_HISTORY_SCHEMA['properties'],
'required': SUBDOMAIN_HISTORY_REQUIRED,
},
},
},
'required': ['subdomain_ops'],
}
schema = json_response_schema(subdomain_ops_schema)
resp = {}
try:
resp = proxy.get_subdomain_ops_at_txid(txid)
resp = json_validate(schema, resp)
if json_is_error(resp):
return resp
# names must be valid
for op in resp['subdomain_ops']:
assert is_subdomain(str(op['fully_qualified_subdomain'])), ('Invalid subdomain "{}"'.format(op['fully_qualified_subdomain']))
except ValidationError as ve:
if BLOCKSTACK_DEBUG:
log.exception(ve)
resp = {'error': 'Server response did not match expected schema. You are likely communicating with an out-of-date Blockstack node.', 'http_status': 502}
return resp
except AssertionError as e:
if BLOCKSTACK_DEBUG:
log.exception(e)
resp = {'error': 'Server response included an invalid subdomain', 'http_status': 500}
return resp
except socket.timeout:
log.error("Connection timed out")
resp = {'error': 'Connection to remote host timed out.', 'http_status': 503}
return resp
except socket.error as se:
log.error("Connection error {}".format(se.errno))
resp = {'error': 'Connection to remote host failed.', 'http_status': 502}
return resp
except Exception as ee:
if BLOCKSTACK_DEBUG:
log.exception(ee)
log.error("Caught exception while connecting to Blockstack node: {}".format(ee))
resp = {'error': 'Failed to contact Blockstack node. Try again with `--debug`.', 'http_status': 500}
return resp
return resp['subdomain_ops']
|
[
"def",
"get_subdomain_ops_at_txid",
"(",
"txid",
",",
"proxy",
"=",
"None",
",",
"hostport",
"=",
"None",
")",
":",
"assert",
"proxy",
"or",
"hostport",
",",
"'Need proxy or hostport'",
"if",
"proxy",
"is",
"None",
":",
"proxy",
"=",
"connect_hostport",
"(",
"hostport",
")",
"subdomain_ops_schema",
"=",
"{",
"'type'",
":",
"'object'",
",",
"'properties'",
":",
"{",
"'subdomain_ops'",
":",
"{",
"'type'",
":",
"'array'",
",",
"'items'",
":",
"{",
"'type'",
":",
"'object'",
",",
"'properties'",
":",
"OP_HISTORY_SCHEMA",
"[",
"'properties'",
"]",
",",
"'required'",
":",
"SUBDOMAIN_HISTORY_REQUIRED",
",",
"}",
",",
"}",
",",
"}",
",",
"'required'",
":",
"[",
"'subdomain_ops'",
"]",
",",
"}",
"schema",
"=",
"json_response_schema",
"(",
"subdomain_ops_schema",
")",
"resp",
"=",
"{",
"}",
"try",
":",
"resp",
"=",
"proxy",
".",
"get_subdomain_ops_at_txid",
"(",
"txid",
")",
"resp",
"=",
"json_validate",
"(",
"schema",
",",
"resp",
")",
"if",
"json_is_error",
"(",
"resp",
")",
":",
"return",
"resp",
"# names must be valid",
"for",
"op",
"in",
"resp",
"[",
"'subdomain_ops'",
"]",
":",
"assert",
"is_subdomain",
"(",
"str",
"(",
"op",
"[",
"'fully_qualified_subdomain'",
"]",
")",
")",
",",
"(",
"'Invalid subdomain \"{}\"'",
".",
"format",
"(",
"op",
"[",
"'fully_qualified_subdomain'",
"]",
")",
")",
"except",
"ValidationError",
"as",
"ve",
":",
"if",
"BLOCKSTACK_DEBUG",
":",
"log",
".",
"exception",
"(",
"ve",
")",
"resp",
"=",
"{",
"'error'",
":",
"'Server response did not match expected schema. You are likely communicating with an out-of-date Blockstack node.'",
",",
"'http_status'",
":",
"502",
"}",
"return",
"resp",
"except",
"AssertionError",
"as",
"e",
":",
"if",
"BLOCKSTACK_DEBUG",
":",
"log",
".",
"exception",
"(",
"e",
")",
"resp",
"=",
"{",
"'error'",
":",
"'Server response included an invalid subdomain'",
",",
"'http_status'",
":",
"500",
"}",
"return",
"resp",
"except",
"socket",
".",
"timeout",
":",
"log",
".",
"error",
"(",
"\"Connection timed out\"",
")",
"resp",
"=",
"{",
"'error'",
":",
"'Connection to remote host timed out.'",
",",
"'http_status'",
":",
"503",
"}",
"return",
"resp",
"except",
"socket",
".",
"error",
"as",
"se",
":",
"log",
".",
"error",
"(",
"\"Connection error {}\"",
".",
"format",
"(",
"se",
".",
"errno",
")",
")",
"resp",
"=",
"{",
"'error'",
":",
"'Connection to remote host failed.'",
",",
"'http_status'",
":",
"502",
"}",
"return",
"resp",
"except",
"Exception",
"as",
"ee",
":",
"if",
"BLOCKSTACK_DEBUG",
":",
"log",
".",
"exception",
"(",
"ee",
")",
"log",
".",
"error",
"(",
"\"Caught exception while connecting to Blockstack node: {}\"",
".",
"format",
"(",
"ee",
")",
")",
"resp",
"=",
"{",
"'error'",
":",
"'Failed to contact Blockstack node. Try again with `--debug`.'",
",",
"'http_status'",
":",
"500",
"}",
"return",
"resp",
"return",
"resp",
"[",
"'subdomain_ops'",
"]"
] | 32.971831 | 23.985915 |
def debug(message, *args, **kwargs):
"""
debug output goes to stderr so you can still redirect the stdout to a file
or another program. Controlled by the JUT_DEBUG environment variable being
present
"""
if 'end' in kwargs:
end = kwargs['end']
else:
end = '\n'
if DEBUG:
if len(args) == 0:
sys.stderr.write(message)
else:
sys.stderr.write(message % args)
sys.stderr.write(end)
sys.stderr.flush()
|
[
"def",
"debug",
"(",
"message",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'end'",
"in",
"kwargs",
":",
"end",
"=",
"kwargs",
"[",
"'end'",
"]",
"else",
":",
"end",
"=",
"'\\n'",
"if",
"DEBUG",
":",
"if",
"len",
"(",
"args",
")",
"==",
"0",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"message",
")",
"else",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"message",
"%",
"args",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"end",
")",
"sys",
".",
"stderr",
".",
"flush",
"(",
")"
] | 24.15 | 20.55 |
def process_hv_plots(widgets, plots):
"""
Temporary fix to patch HoloViews plot comms
"""
bokeh_plots = []
for plot in plots:
if hasattr(plot, '_update_callbacks'):
for subplot in plot.traverse(lambda x: x):
subplot.comm = widgets.server_comm
for cb in subplot.callbacks:
for c in cb.callbacks:
c.code = c.code.replace(plot.id, widgets.plot_id)
plot = plot.state
bokeh_plots.append(plot)
return bokeh_plots
|
[
"def",
"process_hv_plots",
"(",
"widgets",
",",
"plots",
")",
":",
"bokeh_plots",
"=",
"[",
"]",
"for",
"plot",
"in",
"plots",
":",
"if",
"hasattr",
"(",
"plot",
",",
"'_update_callbacks'",
")",
":",
"for",
"subplot",
"in",
"plot",
".",
"traverse",
"(",
"lambda",
"x",
":",
"x",
")",
":",
"subplot",
".",
"comm",
"=",
"widgets",
".",
"server_comm",
"for",
"cb",
"in",
"subplot",
".",
"callbacks",
":",
"for",
"c",
"in",
"cb",
".",
"callbacks",
":",
"c",
".",
"code",
"=",
"c",
".",
"code",
".",
"replace",
"(",
"plot",
".",
"id",
",",
"widgets",
".",
"plot_id",
")",
"plot",
"=",
"plot",
".",
"state",
"bokeh_plots",
".",
"append",
"(",
"plot",
")",
"return",
"bokeh_plots"
] | 35.466667 | 10.266667 |
def xml(self, url, method='get', params=None, data=None):
"""
        Make the request and return the parsed XML.
:type url: str
:param url: API
:type method: str
:param method: HTTP METHOD
:type params: dict
:param params: query
:type data: dict
:param data: body
:rtype: html.HtmlElement
:return:
"""
r = self.req(url, method, params, data)
        # this is required to avoid utf8-mb4 causing an encoding error
return self.to_xml(r.content, base_url=r.url)
|
[
"def",
"xml",
"(",
"self",
",",
"url",
",",
"method",
"=",
"'get'",
",",
"params",
"=",
"None",
",",
"data",
"=",
"None",
")",
":",
"r",
"=",
"self",
".",
"req",
"(",
"url",
",",
"method",
",",
"params",
",",
"data",
")",
"# this is required for avoid utf8-mb4 lead to encoding error",
"return",
"self",
".",
"to_xml",
"(",
"r",
".",
"content",
",",
"base_url",
"=",
"r",
".",
"url",
")"
] | 25.409091 | 17.863636 |
def get_next_types(self, n=None):
"""Gets the next set of ``Types`` in this list.
The specified amount must be less than or equal to the return
from ``available()``.
arg: n (cardinal): the number of ``Type`` elements requested
which must be less than or equal to ``available()``
        return: (osid.type.Type) - an array of ``Type`` elements. The
length of the array is less than or equal to the number
specified.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
if n > self.available():
# !!! This is not quite as specified (see method docs) !!!
raise IllegalState('not enough elements available in this list')
else:
next_list = []
i = 0
while i < n:
try:
next_list.append(next(self))
except: # Need to specify exceptions here
raise OperationFailed()
i += 1
return next_list
|
[
"def",
"get_next_types",
"(",
"self",
",",
"n",
"=",
"None",
")",
":",
"if",
"n",
">",
"self",
".",
"available",
"(",
")",
":",
"# !!! This is not quite as specified (see method docs) !!!",
"raise",
"IllegalState",
"(",
"'not enough elements available in this list'",
")",
"else",
":",
"next_list",
"=",
"[",
"]",
"i",
"=",
"0",
"while",
"i",
"<",
"n",
":",
"try",
":",
"next_list",
".",
"append",
"(",
"next",
"(",
"self",
")",
")",
"except",
":",
"# Need to specify exceptions here",
"raise",
"OperationFailed",
"(",
")",
"i",
"+=",
"1",
"return",
"next_list"
] | 40.482759 | 20.965517 |
def cg(output,
show,
verbose,
classname,
methodname,
descriptor,
accessflag,
no_isolated,
apk):
"""
Create a call graph and export it into a graph format.
    Classnames are found in the type "Lfoo/bar/bla;".
Example:
\b
$ androguard cg APK
"""
androcg_main(verbose=verbose,
APK=apk,
classname=classname,
methodname=methodname,
descriptor=descriptor,
accessflag=accessflag,
no_isolated=no_isolated,
show=show,
output=output)
|
[
"def",
"cg",
"(",
"output",
",",
"show",
",",
"verbose",
",",
"classname",
",",
"methodname",
",",
"descriptor",
",",
"accessflag",
",",
"no_isolated",
",",
"apk",
")",
":",
"androcg_main",
"(",
"verbose",
"=",
"verbose",
",",
"APK",
"=",
"apk",
",",
"classname",
"=",
"classname",
",",
"methodname",
"=",
"methodname",
",",
"descriptor",
"=",
"descriptor",
",",
"accessflag",
"=",
"accessflag",
",",
"no_isolated",
"=",
"no_isolated",
",",
"show",
"=",
"show",
",",
"output",
"=",
"output",
")"
] | 22.285714 | 17.642857 |
def process_event(event_filter, callback, timeout_callback, timeout, args,
start_time=None):
"""
Start to watch one event.
:param event_filter:
:param callback:
:param timeout_callback:
:param timeout:
:param args:
:param start_time:
:return:
"""
try:
events = event_filter.get_all_entries()
if events:
callback(events[0], *args)
return True
except (ValueError, Exception) as err:
# ignore error, but log it
logger.debug(f'Got error grabbing keeper events: {str(err)}')
if timeout:
elapsed = int(datetime.now().timestamp()) - start_time
if elapsed > timeout:
if timeout_callback:
timeout_callback(*args)
else:
callback(None, *args)
return True
return False
|
[
"def",
"process_event",
"(",
"event_filter",
",",
"callback",
",",
"timeout_callback",
",",
"timeout",
",",
"args",
",",
"start_time",
"=",
"None",
")",
":",
"try",
":",
"events",
"=",
"event_filter",
".",
"get_all_entries",
"(",
")",
"if",
"events",
":",
"callback",
"(",
"events",
"[",
"0",
"]",
",",
"*",
"args",
")",
"return",
"True",
"except",
"(",
"ValueError",
",",
"Exception",
")",
"as",
"err",
":",
"# ignore error, but log it",
"logger",
".",
"debug",
"(",
"f'Got error grabbing keeper events: {str(err)}'",
")",
"if",
"timeout",
":",
"elapsed",
"=",
"int",
"(",
"datetime",
".",
"now",
"(",
")",
".",
"timestamp",
"(",
")",
")",
"-",
"start_time",
"if",
"elapsed",
">",
"timeout",
":",
"if",
"timeout_callback",
":",
"timeout_callback",
"(",
"*",
"args",
")",
"else",
":",
"callback",
"(",
"None",
",",
"*",
"args",
")",
"return",
"True",
"return",
"False"
] | 28.058824 | 17.058824 |
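A hypothetical polling loop around process_event; DummyFilter and on_event are stand-ins invented for this sketch, not part of the library above, and the filter is written so the sketch runs without any blockchain connection.

import time
from datetime import datetime

class DummyFilter:
    # stand-in for a web3-style event filter; returns a single fake event
    def get_all_entries(self):
        return [{"event": "ExampleEvent", "blockNumber": 1}]

def on_event(event, *args):
    print("got event:", event)

start_time = int(datetime.now().timestamp())
while not process_event(DummyFilter(), on_event, None, 30, (), start_time=start_time):
    time.sleep(0.5)   # back off between polls
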
def dirsWavFeatureExtraction(dirNames, mt_win, mt_step, st_win, st_step, compute_beat=False):
'''
Same as dirWavFeatureExtraction, but instead of a single dir it
takes a list of paths as input and returns a list of feature matrices.
EXAMPLE:
[features, classNames] =
a.dirsWavFeatureExtraction(['audioData/classSegmentsRec/noise','audioData/classSegmentsRec/speech',
'audioData/classSegmentsRec/brush-teeth','audioData/classSegmentsRec/shower'], 1, 1, 0.02, 0.02);
    It can be used during the training process of a classification model,
    in order to get feature matrices from various audio classes (each stored in a separate path)
'''
# feature extraction for each class:
features = []
classNames = []
fileNames = []
for i, d in enumerate(dirNames):
[f, fn, feature_names] = dirWavFeatureExtraction(d, mt_win, mt_step,
st_win, st_step,
compute_beat=compute_beat)
if f.shape[0] > 0:
# if at least one audio file has been found in the provided folder:
features.append(f)
fileNames.append(fn)
if d[-1] == os.sep:
classNames.append(d.split(os.sep)[-2])
else:
classNames.append(d.split(os.sep)[-1])
return features, classNames, fileNames
|
[
"def",
"dirsWavFeatureExtraction",
"(",
"dirNames",
",",
"mt_win",
",",
"mt_step",
",",
"st_win",
",",
"st_step",
",",
"compute_beat",
"=",
"False",
")",
":",
"# feature extraction for each class:",
"features",
"=",
"[",
"]",
"classNames",
"=",
"[",
"]",
"fileNames",
"=",
"[",
"]",
"for",
"i",
",",
"d",
"in",
"enumerate",
"(",
"dirNames",
")",
":",
"[",
"f",
",",
"fn",
",",
"feature_names",
"]",
"=",
"dirWavFeatureExtraction",
"(",
"d",
",",
"mt_win",
",",
"mt_step",
",",
"st_win",
",",
"st_step",
",",
"compute_beat",
"=",
"compute_beat",
")",
"if",
"f",
".",
"shape",
"[",
"0",
"]",
">",
"0",
":",
"# if at least one audio file has been found in the provided folder:",
"features",
".",
"append",
"(",
"f",
")",
"fileNames",
".",
"append",
"(",
"fn",
")",
"if",
"d",
"[",
"-",
"1",
"]",
"==",
"os",
".",
"sep",
":",
"classNames",
".",
"append",
"(",
"d",
".",
"split",
"(",
"os",
".",
"sep",
")",
"[",
"-",
"2",
"]",
")",
"else",
":",
"classNames",
".",
"append",
"(",
"d",
".",
"split",
"(",
"os",
".",
"sep",
")",
"[",
"-",
"1",
"]",
")",
"return",
"features",
",",
"classNames",
",",
"fileNames"
] | 47.7 | 29.033333 |
def user_parse(data):
"""Parse information from the provider."""
yield 'id', data.get('uuid')
yield 'username', data.get('username')
yield 'last_name', data.get('display_name')
links = data.get('links', {})
yield 'picture', links.get('avatar', {}).get('href')
yield 'link', links.get('html', {}).get('href')
|
[
"def",
"user_parse",
"(",
"data",
")",
":",
"yield",
"'id'",
",",
"data",
".",
"get",
"(",
"'uuid'",
")",
"yield",
"'username'",
",",
"data",
".",
"get",
"(",
"'username'",
")",
"yield",
"'last_name'",
",",
"data",
".",
"get",
"(",
"'display_name'",
")",
"links",
"=",
"data",
".",
"get",
"(",
"'links'",
",",
"{",
"}",
")",
"yield",
"'picture'",
",",
"links",
".",
"get",
"(",
"'avatar'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'href'",
")",
"yield",
"'link'",
",",
"links",
".",
"get",
"(",
"'html'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'href'",
")"
] | 44.5 | 9.75 |
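user_parse is a generator of (field, value) pairs, so it is normally consumed with dict(); the payload below imitates a Bitbucket-style user response and is only illustrative, assuming the function above is importable as shown.

payload = {
    "uuid": "{1234-5678}",
    "username": "jdoe",
    "display_name": "Jane Doe",
    "links": {
        "avatar": {"href": "https://example.com/avatar.png"},
        "html": {"href": "https://example.com/jdoe"},
    },
}

print(dict(user_parse(payload)))
# {'id': '{1234-5678}', 'username': 'jdoe', 'last_name': 'Jane Doe',
#  'picture': 'https://example.com/avatar.png', 'link': 'https://example.com/jdoe'}
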
def _jobresult(self, jobid, json=True, headers=None):
"""Poll the async job result.
        To be run in a Thread; the result is put into
        the result list, which is a hack.
"""
failures = 0
total_time = self.job_timeout or 2**30
remaining = timedelta(seconds=total_time)
endtime = datetime.now() + remaining
while remaining.total_seconds() > 0:
timeout = max(min(self.timeout, remaining.total_seconds()), 1)
try:
kind, params = self._prepare_request('queryAsyncJobResult',
jobid=jobid)
transform(params)
params['signature'] = self._sign(params)
req = requests.Request(self.method,
self.endpoint,
headers=headers,
**{kind: params})
prepped = req.prepare()
if self.trace:
print(prepped.method, prepped.url, file=sys.stderr)
if prepped.headers:
print(prepped.headers, "\n", file=sys.stderr)
if prepped.body:
print(prepped.body, file=sys.stderr)
else:
print(file=sys.stderr)
with requests.Session() as session:
response = session.send(prepped,
timeout=timeout,
verify=self.verify,
cert=self.cert)
j = self._response_value(response, json)
if self.trace:
print(response.status_code, response.reason,
file=sys.stderr)
headersTrace = "\n".join(
"{}: {}".format(k, v)
for k, v in response.headers.items())
print(headersTrace, "\n", file=sys.stderr)
print(response.text, "\n", file=sys.stderr)
failures = 0
if j['jobstatus'] != PENDING:
if j['jobresultcode'] or j['jobstatus'] != SUCCESS:
raise CloudStackException("Job failure",
response=response)
if 'jobresult' not in j:
raise CloudStackException("Unknown job result",
response=response)
return j['jobresult']
except CloudStackException:
raise
except Exception as e:
failures += 1
if failures > 10:
raise e
time.sleep(self.poll_interval)
remaining = endtime - datetime.now()
if response:
response.status_code = 408
raise CloudStackException("Timeout waiting for async job result",
jobid,
response=response)
|
[
"def",
"_jobresult",
"(",
"self",
",",
"jobid",
",",
"json",
"=",
"True",
",",
"headers",
"=",
"None",
")",
":",
"failures",
"=",
"0",
"total_time",
"=",
"self",
".",
"job_timeout",
"or",
"2",
"**",
"30",
"remaining",
"=",
"timedelta",
"(",
"seconds",
"=",
"total_time",
")",
"endtime",
"=",
"datetime",
".",
"now",
"(",
")",
"+",
"remaining",
"while",
"remaining",
".",
"total_seconds",
"(",
")",
">",
"0",
":",
"timeout",
"=",
"max",
"(",
"min",
"(",
"self",
".",
"timeout",
",",
"remaining",
".",
"total_seconds",
"(",
")",
")",
",",
"1",
")",
"try",
":",
"kind",
",",
"params",
"=",
"self",
".",
"_prepare_request",
"(",
"'queryAsyncJobResult'",
",",
"jobid",
"=",
"jobid",
")",
"transform",
"(",
"params",
")",
"params",
"[",
"'signature'",
"]",
"=",
"self",
".",
"_sign",
"(",
"params",
")",
"req",
"=",
"requests",
".",
"Request",
"(",
"self",
".",
"method",
",",
"self",
".",
"endpoint",
",",
"headers",
"=",
"headers",
",",
"*",
"*",
"{",
"kind",
":",
"params",
"}",
")",
"prepped",
"=",
"req",
".",
"prepare",
"(",
")",
"if",
"self",
".",
"trace",
":",
"print",
"(",
"prepped",
".",
"method",
",",
"prepped",
".",
"url",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"if",
"prepped",
".",
"headers",
":",
"print",
"(",
"prepped",
".",
"headers",
",",
"\"\\n\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"if",
"prepped",
".",
"body",
":",
"print",
"(",
"prepped",
".",
"body",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"else",
":",
"print",
"(",
"file",
"=",
"sys",
".",
"stderr",
")",
"with",
"requests",
".",
"Session",
"(",
")",
"as",
"session",
":",
"response",
"=",
"session",
".",
"send",
"(",
"prepped",
",",
"timeout",
"=",
"timeout",
",",
"verify",
"=",
"self",
".",
"verify",
",",
"cert",
"=",
"self",
".",
"cert",
")",
"j",
"=",
"self",
".",
"_response_value",
"(",
"response",
",",
"json",
")",
"if",
"self",
".",
"trace",
":",
"print",
"(",
"response",
".",
"status_code",
",",
"response",
".",
"reason",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"headersTrace",
"=",
"\"\\n\"",
".",
"join",
"(",
"\"{}: {}\"",
".",
"format",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"response",
".",
"headers",
".",
"items",
"(",
")",
")",
"print",
"(",
"headersTrace",
",",
"\"\\n\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"print",
"(",
"response",
".",
"text",
",",
"\"\\n\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"failures",
"=",
"0",
"if",
"j",
"[",
"'jobstatus'",
"]",
"!=",
"PENDING",
":",
"if",
"j",
"[",
"'jobresultcode'",
"]",
"or",
"j",
"[",
"'jobstatus'",
"]",
"!=",
"SUCCESS",
":",
"raise",
"CloudStackException",
"(",
"\"Job failure\"",
",",
"response",
"=",
"response",
")",
"if",
"'jobresult'",
"not",
"in",
"j",
":",
"raise",
"CloudStackException",
"(",
"\"Unknown job result\"",
",",
"response",
"=",
"response",
")",
"return",
"j",
"[",
"'jobresult'",
"]",
"except",
"CloudStackException",
":",
"raise",
"except",
"Exception",
"as",
"e",
":",
"failures",
"+=",
"1",
"if",
"failures",
">",
"10",
":",
"raise",
"e",
"time",
".",
"sleep",
"(",
"self",
".",
"poll_interval",
")",
"remaining",
"=",
"endtime",
"-",
"datetime",
".",
"now",
"(",
")",
"if",
"response",
":",
"response",
".",
"status_code",
"=",
"408",
"raise",
"CloudStackException",
"(",
"\"Timeout waiting for async job result\"",
",",
"jobid",
",",
"response",
"=",
"response",
")"
] | 38.382716 | 19.098765 |
def reduced_formula(self):
"""
Returns a reduced formula string with appended charge.
"""
reduced_formula = super().reduced_formula
charge = self._charge / self.get_reduced_composition_and_factor()[1]
if charge > 0:
if abs(charge) == 1:
chg_str = "[+]"
else:
chg_str = "[" + formula_double_format(charge, False) + "+]"
elif charge < 0:
if abs(charge) == 1:
chg_str = "[-]"
else:
chg_str = "[{}-]".format(formula_double_format(abs(charge),
False))
else:
chg_str = "(aq)"
return reduced_formula + chg_str
|
[
"def",
"reduced_formula",
"(",
"self",
")",
":",
"reduced_formula",
"=",
"super",
"(",
")",
".",
"reduced_formula",
"charge",
"=",
"self",
".",
"_charge",
"/",
"self",
".",
"get_reduced_composition_and_factor",
"(",
")",
"[",
"1",
"]",
"if",
"charge",
">",
"0",
":",
"if",
"abs",
"(",
"charge",
")",
"==",
"1",
":",
"chg_str",
"=",
"\"[+]\"",
"else",
":",
"chg_str",
"=",
"\"[\"",
"+",
"formula_double_format",
"(",
"charge",
",",
"False",
")",
"+",
"\"+]\"",
"elif",
"charge",
"<",
"0",
":",
"if",
"abs",
"(",
"charge",
")",
"==",
"1",
":",
"chg_str",
"=",
"\"[-]\"",
"else",
":",
"chg_str",
"=",
"\"[{}-]\"",
".",
"format",
"(",
"formula_double_format",
"(",
"abs",
"(",
"charge",
")",
",",
"False",
")",
")",
"else",
":",
"chg_str",
"=",
"\"(aq)\"",
"return",
"reduced_formula",
"+",
"chg_str"
] | 37.1 | 16.7 |
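A standalone illustration of the charge-suffix rule applied above, assuming formula_double_format simply renders the number without a trailing ".0".

def charge_suffix(charge):
    # same branching as the property above, with plain string formatting
    if charge > 0:
        return "[+]" if abs(charge) == 1 else "[{:g}+]".format(charge)
    if charge < 0:
        return "[-]" if abs(charge) == 1 else "[{:g}-]".format(abs(charge))
    return "(aq)"

for q in (3, 1, 0, -1, -2):
    print(q, charge_suffix(q))
# 3 [3+]   1 [+]   0 (aq)   -1 [-]   -2 [2-]
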
def addCity(self, fileName):
"""Add a JSON file and read the users.
        :param fileName: path to the JSON file. This file must contain a list of
            users under the key "users".
:type fileName: str.
"""
with open(fileName) as data_file:
data = load(data_file)
for u in data["users"]:
if not any(d["name"] == u["name"] for d in self.__users):
self.__users.append(u)
|
[
"def",
"addCity",
"(",
"self",
",",
"fileName",
")",
":",
"with",
"open",
"(",
"fileName",
")",
"as",
"data_file",
":",
"data",
"=",
"load",
"(",
"data_file",
")",
"for",
"u",
"in",
"data",
"[",
"\"users\"",
"]",
":",
"if",
"not",
"any",
"(",
"d",
"[",
"\"name\"",
"]",
"==",
"u",
"[",
"\"name\"",
"]",
"for",
"d",
"in",
"self",
".",
"__users",
")",
":",
"self",
".",
"__users",
".",
"append",
"(",
"u",
")"
] | 36.083333 | 13.5 |
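A sketch of the JSON layout addCity expects: a top-level "users" list whose entries carry at least a "name" key, which is used for de-duplication. The instance in the last line is hypothetical and therefore left commented out.

import json
import tempfile

city = {"users": [{"name": "alice", "age": 31}, {"name": "bob", "age": 27}]}

with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as fh:
    json.dump(city, fh)
    path = fh.name

# reader.addCity(path)   # `reader` stands for an instance of the class defining addCity
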
def _as_label(self, index_or_label):
"""Convert index to label."""
if isinstance(index_or_label, str):
return index_or_label
if isinstance(index_or_label, numbers.Integral):
return self.labels[index_or_label]
else:
raise ValueError(str(index_or_label) + ' is not a label or index')
|
[
"def",
"_as_label",
"(",
"self",
",",
"index_or_label",
")",
":",
"if",
"isinstance",
"(",
"index_or_label",
",",
"str",
")",
":",
"return",
"index_or_label",
"if",
"isinstance",
"(",
"index_or_label",
",",
"numbers",
".",
"Integral",
")",
":",
"return",
"self",
".",
"labels",
"[",
"index_or_label",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"str",
"(",
"index_or_label",
")",
"+",
"' is not a label or index'",
")"
] | 42.75 | 12.625 |
def snip_line(line, max_width, split_at):
"""Shorten a line to a maximum length."""
if len(line) < max_width:
return line
return line[:split_at] + " … " \
+ line[-(max_width - split_at - 3):]
|
[
"def",
"snip_line",
"(",
"line",
",",
"max_width",
",",
"split_at",
")",
":",
"if",
"len",
"(",
"line",
")",
"<",
"max_width",
":",
"return",
"line",
"return",
"line",
"[",
":",
"split_at",
"]",
"+",
"\" … \" \\",
"+",
"line",
"[",
"-",
"(",
"max_width",
"-",
"split_at",
"-",
"3",
")",
":",
"]"
] | 35.666667 | 6.833333 |
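A worked example of snip_line: the first split_at characters are kept, an ellipsis is inserted, and the tail is trimmed so the result stays within max_width.

line = "abcdefghijklmnopqrstuvwxyz"

print(snip_line(line, max_width=15, split_at=5))     # 'abcde … tuvwxyz'  (5 + 3 + 7 = 15 chars)
print(snip_line("short", max_width=15, split_at=5))  # 'short' (already under max_width)
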
def release(self, force=False):
"""Release an exclusive lock on this integration task.
Unless forcing, if we are not the current owners of the lock a Locked exception will be raised.
"""
D = self.__class__
collection = self.get_collection()
identity = self.Lock()
query = D.id == self
if not force:
query &= D.lock.instance == identity.instance
previous = collection.find_one_and_update(query, {'$unset': {~D.lock: True}}, {~D.lock: True})
if previous is None:
lock = getattr(self.find_one(self, projection={~D.lock: True}), 'lock', None)
raise self.Locked("Unable to release lock.", lock)
lock = self.Lock.from_mongo(previous[~D.lock])
if lock and lock.expires <= identity.time:
lock.expired(self)
identity.released(self, force)
|
[
"def",
"release",
"(",
"self",
",",
"force",
"=",
"False",
")",
":",
"D",
"=",
"self",
".",
"__class__",
"collection",
"=",
"self",
".",
"get_collection",
"(",
")",
"identity",
"=",
"self",
".",
"Lock",
"(",
")",
"query",
"=",
"D",
".",
"id",
"==",
"self",
"if",
"not",
"force",
":",
"query",
"&=",
"D",
".",
"lock",
".",
"instance",
"==",
"identity",
".",
"instance",
"previous",
"=",
"collection",
".",
"find_one_and_update",
"(",
"query",
",",
"{",
"'$unset'",
":",
"{",
"~",
"D",
".",
"lock",
":",
"True",
"}",
"}",
",",
"{",
"~",
"D",
".",
"lock",
":",
"True",
"}",
")",
"if",
"previous",
"is",
"None",
":",
"lock",
"=",
"getattr",
"(",
"self",
".",
"find_one",
"(",
"self",
",",
"projection",
"=",
"{",
"~",
"D",
".",
"lock",
":",
"True",
"}",
")",
",",
"'lock'",
",",
"None",
")",
"raise",
"self",
".",
"Locked",
"(",
"\"Unable to release lock.\"",
",",
"lock",
")",
"lock",
"=",
"self",
".",
"Lock",
".",
"from_mongo",
"(",
"previous",
"[",
"~",
"D",
".",
"lock",
"]",
")",
"if",
"lock",
"and",
"lock",
".",
"expires",
"<=",
"identity",
".",
"time",
":",
"lock",
".",
"expired",
"(",
"self",
")",
"identity",
".",
"released",
"(",
"self",
",",
"force",
")"
] | 28.444444 | 24.62963 |
def list_firewall_rules(self, retrieve_all=True, **_params):
"""Fetches a list of all firewall rules for a project."""
# Pass filters in "params" argument to do_request
return self.list('firewall_rules', self.firewall_rules_path,
retrieve_all, **_params)
|
[
"def",
"list_firewall_rules",
"(",
"self",
",",
"retrieve_all",
"=",
"True",
",",
"*",
"*",
"_params",
")",
":",
"# Pass filters in \"params\" argument to do_request",
"return",
"self",
".",
"list",
"(",
"'firewall_rules'",
",",
"self",
".",
"firewall_rules_path",
",",
"retrieve_all",
",",
"*",
"*",
"_params",
")"
] | 49.833333 | 19 |
def find_datasets(self, dataset_name=None, **kwargs):
"""Finds and returns all datasets from the database which matches the requirement.
In some case, the data in a dataset can be stored separately for better management.
Parameters
----------
dataset_name : str
The name/key of dataset.
kwargs : other events
Other events, such as description, author and etc (optional).
Returns
--------
params : the parameters, return False if nothing found.
"""
self._fill_project_info(kwargs)
if dataset_name is None:
raise Exception("dataset_name is None, please give a dataset name")
kwargs.update({'dataset_name': dataset_name})
s = time.time()
pc = self.db.Dataset.find(kwargs)
if pc is not None:
dataset_id_list = pc.distinct('dataset_id')
dataset_list = []
for dataset_id in dataset_id_list: # you may have multiple Buckets files
tmp = self.dataset_fs.get(dataset_id).read()
dataset_list.append(self._deserialization(tmp))
else:
print("[Database] FAIL! Cannot find any dataset: {}".format(kwargs))
return False
print("[Database] Find {} datasets SUCCESS, took: {}s".format(len(dataset_list), round(time.time() - s, 2)))
return dataset_list
|
[
"def",
"find_datasets",
"(",
"self",
",",
"dataset_name",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_fill_project_info",
"(",
"kwargs",
")",
"if",
"dataset_name",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"dataset_name is None, please give a dataset name\"",
")",
"kwargs",
".",
"update",
"(",
"{",
"'dataset_name'",
":",
"dataset_name",
"}",
")",
"s",
"=",
"time",
".",
"time",
"(",
")",
"pc",
"=",
"self",
".",
"db",
".",
"Dataset",
".",
"find",
"(",
"kwargs",
")",
"if",
"pc",
"is",
"not",
"None",
":",
"dataset_id_list",
"=",
"pc",
".",
"distinct",
"(",
"'dataset_id'",
")",
"dataset_list",
"=",
"[",
"]",
"for",
"dataset_id",
"in",
"dataset_id_list",
":",
"# you may have multiple Buckets files",
"tmp",
"=",
"self",
".",
"dataset_fs",
".",
"get",
"(",
"dataset_id",
")",
".",
"read",
"(",
")",
"dataset_list",
".",
"append",
"(",
"self",
".",
"_deserialization",
"(",
"tmp",
")",
")",
"else",
":",
"print",
"(",
"\"[Database] FAIL! Cannot find any dataset: {}\"",
".",
"format",
"(",
"kwargs",
")",
")",
"return",
"False",
"print",
"(",
"\"[Database] Find {} datasets SUCCESS, took: {}s\"",
".",
"format",
"(",
"len",
"(",
"dataset_list",
")",
",",
"round",
"(",
"time",
".",
"time",
"(",
")",
"-",
"s",
",",
"2",
")",
")",
")",
"return",
"dataset_list"
] | 37.405405 | 24.351351 |
def process(self, request, item):
"""Process a PayPal direct payment."""
warn_untested()
from paypal.pro.helpers import PayPalWPP
wpp = PayPalWPP(request)
params = self.cleaned_data
params['creditcardtype'] = self.fields['acct'].card_type
params['expdate'] = self.cleaned_data['expdate'].strftime("%m%Y")
params['ipaddress'] = request.META.get("REMOTE_ADDR", "")
params.update(item)
try:
# Create single payment:
if 'billingperiod' not in params:
wpp.doDirectPayment(params)
# Create recurring payment:
else:
wpp.createRecurringPaymentsProfile(params, direct=True)
except PayPalFailure:
return False
return True
|
[
"def",
"process",
"(",
"self",
",",
"request",
",",
"item",
")",
":",
"warn_untested",
"(",
")",
"from",
"paypal",
".",
"pro",
".",
"helpers",
"import",
"PayPalWPP",
"wpp",
"=",
"PayPalWPP",
"(",
"request",
")",
"params",
"=",
"self",
".",
"cleaned_data",
"params",
"[",
"'creditcardtype'",
"]",
"=",
"self",
".",
"fields",
"[",
"'acct'",
"]",
".",
"card_type",
"params",
"[",
"'expdate'",
"]",
"=",
"self",
".",
"cleaned_data",
"[",
"'expdate'",
"]",
".",
"strftime",
"(",
"\"%m%Y\"",
")",
"params",
"[",
"'ipaddress'",
"]",
"=",
"request",
".",
"META",
".",
"get",
"(",
"\"REMOTE_ADDR\"",
",",
"\"\"",
")",
"params",
".",
"update",
"(",
"item",
")",
"try",
":",
"# Create single payment:",
"if",
"'billingperiod'",
"not",
"in",
"params",
":",
"wpp",
".",
"doDirectPayment",
"(",
"params",
")",
"# Create recurring payment:",
"else",
":",
"wpp",
".",
"createRecurringPaymentsProfile",
"(",
"params",
",",
"direct",
"=",
"True",
")",
"except",
"PayPalFailure",
":",
"return",
"False",
"return",
"True"
] | 35.454545 | 16.545455 |
def is_device_virtual(self):
"""Returns if the device is physical or virtual. """
for ip in self.obj_dict:
drvr_obj = self.obj_dict.get(ip).get('drvr_obj')
ret = drvr_obj.is_device_virtual()
# No way to pin a device as of now, so return the first
# TODO(padkrish)
return ret
|
[
"def",
"is_device_virtual",
"(",
"self",
")",
":",
"for",
"ip",
"in",
"self",
".",
"obj_dict",
":",
"drvr_obj",
"=",
"self",
".",
"obj_dict",
".",
"get",
"(",
"ip",
")",
".",
"get",
"(",
"'drvr_obj'",
")",
"ret",
"=",
"drvr_obj",
".",
"is_device_virtual",
"(",
")",
"# No way to pin a device as of now, so return the first",
"# TODO(padkrish)",
"return",
"ret"
] | 42.875 | 12.875 |
def update(old_template=None, old_version=None, new_template=None, new_version=None,
enter_parameters=False):
"""Updates the temple project to the latest template
    Proceeds in the following steps:
1. Ensure we are inside the project repository
2. Obtain the latest version of the package template
3. If the package is up to date with the latest template, return
4. If not, create an empty template branch with a new copy of the old template
5. Create an update branch from HEAD and merge in the new template copy
6. Create a new copy of the new template and merge into the empty template branch
7. Merge the updated empty template branch into the update branch
8. Ensure temple.yaml reflects what is in the template branch
9. Remove the empty template branch
Note that the `temple.constants.TEMPLE_ENV_VAR` is set to 'update' for the
duration of this function.
Two branches will be created during the update process, one named
``_temple_update`` and one named ``_temple_update_temp``. At the end of
the process, ``_temple_update_temp`` will be removed automatically. The
work will be left in ``_temple_update`` in an uncommitted state for
review. The update will fail early if either of these branches exist
before the process starts.
Args:
old_template (str, default=None): The old template from which to update. Defaults
to the template in temple.yaml
old_version (str, default=None): The old version of the template. Defaults to
the version in temple.yaml
new_template (str, default=None): The new template for updating. Defaults to the
template in temple.yaml
new_version (str, default=None): The new version of the new template to update.
Defaults to the latest version of the new template
enter_parameters (bool, default=False): Force entering template parameters for the project
Raises:
`NotInGitRepoError`: When not inside of a git repository
`InvalidTempleProjectError`: When not inside a valid temple repository
`InDirtyRepoError`: When an update is triggered while the repo is in a dirty state
`ExistingBranchError`: When an update is triggered and there is an existing
update branch
Returns:
boolean: True if update was performed or False if template was already up to date
"""
update_branch = temple.constants.UPDATE_BRANCH_NAME
temp_update_branch = temple.constants.TEMP_UPDATE_BRANCH_NAME
temple.check.in_git_repo()
temple.check.in_clean_repo()
temple.check.is_temple_project()
temple.check.not_has_branch(update_branch)
temple.check.not_has_branch(temp_update_branch)
temple.check.has_env_vars(temple.constants.GITHUB_API_TOKEN_ENV_VAR)
temple_config = temple.utils.read_temple_config()
old_template = old_template or temple_config['_template']
new_template = new_template or temple_config['_template']
old_version = old_version or temple_config['_version']
new_version = new_version or _get_latest_template_version(new_template)
if new_template == old_template and new_version == old_version and not enter_parameters:
print('No updates have happened to the template, so no files were updated')
return False
print('Creating branch {} for processing the update'.format(update_branch))
temple.utils.shell('git checkout -b {}'.format(update_branch),
stderr=subprocess.DEVNULL)
print('Creating temporary working branch {}'.format(temp_update_branch))
temple.utils.shell('git checkout --orphan {}'.format(temp_update_branch),
stderr=subprocess.DEVNULL)
temple.utils.shell('git rm -rf .',
stdout=subprocess.DEVNULL)
_apply_template(old_template,
'.',
checkout=old_version,
extra_context=temple_config)
temple.utils.shell('git add .')
temple.utils.shell(
'git commit --no-verify -m "Initialize template from version {}"'.format(old_version),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
print('Merge old template history into update branch.')
temple.utils.shell('git checkout {}'.format(update_branch),
stderr=subprocess.DEVNULL)
temple.utils.shell(
'git merge -s ours --no-edit --allow-unrelated-histories {}'.format(temp_update_branch),
stderr=subprocess.DEVNULL)
print('Update template in temporary branch.')
temple.utils.shell('git checkout {}'.format(temp_update_branch),
stderr=subprocess.DEVNULL)
temple.utils.shell('git rm -rf .',
stdout=subprocess.DEVNULL)
# If the cookiecutter.json files have changed or the templates have changed,
# the user will need to re-enter the cookiecutter config
needs_new_cc_config = _needs_new_cc_config_for_update(old_template, old_version,
new_template, new_version)
if needs_new_cc_config:
if old_template != new_template:
cc_config_input_msg = (
'You will be prompted for the parameters of the new template.'
' Please read the docs at https://github.com/{} before entering parameters.'
' Press enter to continue'
).format(temple.utils.get_repo_path(new_template))
else:
cc_config_input_msg = (
'A new template variable has been defined in the updated template.'
' You will be prompted to enter all of the variables again. Variables'
' already configured in your project will have their values set as'
' defaults. Press enter to continue'
)
input(cc_config_input_msg)
# Even if there is no detected need to re-enter the cookiecutter config, the user
# can still re-enter config parameters with the "enter_parameters" flag
if needs_new_cc_config or enter_parameters:
_, temple_config = (
temple.utils.get_cookiecutter_config(new_template,
default_config=temple_config,
version=new_version))
_apply_template(new_template,
'.',
checkout=new_version,
extra_context=temple_config)
temple.utils.write_temple_config(temple_config, new_template, new_version)
temple.utils.shell('git add .')
temple.utils.shell(
'git commit --no-verify -m "Update template to version {}"'.format(new_version),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
print('Merge updated template into update branch.')
temple.utils.shell('git checkout {}'.format(update_branch),
stderr=subprocess.DEVNULL)
temple.utils.shell('git merge --no-commit {}'.format(temp_update_branch),
check=False,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
# The temple.yaml file should always reflect what is in the new template
temple.utils.shell('git checkout --theirs {}'.format(temple.constants.TEMPLE_CONFIG_FILE),
check=False,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
print('Remove temporary template branch {}'.format(temp_update_branch))
temple.utils.shell('git branch -D {}'.format(temp_update_branch),
stdout=subprocess.DEVNULL)
print(textwrap.dedent("""\
Updating complete!
Please review the changes with "git status" for any errors or
conflicts. Once you are satisfied with the changes, add, commit,
push, and open a PR with the branch {}
""").format(update_branch))
return True
|
[
"def",
"update",
"(",
"old_template",
"=",
"None",
",",
"old_version",
"=",
"None",
",",
"new_template",
"=",
"None",
",",
"new_version",
"=",
"None",
",",
"enter_parameters",
"=",
"False",
")",
":",
"update_branch",
"=",
"temple",
".",
"constants",
".",
"UPDATE_BRANCH_NAME",
"temp_update_branch",
"=",
"temple",
".",
"constants",
".",
"TEMP_UPDATE_BRANCH_NAME",
"temple",
".",
"check",
".",
"in_git_repo",
"(",
")",
"temple",
".",
"check",
".",
"in_clean_repo",
"(",
")",
"temple",
".",
"check",
".",
"is_temple_project",
"(",
")",
"temple",
".",
"check",
".",
"not_has_branch",
"(",
"update_branch",
")",
"temple",
".",
"check",
".",
"not_has_branch",
"(",
"temp_update_branch",
")",
"temple",
".",
"check",
".",
"has_env_vars",
"(",
"temple",
".",
"constants",
".",
"GITHUB_API_TOKEN_ENV_VAR",
")",
"temple_config",
"=",
"temple",
".",
"utils",
".",
"read_temple_config",
"(",
")",
"old_template",
"=",
"old_template",
"or",
"temple_config",
"[",
"'_template'",
"]",
"new_template",
"=",
"new_template",
"or",
"temple_config",
"[",
"'_template'",
"]",
"old_version",
"=",
"old_version",
"or",
"temple_config",
"[",
"'_version'",
"]",
"new_version",
"=",
"new_version",
"or",
"_get_latest_template_version",
"(",
"new_template",
")",
"if",
"new_template",
"==",
"old_template",
"and",
"new_version",
"==",
"old_version",
"and",
"not",
"enter_parameters",
":",
"print",
"(",
"'No updates have happened to the template, so no files were updated'",
")",
"return",
"False",
"print",
"(",
"'Creating branch {} for processing the update'",
".",
"format",
"(",
"update_branch",
")",
")",
"temple",
".",
"utils",
".",
"shell",
"(",
"'git checkout -b {}'",
".",
"format",
"(",
"update_branch",
")",
",",
"stderr",
"=",
"subprocess",
".",
"DEVNULL",
")",
"print",
"(",
"'Creating temporary working branch {}'",
".",
"format",
"(",
"temp_update_branch",
")",
")",
"temple",
".",
"utils",
".",
"shell",
"(",
"'git checkout --orphan {}'",
".",
"format",
"(",
"temp_update_branch",
")",
",",
"stderr",
"=",
"subprocess",
".",
"DEVNULL",
")",
"temple",
".",
"utils",
".",
"shell",
"(",
"'git rm -rf .'",
",",
"stdout",
"=",
"subprocess",
".",
"DEVNULL",
")",
"_apply_template",
"(",
"old_template",
",",
"'.'",
",",
"checkout",
"=",
"old_version",
",",
"extra_context",
"=",
"temple_config",
")",
"temple",
".",
"utils",
".",
"shell",
"(",
"'git add .'",
")",
"temple",
".",
"utils",
".",
"shell",
"(",
"'git commit --no-verify -m \"Initialize template from version {}\"'",
".",
"format",
"(",
"old_version",
")",
",",
"stdout",
"=",
"subprocess",
".",
"DEVNULL",
",",
"stderr",
"=",
"subprocess",
".",
"DEVNULL",
")",
"print",
"(",
"'Merge old template history into update branch.'",
")",
"temple",
".",
"utils",
".",
"shell",
"(",
"'git checkout {}'",
".",
"format",
"(",
"update_branch",
")",
",",
"stderr",
"=",
"subprocess",
".",
"DEVNULL",
")",
"temple",
".",
"utils",
".",
"shell",
"(",
"'git merge -s ours --no-edit --allow-unrelated-histories {}'",
".",
"format",
"(",
"temp_update_branch",
")",
",",
"stderr",
"=",
"subprocess",
".",
"DEVNULL",
")",
"print",
"(",
"'Update template in temporary branch.'",
")",
"temple",
".",
"utils",
".",
"shell",
"(",
"'git checkout {}'",
".",
"format",
"(",
"temp_update_branch",
")",
",",
"stderr",
"=",
"subprocess",
".",
"DEVNULL",
")",
"temple",
".",
"utils",
".",
"shell",
"(",
"'git rm -rf .'",
",",
"stdout",
"=",
"subprocess",
".",
"DEVNULL",
")",
"# If the cookiecutter.json files have changed or the templates have changed,",
"# the user will need to re-enter the cookiecutter config",
"needs_new_cc_config",
"=",
"_needs_new_cc_config_for_update",
"(",
"old_template",
",",
"old_version",
",",
"new_template",
",",
"new_version",
")",
"if",
"needs_new_cc_config",
":",
"if",
"old_template",
"!=",
"new_template",
":",
"cc_config_input_msg",
"=",
"(",
"'You will be prompted for the parameters of the new template.'",
"' Please read the docs at https://github.com/{} before entering parameters.'",
"' Press enter to continue'",
")",
".",
"format",
"(",
"temple",
".",
"utils",
".",
"get_repo_path",
"(",
"new_template",
")",
")",
"else",
":",
"cc_config_input_msg",
"=",
"(",
"'A new template variable has been defined in the updated template.'",
"' You will be prompted to enter all of the variables again. Variables'",
"' already configured in your project will have their values set as'",
"' defaults. Press enter to continue'",
")",
"input",
"(",
"cc_config_input_msg",
")",
"# Even if there is no detected need to re-enter the cookiecutter config, the user",
"# can still re-enter config parameters with the \"enter_parameters\" flag",
"if",
"needs_new_cc_config",
"or",
"enter_parameters",
":",
"_",
",",
"temple_config",
"=",
"(",
"temple",
".",
"utils",
".",
"get_cookiecutter_config",
"(",
"new_template",
",",
"default_config",
"=",
"temple_config",
",",
"version",
"=",
"new_version",
")",
")",
"_apply_template",
"(",
"new_template",
",",
"'.'",
",",
"checkout",
"=",
"new_version",
",",
"extra_context",
"=",
"temple_config",
")",
"temple",
".",
"utils",
".",
"write_temple_config",
"(",
"temple_config",
",",
"new_template",
",",
"new_version",
")",
"temple",
".",
"utils",
".",
"shell",
"(",
"'git add .'",
")",
"temple",
".",
"utils",
".",
"shell",
"(",
"'git commit --no-verify -m \"Update template to version {}\"'",
".",
"format",
"(",
"new_version",
")",
",",
"stdout",
"=",
"subprocess",
".",
"DEVNULL",
",",
"stderr",
"=",
"subprocess",
".",
"DEVNULL",
")",
"print",
"(",
"'Merge updated template into update branch.'",
")",
"temple",
".",
"utils",
".",
"shell",
"(",
"'git checkout {}'",
".",
"format",
"(",
"update_branch",
")",
",",
"stderr",
"=",
"subprocess",
".",
"DEVNULL",
")",
"temple",
".",
"utils",
".",
"shell",
"(",
"'git merge --no-commit {}'",
".",
"format",
"(",
"temp_update_branch",
")",
",",
"check",
"=",
"False",
",",
"stdout",
"=",
"subprocess",
".",
"DEVNULL",
",",
"stderr",
"=",
"subprocess",
".",
"DEVNULL",
")",
"# The temple.yaml file should always reflect what is in the new template",
"temple",
".",
"utils",
".",
"shell",
"(",
"'git checkout --theirs {}'",
".",
"format",
"(",
"temple",
".",
"constants",
".",
"TEMPLE_CONFIG_FILE",
")",
",",
"check",
"=",
"False",
",",
"stdout",
"=",
"subprocess",
".",
"DEVNULL",
",",
"stderr",
"=",
"subprocess",
".",
"DEVNULL",
")",
"print",
"(",
"'Remove temporary template branch {}'",
".",
"format",
"(",
"temp_update_branch",
")",
")",
"temple",
".",
"utils",
".",
"shell",
"(",
"'git branch -D {}'",
".",
"format",
"(",
"temp_update_branch",
")",
",",
"stdout",
"=",
"subprocess",
".",
"DEVNULL",
")",
"print",
"(",
"textwrap",
".",
"dedent",
"(",
"\"\"\"\\\n Updating complete!\n\n Please review the changes with \"git status\" for any errors or\n conflicts. Once you are satisfied with the changes, add, commit,\n push, and open a PR with the branch {}\n \"\"\"",
")",
".",
"format",
"(",
"update_branch",
")",
")",
"return",
"True"
] | 47.49697 | 24.515152 |
def total_surface_energy(self):
"""
Total surface energy of the Wulff shape.
Returns:
(float) sum(surface_energy_hkl * area_hkl)
"""
tot_surface_energy = 0
for hkl in self.miller_energy_dict.keys():
tot_surface_energy += self.miller_energy_dict[hkl] * \
self.miller_area_dict[hkl]
return tot_surface_energy
|
[
"def",
"total_surface_energy",
"(",
"self",
")",
":",
"tot_surface_energy",
"=",
"0",
"for",
"hkl",
"in",
"self",
".",
"miller_energy_dict",
".",
"keys",
"(",
")",
":",
"tot_surface_energy",
"+=",
"self",
".",
"miller_energy_dict",
"[",
"hkl",
"]",
"*",
"self",
".",
"miller_area_dict",
"[",
"hkl",
"]",
"return",
"tot_surface_energy"
] | 34.166667 | 14 |
def case(institute_id, case_name):
"""Return a variant."""
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
if case_obj is None:
return abort(404)
return Response(json_util.dumps(case_obj), mimetype='application/json')
|
[
"def",
"case",
"(",
"institute_id",
",",
"case_name",
")",
":",
"institute_obj",
",",
"case_obj",
"=",
"institute_and_case",
"(",
"store",
",",
"institute_id",
",",
"case_name",
")",
"if",
"case_obj",
"is",
"None",
":",
"return",
"abort",
"(",
"404",
")",
"return",
"Response",
"(",
"json_util",
".",
"dumps",
"(",
"case_obj",
")",
",",
"mimetype",
"=",
"'application/json'",
")"
] | 44.166667 | 18.666667 |
def deprecated_arg_names(arg_mapping):
"""
Decorator which marks a functions keyword arguments as deprecated. It will
result in a warning being emitted when the deprecated keyword argument is
used, and the function being called with the new argument.
Parameters
----------
arg_mapping : dict[str, str]
Mapping from deprecated argument name to current argument name.
"""
def decorator(func):
@wraps(func)
def func_wrapper(*args, **kwargs):
warnings.simplefilter(
'always', DeprecationWarning) # turn off filter
for old, new in arg_mapping.items():
if old in kwargs:
warnings.warn(
"Keyword argument '{0}' has been deprecated in favour "
"of '{1}'. '{0}' will be removed in a future version."
.format(old, new),
category=DeprecationWarning,
stacklevel=2,
)
val = kwargs.pop(old)
kwargs[new] = val
warnings.simplefilter(
'default', DeprecationWarning) # reset filter
return func(*args, **kwargs)
return func_wrapper
return decorator
|
[
"def",
"deprecated_arg_names",
"(",
"arg_mapping",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"func_wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"warnings",
".",
"simplefilter",
"(",
"'always'",
",",
"DeprecationWarning",
")",
"# turn off filter",
"for",
"old",
",",
"new",
"in",
"arg_mapping",
".",
"items",
"(",
")",
":",
"if",
"old",
"in",
"kwargs",
":",
"warnings",
".",
"warn",
"(",
"\"Keyword argument '{0}' has been deprecated in favour \"",
"\"of '{1}'. '{0}' will be removed in a future version.\"",
".",
"format",
"(",
"old",
",",
"new",
")",
",",
"category",
"=",
"DeprecationWarning",
",",
"stacklevel",
"=",
"2",
",",
")",
"val",
"=",
"kwargs",
".",
"pop",
"(",
"old",
")",
"kwargs",
"[",
"new",
"]",
"=",
"val",
"warnings",
".",
"simplefilter",
"(",
"'default'",
",",
"DeprecationWarning",
")",
"# reset filter",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"func_wrapper",
"return",
"decorator"
] | 39.65625 | 15.53125 |
def iter_entry_points(cls, target):
"""Yields the name, entry_point pairs of binary targets in this PythonArtifact."""
for name, binary_target in target.provided_binaries.items():
concrete_target = binary_target
if not isinstance(concrete_target, PythonBinary) or concrete_target.entry_point is None:
raise TargetDefinitionException(target,
'Cannot add a binary to a PythonArtifact if it does not contain an entry_point.')
yield name, concrete_target.entry_point
|
[
"def",
"iter_entry_points",
"(",
"cls",
",",
"target",
")",
":",
"for",
"name",
",",
"binary_target",
"in",
"target",
".",
"provided_binaries",
".",
"items",
"(",
")",
":",
"concrete_target",
"=",
"binary_target",
"if",
"not",
"isinstance",
"(",
"concrete_target",
",",
"PythonBinary",
")",
"or",
"concrete_target",
".",
"entry_point",
"is",
"None",
":",
"raise",
"TargetDefinitionException",
"(",
"target",
",",
"'Cannot add a binary to a PythonArtifact if it does not contain an entry_point.'",
")",
"yield",
"name",
",",
"concrete_target",
".",
"entry_point"
] | 62.625 | 18.875 |
def is_topk(self, topk=10, reverse=False):
"""
Create an SArray indicating which elements are in the top k.
Entries are '1' if the corresponding element in the current SArray is a
part of the top k elements, and '0' if that corresponding element is
not. Order is descending by default.
Parameters
----------
topk : int
The number of elements to determine if 'top'
reverse : bool
If True, return the topk elements in ascending order
Returns
-------
out : SArray (of type int)
Notes
-----
This is used internally by SFrame's topk function.
"""
with cython_context():
return SArray(_proxy = self.__proxy__.topk_index(topk, reverse))
|
[
"def",
"is_topk",
"(",
"self",
",",
"topk",
"=",
"10",
",",
"reverse",
"=",
"False",
")",
":",
"with",
"cython_context",
"(",
")",
":",
"return",
"SArray",
"(",
"_proxy",
"=",
"self",
".",
"__proxy__",
".",
"topk_index",
"(",
"topk",
",",
"reverse",
")",
")"
] | 30.038462 | 23.346154 |
def runserver(ctx, conf, port, foreground):
"""Run the fnExchange server"""
config = read_config(conf)
debug = config['conf'].get('debug', False)
click.echo('Debug mode {0}.'.format('on' if debug else 'off'))
port = port or config['conf']['server']['port']
app_settings = {
'debug': debug,
'auto_reload': config['conf']['server'].get('auto_reload', False),
}
handlers_settings = __create_handler_settings(config)
if foreground:
click.echo('Requested mode: foreground')
start_app(port, app_settings, handlers_settings)
else:
click.echo('Requested mode: background')
# subprocess.call([sys.executable, 'yourscript.py'], env=os.environ.copy())
raise NotImplementedError
|
[
"def",
"runserver",
"(",
"ctx",
",",
"conf",
",",
"port",
",",
"foreground",
")",
":",
"config",
"=",
"read_config",
"(",
"conf",
")",
"debug",
"=",
"config",
"[",
"'conf'",
"]",
".",
"get",
"(",
"'debug'",
",",
"False",
")",
"click",
".",
"echo",
"(",
"'Debug mode {0}.'",
".",
"format",
"(",
"'on'",
"if",
"debug",
"else",
"'off'",
")",
")",
"port",
"=",
"port",
"or",
"config",
"[",
"'conf'",
"]",
"[",
"'server'",
"]",
"[",
"'port'",
"]",
"app_settings",
"=",
"{",
"'debug'",
":",
"debug",
",",
"'auto_reload'",
":",
"config",
"[",
"'conf'",
"]",
"[",
"'server'",
"]",
".",
"get",
"(",
"'auto_reload'",
",",
"False",
")",
",",
"}",
"handlers_settings",
"=",
"__create_handler_settings",
"(",
"config",
")",
"if",
"foreground",
":",
"click",
".",
"echo",
"(",
"'Requested mode: foreground'",
")",
"start_app",
"(",
"port",
",",
"app_settings",
",",
"handlers_settings",
")",
"else",
":",
"click",
".",
"echo",
"(",
"'Requested mode: background'",
")",
"# subprocess.call([sys.executable, 'yourscript.py'], env=os.environ.copy())",
"raise",
"NotImplementedError"
] | 33.863636 | 21.545455 |
def get_segmentation(recording, annotations, internal_id=None):
"""
Parameters
----------
recording :
A HandwrittenData object
annotations : list of strings
internal_id : string
An identifier for the dataset, e.g. 'user1/200922-947-111.ink'.
Returns
-------
tuple : segmentation and list of symbol ids (of write-math.com)
"""
global missing_stroke_segmentation, double_segmentation
segmentation = []
symbol_stream = []
needed = list(range(len(recording)))
annotations = filter(lambda n: n.startswith('SYMBOL '), annotations)
for line in annotations:
tmp = line.split("<")[1]
tmp, symbol_string = tmp.split(">")
symbol_string = symbol_string.strip()
strokes = [int(stroke) for stroke in tmp.split(",")
if int(stroke) < len(recording)]
for el in strokes:
if el not in needed:
double_segmentation.append(internal_id)
strokes.remove(el)
logging.debug("invalid segmentation by annotation: %s",
annotations)
else:
needed.remove(el)
segmentation.append(strokes)
symbol_stream.append(datasets.formula_to_dbid(mathbrush_formula_fix(symbol_string), True))
if len(needed) > 0:
# hw = handwritten_data.HandwrittenData(json.dumps(recording))
# hw.show()
missing_stroke_segmentation.append(internal_id)
segmentation.append(needed)
return segmentation, symbol_stream
|
[
"def",
"get_segmentation",
"(",
"recording",
",",
"annotations",
",",
"internal_id",
"=",
"None",
")",
":",
"global",
"missing_stroke_segmentation",
",",
"double_segmentation",
"segmentation",
"=",
"[",
"]",
"symbol_stream",
"=",
"[",
"]",
"needed",
"=",
"list",
"(",
"range",
"(",
"len",
"(",
"recording",
")",
")",
")",
"annotations",
"=",
"filter",
"(",
"lambda",
"n",
":",
"n",
".",
"startswith",
"(",
"'SYMBOL '",
")",
",",
"annotations",
")",
"for",
"line",
"in",
"annotations",
":",
"tmp",
"=",
"line",
".",
"split",
"(",
"\"<\"",
")",
"[",
"1",
"]",
"tmp",
",",
"symbol_string",
"=",
"tmp",
".",
"split",
"(",
"\">\"",
")",
"symbol_string",
"=",
"symbol_string",
".",
"strip",
"(",
")",
"strokes",
"=",
"[",
"int",
"(",
"stroke",
")",
"for",
"stroke",
"in",
"tmp",
".",
"split",
"(",
"\",\"",
")",
"if",
"int",
"(",
"stroke",
")",
"<",
"len",
"(",
"recording",
")",
"]",
"for",
"el",
"in",
"strokes",
":",
"if",
"el",
"not",
"in",
"needed",
":",
"double_segmentation",
".",
"append",
"(",
"internal_id",
")",
"strokes",
".",
"remove",
"(",
"el",
")",
"logging",
".",
"debug",
"(",
"\"invalid segmentation by annotation: %s\"",
",",
"annotations",
")",
"else",
":",
"needed",
".",
"remove",
"(",
"el",
")",
"segmentation",
".",
"append",
"(",
"strokes",
")",
"symbol_stream",
".",
"append",
"(",
"datasets",
".",
"formula_to_dbid",
"(",
"mathbrush_formula_fix",
"(",
"symbol_string",
")",
",",
"True",
")",
")",
"if",
"len",
"(",
"needed",
")",
">",
"0",
":",
"# hw = handwritten_data.HandwrittenData(json.dumps(recording))",
"# hw.show()",
"missing_stroke_segmentation",
".",
"append",
"(",
"internal_id",
")",
"segmentation",
".",
"append",
"(",
"needed",
")",
"return",
"segmentation",
",",
"symbol_stream"
] | 36.309524 | 17.404762 |
def _freq_parser(self, freq):
"""Parse timedelta.
Valid keywords "days", "day", "d", "hours", "hour", "h",
"minutes", "minute", "min", "m", "seconds", "second", "sec", "s",
"weeks", "week", "w",
"""
freq = freq.lower().strip()
valid_keywords = [
"days", "day", "d",
"hours", "hour", "h",
"minutes", "minute", "min", "m",
"seconds", "second", "sec", "s",
"weeks", "week", "w",
]
error_message = "'%s' is invalid, use one of %s" % (
freq, valid_keywords)
try:
# day
for surfix in ["days", "day", "d"]:
if freq.endswith(surfix):
freq = freq.replace(surfix, "")
return timedelta(days=int(freq))
# hour
for surfix in ["hours", "hour", "h"]:
if freq.endswith(surfix):
freq = freq.replace(surfix, "")
return timedelta(hours=int(freq))
# minute
for surfix in ["minutes", "minute", "min", "m"]:
if freq.endswith(surfix):
freq = freq.replace(surfix, "")
return timedelta(minutes=int(freq))
# second
for surfix in ["seconds", "second", "sec", "s"]:
if freq.endswith(surfix):
freq = freq.replace(surfix, "")
return timedelta(seconds=int(freq))
# week
for surfix in ["weeks", "week", "w"]:
if freq.endswith(surfix):
freq = freq.replace(surfix, "")
return timedelta(days=int(freq) * 7)
except:
pass
raise ValueError(error_message)
|
[
"def",
"_freq_parser",
"(",
"self",
",",
"freq",
")",
":",
"freq",
"=",
"freq",
".",
"lower",
"(",
")",
".",
"strip",
"(",
")",
"valid_keywords",
"=",
"[",
"\"days\"",
",",
"\"day\"",
",",
"\"d\"",
",",
"\"hours\"",
",",
"\"hour\"",
",",
"\"h\"",
",",
"\"minutes\"",
",",
"\"minute\"",
",",
"\"min\"",
",",
"\"m\"",
",",
"\"seconds\"",
",",
"\"second\"",
",",
"\"sec\"",
",",
"\"s\"",
",",
"\"weeks\"",
",",
"\"week\"",
",",
"\"w\"",
",",
"]",
"error_message",
"=",
"\"'%s' is invalid, use one of %s\"",
"%",
"(",
"freq",
",",
"valid_keywords",
")",
"try",
":",
"# day",
"for",
"surfix",
"in",
"[",
"\"days\"",
",",
"\"day\"",
",",
"\"d\"",
"]",
":",
"if",
"freq",
".",
"endswith",
"(",
"surfix",
")",
":",
"freq",
"=",
"freq",
".",
"replace",
"(",
"surfix",
",",
"\"\"",
")",
"return",
"timedelta",
"(",
"days",
"=",
"int",
"(",
"freq",
")",
")",
"# hour",
"for",
"surfix",
"in",
"[",
"\"hours\"",
",",
"\"hour\"",
",",
"\"h\"",
"]",
":",
"if",
"freq",
".",
"endswith",
"(",
"surfix",
")",
":",
"freq",
"=",
"freq",
".",
"replace",
"(",
"surfix",
",",
"\"\"",
")",
"return",
"timedelta",
"(",
"hours",
"=",
"int",
"(",
"freq",
")",
")",
"# minute",
"for",
"surfix",
"in",
"[",
"\"minutes\"",
",",
"\"minute\"",
",",
"\"min\"",
",",
"\"m\"",
"]",
":",
"if",
"freq",
".",
"endswith",
"(",
"surfix",
")",
":",
"freq",
"=",
"freq",
".",
"replace",
"(",
"surfix",
",",
"\"\"",
")",
"return",
"timedelta",
"(",
"minutes",
"=",
"int",
"(",
"freq",
")",
")",
"# second",
"for",
"surfix",
"in",
"[",
"\"seconds\"",
",",
"\"second\"",
",",
"\"sec\"",
",",
"\"s\"",
"]",
":",
"if",
"freq",
".",
"endswith",
"(",
"surfix",
")",
":",
"freq",
"=",
"freq",
".",
"replace",
"(",
"surfix",
",",
"\"\"",
")",
"return",
"timedelta",
"(",
"seconds",
"=",
"int",
"(",
"freq",
")",
")",
"# week",
"for",
"surfix",
"in",
"[",
"\"weeks\"",
",",
"\"week\"",
",",
"\"w\"",
"]",
":",
"if",
"freq",
".",
"endswith",
"(",
"surfix",
")",
":",
"freq",
"=",
"freq",
".",
"replace",
"(",
"surfix",
",",
"\"\"",
")",
"return",
"timedelta",
"(",
"days",
"=",
"int",
"(",
"freq",
")",
"*",
"7",
")",
"except",
":",
"pass",
"raise",
"ValueError",
"(",
"error_message",
")"
] | 33.056604 | 16.754717 |
def fix_repeat_dt(dt_list, offset_s=0.001):
"""Add some small offset to remove duplicate times
Needed for xarray interp, which expects monotonically increasing times
"""
idx = (np.diff(dt_list) == timedelta(0))
while np.any(idx):
dt_list[idx.nonzero()[0] + 1] += timedelta(seconds=offset_s)
idx = (np.diff(dt_list) == timedelta(0))
return dt_list
|
[
"def",
"fix_repeat_dt",
"(",
"dt_list",
",",
"offset_s",
"=",
"0.001",
")",
":",
"idx",
"=",
"(",
"np",
".",
"diff",
"(",
"dt_list",
")",
"==",
"timedelta",
"(",
"0",
")",
")",
"while",
"np",
".",
"any",
"(",
"idx",
")",
":",
"dt_list",
"[",
"idx",
".",
"nonzero",
"(",
")",
"[",
"0",
"]",
"+",
"1",
"]",
"+=",
"timedelta",
"(",
"seconds",
"=",
"offset_s",
")",
"idx",
"=",
"(",
"np",
".",
"diff",
"(",
"dt_list",
")",
"==",
"timedelta",
"(",
"0",
")",
")",
"return",
"dt_list"
] | 42 | 13 |
def frombinary(path, ext='bin', conf='conf.json', dtype=None, shape=None, skip=0, index=None, labels=None, engine=None, credentials=None):
"""
Load series data from flat binary files.
Parameters
----------
path : string URI or local filesystem path
Directory to load from, can be a URI string with scheme
(e.g. 'file://', 's3n://', or 'gs://'), or a single file,
or a directory, or a directory with a single wildcard character.
ext : str, optional, default = 'bin'
Optional file extension specifier.
conf : str, optional, default = 'conf.json'
Name of conf file with type and size information.
dtype : dtype or dtype specifier, default 'float64'
Numerical type to use for data after converting from text.
shape : tuple or list, optional, default = None
Shape of data if known, will be inferred otherwise.
skip : int, optional, default = 0
Number of items in each record to skip.
index : array, optional, default = None
Index for records, if not provided will use (0, 1, ...)
labels : array, optional, default = None
Labels for records. If provided, should have shape of shape[:-1].
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
credentials : dict, default = None
Credentials for remote storage (e.g. S3) in the form {access: ***, secret: ***}
"""
shape, dtype = _binaryconfig(path, conf, dtype, shape, credentials)
from thunder.readers import normalize_scheme, get_parallel_reader
path = normalize_scheme(path, ext)
from numpy import dtype as dtype_func
nelements = shape[-1] + skip
recordsize = dtype_func(dtype).itemsize * nelements
if spark and isinstance(engine, spark):
lines = engine.binaryRecords(path, recordsize)
raw = lines.map(lambda x: frombuffer(buffer(x), offset=0, count=nelements, dtype=dtype)[skip:])
def switch(record):
ary, idx = record
return (idx,), ary
rdd = raw.zipWithIndex().map(switch)
if shape and len(shape) > 2:
expand = lambda k: unravel_index(k[0], shape[0:-1])
rdd = rdd.map(lambda kv: (expand(kv[0]), kv[1]))
if not index:
index = arange(shape[-1])
return fromrdd(rdd, dtype=dtype, shape=shape, index=index, ordered=True)
else:
reader = get_parallel_reader(path)(engine, credentials=credentials)
data = reader.read(path, ext=ext)
values = []
for record in data:
buf = record[1]
offset = 0
while offset < len(buf):
v = frombuffer(buffer(buf), offset=offset, count=nelements, dtype=dtype)
values.append(v[skip:])
offset += recordsize
if not len(values) == prod(shape[0:-1]):
raise ValueError('Unexpected shape, got %g records but expected %g'
% (len(values), prod(shape[0:-1])))
values = asarray(values, dtype=dtype)
if shape:
values = values.reshape(shape)
return fromarray(values, index=index, labels=labels)
|
[
"def",
"frombinary",
"(",
"path",
",",
"ext",
"=",
"'bin'",
",",
"conf",
"=",
"'conf.json'",
",",
"dtype",
"=",
"None",
",",
"shape",
"=",
"None",
",",
"skip",
"=",
"0",
",",
"index",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"engine",
"=",
"None",
",",
"credentials",
"=",
"None",
")",
":",
"shape",
",",
"dtype",
"=",
"_binaryconfig",
"(",
"path",
",",
"conf",
",",
"dtype",
",",
"shape",
",",
"credentials",
")",
"from",
"thunder",
".",
"readers",
"import",
"normalize_scheme",
",",
"get_parallel_reader",
"path",
"=",
"normalize_scheme",
"(",
"path",
",",
"ext",
")",
"from",
"numpy",
"import",
"dtype",
"as",
"dtype_func",
"nelements",
"=",
"shape",
"[",
"-",
"1",
"]",
"+",
"skip",
"recordsize",
"=",
"dtype_func",
"(",
"dtype",
")",
".",
"itemsize",
"*",
"nelements",
"if",
"spark",
"and",
"isinstance",
"(",
"engine",
",",
"spark",
")",
":",
"lines",
"=",
"engine",
".",
"binaryRecords",
"(",
"path",
",",
"recordsize",
")",
"raw",
"=",
"lines",
".",
"map",
"(",
"lambda",
"x",
":",
"frombuffer",
"(",
"buffer",
"(",
"x",
")",
",",
"offset",
"=",
"0",
",",
"count",
"=",
"nelements",
",",
"dtype",
"=",
"dtype",
")",
"[",
"skip",
":",
"]",
")",
"def",
"switch",
"(",
"record",
")",
":",
"ary",
",",
"idx",
"=",
"record",
"return",
"(",
"idx",
",",
")",
",",
"ary",
"rdd",
"=",
"raw",
".",
"zipWithIndex",
"(",
")",
".",
"map",
"(",
"switch",
")",
"if",
"shape",
"and",
"len",
"(",
"shape",
")",
">",
"2",
":",
"expand",
"=",
"lambda",
"k",
":",
"unravel_index",
"(",
"k",
"[",
"0",
"]",
",",
"shape",
"[",
"0",
":",
"-",
"1",
"]",
")",
"rdd",
"=",
"rdd",
".",
"map",
"(",
"lambda",
"kv",
":",
"(",
"expand",
"(",
"kv",
"[",
"0",
"]",
")",
",",
"kv",
"[",
"1",
"]",
")",
")",
"if",
"not",
"index",
":",
"index",
"=",
"arange",
"(",
"shape",
"[",
"-",
"1",
"]",
")",
"return",
"fromrdd",
"(",
"rdd",
",",
"dtype",
"=",
"dtype",
",",
"shape",
"=",
"shape",
",",
"index",
"=",
"index",
",",
"ordered",
"=",
"True",
")",
"else",
":",
"reader",
"=",
"get_parallel_reader",
"(",
"path",
")",
"(",
"engine",
",",
"credentials",
"=",
"credentials",
")",
"data",
"=",
"reader",
".",
"read",
"(",
"path",
",",
"ext",
"=",
"ext",
")",
"values",
"=",
"[",
"]",
"for",
"record",
"in",
"data",
":",
"buf",
"=",
"record",
"[",
"1",
"]",
"offset",
"=",
"0",
"while",
"offset",
"<",
"len",
"(",
"buf",
")",
":",
"v",
"=",
"frombuffer",
"(",
"buffer",
"(",
"buf",
")",
",",
"offset",
"=",
"offset",
",",
"count",
"=",
"nelements",
",",
"dtype",
"=",
"dtype",
")",
"values",
".",
"append",
"(",
"v",
"[",
"skip",
":",
"]",
")",
"offset",
"+=",
"recordsize",
"if",
"not",
"len",
"(",
"values",
")",
"==",
"prod",
"(",
"shape",
"[",
"0",
":",
"-",
"1",
"]",
")",
":",
"raise",
"ValueError",
"(",
"'Unexpected shape, got %g records but expected %g'",
"%",
"(",
"len",
"(",
"values",
")",
",",
"prod",
"(",
"shape",
"[",
"0",
":",
"-",
"1",
"]",
")",
")",
")",
"values",
"=",
"asarray",
"(",
"values",
",",
"dtype",
"=",
"dtype",
")",
"if",
"shape",
":",
"values",
"=",
"values",
".",
"reshape",
"(",
"shape",
")",
"return",
"fromarray",
"(",
"values",
",",
"index",
"=",
"index",
",",
"labels",
"=",
"labels",
")"
] | 35.067416 | 23.224719 |
def write(self, obj, value, merge=False):
"""
Returns
object: full copy of new obj
"""
full = deepcopy(obj)
frag = full
parts, last = self.parts[:-1], self.parts[-1]
for part in parts:
if isinstance(frag, dict):
frag = frag[part]
elif isinstance(frag, (list, tuple)):
frag = frag[int(part)]
if isinstance(frag, dict):
if last in frag and merge:
frag[last].update(value)
else:
frag[last] = value
elif isinstance(frag, list):
if last == '-':
frag.append(value)
else:
frag[int(last)] = value
return full
|
[
"def",
"write",
"(",
"self",
",",
"obj",
",",
"value",
",",
"merge",
"=",
"False",
")",
":",
"full",
"=",
"deepcopy",
"(",
"obj",
")",
"frag",
"=",
"full",
"parts",
",",
"last",
"=",
"self",
".",
"parts",
"[",
":",
"-",
"1",
"]",
",",
"self",
".",
"parts",
"[",
"-",
"1",
"]",
"for",
"part",
"in",
"parts",
":",
"if",
"isinstance",
"(",
"frag",
",",
"dict",
")",
":",
"frag",
"=",
"frag",
"[",
"part",
"]",
"elif",
"isinstance",
"(",
"frag",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"frag",
"=",
"frag",
"[",
"int",
"(",
"part",
")",
"]",
"if",
"isinstance",
"(",
"frag",
",",
"dict",
")",
":",
"if",
"last",
"in",
"frag",
"and",
"merge",
":",
"frag",
"[",
"last",
"]",
".",
"update",
"(",
"value",
")",
"else",
":",
"frag",
"[",
"last",
"]",
"=",
"value",
"elif",
"isinstance",
"(",
"frag",
",",
"list",
")",
":",
"if",
"last",
"==",
"'-'",
":",
"frag",
".",
"append",
"(",
"value",
")",
"else",
":",
"frag",
"[",
"int",
"(",
"last",
")",
"]",
"=",
"value",
"return",
"full"
] | 27.296296 | 12.259259 |
def use(self, url, name='mytable'):
'''Changes the data provider
>>> yql.use('http://myserver.com/mytables.xml')
'''
self.yql_table_url = url
self.yql_table_name = name
return {'table url': url, 'table name': name}
|
[
"def",
"use",
"(",
"self",
",",
"url",
",",
"name",
"=",
"'mytable'",
")",
":",
"self",
".",
"yql_table_url",
"=",
"url",
"self",
".",
"yql_table_name",
"=",
"name",
"return",
"{",
"'table url'",
":",
"url",
",",
"'table name'",
":",
"name",
"}"
] | 36.571429 | 11.428571 |
def default_index(func):
"""Decorator assuring the wrapped method may only run if we are the default
repository index. This is as we rely on git commands that operate
on that index only. """
@wraps(func)
def check_default_index(self, *args, **kwargs):
if self._file_path != self._index_path():
raise AssertionError(
"Cannot call %r on indices that do not represent the default git index" % func.__name__)
return func(self, *args, **kwargs)
# END wrapper method
return check_default_index
|
[
"def",
"default_index",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"check_default_index",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_file_path",
"!=",
"self",
".",
"_index_path",
"(",
")",
":",
"raise",
"AssertionError",
"(",
"\"Cannot call %r on indices that do not represent the default git index\"",
"%",
"func",
".",
"__name__",
")",
"return",
"func",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# END wrapper method",
"return",
"check_default_index"
] | 39.142857 | 19.142857 |
def min(self):
"""Minimum value."""
if self._prop.fmin is None:
return -_INF
return self._prop.fmin(self._obj)
|
[
"def",
"min",
"(",
"self",
")",
":",
"if",
"self",
".",
"_prop",
".",
"fmin",
"is",
"None",
":",
"return",
"-",
"_INF",
"return",
"self",
".",
"_prop",
".",
"fmin",
"(",
"self",
".",
"_obj",
")"
] | 28.4 | 9.6 |
def execute(api):
"""Executes operation.
Args:
api: The base API object
Returns:
A response body object
"""
try:
return api.execute()
except Exception as exception:
now = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
_print_error('%s: Exception %s: %s' % (now, type(exception).__name__,
str(exception)))
# Re-raise exception to be handled by retry logic
raise exception
|
[
"def",
"execute",
"(",
"api",
")",
":",
"try",
":",
"return",
"api",
".",
"execute",
"(",
")",
"except",
"Exception",
"as",
"exception",
":",
"now",
"=",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"'%Y-%m-%d %H:%M:%S.%f'",
")",
"_print_error",
"(",
"'%s: Exception %s: %s'",
"%",
"(",
"now",
",",
"type",
"(",
"exception",
")",
".",
"__name__",
",",
"str",
"(",
"exception",
")",
")",
")",
"# Re-raise exception to be handled by retry logic",
"raise",
"exception"
] | 27.588235 | 20.235294 |
def read_sensor(self, device_id, sensor_uri):
"""Return sensor value based on sensor_uri."""
url = MINUT_DEVICES_URL + "/{device_id}/{sensor_uri}".format(
device_id=device_id, sensor_uri=sensor_uri)
res = self._request(url, request_type='GET', data={'limit': 1})
if not res.get('values'):
return None
return res.get('values')[-1].get('value')
|
[
"def",
"read_sensor",
"(",
"self",
",",
"device_id",
",",
"sensor_uri",
")",
":",
"url",
"=",
"MINUT_DEVICES_URL",
"+",
"\"/{device_id}/{sensor_uri}\"",
".",
"format",
"(",
"device_id",
"=",
"device_id",
",",
"sensor_uri",
"=",
"sensor_uri",
")",
"res",
"=",
"self",
".",
"_request",
"(",
"url",
",",
"request_type",
"=",
"'GET'",
",",
"data",
"=",
"{",
"'limit'",
":",
"1",
"}",
")",
"if",
"not",
"res",
".",
"get",
"(",
"'values'",
")",
":",
"return",
"None",
"return",
"res",
".",
"get",
"(",
"'values'",
")",
"[",
"-",
"1",
"]",
".",
"get",
"(",
"'value'",
")"
] | 49.875 | 14.125 |
def connect(
host="localhost",
port=1113,
discovery_host=None,
discovery_port=2113,
username=None,
password=None,
loop=None,
name=None,
selector=select_random,
) -> Client:
""" Create a new client.
Examples:
Since the Client is an async context manager, we can use it in a
with block for automatic connect/disconnect semantics.
>>> async with connect(host='127.0.0.1', port=1113) as c:
>>> await c.ping()
Or we can call connect at a more convenient moment
>>> c = connect()
>>> await c.connect()
>>> await c.ping()
>>> await c.close()
For cluster discovery cases, we can provide a discovery host and
port. The host may be an IP or DNS entry. If you provide a DNS
entry, discovery will choose randomly from the registered IP
addresses for the hostname.
>>> async with connect(discovery_host="eventstore.test") as c:
>>> await c.ping()
The discovery host returns gossip data about the cluster. We use the
gossip to select a node at random from the avaialble cluster members.
If you're using
:meth:`persistent subscriptions <photonpump.connection.Client.create_subscription>`
you will always want to connect to the master node of the cluster.
The selector parameter is a function that chooses an available node from
the gossip result. To select the master node, use the
:func:`photonpump.discovery.prefer_master` function. This function will return
the master node if there is a live master, and a random replica otherwise.
All requests to the server can be made with the require_master flag which
will raise an error if the current node is not a master.
>>> async with connect(
>>> discovery_host="eventstore.test",
>>> selector=discovery.prefer_master,
>>> ) as c:
>>> await c.ping(require_master=True)
Conversely, you might want to avoid connecting to the master node for reasons
of scalability. For this you can use the
:func:`photonpump.discovery.prefer_replica` function.
>>> async with connect(
>>> discovery_host="eventstore.test",
>>> selector=discovery.prefer_replica,
>>> ) as c:
>>> await c.ping()
For some operations, you may need to authenticate your requests by
providing a username and password to the client.
>>> async with connect(username='admin', password='changeit') as c:
>>> await c.ping()
Ordinarily you will create a single Client per application, but for
advanced scenarios you might want multiple connections. In this
situation, you can name each connection in order to get better logging.
>>> async with connect(name="event-reader"):
>>> await c.ping()
>>> async with connect(name="event-writer"):
>>> await c.ping()
Args:
host: The IP or DNS entry to connect with, defaults to 'localhost'.
port: The port to connect with, defaults to 1113.
discovery_host: The IP or DNS entry to use for cluster discovery.
discovery_port: The port to use for cluster discovery, defaults to 2113.
username: The username to use when communicating with eventstore.
password: The password to use when communicating with eventstore.
loop:An Asyncio event loop.
selector: An optional function that selects one element from a list of
:class:`photonpump.disovery.DiscoveredNode` elements.
"""
discovery = get_discoverer(host, port, discovery_host, discovery_port, selector)
dispatcher = MessageDispatcher(name=name, loop=loop)
connector = Connector(discovery, dispatcher, name=name)
credential = msg.Credential(username, password) if username and password else None
return Client(connector, dispatcher, credential=credential)
|
[
"def",
"connect",
"(",
"host",
"=",
"\"localhost\"",
",",
"port",
"=",
"1113",
",",
"discovery_host",
"=",
"None",
",",
"discovery_port",
"=",
"2113",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
",",
"loop",
"=",
"None",
",",
"name",
"=",
"None",
",",
"selector",
"=",
"select_random",
",",
")",
"->",
"Client",
":",
"discovery",
"=",
"get_discoverer",
"(",
"host",
",",
"port",
",",
"discovery_host",
",",
"discovery_port",
",",
"selector",
")",
"dispatcher",
"=",
"MessageDispatcher",
"(",
"name",
"=",
"name",
",",
"loop",
"=",
"loop",
")",
"connector",
"=",
"Connector",
"(",
"discovery",
",",
"dispatcher",
",",
"name",
"=",
"name",
")",
"credential",
"=",
"msg",
".",
"Credential",
"(",
"username",
",",
"password",
")",
"if",
"username",
"and",
"password",
"else",
"None",
"return",
"Client",
"(",
"connector",
",",
"dispatcher",
",",
"credential",
"=",
"credential",
")"
] | 41.425743 | 27.693069 |
def main( argv ):
"""main program loop"""
global output_dir
try:
opts, args = getopt.getopt( sys.argv[1:], \
"ht:o:p:", \
["help", "title=", "output=", "prefix="] )
except getopt.GetoptError:
usage()
sys.exit( 2 )
if args == []:
usage()
sys.exit( 1 )
# process options
#
project_title = "Project"
project_prefix = None
output_dir = None
for opt in opts:
if opt[0] in ( "-h", "--help" ):
usage()
sys.exit( 0 )
if opt[0] in ( "-t", "--title" ):
project_title = opt[1]
if opt[0] in ( "-o", "--output" ):
utils.output_dir = opt[1]
if opt[0] in ( "-p", "--prefix" ):
project_prefix = opt[1]
check_output()
# create context and processor
source_processor = SourceProcessor()
content_processor = ContentProcessor()
# retrieve the list of files to process
file_list = make_file_list( args )
for filename in file_list:
source_processor.parse_file( filename )
content_processor.parse_sources( source_processor )
# process sections
content_processor.finish()
formatter = HtmlFormatter( content_processor, project_title, project_prefix )
formatter.toc_dump()
formatter.index_dump()
formatter.section_dump_all()
|
[
"def",
"main",
"(",
"argv",
")",
":",
"global",
"output_dir",
"try",
":",
"opts",
",",
"args",
"=",
"getopt",
".",
"getopt",
"(",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
",",
"\"ht:o:p:\"",
",",
"[",
"\"help\"",
",",
"\"title=\"",
",",
"\"output=\"",
",",
"\"prefix=\"",
"]",
")",
"except",
"getopt",
".",
"GetoptError",
":",
"usage",
"(",
")",
"sys",
".",
"exit",
"(",
"2",
")",
"if",
"args",
"==",
"[",
"]",
":",
"usage",
"(",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"# process options",
"#",
"project_title",
"=",
"\"Project\"",
"project_prefix",
"=",
"None",
"output_dir",
"=",
"None",
"for",
"opt",
"in",
"opts",
":",
"if",
"opt",
"[",
"0",
"]",
"in",
"(",
"\"-h\"",
",",
"\"--help\"",
")",
":",
"usage",
"(",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"if",
"opt",
"[",
"0",
"]",
"in",
"(",
"\"-t\"",
",",
"\"--title\"",
")",
":",
"project_title",
"=",
"opt",
"[",
"1",
"]",
"if",
"opt",
"[",
"0",
"]",
"in",
"(",
"\"-o\"",
",",
"\"--output\"",
")",
":",
"utils",
".",
"output_dir",
"=",
"opt",
"[",
"1",
"]",
"if",
"opt",
"[",
"0",
"]",
"in",
"(",
"\"-p\"",
",",
"\"--prefix\"",
")",
":",
"project_prefix",
"=",
"opt",
"[",
"1",
"]",
"check_output",
"(",
")",
"# create context and processor",
"source_processor",
"=",
"SourceProcessor",
"(",
")",
"content_processor",
"=",
"ContentProcessor",
"(",
")",
"# retrieve the list of files to process",
"file_list",
"=",
"make_file_list",
"(",
"args",
")",
"for",
"filename",
"in",
"file_list",
":",
"source_processor",
".",
"parse_file",
"(",
"filename",
")",
"content_processor",
".",
"parse_sources",
"(",
"source_processor",
")",
"# process sections",
"content_processor",
".",
"finish",
"(",
")",
"formatter",
"=",
"HtmlFormatter",
"(",
"content_processor",
",",
"project_title",
",",
"project_prefix",
")",
"formatter",
".",
"toc_dump",
"(",
")",
"formatter",
".",
"index_dump",
"(",
")",
"formatter",
".",
"section_dump_all",
"(",
")"
] | 24.245614 | 20.368421 |
def replace(
self,
name=_void,
kind=_void,
annotation=_void,
default=_void,
_partial_kwarg=_void,
):
"""Creates a customized copy of the Parameter."""
if name is _void:
name = self._name
if kind is _void:
kind = self._kind
if annotation is _void:
annotation = self._annotation
if default is _void:
default = self._default
if _partial_kwarg is _void:
_partial_kwarg = self._partial_kwarg
return type(self)(
name,
kind,
default=default,
annotation=annotation,
_partial_kwarg=_partial_kwarg,
)
|
[
"def",
"replace",
"(",
"self",
",",
"name",
"=",
"_void",
",",
"kind",
"=",
"_void",
",",
"annotation",
"=",
"_void",
",",
"default",
"=",
"_void",
",",
"_partial_kwarg",
"=",
"_void",
",",
")",
":",
"if",
"name",
"is",
"_void",
":",
"name",
"=",
"self",
".",
"_name",
"if",
"kind",
"is",
"_void",
":",
"kind",
"=",
"self",
".",
"_kind",
"if",
"annotation",
"is",
"_void",
":",
"annotation",
"=",
"self",
".",
"_annotation",
"if",
"default",
"is",
"_void",
":",
"default",
"=",
"self",
".",
"_default",
"if",
"_partial_kwarg",
"is",
"_void",
":",
"_partial_kwarg",
"=",
"self",
".",
"_partial_kwarg",
"return",
"type",
"(",
"self",
")",
"(",
"name",
",",
"kind",
",",
"default",
"=",
"default",
",",
"annotation",
"=",
"annotation",
",",
"_partial_kwarg",
"=",
"_partial_kwarg",
",",
")"
] | 21.90625 | 19.3125 |
def Get(self,key):
"""Get alert by providing name, ID, or other unique key.
If key is not unique and finds multiple matches only the first
will be returned
"""
for alert in self.alerts:
if alert.id == key: return(alert)
elif alert.name == key: return(alert)
|
[
"def",
"Get",
"(",
"self",
",",
"key",
")",
":",
"for",
"alert",
"in",
"self",
".",
"alerts",
":",
"if",
"alert",
".",
"id",
"==",
"key",
":",
"return",
"(",
"alert",
")",
"elif",
"alert",
".",
"name",
"==",
"key",
":",
"return",
"(",
"alert",
")"
] | 26.8 | 16.5 |
def to_json(self):
"""
:return: str
"""
json_dict = self.to_json_basic()
json_dict['day'] = self._day
json_dict['mon'] = self._mon
json_dict['year'] = self._year
return json.dumps(json_dict)
|
[
"def",
"to_json",
"(",
"self",
")",
":",
"json_dict",
"=",
"self",
".",
"to_json_basic",
"(",
")",
"json_dict",
"[",
"'day'",
"]",
"=",
"self",
".",
"_day",
"json_dict",
"[",
"'mon'",
"]",
"=",
"self",
".",
"_mon",
"json_dict",
"[",
"'year'",
"]",
"=",
"self",
".",
"_year",
"return",
"json",
".",
"dumps",
"(",
"json_dict",
")"
] | 27.333333 | 6.222222 |
def singularize(plural):
"""Convert plural word to its singular form.
Args:
plural: A word in its plural form.
Returns:
The word in its singular form.
"""
if plural in UNCOUNTABLES:
return plural
for i in IRREGULAR:
if i[1] == plural:
return i[0]
for i in SINGULARIZE_PATTERNS:
if re.search(i[0], plural):
return re.sub(i[0], i[1], plural)
return plural
|
[
"def",
"singularize",
"(",
"plural",
")",
":",
"if",
"plural",
"in",
"UNCOUNTABLES",
":",
"return",
"plural",
"for",
"i",
"in",
"IRREGULAR",
":",
"if",
"i",
"[",
"1",
"]",
"==",
"plural",
":",
"return",
"i",
"[",
"0",
"]",
"for",
"i",
"in",
"SINGULARIZE_PATTERNS",
":",
"if",
"re",
".",
"search",
"(",
"i",
"[",
"0",
"]",
",",
"plural",
")",
":",
"return",
"re",
".",
"sub",
"(",
"i",
"[",
"0",
"]",
",",
"i",
"[",
"1",
"]",
",",
"plural",
")",
"return",
"plural"
] | 25.529412 | 13.823529 |
def _get_general_coverage(data, itype):
"""Retrieve coverage information from new shared SV bins.
"""
work_bam = dd.get_align_bam(data) or dd.get_work_bam(data)
return [{"bam": work_bam, "file": tz.get_in(["depth", "bins", "target"], data),
"cnntype": "target", "itype": itype, "sample": dd.get_sample_name(data)},
{"bam": work_bam, "file": tz.get_in(["depth", "bins", "antitarget"], data),
"cnntype": "antitarget", "itype": itype, "sample": dd.get_sample_name(data)}]
|
[
"def",
"_get_general_coverage",
"(",
"data",
",",
"itype",
")",
":",
"work_bam",
"=",
"dd",
".",
"get_align_bam",
"(",
"data",
")",
"or",
"dd",
".",
"get_work_bam",
"(",
"data",
")",
"return",
"[",
"{",
"\"bam\"",
":",
"work_bam",
",",
"\"file\"",
":",
"tz",
".",
"get_in",
"(",
"[",
"\"depth\"",
",",
"\"bins\"",
",",
"\"target\"",
"]",
",",
"data",
")",
",",
"\"cnntype\"",
":",
"\"target\"",
",",
"\"itype\"",
":",
"itype",
",",
"\"sample\"",
":",
"dd",
".",
"get_sample_name",
"(",
"data",
")",
"}",
",",
"{",
"\"bam\"",
":",
"work_bam",
",",
"\"file\"",
":",
"tz",
".",
"get_in",
"(",
"[",
"\"depth\"",
",",
"\"bins\"",
",",
"\"antitarget\"",
"]",
",",
"data",
")",
",",
"\"cnntype\"",
":",
"\"antitarget\"",
",",
"\"itype\"",
":",
"itype",
",",
"\"sample\"",
":",
"dd",
".",
"get_sample_name",
"(",
"data",
")",
"}",
"]"
] | 64.375 | 26.125 |
def _connect(self):
"""
Connect to the EC2 cloud provider.
:return: :py:class:`boto.ec2.connection.EC2Connection`
:raises: Generic exception on error
"""
# check for existing connection
if self._ec2_connection:
return self._ec2_connection
try:
log.debug("Connecting to EC2 endpoint %s", self._ec2host)
# connect to webservice
ec2_connection = boto.ec2.connect_to_region(
self._region_name,
aws_access_key_id=self._access_key,
aws_secret_access_key=self._secret_key,
is_secure=self._secure,
host=self._ec2host,
port=self._ec2port,
path=self._ec2path,
)
# With the loose setting `BOTO_USE_ENDPOINT_HEURISTICS`
# which is necessary to work around issue #592, Boto will
# now accept *any* string as an AWS region name;
# furthermore, it *always* returns a connection object --
# so the only way to check that we are not going to run
# into trouble is to check that there *is* a valid host
# name on the other end of the connection.
if ec2_connection.host:
log.debug("EC2 connection has been successful.")
else:
raise CloudProviderError(
"Cannot establish connection to EC2 region {0}"
.format(self._region_name))
if not self._vpc:
vpc_connection = None
self._vpc_id = None
else:
vpc_connection, self._vpc_id = self._find_vpc_by_name(self._vpc)
except Exception as err:
log.error("Error connecting to EC2: %s", err)
raise
self._ec2_connection, self._vpc_connection = (
ec2_connection, vpc_connection)
return self._ec2_connection
|
[
"def",
"_connect",
"(",
"self",
")",
":",
"# check for existing connection",
"if",
"self",
".",
"_ec2_connection",
":",
"return",
"self",
".",
"_ec2_connection",
"try",
":",
"log",
".",
"debug",
"(",
"\"Connecting to EC2 endpoint %s\"",
",",
"self",
".",
"_ec2host",
")",
"# connect to webservice",
"ec2_connection",
"=",
"boto",
".",
"ec2",
".",
"connect_to_region",
"(",
"self",
".",
"_region_name",
",",
"aws_access_key_id",
"=",
"self",
".",
"_access_key",
",",
"aws_secret_access_key",
"=",
"self",
".",
"_secret_key",
",",
"is_secure",
"=",
"self",
".",
"_secure",
",",
"host",
"=",
"self",
".",
"_ec2host",
",",
"port",
"=",
"self",
".",
"_ec2port",
",",
"path",
"=",
"self",
".",
"_ec2path",
",",
")",
"# With the loose setting `BOTO_USE_ENDPOINT_HEURISTICS`",
"# which is necessary to work around issue #592, Boto will",
"# now accept *any* string as an AWS region name;",
"# furthermore, it *always* returns a connection object --",
"# so the only way to check that we are not going to run",
"# into trouble is to check that there *is* a valid host",
"# name on the other end of the connection.",
"if",
"ec2_connection",
".",
"host",
":",
"log",
".",
"debug",
"(",
"\"EC2 connection has been successful.\"",
")",
"else",
":",
"raise",
"CloudProviderError",
"(",
"\"Cannot establish connection to EC2 region {0}\"",
".",
"format",
"(",
"self",
".",
"_region_name",
")",
")",
"if",
"not",
"self",
".",
"_vpc",
":",
"vpc_connection",
"=",
"None",
"self",
".",
"_vpc_id",
"=",
"None",
"else",
":",
"vpc_connection",
",",
"self",
".",
"_vpc_id",
"=",
"self",
".",
"_find_vpc_by_name",
"(",
"self",
".",
"_vpc",
")",
"except",
"Exception",
"as",
"err",
":",
"log",
".",
"error",
"(",
"\"Error connecting to EC2: %s\"",
",",
"err",
")",
"raise",
"self",
".",
"_ec2_connection",
",",
"self",
".",
"_vpc_connection",
"=",
"(",
"ec2_connection",
",",
"vpc_connection",
")",
"return",
"self",
".",
"_ec2_connection"
] | 37.784314 | 16.921569 |
def rename(self, old, new):
"""Rename the old schema/identifier to the new schema/identifier and
update references.
If the new schema/identifier is already present, that is an error.
If the schema/identifier key is absent, we only debug log and return,
assuming it's a temp table being renamed.
:param BaseRelation old: The existing relation name information.
:param BaseRelation new: The new relation name information.
:raises InternalError: If the new key is already present.
"""
old_key = _make_key(old)
new_key = _make_key(new)
logger.debug('Renaming relation {!s} to {!s}'.format(
old_key, new_key)
)
logger.debug('before rename: {}'.format(
pprint.pformat(self.dump_graph()))
)
with self.lock:
if self._check_rename_constraints(old_key, new_key):
self._rename_relation(old_key, _CachedRelation(new))
else:
self._setdefault(_CachedRelation(new))
logger.debug('after rename: {}'.format(
pprint.pformat(self.dump_graph()))
)
|
[
"def",
"rename",
"(",
"self",
",",
"old",
",",
"new",
")",
":",
"old_key",
"=",
"_make_key",
"(",
"old",
")",
"new_key",
"=",
"_make_key",
"(",
"new",
")",
"logger",
".",
"debug",
"(",
"'Renaming relation {!s} to {!s}'",
".",
"format",
"(",
"old_key",
",",
"new_key",
")",
")",
"logger",
".",
"debug",
"(",
"'before rename: {}'",
".",
"format",
"(",
"pprint",
".",
"pformat",
"(",
"self",
".",
"dump_graph",
"(",
")",
")",
")",
")",
"with",
"self",
".",
"lock",
":",
"if",
"self",
".",
"_check_rename_constraints",
"(",
"old_key",
",",
"new_key",
")",
":",
"self",
".",
"_rename_relation",
"(",
"old_key",
",",
"_CachedRelation",
"(",
"new",
")",
")",
"else",
":",
"self",
".",
"_setdefault",
"(",
"_CachedRelation",
"(",
"new",
")",
")",
"logger",
".",
"debug",
"(",
"'after rename: {}'",
".",
"format",
"(",
"pprint",
".",
"pformat",
"(",
"self",
".",
"dump_graph",
"(",
")",
")",
")",
")"
] | 37.933333 | 20.833333 |
def send_capabilties_request(self, vehicle, name, m):
'''An alias for send_capabilities_request.
The word "capabilities" was misspelled in previous versions of this code. This is simply
an alias to send_capabilities_request using the legacy name.
'''
return self.send_capabilities_request(vehicle, name, m)
|
[
"def",
"send_capabilties_request",
"(",
"self",
",",
"vehicle",
",",
"name",
",",
"m",
")",
":",
"return",
"self",
".",
"send_capabilities_request",
"(",
"vehicle",
",",
"name",
",",
"m",
")"
] | 48.714286 | 28.428571 |
def get_collection(self, session, query, api_key):
"""
Fetch a collection of resources of a specified type.
:param session: SQLAlchemy session
:param query: Dict of query args
:param api_type: The type of the model
"""
model = self._fetch_model(api_key)
include = self._parse_include(query.get('include', '').split(','))
fields = self._parse_fields(query)
included = {}
sorts = query.get('sort', '').split(',')
order_by = []
collection = session.query(model)
for attr in sorts:
if attr == '':
break
attr_name, is_asc = [attr[1:], False]\
if attr[0] == '-'\
else [attr, True]
if attr_name not in model.__mapper__.all_orm_descriptors.keys()\
or not hasattr(model, attr_name)\
or attr_name in model.__mapper__.relationships.keys():
return NotSortableError(model, attr_name)
attr = getattr(model, attr_name)
if not hasattr(attr, 'asc'):
# pragma: no cover
return NotSortableError(model, attr_name)
check_permission(model, attr_name, Permissions.VIEW)
order_by.append(attr.asc() if is_asc else attr.desc())
if len(order_by) > 0:
collection = collection.order_by(*order_by)
pos = -1
start, end = self._parse_page(query)
response = JSONAPIResponse()
response.data['data'] = []
for instance in collection:
try:
check_permission(instance, None, Permissions.VIEW)
except PermissionDeniedError:
continue
pos += 1
if end is not None and (pos < start or pos > end):
continue
built = self._render_full_resource(instance, include, fields)
included.update(built.pop('included'))
response.data['data'].append(built)
response.data['included'] = list(included.values())
return response
|
[
"def",
"get_collection",
"(",
"self",
",",
"session",
",",
"query",
",",
"api_key",
")",
":",
"model",
"=",
"self",
".",
"_fetch_model",
"(",
"api_key",
")",
"include",
"=",
"self",
".",
"_parse_include",
"(",
"query",
".",
"get",
"(",
"'include'",
",",
"''",
")",
".",
"split",
"(",
"','",
")",
")",
"fields",
"=",
"self",
".",
"_parse_fields",
"(",
"query",
")",
"included",
"=",
"{",
"}",
"sorts",
"=",
"query",
".",
"get",
"(",
"'sort'",
",",
"''",
")",
".",
"split",
"(",
"','",
")",
"order_by",
"=",
"[",
"]",
"collection",
"=",
"session",
".",
"query",
"(",
"model",
")",
"for",
"attr",
"in",
"sorts",
":",
"if",
"attr",
"==",
"''",
":",
"break",
"attr_name",
",",
"is_asc",
"=",
"[",
"attr",
"[",
"1",
":",
"]",
",",
"False",
"]",
"if",
"attr",
"[",
"0",
"]",
"==",
"'-'",
"else",
"[",
"attr",
",",
"True",
"]",
"if",
"attr_name",
"not",
"in",
"model",
".",
"__mapper__",
".",
"all_orm_descriptors",
".",
"keys",
"(",
")",
"or",
"not",
"hasattr",
"(",
"model",
",",
"attr_name",
")",
"or",
"attr_name",
"in",
"model",
".",
"__mapper__",
".",
"relationships",
".",
"keys",
"(",
")",
":",
"return",
"NotSortableError",
"(",
"model",
",",
"attr_name",
")",
"attr",
"=",
"getattr",
"(",
"model",
",",
"attr_name",
")",
"if",
"not",
"hasattr",
"(",
"attr",
",",
"'asc'",
")",
":",
"# pragma: no cover",
"return",
"NotSortableError",
"(",
"model",
",",
"attr_name",
")",
"check_permission",
"(",
"model",
",",
"attr_name",
",",
"Permissions",
".",
"VIEW",
")",
"order_by",
".",
"append",
"(",
"attr",
".",
"asc",
"(",
")",
"if",
"is_asc",
"else",
"attr",
".",
"desc",
"(",
")",
")",
"if",
"len",
"(",
"order_by",
")",
">",
"0",
":",
"collection",
"=",
"collection",
".",
"order_by",
"(",
"*",
"order_by",
")",
"pos",
"=",
"-",
"1",
"start",
",",
"end",
"=",
"self",
".",
"_parse_page",
"(",
"query",
")",
"response",
"=",
"JSONAPIResponse",
"(",
")",
"response",
".",
"data",
"[",
"'data'",
"]",
"=",
"[",
"]",
"for",
"instance",
"in",
"collection",
":",
"try",
":",
"check_permission",
"(",
"instance",
",",
"None",
",",
"Permissions",
".",
"VIEW",
")",
"except",
"PermissionDeniedError",
":",
"continue",
"pos",
"+=",
"1",
"if",
"end",
"is",
"not",
"None",
"and",
"(",
"pos",
"<",
"start",
"or",
"pos",
">",
"end",
")",
":",
"continue",
"built",
"=",
"self",
".",
"_render_full_resource",
"(",
"instance",
",",
"include",
",",
"fields",
")",
"included",
".",
"update",
"(",
"built",
".",
"pop",
"(",
"'included'",
")",
")",
"response",
".",
"data",
"[",
"'data'",
"]",
".",
"append",
"(",
"built",
")",
"response",
".",
"data",
"[",
"'included'",
"]",
"=",
"list",
"(",
"included",
".",
"values",
"(",
")",
")",
"return",
"response"
] | 32.15625 | 19.53125 |
def parse_args():
'''Parse command line arguments'''
parser = argparse.ArgumentParser(
description='Morphology feature plotter',
epilog='Note: Makes plots of various features and superimposes\
input distributions. Plots are saved to PDF file.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('datapath',
help='Morphology data directory path')
parser.add_argument('--mtypeconfig',
required=True,
help='Get mtype JSON configuration file')
parser.add_argument('--output',
default='plots.pdf',
help='Output PDF file name')
return parser.parse_args()
|
[
"def",
"parse_args",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Morphology feature plotter'",
",",
"epilog",
"=",
"'Note: Makes plots of various features and superimposes\\\n input distributions. Plots are saved to PDF file.'",
",",
"formatter_class",
"=",
"argparse",
".",
"ArgumentDefaultsHelpFormatter",
")",
"parser",
".",
"add_argument",
"(",
"'datapath'",
",",
"help",
"=",
"'Morphology data directory path'",
")",
"parser",
".",
"add_argument",
"(",
"'--mtypeconfig'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"'Get mtype JSON configuration file'",
")",
"parser",
".",
"add_argument",
"(",
"'--output'",
",",
"default",
"=",
"'plots.pdf'",
",",
"help",
"=",
"'Output PDF file name'",
")",
"return",
"parser",
".",
"parse_args",
"(",
")"
] | 38.684211 | 16.578947 |
def _gccalc(lon, lat, azimuth, maxdist=None):
"""
Original javascript on http://williams.best.vwh.net/gccalc.htm
Translated into python by Thomas Lecocq
This function is a black box, because trigonometry is difficult
"""
glat1 = lat * np.pi / 180.
glon1 = lon * np.pi / 180.
s = maxdist / 1.852243
faz = azimuth * np.pi / 180.
EPS = 0.00000000005
if ((np.abs(np.cos(glat1)) < EPS) and not (np.abs(np.sin(faz)) < EPS)):
raise CourseException("Only North-South courses are meaningful")
a = 6378.137 / 1.852243
f = 1 / 298.257223563
r = 1 - f
tu = r * np.tan(glat1)
sf = np.sin(faz)
cf = np.cos(faz)
if (cf == 0):
b = 0.
else:
b = 2. * np.arctan2 (tu, cf)
cu = 1. / np.sqrt(1 + tu * tu)
su = tu * cu
sa = cu * sf
c2a = 1 - sa * sa
x = 1. + np.sqrt(1. + c2a * (1. / (r * r) - 1.))
x = (x - 2.) / x
c = 1. - x
c = (x * x / 4. + 1.) / c
d = (0.375 * x * x - 1.) * x
tu = s / (r * a * c)
y = tu
c = y + 1
while (np.abs (y - c) > EPS):
sy = np.sin(y)
cy = np.cos(y)
cz = np.cos(b + y)
e = 2. * cz * cz - 1.
c = y
x = e * cy
y = e + e - 1.
y = (((sy * sy * 4. - 3.) * y * cz * d / 6. + x) *
d / 4. - cz) * sy * d + tu
b = cu * cy * cf - su * sy
c = r * np.sqrt(sa * sa + b * b)
d = su * cy + cu * sy * cf
glat2 = (np.arctan2(d, c) + np.pi) % (2*np.pi) - np.pi
c = cu * cy - su * sy * cf
x = np.arctan2(sy * sf, c)
c = ((-3. * c2a + 4.) * f + 4.) * c2a * f / 16.
d = ((e * cy * c + cz) * sy * c + y) * sa
glon2 = ((glon1 + x - (1. - c) * d * f + np.pi) % (2*np.pi)) - np.pi
baz = (np.arctan2(sa, b) + np.pi) % (2 * np.pi)
glon2 *= 180./np.pi
glat2 *= 180./np.pi
baz *= 180./np.pi
return (glon2, glat2, baz)
|
[
"def",
"_gccalc",
"(",
"lon",
",",
"lat",
",",
"azimuth",
",",
"maxdist",
"=",
"None",
")",
":",
"glat1",
"=",
"lat",
"*",
"np",
".",
"pi",
"/",
"180.",
"glon1",
"=",
"lon",
"*",
"np",
".",
"pi",
"/",
"180.",
"s",
"=",
"maxdist",
"/",
"1.852243",
"faz",
"=",
"azimuth",
"*",
"np",
".",
"pi",
"/",
"180.",
"EPS",
"=",
"0.00000000005",
"if",
"(",
"(",
"np",
".",
"abs",
"(",
"np",
".",
"cos",
"(",
"glat1",
")",
")",
"<",
"EPS",
")",
"and",
"not",
"(",
"np",
".",
"abs",
"(",
"np",
".",
"sin",
"(",
"faz",
")",
")",
"<",
"EPS",
")",
")",
":",
"raise",
"CourseException",
"(",
"\"Only North-South courses are meaningful\"",
")",
"a",
"=",
"6378.137",
"/",
"1.852243",
"f",
"=",
"1",
"/",
"298.257223563",
"r",
"=",
"1",
"-",
"f",
"tu",
"=",
"r",
"*",
"np",
".",
"tan",
"(",
"glat1",
")",
"sf",
"=",
"np",
".",
"sin",
"(",
"faz",
")",
"cf",
"=",
"np",
".",
"cos",
"(",
"faz",
")",
"if",
"(",
"cf",
"==",
"0",
")",
":",
"b",
"=",
"0.",
"else",
":",
"b",
"=",
"2.",
"*",
"np",
".",
"arctan2",
"(",
"tu",
",",
"cf",
")",
"cu",
"=",
"1.",
"/",
"np",
".",
"sqrt",
"(",
"1",
"+",
"tu",
"*",
"tu",
")",
"su",
"=",
"tu",
"*",
"cu",
"sa",
"=",
"cu",
"*",
"sf",
"c2a",
"=",
"1",
"-",
"sa",
"*",
"sa",
"x",
"=",
"1.",
"+",
"np",
".",
"sqrt",
"(",
"1.",
"+",
"c2a",
"*",
"(",
"1.",
"/",
"(",
"r",
"*",
"r",
")",
"-",
"1.",
")",
")",
"x",
"=",
"(",
"x",
"-",
"2.",
")",
"/",
"x",
"c",
"=",
"1.",
"-",
"x",
"c",
"=",
"(",
"x",
"*",
"x",
"/",
"4.",
"+",
"1.",
")",
"/",
"c",
"d",
"=",
"(",
"0.375",
"*",
"x",
"*",
"x",
"-",
"1.",
")",
"*",
"x",
"tu",
"=",
"s",
"/",
"(",
"r",
"*",
"a",
"*",
"c",
")",
"y",
"=",
"tu",
"c",
"=",
"y",
"+",
"1",
"while",
"(",
"np",
".",
"abs",
"(",
"y",
"-",
"c",
")",
">",
"EPS",
")",
":",
"sy",
"=",
"np",
".",
"sin",
"(",
"y",
")",
"cy",
"=",
"np",
".",
"cos",
"(",
"y",
")",
"cz",
"=",
"np",
".",
"cos",
"(",
"b",
"+",
"y",
")",
"e",
"=",
"2.",
"*",
"cz",
"*",
"cz",
"-",
"1.",
"c",
"=",
"y",
"x",
"=",
"e",
"*",
"cy",
"y",
"=",
"e",
"+",
"e",
"-",
"1.",
"y",
"=",
"(",
"(",
"(",
"sy",
"*",
"sy",
"*",
"4.",
"-",
"3.",
")",
"*",
"y",
"*",
"cz",
"*",
"d",
"/",
"6.",
"+",
"x",
")",
"*",
"d",
"/",
"4.",
"-",
"cz",
")",
"*",
"sy",
"*",
"d",
"+",
"tu",
"b",
"=",
"cu",
"*",
"cy",
"*",
"cf",
"-",
"su",
"*",
"sy",
"c",
"=",
"r",
"*",
"np",
".",
"sqrt",
"(",
"sa",
"*",
"sa",
"+",
"b",
"*",
"b",
")",
"d",
"=",
"su",
"*",
"cy",
"+",
"cu",
"*",
"sy",
"*",
"cf",
"glat2",
"=",
"(",
"np",
".",
"arctan2",
"(",
"d",
",",
"c",
")",
"+",
"np",
".",
"pi",
")",
"%",
"(",
"2",
"*",
"np",
".",
"pi",
")",
"-",
"np",
".",
"pi",
"c",
"=",
"cu",
"*",
"cy",
"-",
"su",
"*",
"sy",
"*",
"cf",
"x",
"=",
"np",
".",
"arctan2",
"(",
"sy",
"*",
"sf",
",",
"c",
")",
"c",
"=",
"(",
"(",
"-",
"3.",
"*",
"c2a",
"+",
"4.",
")",
"*",
"f",
"+",
"4.",
")",
"*",
"c2a",
"*",
"f",
"/",
"16.",
"d",
"=",
"(",
"(",
"e",
"*",
"cy",
"*",
"c",
"+",
"cz",
")",
"*",
"sy",
"*",
"c",
"+",
"y",
")",
"*",
"sa",
"glon2",
"=",
"(",
"(",
"glon1",
"+",
"x",
"-",
"(",
"1.",
"-",
"c",
")",
"*",
"d",
"*",
"f",
"+",
"np",
".",
"pi",
")",
"%",
"(",
"2",
"*",
"np",
".",
"pi",
")",
")",
"-",
"np",
".",
"pi",
"baz",
"=",
"(",
"np",
".",
"arctan2",
"(",
"sa",
",",
"b",
")",
"+",
"np",
".",
"pi",
")",
"%",
"(",
"2",
"*",
"np",
".",
"pi",
")",
"glon2",
"*=",
"180.",
"/",
"np",
".",
"pi",
"glat2",
"*=",
"180.",
"/",
"np",
".",
"pi",
"baz",
"*=",
"180.",
"/",
"np",
".",
"pi",
"return",
"(",
"glon2",
",",
"glat2",
",",
"baz",
")"
] | 27.681818 | 18.439394 |
def stop(self):
'''
Stop the fuzzing session
'''
self.logger.info('Stopping client fuzzer')
self._target_control_thread.stop()
self.target.signal_mutated()
super(ClientFuzzer, self).stop()
|
[
"def",
"stop",
"(",
"self",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'Stopping client fuzzer'",
")",
"self",
".",
"_target_control_thread",
".",
"stop",
"(",
")",
"self",
".",
"target",
".",
"signal_mutated",
"(",
")",
"super",
"(",
"ClientFuzzer",
",",
"self",
")",
".",
"stop",
"(",
")"
] | 29.625 | 13.375 |
def handle_fobj(backend, f, **kwargs):
"""
Handle a file-like object.
Called by `get()` when provided a file-like. This function will prefer the
backend's `handle_fobj()` if one is provided. Otherwise, it will write the
data to a temporary file and call `handle_path()`.
"""
if not is_binary(f):
raise AssertionError('File must be opened in binary mode.')
if callable(getattr(backend, 'handle_fobj', None)):
# Prefer handle_fobj() if present.
LOGGER.debug("using handle_fobj")
return backend.handle_fobj(f)
elif callable(getattr(backend, 'handle_path', None)):
# Fallback to handle_path(). Warn user since this is potentially
# expensive.
LOGGER.debug("using handle_path")
LOGGER.warning(
"Using disk, %r backend does not provide `handle_fobj()`", backend)
ext = ''
if 'ext' in kwargs:
ext = '.' + kwargs['ext']
with fobj_to_tempfile(f, suffix=ext) as fname:
return backend.handle_path(fname, **kwargs)
else:
raise AssertionError(
'Backend %s has no _get functions' % backend.__name__)
|
[
"def",
"handle_fobj",
"(",
"backend",
",",
"f",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"is_binary",
"(",
"f",
")",
":",
"raise",
"AssertionError",
"(",
"'File must be opened in binary mode.'",
")",
"if",
"callable",
"(",
"getattr",
"(",
"backend",
",",
"'handle_fobj'",
",",
"None",
")",
")",
":",
"# Prefer handle_fobj() if present.",
"LOGGER",
".",
"debug",
"(",
"\"using handle_fobj\"",
")",
"return",
"backend",
".",
"handle_fobj",
"(",
"f",
")",
"elif",
"callable",
"(",
"getattr",
"(",
"backend",
",",
"'handle_path'",
",",
"None",
")",
")",
":",
"# Fallback to handle_path(). Warn user since this is potentially",
"# expensive.",
"LOGGER",
".",
"debug",
"(",
"\"using handle_path\"",
")",
"LOGGER",
".",
"warning",
"(",
"\"Using disk, %r backend does not provide `handle_fobj()`\"",
",",
"backend",
")",
"ext",
"=",
"''",
"if",
"'ext'",
"in",
"kwargs",
":",
"ext",
"=",
"'.'",
"+",
"kwargs",
"[",
"'ext'",
"]",
"with",
"fobj_to_tempfile",
"(",
"f",
",",
"suffix",
"=",
"ext",
")",
"as",
"fname",
":",
"return",
"backend",
".",
"handle_path",
"(",
"fname",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"raise",
"AssertionError",
"(",
"'Backend %s has no _get functions'",
"%",
"backend",
".",
"__name__",
")"
] | 35.71875 | 19.65625 |