| text (stringlengths 89–104k) | code_tokens (list) | avg_line_len (float64 7.91–980) | score (float64 0–630) |
|---|---|---|---|
def gaussian(data, mean, covariance):
    """!
    @brief Calculates gaussian for dataset using specified mean (mathematical expectation) and variance or
            covariance in case of multi-dimensional data.

    @param[in] data (list): Data that is used for gaussian calculation.
    @param[in] mean (float|numpy.array): Mathematical expectation used for calculation.
    @param[in] covariance (float|numpy.array): Variance or covariance matrix for calculation.

    @return (list) Value of gaussian function for each point in dataset.

    """
    dimension = float(len(data[0]))

    if dimension != 1.0:
        inv_variance = numpy.linalg.pinv(covariance)
    else:
        inv_variance = 1.0 / covariance

    divider = (pi * 2.0) ** (dimension / 2.0) * numpy.sqrt(numpy.linalg.norm(covariance))
    if divider != 0.0:
        right_const = 1.0 / divider
    else:
        right_const = float('inf')

    result = []

    for point in data:
        mean_delta = point - mean
        point_gaussian = right_const * numpy.exp(-0.5 * mean_delta.dot(inv_variance).dot(numpy.transpose(mean_delta)))
        result.append(point_gaussian)

    return result
|
[
"def",
"gaussian",
"(",
"data",
",",
"mean",
",",
"covariance",
")",
":",
"dimension",
"=",
"float",
"(",
"len",
"(",
"data",
"[",
"0",
"]",
")",
")",
"if",
"dimension",
"!=",
"1.0",
":",
"inv_variance",
"=",
"numpy",
".",
"linalg",
".",
"pinv",
"(",
"covariance",
")",
"else",
":",
"inv_variance",
"=",
"1.0",
"/",
"covariance",
"divider",
"=",
"(",
"pi",
"*",
"2.0",
")",
"**",
"(",
"dimension",
"/",
"2.0",
")",
"*",
"numpy",
".",
"sqrt",
"(",
"numpy",
".",
"linalg",
".",
"norm",
"(",
"covariance",
")",
")",
"if",
"divider",
"!=",
"0.0",
":",
"right_const",
"=",
"1.0",
"/",
"divider",
"else",
":",
"right_const",
"=",
"float",
"(",
"'inf'",
")",
"result",
"=",
"[",
"]",
"for",
"point",
"in",
"data",
":",
"mean_delta",
"=",
"point",
"-",
"mean",
"point_gaussian",
"=",
"right_const",
"*",
"numpy",
".",
"exp",
"(",
"-",
"0.5",
"*",
"mean_delta",
".",
"dot",
"(",
"inv_variance",
")",
".",
"dot",
"(",
"numpy",
".",
"transpose",
"(",
"mean_delta",
")",
")",
")",
"result",
".",
"append",
"(",
"point_gaussian",
")",
"return",
"result"
] | 36 | 26.151515 |
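A minimal usage sketch for `gaussian` follows, assuming the defining module imports `numpy` and `pi` (from `math`), as the body requires; the data and parameters are invented for illustration. Note the normalizer uses `numpy.linalg.norm` rather than the determinant, so values differ slightly from a textbook multivariate normal density.

```python
# Hypothetical usage of gaussian(); assumes numpy and math.pi are imported
# in the defining module, as the function body requires.
import numpy

data = [numpy.array([0.0, 0.0]), numpy.array([1.0, 1.0])]  # two 2-D points
mean = numpy.array([0.0, 0.0])
covariance = numpy.eye(2)  # identity covariance matrix

densities = gaussian(data, mean, covariance)
print(densities)  # the density is largest for the point at the mean
```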
def display_outputs(self, groupby="type"):
    """republish the outputs of the computation

    Parameters
    ----------

    groupby : str [default: type]
        if 'type':
            Group outputs by type (show all stdout, then all stderr, etc.):

            [stdout:1] foo
            [stdout:2] foo
            [stderr:1] bar
            [stderr:2] bar

        if 'engine':
            Display outputs for each engine before moving on to the next:

            [stdout:1] foo
            [stderr:1] bar
            [stdout:2] foo
            [stderr:2] bar

        if 'order':
            Like 'type', but further collate individual displaypub
            outputs. This is meant for cases of each command producing
            several plots, and you would like to see all of the first
            plots together, then all of the second plots, and so on.
    """
    if self._single_result:
        self._display_single_result()
        return

    stdouts = self.stdout
    stderrs = self.stderr
    pyouts = self.pyout
    output_lists = self.outputs
    results = self.get()
    targets = self.engine_id

    if groupby == "engine":
        for eid, stdout, stderr, outputs, r, pyout in zip(
                targets, stdouts, stderrs, output_lists, results, pyouts
        ):
            self._display_stream(stdout, '[stdout:%i] ' % eid)
            self._display_stream(stderr, '[stderr:%i] ' % eid, file=sys.stderr)

            try:
                get_ipython()
            except NameError:
                # displaypub is meaningless outside IPython
                return

            if outputs or pyout is not None:
                _raw_text('[output:%i]' % eid)

            for output in outputs:
                self._republish_displaypub(output, eid)

            if pyout is not None:
                display(r)

    elif groupby in ('type', 'order'):
        # republish stdout:
        for eid, stdout in zip(targets, stdouts):
            self._display_stream(stdout, '[stdout:%i] ' % eid)

        # republish stderr:
        for eid, stderr in zip(targets, stderrs):
            self._display_stream(stderr, '[stderr:%i] ' % eid, file=sys.stderr)

        try:
            get_ipython()
        except NameError:
            # displaypub is meaningless outside IPython
            return

        if groupby == 'order':
            output_dict = dict((eid, outputs) for eid, outputs in zip(targets, output_lists))
            N = max(len(outputs) for outputs in output_lists)
            for i in range(N):
                for eid in targets:
                    outputs = output_dict[eid]
                    if len(outputs) >= N:
                        _raw_text('[output:%i]' % eid)
                        self._republish_displaypub(outputs[i], eid)
        else:
            # republish displaypub output
            for eid, outputs in zip(targets, output_lists):
                if outputs:
                    _raw_text('[output:%i]' % eid)
                for output in outputs:
                    self._republish_displaypub(output, eid)

        # finally, add pyout:
        for eid, r, pyout in zip(targets, results, pyouts):
            if pyout is not None:
                display(r)

    else:
        raise ValueError("groupby must be one of 'type', 'engine', 'order', not %r" % groupby)
|
[
"def",
"display_outputs",
"(",
"self",
",",
"groupby",
"=",
"\"type\"",
")",
":",
"if",
"self",
".",
"_single_result",
":",
"self",
".",
"_display_single_result",
"(",
")",
"return",
"stdouts",
"=",
"self",
".",
"stdout",
"stderrs",
"=",
"self",
".",
"stderr",
"pyouts",
"=",
"self",
".",
"pyout",
"output_lists",
"=",
"self",
".",
"outputs",
"results",
"=",
"self",
".",
"get",
"(",
")",
"targets",
"=",
"self",
".",
"engine_id",
"if",
"groupby",
"==",
"\"engine\"",
":",
"for",
"eid",
",",
"stdout",
",",
"stderr",
",",
"outputs",
",",
"r",
",",
"pyout",
"in",
"zip",
"(",
"targets",
",",
"stdouts",
",",
"stderrs",
",",
"output_lists",
",",
"results",
",",
"pyouts",
")",
":",
"self",
".",
"_display_stream",
"(",
"stdout",
",",
"'[stdout:%i] '",
"%",
"eid",
")",
"self",
".",
"_display_stream",
"(",
"stderr",
",",
"'[stderr:%i] '",
"%",
"eid",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"try",
":",
"get_ipython",
"(",
")",
"except",
"NameError",
":",
"# displaypub is meaningless outside IPython",
"return",
"if",
"outputs",
"or",
"pyout",
"is",
"not",
"None",
":",
"_raw_text",
"(",
"'[output:%i]'",
"%",
"eid",
")",
"for",
"output",
"in",
"outputs",
":",
"self",
".",
"_republish_displaypub",
"(",
"output",
",",
"eid",
")",
"if",
"pyout",
"is",
"not",
"None",
":",
"display",
"(",
"r",
")",
"elif",
"groupby",
"in",
"(",
"'type'",
",",
"'order'",
")",
":",
"# republish stdout:",
"for",
"eid",
",",
"stdout",
"in",
"zip",
"(",
"targets",
",",
"stdouts",
")",
":",
"self",
".",
"_display_stream",
"(",
"stdout",
",",
"'[stdout:%i] '",
"%",
"eid",
")",
"# republish stderr:",
"for",
"eid",
",",
"stderr",
"in",
"zip",
"(",
"targets",
",",
"stderrs",
")",
":",
"self",
".",
"_display_stream",
"(",
"stderr",
",",
"'[stderr:%i] '",
"%",
"eid",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"try",
":",
"get_ipython",
"(",
")",
"except",
"NameError",
":",
"# displaypub is meaningless outside IPython",
"return",
"if",
"groupby",
"==",
"'order'",
":",
"output_dict",
"=",
"dict",
"(",
"(",
"eid",
",",
"outputs",
")",
"for",
"eid",
",",
"outputs",
"in",
"zip",
"(",
"targets",
",",
"output_lists",
")",
")",
"N",
"=",
"max",
"(",
"len",
"(",
"outputs",
")",
"for",
"outputs",
"in",
"output_lists",
")",
"for",
"i",
"in",
"range",
"(",
"N",
")",
":",
"for",
"eid",
"in",
"targets",
":",
"outputs",
"=",
"output_dict",
"[",
"eid",
"]",
"if",
"len",
"(",
"outputs",
")",
">=",
"N",
":",
"_raw_text",
"(",
"'[output:%i]'",
"%",
"eid",
")",
"self",
".",
"_republish_displaypub",
"(",
"outputs",
"[",
"i",
"]",
",",
"eid",
")",
"else",
":",
"# republish displaypub output",
"for",
"eid",
",",
"outputs",
"in",
"zip",
"(",
"targets",
",",
"output_lists",
")",
":",
"if",
"outputs",
":",
"_raw_text",
"(",
"'[output:%i]'",
"%",
"eid",
")",
"for",
"output",
"in",
"outputs",
":",
"self",
".",
"_republish_displaypub",
"(",
"output",
",",
"eid",
")",
"# finally, add pyout:",
"for",
"eid",
",",
"r",
",",
"pyout",
"in",
"zip",
"(",
"targets",
",",
"results",
",",
"pyouts",
")",
":",
"if",
"pyout",
"is",
"not",
"None",
":",
"display",
"(",
"r",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"groupby must be one of 'type', 'engine', 'collate', not %r\"",
"%",
"groupby",
")"
] | 37.306931 | 18.29703 |
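As a rough usage sketch: this method lives on an `AsyncResult`-style object in the IPython.parallel-era API; the client setup below is illustrative, not taken from the source, and requires a running ipcluster.

```python
# Illustrative IPython.parallel-era usage; assumes a running ipcluster.
from IPython.parallel import Client

rc = Client()
view = rc[:]                             # DirectView over all engines
ar = view.execute('print("hello")')      # returns an AsyncResult
ar.wait()
ar.display_outputs(groupby="type")       # all stdout, then all stderr, ...
ar.display_outputs(groupby="engine")     # one engine at a time
```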
def bootstrap_datapackage(repo, force=False,
                          options=None, noinput=False):
    """
    Create the datapackage file.
    """

    print("Bootstrapping datapackage")

    # get the directory
    tsprefix = datetime.now().date().isoformat()

    # Initial data package json (duplicate 'title'/'description' entries removed)
    package = OrderedDict([
        ('title', ''),
        ('description', ''),
        ('username', repo.username),
        ('reponame', repo.reponame),
        ('name', str(repo)),
        ('keywords', []),
        ('resources', []),
        ('creator', getpass.getuser()),
        ('createdat', datetime.now().isoformat()),
        ('remote-url', repo.remoteurl)
    ])

    if options is not None:
        package['title'] = options['title']
        package['description'] = options['description']
    else:
        if noinput:
            raise IncompleteParameters("Option field with title and description")

        for var in ['title', 'description']:
            value = ''
            while value in ['', None]:
                value = input('Your Repo ' + var.title() + ": ")
                if len(value) == 0:
                    print("{} cannot be empty. Please re-enter.".format(var.title()))

            package[var] = value

    # Now store the package...
    (handle, filename) = tempfile.mkstemp()
    with open(filename, 'w') as fd:
        fd.write(json.dumps(package, indent=4))

    repo.package = package

    return filename
|
[
"def",
"bootstrap_datapackage",
"(",
"repo",
",",
"force",
"=",
"False",
",",
"options",
"=",
"None",
",",
"noinput",
"=",
"False",
")",
":",
"print",
"(",
"\"Bootstrapping datapackage\"",
")",
"# get the directory",
"tsprefix",
"=",
"datetime",
".",
"now",
"(",
")",
".",
"date",
"(",
")",
".",
"isoformat",
"(",
")",
"# Initial data package json",
"package",
"=",
"OrderedDict",
"(",
"[",
"(",
"'title'",
",",
"''",
")",
",",
"(",
"'description'",
",",
"''",
")",
",",
"(",
"'username'",
",",
"repo",
".",
"username",
")",
",",
"(",
"'reponame'",
",",
"repo",
".",
"reponame",
")",
",",
"(",
"'name'",
",",
"str",
"(",
"repo",
")",
")",
",",
"(",
"'title'",
",",
"\"\"",
")",
",",
"(",
"'description'",
",",
"\"\"",
")",
",",
"(",
"'keywords'",
",",
"[",
"]",
")",
",",
"(",
"'resources'",
",",
"[",
"]",
")",
",",
"(",
"'creator'",
",",
"getpass",
".",
"getuser",
"(",
")",
")",
",",
"(",
"'createdat'",
",",
"datetime",
".",
"now",
"(",
")",
".",
"isoformat",
"(",
")",
")",
",",
"(",
"'remote-url'",
",",
"repo",
".",
"remoteurl",
")",
"]",
")",
"if",
"options",
"is",
"not",
"None",
":",
"package",
"[",
"'title'",
"]",
"=",
"options",
"[",
"'title'",
"]",
"package",
"[",
"'description'",
"]",
"=",
"options",
"[",
"'description'",
"]",
"else",
":",
"if",
"noinput",
":",
"raise",
"IncompleteParameters",
"(",
"\"Option field with title and description\"",
")",
"for",
"var",
"in",
"[",
"'title'",
",",
"'description'",
"]",
":",
"value",
"=",
"''",
"while",
"value",
"in",
"[",
"''",
",",
"None",
"]",
":",
"value",
"=",
"input",
"(",
"'Your Repo '",
"+",
"var",
".",
"title",
"(",
")",
"+",
"\": \"",
")",
"if",
"len",
"(",
"value",
")",
"==",
"0",
":",
"print",
"(",
"\"{} cannot be empty. Please re-enter.\"",
".",
"format",
"(",
"var",
".",
"title",
"(",
")",
")",
")",
"package",
"[",
"var",
"]",
"=",
"value",
"# Now store the package...",
"(",
"handle",
",",
"filename",
")",
"=",
"tempfile",
".",
"mkstemp",
"(",
")",
"with",
"open",
"(",
"filename",
",",
"'w'",
")",
"as",
"fd",
":",
"fd",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"package",
",",
"indent",
"=",
"4",
")",
")",
"repo",
".",
"package",
"=",
"package",
"return",
"filename"
] | 27.884615 | 17.730769 |
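A usage sketch follows; `FakeRepo` is a made-up stand-in for the real repo object, which must expose `username`, `reponame`, `remoteurl`, and a `str()` form, per the attribute accesses above.

```python
# Hypothetical usage; FakeRepo stands in for the real repository object.
class FakeRepo:
    username = "alice"
    reponame = "demo"
    remoteurl = "git@example.com:alice/demo.git"
    def __str__(self):
        return "alice/demo"

opts = {"title": "Demo dataset", "description": "A toy datapackage"}
path = bootstrap_datapackage(FakeRepo(), options=opts)  # no prompting needed
print(path)  # temp file containing the datapackage JSON
```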
def update(cls, whitelist_sdd_id, monetary_account_paying_id=None,
           maximum_amount_per_month=None, custom_headers=None):
    """
    :type user_id: int
    :type whitelist_sdd_id: int
    :param monetary_account_paying_id: ID of the monetary account from which
        you want to pay.
    :type monetary_account_paying_id: int
    :param maximum_amount_per_month: The maximum amount of money that is
        allowed to be deducted based on the whitelist.
    :type maximum_amount_per_month: object_.Amount
    :type custom_headers: dict[str, str]|None
    :rtype: BunqResponseInt
    """

    if custom_headers is None:
        custom_headers = {}

    api_client = client.ApiClient(cls._get_api_context())
    request_map = {
        cls.FIELD_MONETARY_ACCOUNT_PAYING_ID: monetary_account_paying_id,
        cls.FIELD_MAXIMUM_AMOUNT_PER_MONTH: maximum_amount_per_month
    }
    request_map_string = converter.class_to_json(request_map)
    request_map_string = cls._remove_field_for_request(request_map_string)

    request_bytes = request_map_string.encode()
    endpoint_url = cls._ENDPOINT_URL_UPDATE.format(cls._determine_user_id(),
                                                   whitelist_sdd_id)
    response_raw = api_client.put(endpoint_url, request_bytes,
                                  custom_headers)

    return BunqResponseInt.cast_from_bunq_response(
        cls._process_for_id(response_raw)
    )
|
[
"def",
"update",
"(",
"cls",
",",
"whitelist_sdd_id",
",",
"monetary_account_paying_id",
"=",
"None",
",",
"maximum_amount_per_month",
"=",
"None",
",",
"custom_headers",
"=",
"None",
")",
":",
"if",
"custom_headers",
"is",
"None",
":",
"custom_headers",
"=",
"{",
"}",
"api_client",
"=",
"client",
".",
"ApiClient",
"(",
"cls",
".",
"_get_api_context",
"(",
")",
")",
"request_map",
"=",
"{",
"cls",
".",
"FIELD_MONETARY_ACCOUNT_PAYING_ID",
":",
"monetary_account_paying_id",
",",
"cls",
".",
"FIELD_MAXIMUM_AMOUNT_PER_MONTH",
":",
"maximum_amount_per_month",
"}",
"request_map_string",
"=",
"converter",
".",
"class_to_json",
"(",
"request_map",
")",
"request_map_string",
"=",
"cls",
".",
"_remove_field_for_request",
"(",
"request_map_string",
")",
"request_bytes",
"=",
"request_map_string",
".",
"encode",
"(",
")",
"endpoint_url",
"=",
"cls",
".",
"_ENDPOINT_URL_UPDATE",
".",
"format",
"(",
"cls",
".",
"_determine_user_id",
"(",
")",
",",
"whitelist_sdd_id",
")",
"response_raw",
"=",
"api_client",
".",
"put",
"(",
"endpoint_url",
",",
"request_bytes",
",",
"custom_headers",
")",
"return",
"BunqResponseInt",
".",
"cast_from_bunq_response",
"(",
"cls",
".",
"_process_for_id",
"(",
"response_raw",
")",
")"
] | 40.891892 | 22.621622 |
def format_level_1_memory(memory):
    """ Format an experiment result memory object for measurement level 1.

    Args:
        memory (list): Memory from experiment with `meas_level==1`. `avg` or
            `single` will be inferred from shape of result memory.

    Returns:
        np.ndarray: Measurement level 1 complex numpy array

    Raises:
        QiskitError: If the returned numpy array does not have 1 (avg) or 2 (single)
            indices.
    """
    formatted_memory = _list_to_complex_array(memory)
    # infer meas_return from shape of returned data.
    if not 1 <= len(formatted_memory.shape) <= 2:
        raise QiskitError('Level one memory is not of correct shape.')
    return formatted_memory
|
[
"def",
"format_level_1_memory",
"(",
"memory",
")",
":",
"formatted_memory",
"=",
"_list_to_complex_array",
"(",
"memory",
")",
"# infer meas_return from shape of returned data.",
"if",
"not",
"1",
"<=",
"len",
"(",
"formatted_memory",
".",
"shape",
")",
"<=",
"2",
":",
"raise",
"QiskitError",
"(",
"'Level one memory is not of correct shape.'",
")",
"return",
"formatted_memory"
] | 37.052632 | 22.894737 |
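The shape check encodes the convention that averaged level-1 memory is 1-D (one complex IQ point per memory slot) while single-shot memory is 2-D (shots x slots). A standalone sketch of the same inference, using plain numpy instead of the private `_list_to_complex_array` helper:

```python
import numpy as np

avg_memory = np.array([1 + 1j, 0.5 - 0.2j])          # shape (slots,): meas_return 'avg'
single_memory = np.array([[1 + 1j], [0.9 + 1.1j]])   # shape (shots, slots): 'single'

for mem in (avg_memory, single_memory):
    assert 1 <= mem.ndim <= 2, "level-1 memory must be 1-D (avg) or 2-D (single)"
    print("avg" if mem.ndim == 1 else "single")
```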
def handle_import_tags(userdata, import_root):
    """Handle @import(filepath)@ tags in a UserData script.

    :param import_root: Location for imports.
    :type import_root: str
    :param userdata: UserData script content.
    :type userdata: str
    :return: UserData script with the contents of the imported files.
    :rtype: str
    """
    imports = re.findall(r'@import\((.*?)\)@', userdata)
    if not imports:
        return userdata

    for filepath in imports:
        logger.info('Processing "import" of %s', filepath)
        import_path = os.path.join(import_root, filepath)
        try:
            with open(import_path) as fo:
                content = fo.read()
                userdata = userdata.replace('@import(%s)@' % filepath, content)
        except FileNotFoundError:
            raise UserDataException('Import path {} not found.'.format(import_path))
    return userdata
|
[
"def",
"handle_import_tags",
"(",
"userdata",
",",
"import_root",
")",
":",
"imports",
"=",
"re",
".",
"findall",
"(",
"'@import\\((.*?)\\)@'",
",",
"userdata",
")",
"# pylint: disable=anomalous-backslash-in-string",
"if",
"not",
"imports",
":",
"return",
"userdata",
"for",
"filepath",
"in",
"imports",
":",
"logger",
".",
"info",
"(",
"'Processing \"import\" of %s'",
",",
"filepath",
")",
"import_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"import_root",
",",
"filepath",
")",
"try",
":",
"with",
"open",
"(",
"import_path",
")",
"as",
"fo",
":",
"content",
"=",
"fo",
".",
"read",
"(",
")",
"userdata",
"=",
"userdata",
".",
"replace",
"(",
"'@import(%s)@'",
"%",
"filepath",
",",
"content",
")",
"except",
"FileNotFoundError",
":",
"raise",
"UserDataException",
"(",
"'Import path {} not found.'",
".",
"format",
"(",
"import_path",
")",
")",
"return",
"userdata"
] | 40.44 | 20.44 |
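A quick usage sketch, assuming the function's module-level dependencies (`re`, `os`, `logger`, `UserDataException`) are in place; the file names are invented:

```python
import os
import tempfile

root = tempfile.mkdtemp()
with open(os.path.join(root, "snippet.sh"), "w") as fh:
    fh.write("echo imported")

script = "#!/bin/bash\n@import(snippet.sh)@\n"
print(handle_import_tags(script, root))
# -> #!/bin/bash
#    echo imported
```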
def generate_PID_name(self, prefix=None):
    '''
    Generate a unique random Handle name (random UUID). The Handle is not
    registered. If a prefix is specified, the PID name has the syntax
    <prefix>/<generatedname>, otherwise it just returns the generated
    random name (suffix for the Handle).

    :param prefix: Optional. The prefix to be used for the Handle name.
    :return: The handle name in the form <prefix>/<generatedsuffix> or
        <generatedsuffix>.
    '''
    LOGGER.debug('generate_PID_name...')

    randomuuid = uuid.uuid4()
    if prefix is not None:
        return prefix + '/' + str(randomuuid)
    else:
        return str(randomuuid)
|
[
"def",
"generate_PID_name",
"(",
"self",
",",
"prefix",
"=",
"None",
")",
":",
"LOGGER",
".",
"debug",
"(",
"'generate_PID_name...'",
")",
"randomuuid",
"=",
"uuid",
".",
"uuid4",
"(",
")",
"if",
"prefix",
"is",
"not",
"None",
":",
"return",
"prefix",
"+",
"'/'",
"+",
"str",
"(",
"randomuuid",
")",
"else",
":",
"return",
"str",
"(",
"randomuuid",
")"
] | 37.473684 | 22.526316 |
def new(cls, num_id, abstractNum_id):
    """
    Return a new ``<w:num>`` element having numId of *num_id* and having
    a ``<w:abstractNumId>`` child with val attribute set to
    *abstractNum_id*.
    """
    num = OxmlElement('w:num')
    num.numId = num_id
    abstractNumId = CT_DecimalNumber.new(
        'w:abstractNumId', abstractNum_id
    )
    num.append(abstractNumId)
    return num
|
[
"def",
"new",
"(",
"cls",
",",
"num_id",
",",
"abstractNum_id",
")",
":",
"num",
"=",
"OxmlElement",
"(",
"'w:num'",
")",
"num",
".",
"numId",
"=",
"num_id",
"abstractNumId",
"=",
"CT_DecimalNumber",
".",
"new",
"(",
"'w:abstractNumId'",
",",
"abstractNum_id",
")",
"num",
".",
"append",
"(",
"abstractNumId",
")",
"return",
"num"
] | 33.307692 | 12.846154 |
def serialize_wrapped_key(key_provider, wrapping_algorithm, wrapping_key_id, encrypted_wrapped_key):
    """Serializes EncryptedData into a Wrapped EncryptedDataKey.

    :param key_provider: Info for Wrapping MasterKey
    :type key_provider: aws_encryption_sdk.structures.MasterKeyInfo
    :param wrapping_algorithm: Wrapping Algorithm with which to wrap plaintext_data_key
    :type wrapping_algorithm: aws_encryption_sdk.identifiers.WrappingAlgorithm
    :param bytes wrapping_key_id: Key ID of wrapping MasterKey
    :param encrypted_wrapped_key: Encrypted data key
    :type encrypted_wrapped_key: aws_encryption_sdk.internal.structures.EncryptedData
    :returns: Wrapped EncryptedDataKey
    :rtype: aws_encryption_sdk.structures.EncryptedDataKey
    """
    if encrypted_wrapped_key.iv is None:
        key_info = wrapping_key_id
        key_ciphertext = encrypted_wrapped_key.ciphertext
    else:
        key_info = struct.pack(
            ">{key_id_len}sII{iv_len}s".format(
                key_id_len=len(wrapping_key_id), iv_len=wrapping_algorithm.algorithm.iv_len
            ),
            to_bytes(wrapping_key_id),
            len(encrypted_wrapped_key.tag) * 8,  # Tag Length is stored in bits, not bytes
            wrapping_algorithm.algorithm.iv_len,
            encrypted_wrapped_key.iv,
        )
        key_ciphertext = encrypted_wrapped_key.ciphertext + encrypted_wrapped_key.tag
    return EncryptedDataKey(
        key_provider=MasterKeyInfo(provider_id=key_provider.provider_id, key_info=key_info),
        encrypted_data_key=key_ciphertext,
    )
|
[
"def",
"serialize_wrapped_key",
"(",
"key_provider",
",",
"wrapping_algorithm",
",",
"wrapping_key_id",
",",
"encrypted_wrapped_key",
")",
":",
"if",
"encrypted_wrapped_key",
".",
"iv",
"is",
"None",
":",
"key_info",
"=",
"wrapping_key_id",
"key_ciphertext",
"=",
"encrypted_wrapped_key",
".",
"ciphertext",
"else",
":",
"key_info",
"=",
"struct",
".",
"pack",
"(",
"\">{key_id_len}sII{iv_len}s\"",
".",
"format",
"(",
"key_id_len",
"=",
"len",
"(",
"wrapping_key_id",
")",
",",
"iv_len",
"=",
"wrapping_algorithm",
".",
"algorithm",
".",
"iv_len",
")",
",",
"to_bytes",
"(",
"wrapping_key_id",
")",
",",
"len",
"(",
"encrypted_wrapped_key",
".",
"tag",
")",
"*",
"8",
",",
"# Tag Length is stored in bits, not bytes",
"wrapping_algorithm",
".",
"algorithm",
".",
"iv_len",
",",
"encrypted_wrapped_key",
".",
"iv",
",",
")",
"key_ciphertext",
"=",
"encrypted_wrapped_key",
".",
"ciphertext",
"+",
"encrypted_wrapped_key",
".",
"tag",
"return",
"EncryptedDataKey",
"(",
"key_provider",
"=",
"MasterKeyInfo",
"(",
"provider_id",
"=",
"key_provider",
".",
"provider_id",
",",
"key_info",
"=",
"key_info",
")",
",",
"encrypted_data_key",
"=",
"key_ciphertext",
",",
")"
] | 49.903226 | 22.903226 |
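The `struct.pack` call lays the key-info header out big-endian: the raw key ID bytes, a 4-byte tag length in bits, a 4-byte IV length, then the IV itself. A standalone sketch of that layout with dummy values:

```python
import struct

key_id = b"wrapping-key"   # 12 bytes
iv = b"\x00" * 12          # e.g. an AES-GCM IV
tag = b"\xff" * 16         # 16-byte authentication tag

key_info = struct.pack(
    ">{}sII{}s".format(len(key_id), len(iv)),
    key_id,
    len(tag) * 8,   # tag length is recorded in bits, not bytes
    len(iv),
    iv,
)
print(len(key_info))  # len(key_id) + 4 + 4 + len(iv) == 32
```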
def uri_unsplit_tree(uri_tree):
    """
    Unsplit a coded URI tree, which must also be coalesced by
    uri_tree_normalize().
    """
    scheme, authority, path, query, fragment = uri_tree
    if authority:
        user, passwd, host, port = authority
        if user and passwd:
            userinfo = user + ':' + passwd
        elif user:
            userinfo = user
        elif passwd:
            userinfo = ':' + passwd
        else:
            userinfo = None
        if host and port:
            hostport = host + ':' + port
        elif host:
            hostport = host
        elif port:
            hostport = ':' + port
        else:
            hostport = None
        if userinfo and hostport:
            authority = userinfo + '@' + hostport
        elif userinfo:
            authority = userinfo + '@'
        elif hostport:
            authority = hostport
        else:
            authority = None
    if query:
        query = unsplit_query(query)
    uri = ''
    if scheme:
        uri += scheme + ':'
    if authority:
        uri += '//' + authority
    if path:
        if (not authority) and path[0:2] == '//':
            uri += '//'
        uri += path
    if query:
        uri += '?' + query
    if fragment:
        uri += '#' + fragment
    return uri
|
[
"def",
"uri_unsplit_tree",
"(",
"uri_tree",
")",
":",
"scheme",
",",
"authority",
",",
"path",
",",
"query",
",",
"fragment",
"=",
"uri_tree",
"if",
"authority",
":",
"user",
",",
"passwd",
",",
"host",
",",
"port",
"=",
"authority",
"if",
"user",
"and",
"passwd",
":",
"userinfo",
"=",
"user",
"+",
"':'",
"+",
"passwd",
"elif",
"user",
":",
"userinfo",
"=",
"user",
"elif",
"passwd",
":",
"userinfo",
"=",
"':'",
"+",
"passwd",
"else",
":",
"userinfo",
"=",
"None",
"if",
"host",
"and",
"port",
":",
"hostport",
"=",
"host",
"+",
"':'",
"+",
"port",
"elif",
"host",
":",
"hostport",
"=",
"host",
"elif",
"port",
":",
"hostport",
"=",
"':'",
"+",
"port",
"else",
":",
"hostport",
"=",
"None",
"if",
"userinfo",
"and",
"hostport",
":",
"authority",
"=",
"userinfo",
"+",
"'@'",
"+",
"hostport",
"elif",
"userinfo",
":",
"authority",
"=",
"userinfo",
"+",
"'@'",
"elif",
"hostport",
":",
"authority",
"=",
"hostport",
"else",
":",
"authority",
"=",
"None",
"if",
"query",
":",
"query",
"=",
"unsplit_query",
"(",
"query",
")",
"uri",
"=",
"''",
"if",
"scheme",
":",
"uri",
"+=",
"scheme",
"+",
"':'",
"if",
"authority",
":",
"uri",
"+=",
"'//'",
"+",
"authority",
"if",
"path",
":",
"if",
"(",
"not",
"authority",
")",
"and",
"path",
"[",
"0",
":",
"2",
"]",
"==",
"'//'",
":",
"uri",
"+=",
"'//'",
"uri",
"+=",
"path",
"if",
"query",
":",
"uri",
"+=",
"'?'",
"+",
"query",
"if",
"fragment",
":",
"uri",
"+=",
"'#'",
"+",
"fragment",
"return",
"uri"
] | 25.9375 | 15.1875 |
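An illustrative call; the tree shape is `(scheme, authority, path, query, fragment)` with `authority = (user, passwd, host, port)`. The query is left `None` here because rebuilding one depends on the external `unsplit_query()` helper:

```python
tree = ('https', ('alice', None, 'example.com', '8080'), '/index.html', None, 'top')
print(uri_unsplit_tree(tree))
# -> https://alice@example.com:8080/index.html#top
```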
def string_to_locale(value, strict=True):
    """
    Return an instance ``Locale`` corresponding to the string
    representation of a locale.

    @param value: a string representation of a locale, i.e., an ISO 639-3
        alpha-3 code (or alpha-2 code), optionally followed by a dash
        character ``-`` and an ISO 3166-1 alpha-2 code.

    @param strict: indicate whether the string representation of a locale
        has to be strictly compliant with RFC 4646, or whether a Java-
        style locale (character ``_`` instead of ``-``) is accepted.

    @return: an instance ``Locale``.
    """
    try:
        return None if is_undefined(value) else Locale.from_string(value, strict=strict)
    except Locale.MalformedLocaleException as exception:
        if strict:
            raise exception
|
[
"def",
"string_to_locale",
"(",
"value",
",",
"strict",
"=",
"True",
")",
":",
"try",
":",
"return",
"None",
"if",
"is_undefined",
"(",
"value",
")",
"else",
"Locale",
".",
"from_string",
"(",
"value",
",",
"strict",
"=",
"strict",
")",
"except",
"Locale",
".",
"MalformedLocaleException",
",",
"exception",
":",
"if",
"strict",
":",
"raise",
"exception"
] | 39.2 | 22.5 |
def options(self, **kwds):
    """
    Change options for interactive functions.

    Returns
    -------
    A new :class:`_InteractFactory` which will apply the
    options when called.
    """
    opts = dict(self.opts)
    for k in kwds:
        try:
            # Ensure that the key exists because we want to change
            # existing options, not add new ones.
            _ = opts[k]
        except KeyError:
            raise ValueError("invalid option {!r}".format(k))
        opts[k] = kwds[k]
    return type(self)(self.cls, opts, self.kwargs)
|
[
"def",
"options",
"(",
"self",
",",
"*",
"*",
"kwds",
")",
":",
"opts",
"=",
"dict",
"(",
"self",
".",
"opts",
")",
"for",
"k",
"in",
"kwds",
":",
"try",
":",
"# Ensure that the key exists because we want to change",
"# existing options, not add new ones.",
"_",
"=",
"opts",
"[",
"k",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"\"invalid option {!r}\"",
".",
"format",
"(",
"k",
")",
")",
"opts",
"[",
"k",
"]",
"=",
"kwds",
"[",
"k",
"]",
"return",
"type",
"(",
"self",
")",
"(",
"self",
".",
"cls",
",",
"opts",
",",
"self",
".",
"kwargs",
")"
] | 32.052632 | 16.578947 |
def get_masked_cnv_manifest(tcga_id):
    """Get manifest for masked TCGA copy-number variation data.

    Params
    ------
    tcga_id : str
        The TCGA project ID.

    Returns
    -------
    `pandas.DataFrame`
        The manifest.
    """
    payload = {
        "filters": json.dumps({
            "op": "and",
            "content": [
                {
                    "op": "in",
                    "content": {
                        "field": "cases.project.program.name",
                        "value": ["TCGA"]}},
                {
                    "op": "in",
                    "content": {
                        "field": "cases.project.project_id",
                        "value": [tcga_id]}},
                {
                    "op": "in",
                    "content": {
                        "field": "files.data_category",
                        "value": ["Copy Number Variation"]}},
                {
                    "op": "in",
                    "content": {
                        "field": "files.data_type",
                        "value": ["Masked Copy Number Segment"]}}]
        }),
        "return_type": "manifest",
        "size": 10000,
    }
    r = requests.get('https://gdc-api.nci.nih.gov/files', params=payload)
    df = pd.read_csv(io.StringIO(r.text), sep='\t', header=0)
    logger.info('Obtained manifest with %d files.', df.shape[0])
    return df
|
[
"def",
"get_masked_cnv_manifest",
"(",
"tcga_id",
")",
":",
"payload",
"=",
"{",
"\"filters\"",
":",
"json",
".",
"dumps",
"(",
"{",
"\"op\"",
":",
"\"and\"",
",",
"\"content\"",
":",
"[",
"{",
"\"op\"",
":",
"\"in\"",
",",
"\"content\"",
":",
"{",
"\"field\"",
":",
"\"cases.project.program.name\"",
",",
"\"value\"",
":",
"[",
"\"TCGA\"",
"]",
"}",
"}",
",",
"{",
"\"op\"",
":",
"\"in\"",
",",
"\"content\"",
":",
"{",
"\"field\"",
":",
"\"cases.project.project_id\"",
",",
"\"value\"",
":",
"[",
"tcga_id",
"]",
"}",
"}",
",",
"{",
"\"op\"",
":",
"\"in\"",
",",
"\"content\"",
":",
"{",
"\"field\"",
":",
"\"files.data_category\"",
",",
"\"value\"",
":",
"[",
"\"Copy Number Variation\"",
"]",
"}",
"}",
",",
"{",
"\"op\"",
":",
"\"in\"",
",",
"\"content\"",
":",
"{",
"\"field\"",
":",
"\"files.data_type\"",
",",
"\"value\"",
":",
"[",
"\"Masked Copy Number Segment\"",
"]",
"}",
"}",
"]",
"}",
")",
",",
"\"return_type\"",
":",
"\"manifest\"",
",",
"\"size\"",
":",
"10000",
",",
"}",
"r",
"=",
"requests",
".",
"get",
"(",
"'https://gdc-api.nci.nih.gov/files'",
",",
"params",
"=",
"payload",
")",
"df",
"=",
"pd",
".",
"read_csv",
"(",
"io",
".",
"StringIO",
"(",
"r",
".",
"text",
")",
",",
"sep",
"=",
"'\\t'",
",",
"header",
"=",
"0",
")",
"logger",
".",
"info",
"(",
"'Obtained manifest with %d files.'",
",",
"df",
".",
"shape",
"[",
"0",
"]",
")",
"return",
"df"
] | 29.875 | 18 |
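A usage sketch (note this performs a live HTTP request against the GDC API, and the endpoint URL may have changed since this code was written):

```python
manifest = get_masked_cnv_manifest('TCGA-BRCA')   # live network call
print(manifest.shape[0], 'files')
print(manifest.columns.tolist())  # typically: id, filename, md5, size, state
```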
def jobUpdateResults(self, jobID, results):
    """ Update the results string and last-update-time fields of a model.

    Parameters:
    ----------------------------------------------------------------
    jobID:      job ID of model to modify
    results:    new results (json dict string)
    """
    with ConnectionFactory.get() as conn:
        query = 'UPDATE %s SET _eng_last_update_time=UTC_TIMESTAMP(), ' \
                ' results=%%s ' \
                ' WHERE job_id=%%s' % (self.jobsTableName,)
        conn.cursor.execute(query, [results, jobID])
|
[
"def",
"jobUpdateResults",
"(",
"self",
",",
"jobID",
",",
"results",
")",
":",
"with",
"ConnectionFactory",
".",
"get",
"(",
")",
"as",
"conn",
":",
"query",
"=",
"'UPDATE %s SET _eng_last_update_time=UTC_TIMESTAMP(), '",
"' results=%%s '",
"' WHERE job_id=%%s'",
"%",
"(",
"self",
".",
"jobsTableName",
",",
")",
"conn",
".",
"cursor",
".",
"execute",
"(",
"query",
",",
"[",
"results",
",",
"jobID",
"]",
")"
] | 43.461538 | 13.461538 |
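Note the doubled `%%s` placeholders: the query string first passes through Python `%`-formatting to splice in the table name, which collapses each `%%s` to the `%s` placeholder the DB-API driver later binds. A sketch of the two-stage substitution:

```python
query = ('UPDATE %s SET _eng_last_update_time=UTC_TIMESTAMP(), '
         ' results=%%s  WHERE job_id=%%s' % ('jobs',))
print(query)
# UPDATE jobs SET _eng_last_update_time=UTC_TIMESTAMP(),  results=%s  WHERE job_id=%s
# cursor.execute(query, [results, jobID]) then binds the two %s parameters safely.
```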
def write_line_list(argname, cmd, basename, filename):
    """
    Write out the retrieved value as list of lines.
    """
    values = getattr(cmd.distribution, argname, None)
    if isinstance(values, list):
        values = '\n'.join(values)
    cmd.write_or_delete_file(argname, filename, values, force=True)
|
[
"def",
"write_line_list",
"(",
"argname",
",",
"cmd",
",",
"basename",
",",
"filename",
")",
":",
"values",
"=",
"getattr",
"(",
"cmd",
".",
"distribution",
",",
"argname",
",",
"None",
")",
"if",
"isinstance",
"(",
"values",
",",
"list",
")",
":",
"values",
"=",
"'\\n'",
".",
"join",
"(",
"values",
")",
"cmd",
".",
"write_or_delete_file",
"(",
"argname",
",",
"filename",
",",
"values",
",",
"force",
"=",
"True",
")"
] | 33.888889 | 13.222222 |
def remove_update_callback(self, group, name=None, cb=None):
    """Remove the supplied callback for a group or a group.name"""
    if not cb:
        return

    if not name:
        if group in self.group_update_callbacks:
            self.group_update_callbacks[group].remove_callback(cb)
    else:
        paramname = '{}.{}'.format(group, name)
        if paramname in self.param_update_callbacks:
            self.param_update_callbacks[paramname].remove_callback(cb)
|
[
"def",
"remove_update_callback",
"(",
"self",
",",
"group",
",",
"name",
"=",
"None",
",",
"cb",
"=",
"None",
")",
":",
"if",
"not",
"cb",
":",
"return",
"if",
"not",
"name",
":",
"if",
"group",
"in",
"self",
".",
"group_update_callbacks",
":",
"self",
".",
"group_update_callbacks",
"[",
"group",
"]",
".",
"remove_callback",
"(",
"cb",
")",
"else",
":",
"paramname",
"=",
"'{}.{}'",
".",
"format",
"(",
"group",
",",
"name",
")",
"if",
"paramname",
"in",
"self",
".",
"param_update_callbacks",
":",
"self",
".",
"param_update_callbacks",
"[",
"paramname",
"]",
".",
"remove_callback",
"(",
"cb",
")"
] | 41.833333 | 21.166667 |
def name(self, filetype, **kwargs):
    """Return the name of a file of a given type, with no directory
    information.

    Parameters
    ----------
    filetype : str
        File type parameter.

    Returns
    -------
    name : str
        Name of a file with no directory information.
    """
    full = kwargs.get('full', None)
    if not full:
        full = self.full(filetype, **kwargs)
    return os.path.basename(full)
|
[
"def",
"name",
"(",
"self",
",",
"filetype",
",",
"*",
"*",
"kwargs",
")",
":",
"full",
"=",
"kwargs",
".",
"get",
"(",
"'full'",
",",
"None",
")",
"if",
"not",
"full",
":",
"full",
"=",
"self",
".",
"full",
"(",
"filetype",
",",
"*",
"*",
"kwargs",
")",
"return",
"os",
".",
"path",
".",
"basename",
"(",
"full",
")"
] | 23.736842 | 18.736842 |
async def post(self, public_key, coinid):
    """Writes content to blockchain

    Accepts:
        Query string args:
            - "public_key" - str
            - "coin id" - str
        Request body arguments:
            - message (signed dict as json):
                - "cus" (content) - str
                - "description" - str
                - "read_access" (price for read access) - int
                - "write_access" (price for write access) - int
            - signature

    Returns:
        - dictionary with following fields:
            - "owneraddr" - str
            - "description" - str
            - "read_price" - int
            - "write_price" - int

    Verified: True
    """
    logging.debug("[+] -- Post content debugging. ")
    #if settings.SIGNATURE_VERIFICATION:
    #    super().verify()

    # Define genesis variables
    if coinid in settings.bridges.keys():  # Define bridge url
        owneraddr = self.account.validator[coinid](public_key)  # Define owner address
        logging.debug("\n\n Owner address")
        logging.debug(coinid)
        logging.debug(owneraddr)
        self.account.blockchain.setendpoint(settings.bridges[coinid])
    else:
        self.set_status(400)
        self.write({"error": 400, "reason": "Invalid coinid"})
        raise tornado.web.Finish

    # Check if account exists
    account = await self.account.getaccountdata(public_key=public_key)
    logging.debug("\n Users account ")
    logging.debug(account)
    if "error" in account.keys():
        self.set_status(account["error"])
        self.write(account)
        raise tornado.web.Finish

    # Get message from request
    try:
        data = json.loads(self.request.body)
    except:
        self.set_status(400)
        self.write({"error": 400, "reason": "Unexpected data format. JSON required"})
        raise tornado.web.Finish

    if isinstance(data["message"], str):
        message = json.loads(data["message"])
    elif isinstance(data["message"], dict):
        message = data["message"]

    cus = message.get("cus")
    description = message.get("description")
    read_access = message.get("read_access")
    write_access = message.get("write_access")

    if sys.getsizeof(cus) > 1000000:
        self.set_status(403)
        self.write({"error": 403, "reason": "Exceeded the content size limit."})
        raise tornado.web.Finish

    # Set fee
    fee = await billing.upload_content_fee(cus=cus, owneraddr=owneraddr,
                                           description=description)
    if "error" in fee.keys():
        self.set_status(fee["error"])
        self.write(fee)
        raise tornado.web.Finish

    # Send request to bridge
    data = {"cus": cus,
            "owneraddr": owneraddr,
            "description": description,
            "read_price": read_access,
            "write_price": write_access
            }
    response = await self.account.blockchain.makecid(**data)
    logging.debug("\n Bridge makecid")
    logging.debug(response)
    if "error" in response.keys():
        self.set_status(400)
        self.write(response)
        raise tornado.web.Finish

    # Write cid to database
    db_content = await self.account.setuserscontent(
        public_key=public_key, hash=response["cus_hash"],
        coinid=coinid, txid=response["result"]["txid"], access="content")
    logging.debug("\n Database content")
    logging.debug(db_content)

    response = {i: data[i] for i in data if i != "cus"}
    self.write(response)
|
[
"async",
"def",
"post",
"(",
"self",
",",
"public_key",
",",
"coinid",
")",
":",
"logging",
".",
"debug",
"(",
"\"[+] -- Post content debugging. \"",
")",
"#if settings.SIGNATURE_VERIFICATION:",
"#\tsuper().verify()",
"# Define genesis variables",
"if",
"coinid",
"in",
"settings",
".",
"bridges",
".",
"keys",
"(",
")",
":",
"# Define bridge url",
"owneraddr",
"=",
"self",
".",
"account",
".",
"validator",
"[",
"coinid",
"]",
"(",
"public_key",
")",
"# Define owner address",
"logging",
".",
"debug",
"(",
"\"\\n\\n Owner address\"",
")",
"logging",
".",
"debug",
"(",
"coinid",
")",
"logging",
".",
"debug",
"(",
"owneraddr",
")",
"self",
".",
"account",
".",
"blockchain",
".",
"setendpoint",
"(",
"settings",
".",
"bridges",
"[",
"coinid",
"]",
")",
"else",
":",
"self",
".",
"set_status",
"(",
"400",
")",
"self",
".",
"write",
"(",
"{",
"\"error\"",
":",
"400",
",",
"\"reason\"",
":",
"\"Invalid coinid\"",
"}",
")",
"raise",
"tornado",
".",
"web",
".",
"Finish",
"# Check if account exists",
"account",
"=",
"await",
"self",
".",
"account",
".",
"getaccountdata",
"(",
"public_key",
"=",
"public_key",
")",
"logging",
".",
"debug",
"(",
"\"\\n Users account \"",
")",
"logging",
".",
"debug",
"(",
"account",
")",
"if",
"\"error\"",
"in",
"account",
".",
"keys",
"(",
")",
":",
"self",
".",
"set_status",
"(",
"account",
"[",
"\"error\"",
"]",
")",
"self",
".",
"write",
"(",
"account",
")",
"raise",
"tornado",
".",
"web",
".",
"Finish",
"# Get message from request ",
"try",
":",
"data",
"=",
"json",
".",
"loads",
"(",
"self",
".",
"request",
".",
"body",
")",
"except",
":",
"self",
".",
"set_status",
"(",
"400",
")",
"self",
".",
"write",
"(",
"{",
"\"error\"",
":",
"400",
",",
"\"reason\"",
":",
"\"Unexpected data format. JSON required\"",
"}",
")",
"raise",
"tornado",
".",
"web",
".",
"Finish",
"if",
"isinstance",
"(",
"data",
"[",
"\"message\"",
"]",
",",
"str",
")",
":",
"message",
"=",
"json",
".",
"loads",
"(",
"data",
"[",
"\"message\"",
"]",
")",
"elif",
"isinstance",
"(",
"data",
"[",
"\"message\"",
"]",
",",
"dict",
")",
":",
"message",
"=",
"data",
"[",
"\"message\"",
"]",
"cus",
"=",
"message",
".",
"get",
"(",
"\"cus\"",
")",
"description",
"=",
"message",
".",
"get",
"(",
"\"description\"",
")",
"read_access",
"=",
"message",
".",
"get",
"(",
"\"read_access\"",
")",
"write_access",
"=",
"message",
".",
"get",
"(",
"\"write_access\"",
")",
"if",
"sys",
".",
"getsizeof",
"(",
"cus",
")",
">",
"1000000",
":",
"self",
".",
"set_status",
"(",
"403",
")",
"self",
".",
"write",
"(",
"{",
"\"error\"",
":",
"400",
",",
"\"reason\"",
":",
"\"Exceeded the content size limit.\"",
"}",
")",
"raise",
"tornado",
".",
"web",
".",
"Finish",
"# Set fee",
"fee",
"=",
"await",
"billing",
".",
"upload_content_fee",
"(",
"cus",
"=",
"cus",
",",
"owneraddr",
"=",
"owneraddr",
",",
"description",
"=",
"description",
")",
"if",
"\"error\"",
"in",
"fee",
".",
"keys",
"(",
")",
":",
"self",
".",
"set_status",
"(",
"fee",
"[",
"\"error\"",
"]",
")",
"self",
".",
"write",
"(",
"fee",
")",
"raise",
"tornado",
".",
"web",
".",
"Finish",
"# Send request to bridge",
"data",
"=",
"{",
"\"cus\"",
":",
"cus",
",",
"\"owneraddr\"",
":",
"owneraddr",
",",
"\"description\"",
":",
"description",
",",
"\"read_price\"",
":",
"read_access",
",",
"\"write_price\"",
":",
"write_access",
"}",
"response",
"=",
"await",
"self",
".",
"account",
".",
"blockchain",
".",
"makecid",
"(",
"*",
"*",
"data",
")",
"logging",
".",
"debug",
"(",
"\"\\n Bridge makecid\"",
")",
"logging",
".",
"debug",
"(",
"response",
")",
"if",
"\"error\"",
"in",
"response",
".",
"keys",
"(",
")",
":",
"self",
".",
"set_status",
"(",
"400",
")",
"self",
".",
"write",
"(",
"response",
")",
"raise",
"tornado",
".",
"web",
".",
"Finish",
"# Write cid to database",
"db_content",
"=",
"await",
"self",
".",
"account",
".",
"setuserscontent",
"(",
"public_key",
"=",
"public_key",
",",
"hash",
"=",
"response",
"[",
"\"cus_hash\"",
"]",
",",
"coinid",
"=",
"coinid",
",",
"txid",
"=",
"response",
"[",
"\"result\"",
"]",
"[",
"\"txid\"",
"]",
",",
"access",
"=",
"\"content\"",
")",
"logging",
".",
"debug",
"(",
"\"\\n Database content\"",
")",
"logging",
".",
"debug",
"(",
"db_content",
")",
"response",
"=",
"{",
"i",
":",
"data",
"[",
"i",
"]",
"for",
"i",
"in",
"data",
"if",
"i",
"!=",
"\"cus\"",
"}",
"self",
".",
"write",
"(",
"response",
")"
] | 28.261682 | 19.233645 |
def decrypt(*args, **kwargs):
    """ Decrypts legacy or spec-compliant JOSE token.

    First attempts to decrypt the token in a legacy mode
    (https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-19).
    If it is not a valid legacy token then attempts to decrypt it in a
    spec-compliant way (http://tools.ietf.org/html/rfc7519)
    """
    try:
        return legacy_decrypt(*args, **kwargs)
    except (NotYetValid, Expired) as e:
        # these should be raised immediately.
        # The token has been decrypted successfully to get to here.
        # decrypting using `legacy_decrypt` will not help things.
        raise e
    except (Error, ValueError) as e:
        return spec_compliant_decrypt(*args, **kwargs)
|
[
"def",
"decrypt",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"legacy_decrypt",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"(",
"NotYetValid",
",",
"Expired",
")",
"as",
"e",
":",
"# these should be raised immediately.",
"# The token has been decrypted successfully to get to here.",
"# decrypting using `legacy_decrypt` will not help things.",
"raise",
"e",
"except",
"(",
"Error",
",",
"ValueError",
")",
"as",
"e",
":",
"return",
"spec_compliant_decrypt",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | 44.875 | 15.25 |
def clean_honeypot(self):
    """Check that nothing's been entered into the honeypot."""
    value = self.cleaned_data["honeypot"]
    if value:
        raise forms.ValidationError(self.fields["honeypot"].label)
    return value
|
[
"def",
"clean_honeypot",
"(",
"self",
")",
":",
"value",
"=",
"self",
".",
"cleaned_data",
"[",
"\"honeypot\"",
"]",
"if",
"value",
":",
"raise",
"forms",
".",
"ValidationError",
"(",
"self",
".",
"fields",
"[",
"\"honeypot\"",
"]",
".",
"label",
")",
"return",
"value"
] | 40.5 | 15.5 |
def record_markdown(text, cellid):
    """Records the specified markdown text to the acorn database.

    Args:
        text (str): the *raw* markdown text entered into the cell in the ipython
            notebook.
    """
    from acorn.logging.database import record
    from time import time
    ekey = "nb-{}".format(cellid)

    global _cellid_map
    if cellid not in _cellid_map:
        from acorn.logging.database import active_db
        from difflib import SequenceMatcher
        from acorn.logging.diff import cascade
        taskdb = active_db()
        if ekey not in taskdb.entities:
            # Compute a new ekey if possible with the most similar markdown cell
            # in the database.
            possible = [k for k in taskdb.entities if k[0:3] == "nb-"]
            maxkey, maxvalue = None, 0.
            for pkey in possible:
                sequence = [e["c"] for e in taskdb.entities[pkey]]
                state = ''.join(cascade(sequence))
                matcher = SequenceMatcher(a=state, b=text)
                ratio = matcher.quick_ratio()
                if ratio > maxvalue and ratio > 0.5:
                    maxkey, maxvalue = pkey, ratio

            # We expect the similarity to be at least 0.5; otherwise we decide
            # that it is a new cell. Use the best-matching key, not the last
            # key iterated.
            if maxkey is not None:
                ekey = maxkey

        _cellid_map[cellid] = ekey

    ekey = _cellid_map[cellid]
    entry = {
        "m": "md",
        "a": None,
        "s": time(),
        "r": None,
        "c": text,
    }
    record(ekey, entry, diff=True)
|
[
"def",
"record_markdown",
"(",
"text",
",",
"cellid",
")",
":",
"from",
"acorn",
".",
"logging",
".",
"database",
"import",
"record",
"from",
"time",
"import",
"time",
"ekey",
"=",
"\"nb-{}\"",
".",
"format",
"(",
"cellid",
")",
"global",
"_cellid_map",
"if",
"cellid",
"not",
"in",
"_cellid_map",
":",
"from",
"acorn",
".",
"logging",
".",
"database",
"import",
"active_db",
"from",
"difflib",
"import",
"SequenceMatcher",
"from",
"acorn",
".",
"logging",
".",
"diff",
"import",
"cascade",
"taskdb",
"=",
"active_db",
"(",
")",
"if",
"ekey",
"not",
"in",
"taskdb",
".",
"entities",
":",
"#Compute a new ekey if possible with the most similar markdown cell",
"#in the database.",
"possible",
"=",
"[",
"k",
"for",
"k",
"in",
"taskdb",
".",
"entities",
"if",
"k",
"[",
"0",
":",
"3",
"]",
"==",
"\"nb-\"",
"]",
"maxkey",
",",
"maxvalue",
"=",
"None",
",",
"0.",
"for",
"pkey",
"in",
"possible",
":",
"sequence",
"=",
"[",
"e",
"[",
"\"c\"",
"]",
"for",
"e",
"in",
"taskdb",
".",
"entities",
"[",
"pkey",
"]",
"]",
"state",
"=",
"''",
".",
"join",
"(",
"cascade",
"(",
"sequence",
")",
")",
"matcher",
"=",
"SequenceMatcher",
"(",
"a",
"=",
"state",
",",
"b",
"=",
"text",
")",
"ratio",
"=",
"matcher",
".",
"quick_ratio",
"(",
")",
"if",
"ratio",
">",
"maxvalue",
"and",
"ratio",
">",
"0.5",
":",
"maxkey",
",",
"maxvalue",
"=",
"pkey",
",",
"ratio",
"#We expect the similarity to be at least 0.5; otherwise we decide",
"#that it is a new cell.",
"if",
"maxkey",
"is",
"not",
"None",
":",
"ekey",
"=",
"pkey",
"_cellid_map",
"[",
"cellid",
"]",
"=",
"ekey",
"ekey",
"=",
"_cellid_map",
"[",
"cellid",
"]",
"entry",
"=",
"{",
"\"m\"",
":",
"\"md\"",
",",
"\"a\"",
":",
"None",
",",
"\"s\"",
":",
"time",
"(",
")",
",",
"\"r\"",
":",
"None",
",",
"\"c\"",
":",
"text",
",",
"}",
"record",
"(",
"ekey",
",",
"entry",
",",
"diff",
"=",
"True",
")"
] | 33.787234 | 16.808511 |
def _try_run(self, run_func: Callable[[], None]) -> None:
    """
    Try running the given function (training/prediction).

    Calls
        - :py:meth:`cxflow.hooks.AbstractHook.before_training`
        - :py:meth:`cxflow.hooks.AbstractHook.after_training`

    :param run_func: function to be run
    """
    # Initialization: before_training
    for hook in self._hooks:
        hook.before_training()

    try:
        run_func()
    except TrainingTerminated as ex:
        logging.info('Training terminated: %s', ex)

    # After training: after_training
    for hook in self._hooks:
        hook.after_training()
|
[
"def",
"_try_run",
"(",
"self",
",",
"run_func",
":",
"Callable",
"[",
"[",
"]",
",",
"None",
"]",
")",
"->",
"None",
":",
"# Initialization: before_training",
"for",
"hook",
"in",
"self",
".",
"_hooks",
":",
"hook",
".",
"before_training",
"(",
")",
"try",
":",
"run_func",
"(",
")",
"except",
"TrainingTerminated",
"as",
"ex",
":",
"logging",
".",
"info",
"(",
"'Training terminated: %s'",
",",
"ex",
")",
"# After training: after_training",
"for",
"hook",
"in",
"self",
".",
"_hooks",
":",
"hook",
".",
"after_training",
"(",
")"
] | 30.363636 | 16.818182 |
def register_previewer(self, name, previewer):
    """Register a previewer in the system."""
    # The original wrapped this assert in `if name in self.previewers:`,
    # which made the guard unreachable except when it was about to fail.
    assert name not in self.previewers, \
        "Previewer with same name already registered"
    self.previewers[name] = previewer
    if hasattr(previewer, 'previewable_extensions'):
        self._previewable_extensions |= set(
            previewer.previewable_extensions)
|
[
"def",
"register_previewer",
"(",
"self",
",",
"name",
",",
"previewer",
")",
":",
"if",
"name",
"in",
"self",
".",
"previewers",
":",
"assert",
"name",
"not",
"in",
"self",
".",
"previewers",
",",
"\"Previewer with same name already registered\"",
"self",
".",
"previewers",
"[",
"name",
"]",
"=",
"previewer",
"if",
"hasattr",
"(",
"previewer",
",",
"'previewable_extensions'",
")",
":",
"self",
".",
"_previewable_extensions",
"|=",
"set",
"(",
"previewer",
".",
"previewable_extensions",
")"
] | 48.222222 | 8.333333 |
def get_genomic_seq_for_transcript(self, transcript_id, expand):
    """ obtain the sequence for a transcript from ensembl
    """
    headers = {"content-type": "application/json"}

    self.attempt = 0
    ext = "/sequence/id/{0}?type=genomic;expand_3prime={1};expand_5prime={1}".format(transcript_id, expand)
    r = self.ensembl_request(ext, headers)

    gene = json.loads(r)

    seq = gene["seq"]
    seq_id = gene["id"]

    if seq_id != transcript_id:
        raise ValueError("ensembl gave the wrong transcript")

    desc = gene["desc"].split(":")
    chrom = desc[2]
    start = int(desc[3]) + expand
    end = int(desc[4]) - expand
    strand_temp = int(desc[5])

    strand = "+"
    if strand_temp == -1:
        strand = "-"

    return (chrom, start, end, strand, seq)
|
[
"def",
"get_genomic_seq_for_transcript",
"(",
"self",
",",
"transcript_id",
",",
"expand",
")",
":",
"headers",
"=",
"{",
"\"content-type\"",
":",
"\"application/json\"",
"}",
"self",
".",
"attempt",
"=",
"0",
"ext",
"=",
"\"/sequence/id/{0}?type=genomic;expand_3prime={1};expand_5prime={1}\"",
".",
"format",
"(",
"transcript_id",
",",
"expand",
")",
"r",
"=",
"self",
".",
"ensembl_request",
"(",
"ext",
",",
"headers",
")",
"gene",
"=",
"json",
".",
"loads",
"(",
"r",
")",
"seq",
"=",
"gene",
"[",
"\"seq\"",
"]",
"seq_id",
"=",
"gene",
"[",
"\"id\"",
"]",
"if",
"seq_id",
"!=",
"transcript_id",
":",
"raise",
"ValueError",
"(",
"\"ensembl gave the wrong transcript\"",
")",
"desc",
"=",
"gene",
"[",
"\"desc\"",
"]",
".",
"split",
"(",
"\":\"",
")",
"chrom",
"=",
"desc",
"[",
"2",
"]",
"start",
"=",
"int",
"(",
"desc",
"[",
"3",
"]",
")",
"+",
"expand",
"end",
"=",
"int",
"(",
"desc",
"[",
"4",
"]",
")",
"-",
"expand",
"strand_temp",
"=",
"int",
"(",
"desc",
"[",
"5",
"]",
")",
"strand",
"=",
"\"+\"",
"if",
"strand_temp",
"==",
"-",
"1",
":",
"strand",
"=",
"\"-\"",
"return",
"(",
"chrom",
",",
"start",
",",
"end",
",",
"strand",
",",
"seq",
")"
] | 31.103448 | 18.758621 |
def get_shard_by_key_id(self, key_id):
    """
    get_shard_by_key_id returns the Redis shard given a key id.

    Keyword arguments:
    key_id -- the key id (e.g. '12345')

    This is similar to get_shard_by_key(key) except that it will not search
    for a key id within the curly braces.

    returns a redis.StrictRedis connection
    """
    shard_num = self.get_shard_num_by_key_id(key_id)
    return self.get_shard_by_num(shard_num)
|
[
"def",
"get_shard_by_key_id",
"(",
"self",
",",
"key_id",
")",
":",
"shard_num",
"=",
"self",
".",
"get_shard_num_by_key_id",
"(",
"key_id",
")",
"return",
"self",
".",
"get_shard_by_num",
"(",
"shard_num",
")"
] | 39.083333 | 13.25 |
def execute_reliabledictionary(client, application_name, service_name, input_file):
    """Execute create, update, delete operations on existing reliable dictionaries.

    Carry out create, update and delete operations on existing reliable dictionaries for given application and service.

    :param application_name: Name of the application.
    :type application_name: str
    :param service_name: Name of the service.
    :type service_name: str
    :param input_file: input file with list of json to provide the operation information for reliable dictionaries.
    """
    cluster = Cluster.from_sfclient(client)
    service = cluster.get_application(application_name).get_service(service_name)

    # call get service with headers and params
    with open(input_file) as json_file:
        json_data = json.load(json_file)
    service.execute(json_data)
    return
|
[
"def",
"execute_reliabledictionary",
"(",
"client",
",",
"application_name",
",",
"service_name",
",",
"input_file",
")",
":",
"cluster",
"=",
"Cluster",
".",
"from_sfclient",
"(",
"client",
")",
"service",
"=",
"cluster",
".",
"get_application",
"(",
"application_name",
")",
".",
"get_service",
"(",
"service_name",
")",
"# call get service with headers and params",
"with",
"open",
"(",
"input_file",
")",
"as",
"json_file",
":",
"json_data",
"=",
"json",
".",
"load",
"(",
"json_file",
")",
"service",
".",
"execute",
"(",
"json_data",
")",
"return"
] | 42.85 | 24.25 |
def run_script(self, script_id, params=None):
    """
    Runs a stored script.

    script_id:= id of stored script.
    params:= up to 10 parameters required by the script.

    ...
    s = pi.run_script(sid, [par1, par2])

    s = pi.run_script(sid)

    s = pi.run_script(sid, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    ...
    """
    # I p1 script id
    # I p2 0
    # I p3 params * 4 (0-10 params)
    # (optional) extension
    # I[] params
    if params is not None:
        ext = bytearray()
        for p in params:
            ext.extend(struct.pack("I", p))
        nump = len(params)
        extents = [ext]
    else:
        nump = 0
        extents = []
    res = yield from self._pigpio_aio_command_ext(_PI_CMD_PROCR, script_id,
                                                  0, nump * 4, extents)
    return _u2i(res)
|
[
"def",
"run_script",
"(",
"self",
",",
"script_id",
",",
"params",
"=",
"None",
")",
":",
"# I p1 script id",
"# I p2 0",
"# I p3 params * 4 (0-10 params)",
"# (optional) extension",
"# I[] params",
"if",
"params",
"is",
"not",
"None",
":",
"ext",
"=",
"bytearray",
"(",
")",
"for",
"p",
"in",
"params",
":",
"ext",
".",
"extend",
"(",
"struct",
".",
"pack",
"(",
"\"I\"",
",",
"p",
")",
")",
"nump",
"=",
"len",
"(",
"params",
")",
"extents",
"=",
"[",
"ext",
"]",
"else",
":",
"nump",
"=",
"0",
"extents",
"=",
"[",
"]",
"res",
"=",
"yield",
"from",
"self",
".",
"_pigpio_aio_command_ext",
"(",
"_PI_CMD_PROCR",
",",
"script_id",
",",
"0",
",",
"nump",
"*",
"4",
",",
"extents",
")",
"return",
"_u2i",
"(",
"res",
")"
] | 28.46875 | 18.09375 |
def str_to_inet(address):
    """Convert a string IP address to an inet struct.

    Args:
        address (str): String representation of address

    Returns:
        inet: Inet network address
    """
    # First try ipv4 and then ipv6
    try:
        return socket.inet_pton(socket.AF_INET, address)
    except socket.error:
        return socket.inet_pton(socket.AF_INET6, address)
|
[
"def",
"str_to_inet",
"(",
"address",
")",
":",
"# First try ipv4 and then ipv6",
"try",
":",
"return",
"socket",
".",
"inet_pton",
"(",
"socket",
".",
"AF_INET",
",",
"address",
")",
"except",
"socket",
".",
"error",
":",
"return",
"socket",
".",
"inet_pton",
"(",
"socket",
".",
"AF_INET6",
",",
"address",
")"
] | 30.076923 | 16.461538 |
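A round-trip sketch packing a v4 and a v6 address, then unpacking one again with the standard library:

```python
import socket

packed_v4 = str_to_inet('192.168.0.1')   # 4 bytes
packed_v6 = str_to_inet('::1')           # 16 bytes
print(len(packed_v4), len(packed_v6))    # -> 4 16
print(socket.inet_ntop(socket.AF_INET, packed_v4))  # -> 192.168.0.1
```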
def ReadHuntObject(self, hunt_id):
    """Reads a hunt object from the database."""
    try:
        return self._DeepCopy(self.hunts[hunt_id])
    except KeyError:
        raise db.UnknownHuntError(hunt_id)
|
[
"def",
"ReadHuntObject",
"(",
"self",
",",
"hunt_id",
")",
":",
"try",
":",
"return",
"self",
".",
"_DeepCopy",
"(",
"self",
".",
"hunts",
"[",
"hunt_id",
"]",
")",
"except",
"KeyError",
":",
"raise",
"db",
".",
"UnknownHuntError",
"(",
"hunt_id",
")"
] | 33 | 11 |
def _sanitize(cls, message):
    """
    Sanitize the given message,
    dealing with multiple arguments
    and/or string formatting.

    :param message: the log message to be sanitized
    :type  message: string or list of strings
    :rtype: string
    """
    if isinstance(message, list):
        if len(message) == 0:
            sanitized = u"Empty log message"
        elif len(message) == 1:
            sanitized = message[0]
        else:
            sanitized = message[0] % tuple(message[1:])
    else:
        sanitized = message
    if not gf.is_unicode(sanitized):
        raise TypeError("The given log message is not a Unicode string")
    return sanitized
|
[
"def",
"_sanitize",
"(",
"cls",
",",
"message",
")",
":",
"if",
"isinstance",
"(",
"message",
",",
"list",
")",
":",
"if",
"len",
"(",
"message",
")",
"==",
"0",
":",
"sanitized",
"=",
"u\"Empty log message\"",
"elif",
"len",
"(",
"message",
")",
"==",
"1",
":",
"sanitized",
"=",
"message",
"[",
"0",
"]",
"else",
":",
"sanitized",
"=",
"message",
"[",
"0",
"]",
"%",
"tuple",
"(",
"message",
"[",
"1",
":",
"]",
")",
"else",
":",
"sanitized",
"=",
"message",
"if",
"not",
"gf",
".",
"is_unicode",
"(",
"sanitized",
")",
":",
"raise",
"TypeError",
"(",
"\"The given log message is not a Unicode string\"",
")",
"return",
"sanitized"
] | 33.363636 | 11.909091 |
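The list form is plain `%`-formatting: the first element is the template, the rest are its arguments. The same behaviour, standalone:

```python
message = ["Loaded %d fragments in %.1f s", 42, 0.5]
print(message[0] % tuple(message[1:]))  # -> Loaded 42 fragments in 0.5 s
```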
def addRelationships(
    self,
    data: list,
    LIMIT: int = 20,
    _print: bool = True,
    crawl: bool = False,
) -> list:
    """
    data = [{
        "term1_id", "term2_id", "relationship_tid",
        "term1_version", "term2_version",
        "relationship_term_version",}]
    """
    url_base = self.base_url + '/api/1/term/add-relationship'
    relationships = []
    for relationship in data:
        relationship.update({
            'term1_version': relationship['term1_version'],
            'term2_version': relationship['term2_version'],
            'relationship_term_version': relationship['relationship_term_version']
        })
        relationships.append((url_base, relationship))
    return self.post(
        relationships,
        LIMIT=LIMIT,
        action='Adding Relationships',
        _print=_print,
        crawl=crawl,
    )
|
[
"def",
"addRelationships",
"(",
"self",
",",
"data",
":",
"list",
",",
"LIMIT",
":",
"int",
"=",
"20",
",",
"_print",
":",
"bool",
"=",
"True",
",",
"crawl",
":",
"bool",
"=",
"False",
",",
")",
"->",
"list",
":",
"url_base",
"=",
"self",
".",
"base_url",
"+",
"'/api/1/term/add-relationship'",
"relationships",
"=",
"[",
"]",
"for",
"relationship",
"in",
"data",
":",
"relationship",
".",
"update",
"(",
"{",
"'term1_version'",
":",
"relationship",
"[",
"'term1_version'",
"]",
",",
"'term2_version'",
":",
"relationship",
"[",
"'term2_version'",
"]",
",",
"'relationship_term_version'",
":",
"relationship",
"[",
"'relationship_term_version'",
"]",
"}",
")",
"relationships",
".",
"append",
"(",
"(",
"url_base",
",",
"relationship",
")",
")",
"return",
"self",
".",
"post",
"(",
"relationships",
",",
"LIMIT",
"=",
"LIMIT",
",",
"action",
"=",
"'Adding Relationships'",
",",
"_print",
"=",
"_print",
",",
"crawl",
"=",
"crawl",
",",
")"
] | 33 | 16.103448 |
def colum_avg(self, state):
    """Toggle backgroundcolor"""
    self.colum_avg_enabled = state > 0
    if self.colum_avg_enabled:
        self.return_max = lambda col_vals, index: col_vals[index]
    else:
        self.return_max = global_max
    self.reset()
|
[
"def",
"colum_avg",
"(",
"self",
",",
"state",
")",
":",
"self",
".",
"colum_avg_enabled",
"=",
"state",
">",
"0",
"if",
"self",
".",
"colum_avg_enabled",
":",
"self",
".",
"return_max",
"=",
"lambda",
"col_vals",
",",
"index",
":",
"col_vals",
"[",
"index",
"]",
"else",
":",
"self",
".",
"return_max",
"=",
"global_max",
"self",
".",
"reset",
"(",
")"
] | 36 | 12.125 |
def export_dist(self, args):
    """Copies a created dist to an output dir.

    This makes it easy to navigate to the dist to investigate it
    or call build.py, though you do not in general need to do this
    and can use the apk command instead.
    """
    ctx = self.ctx
    dist = dist_from_args(ctx, args)
    if dist.needs_build:
        raise BuildInterruptingException(
            'You asked to export a dist, but there is no dist '
            'with suitable recipes available. For now, you must '
            'create one first with the create argument.')
    if args.symlink:
        shprint(sh.ln, '-s', dist.dist_dir, args.output_dir)
    else:
        shprint(sh.cp, '-r', dist.dist_dir, args.output_dir)
|
[
"def",
"export_dist",
"(",
"self",
",",
"args",
")",
":",
"ctx",
"=",
"self",
".",
"ctx",
"dist",
"=",
"dist_from_args",
"(",
"ctx",
",",
"args",
")",
"if",
"dist",
".",
"needs_build",
":",
"raise",
"BuildInterruptingException",
"(",
"'You asked to export a dist, but there is no dist '",
"'with suitable recipes available. For now, you must '",
"' create one first with the create argument.'",
")",
"if",
"args",
".",
"symlink",
":",
"shprint",
"(",
"sh",
".",
"ln",
",",
"'-s'",
",",
"dist",
".",
"dist_dir",
",",
"args",
".",
"output_dir",
")",
"else",
":",
"shprint",
"(",
"sh",
".",
"cp",
",",
"'-r'",
",",
"dist",
".",
"dist_dir",
",",
"args",
".",
"output_dir",
")"
] | 42.722222 | 17.666667 |
def nifti_out(f):
    """ Picks a function whose first argument is an `img`, processes its
    data and returns a numpy array. This decorator wraps this numpy array
    into a nibabel.Nifti1Image."""
    @wraps(f)
    def wrapped(*args, **kwargs):
        r = f(*args, **kwargs)

        img = read_img(args[0])
        return nib.Nifti1Image(r, affine=img.get_affine(), header=img.header)

    return wrapped
|
[
"def",
"nifti_out",
"(",
"f",
")",
":",
"@",
"wraps",
"(",
"f",
")",
"def",
"wrapped",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"r",
"=",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"img",
"=",
"read_img",
"(",
"args",
"[",
"0",
"]",
")",
"return",
"nib",
".",
"Nifti1Image",
"(",
"r",
",",
"affine",
"=",
"img",
".",
"get_affine",
"(",
")",
",",
"header",
"=",
"img",
".",
"header",
")",
"return",
"wrapped"
] | 33.166667 | 20.666667 |
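A sketch of decorating a function with `nifti_out`; `read_img` is assumed to be the module's own loader and `get_data()` matches the nibabel API of the same era as `get_affine()`, so treat both as assumptions:

```python
# Hypothetical decorated function: thresholds an image and gets the result
# wrapped back into a Nifti1Image carrying the input's affine and header.
@nifti_out
def binarize(img, threshold=0.5):
    data = read_img(img).get_data()   # assumed module loader + nibabel API
    return (data > threshold).astype('uint8')

# mask = binarize('/path/to/scan.nii.gz', threshold=1.0)  # -> nib.Nifti1Image
```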
def do_blame(self, subcmd, opts, *args):
    """Output the content of specified files or
    URLs with revision and author information in-line.

    usage:
        blame TARGET...

    ${cmd_option_list}
    """
    print("'svn %s' opts: %s" % (subcmd, opts))
    print("'svn %s' args: %s" % (subcmd, args))
|
[
"def",
"do_blame",
"(",
"self",
",",
"subcmd",
",",
"opts",
",",
"*",
"args",
")",
":",
"print",
"\"'svn %s' opts: %s\"",
"%",
"(",
"subcmd",
",",
"opts",
")",
"print",
"\"'svn %s' args: %s\"",
"%",
"(",
"subcmd",
",",
"args",
")"
] | 30.454545 | 14.818182 |
def merge(self, merge_func=None, merge_key=None, stash='active'):
"""
Merge the states in a given stash.
:param stash: The stash (default: 'active')
:param merge_func: If provided, instead of using state.merge, call this function with
the states as the argument. Should return the merged state.
:param merge_key: If provided, should be a function that takes a state and returns a key that will compare
                           equal for all states that are allowed to be merged together, as a first approximation.
By default: uses PC, callstack, and open file descriptors.
:returns: The simulation manager, for chaining.
:rtype: SimulationManager
"""
self.prune(from_stash=stash)
to_merge = self._fetch_states(stash=stash)
not_to_merge = []
if merge_key is None: merge_key = self._merge_key
merge_groups = [ ]
while to_merge:
base_key = merge_key(to_merge[0])
g, to_merge = self._filter_states(lambda s: base_key == merge_key(s), to_merge)
if len(g) <= 1:
not_to_merge.extend(g)
else:
merge_groups.append(g)
for g in merge_groups:
try:
m = self._merge_states(g) if merge_func is None else merge_func(*g)
not_to_merge.append(m)
except SimMergeError:
l.warning("SimMergeError while merging %d states", len(g), exc_info=True)
not_to_merge.extend(g)
self._clear_states(stash)
self._store_states(stash, not_to_merge)
return self
|
[
"def",
"merge",
"(",
"self",
",",
"merge_func",
"=",
"None",
",",
"merge_key",
"=",
"None",
",",
"stash",
"=",
"'active'",
")",
":",
"self",
".",
"prune",
"(",
"from_stash",
"=",
"stash",
")",
"to_merge",
"=",
"self",
".",
"_fetch_states",
"(",
"stash",
"=",
"stash",
")",
"not_to_merge",
"=",
"[",
"]",
"if",
"merge_key",
"is",
"None",
":",
"merge_key",
"=",
"self",
".",
"_merge_key",
"merge_groups",
"=",
"[",
"]",
"while",
"to_merge",
":",
"base_key",
"=",
"merge_key",
"(",
"to_merge",
"[",
"0",
"]",
")",
"g",
",",
"to_merge",
"=",
"self",
".",
"_filter_states",
"(",
"lambda",
"s",
":",
"base_key",
"==",
"merge_key",
"(",
"s",
")",
",",
"to_merge",
")",
"if",
"len",
"(",
"g",
")",
"<=",
"1",
":",
"not_to_merge",
".",
"extend",
"(",
"g",
")",
"else",
":",
"merge_groups",
".",
"append",
"(",
"g",
")",
"for",
"g",
"in",
"merge_groups",
":",
"try",
":",
"m",
"=",
"self",
".",
"_merge_states",
"(",
"g",
")",
"if",
"merge_func",
"is",
"None",
"else",
"merge_func",
"(",
"*",
"g",
")",
"not_to_merge",
".",
"append",
"(",
"m",
")",
"except",
"SimMergeError",
":",
"l",
".",
"warning",
"(",
"\"SimMergeError while merging %d states\"",
",",
"len",
"(",
"g",
")",
",",
"exc_info",
"=",
"True",
")",
"not_to_merge",
".",
"extend",
"(",
"g",
")",
"self",
".",
"_clear_states",
"(",
"stash",
")",
"self",
".",
"_store_states",
"(",
"stash",
",",
"not_to_merge",
")",
"return",
"self"
] | 43.358974 | 23.461538 |
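A short chaining sketch for the method in this record (hedged: `simgr` stands for an already-constructed angr-style SimulationManager; the custom key groups states by program counter only, coarser than the default PC/callstack/file-descriptor key):

# merge everything in 'deadended', grouping states by current address only
simgr = simgr.merge(
    stash='deadended',
    merge_key=lambda s: s.addr,
)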
def score(
self, X_test, Y_test, b=0.5, pos_label=1, set_unlabeled_as_neg=True, beta=1
):
"""
Returns the summary scores:
* For binary: precision, recall, F-beta score, ROC-AUC score
* For categorical: accuracy
:param X_test: The input test candidates.
:type X_test: pair with candidates and corresponding features
:param Y_test: The input test labels.
:type Y_test: list of labels
:param b: Decision boundary *for binary setting only*.
:type b: float
:param pos_label: Positive class index *for binary setting only*. Default: 1
:type pos_label: int
:param set_unlabeled_as_neg: Whether to map 0 labels -> -1,
*for binary setting only*
:type set_unlabeled_as_neg: bool
:param beta: For F-beta score; by default beta = 1 => F-1 score.
:type beta: int
"""
if self._check_input(X_test):
X_test, Y_test = self._preprocess_data(X_test, Y_test)
Y_pred, Y_prob = self.predict(
X_test, b=b, pos_label=pos_label, return_probs=True
)
# Convert Y_test to dense numpy array
try:
Y_test = np.array(Y_test.todense()).reshape(-1)
except Exception:
Y_test = np.array(Y_test)
scores = {}
# Compute P/R/F1 for binary settings
if self.cardinality == 2:
# Either remap or filter out unlabeled (0-valued) test labels
if set_unlabeled_as_neg:
Y_test[Y_test == 0] = 3 - pos_label
else:
Y_pred = Y_pred[Y_test != 0]
Y_test = Y_test[Y_test != 0]
# Compute and return precision, recall, and F1 score
pred_pos = np.where(Y_pred == pos_label, True, False)
gt_pos = np.where(Y_test == pos_label, True, False)
TP = np.sum(pred_pos * gt_pos)
FP = np.sum(pred_pos * np.logical_not(gt_pos))
FN = np.sum(np.logical_not(pred_pos) * gt_pos)
prec = TP / (TP + FP) if TP + FP > 0 else 0.0
rec = TP / (TP + FN) if TP + FN > 0 else 0.0
fbeta = (
(1 + beta ** 2) * (prec * rec) / ((beta ** 2 * prec) + rec)
if (beta ** 2 * prec) + rec > 0
else 0.0
)
scores["precision"] = prec
scores["recall"] = rec
scores[f"f{beta}"] = fbeta
roc_auc = roc_auc_score(Y_test, Y_prob[:, pos_label - 1])
scores["roc_auc"] = roc_auc
# Compute accuracy for all settings
acc = np.where([Y_pred == Y_test])[0].shape[0] / float(Y_test.shape[0])
scores["accuracy"] = acc
return scores
|
[
"def",
"score",
"(",
"self",
",",
"X_test",
",",
"Y_test",
",",
"b",
"=",
"0.5",
",",
"pos_label",
"=",
"1",
",",
"set_unlabeled_as_neg",
"=",
"True",
",",
"beta",
"=",
"1",
")",
":",
"if",
"self",
".",
"_check_input",
"(",
"X_test",
")",
":",
"X_test",
",",
"Y_test",
"=",
"self",
".",
"_preprocess_data",
"(",
"X_test",
",",
"Y_test",
")",
"Y_pred",
",",
"Y_prob",
"=",
"self",
".",
"predict",
"(",
"X_test",
",",
"b",
"=",
"b",
",",
"pos_label",
"=",
"pos_label",
",",
"return_probs",
"=",
"True",
")",
"# Convert Y_test to dense numpy array",
"try",
":",
"Y_test",
"=",
"np",
".",
"array",
"(",
"Y_test",
".",
"todense",
"(",
")",
")",
".",
"reshape",
"(",
"-",
"1",
")",
"except",
"Exception",
":",
"Y_test",
"=",
"np",
".",
"array",
"(",
"Y_test",
")",
"scores",
"=",
"{",
"}",
"# Compute P/R/F1 for binary settings",
"if",
"self",
".",
"cardinality",
"==",
"2",
":",
"# Either remap or filter out unlabeled (0-valued) test labels",
"if",
"set_unlabeled_as_neg",
":",
"Y_test",
"[",
"Y_test",
"==",
"0",
"]",
"=",
"3",
"-",
"pos_label",
"else",
":",
"Y_pred",
"=",
"Y_pred",
"[",
"Y_test",
"!=",
"0",
"]",
"Y_test",
"=",
"Y_test",
"[",
"Y_test",
"!=",
"0",
"]",
"# Compute and return precision, recall, and F1 score",
"pred_pos",
"=",
"np",
".",
"where",
"(",
"Y_pred",
"==",
"pos_label",
",",
"True",
",",
"False",
")",
"gt_pos",
"=",
"np",
".",
"where",
"(",
"Y_test",
"==",
"pos_label",
",",
"True",
",",
"False",
")",
"TP",
"=",
"np",
".",
"sum",
"(",
"pred_pos",
"*",
"gt_pos",
")",
"FP",
"=",
"np",
".",
"sum",
"(",
"pred_pos",
"*",
"np",
".",
"logical_not",
"(",
"gt_pos",
")",
")",
"FN",
"=",
"np",
".",
"sum",
"(",
"np",
".",
"logical_not",
"(",
"pred_pos",
")",
"*",
"gt_pos",
")",
"prec",
"=",
"TP",
"/",
"(",
"TP",
"+",
"FP",
")",
"if",
"TP",
"+",
"FP",
">",
"0",
"else",
"0.0",
"rec",
"=",
"TP",
"/",
"(",
"TP",
"+",
"FN",
")",
"if",
"TP",
"+",
"FN",
">",
"0",
"else",
"0.0",
"fbeta",
"=",
"(",
"(",
"1",
"+",
"beta",
"**",
"2",
")",
"*",
"(",
"prec",
"*",
"rec",
")",
"/",
"(",
"(",
"beta",
"**",
"2",
"*",
"prec",
")",
"+",
"rec",
")",
"if",
"(",
"beta",
"**",
"2",
"*",
"prec",
")",
"+",
"rec",
">",
"0",
"else",
"0.0",
")",
"scores",
"[",
"\"precision\"",
"]",
"=",
"prec",
"scores",
"[",
"\"recall\"",
"]",
"=",
"rec",
"scores",
"[",
"f\"f{beta}\"",
"]",
"=",
"fbeta",
"roc_auc",
"=",
"roc_auc_score",
"(",
"Y_test",
",",
"Y_prob",
"[",
":",
",",
"pos_label",
"-",
"1",
"]",
")",
"scores",
"[",
"\"roc_auc\"",
"]",
"=",
"roc_auc",
"# Compute accuracy for all settings",
"acc",
"=",
"np",
".",
"where",
"(",
"[",
"Y_pred",
"==",
"Y_test",
"]",
")",
"[",
"0",
"]",
".",
"shape",
"[",
"0",
"]",
"/",
"float",
"(",
"Y_test",
".",
"shape",
"[",
"0",
"]",
")",
"scores",
"[",
"\"accuracy\"",
"]",
"=",
"acc",
"return",
"scores"
] | 36.337838 | 19.851351 |
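The precision/recall/F-beta arithmetic inside `score` can be checked by hand on a toy label vector; this sketch is standalone and mirrors the record's formulas with `pos_label=1` and `beta=1`:

import numpy as np

Y_test = np.array([1, 1, 2, 2, 1])  # gold labels, 1 = positive class
Y_pred = np.array([1, 2, 2, 2, 1])  # predictions

pred_pos = Y_pred == 1
gt_pos = Y_test == 1
TP = np.sum(pred_pos & gt_pos)   # 2
FP = np.sum(pred_pos & ~gt_pos)  # 0
FN = np.sum(~pred_pos & gt_pos)  # 1

prec = TP / (TP + FP) if TP + FP > 0 else 0.0  # 1.0
rec = TP / (TP + FN) if TP + FN > 0 else 0.0   # 0.667
f1 = 2 * prec * rec / (prec + rec)             # 0.8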
def search(self, search_content, search_type, limit=9):
"""Search entrance.
:params search_content: search content.
:params search_type: search type.
:params limit: result count returned by weapi.
:return: a dict.
"""
url = 'http://music.163.com/weapi/cloudsearch/get/web?csrf_token='
params = {'s': search_content, 'type': search_type, 'offset': 0,
'sub': 'false', 'limit': limit}
result = self.post_request(url, params)
return result
|
[
"def",
"search",
"(",
"self",
",",
"search_content",
",",
"search_type",
",",
"limit",
"=",
"9",
")",
":",
"url",
"=",
"'http://music.163.com/weapi/cloudsearch/get/web?csrf_token='",
"params",
"=",
"{",
"'s'",
":",
"search_content",
",",
"'type'",
":",
"search_type",
",",
"'offset'",
":",
"0",
",",
"'sub'",
":",
"'false'",
",",
"'limit'",
":",
"limit",
"}",
"result",
"=",
"self",
".",
"post_request",
"(",
"url",
",",
"params",
")",
"return",
"result"
] | 37.285714 | 16.714286 |
def post(self, request, *args, **kwargs):
"""
Handles POST requests, instantiating a formset instance with the passed
POST variables and then checked for validity.
"""
formset = self.construct_formset()
if formset.is_valid():
return self.formset_valid(formset)
else:
return self.formset_invalid(formset)
|
[
"def",
"post",
"(",
"self",
",",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"formset",
"=",
"self",
".",
"construct_formset",
"(",
")",
"if",
"formset",
".",
"is_valid",
"(",
")",
":",
"return",
"self",
".",
"formset_valid",
"(",
"formset",
")",
"else",
":",
"return",
"self",
".",
"formset_invalid",
"(",
"formset",
")"
] | 37.4 | 10.6 |
def all_experiments(self):
"""
Similar to experiments,
but uses the default manager to return archived experiments as well.
"""
from db.models.experiments import Experiment
return Experiment.all.filter(experiment_group=self)
|
[
"def",
"all_experiments",
"(",
"self",
")",
":",
"from",
"db",
".",
"models",
".",
"experiments",
"import",
"Experiment",
"return",
"Experiment",
".",
"all",
".",
"filter",
"(",
"experiment_group",
"=",
"self",
")"
] | 33.25 | 16.25 |
def push_not_registered_user_data_task(data):
"""
Async: push_not_registered_user_data_task.apply_async(args=[data], countdown=100)
"""
lock_id = "%s-push-not-registered-user-data-task-%s" % (settings.ENV_PREFIX, data["email"])
acquire_lock = lambda: cache.add(lock_id, "true", LOCK_EXPIRE) # noqa: E731
release_lock = lambda: cache.delete(lock_id) # noqa: E731
if acquire_lock():
try:
upload_not_registered_user_data(data)
except (KeyError, NotImplementedError, MultipleMatchingUsersError):
release_lock()
raise
release_lock()
|
[
"def",
"push_not_registered_user_data_task",
"(",
"data",
")",
":",
"lock_id",
"=",
"\"%s-push-not-registered-user-data-task-%s\"",
"%",
"(",
"settings",
".",
"ENV_PREFIX",
",",
"data",
"[",
"\"email\"",
"]",
")",
"acquire_lock",
"=",
"lambda",
":",
"cache",
".",
"add",
"(",
"lock_id",
",",
"\"true\"",
",",
"LOCK_EXPIRE",
")",
"# noqa: E731",
"release_lock",
"=",
"lambda",
":",
"cache",
".",
"delete",
"(",
"lock_id",
")",
"# noqa: E731",
"if",
"acquire_lock",
"(",
")",
":",
"try",
":",
"upload_not_registered_user_data",
"(",
"data",
")",
"except",
"(",
"KeyError",
",",
"NotImplementedError",
",",
"MultipleMatchingUsersError",
")",
":",
"release_lock",
"(",
")",
"raise",
"release_lock",
"(",
")"
] | 40.266667 | 23.466667 |
def _fixupRandomEncoderParams(params, minVal, maxVal, minResolution):
"""
Given model params, figure out the correct parameters for the
RandomDistributed encoder. Modifies params in place.
"""
encodersDict = (
params["modelConfig"]["modelParams"]["sensorParams"]["encoders"]
)
for encoder in encodersDict.itervalues():
if encoder is not None:
if encoder["type"] == "RandomDistributedScalarEncoder":
resolution = max(minResolution,
(maxVal - minVal) / encoder.pop("numBuckets")
)
encodersDict["c1"]["resolution"] = resolution
|
[
"def",
"_fixupRandomEncoderParams",
"(",
"params",
",",
"minVal",
",",
"maxVal",
",",
"minResolution",
")",
":",
"encodersDict",
"=",
"(",
"params",
"[",
"\"modelConfig\"",
"]",
"[",
"\"modelParams\"",
"]",
"[",
"\"sensorParams\"",
"]",
"[",
"\"encoders\"",
"]",
")",
"for",
"encoder",
"in",
"encodersDict",
".",
"itervalues",
"(",
")",
":",
"if",
"encoder",
"is",
"not",
"None",
":",
"if",
"encoder",
"[",
"\"type\"",
"]",
"==",
"\"RandomDistributedScalarEncoder\"",
":",
"resolution",
"=",
"max",
"(",
"minResolution",
",",
"(",
"maxVal",
"-",
"minVal",
")",
"/",
"encoder",
".",
"pop",
"(",
"\"numBuckets\"",
")",
")",
"encodersDict",
"[",
"\"c1\"",
"]",
"[",
"\"resolution\"",
"]",
"=",
"resolution"
] | 37.6875 | 18.0625 |
def _formatEvidence(self, elements):
"""
Formats elements passed into parts of a query for filtering
"""
elementClause = None
filters = []
for evidence in elements:
if evidence.description:
elementClause = 'regex(?{}, "{}")'.format(
'environment_label', evidence.description)
if (hasattr(evidence, 'externalIdentifiers') and
evidence.externalIdentifiers):
# TODO will this pick up > 1 externalIdentifiers ?
for externalIdentifier in evidence['externalIdentifiers']:
exid_clause = self._formatExternalIdentifier(
externalIdentifier, 'environment')
# cleanup parens from _formatExternalIdentifier method
elementClause = exid_clause[1:-1]
if elementClause:
filters.append(elementClause)
elementClause = "({})".format(" || ".join(filters))
return elementClause
|
[
"def",
"_formatEvidence",
"(",
"self",
",",
"elements",
")",
":",
"elementClause",
"=",
"None",
"filters",
"=",
"[",
"]",
"for",
"evidence",
"in",
"elements",
":",
"if",
"evidence",
".",
"description",
":",
"elementClause",
"=",
"'regex(?{}, \"{}\")'",
".",
"format",
"(",
"'environment_label'",
",",
"evidence",
".",
"description",
")",
"if",
"(",
"hasattr",
"(",
"evidence",
",",
"'externalIdentifiers'",
")",
"and",
"evidence",
".",
"externalIdentifiers",
")",
":",
"# TODO will this pick up > 1 externalIdentifiers ?",
"for",
"externalIdentifier",
"in",
"evidence",
"[",
"'externalIdentifiers'",
"]",
":",
"exid_clause",
"=",
"self",
".",
"_formatExternalIdentifier",
"(",
"externalIdentifier",
",",
"'environment'",
")",
"# cleanup parens from _formatExternalIdentifier method",
"elementClause",
"=",
"exid_clause",
"[",
"1",
":",
"-",
"1",
"]",
"if",
"elementClause",
":",
"filters",
".",
"append",
"(",
"elementClause",
")",
"elementClause",
"=",
"\"({})\"",
".",
"format",
"(",
"\" || \"",
".",
"join",
"(",
"filters",
")",
")",
"return",
"elementClause"
] | 46.5 | 15.5 |
def delete_buckets(cls, record):
"""Delete the bucket."""
files = record.get('_files', [])
buckets = set()
for f in files:
buckets.add(f.get('bucket'))
for b_id in buckets:
b = Bucket.get(b_id)
b.deleted = True
|
[
"def",
"delete_buckets",
"(",
"cls",
",",
"record",
")",
":",
"files",
"=",
"record",
".",
"get",
"(",
"'_files'",
",",
"[",
"]",
")",
"buckets",
"=",
"set",
"(",
")",
"for",
"f",
"in",
"files",
":",
"buckets",
".",
"add",
"(",
"f",
".",
"get",
"(",
"'bucket'",
")",
")",
"for",
"b_id",
"in",
"buckets",
":",
"b",
"=",
"Bucket",
".",
"get",
"(",
"b_id",
")",
"b",
".",
"deleted",
"=",
"True"
] | 30.888889 | 8.222222 |
def validate_schema_dict(schema):
# type: (Dict[str, Any]) -> None
""" Validate the schema.
This raises iff either the schema or the master schema are
invalid. If it's successful, it returns nothing.
:param schema: The schema to validate, as parsed by `json`.
:raises SchemaError: When the schema is invalid.
:raises MasterSchemaError: When the master schema is invalid.
"""
if not isinstance(schema, dict):
msg = ('The top level of the schema file is a {}, whereas a dict is '
'expected.'.format(type(schema).__name__))
raise SchemaError(msg)
if 'version' in schema:
version = schema['version']
else:
raise SchemaError('A format version is expected in the schema.')
master_schema_bytes = _get_master_schema(version)
try:
master_schema = json.loads(master_schema_bytes.decode('utf-8'))
except ValueError as e: # In Python 3 we can be more specific with
# json.decoder.JSONDecodeError, but that
# doesn't exist in Python 2.
msg = ('The master schema is not a valid JSON file. The schema cannot '
'be validated. Please file a bug report.')
raise_from(MasterSchemaError(msg), e)
try:
jsonschema.validate(schema, master_schema)
except jsonschema.exceptions.ValidationError as e:
raise_from(SchemaError('The schema is not valid.'), e)
except jsonschema.exceptions.SchemaError as e:
msg = ('The master schema is not valid. The schema cannot be '
'validated. Please file a bug report.')
raise_from(MasterSchemaError(msg), e)
|
[
"def",
"validate_schema_dict",
"(",
"schema",
")",
":",
"# type: (Dict[str, Any]) -> None",
"if",
"not",
"isinstance",
"(",
"schema",
",",
"dict",
")",
":",
"msg",
"=",
"(",
"'The top level of the schema file is a {}, whereas a dict is '",
"'expected.'",
".",
"format",
"(",
"type",
"(",
"schema",
")",
".",
"__name__",
")",
")",
"raise",
"SchemaError",
"(",
"msg",
")",
"if",
"'version'",
"in",
"schema",
":",
"version",
"=",
"schema",
"[",
"'version'",
"]",
"else",
":",
"raise",
"SchemaError",
"(",
"'A format version is expected in the schema.'",
")",
"master_schema_bytes",
"=",
"_get_master_schema",
"(",
"version",
")",
"try",
":",
"master_schema",
"=",
"json",
".",
"loads",
"(",
"master_schema_bytes",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"except",
"ValueError",
"as",
"e",
":",
"# In Python 3 we can be more specific with",
"# json.decoder.JSONDecodeError, but that",
"# doesn't exist in Python 2.",
"msg",
"=",
"(",
"'The master schema is not a valid JSON file. The schema cannot '",
"'be validated. Please file a bug report.'",
")",
"raise_from",
"(",
"MasterSchemaError",
"(",
"msg",
")",
",",
"e",
")",
"try",
":",
"jsonschema",
".",
"validate",
"(",
"schema",
",",
"master_schema",
")",
"except",
"jsonschema",
".",
"exceptions",
".",
"ValidationError",
"as",
"e",
":",
"raise_from",
"(",
"SchemaError",
"(",
"'The schema is not valid.'",
")",
",",
"e",
")",
"except",
"jsonschema",
".",
"exceptions",
".",
"SchemaError",
"as",
"e",
":",
"msg",
"=",
"(",
"'The master schema is not valid. The schema cannot be '",
"'validated. Please file a bug report.'",
")",
"raise_from",
"(",
"MasterSchemaError",
"(",
"msg",
")",
",",
"e",
")"
] | 41.589744 | 20.282051 |
async def hmset(self, name, mapping):
"""
Set key to value within hash ``name`` for each corresponding
key and value from the ``mapping`` dict.
"""
if not mapping:
raise DataError("'hmset' with 'mapping' of length 0")
items = []
for pair in iteritems(mapping):
items.extend(pair)
return await self.execute_command('HMSET', name, *items)
|
[
"async",
"def",
"hmset",
"(",
"self",
",",
"name",
",",
"mapping",
")",
":",
"if",
"not",
"mapping",
":",
"raise",
"DataError",
"(",
"\"'hmset' with 'mapping' of length 0\"",
")",
"items",
"=",
"[",
"]",
"for",
"pair",
"in",
"iteritems",
"(",
"mapping",
")",
":",
"items",
".",
"extend",
"(",
"pair",
")",
"return",
"await",
"self",
".",
"execute_command",
"(",
"'HMSET'",
",",
"name",
",",
"*",
"items",
")"
] | 37.636364 | 12.545455 |
def generate_key_pair(size=512, number=2, rnd=default_crypto_random,
k=DEFAULT_ITERATION, primality_algorithm=None,
strict_size=True, e=0x10001):
'''Generates an RSA key pair.
size:
the bit size of the modulus, default to 512.
number:
the number of primes to use, default to 2.
rnd:
the random number generator to use, default to SystemRandom from the
random library.
k:
the number of iteration to use for the probabilistic primality
tests.
primality_algorithm:
the primality algorithm to use.
strict_size:
whether to use size as a lower bound or a strict goal.
e:
the public key exponent.
Returns the pair (public_key, private_key).
'''
primes = []
lbda = 1
bits = size // number + 1
n = 1
while len(primes) < number:
if number - len(primes) == 1:
bits = size - primitives.integer_bit_size(n) + 1
prime = get_prime(bits, rnd, k, algorithm=primality_algorithm)
if prime in primes:
continue
if e is not None and fractions.gcd(e, lbda) != 1:
continue
if (strict_size and number - len(primes) == 1 and
primitives.integer_bit_size(n*prime) != size):
continue
primes.append(prime)
n *= prime
lbda *= prime - 1
if e is None:
e = 0x10001
while e < lbda:
if fractions.gcd(e, lbda) == 1:
break
e += 2
assert 3 <= e <= n-1
public = RsaPublicKey(n, e)
private = MultiPrimeRsaPrivateKey(primes, e, blind=True, rnd=rnd)
return public, private
|
[
"def",
"generate_key_pair",
"(",
"size",
"=",
"512",
",",
"number",
"=",
"2",
",",
"rnd",
"=",
"default_crypto_random",
",",
"k",
"=",
"DEFAULT_ITERATION",
",",
"primality_algorithm",
"=",
"None",
",",
"strict_size",
"=",
"True",
",",
"e",
"=",
"0x10001",
")",
":",
"primes",
"=",
"[",
"]",
"lbda",
"=",
"1",
"bits",
"=",
"size",
"//",
"number",
"+",
"1",
"n",
"=",
"1",
"while",
"len",
"(",
"primes",
")",
"<",
"number",
":",
"if",
"number",
"-",
"len",
"(",
"primes",
")",
"==",
"1",
":",
"bits",
"=",
"size",
"-",
"primitives",
".",
"integer_bit_size",
"(",
"n",
")",
"+",
"1",
"prime",
"=",
"get_prime",
"(",
"bits",
",",
"rnd",
",",
"k",
",",
"algorithm",
"=",
"primality_algorithm",
")",
"if",
"prime",
"in",
"primes",
":",
"continue",
"if",
"e",
"is",
"not",
"None",
"and",
"fractions",
".",
"gcd",
"(",
"e",
",",
"lbda",
")",
"!=",
"1",
":",
"continue",
"if",
"(",
"strict_size",
"and",
"number",
"-",
"len",
"(",
"primes",
")",
"==",
"1",
"and",
"primitives",
".",
"integer_bit_size",
"(",
"n",
"*",
"prime",
")",
"!=",
"size",
")",
":",
"continue",
"primes",
".",
"append",
"(",
"prime",
")",
"n",
"*=",
"prime",
"lbda",
"*=",
"prime",
"-",
"1",
"if",
"e",
"is",
"None",
":",
"e",
"=",
"0x10001",
"while",
"e",
"<",
"lbda",
":",
"if",
"fractions",
".",
"gcd",
"(",
"e",
",",
"lbda",
")",
"==",
"1",
":",
"break",
"e",
"+=",
"2",
"assert",
"3",
"<=",
"e",
"<=",
"n",
"-",
"1",
"public",
"=",
"RsaPublicKey",
"(",
"n",
",",
"e",
")",
"private",
"=",
"MultiPrimeRsaPrivateKey",
"(",
"primes",
",",
"e",
",",
"blind",
"=",
"True",
",",
"rnd",
"=",
"rnd",
")",
"return",
"public",
",",
"private"
] | 32.903846 | 20.25 |
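A quick sanity check for the generator in this record (hedged: it assumes `RsaPublicKey` exposes `n` and `e` as attributes, which the constructor call suggests but the record does not show):

public, private = generate_key_pair(size=512, number=2)
# strict_size=True forces the modulus to exactly 512 bits
assert public.n.bit_length() == 512  # attribute names assumed
assert public.e == 0x10001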
def modify_snapshot_attribute(self, snapshot_id,
attribute='createVolumePermission',
operation='add', user_ids=None, groups=None):
"""
Changes an attribute of an image.
:type snapshot_id: string
:param snapshot_id: The snapshot id you wish to change
:type attribute: string
:param attribute: The attribute you wish to change. Valid values are:
createVolumePermission
:type operation: string
:param operation: Either add or remove (this is required for changing
                          snapshot permissions)
:type user_ids: list
:param user_ids: The Amazon IDs of users to add/remove attributes
:type groups: list
:param groups: The groups to add/remove attributes. The only valid
value at this time is 'all'.
"""
params = {'SnapshotId' : snapshot_id,
'Attribute' : attribute,
'OperationType' : operation}
if user_ids:
self.build_list_params(params, user_ids, 'UserId')
if groups:
self.build_list_params(params, groups, 'UserGroup')
return self.get_status('ModifySnapshotAttribute', params, verb='POST')
|
[
"def",
"modify_snapshot_attribute",
"(",
"self",
",",
"snapshot_id",
",",
"attribute",
"=",
"'createVolumePermission'",
",",
"operation",
"=",
"'add'",
",",
"user_ids",
"=",
"None",
",",
"groups",
"=",
"None",
")",
":",
"params",
"=",
"{",
"'SnapshotId'",
":",
"snapshot_id",
",",
"'Attribute'",
":",
"attribute",
",",
"'OperationType'",
":",
"operation",
"}",
"if",
"user_ids",
":",
"self",
".",
"build_list_params",
"(",
"params",
",",
"user_ids",
",",
"'UserId'",
")",
"if",
"groups",
":",
"self",
".",
"build_list_params",
"(",
"params",
",",
"groups",
",",
"'UserGroup'",
")",
"return",
"self",
".",
"get_status",
"(",
"'ModifySnapshotAttribute'",
",",
"params",
",",
"verb",
"=",
"'POST'",
")"
] | 39.151515 | 21.090909 |
def pkcs_mgf1(mgfSeed, maskLen, h):
"""
Implements generic MGF1 Mask Generation function as described in
Appendix B.2.1 of RFC 3447. The hash function is passed by name.
    valid values are 'md2', 'md4', 'md5', 'sha1', 'tls', 'sha256',
'sha384' and 'sha512'. Returns None on error.
Input:
mgfSeed: seed from which mask is generated, an octet string
maskLen: intended length in octets of the mask, at most 2^32 * hLen
hLen (see below)
h : hash function name (in 'md2', 'md4', 'md5', 'sha1', 'tls',
'sha256', 'sha384'). hLen denotes the length in octets of
the hash function output.
Output:
an octet string of length maskLen
"""
# steps are those of Appendix B.2.1
if not h in _hashFuncParams:
warning("pkcs_mgf1: invalid hash (%s) provided")
return None
hLen = _hashFuncParams[h][0]
hFunc = _hashFuncParams[h][1]
if maskLen > 2**32 * hLen: # 1)
warning("pkcs_mgf1: maskLen > 2**32 * hLen")
return None
T = "" # 2)
maxCounter = math.ceil(float(maskLen) / float(hLen)) # 3)
counter = 0
while counter < maxCounter:
C = pkcs_i2osp(counter, 4)
T += hFunc(mgfSeed + C)
counter += 1
return T[:maskLen]
|
[
"def",
"pkcs_mgf1",
"(",
"mgfSeed",
",",
"maskLen",
",",
"h",
")",
":",
"# steps are those of Appendix B.2.1",
"if",
"not",
"h",
"in",
"_hashFuncParams",
":",
"warning",
"(",
"\"pkcs_mgf1: invalid hash (%s) provided\"",
")",
"return",
"None",
"hLen",
"=",
"_hashFuncParams",
"[",
"h",
"]",
"[",
"0",
"]",
"hFunc",
"=",
"_hashFuncParams",
"[",
"h",
"]",
"[",
"1",
"]",
"if",
"maskLen",
">",
"2",
"**",
"32",
"*",
"hLen",
":",
"# 1)",
"warning",
"(",
"\"pkcs_mgf1: maskLen > 2**32 * hLen\"",
")",
"return",
"None",
"T",
"=",
"\"\"",
"# 2)",
"maxCounter",
"=",
"math",
".",
"ceil",
"(",
"float",
"(",
"maskLen",
")",
"/",
"float",
"(",
"hLen",
")",
")",
"# 3)",
"counter",
"=",
"0",
"while",
"counter",
"<",
"maxCounter",
":",
"C",
"=",
"pkcs_i2osp",
"(",
"counter",
",",
"4",
")",
"T",
"+=",
"hFunc",
"(",
"mgfSeed",
"+",
"C",
")",
"counter",
"+=",
"1",
"return",
"T",
"[",
":",
"maskLen",
"]"
] | 37.75 | 18.75 |
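MGF1 concatenates H(seed || counter) blocks and truncates to maskLen; the record's loop can be exercised directly (assuming the module's `_hashFuncParams` table has a 'sha1' entry, so hLen is 20):

mask = pkcs_mgf1("seed", 40, "sha1")
# 40 octets = exactly two SHA-1 outputs:
#   H("seed" + "\x00\x00\x00\x00") || H("seed" + "\x00\x00\x00\x01")
assert mask is not None and len(mask) == 40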
def _handle_dl_term(self):
"""Handle the term in a description list (``foo`` in ``;foo:bar``)."""
self._context ^= contexts.DL_TERM
if self._read() == ":":
self._handle_list_marker()
else:
self._emit_text("\n")
|
[
"def",
"_handle_dl_term",
"(",
"self",
")",
":",
"self",
".",
"_context",
"^=",
"contexts",
".",
"DL_TERM",
"if",
"self",
".",
"_read",
"(",
")",
"==",
"\":\"",
":",
"self",
".",
"_handle_list_marker",
"(",
")",
"else",
":",
"self",
".",
"_emit_text",
"(",
"\"\\n\"",
")"
] | 37.142857 | 8.571429 |
def __validate_decode_msg(self, message): # noqa (complexity) pylint: disable=too-many-return-statements,too-many-branches
"""Decodes wrapper, check hash & seq, decodes body. Returns body or None, if validation / unpack failed"""
try:
if not _CONTENT_TYPE_PATTERN.match(message.content_type):
logger.debug('Message with unexpected content type %s from container, ignoring', message.content_type)
return None
except AttributeError:
logger.debug('Message without content type from container, ignoring')
return None
# Decode & check message wrapper
try:
body = ubjloadb(message.body)
except:
logger.warning('Failed to decode message wrapper, ignoring', exc_info=DEBUG_ENABLED)
return None
if not self.__valid_msg_wrapper(body):
logger.warning('Invalid message wrapper, ignoring')
return None
# currently only warn although maybe this should be an error
if self.__cnt_seqnum != -1 and not self.__valid_seqnum(body[W_SEQ], self.__cnt_seqnum):
logger.warning('Unexpected seqnum from container: %d (last seen: %d)', body[W_SEQ],
self.__cnt_seqnum)
self.__cnt_seqnum = body[W_SEQ]
# Check message hash
if not self.__check_hash(body):
logger.warning('Message has invalid hash, ignoring')
return None
# Decompress inner message
try:
msg = COMPRESSORS[body[W_COMPRESSION]].decompress(body[W_MESSAGE])
except KeyError:
logger.warning('Received message with unknown compression: %s', body[W_COMPRESSION])
return None
except OversizeException as ex:
logger.warning('Uncompressed message exceeds %d bytes, ignoring', ex.size, exc_info=DEBUG_ENABLED)
return None
except:
logger.warning('Decompression failed, ignoring message', exc_info=DEBUG_ENABLED)
return None
# Decode inner message
try:
msg = ubjloadb(msg, object_pairs_hook=OrderedDict)
except:
logger.warning('Failed to decode message, ignoring', exc_info=DEBUG_ENABLED)
return None
if self.__valid_msg_body(msg):
return (msg, body[W_SEQ])
else:
logger.warning('Message with invalid body, ignoring: %s', msg)
return None
|
[
"def",
"__validate_decode_msg",
"(",
"self",
",",
"message",
")",
":",
"# noqa (complexity) pylint: disable=too-many-return-statements,too-many-branches",
"try",
":",
"if",
"not",
"_CONTENT_TYPE_PATTERN",
".",
"match",
"(",
"message",
".",
"content_type",
")",
":",
"logger",
".",
"debug",
"(",
"'Message with unexpected content type %s from container, ignoring'",
",",
"message",
".",
"content_type",
")",
"return",
"None",
"except",
"AttributeError",
":",
"logger",
".",
"debug",
"(",
"'Message without content type from container, ignoring'",
")",
"return",
"None",
"# Decode & check message wrapper",
"try",
":",
"body",
"=",
"ubjloadb",
"(",
"message",
".",
"body",
")",
"except",
":",
"logger",
".",
"warning",
"(",
"'Failed to decode message wrapper, ignoring'",
",",
"exc_info",
"=",
"DEBUG_ENABLED",
")",
"return",
"None",
"if",
"not",
"self",
".",
"__valid_msg_wrapper",
"(",
"body",
")",
":",
"logger",
".",
"warning",
"(",
"'Invalid message wrapper, ignoring'",
")",
"return",
"None",
"# currently only warn although maybe this should be an error",
"if",
"self",
".",
"__cnt_seqnum",
"!=",
"-",
"1",
"and",
"not",
"self",
".",
"__valid_seqnum",
"(",
"body",
"[",
"W_SEQ",
"]",
",",
"self",
".",
"__cnt_seqnum",
")",
":",
"logger",
".",
"warning",
"(",
"'Unexpected seqnum from container: %d (last seen: %d)'",
",",
"body",
"[",
"W_SEQ",
"]",
",",
"self",
".",
"__cnt_seqnum",
")",
"self",
".",
"__cnt_seqnum",
"=",
"body",
"[",
"W_SEQ",
"]",
"# Check message hash",
"if",
"not",
"self",
".",
"__check_hash",
"(",
"body",
")",
":",
"logger",
".",
"warning",
"(",
"'Message has invalid hash, ignoring'",
")",
"return",
"None",
"# Decompress inner message",
"try",
":",
"msg",
"=",
"COMPRESSORS",
"[",
"body",
"[",
"W_COMPRESSION",
"]",
"]",
".",
"decompress",
"(",
"body",
"[",
"W_MESSAGE",
"]",
")",
"except",
"KeyError",
":",
"logger",
".",
"warning",
"(",
"'Received message with unknown compression: %s'",
",",
"body",
"[",
"W_COMPRESSION",
"]",
")",
"return",
"None",
"except",
"OversizeException",
"as",
"ex",
":",
"logger",
".",
"warning",
"(",
"'Uncompressed message exceeds %d bytes, ignoring'",
",",
"ex",
".",
"size",
",",
"exc_info",
"=",
"DEBUG_ENABLED",
")",
"return",
"None",
"except",
":",
"logger",
".",
"warning",
"(",
"'Decompression failed, ignoring message'",
",",
"exc_info",
"=",
"DEBUG_ENABLED",
")",
"return",
"None",
"# Decode inner message",
"try",
":",
"msg",
"=",
"ubjloadb",
"(",
"msg",
",",
"object_pairs_hook",
"=",
"OrderedDict",
")",
"except",
":",
"logger",
".",
"warning",
"(",
"'Failed to decode message, ignoring'",
",",
"exc_info",
"=",
"DEBUG_ENABLED",
")",
"return",
"None",
"if",
"self",
".",
"__valid_msg_body",
"(",
"msg",
")",
":",
"return",
"(",
"msg",
",",
"body",
"[",
"W_SEQ",
"]",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"'Message with invalid body, ignoring: %s'",
",",
"msg",
")",
"return",
"None"
] | 43.5 | 26.535714 |
def sendImage(
self,
image_id,
message=None,
thread_id=None,
thread_type=ThreadType.USER,
is_gif=False,
):
"""
Deprecated. Use :func:`fbchat.Client._sendFiles` instead
"""
if is_gif:
mimetype = "image/gif"
else:
mimetype = "image/png"
return self._sendFiles(
files=[(image_id, mimetype)],
message=message,
thread_id=thread_id,
thread_type=thread_type,
)
|
[
"def",
"sendImage",
"(",
"self",
",",
"image_id",
",",
"message",
"=",
"None",
",",
"thread_id",
"=",
"None",
",",
"thread_type",
"=",
"ThreadType",
".",
"USER",
",",
"is_gif",
"=",
"False",
",",
")",
":",
"if",
"is_gif",
":",
"mimetype",
"=",
"\"image/gif\"",
"else",
":",
"mimetype",
"=",
"\"image/png\"",
"return",
"self",
".",
"_sendFiles",
"(",
"files",
"=",
"[",
"(",
"image_id",
",",
"mimetype",
")",
"]",
",",
"message",
"=",
"message",
",",
"thread_id",
"=",
"thread_id",
",",
"thread_type",
"=",
"thread_type",
",",
")"
] | 24.428571 | 15.190476 |
def setup_users_page(self, ):
"""Create and set the model on the users page
:returns: None
:rtype: None
:raises: None
"""
self.users_tablev.horizontalHeader().setResizeMode(QtGui.QHeaderView.ResizeToContents)
log.debug("Loading users for users page.")
rootdata = treemodel.ListItemData(['Username', 'First', 'Last', 'Email'])
rootitem = treemodel.TreeItem(rootdata)
users = djadapter.users.all()
for usr in users:
usrdata = djitemdata.UserItemData(usr)
treemodel.TreeItem(usrdata, rootitem)
self.users_model = treemodel.TreeModel(rootitem)
self.users_tablev.setModel(self.users_model)
|
[
"def",
"setup_users_page",
"(",
"self",
",",
")",
":",
"self",
".",
"users_tablev",
".",
"horizontalHeader",
"(",
")",
".",
"setResizeMode",
"(",
"QtGui",
".",
"QHeaderView",
".",
"ResizeToContents",
")",
"log",
".",
"debug",
"(",
"\"Loading users for users page.\"",
")",
"rootdata",
"=",
"treemodel",
".",
"ListItemData",
"(",
"[",
"'Username'",
",",
"'First'",
",",
"'Last'",
",",
"'Email'",
"]",
")",
"rootitem",
"=",
"treemodel",
".",
"TreeItem",
"(",
"rootdata",
")",
"users",
"=",
"djadapter",
".",
"users",
".",
"all",
"(",
")",
"for",
"usr",
"in",
"users",
":",
"usrdata",
"=",
"djitemdata",
".",
"UserItemData",
"(",
"usr",
")",
"treemodel",
".",
"TreeItem",
"(",
"usrdata",
",",
"rootitem",
")",
"self",
".",
"users_model",
"=",
"treemodel",
".",
"TreeModel",
"(",
"rootitem",
")",
"self",
".",
"users_tablev",
".",
"setModel",
"(",
"self",
".",
"users_model",
")"
] | 41 | 16.764706 |
def get_absolute_url(self):
"""produces a url to link directly to this instance, given the URL config
:return: `str`
"""
try:
url = reverse("content-detail-view", kwargs={"pk": self.pk, "slug": self.slug})
except NoReverseMatch:
url = None
return url
|
[
"def",
"get_absolute_url",
"(",
"self",
")",
":",
"try",
":",
"url",
"=",
"reverse",
"(",
"\"content-detail-view\"",
",",
"kwargs",
"=",
"{",
"\"pk\"",
":",
"self",
".",
"pk",
",",
"\"slug\"",
":",
"self",
".",
"slug",
"}",
")",
"except",
"NoReverseMatch",
":",
"url",
"=",
"None",
"return",
"url"
] | 31.4 | 20 |
def authenticate_search_bind(self, username, password):
"""
Performs a search bind to authenticate a user. This is
        required when the login attribute is not the same
as the RDN, since we cannot string together their DN on
the fly, instead we have to find it in the LDAP, then attempt
to bind with their credentials.
Args:
username (str): Username of the user to bind (the field specified
as LDAP_BIND_LOGIN_ATTR)
password (str): User's password to bind with when we find their dn.
Returns:
AuthenticationResponse
"""
connection = self._make_connection(
bind_user=self.config.get('LDAP_BIND_USER_DN'),
bind_password=self.config.get('LDAP_BIND_USER_PASSWORD'),
)
try:
connection.bind()
log.debug("Successfully bound to LDAP as '{0}' for search_bind method".format(
self.config.get('LDAP_BIND_USER_DN') or 'Anonymous'
))
except Exception as e:
self.destroy_connection(connection)
log.error(e)
return AuthenticationResponse()
# Find the user in the search path.
user_filter = '({search_attr}={username})'.format(
search_attr=self.config.get('LDAP_USER_LOGIN_ATTR'),
username=username
)
search_filter = '(&{0}{1})'.format(
self.config.get('LDAP_USER_OBJECT_FILTER'),
user_filter,
)
log.debug(
"Performing an LDAP Search using filter '{0}', base '{1}', "
"and scope '{2}'".format(
search_filter,
self.full_user_search_dn,
self.config.get('LDAP_USER_SEARCH_SCOPE')
))
connection.search(
search_base=self.full_user_search_dn,
search_filter=search_filter,
search_scope=getattr(
ldap3, self.config.get('LDAP_USER_SEARCH_SCOPE')),
attributes=self.config.get('LDAP_GET_USER_ATTRIBUTES')
)
response = AuthenticationResponse()
if len(connection.response) == 0 or \
(self.config.get('LDAP_FAIL_AUTH_ON_MULTIPLE_FOUND') and
len(connection.response) > 1):
# Don't allow them to log in.
log.debug(
"Authentication was not successful for user '{0}'".format(username))
else:
for user in connection.response:
# Attempt to bind with each user we find until we can find
# one that works.
if 'type' not in user or user.get('type') != 'searchResEntry':
# Issue #13 - Don't return non-entry results.
continue
user_connection = self._make_connection(
bind_user=user['dn'],
bind_password=password
)
log.debug(
"Directly binding a connection to a server with "
"user:'{0}'".format(user['dn']))
try:
user_connection.bind()
log.debug(
"Authentication was successful for user '{0}'".format(username))
response.status = AuthenticationResponseStatus.success
# Populate User Data
user['attributes']['dn'] = user['dn']
response.user_info = user['attributes']
response.user_id = username
response.user_dn = user['dn']
if self.config.get('LDAP_SEARCH_FOR_GROUPS'):
response.user_groups = self.get_user_groups(
dn=user['dn'], _connection=connection)
self.destroy_connection(user_connection)
break
except ldap3.core.exceptions.LDAPInvalidCredentialsResult:
log.debug(
"Authentication was not successful for "
"user '{0}'".format(username))
response.status = AuthenticationResponseStatus.fail
except Exception as e: # pragma: no cover
# This should never happen, however in case ldap3 does ever
# throw an error here, we catch it and log it
log.error(e)
response.status = AuthenticationResponseStatus.fail
self.destroy_connection(user_connection)
self.destroy_connection(connection)
return response
|
[
"def",
"authenticate_search_bind",
"(",
"self",
",",
"username",
",",
"password",
")",
":",
"connection",
"=",
"self",
".",
"_make_connection",
"(",
"bind_user",
"=",
"self",
".",
"config",
".",
"get",
"(",
"'LDAP_BIND_USER_DN'",
")",
",",
"bind_password",
"=",
"self",
".",
"config",
".",
"get",
"(",
"'LDAP_BIND_USER_PASSWORD'",
")",
",",
")",
"try",
":",
"connection",
".",
"bind",
"(",
")",
"log",
".",
"debug",
"(",
"\"Successfully bound to LDAP as '{0}' for search_bind method\"",
".",
"format",
"(",
"self",
".",
"config",
".",
"get",
"(",
"'LDAP_BIND_USER_DN'",
")",
"or",
"'Anonymous'",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"destroy_connection",
"(",
"connection",
")",
"log",
".",
"error",
"(",
"e",
")",
"return",
"AuthenticationResponse",
"(",
")",
"# Find the user in the search path.",
"user_filter",
"=",
"'({search_attr}={username})'",
".",
"format",
"(",
"search_attr",
"=",
"self",
".",
"config",
".",
"get",
"(",
"'LDAP_USER_LOGIN_ATTR'",
")",
",",
"username",
"=",
"username",
")",
"search_filter",
"=",
"'(&{0}{1})'",
".",
"format",
"(",
"self",
".",
"config",
".",
"get",
"(",
"'LDAP_USER_OBJECT_FILTER'",
")",
",",
"user_filter",
",",
")",
"log",
".",
"debug",
"(",
"\"Performing an LDAP Search using filter '{0}', base '{1}', \"",
"\"and scope '{2}'\"",
".",
"format",
"(",
"search_filter",
",",
"self",
".",
"full_user_search_dn",
",",
"self",
".",
"config",
".",
"get",
"(",
"'LDAP_USER_SEARCH_SCOPE'",
")",
")",
")",
"connection",
".",
"search",
"(",
"search_base",
"=",
"self",
".",
"full_user_search_dn",
",",
"search_filter",
"=",
"search_filter",
",",
"search_scope",
"=",
"getattr",
"(",
"ldap3",
",",
"self",
".",
"config",
".",
"get",
"(",
"'LDAP_USER_SEARCH_SCOPE'",
")",
")",
",",
"attributes",
"=",
"self",
".",
"config",
".",
"get",
"(",
"'LDAP_GET_USER_ATTRIBUTES'",
")",
")",
"response",
"=",
"AuthenticationResponse",
"(",
")",
"if",
"len",
"(",
"connection",
".",
"response",
")",
"==",
"0",
"or",
"(",
"self",
".",
"config",
".",
"get",
"(",
"'LDAP_FAIL_AUTH_ON_MULTIPLE_FOUND'",
")",
"and",
"len",
"(",
"connection",
".",
"response",
")",
">",
"1",
")",
":",
"# Don't allow them to log in.",
"log",
".",
"debug",
"(",
"\"Authentication was not successful for user '{0}'\"",
".",
"format",
"(",
"username",
")",
")",
"else",
":",
"for",
"user",
"in",
"connection",
".",
"response",
":",
"# Attempt to bind with each user we find until we can find",
"# one that works.",
"if",
"'type'",
"not",
"in",
"user",
"or",
"user",
".",
"get",
"(",
"'type'",
")",
"!=",
"'searchResEntry'",
":",
"# Issue #13 - Don't return non-entry results.",
"continue",
"user_connection",
"=",
"self",
".",
"_make_connection",
"(",
"bind_user",
"=",
"user",
"[",
"'dn'",
"]",
",",
"bind_password",
"=",
"password",
")",
"log",
".",
"debug",
"(",
"\"Directly binding a connection to a server with \"",
"\"user:'{0}'\"",
".",
"format",
"(",
"user",
"[",
"'dn'",
"]",
")",
")",
"try",
":",
"user_connection",
".",
"bind",
"(",
")",
"log",
".",
"debug",
"(",
"\"Authentication was successful for user '{0}'\"",
".",
"format",
"(",
"username",
")",
")",
"response",
".",
"status",
"=",
"AuthenticationResponseStatus",
".",
"success",
"# Populate User Data",
"user",
"[",
"'attributes'",
"]",
"[",
"'dn'",
"]",
"=",
"user",
"[",
"'dn'",
"]",
"response",
".",
"user_info",
"=",
"user",
"[",
"'attributes'",
"]",
"response",
".",
"user_id",
"=",
"username",
"response",
".",
"user_dn",
"=",
"user",
"[",
"'dn'",
"]",
"if",
"self",
".",
"config",
".",
"get",
"(",
"'LDAP_SEARCH_FOR_GROUPS'",
")",
":",
"response",
".",
"user_groups",
"=",
"self",
".",
"get_user_groups",
"(",
"dn",
"=",
"user",
"[",
"'dn'",
"]",
",",
"_connection",
"=",
"connection",
")",
"self",
".",
"destroy_connection",
"(",
"user_connection",
")",
"break",
"except",
"ldap3",
".",
"core",
".",
"exceptions",
".",
"LDAPInvalidCredentialsResult",
":",
"log",
".",
"debug",
"(",
"\"Authentication was not successful for \"",
"\"user '{0}'\"",
".",
"format",
"(",
"username",
")",
")",
"response",
".",
"status",
"=",
"AuthenticationResponseStatus",
".",
"fail",
"except",
"Exception",
"as",
"e",
":",
"# pragma: no cover",
"# This should never happen, however in case ldap3 does ever",
"# throw an error here, we catch it and log it",
"log",
".",
"error",
"(",
"e",
")",
"response",
".",
"status",
"=",
"AuthenticationResponseStatus",
".",
"fail",
"self",
".",
"destroy_connection",
"(",
"user_connection",
")",
"self",
".",
"destroy_connection",
"(",
"connection",
")",
"return",
"response"
] | 39.396552 | 21.327586 |
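A usage sketch in the Flask-LDAP3-Login style this record comes from (hedged: `ldap_manager` stands for a manager instance whose config carries the LDAP_* keys read above):

response = ldap_manager.authenticate_search_bind('jdoe', 's3cret')
if response.status == AuthenticationResponseStatus.success:
    print(response.user_dn)      # DN located by the search step
    print(response.user_groups)  # filled in when LDAP_SEARCH_FOR_GROUPS is set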
def on_send(self, frame):
"""
:param Frame frame:
"""
print('on_send %s %s %s' % (frame.cmd, frame.headers, frame.body))
|
[
"def",
"on_send",
"(",
"self",
",",
"frame",
")",
":",
"print",
"(",
"'on_send %s %s %s'",
"%",
"(",
"frame",
".",
"cmd",
",",
"frame",
".",
"headers",
",",
"frame",
".",
"body",
")",
")"
] | 29.6 | 12.4 |
def delete(self):
"""Delete this experiment and all its data."""
for alternative in self.alternatives:
alternative.delete()
self.reset_winner()
self.redis.srem('experiments', self.name)
self.redis.delete(self.name)
self.increment_version()
|
[
"def",
"delete",
"(",
"self",
")",
":",
"for",
"alternative",
"in",
"self",
".",
"alternatives",
":",
"alternative",
".",
"delete",
"(",
")",
"self",
".",
"reset_winner",
"(",
")",
"self",
".",
"redis",
".",
"srem",
"(",
"'experiments'",
",",
"self",
".",
"name",
")",
"self",
".",
"redis",
".",
"delete",
"(",
"self",
".",
"name",
")",
"self",
".",
"increment_version",
"(",
")"
] | 36.5 | 8.75 |
def cmd(send, msg, args):
"""Slap somebody.
    Syntax: {command} <nick> [for <reason>] [with <implement>]
"""
implements = ['the golden gate bridge', 'a large trout', 'a clue-by-four', 'a fresh haddock', 'moon', 'an Itanium', 'fwilson', 'a wombat']
methods = ['around a bit', 'upside the head']
if not msg:
channel = args['target'] if args['target'] != 'private' else args['config']['core']['channel']
with args['handler'].data_lock:
users = list(args['handler'].channels[channel].users())
slap = 'slaps %s %s with %s'
send(slap % (choice(users), choice(methods), choice(implements)), 'action')
else:
reason = ''
method = choice(methods)
implement = ''
msg = msg.split()
slapee = msg[0]
# Basic and stupid NLP!
i = 1
args = False
while i < len(msg):
if msg[i] == 'for':
args = True
if reason:
send("Invalid Syntax: You can only have one for clause!")
return
i += 1
while i < len(msg):
if msg[i] == 'with':
break
reason += " "
reason += msg[i]
i += 1
reason = reason.strip()
elif msg[i] == 'with':
args = True
if implement:
send("Invalid Synatx: You can only have one with clause!")
return
i += 1
while i < len(msg):
if msg[i] == 'for':
break
implement += msg[i]
implement += ' '
i += 1
implement = implement.strip()
elif not args:
slapee += ' ' + msg[i]
i += 1
if not implement:
implement = choice(implements)
if reason:
slap = 'slaps %s %s with %s for %s' % (slapee, method, implement, reason)
else:
slap = 'slaps %s %s with %s' % (slapee, method, implement)
send(slap, 'action')
|
[
"def",
"cmd",
"(",
"send",
",",
"msg",
",",
"args",
")",
":",
"implements",
"=",
"[",
"'the golden gate bridge'",
",",
"'a large trout'",
",",
"'a clue-by-four'",
",",
"'a fresh haddock'",
",",
"'moon'",
",",
"'an Itanium'",
",",
"'fwilson'",
",",
"'a wombat'",
"]",
"methods",
"=",
"[",
"'around a bit'",
",",
"'upside the head'",
"]",
"if",
"not",
"msg",
":",
"channel",
"=",
"args",
"[",
"'target'",
"]",
"if",
"args",
"[",
"'target'",
"]",
"!=",
"'private'",
"else",
"args",
"[",
"'config'",
"]",
"[",
"'core'",
"]",
"[",
"'channel'",
"]",
"with",
"args",
"[",
"'handler'",
"]",
".",
"data_lock",
":",
"users",
"=",
"list",
"(",
"args",
"[",
"'handler'",
"]",
".",
"channels",
"[",
"channel",
"]",
".",
"users",
"(",
")",
")",
"slap",
"=",
"'slaps %s %s with %s'",
"send",
"(",
"slap",
"%",
"(",
"choice",
"(",
"users",
")",
",",
"choice",
"(",
"methods",
")",
",",
"choice",
"(",
"implements",
")",
")",
",",
"'action'",
")",
"else",
":",
"reason",
"=",
"''",
"method",
"=",
"choice",
"(",
"methods",
")",
"implement",
"=",
"''",
"msg",
"=",
"msg",
".",
"split",
"(",
")",
"slapee",
"=",
"msg",
"[",
"0",
"]",
"# Basic and stupid NLP!",
"i",
"=",
"1",
"args",
"=",
"False",
"while",
"i",
"<",
"len",
"(",
"msg",
")",
":",
"if",
"msg",
"[",
"i",
"]",
"==",
"'for'",
":",
"args",
"=",
"True",
"if",
"reason",
":",
"send",
"(",
"\"Invalid Syntax: You can only have one for clause!\"",
")",
"return",
"i",
"+=",
"1",
"while",
"i",
"<",
"len",
"(",
"msg",
")",
":",
"if",
"msg",
"[",
"i",
"]",
"==",
"'with'",
":",
"break",
"reason",
"+=",
"\" \"",
"reason",
"+=",
"msg",
"[",
"i",
"]",
"i",
"+=",
"1",
"reason",
"=",
"reason",
".",
"strip",
"(",
")",
"elif",
"msg",
"[",
"i",
"]",
"==",
"'with'",
":",
"args",
"=",
"True",
"if",
"implement",
":",
"send",
"(",
"\"Invalid Synatx: You can only have one with clause!\"",
")",
"return",
"i",
"+=",
"1",
"while",
"i",
"<",
"len",
"(",
"msg",
")",
":",
"if",
"msg",
"[",
"i",
"]",
"==",
"'for'",
":",
"break",
"implement",
"+=",
"msg",
"[",
"i",
"]",
"implement",
"+=",
"' '",
"i",
"+=",
"1",
"implement",
"=",
"implement",
".",
"strip",
"(",
")",
"elif",
"not",
"args",
":",
"slapee",
"+=",
"' '",
"+",
"msg",
"[",
"i",
"]",
"i",
"+=",
"1",
"if",
"not",
"implement",
":",
"implement",
"=",
"choice",
"(",
"implements",
")",
"if",
"reason",
":",
"slap",
"=",
"'slaps %s %s with %s for %s'",
"%",
"(",
"slapee",
",",
"method",
",",
"implement",
",",
"reason",
")",
"else",
":",
"slap",
"=",
"'slaps %s %s with %s'",
"%",
"(",
"slapee",
",",
"method",
",",
"implement",
")",
"send",
"(",
"slap",
",",
"'action'",
")"
] | 34.836066 | 17.52459 |
def match_host(host, pattern):
''' Match a host string against a pattern
Args:
host (str)
A hostname to compare to the given pattern
pattern (str)
A string representing a hostname pattern, possibly including
wildcards for ip address octets or ports.
This function will return ``True`` if the hostname matches the pattern,
including any wildcards. If the pattern contains a port, the host string
must also contain a matching port.
Returns:
bool
Examples:
>>> match_host('192.168.0.1:80', '192.168.0.1:80')
True
>>> match_host('192.168.0.1:80', '192.168.0.1')
True
>>> match_host('192.168.0.1:80', '192.168.0.1:8080')
False
>>> match_host('192.168.0.1', '192.168.0.2')
False
>>> match_host('192.168.0.1', '192.168.*.*')
True
>>> match_host('alice', 'alice')
True
>>> match_host('alice:80', 'alice')
True
>>> match_host('alice', 'bob')
False
>>> match_host('foo.example.com', 'foo.example.com.net')
False
>>> match_host('alice', '*')
True
>>> match_host('alice', '*:*')
True
>>> match_host('alice:80', '*')
True
>>> match_host('alice:80', '*:80')
True
>>> match_host('alice:8080', '*:80')
False
'''
if ':' in host:
host, host_port = host.rsplit(':', 1)
else:
host_port = None
if ':' in pattern:
pattern, pattern_port = pattern.rsplit(':', 1)
if pattern_port == '*':
pattern_port = None
else:
pattern_port = None
if pattern_port is not None and host_port != pattern_port:
return False
host = host.split('.')
pattern = pattern.split('.')
if len(pattern) > len(host):
return False
for h, p in zip(host, pattern):
if h == p or p == '*':
continue
else:
return False
return True
|
[
"def",
"match_host",
"(",
"host",
",",
"pattern",
")",
":",
"if",
"':'",
"in",
"host",
":",
"host",
",",
"host_port",
"=",
"host",
".",
"rsplit",
"(",
"':'",
",",
"1",
")",
"else",
":",
"host_port",
"=",
"None",
"if",
"':'",
"in",
"pattern",
":",
"pattern",
",",
"pattern_port",
"=",
"pattern",
".",
"rsplit",
"(",
"':'",
",",
"1",
")",
"if",
"pattern_port",
"==",
"'*'",
":",
"pattern_port",
"=",
"None",
"else",
":",
"pattern_port",
"=",
"None",
"if",
"pattern_port",
"is",
"not",
"None",
"and",
"host_port",
"!=",
"pattern_port",
":",
"return",
"False",
"host",
"=",
"host",
".",
"split",
"(",
"'.'",
")",
"pattern",
"=",
"pattern",
".",
"split",
"(",
"'.'",
")",
"if",
"len",
"(",
"pattern",
")",
">",
"len",
"(",
"host",
")",
":",
"return",
"False",
"for",
"h",
",",
"p",
"in",
"zip",
"(",
"host",
",",
"pattern",
")",
":",
"if",
"h",
"==",
"p",
"or",
"p",
"==",
"'*'",
":",
"continue",
"else",
":",
"return",
"False",
"return",
"True"
] | 25.571429 | 21.857143 |
def check_rollout(edits_service, package_name, days):
"""Check if package_name has a release on staged rollout for too long"""
edit = edits_service.insert(body={}, packageName=package_name).execute()
response = edits_service.tracks().get(editId=edit['id'], track='production', packageName=package_name).execute()
releases = response['releases']
for release in releases:
if release['status'] == 'inProgress':
url = 'https://archive.mozilla.org/pub/mobile/releases/{}/SHA512SUMS'.format(release['name'])
resp = requests.head(url)
if resp.status_code != 200:
if resp.status_code != 404: # 404 is expected for release candidates
logger.warning("Could not check %s: %s", url, resp.status_code)
continue
age = time.time() - calendar.timegm(eu.parsedate(resp.headers['Last-Modified']))
if age >= days * DAY:
yield release, age
|
[
"def",
"check_rollout",
"(",
"edits_service",
",",
"package_name",
",",
"days",
")",
":",
"edit",
"=",
"edits_service",
".",
"insert",
"(",
"body",
"=",
"{",
"}",
",",
"packageName",
"=",
"package_name",
")",
".",
"execute",
"(",
")",
"response",
"=",
"edits_service",
".",
"tracks",
"(",
")",
".",
"get",
"(",
"editId",
"=",
"edit",
"[",
"'id'",
"]",
",",
"track",
"=",
"'production'",
",",
"packageName",
"=",
"package_name",
")",
".",
"execute",
"(",
")",
"releases",
"=",
"response",
"[",
"'releases'",
"]",
"for",
"release",
"in",
"releases",
":",
"if",
"release",
"[",
"'status'",
"]",
"==",
"'inProgress'",
":",
"url",
"=",
"'https://archive.mozilla.org/pub/mobile/releases/{}/SHA512SUMS'",
".",
"format",
"(",
"release",
"[",
"'name'",
"]",
")",
"resp",
"=",
"requests",
".",
"head",
"(",
"url",
")",
"if",
"resp",
".",
"status_code",
"!=",
"200",
":",
"if",
"resp",
".",
"status_code",
"!=",
"404",
":",
"# 404 is expected for release candidates",
"logger",
".",
"warning",
"(",
"\"Could not check %s: %s\"",
",",
"url",
",",
"resp",
".",
"status_code",
")",
"continue",
"age",
"=",
"time",
".",
"time",
"(",
")",
"-",
"calendar",
".",
"timegm",
"(",
"eu",
".",
"parsedate",
"(",
"resp",
".",
"headers",
"[",
"'Last-Modified'",
"]",
")",
")",
"if",
"age",
">=",
"days",
"*",
"DAY",
":",
"yield",
"release",
",",
"age"
] | 60.0625 | 24.0625 |
def calculate_power_output(weather, example_farm, example_cluster):
r"""
Calculates power output of wind farms and clusters using the
:class:`~.turbine_cluster_modelchain.TurbineClusterModelChain`.
The :class:`~.turbine_cluster_modelchain.TurbineClusterModelChain` is a
class that provides all necessary steps to calculate the power output of a
wind farm or cluster. You can either use the default methods for the
calculation steps, as done for 'example_farm', or choose different methods,
as done for 'example_cluster'.
Parameters
----------
weather : pd.DataFrame
Contains weather data time series.
example_farm : WindFarm
WindFarm object.
example_cluster : WindTurbineCluster
WindTurbineCluster object.
"""
# set efficiency of example_farm to apply wake losses
example_farm.efficiency = 0.9
# power output calculation for example_farm
# initialize TurbineClusterModelChain with default parameters and use
# run_model method to calculate power output
mc_example_farm = TurbineClusterModelChain(example_farm).run_model(weather)
# write power output time series to WindFarm object
example_farm.power_output = mc_example_farm.power_output
# power output calculation for turbine_cluster
# own specifications for TurbineClusterModelChain setup
modelchain_data = {
'wake_losses_model': 'constant_efficiency', #
# 'dena_mean' (default), None,
# 'power_efficiency_curve',
# 'constant_efficiency' or name of
# a wind efficiency curve
# see :py:func:`~.wake_losses.get_wind_efficiency_curve`
'smoothing': True, # False (default) or True
'block_width': 0.5, # default: 0.5
'standard_deviation_method': 'Staffell_Pfenninger', #
# 'turbulence_intensity' (default)
# or 'Staffell_Pfenninger'
'smoothing_order': 'wind_farm_power_curves', #
# 'wind_farm_power_curves' (default) or
# 'turbine_power_curves'
'wind_speed_model': 'logarithmic', # 'logarithmic' (default),
# 'hellman' or
# 'interpolation_extrapolation'
'density_model': 'ideal_gas', # 'barometric' (default), 'ideal_gas' or
# 'interpolation_extrapolation'
'temperature_model': 'linear_gradient', # 'linear_gradient' (def.) or
# 'interpolation_extrapolation'
'power_output_model': 'power_curve', # 'power_curve' (default) or
# 'power_coefficient_curve'
'density_correction': True, # False (default) or True
'obstacle_height': 0, # default: 0
        'hellman_exp': None}  # None (default) or float
# initialize TurbineClusterModelChain with own specifications and use
# run_model method to calculate power output
mc_example_cluster = TurbineClusterModelChain(
example_cluster, **modelchain_data).run_model(weather)
# write power output time series to WindTurbineCluster object
example_cluster.power_output = mc_example_cluster.power_output
return
|
[
"def",
"calculate_power_output",
"(",
"weather",
",",
"example_farm",
",",
"example_cluster",
")",
":",
"# set efficiency of example_farm to apply wake losses",
"example_farm",
".",
"efficiency",
"=",
"0.9",
"# power output calculation for example_farm",
"# initialize TurbineClusterModelChain with default parameters and use",
"# run_model method to calculate power output",
"mc_example_farm",
"=",
"TurbineClusterModelChain",
"(",
"example_farm",
")",
".",
"run_model",
"(",
"weather",
")",
"# write power output time series to WindFarm object",
"example_farm",
".",
"power_output",
"=",
"mc_example_farm",
".",
"power_output",
"# power output calculation for turbine_cluster",
"# own specifications for TurbineClusterModelChain setup",
"modelchain_data",
"=",
"{",
"'wake_losses_model'",
":",
"'constant_efficiency'",
",",
"#",
"# 'dena_mean' (default), None,",
"# 'power_efficiency_curve',",
"# 'constant_efficiency' or name of",
"# a wind efficiency curve",
"# see :py:func:`~.wake_losses.get_wind_efficiency_curve`",
"'smoothing'",
":",
"True",
",",
"# False (default) or True",
"'block_width'",
":",
"0.5",
",",
"# default: 0.5",
"'standard_deviation_method'",
":",
"'Staffell_Pfenninger'",
",",
"#",
"# 'turbulence_intensity' (default)",
"# or 'Staffell_Pfenninger'",
"'smoothing_order'",
":",
"'wind_farm_power_curves'",
",",
"#",
"# 'wind_farm_power_curves' (default) or",
"# 'turbine_power_curves'",
"'wind_speed_model'",
":",
"'logarithmic'",
",",
"# 'logarithmic' (default),",
"# 'hellman' or",
"# 'interpolation_extrapolation'",
"'density_model'",
":",
"'ideal_gas'",
",",
"# 'barometric' (default), 'ideal_gas' or",
"# 'interpolation_extrapolation'",
"'temperature_model'",
":",
"'linear_gradient'",
",",
"# 'linear_gradient' (def.) or",
"# 'interpolation_extrapolation'",
"'power_output_model'",
":",
"'power_curve'",
",",
"# 'power_curve' (default) or",
"# 'power_coefficient_curve'",
"'density_correction'",
":",
"True",
",",
"# False (default) or True",
"'obstacle_height'",
":",
"0",
",",
"# default: 0",
"'hellman_exp'",
":",
"None",
"}",
"# None (default) or None",
"# initialize TurbineClusterModelChain with own specifications and use",
"# run_model method to calculate power output",
"mc_example_cluster",
"=",
"TurbineClusterModelChain",
"(",
"example_cluster",
",",
"*",
"*",
"modelchain_data",
")",
".",
"run_model",
"(",
"weather",
")",
"# write power output time series to WindTurbineCluster object",
"example_cluster",
".",
"power_output",
"=",
"mc_example_cluster",
".",
"power_output",
"return"
] | 51.029412 | 23.779412 |
def import_parallel_gateway_to_graph(diagram_graph, process_id, process_attributes, element):
"""
Adds to graph the new element that represents BPMN parallel gateway.
Parallel gateway doesn't have additional attributes. Separate method is used to improve code readability.
:param diagram_graph: NetworkX graph representing a BPMN process diagram,
:param process_id: string object, representing an ID of process element,
:param process_attributes: dictionary that holds attribute values of 'process' element, which is parent of
imported flow node,
:param element: object representing a BPMN XML 'parallelGateway'.
"""
BpmnDiagramGraphImport.import_gateway_to_graph(diagram_graph, process_id, process_attributes, element)
|
[
"def",
"import_parallel_gateway_to_graph",
"(",
"diagram_graph",
",",
"process_id",
",",
"process_attributes",
",",
"element",
")",
":",
"BpmnDiagramGraphImport",
".",
"import_gateway_to_graph",
"(",
"diagram_graph",
",",
"process_id",
",",
"process_attributes",
",",
"element",
")"
] | 66.083333 | 39.083333 |
def _process_execs(self, contents, modulename, atype, mode="insert"):
"""Extracts all the executable methods that belong to the type."""
#We only want to look at text after the contains statement
match = self.RE_CONTAINS.search(contents)
#It is possible for the type to not have any executables
if match is not None:
exectext = match.group("remainder")
self._process_execs_contents(exectext, modulename, atype, mode)
|
[
"def",
"_process_execs",
"(",
"self",
",",
"contents",
",",
"modulename",
",",
"atype",
",",
"mode",
"=",
"\"insert\"",
")",
":",
"#We only want to look at text after the contains statement",
"match",
"=",
"self",
".",
"RE_CONTAINS",
".",
"search",
"(",
"contents",
")",
"#It is possible for the type to not have any executables",
"if",
"match",
"is",
"not",
"None",
":",
"exectext",
"=",
"match",
".",
"group",
"(",
"\"remainder\"",
")",
"self",
".",
"_process_execs_contents",
"(",
"exectext",
",",
"modulename",
",",
"atype",
",",
"mode",
")"
] | 52.555556 | 20.111111 |
def paragraphs(quantity=2, separator='\n\n', wrap_start='', wrap_end='',
html=False, sentences_quantity=3, as_list=False):
"""Random paragraphs."""
if html:
wrap_start = '<p>'
wrap_end = '</p>'
separator = '\n\n'
result = []
for i in xrange(0, quantity):
result.append(wrap_start + sentences(sentences_quantity) + wrap_end)
if as_list:
return result
else:
return separator.join(result)
|
[
"def",
"paragraphs",
"(",
"quantity",
"=",
"2",
",",
"separator",
"=",
"'\\n\\n'",
",",
"wrap_start",
"=",
"''",
",",
"wrap_end",
"=",
"''",
",",
"html",
"=",
"False",
",",
"sentences_quantity",
"=",
"3",
",",
"as_list",
"=",
"False",
")",
":",
"if",
"html",
":",
"wrap_start",
"=",
"'<p>'",
"wrap_end",
"=",
"'</p>'",
"separator",
"=",
"'\\n\\n'",
"result",
"=",
"[",
"]",
"for",
"i",
"in",
"xrange",
"(",
"0",
",",
"quantity",
")",
":",
"result",
".",
"append",
"(",
"wrap_start",
"+",
"sentences",
"(",
"sentences_quantity",
")",
"+",
"wrap_end",
")",
"if",
"as_list",
":",
"return",
"result",
"else",
":",
"return",
"separator",
".",
"join",
"(",
"result",
")"
] | 28.6875 | 22.0625 |
def forward(self, address, types=None, lon=None, lat=None,
country=None, bbox=None, limit=None, languages=None):
"""Returns a Requests response object that contains a GeoJSON
collection of places matching the given address.
`response.geojson()` returns the geocoding result as GeoJSON.
`response.status_code` returns the HTTP API status code.
Place results may be constrained to those of one or more types
or be biased toward a given longitude and latitude.
See: https://www.mapbox.com/api-documentation/search/#geocoding."""
uri = URITemplate(self.baseuri + '/{dataset}/{query}.json').expand(
dataset=self.name, query=address.encode('utf-8'))
params = {}
if country:
params.update(self._validate_country_codes(country))
if types:
params.update(self._validate_place_types(types))
if lon is not None and lat is not None:
params.update(proximity='{0},{1}'.format(
round(float(lon), self.precision.get('proximity', 3)),
round(float(lat), self.precision.get('proximity', 3))))
if languages:
params.update(language=','.join(languages))
if bbox is not None:
params.update(bbox='{0},{1},{2},{3}'.format(*bbox))
if limit is not None:
params.update(limit='{0}'.format(limit))
resp = self.session.get(uri, params=params)
self.handle_http_error(resp)
# for consistency with other services
def geojson():
return resp.json()
resp.geojson = geojson
return resp
|
[
"def",
"forward",
"(",
"self",
",",
"address",
",",
"types",
"=",
"None",
",",
"lon",
"=",
"None",
",",
"lat",
"=",
"None",
",",
"country",
"=",
"None",
",",
"bbox",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"languages",
"=",
"None",
")",
":",
"uri",
"=",
"URITemplate",
"(",
"self",
".",
"baseuri",
"+",
"'/{dataset}/{query}.json'",
")",
".",
"expand",
"(",
"dataset",
"=",
"self",
".",
"name",
",",
"query",
"=",
"address",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"params",
"=",
"{",
"}",
"if",
"country",
":",
"params",
".",
"update",
"(",
"self",
".",
"_validate_country_codes",
"(",
"country",
")",
")",
"if",
"types",
":",
"params",
".",
"update",
"(",
"self",
".",
"_validate_place_types",
"(",
"types",
")",
")",
"if",
"lon",
"is",
"not",
"None",
"and",
"lat",
"is",
"not",
"None",
":",
"params",
".",
"update",
"(",
"proximity",
"=",
"'{0},{1}'",
".",
"format",
"(",
"round",
"(",
"float",
"(",
"lon",
")",
",",
"self",
".",
"precision",
".",
"get",
"(",
"'proximity'",
",",
"3",
")",
")",
",",
"round",
"(",
"float",
"(",
"lat",
")",
",",
"self",
".",
"precision",
".",
"get",
"(",
"'proximity'",
",",
"3",
")",
")",
")",
")",
"if",
"languages",
":",
"params",
".",
"update",
"(",
"language",
"=",
"','",
".",
"join",
"(",
"languages",
")",
")",
"if",
"bbox",
"is",
"not",
"None",
":",
"params",
".",
"update",
"(",
"bbox",
"=",
"'{0},{1},{2},{3}'",
".",
"format",
"(",
"*",
"bbox",
")",
")",
"if",
"limit",
"is",
"not",
"None",
":",
"params",
".",
"update",
"(",
"limit",
"=",
"'{0}'",
".",
"format",
"(",
"limit",
")",
")",
"resp",
"=",
"self",
".",
"session",
".",
"get",
"(",
"uri",
",",
"params",
"=",
"params",
")",
"self",
".",
"handle_http_error",
"(",
"resp",
")",
"# for consistency with other services",
"def",
"geojson",
"(",
")",
":",
"return",
"resp",
".",
"json",
"(",
")",
"resp",
".",
"geojson",
"=",
"geojson",
"return",
"resp"
] | 42.789474 | 20.578947 |
def download_ncbi_associations(gene2go="gene2go", prt=sys.stdout, loading_bar=True):
"""Download associations from NCBI, if necessary"""
# Download: ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/gene2go.gz
gzip_file = "{GENE2GO}.gz".format(GENE2GO=gene2go)
if not os.path.isfile(gene2go):
file_remote = "ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/{GZ}".format(
GZ=os.path.basename(gzip_file))
dnld_file(file_remote, gene2go, prt, loading_bar)
else:
if prt is not None:
prt.write(" EXISTS: {FILE}\n".format(FILE=gene2go))
return gene2go
|
[
"def",
"download_ncbi_associations",
"(",
"gene2go",
"=",
"\"gene2go\"",
",",
"prt",
"=",
"sys",
".",
"stdout",
",",
"loading_bar",
"=",
"True",
")",
":",
"# Download: ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/gene2go.gz",
"gzip_file",
"=",
"\"{GENE2GO}.gz\"",
".",
"format",
"(",
"GENE2GO",
"=",
"gene2go",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"gene2go",
")",
":",
"file_remote",
"=",
"\"ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/{GZ}\"",
".",
"format",
"(",
"GZ",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"gzip_file",
")",
")",
"dnld_file",
"(",
"file_remote",
",",
"gene2go",
",",
"prt",
",",
"loading_bar",
")",
"else",
":",
"if",
"prt",
"is",
"not",
"None",
":",
"prt",
".",
"write",
"(",
"\" EXISTS: {FILE}\\n\"",
".",
"format",
"(",
"FILE",
"=",
"gene2go",
")",
")",
"return",
"gene2go"
] | 48.5 | 19.083333 |
def on_parallel_port_change(self, parallel_port):
"""Triggered when settings of a parallel port of the
associated virtual machine have changed.
in parallel_port of type :class:`IParallelPort`
raises :class:`VBoxErrorInvalidVmState`
Session state prevents operation.
raises :class:`VBoxErrorInvalidObjectState`
Session type prevents operation.
"""
if not isinstance(parallel_port, IParallelPort):
raise TypeError("parallel_port can only be an instance of type IParallelPort")
self._call("onParallelPortChange",
in_p=[parallel_port])
|
[
"def",
"on_parallel_port_change",
"(",
"self",
",",
"parallel_port",
")",
":",
"if",
"not",
"isinstance",
"(",
"parallel_port",
",",
"IParallelPort",
")",
":",
"raise",
"TypeError",
"(",
"\"parallel_port can only be an instance of type IParallelPort\"",
")",
"self",
".",
"_call",
"(",
"\"onParallelPortChange\"",
",",
"in_p",
"=",
"[",
"parallel_port",
"]",
")"
] | 38.588235 | 16.058824 |
def potential_physical_input(method):
"""Decorator to convert inputs to Potential functions from physical
to internal coordinates"""
@wraps(method)
def wrapper(*args,**kwargs):
from galpy.potential import flatten as flatten_potential
Pot= flatten_potential(args[0])
ro= kwargs.get('ro',None)
if ro is None and hasattr(Pot,'_ro'):
ro= Pot._ro
if ro is None and isinstance(Pot,list) \
and hasattr(Pot[0],'_ro'):
# For lists of Potentials
ro= Pot[0]._ro
if _APY_LOADED and isinstance(ro,units.Quantity):
ro= ro.to(units.kpc).value
if 't' in kwargs or 'M' in kwargs:
vo= kwargs.get('vo',None)
if vo is None and hasattr(Pot,'_vo'):
vo= Pot._vo
if vo is None and isinstance(Pot,list) \
and hasattr(Pot[0],'_vo'):
# For lists of Potentials
vo= Pot[0]._vo
if _APY_LOADED and isinstance(vo,units.Quantity):
vo= vo.to(units.km/units.s).value
# Loop through args
newargs= (Pot,)
for ii in range(1,len(args)):
if _APY_LOADED and isinstance(args[ii],units.Quantity):
newargs= newargs+(args[ii].to(units.kpc).value/ro,)
else:
newargs= newargs+(args[ii],)
args= newargs
# phi and t kwargs
if 'phi' in kwargs and _APY_LOADED \
and isinstance(kwargs['phi'],units.Quantity):
kwargs['phi']= kwargs['phi'].to(units.rad).value
if 't' in kwargs and _APY_LOADED \
and isinstance(kwargs['t'],units.Quantity):
kwargs['t']= kwargs['t'].to(units.Gyr).value\
/time_in_Gyr(vo,ro)
# v kwarg for dissipative forces
if 'v' in kwargs and _APY_LOADED \
and isinstance(kwargs['v'],units.Quantity):
kwargs['v']= kwargs['v'].to(units.km/units.s).value/vo
# Mass kwarg for rtide
if 'M' in kwargs and _APY_LOADED \
and isinstance(kwargs['M'],units.Quantity):
try:
kwargs['M']= kwargs['M'].to(units.Msun).value\
/mass_in_msol(vo,ro)
except units.UnitConversionError:
kwargs['M']= kwargs['M'].to(units.pc*units.km**2/units.s**2)\
.value/mass_in_msol(vo,ro)/_G
# kwargs that come up in quasiisothermaldf
if 'z' in kwargs and _APY_LOADED \
and isinstance(kwargs['z'],units.Quantity):
kwargs['z']= kwargs['z'].to(units.kpc).value/ro
if 'dz' in kwargs and _APY_LOADED \
and isinstance(kwargs['dz'],units.Quantity):
kwargs['dz']= kwargs['dz'].to(units.kpc).value/ro
if 'dR' in kwargs and _APY_LOADED \
and isinstance(kwargs['dR'],units.Quantity):
kwargs['dR']= kwargs['dR'].to(units.kpc).value/ro
if 'zmax' in kwargs and _APY_LOADED \
and isinstance(kwargs['zmax'],units.Quantity):
kwargs['zmax']= kwargs['zmax'].to(units.kpc).value/ro
return method(*args,**kwargs)
return wrapper
|
[
"def",
"potential_physical_input",
"(",
"method",
")",
":",
"@",
"wraps",
"(",
"method",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"galpy",
".",
"potential",
"import",
"flatten",
"as",
"flatten_potential",
"Pot",
"=",
"flatten_potential",
"(",
"args",
"[",
"0",
"]",
")",
"ro",
"=",
"kwargs",
".",
"get",
"(",
"'ro'",
",",
"None",
")",
"if",
"ro",
"is",
"None",
"and",
"hasattr",
"(",
"Pot",
",",
"'_ro'",
")",
":",
"ro",
"=",
"Pot",
".",
"_ro",
"if",
"ro",
"is",
"None",
"and",
"isinstance",
"(",
"Pot",
",",
"list",
")",
"and",
"hasattr",
"(",
"Pot",
"[",
"0",
"]",
",",
"'_ro'",
")",
":",
"# For lists of Potentials",
"ro",
"=",
"Pot",
"[",
"0",
"]",
".",
"_ro",
"if",
"_APY_LOADED",
"and",
"isinstance",
"(",
"ro",
",",
"units",
".",
"Quantity",
")",
":",
"ro",
"=",
"ro",
".",
"to",
"(",
"units",
".",
"kpc",
")",
".",
"value",
"if",
"'t'",
"in",
"kwargs",
"or",
"'M'",
"in",
"kwargs",
":",
"vo",
"=",
"kwargs",
".",
"get",
"(",
"'vo'",
",",
"None",
")",
"if",
"vo",
"is",
"None",
"and",
"hasattr",
"(",
"Pot",
",",
"'_vo'",
")",
":",
"vo",
"=",
"Pot",
".",
"_vo",
"if",
"vo",
"is",
"None",
"and",
"isinstance",
"(",
"Pot",
",",
"list",
")",
"and",
"hasattr",
"(",
"Pot",
"[",
"0",
"]",
",",
"'_vo'",
")",
":",
"# For lists of Potentials",
"vo",
"=",
"Pot",
"[",
"0",
"]",
".",
"_vo",
"if",
"_APY_LOADED",
"and",
"isinstance",
"(",
"vo",
",",
"units",
".",
"Quantity",
")",
":",
"vo",
"=",
"vo",
".",
"to",
"(",
"units",
".",
"km",
"/",
"units",
".",
"s",
")",
".",
"value",
"# Loop through args",
"newargs",
"=",
"(",
"Pot",
",",
")",
"for",
"ii",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"args",
")",
")",
":",
"if",
"_APY_LOADED",
"and",
"isinstance",
"(",
"args",
"[",
"ii",
"]",
",",
"units",
".",
"Quantity",
")",
":",
"newargs",
"=",
"newargs",
"+",
"(",
"args",
"[",
"ii",
"]",
".",
"to",
"(",
"units",
".",
"kpc",
")",
".",
"value",
"/",
"ro",
",",
")",
"else",
":",
"newargs",
"=",
"newargs",
"+",
"(",
"args",
"[",
"ii",
"]",
",",
")",
"args",
"=",
"newargs",
"# phi and t kwargs",
"if",
"'phi'",
"in",
"kwargs",
"and",
"_APY_LOADED",
"and",
"isinstance",
"(",
"kwargs",
"[",
"'phi'",
"]",
",",
"units",
".",
"Quantity",
")",
":",
"kwargs",
"[",
"'phi'",
"]",
"=",
"kwargs",
"[",
"'phi'",
"]",
".",
"to",
"(",
"units",
".",
"rad",
")",
".",
"value",
"if",
"'t'",
"in",
"kwargs",
"and",
"_APY_LOADED",
"and",
"isinstance",
"(",
"kwargs",
"[",
"'t'",
"]",
",",
"units",
".",
"Quantity",
")",
":",
"kwargs",
"[",
"'t'",
"]",
"=",
"kwargs",
"[",
"'t'",
"]",
".",
"to",
"(",
"units",
".",
"Gyr",
")",
".",
"value",
"/",
"time_in_Gyr",
"(",
"vo",
",",
"ro",
")",
"# v kwarg for dissipative forces",
"if",
"'v'",
"in",
"kwargs",
"and",
"_APY_LOADED",
"and",
"isinstance",
"(",
"kwargs",
"[",
"'v'",
"]",
",",
"units",
".",
"Quantity",
")",
":",
"kwargs",
"[",
"'v'",
"]",
"=",
"kwargs",
"[",
"'v'",
"]",
".",
"to",
"(",
"units",
".",
"km",
"/",
"units",
".",
"s",
")",
".",
"value",
"/",
"vo",
"# Mass kwarg for rtide",
"if",
"'M'",
"in",
"kwargs",
"and",
"_APY_LOADED",
"and",
"isinstance",
"(",
"kwargs",
"[",
"'M'",
"]",
",",
"units",
".",
"Quantity",
")",
":",
"try",
":",
"kwargs",
"[",
"'M'",
"]",
"=",
"kwargs",
"[",
"'M'",
"]",
".",
"to",
"(",
"units",
".",
"Msun",
")",
".",
"value",
"/",
"mass_in_msol",
"(",
"vo",
",",
"ro",
")",
"except",
"units",
".",
"UnitConversionError",
":",
"kwargs",
"[",
"'M'",
"]",
"=",
"kwargs",
"[",
"'M'",
"]",
".",
"to",
"(",
"units",
".",
"pc",
"*",
"units",
".",
"km",
"**",
"2",
"/",
"units",
".",
"s",
"**",
"2",
")",
".",
"value",
"/",
"mass_in_msol",
"(",
"vo",
",",
"ro",
")",
"/",
"_G",
"# kwargs that come up in quasiisothermaldf ",
"if",
"'z'",
"in",
"kwargs",
"and",
"_APY_LOADED",
"and",
"isinstance",
"(",
"kwargs",
"[",
"'z'",
"]",
",",
"units",
".",
"Quantity",
")",
":",
"kwargs",
"[",
"'z'",
"]",
"=",
"kwargs",
"[",
"'z'",
"]",
".",
"to",
"(",
"units",
".",
"kpc",
")",
".",
"value",
"/",
"ro",
"if",
"'dz'",
"in",
"kwargs",
"and",
"_APY_LOADED",
"and",
"isinstance",
"(",
"kwargs",
"[",
"'dz'",
"]",
",",
"units",
".",
"Quantity",
")",
":",
"kwargs",
"[",
"'dz'",
"]",
"=",
"kwargs",
"[",
"'dz'",
"]",
".",
"to",
"(",
"units",
".",
"kpc",
")",
".",
"value",
"/",
"ro",
"if",
"'dR'",
"in",
"kwargs",
"and",
"_APY_LOADED",
"and",
"isinstance",
"(",
"kwargs",
"[",
"'dR'",
"]",
",",
"units",
".",
"Quantity",
")",
":",
"kwargs",
"[",
"'dR'",
"]",
"=",
"kwargs",
"[",
"'dR'",
"]",
".",
"to",
"(",
"units",
".",
"kpc",
")",
".",
"value",
"/",
"ro",
"if",
"'zmax'",
"in",
"kwargs",
"and",
"_APY_LOADED",
"and",
"isinstance",
"(",
"kwargs",
"[",
"'zmax'",
"]",
",",
"units",
".",
"Quantity",
")",
":",
"kwargs",
"[",
"'zmax'",
"]",
"=",
"kwargs",
"[",
"'zmax'",
"]",
".",
"to",
"(",
"units",
".",
"kpc",
")",
".",
"value",
"/",
"ro",
"return",
"method",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper"
] | 45.114286 | 12.114286 |
def is_full_mxid(user_string):
"""Returns True if a string is a valid mxid."""
if not user_string[0] == "@":
return False
parts = user_string[1:].split(":")
localpart_chars = ascii_lowercase + digits + "._-="
if not (len(parts) == 2 and all([i in localpart_chars for i in parts[0]])):
return False
return True
|
[
"def",
"is_full_mxid",
"(",
"user_string",
")",
":",
"if",
"not",
"user_string",
"[",
"0",
"]",
"==",
"\"@\"",
":",
"return",
"False",
"parts",
"=",
"user_string",
"[",
"1",
":",
"]",
".",
"split",
"(",
"\":\"",
")",
"localpart_chars",
"=",
"ascii_lowercase",
"+",
"digits",
"+",
"\"._-=\"",
"if",
"not",
"(",
"len",
"(",
"parts",
")",
"==",
"2",
"and",
"all",
"(",
"[",
"i",
"in",
"localpart_chars",
"for",
"i",
"in",
"parts",
"[",
"0",
"]",
"]",
")",
")",
":",
"return",
"False",
"return",
"True"
] | 37.888889 | 15.333333 |
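A quick behavioral sketch of the validator above, assuming `is_full_mxid` is in scope together with the `string` constants it relies on. The localpart is restricted to lowercase letters, digits and `._-=`, and exactly one colon may separate localpart and server name:

# Sanity checks for is_full_mxid (assumed imported from the module above).
assert is_full_mxid("@alice:example.org")        # well-formed mxid
assert not is_full_mxid("alice:example.org")     # missing leading "@"
assert not is_full_mxid("@Alice:example.org")    # uppercase not allowed in localpart
assert not is_full_mxid("@a:b:c")                # more than one ":" separator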
def kde_histogram(events_x, events_y, xout=None, yout=None, bins=None):
""" Histogram-based Kernel Density Estimation
Parameters
----------
events_x, events_y: 1D ndarray
The input points for kernel density estimation. Input
is flattened automatically.
xout, yout: ndarray
The coordinates at which the KDE should be computed.
        If set to None, the input coordinates are used.
bins: tuple (binsx, binsy)
The number of bins to use for the histogram.
Returns
-------
density: ndarray, same shape as `xout`
The KDE for the points in (xout, yout)
See Also
--------
`numpy.histogram2d`
`scipy.interpolate.RectBivariateSpline`
"""
valid_combi = ((xout is None and yout is None) or
(xout is not None and yout is not None)
)
if not valid_combi:
raise ValueError("Both `xout` and `yout` must be (un)set.")
    if xout is None and yout is None:
xout = events_x
yout = events_y
if bins is None:
bins = (max(5, bin_num_doane(events_x)),
max(5, bin_num_doane(events_y)))
# Compute the histogram
hist2d, xedges, yedges = np.histogram2d(x=events_x,
y=events_y,
bins=bins,
normed=True)
xip = xedges[1:]-(xedges[1]-xedges[0])/2
yip = yedges[1:]-(yedges[1]-yedges[0])/2
estimator = RectBivariateSpline(x=xip, y=yip, z=hist2d)
density = estimator.ev(xout, yout)
density[density < 0] = 0
return density.reshape(xout.shape)
|
[
"def",
"kde_histogram",
"(",
"events_x",
",",
"events_y",
",",
"xout",
"=",
"None",
",",
"yout",
"=",
"None",
",",
"bins",
"=",
"None",
")",
":",
"valid_combi",
"=",
"(",
"(",
"xout",
"is",
"None",
"and",
"yout",
"is",
"None",
")",
"or",
"(",
"xout",
"is",
"not",
"None",
"and",
"yout",
"is",
"not",
"None",
")",
")",
"if",
"not",
"valid_combi",
":",
"raise",
"ValueError",
"(",
"\"Both `xout` and `yout` must be (un)set.\"",
")",
"if",
"yout",
"is",
"None",
"and",
"yout",
"is",
"None",
":",
"xout",
"=",
"events_x",
"yout",
"=",
"events_y",
"if",
"bins",
"is",
"None",
":",
"bins",
"=",
"(",
"max",
"(",
"5",
",",
"bin_num_doane",
"(",
"events_x",
")",
")",
",",
"max",
"(",
"5",
",",
"bin_num_doane",
"(",
"events_y",
")",
")",
")",
"# Compute the histogram",
"hist2d",
",",
"xedges",
",",
"yedges",
"=",
"np",
".",
"histogram2d",
"(",
"x",
"=",
"events_x",
",",
"y",
"=",
"events_y",
",",
"bins",
"=",
"bins",
",",
"normed",
"=",
"True",
")",
"xip",
"=",
"xedges",
"[",
"1",
":",
"]",
"-",
"(",
"xedges",
"[",
"1",
"]",
"-",
"xedges",
"[",
"0",
"]",
")",
"/",
"2",
"yip",
"=",
"yedges",
"[",
"1",
":",
"]",
"-",
"(",
"yedges",
"[",
"1",
"]",
"-",
"yedges",
"[",
"0",
"]",
")",
"/",
"2",
"estimator",
"=",
"RectBivariateSpline",
"(",
"x",
"=",
"xip",
",",
"y",
"=",
"yip",
",",
"z",
"=",
"hist2d",
")",
"density",
"=",
"estimator",
".",
"ev",
"(",
"xout",
",",
"yout",
")",
"density",
"[",
"density",
"<",
"0",
"]",
"=",
"0",
"return",
"density",
".",
"reshape",
"(",
"xout",
".",
"shape",
")"
] | 31.745098 | 18.254902 |
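A usage sketch on synthetic data, assuming `kde_histogram` and its helper `bin_num_doane` are importable from the module above (note that the `normed=` keyword requires an era-appropriate older NumPy). When `xout`/`yout` are omitted the density is evaluated at the input points themselves:

# Evaluate the histogram-based KDE at the sample points (minimal sketch).
import numpy as np

rng = np.random.RandomState(0)
x = rng.normal(size=1000)
y = rng.normal(size=1000)
density = kde_histogram(x, y)      # xout/yout default to the inputs
assert density.shape == x.shape
assert (density >= 0).all()        # negative spline artifacts are clipped to 0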
def _ctypes_regular(parameter):
"""Returns the code lines to define a *local* variable with the fortran types
that has a matching signature to the wrapped executable.
"""
if ("pointer" in parameter.modifiers or "allocatable" in parameter.modifiers
or "target" in parameter.modifiers or parameter.dtype == "logical"):
return (parameter.definition(local=True), False)
|
[
"def",
"_ctypes_regular",
"(",
"parameter",
")",
":",
"if",
"(",
"\"pointer\"",
"in",
"parameter",
".",
"modifiers",
"or",
"\"allocatable\"",
"in",
"parameter",
".",
"modifiers",
"or",
"\"target\"",
"in",
"parameter",
".",
"modifiers",
"or",
"parameter",
".",
"dtype",
"==",
"\"logical\"",
")",
":",
"return",
"(",
"parameter",
".",
"definition",
"(",
"local",
"=",
"True",
")",
",",
"False",
")"
] | 55.857143 | 17.285714 |
def validate(self, value):
"""Validate field value."""
if value is not None and not isinstance(value, str):
raise ValidationError("field must be a string")
super().validate(value)
|
[
"def",
"validate",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"is",
"not",
"None",
"and",
"not",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"raise",
"ValidationError",
"(",
"\"field must be a string\"",
")",
"super",
"(",
")",
".",
"validate",
"(",
"value",
")"
] | 35.166667 | 17 |
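The contract is narrow: `None` and `str` pass through to the parent validator, anything else raises. A minimal sketch, assuming a hypothetical concrete field class built on this method:

# Hypothetical field instance; the StringField name is illustrative only.
field = StringField()
field.validate("ok")        # accepted
field.validate(None)        # accepted: None is explicitly allowed
try:
    field.validate(123)
except ValidationError:     # non-str values are rejected before super().validate
    pass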
async def delete(self, request, resource=None, **kwargs):
"""Delete a resource.
Supports batch delete.
"""
if resource:
resources = [resource]
else:
data = await self.parse(request)
            resources = []
            if data:
resources = list(self.collection.where(self.meta.model_pk << data))
if not resources:
raise RESTNotFound(reason='Resource not found')
for resource in resources:
resource.delete_instance()
|
[
"async",
"def",
"delete",
"(",
"self",
",",
"request",
",",
"resource",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"resource",
":",
"resources",
"=",
"[",
"resource",
"]",
"else",
":",
"data",
"=",
"await",
"self",
".",
"parse",
"(",
"request",
")",
"if",
"data",
":",
"resources",
"=",
"list",
"(",
"self",
".",
"collection",
".",
"where",
"(",
"self",
".",
"meta",
".",
"model_pk",
"<<",
"data",
")",
")",
"if",
"not",
"resources",
":",
"raise",
"RESTNotFound",
"(",
"reason",
"=",
"'Resource not found'",
")",
"for",
"resource",
"in",
"resources",
":",
"resource",
".",
"delete_instance",
"(",
")"
] | 29.235294 | 18.176471 |
def xml(self, attribs=None, elements=None, skipchildren=False):
"""See :meth:`AbstractElement.xml`"""
if not attribs: attribs = {}
if self.idref:
attribs['id'] = self.idref
return super(AbstractTextMarkup,self).xml(attribs,elements, skipchildren)
|
[
"def",
"xml",
"(",
"self",
",",
"attribs",
"=",
"None",
",",
"elements",
"=",
"None",
",",
"skipchildren",
"=",
"False",
")",
":",
"if",
"not",
"attribs",
":",
"attribs",
"=",
"{",
"}",
"if",
"self",
".",
"idref",
":",
"attribs",
"[",
"'id'",
"]",
"=",
"self",
".",
"idref",
"return",
"super",
"(",
"AbstractTextMarkup",
",",
"self",
")",
".",
"xml",
"(",
"attribs",
",",
"elements",
",",
"skipchildren",
")"
] | 48.333333 | 15.5 |
def inv_entry_to_path(data):
"""
Determine the path from the intersphinx inventory entry
Discard the anchors between head and tail to make it
compatible with situations where extra meta information is encoded.
"""
path_tuple = data[2].split("#")
if len(path_tuple) > 1:
path_str = "#".join((path_tuple[0], path_tuple[-1]))
else:
path_str = data[2]
return path_str
|
[
"def",
"inv_entry_to_path",
"(",
"data",
")",
":",
"path_tuple",
"=",
"data",
"[",
"2",
"]",
".",
"split",
"(",
"\"#\"",
")",
"if",
"len",
"(",
"path_tuple",
")",
">",
"1",
":",
"path_str",
"=",
"\"#\"",
".",
"join",
"(",
"(",
"path_tuple",
"[",
"0",
"]",
",",
"path_tuple",
"[",
"-",
"1",
"]",
")",
")",
"else",
":",
"path_str",
"=",
"data",
"[",
"2",
"]",
"return",
"path_str"
] | 31.076923 | 17.076923 |
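A behavior sketch of the anchor handling: only the head and the last anchor survive, intermediate anchors are dropped. The tuple layout (path in `data[2]`) mirrors what the function indexes; the concrete values are hypothetical:

# inv_entry_to_path keeps head + last anchor, drops the middle ones.
entry = (None, None, "api/module.html#extra#meta#target", None)
assert inv_entry_to_path(entry) == "api/module.html#target"
plain = (None, None, "api/module.html", None)
assert inv_entry_to_path(plain) == "api/module.html"    # no anchor: unchanged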
def send_last_message(self, msg, connection_id=None):
"""
Should be used instead of send_message, when you want to close the
connection once the message is sent.
:param msg: protobuf validator_pb2.Message
"""
zmq_identity = None
if connection_id is not None and self._connections is not None:
if connection_id in self._connections:
connection_info = self._connections.get(connection_id)
if connection_info.connection_type == \
ConnectionType.ZMQ_IDENTITY:
zmq_identity = connection_info.connection
del self._connections[connection_id]
else:
LOGGER.debug("Can't send to %s, not in self._connections",
connection_id)
return
self._ready.wait()
try:
asyncio.run_coroutine_threadsafe(
self._send_last_message(zmq_identity, msg),
self._event_loop)
except RuntimeError:
# run_coroutine_threadsafe will throw a RuntimeError if
# the eventloop is closed. This occurs on shutdown.
pass
|
[
"def",
"send_last_message",
"(",
"self",
",",
"msg",
",",
"connection_id",
"=",
"None",
")",
":",
"zmq_identity",
"=",
"None",
"if",
"connection_id",
"is",
"not",
"None",
"and",
"self",
".",
"_connections",
"is",
"not",
"None",
":",
"if",
"connection_id",
"in",
"self",
".",
"_connections",
":",
"connection_info",
"=",
"self",
".",
"_connections",
".",
"get",
"(",
"connection_id",
")",
"if",
"connection_info",
".",
"connection_type",
"==",
"ConnectionType",
".",
"ZMQ_IDENTITY",
":",
"zmq_identity",
"=",
"connection_info",
".",
"connection",
"del",
"self",
".",
"_connections",
"[",
"connection_id",
"]",
"else",
":",
"LOGGER",
".",
"debug",
"(",
"\"Can't send to %s, not in self._connections\"",
",",
"connection_id",
")",
"return",
"self",
".",
"_ready",
".",
"wait",
"(",
")",
"try",
":",
"asyncio",
".",
"run_coroutine_threadsafe",
"(",
"self",
".",
"_send_last_message",
"(",
"zmq_identity",
",",
"msg",
")",
",",
"self",
".",
"_event_loop",
")",
"except",
"RuntimeError",
":",
"# run_coroutine_threadsafe will throw a RuntimeError if",
"# the eventloop is closed. This occurs on shutdown.",
"pass"
] | 38.258065 | 19.419355 |
def goback(self, days=1):
        """ Go back days
        Remove the most recent days of data.
        `days` is the number of days to remove (i.e. how many days to roll back).
        """
for i in xrange(days):
self.raw_data.pop()
self.data_date.pop()
self.stock_range.pop()
self.stock_vol.pop()
self.stock_open.pop()
self.stock_h.pop()
self.stock_l.pop()
|
[
"def",
"goback",
"(",
"self",
",",
"days",
"=",
"1",
")",
":",
"for",
"i",
"in",
"xrange",
"(",
"days",
")",
":",
"self",
".",
"raw_data",
".",
"pop",
"(",
")",
"self",
".",
"data_date",
".",
"pop",
"(",
")",
"self",
".",
"stock_range",
".",
"pop",
"(",
")",
"self",
".",
"stock_vol",
".",
"pop",
"(",
")",
"self",
".",
"stock_open",
".",
"pop",
"(",
")",
"self",
".",
"stock_h",
".",
"pop",
"(",
")",
"self",
".",
"stock_l",
".",
"pop",
"(",
")"
] | 23.384615 | 12.538462 |
def norm(self):
"""Return a Scalar object with the norm of this vector"""
result = Scalar(self.size, self.deriv)
result.v = np.sqrt(self.x.v**2 + self.y.v**2 + self.z.v**2)
if self.deriv > 0:
result.d += self.x.v*self.x.d
result.d += self.y.v*self.y.d
result.d += self.z.v*self.z.d
result.d /= result.v
if self.deriv > 1:
result.dd += self.x.v*self.x.dd
result.dd += self.y.v*self.y.dd
result.dd += self.z.v*self.z.dd
denom = result.v**2
result.dd += (1 - self.x.v**2/denom)*np.outer(self.x.d, self.x.d)
result.dd += (1 - self.y.v**2/denom)*np.outer(self.y.d, self.y.d)
result.dd += (1 - self.z.v**2/denom)*np.outer(self.z.d, self.z.d)
tmp = -self.x.v*self.y.v/denom*np.outer(self.x.d, self.y.d)
result.dd += tmp+tmp.transpose()
tmp = -self.y.v*self.z.v/denom*np.outer(self.y.d, self.z.d)
result.dd += tmp+tmp.transpose()
tmp = -self.z.v*self.x.v/denom*np.outer(self.z.d, self.x.d)
result.dd += tmp+tmp.transpose()
result.dd /= result.v
return result
|
[
"def",
"norm",
"(",
"self",
")",
":",
"result",
"=",
"Scalar",
"(",
"self",
".",
"size",
",",
"self",
".",
"deriv",
")",
"result",
".",
"v",
"=",
"np",
".",
"sqrt",
"(",
"self",
".",
"x",
".",
"v",
"**",
"2",
"+",
"self",
".",
"y",
".",
"v",
"**",
"2",
"+",
"self",
".",
"z",
".",
"v",
"**",
"2",
")",
"if",
"self",
".",
"deriv",
">",
"0",
":",
"result",
".",
"d",
"+=",
"self",
".",
"x",
".",
"v",
"*",
"self",
".",
"x",
".",
"d",
"result",
".",
"d",
"+=",
"self",
".",
"y",
".",
"v",
"*",
"self",
".",
"y",
".",
"d",
"result",
".",
"d",
"+=",
"self",
".",
"z",
".",
"v",
"*",
"self",
".",
"z",
".",
"d",
"result",
".",
"d",
"/=",
"result",
".",
"v",
"if",
"self",
".",
"deriv",
">",
"1",
":",
"result",
".",
"dd",
"+=",
"self",
".",
"x",
".",
"v",
"*",
"self",
".",
"x",
".",
"dd",
"result",
".",
"dd",
"+=",
"self",
".",
"y",
".",
"v",
"*",
"self",
".",
"y",
".",
"dd",
"result",
".",
"dd",
"+=",
"self",
".",
"z",
".",
"v",
"*",
"self",
".",
"z",
".",
"dd",
"denom",
"=",
"result",
".",
"v",
"**",
"2",
"result",
".",
"dd",
"+=",
"(",
"1",
"-",
"self",
".",
"x",
".",
"v",
"**",
"2",
"/",
"denom",
")",
"*",
"np",
".",
"outer",
"(",
"self",
".",
"x",
".",
"d",
",",
"self",
".",
"x",
".",
"d",
")",
"result",
".",
"dd",
"+=",
"(",
"1",
"-",
"self",
".",
"y",
".",
"v",
"**",
"2",
"/",
"denom",
")",
"*",
"np",
".",
"outer",
"(",
"self",
".",
"y",
".",
"d",
",",
"self",
".",
"y",
".",
"d",
")",
"result",
".",
"dd",
"+=",
"(",
"1",
"-",
"self",
".",
"z",
".",
"v",
"**",
"2",
"/",
"denom",
")",
"*",
"np",
".",
"outer",
"(",
"self",
".",
"z",
".",
"d",
",",
"self",
".",
"z",
".",
"d",
")",
"tmp",
"=",
"-",
"self",
".",
"x",
".",
"v",
"*",
"self",
".",
"y",
".",
"v",
"/",
"denom",
"*",
"np",
".",
"outer",
"(",
"self",
".",
"x",
".",
"d",
",",
"self",
".",
"y",
".",
"d",
")",
"result",
".",
"dd",
"+=",
"tmp",
"+",
"tmp",
".",
"transpose",
"(",
")",
"tmp",
"=",
"-",
"self",
".",
"y",
".",
"v",
"*",
"self",
".",
"z",
".",
"v",
"/",
"denom",
"*",
"np",
".",
"outer",
"(",
"self",
".",
"y",
".",
"d",
",",
"self",
".",
"z",
".",
"d",
")",
"result",
".",
"dd",
"+=",
"tmp",
"+",
"tmp",
".",
"transpose",
"(",
")",
"tmp",
"=",
"-",
"self",
".",
"z",
".",
"v",
"*",
"self",
".",
"x",
".",
"v",
"/",
"denom",
"*",
"np",
".",
"outer",
"(",
"self",
".",
"z",
".",
"d",
",",
"self",
".",
"x",
".",
"d",
")",
"result",
".",
"dd",
"+=",
"tmp",
"+",
"tmp",
".",
"transpose",
"(",
")",
"result",
".",
"dd",
"/=",
"result",
".",
"v",
"return",
"result"
] | 47.6 | 14.28 |
def get_enclosingmethod(self):
"""
the class.method or class (if the definition is not from within a
method) that encloses the definition of this class. Returns
None if this was not an inner class.
reference: http://docs.oracle.com/javase/specs/jvms/se7/html/jvms-4.html#jvms-4.7.7
""" # noqa
buff = self.get_attribute("EnclosingMethod")
# TODO:
# Running across classes with data in this attribute like
# 00 06 00 00
# which would be the 6th const for the class name, and the
# zero-th (INVALID) const for method. Maybe this is static
# inner classes?
if buff is None:
return None
# class index, method index
with unpack(buff) as up:
ci, mi = up.unpack_struct(_HH)
result = None
if ci and mi:
enc_class = self.deref_const(ci)
enc_meth, enc_type = self.deref_const(mi)
result = "%s.%s%s" % (enc_class, enc_meth, enc_type)
elif ci:
result = self.deref_const(ci)
return result
|
[
"def",
"get_enclosingmethod",
"(",
"self",
")",
":",
"# noqa",
"buff",
"=",
"self",
".",
"get_attribute",
"(",
"\"EnclosingMethod\"",
")",
"# TODO:",
"# Running across classes with data in this attribute like",
"# 00 06 00 00",
"# which would be the 6th const for the class name, and the",
"# zero-th (INVALID) const for method. Maybe this is static",
"# inner classes?",
"if",
"buff",
"is",
"None",
":",
"return",
"None",
"# class index, method index",
"with",
"unpack",
"(",
"buff",
")",
"as",
"up",
":",
"ci",
",",
"mi",
"=",
"up",
".",
"unpack_struct",
"(",
"_HH",
")",
"result",
"=",
"None",
"if",
"ci",
"and",
"mi",
":",
"enc_class",
"=",
"self",
".",
"deref_const",
"(",
"ci",
")",
"enc_meth",
",",
"enc_type",
"=",
"self",
".",
"deref_const",
"(",
"mi",
")",
"result",
"=",
"\"%s.%s%s\"",
"%",
"(",
"enc_class",
",",
"enc_meth",
",",
"enc_type",
")",
"elif",
"ci",
":",
"result",
"=",
"self",
".",
"deref_const",
"(",
"ci",
")",
"return",
"result"
] | 30.027778 | 22.361111 |
def _at_dump_imports(self, calculator, rule, scope, block):
"""
Implements @dump_imports
"""
sys.stderr.write("%s\n" % repr(rule.namespace._imports))
|
[
"def",
"_at_dump_imports",
"(",
"self",
",",
"calculator",
",",
"rule",
",",
"scope",
",",
"block",
")",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"%s\\n\"",
"%",
"repr",
"(",
"rule",
".",
"namespace",
".",
"_imports",
")",
")"
] | 35.4 | 10.2 |
def get_template_path(self, content=None):
""" Find template.
        :return string: template path
"""
if isinstance(content, Paginator):
return op.join('api', 'paginator.%s' % self.format)
if isinstance(content, UpdatedList):
return op.join('api', 'updated.%s' % self.format)
app = ''
name = self.resource._meta.name
if not content:
content = self.resource._meta.model
if isinstance(content, (Model, ModelBase)):
app = content._meta.app_label
name = content._meta.module_name
basedir = 'api'
if getattr(self.resource, 'api', None):
basedir = self.resource.api.prefix
return op.join(
basedir,
str(self.resource.api or ''), app, "%s.%s" % (name, self.format)
)
|
[
"def",
"get_template_path",
"(",
"self",
",",
"content",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"content",
",",
"Paginator",
")",
":",
"return",
"op",
".",
"join",
"(",
"'api'",
",",
"'paginator.%s'",
"%",
"self",
".",
"format",
")",
"if",
"isinstance",
"(",
"content",
",",
"UpdatedList",
")",
":",
"return",
"op",
".",
"join",
"(",
"'api'",
",",
"'updated.%s'",
"%",
"self",
".",
"format",
")",
"app",
"=",
"''",
"name",
"=",
"self",
".",
"resource",
".",
"_meta",
".",
"name",
"if",
"not",
"content",
":",
"content",
"=",
"self",
".",
"resource",
".",
"_meta",
".",
"model",
"if",
"isinstance",
"(",
"content",
",",
"(",
"Model",
",",
"ModelBase",
")",
")",
":",
"app",
"=",
"content",
".",
"_meta",
".",
"app_label",
"name",
"=",
"content",
".",
"_meta",
".",
"module_name",
"basedir",
"=",
"'api'",
"if",
"getattr",
"(",
"self",
".",
"resource",
",",
"'api'",
",",
"None",
")",
":",
"basedir",
"=",
"self",
".",
"resource",
".",
"api",
".",
"prefix",
"return",
"op",
".",
"join",
"(",
"basedir",
",",
"str",
"(",
"self",
".",
"resource",
".",
"api",
"or",
"''",
")",
",",
"app",
",",
"\"%s.%s\"",
"%",
"(",
"name",
",",
"self",
".",
"format",
")",
")"
] | 27.7 | 19.133333 |
def fullStats(a, b):
"""Performs several stats on a against b, typically a is the predictions
array, and b the observations array
Returns:
A dataFrame of stat name, stat description, result
"""
stats = [
['bias', 'Bias', bias(a, b)],
['stderr', 'Standard Deviation Error', stderr(a, b)],
['mae', 'Mean Absolute Error', mae(a, b)],
['rmse', 'Root Mean Square Error', rmse(a, b)],
['nmse', 'Normalized Mean Square Error', nmse(a, b)],
['mfbe', 'Mean Fractionalized bias Error', mfbe(a, b)],
['fa2', 'Factor of Two', fa(a, b, 2)],
['foex', 'Factor of Exceedance', foex(a, b)],
['correlation', 'Correlation R', correlation(a, b)],
['determination', 'Coefficient of Determination r2', determination(a, b)],
['gmb', 'Geometric Mean Bias', gmb(a, b)],
['gmv', 'Geometric Mean Variance', gmv(a, b)],
['fmt', 'Figure of Merit in Time', fmt(a, b)]
]
rec = np.rec.fromrecords(stats, names=('stat', 'description', 'result'))
df = pd.DataFrame.from_records(rec, index='stat')
return df
|
[
"def",
"fullStats",
"(",
"a",
",",
"b",
")",
":",
"stats",
"=",
"[",
"[",
"'bias'",
",",
"'Bias'",
",",
"bias",
"(",
"a",
",",
"b",
")",
"]",
",",
"[",
"'stderr'",
",",
"'Standard Deviation Error'",
",",
"stderr",
"(",
"a",
",",
"b",
")",
"]",
",",
"[",
"'mae'",
",",
"'Mean Absolute Error'",
",",
"mae",
"(",
"a",
",",
"b",
")",
"]",
",",
"[",
"'rmse'",
",",
"'Root Mean Square Error'",
",",
"rmse",
"(",
"a",
",",
"b",
")",
"]",
",",
"[",
"'nmse'",
",",
"'Normalized Mean Square Error'",
",",
"nmse",
"(",
"a",
",",
"b",
")",
"]",
",",
"[",
"'mfbe'",
",",
"'Mean Fractionalized bias Error'",
",",
"mfbe",
"(",
"a",
",",
"b",
")",
"]",
",",
"[",
"'fa2'",
",",
"'Factor of Two'",
",",
"fa",
"(",
"a",
",",
"b",
",",
"2",
")",
"]",
",",
"[",
"'foex'",
",",
"'Factor of Exceedance'",
",",
"foex",
"(",
"a",
",",
"b",
")",
"]",
",",
"[",
"'correlation'",
",",
"'Correlation R'",
",",
"correlation",
"(",
"a",
",",
"b",
")",
"]",
",",
"[",
"'determination'",
",",
"'Coefficient of Determination r2'",
",",
"determination",
"(",
"a",
",",
"b",
")",
"]",
",",
"[",
"'gmb'",
",",
"'Geometric Mean Bias'",
",",
"gmb",
"(",
"a",
",",
"b",
")",
"]",
",",
"[",
"'gmv'",
",",
"'Geometric Mean Variance'",
",",
"gmv",
"(",
"a",
",",
"b",
")",
"]",
",",
"[",
"'fmt'",
",",
"'Figure of Merit in Time'",
",",
"fmt",
"(",
"a",
",",
"b",
")",
"]",
"]",
"rec",
"=",
"np",
".",
"rec",
".",
"fromrecords",
"(",
"stats",
",",
"names",
"=",
"(",
"'stat'",
",",
"'description'",
",",
"'result'",
")",
")",
"df",
"=",
"pd",
".",
"DataFrame",
".",
"from_records",
"(",
"rec",
",",
"index",
"=",
"'stat'",
")",
"return",
"df"
] | 42.192308 | 19.076923 |
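A usage sketch, assuming the individual stat helpers (`bias`, `rmse`, ...) referenced above are defined in the same module; the returned DataFrame is indexed by stat name:

# Minimal sketch: predictions vs. observations on toy arrays.
import numpy as np

pred = np.array([1.0, 2.0, 3.0, 4.0])
obs = np.array([1.1, 1.9, 3.2, 3.8])
df = fullStats(pred, obs)
print(df.loc["rmse", "result"])    # look up one stat by its index name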
def MigrateInstance(r, instance, mode=None, cleanup=None):
"""
Migrates an instance.
@type instance: string
@param instance: Instance name
@type mode: string
@param mode: Migration mode
@type cleanup: bool
@param cleanup: Whether to clean up a previously failed migration
"""
body = {}
if mode is not None:
body["mode"] = mode
if cleanup is not None:
body["cleanup"] = cleanup
return r.request("put", "/2/instances/%s/migrate" % instance,
content=body)
|
[
"def",
"MigrateInstance",
"(",
"r",
",",
"instance",
",",
"mode",
"=",
"None",
",",
"cleanup",
"=",
"None",
")",
":",
"body",
"=",
"{",
"}",
"if",
"mode",
"is",
"not",
"None",
":",
"body",
"[",
"\"mode\"",
"]",
"=",
"mode",
"if",
"cleanup",
"is",
"not",
"None",
":",
"body",
"[",
"\"cleanup\"",
"]",
"=",
"cleanup",
"return",
"r",
".",
"request",
"(",
"\"put\"",
",",
"\"/2/instances/%s/migrate\"",
"%",
"instance",
",",
"content",
"=",
"body",
")"
] | 23.863636 | 19.681818 |
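Hypothetical calls, assuming `r` is a Ganeti RAPI client exposing the `request(method, path, content=...)` method the function uses; an optional field is omitted from the request body when left as None:

# Live-migrate an instance, then clean up a previously failed migration.
MigrateInstance(r, "node1.example.com", mode="live")
MigrateInstance(r, "node1.example.com", cleanup=True)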
def get_dataframe(self, sort_key="wall_time", **kwargs):
"""
Return a pandas DataFrame with entries sorted according to `sort_key`.
"""
import pandas as pd
frame = pd.DataFrame(columns=AbinitTimerSection.FIELDS)
for osect in self.order_sections(sort_key):
frame = frame.append(osect.to_dict(), ignore_index=True)
# Monkey patch
frame.info = self.info
frame.cpu_time = self.cpu_time
frame.wall_time = self.wall_time
frame.mpi_nprocs = self.mpi_nprocs
frame.omp_nthreads = self.omp_nthreads
frame.mpi_rank = self.mpi_rank
frame.fname = self.fname
return frame
|
[
"def",
"get_dataframe",
"(",
"self",
",",
"sort_key",
"=",
"\"wall_time\"",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"pandas",
"as",
"pd",
"frame",
"=",
"pd",
".",
"DataFrame",
"(",
"columns",
"=",
"AbinitTimerSection",
".",
"FIELDS",
")",
"for",
"osect",
"in",
"self",
".",
"order_sections",
"(",
"sort_key",
")",
":",
"frame",
"=",
"frame",
".",
"append",
"(",
"osect",
".",
"to_dict",
"(",
")",
",",
"ignore_index",
"=",
"True",
")",
"# Monkey patch",
"frame",
".",
"info",
"=",
"self",
".",
"info",
"frame",
".",
"cpu_time",
"=",
"self",
".",
"cpu_time",
"frame",
".",
"wall_time",
"=",
"self",
".",
"wall_time",
"frame",
".",
"mpi_nprocs",
"=",
"self",
".",
"mpi_nprocs",
"frame",
".",
"omp_nthreads",
"=",
"self",
".",
"omp_nthreads",
"frame",
".",
"mpi_rank",
"=",
"self",
".",
"mpi_rank",
"frame",
".",
"fname",
"=",
"self",
".",
"fname",
"return",
"frame"
] | 34 | 15.8 |
def str_arg_to_bool(value):
"""
Convert string argument into a boolean values.
:param value: str value to convert. Allowed values are y, yes, true, 1, t, n, no, false, 0, f.
The implementation is case insensitive.
:raises: argparse.ArgumentTypeError if value is not recognized as a boolean.
:return: boolean
"""
if value.lower() in ["y", "yes", "true", "1", "t"]:
return True
elif value.lower() in ["n", "no", "false", "0", "f"]:
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
|
[
"def",
"str_arg_to_bool",
"(",
"value",
")",
":",
"if",
"value",
".",
"lower",
"(",
")",
"in",
"[",
"\"y\"",
",",
"\"yes\"",
",",
"\"true\"",
",",
"\"1\"",
",",
"\"t\"",
"]",
":",
"return",
"True",
"elif",
"value",
".",
"lower",
"(",
")",
"in",
"[",
"\"n\"",
",",
"\"no\"",
",",
"\"false\"",
",",
"\"0\"",
",",
"\"f\"",
"]",
":",
"return",
"False",
"else",
":",
"raise",
"argparse",
".",
"ArgumentTypeError",
"(",
"\"Boolean value expected.\"",
")"
] | 39.928571 | 19.642857 |
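A typical wiring into argparse, which is exactly what this converter is written for; matching is case-insensitive:

# Minimal sketch: use str_arg_to_bool as an argparse type converter.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--verbose", type=str_arg_to_bool, default=False)
args = parser.parse_args(["--verbose", "Yes"])   # "Yes" -> True (case-insensitive)
assert args.verbose is True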
def relabel_non_zero(label_image, start = 1):
r"""
Relabel the regions of a label image.
Re-processes the labels to make them consecutively and starting from start.
Keeps all zero (0) labels, as they are considered background.
Parameters
----------
label_image : array_like
A nD label map.
start : integer
The id of the first label to assign
Returns
-------
relabel_map : ndarray
The relabelled label map.
See also
--------
relabel
"""
if start <= 0: raise ArgumentError('The starting value can not be 0 or lower.')
l = list(scipy.unique(label_image))
if 0 in l: l.remove(0)
mapping = dict()
mapping[0] = 0
for key, item in zip(l, list(range(start, len(l) + start))):
mapping[key] = item
return relabel_map(label_image, mapping)
|
[
"def",
"relabel_non_zero",
"(",
"label_image",
",",
"start",
"=",
"1",
")",
":",
"if",
"start",
"<=",
"0",
":",
"raise",
"ArgumentError",
"(",
"'The starting value can not be 0 or lower.'",
")",
"l",
"=",
"list",
"(",
"scipy",
".",
"unique",
"(",
"label_image",
")",
")",
"if",
"0",
"in",
"l",
":",
"l",
".",
"remove",
"(",
"0",
")",
"mapping",
"=",
"dict",
"(",
")",
"mapping",
"[",
"0",
"]",
"=",
"0",
"for",
"key",
",",
"item",
"in",
"zip",
"(",
"l",
",",
"list",
"(",
"range",
"(",
"start",
",",
"len",
"(",
"l",
")",
"+",
"start",
")",
")",
")",
":",
"mapping",
"[",
"key",
"]",
"=",
"item",
"return",
"relabel_map",
"(",
"label_image",
",",
"mapping",
")"
] | 26.53125 | 20.46875 |
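A small behavior sketch, assuming `scipy` and the module's `relabel_map` helper are available as above; non-zero ids become consecutive starting at `start`, while zeros stay background:

# Relabel {3, 7, 9} to consecutive {1, 2, 3}; 0 is preserved as background.
import numpy as np

labels = np.array([[0, 7, 7],
                   [3, 0, 9]])
out = relabel_non_zero(labels, start=1)
assert sorted(np.unique(out)) == [0, 1, 2, 3]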
def create_scans(urls_file):
"""
This method is rather simple, it will group the urls to be scanner together
based on (protocol, domain and port).
:param urls_file: The filename with all the URLs
:return: A list of scans to be run
"""
cli_logger.debug('Starting to process batch input file')
created_scans = []
for line in urls_file:
line = line.strip()
if line.startswith('#'):
continue
if not line:
continue
try:
protocol, domain, port, path = parse_url(line)
except ValueError, ve:
cli_logger.debug(str(ve))
continue
for scan in created_scans:
if scan.matches(protocol, domain, port):
scan.add_path(path)
args = (path, scan.get_root_url())
cli_logger.debug('Added %s to %s' % args)
break
else:
scan = BatchScan(protocol, domain, port, path)
created_scans.append(scan)
cli_logger.debug('Added a new scan to %s' % scan.get_root_url())
cli_logger.debug('Created a total of %s scans' % len(created_scans))
return created_scans
|
[
"def",
"create_scans",
"(",
"urls_file",
")",
":",
"cli_logger",
".",
"debug",
"(",
"'Starting to process batch input file'",
")",
"created_scans",
"=",
"[",
"]",
"for",
"line",
"in",
"urls_file",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"line",
".",
"startswith",
"(",
"'#'",
")",
":",
"continue",
"if",
"not",
"line",
":",
"continue",
"try",
":",
"protocol",
",",
"domain",
",",
"port",
",",
"path",
"=",
"parse_url",
"(",
"line",
")",
"except",
"ValueError",
",",
"ve",
":",
"cli_logger",
".",
"debug",
"(",
"str",
"(",
"ve",
")",
")",
"continue",
"for",
"scan",
"in",
"created_scans",
":",
"if",
"scan",
".",
"matches",
"(",
"protocol",
",",
"domain",
",",
"port",
")",
":",
"scan",
".",
"add_path",
"(",
"path",
")",
"args",
"=",
"(",
"path",
",",
"scan",
".",
"get_root_url",
"(",
")",
")",
"cli_logger",
".",
"debug",
"(",
"'Added %s to %s'",
"%",
"args",
")",
"break",
"else",
":",
"scan",
"=",
"BatchScan",
"(",
"protocol",
",",
"domain",
",",
"port",
",",
"path",
")",
"created_scans",
".",
"append",
"(",
"scan",
")",
"cli_logger",
".",
"debug",
"(",
"'Added a new scan to %s'",
"%",
"scan",
".",
"get_root_url",
"(",
")",
")",
"cli_logger",
".",
"debug",
"(",
"'Created a total of %s scans'",
"%",
"len",
"(",
"created_scans",
")",
")",
"return",
"created_scans"
] | 29.897436 | 19.435897 |
def _proc_gnulong(self, tarfile):
"""Process the blocks that hold a GNU longname
or longlink member.
"""
buf = tarfile.fileobj.read(self._block(self.size))
# Fetch the next header and process it.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Patch the TarInfo object from the next header with
# the longname information.
next.offset = self.offset
if self.type == GNUTYPE_LONGNAME:
next.name = nts(buf, tarfile.encoding, tarfile.errors)
elif self.type == GNUTYPE_LONGLINK:
next.linkname = nts(buf, tarfile.encoding, tarfile.errors)
return next
|
[
"def",
"_proc_gnulong",
"(",
"self",
",",
"tarfile",
")",
":",
"buf",
"=",
"tarfile",
".",
"fileobj",
".",
"read",
"(",
"self",
".",
"_block",
"(",
"self",
".",
"size",
")",
")",
"# Fetch the next header and process it.",
"try",
":",
"next",
"=",
"self",
".",
"fromtarfile",
"(",
"tarfile",
")",
"except",
"HeaderError",
":",
"raise",
"SubsequentHeaderError",
"(",
"\"missing or bad subsequent header\"",
")",
"# Patch the TarInfo object from the next header with",
"# the longname information.",
"next",
".",
"offset",
"=",
"self",
".",
"offset",
"if",
"self",
".",
"type",
"==",
"GNUTYPE_LONGNAME",
":",
"next",
".",
"name",
"=",
"nts",
"(",
"buf",
",",
"tarfile",
".",
"encoding",
",",
"tarfile",
".",
"errors",
")",
"elif",
"self",
".",
"type",
"==",
"GNUTYPE_LONGLINK",
":",
"next",
".",
"linkname",
"=",
"nts",
"(",
"buf",
",",
"tarfile",
".",
"encoding",
",",
"tarfile",
".",
"errors",
")",
"return",
"next"
] | 36.095238 | 16.904762 |
def token_clean(self, length, numbers=True):
"""
Strip out non-alphanumeric tokens.
length: remove tokens of length "length" or less.
numbers: strip out non-alpha tokens.
"""
def clean1(tokens):
return [t for t in tokens if t.isalpha() == 1 and len(t) > length]
def clean2(tokens):
return [t for t in tokens if t.isalnum() == 1 and len(t) > length]
if numbers:
self.tokens = list(map(clean1, self.tokens))
else:
self.tokens = list(map(clean2, self.tokens))
|
[
"def",
"token_clean",
"(",
"self",
",",
"length",
",",
"numbers",
"=",
"True",
")",
":",
"def",
"clean1",
"(",
"tokens",
")",
":",
"return",
"[",
"t",
"for",
"t",
"in",
"tokens",
"if",
"t",
".",
"isalpha",
"(",
")",
"==",
"1",
"and",
"len",
"(",
"t",
")",
">",
"length",
"]",
"def",
"clean2",
"(",
"tokens",
")",
":",
"return",
"[",
"t",
"for",
"t",
"in",
"tokens",
"if",
"t",
".",
"isalnum",
"(",
")",
"==",
"1",
"and",
"len",
"(",
"t",
")",
">",
"length",
"]",
"if",
"numbers",
":",
"self",
".",
"tokens",
"=",
"list",
"(",
"map",
"(",
"clean1",
",",
"self",
".",
"tokens",
")",
")",
"else",
":",
"self",
".",
"tokens",
"=",
"list",
"(",
"map",
"(",
"clean2",
",",
"self",
".",
"tokens",
")",
")"
] | 31.277778 | 20.5 |
def update(self, duration):
"""Add a recorded duration."""
if duration >= 0:
self.histogram.update(duration)
self.meter.mark()
|
[
"def",
"update",
"(",
"self",
",",
"duration",
")",
":",
"if",
"duration",
">=",
"0",
":",
"self",
".",
"histogram",
".",
"update",
"(",
"duration",
")",
"self",
".",
"meter",
".",
"mark",
"(",
")"
] | 32.4 | 8.4 |
def latitude_from_cross_section(cross):
"""Calculate the latitude of points in a cross-section.
Parameters
----------
cross : `xarray.DataArray`
The input DataArray of a cross-section from which to obtain latitudes.
Returns
-------
latitude : `xarray.DataArray`
Latitude of points
"""
y = cross.metpy.y
if CFConventionHandler.check_axis(y, 'lat'):
return y
else:
import cartopy.crs as ccrs
latitude = ccrs.Geodetic().transform_points(cross.metpy.cartopy_crs,
cross.metpy.x.values,
y.values)[..., 1]
latitude = xr.DataArray(latitude, coords=y.coords, dims=y.dims,
attrs={'units': 'degrees_north'})
return latitude
|
[
"def",
"latitude_from_cross_section",
"(",
"cross",
")",
":",
"y",
"=",
"cross",
".",
"metpy",
".",
"y",
"if",
"CFConventionHandler",
".",
"check_axis",
"(",
"y",
",",
"'lat'",
")",
":",
"return",
"y",
"else",
":",
"import",
"cartopy",
".",
"crs",
"as",
"ccrs",
"latitude",
"=",
"ccrs",
".",
"Geodetic",
"(",
")",
".",
"transform_points",
"(",
"cross",
".",
"metpy",
".",
"cartopy_crs",
",",
"cross",
".",
"metpy",
".",
"x",
".",
"values",
",",
"y",
".",
"values",
")",
"[",
"...",
",",
"1",
"]",
"latitude",
"=",
"xr",
".",
"DataArray",
"(",
"latitude",
",",
"coords",
"=",
"y",
".",
"coords",
",",
"dims",
"=",
"y",
".",
"dims",
",",
"attrs",
"=",
"{",
"'units'",
":",
"'degrees_north'",
"}",
")",
"return",
"latitude"
] | 33.08 | 22.36 |
def btemp_threshold(img, min_in, max_in, threshold, threshold_out=None, **kwargs):
"""Scale data linearly in two separate regions.
This enhancement scales the input data linearly by splitting the data
into two regions; min_in to threshold and threshold to max_in. These
regions are mapped to 1 to threshold_out and threshold_out to 0
respectively, resulting in the data being "flipped" around the
threshold. A default threshold_out is set to `176.0 / 255.0` to
match the behavior of the US National Weather Service's forecasting
tool called AWIPS.
Args:
img (XRImage): Image object to be scaled
min_in (float): Minimum input value to scale
max_in (float): Maximum input value to scale
threshold (float): Input value where to split data in to two regions
threshold_out (float): Output value to map the input `threshold`
to. Optional, defaults to 176.0 / 255.0.
"""
threshold_out = threshold_out if threshold_out is not None else (176 / 255.0)
low_factor = (threshold_out - 1.) / (min_in - threshold)
low_offset = 1. + (low_factor * min_in)
high_factor = threshold_out / (max_in - threshold)
high_offset = high_factor * max_in
def _bt_threshold(band_data):
# expects dask array to be passed
return da.where(band_data >= threshold,
high_offset - high_factor * band_data,
low_offset - low_factor * band_data)
return apply_enhancement(img.data, _bt_threshold, pass_dask=True)
|
[
"def",
"btemp_threshold",
"(",
"img",
",",
"min_in",
",",
"max_in",
",",
"threshold",
",",
"threshold_out",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"threshold_out",
"=",
"threshold_out",
"if",
"threshold_out",
"is",
"not",
"None",
"else",
"(",
"176",
"/",
"255.0",
")",
"low_factor",
"=",
"(",
"threshold_out",
"-",
"1.",
")",
"/",
"(",
"min_in",
"-",
"threshold",
")",
"low_offset",
"=",
"1.",
"+",
"(",
"low_factor",
"*",
"min_in",
")",
"high_factor",
"=",
"threshold_out",
"/",
"(",
"max_in",
"-",
"threshold",
")",
"high_offset",
"=",
"high_factor",
"*",
"max_in",
"def",
"_bt_threshold",
"(",
"band_data",
")",
":",
"# expects dask array to be passed",
"return",
"da",
".",
"where",
"(",
"band_data",
">=",
"threshold",
",",
"high_offset",
"-",
"high_factor",
"*",
"band_data",
",",
"low_offset",
"-",
"low_factor",
"*",
"band_data",
")",
"return",
"apply_enhancement",
"(",
"img",
".",
"data",
",",
"_bt_threshold",
",",
"pass_dask",
"=",
"True",
")"
] | 46.272727 | 22.575758 |
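A worked arithmetic check of the two linear segments: the low segment maps [min_in, threshold] onto [1, threshold_out] and the high segment maps [threshold, max_in] onto [threshold_out, 0], so the pieces meet at the threshold. The brightness-temperature values below are illustrative only:

# Pure-Python check that both segments hit the intended endpoints.
min_in, max_in, threshold = 150.0, 350.0, 250.0
threshold_out = 176 / 255.0
low_factor = (threshold_out - 1.) / (min_in - threshold)
low_offset = 1. + low_factor * min_in
high_factor = threshold_out / (max_in - threshold)
high_offset = high_factor * max_in
assert abs((low_offset - low_factor * min_in) - 1.0) < 1e-12        # cold end -> 1
assert abs((high_offset - high_factor * max_in) - 0.0) < 1e-12      # warm end -> 0
assert abs((low_offset - low_factor * threshold) - threshold_out) < 1e-12
assert abs((high_offset - high_factor * threshold) - threshold_out) < 1e-12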
def assert_selector(self, *args, **kwargs):
"""
Asserts that a given selector is on the page or a descendant of the current node. ::
page.assert_selector("p#foo")
By default it will check if the expression occurs at least once, but a different number can
be specified. ::
page.assert_selector("p.foo", count=4)
This will check if the expression occurs exactly 4 times. See :meth:`find_all` for other
available result size options.
If a ``count`` of 0 is specified, it will behave like :meth:`assert_no_selector`; however,
use of that method is preferred over this one.
It also accepts all options that :meth:`find_all` accepts, such as ``text`` and
``visible``. ::
page.assert_selector("li", text="Horse", visible=True)
``assert_selector`` can also accept XPath expressions generated by the ``xpath-py``
package::
from xpath import dsl as x
page.assert_selector("xpath", x.descendant("p"))
Args:
*args: Variable length argument list for :class:`SelectorQuery`.
**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.
Returns:
True
Raises:
ExpectationNotMet: The given selector did not match.
"""
query = SelectorQuery(*args, **kwargs)
@self.synchronize(wait=query.wait)
def assert_selector():
result = query.resolve_for(self)
if not (result.matches_count and
(len(result) > 0 or expects_none(query.options))):
raise ExpectationNotMet(result.failure_message)
return True
return assert_selector()
|
[
"def",
"assert_selector",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"query",
"=",
"SelectorQuery",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"@",
"self",
".",
"synchronize",
"(",
"wait",
"=",
"query",
".",
"wait",
")",
"def",
"assert_selector",
"(",
")",
":",
"result",
"=",
"query",
".",
"resolve_for",
"(",
"self",
")",
"if",
"not",
"(",
"result",
".",
"matches_count",
"and",
"(",
"len",
"(",
"result",
")",
">",
"0",
"or",
"expects_none",
"(",
"query",
".",
"options",
")",
")",
")",
":",
"raise",
"ExpectationNotMet",
"(",
"result",
".",
"failure_message",
")",
"return",
"True",
"return",
"assert_selector",
"(",
")"
] | 32.264151 | 27.886792 |
def copy_to(self, destination):
"""
Copies the file to the given destination. Returns a File
object that represents the target file. `destination` must
be a File or Folder object.
"""
target = self.__get_destination__(destination)
logger.info("Copying %s to %s" % (self, target))
shutil.copy(self.path, unicode(destination))
return target
|
[
"def",
"copy_to",
"(",
"self",
",",
"destination",
")",
":",
"target",
"=",
"self",
".",
"__get_destination__",
"(",
"destination",
")",
"logger",
".",
"info",
"(",
"\"Copying %s to %s\"",
"%",
"(",
"self",
",",
"target",
")",
")",
"shutil",
".",
"copy",
"(",
"self",
".",
"path",
",",
"unicode",
"(",
"destination",
")",
")",
"return",
"target"
] | 40.1 | 12.5 |
def _on_timeout(self, _attempts=0):
"""
Called when the request associated with this ResponseFuture times out.
This function may reschedule itself. The ``_attempts`` parameter tracks
the number of times this has happened. This parameter should only be
set in those cases, where ``_on_timeout`` reschedules itself.
"""
# PYTHON-853: for short timeouts, we sometimes race with our __init__
if self._connection is None and _attempts < 3:
self._timer = self.session.cluster.connection_class.create_timer(
0.01,
partial(self._on_timeout, _attempts=_attempts + 1)
)
return
if self._connection is not None:
try:
self._connection._requests.pop(self._req_id)
# This prevents the race condition of the
# event loop thread just receiving the waited message
# If it arrives after this, it will be ignored
except KeyError:
return
pool = self.session._pools.get(self._current_host)
if pool and not pool.is_shutdown:
with self._connection.lock:
self._connection.request_ids.append(self._req_id)
pool.return_connection(self._connection)
errors = self._errors
if not errors:
if self.is_schema_agreed:
key = str(self._current_host.endpoint) if self._current_host else 'no host queried before timeout'
errors = {key: "Client request timeout. See Session.execute[_async](timeout)"}
else:
connection = self.session.cluster.control_connection._connection
host = str(connection.endpoint) if connection else 'unknown'
errors = {host: "Request timed out while waiting for schema agreement. See Session.execute[_async](timeout) and Cluster.max_schema_agreement_wait."}
self._set_final_exception(OperationTimedOut(errors, self._current_host))
|
[
"def",
"_on_timeout",
"(",
"self",
",",
"_attempts",
"=",
"0",
")",
":",
"# PYTHON-853: for short timeouts, we sometimes race with our __init__",
"if",
"self",
".",
"_connection",
"is",
"None",
"and",
"_attempts",
"<",
"3",
":",
"self",
".",
"_timer",
"=",
"self",
".",
"session",
".",
"cluster",
".",
"connection_class",
".",
"create_timer",
"(",
"0.01",
",",
"partial",
"(",
"self",
".",
"_on_timeout",
",",
"_attempts",
"=",
"_attempts",
"+",
"1",
")",
")",
"return",
"if",
"self",
".",
"_connection",
"is",
"not",
"None",
":",
"try",
":",
"self",
".",
"_connection",
".",
"_requests",
".",
"pop",
"(",
"self",
".",
"_req_id",
")",
"# This prevents the race condition of the",
"# event loop thread just receiving the waited message",
"# If it arrives after this, it will be ignored",
"except",
"KeyError",
":",
"return",
"pool",
"=",
"self",
".",
"session",
".",
"_pools",
".",
"get",
"(",
"self",
".",
"_current_host",
")",
"if",
"pool",
"and",
"not",
"pool",
".",
"is_shutdown",
":",
"with",
"self",
".",
"_connection",
".",
"lock",
":",
"self",
".",
"_connection",
".",
"request_ids",
".",
"append",
"(",
"self",
".",
"_req_id",
")",
"pool",
".",
"return_connection",
"(",
"self",
".",
"_connection",
")",
"errors",
"=",
"self",
".",
"_errors",
"if",
"not",
"errors",
":",
"if",
"self",
".",
"is_schema_agreed",
":",
"key",
"=",
"str",
"(",
"self",
".",
"_current_host",
".",
"endpoint",
")",
"if",
"self",
".",
"_current_host",
"else",
"'no host queried before timeout'",
"errors",
"=",
"{",
"key",
":",
"\"Client request timeout. See Session.execute[_async](timeout)\"",
"}",
"else",
":",
"connection",
"=",
"self",
".",
"session",
".",
"cluster",
".",
"control_connection",
".",
"_connection",
"host",
"=",
"str",
"(",
"connection",
".",
"endpoint",
")",
"if",
"connection",
"else",
"'unknown'",
"errors",
"=",
"{",
"host",
":",
"\"Request timed out while waiting for schema agreement. See Session.execute[_async](timeout) and Cluster.max_schema_agreement_wait.\"",
"}",
"self",
".",
"_set_final_exception",
"(",
"OperationTimedOut",
"(",
"errors",
",",
"self",
".",
"_current_host",
")",
")"
] | 46.860465 | 27.837209 |
def gload(smatch, gpaths=None, glabels=None, filt=None, reducel=False,
remove_underscore=True, clear=True, single=True, reshape=RESHAPE_DEFAULT,
idxlower=True, returnfirst=False, lowercase=True, lamb=None, verbose=True,
idval=None):
"""
    Loads into the global namespace the symbols matching {smatch}
    from the GDX files listed in {gpaths}.
If {reducel}==True, filter the dataset on 'l' entries only.
If {remove_underscore}==True, symbols are loaded into the global
namespace with their names without underscores.
"""
# Normalize the match string for symbols
if smatch[0] == '@':
returnfirst = True
smatch = smatch[1:]
smatch = expandmatch(smatch)
    # Build gdxobj list and expand gpaths
if isinstance(gpaths,list) and isinstance(gpaths[0],GdxFile):
gpaths = [g.internal_filename for g in gpaths]
gdxobjs = gpaths
elif not gpaths is None:
gpaths = expandlist(gpaths)
gdxobjs = [GdxFile(g) for g in gpaths]
else:
gpaths = gload.last_gpaths
gdxobjs = [GdxFile(g) for g in gpaths]
glabels = gload.last_glabels
# Normalize the list of labels for gdxs
gload.last_gpaths = gpaths
gload.last_glabels = glabels
glabels = expandlist(glabels,gpaths)
all_symbols = set()
for g in gdxobjs:
all_symbols |= set([x.name for x in g.get_symbols_list()])
ng = len(gpaths)
nax = 0
if verbose: print(smatch)
svar2ret = []
for s in all_symbols:
m = re.match(smatch,s, re.M|re.I)
if not m:
continue
if verbose: print('\n<<< %s >>>' % s)
sdata = {}
svar = None
validgdxs = []
for ig,g in enumerate(gpaths):
fname, fext = os.path.splitext(g)
if glabels == None:
gid = fname
else:
if isinstance(glabels,int):
gid = 'g%d' % (ig+glabels)
else:
gid = glabels[ig]
try:
sdata_curr = gdxobjs[ig].query(s,filt=filt,reshape=reshape,idval=idval,idxlower=idxlower)
sdata[gid] = sdata_curr
except Exception as e:
#traceback.print_exc()
if verbose:
print_traceback(e)
print('WARNING: Missing "%s" from "%s"' % (s,gid))
continue
validgdxs.append(gid)
nvg = len(validgdxs)
if nvg>1:
if isinstance(sdata_curr, pd.Index):
df = pd.concat({gid: pd.Series(1, x) for gid, x in sdata.items()}, keys=validgdxs).index
elif (reshape==RESHAPE_PANEL):
raise NotImplementedError('Panels are obsolete')
else:
if isinstance(sdata_curr, float):
df = pd.Series(sdata)[validgdxs]
else:
df = pd.concat(sdata, keys=validgdxs)
if reshape==RESHAPE_NONE:
df.reset_index(inplace=True)
col2drop = df.columns[1]
df.drop(col2drop, axis=1, inplace=True)
ncols = len(df.columns)
df.columns = ['s{}'.format(x) for x in range(ncols-1)] + ['val',]
elif reshape>=RESHAPE_SERIES:
for i in range(len(df.index.levels)):
df.index.levels[i].name = 's{}'.format(i)
if reshape>=RESHAPE_FRAME:
try:
df.columns.name = 's{}'.format(i+1)
df = df.stack().unstack(0)
except:
df = df.unstack(0)
else:
df = sdata_curr
try:
df.name = s
except:
pass
svar = df
if remove_underscore:
            s = s.replace('_','')
if lowercase:
s = s.lower()
if not lamb is None:
svar = lamb(svar)
if not returnfirst:
if not clear:
try:
sold = __builtins__[s]
if len(sold.shape) == len(svar.shape):
if verbose: print('Augmenting',s)
for c in svar.axes[0]:
sold[c] = svar[c]
svar = sold
except:
pass
else:
__builtins__[s] = svar
logprint = logger.info if verbose else logger.debug
if isinstance(svar, pd.DataFrame):
logprint('Rows : {} ... {}'.format(str(svar.index[0]), str(svar.index[-1])))
colwidth = np.max([len(str(svar.columns[i])) for i in range(len(svar.columns))])
logprint('Columns: {}'.format('\n '.join([('{:<%d} = {} ... {}'%colwidth).format(
str(svar.columns[i]), svar.iloc[0,i], svar.iloc[-1,i]) for i in range(len(svar.columns))])))
elif isinstance(svar, pd.Series):
logprint('Index : {} ... {}'.format(str(svar.index[0]), str(svar.index[-1])))
else:
logprint(svar)
if returnfirst:
svar2ret.append(svar)
if returnfirst:
if len(svar2ret) == 1:
svar2ret = svar2ret[0]
return svar2ret
|
[
"def",
"gload",
"(",
"smatch",
",",
"gpaths",
"=",
"None",
",",
"glabels",
"=",
"None",
",",
"filt",
"=",
"None",
",",
"reducel",
"=",
"False",
",",
"remove_underscore",
"=",
"True",
",",
"clear",
"=",
"True",
",",
"single",
"=",
"True",
",",
"reshape",
"=",
"RESHAPE_DEFAULT",
",",
"idxlower",
"=",
"True",
",",
"returnfirst",
"=",
"False",
",",
"lowercase",
"=",
"True",
",",
"lamb",
"=",
"None",
",",
"verbose",
"=",
"True",
",",
"idval",
"=",
"None",
")",
":",
"# Normalize the match string for symbols",
"if",
"smatch",
"[",
"0",
"]",
"==",
"'@'",
":",
"returnfirst",
"=",
"True",
"smatch",
"=",
"smatch",
"[",
"1",
":",
"]",
"smatch",
"=",
"expandmatch",
"(",
"smatch",
")",
"# Build gdxobj list and",
"if",
"isinstance",
"(",
"gpaths",
",",
"list",
")",
"and",
"isinstance",
"(",
"gpaths",
"[",
"0",
"]",
",",
"GdxFile",
")",
":",
"gpaths",
"=",
"[",
"g",
".",
"internal_filename",
"for",
"g",
"in",
"gpaths",
"]",
"gdxobjs",
"=",
"gpaths",
"elif",
"not",
"gpaths",
"is",
"None",
":",
"gpaths",
"=",
"expandlist",
"(",
"gpaths",
")",
"gdxobjs",
"=",
"[",
"GdxFile",
"(",
"g",
")",
"for",
"g",
"in",
"gpaths",
"]",
"else",
":",
"gpaths",
"=",
"gload",
".",
"last_gpaths",
"gdxobjs",
"=",
"[",
"GdxFile",
"(",
"g",
")",
"for",
"g",
"in",
"gpaths",
"]",
"glabels",
"=",
"gload",
".",
"last_glabels",
"# Normalize the list of labels for gdxs",
"gload",
".",
"last_gpaths",
"=",
"gpaths",
"gload",
".",
"last_glabels",
"=",
"glabels",
"glabels",
"=",
"expandlist",
"(",
"glabels",
",",
"gpaths",
")",
"all_symbols",
"=",
"set",
"(",
")",
"for",
"g",
"in",
"gdxobjs",
":",
"all_symbols",
"|=",
"set",
"(",
"[",
"x",
".",
"name",
"for",
"x",
"in",
"g",
".",
"get_symbols_list",
"(",
")",
"]",
")",
"ng",
"=",
"len",
"(",
"gpaths",
")",
"nax",
"=",
"0",
"if",
"verbose",
":",
"print",
"(",
"smatch",
")",
"svar2ret",
"=",
"[",
"]",
"for",
"s",
"in",
"all_symbols",
":",
"m",
"=",
"re",
".",
"match",
"(",
"smatch",
",",
"s",
",",
"re",
".",
"M",
"|",
"re",
".",
"I",
")",
"if",
"not",
"m",
":",
"continue",
"if",
"verbose",
":",
"print",
"(",
"'\\n<<< %s >>>'",
"%",
"s",
")",
"sdata",
"=",
"{",
"}",
"svar",
"=",
"None",
"validgdxs",
"=",
"[",
"]",
"for",
"ig",
",",
"g",
"in",
"enumerate",
"(",
"gpaths",
")",
":",
"fname",
",",
"fext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"g",
")",
"if",
"glabels",
"==",
"None",
":",
"gid",
"=",
"fname",
"else",
":",
"if",
"isinstance",
"(",
"glabels",
",",
"int",
")",
":",
"gid",
"=",
"'g%d'",
"%",
"(",
"ig",
"+",
"glabels",
")",
"else",
":",
"gid",
"=",
"glabels",
"[",
"ig",
"]",
"try",
":",
"sdata_curr",
"=",
"gdxobjs",
"[",
"ig",
"]",
".",
"query",
"(",
"s",
",",
"filt",
"=",
"filt",
",",
"reshape",
"=",
"reshape",
",",
"idval",
"=",
"idval",
",",
"idxlower",
"=",
"idxlower",
")",
"sdata",
"[",
"gid",
"]",
"=",
"sdata_curr",
"except",
"Exception",
"as",
"e",
":",
"#traceback.print_exc()",
"if",
"verbose",
":",
"print_traceback",
"(",
"e",
")",
"print",
"(",
"'WARNING: Missing \"%s\" from \"%s\"'",
"%",
"(",
"s",
",",
"gid",
")",
")",
"continue",
"validgdxs",
".",
"append",
"(",
"gid",
")",
"nvg",
"=",
"len",
"(",
"validgdxs",
")",
"if",
"nvg",
">",
"1",
":",
"if",
"isinstance",
"(",
"sdata_curr",
",",
"pd",
".",
"Index",
")",
":",
"df",
"=",
"pd",
".",
"concat",
"(",
"{",
"gid",
":",
"pd",
".",
"Series",
"(",
"1",
",",
"x",
")",
"for",
"gid",
",",
"x",
"in",
"sdata",
".",
"items",
"(",
")",
"}",
",",
"keys",
"=",
"validgdxs",
")",
".",
"index",
"elif",
"(",
"reshape",
"==",
"RESHAPE_PANEL",
")",
":",
"raise",
"NotImplementedError",
"(",
"'Panels are obsolete'",
")",
"else",
":",
"if",
"isinstance",
"(",
"sdata_curr",
",",
"float",
")",
":",
"df",
"=",
"pd",
".",
"Series",
"(",
"sdata",
")",
"[",
"validgdxs",
"]",
"else",
":",
"df",
"=",
"pd",
".",
"concat",
"(",
"sdata",
",",
"keys",
"=",
"validgdxs",
")",
"if",
"reshape",
"==",
"RESHAPE_NONE",
":",
"df",
".",
"reset_index",
"(",
"inplace",
"=",
"True",
")",
"col2drop",
"=",
"df",
".",
"columns",
"[",
"1",
"]",
"df",
".",
"drop",
"(",
"col2drop",
",",
"axis",
"=",
"1",
",",
"inplace",
"=",
"True",
")",
"ncols",
"=",
"len",
"(",
"df",
".",
"columns",
")",
"df",
".",
"columns",
"=",
"[",
"'s{}'",
".",
"format",
"(",
"x",
")",
"for",
"x",
"in",
"range",
"(",
"ncols",
"-",
"1",
")",
"]",
"+",
"[",
"'val'",
",",
"]",
"elif",
"reshape",
">=",
"RESHAPE_SERIES",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"df",
".",
"index",
".",
"levels",
")",
")",
":",
"df",
".",
"index",
".",
"levels",
"[",
"i",
"]",
".",
"name",
"=",
"'s{}'",
".",
"format",
"(",
"i",
")",
"if",
"reshape",
">=",
"RESHAPE_FRAME",
":",
"try",
":",
"df",
".",
"columns",
".",
"name",
"=",
"'s{}'",
".",
"format",
"(",
"i",
"+",
"1",
")",
"df",
"=",
"df",
".",
"stack",
"(",
")",
".",
"unstack",
"(",
"0",
")",
"except",
":",
"df",
"=",
"df",
".",
"unstack",
"(",
"0",
")",
"else",
":",
"df",
"=",
"sdata_curr",
"try",
":",
"df",
".",
"name",
"=",
"s",
"except",
":",
"pass",
"svar",
"=",
"df",
"if",
"remove_underscore",
":",
"s",
"=",
"s",
".",
"replace",
"(",
"''",
",",
"''",
")",
"if",
"lowercase",
":",
"s",
"=",
"s",
".",
"lower",
"(",
")",
"if",
"not",
"lamb",
"is",
"None",
":",
"svar",
"=",
"lamb",
"(",
"svar",
")",
"if",
"not",
"returnfirst",
":",
"if",
"not",
"clear",
":",
"try",
":",
"sold",
"=",
"__builtins__",
"[",
"s",
"]",
"if",
"len",
"(",
"sold",
".",
"shape",
")",
"==",
"len",
"(",
"svar",
".",
"shape",
")",
":",
"if",
"verbose",
":",
"print",
"(",
"'Augmenting'",
",",
"s",
")",
"for",
"c",
"in",
"svar",
".",
"axes",
"[",
"0",
"]",
":",
"sold",
"[",
"c",
"]",
"=",
"svar",
"[",
"c",
"]",
"svar",
"=",
"sold",
"except",
":",
"pass",
"else",
":",
"__builtins__",
"[",
"s",
"]",
"=",
"svar",
"logprint",
"=",
"logger",
".",
"info",
"if",
"verbose",
"else",
"logger",
".",
"debug",
"if",
"isinstance",
"(",
"svar",
",",
"pd",
".",
"DataFrame",
")",
":",
"logprint",
"(",
"'Rows : {} ... {}'",
".",
"format",
"(",
"str",
"(",
"svar",
".",
"index",
"[",
"0",
"]",
")",
",",
"str",
"(",
"svar",
".",
"index",
"[",
"-",
"1",
"]",
")",
")",
")",
"colwidth",
"=",
"np",
".",
"max",
"(",
"[",
"len",
"(",
"str",
"(",
"svar",
".",
"columns",
"[",
"i",
"]",
")",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"svar",
".",
"columns",
")",
")",
"]",
")",
"logprint",
"(",
"'Columns: {}'",
".",
"format",
"(",
"'\\n '",
".",
"join",
"(",
"[",
"(",
"'{:<%d} = {} ... {}'",
"%",
"colwidth",
")",
".",
"format",
"(",
"str",
"(",
"svar",
".",
"columns",
"[",
"i",
"]",
")",
",",
"svar",
".",
"iloc",
"[",
"0",
",",
"i",
"]",
",",
"svar",
".",
"iloc",
"[",
"-",
"1",
",",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"svar",
".",
"columns",
")",
")",
"]",
")",
")",
")",
"elif",
"isinstance",
"(",
"svar",
",",
"pd",
".",
"Series",
")",
":",
"logprint",
"(",
"'Index : {} ... {}'",
".",
"format",
"(",
"str",
"(",
"svar",
".",
"index",
"[",
"0",
"]",
")",
",",
"str",
"(",
"svar",
".",
"index",
"[",
"-",
"1",
"]",
")",
")",
")",
"else",
":",
"logprint",
"(",
"svar",
")",
"if",
"returnfirst",
":",
"svar2ret",
".",
"append",
"(",
"svar",
")",
"if",
"returnfirst",
":",
"if",
"len",
"(",
"svar2ret",
")",
"==",
"1",
":",
"svar2ret",
"=",
"svar2ret",
"[",
"0",
"]",
"return",
"svar2ret"
] | 41.77037 | 16.407407 |
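A detail worth noting in the gload row: when called without gpaths, it falls back to gload.last_gpaths and gload.last_glabels, i.e. it remembers its previous arguments as attributes on the function object itself. A hedged, self-contained sketch of that pattern (the names here are illustrative, not part of any GDX API):

def load(paths=None):
    # Fall back to whatever the previous call used, like gload.last_gpaths.
    if paths is None:
        paths = load.last_paths
    load.last_paths = paths  # remember for the next call
    return ["loaded %s" % p for p in paths]

load.last_paths = []  # seed value; gload itself assumes a prior call set it

print(load(["a.gdx", "b.gdx"]))  # ['loaded a.gdx', 'loaded b.gdx']
print(load())                    # reuses the remembered paths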
def _ReloadArtifacts(self):
"""Load artifacts from all sources."""
self._artifacts = {}
self._LoadArtifactsFromFiles(self._sources.GetAllFiles())
self.ReloadDatastoreArtifacts()
|
[
"def",
"_ReloadArtifacts",
"(",
"self",
")",
":",
"self",
".",
"_artifacts",
"=",
"{",
"}",
"self",
".",
"_LoadArtifactsFromFiles",
"(",
"self",
".",
"_sources",
".",
"GetAllFiles",
"(",
")",
")",
"self",
".",
"ReloadDatastoreArtifacts",
"(",
")"
] | 37.8 | 11 |
def fingerprints(self, keyhalf='any', keytype='any'):
"""
List loaded fingerprints with some optional filtering.
    :param str keyhalf: Can be 'any', 'public', or 'private'. If 'public' or 'private', the fingerprints of keys of
        the other type will not be included in the results.
    :param str keytype: Can be 'any', 'primary', or 'sub'. If 'primary' or 'sub', the fingerprints of keys of
        the other type will not be included in the results.
:returns: a ``set`` of fingerprints of keys matching the filters specified.
"""
return {pk.fingerprint for pk in self._keys.values()
if pk.is_primary in [True if keytype in ['primary', 'any'] else None,
False if keytype in ['sub', 'any'] else None]
if pk.is_public in [True if keyhalf in ['public', 'any'] else None,
False if keyhalf in ['private', 'any'] else None]}
|
[
"def",
"fingerprints",
"(",
"self",
",",
"keyhalf",
"=",
"'any'",
",",
"keytype",
"=",
"'any'",
")",
":",
"return",
"{",
"pk",
".",
"fingerprint",
"for",
"pk",
"in",
"self",
".",
"_keys",
".",
"values",
"(",
")",
"if",
"pk",
".",
"is_primary",
"in",
"[",
"True",
"if",
"keytype",
"in",
"[",
"'primary'",
",",
"'any'",
"]",
"else",
"None",
",",
"False",
"if",
"keytype",
"in",
"[",
"'sub'",
",",
"'any'",
"]",
"else",
"None",
"]",
"if",
"pk",
".",
"is_public",
"in",
"[",
"True",
"if",
"keyhalf",
"in",
"[",
"'public'",
",",
"'any'",
"]",
"else",
"None",
",",
"False",
"if",
"keyhalf",
"in",
"[",
"'private'",
",",
"'any'",
"]",
"else",
"None",
"]",
"}"
] | 67.666667 | 36.866667 |
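The comprehension in fingerprints filters with a compact trick: each filter builds a two-element list whose entries are True/False or None, and the boolean attribute is tested with `in`; a None entry can never equal a bool, so it silently disables that branch. A toy sketch of the same trick (the parity filter is illustrative):

def filter_parity(items, want='any'):
    # Mirrors the [True if ... else None, False if ... else None] lists
    # in fingerprints(): a None entry never matches a bool.
    allowed = [True if want in ('odd', 'any') else None,
               False if want in ('even', 'any') else None]
    return {x for x in items if (x % 2 == 1) in allowed}

print(filter_parity(range(6), 'odd'))   # {1, 3, 5}
print(filter_parity(range(6), 'even'))  # {0, 2, 4}
print(filter_parity(range(6), 'any'))   # {0, 1, 2, 3, 4, 5}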
def colored_noise(psd, start_time, end_time, seed=0, low_frequency_cutoff=1.0):
""" Create noise from a PSD
Return noise from the chosen PSD. Note that if unique noise is desired
a unique seed should be provided.
Parameters
----------
psd : pycbc.types.FrequencySeries
PSD to color the noise
start_time : int
Start time in GPS seconds to generate noise
end_time : int
        End time in GPS seconds to generate noise
seed : {None, int}
The seed to generate the noise.
    low_frequency_cutoff : {1.0, float}
The low frequency cutoff to pass to the PSD generation.
Returns
--------
noise : TimeSeries
A TimeSeries containing gaussian noise colored by the given psd.
"""
psd = psd.copy()
flen = int(SAMPLE_RATE / psd.delta_f) / 2 + 1
oldlen = len(psd)
psd.resize(flen)
# Want to avoid zeroes in PSD.
max_val = psd.max()
for i in xrange(len(psd)):
if i >= (oldlen-1):
psd.data[i] = psd[oldlen - 2]
if psd[i] == 0:
psd.data[i] = max_val
wn_dur = int(end_time - start_time) + 2*FILTER_LENGTH
if psd.delta_f >= 1. / (2.*FILTER_LENGTH):
# If the PSD is short enough, this method is less memory intensive than
# resizing and then calling inverse_spectrum_truncation
psd = pycbc.psd.interpolate(psd, 1.0 / (2.*FILTER_LENGTH))
# inverse_spectrum_truncation truncates the inverted PSD. To truncate
# the non-inverted PSD we give it the inverted PSD to truncate and then
# invert the output.
psd = 1. / pycbc.psd.inverse_spectrum_truncation(1./psd,
FILTER_LENGTH * SAMPLE_RATE,
low_frequency_cutoff=low_frequency_cutoff,
trunc_method='hann')
psd = psd.astype(complex_same_precision_as(psd))
# Zero-pad the time-domain PSD to desired length. Zeroes must be added
# in the middle, so some rolling between a resize is used.
psd = psd.to_timeseries()
psd.roll(SAMPLE_RATE * FILTER_LENGTH)
psd.resize(wn_dur * SAMPLE_RATE)
psd.roll(-SAMPLE_RATE * FILTER_LENGTH)
# As time series is still mirrored the complex frequency components are
# 0. But convert to real by using abs as in inverse_spectrum_truncate
psd = psd.to_frequencyseries()
else:
psd = pycbc.psd.interpolate(psd, 1.0 / wn_dur)
psd = 1. / pycbc.psd.inverse_spectrum_truncation(1./psd,
FILTER_LENGTH * SAMPLE_RATE,
low_frequency_cutoff=low_frequency_cutoff,
trunc_method='hann')
kmin = int(low_frequency_cutoff / psd.delta_f)
psd[:kmin].clear()
asd = (psd.real())**0.5
del psd
white_noise = normal(start_time - FILTER_LENGTH, end_time + FILTER_LENGTH,
seed=seed)
white_noise = white_noise.to_frequencyseries()
# Here we color. Do not want to duplicate memory here though so use '*='
white_noise *= asd
del asd
colored = white_noise.to_timeseries()
del white_noise
return colored.time_slice(start_time, end_time)
|
[
"def",
"colored_noise",
"(",
"psd",
",",
"start_time",
",",
"end_time",
",",
"seed",
"=",
"0",
",",
"low_frequency_cutoff",
"=",
"1.0",
")",
":",
"psd",
"=",
"psd",
".",
"copy",
"(",
")",
"flen",
"=",
"int",
"(",
"SAMPLE_RATE",
"/",
"psd",
".",
"delta_f",
")",
"/",
"2",
"+",
"1",
"oldlen",
"=",
"len",
"(",
"psd",
")",
"psd",
".",
"resize",
"(",
"flen",
")",
"# Want to avoid zeroes in PSD.",
"max_val",
"=",
"psd",
".",
"max",
"(",
")",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"psd",
")",
")",
":",
"if",
"i",
">=",
"(",
"oldlen",
"-",
"1",
")",
":",
"psd",
".",
"data",
"[",
"i",
"]",
"=",
"psd",
"[",
"oldlen",
"-",
"2",
"]",
"if",
"psd",
"[",
"i",
"]",
"==",
"0",
":",
"psd",
".",
"data",
"[",
"i",
"]",
"=",
"max_val",
"wn_dur",
"=",
"int",
"(",
"end_time",
"-",
"start_time",
")",
"+",
"2",
"*",
"FILTER_LENGTH",
"if",
"psd",
".",
"delta_f",
">=",
"1.",
"/",
"(",
"2.",
"*",
"FILTER_LENGTH",
")",
":",
"# If the PSD is short enough, this method is less memory intensive than",
"# resizing and then calling inverse_spectrum_truncation",
"psd",
"=",
"pycbc",
".",
"psd",
".",
"interpolate",
"(",
"psd",
",",
"1.0",
"/",
"(",
"2.",
"*",
"FILTER_LENGTH",
")",
")",
"# inverse_spectrum_truncation truncates the inverted PSD. To truncate",
"# the non-inverted PSD we give it the inverted PSD to truncate and then",
"# invert the output.",
"psd",
"=",
"1.",
"/",
"pycbc",
".",
"psd",
".",
"inverse_spectrum_truncation",
"(",
"1.",
"/",
"psd",
",",
"FILTER_LENGTH",
"*",
"SAMPLE_RATE",
",",
"low_frequency_cutoff",
"=",
"low_frequency_cutoff",
",",
"trunc_method",
"=",
"'hann'",
")",
"psd",
"=",
"psd",
".",
"astype",
"(",
"complex_same_precision_as",
"(",
"psd",
")",
")",
"# Zero-pad the time-domain PSD to desired length. Zeroes must be added",
"# in the middle, so some rolling between a resize is used.",
"psd",
"=",
"psd",
".",
"to_timeseries",
"(",
")",
"psd",
".",
"roll",
"(",
"SAMPLE_RATE",
"*",
"FILTER_LENGTH",
")",
"psd",
".",
"resize",
"(",
"wn_dur",
"*",
"SAMPLE_RATE",
")",
"psd",
".",
"roll",
"(",
"-",
"SAMPLE_RATE",
"*",
"FILTER_LENGTH",
")",
"# As time series is still mirrored the complex frequency components are",
"# 0. But convert to real by using abs as in inverse_spectrum_truncate",
"psd",
"=",
"psd",
".",
"to_frequencyseries",
"(",
")",
"else",
":",
"psd",
"=",
"pycbc",
".",
"psd",
".",
"interpolate",
"(",
"psd",
",",
"1.0",
"/",
"wn_dur",
")",
"psd",
"=",
"1.",
"/",
"pycbc",
".",
"psd",
".",
"inverse_spectrum_truncation",
"(",
"1.",
"/",
"psd",
",",
"FILTER_LENGTH",
"*",
"SAMPLE_RATE",
",",
"low_frequency_cutoff",
"=",
"low_frequency_cutoff",
",",
"trunc_method",
"=",
"'hann'",
")",
"kmin",
"=",
"int",
"(",
"low_frequency_cutoff",
"/",
"psd",
".",
"delta_f",
")",
"psd",
"[",
":",
"kmin",
"]",
".",
"clear",
"(",
")",
"asd",
"=",
"(",
"psd",
".",
"real",
"(",
")",
")",
"**",
"0.5",
"del",
"psd",
"white_noise",
"=",
"normal",
"(",
"start_time",
"-",
"FILTER_LENGTH",
",",
"end_time",
"+",
"FILTER_LENGTH",
",",
"seed",
"=",
"seed",
")",
"white_noise",
"=",
"white_noise",
".",
"to_frequencyseries",
"(",
")",
"# Here we color. Do not want to duplicate memory here though so use '*='",
"white_noise",
"*=",
"asd",
"del",
"asd",
"colored",
"=",
"white_noise",
".",
"to_timeseries",
"(",
")",
"del",
"white_noise",
"return",
"colored",
".",
"time_slice",
"(",
"start_time",
",",
"end_time",
")"
] | 39.135802 | 20.395062 |
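Stripped of the pycbc TimeSeries/FrequencySeries machinery and the inverse-spectrum truncation, the core of colored_noise is: draw white Gaussian noise, move to the frequency domain, multiply by the ASD (the square root of the PSD), and transform back. A hedged numpy-only sketch of that idea; the 1/f PSD and all names are illustrative, not pycbc API:

import numpy as np

def toy_colored_noise(psd_func, n_samples, sample_rate, seed=0):
    rng = np.random.RandomState(seed)
    white = rng.normal(size=n_samples)            # white Gaussian noise
    white_f = np.fft.rfft(white)                  # to the frequency domain
    freqs = np.fft.rfftfreq(n_samples, d=1.0 / sample_rate)
    asd = np.sqrt(psd_func(freqs))                # color by the ASD
    return np.fft.irfft(white_f * asd, n=n_samples)

# Toy 1/f PSD with the DC bin zeroed, echoing the psd[:kmin].clear() step.
psd = lambda f: np.where(f > 0, 1.0 / np.maximum(f, 1.0), 0.0)
print(toy_colored_noise(psd, 4096, 4096.0).shape)  # (4096,)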
def clear_thumbnails(self):
'''clear all thumbnails from the map'''
state = self.state
for l in state.layers:
keys = state.layers[l].keys()[:]
for key in keys:
if (isinstance(state.layers[l][key], SlipThumbnail)
and not isinstance(state.layers[l][key], SlipIcon)):
state.layers[l].pop(key)
|
[
"def",
"clear_thumbnails",
"(",
"self",
")",
":",
"state",
"=",
"self",
".",
"state",
"for",
"l",
"in",
"state",
".",
"layers",
":",
"keys",
"=",
"state",
".",
"layers",
"[",
"l",
"]",
".",
"keys",
"(",
")",
"[",
":",
"]",
"for",
"key",
"in",
"keys",
":",
"if",
"(",
"isinstance",
"(",
"state",
".",
"layers",
"[",
"l",
"]",
"[",
"key",
"]",
",",
"SlipThumbnail",
")",
"and",
"not",
"isinstance",
"(",
"state",
".",
"layers",
"[",
"l",
"]",
"[",
"key",
"]",
",",
"SlipIcon",
")",
")",
":",
"state",
".",
"layers",
"[",
"l",
"]",
".",
"pop",
"(",
"key",
")"
] | 42.777778 | 13.666667 |
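The keys()[:] copy in clear_thumbnails is the Python 2 idiom for snapshotting a dict's keys so entries can be popped while looping; in Python 3, list(d) plays the same role. A minimal sketch:

layers = {'thumb1': 'SlipThumbnail', 'icon': 'SlipIcon', 'thumb2': 'SlipThumbnail'}
for key in list(layers):        # snapshot, like keys()[:] in Python 2
    if layers[key] == 'SlipThumbnail':
        layers.pop(key)         # safe: we iterate over the copy
print(layers)                   # {'icon': 'SlipIcon'}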