Columns: text (string, lengths 89–104k) | code_tokens (list) | avg_line_len (float64, 7.91–980) | score (float64, 0–630)
def get_30_360(self, end):
    """
    Implements the 30/360 day count convention (Section 4.16(f), 2006 ISDA Definitions).
    """
    start_day = min(self.day, 30)
    end_day = 30 if (start_day == 30 and end.day == 31) else end.day
    return (360 * (end.year - self.year) + 30 * (end.month - self.month) + (end_day - start_day)) / 360.0
avg_line_len: 49.714286 | score: 22.857143
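A worked check of the 30/360 arithmetic above, as a hedged standalone sketch: the Date namedtuple stands in for the date-like objects that supply year/month/day in the method, and the expected value follows directly from the formula.

from collections import namedtuple

# Hypothetical stand-in for the method's date-like self/end objects.
Date = namedtuple('Date', 'year month day')

def get_30_360(start, end):
    # Same arithmetic as the method above, written as a free function.
    start_day = min(start.day, 30)
    end_day = 30 if (start_day == 30 and end.day == 31) else end.day
    return (360 * (end.year - start.year) + 30 * (end.month - start.month)
            + (end_day - start_day)) / 360.0

# Jan 31 -> Jul 31: both month-ends count as day 30, giving exactly half a year.
print(get_30_360(Date(2020, 1, 31), Date(2020, 7, 31)))  # 0.5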
def update_mandb(self, quiet=True):
    """Update mandb."""
    if not environ.config.UpdateManPath:
        return
    print('\nrunning mandb...')
    cmd = 'mandb %s' % (' -q' if quiet else '')
    subprocess.Popen(cmd, shell=True).wait()
avg_line_len: 36.857143 | score: 7.857143
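An aside on the call above: `shell=True` is harmless for this fixed command, but an argument list avoids shell quoting altogether. A minimal standalone sketch of that variant, assuming only the `mandb` command and `-q` flag already used by the snippet:

import subprocess

def update_mandb(quiet=True):
    # An argument list sidesteps shell quoting entirely.
    cmd = ['mandb'] + (['-q'] if quiet else [])
    subprocess.run(cmd, check=False)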
def read(self, size):
    """
    Read wrapper.

    Parameters
    ----------
    size : int
        Number of bytes to read.
    """
    try:
        return self.handle.read(size)
    except (OSError, serial.SerialException):
        print()
        print("Piksi disconnected")
        print()
        self.handle.close()
        raise IOError
avg_line_len: 22.764706 | score: 15
def new(params, event_shape=(), validate_args=False, name=None):
    """Create the distribution instance from a `params` vector."""
    with tf.compat.v1.name_scope(name, 'IndependentPoisson',
                                 [params, event_shape]):
        params = tf.convert_to_tensor(value=params, name='params')
        event_shape = dist_util.expand_to_vector(
            tf.convert_to_tensor(
                value=event_shape, name='event_shape', dtype_hint=tf.int32),
            tensor_name='event_shape')
        output_shape = tf.concat([
            tf.shape(input=params)[:-1],
            event_shape,
        ], axis=0)
        return tfd.Independent(
            tfd.Poisson(
                log_rate=tf.reshape(params, output_shape),
                validate_args=validate_args),
            reinterpreted_batch_ndims=tf.size(input=event_shape),
            validate_args=validate_args)
avg_line_len: 44.35 | score: 13.65
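A small usage sketch of the same construction through TensorFlow Probability's public API; the parameter values and shapes are invented for illustration.

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

# A batch of 2 parameter vectors, each holding log-rates for a 3-dim event.
params = tf.constant([[0.0, 0.5, 1.0],
                      [1.0, 1.5, 2.0]])

dist = tfd.Independent(
    tfd.Poisson(log_rate=params),
    reinterpreted_batch_ndims=1)  # fold the last axis into the event shape

print(dist.batch_shape)  # (2,)
print(dist.event_shape)  # (3,)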
def _run(self):
    """Broadcasts forever."""
    self._is_running = True
    network_fail = False
    try:
        while self._do_run:
            try:
                if network_fail is True:
                    LOGGER.info("Network connection re-established!")
                    network_fail = False
                self._sender(self._message)
            except IOError as err:
                if err.errno == errno.ENETUNREACH:
                    LOGGER.error("Network unreachable. "
                                 "Trying again in %d s.",
                                 self._interval)
                    network_fail = True
                else:
                    raise
            time.sleep(self._interval)
    finally:
        self._is_running = False
        self._sender.close()
avg_line_len: 36.416667 | score: 11.958333
def event_return(events):
    '''
    Send the events to a mattermost room.

    :param events: List of events
    :return: True if all messages were sent successfully.
    '''
    _options = _get_options()
    api_url = _options.get('api_url')
    channel = _options.get('channel')
    username = _options.get('username')
    hook = _options.get('hook')
    is_ok = True
    for event in events:
        log.debug('Event: %s', event)
        log.debug('Event data: %s', event['data'])
        message = 'tag: {0}\r\n'.format(event['tag'])
        for key, value in six.iteritems(event['data']):
            message += '{0}: {1}\r\n'.format(key, value)
        result = post_message(channel,
                              message,
                              username,
                              api_url,
                              hook)
        if not result:
            is_ok = False
    return is_ok
avg_line_len: 30 | score: 15.466667
def create_xml(self, useNamespace=False):
    """Create an ElementTree representation of the object."""
    UNTL_NAMESPACE = 'http://digital2.library.unt.edu/untl/'
    UNTL = '{%s}' % UNTL_NAMESPACE
    NSMAP = {'untl': UNTL_NAMESPACE}
    if useNamespace:
        root = Element(UNTL + self.tag, nsmap=NSMAP)
    else:
        root = Element(self.tag)
    # Sort the elements by the index of
    # UNTL_XML_ORDER pre-ordered list.
    self.sort_untl(UNTL_XML_ORDER)
    # Create an XML structure from field list.
    for element in self.children:
        if useNamespace:
            create_untl_xml_subelement(root, element, UNTL)
        else:
            create_untl_xml_subelement(root, element)
    return root
avg_line_len: 35.045455 | score: 14.818182
def is_all_field_none(self):
    """
    :rtype: bool
    """
    if self._id_ is not None:
        return False
    if self._created is not None:
        return False
    if self._updated is not None:
        return False
    if self._action is not None:
        return False
    if self._user_id is not None:
        return False
    if self._monetary_account_id is not None:
        return False
    if self._object_ is not None:
        return False
    if self._status is not None:
        return False
    return True
avg_line_len: 19.433333 | score: 19.233333
def ExpandRelativePath(method_config, params, relative_path=None):
    """Determine the relative path for request."""
    path = relative_path or method_config.relative_path or ''
    for param in method_config.path_params:
        param_template = '{%s}' % param
        # For more details about "reserved word expansion", see:
        #   http://tools.ietf.org/html/rfc6570#section-3.2.2
        reserved_chars = ''
        reserved_template = '{+%s}' % param
        if reserved_template in path:
            reserved_chars = _RESERVED_URI_CHARS
            path = path.replace(reserved_template, param_template)
        if param_template not in path:
            raise exceptions.InvalidUserInputError(
                'Missing path parameter %s' % param)
        try:
            # TODO(craigcitro): Do we want to support some sophisticated
            # mapping here?
            value = params[param]
        except KeyError:
            raise exceptions.InvalidUserInputError(
                'Request missing required parameter %s' % param)
        if value is None:
            raise exceptions.InvalidUserInputError(
                'Request missing required parameter %s' % param)
        try:
            if not isinstance(value, six.string_types):
                value = str(value)
            path = path.replace(param_template,
                                urllib_parse.quote(value.encode('utf_8'),
                                                   reserved_chars))
        except TypeError as e:
            raise exceptions.InvalidUserInputError(
                'Error setting required parameter %s to value %s: %s' % (
                    param, value, e))
    return path
avg_line_len: 44.918919 | score: 16
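A self-contained sketch of the template behaviour the function implements: plain {param} expansion percent-encodes reserved characters, while {+param} (RFC 6570 reserved expansion) lets them through. The template, value, and exact reserved-character set below are assumptions for illustration.

from urllib.parse import quote

RESERVED_URI_CHARS = "/:@?&=+$,;'"  # assumed stand-in for _RESERVED_URI_CHARS

def expand(path, param, value, reserved=False):
    # {+param} keeps reserved characters; {param} percent-encodes them.
    template = ('{+%s}' if reserved else '{%s}') % param
    safe = RESERVED_URI_CHARS if reserved else ''
    return path.replace(template, quote(value, safe))

print(expand('objects/{name}', 'name', 'a/b'))         # objects/a%2Fb
print(expand('objects/{+name}', 'name', 'a/b', True))  # objects/a/b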
def _theorem5p4(adj, ub):
    """By Theorem 5.4, if any two vertices have ub + 1 common neighbors
    then we can add an edge between them.
    """
    new_edges = set()
    for u, v in itertools.combinations(adj, 2):
        if u in adj[v]:
            # already an edge
            continue
        if len(adj[u].intersection(adj[v])) > ub:
            new_edges.add((u, v))
    while new_edges:
        for u, v in new_edges:
            adj[u].add(v)
            adj[v].add(u)
        new_edges = set()
        for u, v in itertools.combinations(adj, 2):
            if u in adj[v]:
                continue
            if len(adj[u].intersection(adj[v])) > ub:
                new_edges.add((u, v))
avg_line_len: 27.32 | score: 15.88
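A toy run of the rule above with ub = 1 on a made-up adjacency dict (vertex -> set of neighbours): a and b share two common neighbours, which exceeds ub, so the edge a-b gets added.

import itertools

adj = {
    'a': {'x', 'y'},
    'b': {'x', 'y'},
    'x': {'a', 'b'},
    'y': {'a', 'b'},
}
ub = 1

# One pass of the Theorem 5.4 test: > ub common neighbours => add the edge.
for u, v in itertools.combinations(adj, 2):
    if u not in adj[v] and len(adj[u] & adj[v]) > ub:
        adj[u].add(v)
        adj[v].add(u)

print('b' in adj['a'])  # True: a and b share {x, y}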
def _part(self, name, func, args, help, **kwargs):
    """Parses arguments of a single command (e.g. 'v').

    If :args: is empty, it assumes that command takes no further arguments.

    :name: Name of the command.
    :func: Arg method to execute.
    :args: Dictionary of CLI arguments pointed at Arg method arguments.
    :help: Commands' help text.
    :kwargs: Additional arguments for :func:.
    """
    while self.argv:
        arg = self.argv.popleft()
        if arg == "-h" or arg == "--help":
            print(help)
            return
        try:
            argname, argarg = args[arg]
            kwargs[argname] = argarg and self.argv.popleft() or True
        except KeyError:
            raise UnrecognizedArgumentError(name, arg)
        except IndexError:
            valids = ["-s", "--sort", "-d", "--done", "-D", "--undone"]
            if arg not in valids:
                raise NotEnoughArgumentsError(name)
            kwargs[argname] = True
    func(**kwargs)
avg_line_len: 37.857143 | score: 16.214286
def get_terms(self, field=None):
    """
    Create a terms aggregation object and add it to the aggregation dict.

    :param field: the field present in the index that is to be aggregated
    :returns: self, which allows the method to be chainable with the other methods
    """
    if not field:
        raise AttributeError("Please provide field to apply aggregation to!")
    agg = A("terms", field=field, size=self.size, order={"_count": "desc"})
    self.aggregations['terms_' + field] = agg
    return self
avg_line_len: 41.692308 | score: 25.846154
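A brief sketch of the A helper the method builds on, from elasticsearch-dsl; the index and field names are invented, and attaching a ready-made aggregation object via bucket is assumed to follow the library's documented pattern.

from elasticsearch_dsl import A, Search

# The same terms aggregation, built standalone and attached to a search.
agg = A("terms", field="author.keyword", size=10, order={"_count": "desc"})

s = Search(index="commits")
s.aggs.bucket("terms_author", agg)
print(s.to_dict())  # the aggregation body that would be sent to Elasticsearch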
def activateRandomLocation(self):
    """
    Set the location to a random point.
    """
    self.bumpPhases = np.array([np.random.random(2)]).T
    self._computeActiveCells()
avg_line_len: 28.5 | score: 5.5
def append_attribute(self, name, value, content):
    """
    Append an attribute name/value into L{Content.data}.

    @param name: The attribute name
    @type name: basestring
    @param value: The attribute's value
    @type value: basestring
    @param content: The current content being unmarshalled.
    @type content: L{Content}
    """
    key = name
    key = '_%s' % reserved.get(key, key)
    setattr(content.data, key, value)
avg_line_len: 36.384615 | score: 8.384615
def restore_session(session: tf.Session, checkpoint_dir: str,
                    saver: Optional[tf.train.Saver] = None) -> None:
    """
    Restores a TensorFlow session from the latest checkpoint.

    :param session: The TF session
    :param checkpoint_dir: checkpoint files directory.
    :param saver: The saver object; if not provided, a default saver object will be created.
    """
    checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir)
    logger = settings.logger()
    if logger.isEnabledFor(logging.INFO):
        logger.info("Restoring session from `%s`.", checkpoint_path)
    saver = saver or get_default_saver()
    saver.restore(session, checkpoint_path)
avg_line_len: 44.466667 | score: 16.333333
def plotBrightLimitInV(gBright, pdf=False, png=False):
    """
    Plot the bright limit of Gaia in V as a function of (V-I).

    Parameters
    ----------
    gBright - The bright limit of Gaia in G
    pdf     - If True, save the figure as a PDF
    png     - If True, save the figure as a PNG
    """
    vmini = np.linspace(0.0, 6.0, 1001)
    gminv = gminvFromVmini(vmini)
    vBright = gBright - gminv
    fig = plt.figure(figsize=(10, 6.5))
    plt.plot(vmini, vBright, 'b-')
    plt.xlabel('$(V-I)$')
    plt.ylabel('Bright limit of Gaia in $V$')
    plt.xlim(0, 6)
    plt.ylim(5, 11)
    plt.grid(which='both')
    plt.title("Bright limit in $G$: {0}".format(gBright))
    if (pdf):
        plt.savefig('VBandBrightLimit.pdf')
    elif (png):
        plt.savefig('VBandBrightLimit.png')
    else:
        plt.show()
avg_line_len: 22.75 | score: 18.535714
def generate_source_image(source_file, processor_options, generators=None,
                          fail_silently=True):
    """
    Processes a source ``File`` through a series of source generators, stopping
    once a generator returns an image.

    The return value is this image instance or ``None`` if no generators
    return an image.

    If the source file cannot be opened, it will be set to ``None`` and still
    passed to the generators.
    """
    processor_options = ThumbnailOptions(processor_options)
    # Keep record of whether the source file was originally closed. Not all
    # file-like objects provide this attribute, so just fall back to False.
    was_closed = getattr(source_file, 'closed', False)
    if generators is None:
        generators = [
            utils.dynamic_import(name)
            for name in settings.THUMBNAIL_SOURCE_GENERATORS]
    exceptions = []
    try:
        for generator in generators:
            source = source_file
            # First try to open the file.
            try:
                source.open()
            except Exception:
                # If that failed, maybe the file-like object doesn't support
                # reopening so just try seeking back to the start of the file.
                try:
                    source.seek(0)
                except Exception:
                    source = None
            try:
                image = generator(source, **processor_options)
            except Exception as e:
                if not fail_silently:
                    if len(generators) == 1:
                        raise
                exceptions.append(e)
                image = None
            if image:
                return image
    finally:
        # Attempt to close the file if it was closed originally (but fail
        # silently).
        if was_closed:
            try:
                source_file.close()
            except Exception:
                pass
    if exceptions and not fail_silently:
        raise NoSourceGenerator(*exceptions)
avg_line_len: 36.907407 | score: 17.055556
def storage_class(self, value):
    """Set the storage class for the bucket.

    See https://cloud.google.com/storage/docs/storage-classes

    :type value: str
    :param value: one of "MULTI_REGIONAL", "REGIONAL", "NEARLINE",
                  "COLDLINE", "STANDARD", or "DURABLE_REDUCED_AVAILABILITY"
    """
    if value not in self._STORAGE_CLASSES:
        raise ValueError("Invalid storage class: %s" % (value,))
    self._patch_property("storageClass", value)
avg_line_len: 41.083333 | score: 20.333333
def _validate_compute_chunk_params(
        self, dates, symbols, initial_workspace):
    """
    Verify that the values passed to compute_chunk are well-formed.
    """
    root = self._root_mask_term
    clsname = type(self).__name__
    # Writing this out explicitly so this errors in testing if we change
    # the name without updating this line.
    compute_chunk_name = self.compute_chunk.__name__
    if root not in initial_workspace:
        raise AssertionError(
            "root_mask values not supplied to {cls}.{method}".format(
                cls=clsname,
                method=compute_chunk_name,
            )
        )
    shape = initial_workspace[root].shape
    implied_shape = len(dates), len(symbols)
    if shape != implied_shape:
        raise AssertionError(
            "root_mask shape is {shape}, but received dates/symbols "
            "imply that shape should be {implied}".format(
                shape=shape,
                implied=implied_shape,
            )
        )
avg_line_len: 37.413793 | score: 15.206897
def dorequest(self, request_dic, opname):
    """
    :param request_dic: a dictionary containing parameters for the request
    :param opname: An API operation name. Available operations are: loginUser,
        getInitialUserData, logoutUser, createNewBudget, freshStartABudget,
        cloneBudget, deleteTombstonedBudgets, syncCatalogData, syncBudgetData,
        getInstitutionList, getInstitutionLoginFields, getInstitutionAccountList,
        registerAccountForDirectConnect, updateDirectConnectCredentials, poll,
        createFeedback, runSqlStatement
    :return: the dictionary of the result of the request
    """
    def curate_password(message):
        return message.replace(self.password, '********')

    def errorout(message):
        LOG.error(curate_password(message))
        raise NYnabConnectionError(message)

    json_request_dict = json.dumps(request_dic, cls=ComplexEncoder)
    params = {u'operation_name': opname, 'request_data': json_request_dict}
    LOG.debug(curate_password('%s ... %s ' % (opname, params)))
    r = self.session.post(self.urlCatalog, params)
    self.lastrequest_elapsed = r.elapsed
    js = r.json()
    if r.status_code == 500:
        errorout('Unrecoverable server error, sorry YNAB')
    if r.status_code != 200:
        LOG.debug('non-200 HTTP code: %s ' % r.text)
    if 'error' not in js:
        errorout('The server returned a json value without an error field')
    if js['error'] is None:
        LOG.debug(curate_password('the server returned ' + pp_json(js)))
        return js
    error = js['error']
    if 'id' not in error:
        errorout('Error field %s without id returned from the API, %s' % (error, params))
    if error['id'] == 'user_not_found':
        errorout('API error, User Not Found')
    elif error['id'] == 'user_password_invalid':
        errorout('API error, User-Password combination invalid')
    elif error['id'] == 'request_throttled':
        LOG.debug('API request throttled')
        retryrafter = r.headers['Retry-After']
        LOG.debug('Waiting for %s s' % retryrafter)
        sleep(float(retryrafter))
        return self.dorequest(request_dic, opname)
    elif error['id'] == 'invalid_session_token':
        errorout('Invalid session token. You should call init_session() on the connection object')
    else:
        errorout('Unknown API Error "%s" was returned from the API when sending request (%s)' % (error['id'], params))
avg_line_len: 50.92 | score: 22.84
def short_description(func):
    """
    Given an object with a docstring, return the first line of the docstring
    """
    doc = inspect.getdoc(func)
    if doc is not None:
        doc = inspect.cleandoc(doc)
        lines = doc.splitlines()
        return lines[0]
    return ""
avg_line_len: 22.833333 | score: 17.666667
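A quick demonstration of the helper above; the sample function is invented, and the helper is restated inline so the snippet runs on its own.

import inspect

def short_description(func):
    doc = inspect.getdoc(func)
    if doc is not None:
        return inspect.cleandoc(doc).splitlines()[0]
    return ""

def sample():
    """Summarise things.

    Extra detail that the summary line omits.
    """

print(short_description(sample))  # -> Summarise things.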
def bind_bottom_up(lower, upper, __fval=None, **fval):
    """Bind 2 layers for dissection.

    The upper layer will be chosen for dissection on top of the lower layer, if
    ALL the passed arguments are validated. If multiple calls are made with the same  # noqa: E501
    layers, the last one will be used as default.

    ex:
        >>> bind_bottom_up(Ether, SNAP, type=0x1234)
        >>> Ether(b'\xff\xff\xff\xff\xff\xff\xd0P\x99V\xdd\xf9\x124\x00\x00\x00\x00\x00')  # noqa: E501
        <Ether dst=ff:ff:ff:ff:ff:ff src=d0:50:99:56:dd:f9 type=0x1234 |<SNAP OUI=0x0 code=0x0 |>>  # noqa: E501
    """
    if __fval is not None:
        fval.update(__fval)
    lower.payload_guess = lower.payload_guess[:]
    lower.payload_guess.append((fval, upper))
avg_line_len: 49.666667 | score: 25.466667
def to_dict(self, remove_nones=False):
    """
    Creates a dictionary representation of the object.

    :param remove_nones: Whether ``None`` values should be filtered out of the dictionary. Defaults to ``False``.
    :return: A dictionary representation of the report.
    """
    if remove_nones:
        report_dict = super().to_dict(remove_nones=True)
    else:
        report_dict = {
            'title': self.title,
            'reportBody': self.body,
            'timeBegan': self.time_began,
            'externalUrl': self.external_url,
            'distributionType': self._get_distribution_type(),
            'externalTrackingId': self.external_id,
            'enclaveIds': self.enclave_ids,
            'created': self.created,
            'updated': self.updated,
        }
    # id field might not be present
    if self.id is not None:
        report_dict['id'] = self.id
    else:
        report_dict['id'] = None
    return report_dict
avg_line_len: 34.466667 | score: 16.733333
def humanise_seconds(seconds):
    """Utility function to humanise seconds value into e.g. 10 seconds ago.

    The function will try to make a nice phrase of the seconds count
    provided.

    .. note:: Currently seconds that amount to days are not supported.

    :param seconds: Mandatory seconds value e.g. 1100.
    :type seconds: int

    :returns: A humanised version of the seconds count.
    :rtype: str
    """
    days = seconds // (3600 * 24)  # integer division so phrases use whole units
    day_modulus = seconds % (3600 * 24)
    hours = day_modulus // 3600
    hour_modulus = day_modulus % 3600
    minutes = hour_modulus // 60
    if seconds < 60:
        return tr('%i seconds' % seconds)
    if seconds < 120:
        return tr('a minute')
    if seconds < 3600:
        return tr('%s minutes' % minutes)
    if seconds < 7200:
        return tr('over an hour')
    if seconds < 86400:
        return tr('%i hours and %i minutes' % (hours, minutes))
    else:
        # If all else fails...
        return tr('%i days, %i hours and %i minutes' % (
            days, hours, minutes))
avg_line_len: 30.058824 | score: 17.529412
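A quick check of the thresholds above, reusing humanise_seconds as defined and stubbing tr as the identity, since it is only a translation wrapper here.

def tr(text):
    # Stand-in for the translation wrapper the original calls.
    return text

print(humanise_seconds(45))     # 45 seconds
print(humanise_seconds(90))     # a minute
print(humanise_seconds(1100))   # 18 minutes
print(humanise_seconds(90000))  # 1 days, 1 hours and 0 minutes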
def dispatch_on(*dispatch_args):
    """
    Factory of decorators turning a function into a generic function
    dispatching on the given arguments.
    """
    assert dispatch_args, 'No dispatch args passed'
    dispatch_str = '(%s,)' % ', '.join(dispatch_args)

    def check(arguments, wrong=operator.ne, msg=''):
        """Make sure one passes the expected number of arguments"""
        if wrong(len(arguments), len(dispatch_args)):
            raise TypeError('Expected %d arguments, got %d%s' %
                            (len(dispatch_args), len(arguments), msg))

    def gen_func_dec(func):
        """Decorator turning a function into a generic function"""
        # first check the dispatch arguments
        argset = set(getfullargspec(func).args)
        if not set(dispatch_args) <= argset:
            raise NameError('Unknown dispatch arguments %s' % dispatch_str)
        typemap = {}

        def vancestors(*types):
            """
            Get a list of sets of virtual ancestors for the given types
            """
            check(types)
            ras = [[] for _ in range(len(dispatch_args))]
            for types_ in typemap:
                for t, type_, ra in zip(types, types_, ras):
                    if issubclass(t, type_) and type_ not in t.mro():
                        append(type_, ra)
            return [set(ra) for ra in ras]

        def ancestors(*types):
            """
            Get a list of virtual MROs, one for each type
            """
            check(types)
            lists = []
            for t, vas in zip(types, vancestors(*types)):
                n_vas = len(vas)
                if n_vas > 1:
                    raise RuntimeError(
                        'Ambiguous dispatch for %s: %s' % (t, vas))
                elif n_vas == 1:
                    va, = vas
                    mro = type('t', (t, va), {}).mro()[1:]
                else:
                    mro = t.mro()
                lists.append(mro[:-1])  # discard t and object
            return lists

        def register(*types):
            """
            Decorator to register an implementation for the given types
            """
            check(types)

            def dec(f):
                check(getfullargspec(f).args, operator.lt, ' in ' + f.__name__)
                typemap[types] = f
                return f
            return dec

        def dispatch_info(*types):
            """
            A utility to introspect the dispatch algorithm
            """
            check(types)
            lst = []
            for anc in itertools.product(*ancestors(*types)):
                lst.append(tuple(a.__name__ for a in anc))
            return lst

        def _dispatch(dispatch_args, *args, **kw):
            types = tuple(type(arg) for arg in dispatch_args)
            try:  # fast path
                f = typemap[types]
            except KeyError:
                pass
            else:
                return f(*args, **kw)
            combinations = itertools.product(*ancestors(*types))
            next(combinations)  # the first one has been already tried
            for types_ in combinations:
                f = typemap.get(types_)
                if f is not None:
                    return f(*args, **kw)
            # else call the default implementation
            return func(*args, **kw)

        return FunctionMaker.create(
            func, 'return _f_(%s, %%(shortsignature)s)' % dispatch_str,
            dict(_f_=_dispatch), register=register, default=func,
            typemap=typemap, vancestors=vancestors, ancestors=ancestors,
            dispatch_info=dispatch_info, __wrapped__=func)

    gen_func_dec.__name__ = 'dispatch_on' + dispatch_str
    return gen_func_dec
avg_line_len: 35.563107 | score: 16.485437
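A usage sketch for the factory above, assuming the decorator package (where this function originates) is installed; the serialize example itself is invented.

from decorator import dispatch_on

@dispatch_on('obj')
def serialize(obj):
    # Default implementation, used when no registered type matches.
    return repr(obj)

@serialize.register(list)
def serialize_list(obj):
    return '[%s]' % ', '.join(serialize(x) for x in obj)

print(serialize(1))       # '1'
print(serialize([1, 2]))  # '[1, 2]'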
def verify_md5(md5_expected, data, other_errors=None):
    "return True if okay, raise Exception if not"  # O_o ?
    md5_recv = hashlib.md5(data).hexdigest()
    if md5_expected != md5_recv:
        if other_errors is not None:
            logger.critical('\n'.join(other_errors))
        raise FailedVerification('original md5 = %r != %r = received md5'
                                 % (md5_expected, md5_recv))
    return True
avg_line_len: 47.333333 | score: 15.555556
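A self-contained check of the same idea, with a stand-in exception class so the snippet runs on its own.

import hashlib

class FailedVerification(Exception):
    pass

def verify_md5(md5_expected, data):
    md5_recv = hashlib.md5(data).hexdigest()
    if md5_expected != md5_recv:
        raise FailedVerification('original md5 = %r != %r = received md5'
                                 % (md5_expected, md5_recv))
    return True

payload = b'hello'
digest = hashlib.md5(payload).hexdigest()
print(verify_md5(digest, payload))   # True
# verify_md5(digest, b'tampered')    # would raise FailedVerification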
def _split_op(
        self, identifier, hs_label=None, dagger=False, args=None):
    """Return `name`, total `subscript`, total `superscript` and
    `arguments` str. All of the returned strings are fully rendered.

    Args:
        identifier (str or SymbolicLabelBase): A (non-rendered/ascii)
            identifier that may include a subscript. The output `name` will
            be the `identifier` without any subscript
        hs_label (str): The rendered label for the Hilbert space of the
            operator, or None. Returned unchanged.
        dagger (bool): Flag to indicate whether the operator is daggered.
            If True, :attr:`dagger_sym` will be included in the
            `superscript` (or `subscript`, depending on the settings)
        args (list or None): List of arguments (expressions). Each element
            will be rendered with :meth:`doprint`. The total list of args
            will then be joined with commas, enclosed
            with :attr:`_parenth_left` and :attr:`parenth_right`, and
            returned as the `arguments` string
    """
    if self._isinstance(identifier, 'SymbolicLabelBase'):
        identifier = QnetAsciiDefaultPrinter()._print_SCALAR_TYPES(
            identifier.expr)
    name, total_subscript = self._split_identifier(identifier)
    total_superscript = ''
    if (hs_label not in [None, '']):
        if self._settings['show_hs_label'] == 'subscript':
            if len(total_subscript) == 0:
                total_subscript = '(' + hs_label + ')'
            else:
                total_subscript += ',(' + hs_label + ')'
        else:
            total_superscript += '(' + hs_label + ')'
    if dagger:
        total_superscript += self._dagger_sym
    args_str = ''
    if (args is not None) and (len(args) > 0):
        args_str = (self._parenth_left +
                    ",".join([self.doprint(arg) for arg in args]) +
                    self._parenth_right)
    return name, total_subscript, total_superscript, args_str
avg_line_len: 51.707317 | score: 21.195122
def print_mem(unit="MB"):
"""Show the proc-mem-cost with psutil, use this only for lazinesssss.
:param unit: B, KB, MB, GB.
"""
try:
import psutil
B = float(psutil.Process(os.getpid()).memory_info().vms)
KB = B / 1024
MB = KB / 1024
GB = MB / 1024
result = vars()[unit]
print_info("memory usage: %.2f(%s)" % (result, unit))
return result
except ImportError:
print_info("pip install psutil first.") | [
"def",
"print_mem",
"(",
"unit",
"=",
"\"MB\"",
")",
":",
"try",
":",
"import",
"psutil",
"B",
"=",
"float",
"(",
"psutil",
".",
"Process",
"(",
"os",
".",
"getpid",
"(",
")",
")",
".",
"memory_info",
"(",
")",
".",
"vms",
")",
"KB",
"=",
"B",
"/",
"1024",
"MB",
"=",
"KB",
"/",
"1024",
"GB",
"=",
"MB",
"/",
"1024",
"result",
"=",
"vars",
"(",
")",
"[",
"unit",
"]",
"print_info",
"(",
"\"memory usage: %.2f(%s)\"",
"%",
"(",
"result",
",",
"unit",
")",
")",
"return",
"result",
"except",
"ImportError",
":",
"print_info",
"(",
"\"pip install psutil first.\"",
")"
]
avg_line_len: 27.941176 | score: 18.176471
def parse_peddy_ped_check(lines):
    """Parse a .ped_check.csv file

    Args:
        lines(iterable(str))

    Returns:
        ped_check(list(dict))
    """
    ped_check = []
    header = []
    for i, line in enumerate(lines):
        line = line.rstrip()
        if i == 0:
            # Header line
            header = line.lstrip('#').split(',')
        else:
            pair_info = dict(zip(header, line.split(',')))
            # the number of sites at which sample_a was heterozygous
            pair_info['hets_a'] = convert_number(pair_info['hets_a'])
            # the number of sites at which sample_b was heterozygous
            pair_info['hets_b'] = convert_number(pair_info['hets_b'])
            # the number of sites at which the 2 samples shared no alleles
            # (should approach 0 for parent-child pairs).
            pair_info['ibs0'] = convert_number(pair_info['ibs0'])
            # the number of sites at which the 2 samples were both
            # hom-ref, both het, or both hom-alt.
            pair_info['ibs2'] = convert_number(pair_info['ibs2'])
            # the number of sites that was used to predict the relatedness.
            pair_info['n'] = convert_number(pair_info['n'])
            # the relatedness calculated from the genotypes.
            pair_info['rel'] = convert_number(pair_info['rel'])
            # the relatedness reported in the ped file.
            pair_info['pedigree_relatedness'] = convert_number(pair_info['pedigree_relatedness'])
            # difference between the preceding 2 columns.
            pair_info['rel_difference'] = convert_number(pair_info['rel_difference'])
            # the number of sites at which both samples were hets.
            pair_info['shared_hets'] = convert_number(pair_info['shared_hets'])
            # boolean indicating that this pair is a parent-child pair
            # according to the ped file.
            pair_info['pedigree_parents'] = make_bool(pair_info.get('pedigree_parents'))
            # boolean indicating that this pair is expected to be a parent-child
            # pair according to the ibs0 (< 0.012) calculated from the genotypes.
            pair_info['predicted_parents'] = make_bool(pair_info.get('predicted_parents'))
            # boolean indicating that the preceding 2 columns do not match
            pair_info['parent_error'] = make_bool(pair_info.get('parent_error'))
            # boolean indicating that rel > 0.75 and ibs0 < 0.012
            pair_info['sample_duplication_error'] = make_bool(pair_info.get('sample_duplication_error'))
            ped_check.append(pair_info)
    return ped_check
avg_line_len: 40.984848 | score: 27.090909
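A toy run over a made-up header and data line, reusing parse_peddy_ped_check from above with minimal stand-ins for the convert_number and make_bool helpers it calls (their real definitions live elsewhere in the module).

def convert_number(value):
    # Stand-in: int if possible, else float, else the raw string.
    for cast in (int, float):
        try:
            return cast(value)
        except ValueError:
            pass
    return value

def make_bool(value):
    # Stand-in for the module's boolean converter.
    return str(value).lower() == 'true'

lines = [
    '#hets_a,hets_b,ibs0,ibs2,n,rel,pedigree_relatedness,rel_difference,'
    'shared_hets,pedigree_parents,predicted_parents,parent_error,'
    'sample_duplication_error',
    '1928,1724,6,1364,5000,0.52,0.5,0.02,842,True,True,False,False',
]

checked = parse_peddy_ped_check(lines)
print(checked[0]['rel'], checked[0]['pedigree_parents'])  # 0.52 True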
def close(self):
    """in write mode, closing the handle adds the sentinel value into the
    queue and joins the thread executing the HTTP request. in read mode,
    this clears out the read response object so there are no references
    to it, and the resources can be reclaimed.
    """
    if self._mode.find('w') >= 0:
        self._queue.put(self._sentinel)
        self._thread.join(timeout=self._timeout)
        if self._thread.is_alive():
            raise RemoteFileException("Closing file timed out.")
        response = self._response_queue.get_nowait()
        try:
            response.raise_for_status()
        except Exception as e:
            raise RestApiError(cause=e)
    else:
        self._read_response = None
avg_line_len: 39.4 | score: 15.7
def query(query, params={}, epoch=None,
          expected_response_code=200, database=None):
    """Wrapper around ``InfluxDBClient.query()``."""
    db = get_db()
    database = database or settings.INFLUXDB_DATABASE
    return db.query(query, params, epoch, expected_response_code, database=database)
avg_line_len: 49.666667 | score: 15.666667
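One design note on the wrapper above: `params={}` is a mutable default argument, shared across calls in Python. The function never mutates it here, so it happens to be safe, but the usual defensive idiom looks like this:

def query_params_default(params=None):
    # Create a fresh dict per call instead of sharing one default instance.
    params = {} if params is None else params
    return params

print(query_params_default() is query_params_default())  # False: fresh each call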
def _get_all_relationships(self):
    """Return all relationships seen in GO Dag subset."""
    relationships_all = set()
    for goterm in self.go2obj.values():
        if goterm.relationship:
            relationships_all.update(goterm.relationship)
        if goterm.relationship_rev:
            relationships_all.update(goterm.relationship_rev)
    return relationships_all
| 44.666667 | 8.555556 |
def cf_array_from_list(values):
"""
Creates a CFArrayRef object from a list of CF* type objects.
:param values:
        A list of CF* type objects
:return:
A CFArrayRef
"""
length = len(values)
return CoreFoundation.CFArrayCreate(
CoreFoundation.kCFAllocatorDefault,
values,
length,
ffi.addressof(CoreFoundation.kCFTypeArrayCallBacks)
) | [
"def",
"cf_array_from_list",
"(",
"values",
")",
":",
"length",
"=",
"len",
"(",
"values",
")",
"return",
"CoreFoundation",
".",
"CFArrayCreate",
"(",
"CoreFoundation",
".",
"kCFAllocatorDefault",
",",
"values",
",",
"length",
",",
"ffi",
".",
"addressof",
"(",
"CoreFoundation",
".",
"kCFTypeArrayCallBacks",
")",
")"
]
| 24.944444 | 18.722222 |
def _find_playlist(self):
"""
Internal method to populate the object given the ``id`` or
``reference_id`` that has been set in the constructor.
"""
data = None
if self.id:
data = self.connection.get_item(
'find_playlist_by_id', playlist_id=self.id)
elif self.reference_id:
data = self.connection.get_item(
'find_playlist_by_reference_id',
reference_id=self.reference_id)
if data:
self._load(data) | [
"def",
"_find_playlist",
"(",
"self",
")",
":",
"data",
"=",
"None",
"if",
"self",
".",
"id",
":",
"data",
"=",
"self",
".",
"connection",
".",
"get_item",
"(",
"'find_playlist_by_id'",
",",
"playlist_id",
"=",
"self",
".",
"id",
")",
"elif",
"self",
".",
"reference_id",
":",
"data",
"=",
"self",
".",
"connection",
".",
"get_item",
"(",
"'find_playlist_by_reference_id'",
",",
"reference_id",
"=",
"self",
".",
"reference_id",
")",
"if",
"data",
":",
"self",
".",
"_load",
"(",
"data",
")"
]
| 33.125 | 14.5 |
def _flag_is_registered(self, flag_obj):
"""Checks whether a Flag object is registered under long name or short name.
Args:
flag_obj: Flag, the Flag instance to check for.
Returns:
bool, True iff flag_obj is registered under long name or short name.
"""
flag_dict = self._flags()
# Check whether flag_obj is registered under its long name.
name = flag_obj.name
if flag_dict.get(name, None) == flag_obj:
return True
# Check whether flag_obj is registered under its short name.
short_name = flag_obj.short_name
if (short_name is not None and
flag_dict.get(short_name, None) == flag_obj):
return True
return False | [
"def",
"_flag_is_registered",
"(",
"self",
",",
"flag_obj",
")",
":",
"flag_dict",
"=",
"self",
".",
"_flags",
"(",
")",
"# Check whether flag_obj is registered under its long name.",
"name",
"=",
"flag_obj",
".",
"name",
"if",
"flag_dict",
".",
"get",
"(",
"name",
",",
"None",
")",
"==",
"flag_obj",
":",
"return",
"True",
"# Check whether flag_obj is registered under its short name.",
"short_name",
"=",
"flag_obj",
".",
"short_name",
"if",
"(",
"short_name",
"is",
"not",
"None",
"and",
"flag_dict",
".",
"get",
"(",
"short_name",
",",
"None",
")",
"==",
"flag_obj",
")",
":",
"return",
"True",
"return",
"False"
]
| 33.65 | 17.9 |
def team_events(self, team, year=None, simple=False, keys=False):
"""
Get team events a team has participated in.
:param team: Team to get events for.
:param year: Year to get events from.
:param simple: Get only vital data.
:param keys: Get just the keys of the events. Set to True if you only need the keys of each event and not their full data.
:return: List of strings or Teams
"""
if year:
if keys:
return self._get('team/%s/events/%s/keys' % (self.team_key(team), year))
else:
return [Event(raw) for raw in self._get('team/%s/events/%s%s' % (self.team_key(team), year, '/simple' if simple else ''))]
else:
if keys:
return self._get('team/%s/events/keys' % self.team_key(team))
else:
return [Event(raw) for raw in self._get('team/%s/events%s' % (self.team_key(team), '/simple' if simple else ''))] | [
"def",
"team_events",
"(",
"self",
",",
"team",
",",
"year",
"=",
"None",
",",
"simple",
"=",
"False",
",",
"keys",
"=",
"False",
")",
":",
"if",
"year",
":",
"if",
"keys",
":",
"return",
"self",
".",
"_get",
"(",
"'team/%s/events/%s/keys'",
"%",
"(",
"self",
".",
"team_key",
"(",
"team",
")",
",",
"year",
")",
")",
"else",
":",
"return",
"[",
"Event",
"(",
"raw",
")",
"for",
"raw",
"in",
"self",
".",
"_get",
"(",
"'team/%s/events/%s%s'",
"%",
"(",
"self",
".",
"team_key",
"(",
"team",
")",
",",
"year",
",",
"'/simple'",
"if",
"simple",
"else",
"''",
")",
")",
"]",
"else",
":",
"if",
"keys",
":",
"return",
"self",
".",
"_get",
"(",
"'team/%s/events/keys'",
"%",
"self",
".",
"team_key",
"(",
"team",
")",
")",
"else",
":",
"return",
"[",
"Event",
"(",
"raw",
")",
"for",
"raw",
"in",
"self",
".",
"_get",
"(",
"'team/%s/events%s'",
"%",
"(",
"self",
".",
"team_key",
"(",
"team",
")",
",",
"'/simple'",
"if",
"simple",
"else",
"''",
")",
")",
"]"
]
| 48.8 | 29.4 |
def Start(self, hostname, port):
"""Starts the process status RPC server.
Args:
hostname (str): hostname or IP address to connect to for requests.
port (int): port to connect to for requests.
Returns:
bool: True if the RPC server was successfully started.
"""
if not self._Open(hostname, port):
return False
self._rpc_thread = threading.Thread(
name=self._THREAD_NAME, target=self._xmlrpc_server.serve_forever)
self._rpc_thread.start()
return True | [
"def",
"Start",
"(",
"self",
",",
"hostname",
",",
"port",
")",
":",
"if",
"not",
"self",
".",
"_Open",
"(",
"hostname",
",",
"port",
")",
":",
"return",
"False",
"self",
".",
"_rpc_thread",
"=",
"threading",
".",
"Thread",
"(",
"name",
"=",
"self",
".",
"_THREAD_NAME",
",",
"target",
"=",
"self",
".",
"_xmlrpc_server",
".",
"serve_forever",
")",
"self",
".",
"_rpc_thread",
".",
"start",
"(",
")",
"return",
"True"
]
| 29.294118 | 20.176471 |
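A runnable sketch of the same serve-in-a-thread pattern using the standard library's XML-RPC server; the host, port, and `ping` function are illustrative, not taken from the original class:

import threading
import xmlrpc.client
from xmlrpc.server import SimpleXMLRPCServer

server = SimpleXMLRPCServer(("localhost", 8125), logRequests=False, allow_none=True)
server.register_function(lambda: "ok", "ping")

# serve_forever() blocks, hence the dedicated thread, exactly as in Start()
rpc_thread = threading.Thread(name="rpc", target=server.serve_forever)
rpc_thread.daemon = True
rpc_thread.start()

proxy = xmlrpc.client.ServerProxy("http://localhost:8125")
print(proxy.ping())   # 'ok'
server.shutdown()     # unblocks serve_forever(); the thread then exits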
def _gen_property_table(self):
"""
2D array describing each registered property
together with headers - for use in __str__
"""
headers = ['Property Name', 'Type', 'Value', 'Default Value']
table = []
for propval in sorted(self._properties.itervalues(),
key=lambda pval: pval.name.upper()):
table.append([propval.name,
np.dtype(propval.dtype).name,
getattr(self, propval.name),
propval.default])
return table, headers | [
"def",
"_gen_property_table",
"(",
"self",
")",
":",
"headers",
"=",
"[",
"'Property Name'",
",",
"'Type'",
",",
"'Value'",
",",
"'Default Value'",
"]",
"table",
"=",
"[",
"]",
"for",
"propval",
"in",
"sorted",
"(",
"self",
".",
"_properties",
".",
"itervalues",
"(",
")",
",",
"key",
"=",
"lambda",
"pval",
":",
"pval",
".",
"name",
".",
"upper",
"(",
")",
")",
":",
"table",
".",
"append",
"(",
"[",
"propval",
".",
"name",
",",
"np",
".",
"dtype",
"(",
"propval",
".",
"dtype",
")",
".",
"name",
",",
"getattr",
"(",
"self",
",",
"propval",
".",
"name",
")",
",",
"propval",
".",
"default",
"]",
")",
"return",
"table",
",",
"headers"
]
| 34.8125 | 14.8125 |
def sponsor_image_url(sponsor, name):
"""Returns the corresponding url from the sponsors images"""
if sponsor.files.filter(name=name).exists():
# We avoid worrying about multiple matches by always
# returning the first one.
return sponsor.files.filter(name=name).first().item.url
return '' | [
"def",
"sponsor_image_url",
"(",
"sponsor",
",",
"name",
")",
":",
"if",
"sponsor",
".",
"files",
".",
"filter",
"(",
"name",
"=",
"name",
")",
".",
"exists",
"(",
")",
":",
"# We avoid worrying about multiple matches by always",
"# returning the first one.",
"return",
"sponsor",
".",
"files",
".",
"filter",
"(",
"name",
"=",
"name",
")",
".",
"first",
"(",
")",
".",
"item",
".",
"url",
"return",
"''"
]
| 45.571429 | 12.428571 |
def get_pulls_review_comments(self, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet, since=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/pulls/comments <http://developer.github.com/v3/pulls/comments>`_
:param sort: string
:param direction: string
:param since: datetime.datetime
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.PullRequestComment.PullRequestComment`
"""
assert sort is github.GithubObject.NotSet or isinstance(sort, (str, unicode)), sort
assert direction is github.GithubObject.NotSet or isinstance(direction, (str, unicode)), direction
assert since is github.GithubObject.NotSet or isinstance(since, datetime.datetime), since
url_parameters = dict()
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
if since is not github.GithubObject.NotSet:
url_parameters["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
return github.PaginatedList.PaginatedList(
github.IssueComment.IssueComment,
self._requester,
self.url + "/pulls/comments",
url_parameters
) | [
"def",
"get_pulls_review_comments",
"(",
"self",
",",
"sort",
"=",
"github",
".",
"GithubObject",
".",
"NotSet",
",",
"direction",
"=",
"github",
".",
"GithubObject",
".",
"NotSet",
",",
"since",
"=",
"github",
".",
"GithubObject",
".",
"NotSet",
")",
":",
"assert",
"sort",
"is",
"github",
".",
"GithubObject",
".",
"NotSet",
"or",
"isinstance",
"(",
"sort",
",",
"(",
"str",
",",
"unicode",
")",
")",
",",
"sort",
"assert",
"direction",
"is",
"github",
".",
"GithubObject",
".",
"NotSet",
"or",
"isinstance",
"(",
"direction",
",",
"(",
"str",
",",
"unicode",
")",
")",
",",
"direction",
"assert",
"since",
"is",
"github",
".",
"GithubObject",
".",
"NotSet",
"or",
"isinstance",
"(",
"since",
",",
"datetime",
".",
"datetime",
")",
",",
"since",
"url_parameters",
"=",
"dict",
"(",
")",
"if",
"sort",
"is",
"not",
"github",
".",
"GithubObject",
".",
"NotSet",
":",
"url_parameters",
"[",
"\"sort\"",
"]",
"=",
"sort",
"if",
"direction",
"is",
"not",
"github",
".",
"GithubObject",
".",
"NotSet",
":",
"url_parameters",
"[",
"\"direction\"",
"]",
"=",
"direction",
"if",
"since",
"is",
"not",
"github",
".",
"GithubObject",
".",
"NotSet",
":",
"url_parameters",
"[",
"\"since\"",
"]",
"=",
"since",
".",
"strftime",
"(",
"\"%Y-%m-%dT%H:%M:%SZ\"",
")",
"return",
"github",
".",
"PaginatedList",
".",
"PaginatedList",
"(",
"github",
".",
"IssueComment",
".",
"IssueComment",
",",
"self",
".",
"_requester",
",",
"self",
".",
"url",
"+",
"\"/pulls/comments\"",
",",
"url_parameters",
")"
]
| 55.333333 | 25.083333 |
def end_output (self, **kwargs):
"""Write XML end tag."""
self.xml_endtag(u"urlset")
self.xml_end_output()
self.close_fileoutput() | [
"def",
"end_output",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"xml_endtag",
"(",
"u\"urlset\"",
")",
"self",
".",
"xml_end_output",
"(",
")",
"self",
".",
"close_fileoutput",
"(",
")"
]
| 31.6 | 6.8 |
def authenticate(self, *args, **kwargs):
'''
    Authenticate the user against LDAP.
'''
# Get config
username = kwargs.get("username", None)
password = kwargs.get("password", None)
# Check user in Active Directory (authorization == None if can not connect to Active Directory Server)
authorization = self.ldap_link(username, password, mode='LOGIN')
if authorization:
# The user was validated in Active Directory
user = self.get_or_create_user(username, password)
# Get or get_create_user will revalidate the new user
if user:
# If the user has been properly validated
user.is_active = True
user.save()
else:
# Locate user in our system
user = User.objects.filter(username=username).first()
if user and not user.is_staff:
# If access was denied
if authorization is False or getattr(settings, "AD_LOCK_UNAUTHORIZED", False):
# Deactivate the user
user.is_active = False
user.save()
# No access and no user here
user = None
# Return the final decision
return user | [
"def",
"authenticate",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Get config",
"username",
"=",
"kwargs",
".",
"get",
"(",
"\"username\"",
",",
"None",
")",
"password",
"=",
"kwargs",
".",
"get",
"(",
"\"password\"",
",",
"None",
")",
"# Check user in Active Directory (authorization == None if can not connect to Active Directory Server)",
"authorization",
"=",
"self",
".",
"ldap_link",
"(",
"username",
",",
"password",
",",
"mode",
"=",
"'LOGIN'",
")",
"if",
"authorization",
":",
"# The user was validated in Active Directory",
"user",
"=",
"self",
".",
"get_or_create_user",
"(",
"username",
",",
"password",
")",
"# Get or get_create_user will revalidate the new user",
"if",
"user",
":",
"# If the user has been properly validated",
"user",
".",
"is_active",
"=",
"True",
"user",
".",
"save",
"(",
")",
"else",
":",
"# Locate user in our system",
"user",
"=",
"User",
".",
"objects",
".",
"filter",
"(",
"username",
"=",
"username",
")",
".",
"first",
"(",
")",
"if",
"user",
"and",
"not",
"user",
".",
"is_staff",
":",
"# If access was denied",
"if",
"authorization",
"is",
"False",
"or",
"getattr",
"(",
"settings",
",",
"\"AD_LOCK_UNAUTHORIZED\"",
",",
"False",
")",
":",
"# Deactivate the user",
"user",
".",
"is_active",
"=",
"False",
"user",
".",
"save",
"(",
")",
"# No access and no user here",
"user",
"=",
"None",
"# Return the final decision",
"return",
"user"
]
| 36.285714 | 19.771429 |
def get_value(self, label, takeable=False):
"""
Retrieve single value at passed index label
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
index : label
takeable : interpret the index as indexers, default False
Returns
-------
value : scalar value
"""
warnings.warn("get_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._get_value(label, takeable=takeable) | [
"def",
"get_value",
"(",
"self",
",",
"label",
",",
"takeable",
"=",
"False",
")",
":",
"warnings",
".",
"warn",
"(",
"\"get_value is deprecated and will be removed \"",
"\"in a future release. Please use \"",
"\".at[] or .iat[] accessors instead\"",
",",
"FutureWarning",
",",
"stacklevel",
"=",
"2",
")",
"return",
"self",
".",
"_get_value",
"(",
"label",
",",
"takeable",
"=",
"takeable",
")"
]
| 28.652174 | 20.73913 |
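The same deprecation-shim pattern in isolation, with a hypothetical module-level function standing in for the pandas method:

import warnings

def _get_value(label, data):
    return data[label]

def get_value(label, data):
    # warn, then delegate to the private implementation
    warnings.warn("get_value is deprecated; use _get_value instead",
                  FutureWarning, stacklevel=2)
    return _get_value(label, data)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    print(get_value("a", {"a": 1}))   # 1
    print(caught[0].category)         # <class 'FutureWarning'>

`stacklevel=2` makes the warning point at the caller rather than at the shim itself.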
def parse_python_version(output):
"""Parse a Python version output returned by `python --version`.
Return a dict with three keys: major, minor, and micro. Each value is a
string containing a version part.
Note: The micro part would be `'0'` if it's missing from the input string.
"""
version_line = output.split("\n", 1)[0]
version_pattern = re.compile(
r"""
^ # Beginning of line.
Python # Literally "Python".
\s # Space.
(?P<major>\d+) # Major = one or more digits.
\. # Dot.
(?P<minor>\d+) # Minor = one or more digits.
(?: # Unnamed group for dot-micro.
\. # Dot.
(?P<micro>\d+) # Micro = one or more digit.
)? # Micro is optional because pypa/pipenv#1893.
.* # Trailing garbage.
$ # End of line.
""",
re.VERBOSE,
)
match = version_pattern.match(version_line)
if not match:
return None
return match.groupdict(default="0") | [
"def",
"parse_python_version",
"(",
"output",
")",
":",
"version_line",
"=",
"output",
".",
"split",
"(",
"\"\\n\"",
",",
"1",
")",
"[",
"0",
"]",
"version_pattern",
"=",
"re",
".",
"compile",
"(",
"r\"\"\"\n ^ # Beginning of line.\n Python # Literally \"Python\".\n \\s # Space.\n (?P<major>\\d+) # Major = one or more digits.\n \\. # Dot.\n (?P<minor>\\d+) # Minor = one or more digits.\n (?: # Unnamed group for dot-micro.\n \\. # Dot.\n (?P<micro>\\d+) # Micro = one or more digit.\n )? # Micro is optional because pypa/pipenv#1893.\n .* # Trailing garbage.\n $ # End of line.\n \"\"\"",
",",
"re",
".",
"VERBOSE",
",",
")",
"match",
"=",
"version_pattern",
".",
"match",
"(",
"version_line",
")",
"if",
"not",
"match",
":",
"return",
"None",
"return",
"match",
".",
"groupdict",
"(",
"default",
"=",
"\"0\"",
")"
]
| 36.483871 | 14.967742 |
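A condensed, runnable version of the same named-group pattern; note how `groupdict(default="0")` supplies the missing micro part:

import re

version_pattern = re.compile(
    r"^Python\s(?P<major>\d+)\.(?P<minor>\d+)(?:\.(?P<micro>\d+))?.*$"
)

for line in ("Python 3.11.4", "Python 2.7"):
    print(version_pattern.match(line).groupdict(default="0"))
# {'major': '3', 'minor': '11', 'micro': '4'}
# {'major': '2', 'minor': '7', 'micro': '0'}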
def preprocess(content):
"""
    Pre-process the content: convert it to str (for py3) and replace the inline \r, \t, and \n characters with spaces.
"""
if six.PY2:
if not isinstance(content, unicode):
if isinstance(content, str):
_content = unicode(content, encoding=sys.stdin.encoding)
elif isinstance(content, int):
_content = unicode(content)
else:
_content = content
assert isinstance(_content, unicode)
elif six.PY3:
_content = str(content)
_content = re.sub(r'\r|\t|\n', ' ', _content)
return _content | [
"def",
"preprocess",
"(",
"content",
")",
":",
"if",
"six",
".",
"PY2",
":",
"if",
"not",
"isinstance",
"(",
"content",
",",
"unicode",
")",
":",
"if",
"isinstance",
"(",
"content",
",",
"str",
")",
":",
"_content",
"=",
"unicode",
"(",
"content",
",",
"encoding",
"=",
"sys",
".",
"stdin",
".",
"encoding",
")",
"elif",
"isinstance",
"(",
"content",
",",
"int",
")",
":",
"_content",
"=",
"unicode",
"(",
"content",
")",
"else",
":",
"_content",
"=",
"content",
"assert",
"isinstance",
"(",
"_content",
",",
"unicode",
")",
"elif",
"six",
".",
"PY3",
":",
"_content",
"=",
"str",
"(",
"content",
")",
"_content",
"=",
"re",
".",
"sub",
"(",
"r'\\r|\\t|\\n'",
",",
"' '",
",",
"_content",
")",
"return",
"_content"
]
| 30.238095 | 17.285714 |
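The core normalization step is a single `re.sub`; a quick Python 3 demonstration:

import re

raw = "col1\tcol2\r\nnext line"
flat = re.sub(r'\r|\t|\n', ' ', str(raw))
print(repr(flat))   # 'col1 col2  next line'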
def add_child(self, child, rangecheck=False):
"""Add a child feature to this feature."""
assert self.seqid == child.seqid, \
(
'seqid mismatch for feature {} ({} vs {})'.format(
self.fid, self.seqid, child.seqid
)
)
if rangecheck is True:
assert self._strand == child._strand, \
('child of feature {} has a different strand'.format(self.fid))
assert self._range.contains(child._range), \
(
'child of feature {} is not contained within its span '
'({}-{})'.format(self.fid, child.start, child.end)
)
if self.children is None:
self.children = list()
self.children.append(child)
self.children.sort() | [
"def",
"add_child",
"(",
"self",
",",
"child",
",",
"rangecheck",
"=",
"False",
")",
":",
"assert",
"self",
".",
"seqid",
"==",
"child",
".",
"seqid",
",",
"(",
"'seqid mismatch for feature {} ({} vs {})'",
".",
"format",
"(",
"self",
".",
"fid",
",",
"self",
".",
"seqid",
",",
"child",
".",
"seqid",
")",
")",
"if",
"rangecheck",
"is",
"True",
":",
"assert",
"self",
".",
"_strand",
"==",
"child",
".",
"_strand",
",",
"(",
"'child of feature {} has a different strand'",
".",
"format",
"(",
"self",
".",
"fid",
")",
")",
"assert",
"self",
".",
"_range",
".",
"contains",
"(",
"child",
".",
"_range",
")",
",",
"(",
"'child of feature {} is not contained within its span '",
"'({}-{})'",
".",
"format",
"(",
"self",
".",
"fid",
",",
"child",
".",
"start",
",",
"child",
".",
"end",
")",
")",
"if",
"self",
".",
"children",
"is",
"None",
":",
"self",
".",
"children",
"=",
"list",
"(",
")",
"self",
".",
"children",
".",
"append",
"(",
"child",
")",
"self",
".",
"children",
".",
"sort",
"(",
")"
]
| 41.25 | 17.05 |
def actualize(self):
"""
Removes from this forecast all the *Weather* objects having a reference
timestamp in the past with respect to the current timestamp
"""
current_time = timeutils.now(timeformat='unix')
for w in self._weathers:
if w.get_reference_time(timeformat='unix') < current_time:
self._weathers.remove(w) | [
"def",
"actualize",
"(",
"self",
")",
":",
"current_time",
"=",
"timeutils",
".",
"now",
"(",
"timeformat",
"=",
"'unix'",
")",
"for",
"w",
"in",
"self",
".",
"_weathers",
":",
"if",
"w",
".",
"get_reference_time",
"(",
"timeformat",
"=",
"'unix'",
")",
"<",
"current_time",
":",
"self",
".",
"_weathers",
".",
"remove",
"(",
"w",
")"
]
| 42.777778 | 15.444444 |
def _make_futures(futmap_keys, class_check, make_result_fn):
"""
Create futures and a futuremap for the keys in futmap_keys,
    and create a request-level future to be passed to the C API.
"""
futmap = {}
for key in futmap_keys:
if class_check is not None and not isinstance(key, class_check):
raise ValueError("Expected list of {}".format(type(class_check)))
futmap[key] = concurrent.futures.Future()
if not futmap[key].set_running_or_notify_cancel():
raise RuntimeError("Future was cancelled prematurely")
# Create an internal future for the entire request,
# this future will trigger _make_..._result() and set result/exception
# per topic,future in futmap.
f = concurrent.futures.Future()
f.add_done_callback(lambda f: make_result_fn(f, futmap))
if not f.set_running_or_notify_cancel():
raise RuntimeError("Future was cancelled prematurely")
return f, futmap | [
"def",
"_make_futures",
"(",
"futmap_keys",
",",
"class_check",
",",
"make_result_fn",
")",
":",
"futmap",
"=",
"{",
"}",
"for",
"key",
"in",
"futmap_keys",
":",
"if",
"class_check",
"is",
"not",
"None",
"and",
"not",
"isinstance",
"(",
"key",
",",
"class_check",
")",
":",
"raise",
"ValueError",
"(",
"\"Expected list of {}\"",
".",
"format",
"(",
"type",
"(",
"class_check",
")",
")",
")",
"futmap",
"[",
"key",
"]",
"=",
"concurrent",
".",
"futures",
".",
"Future",
"(",
")",
"if",
"not",
"futmap",
"[",
"key",
"]",
".",
"set_running_or_notify_cancel",
"(",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Future was cancelled prematurely\"",
")",
"# Create an internal future for the entire request,",
"# this future will trigger _make_..._result() and set result/exception",
"# per topic,future in futmap.",
"f",
"=",
"concurrent",
".",
"futures",
".",
"Future",
"(",
")",
"f",
".",
"add_done_callback",
"(",
"lambda",
"f",
":",
"make_result_fn",
"(",
"f",
",",
"futmap",
")",
")",
"if",
"not",
"f",
".",
"set_running_or_notify_cancel",
"(",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Future was cancelled prematurely\"",
")",
"return",
"f",
",",
"futmap"
]
| 44.521739 | 21.826087 |
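A self-contained sketch of the fan-out pattern: one request-level future whose done-callback distributes its outcome to per-key futures. The `make_result` body here is hypothetical; the original delegates to a caller-supplied `make_result_fn`:

import concurrent.futures

def make_result(parent, futmap):
    # fan the request-level outcome out to every per-key future
    try:
        results = parent.result()
    except Exception as exc:
        for fut in futmap.values():
            fut.set_exception(exc)
    else:
        for key, fut in futmap.items():
            fut.set_result(results[key])

futmap = {key: concurrent.futures.Future() for key in ("a", "b")}
for fut in futmap.values():
    fut.set_running_or_notify_cancel()

parent = concurrent.futures.Future()
parent.add_done_callback(lambda f: make_result(f, futmap))
parent.set_running_or_notify_cancel()

parent.set_result({"a": 1, "b": 2})   # in the original, the C layer does this
print(futmap["a"].result(), futmap["b"].result())   # 1 2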
def granger(vec1,vec2,order=10,rate=200,maxfreq=0):
"""
GRANGER
Provide a simple way of calculating the key quantities.
Usage:
F,pp,cohe,Fx2y,Fy2x,Fxy=granger(vec1,vec2,order,rate,maxfreq)
where:
F is a 1xN vector of frequencies
pp is a 2xN array of power spectra
cohe is the coherence between vec1 and vec2
Fx2y is the causality from vec1->vec2
Fy2x is the causality from vec2->vec1
Fxy is non-directional causality (cohe-Fx2y-Fy2x)
vec1 is a time series of length N
vec2 is another time series of length N
rate is the sampling rate, in Hz
maxfreq is the maximum frequency to be returned, in Hz
Version: 2011jul18
"""
from .bsmart import timefreq, pwcausalr
from scipy import array, size
if maxfreq==0: F=timefreq(vec1,rate) # Define the frequency points
else: F=array(list(range(0,maxfreq+1))) # Or just pick them
npts=size(F,0)
data=array([vec1,vec2])
F,pp,cohe,Fx2y,Fy2x,Fxy=pwcausalr(data,1,npts,order,rate,maxfreq)
return F,pp[0,:],cohe[0,:],Fx2y[0,:],Fy2x[0,:],Fxy[0,:] | [
"def",
"granger",
"(",
"vec1",
",",
"vec2",
",",
"order",
"=",
"10",
",",
"rate",
"=",
"200",
",",
"maxfreq",
"=",
"0",
")",
":",
"from",
".",
"bsmart",
"import",
"timefreq",
",",
"pwcausalr",
"from",
"scipy",
"import",
"array",
",",
"size",
"if",
"maxfreq",
"==",
"0",
":",
"F",
"=",
"timefreq",
"(",
"vec1",
",",
"rate",
")",
"# Define the frequency points",
"else",
":",
"F",
"=",
"array",
"(",
"list",
"(",
"range",
"(",
"0",
",",
"maxfreq",
"+",
"1",
")",
")",
")",
"# Or just pick them",
"npts",
"=",
"size",
"(",
"F",
",",
"0",
")",
"data",
"=",
"array",
"(",
"[",
"vec1",
",",
"vec2",
"]",
")",
"F",
",",
"pp",
",",
"cohe",
",",
"Fx2y",
",",
"Fy2x",
",",
"Fxy",
"=",
"pwcausalr",
"(",
"data",
",",
"1",
",",
"npts",
",",
"order",
",",
"rate",
",",
"maxfreq",
")",
"return",
"F",
",",
"pp",
"[",
"0",
",",
":",
"]",
",",
"cohe",
"[",
"0",
",",
":",
"]",
",",
"Fx2y",
"[",
"0",
",",
":",
"]",
",",
"Fy2x",
"[",
"0",
",",
":",
"]",
",",
"Fxy",
"[",
"0",
",",
":",
"]"
]
| 34.121212 | 18 |
def _add_notes_slide_part(cls, package, slide_part, notes_master_part):
"""
Create and return a new notes slide part that is fully related, but
has no shape content (i.e. placeholders not cloned).
"""
partname = package.next_partname('/ppt/notesSlides/notesSlide%d.xml')
content_type = CT.PML_NOTES_SLIDE
notes = CT_NotesSlide.new()
notes_slide_part = NotesSlidePart(
partname, content_type, notes, package
)
notes_slide_part.relate_to(notes_master_part, RT.NOTES_MASTER)
notes_slide_part.relate_to(slide_part, RT.SLIDE)
return notes_slide_part | [
"def",
"_add_notes_slide_part",
"(",
"cls",
",",
"package",
",",
"slide_part",
",",
"notes_master_part",
")",
":",
"partname",
"=",
"package",
".",
"next_partname",
"(",
"'/ppt/notesSlides/notesSlide%d.xml'",
")",
"content_type",
"=",
"CT",
".",
"PML_NOTES_SLIDE",
"notes",
"=",
"CT_NotesSlide",
".",
"new",
"(",
")",
"notes_slide_part",
"=",
"NotesSlidePart",
"(",
"partname",
",",
"content_type",
",",
"notes",
",",
"package",
")",
"notes_slide_part",
".",
"relate_to",
"(",
"notes_master_part",
",",
"RT",
".",
"NOTES_MASTER",
")",
"notes_slide_part",
".",
"relate_to",
"(",
"slide_part",
",",
"RT",
".",
"SLIDE",
")",
"return",
"notes_slide_part"
]
| 45.642857 | 16.214286 |
def paint_invalid_cell(self, row, col, color='MEDIUM VIOLET RED',
skip_cell=False):
"""
        Take a row and a column and render that cell in the given color.
"""
self.SetColLabelRenderer(col, MyColLabelRenderer('#1101e0'))
# SetCellRenderer doesn't work with table-based grid (HugeGrid class)
if not skip_cell:
self.SetCellRenderer(row, col, MyCustomRenderer(color)) | [
"def",
"paint_invalid_cell",
"(",
"self",
",",
"row",
",",
"col",
",",
"color",
"=",
"'MEDIUM VIOLET RED'",
",",
"skip_cell",
"=",
"False",
")",
":",
"self",
".",
"SetColLabelRenderer",
"(",
"col",
",",
"MyColLabelRenderer",
"(",
"'#1101e0'",
")",
")",
"# SetCellRenderer doesn't work with table-based grid (HugeGrid class)",
"if",
"not",
"skip_cell",
":",
"self",
".",
"SetCellRenderer",
"(",
"row",
",",
"col",
",",
"MyCustomRenderer",
"(",
"color",
")",
")"
]
| 45.666667 | 15.444444 |
def _to_zipfile(self, file_generator):
"""Convert files to zip archive.
:return: None
:rtype: :py:obj:`None`
"""
with zipfile.ZipFile(file_generator.to_path, mode="w", compression=zipfile.ZIP_DEFLATED) as outfile:
for f in file_generator:
outpath = self._output_path(f.source, file_generator.to_format, archive=True)
outfile.writestr(outpath, f.writestr(file_generator.to_format)) | [
"def",
"_to_zipfile",
"(",
"self",
",",
"file_generator",
")",
":",
"with",
"zipfile",
".",
"ZipFile",
"(",
"file_generator",
".",
"to_path",
",",
"mode",
"=",
"\"w\"",
",",
"compression",
"=",
"zipfile",
".",
"ZIP_DEFLATED",
")",
"as",
"outfile",
":",
"for",
"f",
"in",
"file_generator",
":",
"outpath",
"=",
"self",
".",
"_output_path",
"(",
"f",
".",
"source",
",",
"file_generator",
".",
"to_format",
",",
"archive",
"=",
"True",
")",
"outfile",
".",
"writestr",
"(",
"outpath",
",",
"f",
".",
"writestr",
"(",
"file_generator",
".",
"to_format",
")",
")"
]
| 50.666667 | 21.666667 |
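The `zipfile` usage in isolation; the archive name and member paths are illustrative:

import zipfile

with zipfile.ZipFile("bundle.zip", mode="w",
                     compression=zipfile.ZIP_DEFLATED) as outfile:
    # writestr() stores in-memory content under an archive-internal path
    outfile.writestr("converted/first.json", '{"id": 1}')
    outfile.writestr("converted/second.json", '{"id": 2}')

with zipfile.ZipFile("bundle.zip") as archive:
    print(archive.namelist())
# ['converted/first.json', 'converted/second.json']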
def getLockStatsDB(self):
"""Returns the number of active lock discriminated by database.
@return: : Dictionary of stats.
"""
info_dict = {'all': {},
'wait': {}}
cur = self._conn.cursor()
cur.execute("SELECT d.datname, l.granted, COUNT(*) FROM pg_database d "
"JOIN pg_locks l ON d.oid=l.database "
"GROUP BY d.datname, l.granted;")
rows = cur.fetchall()
for (db, granted, cnt) in rows:
info_dict['all'][db] = info_dict['all'].get(db, 0) + cnt
if not granted:
info_dict['wait'][db] = info_dict['wait'].get(db, 0) + cnt
return info_dict | [
"def",
"getLockStatsDB",
"(",
"self",
")",
":",
"info_dict",
"=",
"{",
"'all'",
":",
"{",
"}",
",",
"'wait'",
":",
"{",
"}",
"}",
"cur",
"=",
"self",
".",
"_conn",
".",
"cursor",
"(",
")",
"cur",
".",
"execute",
"(",
"\"SELECT d.datname, l.granted, COUNT(*) FROM pg_database d \"",
"\"JOIN pg_locks l ON d.oid=l.database \"",
"\"GROUP BY d.datname, l.granted;\"",
")",
"rows",
"=",
"cur",
".",
"fetchall",
"(",
")",
"for",
"(",
"db",
",",
"granted",
",",
"cnt",
")",
"in",
"rows",
":",
"info_dict",
"[",
"'all'",
"]",
"[",
"db",
"]",
"=",
"info_dict",
"[",
"'all'",
"]",
".",
"get",
"(",
"db",
",",
"0",
")",
"+",
"cnt",
"if",
"not",
"granted",
":",
"info_dict",
"[",
"'wait'",
"]",
"[",
"db",
"]",
"=",
"info_dict",
"[",
"'wait'",
"]",
".",
"get",
"(",
"db",
",",
"0",
")",
"+",
"cnt",
"return",
"info_dict"
]
| 39.388889 | 15.388889 |
def __do_grep(curr_line, pattern, **kwargs):
"""
Do grep on a single string.
See 'grep' docs for info about kwargs.
:param curr_line: a single line to test.
:param pattern: pattern to search.
:return: (matched, position, end_position).
"""
# currently found position
position = -1
end_pos = -1
# check if fixed strings mode
if kwargs.get('fixed_strings'):
# if case insensitive fix case
if kwargs.get('ignore_case'):
pattern = pattern.lower()
curr_line = curr_line.lower()
# if pattern is a single string, match it:
pattern_len = 0
if isinstance(pattern, _basestring):
position = curr_line.find(pattern)
pattern_len = len(pattern)
# if not, treat it as a list of strings and match any
else:
for p in pattern:
position = curr_line.find(p)
pattern_len = len(p)
if position != -1:
break
# calc end position
end_pos = position + pattern_len
# check if need to match whole words
if kwargs.get('words') and position != -1:
foundpart = (' ' + curr_line + ' ')[position:position+len(pattern)+2]
if _is_part_of_word(foundpart[0]):
position = -1
elif _is_part_of_word(foundpart[-1]):
position = -1
# if not fixed string, it means its a regex
else:
# set regex flags
flags = kwargs.get('regex_flags') or 0
flags |= re.IGNORECASE if kwargs.get('ignore_case') else 0
# add whole-words option
if kwargs.get('words'):
pattern = r'\b' + pattern + r'\b'
# do search
result = re.search(pattern, curr_line, flags)
# if found, set position
if result:
position = result.start()
end_pos = result.end()
# check if need to match whole line
if kwargs.get('line') and (position != 0 or end_pos != len(curr_line)):
position = -1
# parse return value
matched = position != -1
# if invert flag is on, invert value
if kwargs.get('invert'):
matched = not matched
# if position is -1 reset end pos as well
if not matched:
end_pos = -1
# return result
return matched, position, end_pos | [
"def",
"__do_grep",
"(",
"curr_line",
",",
"pattern",
",",
"*",
"*",
"kwargs",
")",
":",
"# currently found position",
"position",
"=",
"-",
"1",
"end_pos",
"=",
"-",
"1",
"# check if fixed strings mode",
"if",
"kwargs",
".",
"get",
"(",
"'fixed_strings'",
")",
":",
"# if case insensitive fix case",
"if",
"kwargs",
".",
"get",
"(",
"'ignore_case'",
")",
":",
"pattern",
"=",
"pattern",
".",
"lower",
"(",
")",
"curr_line",
"=",
"curr_line",
".",
"lower",
"(",
")",
"# if pattern is a single string, match it:",
"pattern_len",
"=",
"0",
"if",
"isinstance",
"(",
"pattern",
",",
"_basestring",
")",
":",
"position",
"=",
"curr_line",
".",
"find",
"(",
"pattern",
")",
"pattern_len",
"=",
"len",
"(",
"pattern",
")",
"# if not, treat it as a list of strings and match any",
"else",
":",
"for",
"p",
"in",
"pattern",
":",
"position",
"=",
"curr_line",
".",
"find",
"(",
"p",
")",
"pattern_len",
"=",
"len",
"(",
"p",
")",
"if",
"position",
"!=",
"-",
"1",
":",
"break",
"# calc end position",
"end_pos",
"=",
"position",
"+",
"pattern_len",
"# check if need to match whole words",
"if",
"kwargs",
".",
"get",
"(",
"'words'",
")",
"and",
"position",
"!=",
"-",
"1",
":",
"foundpart",
"=",
"(",
"' '",
"+",
"curr_line",
"+",
"' '",
")",
"[",
"position",
":",
"position",
"+",
"len",
"(",
"pattern",
")",
"+",
"2",
"]",
"if",
"_is_part_of_word",
"(",
"foundpart",
"[",
"0",
"]",
")",
":",
"position",
"=",
"-",
"1",
"elif",
"_is_part_of_word",
"(",
"foundpart",
"[",
"-",
"1",
"]",
")",
":",
"position",
"=",
"-",
"1",
"# if not fixed string, it means its a regex",
"else",
":",
"# set regex flags",
"flags",
"=",
"kwargs",
".",
"get",
"(",
"'regex_flags'",
")",
"or",
"0",
"flags",
"|=",
"re",
".",
"IGNORECASE",
"if",
"kwargs",
".",
"get",
"(",
"'ignore_case'",
")",
"else",
"0",
"# add whole-words option",
"if",
"kwargs",
".",
"get",
"(",
"'words'",
")",
":",
"pattern",
"=",
"r'\\b'",
"+",
"pattern",
"+",
"r'\\b'",
"# do search",
"result",
"=",
"re",
".",
"search",
"(",
"pattern",
",",
"curr_line",
",",
"flags",
")",
"# if found, set position",
"if",
"result",
":",
"position",
"=",
"result",
".",
"start",
"(",
")",
"end_pos",
"=",
"result",
".",
"end",
"(",
")",
"# check if need to match whole line",
"if",
"kwargs",
".",
"get",
"(",
"'line'",
")",
"and",
"(",
"position",
"!=",
"0",
"or",
"end_pos",
"!=",
"len",
"(",
"curr_line",
")",
")",
":",
"position",
"=",
"-",
"1",
"# parse return value",
"matched",
"=",
"position",
"!=",
"-",
"1",
"# if invert flag is on, invert value",
"if",
"kwargs",
".",
"get",
"(",
"'invert'",
")",
":",
"matched",
"=",
"not",
"matched",
"# if position is -1 reset end pos as well",
"if",
"not",
"matched",
":",
"end_pos",
"=",
"-",
"1",
"# return result",
"return",
"matched",
",",
"position",
",",
"end_pos"
]
| 28.04878 | 16.756098 |
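The two matching modes side by side, reduced to their essentials (output positions assume this sample line only):

import re

line = "error: disk error_rate high"

# regex mode with the `words` option: \b prevents matching inside error_rate
match = re.search(r'\berror\b', line)
print(match.start(), match.end())   # 0 5

# fixed-strings mode: plain substring search plus a manual end position
pos = line.find("error")
print(pos, pos + len("error"))      # 0 5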
def daemon_start(main, pidfile, daemon=True, workspace=None):
"""Start application in background mode if required and available. If not then in front mode.
"""
logger.debug("start daemon application pidfile={pidfile} daemon={daemon} workspace={workspace}.".format(pidfile=pidfile, daemon=daemon, workspace=workspace))
new_pid = os.getpid()
workspace = workspace or os.getcwd()
os.chdir(workspace)
daemon_flag = False
if pidfile and daemon:
old_pid = load_pid(pidfile)
if old_pid:
logger.debug("pidfile {pidfile} already exists, pid={pid}.".format(pidfile=pidfile, pid=old_pid))
# if old service is running, just exit.
if old_pid and is_running(old_pid):
error_message = "Service is running in process: {pid}.".format(pid=old_pid)
logger.error(error_message)
six.print_(error_message, file=os.sys.stderr)
os.sys.exit(95)
# clean old pid file.
clean_pid_file(pidfile)
# start as background mode if required and available.
if daemon and os.name == "posix":
make_basic_daemon()
daemon_flag = True
if daemon_flag:
logger.info("Start application in DAEMON mode, pidfile={pidfile} pid={pid}".format(pidfile=pidfile, pid=new_pid))
else:
logger.info("Start application in FRONT mode, pid={pid}.".format(pid=new_pid))
write_pidfile(pidfile)
atexit.register(clean_pid_file, pidfile)
main()
return | [
"def",
"daemon_start",
"(",
"main",
",",
"pidfile",
",",
"daemon",
"=",
"True",
",",
"workspace",
"=",
"None",
")",
":",
"logger",
".",
"debug",
"(",
"\"start daemon application pidfile={pidfile} daemon={daemon} workspace={workspace}.\"",
".",
"format",
"(",
"pidfile",
"=",
"pidfile",
",",
"daemon",
"=",
"daemon",
",",
"workspace",
"=",
"workspace",
")",
")",
"new_pid",
"=",
"os",
".",
"getpid",
"(",
")",
"workspace",
"=",
"workspace",
"or",
"os",
".",
"getcwd",
"(",
")",
"os",
".",
"chdir",
"(",
"workspace",
")",
"daemon_flag",
"=",
"False",
"if",
"pidfile",
"and",
"daemon",
":",
"old_pid",
"=",
"load_pid",
"(",
"pidfile",
")",
"if",
"old_pid",
":",
"logger",
".",
"debug",
"(",
"\"pidfile {pidfile} already exists, pid={pid}.\"",
".",
"format",
"(",
"pidfile",
"=",
"pidfile",
",",
"pid",
"=",
"old_pid",
")",
")",
"# if old service is running, just exit.",
"if",
"old_pid",
"and",
"is_running",
"(",
"old_pid",
")",
":",
"error_message",
"=",
"\"Service is running in process: {pid}.\"",
".",
"format",
"(",
"pid",
"=",
"old_pid",
")",
"logger",
".",
"error",
"(",
"error_message",
")",
"six",
".",
"print_",
"(",
"error_message",
",",
"file",
"=",
"os",
".",
"sys",
".",
"stderr",
")",
"os",
".",
"sys",
".",
"exit",
"(",
"95",
")",
"# clean old pid file.",
"clean_pid_file",
"(",
"pidfile",
")",
"# start as background mode if required and available.",
"if",
"daemon",
"and",
"os",
".",
"name",
"==",
"\"posix\"",
":",
"make_basic_daemon",
"(",
")",
"daemon_flag",
"=",
"True",
"if",
"daemon_flag",
":",
"logger",
".",
"info",
"(",
"\"Start application in DAEMON mode, pidfile={pidfile} pid={pid}\"",
".",
"format",
"(",
"pidfile",
"=",
"pidfile",
",",
"pid",
"=",
"new_pid",
")",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"Start application in FRONT mode, pid={pid}.\"",
".",
"format",
"(",
"pid",
"=",
"new_pid",
")",
")",
"write_pidfile",
"(",
"pidfile",
")",
"atexit",
".",
"register",
"(",
"clean_pid_file",
",",
"pidfile",
")",
"main",
"(",
")",
"return"
]
| 46.0625 | 22.0625 |
def _enqueue_fs_event(self, event):
"""Watchman filesystem event handler for BUILD/requirements.txt updates. Called via a thread."""
self._logger.info('enqueuing {} changes for subscription {}'
.format(len(event['files']), event['subscription']))
self._event_queue.put(event) | [
"def",
"_enqueue_fs_event",
"(",
"self",
",",
"event",
")",
":",
"self",
".",
"_logger",
".",
"info",
"(",
"'enqueuing {} changes for subscription {}'",
".",
"format",
"(",
"len",
"(",
"event",
"[",
"'files'",
"]",
")",
",",
"event",
"[",
"'subscription'",
"]",
")",
")",
"self",
".",
"_event_queue",
".",
"put",
"(",
"event",
")"
]
| 61 | 14.2 |
def classification_tikhonov(G, y, M, tau=0):
r"""Solve a classification problem on graph via Tikhonov minimization.
The function first transforms :math:`y` in logits :math:`Y`, then solves
.. math:: \operatorname*{arg min}_X \| M X - Y \|_2^2 + \tau \ tr(X^T L X)
if :math:`\tau > 0`, and
.. math:: \operatorname*{arg min}_X tr(X^T L X) \ \text{ s. t. } \ Y = M X
otherwise, where :math:`X` and :math:`Y` are logits.
The function returns the maximum of the logits.
Parameters
----------
G : :class:`pygsp.graphs.Graph`
y : array, length G.n_vertices
Measurements.
M : array of boolean, length G.n_vertices
Masking vector.
tau : float
Regularization parameter.
Returns
-------
logits : array, length G.n_vertices
The logits :math:`X`.
Examples
--------
>>> from pygsp import graphs, learning
>>> import matplotlib.pyplot as plt
>>>
>>> G = graphs.Logo()
Create a ground truth signal:
>>> signal = np.zeros(G.n_vertices)
>>> signal[G.info['idx_s']] = 1
>>> signal[G.info['idx_p']] = 2
Construct a measurement signal from a binary mask:
>>> rs = np.random.RandomState(42)
>>> mask = rs.uniform(0, 1, G.n_vertices) > 0.5
>>> measures = signal.copy()
>>> measures[~mask] = np.nan
Solve the classification problem by reconstructing the signal:
>>> recovery = learning.classification_tikhonov(G, measures, mask, tau=0)
Plot the results.
Note that we recover the class with ``np.argmax(recovery, axis=1)``.
>>> prediction = np.argmax(recovery, axis=1)
>>> fig, ax = plt.subplots(2, 3, sharey=True, figsize=(10, 6))
>>> _ = G.plot_signal(signal, ax=ax[0, 0], title='Ground truth')
>>> _ = G.plot_signal(measures, ax=ax[0, 1], title='Measurements')
>>> _ = G.plot_signal(prediction, ax=ax[0, 2], title='Recovered class')
>>> _ = G.plot_signal(recovery[:, 0], ax=ax[1, 0], title='Logit 0')
>>> _ = G.plot_signal(recovery[:, 1], ax=ax[1, 1], title='Logit 1')
>>> _ = G.plot_signal(recovery[:, 2], ax=ax[1, 2], title='Logit 2')
>>> _ = fig.tight_layout()
"""
y[M == False] = 0
Y = _to_logits(y.astype(np.int))
return regression_tikhonov(G, Y, M, tau) | [
"def",
"classification_tikhonov",
"(",
"G",
",",
"y",
",",
"M",
",",
"tau",
"=",
"0",
")",
":",
"y",
"[",
"M",
"==",
"False",
"]",
"=",
"0",
"Y",
"=",
"_to_logits",
"(",
"y",
".",
"astype",
"(",
"np",
".",
"int",
")",
")",
"return",
"regression_tikhonov",
"(",
"G",
",",
"Y",
",",
"M",
",",
"tau",
")"
]
| 31.557143 | 22.571429 |
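The `_to_logits` helper is not shown above; a plausible one-hot implementation consistent with the doctest behaviour (hypothetical, not the library's actual code):

import numpy as np

def to_logits(y):
    # one-hot encode integer class labels into an n_vertices x n_classes matrix
    logits = np.zeros((len(y), y.max() + 1))
    logits[np.arange(len(y)), y] = 1.0
    return logits

y = np.array([0, 2, 1, 0])
print(to_logits(y))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]
#  [1. 0. 0.]]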
def unescape(str):
"""Undoes the effects of the escape() function."""
out = ''
prev_backslash = False
for char in str:
if not prev_backslash and char == '\\':
prev_backslash = True
continue
out += char
prev_backslash = False
return out | [
"def",
"unescape",
"(",
"str",
")",
":",
"out",
"=",
"''",
"prev_backslash",
"=",
"False",
"for",
"char",
"in",
"str",
":",
"if",
"not",
"prev_backslash",
"and",
"char",
"==",
"'\\\\'",
":",
"prev_backslash",
"=",
"True",
"continue",
"out",
"+=",
"char",
"prev_backslash",
"=",
"False",
"return",
"out"
]
| 26.636364 | 15.909091 |
def subtract_afromb(*inputs, **kwargs):
"""Subtract stream a from stream b.
Returns:
list(IOTileReading)
"""
try:
value_a = inputs[0].pop()
value_b = inputs[1].pop()
return [IOTileReading(0, 0, value_b.value - value_a.value)]
except StreamEmptyError:
return [] | [
"def",
"subtract_afromb",
"(",
"*",
"inputs",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"value_a",
"=",
"inputs",
"[",
"0",
"]",
".",
"pop",
"(",
")",
"value_b",
"=",
"inputs",
"[",
"1",
"]",
".",
"pop",
"(",
")",
"return",
"[",
"IOTileReading",
"(",
"0",
",",
"0",
",",
"value_b",
".",
"value",
"-",
"value_a",
".",
"value",
")",
"]",
"except",
"StreamEmptyError",
":",
"return",
"[",
"]"
]
| 22.142857 | 19.285714 |
def replace_node_status(self, name, body, **kwargs):
"""
replace status of the specified Node
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_node_status(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Node (required)
:param V1Node body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1Node
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_node_status_with_http_info(name, body, **kwargs)
else:
(data) = self.replace_node_status_with_http_info(name, body, **kwargs)
return data | [
"def",
"replace_node_status",
"(",
"self",
",",
"name",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"replace_node_status_with_http_info",
"(",
"name",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"replace_node_status_with_http_info",
"(",
"name",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
"return",
"data"
]
| 62.375 | 35.291667 |
def spectrodir(self, filetype, **kwargs):
"""Returns :envvar:`SPECTRO_REDUX` or :envvar:`BOSS_SPECTRO_REDUX`
depending on the value of `run2d`.
Parameters
----------
filetype : str
File type parameter.
run2d : int or str
2D Reduction ID.
Returns
-------
spectrodir : str
Value of the appropriate environment variable.
"""
if str(kwargs['run2d']) in ('26', '103', '104'):
return os.environ['SPECTRO_REDUX']
else:
return os.environ['BOSS_SPECTRO_REDUX'] | [
"def",
"spectrodir",
"(",
"self",
",",
"filetype",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"str",
"(",
"kwargs",
"[",
"'run2d'",
"]",
")",
"in",
"(",
"'26'",
",",
"'103'",
",",
"'104'",
")",
":",
"return",
"os",
".",
"environ",
"[",
"'SPECTRO_REDUX'",
"]",
"else",
":",
"return",
"os",
".",
"environ",
"[",
"'BOSS_SPECTRO_REDUX'",
"]"
]
| 29.5 | 16.15 |
def register(self, entry_point):
"""Register an extension
:param str entry_point: extension to register (entry point syntax).
:raise: ValueError if already registered.
"""
if entry_point in self.registered_extensions:
raise ValueError('Extension already registered')
ep = EntryPoint.parse(entry_point)
if ep.name in self.names():
raise ValueError('An extension with the same name already exist')
ext = self._load_one_plugin(ep, False, (), {}, False)
self.extensions.append(ext)
if self._extensions_by_name is not None:
self._extensions_by_name[ext.name] = ext
self.registered_extensions.insert(0, entry_point) | [
"def",
"register",
"(",
"self",
",",
"entry_point",
")",
":",
"if",
"entry_point",
"in",
"self",
".",
"registered_extensions",
":",
"raise",
"ValueError",
"(",
"'Extension already registered'",
")",
"ep",
"=",
"EntryPoint",
".",
"parse",
"(",
"entry_point",
")",
"if",
"ep",
".",
"name",
"in",
"self",
".",
"names",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'An extension with the same name already exist'",
")",
"ext",
"=",
"self",
".",
"_load_one_plugin",
"(",
"ep",
",",
"False",
",",
"(",
")",
",",
"{",
"}",
",",
"False",
")",
"self",
".",
"extensions",
".",
"append",
"(",
"ext",
")",
"if",
"self",
".",
"_extensions_by_name",
"is",
"not",
"None",
":",
"self",
".",
"_extensions_by_name",
"[",
"ext",
".",
"name",
"]",
"=",
"ext",
"self",
".",
"registered_extensions",
".",
"insert",
"(",
"0",
",",
"entry_point",
")"
]
| 37.842105 | 18.526316 |
def complete_watch(self, text, *_):
""" Autocomplete for watch """
return [t + " " for t in self.engine.cached_descriptions if t.startswith(text)] | [
"def",
"complete_watch",
"(",
"self",
",",
"text",
",",
"*",
"_",
")",
":",
"return",
"[",
"t",
"+",
"\" \"",
"for",
"t",
"in",
"self",
".",
"engine",
".",
"cached_descriptions",
"if",
"t",
".",
"startswith",
"(",
"text",
")",
"]"
]
| 53.333333 | 17.333333 |
def isPositiveStrand(self):
"""
Check if this genomic region is on the positive strand.
:return: True if this element is on the positive strand
"""
if self.strand is None and self.DEFAULT_STRAND == self.POSITIVE_STRAND:
return True
return self.strand == self.POSITIVE_STRAND | [
"def",
"isPositiveStrand",
"(",
"self",
")",
":",
"if",
"self",
".",
"strand",
"is",
"None",
"and",
"self",
".",
"DEFAULT_STRAND",
"==",
"self",
".",
"POSITIVE_STRAND",
":",
"return",
"True",
"return",
"self",
".",
"strand",
"==",
"self",
".",
"POSITIVE_STRAND"
]
| 33 | 17.222222 |
def diff(self, dt=None, abs=True):
"""
        Returns the difference between two Time objects as a Duration.
:type dt: Time or None
:param abs: Whether to return an absolute interval or not
:type abs: bool
:rtype: Duration
"""
if dt is None:
dt = pendulum.now().time()
else:
dt = self.__class__(dt.hour, dt.minute, dt.second, dt.microsecond)
us1 = (
self.hour * SECS_PER_HOUR + self.minute * SECS_PER_MIN + self.second
) * USECS_PER_SEC
us2 = (
dt.hour * SECS_PER_HOUR + dt.minute * SECS_PER_MIN + dt.second
) * USECS_PER_SEC
klass = Duration
if abs:
klass = AbsoluteDuration
return klass(microseconds=us2 - us1) | [
"def",
"diff",
"(",
"self",
",",
"dt",
"=",
"None",
",",
"abs",
"=",
"True",
")",
":",
"if",
"dt",
"is",
"None",
":",
"dt",
"=",
"pendulum",
".",
"now",
"(",
")",
".",
"time",
"(",
")",
"else",
":",
"dt",
"=",
"self",
".",
"__class__",
"(",
"dt",
".",
"hour",
",",
"dt",
".",
"minute",
",",
"dt",
".",
"second",
",",
"dt",
".",
"microsecond",
")",
"us1",
"=",
"(",
"self",
".",
"hour",
"*",
"SECS_PER_HOUR",
"+",
"self",
".",
"minute",
"*",
"SECS_PER_MIN",
"+",
"self",
".",
"second",
")",
"*",
"USECS_PER_SEC",
"us2",
"=",
"(",
"dt",
".",
"hour",
"*",
"SECS_PER_HOUR",
"+",
"dt",
".",
"minute",
"*",
"SECS_PER_MIN",
"+",
"dt",
".",
"second",
")",
"*",
"USECS_PER_SEC",
"klass",
"=",
"Duration",
"if",
"abs",
":",
"klass",
"=",
"AbsoluteDuration",
"return",
"klass",
"(",
"microseconds",
"=",
"us2",
"-",
"us1",
")"
]
| 26.655172 | 23.206897 |
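The microseconds-since-midnight arithmetic on its own, using the same constants the method assumes:

SECS_PER_HOUR, SECS_PER_MIN, USECS_PER_SEC = 3600, 60, 1_000_000

def usecs_of_day(hour, minute, second):
    # microseconds elapsed since midnight, as computed for us1/us2 above
    return (hour * SECS_PER_HOUR + minute * SECS_PER_MIN + second) * USECS_PER_SEC

us1 = usecs_of_day(8, 30, 0)
us2 = usecs_of_day(9, 45, 30)
print((us2 - us1) / USECS_PER_SEC)   # 4530.0 seconds, i.e. 1h 15m 30s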
def average_values(self, *args, **kwargs) -> float:
"""Average the actual values of the |Variable| object.
For 0-dimensional |Variable| objects, the result of method
|Variable.average_values| equals |Variable.value|. The
following example shows this for the sloppily defined class
`SoilMoisture`:
>>> from hydpy.core.variabletools import Variable
>>> class SoilMoisture(Variable):
... NDIM = 0
... TYPE = float
... refweigths = None
... availablemasks = None
... __hydpy__connect_variable2subgroup__ = None
... initinfo = None
>>> sm = SoilMoisture(None)
>>> sm.value = 200.0
>>> sm.average_values()
200.0
When the dimensionality of this class is increased to one,
applying method |Variable.average_values| results in the
following error:
>>> SoilMoisture.NDIM = 1
>>> import numpy
>>> SoilMoisture.shape = (3,)
>>> SoilMoisture.value = numpy.array([200.0, 400.0, 500.0])
>>> sm.average_values()
Traceback (most recent call last):
...
AttributeError: While trying to calculate the mean value \
of variable `soilmoisture`, the following error occurred: Variable \
`soilmoisture` does not define any weighting coefficients.
So model developers have to define another (in this case
1-dimensional) |Variable| subclass (usually a |Parameter|
subclass), and make the relevant object available via property
|Variable.refweights|:
>>> class Area(Variable):
... NDIM = 1
... shape = (3,)
... value = numpy.array([1.0, 1.0, 2.0])
... __hydpy__connect_variable2subgroup__ = None
... initinfo = None
>>> area = Area(None)
>>> SoilMoisture.refweights = property(lambda self: area)
>>> sm.average_values()
400.0
In the examples above, all single entries of `values` are relevant,
which is the default case. However, subclasses of |Variable| can
define an alternative mask, allowing to make some entries
irrelevant. Assume for example, that our `SoilMoisture` object
contains three single values, each one associated with a specific
hydrological response unit (hru). To indicate that soil moisture
is undefined for the third unit, (maybe because it is a water area),
we set the third entry of the verification mask to |False|:
>>> from hydpy.core.masktools import DefaultMask
>>> class Soil(DefaultMask):
... @classmethod
... def new(cls, variable, **kwargs):
... return cls.array2mask([True, True, False])
>>> SoilMoisture.mask = Soil()
>>> sm.average_values()
300.0
Alternatively, method |Variable.average_values| accepts additional
masking information as positional or keyword arguments. Therefore,
the corresponding model must implement some alternative masks,
which are provided by property |Variable.availablemasks|.
We mock this property with a new |Masks| object, handling one
mask for flat soils (only the first hru), one mask for deep soils
(only the second hru), and one mask for water areas (only the
third hru):
>>> class FlatSoil(DefaultMask):
... @classmethod
... def new(cls, variable, **kwargs):
... return cls.array2mask([True, False, False])
>>> class DeepSoil(DefaultMask):
... @classmethod
... def new(cls, variable, **kwargs):
... return cls.array2mask([False, True, False])
>>> class Water(DefaultMask):
... @classmethod
... def new(cls, variable, **kwargs):
... return cls.array2mask([False, False, True])
>>> from hydpy.core import masktools
>>> class Masks(masktools.Masks):
... CLASSES = (FlatSoil,
... DeepSoil,
... Water)
>>> SoilMoisture.availablemasks = Masks(None)
One can pass either the mask classes themselves or their names:
>>> sm.average_values(sm.availablemasks.flatsoil)
200.0
>>> sm.average_values('deepsoil')
400.0
Both variants can be combined:
>>> sm.average_values(sm.availablemasks.deepsoil, 'flatsoil')
300.0
The following error happens if the general mask of the variable
does not contain the given masks:
>>> sm.average_values('flatsoil', 'water')
Traceback (most recent call last):
...
ValueError: While trying to calculate the mean value of variable \
`soilmoisture`, the following error occurred: Based on the arguments \
`('flatsoil', 'water')` and `{}` the mask `CustomMask([ True, False, True])` \
has been determined, which is not a submask of `Soil([ True, True, False])`.
Applying masks with custom options is also supported. One can change
the behaviour of the following mask via the argument `complete`:
>>> class AllOrNothing(DefaultMask):
... @classmethod
... def new(cls, variable, complete):
... if complete:
... bools = [True, True, True]
... else:
... bools = [False, False, False]
... return cls.array2mask(bools)
>>> class Masks(Masks):
... CLASSES = (FlatSoil,
... DeepSoil,
... Water,
... AllOrNothing)
>>> SoilMoisture.availablemasks = Masks(None)
Again, one can apply the mask class directly (but note that one
has to pass the relevant variable as the first argument.):
>>> sm.average_values( # doctest: +ELLIPSIS
... sm.availablemasks.allornothing(sm, complete=True))
Traceback (most recent call last):
...
ValueError: While trying to...
Alternatively, one can pass the mask name as a keyword and pack
the mask's options into a |dict| object:
>>> sm.average_values(allornothing={'complete': False})
nan
You can combine all variants explained above:
>>> sm.average_values(
... 'deepsoil', flatsoil={}, allornothing={'complete': False})
300.0
"""
try:
if not self.NDIM:
return self.value
mask = self.get_submask(*args, **kwargs)
if numpy.any(mask):
weights = self.refweights[mask]
return numpy.sum(weights*self[mask])/numpy.sum(weights)
return numpy.nan
except BaseException:
objecttools.augment_excmessage(
f'While trying to calculate the mean value of variable '
f'{objecttools.devicephrase(self)}') | [
"def",
"average_values",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"->",
"float",
":",
"try",
":",
"if",
"not",
"self",
".",
"NDIM",
":",
"return",
"self",
".",
"value",
"mask",
"=",
"self",
".",
"get_submask",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"numpy",
".",
"any",
"(",
"mask",
")",
":",
"weights",
"=",
"self",
".",
"refweights",
"[",
"mask",
"]",
"return",
"numpy",
".",
"sum",
"(",
"weights",
"*",
"self",
"[",
"mask",
"]",
")",
"/",
"numpy",
".",
"sum",
"(",
"weights",
")",
"return",
"numpy",
".",
"nan",
"except",
"BaseException",
":",
"objecttools",
".",
"augment_excmessage",
"(",
"f'While trying to calculate the mean value of variable '",
"f'{objecttools.devicephrase(self)}'",
")"
]
| 39.94186 | 20.133721 |
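The masked weighted mean at the heart of the method, reproduced with plain NumPy on the docstring's own numbers:

import numpy as np

values = np.array([200.0, 400.0, 500.0])
weights = np.array([1.0, 1.0, 2.0])    # the refweights
mask = np.array([True, True, False])   # third unit masked out

w = weights[mask]
print(np.sum(w * values[mask]) / np.sum(w))   # 300.0, matching the doctest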
def _get_content_range(start: Optional[int], end: Optional[int], total: int) -> str:
"""Returns a suitable Content-Range header:
>>> print(_get_content_range(None, 1, 4))
bytes 0-0/4
>>> print(_get_content_range(1, 3, 4))
bytes 1-2/4
>>> print(_get_content_range(None, None, 4))
bytes 0-3/4
"""
start = start or 0
end = (end or total) - 1
return "bytes %s-%s/%s" % (start, end, total) | [
"def",
"_get_content_range",
"(",
"start",
":",
"Optional",
"[",
"int",
"]",
",",
"end",
":",
"Optional",
"[",
"int",
"]",
",",
"total",
":",
"int",
")",
"->",
"str",
":",
"start",
"=",
"start",
"or",
"0",
"end",
"=",
"(",
"end",
"or",
"total",
")",
"-",
"1",
"return",
"\"bytes %s-%s/%s\"",
"%",
"(",
"start",
",",
"end",
",",
"total",
")"
]
| 32.076923 | 16.384615 |
def register_all_add_grad(
add_grad_function, arg_types, exclude=(), ignore_existing=False):
"""Register a gradient adder for all combinations of given types.
This is a convenience shorthand for calling register_add_grad when registering
gradient adders for multiple types that can be interchanged for the purpose
of addition.
Args:
add_grad_function: A gradient adder, see register_add_grad.
arg_types: List of Python type objects. The gradient adder will be
registered for all pairs of these types.
exclude: Optional list of type tuples to exclude.
ignore_existing: Boolean. Whether to silently skip argument pairs that were
already registered.
"""
for t1 in arg_types:
for t2 in arg_types:
if (t1, t2) in exclude:
continue
if ignore_existing and (t1, t2) in grad_adders:
continue
register_add_grad(t1, t2, add_grad_function) | [
"def",
"register_all_add_grad",
"(",
"add_grad_function",
",",
"arg_types",
",",
"exclude",
"=",
"(",
")",
",",
"ignore_existing",
"=",
"False",
")",
":",
"for",
"t1",
"in",
"arg_types",
":",
"for",
"t2",
"in",
"arg_types",
":",
"if",
"(",
"t1",
",",
"t2",
")",
"in",
"exclude",
":",
"continue",
"if",
"ignore_existing",
"and",
"(",
"t1",
",",
"t2",
")",
"in",
"grad_adders",
":",
"continue",
"register_add_grad",
"(",
"t1",
",",
"t2",
",",
"add_grad_function",
")"
]
| 38.73913 | 21.782609 |
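A self-contained sketch of the registry this helper populates; `grad_adders` and `register_add_grad` are reconstructed here under minimal assumptions about the surrounding module:

import itertools

grad_adders = {}

def register_add_grad(t1, t2, fn):
    grad_adders[(t1, t2)] = fn

def register_all_add_grad(fn, arg_types, exclude=(), ignore_existing=False):
    # register fn for every ordered pair of types, honouring the filters
    for t1, t2 in itertools.product(arg_types, repeat=2):
        if (t1, t2) in exclude:
            continue
        if ignore_existing and (t1, t2) in grad_adders:
            continue
        register_add_grad(t1, t2, fn)

register_all_add_grad(lambda a, b: a + b, (int, float), exclude=((int, int),))
print(len(grad_adders))              # 3 of the 4 ordered pairs
print((int, int) in grad_adders)     # False, it was excluded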
def write(name, value):
"""Temporarily change or set the environment variable during the execution of a function.
Args:
name: The name of the environment variable
value: A value to set for the environment variable
Returns:
The function return value.
"""
def wrapped(func):
@functools.wraps(func)
def _decorator(*args, **kwargs):
existing_env = core.read(name, allow_none=True)
core.write(name, value)
func_val = func(*args, **kwargs)
core.write(name, existing_env)
return func_val
return _decorator
return wrapped | [
"def",
"write",
"(",
"name",
",",
"value",
")",
":",
"def",
"wrapped",
"(",
"func",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"_decorator",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"existing_env",
"=",
"core",
".",
"read",
"(",
"name",
",",
"allow_none",
"=",
"True",
")",
"core",
".",
"write",
"(",
"name",
",",
"value",
")",
"func_val",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"core",
".",
"write",
"(",
"name",
",",
"existing_env",
")",
"return",
"func_val",
"return",
"_decorator",
"return",
"wrapped"
]
| 31.4 | 14.9 |
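A standalone variant of the decorator, with `os.environ` standing in for the `core` module; it restores the variable in a `finally` block, which (unlike the version above) also restores it when the wrapped function raises:

import functools
import os

def write_env(name, value):
    def wrapped(func):
        @functools.wraps(func)
        def _decorator(*args, **kwargs):
            existing = os.environ.get(name)
            os.environ[name] = value
            try:
                return func(*args, **kwargs)
            finally:
                # undo the override even on exceptions
                if existing is None:
                    os.environ.pop(name, None)
                else:
                    os.environ[name] = existing
        return _decorator
    return wrapped

@write_env("STAGE", "test")
def stage():
    return os.environ["STAGE"]

print(stage())                  # 'test'
print(os.environ.get("STAGE"))  # None, the override was undone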
def flip_coords(X,loop):
""" Align circulation with z-axis """
if(loop[0]==1):
return np.array(map(lambda i: np.array([i[2],i[1],i[0],i[5],i[4],i[3]]),X))
else:
return X | [
"def",
"flip_coords",
"(",
"X",
",",
"loop",
")",
":",
"if",
"(",
"loop",
"[",
"0",
"]",
"==",
"1",
")",
":",
"return",
"np",
".",
"array",
"(",
"map",
"(",
"lambda",
"i",
":",
"np",
".",
"array",
"(",
"[",
"i",
"[",
"2",
"]",
",",
"i",
"[",
"1",
"]",
",",
"i",
"[",
"0",
"]",
",",
"i",
"[",
"5",
"]",
",",
"i",
"[",
"4",
"]",
",",
"i",
"[",
"3",
"]",
"]",
")",
",",
"X",
")",
")",
"else",
":",
"return",
"X"
]
| 32 | 22.5 |
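Note that in Python 3 the `map` call returns an iterator, so `np.array(map(...))` no longer builds the intended array; fancy indexing is the vectorized, version-independent equivalent:

import numpy as np

X = np.arange(12).reshape(2, 6)
# swap columns 0<->2 and 3<->5 in one shot, matching the per-row lambda
flipped = X[:, [2, 1, 0, 5, 4, 3]]
print(flipped)
# [[ 2  1  0  5  4  3]
#  [ 8  7  6 11 10  9]]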
def get_symbols(node, ctx_types=(ast.Load, ast.Store)):
'''
Returns all symbols defined in an ast node.
    If ctx_types is given, restrict the symbols to ones with that context.
:param node: ast node
:param ctx_types: type or tuple of types that may be found assigned to the `ctx` attribute of
an ast Name node.
'''
gen = SymbolVisitor(ctx_types)
return gen.visit(node) | [
"def",
"get_symbols",
"(",
"node",
",",
"ctx_types",
"=",
"(",
"ast",
".",
"Load",
",",
"ast",
".",
"Store",
")",
")",
":",
"gen",
"=",
"SymbolVisitor",
"(",
"ctx_types",
")",
"return",
"gen",
".",
"visit",
"(",
"node",
")"
]
| 33.384615 | 25.076923 |
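A usage sketch for get_symbols above. SymbolVisitor lives elsewhere in the source module; the stand-in below only illustrates the assumed behaviour (collect Name ids whose ctx matches):

import ast

class SymbolVisitor(ast.NodeVisitor):  # hypothetical stand-in
    def __init__(self, ctx_types):
        self.ctx_types = ctx_types
        self.symbols = set()

    def visit_Name(self, node):
        if isinstance(node.ctx, self.ctx_types):
            self.symbols.add(node.id)

    def visit(self, node):
        super().visit(node)
        return self.symbols

tree = ast.parse("total = price * qty")
print(get_symbols(tree, ast.Store))  # {'total'}
print(get_symbols(tree, ast.Load))   # {'price', 'qty'} (set order varies)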
def map(self, data_source_factory, timeout=0, on_timeout="local_mode"):
"""Sends tasks to workers and awaits the responses.
When all the responses are received, reduces them and returns the result.
        If timeout is set greater than 0, the producer will stop waiting for workers once the time limit has passed.
        If on_timeout is set to "local_mode", the producer will then run the tasks locally.
        If on_timeout is set to "fail", the producer will then raise TimeOutException.
"""
def local_launch():
print "Local launch"
return self.reduce_fn(
self.map_fn(data_source_factory.build_data_source())
)
if self.local_mode:
return local_launch()
for index, factory in enumerate(self.divide(data_source_factory)):
self.unprocessed_request_num += 1
self.logging.info("Sending %d-th message with %d elements" % (index + 1, factory.length()))
self.logging.info("len(data) = %d" % len(pickle.dumps(factory)))
self.channel.basic_publish(exchange='',
routing_key=self.routing_key(),
properties=pika.BasicProperties(
reply_to=self.callback_queue,
correlation_id="_".join((self.correlation_id, str(index))),
),
body=pickle.dumps(factory))
self.logging.info("Waiting...")
time_limit_exceeded = [False]
def on_timeout_func():
print "Timeout!!"
self.logging.warning("Timeout!")
time_limit_exceeded[0] = True
if timeout > 0:
self.timer = Timer(timeout, on_timeout_func)
self.timer.start()
while self.unprocessed_request_num:
if time_limit_exceeded[0]:
if on_timeout == "local_mode":
return local_launch()
assert on_timeout == "fail", "Invalid value for on_timeout: %s" % on_timeout
raise TimeOutException()
self.connection.process_data_events()
self.logging.info("Responses: %s" % str(self.responses))
return self.reduce_fn(self.responses) | [
"def",
"map",
"(",
"self",
",",
"data_source_factory",
",",
"timeout",
"=",
"0",
",",
"on_timeout",
"=",
"\"local_mode\"",
")",
":",
"def",
"local_launch",
"(",
")",
":",
"print",
"\"Local launch\"",
"return",
"self",
".",
"reduce_fn",
"(",
"self",
".",
"map_fn",
"(",
"data_source_factory",
".",
"build_data_source",
"(",
")",
")",
")",
"if",
"self",
".",
"local_mode",
":",
"return",
"local_launch",
"(",
")",
"for",
"index",
",",
"factory",
"in",
"enumerate",
"(",
"self",
".",
"divide",
"(",
"data_source_factory",
")",
")",
":",
"self",
".",
"unprocessed_request_num",
"+=",
"1",
"self",
".",
"logging",
".",
"info",
"(",
"\"Sending %d-th message with %d elements\"",
"%",
"(",
"index",
"+",
"1",
",",
"factory",
".",
"length",
"(",
")",
")",
")",
"self",
".",
"logging",
".",
"info",
"(",
"\"len(data) = %d\"",
"%",
"len",
"(",
"pickle",
".",
"dumps",
"(",
"factory",
")",
")",
")",
"self",
".",
"channel",
".",
"basic_publish",
"(",
"exchange",
"=",
"''",
",",
"routing_key",
"=",
"self",
".",
"routing_key",
"(",
")",
",",
"properties",
"=",
"pika",
".",
"BasicProperties",
"(",
"reply_to",
"=",
"self",
".",
"callback_queue",
",",
"correlation_id",
"=",
"\"_\"",
".",
"join",
"(",
"(",
"self",
".",
"correlation_id",
",",
"str",
"(",
"index",
")",
")",
")",
",",
")",
",",
"body",
"=",
"pickle",
".",
"dumps",
"(",
"factory",
")",
")",
"self",
".",
"logging",
".",
"info",
"(",
"\"Waiting...\"",
")",
"time_limit_exceeded",
"=",
"[",
"False",
"]",
"def",
"on_timeout_func",
"(",
")",
":",
"print",
"\"Timeout!!\"",
"self",
".",
"logging",
".",
"warning",
"(",
"\"Timeout!\"",
")",
"time_limit_exceeded",
"[",
"0",
"]",
"=",
"True",
"if",
"timeout",
">",
"0",
":",
"self",
".",
"timer",
"=",
"Timer",
"(",
"timeout",
",",
"on_timeout_func",
")",
"self",
".",
"timer",
".",
"start",
"(",
")",
"while",
"self",
".",
"unprocessed_request_num",
":",
"if",
"time_limit_exceeded",
"[",
"0",
"]",
":",
"if",
"on_timeout",
"==",
"\"local_mode\"",
":",
"return",
"local_launch",
"(",
")",
"assert",
"on_timeout",
"==",
"\"fail\"",
",",
"\"Invalid value for on_timeout: %s\"",
"%",
"on_timeout",
"raise",
"TimeOutException",
"(",
")",
"self",
".",
"connection",
".",
"process_data_events",
"(",
")",
"self",
".",
"logging",
".",
"info",
"(",
"\"Responses: %s\"",
"%",
"str",
"(",
"self",
".",
"responses",
")",
")",
"return",
"self",
".",
"reduce_fn",
"(",
"self",
".",
"responses",
")"
]
| 43.037037 | 23.962963 |
def delete(self, container, del_objects=False):
"""
Deletes the specified container. If the container contains objects, the
command will fail unless 'del_objects' is passed as True. In that case,
each object will be deleted first, and then the container.
"""
return self._manager.delete(container, del_objects=del_objects) | [
"def",
"delete",
"(",
"self",
",",
"container",
",",
"del_objects",
"=",
"False",
")",
":",
"return",
"self",
".",
"_manager",
".",
"delete",
"(",
"container",
",",
"del_objects",
"=",
"del_objects",
")"
]
| 52 | 20.285714 |
def reverse_url(self, scheme: str, path: str) -> str:
"""
        Reverses the URL using the scheme and path given as parameters.
        :param scheme: Scheme of the url
        :param path: Path of the url
        :return: The full URL as a string
"""
# remove starting slash in path if present
path = path.lstrip('/')
server, port = self.connection_handler.server, self.connection_handler.port
if self.connection_handler.path:
url = '{scheme}://{server}:{port}/{path}'.format(scheme=scheme,
server=server,
port=port,
path=path)
else:
url = '{scheme}://{server}:{port}/'.format(scheme=scheme,
server=server,
port=port)
return url + path | [
"def",
"reverse_url",
"(",
"self",
",",
"scheme",
":",
"str",
",",
"path",
":",
"str",
")",
"->",
"str",
":",
"# remove starting slash in path if present",
"path",
"=",
"path",
".",
"lstrip",
"(",
"'/'",
")",
"server",
",",
"port",
"=",
"self",
".",
"connection_handler",
".",
"server",
",",
"self",
".",
"connection_handler",
".",
"port",
"if",
"self",
".",
"connection_handler",
".",
"path",
":",
"url",
"=",
"'{scheme}://{server}:{port}/{path}'",
".",
"format",
"(",
"scheme",
"=",
"scheme",
",",
"server",
"=",
"server",
",",
"port",
"=",
"port",
",",
"path",
"=",
"path",
")",
"else",
":",
"url",
"=",
"'{scheme}://{server}:{port}/'",
".",
"format",
"(",
"scheme",
"=",
"scheme",
",",
"server",
"=",
"server",
",",
"port",
"=",
"port",
")",
"return",
"url",
"+",
"path"
]
| 42.173913 | 22 |
def deps(ctx):
'''Install or update development dependencies'''
header(deps.__doc__)
with ctx.cd(ROOT):
ctx.run('pip install -r requirements/develop.pip -r requirements/doc.pip', pty=True) | [
"def",
"deps",
"(",
"ctx",
")",
":",
"header",
"(",
"deps",
".",
"__doc__",
")",
"with",
"ctx",
".",
"cd",
"(",
"ROOT",
")",
":",
"ctx",
".",
"run",
"(",
"'pip install -r requirements/develop.pip -r requirements/doc.pip'",
",",
"pty",
"=",
"True",
")"
]
| 40.8 | 24.8 |
def readTrainingData(file_locations, GROUP_LABEL):
'''
Used in downstream tests
'''
class Mock(object):
pass
mock_module = Mock()
mock_module.PARENT_LABEL = GROUP_LABEL
for location in file_locations:
with open(location) as f:
tree = etree.parse(f)
xml = tree.getroot()
for each in data_prep_utils.TrainingData(xml, mock_module):
yield each | [
"def",
"readTrainingData",
"(",
"file_locations",
",",
"GROUP_LABEL",
")",
":",
"class",
"Mock",
"(",
"object",
")",
":",
"pass",
"mock_module",
"=",
"Mock",
"(",
")",
"mock_module",
".",
"PARENT_LABEL",
"=",
"GROUP_LABEL",
"for",
"location",
"in",
"file_locations",
":",
"with",
"open",
"(",
"location",
")",
"as",
"f",
":",
"tree",
"=",
"etree",
".",
"parse",
"(",
"f",
")",
"xml",
"=",
"tree",
".",
"getroot",
"(",
")",
"for",
"each",
"in",
"data_prep_utils",
".",
"TrainingData",
"(",
"xml",
",",
"mock_module",
")",
":",
"yield",
"each"
]
| 27.4 | 17.8 |
def channel_submit_row(context):
"""
Display the row of buttons for delete and save.
"""
change = context['change']
is_popup = context['is_popup']
save_as = context['save_as']
show_save = context.get('show_save', True)
show_save_and_continue = context.get('show_save_and_continue', True)
can_delete = context['has_delete_permission']
can_add = context['has_add_permission']
can_change = context['has_change_permission']
ctx = Context(context)
ctx.update({
'show_delete_link': (not is_popup and
can_delete and
change and
context.get('show_delete', True)
),
'show_save_as_new': not is_popup and change and save_as,
'show_save_and_add_another': (can_add and
not is_popup and
(not save_as or context['add'])
),
'show_save_and_continue': (not is_popup and can_change and show_save_and_continue),
'show_save': show_save,
})
return ctx | [
"def",
"channel_submit_row",
"(",
"context",
")",
":",
"change",
"=",
"context",
"[",
"'change'",
"]",
"is_popup",
"=",
"context",
"[",
"'is_popup'",
"]",
"save_as",
"=",
"context",
"[",
"'save_as'",
"]",
"show_save",
"=",
"context",
".",
"get",
"(",
"'show_save'",
",",
"True",
")",
"show_save_and_continue",
"=",
"context",
".",
"get",
"(",
"'show_save_and_continue'",
",",
"True",
")",
"can_delete",
"=",
"context",
"[",
"'has_delete_permission'",
"]",
"can_add",
"=",
"context",
"[",
"'has_add_permission'",
"]",
"can_change",
"=",
"context",
"[",
"'has_change_permission'",
"]",
"ctx",
"=",
"Context",
"(",
"context",
")",
"ctx",
".",
"update",
"(",
"{",
"'show_delete_link'",
":",
"(",
"not",
"is_popup",
"and",
"can_delete",
"and",
"change",
"and",
"context",
".",
"get",
"(",
"'show_delete'",
",",
"True",
")",
")",
",",
"'show_save_as_new'",
":",
"not",
"is_popup",
"and",
"change",
"and",
"save_as",
",",
"'show_save_and_add_another'",
":",
"(",
"can_add",
"and",
"not",
"is_popup",
"and",
"(",
"not",
"save_as",
"or",
"context",
"[",
"'add'",
"]",
")",
")",
",",
"'show_save_and_continue'",
":",
"(",
"not",
"is_popup",
"and",
"can_change",
"and",
"show_save_and_continue",
")",
",",
"'show_save'",
":",
"show_save",
",",
"}",
")",
"return",
"ctx"
]
| 37.7 | 15.166667 |
def worker_disabled(name, workers=None, profile='default'):
'''
Disable all the workers in the modjk load balancer
Example:
.. code-block:: yaml
loadbalancer:
modjk.worker_disabled:
- workers:
- app1
- app2
'''
if workers is None:
workers = []
return _bulk_state(
'modjk.bulk_disable', name, workers, profile
) | [
"def",
"worker_disabled",
"(",
"name",
",",
"workers",
"=",
"None",
",",
"profile",
"=",
"'default'",
")",
":",
"if",
"workers",
"is",
"None",
":",
"workers",
"=",
"[",
"]",
"return",
"_bulk_state",
"(",
"'modjk.bulk_disable'",
",",
"name",
",",
"workers",
",",
"profile",
")"
]
| 21.105263 | 23.631579 |
def uuid_to_date(uuid, century='20'):
"""Return a date created from the last 6 digits of a uuid.
Arguments:
uuid The unique identifier to parse.
century The first 2 digits to assume in the year. Default is '20'.
Examples:
>>> uuid_to_date('e8820616-1462-49b6-9784-e99a32120201')
datetime.date(2012, 2, 1)
>>> uuid_to_date('e8820616-1462-49b6-9784-e99a32120201', '18')
datetime.date(1812, 2, 1)
"""
day = int(uuid[-2:])
month = int(uuid[-4:-2])
year = int('%s%s' % (century, uuid[-6:-4]))
return datetime.date(year=year, month=month, day=day) | [
"def",
"uuid_to_date",
"(",
"uuid",
",",
"century",
"=",
"'20'",
")",
":",
"day",
"=",
"int",
"(",
"uuid",
"[",
"-",
"2",
":",
"]",
")",
"month",
"=",
"int",
"(",
"uuid",
"[",
"-",
"4",
":",
"-",
"2",
"]",
")",
"year",
"=",
"int",
"(",
"'%s%s'",
"%",
"(",
"century",
",",
"uuid",
"[",
"-",
"6",
":",
"-",
"4",
"]",
")",
")",
"return",
"datetime",
".",
"date",
"(",
"year",
"=",
"year",
",",
"month",
"=",
"month",
",",
"day",
"=",
"day",
")"
]
| 30.35 | 20.7 |
def convert_UCERFSource(self, node):
"""
Converts the Ucerf Source node into an SES Control object
"""
dirname = os.path.dirname(self.fname) # where the source_model_file is
source_file = os.path.join(dirname, node["filename"])
if "startDate" in node.attrib and "investigationTime" in node.attrib:
# Is a time-dependent model - even if rates were originally
# poissonian
# Verify that the source time span is the same as the TOM time span
inv_time = float(node["investigationTime"])
if inv_time != self.investigation_time:
raise ValueError("Source investigation time (%s) is not "
"equal to configuration investigation time "
"(%s)" % (inv_time, self.investigation_time))
start_date = datetime.strptime(node["startDate"], "%d/%m/%Y")
else:
start_date = None
return UCERFSource(
source_file,
self.investigation_time,
start_date,
float(node["minMag"]),
npd=self.convert_npdist(node),
hdd=self.convert_hpdist(node),
aspect=~node.ruptAspectRatio,
upper_seismogenic_depth=~node.pointGeometry.upperSeismoDepth,
lower_seismogenic_depth=~node.pointGeometry.lowerSeismoDepth,
msr=valid.SCALEREL[~node.magScaleRel](),
mesh_spacing=self.rupture_mesh_spacing,
trt=node["tectonicRegion"]) | [
"def",
"convert_UCERFSource",
"(",
"self",
",",
"node",
")",
":",
"dirname",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"self",
".",
"fname",
")",
"# where the source_model_file is",
"source_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dirname",
",",
"node",
"[",
"\"filename\"",
"]",
")",
"if",
"\"startDate\"",
"in",
"node",
".",
"attrib",
"and",
"\"investigationTime\"",
"in",
"node",
".",
"attrib",
":",
"# Is a time-dependent model - even if rates were originally",
"# poissonian",
"# Verify that the source time span is the same as the TOM time span",
"inv_time",
"=",
"float",
"(",
"node",
"[",
"\"investigationTime\"",
"]",
")",
"if",
"inv_time",
"!=",
"self",
".",
"investigation_time",
":",
"raise",
"ValueError",
"(",
"\"Source investigation time (%s) is not \"",
"\"equal to configuration investigation time \"",
"\"(%s)\"",
"%",
"(",
"inv_time",
",",
"self",
".",
"investigation_time",
")",
")",
"start_date",
"=",
"datetime",
".",
"strptime",
"(",
"node",
"[",
"\"startDate\"",
"]",
",",
"\"%d/%m/%Y\"",
")",
"else",
":",
"start_date",
"=",
"None",
"return",
"UCERFSource",
"(",
"source_file",
",",
"self",
".",
"investigation_time",
",",
"start_date",
",",
"float",
"(",
"node",
"[",
"\"minMag\"",
"]",
")",
",",
"npd",
"=",
"self",
".",
"convert_npdist",
"(",
"node",
")",
",",
"hdd",
"=",
"self",
".",
"convert_hpdist",
"(",
"node",
")",
",",
"aspect",
"=",
"~",
"node",
".",
"ruptAspectRatio",
",",
"upper_seismogenic_depth",
"=",
"~",
"node",
".",
"pointGeometry",
".",
"upperSeismoDepth",
",",
"lower_seismogenic_depth",
"=",
"~",
"node",
".",
"pointGeometry",
".",
"lowerSeismoDepth",
",",
"msr",
"=",
"valid",
".",
"SCALEREL",
"[",
"~",
"node",
".",
"magScaleRel",
"]",
"(",
")",
",",
"mesh_spacing",
"=",
"self",
".",
"rupture_mesh_spacing",
",",
"trt",
"=",
"node",
"[",
"\"tectonicRegion\"",
"]",
")"
]
| 45.16129 | 17.483871 |
def wait(self=None, period=10, callback=None, *args, **kwargs):
"""Wait until task is complete
:param period: Time in seconds between reloads
:param callback: Function to call after the task has finished,
arguments and keyword arguments can be provided for it
:return: Return value of provided callback function or None if a
callback function was not provided
"""
while self.status not in [
TaskStatus.COMPLETED,
TaskStatus.FAILED,
TaskStatus.ABORTED
]:
self.reload()
time.sleep(period)
if callback:
return callback(*args, **kwargs) | [
"def",
"wait",
"(",
"self",
"=",
"None",
",",
"period",
"=",
"10",
",",
"callback",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"while",
"self",
".",
"status",
"not",
"in",
"[",
"TaskStatus",
".",
"COMPLETED",
",",
"TaskStatus",
".",
"FAILED",
",",
"TaskStatus",
".",
"ABORTED",
"]",
":",
"self",
".",
"reload",
"(",
")",
"time",
".",
"sleep",
"(",
"period",
")",
"if",
"callback",
":",
"return",
"callback",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
]
| 37.555556 | 15.722222 |
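A usage sketch of the wait polling loop above, against a hypothetical task whose backend completes after two reloads:

import time  # used by wait() above

class TaskStatus:  # hypothetical stand-in for the source's status enum
    RUNNING, COMPLETED, FAILED, ABORTED = range(4)

class FakeTask:
    status = TaskStatus.RUNNING
    _polls = 0

    def reload(self):
        self._polls += 1
        if self._polls >= 2:  # pretend the backend finishes on poll two
            self.status = TaskStatus.COMPLETED

    wait = wait  # bind the function defined above as a method

task = FakeTask()
print(task.wait(period=0, callback=lambda msg: msg, msg="done"))  # done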
def delete_file(self, path, prefixed_path, source_storage):
"""
Checks if the target file should be deleted if it already exists
"""
if self.storage.exists(prefixed_path):
try:
# When was the target file modified last time?
target_last_modified = \
self.storage.modified_time(prefixed_path)
except (OSError, NotImplementedError, AttributeError):
# The storage doesn't support ``modified_time`` or failed
pass
else:
try:
# When was the source file modified last time?
source_last_modified = source_storage.modified_time(path)
except (OSError, NotImplementedError, AttributeError):
pass
else:
# The full path of the target file
if self.local:
full_path = self.storage.path(prefixed_path)
else:
full_path = None
# Skip the file if the source file is younger
# Avoid sub-second precision (see #14665, #19540)
if (target_last_modified.replace(microsecond=0)
>= source_last_modified.replace(microsecond=0)):
if not ((self.symlink and full_path
and not os.path.islink(full_path)) or
(not self.symlink and full_path
and os.path.islink(full_path))):
if prefixed_path not in self.unmodified_files:
self.unmodified_files.append(prefixed_path)
self.log("Skipping '%s' (not modified)" % path)
return False
# Then delete the existing file if really needed
if self.dry_run:
self.log("Pretending to delete '%s'" % path)
else:
self.log("Deleting '%s'" % path)
self.storage.delete(prefixed_path)
return True | [
"def",
"delete_file",
"(",
"self",
",",
"path",
",",
"prefixed_path",
",",
"source_storage",
")",
":",
"if",
"self",
".",
"storage",
".",
"exists",
"(",
"prefixed_path",
")",
":",
"try",
":",
"# When was the target file modified last time?",
"target_last_modified",
"=",
"self",
".",
"storage",
".",
"modified_time",
"(",
"prefixed_path",
")",
"except",
"(",
"OSError",
",",
"NotImplementedError",
",",
"AttributeError",
")",
":",
"# The storage doesn't support ``modified_time`` or failed",
"pass",
"else",
":",
"try",
":",
"# When was the source file modified last time?",
"source_last_modified",
"=",
"source_storage",
".",
"modified_time",
"(",
"path",
")",
"except",
"(",
"OSError",
",",
"NotImplementedError",
",",
"AttributeError",
")",
":",
"pass",
"else",
":",
"# The full path of the target file",
"if",
"self",
".",
"local",
":",
"full_path",
"=",
"self",
".",
"storage",
".",
"path",
"(",
"prefixed_path",
")",
"else",
":",
"full_path",
"=",
"None",
"# Skip the file if the source file is younger",
"# Avoid sub-second precision (see #14665, #19540)",
"if",
"(",
"target_last_modified",
".",
"replace",
"(",
"microsecond",
"=",
"0",
")",
">=",
"source_last_modified",
".",
"replace",
"(",
"microsecond",
"=",
"0",
")",
")",
":",
"if",
"not",
"(",
"(",
"self",
".",
"symlink",
"and",
"full_path",
"and",
"not",
"os",
".",
"path",
".",
"islink",
"(",
"full_path",
")",
")",
"or",
"(",
"not",
"self",
".",
"symlink",
"and",
"full_path",
"and",
"os",
".",
"path",
".",
"islink",
"(",
"full_path",
")",
")",
")",
":",
"if",
"prefixed_path",
"not",
"in",
"self",
".",
"unmodified_files",
":",
"self",
".",
"unmodified_files",
".",
"append",
"(",
"prefixed_path",
")",
"self",
".",
"log",
"(",
"\"Skipping '%s' (not modified)\"",
"%",
"path",
")",
"return",
"False",
"# Then delete the existing file if really needed",
"if",
"self",
".",
"dry_run",
":",
"self",
".",
"log",
"(",
"\"Pretending to delete '%s'\"",
"%",
"path",
")",
"else",
":",
"self",
".",
"log",
"(",
"\"Deleting '%s'\"",
"%",
"path",
")",
"self",
".",
"storage",
".",
"delete",
"(",
"prefixed_path",
")",
"return",
"True"
]
| 49.604651 | 20.209302 |
def push_repository(self,
repository,
docker_executable='docker',
shutit_pexpect_child=None,
expect=None,
note=None,
loglevel=logging.INFO):
"""Pushes the repository.
@param repository: Repository to push.
@param docker_executable: Defaults to 'docker'
@param expect: See send()
@param shutit_pexpect_child: See send()
@type repository: string
@type docker_executable: string
"""
shutit_global.shutit_global_object.yield_to_draw()
self.handle_note(note)
shutit_pexpect_child = shutit_pexpect_child or self.get_shutit_pexpect_session_from_id('host_child').pexpect_child
expect = expect or self.expect_prompts['ORIGIN_ENV']
send = docker_executable + ' push ' + self.repository['user'] + '/' + repository
timeout = 99999
self.log('Running: ' + send,level=logging.INFO)
self.multisend(docker_executable + ' login',
{'Username':self.repository['user'], 'Password':self.repository['password'], 'Email':self.repository['email']},
shutit_pexpect_child=shutit_pexpect_child,
expect=expect)
self.send(send,
shutit_pexpect_child=shutit_pexpect_child,
expect=expect,
timeout=timeout,
check_exit=False,
fail_on_empty_before=False,
loglevel=loglevel)
self.handle_note_after(note)
return True | [
"def",
"push_repository",
"(",
"self",
",",
"repository",
",",
"docker_executable",
"=",
"'docker'",
",",
"shutit_pexpect_child",
"=",
"None",
",",
"expect",
"=",
"None",
",",
"note",
"=",
"None",
",",
"loglevel",
"=",
"logging",
".",
"INFO",
")",
":",
"shutit_global",
".",
"shutit_global_object",
".",
"yield_to_draw",
"(",
")",
"self",
".",
"handle_note",
"(",
"note",
")",
"shutit_pexpect_child",
"=",
"shutit_pexpect_child",
"or",
"self",
".",
"get_shutit_pexpect_session_from_id",
"(",
"'host_child'",
")",
".",
"pexpect_child",
"expect",
"=",
"expect",
"or",
"self",
".",
"expect_prompts",
"[",
"'ORIGIN_ENV'",
"]",
"send",
"=",
"docker_executable",
"+",
"' push '",
"+",
"self",
".",
"repository",
"[",
"'user'",
"]",
"+",
"'/'",
"+",
"repository",
"timeout",
"=",
"99999",
"self",
".",
"log",
"(",
"'Running: '",
"+",
"send",
",",
"level",
"=",
"logging",
".",
"INFO",
")",
"self",
".",
"multisend",
"(",
"docker_executable",
"+",
"' login'",
",",
"{",
"'Username'",
":",
"self",
".",
"repository",
"[",
"'user'",
"]",
",",
"'Password'",
":",
"self",
".",
"repository",
"[",
"'password'",
"]",
",",
"'Email'",
":",
"self",
".",
"repository",
"[",
"'email'",
"]",
"}",
",",
"shutit_pexpect_child",
"=",
"shutit_pexpect_child",
",",
"expect",
"=",
"expect",
")",
"self",
".",
"send",
"(",
"send",
",",
"shutit_pexpect_child",
"=",
"shutit_pexpect_child",
",",
"expect",
"=",
"expect",
",",
"timeout",
"=",
"timeout",
",",
"check_exit",
"=",
"False",
",",
"fail_on_empty_before",
"=",
"False",
",",
"loglevel",
"=",
"loglevel",
")",
"self",
".",
"handle_note_after",
"(",
"note",
")",
"return",
"True"
]
| 41.216216 | 17.108108 |
def padding(self, px):
"""
Add padding around four sides of box
        :param px: padding value in pixels.
            Can be an array in the format of [top, right, bottom, left] or a single value.
        :return: New box with the padding added
"""
# if px is not an array, have equal padding all sides
if not isinstance(px, list):
px = [px] * 4
x = max(0, self.x - px[3])
y = max(0, self.y - px[0])
x2 = self.x + self.width + px[1]
y2 = self.y + self.height + px[2]
return Box.from_xy(x, y, x2, y2) | [
"def",
"padding",
"(",
"self",
",",
"px",
")",
":",
"# if px is not an array, have equal padding all sides",
"if",
"not",
"isinstance",
"(",
"px",
",",
"list",
")",
":",
"px",
"=",
"[",
"px",
"]",
"*",
"4",
"x",
"=",
"max",
"(",
"0",
",",
"self",
".",
"x",
"-",
"px",
"[",
"3",
"]",
")",
"y",
"=",
"max",
"(",
"0",
",",
"self",
".",
"y",
"-",
"px",
"[",
"0",
"]",
")",
"x2",
"=",
"self",
".",
"x",
"+",
"self",
".",
"width",
"+",
"px",
"[",
"1",
"]",
"y2",
"=",
"self",
".",
"y",
"+",
"self",
".",
"height",
"+",
"px",
"[",
"2",
"]",
"return",
"Box",
".",
"from_xy",
"(",
"x",
",",
"y",
",",
"x2",
",",
"y2",
")"
]
| 33.235294 | 12.058824 |
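A quick sketch of the padding semantics above, using a hypothetical minimal Box that provides the attributes and the from_xy constructor the method relies on:

class Box:  # hypothetical minimal stand-in
    def __init__(self, x, y, width, height):
        self.x, self.y, self.width, self.height = x, y, width, height

    @classmethod
    def from_xy(cls, x, y, x2, y2):
        return cls(x, y, x2 - x, y2 - y)

    padding = padding  # bind the function defined above as a method

b = Box(10, 10, 100, 50)
p1 = b.padding(5)             # equal padding on all four sides
p2 = b.padding([1, 2, 3, 4])  # [top, right, bottom, left]
print(p1.x, p1.y, p1.width, p1.height)  # 5 5 110 60
print(p2.x, p2.y, p2.width, p2.height)  # 6 9 106 54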
def hazard_preparation(self):
"""This function is doing the hazard preparation."""
LOGGER.info('ANALYSIS : Hazard preparation')
use_same_projection = (
self.hazard.crs().authid() == self._crs.authid())
self.set_state_info(
'hazard',
'use_same_projection_as_aggregation',
use_same_projection)
if is_raster_layer(self.hazard):
extent = self._analysis_impacted.extent()
if not use_same_projection:
transform = QgsCoordinateTransform(
self._crs, self.hazard.crs(), QgsProject.instance())
extent = transform.transform(extent)
self.set_state_process(
'hazard', 'Clip raster by analysis bounding box')
# noinspection PyTypeChecker
self.hazard = clip_by_extent(self.hazard, extent)
self.debug_layer(self.hazard)
if self.hazard.keywords.get('layer_mode') == 'continuous':
self.set_state_process(
'hazard', 'Classify continuous raster hazard')
# noinspection PyTypeChecker
self.hazard = reclassify_raster(
self.hazard, self.exposure.keywords['exposure'])
self.debug_layer(self.hazard)
self.set_state_process(
'hazard', 'Polygonize classified raster hazard')
# noinspection PyTypeChecker
self.hazard = polygonize(self.hazard)
self.debug_layer(self.hazard)
if not use_same_projection:
self.set_state_process(
'hazard',
'Reproject hazard layer to aggregation CRS')
# noinspection PyTypeChecker
self.hazard = reproject(self.hazard, self._crs)
self.debug_layer(self.hazard, check_fields=False)
self.set_state_process(
'hazard',
'Clip and mask hazard polygons with the analysis layer')
self.hazard = clip(self.hazard, self._analysis_impacted)
self.debug_layer(self.hazard, check_fields=False)
self.set_state_process(
'hazard',
'Cleaning the vector hazard attribute table')
# noinspection PyTypeChecker
self.hazard = prepare_vector_layer(self.hazard)
self.debug_layer(self.hazard)
if self.hazard.keywords.get('layer_mode') == 'continuous':
# If the layer is continuous, we update the original data to the
# inasafe hazard class.
self.set_state_process(
'hazard',
'Classify continuous hazard and assign class names')
self.hazard = reclassify_vector(
self.hazard, self.exposure.keywords['exposure'])
self.debug_layer(self.hazard)
else:
# However, if it's a classified dataset, we only transpose the
# value map using inasafe hazard classes.
self.set_state_process(
'hazard', 'Assign classes based on value map')
self.hazard = update_value_map(
self.hazard, self.exposure.keywords['exposure'])
self.debug_layer(self.hazard) | [
"def",
"hazard_preparation",
"(",
"self",
")",
":",
"LOGGER",
".",
"info",
"(",
"'ANALYSIS : Hazard preparation'",
")",
"use_same_projection",
"=",
"(",
"self",
".",
"hazard",
".",
"crs",
"(",
")",
".",
"authid",
"(",
")",
"==",
"self",
".",
"_crs",
".",
"authid",
"(",
")",
")",
"self",
".",
"set_state_info",
"(",
"'hazard'",
",",
"'use_same_projection_as_aggregation'",
",",
"use_same_projection",
")",
"if",
"is_raster_layer",
"(",
"self",
".",
"hazard",
")",
":",
"extent",
"=",
"self",
".",
"_analysis_impacted",
".",
"extent",
"(",
")",
"if",
"not",
"use_same_projection",
":",
"transform",
"=",
"QgsCoordinateTransform",
"(",
"self",
".",
"_crs",
",",
"self",
".",
"hazard",
".",
"crs",
"(",
")",
",",
"QgsProject",
".",
"instance",
"(",
")",
")",
"extent",
"=",
"transform",
".",
"transform",
"(",
"extent",
")",
"self",
".",
"set_state_process",
"(",
"'hazard'",
",",
"'Clip raster by analysis bounding box'",
")",
"# noinspection PyTypeChecker",
"self",
".",
"hazard",
"=",
"clip_by_extent",
"(",
"self",
".",
"hazard",
",",
"extent",
")",
"self",
".",
"debug_layer",
"(",
"self",
".",
"hazard",
")",
"if",
"self",
".",
"hazard",
".",
"keywords",
".",
"get",
"(",
"'layer_mode'",
")",
"==",
"'continuous'",
":",
"self",
".",
"set_state_process",
"(",
"'hazard'",
",",
"'Classify continuous raster hazard'",
")",
"# noinspection PyTypeChecker",
"self",
".",
"hazard",
"=",
"reclassify_raster",
"(",
"self",
".",
"hazard",
",",
"self",
".",
"exposure",
".",
"keywords",
"[",
"'exposure'",
"]",
")",
"self",
".",
"debug_layer",
"(",
"self",
".",
"hazard",
")",
"self",
".",
"set_state_process",
"(",
"'hazard'",
",",
"'Polygonize classified raster hazard'",
")",
"# noinspection PyTypeChecker",
"self",
".",
"hazard",
"=",
"polygonize",
"(",
"self",
".",
"hazard",
")",
"self",
".",
"debug_layer",
"(",
"self",
".",
"hazard",
")",
"if",
"not",
"use_same_projection",
":",
"self",
".",
"set_state_process",
"(",
"'hazard'",
",",
"'Reproject hazard layer to aggregation CRS'",
")",
"# noinspection PyTypeChecker",
"self",
".",
"hazard",
"=",
"reproject",
"(",
"self",
".",
"hazard",
",",
"self",
".",
"_crs",
")",
"self",
".",
"debug_layer",
"(",
"self",
".",
"hazard",
",",
"check_fields",
"=",
"False",
")",
"self",
".",
"set_state_process",
"(",
"'hazard'",
",",
"'Clip and mask hazard polygons with the analysis layer'",
")",
"self",
".",
"hazard",
"=",
"clip",
"(",
"self",
".",
"hazard",
",",
"self",
".",
"_analysis_impacted",
")",
"self",
".",
"debug_layer",
"(",
"self",
".",
"hazard",
",",
"check_fields",
"=",
"False",
")",
"self",
".",
"set_state_process",
"(",
"'hazard'",
",",
"'Cleaning the vector hazard attribute table'",
")",
"# noinspection PyTypeChecker",
"self",
".",
"hazard",
"=",
"prepare_vector_layer",
"(",
"self",
".",
"hazard",
")",
"self",
".",
"debug_layer",
"(",
"self",
".",
"hazard",
")",
"if",
"self",
".",
"hazard",
".",
"keywords",
".",
"get",
"(",
"'layer_mode'",
")",
"==",
"'continuous'",
":",
"# If the layer is continuous, we update the original data to the",
"# inasafe hazard class.",
"self",
".",
"set_state_process",
"(",
"'hazard'",
",",
"'Classify continuous hazard and assign class names'",
")",
"self",
".",
"hazard",
"=",
"reclassify_vector",
"(",
"self",
".",
"hazard",
",",
"self",
".",
"exposure",
".",
"keywords",
"[",
"'exposure'",
"]",
")",
"self",
".",
"debug_layer",
"(",
"self",
".",
"hazard",
")",
"else",
":",
"# However, if it's a classified dataset, we only transpose the",
"# value map using inasafe hazard classes.",
"self",
".",
"set_state_process",
"(",
"'hazard'",
",",
"'Assign classes based on value map'",
")",
"self",
".",
"hazard",
"=",
"update_value_map",
"(",
"self",
".",
"hazard",
",",
"self",
".",
"exposure",
".",
"keywords",
"[",
"'exposure'",
"]",
")",
"self",
".",
"debug_layer",
"(",
"self",
".",
"hazard",
")"
]
| 40.948052 | 16.688312 |
def device(self, idx):
"""Get a specific GPU device
Args:
idx: index of device
Returns:
NvidiaDevice: single GPU device
"""
class GpuDevice(Structure):
pass
c_nvmlDevice_t = POINTER(GpuDevice)
c_index = c_uint(idx)
device = c_nvmlDevice_t()
_check_return(_NVML.get_function(
"nvmlDeviceGetHandleByIndex_v2")(c_index, byref(device)))
return NvidiaDevice(device) | [
"def",
"device",
"(",
"self",
",",
"idx",
")",
":",
"class",
"GpuDevice",
"(",
"Structure",
")",
":",
"pass",
"c_nvmlDevice_t",
"=",
"POINTER",
"(",
"GpuDevice",
")",
"c_index",
"=",
"c_uint",
"(",
"idx",
")",
"device",
"=",
"c_nvmlDevice_t",
"(",
")",
"_check_return",
"(",
"_NVML",
".",
"get_function",
"(",
"\"nvmlDeviceGetHandleByIndex_v2\"",
")",
"(",
"c_index",
",",
"byref",
"(",
"device",
")",
")",
")",
"return",
"NvidiaDevice",
"(",
"device",
")"
]
| 23.7 | 18.25 |
def create_user(self, ):
"""Create a user and store it in the self.user
:returns: None
:rtype: None
:raises: None
"""
name = self.username_le.text()
if not name:
self.username_le.setPlaceholderText("Please provide a username.")
return
first = self.first_le.text()
last = self.last_le.text()
email = self.email_le.text()
try:
user = djadapter.models.User(username=name, first_name=first, last_name=last, email=email)
user.save()
for prj in self.projects:
prj.users.add(user)
for task in self.tasks:
task.users.add(user)
self.user = user
self.accept()
except:
log.exception("Could not create new assettype") | [
"def",
"create_user",
"(",
"self",
",",
")",
":",
"name",
"=",
"self",
".",
"username_le",
".",
"text",
"(",
")",
"if",
"not",
"name",
":",
"self",
".",
"username_le",
".",
"setPlaceholderText",
"(",
"\"Please provide a username.\"",
")",
"return",
"first",
"=",
"self",
".",
"first_le",
".",
"text",
"(",
")",
"last",
"=",
"self",
".",
"last_le",
".",
"text",
"(",
")",
"email",
"=",
"self",
".",
"email_le",
".",
"text",
"(",
")",
"try",
":",
"user",
"=",
"djadapter",
".",
"models",
".",
"User",
"(",
"username",
"=",
"name",
",",
"first_name",
"=",
"first",
",",
"last_name",
"=",
"last",
",",
"email",
"=",
"email",
")",
"user",
".",
"save",
"(",
")",
"for",
"prj",
"in",
"self",
".",
"projects",
":",
"prj",
".",
"users",
".",
"add",
"(",
"user",
")",
"for",
"task",
"in",
"self",
".",
"tasks",
":",
"task",
".",
"users",
".",
"add",
"(",
"user",
")",
"self",
".",
"user",
"=",
"user",
"self",
".",
"accept",
"(",
")",
"except",
":",
"log",
".",
"exception",
"(",
"\"Could not create new assettype\"",
")"
]
| 32.72 | 16.12 |
def toggle_service_status(self, service_id):
"""Toggles the service status.
        :param int service_id: The id of the service to toggle
"""
svc = self.client['Network_Application_Delivery_Controller_'
'LoadBalancer_Service']
return svc.toggleStatus(id=service_id) | [
"def",
"toggle_service_status",
"(",
"self",
",",
"service_id",
")",
":",
"svc",
"=",
"self",
".",
"client",
"[",
"'Network_Application_Delivery_Controller_'",
"'LoadBalancer_Service'",
"]",
"return",
"svc",
".",
"toggleStatus",
"(",
"id",
"=",
"service_id",
")"
]
| 35.333333 | 16.555556 |
def _update_model(self, completions):
"""
        Creates a QStandardItemModel that holds the suggestions from the completion
        models for the QCompleter
        :param completions: list of completion suggestions
"""
# build the completion model
cc_model = QtGui.QStandardItemModel()
self._tooltips.clear()
for completion in completions:
name = completion['name']
item = QtGui.QStandardItem()
item.setData(name, QtCore.Qt.DisplayRole)
if 'tooltip' in completion and completion['tooltip']:
self._tooltips[name] = completion['tooltip']
if 'icon' in completion:
icon = completion['icon']
if isinstance(icon, list):
icon = QtGui.QIcon.fromTheme(icon[0], QtGui.QIcon(icon[1]))
else:
icon = QtGui.QIcon(icon)
item.setData(QtGui.QIcon(icon),
QtCore.Qt.DecorationRole)
cc_model.appendRow(item)
try:
self._completer.setModel(cc_model)
except RuntimeError:
self._create_completer()
self._completer.setModel(cc_model)
return cc_model | [
"def",
"_update_model",
"(",
"self",
",",
"completions",
")",
":",
"# build the completion model",
"cc_model",
"=",
"QtGui",
".",
"QStandardItemModel",
"(",
")",
"self",
".",
"_tooltips",
".",
"clear",
"(",
")",
"for",
"completion",
"in",
"completions",
":",
"name",
"=",
"completion",
"[",
"'name'",
"]",
"item",
"=",
"QtGui",
".",
"QStandardItem",
"(",
")",
"item",
".",
"setData",
"(",
"name",
",",
"QtCore",
".",
"Qt",
".",
"DisplayRole",
")",
"if",
"'tooltip'",
"in",
"completion",
"and",
"completion",
"[",
"'tooltip'",
"]",
":",
"self",
".",
"_tooltips",
"[",
"name",
"]",
"=",
"completion",
"[",
"'tooltip'",
"]",
"if",
"'icon'",
"in",
"completion",
":",
"icon",
"=",
"completion",
"[",
"'icon'",
"]",
"if",
"isinstance",
"(",
"icon",
",",
"list",
")",
":",
"icon",
"=",
"QtGui",
".",
"QIcon",
".",
"fromTheme",
"(",
"icon",
"[",
"0",
"]",
",",
"QtGui",
".",
"QIcon",
"(",
"icon",
"[",
"1",
"]",
")",
")",
"else",
":",
"icon",
"=",
"QtGui",
".",
"QIcon",
"(",
"icon",
")",
"item",
".",
"setData",
"(",
"QtGui",
".",
"QIcon",
"(",
"icon",
")",
",",
"QtCore",
".",
"Qt",
".",
"DecorationRole",
")",
"cc_model",
".",
"appendRow",
"(",
"item",
")",
"try",
":",
"self",
".",
"_completer",
".",
"setModel",
"(",
"cc_model",
")",
"except",
"RuntimeError",
":",
"self",
".",
"_create_completer",
"(",
")",
"self",
".",
"_completer",
".",
"setModel",
"(",
"cc_model",
")",
"return",
"cc_model"
]
| 38.612903 | 11.129032 |
def _validate_pillar_roots(pillar_roots):
'''
    If the pillar_roots option has a key that is None, don't error out;
    just replace it with an empty list
'''
if not isinstance(pillar_roots, dict):
log.warning('The pillar_roots parameter is not properly formatted,'
' using defaults')
return {'base': _expand_glob_path([salt.syspaths.BASE_PILLAR_ROOTS_DIR])}
return _normalize_roots(pillar_roots) | [
"def",
"_validate_pillar_roots",
"(",
"pillar_roots",
")",
":",
"if",
"not",
"isinstance",
"(",
"pillar_roots",
",",
"dict",
")",
":",
"log",
".",
"warning",
"(",
"'The pillar_roots parameter is not properly formatted,'",
"' using defaults'",
")",
"return",
"{",
"'base'",
":",
"_expand_glob_path",
"(",
"[",
"salt",
".",
"syspaths",
".",
"BASE_PILLAR_ROOTS_DIR",
"]",
")",
"}",
"return",
"_normalize_roots",
"(",
"pillar_roots",
")"
]
| 44.7 | 18.7 |
def get_marginal_topic_distrib(doc_topic_distrib, doc_lengths):
"""
Return marginal topic distribution p(T) (topic proportions) given the document-topic distribution (theta)
`doc_topic_distrib` and the document lengths `doc_lengths`. The latter can be calculated with `get_doc_lengths()`.
"""
unnorm = (doc_topic_distrib.T * doc_lengths).sum(axis=1)
return unnorm / unnorm.sum() | [
"def",
"get_marginal_topic_distrib",
"(",
"doc_topic_distrib",
",",
"doc_lengths",
")",
":",
"unnorm",
"=",
"(",
"doc_topic_distrib",
".",
"T",
"*",
"doc_lengths",
")",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
"return",
"unnorm",
"/",
"unnorm",
".",
"sum",
"(",
")"
]
| 56.571429 | 28.285714 |
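A worked numpy example of the formula above: the marginal topic distribution is the document-length-weighted average of the theta rows, i.e. p(T) is proportional to sum_d n_d * theta_d:

import numpy as np

# Two documents, three topics; each theta row sums to 1.
doc_topic_distrib = np.array([[0.5, 0.3, 0.2],
                              [0.1, 0.1, 0.8]])
doc_lengths = np.array([100, 300])  # tokens per document

unnorm = (doc_topic_distrib.T * doc_lengths).sum(axis=1)  # [80. 60. 260.]
print(unnorm / unnorm.sum())  # [0.2  0.15 0.65]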
def transform(self, X):
"""
        Calculate the features using the timeseries_container and add them to the corresponding rows in the input
        pandas.DataFrame X.
        To save some computing time, you should only include those time series in the container that you
        need. You can set the timeseries container with the method :func:`set_timeseries_container`.
:param X: the DataFrame to which the calculated timeseries features will be added. This is *not* the
dataframe with the timeseries itself.
:type X: pandas.DataFrame
:return: The input DataFrame, but with added features.
:rtype: pandas.DataFrame
"""
if self.timeseries_container is None:
raise RuntimeError("You have to provide a time series using the set_timeseries_container function before.")
# Extract only features for the IDs in X.index
timeseries_container_X = restrict_input_to_index(self.timeseries_container, self.column_id, X.index)
extracted_features = extract_features(timeseries_container_X,
default_fc_parameters=self.default_fc_parameters,
kind_to_fc_parameters=self.kind_to_fc_parameters,
column_id=self.column_id, column_sort=self.column_sort,
column_kind=self.column_kind, column_value=self.column_value,
chunksize=self.chunksize,
n_jobs=self.n_jobs, show_warnings=self.show_warnings,
disable_progressbar=self.disable_progressbar,
impute_function=self.impute_function,
profile=self.profile,
profiling_filename=self.profiling_filename,
profiling_sorting=self.profiling_sorting)
X = pd.merge(X, extracted_features, left_index=True, right_index=True, how="left")
return X | [
"def",
"transform",
"(",
"self",
",",
"X",
")",
":",
"if",
"self",
".",
"timeseries_container",
"is",
"None",
":",
"raise",
"RuntimeError",
"(",
"\"You have to provide a time series using the set_timeseries_container function before.\"",
")",
"# Extract only features for the IDs in X.index",
"timeseries_container_X",
"=",
"restrict_input_to_index",
"(",
"self",
".",
"timeseries_container",
",",
"self",
".",
"column_id",
",",
"X",
".",
"index",
")",
"extracted_features",
"=",
"extract_features",
"(",
"timeseries_container_X",
",",
"default_fc_parameters",
"=",
"self",
".",
"default_fc_parameters",
",",
"kind_to_fc_parameters",
"=",
"self",
".",
"kind_to_fc_parameters",
",",
"column_id",
"=",
"self",
".",
"column_id",
",",
"column_sort",
"=",
"self",
".",
"column_sort",
",",
"column_kind",
"=",
"self",
".",
"column_kind",
",",
"column_value",
"=",
"self",
".",
"column_value",
",",
"chunksize",
"=",
"self",
".",
"chunksize",
",",
"n_jobs",
"=",
"self",
".",
"n_jobs",
",",
"show_warnings",
"=",
"self",
".",
"show_warnings",
",",
"disable_progressbar",
"=",
"self",
".",
"disable_progressbar",
",",
"impute_function",
"=",
"self",
".",
"impute_function",
",",
"profile",
"=",
"self",
".",
"profile",
",",
"profiling_filename",
"=",
"self",
".",
"profiling_filename",
",",
"profiling_sorting",
"=",
"self",
".",
"profiling_sorting",
")",
"X",
"=",
"pd",
".",
"merge",
"(",
"X",
",",
"extracted_features",
",",
"left_index",
"=",
"True",
",",
"right_index",
"=",
"True",
",",
"how",
"=",
"\"left\"",
")",
"return",
"X"
]
| 58.648649 | 39.081081 |
def grouped_insert(t, value):
"""Insert value into the target tree 't' with correct grouping."""
collator = Collator.createInstance(Locale(t.lang) if t.lang else Locale())
if value.tail is not None:
val_prev = value.getprevious()
if val_prev is not None:
val_prev.tail = (val_prev.tail or '') + value.tail
else:
val_parent = value.getparent()
if val_parent is not None:
val_parent.text = (val_parent.text or '') + value.tail
value.tail = None
if t.isgroup and t.sort(value) is not None:
if t.groupby:
for child in t.tree:
if child.get('class') == 'group-by':
# child[0] is the label span
order = collator.compare(
t.groupby(child[1]) or '', t.groupby(value) or '')
if order == 0:
c_target = Target(child, sort=t.sort, lang=t.lang)
insert_group(value, c_target)
break
elif order > 0:
group = create_group(t.groupby(value))
group.append(value)
child.addprevious(group)
break
else:
group = create_group(t.groupby(value))
group.append(value)
t.tree.append(group)
else:
insert_group(value, t)
elif t.sort and t.sort(value) is not None:
insert_sort(value, t)
elif t.location == 'inside':
for child in t.tree:
value.append(child)
value.text = t.tree.text
t.tree.text = None
t.tree.append(value)
elif t.location == 'outside':
value.tail = t.tree.tail
t.tree.tail = None
target_parent_descendants = (
[n.getparent() for n in t.parent.iterdescendants() if n == t.tree])
try:
parent = target_parent_descendants[0]
parent.insert(parent.index(t.tree), value)
value.append(t.tree)
except IndexError as e:
logger.error('Target of outside has been moved or deleted')
raise e
elif t.location == 'before':
value.tail = t.tree.text
t.tree.text = None
t.tree.insert(0, value)
else:
t.tree.append(value) | [
"def",
"grouped_insert",
"(",
"t",
",",
"value",
")",
":",
"collator",
"=",
"Collator",
".",
"createInstance",
"(",
"Locale",
"(",
"t",
".",
"lang",
")",
"if",
"t",
".",
"lang",
"else",
"Locale",
"(",
")",
")",
"if",
"value",
".",
"tail",
"is",
"not",
"None",
":",
"val_prev",
"=",
"value",
".",
"getprevious",
"(",
")",
"if",
"val_prev",
"is",
"not",
"None",
":",
"val_prev",
".",
"tail",
"=",
"(",
"val_prev",
".",
"tail",
"or",
"''",
")",
"+",
"value",
".",
"tail",
"else",
":",
"val_parent",
"=",
"value",
".",
"getparent",
"(",
")",
"if",
"val_parent",
"is",
"not",
"None",
":",
"val_parent",
".",
"text",
"=",
"(",
"val_parent",
".",
"text",
"or",
"''",
")",
"+",
"value",
".",
"tail",
"value",
".",
"tail",
"=",
"None",
"if",
"t",
".",
"isgroup",
"and",
"t",
".",
"sort",
"(",
"value",
")",
"is",
"not",
"None",
":",
"if",
"t",
".",
"groupby",
":",
"for",
"child",
"in",
"t",
".",
"tree",
":",
"if",
"child",
".",
"get",
"(",
"'class'",
")",
"==",
"'group-by'",
":",
"# child[0] is the label span",
"order",
"=",
"collator",
".",
"compare",
"(",
"t",
".",
"groupby",
"(",
"child",
"[",
"1",
"]",
")",
"or",
"''",
",",
"t",
".",
"groupby",
"(",
"value",
")",
"or",
"''",
")",
"if",
"order",
"==",
"0",
":",
"c_target",
"=",
"Target",
"(",
"child",
",",
"sort",
"=",
"t",
".",
"sort",
",",
"lang",
"=",
"t",
".",
"lang",
")",
"insert_group",
"(",
"value",
",",
"c_target",
")",
"break",
"elif",
"order",
">",
"0",
":",
"group",
"=",
"create_group",
"(",
"t",
".",
"groupby",
"(",
"value",
")",
")",
"group",
".",
"append",
"(",
"value",
")",
"child",
".",
"addprevious",
"(",
"group",
")",
"break",
"else",
":",
"group",
"=",
"create_group",
"(",
"t",
".",
"groupby",
"(",
"value",
")",
")",
"group",
".",
"append",
"(",
"value",
")",
"t",
".",
"tree",
".",
"append",
"(",
"group",
")",
"else",
":",
"insert_group",
"(",
"value",
",",
"t",
")",
"elif",
"t",
".",
"sort",
"and",
"t",
".",
"sort",
"(",
"value",
")",
"is",
"not",
"None",
":",
"insert_sort",
"(",
"value",
",",
"t",
")",
"elif",
"t",
".",
"location",
"==",
"'inside'",
":",
"for",
"child",
"in",
"t",
".",
"tree",
":",
"value",
".",
"append",
"(",
"child",
")",
"value",
".",
"text",
"=",
"t",
".",
"tree",
".",
"text",
"t",
".",
"tree",
".",
"text",
"=",
"None",
"t",
".",
"tree",
".",
"append",
"(",
"value",
")",
"elif",
"t",
".",
"location",
"==",
"'outside'",
":",
"value",
".",
"tail",
"=",
"t",
".",
"tree",
".",
"tail",
"t",
".",
"tree",
".",
"tail",
"=",
"None",
"target_parent_descendants",
"=",
"(",
"[",
"n",
".",
"getparent",
"(",
")",
"for",
"n",
"in",
"t",
".",
"parent",
".",
"iterdescendants",
"(",
")",
"if",
"n",
"==",
"t",
".",
"tree",
"]",
")",
"try",
":",
"parent",
"=",
"target_parent_descendants",
"[",
"0",
"]",
"parent",
".",
"insert",
"(",
"parent",
".",
"index",
"(",
"t",
".",
"tree",
")",
",",
"value",
")",
"value",
".",
"append",
"(",
"t",
".",
"tree",
")",
"except",
"IndexError",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"'Target of outside has been moved or deleted'",
")",
"raise",
"e",
"elif",
"t",
".",
"location",
"==",
"'before'",
":",
"value",
".",
"tail",
"=",
"t",
".",
"tree",
".",
"text",
"t",
".",
"tree",
".",
"text",
"=",
"None",
"t",
".",
"tree",
".",
"insert",
"(",
"0",
",",
"value",
")",
"else",
":",
"t",
".",
"tree",
".",
"append",
"(",
"value",
")"
]
| 36.359375 | 15.078125 |
def urlpatterns(self):
        '''Load and decorate urls from all modules,
        then store them as a cached property to avoid repeated loading.
'''
if not hasattr(self, '_urlspatterns'):
urlpatterns = []
# load all urls
# support .urls file and urls_conf = 'elephantblog.urls' on default module
# decorate all url patterns if is not explicitly excluded
for mod in leonardo.modules:
                # TODO this does not work
if is_leonardo_module(mod):
conf = get_conf_from_module(mod)
if module_has_submodule(mod, 'urls'):
urls_mod = import_module('.urls', mod.__name__)
if hasattr(urls_mod, 'urlpatterns'):
# if not public decorate all
if conf['public']:
urlpatterns += urls_mod.urlpatterns
else:
_decorate_urlconf(urls_mod.urlpatterns,
require_auth)
urlpatterns += urls_mod.urlpatterns
            # avoid circular dependency
            # TODO use our loaded modules instead of this property
from django.conf import settings
for urls_conf, conf in six.iteritems(getattr(settings, 'MODULE_URLS', {})):
# is public ?
try:
if conf['is_public']:
urlpatterns += \
patterns('',
url(r'', include(urls_conf)),
)
else:
_decorate_urlconf(
url(r'', include(urls_conf)),
require_auth)
urlpatterns += patterns('',
url(r'', include(urls_conf)))
except Exception as e:
raise Exception('raised %s during loading %s' %
(str(e), urls_conf))
self._urlpatterns = urlpatterns
return self._urlpatterns | [
"def",
"urlpatterns",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_urlspatterns'",
")",
":",
"urlpatterns",
"=",
"[",
"]",
"# load all urls",
"# support .urls file and urls_conf = 'elephantblog.urls' on default module",
"# decorate all url patterns if is not explicitly excluded",
"for",
"mod",
"in",
"leonardo",
".",
"modules",
":",
"# TODO this not work",
"if",
"is_leonardo_module",
"(",
"mod",
")",
":",
"conf",
"=",
"get_conf_from_module",
"(",
"mod",
")",
"if",
"module_has_submodule",
"(",
"mod",
",",
"'urls'",
")",
":",
"urls_mod",
"=",
"import_module",
"(",
"'.urls'",
",",
"mod",
".",
"__name__",
")",
"if",
"hasattr",
"(",
"urls_mod",
",",
"'urlpatterns'",
")",
":",
"# if not public decorate all",
"if",
"conf",
"[",
"'public'",
"]",
":",
"urlpatterns",
"+=",
"urls_mod",
".",
"urlpatterns",
"else",
":",
"_decorate_urlconf",
"(",
"urls_mod",
".",
"urlpatterns",
",",
"require_auth",
")",
"urlpatterns",
"+=",
"urls_mod",
".",
"urlpatterns",
"# avoid circural dependency",
"# TODO use our loaded modules instead this property",
"from",
"django",
".",
"conf",
"import",
"settings",
"for",
"urls_conf",
",",
"conf",
"in",
"six",
".",
"iteritems",
"(",
"getattr",
"(",
"settings",
",",
"'MODULE_URLS'",
",",
"{",
"}",
")",
")",
":",
"# is public ?",
"try",
":",
"if",
"conf",
"[",
"'is_public'",
"]",
":",
"urlpatterns",
"+=",
"patterns",
"(",
"''",
",",
"url",
"(",
"r''",
",",
"include",
"(",
"urls_conf",
")",
")",
",",
")",
"else",
":",
"_decorate_urlconf",
"(",
"url",
"(",
"r''",
",",
"include",
"(",
"urls_conf",
")",
")",
",",
"require_auth",
")",
"urlpatterns",
"+=",
"patterns",
"(",
"''",
",",
"url",
"(",
"r''",
",",
"include",
"(",
"urls_conf",
")",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"Exception",
"(",
"'raised %s during loading %s'",
"%",
"(",
"str",
"(",
"e",
")",
",",
"urls_conf",
")",
")",
"self",
".",
"_urlpatterns",
"=",
"urlpatterns",
"return",
"self",
".",
"_urlpatterns"
]
| 43.88 | 17.56 |
def vq_nearest_neighbor(x, means,
soft_em=False, num_samples=10, temperature=None):
"""Find the nearest element in means to elements in x."""
bottleneck_size = common_layers.shape_list(means)[0]
x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True)
scalar_prod = tf.matmul(x, means, transpose_b=True)
dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod
if soft_em:
x_means_idx = tf.multinomial(-dist, num_samples=num_samples)
x_means_hot = tf.one_hot(
x_means_idx, depth=common_layers.shape_list(means)[0])
x_means_hot = tf.reduce_mean(x_means_hot, axis=1)
else:
if temperature is None:
x_means_idx = tf.argmax(-dist, axis=-1)
else:
x_means_idx = tf.multinomial(- dist / temperature, 1)
x_means_idx = tf.squeeze(x_means_idx, axis=-1)
if (common_layers.should_generate_summaries() and
not common_layers.is_xla_compiled()):
tf.summary.histogram("means_idx", tf.reshape(x_means_idx, [-1]))
x_means_hot = tf.one_hot(x_means_idx, bottleneck_size)
x_means_hot_flat = tf.reshape(x_means_hot, [-1, bottleneck_size])
x_means = tf.matmul(x_means_hot_flat, means)
e_loss = tf.reduce_mean(tf.squared_difference(x, tf.stop_gradient(x_means)))
return x_means_hot, e_loss, dist | [
"def",
"vq_nearest_neighbor",
"(",
"x",
",",
"means",
",",
"soft_em",
"=",
"False",
",",
"num_samples",
"=",
"10",
",",
"temperature",
"=",
"None",
")",
":",
"bottleneck_size",
"=",
"common_layers",
".",
"shape_list",
"(",
"means",
")",
"[",
"0",
"]",
"x_norm_sq",
"=",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"square",
"(",
"x",
")",
",",
"axis",
"=",
"-",
"1",
",",
"keepdims",
"=",
"True",
")",
"means_norm_sq",
"=",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"square",
"(",
"means",
")",
",",
"axis",
"=",
"-",
"1",
",",
"keepdims",
"=",
"True",
")",
"scalar_prod",
"=",
"tf",
".",
"matmul",
"(",
"x",
",",
"means",
",",
"transpose_b",
"=",
"True",
")",
"dist",
"=",
"x_norm_sq",
"+",
"tf",
".",
"transpose",
"(",
"means_norm_sq",
")",
"-",
"2",
"*",
"scalar_prod",
"if",
"soft_em",
":",
"x_means_idx",
"=",
"tf",
".",
"multinomial",
"(",
"-",
"dist",
",",
"num_samples",
"=",
"num_samples",
")",
"x_means_hot",
"=",
"tf",
".",
"one_hot",
"(",
"x_means_idx",
",",
"depth",
"=",
"common_layers",
".",
"shape_list",
"(",
"means",
")",
"[",
"0",
"]",
")",
"x_means_hot",
"=",
"tf",
".",
"reduce_mean",
"(",
"x_means_hot",
",",
"axis",
"=",
"1",
")",
"else",
":",
"if",
"temperature",
"is",
"None",
":",
"x_means_idx",
"=",
"tf",
".",
"argmax",
"(",
"-",
"dist",
",",
"axis",
"=",
"-",
"1",
")",
"else",
":",
"x_means_idx",
"=",
"tf",
".",
"multinomial",
"(",
"-",
"dist",
"/",
"temperature",
",",
"1",
")",
"x_means_idx",
"=",
"tf",
".",
"squeeze",
"(",
"x_means_idx",
",",
"axis",
"=",
"-",
"1",
")",
"if",
"(",
"common_layers",
".",
"should_generate_summaries",
"(",
")",
"and",
"not",
"common_layers",
".",
"is_xla_compiled",
"(",
")",
")",
":",
"tf",
".",
"summary",
".",
"histogram",
"(",
"\"means_idx\"",
",",
"tf",
".",
"reshape",
"(",
"x_means_idx",
",",
"[",
"-",
"1",
"]",
")",
")",
"x_means_hot",
"=",
"tf",
".",
"one_hot",
"(",
"x_means_idx",
",",
"bottleneck_size",
")",
"x_means_hot_flat",
"=",
"tf",
".",
"reshape",
"(",
"x_means_hot",
",",
"[",
"-",
"1",
",",
"bottleneck_size",
"]",
")",
"x_means",
"=",
"tf",
".",
"matmul",
"(",
"x_means_hot_flat",
",",
"means",
")",
"e_loss",
"=",
"tf",
".",
"reduce_mean",
"(",
"tf",
".",
"squared_difference",
"(",
"x",
",",
"tf",
".",
"stop_gradient",
"(",
"x_means",
")",
")",
")",
"return",
"x_means_hot",
",",
"e_loss",
",",
"dist"
]
| 49.888889 | 18.666667 |
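The dist computation above uses the expansion ||x - m||^2 = ||x||^2 + ||m||^2 - 2 x.m, so all pairwise distances reduce to a single matrix multiply. A dependency-free numpy sketch of the hard-assignment path (assumed shapes: x is [n, d], means is [k, d]):

import numpy as np

def nearest_codebook_entry(x, means):
    x_norm_sq = (x ** 2).sum(axis=-1, keepdims=True)          # [n, 1]
    means_norm_sq = (means ** 2).sum(axis=-1, keepdims=True)  # [k, 1]
    scalar_prod = x @ means.T                                 # [n, k]
    dist = x_norm_sq + means_norm_sq.T - 2 * scalar_prod
    return dist.argmin(axis=-1)

x = np.array([[0.1, 0.0], [0.9, 1.1]])
means = np.array([[0.0, 0.0], [1.0, 1.0]])
print(nearest_codebook_entry(x, means))  # [0 1]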
def registerDriver(iface, driver, class_implements=[]):
""" Register driver adapter used by page object"""
for class_item in class_implements:
classImplements(class_item, iface)
component.provideAdapter(factory=driver, adapts=[iface], provides=IDriver) | [
"def",
"registerDriver",
"(",
"iface",
",",
"driver",
",",
"class_implements",
"=",
"[",
"]",
")",
":",
"for",
"class_item",
"in",
"class_implements",
":",
"classImplements",
"(",
"class_item",
",",
"iface",
")",
"component",
".",
"provideAdapter",
"(",
"factory",
"=",
"driver",
",",
"adapts",
"=",
"[",
"iface",
"]",
",",
"provides",
"=",
"IDriver",
")"
]
| 44.666667 | 16 |
def _getVals(self, prefix = ""):
"""
return the values in the vals dict
in case prefix is "", change the first letter of the name to lowercase, otherwise use prefix+name as the new name
"""
if not hasattr(self, "vals"):
self.vals = {}
dict = {}
for key in list(self.vals.keys()):
# if no prefix then lower the first letter
if prefix == "":
newkey = key[:1].lower() + key[1:] if key else ""
dict[newkey] = self.vals[key]
else:
newkey = key[:1].upper() + key[1:] if key else ""
dict[prefix + newkey] = self.vals[key]
return dict | [
"def",
"_getVals",
"(",
"self",
",",
"prefix",
"=",
"\"\"",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"vals\"",
")",
":",
"self",
".",
"vals",
"=",
"{",
"}",
"dict",
"=",
"{",
"}",
"for",
"key",
"in",
"list",
"(",
"self",
".",
"vals",
".",
"keys",
"(",
")",
")",
":",
"# if no prefix then lower the first letter",
"if",
"prefix",
"==",
"\"\"",
":",
"newkey",
"=",
"key",
"[",
":",
"1",
"]",
".",
"lower",
"(",
")",
"+",
"key",
"[",
"1",
":",
"]",
"if",
"key",
"else",
"\"\"",
"dict",
"[",
"newkey",
"]",
"=",
"self",
".",
"vals",
"[",
"key",
"]",
"else",
":",
"newkey",
"=",
"key",
"[",
":",
"1",
"]",
".",
"upper",
"(",
")",
"+",
"key",
"[",
"1",
":",
"]",
"if",
"key",
"else",
"\"\"",
"dict",
"[",
"prefix",
"+",
"newkey",
"]",
"=",
"self",
".",
"vals",
"[",
"key",
"]",
"return",
"dict"
]
| 40.352941 | 16 |
def parse_readme():
"""
Crude parsing of modules/README.md
returns a dict of {<module_name>: <documentation>}
"""
name = None
re_mod = re.compile(r'^\#\#\# <a name="(?P<name>[a-z_0-9]+)"></a>')
readme_file = os.path.join(modules_directory(), "README.md")
modules_dict = {}
with open(readme_file) as f:
for row in f.readlines():
match = re_mod.match(row)
if match:
name = match.group("name")
modules_dict[name] = []
continue
if row.startswith("---"):
name = None
continue
if name:
modules_dict[name].append(row)
return modules_dict | [
"def",
"parse_readme",
"(",
")",
":",
"name",
"=",
"None",
"re_mod",
"=",
"re",
".",
"compile",
"(",
"r'^\\#\\#\\# <a name=\"(?P<name>[a-z_0-9]+)\"></a>'",
")",
"readme_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"modules_directory",
"(",
")",
",",
"\"README.md\"",
")",
"modules_dict",
"=",
"{",
"}",
"with",
"open",
"(",
"readme_file",
")",
"as",
"f",
":",
"for",
"row",
"in",
"f",
".",
"readlines",
"(",
")",
":",
"match",
"=",
"re_mod",
".",
"match",
"(",
"row",
")",
"if",
"match",
":",
"name",
"=",
"match",
".",
"group",
"(",
"\"name\"",
")",
"modules_dict",
"[",
"name",
"]",
"=",
"[",
"]",
"continue",
"if",
"row",
".",
"startswith",
"(",
"\"---\"",
")",
":",
"name",
"=",
"None",
"continue",
"if",
"name",
":",
"modules_dict",
"[",
"name",
"]",
".",
"append",
"(",
"row",
")",
"return",
"modules_dict"
]
| 31.863636 | 12.136364 |
def build_graph(path, term_depth=1000, skim_depth=10,
d_weights=False, **kwargs):
"""
Tokenize a text, index a term matrix, and build out a graph.
Args:
path (str): The file path.
term_depth (int): Consider the N most frequent terms.
skim_depth (int): Connect each word to the N closest siblings.
d_weights (bool): If true, give "close" nodes low weights.
Returns:
Skimmer: The indexed graph.
"""
# Tokenize text.
click.echo('\nTokenizing text...')
t = Text.from_file(path)
click.echo('Extracted %d tokens' % len(t.tokens))
m = Matrix()
# Index the term matrix.
click.echo('\nIndexing terms:')
m.index(t, t.most_frequent_terms(term_depth), **kwargs)
g = Skimmer()
# Construct the network.
click.echo('\nGenerating graph:')
g.build(t, m, skim_depth, d_weights)
return g | [
"def",
"build_graph",
"(",
"path",
",",
"term_depth",
"=",
"1000",
",",
"skim_depth",
"=",
"10",
",",
"d_weights",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"# Tokenize text.",
"click",
".",
"echo",
"(",
"'\\nTokenizing text...'",
")",
"t",
"=",
"Text",
".",
"from_file",
"(",
"path",
")",
"click",
".",
"echo",
"(",
"'Extracted %d tokens'",
"%",
"len",
"(",
"t",
".",
"tokens",
")",
")",
"m",
"=",
"Matrix",
"(",
")",
"# Index the term matrix.",
"click",
".",
"echo",
"(",
"'\\nIndexing terms:'",
")",
"m",
".",
"index",
"(",
"t",
",",
"t",
".",
"most_frequent_terms",
"(",
"term_depth",
")",
",",
"*",
"*",
"kwargs",
")",
"g",
"=",
"Skimmer",
"(",
")",
"# Construct the network.",
"click",
".",
"echo",
"(",
"'\\nGenerating graph:'",
")",
"g",
".",
"build",
"(",
"t",
",",
"m",
",",
"skim_depth",
",",
"d_weights",
")",
"return",
"g"
]
| 25.647059 | 21.176471 |
def broadcast_definition(cast_name, onto_name):
"""
Return the definition of a broadcast as an object with keys
"cast", "onto", "cast_on", "onto_on", "cast_index", and "onto_index".
These are the same as the arguments to the ``broadcast`` function.
"""
if not orca.is_broadcast(cast_name, onto_name):
abort(404)
b = orca.get_broadcast(cast_name, onto_name)
return jsonify(
cast=b.cast, onto=b.onto, cast_on=b.cast_on, onto_on=b.onto_on,
cast_index=b.cast_index, onto_index=b.onto_index) | [
"def",
"broadcast_definition",
"(",
"cast_name",
",",
"onto_name",
")",
":",
"if",
"not",
"orca",
".",
"is_broadcast",
"(",
"cast_name",
",",
"onto_name",
")",
":",
"abort",
"(",
"404",
")",
"b",
"=",
"orca",
".",
"get_broadcast",
"(",
"cast_name",
",",
"onto_name",
")",
"return",
"jsonify",
"(",
"cast",
"=",
"b",
".",
"cast",
",",
"onto",
"=",
"b",
".",
"onto",
",",
"cast_on",
"=",
"b",
".",
"cast_on",
",",
"onto_on",
"=",
"b",
".",
"onto_on",
",",
"cast_index",
"=",
"b",
".",
"cast_index",
",",
"onto_index",
"=",
"b",
".",
"onto_index",
")"
]
| 35.4 | 21.533333 |