text (string, lengths 89 to 104k) | code_tokens (list) | avg_line_len (float64, 7.91 to 980) | score (float64, 0 to 630)
---|---|---|---|
def get_trainer(self, model, method='sgd', config=None, annealer=None, validator=None):
"""
Get a trainer to optimize the given model.
:rtype: deepy.trainers.GeneralNeuralTrainer
"""
from deepy.trainers import GeneralNeuralTrainer
return GeneralNeuralTrainer(model, method=method, config=config, annealer=annealer, validator=validator) | [
"def",
"get_trainer",
"(",
"self",
",",
"model",
",",
"method",
"=",
"'sgd'",
",",
"config",
"=",
"None",
",",
"annealer",
"=",
"None",
",",
"validator",
"=",
"None",
")",
":",
"from",
"deepy",
".",
"trainers",
"import",
"GeneralNeuralTrainer",
"return",
"GeneralNeuralTrainer",
"(",
"model",
",",
"method",
"=",
"method",
",",
"config",
"=",
"config",
",",
"annealer",
"=",
"annealer",
",",
"validator",
"=",
"validator",
")"
]
| 53.428571 | 21.714286 |
def rollback(cls, bigchain, new_height, txn_ids):
"""Looks for election and vote transactions inside the block and
cleans up the database artifacts possibly created in `process_blocks`.
Part of the `end_block`/`commit` crash recovery.
"""
# delete election records for elections initiated at this height and
# elections concluded at this height
bigchain.delete_elections(new_height)
txns = [bigchain.get_transaction(tx_id) for tx_id in txn_ids]
elections = cls._get_votes(txns)
for election_id in elections:
election = bigchain.get_transaction(election_id)
election.on_rollback(bigchain, new_height) | [
"def",
"rollback",
"(",
"cls",
",",
"bigchain",
",",
"new_height",
",",
"txn_ids",
")",
":",
"# delete election records for elections initiated at this height and",
"# elections concluded at this height",
"bigchain",
".",
"delete_elections",
"(",
"new_height",
")",
"txns",
"=",
"[",
"bigchain",
".",
"get_transaction",
"(",
"tx_id",
")",
"for",
"tx_id",
"in",
"txn_ids",
"]",
"elections",
"=",
"cls",
".",
"_get_votes",
"(",
"txns",
")",
"for",
"election_id",
"in",
"elections",
":",
"election",
"=",
"bigchain",
".",
"get_transaction",
"(",
"election_id",
")",
"election",
".",
"on_rollback",
"(",
"bigchain",
",",
"new_height",
")"
]
| 41 | 20 |
def cleanup_service(self, factory, svc_registration):
# type: (Any, ServiceRegistration) -> bool
"""
If this bundle used that factory, releases the reference; else does
nothing
:param factory: The service factory
:param svc_registration: The ServiceRegistration object
:return: True if the bundle was using the factory, else False
"""
svc_ref = svc_registration.get_reference()
try:
# "service" for factories, "services" for prototypes
services, _ = self.__factored.pop(svc_ref)
except KeyError:
return False
else:
if svc_ref.is_prototype() and services:
for service in services:
try:
factory.unget_service_instance(
self.__bundle, svc_registration, service
)
except Exception:
# Ignore instance-level exceptions, potential errors
# will reappear in unget_service()
pass
# Call the factory
factory.unget_service(self.__bundle, svc_registration)
# No more association
svc_ref.unused_by(self.__bundle)
return True | [
"def",
"cleanup_service",
"(",
"self",
",",
"factory",
",",
"svc_registration",
")",
":",
"# type: (Any, ServiceRegistration) -> bool",
"svc_ref",
"=",
"svc_registration",
".",
"get_reference",
"(",
")",
"try",
":",
"# \"service\" for factories, \"services\" for prototypes",
"services",
",",
"_",
"=",
"self",
".",
"__factored",
".",
"pop",
"(",
"svc_ref",
")",
"except",
"KeyError",
":",
"return",
"False",
"else",
":",
"if",
"svc_ref",
".",
"is_prototype",
"(",
")",
"and",
"services",
":",
"for",
"service",
"in",
"services",
":",
"try",
":",
"factory",
".",
"unget_service_instance",
"(",
"self",
".",
"__bundle",
",",
"svc_registration",
",",
"service",
")",
"except",
"Exception",
":",
"# Ignore instance-level exceptions, potential errors",
"# will reappear in unget_service()",
"pass",
"# Call the factory",
"factory",
".",
"unget_service",
"(",
"self",
".",
"__bundle",
",",
"svc_registration",
")",
"# No more association",
"svc_ref",
".",
"unused_by",
"(",
"self",
".",
"__bundle",
")",
"return",
"True"
]
| 37.911765 | 17.970588 |
def to_data(value):
"""Standardize data types. Converts PyTorch tensors to Numpy arrays,
and Numpy scalars to Python scalars."""
# TODO: Use get_framework() for better detection.
if value.__class__.__module__.startswith("torch"):
import torch
if isinstance(value, torch.nn.parameter.Parameter):
value = value.data
if isinstance(value, torch.Tensor):
if value.requires_grad:
value = value.detach()
value = value.cpu().numpy().copy()
# If 0-dim array, convert to scalar
if not value.shape:
value = value.item()
# Convert Numpy scalar types to Python types
if value.__class__.__module__ == "numpy" and value.__class__.__name__ != "ndarray":
value = value.item()
return value | [
"def",
"to_data",
"(",
"value",
")",
":",
"# TODO: Use get_framework() for better detection.",
"if",
"value",
".",
"__class__",
".",
"__module__",
".",
"startswith",
"(",
"\"torch\"",
")",
":",
"import",
"torch",
"if",
"isinstance",
"(",
"value",
",",
"torch",
".",
"nn",
".",
"parameter",
".",
"Parameter",
")",
":",
"value",
"=",
"value",
".",
"data",
"if",
"isinstance",
"(",
"value",
",",
"torch",
".",
"Tensor",
")",
":",
"if",
"value",
".",
"requires_grad",
":",
"value",
"=",
"value",
".",
"detach",
"(",
")",
"value",
"=",
"value",
".",
"cpu",
"(",
")",
".",
"numpy",
"(",
")",
".",
"copy",
"(",
")",
"# If 0-dim array, convert to scalar",
"if",
"not",
"value",
".",
"shape",
":",
"value",
"=",
"value",
".",
"item",
"(",
")",
"# Convert Numpy scalar types to Python types",
"if",
"value",
".",
"__class__",
".",
"__module__",
"==",
"\"numpy\"",
"and",
"value",
".",
"__class__",
".",
"__name__",
"!=",
"\"ndarray\"",
":",
"value",
"=",
"value",
".",
"item",
"(",
")",
"return",
"value"
]
| 41.736842 | 12 |
def new_bg(self,bg):
"""
m.new_bg(bg) -- Change the ACGT background frequencies to those in the supplied dictionary.
Recompute log-likelihood, etc. with new background.
"""
counts = []
for pos in self.logP:
D = {}
for L,lp in pos.items():
D[L] = math.pow(2.0,lp)
counts.append(D)
self.background = bg
self.compute_from_counts(counts,0) | [
"def",
"new_bg",
"(",
"self",
",",
"bg",
")",
":",
"counts",
"=",
"[",
"]",
"for",
"pos",
"in",
"self",
".",
"logP",
":",
"D",
"=",
"{",
"}",
"for",
"L",
",",
"lp",
"in",
"pos",
".",
"items",
"(",
")",
":",
"D",
"[",
"L",
"]",
"=",
"math",
".",
"pow",
"(",
"2.0",
",",
"lp",
")",
"counts",
".",
"append",
"(",
"D",
")",
"self",
".",
"background",
"=",
"bg",
"self",
".",
"compute_from_counts",
"(",
"counts",
",",
"0",
")"
]
| 35.307692 | 15.615385 |
def get_all_saved_searches(self, **kwargs): # noqa: E501
"""Get all saved searches for a user # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_saved_searches(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset:
:param int limit:
:return: ResponseContainerPagedSavedSearch
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_all_saved_searches_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_all_saved_searches_with_http_info(**kwargs) # noqa: E501
return data | [
"def",
"get_all_saved_searches",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"get_all_saved_searches_with_http_info",
"(",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"get_all_saved_searches_with_http_info",
"(",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
]
| 40.909091 | 19.045455 |
async def _catch_response(self, h11_connection):
'''
Instantiates the parser which manages incoming data, first getting
the headers, storing cookies, and then parsing the response's body,
if any.
This function also instances the Response class in which the response
status line, headers, cookies, and body is stored.
Note that, to remain performant, any file IO done by the user should use
async files; otherwise, when using callbacks, there is a risk of long wait
times and server-side connection issues.
If a callback is used, the response's body will be None.
Returns:
The most recent response object.
'''
response = await self._recv_event(h11_connection)
resp_data = {'encoding': self.encoding,
'method': self.method,
'status_code': response.status_code,
'reason_phrase': str(response.reason, 'utf-8'),
'http_version': str(response.http_version, 'utf-8'),
'headers': c_i_dict(
[(str(name, 'utf-8'), str(value, 'utf-8'))
for name, value in response.headers]),
'body': b'',
'url': self.req_url
}
for header in response.headers:
if header[0] == b'set-cookie':
try:
resp_data['headers']['set-cookie'].append(str(header[1],
'utf-8'))
except (KeyError, AttributeError):
resp_data['headers']['set-cookie'] = [str(header[1],
'utf-8')]
# check whether we should receive body according to RFC 7230
# https://tools.ietf.org/html/rfc7230#section-3.3.3
get_body = False
try:
if int(resp_data['headers']['content-length']) > 0:
get_body = True
except KeyError:
try:
if 'chunked' in resp_data['headers']['transfer-encoding'].lower():
get_body = True
except KeyError:
if resp_data['headers'].get('connection', '').lower() == 'close':
get_body = True
if get_body:
if self.callback is not None:
endof = await self._body_callback(h11_connection)
elif self.stream:
if not ((self.scheme == self.initial_scheme and
self.host == self.initial_netloc) or
resp_data['headers']['connection'].lower() == 'close'):
self.sock._active = False
resp_data['body'] = StreamBody(
h11_connection,
self.sock,
resp_data['headers'].get('content-encoding', None),
resp_data['encoding'])
self.streaming = True
else:
while True:
data = await self._recv_event(h11_connection)
if isinstance(data, h11.Data):
resp_data['body'] += data.data
elif isinstance(data, h11.EndOfMessage):
break
else:
endof = await self._recv_event(h11_connection)
assert isinstance(endof, h11.EndOfMessage)
if self.streaming:
return StreamResponse(**resp_data)
return Response(**resp_data) | [
"async",
"def",
"_catch_response",
"(",
"self",
",",
"h11_connection",
")",
":",
"response",
"=",
"await",
"self",
".",
"_recv_event",
"(",
"h11_connection",
")",
"resp_data",
"=",
"{",
"'encoding'",
":",
"self",
".",
"encoding",
",",
"'method'",
":",
"self",
".",
"method",
",",
"'status_code'",
":",
"response",
".",
"status_code",
",",
"'reason_phrase'",
":",
"str",
"(",
"response",
".",
"reason",
",",
"'utf-8'",
")",
",",
"'http_version'",
":",
"str",
"(",
"response",
".",
"http_version",
",",
"'utf-8'",
")",
",",
"'headers'",
":",
"c_i_dict",
"(",
"[",
"(",
"str",
"(",
"name",
",",
"'utf-8'",
")",
",",
"str",
"(",
"value",
",",
"'utf-8'",
")",
")",
"for",
"name",
",",
"value",
"in",
"response",
".",
"headers",
"]",
")",
",",
"'body'",
":",
"b''",
",",
"'url'",
":",
"self",
".",
"req_url",
"}",
"for",
"header",
"in",
"response",
".",
"headers",
":",
"if",
"header",
"[",
"0",
"]",
"==",
"b'set-cookie'",
":",
"try",
":",
"resp_data",
"[",
"'headers'",
"]",
"[",
"'set-cookie'",
"]",
".",
"append",
"(",
"str",
"(",
"header",
"[",
"1",
"]",
",",
"'utf-8'",
")",
")",
"except",
"(",
"KeyError",
",",
"AttributeError",
")",
":",
"resp_data",
"[",
"'headers'",
"]",
"[",
"'set-cookie'",
"]",
"=",
"[",
"str",
"(",
"header",
"[",
"1",
"]",
",",
"'utf-8'",
")",
"]",
"# check whether we should receive body according to RFC 7230",
"# https://tools.ietf.org/html/rfc7230#section-3.3.3",
"get_body",
"=",
"False",
"try",
":",
"if",
"int",
"(",
"resp_data",
"[",
"'headers'",
"]",
"[",
"'content-length'",
"]",
")",
">",
"0",
":",
"get_body",
"=",
"True",
"except",
"KeyError",
":",
"try",
":",
"if",
"'chunked'",
"in",
"resp_data",
"[",
"'headers'",
"]",
"[",
"'transfer-encoding'",
"]",
".",
"lower",
"(",
")",
":",
"get_body",
"=",
"True",
"except",
"KeyError",
":",
"if",
"resp_data",
"[",
"'headers'",
"]",
".",
"get",
"(",
"'connection'",
",",
"''",
")",
".",
"lower",
"(",
")",
"==",
"'close'",
":",
"get_body",
"=",
"True",
"if",
"get_body",
":",
"if",
"self",
".",
"callback",
"is",
"not",
"None",
":",
"endof",
"=",
"await",
"self",
".",
"_body_callback",
"(",
"h11_connection",
")",
"elif",
"self",
".",
"stream",
":",
"if",
"not",
"(",
"(",
"self",
".",
"scheme",
"==",
"self",
".",
"initial_scheme",
"and",
"self",
".",
"host",
"==",
"self",
".",
"initial_netloc",
")",
"or",
"resp_data",
"[",
"'headers'",
"]",
"[",
"'connection'",
"]",
".",
"lower",
"(",
")",
"==",
"'close'",
")",
":",
"self",
".",
"sock",
".",
"_active",
"=",
"False",
"resp_data",
"[",
"'body'",
"]",
"=",
"StreamBody",
"(",
"h11_connection",
",",
"self",
".",
"sock",
",",
"resp_data",
"[",
"'headers'",
"]",
".",
"get",
"(",
"'content-encoding'",
",",
"None",
")",
",",
"resp_data",
"[",
"'encoding'",
"]",
")",
"self",
".",
"streaming",
"=",
"True",
"else",
":",
"while",
"True",
":",
"data",
"=",
"await",
"self",
".",
"_recv_event",
"(",
"h11_connection",
")",
"if",
"isinstance",
"(",
"data",
",",
"h11",
".",
"Data",
")",
":",
"resp_data",
"[",
"'body'",
"]",
"+=",
"data",
".",
"data",
"elif",
"isinstance",
"(",
"data",
",",
"h11",
".",
"EndOfMessage",
")",
":",
"break",
"else",
":",
"endof",
"=",
"await",
"self",
".",
"_recv_event",
"(",
"h11_connection",
")",
"assert",
"isinstance",
"(",
"endof",
",",
"h11",
".",
"EndOfMessage",
")",
"if",
"self",
".",
"streaming",
":",
"return",
"StreamResponse",
"(",
"*",
"*",
"resp_data",
")",
"return",
"Response",
"(",
"*",
"*",
"resp_data",
")"
]
| 38.315217 | 22.858696 |
async def packet_receiver(queue):
""" Asynchronous function that processes queue until None is posted in queue """
LOG.info("Entering packet_receiver")
while True:
packet = await queue.get()
if packet is None:
break
LOG.info("Framenumber %s", packet.framenumber)
LOG.info("Exiting packet_receiver") | [
"async",
"def",
"packet_receiver",
"(",
"queue",
")",
":",
"LOG",
".",
"info",
"(",
"\"Entering packet_receiver\"",
")",
"while",
"True",
":",
"packet",
"=",
"await",
"queue",
".",
"get",
"(",
")",
"if",
"packet",
"is",
"None",
":",
"break",
"LOG",
".",
"info",
"(",
"\"Framenumber %s\"",
",",
"packet",
".",
"framenumber",
")",
"LOG",
".",
"info",
"(",
"\"Exiting packet_receiver\"",
")"
]
| 34.2 | 13 |
def normalize_namespace(namespace):
""" Given a namespace (e.g. '.' or 'mynamespace'),
returns it in normalized form. That is:
- always prefixed with a dot
- no trailing dots
- any double dots are converted to single dot (..my..namespace => .my.namespace)
- one or more dots (e.g. '.', '..', '...') are converted to '.' (Global namespace)
"""
namespace = (DOT + DOT.join(RE_DOTS.split(namespace))).rstrip(DOT) + DOT
return namespace | [
"def",
"normalize_namespace",
"(",
"namespace",
")",
":",
"namespace",
"=",
"(",
"DOT",
"+",
"DOT",
".",
"join",
"(",
"RE_DOTS",
".",
"split",
"(",
"namespace",
")",
")",
")",
".",
"rstrip",
"(",
"DOT",
")",
"+",
"DOT",
"return",
"namespace"
]
| 47.5 | 18 |
def set_if_empty(self, param, default):
""" Set the parameter to the default if it doesn't exist """
if not self.has(param):
self.set(param, default) | [
"def",
"set_if_empty",
"(",
"self",
",",
"param",
",",
"default",
")",
":",
"if",
"not",
"self",
".",
"has",
"(",
"param",
")",
":",
"self",
".",
"set",
"(",
"param",
",",
"default",
")"
]
| 43.5 | 3.5 |
def FixedOffset(offset, _tzinfos = {}):
"""return a fixed-offset timezone based off a number of minutes.
>>> one = FixedOffset(-330)
>>> one
pytz.FixedOffset(-330)
>>> one.utcoffset(datetime.datetime.now())
datetime.timedelta(-1, 66600)
>>> one.dst(datetime.datetime.now())
datetime.timedelta(0)
>>> two = FixedOffset(1380)
>>> two
pytz.FixedOffset(1380)
>>> two.utcoffset(datetime.datetime.now())
datetime.timedelta(0, 82800)
>>> two.dst(datetime.datetime.now())
datetime.timedelta(0)
The datetime.timedelta must be between the range of -1 and 1 day,
non-inclusive.
>>> FixedOffset(1440)
Traceback (most recent call last):
...
ValueError: ('absolute offset is too large', 1440)
>>> FixedOffset(-1440)
Traceback (most recent call last):
...
ValueError: ('absolute offset is too large', -1440)
An offset of 0 is special-cased to return UTC.
>>> FixedOffset(0) is UTC
True
There should always be only one instance of a FixedOffset per timedelta.
This should be true for multiple creation calls.
>>> FixedOffset(-330) is one
True
>>> FixedOffset(1380) is two
True
It should also be true for pickling.
>>> import pickle
>>> pickle.loads(pickle.dumps(one)) is one
True
>>> pickle.loads(pickle.dumps(two)) is two
True
"""
if offset == 0:
return UTC
info = _tzinfos.get(offset)
if info is None:
# We haven't seen this one before. we need to save it.
# Use setdefault to avoid a race condition and make sure we have
# only one
info = _tzinfos.setdefault(offset, _FixedOffset(offset))
return info | [
"def",
"FixedOffset",
"(",
"offset",
",",
"_tzinfos",
"=",
"{",
"}",
")",
":",
"if",
"offset",
"==",
"0",
":",
"return",
"UTC",
"info",
"=",
"_tzinfos",
".",
"get",
"(",
"offset",
")",
"if",
"info",
"is",
"None",
":",
"# We haven't seen this one before. we need to save it.",
"# Use setdefault to avoid a race condition and make sure we have",
"# only one",
"info",
"=",
"_tzinfos",
".",
"setdefault",
"(",
"offset",
",",
"_FixedOffset",
"(",
"offset",
")",
")",
"return",
"info"
]
| 27.523077 | 20.215385 |
def _insert_f_additive(s):
"""i.B:.-+u.M:.-O:.-' => i.f.B:.-+u.f.M:.-O:.-'"""
subst, attr, mode = s
assert isinstance(mode, NullScript)
if isinstance(subst, AdditiveScript):
subst = AdditiveScript([_insert_attr_f(_s) for _s in subst])
else:
subst = _insert_attr_f(subst)
    return m(subst, attr) | [
"def",
"_insert_f_additive",
"(",
"s",
")",
":",
"subst",
",",
"attr",
",",
"mode",
"=",
"s",
"assert",
"isinstance",
"(",
"mode",
",",
"NullScript",
")",
"if",
"isinstance",
"(",
"subst",
",",
"AdditiveScript",
")",
":",
"subst",
"=",
"AdditiveScript",
"(",
"[",
"_insert_attr_f",
"(",
"_s",
")",
"for",
"_s",
"in",
"subst",
"]",
")",
"else",
":",
"subst",
"=",
"_insert_attr_f",
"(",
"subst",
")",
"return",
"m",
"(",
"subst",
",",
"attr",
")"
]
| 29.454545 | 17.090909 |
def async_chain(chain, group=None, cached=Conf.CACHED, sync=Conf.SYNC, broker=None):
"""
enqueues a chain of tasks
the chain must be in the format [(func,(args),{kwargs}),(func,(args),{kwargs})]
"""
if not group:
group = uuid()[1]
args = ()
kwargs = {}
task = chain.pop(0)
if type(task) is not tuple:
task = (task,)
if len(task) > 1:
args = task[1]
if len(task) > 2:
kwargs = task[2]
kwargs['chain'] = chain
kwargs['group'] = group
kwargs['cached'] = cached
kwargs['sync'] = sync
kwargs['broker'] = broker or get_broker()
async_task(task[0], *args, **kwargs)
return group | [
"def",
"async_chain",
"(",
"chain",
",",
"group",
"=",
"None",
",",
"cached",
"=",
"Conf",
".",
"CACHED",
",",
"sync",
"=",
"Conf",
".",
"SYNC",
",",
"broker",
"=",
"None",
")",
":",
"if",
"not",
"group",
":",
"group",
"=",
"uuid",
"(",
")",
"[",
"1",
"]",
"args",
"=",
"(",
")",
"kwargs",
"=",
"{",
"}",
"task",
"=",
"chain",
".",
"pop",
"(",
"0",
")",
"if",
"type",
"(",
"task",
")",
"is",
"not",
"tuple",
":",
"task",
"=",
"(",
"task",
",",
")",
"if",
"len",
"(",
"task",
")",
">",
"1",
":",
"args",
"=",
"task",
"[",
"1",
"]",
"if",
"len",
"(",
"task",
")",
">",
"2",
":",
"kwargs",
"=",
"task",
"[",
"2",
"]",
"kwargs",
"[",
"'chain'",
"]",
"=",
"chain",
"kwargs",
"[",
"'group'",
"]",
"=",
"group",
"kwargs",
"[",
"'cached'",
"]",
"=",
"cached",
"kwargs",
"[",
"'sync'",
"]",
"=",
"sync",
"kwargs",
"[",
"'broker'",
"]",
"=",
"broker",
"or",
"get_broker",
"(",
")",
"async_task",
"(",
"task",
"[",
"0",
"]",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"group"
]
| 28.391304 | 16.73913 |
def encode_field(self, field, value):
"""Encode a python field value to a JSON value.
Args:
field: A ProtoRPC field instance.
value: A python value supported by field.
Returns:
A JSON serializable value appropriate for field.
"""
if isinstance(field, messages.BytesField):
if field.repeated:
value = [base64.b64encode(byte) for byte in value]
else:
value = base64.b64encode(value)
elif isinstance(field, message_types.DateTimeField):
# DateTimeField stores its data as a RFC 3339 compliant string.
if field.repeated:
value = [i.isoformat() for i in value]
else:
value = value.isoformat()
return value | [
"def",
"encode_field",
"(",
"self",
",",
"field",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"field",
",",
"messages",
".",
"BytesField",
")",
":",
"if",
"field",
".",
"repeated",
":",
"value",
"=",
"[",
"base64",
".",
"b64encode",
"(",
"byte",
")",
"for",
"byte",
"in",
"value",
"]",
"else",
":",
"value",
"=",
"base64",
".",
"b64encode",
"(",
"value",
")",
"elif",
"isinstance",
"(",
"field",
",",
"message_types",
".",
"DateTimeField",
")",
":",
"# DateTimeField stores its data as a RFC 3339 compliant string.",
"if",
"field",
".",
"repeated",
":",
"value",
"=",
"[",
"i",
".",
"isoformat",
"(",
")",
"for",
"i",
"in",
"value",
"]",
"else",
":",
"value",
"=",
"value",
".",
"isoformat",
"(",
")",
"return",
"value"
]
| 35.954545 | 16.590909 |
def _set_interface_ospfv3_conf(self, v, load=False):
"""
Setter method for interface_ospfv3_conf, mapped from YANG variable /rbridge_id/interface/ve/ipv6/interface_ospfv3_conf (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_ospfv3_conf is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_ospfv3_conf() directly.
YANG Description: Open Shortest Path First version 3 (OSPFv3)
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=interface_ospfv3_conf.interface_ospfv3_conf, is_container='container', presence=False, yang_name="interface-ospfv3-conf", rest_name="ospf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Open Shortest Path First version 3 (OSPFv3)', u'alt-name': u'ospf', u'cli-incomplete-no': None, u'callpoint': u'Ospfv3VeInterfaceConfig', u'cli-incomplete-command': None, u'sort-priority': u'113', u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_ospfv3_conf must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=interface_ospfv3_conf.interface_ospfv3_conf, is_container='container', presence=False, yang_name="interface-ospfv3-conf", rest_name="ospf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Open Shortest Path First version 3 (OSPFv3)', u'alt-name': u'ospf', u'cli-incomplete-no': None, u'callpoint': u'Ospfv3VeInterfaceConfig', u'cli-incomplete-command': None, u'sort-priority': u'113', u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)""",
})
self.__interface_ospfv3_conf = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_interface_ospfv3_conf",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"interface_ospfv3_conf",
".",
"interface_ospfv3_conf",
",",
"is_container",
"=",
"'container'",
",",
"presence",
"=",
"False",
",",
"yang_name",
"=",
"\"interface-ospfv3-conf\"",
",",
"rest_name",
"=",
"\"ospf\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'Open Shortest Path First version 3 (OSPFv3)'",
",",
"u'alt-name'",
":",
"u'ospf'",
",",
"u'cli-incomplete-no'",
":",
"None",
",",
"u'callpoint'",
":",
"u'Ospfv3VeInterfaceConfig'",
",",
"u'cli-incomplete-command'",
":",
"None",
",",
"u'sort-priority'",
":",
"u'113'",
",",
"u'display-when'",
":",
"u'/vcsmode/vcs-mode = \"true\"'",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-ospfv3'",
",",
"defining_module",
"=",
"'brocade-ospfv3'",
",",
"yang_type",
"=",
"'container'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"interface_ospfv3_conf must be of a type compatible with container\"\"\"",
",",
"'defined-type'",
":",
"\"container\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=interface_ospfv3_conf.interface_ospfv3_conf, is_container='container', presence=False, yang_name=\"interface-ospfv3-conf\", rest_name=\"ospf\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Open Shortest Path First version 3 (OSPFv3)', u'alt-name': u'ospf', u'cli-incomplete-no': None, u'callpoint': u'Ospfv3VeInterfaceConfig', u'cli-incomplete-command': None, u'sort-priority': u'113', u'display-when': u'/vcsmode/vcs-mode = \"true\"'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__interface_ospfv3_conf",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
]
| 93.458333 | 45.958333 |
def make_python_identifier(string, namespace=None, reserved_words=None,
convert='drop', handle='force'):
"""
Takes an arbitrary string and creates a valid Python identifier.
If the input string is in the namespace, return its value.
If the python identifier created is already in the namespace,
but the input string is not (ie, two similar strings resolve to
the same python identifier)
or if the identifier is a reserved word in the reserved_words
list, or is a python default reserved word,
adds _1, or if _1 is in the namespace, _2, etc.
Parameters
----------
string : <basestring>
The text to be converted into a valid python identifier
namespace : <dictionary>
Map of existing translations into python safe identifiers.
This is to ensure that two strings are not translated into
the same python identifier
reserved_words : <list of strings>
List of words that are reserved (because they have other meanings
in this particular program, such as also being the names of
libraries, etc.)
convert : <string>
Tells the function what to do with characters that are not
valid in python identifiers
- 'hex' implies that they will be converted to their hexadecimal
representation. This is handy if you have variables that
have a lot of reserved characters, or you don't want the
name to be dependent on when things were added to the
namespace
- 'drop' implies that they will just be dropped altogether
handle : <string>
Tells the function how to deal with namespace conflicts
- 'force' will create a representation which is not in conflict
by appending _n to the resulting variable where n is
the lowest number necessary to avoid a conflict
- 'throw' will raise an exception
Returns
-------
identifier : <string>
A valid python identifier based on the input string
namespace : <dictionary>
An updated map of the translations of words to python identifiers,
including the passed in 'string'.
Examples
--------
>>> make_python_identifier('Capital')
('capital', {'Capital': 'capital'})
>>> make_python_identifier('multiple words')
('multiple_words', {'multiple words': 'multiple_words'})
>>> make_python_identifier('multiple spaces')
('multiple_spaces', {'multiple spaces': 'multiple_spaces'})
When the name is a python keyword, add '_1' to differentiate it
>>> make_python_identifier('for')
('for_1', {'for': 'for_1'})
Remove leading and trailing whitespace
>>> make_python_identifier(' whitespace ')
('whitespace', {' whitespace ': 'whitespace'})
Remove most special characters outright:
>>> make_python_identifier('H@t tr!ck')
('ht_trck', {'H@t tr!ck': 'ht_trck'})
Replace special characters with their hex representations
>>> make_python_identifier('H@t tr!ck', convert='hex')
('h40t_tr21ck', {'H@t tr!ck': 'h40t_tr21ck'})
remove leading digits
>>> make_python_identifier('123abc')
('abc', {'123abc': 'abc'})
already in namespace
>>> make_python_identifier('Variable$', namespace={'Variable$': 'variable'})
('variable', {'Variable$': 'variable'})
namespace conflicts
>>> make_python_identifier('Variable$', namespace={'Variable@': 'variable'})
('variable_1', {'Variable@': 'variable', 'Variable$': 'variable_1'})
>>> make_python_identifier('Variable$', namespace={'Variable@': 'variable',
>>> 'Variable%': 'variable_1'})
('variable_2', {'Variable@': 'variable', 'Variable%': 'variable_1', 'Variable$': 'variable_2'})
throw exception instead
>>> make_python_identifier('Variable$', namespace={'Variable@': 'variable'}, handle='throw')
Traceback (most recent call last):
...
NameError: variable already exists in namespace or is a reserved word
References
----------
Identifiers must follow the convention outlined here:
https://docs.python.org/2/reference/lexical_analysis.html#identifiers
"""
if namespace is None:
namespace = dict()
if reserved_words is None:
reserved_words = list()
if string in namespace:
return namespace[string], namespace
# create a working copy (and make it lowercase, while we're at it)
s = string.lower()
# remove leading and trailing whitespace
s = s.strip()
# Make spaces into underscores
s = re.sub('[\\s\\t\\n]+', '_', s)
if convert == 'hex':
# Convert invalid characters to hex. Note: \p{l} designates all Unicode letter characters (any language),
# \p{m} designates all mark symbols (e.g., vowel marks in Indian scripts, such as the final)
# and \p{n} designates all numbers. We allow any of these to be present in the regex.
s = ''.join([c.encode("hex") if re.findall('[^\p{l}\p{m}\p{n}_]', c) else c for c in s])
elif convert == 'drop':
# Remove invalid characters
s = re.sub('[^\p{l}\p{m}\p{n}_]', '', s)
# Remove leading characters until we find a letter or underscore. Only letters can be leading characters.
s = re.sub('^[^\p{l}_]+', '', s)
# Check that the string is not a python identifier
while (s in keyword.kwlist or
s in namespace.values() or
s in reserved_words):
if handle == 'throw':
raise NameError(s + ' already exists in namespace or is a reserved word')
if handle == 'force':
if re.match(".*?_\d+$", s):
i = re.match(".*?_(\d+)$", s).groups()[0]
s = s.strip('_' + i) + '_' + str(int(i) + 1)
else:
s += '_1'
namespace[string] = s
return s, namespace | [
"def",
"make_python_identifier",
"(",
"string",
",",
"namespace",
"=",
"None",
",",
"reserved_words",
"=",
"None",
",",
"convert",
"=",
"'drop'",
",",
"handle",
"=",
"'force'",
")",
":",
"if",
"namespace",
"is",
"None",
":",
"namespace",
"=",
"dict",
"(",
")",
"if",
"reserved_words",
"is",
"None",
":",
"reserved_words",
"=",
"list",
"(",
")",
"if",
"string",
"in",
"namespace",
":",
"return",
"namespace",
"[",
"string",
"]",
",",
"namespace",
"# create a working copy (and make it lowercase, while we're at it)",
"s",
"=",
"string",
".",
"lower",
"(",
")",
"# remove leading and trailing whitespace",
"s",
"=",
"s",
".",
"strip",
"(",
")",
"# Make spaces into underscores",
"s",
"=",
"re",
".",
"sub",
"(",
"'[\\\\s\\\\t\\\\n]+'",
",",
"'_'",
",",
"s",
")",
"if",
"convert",
"==",
"'hex'",
":",
"# Convert invalid characters to hex. Note: \\p{l} designates all Unicode letter characters (any language),",
"# \\p{m} designates all mark symbols (e.g., vowel marks in Indian scrips, such as the final)",
"# and \\p{n} designates all numbers. We allow any of these to be present in the regex.",
"s",
"=",
"''",
".",
"join",
"(",
"[",
"c",
".",
"encode",
"(",
"\"hex\"",
")",
"if",
"re",
".",
"findall",
"(",
"'[^\\p{l}\\p{m}\\p{n}_]'",
",",
"c",
")",
"else",
"c",
"for",
"c",
"in",
"s",
"]",
")",
"elif",
"convert",
"==",
"'drop'",
":",
"# Remove invalid characters",
"s",
"=",
"re",
".",
"sub",
"(",
"'[^\\p{l}\\p{m}\\p{n}_]'",
",",
"''",
",",
"s",
")",
"# Remove leading characters until we find a letter or underscore. Only letters can be leading characters.",
"s",
"=",
"re",
".",
"sub",
"(",
"'^[^\\p{l}_]+'",
",",
"''",
",",
"s",
")",
"# Check that the string is not a python identifier",
"while",
"(",
"s",
"in",
"keyword",
".",
"kwlist",
"or",
"s",
"in",
"namespace",
".",
"values",
"(",
")",
"or",
"s",
"in",
"reserved_words",
")",
":",
"if",
"handle",
"==",
"'throw'",
":",
"raise",
"NameError",
"(",
"s",
"+",
"' already exists in namespace or is a reserved word'",
")",
"if",
"handle",
"==",
"'force'",
":",
"if",
"re",
".",
"match",
"(",
"\".*?_\\d+$\"",
",",
"s",
")",
":",
"i",
"=",
"re",
".",
"match",
"(",
"\".*?_(\\d+)$\"",
",",
"s",
")",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"s",
"=",
"s",
".",
"strip",
"(",
"'_'",
"+",
"i",
")",
"+",
"'_'",
"+",
"str",
"(",
"int",
"(",
"i",
")",
"+",
"1",
")",
"else",
":",
"s",
"+=",
"'_1'",
"namespace",
"[",
"string",
"]",
"=",
"s",
"return",
"s",
",",
"namespace"
]
| 37.818182 | 23.818182 |
def get_actual_start_time(self):
"""Gets the time this assessment was started.
return: (osid.calendaring.DateTime) - the start time
raise: IllegalState - ``has_started()`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
if not self.has_started():
raise errors.IllegalState('this assessment has not yet started')
if self._my_map['actualStartTime'] is None:
raise errors.IllegalState('this assessment has not yet been started by the taker')
else:
start_time = self._my_map['actualStartTime']
return DateTime(year=start_time.year,
month=start_time.month,
day=start_time.day,
hour=start_time.hour,
minute=start_time.minute,
second=start_time.second,
microsecond=start_time.microsecond) | [
"def",
"get_actual_start_time",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"has_started",
"(",
")",
":",
"raise",
"errors",
".",
"IllegalState",
"(",
"'this assessment has not yet started'",
")",
"if",
"self",
".",
"_my_map",
"[",
"'actualStartTime'",
"]",
"is",
"None",
":",
"raise",
"errors",
".",
"IllegalState",
"(",
"'this assessment has not yet been started by the taker'",
")",
"else",
":",
"start_time",
"=",
"self",
".",
"_my_map",
"[",
"'actualStartTime'",
"]",
"return",
"DateTime",
"(",
"year",
"=",
"start_time",
".",
"year",
",",
"month",
"=",
"start_time",
".",
"month",
",",
"day",
"=",
"start_time",
".",
"day",
",",
"hour",
"=",
"start_time",
".",
"hour",
",",
"minute",
"=",
"start_time",
".",
"minute",
",",
"second",
"=",
"start_time",
".",
"second",
",",
"microsecond",
"=",
"start_time",
".",
"microsecond",
")"
]
| 46.333333 | 18.619048 |
def _val(var, is_percent=False):
"""
Tries to determine the appropriate value of a particular variable that is
passed in. If the value is supposed to be a percentage, a whole integer
will be sought after and then turned into a floating point number between
0 and 1. If the value is supposed to be an integer, the variable is cast
into an integer.
"""
try:
if is_percent:
var = float(int(var.strip('%')) / 100.0)
else:
var = int(var)
except ValueError:
raise ValueError('invalid watermark parameter: ' + var)
return var | [
"def",
"_val",
"(",
"var",
",",
"is_percent",
"=",
"False",
")",
":",
"try",
":",
"if",
"is_percent",
":",
"var",
"=",
"float",
"(",
"int",
"(",
"var",
".",
"strip",
"(",
"'%'",
")",
")",
"/",
"100.0",
")",
"else",
":",
"var",
"=",
"int",
"(",
"var",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"'invalid watermark parameter: '",
"+",
"var",
")",
"return",
"var"
]
| 34.882353 | 22.647059 |
def cf_number_to_number(value):
"""
Converts a CFNumber object to a python float or integer
:param value:
The CFNumber object
:return:
A python number (float or integer)
"""
type_ = CoreFoundation.CFNumberGetType(_cast_pointer_p(value))
c_type = {
1: c_byte, # kCFNumberSInt8Type
2: ctypes.c_short, # kCFNumberSInt16Type
3: ctypes.c_int32, # kCFNumberSInt32Type
4: ctypes.c_int64, # kCFNumberSInt64Type
5: ctypes.c_float, # kCFNumberFloat32Type
6: ctypes.c_double, # kCFNumberFloat64Type
7: c_byte, # kCFNumberCharType
8: ctypes.c_short, # kCFNumberShortType
9: ctypes.c_int, # kCFNumberIntType
10: c_long, # kCFNumberLongType
11: ctypes.c_longlong, # kCFNumberLongLongType
12: ctypes.c_float, # kCFNumberFloatType
13: ctypes.c_double, # kCFNumberDoubleType
14: c_long, # kCFNumberCFIndexType
15: ctypes.c_int, # kCFNumberNSIntegerType
16: ctypes.c_double, # kCFNumberCGFloatType
}[type_]
output = c_type(0)
CoreFoundation.CFNumberGetValue(_cast_pointer_p(value), type_, byref(output))
return output.value | [
"def",
"cf_number_to_number",
"(",
"value",
")",
":",
"type_",
"=",
"CoreFoundation",
".",
"CFNumberGetType",
"(",
"_cast_pointer_p",
"(",
"value",
")",
")",
"c_type",
"=",
"{",
"1",
":",
"c_byte",
",",
"# kCFNumberSInt8Type",
"2",
":",
"ctypes",
".",
"c_short",
",",
"# kCFNumberSInt16Type",
"3",
":",
"ctypes",
".",
"c_int32",
",",
"# kCFNumberSInt32Type",
"4",
":",
"ctypes",
".",
"c_int64",
",",
"# kCFNumberSInt64Type",
"5",
":",
"ctypes",
".",
"c_float",
",",
"# kCFNumberFloat32Type",
"6",
":",
"ctypes",
".",
"c_double",
",",
"# kCFNumberFloat64Type",
"7",
":",
"c_byte",
",",
"# kCFNumberCharType",
"8",
":",
"ctypes",
".",
"c_short",
",",
"# kCFNumberShortType",
"9",
":",
"ctypes",
".",
"c_int",
",",
"# kCFNumberIntType",
"10",
":",
"c_long",
",",
"# kCFNumberLongType",
"11",
":",
"ctypes",
".",
"c_longlong",
",",
"# kCFNumberLongLongType",
"12",
":",
"ctypes",
".",
"c_float",
",",
"# kCFNumberFloatType",
"13",
":",
"ctypes",
".",
"c_double",
",",
"# kCFNumberDoubleType",
"14",
":",
"c_long",
",",
"# kCFNumberCFIndexType",
"15",
":",
"ctypes",
".",
"c_int",
",",
"# kCFNumberNSIntegerType",
"16",
":",
"ctypes",
".",
"c_double",
",",
"# kCFNumberCGFloatType",
"}",
"[",
"type_",
"]",
"output",
"=",
"c_type",
"(",
"0",
")",
"CoreFoundation",
".",
"CFNumberGetValue",
"(",
"_cast_pointer_p",
"(",
"value",
")",
",",
"type_",
",",
"byref",
"(",
"output",
")",
")",
"return",
"output",
".",
"value"
]
| 41.909091 | 19.060606 |
def text_color(background, dark_color=rgb_min, light_color=rgb_max):
"""
Given a background color in the form of an RGB 3-tuple, returns the color the text should be (defaulting to white
and black) for best readability. The light (white) and dark (black) defaults can be overridden to return preferred
values.
:param background:
:param dark_color:
:param light_color:
:return:
"""
max_y = rgb_to_yiq(rgb_max)[0]
return light_color if rgb_to_yiq(background)[0] <= max_y / 2 else dark_color | [
"def",
"text_color",
"(",
"background",
",",
"dark_color",
"=",
"rgb_min",
",",
"light_color",
"=",
"rgb_max",
")",
":",
"max_y",
"=",
"rgb_to_yiq",
"(",
"rgb_max",
")",
"[",
"0",
"]",
"return",
"light_color",
"if",
"rgb_to_yiq",
"(",
"background",
")",
"[",
"0",
"]",
"<=",
"max_y",
"/",
"2",
"else",
"dark_color"
]
| 40.076923 | 29.153846 |
def report(config, output, use, output_dir, accounts,
field, no_default_fields, tags, region, debug, verbose,
policy, policy_tags, format, resource, cache_path):
"""report on a cross account policy execution."""
accounts_config, custodian_config, executor = init(
config, use, debug, verbose, accounts, tags, policy,
resource=resource, policy_tags=policy_tags)
resource_types = set()
for p in custodian_config.get('policies'):
resource_types.add(p['resource'])
if len(resource_types) > 1:
raise ValueError("can only report on one resource type at a time")
elif not len(custodian_config['policies']) > 0:
raise ValueError("no matching policies found")
records = []
with executor(max_workers=WORKER_COUNT) as w:
futures = {}
for a in accounts_config.get('accounts', ()):
for r in resolve_regions(region or a.get('regions', ())):
futures[w.submit(
report_account,
a, r,
custodian_config,
output_dir,
cache_path,
debug)] = (a, r)
for f in as_completed(futures):
a, r = futures[f]
if f.exception():
if debug:
raise
log.warning(
"Error running policy in %s @ %s exception: %s",
a['name'], r, f.exception())
records.extend(f.result())
log.debug(
"Found %d records across %d accounts and %d policies",
len(records), len(accounts_config['accounts']),
len(custodian_config['policies']))
if format == 'json':
dumps(records, output, indent=2)
return
prefix_fields = OrderedDict(
(('Account', 'account'), ('Region', 'region'), ('Policy', 'policy')))
config = Config.empty()
factory = resource_registry.get(list(resource_types)[0])
formatter = Formatter(
factory.resource_type,
extra_fields=field,
include_default_fields=not(no_default_fields),
include_region=False,
include_policy=False,
fields=prefix_fields)
rows = formatter.to_csv(records, unique=False)
writer = UnicodeWriter(output, formatter.headers())
writer.writerow(formatter.headers())
writer.writerows(rows) | [
"def",
"report",
"(",
"config",
",",
"output",
",",
"use",
",",
"output_dir",
",",
"accounts",
",",
"field",
",",
"no_default_fields",
",",
"tags",
",",
"region",
",",
"debug",
",",
"verbose",
",",
"policy",
",",
"policy_tags",
",",
"format",
",",
"resource",
",",
"cache_path",
")",
":",
"accounts_config",
",",
"custodian_config",
",",
"executor",
"=",
"init",
"(",
"config",
",",
"use",
",",
"debug",
",",
"verbose",
",",
"accounts",
",",
"tags",
",",
"policy",
",",
"resource",
"=",
"resource",
",",
"policy_tags",
"=",
"policy_tags",
")",
"resource_types",
"=",
"set",
"(",
")",
"for",
"p",
"in",
"custodian_config",
".",
"get",
"(",
"'policies'",
")",
":",
"resource_types",
".",
"add",
"(",
"p",
"[",
"'resource'",
"]",
")",
"if",
"len",
"(",
"resource_types",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"can only report on one resource type at a time\"",
")",
"elif",
"not",
"len",
"(",
"custodian_config",
"[",
"'policies'",
"]",
")",
">",
"0",
":",
"raise",
"ValueError",
"(",
"\"no matching policies found\"",
")",
"records",
"=",
"[",
"]",
"with",
"executor",
"(",
"max_workers",
"=",
"WORKER_COUNT",
")",
"as",
"w",
":",
"futures",
"=",
"{",
"}",
"for",
"a",
"in",
"accounts_config",
".",
"get",
"(",
"'accounts'",
",",
"(",
")",
")",
":",
"for",
"r",
"in",
"resolve_regions",
"(",
"region",
"or",
"a",
".",
"get",
"(",
"'regions'",
",",
"(",
")",
")",
")",
":",
"futures",
"[",
"w",
".",
"submit",
"(",
"report_account",
",",
"a",
",",
"r",
",",
"custodian_config",
",",
"output_dir",
",",
"cache_path",
",",
"debug",
")",
"]",
"=",
"(",
"a",
",",
"r",
")",
"for",
"f",
"in",
"as_completed",
"(",
"futures",
")",
":",
"a",
",",
"r",
"=",
"futures",
"[",
"f",
"]",
"if",
"f",
".",
"exception",
"(",
")",
":",
"if",
"debug",
":",
"raise",
"log",
".",
"warning",
"(",
"\"Error running policy in %s @ %s exception: %s\"",
",",
"a",
"[",
"'name'",
"]",
",",
"r",
",",
"f",
".",
"exception",
"(",
")",
")",
"records",
".",
"extend",
"(",
"f",
".",
"result",
"(",
")",
")",
"log",
".",
"debug",
"(",
"\"Found %d records across %d accounts and %d policies\"",
",",
"len",
"(",
"records",
")",
",",
"len",
"(",
"accounts_config",
"[",
"'accounts'",
"]",
")",
",",
"len",
"(",
"custodian_config",
"[",
"'policies'",
"]",
")",
")",
"if",
"format",
"==",
"'json'",
":",
"dumps",
"(",
"records",
",",
"output",
",",
"indent",
"=",
"2",
")",
"return",
"prefix_fields",
"=",
"OrderedDict",
"(",
"(",
"(",
"'Account'",
",",
"'account'",
")",
",",
"(",
"'Region'",
",",
"'region'",
")",
",",
"(",
"'Policy'",
",",
"'policy'",
")",
")",
")",
"config",
"=",
"Config",
".",
"empty",
"(",
")",
"factory",
"=",
"resource_registry",
".",
"get",
"(",
"list",
"(",
"resource_types",
")",
"[",
"0",
"]",
")",
"formatter",
"=",
"Formatter",
"(",
"factory",
".",
"resource_type",
",",
"extra_fields",
"=",
"field",
",",
"include_default_fields",
"=",
"not",
"(",
"no_default_fields",
")",
",",
"include_region",
"=",
"False",
",",
"include_policy",
"=",
"False",
",",
"fields",
"=",
"prefix_fields",
")",
"rows",
"=",
"formatter",
".",
"to_csv",
"(",
"records",
",",
"unique",
"=",
"False",
")",
"writer",
"=",
"UnicodeWriter",
"(",
"output",
",",
"formatter",
".",
"headers",
"(",
")",
")",
"writer",
".",
"writerow",
"(",
"formatter",
".",
"headers",
"(",
")",
")",
"writer",
".",
"writerows",
"(",
"rows",
")"
]
| 35.830769 | 16.523077 |
def get_rpms(self):
"""
Build a list of installed RPMs in the format required for the
metadata.
"""
tags = [
'NAME',
'VERSION',
'RELEASE',
'ARCH',
'EPOCH',
'SIGMD5',
'SIGPGP:pgpsig',
'SIGGPG:pgpsig',
]
cmd = "/bin/rpm " + rpm_qf_args(tags)
try:
# py3
(status, output) = subprocess.getstatusoutput(cmd)
except AttributeError:
# py2
with open('/dev/null', 'r+') as devnull:
p = subprocess.Popen(cmd,
shell=True,
stdin=devnull,
stdout=subprocess.PIPE,
stderr=devnull)
(stdout, stderr) = p.communicate()
status = p.wait()
output = stdout.decode()
if status != 0:
self.log.debug("%s: stderr output: %s", cmd, stderr)
raise RuntimeError("%s: exit code %s" % (cmd, status))
return parse_rpm_output(output.splitlines(), tags) | [
"def",
"get_rpms",
"(",
"self",
")",
":",
"tags",
"=",
"[",
"'NAME'",
",",
"'VERSION'",
",",
"'RELEASE'",
",",
"'ARCH'",
",",
"'EPOCH'",
",",
"'SIGMD5'",
",",
"'SIGPGP:pgpsig'",
",",
"'SIGGPG:pgpsig'",
",",
"]",
"cmd",
"=",
"\"/bin/rpm \"",
"+",
"rpm_qf_args",
"(",
"tags",
")",
"try",
":",
"# py3",
"(",
"status",
",",
"output",
")",
"=",
"subprocess",
".",
"getstatusoutput",
"(",
"cmd",
")",
"except",
"AttributeError",
":",
"# py2",
"with",
"open",
"(",
"'/dev/null'",
",",
"'r+'",
")",
"as",
"devnull",
":",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"shell",
"=",
"True",
",",
"stdin",
"=",
"devnull",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"devnull",
")",
"(",
"stdout",
",",
"stderr",
")",
"=",
"p",
".",
"communicate",
"(",
")",
"status",
"=",
"p",
".",
"wait",
"(",
")",
"output",
"=",
"stdout",
".",
"decode",
"(",
")",
"if",
"status",
"!=",
"0",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"%s: stderr output: %s\"",
",",
"cmd",
",",
"stderr",
")",
"raise",
"RuntimeError",
"(",
"\"%s: exit code %s\"",
"%",
"(",
"cmd",
",",
"status",
")",
")",
"return",
"parse_rpm_output",
"(",
"output",
".",
"splitlines",
"(",
")",
",",
"tags",
")"
]
| 29.538462 | 19.128205 |
def set_write_bit(fn):
# type: (str) -> None
"""
Set read-write permissions for the current user on the target path. Fail silently
if the path doesn't exist.
:param str fn: The target filename or path
:return: None
"""
fn = fs_encode(fn)
if not os.path.exists(fn):
return
file_stat = os.stat(fn).st_mode
os.chmod(fn, file_stat | stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
if os.name == "nt":
from ._winconsole import get_current_user
user_sid = get_current_user()
icacls_exe = _find_icacls_exe() or "icacls"
from .misc import run
if user_sid:
_, err = run([icacls_exe, "/grant", "{0}:WD".format(user_sid), "''{0}''".format(fn), "/T", "/C", "/Q"])
if not err:
return
if not os.path.isdir(fn):
for path in [fn, os.path.dirname(fn)]:
try:
os.chflags(path, 0)
except AttributeError:
pass
return None
for root, dirs, files in os.walk(fn, topdown=False):
for dir_ in [os.path.join(root, d) for d in dirs]:
set_write_bit(dir_)
for file_ in [os.path.join(root, f) for f in files]:
set_write_bit(file_) | [
"def",
"set_write_bit",
"(",
"fn",
")",
":",
"# type: (str) -> None",
"fn",
"=",
"fs_encode",
"(",
"fn",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"fn",
")",
":",
"return",
"file_stat",
"=",
"os",
".",
"stat",
"(",
"fn",
")",
".",
"st_mode",
"os",
".",
"chmod",
"(",
"fn",
",",
"file_stat",
"|",
"stat",
".",
"S_IRWXU",
"|",
"stat",
".",
"S_IRWXG",
"|",
"stat",
".",
"S_IRWXO",
")",
"if",
"os",
".",
"name",
"==",
"\"nt\"",
":",
"from",
".",
"_winconsole",
"import",
"get_current_user",
"user_sid",
"=",
"get_current_user",
"(",
")",
"icacls_exe",
"=",
"_find_icacls_exe",
"(",
")",
"or",
"\"icacls\"",
"from",
".",
"misc",
"import",
"run",
"if",
"user_sid",
":",
"_",
",",
"err",
"=",
"run",
"(",
"[",
"icacls_exe",
",",
"\"/grant\"",
",",
"\"{0}:WD\"",
".",
"format",
"(",
"user_sid",
")",
",",
"\"''{0}''\"",
".",
"format",
"(",
"fn",
")",
",",
"\"/T\"",
",",
"\"/C\"",
",",
"\"/Q\"",
"]",
")",
"if",
"not",
"err",
":",
"return",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"fn",
")",
":",
"for",
"path",
"in",
"[",
"fn",
",",
"os",
".",
"path",
".",
"dirname",
"(",
"fn",
")",
"]",
":",
"try",
":",
"os",
".",
"chflags",
"(",
"path",
",",
"0",
")",
"except",
"AttributeError",
":",
"pass",
"return",
"None",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"fn",
",",
"topdown",
"=",
"False",
")",
":",
"for",
"dir_",
"in",
"[",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"d",
")",
"for",
"d",
"in",
"dirs",
"]",
":",
"set_write_bit",
"(",
"dir_",
")",
"for",
"file_",
"in",
"[",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"f",
")",
"for",
"f",
"in",
"files",
"]",
":",
"set_write_bit",
"(",
"file_",
")"
]
| 32.052632 | 18.789474 |
def create_deep_linking_urls(self, url_params):
"""
Bulk Creates Deep Linking URLs
See the URL https://dev.branch.io/references/http_api/#bulk-creating-deep-linking-urls
:param url_params: Array of values returned from "create_deep_link_url(..., skip_api_call=True)"
:return: The response
"""
url = "/v1/url/bulk/%s" % self.branch_key
method = "POST"
# Checks params
self._check_param(value=url_params, type=list, sub_type=dict, optional=False)
return self.make_api_call(method, url, json_params=url_params) | [
"def",
"create_deep_linking_urls",
"(",
"self",
",",
"url_params",
")",
":",
"url",
"=",
"\"/v1/url/bulk/%s\"",
"%",
"self",
".",
"branch_key",
"method",
"=",
"\"POST\"",
"# Checks params",
"self",
".",
"_check_param",
"(",
"value",
"=",
"url_params",
",",
"type",
"=",
"list",
",",
"sub_type",
"=",
"dict",
",",
"optional",
"=",
"False",
")",
"return",
"self",
".",
"make_api_call",
"(",
"method",
",",
"url",
",",
"json_params",
"=",
"url_params",
")"
]
| 34.352941 | 26.823529 |
def check_bounding_rect(rect_pos):
"""Ensure the rect spec is valid."""
if not isinstance(rect_pos, Iterable):
raise ValueError('rectangle spec must be a tuple of floats '
'specifying (left, bottom, width, height)')
left, bottom, width, height = rect_pos
for val, name in zip((left, bottom, width, height),
('left', 'bottom', 'width', 'height')):
if val < 0.0 or val > 1.0:
raise ValueError("{}'s value must be >=0 and <= 1.0. "
"It is now {}".format(name, val))
if left + width > 1.0:
print('rect would extend beyond the width of figure/axis by {}'.format(left + width - 1.0))
if bottom + height > 1.0:
print('rect would extend beyond the height of figure/axis by {}'.format(
bottom + height - 1.0))
return rect_pos | [
"def",
"check_bounding_rect",
"(",
"rect_pos",
")",
":",
"if",
"not",
"isinstance",
"(",
"rect_pos",
",",
"Iterable",
")",
":",
"raise",
"ValueError",
"(",
"'rectangle spect must be a tuple of floats '",
"'specifying (left, right, width, height)'",
")",
"left",
",",
"bottom",
",",
"width",
",",
"height",
"=",
"rect_pos",
"for",
"val",
",",
"name",
"in",
"zip",
"(",
"(",
"left",
",",
"bottom",
",",
"width",
",",
"height",
")",
",",
"(",
"'left'",
",",
"'bottom'",
",",
"'width'",
",",
"'height'",
")",
")",
":",
"if",
"val",
"<",
"0.0",
"or",
"val",
">",
"1.0",
":",
"raise",
"ValueError",
"(",
"\"{}'s value must be >=0 and <= 1.0. \"",
"\"It is now {}\"",
".",
"format",
"(",
"name",
",",
"val",
")",
")",
"if",
"left",
"+",
"width",
">",
"1.0",
":",
"print",
"(",
"'rect would extend beyond the width of figure/axis by {}'",
".",
"format",
"(",
"left",
"+",
"width",
"-",
"1.0",
")",
")",
"if",
"bottom",
"+",
"height",
">",
"1.0",
":",
"print",
"(",
"'rect would extend beyond the height of figure/axis by {}'",
".",
"format",
"(",
"bottom",
"+",
"height",
"-",
"1.0",
")",
")",
"return",
"rect_pos"
]
| 39.227273 | 23.136364 |
def skip(self, count):
'''
:param count: number of cases to skip
:return: number of cases skipped
'''
self._get_ready()
skipped = 0
for i in range(count):
if self.mutate():
skipped += 1
else:
break
return skipped | [
"def",
"skip",
"(",
"self",
",",
"count",
")",
":",
"self",
".",
"_get_ready",
"(",
")",
"skipped",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"count",
")",
":",
"if",
"self",
".",
"mutate",
"(",
")",
":",
"skipped",
"+=",
"1",
"else",
":",
"break",
"return",
"skipped"
]
| 24.615385 | 16.153846 |
def NewPathSpec(cls, type_indicator, **kwargs):
"""Creates a new path specification for the specific type indicator.
Args:
type_indicator (str): type indicator.
kwargs (dict): keyword arguments depending on the path specification.
Returns:
PathSpec: path specification.
Raises:
KeyError: if path specification is not registered.
"""
if type_indicator not in cls._path_spec_types:
raise KeyError(
'Path specification type: {0:s} not set.'.format(type_indicator))
# An empty parent will cause parentless path specifications to raise
# so we conveniently remove it here.
if 'parent' in kwargs and kwargs['parent'] is None:
del kwargs['parent']
path_spec_type = cls._path_spec_types[type_indicator]
return path_spec_type(**kwargs) | [
"def",
"NewPathSpec",
"(",
"cls",
",",
"type_indicator",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"type_indicator",
"not",
"in",
"cls",
".",
"_path_spec_types",
":",
"raise",
"KeyError",
"(",
"'Path specification type: {0:s} not set.'",
".",
"format",
"(",
"type_indicator",
")",
")",
"# An empty parent will cause parentless path specifications to raise",
"# so we conveniently remove it here.",
"if",
"'parent'",
"in",
"kwargs",
"and",
"kwargs",
"[",
"'parent'",
"]",
"is",
"None",
":",
"del",
"kwargs",
"[",
"'parent'",
"]",
"path_spec_type",
"=",
"cls",
".",
"_path_spec_types",
"[",
"type_indicator",
"]",
"return",
"path_spec_type",
"(",
"*",
"*",
"kwargs",
")"
]
| 33.25 | 20.875 |
def get_definition(self):
"""Checks variable and executable code elements based on the current
context for a code element whose name matches context.exact_match
perfectly.
"""
#Check the variables first, then the functions.
match = self._bracket_exact_var(self.context.exact_match)
if match is None:
match = self._bracket_exact_exec(self.context.exact_match)
return match | [
"def",
"get_definition",
"(",
"self",
")",
":",
"#Check the variables first, then the functions.",
"match",
"=",
"self",
".",
"_bracket_exact_var",
"(",
"self",
".",
"context",
".",
"exact_match",
")",
"if",
"match",
"is",
"None",
":",
"match",
"=",
"self",
".",
"_bracket_exact_exec",
"(",
"self",
".",
"context",
".",
"exact_match",
")",
"return",
"match"
]
| 39.727273 | 19.454545 |
def conv_precip_frac(precip_largescale, precip_convective):
"""Fraction of total precip that is from convection parameterization.
Parameters
----------
precip_largescale, precip_convective : xarray.DataArrays
Precipitation from grid-scale condensation and from convective
parameterization, respectively.
Returns
-------
xarray.DataArray
"""
total = total_precip(precip_largescale, precip_convective)
# Mask using xarray's `where` method to prevent divide-by-zero.
return precip_convective / total.where(total) | [
"def",
"conv_precip_frac",
"(",
"precip_largescale",
",",
"precip_convective",
")",
":",
"total",
"=",
"total_precip",
"(",
"precip_largescale",
",",
"precip_convective",
")",
"# Mask using xarray's `where` method to prevent divide-by-zero.",
"return",
"precip_convective",
"/",
"total",
".",
"where",
"(",
"total",
")"
]
| 34.75 | 21.125 |
def fetchall_triples_xrefs(prefix):
"""
fetch all xrefs for a prefix, e.g. CHEBI
"""
logging.info("fetching xrefs for: "+prefix)
query = """
SELECT * WHERE {{
?c <{p}> ?x
}}
""".format(p=prefixmap[prefix])
bindings = run_sparql(query)
rows = [(r['c']['value'], r['x']['value']) for r in bindings]
return rows | [
"def",
"fetchall_triples_xrefs",
"(",
"prefix",
")",
":",
"logging",
".",
"info",
"(",
"\"fetching xrefs for: \"",
"+",
"prefix",
")",
"query",
"=",
"\"\"\"\n SELECT * WHERE {{\n ?c <{p}> ?x\n }}\n \"\"\"",
".",
"format",
"(",
"p",
"=",
"prefixmap",
"[",
"prefix",
"]",
")",
"bindings",
"=",
"run_sparql",
"(",
"query",
")",
"rows",
"=",
"[",
"(",
"r",
"[",
"'c'",
"]",
"[",
"'value'",
"]",
",",
"r",
"[",
"'x'",
"]",
"[",
"'value'",
"]",
")",
"for",
"r",
"in",
"bindings",
"]",
"return",
"rows"
]
| 26.461538 | 11.692308 |
def file_variations(filename, extensions):
"""Create a variation of file names.
Generate a list of variations on a filename by replacing the extension with
the provided list.
:param filename: The original file name to use as a base.
:param extensions: A list of file extensions to generate new filenames.
"""
(label, ext) = splitext(filename)
return [label + extension for extension in extensions] | [
"def",
"file_variations",
"(",
"filename",
",",
"extensions",
")",
":",
"(",
"label",
",",
"ext",
")",
"=",
"splitext",
"(",
"filename",
")",
"return",
"[",
"label",
"+",
"extention",
"for",
"extention",
"in",
"extensions",
"]"
]
| 35.25 | 21.166667 |
def visit_and_update(self, visitor_fn):
"""Create an updated version (if needed) of BinaryComposition via the visitor pattern."""
new_left = self.left.visit_and_update(visitor_fn)
new_right = self.right.visit_and_update(visitor_fn)
if new_left is not self.left or new_right is not self.right:
return visitor_fn(BinaryComposition(self.operator, new_left, new_right))
else:
return visitor_fn(self) | [
"def",
"visit_and_update",
"(",
"self",
",",
"visitor_fn",
")",
":",
"new_left",
"=",
"self",
".",
"left",
".",
"visit_and_update",
"(",
"visitor_fn",
")",
"new_right",
"=",
"self",
".",
"right",
".",
"visit_and_update",
"(",
"visitor_fn",
")",
"if",
"new_left",
"is",
"not",
"self",
".",
"left",
"or",
"new_right",
"is",
"not",
"self",
".",
"right",
":",
"return",
"visitor_fn",
"(",
"BinaryComposition",
"(",
"self",
".",
"operator",
",",
"new_left",
",",
"new_right",
")",
")",
"else",
":",
"return",
"visitor_fn",
"(",
"self",
")"
]
| 50.222222 | 20.111111 |
def makevFunc(self,solution):
'''
Make the beginning-of-period value function (unconditional on the shock).
Parameters
----------
solution : ConsumerSolution
The solution to this single period problem, which must include the
consumption function.
Returns
-------
vFuncNow : ValueFunc
A representation of the value function for this period, defined over
normalized market resources m: v = vFuncNow(m).
'''
# Compute expected value and marginal value on a grid of market resources,
# accounting for all of the discrete preference shocks
PrefShkCount = self.PrefShkVals.size
mNrm_temp = self.mNrmMinNow + self.aXtraGrid
vNrmNow = np.zeros_like(mNrm_temp)
vPnow = np.zeros_like(mNrm_temp)
for j in range(PrefShkCount):
this_shock = self.PrefShkVals[j]
this_prob = self.PrefShkPrbs[j]
cNrmNow = solution.cFunc(mNrm_temp,this_shock*np.ones_like(mNrm_temp))
aNrmNow = mNrm_temp - cNrmNow
vNrmNow += this_prob*(this_shock*self.u(cNrmNow) + self.EndOfPrdvFunc(aNrmNow))
vPnow += this_prob*this_shock*self.uP(cNrmNow)
# Construct the beginning-of-period value function
vNvrs = self.uinv(vNrmNow) # value transformed through inverse utility
vNvrsP = vPnow*self.uinvP(vNrmNow)
mNrm_temp = np.insert(mNrm_temp,0,self.mNrmMinNow)
vNvrs = np.insert(vNvrs,0,0.0)
vNvrsP = np.insert(vNvrsP,0,self.MPCmaxEff**(-self.CRRA/(1.0-self.CRRA)))
MPCminNvrs = self.MPCminNow**(-self.CRRA/(1.0-self.CRRA))
vNvrsFuncNow = CubicInterp(mNrm_temp,vNvrs,vNvrsP,MPCminNvrs*self.hNrmNow,MPCminNvrs)
vFuncNow = ValueFunc(vNvrsFuncNow,self.CRRA)
return vFuncNow | [
"def",
"makevFunc",
"(",
"self",
",",
"solution",
")",
":",
"# Compute expected value and marginal value on a grid of market resources,",
"# accounting for all of the discrete preference shocks",
"PrefShkCount",
"=",
"self",
".",
"PrefShkVals",
".",
"size",
"mNrm_temp",
"=",
"self",
".",
"mNrmMinNow",
"+",
"self",
".",
"aXtraGrid",
"vNrmNow",
"=",
"np",
".",
"zeros_like",
"(",
"mNrm_temp",
")",
"vPnow",
"=",
"np",
".",
"zeros_like",
"(",
"mNrm_temp",
")",
"for",
"j",
"in",
"range",
"(",
"PrefShkCount",
")",
":",
"this_shock",
"=",
"self",
".",
"PrefShkVals",
"[",
"j",
"]",
"this_prob",
"=",
"self",
".",
"PrefShkPrbs",
"[",
"j",
"]",
"cNrmNow",
"=",
"solution",
".",
"cFunc",
"(",
"mNrm_temp",
",",
"this_shock",
"*",
"np",
".",
"ones_like",
"(",
"mNrm_temp",
")",
")",
"aNrmNow",
"=",
"mNrm_temp",
"-",
"cNrmNow",
"vNrmNow",
"+=",
"this_prob",
"*",
"(",
"this_shock",
"*",
"self",
".",
"u",
"(",
"cNrmNow",
")",
"+",
"self",
".",
"EndOfPrdvFunc",
"(",
"aNrmNow",
")",
")",
"vPnow",
"+=",
"this_prob",
"*",
"this_shock",
"*",
"self",
".",
"uP",
"(",
"cNrmNow",
")",
"# Construct the beginning-of-period value function",
"vNvrs",
"=",
"self",
".",
"uinv",
"(",
"vNrmNow",
")",
"# value transformed through inverse utility",
"vNvrsP",
"=",
"vPnow",
"*",
"self",
".",
"uinvP",
"(",
"vNrmNow",
")",
"mNrm_temp",
"=",
"np",
".",
"insert",
"(",
"mNrm_temp",
",",
"0",
",",
"self",
".",
"mNrmMinNow",
")",
"vNvrs",
"=",
"np",
".",
"insert",
"(",
"vNvrs",
",",
"0",
",",
"0.0",
")",
"vNvrsP",
"=",
"np",
".",
"insert",
"(",
"vNvrsP",
",",
"0",
",",
"self",
".",
"MPCmaxEff",
"**",
"(",
"-",
"self",
".",
"CRRA",
"/",
"(",
"1.0",
"-",
"self",
".",
"CRRA",
")",
")",
")",
"MPCminNvrs",
"=",
"self",
".",
"MPCminNow",
"**",
"(",
"-",
"self",
".",
"CRRA",
"/",
"(",
"1.0",
"-",
"self",
".",
"CRRA",
")",
")",
"vNvrsFuncNow",
"=",
"CubicInterp",
"(",
"mNrm_temp",
",",
"vNvrs",
",",
"vNvrsP",
",",
"MPCminNvrs",
"*",
"self",
".",
"hNrmNow",
",",
"MPCminNvrs",
")",
"vFuncNow",
"=",
"ValueFunc",
"(",
"vNvrsFuncNow",
",",
"self",
".",
"CRRA",
")",
"return",
"vFuncNow"
]
| 47.075 | 23.425 |
def get_config(self):
"""Return configurations of LinearAnnealedPolicy
# Returns
Dict of config
"""
config = super(LinearAnnealedPolicy, self).get_config()
config['attr'] = self.attr
config['value_max'] = self.value_max
config['value_min'] = self.value_min
config['value_test'] = self.value_test
config['nb_steps'] = self.nb_steps
config['inner_policy'] = get_object_config(self.inner_policy)
return config | [
"def",
"get_config",
"(",
"self",
")",
":",
"config",
"=",
"super",
"(",
"LinearAnnealedPolicy",
",",
"self",
")",
".",
"get_config",
"(",
")",
"config",
"[",
"'attr'",
"]",
"=",
"self",
".",
"attr",
"config",
"[",
"'value_max'",
"]",
"=",
"self",
".",
"value_max",
"config",
"[",
"'value_min'",
"]",
"=",
"self",
".",
"value_min",
"config",
"[",
"'value_test'",
"]",
"=",
"self",
".",
"value_test",
"config",
"[",
"'nb_steps'",
"]",
"=",
"self",
".",
"nb_steps",
"config",
"[",
"'inner_policy'",
"]",
"=",
"get_object_config",
"(",
"self",
".",
"inner_policy",
")",
"return",
"config"
]
| 35.285714 | 13.5 |
def system_info(conf, args):
"""Retieve SDC system information."""
src = conf.config['instances'][args.src]
src_url = api.build_system_url(build_instance_url(src))
src_auth = tuple([conf.creds['instances'][args.src]['user'],
conf.creds['instances'][args.src]['pass']])
verify_ssl = src.get('verify_ssl', True)
sysinfo_json = api.system_info(src_url, src_auth, verify_ssl)
return sysinfo_json | [
"def",
"system_info",
"(",
"conf",
",",
"args",
")",
":",
"src",
"=",
"conf",
".",
"config",
"[",
"'instances'",
"]",
"[",
"args",
".",
"src",
"]",
"src_url",
"=",
"api",
".",
"build_system_url",
"(",
"build_instance_url",
"(",
"src",
")",
")",
"src_auth",
"=",
"tuple",
"(",
"[",
"conf",
".",
"creds",
"[",
"'instances'",
"]",
"[",
"args",
".",
"src",
"]",
"[",
"'user'",
"]",
",",
"conf",
".",
"creds",
"[",
"'instances'",
"]",
"[",
"args",
".",
"src",
"]",
"[",
"'pass'",
"]",
"]",
")",
"verify_ssl",
"=",
"src",
".",
"get",
"(",
"'verify_ssl'",
",",
"True",
")",
"sysinfo_json",
"=",
"api",
".",
"system_info",
"(",
"src_url",
",",
"src_auth",
",",
"verify_ssl",
")",
"return",
"sysinfo_json"
]
| 48.111111 | 14.444444 |
def _start_date_of_year(year: int) -> datetime.date:
"""
Return start date of the year using MMWR week rules
"""
jan_one = datetime.date(year, 1, 1)
diff = 7 * (jan_one.isoweekday() > 3) - jan_one.isoweekday()
return jan_one + datetime.timedelta(days=diff) | [
"def",
"_start_date_of_year",
"(",
"year",
":",
"int",
")",
"->",
"datetime",
".",
"date",
":",
"jan_one",
"=",
"datetime",
".",
"date",
"(",
"year",
",",
"1",
",",
"1",
")",
"diff",
"=",
"7",
"*",
"(",
"jan_one",
".",
"isoweekday",
"(",
")",
">",
"3",
")",
"-",
"jan_one",
".",
"isoweekday",
"(",
")",
"return",
"jan_one",
"+",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"diff",
")"
]
| 30.444444 | 15.777778 |
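
The arithmetic in `_start_date_of_year` above places the year's start on the Sunday falling between December 29 and January 4, which matches the usual MMWR convention that week 1 is the first week containing at least four days of the new year. A small standard-library-only check of both branches of the `diff` expression:

```python
import datetime

def _start_date_of_year(year: int) -> datetime.date:
    # Same expression as the row above.
    jan_one = datetime.date(year, 1, 1)
    diff = 7 * (jan_one.isoweekday() > 3) - jan_one.isoweekday()
    return jan_one + datetime.timedelta(days=diff)

# Jan 1, 2019 is a Tuesday (isoweekday 2 <= 3): step back to the preceding Sunday.
print(_start_date_of_year(2019))  # 2018-12-30
# Jan 1, 2016 is a Friday (isoweekday 5 > 3): step forward to the following Sunday.
print(_start_date_of_year(2016))  # 2016-01-03
```
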
def start(self, key):
"""
Start a concurrent operation.
This gets the concurrency limiter for the given key (creating it if
necessary) and starts a concurrent operation on it.
"""
start_d = self._get_limiter(key).start()
self._cleanup_limiter(key)
return start_d | [
"def",
"start",
"(",
"self",
",",
"key",
")",
":",
"start_d",
"=",
"self",
".",
"_get_limiter",
"(",
"key",
")",
".",
"start",
"(",
")",
"self",
".",
"_cleanup_limiter",
"(",
"key",
")",
"return",
"start_d"
]
| 31.8 | 14.8 |
def poke(library, session, address, width, data):
"""Writes an 8, 16 or 32-bit value from the specified address.
Corresponds to viPoke* functions of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
    :param address: Destination address to write the value.
    :param width: Number of bits to write.
:param data: Data to be written to the bus.
:return: return value of the library call.
:rtype: :class:`pyvisa.constants.StatusCode`
"""
if width == 8:
return poke_8(library, session, address, data)
elif width == 16:
return poke_16(library, session, address, data)
elif width == 32:
return poke_32(library, session, address, data)
raise ValueError('%s is not a valid size. Valid values are 8, 16 or 32' % width) | [
"def",
"poke",
"(",
"library",
",",
"session",
",",
"address",
",",
"width",
",",
"data",
")",
":",
"if",
"width",
"==",
"8",
":",
"return",
"poke_8",
"(",
"library",
",",
"session",
",",
"address",
",",
"data",
")",
"elif",
"width",
"==",
"16",
":",
"return",
"poke_16",
"(",
"library",
",",
"session",
",",
"address",
",",
"data",
")",
"elif",
"width",
"==",
"32",
":",
"return",
"poke_32",
"(",
"library",
",",
"session",
",",
"address",
",",
"data",
")",
"raise",
"ValueError",
"(",
"'%s is not a valid size. Valid values are 8, 16 or 32'",
"%",
"width",
")"
]
| 38 | 18.318182 |
def analyze(problem, Y, M=4, print_to_console=False, seed=None):
"""Performs the Fourier Amplitude Sensitivity Test (FAST) on model outputs.
Returns a dictionary with keys 'S1' and 'ST', where each entry is a list of
size D (the number of parameters) containing the indices in the same order
as the parameter file.
Parameters
----------
problem : dict
The problem definition
Y : numpy.array
A NumPy array containing the model outputs
M : int
The interference parameter, i.e., the number of harmonics to sum in
the Fourier series decomposition (default 4)
print_to_console : bool
Print results directly to console (default False)
References
----------
.. [1] Cukier, R. I., C. M. Fortuin, K. E. Shuler, A. G. Petschek, and J. H.
Schaibly (1973). "Study of the sensitivity of coupled reaction
systems to uncertainties in rate coefficients." J. Chem. Phys.,
59(8):3873-3878, doi:10.1063/1.1680571.
.. [2] Saltelli, A., S. Tarantola, and K. P.-S. Chan (1999). "A
Quantitative Model-Independent Method for Global Sensitivity
Analysis of Model Output." Technometrics, 41(1):39-56,
doi:10.1080/00401706.1999.10485594.
Examples
--------
>>> X = fast_sampler.sample(problem, 1000)
>>> Y = Ishigami.evaluate(X)
>>> Si = fast.analyze(problem, Y, print_to_console=False)
"""
if seed:
np.random.seed(seed)
D = problem['num_vars']
if Y.size % (D) == 0:
N = int(Y.size / D)
else:
print("""
Error: Number of samples in model output file must be a multiple of D,
where D is the number of parameters in your parameter file.
""")
exit()
# Recreate the vector omega used in the sampling
omega = np.zeros([D])
omega[0] = math.floor((N - 1) / (2 * M))
m = math.floor(omega[0] / (2 * M))
if m >= (D - 1):
omega[1:] = np.floor(np.linspace(1, m, D - 1))
else:
omega[1:] = np.arange(D - 1) % m + 1
# Calculate and Output the First and Total Order Values
if print_to_console:
print("Parameter First Total")
Si = ResultDict((k, [None] * D) for k in ['S1', 'ST'])
Si['names'] = problem['names']
for i in range(D):
l = np.arange(i * N, (i + 1) * N)
Si['S1'][i] = compute_first_order(Y[l], N, M, omega[0])
Si['ST'][i] = compute_total_order(Y[l], N, omega[0])
if print_to_console:
print("%s %f %f" %
(problem['names'][i], Si['S1'][i], Si['ST'][i]))
return Si | [
"def",
"analyze",
"(",
"problem",
",",
"Y",
",",
"M",
"=",
"4",
",",
"print_to_console",
"=",
"False",
",",
"seed",
"=",
"None",
")",
":",
"if",
"seed",
":",
"np",
".",
"random",
".",
"seed",
"(",
"seed",
")",
"D",
"=",
"problem",
"[",
"'num_vars'",
"]",
"if",
"Y",
".",
"size",
"%",
"(",
"D",
")",
"==",
"0",
":",
"N",
"=",
"int",
"(",
"Y",
".",
"size",
"/",
"D",
")",
"else",
":",
"print",
"(",
"\"\"\"\r\n Error: Number of samples in model output file must be a multiple of D,\r\n where D is the number of parameters in your parameter file.\r\n \"\"\"",
")",
"exit",
"(",
")",
"# Recreate the vector omega used in the sampling\r",
"omega",
"=",
"np",
".",
"zeros",
"(",
"[",
"D",
"]",
")",
"omega",
"[",
"0",
"]",
"=",
"math",
".",
"floor",
"(",
"(",
"N",
"-",
"1",
")",
"/",
"(",
"2",
"*",
"M",
")",
")",
"m",
"=",
"math",
".",
"floor",
"(",
"omega",
"[",
"0",
"]",
"/",
"(",
"2",
"*",
"M",
")",
")",
"if",
"m",
">=",
"(",
"D",
"-",
"1",
")",
":",
"omega",
"[",
"1",
":",
"]",
"=",
"np",
".",
"floor",
"(",
"np",
".",
"linspace",
"(",
"1",
",",
"m",
",",
"D",
"-",
"1",
")",
")",
"else",
":",
"omega",
"[",
"1",
":",
"]",
"=",
"np",
".",
"arange",
"(",
"D",
"-",
"1",
")",
"%",
"m",
"+",
"1",
"# Calculate and Output the First and Total Order Values\r",
"if",
"print_to_console",
":",
"print",
"(",
"\"Parameter First Total\"",
")",
"Si",
"=",
"ResultDict",
"(",
"(",
"k",
",",
"[",
"None",
"]",
"*",
"D",
")",
"for",
"k",
"in",
"[",
"'S1'",
",",
"'ST'",
"]",
")",
"Si",
"[",
"'names'",
"]",
"=",
"problem",
"[",
"'names'",
"]",
"for",
"i",
"in",
"range",
"(",
"D",
")",
":",
"l",
"=",
"np",
".",
"arange",
"(",
"i",
"*",
"N",
",",
"(",
"i",
"+",
"1",
")",
"*",
"N",
")",
"Si",
"[",
"'S1'",
"]",
"[",
"i",
"]",
"=",
"compute_first_order",
"(",
"Y",
"[",
"l",
"]",
",",
"N",
",",
"M",
",",
"omega",
"[",
"0",
"]",
")",
"Si",
"[",
"'ST'",
"]",
"[",
"i",
"]",
"=",
"compute_total_order",
"(",
"Y",
"[",
"l",
"]",
",",
"N",
",",
"omega",
"[",
"0",
"]",
")",
"if",
"print_to_console",
":",
"print",
"(",
"\"%s %f %f\"",
"%",
"(",
"problem",
"[",
"'names'",
"]",
"[",
"i",
"]",
",",
"Si",
"[",
"'S1'",
"]",
"[",
"i",
"]",
",",
"Si",
"[",
"'ST'",
"]",
"[",
"i",
"]",
")",
")",
"return",
"Si"
]
| 34.75 | 21.894737 |
def chown(self, uid, gid):
"""
Change the owner (C{uid}) and group (C{gid}) of this file. As with
python's C{os.chown} function, you must pass both arguments, so if you
only want to change one, use L{stat} first to retrieve the current
owner and group.
@param uid: new owner's uid
@type uid: int
@param gid: new group id
@type gid: int
"""
self.sftp._log(DEBUG, 'chown(%s, %r, %r)' % (hexlify(self.handle), uid, gid))
attr = SFTPAttributes()
attr.st_uid, attr.st_gid = uid, gid
self.sftp._request(CMD_FSETSTAT, self.handle, attr) | [
"def",
"chown",
"(",
"self",
",",
"uid",
",",
"gid",
")",
":",
"self",
".",
"sftp",
".",
"_log",
"(",
"DEBUG",
",",
"'chown(%s, %r, %r)'",
"%",
"(",
"hexlify",
"(",
"self",
".",
"handle",
")",
",",
"uid",
",",
"gid",
")",
")",
"attr",
"=",
"SFTPAttributes",
"(",
")",
"attr",
".",
"st_uid",
",",
"attr",
".",
"st_gid",
"=",
"uid",
",",
"gid",
"self",
".",
"sftp",
".",
"_request",
"(",
"CMD_FSETSTAT",
",",
"self",
".",
"handle",
",",
"attr",
")"
]
| 39.25 | 18.875 |
def hello_user(api_client):
"""Use an authorized client to fetch and print profile information.
Parameters
api_client (UberRidesClient)
An UberRidesClient with OAuth 2.0 credentials.
"""
try:
response = api_client.get_user_profile()
except (ClientError, ServerError) as error:
fail_print(error)
return
else:
profile = response.json
first_name = profile.get('first_name')
last_name = profile.get('last_name')
email = profile.get('email')
message = 'Hello, {} {}. Successfully granted access token to {}.'
message = message.format(first_name, last_name, email)
success_print(message)
success_print(profile)
success_print('---')
response = api_client.get_home_address()
address = response.json
success_print(address)
success_print('---')
response = api_client.get_user_activity()
history = response.json
success_print(history) | [
"def",
"hello_user",
"(",
"api_client",
")",
":",
"try",
":",
"response",
"=",
"api_client",
".",
"get_user_profile",
"(",
")",
"except",
"(",
"ClientError",
",",
"ServerError",
")",
"as",
"error",
":",
"fail_print",
"(",
"error",
")",
"return",
"else",
":",
"profile",
"=",
"response",
".",
"json",
"first_name",
"=",
"profile",
".",
"get",
"(",
"'first_name'",
")",
"last_name",
"=",
"profile",
".",
"get",
"(",
"'last_name'",
")",
"email",
"=",
"profile",
".",
"get",
"(",
"'email'",
")",
"message",
"=",
"'Hello, {} {}. Successfully granted access token to {}.'",
"message",
"=",
"message",
".",
"format",
"(",
"first_name",
",",
"last_name",
",",
"email",
")",
"success_print",
"(",
"message",
")",
"success_print",
"(",
"profile",
")",
"success_print",
"(",
"'---'",
")",
"response",
"=",
"api_client",
".",
"get_home_address",
"(",
")",
"address",
"=",
"response",
".",
"json",
"success_print",
"(",
"address",
")",
"success_print",
"(",
"'---'",
")",
"response",
"=",
"api_client",
".",
"get_user_activity",
"(",
")",
"history",
"=",
"response",
".",
"json",
"success_print",
"(",
"history",
")"
]
| 29.176471 | 17.588235 |
def get(self, service_id, insert_defaults=None):
"""
Get a service.
Args:
service_id (str): The ID of the service.
insert_defaults (boolean): If true, default values will be merged
into the output.
Returns:
:py:class:`Service`: The service.
Raises:
:py:class:`docker.errors.NotFound`
If the service does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
:py:class:`docker.errors.InvalidVersion`
If one of the arguments is not supported with the current
API version.
"""
return self.prepare_model(
self.client.api.inspect_service(service_id, insert_defaults)
) | [
"def",
"get",
"(",
"self",
",",
"service_id",
",",
"insert_defaults",
"=",
"None",
")",
":",
"return",
"self",
".",
"prepare_model",
"(",
"self",
".",
"client",
".",
"api",
".",
"inspect_service",
"(",
"service_id",
",",
"insert_defaults",
")",
")"
]
| 33.125 | 18.125 |
def _reduced_call(self, **params_to_override):
"""
Simplified version of PatternGenerator's __call__ method.
"""
p=param.ParamOverrides(self,params_to_override)
fn_result = self.function(p)
self._apply_mask(p,fn_result)
result = p.scale*fn_result+p.offset
return result | [
"def",
"_reduced_call",
"(",
"self",
",",
"*",
"*",
"params_to_override",
")",
":",
"p",
"=",
"param",
".",
"ParamOverrides",
"(",
"self",
",",
"params_to_override",
")",
"fn_result",
"=",
"self",
".",
"function",
"(",
"p",
")",
"self",
".",
"_apply_mask",
"(",
"p",
",",
"fn_result",
")",
"result",
"=",
"p",
".",
"scale",
"*",
"fn_result",
"+",
"p",
".",
"offset",
"return",
"result"
]
| 32.5 | 11.5 |
def frommembers(cls, members):
"""Series from iterable of member iterables."""
return cls.frombitsets(map(cls.BitSet.frommembers, members)) | [
"def",
"frommembers",
"(",
"cls",
",",
"members",
")",
":",
"return",
"cls",
".",
"frombitsets",
"(",
"map",
"(",
"cls",
".",
"BitSet",
".",
"frommembers",
",",
"members",
")",
")"
]
| 51 | 12.666667 |
def _extract_cell(args, cell_body):
"""Implements the BigQuery extract magic used to extract query or table data to GCS.
The supported syntax is:
%bq extract <args>
Args:
args: the arguments following '%bigquery extract'.
"""
env = google.datalab.utils.commands.notebook_environment()
config = google.datalab.utils.commands.parse_config(cell_body, env, False) or {}
parameters = config.get('parameters')
if args['table']:
table = google.datalab.bigquery.Query.resolve_parameters(args['table'], parameters)
source = _get_table(table)
if not source:
raise Exception('Could not find table %s' % table)
csv_delimiter = args['delimiter'] if args['format'] == 'csv' else None
path = google.datalab.bigquery.Query.resolve_parameters(args['path'], parameters)
job = source.extract(path, format=args['format'], csv_delimiter=csv_delimiter,
csv_header=args['header'], compress=args['compress'])
elif args['query'] or args['view']:
source_name = args['view'] or args['query']
source = google.datalab.utils.commands.get_notebook_item(source_name)
if not source:
raise Exception('Could not find ' +
('view ' + args['view'] if args['view'] else 'query ' + args['query']))
query = source if args['query'] else bigquery.Query.from_view(source)
query_params = get_query_parameters(args, cell_body) if args['query'] else None
output_options = QueryOutput.file(path=args['path'], format=args['format'],
csv_delimiter=args['delimiter'],
csv_header=args['header'], compress=args['compress'],
use_cache=not args['nocache'])
context = google.datalab.utils._utils._construct_context_for_args(args)
job = query.execute(output_options, context=context, query_params=query_params)
else:
raise Exception('A query, table, or view is needed to extract')
if job.failed:
raise Exception('Extract failed: %s' % str(job.fatal_error))
elif job.errors:
raise Exception('Extract completed with errors: %s' % str(job.errors))
return job.result() | [
"def",
"_extract_cell",
"(",
"args",
",",
"cell_body",
")",
":",
"env",
"=",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"notebook_environment",
"(",
")",
"config",
"=",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"parse_config",
"(",
"cell_body",
",",
"env",
",",
"False",
")",
"or",
"{",
"}",
"parameters",
"=",
"config",
".",
"get",
"(",
"'parameters'",
")",
"if",
"args",
"[",
"'table'",
"]",
":",
"table",
"=",
"google",
".",
"datalab",
".",
"bigquery",
".",
"Query",
".",
"resolve_parameters",
"(",
"args",
"[",
"'table'",
"]",
",",
"parameters",
")",
"source",
"=",
"_get_table",
"(",
"table",
")",
"if",
"not",
"source",
":",
"raise",
"Exception",
"(",
"'Could not find table %s'",
"%",
"table",
")",
"csv_delimiter",
"=",
"args",
"[",
"'delimiter'",
"]",
"if",
"args",
"[",
"'format'",
"]",
"==",
"'csv'",
"else",
"None",
"path",
"=",
"google",
".",
"datalab",
".",
"bigquery",
".",
"Query",
".",
"resolve_parameters",
"(",
"args",
"[",
"'path'",
"]",
",",
"parameters",
")",
"job",
"=",
"source",
".",
"extract",
"(",
"path",
",",
"format",
"=",
"args",
"[",
"'format'",
"]",
",",
"csv_delimiter",
"=",
"csv_delimiter",
",",
"csv_header",
"=",
"args",
"[",
"'header'",
"]",
",",
"compress",
"=",
"args",
"[",
"'compress'",
"]",
")",
"elif",
"args",
"[",
"'query'",
"]",
"or",
"args",
"[",
"'view'",
"]",
":",
"source_name",
"=",
"args",
"[",
"'view'",
"]",
"or",
"args",
"[",
"'query'",
"]",
"source",
"=",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"get_notebook_item",
"(",
"source_name",
")",
"if",
"not",
"source",
":",
"raise",
"Exception",
"(",
"'Could not find '",
"+",
"(",
"'view '",
"+",
"args",
"[",
"'view'",
"]",
"if",
"args",
"[",
"'view'",
"]",
"else",
"'query '",
"+",
"args",
"[",
"'query'",
"]",
")",
")",
"query",
"=",
"source",
"if",
"args",
"[",
"'query'",
"]",
"else",
"bigquery",
".",
"Query",
".",
"from_view",
"(",
"source",
")",
"query_params",
"=",
"get_query_parameters",
"(",
"args",
",",
"cell_body",
")",
"if",
"args",
"[",
"'query'",
"]",
"else",
"None",
"output_options",
"=",
"QueryOutput",
".",
"file",
"(",
"path",
"=",
"args",
"[",
"'path'",
"]",
",",
"format",
"=",
"args",
"[",
"'format'",
"]",
",",
"csv_delimiter",
"=",
"args",
"[",
"'delimiter'",
"]",
",",
"csv_header",
"=",
"args",
"[",
"'header'",
"]",
",",
"compress",
"=",
"args",
"[",
"'compress'",
"]",
",",
"use_cache",
"=",
"not",
"args",
"[",
"'nocache'",
"]",
")",
"context",
"=",
"google",
".",
"datalab",
".",
"utils",
".",
"_utils",
".",
"_construct_context_for_args",
"(",
"args",
")",
"job",
"=",
"query",
".",
"execute",
"(",
"output_options",
",",
"context",
"=",
"context",
",",
"query_params",
"=",
"query_params",
")",
"else",
":",
"raise",
"Exception",
"(",
"'A query, table, or view is needed to extract'",
")",
"if",
"job",
".",
"failed",
":",
"raise",
"Exception",
"(",
"'Extract failed: %s'",
"%",
"str",
"(",
"job",
".",
"fatal_error",
")",
")",
"elif",
"job",
".",
"errors",
":",
"raise",
"Exception",
"(",
"'Extract completed with errors: %s'",
"%",
"str",
"(",
"job",
".",
"errors",
")",
")",
"return",
"job",
".",
"result",
"(",
")"
]
| 46.630435 | 27.478261 |
def pin_chat_message(self, chat_id, message_id, disable_notification=False):
"""
Use this method to pin a message in a supergroup.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Returns True on success.
:param chat_id: Int or Str: Unique identifier for the target chat or username of the target channel
(in the format @channelusername)
:param message_id: Int: Identifier of a message to pin
:param disable_notification: Bool: Pass True, if it is not necessary to send a notification
to all group members about the new pinned message
:return:
"""
return apihelper.pin_chat_message(self.token, chat_id, message_id, disable_notification) | [
"def",
"pin_chat_message",
"(",
"self",
",",
"chat_id",
",",
"message_id",
",",
"disable_notification",
"=",
"False",
")",
":",
"return",
"apihelper",
".",
"pin_chat_message",
"(",
"self",
".",
"token",
",",
"chat_id",
",",
"message_id",
",",
"disable_notification",
")"
]
| 60.384615 | 29.769231 |
def is_namedtuple(val):
"""
    Use duck typing to check whether val is a named tuple. Checks that val's type directly
    subclasses tuple and defines the _fields attribute used by named tuples.
:param val: value to check type of
:return: True if val is a namedtuple
"""
val_type = type(val)
bases = val_type.__bases__
if len(bases) != 1 or bases[0] != tuple:
return False
fields = getattr(val_type, '_fields', None)
return all(isinstance(n, str) for n in fields) | [
"def",
"is_namedtuple",
"(",
"val",
")",
":",
"val_type",
"=",
"type",
"(",
"val",
")",
"bases",
"=",
"val_type",
".",
"__bases__",
"if",
"len",
"(",
"bases",
")",
"!=",
"1",
"or",
"bases",
"[",
"0",
"]",
"!=",
"tuple",
":",
"return",
"False",
"fields",
"=",
"getattr",
"(",
"val_type",
",",
"'_fields'",
",",
"None",
")",
"return",
"all",
"(",
"isinstance",
"(",
"n",
",",
"str",
")",
"for",
"n",
"in",
"fields",
")"
]
| 37.615385 | 12.692308 |
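
A quick demonstration of the duck-typing check in `is_namedtuple` above: a real `collections.namedtuple` passes both tests, while a plain tuple fails the base-class test before `_fields` is ever consulted.

```python
from collections import namedtuple

def is_namedtuple(val):
    # Same checks as the row above.
    val_type = type(val)
    bases = val_type.__bases__
    if len(bases) != 1 or bases[0] != tuple:
        return False
    fields = getattr(val_type, '_fields', None)
    return all(isinstance(n, str) for n in fields)

Point = namedtuple("Point", ["x", "y"])
print(is_namedtuple(Point(1, 2)))  # True
print(is_namedtuple((1, 2)))       # False -- type(val) is tuple, whose bases are (object,)
```

Note that a hand-written tuple subclass without a `_fields` attribute would reach the final `all(...)` with `fields` set to `None` and raise `TypeError`; the helper evidently assumes such types are never passed in.
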
def get_stack_info():
'''Capture locals, module name, filename, and line number from the
stacktrace to provide the source of the assertion error and
formatted note.
'''
stack = traceback.walk_stack(sys._getframe().f_back)
# We want locals from the test definition (which always begins
# with 'test_' in unittest), which will be at a different
# level in the stack depending on how many tests are in each
# test case, how many test cases there are, etc.
# The branch where we exhaust this loop is not covered
# because we always find a test.
for frame, _ in stack: # pragma: no branch
code = frame.f_code
if code.co_name.startswith('test_'):
return (frame.f_locals.copy(), frame.f_globals['__name__'],
code.co_filename, frame.f_lineno) | [
"def",
"get_stack_info",
"(",
")",
":",
"stack",
"=",
"traceback",
".",
"walk_stack",
"(",
"sys",
".",
"_getframe",
"(",
")",
".",
"f_back",
")",
"# We want locals from the test definition (which always begins",
"# with 'test_' in unittest), which will be at a different",
"# level in the stack depending on how many tests are in each",
"# test case, how many test cases there are, etc.",
"# The branch where we exhaust this loop is not covered",
"# because we always find a test.",
"for",
"frame",
",",
"_",
"in",
"stack",
":",
"# pragma: no branch",
"code",
"=",
"frame",
".",
"f_code",
"if",
"code",
".",
"co_name",
".",
"startswith",
"(",
"'test_'",
")",
":",
"return",
"(",
"frame",
".",
"f_locals",
".",
"copy",
"(",
")",
",",
"frame",
".",
"f_globals",
"[",
"'__name__'",
"]",
",",
"code",
".",
"co_filename",
",",
"frame",
".",
"f_lineno",
")"
]
| 42.894737 | 20.789474 |
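
The helper above walks outward from its caller's frame with `traceback.walk_stack` until it finds a function whose name starts with `test_`. A self-contained sketch, with a hypothetical `test_example` standing in for a unittest test method:

```python
import sys
import traceback

def get_stack_info():
    # Walk outward from the caller's frame, as in the row above.
    for frame, _ in traceback.walk_stack(sys._getframe().f_back):
        code = frame.f_code
        if code.co_name.startswith('test_'):
            return (frame.f_locals.copy(), frame.f_globals['__name__'],
                    code.co_filename, frame.f_lineno)

def test_example():
    answer = 42  # ends up in the captured locals
    captured_locals, module, _filename, _lineno = get_stack_info()
    print(captured_locals['answer'], module)  # 42 __main__ (when run as a script)

test_example()
```
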
def set_chime(self, sound, cycles=None):
"""
:param sound: a str, one of ["doorbell", "fur_elise", "doorbell_extended", "alert",
"william_tell", "rondo_alla_turca", "police_siren",
""evacuation", "beep_beep", "beep", "inactive"]
:param cycles: Undocumented seems to have no effect?
:return: nothing
"""
desired_state = {"activate_chime": sound}
if cycles is not None:
desired_state.update({"chime_cycles": cycles})
response = self.api_interface.set_device_state(self,
{"desired_state": desired_state})
self._update_state_from_response(response) | [
"def",
"set_chime",
"(",
"self",
",",
"sound",
",",
"cycles",
"=",
"None",
")",
":",
"desired_state",
"=",
"{",
"\"activate_chime\"",
":",
"sound",
"}",
"if",
"cycles",
"is",
"not",
"None",
":",
"desired_state",
".",
"update",
"(",
"{",
"\"chime_cycles\"",
":",
"cycles",
"}",
")",
"response",
"=",
"self",
".",
"api_interface",
".",
"set_device_state",
"(",
"self",
",",
"{",
"\"desired_state\"",
":",
"desired_state",
"}",
")",
"self",
".",
"_update_state_from_response",
"(",
"response",
")"
]
| 54.071429 | 21.5 |
def _create_mappings(self):
'Create the field type mapping.'
self.conn.indices.put_mapping(
index=self.index, doc_type=self.type,
timeout=60, request_timeout=60,
body={
self.type: {
'dynamic_templates': [{
'default_no_analyze_fc': {
'match': 'fc.*',
'mapping': {'index': 'no'},
},
}],
'_all': {
'enabled': False,
},
'_id': {
'index': 'not_analyzed', # allows range queries
},
'properties': self._get_index_mappings(),
},
})
# It is possible to create an index and quickly launch a request
# that will fail because the index hasn't been set up yet. Usually,
# you'll get a "no active shards available" error.
#
# Since index creation is a very rare operation (it only happens
# when the index doesn't already exist), we sit and wait for the
# cluster to become healthy.
self.conn.cluster.health(index=self.index, wait_for_status='yellow') | [
"def",
"_create_mappings",
"(",
"self",
")",
":",
"self",
".",
"conn",
".",
"indices",
".",
"put_mapping",
"(",
"index",
"=",
"self",
".",
"index",
",",
"doc_type",
"=",
"self",
".",
"type",
",",
"timeout",
"=",
"60",
",",
"request_timeout",
"=",
"60",
",",
"body",
"=",
"{",
"self",
".",
"type",
":",
"{",
"'dynamic_templates'",
":",
"[",
"{",
"'default_no_analyze_fc'",
":",
"{",
"'match'",
":",
"'fc.*'",
",",
"'mapping'",
":",
"{",
"'index'",
":",
"'no'",
"}",
",",
"}",
",",
"}",
"]",
",",
"'_all'",
":",
"{",
"'enabled'",
":",
"False",
",",
"}",
",",
"'_id'",
":",
"{",
"'index'",
":",
"'not_analyzed'",
",",
"# allows range queries",
"}",
",",
"'properties'",
":",
"self",
".",
"_get_index_mappings",
"(",
")",
",",
"}",
",",
"}",
")",
"# It is possible to create an index and quickly launch a request",
"# that will fail because the index hasn't been set up yet. Usually,",
"# you'll get a \"no active shards available\" error.",
"#",
"# Since index creation is a very rare operation (it only happens",
"# when the index doesn't already exist), we sit and wait for the",
"# cluster to become healthy.",
"self",
".",
"conn",
".",
"cluster",
".",
"health",
"(",
"index",
"=",
"self",
".",
"index",
",",
"wait_for_status",
"=",
"'yellow'",
")"
]
| 42.033333 | 16.833333 |
def invalidate_token(self, body, params=None):
"""
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-invalidate-token.html>`_
:arg body: The token to invalidate
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
return self.transport.perform_request(
"DELETE", "/_security/oauth2/token", params=params, body=body
) | [
"def",
"invalidate_token",
"(",
"self",
",",
"body",
",",
"params",
"=",
"None",
")",
":",
"if",
"body",
"in",
"SKIP_IN_PATH",
":",
"raise",
"ValueError",
"(",
"\"Empty value passed for a required argument 'body'.\"",
")",
"return",
"self",
".",
"transport",
".",
"perform_request",
"(",
"\"DELETE\"",
",",
"\"/_security/oauth2/token\"",
",",
"params",
"=",
"params",
",",
"body",
"=",
"body",
")"
]
| 42.090909 | 21.727273 |
def read_line(self):
"""
        Interrupt-respecting reader for stdin.
Raises EOFError if the end of stream has been reached
"""
try:
line = self.inp.readline().strip()
except KeyboardInterrupt:
raise EOFError()
# i3status sends EOF, or an empty line
if not line:
raise EOFError()
return line | [
"def",
"read_line",
"(",
"self",
")",
":",
"try",
":",
"line",
"=",
"self",
".",
"inp",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
"except",
"KeyboardInterrupt",
":",
"raise",
"EOFError",
"(",
")",
"# i3status sends EOF, or an empty line",
"if",
"not",
"line",
":",
"raise",
"EOFError",
"(",
")",
"return",
"line"
]
| 23.9375 | 17.5625 |
def from_text(cls, text: str):
""" Construct an AnalysisGraph object from text, using Eidos to perform
machine reading. """
eidosProcessor = process_text(text)
return cls.from_statements(eidosProcessor.statements) | [
"def",
"from_text",
"(",
"cls",
",",
"text",
":",
"str",
")",
":",
"eidosProcessor",
"=",
"process_text",
"(",
"text",
")",
"return",
"cls",
".",
"from_statements",
"(",
"eidosProcessor",
".",
"statements",
")"
]
| 40.166667 | 12.333333 |
def write(self, presets_path):
"""Write this preset to disk in JSON notation.
:param presets_path: the directory where the preset will be
written.
"""
if self.builtin:
raise TypeError("Cannot write built-in preset")
# Make dictionaries of PresetDefaults values
odict = self.opts.dict()
pdict = {self.name: {DESC: self.desc, NOTE: self.note, OPTS: odict}}
if not os.path.exists(presets_path):
os.makedirs(presets_path, mode=0o755)
with open(os.path.join(presets_path, self.name), "w") as pfile:
json.dump(pdict, pfile) | [
"def",
"write",
"(",
"self",
",",
"presets_path",
")",
":",
"if",
"self",
".",
"builtin",
":",
"raise",
"TypeError",
"(",
"\"Cannot write built-in preset\"",
")",
"# Make dictionaries of PresetDefaults values",
"odict",
"=",
"self",
".",
"opts",
".",
"dict",
"(",
")",
"pdict",
"=",
"{",
"self",
".",
"name",
":",
"{",
"DESC",
":",
"self",
".",
"desc",
",",
"NOTE",
":",
"self",
".",
"note",
",",
"OPTS",
":",
"odict",
"}",
"}",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"presets_path",
")",
":",
"os",
".",
"makedirs",
"(",
"presets_path",
",",
"mode",
"=",
"0o755",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"presets_path",
",",
"self",
".",
"name",
")",
",",
"\"w\"",
")",
"as",
"pfile",
":",
"json",
".",
"dump",
"(",
"pdict",
",",
"pfile",
")"
]
| 36.055556 | 19 |
def get(self, id):
"""Get a single group by ID.
:param str id: a group ID
:return: a group
:rtype: :class:`~groupy.api.groups.Group`
"""
url = utils.urljoin(self.url, id)
response = self.session.get(url)
return Group(self, **response.data) | [
"def",
"get",
"(",
"self",
",",
"id",
")",
":",
"url",
"=",
"utils",
".",
"urljoin",
"(",
"self",
".",
"url",
",",
"id",
")",
"response",
"=",
"self",
".",
"session",
".",
"get",
"(",
"url",
")",
"return",
"Group",
"(",
"self",
",",
"*",
"*",
"response",
".",
"data",
")"
]
| 29.5 | 9.8 |
async def _load_message_field(self, reader, msg, field):
"""
Loads message field from the reader. Field is defined by the message field specification.
Returns loaded value, supports field reference.
:param reader:
:param msg:
:param field:
:return:
"""
fname, ftype, params = field[0], field[1], field[2:]
await self.load_field(reader, ftype, params, eref(msg, fname)) | [
"async",
"def",
"_load_message_field",
"(",
"self",
",",
"reader",
",",
"msg",
",",
"field",
")",
":",
"fname",
",",
"ftype",
",",
"params",
"=",
"field",
"[",
"0",
"]",
",",
"field",
"[",
"1",
"]",
",",
"field",
"[",
"2",
":",
"]",
"await",
"self",
".",
"load_field",
"(",
"reader",
",",
"ftype",
",",
"params",
",",
"eref",
"(",
"msg",
",",
"fname",
")",
")"
]
| 36.5 | 21.666667 |
def add(self, properties):
"""
Add a faked Port resource.
Parameters:
properties (dict):
Resource properties.
Special handling and requirements for certain properties:
* 'element-id' will be auto-generated with a unique value across
all instances of this resource type, if not specified.
* 'element-uri' will be auto-generated based upon the element ID,
if not specified.
* 'class' will be auto-generated to 'network-port' or
'storage-port', if not specified.
This method also updates the 'network-port-uris' or
'storage-port-uris' property in the parent Adapter resource, by
adding the URI for the faked Port resource.
Returns:
:class:`zhmcclient_mock.FakedPort`: The faked Port resource.
"""
new_port = super(FakedPortManager, self).add(properties)
adapter = self.parent
if 'network-port-uris' in adapter.properties:
adapter.properties['network-port-uris'].append(new_port.uri)
if 'storage-port-uris' in adapter.properties:
adapter.properties['storage-port-uris'].append(new_port.uri)
return new_port | [
"def",
"add",
"(",
"self",
",",
"properties",
")",
":",
"new_port",
"=",
"super",
"(",
"FakedPortManager",
",",
"self",
")",
".",
"add",
"(",
"properties",
")",
"adapter",
"=",
"self",
".",
"parent",
"if",
"'network-port-uris'",
"in",
"adapter",
".",
"properties",
":",
"adapter",
".",
"properties",
"[",
"'network-port-uris'",
"]",
".",
"append",
"(",
"new_port",
".",
"uri",
")",
"if",
"'storage-port-uris'",
"in",
"adapter",
".",
"properties",
":",
"adapter",
".",
"properties",
"[",
"'storage-port-uris'",
"]",
".",
"append",
"(",
"new_port",
".",
"uri",
")",
"return",
"new_port"
]
| 38.71875 | 23.15625 |
def hacking_python3x_metaclass(logical_line, noqa):
r"""Check for metaclass to be Python 3.x compatible.
Okay: @six.add_metaclass(Meta)\nclass Foo(object):\n pass
Okay: @six.with_metaclass(Meta)\nclass Foo(object):\n pass
Okay: class Foo(object):\n '''docstring\n\n __metaclass__ = Meta\n'''
H236: class Foo(object):\n __metaclass__ = Meta
H236: class Foo(object):\n foo=bar\n __metaclass__ = Meta
H236: class Foo(object):\n '''docstr.'''\n __metaclass__ = Meta
H236: class Foo(object):\n __metaclass__ = \\\n Meta
Okay: class Foo(object):\n __metaclass__ = Meta # noqa
"""
if noqa:
return
split_line = logical_line.split()
if(len(split_line) > 2 and split_line[0] == '__metaclass__' and
split_line[1] == '='):
yield (logical_line.find('__metaclass__'),
"H236: Python 3.x incompatible __metaclass__, "
"use six.add_metaclass()") | [
"def",
"hacking_python3x_metaclass",
"(",
"logical_line",
",",
"noqa",
")",
":",
"if",
"noqa",
":",
"return",
"split_line",
"=",
"logical_line",
".",
"split",
"(",
")",
"if",
"(",
"len",
"(",
"split_line",
")",
">",
"2",
"and",
"split_line",
"[",
"0",
"]",
"==",
"'__metaclass__'",
"and",
"split_line",
"[",
"1",
"]",
"==",
"'='",
")",
":",
"yield",
"(",
"logical_line",
".",
"find",
"(",
"'__metaclass__'",
")",
",",
"\"H236: Python 3.x incompatible __metaclass__, \"",
"\"use six.add_metaclass()\"",
")"
]
| 47.8 | 19.45 |
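
The H236 check above flags the Python 2-only `__metaclass__` attribute and points users at `six.add_metaclass`, which works on both major versions. A minimal before/after, assuming `six` is installed:

```python
import six

class Meta(type):
    """A do-nothing metaclass, used only for illustration."""

# Python 2 only -- this spelling is what H236 flags:
#
#     class Foo(object):
#         __metaclass__ = Meta

# Portable spelling recommended by the check:
@six.add_metaclass(Meta)
class Foo(object):
    pass

print(type(Foo) is Meta)  # True
```
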
def _expand_list(names):
""" Do a wildchar name expansion of object names in a list and return expanded list.
The items are expected to exist as this is used for copy sources or delete targets.
Currently we support wildchars in the key name only.
"""
if names is None:
names = []
elif isinstance(names, basestring):
names = [names]
results = [] # The expanded list.
items = {} # Cached contents of buckets; used for matching.
for name in names:
bucket, key = datalab.storage._bucket.parse_name(name)
results_len = len(results) # If we fail to add any we add name and let caller deal with it.
if bucket:
if not key:
# Just a bucket; add it.
results.append('gs://%s' % bucket)
elif datalab.storage.Item(bucket, key).exists():
results.append('gs://%s/%s' % (bucket, key))
else:
# Expand possible key values.
if bucket not in items and key[:1] == '*':
# We need the full list; cache a copy for efficiency.
items[bucket] = [item.metadata.name
for item in list(datalab.storage.Bucket(bucket).items())]
# If we have a cached copy use it
if bucket in items:
candidates = items[bucket]
# else we have no cached copy but can use prefix matching which is more efficient than
# getting the full contents.
else:
# Get the non-wildchar prefix.
match = re.search('\?|\*|\[', key)
prefix = key
if match:
prefix = key[0:match.start()]
candidates = [item.metadata.name
for item in datalab.storage.Bucket(bucket).items(prefix=prefix)]
for item in candidates:
if fnmatch.fnmatch(item, key):
results.append('gs://%s/%s' % (bucket, item))
# If we added no matches, add the original name and let caller deal with it.
if len(results) == results_len:
results.append(name)
return results | [
"def",
"_expand_list",
"(",
"names",
")",
":",
"if",
"names",
"is",
"None",
":",
"names",
"=",
"[",
"]",
"elif",
"isinstance",
"(",
"names",
",",
"basestring",
")",
":",
"names",
"=",
"[",
"names",
"]",
"results",
"=",
"[",
"]",
"# The expanded list.",
"items",
"=",
"{",
"}",
"# Cached contents of buckets; used for matching.",
"for",
"name",
"in",
"names",
":",
"bucket",
",",
"key",
"=",
"datalab",
".",
"storage",
".",
"_bucket",
".",
"parse_name",
"(",
"name",
")",
"results_len",
"=",
"len",
"(",
"results",
")",
"# If we fail to add any we add name and let caller deal with it.",
"if",
"bucket",
":",
"if",
"not",
"key",
":",
"# Just a bucket; add it.",
"results",
".",
"append",
"(",
"'gs://%s'",
"%",
"bucket",
")",
"elif",
"datalab",
".",
"storage",
".",
"Item",
"(",
"bucket",
",",
"key",
")",
".",
"exists",
"(",
")",
":",
"results",
".",
"append",
"(",
"'gs://%s/%s'",
"%",
"(",
"bucket",
",",
"key",
")",
")",
"else",
":",
"# Expand possible key values.",
"if",
"bucket",
"not",
"in",
"items",
"and",
"key",
"[",
":",
"1",
"]",
"==",
"'*'",
":",
"# We need the full list; cache a copy for efficiency.",
"items",
"[",
"bucket",
"]",
"=",
"[",
"item",
".",
"metadata",
".",
"name",
"for",
"item",
"in",
"list",
"(",
"datalab",
".",
"storage",
".",
"Bucket",
"(",
"bucket",
")",
".",
"items",
"(",
")",
")",
"]",
"# If we have a cached copy use it",
"if",
"bucket",
"in",
"items",
":",
"candidates",
"=",
"items",
"[",
"bucket",
"]",
"# else we have no cached copy but can use prefix matching which is more efficient than",
"# getting the full contents.",
"else",
":",
"# Get the non-wildchar prefix.",
"match",
"=",
"re",
".",
"search",
"(",
"'\\?|\\*|\\['",
",",
"key",
")",
"prefix",
"=",
"key",
"if",
"match",
":",
"prefix",
"=",
"key",
"[",
"0",
":",
"match",
".",
"start",
"(",
")",
"]",
"candidates",
"=",
"[",
"item",
".",
"metadata",
".",
"name",
"for",
"item",
"in",
"datalab",
".",
"storage",
".",
"Bucket",
"(",
"bucket",
")",
".",
"items",
"(",
"prefix",
"=",
"prefix",
")",
"]",
"for",
"item",
"in",
"candidates",
":",
"if",
"fnmatch",
".",
"fnmatch",
"(",
"item",
",",
"key",
")",
":",
"results",
".",
"append",
"(",
"'gs://%s/%s'",
"%",
"(",
"bucket",
",",
"item",
")",
")",
"# If we added no matches, add the original name and let caller deal with it.",
"if",
"len",
"(",
"results",
")",
"==",
"results_len",
":",
"results",
".",
"append",
"(",
"name",
")",
"return",
"results"
]
| 36.754717 | 19.90566 |
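
The wildcard handling in `_expand_list` above boils down to two pieces: peel off the literal prefix before the first `?`, `*`, or `[` so the bucket listing can be narrowed, then test each candidate key with `fnmatch.fnmatch`. A small illustration of both steps with made-up key names:

```python
import fnmatch
import re

key = "logs/2019-0?/events-*.json"

# Literal prefix before the first wildcard character, as in the row above.
match = re.search(r'\?|\*|\[', key)
prefix = key[:match.start()] if match else key
print(prefix)  # logs/2019-0

candidates = [
    "logs/2019-01/events-a.json",
    "logs/2019-01/readme.txt",
    "logs/2019-02/events-b.json",
]
print([c for c in candidates if fnmatch.fnmatch(c, key)])
# ['logs/2019-01/events-a.json', 'logs/2019-02/events-b.json']
```
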
def _module_imports(ctx: GeneratorContext) -> Iterable[ast.Import]:
"""Generate the Python Import AST node for importing all required
language support modules."""
# Yield `import basilisp` so code attempting to call fully qualified
# `basilisp.lang...` modules don't result in compiler errors
yield ast.Import(names=[ast.alias(name="basilisp", asname=None)])
for imp in ctx.imports:
name = imp.key.name
alias = _MODULE_ALIASES.get(name, None)
yield ast.Import(names=[ast.alias(name=name, asname=alias)]) | [
"def",
"_module_imports",
"(",
"ctx",
":",
"GeneratorContext",
")",
"->",
"Iterable",
"[",
"ast",
".",
"Import",
"]",
":",
"# Yield `import basilisp` so code attempting to call fully qualified",
"# `basilisp.lang...` modules don't result in compiler errors",
"yield",
"ast",
".",
"Import",
"(",
"names",
"=",
"[",
"ast",
".",
"alias",
"(",
"name",
"=",
"\"basilisp\"",
",",
"asname",
"=",
"None",
")",
"]",
")",
"for",
"imp",
"in",
"ctx",
".",
"imports",
":",
"name",
"=",
"imp",
".",
"key",
".",
"name",
"alias",
"=",
"_MODULE_ALIASES",
".",
"get",
"(",
"name",
",",
"None",
")",
"yield",
"ast",
".",
"Import",
"(",
"names",
"=",
"[",
"ast",
".",
"alias",
"(",
"name",
"=",
"name",
",",
"asname",
"=",
"alias",
")",
"]",
")"
]
| 54.2 | 17.3 |
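
The generator above yields `ast.Import` nodes directly; for readers unfamiliar with the node shapes, a standard-library-only peek at what such a node contains (module and alias names here are illustrative):

```python
import ast

# An `import basilisp` node with no alias, like the first node yielded above.
plain = ast.Import(names=[ast.alias(name="basilisp", asname=None)])
# An aliased import, i.e. `import basilisp.lang.runtime as runtime` (hypothetical alias).
aliased = ast.Import(names=[ast.alias(name="basilisp.lang.runtime", asname="runtime")])

print(ast.dump(plain))    # e.g. Import(names=[alias(name='basilisp')]); exact text varies by Python version
print(ast.dump(aliased))  # e.g. Import(names=[alias(name='basilisp.lang.runtime', asname='runtime')])
```
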
def do_dice_roll():
"""
Roll n-sided dice and return each result and the total
"""
options = get_options()
dice = Dice(options.sides)
rolls = [dice.roll() for n in range(options.number)]
for roll in rolls:
print('rolled', roll)
if options.number > 1:
print('total', sum(rolls)) | [
"def",
"do_dice_roll",
"(",
")",
":",
"options",
"=",
"get_options",
"(",
")",
"dice",
"=",
"Dice",
"(",
"options",
".",
"sides",
")",
"rolls",
"=",
"[",
"dice",
".",
"roll",
"(",
")",
"for",
"n",
"in",
"range",
"(",
"options",
".",
"number",
")",
"]",
"for",
"roll",
"in",
"rolls",
":",
"print",
"(",
"'rolled'",
",",
"roll",
")",
"if",
"options",
".",
"number",
">",
"1",
":",
"print",
"(",
"'total'",
",",
"sum",
"(",
"rolls",
")",
")"
]
| 25.363636 | 13.181818 |
def sanitized_name(self):
"""Sanitized name of the agent, used for file and directory creation.
"""
a = re.split("[:/]", self.name)
return "_".join([i for i in a if len(i) > 0]) | [
"def",
"sanitized_name",
"(",
"self",
")",
":",
"a",
"=",
"re",
".",
"split",
"(",
"\"[:/]\"",
",",
"self",
".",
"name",
")",
"return",
"\"_\"",
".",
"join",
"(",
"[",
"i",
"for",
"i",
"in",
"a",
"if",
"len",
"(",
"i",
")",
">",
"0",
"]",
")"
]
| 41 | 5.8 |
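
The sanitizer above splits on `:` and `/` and rejoins the non-empty pieces with underscores; one worked input makes the behaviour concrete.

```python
import re

name = "http://example.com:8080/agents/alpha"
parts = re.split("[:/]", name)
print(parts)  # ['http', '', '', 'example.com', '8080', 'agents', 'alpha']
print("_".join(i for i in parts if len(i) > 0))
# http_example.com_8080_agents_alpha
```
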
def graph_dot(self):
"""
Export a graph of the data in dot format.
"""
default_graphviz_template = """
digraph role_dependencies {
size="%size"
dpi=%dpi
ratio="fill"
landscape=false
rankdir="BT";
node [shape = "box",
style = "rounded,filled",
fillcolor = "lightgrey",
fontsize = 20];
edge [style = "dashed",
dir = "forward",
penwidth = 1.5];
%roles_list
%dependencies
}
"""
roles_list = ""
edges = ""
# remove the darkest and brightest colors, still have 100+ colors
adjusted_colors = c.X11_COLORS[125:-325]
random.shuffle(adjusted_colors)
backup_colors = adjusted_colors[:]
for role, fields in sorted(self.report["roles"].iteritems()):
name = utils.normalize_role(role, self.config)
color_length = len(adjusted_colors) - 1
# reset the colors if we run out
if color_length == 0:
adjusted_colors = backup_colors[:]
color_length = len(adjusted_colors) - 1
random_index = random.randint(1, color_length)
roles_list += " role_{0} [label = \"{1}\"]\n" \
.format(re.sub(r'[.-/]', '_', name), name)
edge = '\n edge [color = "{0}"];\n' \
.format(adjusted_colors[random_index])
del adjusted_colors[random_index]
if fields["dependencies"]:
dependencies = ""
for dependency in sorted(fields["dependencies"]):
dependency_name = utils.role_name(dependency)
dependencies += " role_{0} -> role_{1}\n".format(
re.sub(r'[.-/]', '_', name),
re.sub(r'[.-/]', '_',
utils.normalize_role(dependency_name,
self.config)
)
)
edges += "{0}{1}\n".format(edge, dependencies)
graphviz_template = default_graphviz_template.replace("%roles_list",
roles_list)
graphviz_template = graphviz_template.replace("%dependencies",
edges)
graphviz_template = graphviz_template.replace("%size",
self.size)
graphviz_template = graphviz_template.replace("%dpi",
str(self.dpi))
if self.out_file:
utils.string_to_file(self.out_file, graphviz_template)
else:
print graphviz_template | [
"def",
"graph_dot",
"(",
"self",
")",
":",
"default_graphviz_template",
"=",
"\"\"\"\ndigraph role_dependencies {\n size=\"%size\"\n dpi=%dpi\n ratio=\"fill\"\n landscape=false\n rankdir=\"BT\";\n\n node [shape = \"box\",\n style = \"rounded,filled\",\n fillcolor = \"lightgrey\",\n fontsize = 20];\n\n edge [style = \"dashed\",\n dir = \"forward\",\n penwidth = 1.5];\n\n%roles_list\n\n%dependencies\n}\n\"\"\"",
"roles_list",
"=",
"\"\"",
"edges",
"=",
"\"\"",
"# remove the darkest and brightest colors, still have 100+ colors",
"adjusted_colors",
"=",
"c",
".",
"X11_COLORS",
"[",
"125",
":",
"-",
"325",
"]",
"random",
".",
"shuffle",
"(",
"adjusted_colors",
")",
"backup_colors",
"=",
"adjusted_colors",
"[",
":",
"]",
"for",
"role",
",",
"fields",
"in",
"sorted",
"(",
"self",
".",
"report",
"[",
"\"roles\"",
"]",
".",
"iteritems",
"(",
")",
")",
":",
"name",
"=",
"utils",
".",
"normalize_role",
"(",
"role",
",",
"self",
".",
"config",
")",
"color_length",
"=",
"len",
"(",
"adjusted_colors",
")",
"-",
"1",
"# reset the colors if we run out",
"if",
"color_length",
"==",
"0",
":",
"adjusted_colors",
"=",
"backup_colors",
"[",
":",
"]",
"color_length",
"=",
"len",
"(",
"adjusted_colors",
")",
"-",
"1",
"random_index",
"=",
"random",
".",
"randint",
"(",
"1",
",",
"color_length",
")",
"roles_list",
"+=",
"\" role_{0} [label = \\\"{1}\\\"]\\n\"",
".",
"format",
"(",
"re",
".",
"sub",
"(",
"r'[.-/]'",
",",
"'_'",
",",
"name",
")",
",",
"name",
")",
"edge",
"=",
"'\\n edge [color = \"{0}\"];\\n'",
".",
"format",
"(",
"adjusted_colors",
"[",
"random_index",
"]",
")",
"del",
"adjusted_colors",
"[",
"random_index",
"]",
"if",
"fields",
"[",
"\"dependencies\"",
"]",
":",
"dependencies",
"=",
"\"\"",
"for",
"dependency",
"in",
"sorted",
"(",
"fields",
"[",
"\"dependencies\"",
"]",
")",
":",
"dependency_name",
"=",
"utils",
".",
"role_name",
"(",
"dependency",
")",
"dependencies",
"+=",
"\" role_{0} -> role_{1}\\n\"",
".",
"format",
"(",
"re",
".",
"sub",
"(",
"r'[.-/]'",
",",
"'_'",
",",
"name",
")",
",",
"re",
".",
"sub",
"(",
"r'[.-/]'",
",",
"'_'",
",",
"utils",
".",
"normalize_role",
"(",
"dependency_name",
",",
"self",
".",
"config",
")",
")",
")",
"edges",
"+=",
"\"{0}{1}\\n\"",
".",
"format",
"(",
"edge",
",",
"dependencies",
")",
"graphviz_template",
"=",
"default_graphviz_template",
".",
"replace",
"(",
"\"%roles_list\"",
",",
"roles_list",
")",
"graphviz_template",
"=",
"graphviz_template",
".",
"replace",
"(",
"\"%dependencies\"",
",",
"edges",
")",
"graphviz_template",
"=",
"graphviz_template",
".",
"replace",
"(",
"\"%size\"",
",",
"self",
".",
"size",
")",
"graphviz_template",
"=",
"graphviz_template",
".",
"replace",
"(",
"\"%dpi\"",
",",
"str",
"(",
"self",
".",
"dpi",
")",
")",
"if",
"self",
".",
"out_file",
":",
"utils",
".",
"string_to_file",
"(",
"self",
".",
"out_file",
",",
"graphviz_template",
")",
"else",
":",
"print",
"graphviz_template"
]
| 34.625 | 21.15 |
def action(self, action_id, **kwargs):
"""Query an action, specify the parameters for the action as keyword parameters. An optional keyword parameter method='GET' (default) or method='POST' can be set. The character set encoding of the response can be configured using the encoding keyword parameter (defaults to utf-8 by default)"""
if 'method' in kwargs:
method = kwargs['method']
del kwargs['method']
else:
method = 'GET'
if 'encoding' in kwargs:
encoding = kwargs['encoding']
del kwargs['encoding']
else:
encoding = 'utf-8'
return self.request('actions/' + action_id, method, kwargs, False,encoding) | [
"def",
"action",
"(",
"self",
",",
"action_id",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'method'",
"in",
"kwargs",
":",
"method",
"=",
"kwargs",
"[",
"'method'",
"]",
"del",
"kwargs",
"[",
"'method'",
"]",
"else",
":",
"method",
"=",
"'GET'",
"if",
"'encoding'",
"in",
"kwargs",
":",
"encoding",
"=",
"kwargs",
"[",
"'encoding'",
"]",
"del",
"kwargs",
"[",
"'encoding'",
"]",
"else",
":",
"encoding",
"=",
"'utf-8'",
"return",
"self",
".",
"request",
"(",
"'actions/'",
"+",
"action_id",
",",
"method",
",",
"kwargs",
",",
"False",
",",
"encoding",
")"
]
| 47.4 | 15.933333 |
def send(self, packet, interfaces=None):
"""write packet to given interfaces, default is broadcast to all interfaces"""
interfaces = interfaces or self.interfaces # default to all interfaces
interfaces = interfaces if hasattr(interfaces, '__iter__') else [interfaces]
for interface in interfaces:
for f in self.filters:
packet = f.tx(packet, interface) # run outgoing packet through the filters
if packet:
# if not dropped, log the transmit and pass it to the interface's send method
# self.log("OUT ", ("<"+",".join(i.name for i in interfaces)+">").ljust(30), packet.decode())
interface.send(packet) | [
"def",
"send",
"(",
"self",
",",
"packet",
",",
"interfaces",
"=",
"None",
")",
":",
"interfaces",
"=",
"interfaces",
"or",
"self",
".",
"interfaces",
"# default to all interfaces",
"interfaces",
"=",
"interfaces",
"if",
"hasattr",
"(",
"interfaces",
",",
"'__iter__'",
")",
"else",
"[",
"interfaces",
"]",
"for",
"interface",
"in",
"interfaces",
":",
"for",
"f",
"in",
"self",
".",
"filters",
":",
"packet",
"=",
"f",
".",
"tx",
"(",
"packet",
",",
"interface",
")",
"# run outgoing packet through the filters",
"if",
"packet",
":",
"# if not dropped, log the transmit and pass it to the interface's send method",
"# self.log(\"OUT \", (\"<\"+\",\".join(i.name for i in interfaces)+\">\").ljust(30), packet.decode())",
"interface",
".",
"send",
"(",
"packet",
")"
]
| 59.666667 | 27.5 |
def _check_dataframe(dv=None, between=None, within=None, subject=None,
effects=None, data=None):
"""Check dataframe"""
# Check that data is a dataframe
if not isinstance(data, pd.DataFrame):
raise ValueError('Data must be a pandas dataframe.')
# Check that both dv and data are provided.
if any(v is None for v in [dv, data]):
raise ValueError('DV and data must be specified')
# Check that dv is a numeric variable
if data[dv].dtype.kind not in 'fi':
raise ValueError('DV must be numeric.')
# Check that effects is provided
if effects not in ['within', 'between', 'interaction', 'all']:
raise ValueError('Effects must be: within, between, interaction, all')
# Check that within is a string or a list (rm_anova2)
if effects == 'within' and not isinstance(within, (str, list)):
raise ValueError('within must be a string or a list.')
# Check that subject identifier is provided in rm_anova and friedman.
if effects == 'within' and subject is None:
raise ValueError('subject must be specified when effects=within')
# Check that between is a string or a list (anova2)
if effects == 'between' and not isinstance(between, (str,
list)):
raise ValueError('between must be a string or a list.')
# Check that both between and within are present for interaction
if effects == 'interaction':
for input in [within, between]:
if not isinstance(input, (str, list)):
raise ValueError('within and between must be specified when '
'effects=interaction') | [
"def",
"_check_dataframe",
"(",
"dv",
"=",
"None",
",",
"between",
"=",
"None",
",",
"within",
"=",
"None",
",",
"subject",
"=",
"None",
",",
"effects",
"=",
"None",
",",
"data",
"=",
"None",
")",
":",
"# Check that data is a dataframe",
"if",
"not",
"isinstance",
"(",
"data",
",",
"pd",
".",
"DataFrame",
")",
":",
"raise",
"ValueError",
"(",
"'Data must be a pandas dataframe.'",
")",
"# Check that both dv and data are provided.",
"if",
"any",
"(",
"v",
"is",
"None",
"for",
"v",
"in",
"[",
"dv",
",",
"data",
"]",
")",
":",
"raise",
"ValueError",
"(",
"'DV and data must be specified'",
")",
"# Check that dv is a numeric variable",
"if",
"data",
"[",
"dv",
"]",
".",
"dtype",
".",
"kind",
"not",
"in",
"'fi'",
":",
"raise",
"ValueError",
"(",
"'DV must be numeric.'",
")",
"# Check that effects is provided",
"if",
"effects",
"not",
"in",
"[",
"'within'",
",",
"'between'",
",",
"'interaction'",
",",
"'all'",
"]",
":",
"raise",
"ValueError",
"(",
"'Effects must be: within, between, interaction, all'",
")",
"# Check that within is a string or a list (rm_anova2)",
"if",
"effects",
"==",
"'within'",
"and",
"not",
"isinstance",
"(",
"within",
",",
"(",
"str",
",",
"list",
")",
")",
":",
"raise",
"ValueError",
"(",
"'within must be a string or a list.'",
")",
"# Check that subject identifier is provided in rm_anova and friedman.",
"if",
"effects",
"==",
"'within'",
"and",
"subject",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'subject must be specified when effects=within'",
")",
"# Check that between is a string or a list (anova2)",
"if",
"effects",
"==",
"'between'",
"and",
"not",
"isinstance",
"(",
"between",
",",
"(",
"str",
",",
"list",
")",
")",
":",
"raise",
"ValueError",
"(",
"'between must be a string or a list.'",
")",
"# Check that both between and within are present for interaction",
"if",
"effects",
"==",
"'interaction'",
":",
"for",
"input",
"in",
"[",
"within",
",",
"between",
"]",
":",
"if",
"not",
"isinstance",
"(",
"input",
",",
"(",
"str",
",",
"list",
")",
")",
":",
"raise",
"ValueError",
"(",
"'within and between must be specified when '",
"'effects=interaction'",
")"
]
| 54.032258 | 15.677419 |
def disqus_dev(context):
"""
Return the HTML/js code to enable DISQUS comments on a local
development server if settings.DEBUG is True.
"""
if settings.DEBUG:
disqus_url = '//{}{}'.format(
Site.objects.get_current().domain,
context['request'].path
)
return {'disqus_url': disqus_url}
return {} | [
"def",
"disqus_dev",
"(",
"context",
")",
":",
"if",
"settings",
".",
"DEBUG",
":",
"disqus_url",
"=",
"'//{}{}'",
".",
"format",
"(",
"Site",
".",
"objects",
".",
"get_current",
"(",
")",
".",
"domain",
",",
"context",
"[",
"'request'",
"]",
".",
"path",
")",
"return",
"{",
"'disqus_url'",
":",
"disqus_url",
"}",
"return",
"{",
"}"
]
| 23.6 | 17.333333 |
def make_field_objects(field_data, names):
# type: (List[Dict[Text, Text]], Names) -> List[Field]
"""We're going to need to make message parameters too."""
field_objects = []
field_names = [] # type: List[Text]
for field in field_data:
if hasattr(field, 'get') and callable(field.get):
atype = cast(Text, field.get('type'))
name = cast(Text, field.get('name'))
# null values can have a default value of None
has_default = False
default = None
if 'default' in field:
has_default = True
default = field.get('default')
order = field.get('order')
doc = field.get('doc')
other_props = get_other_props(field, FIELD_RESERVED_PROPS)
new_field = Field(atype, name, has_default, default, order, names, doc,
other_props)
# make sure field name has not been used yet
if new_field.name in field_names:
fail_msg = 'Field name %s already in use.' % new_field.name
raise SchemaParseException(fail_msg)
field_names.append(new_field.name)
else:
raise SchemaParseException('Not a valid field: %s' % field)
field_objects.append(new_field)
return field_objects | [
"def",
"make_field_objects",
"(",
"field_data",
",",
"names",
")",
":",
"# type: (List[Dict[Text, Text]], Names) -> List[Field]",
"field_objects",
"=",
"[",
"]",
"field_names",
"=",
"[",
"]",
"# type: List[Text]",
"for",
"field",
"in",
"field_data",
":",
"if",
"hasattr",
"(",
"field",
",",
"'get'",
")",
"and",
"callable",
"(",
"field",
".",
"get",
")",
":",
"atype",
"=",
"cast",
"(",
"Text",
",",
"field",
".",
"get",
"(",
"'type'",
")",
")",
"name",
"=",
"cast",
"(",
"Text",
",",
"field",
".",
"get",
"(",
"'name'",
")",
")",
"# null values can have a default value of None",
"has_default",
"=",
"False",
"default",
"=",
"None",
"if",
"'default'",
"in",
"field",
":",
"has_default",
"=",
"True",
"default",
"=",
"field",
".",
"get",
"(",
"'default'",
")",
"order",
"=",
"field",
".",
"get",
"(",
"'order'",
")",
"doc",
"=",
"field",
".",
"get",
"(",
"'doc'",
")",
"other_props",
"=",
"get_other_props",
"(",
"field",
",",
"FIELD_RESERVED_PROPS",
")",
"new_field",
"=",
"Field",
"(",
"atype",
",",
"name",
",",
"has_default",
",",
"default",
",",
"order",
",",
"names",
",",
"doc",
",",
"other_props",
")",
"# make sure field name has not been used yet",
"if",
"new_field",
".",
"name",
"in",
"field_names",
":",
"fail_msg",
"=",
"'Field name %s already in use.'",
"%",
"new_field",
".",
"name",
"raise",
"SchemaParseException",
"(",
"fail_msg",
")",
"field_names",
".",
"append",
"(",
"new_field",
".",
"name",
")",
"else",
":",
"raise",
"SchemaParseException",
"(",
"'Not a valid field: %s'",
"%",
"field",
")",
"field_objects",
".",
"append",
"(",
"new_field",
")",
"return",
"field_objects"
]
| 46.225806 | 15.612903 |
def build_columns(self, X, verbose=False):
"""construct the model matrix columns for the term
Parameters
----------
X : array-like
Input dataset with n rows
verbose : bool
whether to show warnings
Returns
-------
scipy sparse array with n rows
"""
return sp.sparse.csc_matrix(np.ones((len(X), 1))) | [
"def",
"build_columns",
"(",
"self",
",",
"X",
",",
"verbose",
"=",
"False",
")",
":",
"return",
"sp",
".",
"sparse",
".",
"csc_matrix",
"(",
"np",
".",
"ones",
"(",
"(",
"len",
"(",
"X",
")",
",",
"1",
")",
")",
")"
]
| 24.3125 | 17.375 |
async def get_prefix(self, message):
"""|coro|
Retrieves the prefix the bot is listening to
with the message as a context.
Parameters
-----------
message: :class:`discord.Message`
The message context to get the prefix of.
Returns
--------
Union[List[:class:`str`], :class:`str`]
A list of prefixes or a single prefix that the bot is
listening for.
"""
prefix = ret = self.command_prefix
if callable(prefix):
ret = await discord.utils.maybe_coroutine(prefix, self, message)
if not isinstance(ret, str):
try:
ret = list(ret)
except TypeError:
# It's possible that a generator raised this exception. Don't
# replace it with our own error if that's the case.
if isinstance(ret, collections.Iterable):
raise
raise TypeError("command_prefix must be plain string, iterable of strings, or callable "
"returning either of these, not {}".format(ret.__class__.__name__))
if not ret:
raise ValueError("Iterable command_prefix must contain at least one prefix")
return ret | [
"async",
"def",
"get_prefix",
"(",
"self",
",",
"message",
")",
":",
"prefix",
"=",
"ret",
"=",
"self",
".",
"command_prefix",
"if",
"callable",
"(",
"prefix",
")",
":",
"ret",
"=",
"await",
"discord",
".",
"utils",
".",
"maybe_coroutine",
"(",
"prefix",
",",
"self",
",",
"message",
")",
"if",
"not",
"isinstance",
"(",
"ret",
",",
"str",
")",
":",
"try",
":",
"ret",
"=",
"list",
"(",
"ret",
")",
"except",
"TypeError",
":",
"# It's possible that a generator raised this exception. Don't",
"# replace it with our own error if that's the case.",
"if",
"isinstance",
"(",
"ret",
",",
"collections",
".",
"Iterable",
")",
":",
"raise",
"raise",
"TypeError",
"(",
"\"command_prefix must be plain string, iterable of strings, or callable \"",
"\"returning either of these, not {}\"",
".",
"format",
"(",
"ret",
".",
"__class__",
".",
"__name__",
")",
")",
"if",
"not",
"ret",
":",
"raise",
"ValueError",
"(",
"\"Iterable command_prefix must contain at least one prefix\"",
")",
"return",
"ret"
]
| 34.459459 | 23.216216 |
def create(vm_):
'''
Create a single VM from a data dict
'''
try:
# Check for required profile parameters before sending any API calls.
if vm_['profile'] and config.is_profile_configured(__opts__,
__active_provider_name__ or 'parallels',
vm_['profile'],
vm_=vm_) is False:
return False
except AttributeError:
pass
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.info('Creating Cloud VM %s', vm_['name'])
try:
data = create_node(vm_)
except Exception as exc:
log.error(
'Error creating %s on PARALLELS\n\n'
'The following exception was thrown when trying to '
'run the initial deployment: \n%s',
vm_['name'], exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return False
name = vm_['name']
if not wait_until(name, 'CREATED'):
return {'Error': 'Unable to start {0}, command timed out'.format(name)}
start(vm_['name'], call='action')
if not wait_until(name, 'STARTED'):
return {'Error': 'Unable to start {0}, command timed out'.format(name)}
def __query_node_data(vm_name):
data = show_instance(vm_name, call='action')
if 'public-ip' not in data['network']:
# Trigger another iteration
return
return data
try:
data = salt.utils.cloud.wait_for_ip(
__query_node_data,
update_args=(vm_['name'],),
timeout=config.get_cloud_config_value(
'wait_for_ip_timeout', vm_, __opts__, default=5 * 60),
interval=config.get_cloud_config_value(
'wait_for_ip_interval', vm_, __opts__, default=5),
)
except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
try:
# It might be already up, let's destroy it!
destroy(vm_['name'])
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(six.text_type(exc))
comps = data['network']['public-ip']['address'].split('/')
public_ip = comps[0]
vm_['ssh_host'] = public_ip
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug(
'\'%s\' VM creation details:\n%s',
vm_['name'], pprint.pformat(data)
)
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return data | [
"def",
"create",
"(",
"vm_",
")",
":",
"try",
":",
"# Check for required profile parameters before sending any API calls.",
"if",
"vm_",
"[",
"'profile'",
"]",
"and",
"config",
".",
"is_profile_configured",
"(",
"__opts__",
",",
"__active_provider_name__",
"or",
"'parallels'",
",",
"vm_",
"[",
"'profile'",
"]",
",",
"vm_",
"=",
"vm_",
")",
"is",
"False",
":",
"return",
"False",
"except",
"AttributeError",
":",
"pass",
"__utils__",
"[",
"'cloud.fire_event'",
"]",
"(",
"'event'",
",",
"'starting create'",
",",
"'salt/cloud/{0}/creating'",
".",
"format",
"(",
"vm_",
"[",
"'name'",
"]",
")",
",",
"args",
"=",
"__utils__",
"[",
"'cloud.filter_event'",
"]",
"(",
"'creating'",
",",
"vm_",
",",
"[",
"'name'",
",",
"'profile'",
",",
"'provider'",
",",
"'driver'",
"]",
")",
",",
"sock_dir",
"=",
"__opts__",
"[",
"'sock_dir'",
"]",
",",
"transport",
"=",
"__opts__",
"[",
"'transport'",
"]",
")",
"log",
".",
"info",
"(",
"'Creating Cloud VM %s'",
",",
"vm_",
"[",
"'name'",
"]",
")",
"try",
":",
"data",
"=",
"create_node",
"(",
"vm_",
")",
"except",
"Exception",
"as",
"exc",
":",
"log",
".",
"error",
"(",
"'Error creating %s on PARALLELS\\n\\n'",
"'The following exception was thrown when trying to '",
"'run the initial deployment: \\n%s'",
",",
"vm_",
"[",
"'name'",
"]",
",",
"exc",
",",
"# Show the traceback if the debug logging level is enabled",
"exc_info_on_loglevel",
"=",
"logging",
".",
"DEBUG",
")",
"return",
"False",
"name",
"=",
"vm_",
"[",
"'name'",
"]",
"if",
"not",
"wait_until",
"(",
"name",
",",
"'CREATED'",
")",
":",
"return",
"{",
"'Error'",
":",
"'Unable to start {0}, command timed out'",
".",
"format",
"(",
"name",
")",
"}",
"start",
"(",
"vm_",
"[",
"'name'",
"]",
",",
"call",
"=",
"'action'",
")",
"if",
"not",
"wait_until",
"(",
"name",
",",
"'STARTED'",
")",
":",
"return",
"{",
"'Error'",
":",
"'Unable to start {0}, command timed out'",
".",
"format",
"(",
"name",
")",
"}",
"def",
"__query_node_data",
"(",
"vm_name",
")",
":",
"data",
"=",
"show_instance",
"(",
"vm_name",
",",
"call",
"=",
"'action'",
")",
"if",
"'public-ip'",
"not",
"in",
"data",
"[",
"'network'",
"]",
":",
"# Trigger another iteration",
"return",
"return",
"data",
"try",
":",
"data",
"=",
"salt",
".",
"utils",
".",
"cloud",
".",
"wait_for_ip",
"(",
"__query_node_data",
",",
"update_args",
"=",
"(",
"vm_",
"[",
"'name'",
"]",
",",
")",
",",
"timeout",
"=",
"config",
".",
"get_cloud_config_value",
"(",
"'wait_for_ip_timeout'",
",",
"vm_",
",",
"__opts__",
",",
"default",
"=",
"5",
"*",
"60",
")",
",",
"interval",
"=",
"config",
".",
"get_cloud_config_value",
"(",
"'wait_for_ip_interval'",
",",
"vm_",
",",
"__opts__",
",",
"default",
"=",
"5",
")",
",",
")",
"except",
"(",
"SaltCloudExecutionTimeout",
",",
"SaltCloudExecutionFailure",
")",
"as",
"exc",
":",
"try",
":",
"# It might be already up, let's destroy it!",
"destroy",
"(",
"vm_",
"[",
"'name'",
"]",
")",
"except",
"SaltCloudSystemExit",
":",
"pass",
"finally",
":",
"raise",
"SaltCloudSystemExit",
"(",
"six",
".",
"text_type",
"(",
"exc",
")",
")",
"comps",
"=",
"data",
"[",
"'network'",
"]",
"[",
"'public-ip'",
"]",
"[",
"'address'",
"]",
".",
"split",
"(",
"'/'",
")",
"public_ip",
"=",
"comps",
"[",
"0",
"]",
"vm_",
"[",
"'ssh_host'",
"]",
"=",
"public_ip",
"ret",
"=",
"__utils__",
"[",
"'cloud.bootstrap'",
"]",
"(",
"vm_",
",",
"__opts__",
")",
"log",
".",
"info",
"(",
"'Created Cloud VM \\'%s\\''",
",",
"vm_",
"[",
"'name'",
"]",
")",
"log",
".",
"debug",
"(",
"'\\'%s\\' VM creation details:\\n%s'",
",",
"vm_",
"[",
"'name'",
"]",
",",
"pprint",
".",
"pformat",
"(",
"data",
")",
")",
"__utils__",
"[",
"'cloud.fire_event'",
"]",
"(",
"'event'",
",",
"'created instance'",
",",
"'salt/cloud/{0}/created'",
".",
"format",
"(",
"vm_",
"[",
"'name'",
"]",
")",
",",
"args",
"=",
"__utils__",
"[",
"'cloud.filter_event'",
"]",
"(",
"'created'",
",",
"vm_",
",",
"[",
"'name'",
",",
"'profile'",
",",
"'provider'",
",",
"'driver'",
"]",
")",
",",
"sock_dir",
"=",
"__opts__",
"[",
"'sock_dir'",
"]",
",",
"transport",
"=",
"__opts__",
"[",
"'transport'",
"]",
")",
"return",
"data"
]
| 33.763441 | 21.870968 |
def _compute_value(self, pkt):
# type: (packet.Packet) -> int
""" Computes the value of this field based on the provided packet and
the length_of field and the adjust callback
@param packet.Packet pkt: the packet from which is computed this field value. # noqa: E501
@return int: the computed value for this field.
@raise KeyError: the packet nor its payload do not contain an attribute
with the length_of name.
@raise AssertionError
@raise KeyError if _length_of is not one of pkt fields
"""
fld, fval = pkt.getfield_and_val(self._length_of)
val = fld.i2len(pkt, fval)
ret = self._adjust(val)
assert(ret >= 0)
return ret | [
"def",
"_compute_value",
"(",
"self",
",",
"pkt",
")",
":",
"# type: (packet.Packet) -> int",
"fld",
",",
"fval",
"=",
"pkt",
".",
"getfield_and_val",
"(",
"self",
".",
"_length_of",
")",
"val",
"=",
"fld",
".",
"i2len",
"(",
"pkt",
",",
"fval",
")",
"ret",
"=",
"self",
".",
"_adjust",
"(",
"val",
")",
"assert",
"(",
"ret",
">=",
"0",
")",
"return",
"ret"
]
| 42.882353 | 16.764706 |
def distance_to(self, other):
"""Return Euclidian distance between self and other Firefly"""
return np.linalg.norm(self.position-other.position) | [
"def",
"distance_to",
"(",
"self",
",",
"other",
")",
":",
"return",
"np",
".",
"linalg",
".",
"norm",
"(",
"self",
".",
"position",
"-",
"other",
".",
"position",
")"
]
| 52.666667 | 10 |
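The distance above is plain Euclidean distance on the `position` arrays; a minimal stand-in class (assuming `Firefly` only needs that attribute here) shows it end to end:

import numpy as np

class Firefly:
    # Minimal stand-in: only the attribute the method relies on.
    def __init__(self, position):
        self.position = np.asarray(position, dtype=float)

    def distance_to(self, other):
        return np.linalg.norm(self.position - other.position)

a, b = Firefly([0.0, 0.0]), Firefly([3.0, 4.0])
print(a.distance_to(b))  # 5.0
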
def multi_get(self, urls, query_params=None, to_json=True):
"""Issue multiple GET requests.
Args:
urls - A string URL or list of string URLs
query_params - None, a dict, or a list of dicts representing the query params
to_json - A boolean, should the responses be returned as JSON blobs
Returns:
            a list of dicts if to_json is set, or requests.response otherwise.
Raises:
InvalidRequestError - Can not decide how many requests to issue.
"""
return self._multi_request(
MultiRequest._VERB_GET, urls, query_params,
data=None, to_json=to_json,
) | [
"def",
"multi_get",
"(",
"self",
",",
"urls",
",",
"query_params",
"=",
"None",
",",
"to_json",
"=",
"True",
")",
":",
"return",
"self",
".",
"_multi_request",
"(",
"MultiRequest",
".",
"_VERB_GET",
",",
"urls",
",",
"query_params",
",",
"data",
"=",
"None",
",",
"to_json",
"=",
"to_json",
",",
")"
]
| 41.625 | 22.625 |
def Sample(self, task, status):
"""Takes a sample of the status of a task for profiling.
Args:
task (Task): a task.
status (str): status.
"""
sample_time = time.time()
sample = '{0:f}\t{1:s}\t{2:s}\n'.format(
sample_time, task.identifier, status)
self._WritesString(sample) | [
"def",
"Sample",
"(",
"self",
",",
"task",
",",
"status",
")",
":",
"sample_time",
"=",
"time",
".",
"time",
"(",
")",
"sample",
"=",
"'{0:f}\\t{1:s}\\t{2:s}\\n'",
".",
"format",
"(",
"sample_time",
",",
"task",
".",
"identifier",
",",
"status",
")",
"self",
".",
"_WritesString",
"(",
"sample",
")"
]
| 28 | 12.454545 |
def request_encode_body(self, method, url, fields=None, headers=None,
encode_multipart=True, multipart_boundary=None,
**urlopen_kw):
"""
Make a request using :meth:`urlopen` with the ``fields`` encoded in
the body. This is useful for request methods like POST, PUT, PATCH, etc.
When ``encode_multipart=True`` (default), then
:meth:`urllib3.filepost.encode_multipart_formdata` is used to encode
the payload with the appropriate content type. Otherwise
:meth:`urllib.urlencode` is used with the
'application/x-www-form-urlencoded' content type.
Multipart encoding must be used when posting files, and it's reasonably
safe to use it in other times too. However, it may break request
signing, such as with OAuth.
Supports an optional ``fields`` parameter of key/value strings AND
key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
the MIME type is optional. For example::
fields = {
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(),
'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
}
When uploading a file, providing a filename (the first parameter of the
tuple) is optional but recommended to best mimic behavior of browsers.
Note that if ``headers`` are supplied, the 'Content-Type' header will
be overwritten because it depends on the dynamic random boundary string
which is used to compose the body of the request. The random boundary
string can be explicitly set with the ``multipart_boundary`` parameter.
"""
if headers is None:
headers = self.headers
extra_kw = {'headers': {}}
if fields:
if 'body' in urlopen_kw:
raise TypeError(
"request got values for both 'fields' and 'body', can only specify one.")
if encode_multipart:
body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary)
else:
body, content_type = urlencode(fields), 'application/x-www-form-urlencoded'
extra_kw['body'] = body
extra_kw['headers'] = {'Content-Type': content_type}
extra_kw['headers'].update(headers)
extra_kw.update(urlopen_kw)
return self.urlopen(method, url, **extra_kw) | [
"def",
"request_encode_body",
"(",
"self",
",",
"method",
",",
"url",
",",
"fields",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"encode_multipart",
"=",
"True",
",",
"multipart_boundary",
"=",
"None",
",",
"*",
"*",
"urlopen_kw",
")",
":",
"if",
"headers",
"is",
"None",
":",
"headers",
"=",
"self",
".",
"headers",
"extra_kw",
"=",
"{",
"'headers'",
":",
"{",
"}",
"}",
"if",
"fields",
":",
"if",
"'body'",
"in",
"urlopen_kw",
":",
"raise",
"TypeError",
"(",
"\"request got values for both 'fields' and 'body', can only specify one.\"",
")",
"if",
"encode_multipart",
":",
"body",
",",
"content_type",
"=",
"encode_multipart_formdata",
"(",
"fields",
",",
"boundary",
"=",
"multipart_boundary",
")",
"else",
":",
"body",
",",
"content_type",
"=",
"urlencode",
"(",
"fields",
")",
",",
"'application/x-www-form-urlencoded'",
"extra_kw",
"[",
"'body'",
"]",
"=",
"body",
"extra_kw",
"[",
"'headers'",
"]",
"=",
"{",
"'Content-Type'",
":",
"content_type",
"}",
"extra_kw",
"[",
"'headers'",
"]",
".",
"update",
"(",
"headers",
")",
"extra_kw",
".",
"update",
"(",
"urlopen_kw",
")",
"return",
"self",
".",
"urlopen",
"(",
"method",
",",
"url",
",",
"*",
"*",
"extra_kw",
")"
]
| 43.916667 | 26.216667 |
def non_decreasing(values):
"""True if values are not decreasing."""
return all(x <= y for x, y in zip(values, values[1:])) | [
"def",
"non_decreasing",
"(",
"values",
")",
":",
"return",
"all",
"(",
"x",
"<=",
"y",
"for",
"x",
",",
"y",
"in",
"zip",
"(",
"values",
",",
"values",
"[",
"1",
":",
"]",
")",
")"
]
| 43 | 10.333333 |
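A few quick checks of the monotonicity helper above; note that empty and single-element sequences are vacuously non-decreasing because `zip` yields no adjacent pairs:

def non_decreasing(values):
    return all(x <= y for x, y in zip(values, values[1:]))

print(non_decreasing([1, 2, 2, 5]))  # True
print(non_decreasing([1, 3, 2]))     # False
print(non_decreasing([]))            # True, no pair can violate the order
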
def get_cli_multi_parser(funcs, skip_first=0):
"""makes a parser for parsing cli arguments for `func`.
:param list funcs: the function the parser will parse
:param int skip_first: skip this many first arguments of the func
"""
parser = ArgumentParser(description='which subcommand do you want?')
subparsers = parser.add_subparsers(
title='subcommands', dest='subcmd', help=''
)
for func in funcs:
help_msg, func_args = _get_func_args(func)
sub_parser = subparsers.add_parser(func.__name__, help=help_msg)
get_cli_parser(func, skip_first=skip_first, parser=sub_parser)
return parser | [
"def",
"get_cli_multi_parser",
"(",
"funcs",
",",
"skip_first",
"=",
"0",
")",
":",
"parser",
"=",
"ArgumentParser",
"(",
"description",
"=",
"'which subcommand do you want?'",
")",
"subparsers",
"=",
"parser",
".",
"add_subparsers",
"(",
"title",
"=",
"'subcommands'",
",",
"dest",
"=",
"'subcmd'",
",",
"help",
"=",
"''",
")",
"for",
"func",
"in",
"funcs",
":",
"help_msg",
",",
"func_args",
"=",
"_get_func_args",
"(",
"func",
")",
"sub_parser",
"=",
"subparsers",
".",
"add_parser",
"(",
"func",
".",
"__name__",
",",
"help",
"=",
"help_msg",
")",
"get_cli_parser",
"(",
"func",
",",
"skip_first",
"=",
"skip_first",
",",
"parser",
"=",
"sub_parser",
")",
"return",
"parser"
]
| 42.4 | 18.933333 |
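The same sub-command layout can be sketched without the project's helpers; since `_get_func_args` is not shown, this standalone version assumes the help text comes from the docstring and the argument names from the signature:

import inspect
from argparse import ArgumentParser

def build_multi_parser(funcs, skip_first=0):
    # One sub-command per function, named after the function itself.
    parser = ArgumentParser(description='which subcommand do you want?')
    subparsers = parser.add_subparsers(title='subcommands', dest='subcmd')
    for func in funcs:
        sub = subparsers.add_parser(func.__name__, help=(func.__doc__ or '').strip())
        for name in list(inspect.signature(func).parameters)[skip_first:]:
            sub.add_argument(name)
    return parser

def greet(name):
    """Print a greeting."""
    print('hello', name)

args = build_multi_parser([greet]).parse_args(['greet', 'world'])
print(args.subcmd, args.name)  # greet world
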
def delete(table, chain=None, position=None, rule=None, family='ipv4'):
'''
Delete a rule from the specified table/chain, specifying either the rule
in its entirety, or the rule's position in the chain.
This function accepts a rule in a standard iptables command format,
starting with the chain. Trying to force users to adapt to a new
method of creating rules would be irritating at best, and we
already have a parser that can handle it.
CLI Examples:
.. code-block:: bash
salt '*' iptables.delete filter INPUT position=3
salt '*' iptables.delete filter INPUT \\
rule='-m state --state RELATED,ESTABLISHED -j ACCEPT'
IPv6:
salt '*' iptables.delete filter INPUT position=3 family=ipv6
salt '*' iptables.delete filter INPUT \\
rule='-m state --state RELATED,ESTABLISHED -j ACCEPT' \\
family=ipv6
'''
if position and rule:
return 'Error: Only specify a position or a rule, not both'
if position:
rule = position
wait = '--wait' if _has_option('--wait', family) else ''
cmd = '{0} {1} -t {2} -D {3} {4}'.format(
_iptables_cmd(family), wait, table, chain, rule)
out = __salt__['cmd.run'](cmd)
return out | [
"def",
"delete",
"(",
"table",
",",
"chain",
"=",
"None",
",",
"position",
"=",
"None",
",",
"rule",
"=",
"None",
",",
"family",
"=",
"'ipv4'",
")",
":",
"if",
"position",
"and",
"rule",
":",
"return",
"'Error: Only specify a position or a rule, not both'",
"if",
"position",
":",
"rule",
"=",
"position",
"wait",
"=",
"'--wait'",
"if",
"_has_option",
"(",
"'--wait'",
",",
"family",
")",
"else",
"''",
"cmd",
"=",
"'{0} {1} -t {2} -D {3} {4}'",
".",
"format",
"(",
"_iptables_cmd",
"(",
"family",
")",
",",
"wait",
",",
"table",
",",
"chain",
",",
"rule",
")",
"out",
"=",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"cmd",
")",
"return",
"out"
]
| 34.888889 | 25.833333 |
def __replace_names(sentence, counts):
"""Lets find and replace all instances of #NAME
:param _sentence:
:param counts:
"""
if sentence is not None:
while sentence.find('#NAME') != -1:
sentence = sentence.replace('#NAME', str(__get_name(counts)), 1)
if sentence.find('#NAME') == -1:
return sentence
return sentence
else:
return sentence | [
"def",
"__replace_names",
"(",
"sentence",
",",
"counts",
")",
":",
"if",
"sentence",
"is",
"not",
"None",
":",
"while",
"sentence",
".",
"find",
"(",
"'#NAME'",
")",
"!=",
"-",
"1",
":",
"sentence",
"=",
"sentence",
".",
"replace",
"(",
"'#NAME'",
",",
"str",
"(",
"__get_name",
"(",
"counts",
")",
")",
",",
"1",
")",
"if",
"sentence",
".",
"find",
"(",
"'#NAME'",
")",
"==",
"-",
"1",
":",
"return",
"sentence",
"return",
"sentence",
"else",
":",
"return",
"sentence"
]
| 25.75 | 18.25 |
def encode_varint(v, f):
"""Encode integer `v` to file `f`.
Parameters
----------
v: int
Integer v >= 0.
f: file
Object containing a write method.
Returns
-------
int
Number of bytes written.
"""
assert v >= 0
num_bytes = 0
while True:
b = v % 0x80
v = v // 0x80
if v > 0:
b = b | 0x80
f.write(FIELD_U8.pack(b))
num_bytes += 1
if v == 0:
break
return num_bytes | [
"def",
"encode_varint",
"(",
"v",
",",
"f",
")",
":",
"assert",
"v",
">=",
"0",
"num_bytes",
"=",
"0",
"while",
"True",
":",
"b",
"=",
"v",
"%",
"0x80",
"v",
"=",
"v",
"//",
"0x80",
"if",
"v",
">",
"0",
":",
"b",
"=",
"b",
"|",
"0x80",
"f",
".",
"write",
"(",
"FIELD_U8",
".",
"pack",
"(",
"b",
")",
")",
"num_bytes",
"+=",
"1",
"if",
"v",
"==",
"0",
":",
"break",
"return",
"num_bytes"
]
| 15.125 | 23.84375 |
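The encoder above emits 7 bits per byte with a continuation flag in the high bit; a round trip works with any writable byte stream, assuming `FIELD_U8` is simply `struct.Struct('<B')` (one unsigned byte):

import io
import struct

FIELD_U8 = struct.Struct('<B')  # assumption: a single unsigned byte per group

def encode_varint(v, f):
    assert v >= 0
    num_bytes = 0
    while True:
        b = v % 0x80            # low 7 bits
        v = v // 0x80
        if v > 0:
            b = b | 0x80        # continuation flag: more bytes follow
        f.write(FIELD_U8.pack(b))
        num_bytes += 1
        if v == 0:
            break
    return num_bytes

def decode_varint(f):
    # Inverse of the encoder: accumulate 7 bits per byte, least significant group first.
    result, shift = 0, 0
    while True:
        (b,) = FIELD_U8.unpack(f.read(1))
        result |= (b & 0x7F) << shift
        if not b & 0x80:
            return result
        shift += 7

buf = io.BytesIO()
encode_varint(300, buf)
buf.seek(0)
print(decode_varint(buf))  # 300
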
def _notification_stmt(self, stmt: Statement, sctx: SchemaContext) -> None:
"""Handle notification statement."""
self._handle_child(NotificationNode(), stmt, sctx) | [
"def",
"_notification_stmt",
"(",
"self",
",",
"stmt",
":",
"Statement",
",",
"sctx",
":",
"SchemaContext",
")",
"->",
"None",
":",
"self",
".",
"_handle_child",
"(",
"NotificationNode",
"(",
")",
",",
"stmt",
",",
"sctx",
")"
]
| 59 | 17.666667 |
def _lmder1_kowalik_osborne():
"""Kowalik & Osborne function (lmder1 test #9)"""
v = np.asfarray([4, 2, 1, 0.5, 0.25, 0.167, 0.125, 0.1, 0.0833, 0.0714, 0.0625])
y2 = np.asfarray([0.1957, 0.1947, 0.1735, 0.16, 0.0844, 0.0627, 0.0456,
0.0342, 0.0323, 0.0235, 0.0246])
def func(params, vec):
tmp1 = v * (v + params[1])
tmp2 = v * (v + params[2]) + params[3]
vec[:] = y2 - params[0] * tmp1 / tmp2
def jac(params, jac):
tmp1 = v * (v + params[1])
tmp2 = v * (v + params[2]) + params[3]
jac[0] = -tmp1 / tmp2
jac[1] = -v * params[0] / tmp2
jac[2] = jac[0] * jac[1]
jac[3] = jac[2] / v
guess = np.asfarray([0.25, 0.39, 0.415, 0.39])
_lmder1_driver(11, func, jac, guess,
0.7289151028829448e-01, 0.1753583772112895e-01,
[0.1928078104762493e+00, 0.1912626533540709e+00,
0.1230528010469309e+00, 0.1360532211505167e+00])
_lmder1_driver(11, func, jac, guess * 10,
0.2979370075552020e+01, 0.3205219291793696e-01,
[0.7286754737686598e+06, -0.1407588031293926e+02,
-0.3297779778419661e+08, -0.2057159419780170e+08]) | [
"def",
"_lmder1_kowalik_osborne",
"(",
")",
":",
"v",
"=",
"np",
".",
"asfarray",
"(",
"[",
"4",
",",
"2",
",",
"1",
",",
"0.5",
",",
"0.25",
",",
"0.167",
",",
"0.125",
",",
"0.1",
",",
"0.0833",
",",
"0.0714",
",",
"0.0625",
"]",
")",
"y2",
"=",
"np",
".",
"asfarray",
"(",
"[",
"0.1957",
",",
"0.1947",
",",
"0.1735",
",",
"0.16",
",",
"0.0844",
",",
"0.0627",
",",
"0.0456",
",",
"0.0342",
",",
"0.0323",
",",
"0.0235",
",",
"0.0246",
"]",
")",
"def",
"func",
"(",
"params",
",",
"vec",
")",
":",
"tmp1",
"=",
"v",
"*",
"(",
"v",
"+",
"params",
"[",
"1",
"]",
")",
"tmp2",
"=",
"v",
"*",
"(",
"v",
"+",
"params",
"[",
"2",
"]",
")",
"+",
"params",
"[",
"3",
"]",
"vec",
"[",
":",
"]",
"=",
"y2",
"-",
"params",
"[",
"0",
"]",
"*",
"tmp1",
"/",
"tmp2",
"def",
"jac",
"(",
"params",
",",
"jac",
")",
":",
"tmp1",
"=",
"v",
"*",
"(",
"v",
"+",
"params",
"[",
"1",
"]",
")",
"tmp2",
"=",
"v",
"*",
"(",
"v",
"+",
"params",
"[",
"2",
"]",
")",
"+",
"params",
"[",
"3",
"]",
"jac",
"[",
"0",
"]",
"=",
"-",
"tmp1",
"/",
"tmp2",
"jac",
"[",
"1",
"]",
"=",
"-",
"v",
"*",
"params",
"[",
"0",
"]",
"/",
"tmp2",
"jac",
"[",
"2",
"]",
"=",
"jac",
"[",
"0",
"]",
"*",
"jac",
"[",
"1",
"]",
"jac",
"[",
"3",
"]",
"=",
"jac",
"[",
"2",
"]",
"/",
"v",
"guess",
"=",
"np",
".",
"asfarray",
"(",
"[",
"0.25",
",",
"0.39",
",",
"0.415",
",",
"0.39",
"]",
")",
"_lmder1_driver",
"(",
"11",
",",
"func",
",",
"jac",
",",
"guess",
",",
"0.7289151028829448e-01",
",",
"0.1753583772112895e-01",
",",
"[",
"0.1928078104762493e+00",
",",
"0.1912626533540709e+00",
",",
"0.1230528010469309e+00",
",",
"0.1360532211505167e+00",
"]",
")",
"_lmder1_driver",
"(",
"11",
",",
"func",
",",
"jac",
",",
"guess",
"*",
"10",
",",
"0.2979370075552020e+01",
",",
"0.3205219291793696e-01",
",",
"[",
"0.7286754737686598e+06",
",",
"-",
"0.1407588031293926e+02",
",",
"-",
"0.3297779778419661e+08",
",",
"-",
"0.2057159419780170e+08",
"]",
")"
]
| 42 | 18.448276 |
def getByteStatistic(self, wanInterfaceId=1, timeout=1):
"""Execute GetTotalBytesSent&GetTotalBytesReceived actions to get WAN statistics.
:param int wanInterfaceId: the id of the WAN device
:param float timeout: the timeout to wait for the action to be executed
:return: a tuple of two values, total bytes sent and total bytes received
:rtype: list[int]
"""
namespace = Wan.getServiceType("getByteStatistic") + str(wanInterfaceId)
uri = self.getControlURL(namespace)
results = self.execute(uri, namespace, "GetTotalBytesSent", timeout=timeout)
results2 = self.execute(uri, namespace, "GetTotalBytesReceived", timeout=timeout)
return [int(results["NewTotalBytesSent"]),
int(results2["NewTotalBytesReceived"])] | [
"def",
"getByteStatistic",
"(",
"self",
",",
"wanInterfaceId",
"=",
"1",
",",
"timeout",
"=",
"1",
")",
":",
"namespace",
"=",
"Wan",
".",
"getServiceType",
"(",
"\"getByteStatistic\"",
")",
"+",
"str",
"(",
"wanInterfaceId",
")",
"uri",
"=",
"self",
".",
"getControlURL",
"(",
"namespace",
")",
"results",
"=",
"self",
".",
"execute",
"(",
"uri",
",",
"namespace",
",",
"\"GetTotalBytesSent\"",
",",
"timeout",
"=",
"timeout",
")",
"results2",
"=",
"self",
".",
"execute",
"(",
"uri",
",",
"namespace",
",",
"\"GetTotalBytesReceived\"",
",",
"timeout",
"=",
"timeout",
")",
"return",
"[",
"int",
"(",
"results",
"[",
"\"NewTotalBytesSent\"",
"]",
")",
",",
"int",
"(",
"results2",
"[",
"\"NewTotalBytesReceived\"",
"]",
")",
"]"
]
| 50.0625 | 25.6875 |
def add_papyrus_handler(self, route_name_prefix, base_url, handler):
""" Add a Papyrus handler, i.e. a handler defining the MapFish
HTTP interface.
Example::
import papyrus
config.include(papyrus)
config.add_papyrus_handler(
'spots', '/spots', 'mypackage.handlers.SpotHandler')
Arguments:
``route_name_prefix`` The prefix used for the route names
passed to ``config.add_handler``.
``base_url`` The web service's base URL, e.g. ``/spots``. No
trailing slash!
``handler`` a dotted name or a reference to a handler class,
e.g. ``'mypackage.handlers.MyHandler'``.
"""
route_name = route_name_prefix + '_read_many'
self.add_handler(route_name, base_url, handler,
action='read_many', request_method='GET')
route_name = route_name_prefix + '_read_one'
self.add_handler(route_name, base_url + '/{id}', handler,
action='read_one', request_method='GET')
route_name = route_name_prefix + '_count'
self.add_handler(route_name, base_url + '/count', handler,
action='count', request_method='GET')
route_name = route_name_prefix + '_create'
self.add_handler(route_name, base_url, handler,
action='create', request_method='POST')
route_name = route_name_prefix + '_update'
self.add_handler(route_name, base_url + '/{id}', handler,
action='update', request_method='PUT')
route_name = route_name_prefix + '_delete'
self.add_handler(route_name, base_url + '/{id}', handler,
action='delete', request_method='DELETE') | [
"def",
"add_papyrus_handler",
"(",
"self",
",",
"route_name_prefix",
",",
"base_url",
",",
"handler",
")",
":",
"route_name",
"=",
"route_name_prefix",
"+",
"'_read_many'",
"self",
".",
"add_handler",
"(",
"route_name",
",",
"base_url",
",",
"handler",
",",
"action",
"=",
"'read_many'",
",",
"request_method",
"=",
"'GET'",
")",
"route_name",
"=",
"route_name_prefix",
"+",
"'_read_one'",
"self",
".",
"add_handler",
"(",
"route_name",
",",
"base_url",
"+",
"'/{id}'",
",",
"handler",
",",
"action",
"=",
"'read_one'",
",",
"request_method",
"=",
"'GET'",
")",
"route_name",
"=",
"route_name_prefix",
"+",
"'_count'",
"self",
".",
"add_handler",
"(",
"route_name",
",",
"base_url",
"+",
"'/count'",
",",
"handler",
",",
"action",
"=",
"'count'",
",",
"request_method",
"=",
"'GET'",
")",
"route_name",
"=",
"route_name_prefix",
"+",
"'_create'",
"self",
".",
"add_handler",
"(",
"route_name",
",",
"base_url",
",",
"handler",
",",
"action",
"=",
"'create'",
",",
"request_method",
"=",
"'POST'",
")",
"route_name",
"=",
"route_name_prefix",
"+",
"'_update'",
"self",
".",
"add_handler",
"(",
"route_name",
",",
"base_url",
"+",
"'/{id}'",
",",
"handler",
",",
"action",
"=",
"'update'",
",",
"request_method",
"=",
"'PUT'",
")",
"route_name",
"=",
"route_name_prefix",
"+",
"'_delete'",
"self",
".",
"add_handler",
"(",
"route_name",
",",
"base_url",
"+",
"'/{id}'",
",",
"handler",
",",
"action",
"=",
"'delete'",
",",
"request_method",
"=",
"'DELETE'",
")"
]
| 40.425 | 19.1 |
def join_parsed_parts(self, drv, root, parts, drv2, root2, parts2):
"""
Join the two paths represented by the respective
(drive, root, parts) tuples. Return a new (drive, root, parts) tuple.
"""
if root2:
if not drv2 and drv:
return drv, root2, [drv + root2] + parts2[1:]
elif drv2:
if drv2 == drv or self.casefold(drv2) == self.casefold(drv):
# Same drive => second path is relative to the first
return drv, root, parts + parts2[1:]
else:
# Second path is non-anchored (common case)
return drv, root, parts + parts2
return drv2, root2, parts2 | [
"def",
"join_parsed_parts",
"(",
"self",
",",
"drv",
",",
"root",
",",
"parts",
",",
"drv2",
",",
"root2",
",",
"parts2",
")",
":",
"if",
"root2",
":",
"if",
"not",
"drv2",
"and",
"drv",
":",
"return",
"drv",
",",
"root2",
",",
"[",
"drv",
"+",
"root2",
"]",
"+",
"parts2",
"[",
"1",
":",
"]",
"elif",
"drv2",
":",
"if",
"drv2",
"==",
"drv",
"or",
"self",
".",
"casefold",
"(",
"drv2",
")",
"==",
"self",
".",
"casefold",
"(",
"drv",
")",
":",
"# Same drive => second path is relative to the first",
"return",
"drv",
",",
"root",
",",
"parts",
"+",
"parts2",
"[",
"1",
":",
"]",
"else",
":",
"# Second path is non-anchored (common case)",
"return",
"drv",
",",
"root",
",",
"parts",
"+",
"parts2",
"return",
"drv2",
",",
"root2",
",",
"parts2"
]
| 43.0625 | 17.4375 |
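The docstring's three cases mirror the joining rules that `pathlib` documents for its public API, so the observable behaviour can be checked without touching the internal (drive, root, parts) tuples:

from pathlib import PureWindowsPath, PurePosixPath

# Rooted but drive-less second path: keep the first path's drive.
print(PureWindowsPath('C:/Windows', '/Program Files'))  # C:\Program Files
# Second path brings its own drive: it replaces the first path entirely.
print(PureWindowsPath('C:/Windows', 'D:/Data'))         # D:\Data
# Non-anchored second path (the common case): simply appended.
print(PurePosixPath('/usr', 'local', 'bin'))            # /usr/local/bin
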
def get_sensor_data(self):
"""Get sensor reading objects
Iterates sensor reading objects pertaining to the currently
managed BMC.
:returns: Iterator of sdr.SensorReading objects
"""
self.init_sdr()
for sensor in self._sdr.get_sensor_numbers():
rsp = self.raw_command(command=0x2d, netfn=4, data=(sensor,))
if 'error' in rsp:
if rsp['code'] == 203: # Sensor does not exist, optional dev
continue
raise exc.IpmiException(rsp['error'], code=rsp['code'])
yield self._sdr.sensors[sensor].decode_sensor_reading(rsp['data'])
self.oem_init()
for reading in self._oem.get_sensor_data():
yield reading | [
"def",
"get_sensor_data",
"(",
"self",
")",
":",
"self",
".",
"init_sdr",
"(",
")",
"for",
"sensor",
"in",
"self",
".",
"_sdr",
".",
"get_sensor_numbers",
"(",
")",
":",
"rsp",
"=",
"self",
".",
"raw_command",
"(",
"command",
"=",
"0x2d",
",",
"netfn",
"=",
"4",
",",
"data",
"=",
"(",
"sensor",
",",
")",
")",
"if",
"'error'",
"in",
"rsp",
":",
"if",
"rsp",
"[",
"'code'",
"]",
"==",
"203",
":",
"# Sensor does not exist, optional dev",
"continue",
"raise",
"exc",
".",
"IpmiException",
"(",
"rsp",
"[",
"'error'",
"]",
",",
"code",
"=",
"rsp",
"[",
"'code'",
"]",
")",
"yield",
"self",
".",
"_sdr",
".",
"sensors",
"[",
"sensor",
"]",
".",
"decode_sensor_reading",
"(",
"rsp",
"[",
"'data'",
"]",
")",
"self",
".",
"oem_init",
"(",
")",
"for",
"reading",
"in",
"self",
".",
"_oem",
".",
"get_sensor_data",
"(",
")",
":",
"yield",
"reading"
]
| 39.368421 | 20.526316 |
def quantile_normalize(matrix, inplace=False, target=None):
"""Quantile normalization, allowing for missing values (NaN).
In case of nan values, this implementation will calculate evenly
distributed quantiles and fill in the missing data with those values.
Quantile normalization is then performed on the filled-in matrix,
and the nan values are restored afterwards.
Parameters
----------
matrix: `ExpMatrix`
The expression matrix (rows = genes, columns = samples).
inplace: bool
Whether or not to perform the operation in-place. [False]
target: `numpy.ndarray`
Target distribution to use. needs to be a vector whose first
dimension matches that of the expression matrix. If ``None``,
the target distribution is calculated based on the matrix
itself. [None]
Returns
-------
numpy.ndarray (ndim = 2)
The normalized matrix.
"""
assert isinstance(matrix, ExpMatrix)
assert isinstance(inplace, bool)
if target is not None:
assert isinstance(target, np.ndarray) and \
np.issubdtype(target.dtype, np.float)
if not inplace:
# make a copy of the original data
matrix = matrix.copy()
X = matrix.X
_, n = X.shape
nan = []
# fill in missing values with evenly spaced quantiles
for j in range(n):
nan.append(np.nonzero(np.isnan(X[:, j]))[0])
if nan[j].size > 0:
q = np.arange(1, nan[j].size + 1, dtype=np.float64) / \
(nan[j].size + 1.0)
fill = np.nanpercentile(X[:, j], 100 * q)
X[nan[j], j] = fill
# generate sorting indices
A = np.argsort(X, axis=0, kind='mergesort') # mergesort is stable
# reorder matrix
for j in range(n):
matrix.iloc[:, j] = matrix.X[A[:, j], j]
# determine target distribution
if target is None:
# No target distribution is specified, calculate one based on the
# expression matrix.
target = np.mean(matrix.X, axis=1)
else:
# Use specified target distribution (after sorting).
target = np.sort(target)
# generate indices to reverse sorting
A = np.argsort(A, axis=0, kind='mergesort') # mergesort is stable
# quantile-normalize
for j in range(n):
matrix.iloc[:, j] = target[A[:, j]]
# set missing values to NaN again
for j in range(n):
if nan[j].size > 0:
matrix.iloc[nan[j], j] = np.nan
return matrix | [
"def",
"quantile_normalize",
"(",
"matrix",
",",
"inplace",
"=",
"False",
",",
"target",
"=",
"None",
")",
":",
"assert",
"isinstance",
"(",
"matrix",
",",
"ExpMatrix",
")",
"assert",
"isinstance",
"(",
"inplace",
",",
"bool",
")",
"if",
"target",
"is",
"not",
"None",
":",
"assert",
"isinstance",
"(",
"target",
",",
"np",
".",
"ndarray",
")",
"and",
"np",
".",
"issubdtype",
"(",
"target",
".",
"dtype",
",",
"np",
".",
"float",
")",
"if",
"not",
"inplace",
":",
"# make a copy of the original data",
"matrix",
"=",
"matrix",
".",
"copy",
"(",
")",
"X",
"=",
"matrix",
".",
"X",
"_",
",",
"n",
"=",
"X",
".",
"shape",
"nan",
"=",
"[",
"]",
"# fill in missing values with evenly spaced quantiles",
"for",
"j",
"in",
"range",
"(",
"n",
")",
":",
"nan",
".",
"append",
"(",
"np",
".",
"nonzero",
"(",
"np",
".",
"isnan",
"(",
"X",
"[",
":",
",",
"j",
"]",
")",
")",
"[",
"0",
"]",
")",
"if",
"nan",
"[",
"j",
"]",
".",
"size",
">",
"0",
":",
"q",
"=",
"np",
".",
"arange",
"(",
"1",
",",
"nan",
"[",
"j",
"]",
".",
"size",
"+",
"1",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"/",
"(",
"nan",
"[",
"j",
"]",
".",
"size",
"+",
"1.0",
")",
"fill",
"=",
"np",
".",
"nanpercentile",
"(",
"X",
"[",
":",
",",
"j",
"]",
",",
"100",
"*",
"q",
")",
"X",
"[",
"nan",
"[",
"j",
"]",
",",
"j",
"]",
"=",
"fill",
"# generate sorting indices",
"A",
"=",
"np",
".",
"argsort",
"(",
"X",
",",
"axis",
"=",
"0",
",",
"kind",
"=",
"'mergesort'",
")",
"# mergesort is stable",
"# reorder matrix",
"for",
"j",
"in",
"range",
"(",
"n",
")",
":",
"matrix",
".",
"iloc",
"[",
":",
",",
"j",
"]",
"=",
"matrix",
".",
"X",
"[",
"A",
"[",
":",
",",
"j",
"]",
",",
"j",
"]",
"# determine target distribution",
"if",
"target",
"is",
"None",
":",
"# No target distribution is specified, calculate one based on the",
"# expression matrix.",
"target",
"=",
"np",
".",
"mean",
"(",
"matrix",
".",
"X",
",",
"axis",
"=",
"1",
")",
"else",
":",
"# Use specified target distribution (after sorting).",
"target",
"=",
"np",
".",
"sort",
"(",
"target",
")",
"# generate indices to reverse sorting",
"A",
"=",
"np",
".",
"argsort",
"(",
"A",
",",
"axis",
"=",
"0",
",",
"kind",
"=",
"'mergesort'",
")",
"# mergesort is stable",
"# quantile-normalize",
"for",
"j",
"in",
"range",
"(",
"n",
")",
":",
"matrix",
".",
"iloc",
"[",
":",
",",
"j",
"]",
"=",
"target",
"[",
"A",
"[",
":",
",",
"j",
"]",
"]",
"# set missing values to NaN again",
"for",
"j",
"in",
"range",
"(",
"n",
")",
":",
"if",
"nan",
"[",
"j",
"]",
".",
"size",
">",
"0",
":",
"matrix",
".",
"iloc",
"[",
"nan",
"[",
"j",
"]",
",",
"j",
"]",
"=",
"np",
".",
"nan",
"return",
"matrix"
]
| 32.105263 | 20.210526 |
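Stripped of the `ExpMatrix` wrapper and the NaN bookkeeping, the core of the quantile normalisation above is a short piece of plain NumPy (a sketch under the assumption that the input has no missing values):

import numpy as np

def quantile_normalize_dense(X):
    # Rank each column, average the sorted columns to get the target
    # distribution, then give every value the target quantile of its rank.
    order = np.argsort(X, axis=0, kind='mergesort')
    ranks = np.argsort(order, axis=0, kind='mergesort')
    target = np.mean(np.sort(X, axis=0), axis=1)
    return target[ranks]

X = np.array([[5.0, 4.0, 3.0],
              [2.0, 1.0, 4.0],
              [3.0, 4.0, 6.0],
              [4.0, 2.0, 8.0]])
print(quantile_normalize_dense(X))  # every column now shares the same distribution
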
def delete(self, user: str) -> None:
"""Remove user."""
data = {
'action': 'remove',
'user': user
}
self._request('post', URL, data=data) | [
"def",
"delete",
"(",
"self",
",",
"user",
":",
"str",
")",
"->",
"None",
":",
"data",
"=",
"{",
"'action'",
":",
"'remove'",
",",
"'user'",
":",
"user",
"}",
"self",
".",
"_request",
"(",
"'post'",
",",
"URL",
",",
"data",
"=",
"data",
")"
]
| 23.375 | 16.125 |
def sample(net: BayesNet,
sample_from: Iterable[Vertex],
sampling_algorithm: PosteriorSamplingAlgorithm = None,
draws: int = 500,
drop: int = 0,
down_sample_interval: int = 1,
plot: bool = False,
ax: Any = None) -> sample_types:
"""
:param net: Bayesian Network containing latent variables.
:param sample_from: Vertices to include in the returned samples.
:param sampling_algorithm: The posterior sampling algorithm to use.
Options are :class:`keanu.algorithm.MetropolisHastingsSampler`, :class:`keanu.algorithm.NUTSSampler` and :class:`keanu.algorithm.ForwardSampler`
If not set, :class:`keanu.algorithm.MetropolisHastingsSampler` is chosen with 'prior' as its proposal distribution.
:param draws: The number of samples to take.
:param drop: The number of samples to drop before collecting anything.
If this is zero then no samples will be dropped before collecting.
:param down_sample_interval: Collect 1 sample for every `down_sample_interval`.
If this is 1 then there will be no down-sampling.
If this is 2 then every other sample will be taken.
If this is 3 then 2 samples will be dropped before one is taken.
And so on.
:param plot: Flag for plotting the trace after sampling.
Call `matplotlib.pyplot.show <https://matplotlib.org/api/_as_gen/matplotlib.pyplot.show.html>`_ to display the plot.
:param Axes ax: `matplotlib.axes.Axes <https://matplotlib.org/api/axes_api.html>`_.
If not set, a new one is created.
:raises ValueError: If `sample_from` contains vertices without labels.
:return: Dictionary of samples at an index (tuple) for each vertex label (str). If all the vertices in `sample_from` are scalar, the dictionary is only keyed by label.
"""
sample_from = list(sample_from)
id_to_label = __check_if_vertices_are_labelled(sample_from)
if sampling_algorithm is None:
sampling_algorithm = MetropolisHastingsSampler(proposal_distribution="prior", latents=sample_from)
vertices_unwrapped: JavaList = k.to_java_object_list(sample_from)
probabilistic_model = ProbabilisticModel(net) if (
isinstance(sampling_algorithm, MetropolisHastingsSampler) or
isinstance(sampling_algorithm, ForwardSampler)) else ProbabilisticModelWithGradient(net)
network_samples: JavaObject = sampling_algorithm.get_sampler().getPosteriorSamples(
probabilistic_model.unwrap(), vertices_unwrapped, draws).drop(drop).downSample(down_sample_interval)
if __all_scalar(sample_from):
vertex_samples = __create_single_indexed_samples(network_samples, vertices_unwrapped, id_to_label)
else:
vertex_samples = __create_multi_indexed_samples(vertices_unwrapped, network_samples, id_to_label)
if plot:
traceplot(vertex_samples, ax=ax)
return vertex_samples | [
"def",
"sample",
"(",
"net",
":",
"BayesNet",
",",
"sample_from",
":",
"Iterable",
"[",
"Vertex",
"]",
",",
"sampling_algorithm",
":",
"PosteriorSamplingAlgorithm",
"=",
"None",
",",
"draws",
":",
"int",
"=",
"500",
",",
"drop",
":",
"int",
"=",
"0",
",",
"down_sample_interval",
":",
"int",
"=",
"1",
",",
"plot",
":",
"bool",
"=",
"False",
",",
"ax",
":",
"Any",
"=",
"None",
")",
"->",
"sample_types",
":",
"sample_from",
"=",
"list",
"(",
"sample_from",
")",
"id_to_label",
"=",
"__check_if_vertices_are_labelled",
"(",
"sample_from",
")",
"if",
"sampling_algorithm",
"is",
"None",
":",
"sampling_algorithm",
"=",
"MetropolisHastingsSampler",
"(",
"proposal_distribution",
"=",
"\"prior\"",
",",
"latents",
"=",
"sample_from",
")",
"vertices_unwrapped",
":",
"JavaList",
"=",
"k",
".",
"to_java_object_list",
"(",
"sample_from",
")",
"probabilistic_model",
"=",
"ProbabilisticModel",
"(",
"net",
")",
"if",
"(",
"isinstance",
"(",
"sampling_algorithm",
",",
"MetropolisHastingsSampler",
")",
"or",
"isinstance",
"(",
"sampling_algorithm",
",",
"ForwardSampler",
")",
")",
"else",
"ProbabilisticModelWithGradient",
"(",
"net",
")",
"network_samples",
":",
"JavaObject",
"=",
"sampling_algorithm",
".",
"get_sampler",
"(",
")",
".",
"getPosteriorSamples",
"(",
"probabilistic_model",
".",
"unwrap",
"(",
")",
",",
"vertices_unwrapped",
",",
"draws",
")",
".",
"drop",
"(",
"drop",
")",
".",
"downSample",
"(",
"down_sample_interval",
")",
"if",
"__all_scalar",
"(",
"sample_from",
")",
":",
"vertex_samples",
"=",
"__create_single_indexed_samples",
"(",
"network_samples",
",",
"vertices_unwrapped",
",",
"id_to_label",
")",
"else",
":",
"vertex_samples",
"=",
"__create_multi_indexed_samples",
"(",
"vertices_unwrapped",
",",
"network_samples",
",",
"id_to_label",
")",
"if",
"plot",
":",
"traceplot",
"(",
"vertex_samples",
",",
"ax",
"=",
"ax",
")",
"return",
"vertex_samples"
]
| 51.410714 | 32.803571 |
def command_factory(command):
"""A factory which returns functions for direct daemon communication.
This factory will create a function which sends a payload to the daemon
and returns the unpickled object which is returned by the daemon.
Args:
        command (string): The type of payload this should be. This determines
            how the daemon will interpret the instruction.
Returns:
function: The created function.
"""
def communicate(body={}, root_dir=None):
"""Communicate with the daemon.
This function sends a payload to the daemon and returns the unpickled
object sent by the daemon.
Args:
body (dir): Any other arguments that should be put into the payload.
root_dir (str): The root directory in which we expect the daemon.
We need this to connect to the daemons socket.
Returns:
function: The returned payload.
"""
client = connect_socket(root_dir)
body['mode'] = command
# Delete the func entry we use to call the correct function with argparse
        # as functions can't be pickled and this shouldn't be sent to the daemon.
if 'func' in body:
del body['func']
data_string = pickle.dumps(body, -1)
client.send(data_string)
# Receive message, unpickle and return it
response = receive_data(client)
return response
return communicate | [
"def",
"command_factory",
"(",
"command",
")",
":",
"def",
"communicate",
"(",
"body",
"=",
"{",
"}",
",",
"root_dir",
"=",
"None",
")",
":",
"\"\"\"Communicate with the daemon.\n\n This function sends a payload to the daemon and returns the unpickled\n object sent by the daemon.\n\n Args:\n body (dir): Any other arguments that should be put into the payload.\n root_dir (str): The root directory in which we expect the daemon.\n We need this to connect to the daemons socket.\n Returns:\n function: The returned payload.\n \"\"\"",
"client",
"=",
"connect_socket",
"(",
"root_dir",
")",
"body",
"[",
"'mode'",
"]",
"=",
"command",
"# Delete the func entry we use to call the correct function with argparse",
"# as functions can't be pickled and this shouldn't be send to the daemon.",
"if",
"'func'",
"in",
"body",
":",
"del",
"body",
"[",
"'func'",
"]",
"data_string",
"=",
"pickle",
".",
"dumps",
"(",
"body",
",",
"-",
"1",
")",
"client",
".",
"send",
"(",
"data_string",
")",
"# Receive message, unpickle and return it",
"response",
"=",
"receive_data",
"(",
"client",
")",
"return",
"response",
"return",
"communicate"
]
| 37.769231 | 21.512821 |
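The factory-of-closures pattern above can be shown without the socket and pickle plumbing; `send_to_daemon` below is a hypothetical stand-in for that transport, not part of the project:

def make_command(command, send_to_daemon):
    # Returns a function that tags the payload with the command name
    # and forwards it through whatever transport was supplied.
    def communicate(body=None):
        payload = dict(body or {})
        payload['mode'] = command
        payload.pop('func', None)  # callables can't be pickled, so never ship them
        return send_to_daemon(payload)
    return communicate

status = make_command('status', send_to_daemon=lambda payload: payload)
print(status({'key': 1}))  # {'key': 1, 'mode': 'status'}
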
def fetch(self):
"""
Fetch a CallSummaryInstance
:returns: Fetched CallSummaryInstance
:rtype: twilio.rest.insights.v1.summary.CallSummaryInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return CallSummaryInstance(self._version, payload, call_sid=self._solution['call_sid'], ) | [
"def",
"fetch",
"(",
"self",
")",
":",
"params",
"=",
"values",
".",
"of",
"(",
"{",
"}",
")",
"payload",
"=",
"self",
".",
"_version",
".",
"fetch",
"(",
"'GET'",
",",
"self",
".",
"_uri",
",",
"params",
"=",
"params",
",",
")",
"return",
"CallSummaryInstance",
"(",
"self",
".",
"_version",
",",
"payload",
",",
"call_sid",
"=",
"self",
".",
"_solution",
"[",
"'call_sid'",
"]",
",",
")"
]
| 26.5625 | 20.9375 |
def convert_tkinter_size_to_Wx(size):
"""
Converts size in characters to size in pixels
:param size: size in characters, rows
:return: size in pixels, pixels
"""
qtsize = size
if size[1] is not None and size[1] < DEFAULT_PIXEL_TO_CHARS_CUTOFF: # change from character based size to pixels (roughly)
qtsize = size[0]*DEFAULT_PIXELS_TO_CHARS_SCALING[0], size[1]*DEFAULT_PIXELS_TO_CHARS_SCALING[1]
return qtsize | [
"def",
"convert_tkinter_size_to_Wx",
"(",
"size",
")",
":",
"qtsize",
"=",
"size",
"if",
"size",
"[",
"1",
"]",
"is",
"not",
"None",
"and",
"size",
"[",
"1",
"]",
"<",
"DEFAULT_PIXEL_TO_CHARS_CUTOFF",
":",
"# change from character based size to pixels (roughly)",
"qtsize",
"=",
"size",
"[",
"0",
"]",
"*",
"DEFAULT_PIXELS_TO_CHARS_SCALING",
"[",
"0",
"]",
",",
"size",
"[",
"1",
"]",
"*",
"DEFAULT_PIXELS_TO_CHARS_SCALING",
"[",
"1",
"]",
"return",
"qtsize"
]
| 44.7 | 22.1 |
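The conversion above only rescales when the second element looks like a row count; with placeholder constants (the real values live in the library) the behaviour is easy to see:

# Placeholder values for illustration only; the library defines the real constants.
PIXEL_TO_CHARS_CUTOFF = 12
PIXELS_TO_CHARS_SCALING = (10, 26)

def chars_to_pixels(size):
    width, height = size
    if height is not None and height < PIXEL_TO_CHARS_CUTOFF:
        # (columns, rows) in characters -> (width, height) in pixels, roughly
        return (width * PIXELS_TO_CHARS_SCALING[0], height * PIXELS_TO_CHARS_SCALING[1])
    return size

print(chars_to_pixels((20, 1)))    # small second value: treated as chars and scaled
print(chars_to_pixels((200, 40)))  # already pixel-sized: returned unchanged
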
def run_spec(spec,
benchmark_hosts,
result_hosts=None,
output_fmt=None,
logfile_info=None,
logfile_result=None,
action=None,
fail_if=None,
sample_mode='reservoir'):
"""Run a spec file, executing the statements on the benchmark_hosts.
Short example of a spec file:
[setup]
statement_files = ["sql/create_table.sql"]
[[setup.data_files]]
target = "t"
source = "data/t.json"
[[queries]]
statement = "select count(*) from t"
iterations = 2000
concurrency = 10
[teardown]
statements = ["drop table t"]
See https://github.com/mfussenegger/cr8/tree/master/specs
for more examples.
Args:
spec: path to a spec file
benchmark_hosts: hostname[:port] pairs of Crate nodes
result_hosts: optional hostname[:port] Crate node pairs into which the
runtime statistics should be inserted.
output_fmt: output format
action: Optional action to execute.
Default is to execute all actions - setup, queries and teardown.
If present only the specified action will be executed.
The argument can be provided multiple times to execute more than
one action.
fail-if: An expression that causes cr8 to exit with a failure if it
evaluates to true.
The expression can contain formatting expressions for:
- runtime_stats
- statement
- meta
- concurrency
- bulk_size
For example:
--fail-if "{runtime_stats.mean} > 1.34"
"""
with Logger(output_fmt=output_fmt,
logfile_info=logfile_info,
logfile_result=logfile_result) as log:
do_run_spec(
spec=spec,
benchmark_hosts=benchmark_hosts,
log=log,
result_hosts=result_hosts,
action=action,
fail_if=fail_if,
sample_mode=sample_mode
) | [
"def",
"run_spec",
"(",
"spec",
",",
"benchmark_hosts",
",",
"result_hosts",
"=",
"None",
",",
"output_fmt",
"=",
"None",
",",
"logfile_info",
"=",
"None",
",",
"logfile_result",
"=",
"None",
",",
"action",
"=",
"None",
",",
"fail_if",
"=",
"None",
",",
"sample_mode",
"=",
"'reservoir'",
")",
":",
"with",
"Logger",
"(",
"output_fmt",
"=",
"output_fmt",
",",
"logfile_info",
"=",
"logfile_info",
",",
"logfile_result",
"=",
"logfile_result",
")",
"as",
"log",
":",
"do_run_spec",
"(",
"spec",
"=",
"spec",
",",
"benchmark_hosts",
"=",
"benchmark_hosts",
",",
"log",
"=",
"log",
",",
"result_hosts",
"=",
"result_hosts",
",",
"action",
"=",
"action",
",",
"fail_if",
"=",
"fail_if",
",",
"sample_mode",
"=",
"sample_mode",
")"
]
| 32.107692 | 17.138462 |
def generate_content_encoding(self):
"""
Means decoding value when it's encoded by base64.
.. code-block:: python
{
'contentEncoding': 'base64',
}
"""
if self._definition['contentEncoding'] == 'base64':
with self.l('if isinstance({variable}, str):'):
with self.l('try:'):
self.l('import base64')
self.l('{variable} = base64.b64decode({variable})')
with self.l('except Exception:'):
self.l('raise JsonSchemaException("{name} must be encoded by base64")')
with self.l('if {variable} == "":'):
self.l('raise JsonSchemaException("contentEncoding must be base64")') | [
"def",
"generate_content_encoding",
"(",
"self",
")",
":",
"if",
"self",
".",
"_definition",
"[",
"'contentEncoding'",
"]",
"==",
"'base64'",
":",
"with",
"self",
".",
"l",
"(",
"'if isinstance({variable}, str):'",
")",
":",
"with",
"self",
".",
"l",
"(",
"'try:'",
")",
":",
"self",
".",
"l",
"(",
"'import base64'",
")",
"self",
".",
"l",
"(",
"'{variable} = base64.b64decode({variable})'",
")",
"with",
"self",
".",
"l",
"(",
"'except Exception:'",
")",
":",
"self",
".",
"l",
"(",
"'raise JsonSchemaException(\"{name} must be encoded by base64\")'",
")",
"with",
"self",
".",
"l",
"(",
"'if {variable} == \"\":'",
")",
":",
"self",
".",
"l",
"(",
"'raise JsonSchemaException(\"contentEncoding must be base64\")'",
")"
]
| 40.210526 | 19.263158 |
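What the generated branch boils down to is a plain base64 round trip; an equivalent hand-written check (illustrative names, ValueError instead of the generator's exception type) looks like this:

import base64

def check_base64_content(value, name='data'):
    if isinstance(value, str):
        try:
            value = base64.b64decode(value)
        except Exception:
            raise ValueError('%s must be encoded by base64' % name)
    if value == '':
        raise ValueError('contentEncoding must be base64')
    return value

print(check_base64_content(base64.b64encode(b'hello').decode()))  # b'hello'
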
def last_month(self):
"""
Access the last_month
:returns: twilio.rest.api.v2010.account.usage.record.last_month.LastMonthList
:rtype: twilio.rest.api.v2010.account.usage.record.last_month.LastMonthList
"""
if self._last_month is None:
self._last_month = LastMonthList(self._version, account_sid=self._solution['account_sid'], )
return self._last_month | [
"def",
"last_month",
"(",
"self",
")",
":",
"if",
"self",
".",
"_last_month",
"is",
"None",
":",
"self",
".",
"_last_month",
"=",
"LastMonthList",
"(",
"self",
".",
"_version",
",",
"account_sid",
"=",
"self",
".",
"_solution",
"[",
"'account_sid'",
"]",
",",
")",
"return",
"self",
".",
"_last_month"
]
| 41.1 | 23.5 |
def close(self):
"""
Close the connection by handshaking with the server.
This method is a :ref:`coroutine <coroutine>`.
"""
if not self.is_closed():
self._closing = True
# Let the ConnectionActor do the actual close operations.
# It will do the work on CloseOK
self.sender.send_Close(
0, 'Connection closed by application', 0, 0)
try:
yield from self.synchroniser.wait(spec.ConnectionCloseOK)
except AMQPConnectionError:
# For example if both sides want to close or the connection
# is closed.
pass
else:
if self._closing:
log.warn("Called `close` on already closing connection...")
# finish all pending tasks
yield from self.protocol.heartbeat_monitor.wait_closed() | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_closed",
"(",
")",
":",
"self",
".",
"_closing",
"=",
"True",
"# Let the ConnectionActor do the actual close operations.",
"# It will do the work on CloseOK",
"self",
".",
"sender",
".",
"send_Close",
"(",
"0",
",",
"'Connection closed by application'",
",",
"0",
",",
"0",
")",
"try",
":",
"yield",
"from",
"self",
".",
"synchroniser",
".",
"wait",
"(",
"spec",
".",
"ConnectionCloseOK",
")",
"except",
"AMQPConnectionError",
":",
"# For example if both sides want to close or the connection",
"# is closed.",
"pass",
"else",
":",
"if",
"self",
".",
"_closing",
":",
"log",
".",
"warn",
"(",
"\"Called `close` on already closing connection...\"",
")",
"# finish all pending tasks",
"yield",
"from",
"self",
".",
"protocol",
".",
"heartbeat_monitor",
".",
"wait_closed",
"(",
")"
]
| 38.695652 | 17.391304 |
def decode(code, encoding_type='default'):
"""Converts a string of morse code into English message
The encoded message can also be decoded using the same morse chart
backwards.
>>> code = '... --- ...'
>>> decode(code)
'SOS'
"""
reversed_morsetab = {symbol: character for character,
symbol in list(getattr(encoding, 'morsetab').items())}
encoding_type = encoding_type.lower()
allowed_encoding_type = ['default', 'binary']
if encoding_type == 'default':
# For spacing the words
letters = 0
words = 0
index = {}
for i in range(len(code)):
if code[i:i + 3] == ' ' * 3:
if code[i:i + 7] == ' ' * 7:
words += 1
letters += 1
index[words] = letters
elif code[i + 4] and code[i - 1] != ' ': # Check for ' '
letters += 1
message = [reversed_morsetab[i] for i in code.split()]
for i, (word, letter) in enumerate(list(index.items())):
message.insert(letter + i, ' ')
return ''.join(message)
elif encoding_type == 'binary':
lst = list(map(lambda word: word.split('0' * 3), code.split('0' * 7)))
# list of list of character (each sub list being a word)
for i, word in enumerate(lst):
for j, bin_letter in enumerate(word):
lst[i][j] = binary_lookup[bin_letter]
lst[i] = "".join(lst[i])
s = " ".join(lst)
return s
else:
raise NotImplementedError("encoding_type must be in %s" % allowed_encoding_type) | [
"def",
"decode",
"(",
"code",
",",
"encoding_type",
"=",
"'default'",
")",
":",
"reversed_morsetab",
"=",
"{",
"symbol",
":",
"character",
"for",
"character",
",",
"symbol",
"in",
"list",
"(",
"getattr",
"(",
"encoding",
",",
"'morsetab'",
")",
".",
"items",
"(",
")",
")",
"}",
"encoding_type",
"=",
"encoding_type",
".",
"lower",
"(",
")",
"allowed_encoding_type",
"=",
"[",
"'default'",
",",
"'binary'",
"]",
"if",
"encoding_type",
"==",
"'default'",
":",
"# For spacing the words",
"letters",
"=",
"0",
"words",
"=",
"0",
"index",
"=",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"code",
")",
")",
":",
"if",
"code",
"[",
"i",
":",
"i",
"+",
"3",
"]",
"==",
"' '",
"*",
"3",
":",
"if",
"code",
"[",
"i",
":",
"i",
"+",
"7",
"]",
"==",
"' '",
"*",
"7",
":",
"words",
"+=",
"1",
"letters",
"+=",
"1",
"index",
"[",
"words",
"]",
"=",
"letters",
"elif",
"code",
"[",
"i",
"+",
"4",
"]",
"and",
"code",
"[",
"i",
"-",
"1",
"]",
"!=",
"' '",
":",
"# Check for ' '",
"letters",
"+=",
"1",
"message",
"=",
"[",
"reversed_morsetab",
"[",
"i",
"]",
"for",
"i",
"in",
"code",
".",
"split",
"(",
")",
"]",
"for",
"i",
",",
"(",
"word",
",",
"letter",
")",
"in",
"enumerate",
"(",
"list",
"(",
"index",
".",
"items",
"(",
")",
")",
")",
":",
"message",
".",
"insert",
"(",
"letter",
"+",
"i",
",",
"' '",
")",
"return",
"''",
".",
"join",
"(",
"message",
")",
"elif",
"encoding_type",
"==",
"'binary'",
":",
"lst",
"=",
"list",
"(",
"map",
"(",
"lambda",
"word",
":",
"word",
".",
"split",
"(",
"'0'",
"*",
"3",
")",
",",
"code",
".",
"split",
"(",
"'0'",
"*",
"7",
")",
")",
")",
"# list of list of character (each sub list being a word)",
"for",
"i",
",",
"word",
"in",
"enumerate",
"(",
"lst",
")",
":",
"for",
"j",
",",
"bin_letter",
"in",
"enumerate",
"(",
"word",
")",
":",
"lst",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"binary_lookup",
"[",
"bin_letter",
"]",
"lst",
"[",
"i",
"]",
"=",
"\"\"",
".",
"join",
"(",
"lst",
"[",
"i",
"]",
")",
"s",
"=",
"\" \"",
".",
"join",
"(",
"lst",
")",
"return",
"s",
"else",
":",
"raise",
"NotImplementedError",
"(",
"\"encoding_type must be in %s\"",
"%",
"allowed_encoding_type",
")"
]
| 33.020408 | 19.755102 |
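A self-contained taste of the default branch above, using a trimmed-down lookup table instead of the package's `encoding.morsetab`, and single/triple spaces as the letter/word gaps in this sketch:

# Reduced table for illustration; the real module ships a complete morsetab.
MORSE = {'S': '...', 'O': '---', 'H': '....', 'I': '..'}
REVERSED = {symbol: letter for letter, symbol in MORSE.items()}

def decode_simple(code):
    # Letters separated by one space, words by three spaces.
    words = code.split('   ')
    return ' '.join(''.join(REVERSED[s] for s in word.split()) for word in words)

print(decode_simple('... --- ...'))        # SOS
print(decode_simple('.... ..   .... ..'))  # HI HI
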
def create(dataset, annotations=None, feature=None, model='darknet-yolo',
classes=None, batch_size=0, max_iterations=0, verbose=True,
**kwargs):
"""
Create a :class:`ObjectDetector` model.
Parameters
----------
dataset : SFrame
Input data. The columns named by the ``feature`` and ``annotations``
parameters will be extracted for training the detector.
annotations : string
Name of the column containing the object detection annotations. This
column should be a list of dictionaries (or a single dictionary), with
each dictionary representing a bounding box of an object instance. Here
is an example of the annotations for a single image with two object
instances::
[{'label': 'dog',
'type': 'rectangle',
'coordinates': {'x': 223, 'y': 198,
'width': 130, 'height': 230}},
{'label': 'cat',
'type': 'rectangle',
'coordinates': {'x': 40, 'y': 73,
'width': 80, 'height': 123}}]
The value for `x` is the horizontal center of the box paired with
`width` and `y` is the vertical center of the box paired with `height`.
'None' (the default) indicates the only list column in `dataset` should
be used for the annotations.
feature : string
Name of the column containing the input images. 'None' (the default)
indicates the only image column in `dataset` should be used as the
feature.
model : string optional
Object detection model to use:
- "darknet-yolo" : Fast and medium-sized model
classes : list optional
List of strings containing the names of the classes of objects.
Inferred from the data if not provided.
batch_size: int
The number of images per training iteration. If 0, then it will be
automatically determined based on resource availability.
max_iterations : int
The number of training iterations. If 0, then it will automatically
be determined based on the amount of data you provide.
verbose : bool, optional
If True, print progress updates and model details.
Returns
-------
out : ObjectDetector
A trained :class:`ObjectDetector` model.
See Also
--------
ObjectDetector
Examples
--------
.. sourcecode:: python
# Train an object detector model
>>> model = turicreate.object_detector.create(data)
# Make predictions on the training set and add them as a column to the SFrame
>>> data['predictions'] = model.predict(data)
# Visualize predictions by generating a new column of marked up images
>>> data['image_pred'] = turicreate.object_detector.util.draw_bounding_boxes(data['image'], data['predictions'])
"""
_raise_error_if_not_sframe(dataset, "dataset")
from ._mx_detector import YOLOLoss as _YOLOLoss
from ._model import tiny_darknet as _tiny_darknet
from ._sframe_loader import SFrameDetectionIter as _SFrameDetectionIter
from ._manual_scheduler import ManualScheduler as _ManualScheduler
import mxnet as _mx
from .._mxnet import _mxnet_utils
if len(dataset) == 0:
raise _ToolkitError('Unable to train on empty dataset')
_numeric_param_check_range('max_iterations', max_iterations, 0, _six.MAXSIZE)
start_time = _time.time()
supported_detectors = ['darknet-yolo']
if feature is None:
feature = _tkutl._find_only_image_column(dataset)
if verbose:
print("Using '%s' as feature column" % feature)
if annotations is None:
annotations = _tkutl._find_only_column_of_type(dataset,
target_type=[list, dict],
type_name='list',
col_name='annotations')
if verbose:
print("Using '%s' as annotations column" % annotations)
_raise_error_if_not_detection_sframe(dataset, feature, annotations,
require_annotations=True)
is_annotations_list = dataset[annotations].dtype == list
_tkutl._check_categorical_option_type('model', model,
supported_detectors)
base_model = model.split('-', 1)[0]
ref_model = _pre_trained_models.OBJECT_DETECTION_BASE_MODELS[base_model]()
params = {
'anchors': [
(1.0, 2.0), (1.0, 1.0), (2.0, 1.0),
(2.0, 4.0), (2.0, 2.0), (4.0, 2.0),
(4.0, 8.0), (4.0, 4.0), (8.0, 4.0),
(8.0, 16.0), (8.0, 8.0), (16.0, 8.0),
(16.0, 32.0), (16.0, 16.0), (32.0, 16.0),
],
'grid_shape': [13, 13],
'aug_resize': 0,
'aug_rand_crop': 0.9,
'aug_rand_pad': 0.9,
'aug_rand_gray': 0.0,
'aug_aspect_ratio': 1.25,
'aug_hue': 0.05,
'aug_brightness': 0.05,
'aug_saturation': 0.05,
'aug_contrast': 0.05,
'aug_horizontal_flip': True,
'aug_min_object_covered': 0,
'aug_min_eject_coverage': 0.5,
'aug_area_range': (.15, 2),
'aug_pca_noise': 0.0,
'aug_max_attempts': 20,
'aug_inter_method': 2,
'lmb_coord_xy': 10.0,
'lmb_coord_wh': 10.0,
'lmb_obj': 100.0,
'lmb_noobj': 5.0,
'lmb_class': 2.0,
'non_maximum_suppression_threshold': 0.45,
'rescore': True,
'clip_gradients': 0.025,
'weight_decay': 0.0005,
'sgd_momentum': 0.9,
'learning_rate': 1.0e-3,
'shuffle': True,
'mps_loss_mult': 8,
# This large buffer size (8 batches) is an attempt to mitigate against
# the SFrame shuffle operation that can occur after each epoch.
'io_thread_buffer_size': 8,
}
if '_advanced_parameters' in kwargs:
# Make sure no additional parameters are provided
new_keys = set(kwargs['_advanced_parameters'].keys())
set_keys = set(params.keys())
unsupported = new_keys - set_keys
if unsupported:
raise _ToolkitError('Unknown advanced parameters: {}'.format(unsupported))
params.update(kwargs['_advanced_parameters'])
anchors = params['anchors']
num_anchors = len(anchors)
if batch_size < 1:
batch_size = 32 # Default if not user-specified
cuda_gpus = _mxnet_utils.get_gpus_in_use(max_devices=batch_size)
num_mxnet_gpus = len(cuda_gpus)
use_mps = _use_mps() and num_mxnet_gpus == 0
batch_size_each = batch_size // max(num_mxnet_gpus, 1)
if use_mps and _mps_device_memory_limit() < 4 * 1024 * 1024 * 1024:
# Reduce batch size for GPUs with less than 4GB RAM
batch_size_each = 16
# Note, this may slightly alter the batch size to fit evenly on the GPUs
batch_size = max(num_mxnet_gpus, 1) * batch_size_each
if verbose:
print("Setting 'batch_size' to {}".format(batch_size))
# The IO thread also handles MXNet-powered data augmentation. This seems
# to be problematic to run independently of an MXNet-powered neural network
# in a separate thread. For this reason, we restrict IO threads to when
# the neural network backend is MPS.
io_thread_buffer_size = params['io_thread_buffer_size'] if use_mps else 0
if verbose:
# Estimate memory usage (based on experiments)
cuda_mem_req = 550 + batch_size_each * 85
_tkutl._print_neural_compute_device(cuda_gpus=cuda_gpus, use_mps=use_mps,
cuda_mem_req=cuda_mem_req)
grid_shape = params['grid_shape']
input_image_shape = (3,
grid_shape[0] * ref_model.spatial_reduction,
grid_shape[1] * ref_model.spatial_reduction)
try:
if is_annotations_list:
instances = (dataset.stack(annotations, new_column_name='_bbox', drop_na=True)
.unpack('_bbox', limit=['label']))
else:
instances = dataset.rename({annotations: '_bbox'}).dropna('_bbox')
instances = instances.unpack('_bbox', limit=['label'])
except (TypeError, RuntimeError):
# If this fails, the annotation format is invalid at the coarsest level
raise _ToolkitError("Annotations format is invalid. Must be a list of "
"dictionaries or single dictionary containing 'label' and 'coordinates'.")
num_images = len(dataset)
num_instances = len(instances)
if classes is None:
classes = instances['_bbox.label'].unique()
classes = sorted(classes)
# Make a class-to-index look-up table
class_to_index = {name: index for index, name in enumerate(classes)}
num_classes = len(classes)
if max_iterations == 0:
# Set number of iterations through a heuristic
num_iterations_raw = 5000 * _np.sqrt(num_instances) / batch_size
num_iterations = 1000 * max(1, int(round(num_iterations_raw / 1000)))
if verbose:
print("Setting 'max_iterations' to {}".format(num_iterations))
else:
num_iterations = max_iterations
# Create data loader
loader = _SFrameDetectionIter(dataset,
batch_size=batch_size,
input_shape=input_image_shape[1:],
output_shape=grid_shape,
anchors=anchors,
class_to_index=class_to_index,
aug_params=params,
shuffle=params['shuffle'],
loader_type='augmented',
feature_column=feature,
annotations_column=annotations,
io_thread_buffer_size=io_thread_buffer_size,
iterations=num_iterations)
# Predictions per anchor box: x/y + w/h + object confidence + class probs
preds_per_box = 5 + num_classes
output_size = preds_per_box * num_anchors
ymap_shape = (batch_size_each,) + tuple(grid_shape) + (num_anchors, preds_per_box)
net = _tiny_darknet(output_size=output_size)
loss = _YOLOLoss(input_shape=input_image_shape[1:],
output_shape=grid_shape,
batch_size=batch_size_each,
num_classes=num_classes,
anchors=anchors,
parameters=params)
base_lr = params['learning_rate']
steps = [num_iterations // 2, 3 * num_iterations // 4, num_iterations]
steps_and_factors = [(step, 10**(-i)) for i, step in enumerate(steps)]
steps, factors = zip(*steps_and_factors)
lr_scheduler = _ManualScheduler(step=steps, factor=factors)
ctx = _mxnet_utils.get_mxnet_context(max_devices=batch_size)
net_params = net.collect_params()
net_params.initialize(_mx.init.Xavier(), ctx=ctx)
net_params['conv7_weight'].initialize(_mx.init.Xavier(factor_type='avg'), ctx=ctx, force_reinit=True)
net_params['conv8_weight'].initialize(_mx.init.Uniform(0.00005), ctx=ctx, force_reinit=True)
# Initialize object confidence low, preventing an unnecessary adjustment
# period toward conservative estimates
bias = _np.zeros(output_size, dtype=_np.float32)
bias[4::preds_per_box] -= 6
from ._mx_detector import ConstantArray
net_params['conv8_bias'].initialize(ConstantArray(bias), ctx, force_reinit=True)
# Take a subset and then load the rest of the parameters. It is possible to
# do allow_missing=True directly on net_params. However, this will more
# easily hide bugs caused by names getting out of sync.
ref_model.available_parameters_subset(net_params).load(ref_model.model_path, ctx)
column_names = ['Iteration', 'Loss', 'Elapsed Time']
num_columns = len(column_names)
column_width = max(map(lambda x: len(x), column_names)) + 2
hr = '+' + '+'.join(['-' * column_width] * num_columns) + '+'
progress = {'smoothed_loss': None, 'last_time': 0}
iteration = 0
def update_progress(cur_loss, iteration):
iteration_base1 = iteration + 1
if progress['smoothed_loss'] is None:
progress['smoothed_loss'] = cur_loss
else:
progress['smoothed_loss'] = 0.9 * progress['smoothed_loss'] + 0.1 * cur_loss
cur_time = _time.time()
# Printing of table header is deferred, so that start-of-training
# warnings appear above the table
if verbose and iteration == 0:
# Print progress table header
print(hr)
print(('| {:<{width}}' * num_columns + '|').format(*column_names, width=column_width-1))
print(hr)
if verbose and (cur_time > progress['last_time'] + 10 or
iteration_base1 == max_iterations):
# Print progress table row
elapsed_time = cur_time - start_time
print("| {cur_iter:<{width}}| {loss:<{width}.3f}| {time:<{width}.1f}|".format(
cur_iter=iteration_base1, loss=progress['smoothed_loss'],
time=elapsed_time , width=column_width-1))
progress['last_time'] = cur_time
if use_mps:
# Force initialization of net_params
# TODO: Do not rely on MXNet to initialize MPS-based network
net.forward(_mx.nd.uniform(0, 1, (batch_size_each,) + input_image_shape))
mps_net_params = {}
keys = list(net_params)
for k in keys:
mps_net_params[k] = net_params[k].data().asnumpy()
# Multiplies the loss to move the fp16 gradients away from subnormals
# and gradual underflow. The learning rate is correspondingly divided
# by the same multiple to make training mathematically equivalent. The
# update is done in fp32, which is why this trick works. Does not
# affect how loss is presented to the user.
mps_loss_mult = params['mps_loss_mult']
mps_config = {
'mode': _MpsGraphMode.Train,
'use_sgd': True,
'learning_rate': base_lr / params['mps_loss_mult'],
'gradient_clipping': params.get('clip_gradients', 0.0) * mps_loss_mult,
'weight_decay': params['weight_decay'],
'od_include_network': True,
'od_include_loss': True,
'od_scale_xy': params['lmb_coord_xy'] * mps_loss_mult,
'od_scale_wh': params['lmb_coord_wh'] * mps_loss_mult,
'od_scale_no_object': params['lmb_noobj'] * mps_loss_mult,
'od_scale_object': params['lmb_obj'] * mps_loss_mult,
'od_scale_class': params['lmb_class'] * mps_loss_mult,
'od_max_iou_for_no_object': 0.3,
'od_min_iou_for_object': 0.7,
'od_rescore': params['rescore'],
}
mps_net = _get_mps_od_net(input_image_shape=input_image_shape,
batch_size=batch_size,
output_size=output_size,
anchors=anchors,
config=mps_config,
weights=mps_net_params)
# Use worker threads to isolate different points of synchronization
# and/or waiting for non-Python tasks to finish. The
# sframe_worker_thread will spend most of its time waiting for SFrame
# operations, largely image I/O and decoding, along with scheduling
# MXNet data augmentation. The numpy_worker_thread will spend most of
# its time waiting for MXNet data augmentation to complete, along with
# copying the results into NumPy arrays. Finally, the main thread will
# spend most of its time copying NumPy data into MPS and waiting for the
# results. Note that using three threads here only makes sense because
# each thread spends time waiting for non-Python code to finish (so that
# no thread hogs the global interpreter lock).
mxnet_batch_queue = _Queue(1)
numpy_batch_queue = _Queue(1)
def sframe_worker():
# Once a batch is loaded into NumPy, pass it immediately to the
# numpy_worker so that we can start I/O and decoding for the next
# batch.
for batch in loader:
mxnet_batch_queue.put(batch)
mxnet_batch_queue.put(None)
def numpy_worker():
while True:
batch = mxnet_batch_queue.get()
if batch is None:
break
for x, y in zip(batch.data, batch.label):
# Convert to NumPy arrays with required shapes. Note that
# asnumpy waits for any pending MXNet operations to finish.
input_data = _mxnet_to_mps(x.asnumpy())
label_data = y.asnumpy().reshape(y.shape[:-2] + (-1,))
# Convert to packed 32-bit arrays.
input_data = input_data.astype(_np.float32)
if not input_data.flags.c_contiguous:
input_data = input_data.copy()
label_data = label_data.astype(_np.float32)
if not label_data.flags.c_contiguous:
label_data = label_data.copy()
# Push this batch to the main thread.
numpy_batch_queue.put({'input' : input_data,
'label' : label_data,
'iteration' : batch.iteration})
# Tell the main thread there's no more data.
numpy_batch_queue.put(None)
sframe_worker_thread = _Thread(target=sframe_worker)
sframe_worker_thread.start()
numpy_worker_thread = _Thread(target=numpy_worker)
numpy_worker_thread.start()
batch_queue = []
def wait_for_batch():
pending_loss = batch_queue.pop(0)
batch_loss = pending_loss.asnumpy() # Waits for the batch to finish
return batch_loss.sum() / mps_loss_mult
while True:
batch = numpy_batch_queue.get()
if batch is None:
break
# Adjust learning rate according to our schedule.
if batch['iteration'] in steps:
ii = steps.index(batch['iteration']) + 1
new_lr = factors[ii] * base_lr
mps_net.set_learning_rate(new_lr / mps_loss_mult)
# Submit this batch to MPS.
batch_queue.append(mps_net.train(batch['input'], batch['label']))
# If we have two batches in flight, wait for the first one.
if len(batch_queue) > 1:
cur_loss = wait_for_batch()
# If we just submitted the first batch of an iteration, update
# progress for the iteration completed by the last batch we just
# waited for.
if batch['iteration'] > iteration:
update_progress(cur_loss, iteration)
iteration = batch['iteration']
# Wait for any pending batches and finalize our progress updates.
while len(batch_queue) > 0:
cur_loss = wait_for_batch()
update_progress(cur_loss, iteration)
sframe_worker_thread.join()
numpy_worker_thread.join()
# Load back into mxnet
mps_net_params = mps_net.export()
keys = mps_net_params.keys()
for k in keys:
if k in net_params:
net_params[k].set_data(mps_net_params[k])
else: # Use MxNet
net.hybridize()
options = {'learning_rate': base_lr, 'lr_scheduler': lr_scheduler,
'momentum': params['sgd_momentum'], 'wd': params['weight_decay'], 'rescale_grad': 1.0}
clip_grad = params.get('clip_gradients')
if clip_grad:
options['clip_gradient'] = clip_grad
trainer = _mx.gluon.Trainer(net.collect_params(), 'sgd', options)
for batch in loader:
data = _mx.gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
label = _mx.gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
Ls = []
Zs = []
with _mx.autograd.record():
for x, y in zip(data, label):
z = net(x)
z0 = _mx.nd.transpose(z, [0, 2, 3, 1]).reshape(ymap_shape)
L = loss(z0, y)
Ls.append(L)
for L in Ls:
L.backward()
trainer.step(1)
cur_loss = _np.mean([L.asnumpy()[0] for L in Ls])
update_progress(cur_loss, batch.iteration)
iteration = batch.iteration
training_time = _time.time() - start_time
if verbose:
print(hr) # progress table footer
# Save the model
training_iterations = iteration + 1
state = {
'_model': net,
'_class_to_index': class_to_index,
'_training_time_as_string': _seconds_as_string(training_time),
'_grid_shape': grid_shape,
'anchors': anchors,
'model': model,
'classes': classes,
'batch_size': batch_size,
'input_image_shape': input_image_shape,
'feature': feature,
'non_maximum_suppression_threshold': params['non_maximum_suppression_threshold'],
'annotations': annotations,
'num_classes': num_classes,
'num_examples': num_images,
'num_bounding_boxes': num_instances,
'training_time': training_time,
'training_epochs': training_iterations * batch_size // num_images,
'training_iterations': training_iterations,
'max_iterations': max_iterations,
'training_loss': progress['smoothed_loss'],
}
return ObjectDetector(state) | [
"def",
"create",
"(",
"dataset",
",",
"annotations",
"=",
"None",
",",
"feature",
"=",
"None",
",",
"model",
"=",
"'darknet-yolo'",
",",
"classes",
"=",
"None",
",",
"batch_size",
"=",
"0",
",",
"max_iterations",
"=",
"0",
",",
"verbose",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"_raise_error_if_not_sframe",
"(",
"dataset",
",",
"\"dataset\"",
")",
"from",
".",
"_mx_detector",
"import",
"YOLOLoss",
"as",
"_YOLOLoss",
"from",
".",
"_model",
"import",
"tiny_darknet",
"as",
"_tiny_darknet",
"from",
".",
"_sframe_loader",
"import",
"SFrameDetectionIter",
"as",
"_SFrameDetectionIter",
"from",
".",
"_manual_scheduler",
"import",
"ManualScheduler",
"as",
"_ManualScheduler",
"import",
"mxnet",
"as",
"_mx",
"from",
".",
".",
"_mxnet",
"import",
"_mxnet_utils",
"if",
"len",
"(",
"dataset",
")",
"==",
"0",
":",
"raise",
"_ToolkitError",
"(",
"'Unable to train on empty dataset'",
")",
"_numeric_param_check_range",
"(",
"'max_iterations'",
",",
"max_iterations",
",",
"0",
",",
"_six",
".",
"MAXSIZE",
")",
"start_time",
"=",
"_time",
".",
"time",
"(",
")",
"supported_detectors",
"=",
"[",
"'darknet-yolo'",
"]",
"if",
"feature",
"is",
"None",
":",
"feature",
"=",
"_tkutl",
".",
"_find_only_image_column",
"(",
"dataset",
")",
"if",
"verbose",
":",
"print",
"(",
"\"Using '%s' as feature column\"",
"%",
"feature",
")",
"if",
"annotations",
"is",
"None",
":",
"annotations",
"=",
"_tkutl",
".",
"_find_only_column_of_type",
"(",
"dataset",
",",
"target_type",
"=",
"[",
"list",
",",
"dict",
"]",
",",
"type_name",
"=",
"'list'",
",",
"col_name",
"=",
"'annotations'",
")",
"if",
"verbose",
":",
"print",
"(",
"\"Using '%s' as annotations column\"",
"%",
"annotations",
")",
"_raise_error_if_not_detection_sframe",
"(",
"dataset",
",",
"feature",
",",
"annotations",
",",
"require_annotations",
"=",
"True",
")",
"is_annotations_list",
"=",
"dataset",
"[",
"annotations",
"]",
".",
"dtype",
"==",
"list",
"_tkutl",
".",
"_check_categorical_option_type",
"(",
"'model'",
",",
"model",
",",
"supported_detectors",
")",
"base_model",
"=",
"model",
".",
"split",
"(",
"'-'",
",",
"1",
")",
"[",
"0",
"]",
"ref_model",
"=",
"_pre_trained_models",
".",
"OBJECT_DETECTION_BASE_MODELS",
"[",
"base_model",
"]",
"(",
")",
"params",
"=",
"{",
"'anchors'",
":",
"[",
"(",
"1.0",
",",
"2.0",
")",
",",
"(",
"1.0",
",",
"1.0",
")",
",",
"(",
"2.0",
",",
"1.0",
")",
",",
"(",
"2.0",
",",
"4.0",
")",
",",
"(",
"2.0",
",",
"2.0",
")",
",",
"(",
"4.0",
",",
"2.0",
")",
",",
"(",
"4.0",
",",
"8.0",
")",
",",
"(",
"4.0",
",",
"4.0",
")",
",",
"(",
"8.0",
",",
"4.0",
")",
",",
"(",
"8.0",
",",
"16.0",
")",
",",
"(",
"8.0",
",",
"8.0",
")",
",",
"(",
"16.0",
",",
"8.0",
")",
",",
"(",
"16.0",
",",
"32.0",
")",
",",
"(",
"16.0",
",",
"16.0",
")",
",",
"(",
"32.0",
",",
"16.0",
")",
",",
"]",
",",
"'grid_shape'",
":",
"[",
"13",
",",
"13",
"]",
",",
"'aug_resize'",
":",
"0",
",",
"'aug_rand_crop'",
":",
"0.9",
",",
"'aug_rand_pad'",
":",
"0.9",
",",
"'aug_rand_gray'",
":",
"0.0",
",",
"'aug_aspect_ratio'",
":",
"1.25",
",",
"'aug_hue'",
":",
"0.05",
",",
"'aug_brightness'",
":",
"0.05",
",",
"'aug_saturation'",
":",
"0.05",
",",
"'aug_contrast'",
":",
"0.05",
",",
"'aug_horizontal_flip'",
":",
"True",
",",
"'aug_min_object_covered'",
":",
"0",
",",
"'aug_min_eject_coverage'",
":",
"0.5",
",",
"'aug_area_range'",
":",
"(",
".15",
",",
"2",
")",
",",
"'aug_pca_noise'",
":",
"0.0",
",",
"'aug_max_attempts'",
":",
"20",
",",
"'aug_inter_method'",
":",
"2",
",",
"'lmb_coord_xy'",
":",
"10.0",
",",
"'lmb_coord_wh'",
":",
"10.0",
",",
"'lmb_obj'",
":",
"100.0",
",",
"'lmb_noobj'",
":",
"5.0",
",",
"'lmb_class'",
":",
"2.0",
",",
"'non_maximum_suppression_threshold'",
":",
"0.45",
",",
"'rescore'",
":",
"True",
",",
"'clip_gradients'",
":",
"0.025",
",",
"'weight_decay'",
":",
"0.0005",
",",
"'sgd_momentum'",
":",
"0.9",
",",
"'learning_rate'",
":",
"1.0e-3",
",",
"'shuffle'",
":",
"True",
",",
"'mps_loss_mult'",
":",
"8",
",",
"# This large buffer size (8 batches) is an attempt to mitigate against",
"# the SFrame shuffle operation that can occur after each epoch.",
"'io_thread_buffer_size'",
":",
"8",
",",
"}",
"if",
"'_advanced_parameters'",
"in",
"kwargs",
":",
"# Make sure no additional parameters are provided",
"new_keys",
"=",
"set",
"(",
"kwargs",
"[",
"'_advanced_parameters'",
"]",
".",
"keys",
"(",
")",
")",
"set_keys",
"=",
"set",
"(",
"params",
".",
"keys",
"(",
")",
")",
"unsupported",
"=",
"new_keys",
"-",
"set_keys",
"if",
"unsupported",
":",
"raise",
"_ToolkitError",
"(",
"'Unknown advanced parameters: {}'",
".",
"format",
"(",
"unsupported",
")",
")",
"params",
".",
"update",
"(",
"kwargs",
"[",
"'_advanced_parameters'",
"]",
")",
"anchors",
"=",
"params",
"[",
"'anchors'",
"]",
"num_anchors",
"=",
"len",
"(",
"anchors",
")",
"if",
"batch_size",
"<",
"1",
":",
"batch_size",
"=",
"32",
"# Default if not user-specified",
"cuda_gpus",
"=",
"_mxnet_utils",
".",
"get_gpus_in_use",
"(",
"max_devices",
"=",
"batch_size",
")",
"num_mxnet_gpus",
"=",
"len",
"(",
"cuda_gpus",
")",
"use_mps",
"=",
"_use_mps",
"(",
")",
"and",
"num_mxnet_gpus",
"==",
"0",
"batch_size_each",
"=",
"batch_size",
"//",
"max",
"(",
"num_mxnet_gpus",
",",
"1",
")",
"if",
"use_mps",
"and",
"_mps_device_memory_limit",
"(",
")",
"<",
"4",
"*",
"1024",
"*",
"1024",
"*",
"1024",
":",
"# Reduce batch size for GPUs with less than 4GB RAM",
"batch_size_each",
"=",
"16",
"# Note, this may slightly alter the batch size to fit evenly on the GPUs",
"batch_size",
"=",
"max",
"(",
"num_mxnet_gpus",
",",
"1",
")",
"*",
"batch_size_each",
"if",
"verbose",
":",
"print",
"(",
"\"Setting 'batch_size' to {}\"",
".",
"format",
"(",
"batch_size",
")",
")",
"# The IO thread also handles MXNet-powered data augmentation. This seems",
"# to be problematic to run independently of a MXNet-powered neural network",
"# in a separate thread. For this reason, we restrict IO threads to when",
"# the neural network backend is MPS.",
"io_thread_buffer_size",
"=",
"params",
"[",
"'io_thread_buffer_size'",
"]",
"if",
"use_mps",
"else",
"0",
"if",
"verbose",
":",
"# Estimate memory usage (based on experiments)",
"cuda_mem_req",
"=",
"550",
"+",
"batch_size_each",
"*",
"85",
"_tkutl",
".",
"_print_neural_compute_device",
"(",
"cuda_gpus",
"=",
"cuda_gpus",
",",
"use_mps",
"=",
"use_mps",
",",
"cuda_mem_req",
"=",
"cuda_mem_req",
")",
"grid_shape",
"=",
"params",
"[",
"'grid_shape'",
"]",
"input_image_shape",
"=",
"(",
"3",
",",
"grid_shape",
"[",
"0",
"]",
"*",
"ref_model",
".",
"spatial_reduction",
",",
"grid_shape",
"[",
"1",
"]",
"*",
"ref_model",
".",
"spatial_reduction",
")",
"try",
":",
"if",
"is_annotations_list",
":",
"instances",
"=",
"(",
"dataset",
".",
"stack",
"(",
"annotations",
",",
"new_column_name",
"=",
"'_bbox'",
",",
"drop_na",
"=",
"True",
")",
".",
"unpack",
"(",
"'_bbox'",
",",
"limit",
"=",
"[",
"'label'",
"]",
")",
")",
"else",
":",
"instances",
"=",
"dataset",
".",
"rename",
"(",
"{",
"annotations",
":",
"'_bbox'",
"}",
")",
".",
"dropna",
"(",
"'_bbox'",
")",
"instances",
"=",
"instances",
".",
"unpack",
"(",
"'_bbox'",
",",
"limit",
"=",
"[",
"'label'",
"]",
")",
"except",
"(",
"TypeError",
",",
"RuntimeError",
")",
":",
"# If this fails, the annotation format isinvalid at the coarsest level",
"raise",
"_ToolkitError",
"(",
"\"Annotations format is invalid. Must be a list of \"",
"\"dictionaries or single dictionary containing 'label' and 'coordinates'.\"",
")",
"num_images",
"=",
"len",
"(",
"dataset",
")",
"num_instances",
"=",
"len",
"(",
"instances",
")",
"if",
"classes",
"is",
"None",
":",
"classes",
"=",
"instances",
"[",
"'_bbox.label'",
"]",
".",
"unique",
"(",
")",
"classes",
"=",
"sorted",
"(",
"classes",
")",
"# Make a class-to-index look-up table",
"class_to_index",
"=",
"{",
"name",
":",
"index",
"for",
"index",
",",
"name",
"in",
"enumerate",
"(",
"classes",
")",
"}",
"num_classes",
"=",
"len",
"(",
"classes",
")",
"if",
"max_iterations",
"==",
"0",
":",
"# Set number of iterations through a heuristic",
"num_iterations_raw",
"=",
"5000",
"*",
"_np",
".",
"sqrt",
"(",
"num_instances",
")",
"/",
"batch_size",
"num_iterations",
"=",
"1000",
"*",
"max",
"(",
"1",
",",
"int",
"(",
"round",
"(",
"num_iterations_raw",
"/",
"1000",
")",
")",
")",
"if",
"verbose",
":",
"print",
"(",
"\"Setting 'max_iterations' to {}\"",
".",
"format",
"(",
"num_iterations",
")",
")",
"else",
":",
"num_iterations",
"=",
"max_iterations",
"# Create data loader",
"loader",
"=",
"_SFrameDetectionIter",
"(",
"dataset",
",",
"batch_size",
"=",
"batch_size",
",",
"input_shape",
"=",
"input_image_shape",
"[",
"1",
":",
"]",
",",
"output_shape",
"=",
"grid_shape",
",",
"anchors",
"=",
"anchors",
",",
"class_to_index",
"=",
"class_to_index",
",",
"aug_params",
"=",
"params",
",",
"shuffle",
"=",
"params",
"[",
"'shuffle'",
"]",
",",
"loader_type",
"=",
"'augmented'",
",",
"feature_column",
"=",
"feature",
",",
"annotations_column",
"=",
"annotations",
",",
"io_thread_buffer_size",
"=",
"io_thread_buffer_size",
",",
"iterations",
"=",
"num_iterations",
")",
"# Predictions per anchor box: x/y + w/h + object confidence + class probs",
"preds_per_box",
"=",
"5",
"+",
"num_classes",
"output_size",
"=",
"preds_per_box",
"*",
"num_anchors",
"ymap_shape",
"=",
"(",
"batch_size_each",
",",
")",
"+",
"tuple",
"(",
"grid_shape",
")",
"+",
"(",
"num_anchors",
",",
"preds_per_box",
")",
"net",
"=",
"_tiny_darknet",
"(",
"output_size",
"=",
"output_size",
")",
"loss",
"=",
"_YOLOLoss",
"(",
"input_shape",
"=",
"input_image_shape",
"[",
"1",
":",
"]",
",",
"output_shape",
"=",
"grid_shape",
",",
"batch_size",
"=",
"batch_size_each",
",",
"num_classes",
"=",
"num_classes",
",",
"anchors",
"=",
"anchors",
",",
"parameters",
"=",
"params",
")",
"base_lr",
"=",
"params",
"[",
"'learning_rate'",
"]",
"steps",
"=",
"[",
"num_iterations",
"//",
"2",
",",
"3",
"*",
"num_iterations",
"//",
"4",
",",
"num_iterations",
"]",
"steps_and_factors",
"=",
"[",
"(",
"step",
",",
"10",
"**",
"(",
"-",
"i",
")",
")",
"for",
"i",
",",
"step",
"in",
"enumerate",
"(",
"steps",
")",
"]",
"steps",
",",
"factors",
"=",
"zip",
"(",
"*",
"steps_and_factors",
")",
"lr_scheduler",
"=",
"_ManualScheduler",
"(",
"step",
"=",
"steps",
",",
"factor",
"=",
"factors",
")",
"ctx",
"=",
"_mxnet_utils",
".",
"get_mxnet_context",
"(",
"max_devices",
"=",
"batch_size",
")",
"net_params",
"=",
"net",
".",
"collect_params",
"(",
")",
"net_params",
".",
"initialize",
"(",
"_mx",
".",
"init",
".",
"Xavier",
"(",
")",
",",
"ctx",
"=",
"ctx",
")",
"net_params",
"[",
"'conv7_weight'",
"]",
".",
"initialize",
"(",
"_mx",
".",
"init",
".",
"Xavier",
"(",
"factor_type",
"=",
"'avg'",
")",
",",
"ctx",
"=",
"ctx",
",",
"force_reinit",
"=",
"True",
")",
"net_params",
"[",
"'conv8_weight'",
"]",
".",
"initialize",
"(",
"_mx",
".",
"init",
".",
"Uniform",
"(",
"0.00005",
")",
",",
"ctx",
"=",
"ctx",
",",
"force_reinit",
"=",
"True",
")",
"# Initialize object confidence low, preventing an unnecessary adjustment",
"# period toward conservative estimates",
"bias",
"=",
"_np",
".",
"zeros",
"(",
"output_size",
",",
"dtype",
"=",
"_np",
".",
"float32",
")",
"bias",
"[",
"4",
":",
":",
"preds_per_box",
"]",
"-=",
"6",
"from",
".",
"_mx_detector",
"import",
"ConstantArray",
"net_params",
"[",
"'conv8_bias'",
"]",
".",
"initialize",
"(",
"ConstantArray",
"(",
"bias",
")",
",",
"ctx",
",",
"force_reinit",
"=",
"True",
")",
"# Take a subset and then load the rest of the parameters. It is possible to",
"# do allow_missing=True directly on net_params. However, this will more",
"# easily hide bugs caused by names getting out of sync.",
"ref_model",
".",
"available_parameters_subset",
"(",
"net_params",
")",
".",
"load",
"(",
"ref_model",
".",
"model_path",
",",
"ctx",
")",
"column_names",
"=",
"[",
"'Iteration'",
",",
"'Loss'",
",",
"'Elapsed Time'",
"]",
"num_columns",
"=",
"len",
"(",
"column_names",
")",
"column_width",
"=",
"max",
"(",
"map",
"(",
"lambda",
"x",
":",
"len",
"(",
"x",
")",
",",
"column_names",
")",
")",
"+",
"2",
"hr",
"=",
"'+'",
"+",
"'+'",
".",
"join",
"(",
"[",
"'-'",
"*",
"column_width",
"]",
"*",
"num_columns",
")",
"+",
"'+'",
"progress",
"=",
"{",
"'smoothed_loss'",
":",
"None",
",",
"'last_time'",
":",
"0",
"}",
"iteration",
"=",
"0",
"def",
"update_progress",
"(",
"cur_loss",
",",
"iteration",
")",
":",
"iteration_base1",
"=",
"iteration",
"+",
"1",
"if",
"progress",
"[",
"'smoothed_loss'",
"]",
"is",
"None",
":",
"progress",
"[",
"'smoothed_loss'",
"]",
"=",
"cur_loss",
"else",
":",
"progress",
"[",
"'smoothed_loss'",
"]",
"=",
"0.9",
"*",
"progress",
"[",
"'smoothed_loss'",
"]",
"+",
"0.1",
"*",
"cur_loss",
"cur_time",
"=",
"_time",
".",
"time",
"(",
")",
"# Printing of table header is deferred, so that start-of-training",
"# warnings appear above the table",
"if",
"verbose",
"and",
"iteration",
"==",
"0",
":",
"# Print progress table header",
"print",
"(",
"hr",
")",
"print",
"(",
"(",
"'| {:<{width}}'",
"*",
"num_columns",
"+",
"'|'",
")",
".",
"format",
"(",
"*",
"column_names",
",",
"width",
"=",
"column_width",
"-",
"1",
")",
")",
"print",
"(",
"hr",
")",
"if",
"verbose",
"and",
"(",
"cur_time",
">",
"progress",
"[",
"'last_time'",
"]",
"+",
"10",
"or",
"iteration_base1",
"==",
"max_iterations",
")",
":",
"# Print progress table row",
"elapsed_time",
"=",
"cur_time",
"-",
"start_time",
"print",
"(",
"\"| {cur_iter:<{width}}| {loss:<{width}.3f}| {time:<{width}.1f}|\"",
".",
"format",
"(",
"cur_iter",
"=",
"iteration_base1",
",",
"loss",
"=",
"progress",
"[",
"'smoothed_loss'",
"]",
",",
"time",
"=",
"elapsed_time",
",",
"width",
"=",
"column_width",
"-",
"1",
")",
")",
"progress",
"[",
"'last_time'",
"]",
"=",
"cur_time",
"if",
"use_mps",
":",
"# Force initialization of net_params",
"# TODO: Do not rely on MXNet to initialize MPS-based network",
"net",
".",
"forward",
"(",
"_mx",
".",
"nd",
".",
"uniform",
"(",
"0",
",",
"1",
",",
"(",
"batch_size_each",
",",
")",
"+",
"input_image_shape",
")",
")",
"mps_net_params",
"=",
"{",
"}",
"keys",
"=",
"list",
"(",
"net_params",
")",
"for",
"k",
"in",
"keys",
":",
"mps_net_params",
"[",
"k",
"]",
"=",
"net_params",
"[",
"k",
"]",
".",
"data",
"(",
")",
".",
"asnumpy",
"(",
")",
"# Multiplies the loss to move the fp16 gradients away from subnormals",
"# and gradual underflow. The learning rate is correspondingly divided",
"# by the same multiple to make training mathematically equivalent. The",
"# update is done in fp32, which is why this trick works. Does not",
"# affect how loss is presented to the user.",
"mps_loss_mult",
"=",
"params",
"[",
"'mps_loss_mult'",
"]",
"mps_config",
"=",
"{",
"'mode'",
":",
"_MpsGraphMode",
".",
"Train",
",",
"'use_sgd'",
":",
"True",
",",
"'learning_rate'",
":",
"base_lr",
"/",
"params",
"[",
"'mps_loss_mult'",
"]",
",",
"'gradient_clipping'",
":",
"params",
".",
"get",
"(",
"'clip_gradients'",
",",
"0.0",
")",
"*",
"mps_loss_mult",
",",
"'weight_decay'",
":",
"params",
"[",
"'weight_decay'",
"]",
",",
"'od_include_network'",
":",
"True",
",",
"'od_include_loss'",
":",
"True",
",",
"'od_scale_xy'",
":",
"params",
"[",
"'lmb_coord_xy'",
"]",
"*",
"mps_loss_mult",
",",
"'od_scale_wh'",
":",
"params",
"[",
"'lmb_coord_wh'",
"]",
"*",
"mps_loss_mult",
",",
"'od_scale_no_object'",
":",
"params",
"[",
"'lmb_noobj'",
"]",
"*",
"mps_loss_mult",
",",
"'od_scale_object'",
":",
"params",
"[",
"'lmb_obj'",
"]",
"*",
"mps_loss_mult",
",",
"'od_scale_class'",
":",
"params",
"[",
"'lmb_class'",
"]",
"*",
"mps_loss_mult",
",",
"'od_max_iou_for_no_object'",
":",
"0.3",
",",
"'od_min_iou_for_object'",
":",
"0.7",
",",
"'od_rescore'",
":",
"params",
"[",
"'rescore'",
"]",
",",
"}",
"mps_net",
"=",
"_get_mps_od_net",
"(",
"input_image_shape",
"=",
"input_image_shape",
",",
"batch_size",
"=",
"batch_size",
",",
"output_size",
"=",
"output_size",
",",
"anchors",
"=",
"anchors",
",",
"config",
"=",
"mps_config",
",",
"weights",
"=",
"mps_net_params",
")",
"# Use worker threads to isolate different points of synchronization",
"# and/or waiting for non-Python tasks to finish. The",
"# sframe_worker_thread will spend most of its time waiting for SFrame",
"# operations, largely image I/O and decoding, along with scheduling",
"# MXNet data augmentation. The numpy_worker_thread will spend most of",
"# its time waiting for MXNet data augmentation to complete, along with",
"# copying the results into NumPy arrays. Finally, the main thread will",
"# spend most of its time copying NumPy data into MPS and waiting for the",
"# results. Note that using three threads here only makes sense because",
"# each thread spends time waiting for non-Python code to finish (so that",
"# no thread hogs the global interpreter lock).",
"mxnet_batch_queue",
"=",
"_Queue",
"(",
"1",
")",
"numpy_batch_queue",
"=",
"_Queue",
"(",
"1",
")",
"def",
"sframe_worker",
"(",
")",
":",
"# Once a batch is loaded into NumPy, pass it immediately to the",
"# numpy_worker so that we can start I/O and decoding for the next",
"# batch.",
"for",
"batch",
"in",
"loader",
":",
"mxnet_batch_queue",
".",
"put",
"(",
"batch",
")",
"mxnet_batch_queue",
".",
"put",
"(",
"None",
")",
"def",
"numpy_worker",
"(",
")",
":",
"while",
"True",
":",
"batch",
"=",
"mxnet_batch_queue",
".",
"get",
"(",
")",
"if",
"batch",
"is",
"None",
":",
"break",
"for",
"x",
",",
"y",
"in",
"zip",
"(",
"batch",
".",
"data",
",",
"batch",
".",
"label",
")",
":",
"# Convert to NumPy arrays with required shapes. Note that",
"# asnumpy waits for any pending MXNet operations to finish.",
"input_data",
"=",
"_mxnet_to_mps",
"(",
"x",
".",
"asnumpy",
"(",
")",
")",
"label_data",
"=",
"y",
".",
"asnumpy",
"(",
")",
".",
"reshape",
"(",
"y",
".",
"shape",
"[",
":",
"-",
"2",
"]",
"+",
"(",
"-",
"1",
",",
")",
")",
"# Convert to packed 32-bit arrays.",
"input_data",
"=",
"input_data",
".",
"astype",
"(",
"_np",
".",
"float32",
")",
"if",
"not",
"input_data",
".",
"flags",
".",
"c_contiguous",
":",
"input_data",
"=",
"input_data",
".",
"copy",
"(",
")",
"label_data",
"=",
"label_data",
".",
"astype",
"(",
"_np",
".",
"float32",
")",
"if",
"not",
"label_data",
".",
"flags",
".",
"c_contiguous",
":",
"label_data",
"=",
"label_data",
".",
"copy",
"(",
")",
"# Push this batch to the main thread.",
"numpy_batch_queue",
".",
"put",
"(",
"{",
"'input'",
":",
"input_data",
",",
"'label'",
":",
"label_data",
",",
"'iteration'",
":",
"batch",
".",
"iteration",
"}",
")",
"# Tell the main thread there's no more data.",
"numpy_batch_queue",
".",
"put",
"(",
"None",
")",
"sframe_worker_thread",
"=",
"_Thread",
"(",
"target",
"=",
"sframe_worker",
")",
"sframe_worker_thread",
".",
"start",
"(",
")",
"numpy_worker_thread",
"=",
"_Thread",
"(",
"target",
"=",
"numpy_worker",
")",
"numpy_worker_thread",
".",
"start",
"(",
")",
"batch_queue",
"=",
"[",
"]",
"def",
"wait_for_batch",
"(",
")",
":",
"pending_loss",
"=",
"batch_queue",
".",
"pop",
"(",
"0",
")",
"batch_loss",
"=",
"pending_loss",
".",
"asnumpy",
"(",
")",
"# Waits for the batch to finish",
"return",
"batch_loss",
".",
"sum",
"(",
")",
"/",
"mps_loss_mult",
"while",
"True",
":",
"batch",
"=",
"numpy_batch_queue",
".",
"get",
"(",
")",
"if",
"batch",
"is",
"None",
":",
"break",
"# Adjust learning rate according to our schedule.",
"if",
"batch",
"[",
"'iteration'",
"]",
"in",
"steps",
":",
"ii",
"=",
"steps",
".",
"index",
"(",
"batch",
"[",
"'iteration'",
"]",
")",
"+",
"1",
"new_lr",
"=",
"factors",
"[",
"ii",
"]",
"*",
"base_lr",
"mps_net",
".",
"set_learning_rate",
"(",
"new_lr",
"/",
"mps_loss_mult",
")",
"# Submit this match to MPS.",
"batch_queue",
".",
"append",
"(",
"mps_net",
".",
"train",
"(",
"batch",
"[",
"'input'",
"]",
",",
"batch",
"[",
"'label'",
"]",
")",
")",
"# If we have two batches in flight, wait for the first one.",
"if",
"len",
"(",
"batch_queue",
")",
">",
"1",
":",
"cur_loss",
"=",
"wait_for_batch",
"(",
")",
"# If we just submitted the first batch of an iteration, update",
"# progress for the iteration completed by the last batch we just",
"# waited for.",
"if",
"batch",
"[",
"'iteration'",
"]",
">",
"iteration",
":",
"update_progress",
"(",
"cur_loss",
",",
"iteration",
")",
"iteration",
"=",
"batch",
"[",
"'iteration'",
"]",
"# Wait for any pending batches and finalize our progress updates.",
"while",
"len",
"(",
"batch_queue",
")",
">",
"0",
":",
"cur_loss",
"=",
"wait_for_batch",
"(",
")",
"update_progress",
"(",
"cur_loss",
",",
"iteration",
")",
"sframe_worker_thread",
".",
"join",
"(",
")",
"numpy_worker_thread",
".",
"join",
"(",
")",
"# Load back into mxnet",
"mps_net_params",
"=",
"mps_net",
".",
"export",
"(",
")",
"keys",
"=",
"mps_net_params",
".",
"keys",
"(",
")",
"for",
"k",
"in",
"keys",
":",
"if",
"k",
"in",
"net_params",
":",
"net_params",
"[",
"k",
"]",
".",
"set_data",
"(",
"mps_net_params",
"[",
"k",
"]",
")",
"else",
":",
"# Use MxNet",
"net",
".",
"hybridize",
"(",
")",
"options",
"=",
"{",
"'learning_rate'",
":",
"base_lr",
",",
"'lr_scheduler'",
":",
"lr_scheduler",
",",
"'momentum'",
":",
"params",
"[",
"'sgd_momentum'",
"]",
",",
"'wd'",
":",
"params",
"[",
"'weight_decay'",
"]",
",",
"'rescale_grad'",
":",
"1.0",
"}",
"clip_grad",
"=",
"params",
".",
"get",
"(",
"'clip_gradients'",
")",
"if",
"clip_grad",
":",
"options",
"[",
"'clip_gradient'",
"]",
"=",
"clip_grad",
"trainer",
"=",
"_mx",
".",
"gluon",
".",
"Trainer",
"(",
"net",
".",
"collect_params",
"(",
")",
",",
"'sgd'",
",",
"options",
")",
"for",
"batch",
"in",
"loader",
":",
"data",
"=",
"_mx",
".",
"gluon",
".",
"utils",
".",
"split_and_load",
"(",
"batch",
".",
"data",
"[",
"0",
"]",
",",
"ctx_list",
"=",
"ctx",
",",
"batch_axis",
"=",
"0",
")",
"label",
"=",
"_mx",
".",
"gluon",
".",
"utils",
".",
"split_and_load",
"(",
"batch",
".",
"label",
"[",
"0",
"]",
",",
"ctx_list",
"=",
"ctx",
",",
"batch_axis",
"=",
"0",
")",
"Ls",
"=",
"[",
"]",
"Zs",
"=",
"[",
"]",
"with",
"_mx",
".",
"autograd",
".",
"record",
"(",
")",
":",
"for",
"x",
",",
"y",
"in",
"zip",
"(",
"data",
",",
"label",
")",
":",
"z",
"=",
"net",
"(",
"x",
")",
"z0",
"=",
"_mx",
".",
"nd",
".",
"transpose",
"(",
"z",
",",
"[",
"0",
",",
"2",
",",
"3",
",",
"1",
"]",
")",
".",
"reshape",
"(",
"ymap_shape",
")",
"L",
"=",
"loss",
"(",
"z0",
",",
"y",
")",
"Ls",
".",
"append",
"(",
"L",
")",
"for",
"L",
"in",
"Ls",
":",
"L",
".",
"backward",
"(",
")",
"trainer",
".",
"step",
"(",
"1",
")",
"cur_loss",
"=",
"_np",
".",
"mean",
"(",
"[",
"L",
".",
"asnumpy",
"(",
")",
"[",
"0",
"]",
"for",
"L",
"in",
"Ls",
"]",
")",
"update_progress",
"(",
"cur_loss",
",",
"batch",
".",
"iteration",
")",
"iteration",
"=",
"batch",
".",
"iteration",
"training_time",
"=",
"_time",
".",
"time",
"(",
")",
"-",
"start_time",
"if",
"verbose",
":",
"print",
"(",
"hr",
")",
"# progress table footer",
"# Save the model",
"training_iterations",
"=",
"iteration",
"+",
"1",
"state",
"=",
"{",
"'_model'",
":",
"net",
",",
"'_class_to_index'",
":",
"class_to_index",
",",
"'_training_time_as_string'",
":",
"_seconds_as_string",
"(",
"training_time",
")",
",",
"'_grid_shape'",
":",
"grid_shape",
",",
"'anchors'",
":",
"anchors",
",",
"'model'",
":",
"model",
",",
"'classes'",
":",
"classes",
",",
"'batch_size'",
":",
"batch_size",
",",
"'input_image_shape'",
":",
"input_image_shape",
",",
"'feature'",
":",
"feature",
",",
"'non_maximum_suppression_threshold'",
":",
"params",
"[",
"'non_maximum_suppression_threshold'",
"]",
",",
"'annotations'",
":",
"annotations",
",",
"'num_classes'",
":",
"num_classes",
",",
"'num_examples'",
":",
"num_images",
",",
"'num_bounding_boxes'",
":",
"num_instances",
",",
"'training_time'",
":",
"training_time",
",",
"'training_epochs'",
":",
"training_iterations",
"*",
"batch_size",
"//",
"num_images",
",",
"'training_iterations'",
":",
"training_iterations",
",",
"'max_iterations'",
":",
"max_iterations",
",",
"'training_loss'",
":",
"progress",
"[",
"'smoothed_loss'",
"]",
",",
"}",
"return",
"ObjectDetector",
"(",
"state",
")"
]
| 41.057143 | 22.028571 |
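The create() record above expects each row to pair an image with a list of rectangle annotations. A minimal sketch of assembling such an SFrame, assuming turicreate's SFrame and Image constructors; the file path and label are placeholders, and the box coordinates reuse the values from the docstring example:

import turicreate as tc

# One image with a single bounding box in the format documented by create():
# x/y are the box center, width/height its size (placeholder values).
images = [tc.Image('/path/to/img_0001.png')]
annotations = [[{'label': 'dog',
                 'type': 'rectangle',
                 'coordinates': {'x': 223, 'y': 198,
                                 'width': 130, 'height': 230}}]]

data = tc.SFrame({'image': images, 'annotations': annotations})
model = tc.object_detector.create(data, max_iterations=100)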
def read_filtering_config (self):
"""
Read configuration options in section "filtering".
"""
section = "filtering"
if self.has_option(section, "ignorewarnings"):
self.config['ignorewarnings'] = [f.strip().lower() for f in \
self.get(section, 'ignorewarnings').split(',')]
if self.has_option(section, "ignore"):
for line in read_multiline(self.get(section, "ignore")):
pat = get_link_pat(line, strict=1)
self.config["externlinks"].append(pat)
if self.has_option(section, "nofollow"):
for line in read_multiline(self.get(section, "nofollow")):
pat = get_link_pat(line, strict=0)
self.config["externlinks"].append(pat)
if self.has_option(section, "internlinks"):
pat = get_link_pat(self.get(section, "internlinks"))
self.config["internlinks"].append(pat)
self.read_boolean_option(section, "checkextern") | [
"def",
"read_filtering_config",
"(",
"self",
")",
":",
"section",
"=",
"\"filtering\"",
"if",
"self",
".",
"has_option",
"(",
"section",
",",
"\"ignorewarnings\"",
")",
":",
"self",
".",
"config",
"[",
"'ignorewarnings'",
"]",
"=",
"[",
"f",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"for",
"f",
"in",
"self",
".",
"get",
"(",
"section",
",",
"'ignorewarnings'",
")",
".",
"split",
"(",
"','",
")",
"]",
"if",
"self",
".",
"has_option",
"(",
"section",
",",
"\"ignore\"",
")",
":",
"for",
"line",
"in",
"read_multiline",
"(",
"self",
".",
"get",
"(",
"section",
",",
"\"ignore\"",
")",
")",
":",
"pat",
"=",
"get_link_pat",
"(",
"line",
",",
"strict",
"=",
"1",
")",
"self",
".",
"config",
"[",
"\"externlinks\"",
"]",
".",
"append",
"(",
"pat",
")",
"if",
"self",
".",
"has_option",
"(",
"section",
",",
"\"nofollow\"",
")",
":",
"for",
"line",
"in",
"read_multiline",
"(",
"self",
".",
"get",
"(",
"section",
",",
"\"nofollow\"",
")",
")",
":",
"pat",
"=",
"get_link_pat",
"(",
"line",
",",
"strict",
"=",
"0",
")",
"self",
".",
"config",
"[",
"\"externlinks\"",
"]",
".",
"append",
"(",
"pat",
")",
"if",
"self",
".",
"has_option",
"(",
"section",
",",
"\"internlinks\"",
")",
":",
"pat",
"=",
"get_link_pat",
"(",
"self",
".",
"get",
"(",
"section",
",",
"\"internlinks\"",
")",
")",
"self",
".",
"config",
"[",
"\"internlinks\"",
"]",
".",
"append",
"(",
"pat",
")",
"self",
".",
"read_boolean_option",
"(",
"section",
",",
"\"checkextern\"",
")"
]
| 49.7 | 14.4 |
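read_filtering_config() above pulls its options from a [filtering] section of an INI-style configuration. A hypothetical section it would accept; the warning names and URL patterns are placeholders:

[filtering]
ignorewarnings = url-whitespace, http-redirected
ignore =
  ^mailto:
  ^javascript:
nofollow =
  ^https?://ads\.example\.com/
internlinks = ^https?://www\.example\.com/
checkextern = 1

The ignorewarnings value is split on commas and lowercased, ignore and nofollow are read as one pattern per line (strict and non-strict link patterns respectively), internlinks takes a single pattern, and checkextern is parsed as a boolean option.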