| text (string, 89–104k chars) | code_tokens (list) | avg_line_len (float64, 7.91–980) | score (float64, 0–630) |
|---|---|---|---|
def search(self, fields=None, **kwargs):
    '''taobao.products.search Search product information

    Searches all products in one of two ways (at least one must be given):
    by the keyword ``q``, or by ``cid`` and ``props``. Supported return
    fields: product_id, name, pic_path, cid, props, price, tsc. When the
    user specifies a cid that is the category id of a vertical market
    (3C electronics mall, shoe mall), only staff-confirmed products are
    returned by default. If no cid is given, or the cid is an ordinary
    category, products confirmed by the merchant or by staff are returned
    by default. If the user sets the status field, the specified status
    type takes precedence.'''
    request = TOPRequest('taobao.products.search')
    if not fields:
        product = Product()
        fields = product.fields
    request['fields'] = fields
    for k, v in kwargs.items():
        if k not in ('q', 'cid', 'props', 'status', 'page_no', 'page_size', 'vertical_market') and v is None:
            continue
        request[k] = v
    self.create(self.execute(request))
    return self.products
avg_line_len: 53.142857, score: 27.285714
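The keyword filter above is easy to misread: a keyword is dropped only when it is both unrecognized and None, so unknown keys with real values still pass through. A stand-alone sketch with hypothetical values (no TOP client needed):

```python
request = {}
kwargs = {'q': 'iphone', 'page_size': 40, 'bogus': None, 'extra': 1}
allowed = ('q', 'cid', 'props', 'status', 'page_no',
           'page_size', 'vertical_market')
for k, v in kwargs.items():
    if k not in allowed and v is None:
        continue  # skip only unrecognized keys carrying no value
    request[k] = v
print(request)  # {'q': 'iphone', 'page_size': 40, 'extra': 1}
```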
def jstemplate(parser, token):
"""Templatetag to handle any of the Mustache-based templates.
Replaces ``[[[`` and ``]]]`` with ``{{{`` and ``}}}``,
``[[`` and ``]]`` with ``{{`` and ``}}`` and
``[%`` and ``%]`` with ``{%`` and ``%}`` to avoid conflicts
with Django's template engine when using any of the Mustache-based
templating libraries.
"""
nodelist = parser.parse(('endjstemplate',))
parser.delete_first_token()
return JSTemplateNode(nodelist)
avg_line_len: 40, score: 13.833333
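A minimal sketch of the bracket translation the tag performs; `JSTemplateNode` is assumed to apply these replacements when rendering, and the triple brackets must be handled before the double ones:

```python
content = "[[[ raw ]]] [[ var ]] [% tag %]"
for src, dst in (("[[[", "{{{"), ("]]]", "}}}"),
                 ("[[", "{{"), ("]]", "}}"),
                 ("[%", "{%"), ("%]", "%}")):
    content = content.replace(src, dst)
print(content)  # {{{ raw }}} {{ var }} {% tag %}
```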
def get(self, limit=None, offset=None):
"""
:param offset: int of the offset to use
    :param limit: int of max number of transfers to return
:return: list of Transfer dict
"""
return self.connection.get('account/transfer/admin/array', limit=limit, offset=offset)
avg_line_len: 47.285714, score: 15.857143
def merge(cls, first, second):
"""
Return an AttributeList that is the result of merging first with second.
"""
merged = AttributeList([], None)
assert (isinstance(first, AttributeList))
assert (isinstance(second, AttributeList))
merged._contents = first._contents[:]
merged._contents += second._contents[:]
return merged
avg_line_len: 32, score: 15
def objects_get(self, bucket, key, projection='noAcl'):
"""Issues a request to retrieve information about an object.
Args:
bucket: the name of the bucket.
key: the key of the object within the bucket.
projection: the projection of the object to retrieve.
Returns:
A parsed object information dictionary.
Raises:
Exception if there is an error performing the operation.
"""
args = {}
if projection is not None:
args['projection'] = projection
url = Api._ENDPOINT + (Api._OBJECT_PATH % (bucket, Api._escape_key(key)))
return datalab.utils.Http.request(url, args=args, credentials=self._credentials)
avg_line_len: 36.277778, score: 20.222222
def main(req_port=None, res_port=None, use_security=False):
    '''Main entry point of the queue device.
:param req_port: port for clients
:param res_port: port for servers
'''
if req_port is None:
req_port = env.get_req_port()
if res_port is None:
res_port = env.get_res_port()
auth = None
try:
context = zmq.Context()
frontend_service = context.socket(zmq.XREP)
backend_service = context.socket(zmq.XREQ)
if use_security:
if not os.path.exists(env.get_server_public_key_dir()):
create_certificates(env.get_server_public_key_dir())
auth = Authenticator.instance(env.get_server_public_key_dir())
auth.set_server_key(
frontend_service, env.get_server_secret_key_path())
auth.set_client_key(backend_service, env.get_client_secret_key_path(),
env.get_server_public_key_path())
frontend_service.bind('tcp://*:{req_port}'.format(req_port=req_port))
backend_service.bind('tcp://*:{res_port}'.format(res_port=res_port))
zmq.device(zmq.QUEUE, frontend_service, backend_service)
except KeyboardInterrupt:
pass
finally:
frontend_service.close()
backend_service.close()
context.term()
if use_security and auth is not None:
auth.stop()
avg_line_len: 39.382353, score: 19.735294
def gen_fields(field: Field) -> Generator[Field, None, None]:
"""
Starting with a Deform :class:`Field`, yield the field itself and any
children.
"""
yield field
for c in field.children:
for f in gen_fields(c):
yield f
avg_line_len: 28.222222, score: 16.444444
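A runnable sketch with a stand-in class; deform's real `Field` is assumed to expose `children` the same way. It shows the pre-order traversal (each field yielded before its descendants):

```python
class FakeField:
    """Stand-in with the `children` attribute gen_fields relies on."""
    def __init__(self, name, children=()):
        self.name = name
        self.children = list(children)

form = FakeField("form", [FakeField("name"),
                          FakeField("address", [FakeField("zip")])])
print([f.name for f in gen_fields(form)])
# ['form', 'name', 'address', 'zip']
```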
def phone_numbers(self):
"""
:rtype: twilio.rest.lookups.v1.phone_number.PhoneNumberList
"""
if self._phone_numbers is None:
self._phone_numbers = PhoneNumberList(self)
return self._phone_numbers
avg_line_len: 34.428571, score: 9.285714
def check_cache(self, e_tag, match):
"""Checks the ETag and sends a cache match response if it matches."""
if e_tag != match:
return False
self.send_response(304)
self.send_header("ETag", e_tag)
self.send_header("Cache-Control",
"max-age={0}".format(self.server.max_age))
self.end_headers()
thread_local.size = 0
return True
avg_line_len: 37.727273, score: 10.727273
def insert(self, key, value):
"""Inserts the item at the specified position. Similar to list.insert()."""
self._values.insert(key, self._type_checker.CheckValue(value))
if not self._message_listener.dirty:
self._message_listener.Modified()
avg_line_len: 50.6, score: 7.6
async def request(self, request):
"""
Execute a STUN transaction and return the response.
"""
assert request.transaction_id not in self.transactions
if self.integrity_key:
self.__add_authentication(request)
transaction = stun.Transaction(request, self.server, self)
self.transactions[request.transaction_id] = transaction
try:
return await transaction.run()
finally:
del self.transactions[request.transaction_id]
avg_line_len: 33.866667, score: 17.6
def list_user_page_views(self, user_id, end_time=None, start_time=None):
"""
List user page views.
Return the user's page view history in json format, similar to the
available CSV download. Pagination is used as described in API basics
section. Page views are returned in descending order, newest to oldest.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - user_id
"""ID"""
path["user_id"] = user_id
# OPTIONAL - start_time
"""The beginning of the time range from which you want page views."""
if start_time is not None:
params["start_time"] = start_time
# OPTIONAL - end_time
"""The end of the time range from which you want page views."""
if end_time is not None:
params["end_time"] = end_time
self.logger.debug("GET /api/v1/users/{user_id}/page_views with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/users/{user_id}/page_views".format(**path), data=data, params=params, all_pages=True)
avg_line_len: 41.857143, score: 24.25
def generate_overlapping_psds(opt, gwstrain, flen, delta_f, flow,
dyn_range_factor=1., precision=None):
"""Generate a set of overlapping PSDs to cover a stretch of data. This
allows one to analyse a long stretch of data with PSD measurements that
change with time.
Parameters
-----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes (psd_model, psd_file, asd_file, psd_estimation,
psd_segment_length, psd_segment_stride, psd_inverse_length, psd_output).
gwstrain : Strain object
The timeseries of raw data on which to estimate PSDs.
flen : int
The length in samples of the output PSDs.
delta_f : float
The frequency step of the output PSDs.
flow: float
        The low frequency cutoff to use when calculating the PSD.
dyn_range_factor : {1, float}
For PSDs taken from models or text files, if `dyn_range_factor` is
not None, then the PSD is multiplied by `dyn_range_factor` ** 2.
precision : str, choices (None,'single','double')
If not specified, or specified as None, the precision of the returned
PSD will match the precision of the data, if measuring a PSD, or will
match the default precision of the model if using an analytical PSD.
If 'single' the PSD will be converted to float32, if not already in
that precision. If 'double' the PSD will be converted to float64, if
not already in that precision.
Returns
--------
psd_and_times : list of (start, end, PSD) tuples
This is a list of tuples containing one entry for each PSD. The first
and second entries (start, end) in each tuple represent the index
range of the gwstrain data that was used to estimate that PSD. The
third entry (psd) contains the PSD estimate between that interval.
"""
if not opt.psd_estimation:
psd = from_cli(opt, flen, delta_f, flow, strain=gwstrain,
dyn_range_factor=dyn_range_factor, precision=precision)
psds_and_times = [ (0, len(gwstrain), psd) ]
return psds_and_times
# Figure out the data length used for PSD generation
seg_stride = int(opt.psd_segment_stride * gwstrain.sample_rate)
seg_len = int(opt.psd_segment_length * gwstrain.sample_rate)
input_data_len = len(gwstrain)
if opt.psd_num_segments is None:
# FIXME: Should we make --psd-num-segments mandatory?
# err_msg = "You must supply --num-segments."
# raise ValueError(err_msg)
num_segments = int(input_data_len // seg_stride) - 1
else:
num_segments = int(opt.psd_num_segments)
psd_data_len = (num_segments - 1) * seg_stride + seg_len
# How many unique PSD measurements is this?
psds_and_times = []
if input_data_len < psd_data_len:
err_msg = "Input data length must be longer than data length needed "
err_msg += "to estimate a PSD. You specified that a PSD should be "
err_msg += "estimated with %d seconds. " %(psd_data_len)
err_msg += "Input data length is %d seconds. " %(input_data_len)
raise ValueError(err_msg)
elif input_data_len == psd_data_len:
num_psd_measurements = 1
psd_stride = 0
else:
num_psd_measurements = int(2 * (input_data_len-1) / psd_data_len)
psd_stride = int((input_data_len - psd_data_len) / num_psd_measurements)
for idx in range(num_psd_measurements):
if idx == (num_psd_measurements - 1):
start_idx = input_data_len - psd_data_len
end_idx = input_data_len
else:
start_idx = psd_stride * idx
end_idx = psd_data_len + psd_stride * idx
strain_part = gwstrain[start_idx:end_idx]
psd = from_cli(opt, flen, delta_f, flow, strain=strain_part,
dyn_range_factor=dyn_range_factor, precision=precision)
psds_and_times.append( (start_idx, end_idx, psd) )
return psds_and_times
avg_line_len: 45.977011, score: 22.425287
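The stride arithmetic is the subtle part; a worked example with hypothetical numbers (pure arithmetic, no strain objects needed):

```python
input_data_len = 4096   # samples of input data (hypothetical)
psd_data_len = 512      # samples each PSD estimate consumes (hypothetical)

num_psd_measurements = int(2 * (input_data_len - 1) / psd_data_len)       # 15
psd_stride = int((input_data_len - psd_data_len) / num_psd_measurements)  # 238
# Measurements start at 0, 238, 476, ...; the last one is pinned to
# input_data_len - psd_data_len = 3584 so the tail is always covered.
```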
def v_depth(d, depth):
"""Iterate values on specific depth.
    depth has to be greater than or equal to 0.
Usage reference see :meth:`DictTree.kv_depth()<DictTree.kv_depth>`
"""
if depth == 0:
yield d
else:
for node in DictTree.v(d):
for node1 in DictTree.v_depth(node, depth-1):
yield node1
avg_line_len: 34.545455, score: 14.090909
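An illustration with a stand-in `DictTree` whose `v()` yields a dict's values (an assumption consistent with how `v_depth` recurses):

```python
class DictTree:
    @staticmethod
    def v(d):
        return iter(d.values())

    @staticmethod
    def v_depth(d, depth):
        if depth == 0:
            yield d
        else:
            for node in DictTree.v(d):
                for node1 in DictTree.v_depth(node, depth - 1):
                    yield node1

tree = {"a": {"x": 1}, "b": {"y": 2}}
print(list(DictTree.v_depth(tree, 2)))  # [1, 2]
```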
def create_report(self, report_type, account_id, term_id=None, params=None):
    """
    Generates a report instance for the canvas account id.
    https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.create
    """
    if params is None:  # avoid mutating a shared default dict below
        params = {}
    if term_id is not None:
        params["enrollment_term_id"] = term_id
url = ACCOUNTS_API.format(account_id) + "/reports/{}".format(
report_type)
body = {"parameters": params}
data = self._post_resource(url, body)
data["account_id"] = account_id
return Report(data=data)
avg_line_len: 36.375, score: 19.625
def calculate_concordance_probability(graph: BELGraph,
key: str,
cutoff: Optional[float] = None,
permutations: Optional[int] = None,
percentage: Optional[float] = None,
use_ambiguous: bool = False,
permute_type: str = 'shuffle_node_data',
) -> Tuple[float, List[float], float]:
"""Calculates a graph's concordance as well as its statistical probability.
:param graph: A BEL graph
:param str key: The node data dictionary key storing the logFC
:param float cutoff: The optional logFC cutoff for significance
:param int permutations: The number of random permutations to test. Defaults to 500
:param float percentage: The percentage of the graph's edges to maintain. Defaults to 0.9
    :param bool use_ambiguous: Compare to ambiguous edges as well
    :param str permute_type: Permutation strategy: one of 'random_by_edges',
     'shuffle_node_data', or 'shuffle_relations'
    :returns: A triple of the concordance score, the null distribution, and the p-value.
"""
if permute_type == 'random_by_edges':
permute_func = partial(random_by_edges, percentage=percentage)
elif permute_type == 'shuffle_node_data':
permute_func = partial(shuffle_node_data, key=key, percentage=percentage)
elif permute_type == 'shuffle_relations':
permute_func = partial(shuffle_relations, percentage=percentage)
else:
raise ValueError('Invalid permute_type: {}'.format(permute_type))
graph: BELGraph = graph.copy()
collapse_to_genes(graph)
collapse_all_variants(graph)
score = calculate_concordance(graph, key, cutoff=cutoff)
distribution = []
for _ in range(permutations or 500):
permuted_graph = permute_func(graph)
permuted_graph_scores = calculate_concordance(permuted_graph, key, cutoff=cutoff, use_ambiguous=use_ambiguous)
distribution.append(permuted_graph_scores)
return score, distribution, one_sided(score, distribution)
avg_line_len: 47.488372, score: 26.27907
def create(self, attributes):
"""
Create a new space
:param attributes: Refer to API. Pass in argument as dictionary
:type attributes: dict
:return: Details of newly created space
:rtype: dict
"""
if not isinstance(attributes, dict):
raise TypeError('Dictionary of values expected')
attributes = json.dumps(attributes)
return self.transport.POST(url='/space/', body=attributes, type='application/json')
avg_line_len: 37.769231, score: 15.615385
def defaults(f, self, *args, **kwargs):
"""
For ``PARAMETERS`` keys, replace None ``kwargs`` with ``self`` attr values.
Should be applied on the top of any decorator stack so other decorators see
the "right" kwargs.
Will also apply transformations found in ``TRANSFORMS``.
"""
    for name, data in PARAMETERS.items():
kwargs[name] = kwargs.get(name) or getattr(self, name)
if 'transform' in data:
kwargs[name] = data['transform'](kwargs[name])
return f(self, *args, **kwargs)
avg_line_len: 37.5, score: 18.214286
async def build(self, building: UnitTypeId, near: Union[Point2, Point3], max_distance: int=20, unit: Optional[Unit]=None, random_alternative: bool=True, placement_step: int=2):
"""Build a building."""
if isinstance(near, Unit):
near = near.position.to2
elif near is not None:
near = near.to2
else:
return
p = await self.find_placement(building, near.rounded, max_distance, random_alternative, placement_step)
if p is None:
return ActionResult.CantFindPlacementLocation
unit = unit or self.select_build_worker(p)
if unit is None or not self.can_afford(building):
return ActionResult.Error
return await self.do(unit.build(building, p))
avg_line_len: 41.722222, score: 27.111111
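A hedged usage sketch inside a python-sc2 bot's `on_step`; `UnitTypeId` and the bot attributes come from that framework:

```python
async def on_step(self, iteration):
    # Queue a supply depot near our starting base when supply runs short.
    if self.supply_left < 4 and self.can_afford(UnitTypeId.SUPPLYDEPOT):
        await self.build(UnitTypeId.SUPPLYDEPOT, near=self.start_location)
```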
def get_random(self):
"""
Returns a random statement from the database
"""
Statement = self.get_model('statement')
statement = Statement.objects.order_by('?').first()
if statement is None:
raise self.EmptyDatabaseException()
return statement
avg_line_len: 25.083333, score: 17.583333
def update_parent_directory_number(self, parent_dir_num):
# type: (int) -> None
'''
A method to update the parent directory number for this Path Table
Record from the directory record.
Parameters:
parent_dir_num - The new parent directory number to assign to this PTR.
Returns:
Nothing.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('Path Table Record not yet initialized')
self.parent_directory_num = parent_dir_num
avg_line_len: 38.142857, score: 24.428571
def __neighbor_indexes_points(self, optic_object):
"""!
@brief Return neighbors of the specified object in case of sequence of points.
@param[in] optic_object (optics_descriptor): Object for which neighbors should be returned in line with connectivity radius.
    @return (list) List of indexes of neighbors in line with the connectivity radius.
"""
kdnodes = self.__kdtree.find_nearest_dist_nodes(self.__sample_pointer[optic_object.index_object], self.__eps)
return [[node_tuple[1].payload, math.sqrt(node_tuple[0])] for node_tuple in kdnodes if
node_tuple[1].payload != optic_object.index_object]
avg_line_len: 55.333333, score: 39.416667
def inherit_flags(toolset, base, prohibited_properties = []):
"""Brings all flag definitions from the 'base' toolset into the 'toolset'
toolset. Flag definitions whose conditions make use of properties in
'prohibited-properties' are ignored. Don't confuse property and feature, for
example <debug-symbols>on and <debug-symbols>off, so blocking one of them does
not block the other one.
The flag conditions are not altered at all, so if a condition includes a name,
or version of a base toolset, it won't ever match the inheriting toolset. When
such flag settings must be inherited, define a rule in base toolset module and
call it as needed."""
assert isinstance(toolset, basestring)
assert isinstance(base, basestring)
assert is_iterable_typed(prohibited_properties, basestring)
for f in __module_flags.get(base, []):
if not f.condition or b2.util.set.difference(f.condition, prohibited_properties):
match = __re_first_group.match(f.rule)
rule_ = None
if match:
rule_ = match.group(1)
new_rule_or_module = ''
if rule_:
new_rule_or_module = toolset + '.' + rule_
else:
new_rule_or_module = toolset
__add_flag (new_rule_or_module, f.variable_name, f.condition, f.values)
avg_line_len: 44.566667, score: 23.633333
def option(name, help=""):
"""Decorator that add an option to the wrapped command or function."""
def decorator(func):
options = getattr(func, "options", [])
_option = Param(name, help)
# Insert at the beginning so the apparent order is preserved
options.insert(0, _option)
func.options = options
return func
return decorator
avg_line_len: 34.181818, score: 15.090909
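Hypothetical usage (`Param` is assumed to store the name and help text): because decorators apply bottom-up while each call inserts at index 0, the recorded options read top-down like the source:

```python
@option("--verbose", help="print extra output")
@option("--count", help="number of repetitions")
def greet():
    """Say hello."""

print([p.name for p in greet.options])  # ['--verbose', '--count']
```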
def _init_notes(self):
"""Set up the UserNotes page with the initial JSON schema."""
self.cached_json = {
'ver': self.schema,
'users': {},
'constants': {
'users': [x.name for x in self.subreddit.moderator()],
'warnings': Note.warnings
}
}
self.set_json('Initializing JSON via puni', True)
avg_line_len: 32.5, score: 17.916667
def _parse_from_table(html_chunk, what):
"""
Go thru table data in `html_chunk` and try to locate content of the
neighbor cell of the cell containing `what`.
Returns:
str: Table data or None.
"""
ean_tag = html_chunk.find("tr", fn=must_contain("th", what, "td"))
if not ean_tag:
return None
return get_first_content(ean_tag[0].find("td"))
avg_line_len: 26.857143, score: 19.857143
def download_kitchen(split=False):
"""Download structured grid of kitchen with velocity field. Use the
``split`` argument to extract all of the furniture in the kitchen.
"""
mesh = _download_and_read('kitchen.vtk')
if not split:
return mesh
extents = {
'door' : (27, 27, 14, 18, 0, 11),
'window1' : (0, 0, 9, 18, 6, 12),
'window2' : (5, 12, 23, 23, 6, 12),
'klower1' : (17, 17, 0, 11, 0, 6),
'klower2' : (19, 19, 0, 11, 0, 6),
'klower3' : (17, 19, 0, 0, 0, 6),
'klower4' : (17, 19, 11, 11, 0, 6),
'klower5' : (17, 19, 0, 11, 0, 0),
'klower6' : (17, 19, 0, 7, 6, 6),
'klower7' : (17, 19, 9, 11, 6, 6),
'hood1' : (17, 17, 0, 11, 11, 16),
'hood2' : (19, 19, 0, 11, 11, 16),
'hood3' : (17, 19, 0, 0, 11, 16),
'hood4' : (17, 19, 11, 11, 11, 16),
'hood5' : (17, 19, 0, 11, 16, 16),
'cookingPlate' : (17, 19, 7, 9, 6, 6),
'furniture' : (17, 19, 7, 9, 11, 11),
}
kitchen = vtki.MultiBlock()
for key, extent in extents.items():
alg = vtk.vtkStructuredGridGeometryFilter()
alg.SetInputDataObject(mesh)
alg.SetExtent(extent)
alg.Update()
result = vtki.filters._get_output(alg)
kitchen[key] = result
return kitchen
avg_line_len: 37.171429, score: 7.971429
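A usage sketch; `vtki` (the package this helper ships with, later renamed PyVista) is assumed to be installed along with the example data:

```python
kitchen = download_kitchen(split=True)
print(kitchen.keys())    # ['door', 'window1', ..., 'furniture']
kitchen['hood1'].plot()  # render a single extracted block
```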
def __enforce_only_strings_dict(dictionary):
'''
Returns a dictionary that has string keys and values.
'''
ret = {}
for key, value in iteritems(dictionary):
ret[six.text_type(key)] = six.text_type(value)
return ret
avg_line_len: 23.9, score: 23.9
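A quick check of the coercion, assuming the module's `six` imports (on Python 3, `six.text_type` is `str`):

```python
print(__enforce_only_strings_dict({1: True, 'b': 2.5}))
# {'1': 'True', 'b': '2.5'}
```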
def get_assessment_section_mdata():
"""Return default mdata map for AssessmentSection"""
return {
'assessment_taken': {
'element_label': {
'text': 'assessment taken',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'instructions': {
'text': 'accepts an osid.id.Id object',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_id_values': [''],
'syntax': 'ID',
'id_set': [],
},
}
avg_line_len: 35.52, score: 15.36
def setattrs(obj, **kwargs):
"""
>>> data = SimpleNamespace()
>>> setattrs(data, a=1, b=2) # doctest:+ELLIPSIS
namespace(a=1, b=2)
"""
for key, value in kwargs.items():
setattr(obj, key, value)
return obj
avg_line_len: 25.888889, score: 9.666667
def B3(formula):
"""
    Rewrite a formula evaluation result as a Boolean3 value.
    :param formula: the formula evaluation result to convert
:return: Boolean3
"""
if isinstance(formula, true) or formula is True or formula == Boolean3.Top.name or formula == Boolean3.Top.value:
return Boolean3.Top
if isinstance(formula, false) or formula is False or formula == Boolean3.Bottom.name or formula == Boolean3.Bottom.value:
return Boolean3.Bottom
else:
return Boolean3.Unknown
avg_line_len: 37.833333, score: 24.5
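Behavior sketch, assuming `Boolean3` is an enum with Top/Bottom/Unknown and `true`/`false` are the boolean singleton classes the module imports:

```python
assert B3(True) is Boolean3.Top
assert B3('Bottom') is Boolean3.Bottom  # matches Boolean3.Bottom.name
assert B3(0.5) is Boolean3.Unknown      # anything unrecognized
```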
def add_callback(self, method):
"""
    Attaches a method that will be called when the future finishes.
    :param method: A callable from an actor that will be called
        when the future completes. The only argument for that
        method must be the future itself, from which you can get the
        result through :meth:`result`. If the future has
        already completed, then the callable will be called
        immediately.
    .. note:: This functionality only works when called from an actor,
specifying a method from the same actor.
"""
from_actor = get_current()
if from_actor is not None:
callback = (method, from_actor.channel, from_actor.url)
with self.__condition:
if self.__state is not FINISHED:
self.__callbacks.append(callback)
return
# Invoke the callback directly
# msg = TellRequest(TELL, method, [self], from_actor.url)
msg = {TYPE: TELL, METHOD: method, PARAMS: ([self], {}),
TO: from_actor.url}
from_actor.channel.send(msg)
else:
raise FutureError("add_callback only works when called " +
"from inside an actor")
avg_line_len: 44.793103, score: 18.241379
def query_entity_with_permission(self, permission, user=None, Model=Entity):
"""Filter a query on an :class:`Entity` or on of its subclasses.
Usage::
read_q = security.query_entity_with_permission(READ, Model=MyModel)
MyModel.query.filter(read_q)
It should always be placed before any `.join()` happens in the query; else
sqlalchemy might join to the "wrong" entity table when joining to other
:class:`Entity`.
:param user: user to filter for. Default: `current_user`.
:param permission: required :class:`Permission`
:param Model: An :class:`Entity` based class. Useful when there is more than
one Entity based object in query, or if an alias should be used.
:returns: a `sqlalchemy.sql.exists()` expression.
"""
assert isinstance(permission, Permission)
assert issubclass(Model, Entity)
RA = sa.orm.aliased(RoleAssignment)
PA = sa.orm.aliased(PermissionAssignment)
# id column from entity table. Model.id would refer to 'model' table.
# this allows the DB to use indexes / foreign key constraints.
id_column = sa.inspect(Model).primary_key[0]
creator = Model.creator
owner = Model.owner
if not self.running:
return sa.sql.exists([1])
if user is None:
user = unwrap(current_user)
# build role CTE
principal_filter = RA.anonymous == True
if not user.is_anonymous:
principal_filter |= RA.user == user
if user.groups:
principal_filter |= RA.group_id.in_([g.id for g in user.groups])
RA = sa.sql.select([RA], principal_filter).cte()
permission_exists = sa.sql.exists([1]).where(
sa.sql.and_(
PA.permission == permission,
PA.object_id == id_column,
(RA.c.role == PA.role) | (PA.role == AnonymousRole),
(RA.c.object_id == PA.object_id) | (RA.c.object_id == None),
)
)
    # is_admin: self-explanatory. It searches for a local or global admin
# role, but PermissionAssignment is not involved, thus it can match on
# entities that don't have *any permission assignment*, whereas previous
# expressions cannot.
is_admin = sa.sql.exists([1]).where(
sa.sql.and_(
RA.c.role == Admin,
(RA.c.object_id == id_column) | (RA.c.object_id == None),
principal_filter,
)
)
filter_expr = permission_exists | is_admin
if user and not user.is_anonymous:
is_owner_or_creator = sa.sql.exists([1]).where(
sa.sql.and_(
PA.permission == permission,
PA.object_id == id_column,
sa.sql.or_(
(PA.role == Owner) & (owner == user),
(PA.role == Creator) & (creator == user),
),
)
)
filter_expr |= is_owner_or_creator
return filter_expr
avg_line_len: 36.392857, score: 22.285714
def generate(env):
"""Add Builders and construction variables for the mwcc to an Environment."""
import SCons.Defaults
import SCons.Tool
set_vars(env)
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
for suffix in CSuffixes:
static_obj.add_action(suffix, SCons.Defaults.CAction)
shared_obj.add_action(suffix, SCons.Defaults.ShCAction)
for suffix in CXXSuffixes:
static_obj.add_action(suffix, SCons.Defaults.CXXAction)
shared_obj.add_action(suffix, SCons.Defaults.ShCXXAction)
env['CCCOMFLAGS'] = '$CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS -nolink -o $TARGET $SOURCES'
env['CC'] = 'mwcc'
env['CCCOM'] = '$CC $CFLAGS $CCFLAGS $CCCOMFLAGS'
env['CXX'] = 'mwcc'
env['CXXCOM'] = '$CXX $CXXFLAGS $CCCOMFLAGS'
env['SHCC'] = '$CC'
env['SHCCFLAGS'] = '$CCFLAGS'
env['SHCFLAGS'] = '$CFLAGS'
env['SHCCCOM'] = '$SHCC $SHCFLAGS $SHCCFLAGS $CCCOMFLAGS'
env['SHCXX'] = '$CXX'
env['SHCXXFLAGS'] = '$CXXFLAGS'
env['SHCXXCOM'] = '$SHCXX $SHCXXFLAGS $CCCOMFLAGS'
env['CFILESUFFIX'] = '.c'
env['CXXFILESUFFIX'] = '.cpp'
env['CPPDEFPREFIX'] = '-D'
env['CPPDEFSUFFIX'] = ''
env['INCPREFIX'] = '-I'
env['INCSUFFIX'] = ''
avg_line_len: 31.375, score: 21.45
def _ask_openapi():
"""Return whether we should create a (new) skeleton."""
if Path('openapi.yml').exists():
question = 'Override local openapi.yml with a new skeleton? (y/N) '
default = False
else:
question = 'Do you have REST endpoints and wish to create an API' \
' skeleton in openapi.yml? (Y/n) '
default = True
while True:
answer = input(question)
if answer == '':
return default
if answer.lower() in ['y', 'yes']:
return True
if answer.lower() in ['n', 'no']:
return False
avg_line_len: 36.388889, score: 15.888889
def optimized(fn):
"""Decorator that will call the optimized c++ version
of a pycast function if available rather than theo
original pycast function
:param function fn: original pycast function
:return: return the wrapped function
:rtype: function
"""
def _optimized(self, *args, **kwargs):
""" This method calls the pycastC function if
optimization is enabled and the pycastC function
is available.
:param: PyCastObject self: reference to the calling object.
Needs to be passed to the pycastC function,
        so that all its members are available.
:param: list *args: list of arguments the function is called with.
:param: dict **kwargs: dictionary of parameter names and values the function has been called with.
:return result of the function call either from pycast or pycastC module.
:rtype: function
"""
if self.optimizationEnabled:
class_name = self.__class__.__name__
module = self.__module__.replace("pycast", "pycastC")
try:
imported = __import__("%s.%s" % (module, class_name), globals(), locals(), [fn.__name__])
function = getattr(imported, fn.__name__)
return function(self, *args, **kwargs)
except ImportError:
print "[WARNING] Could not enable optimization for %s, %s" % (fn.__name__, self)
return fn(self, *args, **kwargs)
else:
return fn(self, *args, **kwargs)
setattr(_optimized, "__name__", fn.__name__)
setattr(_optimized, "__repr__", fn.__repr__)
setattr(_optimized, "__str__", fn.__str__)
setattr(_optimized, "__doc__", fn.__doc__)
return _optimized
|
[
"def",
"optimized",
"(",
"fn",
")",
":",
"def",
"_optimized",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\" This method calls the pycastC function if\n optimization is enabled and the pycastC function\n is available.\n\n :param: PyCastObject self: reference to the calling object.\n Needs to be passed to the pycastC function,\n so that all uts members are available.\n :param: list *args: list of arguments the function is called with.\n :param: dict **kwargs: dictionary of parameter names and values the function has been called with.\n\n :return result of the function call either from pycast or pycastC module.\n :rtype: function\n \"\"\"",
"if",
"self",
".",
"optimizationEnabled",
":",
"class_name",
"=",
"self",
".",
"__class__",
".",
"__name__",
"module",
"=",
"self",
".",
"__module__",
".",
"replace",
"(",
"\"pycast\"",
",",
"\"pycastC\"",
")",
"try",
":",
"imported",
"=",
"__import__",
"(",
"\"%s.%s\"",
"%",
"(",
"module",
",",
"class_name",
")",
",",
"globals",
"(",
")",
",",
"locals",
"(",
")",
",",
"[",
"fn",
".",
"__name__",
"]",
")",
"function",
"=",
"getattr",
"(",
"imported",
",",
"fn",
".",
"__name__",
")",
"return",
"function",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"ImportError",
":",
"print",
"\"[WARNING] Could not enable optimization for %s, %s\"",
"%",
"(",
"fn",
".",
"__name__",
",",
"self",
")",
"return",
"fn",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"return",
"fn",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"setattr",
"(",
"_optimized",
",",
"\"__name__\"",
",",
"fn",
".",
"__name__",
")",
"setattr",
"(",
"_optimized",
",",
"\"__repr__\"",
",",
"fn",
".",
"__repr__",
")",
"setattr",
"(",
"_optimized",
",",
"\"__str__\"",
",",
"fn",
".",
"__str__",
")",
"setattr",
"(",
"_optimized",
",",
"\"__doc__\"",
",",
"fn",
".",
"__doc__",
")",
"return",
"_optimized"
] | 40.681818 | 21.954545 |
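A minimal usage sketch for the decorator above, assuming a pycast-style class that exposes an optimizationEnabled flag (the original snippet is Python 2 — note the print statement — so this sketch ports the idea to Python 3; the class and method names here are hypothetical):

class PyCastObject(object):
    """Hypothetical stand-in for pycast's base class."""
    def __init__(self):
        self.optimizationEnabled = False  # flip to True to route calls to pycastC

class SimpleMovingAverage(PyCastObject):
    @optimized
    def execute(self, values):
        # Pure-Python fallback; with optimization enabled, @optimized first
        # tries pycastC.<module>.SimpleMovingAverage.execute, then falls back here.
        return sum(values) / len(values)

sma = SimpleMovingAverage()
sma.optimizationEnabled = True          # opt in to the C++ path, if importable
print(sma.execute([1.0, 2.0, 3.0]))     # 2.0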
def fill_datetime(self):
"""Returns when the slot was filled.
Returns:
A datetime.datetime.
Raises:
SlotNotFilledError if the value hasn't been filled yet.
"""
if not self.filled:
raise SlotNotFilledError('Slot with name "%s", key "%s" not yet filled.'
% (self.name, self.key))
return self._fill_datetime
|
[
"def",
"fill_datetime",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"filled",
":",
"raise",
"SlotNotFilledError",
"(",
"'Slot with name \"%s\", key \"%s\" not yet filled.'",
"%",
"(",
"self",
".",
"name",
",",
"self",
".",
"key",
")",
")",
"return",
"self",
".",
"_fill_datetime"
] | 28.230769 | 20.615385 |
def image_meta_delete(self,
image_id=None, # pylint: disable=C0103
name=None,
keys=None):
'''
Delete image metadata
'''
nt_ks = self.compute_conn
if name:
for image in nt_ks.images.list():
if image.name == name:
image_id = image.id # pylint: disable=C0103
pairs = keys.split(',')
if not image_id:
return {'Error': 'A valid image name or id was not specified'}
nt_ks.images.delete_meta(image_id, pairs)
return {image_id: 'Deleted: {0}'.format(pairs)}
|
[
"def",
"image_meta_delete",
"(",
"self",
",",
"image_id",
"=",
"None",
",",
"# pylint: disable=C0103",
"name",
"=",
"None",
",",
"keys",
"=",
"None",
")",
":",
"nt_ks",
"=",
"self",
".",
"compute_conn",
"if",
"name",
":",
"for",
"image",
"in",
"nt_ks",
".",
"images",
".",
"list",
"(",
")",
":",
"if",
"image",
".",
"name",
"==",
"name",
":",
"image_id",
"=",
"image",
".",
"id",
"# pylint: disable=C0103",
"pairs",
"=",
"keys",
".",
"split",
"(",
"','",
")",
"if",
"not",
"image_id",
":",
"return",
"{",
"'Error'",
":",
"'A valid image name or id was not specified'",
"}",
"nt_ks",
".",
"images",
".",
"delete_meta",
"(",
"image_id",
",",
"pairs",
")",
"return",
"{",
"image_id",
":",
"'Deleted: {0}'",
".",
"format",
"(",
"pairs",
")",
"}"
] | 38.117647 | 15.411765 |
def require_qt(func):
"""Specify that a function requires a Qt application.
Use this decorator to specify that a function needs a running
Qt application before it can run. An error is raised if that is not
the case.
"""
@wraps(func)
def wrapped(*args, **kwargs):
if not QApplication.instance(): # pragma: no cover
raise RuntimeError("A Qt application must be created.")
return func(*args, **kwargs)
return wrapped
|
[
"def",
"require_qt",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapped",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"QApplication",
".",
"instance",
"(",
")",
":",
"# pragma: no cover",
"raise",
"RuntimeError",
"(",
"\"A Qt application must be created.\"",
")",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapped"
] | 33.071429 | 20.357143 |
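A short usage sketch, assuming PyQt5 supplies the QApplication the decorator checks for (the original module may bind Qt differently):

import sys
from PyQt5.QtWidgets import QApplication, QLabel

@require_qt
def show_label(text):
    label = QLabel(text)
    label.show()
    return label

app = QApplication(sys.argv)    # must exist before any @require_qt function runs
label = show_label("hello")     # raises RuntimeError if no QApplication was created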
def __update(self, task_source):
""" Recheck next start of tasks from the given one only
:param task_source: source to check
:return: None
"""
next_start = task_source.next_start()
if next_start is not None:
if next_start.tzinfo is None or next_start.tzinfo != timezone.utc:
raise ValueError('Invalid timezone information')
if self.__next_start is None or next_start < self.__next_start:
self.__next_start = next_start
self.__next_sources = [task_source]
elif next_start == self.__next_start:
self.__next_sources.append(task_source)
|
[
"def",
"__update",
"(",
"self",
",",
"task_source",
")",
":",
"next_start",
"=",
"task_source",
".",
"next_start",
"(",
")",
"if",
"next_start",
"is",
"not",
"None",
":",
"if",
"next_start",
".",
"tzinfo",
"is",
"None",
"or",
"next_start",
".",
"tzinfo",
"!=",
"timezone",
".",
"utc",
":",
"raise",
"ValueError",
"(",
"'Invalid timezone information'",
")",
"if",
"self",
".",
"__next_start",
"is",
"None",
"or",
"next_start",
"<",
"self",
".",
"__next_start",
":",
"self",
".",
"__next_start",
"=",
"next_start",
"self",
".",
"__next_sources",
"=",
"[",
"task_source",
"]",
"elif",
"next_start",
"==",
"self",
".",
"__next_start",
":",
"self",
".",
"__next_sources",
".",
"append",
"(",
"task_source",
")"
] | 30.888889 | 15.888889 |
def _sanitize_config(custom_config):
"""Checks whether ``custom_config`` is sane and returns a sanitized dict <str -> (str|object)>
It checks if keys are all strings and sanitizes values of a given dictionary as follows:
- If string, number or boolean is given as a value, it is converted to string.
For string and number (int, float), it is converted to string by a built-in ``str()`` method.
For a boolean value, ``True`` is converted to "true" instead of "True", and ``False`` is
converted to "false" instead of "False", in order to keep the consistency with
Java configuration.
- If neither of the above is given as a value, it is inserted into the sanitized dict as it is.
These values will need to be serialized before adding to a protobuf message.
"""
if not isinstance(custom_config, dict):
raise TypeError("Component-specific configuration must be given as a dict type, given: %s"
% str(type(custom_config)))
sanitized = {}
for key, value in custom_config.items():
if not isinstance(key, str):
raise TypeError("Key for component-specific configuration must be string, given: %s:%s"
% (str(type(key)), str(key)))
if isinstance(value, bool):
sanitized[key] = "true" if value else "false"
elif isinstance(value, (str, int, float)):
sanitized[key] = str(value)
else:
sanitized[key] = value
return sanitized
|
[
"def",
"_sanitize_config",
"(",
"custom_config",
")",
":",
"if",
"not",
"isinstance",
"(",
"custom_config",
",",
"dict",
")",
":",
"raise",
"TypeError",
"(",
"\"Component-specific configuration must be given as a dict type, given: %s\"",
"%",
"str",
"(",
"type",
"(",
"custom_config",
")",
")",
")",
"sanitized",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"custom_config",
".",
"items",
"(",
")",
":",
"if",
"not",
"isinstance",
"(",
"key",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"\"Key for component-specific configuration must be string, given: %s:%s\"",
"%",
"(",
"str",
"(",
"type",
"(",
"key",
")",
")",
",",
"str",
"(",
"key",
")",
")",
")",
"if",
"isinstance",
"(",
"value",
",",
"bool",
")",
":",
"sanitized",
"[",
"key",
"]",
"=",
"\"true\"",
"if",
"value",
"else",
"\"false\"",
"elif",
"isinstance",
"(",
"value",
",",
"(",
"str",
",",
"int",
",",
"float",
")",
")",
":",
"sanitized",
"[",
"key",
"]",
"=",
"str",
"(",
"value",
")",
"else",
":",
"sanitized",
"[",
"key",
"]",
"=",
"value",
"return",
"sanitized"
] | 47.096774 | 26.806452 |
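The rules are easiest to see on a concrete dict — booleans become the lowercase strings the Java side expects, strings and numbers are stringified, and anything else passes through for later serialization:

conf = {
    "topology.debug": True,           # bool  -> "true"
    "topology.workers": 4,            # int   -> "4"
    "topology.name": "word-count",    # str   -> "word-count"
    "custom.object": {"nested": 1},   # other -> left as-is
}
print(_sanitize_config(conf))
# {'topology.debug': 'true', 'topology.workers': '4',
#  'topology.name': 'word-count', 'custom.object': {'nested': 1}}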
def getCollectionClass(cls, name) :
"""Return the class object of a collection given its 'name'"""
try :
return cls.collectionClasses[name]
except KeyError :
raise KeyError( "There is no Collection Class of type: '%s'; currently supported values: [%s]" % (name, ', '.join(getCollectionClasses().keys())) )
|
[
"def",
"getCollectionClass",
"(",
"cls",
",",
"name",
")",
":",
"try",
":",
"return",
"cls",
".",
"collectionClasses",
"[",
"name",
"]",
"except",
"KeyError",
":",
"raise",
"KeyError",
"(",
"\"There is no Collection Class of type: '%s'; currently supported values: [%s]\"",
"%",
"(",
"name",
",",
"', '",
".",
"join",
"(",
"getCollectionClasses",
"(",
")",
".",
"keys",
"(",
")",
")",
")",
")"
] | 58 | 28.666667 |
def index_order(self, sources=True, destinations=True):
"""
Return the order of indices as they appear in array references.
Use *source* and *destination* to filter output
"""
if sources:
arefs = chain(*self.sources.values())
else:
arefs = []
if destinations:
arefs = chain(arefs, *self.destinations.values())
ret = []
for a in [aref for aref in arefs if aref is not None]:
ref = []
for expr in a:
ref.append(expr.free_symbols)
ret.append(ref)
return ret
|
[
"def",
"index_order",
"(",
"self",
",",
"sources",
"=",
"True",
",",
"destinations",
"=",
"True",
")",
":",
"if",
"sources",
":",
"arefs",
"=",
"chain",
"(",
"*",
"self",
".",
"sources",
".",
"values",
"(",
")",
")",
"else",
":",
"arefs",
"=",
"[",
"]",
"if",
"destinations",
":",
"arefs",
"=",
"chain",
"(",
"arefs",
",",
"*",
"self",
".",
"destinations",
".",
"values",
"(",
")",
")",
"ret",
"=",
"[",
"]",
"for",
"a",
"in",
"[",
"aref",
"for",
"aref",
"in",
"arefs",
"if",
"aref",
"is",
"not",
"None",
"]",
":",
"ref",
"=",
"[",
"]",
"for",
"expr",
"in",
"a",
":",
"ref",
".",
"append",
"(",
"expr",
".",
"free_symbols",
")",
"ret",
".",
"append",
"(",
"ref",
")",
"return",
"ret"
] | 28.809524 | 19.666667 |
def get_item_content(self, repository_id, path, project=None, scope_path=None, recursion_level=None, include_content_metadata=None, latest_processed_change=None, download=None, version_descriptor=None, include_content=None, resolve_lfs=None, **kwargs):
"""GetItemContent.
Get Item Metadata and/or Content for a single item. The download parameter is to indicate whether the content should be available as a download or just sent as a stream in the response. Doesn't apply to zipped content, which is always returned as a download.
:param str repository_id: The name or ID of the repository.
:param str path: The item path.
:param str project: Project ID or project name
:param str scope_path: The path scope. The default is null.
:param str recursion_level: The recursion level of this request. The default is 'none', no recursion.
:param bool include_content_metadata: Set to true to include content metadata. Default is false.
:param bool latest_processed_change: Set to true to include the lastest changes. Default is false.
:param bool download: Set to true to download the response as a file. Default is false.
:param :class:`<GitVersionDescriptor> <azure.devops.v5_0.git.models.GitVersionDescriptor>` version_descriptor: Version descriptor. Default is null.
:param bool include_content: Set to true to include item content when requesting json. Default is false.
:param bool resolve_lfs: Set to true to resolve Git LFS pointer files to return actual content from Git LFS. Default is false.
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if repository_id is not None:
route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str')
query_parameters = {}
if path is not None:
query_parameters['path'] = self._serialize.query('path', path, 'str')
if scope_path is not None:
query_parameters['scopePath'] = self._serialize.query('scope_path', scope_path, 'str')
if recursion_level is not None:
query_parameters['recursionLevel'] = self._serialize.query('recursion_level', recursion_level, 'str')
if include_content_metadata is not None:
query_parameters['includeContentMetadata'] = self._serialize.query('include_content_metadata', include_content_metadata, 'bool')
if latest_processed_change is not None:
query_parameters['latestProcessedChange'] = self._serialize.query('latest_processed_change', latest_processed_change, 'bool')
if download is not None:
query_parameters['download'] = self._serialize.query('download', download, 'bool')
if version_descriptor is not None:
if version_descriptor.version_type is not None:
query_parameters['versionDescriptor.versionType'] = version_descriptor.version_type
if version_descriptor.version is not None:
query_parameters['versionDescriptor.version'] = version_descriptor.version
if version_descriptor.version_options is not None:
query_parameters['versionDescriptor.versionOptions'] = version_descriptor.version_options
if include_content is not None:
query_parameters['includeContent'] = self._serialize.query('include_content', include_content, 'bool')
if resolve_lfs is not None:
query_parameters['resolveLfs'] = self._serialize.query('resolve_lfs', resolve_lfs, 'bool')
response = self._send(http_method='GET',
location_id='fb93c0db-47ed-4a31-8c20-47552878fb44',
version='5.0',
route_values=route_values,
query_parameters=query_parameters,
accept_media_type='application/octet-stream')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
|
[
"def",
"get_item_content",
"(",
"self",
",",
"repository_id",
",",
"path",
",",
"project",
"=",
"None",
",",
"scope_path",
"=",
"None",
",",
"recursion_level",
"=",
"None",
",",
"include_content_metadata",
"=",
"None",
",",
"latest_processed_change",
"=",
"None",
",",
"download",
"=",
"None",
",",
"version_descriptor",
"=",
"None",
",",
"include_content",
"=",
"None",
",",
"resolve_lfs",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"route_values",
"=",
"{",
"}",
"if",
"project",
"is",
"not",
"None",
":",
"route_values",
"[",
"'project'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'project'",
",",
"project",
",",
"'str'",
")",
"if",
"repository_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'repositoryId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'repository_id'",
",",
"repository_id",
",",
"'str'",
")",
"query_parameters",
"=",
"{",
"}",
"if",
"path",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'path'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'path'",
",",
"path",
",",
"'str'",
")",
"if",
"scope_path",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'scopePath'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'scope_path'",
",",
"scope_path",
",",
"'str'",
")",
"if",
"recursion_level",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'recursionLevel'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'recursion_level'",
",",
"recursion_level",
",",
"'str'",
")",
"if",
"include_content_metadata",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'includeContentMetadata'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'include_content_metadata'",
",",
"include_content_metadata",
",",
"'bool'",
")",
"if",
"latest_processed_change",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'latestProcessedChange'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'latest_processed_change'",
",",
"latest_processed_change",
",",
"'bool'",
")",
"if",
"download",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'download'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'download'",
",",
"download",
",",
"'bool'",
")",
"if",
"version_descriptor",
"is",
"not",
"None",
":",
"if",
"version_descriptor",
".",
"version_type",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'versionDescriptor.versionType'",
"]",
"=",
"version_descriptor",
".",
"version_type",
"if",
"version_descriptor",
".",
"version",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'versionDescriptor.version'",
"]",
"=",
"version_descriptor",
".",
"version",
"if",
"version_descriptor",
".",
"version_options",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'versionDescriptor.versionOptions'",
"]",
"=",
"version_descriptor",
".",
"version_options",
"if",
"include_content",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'includeContent'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'include_content'",
",",
"include_content",
",",
"'bool'",
")",
"if",
"resolve_lfs",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'resolveLfs'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'resolve_lfs'",
",",
"resolve_lfs",
",",
"'bool'",
")",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'GET'",
",",
"location_id",
"=",
"'fb93c0db-47ed-4a31-8c20-47552878fb44'",
",",
"version",
"=",
"'5.0'",
",",
"route_values",
"=",
"route_values",
",",
"query_parameters",
"=",
"query_parameters",
",",
"accept_media_type",
"=",
"'application/octet-stream'",
")",
"if",
"\"callback\"",
"in",
"kwargs",
":",
"callback",
"=",
"kwargs",
"[",
"\"callback\"",
"]",
"else",
":",
"callback",
"=",
"None",
"return",
"self",
".",
"_client",
".",
"stream_download",
"(",
"response",
",",
"callback",
"=",
"callback",
")"
] | 74.607143 | 40.303571 |
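Because the method returns self._client.stream_download(...), callers consume the result as an iterator of byte chunks; a hedged sketch (the client construction and repository names are assumptions, not shown in the snippet above):

# Assumes `git_client` is an authenticated instance of this Git client class.
chunks = git_client.get_item_content(
    repository_id='my-repo',
    path='/README.md',
    project='my-project',
    download=True,
)
with open('README.md', 'wb') as out:
    for chunk in chunks:        # stream_download yields raw byte chunks
        out.write(chunk)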
def open_database(self):
"""
        Opens the PostgreSQL database.
"""
if not self.con:
try:
self.con = psycopg2.connect(host=self.host,
database=self.dbname, user=self.user,
password=self.password, port=self.port)
except psycopg2.Error as e:
print("Error while opening database:")
print(e.pgerror)
|
[
"def",
"open_database",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"con",
":",
"try",
":",
"self",
".",
"con",
"=",
"psycopg2",
".",
"connect",
"(",
"host",
"=",
"self",
".",
"host",
",",
"database",
"=",
"self",
".",
"dbname",
",",
"user",
"=",
"self",
".",
"user",
",",
"password",
"=",
"self",
".",
"password",
",",
"port",
"=",
"self",
".",
"port",
")",
"except",
"psycopg2",
".",
"Error",
"as",
"e",
":",
"print",
"(",
"\"Error while opening database:\"",
")",
"print",
"(",
"e",
".",
"pgerror",
")"
] | 32.307692 | 13.846154 |
def get_context(self):
"""Return an SSL.Context from self attributes."""
# See https://code.activestate.com/recipes/442473/
c = SSL.Context(SSL.SSLv23_METHOD)
c.use_privatekey_file(self.private_key)
if self.certificate_chain:
c.load_verify_locations(self.certificate_chain)
c.use_certificate_file(self.certificate)
return c
|
[
"def",
"get_context",
"(",
"self",
")",
":",
"# See https://code.activestate.com/recipes/442473/",
"c",
"=",
"SSL",
".",
"Context",
"(",
"SSL",
".",
"SSLv23_METHOD",
")",
"c",
".",
"use_privatekey_file",
"(",
"self",
".",
"private_key",
")",
"if",
"self",
".",
"certificate_chain",
":",
"c",
".",
"load_verify_locations",
"(",
"self",
".",
"certificate_chain",
")",
"c",
".",
"use_certificate_file",
"(",
"self",
".",
"certificate",
")",
"return",
"c"
] | 42.555556 | 11.333333 |
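A minimal sketch of the object shape this method expects — three PEM file paths on self, as the pyOpenSSL calls above imply (the Adapter class here is hypothetical):

class Adapter:
    get_context = get_context  # reuse the method defined above
    def __init__(self, certificate, private_key, certificate_chain=None):
        self.certificate = certificate
        self.private_key = private_key
        self.certificate_chain = certificate_chain

ctx = Adapter('server.crt', 'server.key', 'chain.pem').get_context()
# ctx can now wrap a plain socket, e.g. via SSL.Connection(ctx, sock).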
def quat_to_rotation_matrix(q):
"""Convert unit quaternion to rotation matrix
Parameters
-------------
q : (4,) ndarray
Unit quaternion, scalar as first element
Returns
----------------
R : (3,3) ndarray
Rotation matrix
"""
q = q.flatten()
assert q.size == 4
assert_almost_equal(np.linalg.norm(q), 1.0, err_msg="Not a unit quaternion!")
qq = q ** 2
R = np.array([[qq[0] + qq[1] - qq[2] - qq[3], 2*q[1]*q[2] -
2*q[0]*q[3], 2*q[1]*q[3] + 2*q[0]*q[2]],
[2*q[1]*q[2] + 2*q[0]*q[3], qq[0] - qq[1] + qq[2] -
qq[3], 2*q[2]*q[3] - 2*q[0]*q[1]],
[2*q[1]*q[3] - 2*q[0]*q[2], 2*q[2]*q[3] + 2*q[0]*q[1],
qq[0] - qq[1] - qq[2] + qq[3]]])
return R
|
[
"def",
"quat_to_rotation_matrix",
"(",
"q",
")",
":",
"q",
"=",
"q",
".",
"flatten",
"(",
")",
"assert",
"q",
".",
"size",
"==",
"4",
"assert_almost_equal",
"(",
"np",
".",
"linalg",
".",
"norm",
"(",
"q",
")",
",",
"1.0",
",",
"err_msg",
"=",
"\"Not a unit quaternion!\"",
")",
"qq",
"=",
"q",
"**",
"2",
"R",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"qq",
"[",
"0",
"]",
"+",
"qq",
"[",
"1",
"]",
"-",
"qq",
"[",
"2",
"]",
"-",
"qq",
"[",
"3",
"]",
",",
"2",
"*",
"q",
"[",
"1",
"]",
"*",
"q",
"[",
"2",
"]",
"-",
"2",
"*",
"q",
"[",
"0",
"]",
"*",
"q",
"[",
"3",
"]",
",",
"2",
"*",
"q",
"[",
"1",
"]",
"*",
"q",
"[",
"3",
"]",
"+",
"2",
"*",
"q",
"[",
"0",
"]",
"*",
"q",
"[",
"2",
"]",
"]",
",",
"[",
"2",
"*",
"q",
"[",
"1",
"]",
"*",
"q",
"[",
"2",
"]",
"+",
"2",
"*",
"q",
"[",
"0",
"]",
"*",
"q",
"[",
"3",
"]",
",",
"qq",
"[",
"0",
"]",
"-",
"qq",
"[",
"1",
"]",
"+",
"qq",
"[",
"2",
"]",
"-",
"qq",
"[",
"3",
"]",
",",
"2",
"*",
"q",
"[",
"2",
"]",
"*",
"q",
"[",
"3",
"]",
"-",
"2",
"*",
"q",
"[",
"0",
"]",
"*",
"q",
"[",
"1",
"]",
"]",
",",
"[",
"2",
"*",
"q",
"[",
"1",
"]",
"*",
"q",
"[",
"3",
"]",
"-",
"2",
"*",
"q",
"[",
"0",
"]",
"*",
"q",
"[",
"2",
"]",
",",
"2",
"*",
"q",
"[",
"2",
"]",
"*",
"q",
"[",
"3",
"]",
"+",
"2",
"*",
"q",
"[",
"0",
"]",
"*",
"q",
"[",
"1",
"]",
",",
"qq",
"[",
"0",
"]",
"-",
"qq",
"[",
"1",
"]",
"-",
"qq",
"[",
"2",
"]",
"+",
"qq",
"[",
"3",
"]",
"]",
"]",
")",
"return",
"R"
] | 29.28 | 20.4 |
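Two quick numerical checks of the formula: the identity quaternion maps to the identity matrix, and a 90-degree rotation about z produces the expected permutation of x and y (scalar-first convention, as the docstring states):

import numpy as np

assert np.allclose(quat_to_rotation_matrix(np.array([1.0, 0.0, 0.0, 0.0])), np.eye(3))

# q = (cos(th/2), 0, 0, sin(th/2)) with th = pi/2, i.e. +90 degrees about z.
q = np.array([np.cos(np.pi / 4), 0.0, 0.0, np.sin(np.pi / 4)])
R = quat_to_rotation_matrix(q)
assert np.allclose(R, np.array([[0.0, -1.0, 0.0],
                                [1.0,  0.0, 0.0],
                                [0.0,  0.0, 1.0]]))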
def latex(self):
"""
Returns a string representation for use in LaTeX
"""
if not self:
return ""
s = str(self)
s = s.replace("==", " = ")
s = s.replace("<=", " \leq ")
s = s.replace(">=", " \geq ")
s = s.replace("&&", r" \text{ and } ")
s = s.replace("||", r" \text{ or } ")
return s
|
[
"def",
"latex",
"(",
"self",
")",
":",
"if",
"not",
"self",
":",
"return",
"\"\"",
"s",
"=",
"str",
"(",
"self",
")",
"s",
"=",
"s",
".",
"replace",
"(",
"\"==\"",
",",
"\" = \"",
")",
"s",
"=",
"s",
".",
"replace",
"(",
"\"<=\"",
",",
"\" \\leq \"",
")",
"s",
"=",
"s",
".",
"replace",
"(",
"\">=\"",
",",
"\" \\geq \"",
")",
"s",
"=",
"s",
".",
"replace",
"(",
"\"&&\"",
",",
"r\" \\text{ and } \"",
")",
"s",
"=",
"s",
".",
"replace",
"(",
"\"||\"",
",",
"r\" \\text{ or } \"",
")",
"return",
"s"
] | 28.538462 | 11.153846 |
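The conversion is a chain of plain string substitutions on str(self), so its effect can be previewed on a raw constraint string (a sketch; the real method is called on the constraint object itself):

s = "x >= 0 && y <= 10 || z == 1"
s = s.replace("==", " = ")
s = s.replace("<=", r" \leq ")
s = s.replace(">=", r" \geq ")
s = s.replace("&&", r" \text{ and } ")
s = s.replace("||", r" \text{ or } ")
print(s)   # x  \geq  0  \text{ and }  y  \leq  10  \text{ or }  z  =  1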
def volatility(self, n, freq=None, which='close', ann=True, model='ln', min_periods=1, rolling='simple'):
"""Return the annualized volatility series. N is the number of lookback periods.
:param n: int, number of lookback periods
:param freq: resample frequency or None
:param which: price series to use
:param ann: If True then annualize
:param model: {'ln', 'pct', 'bbg'}
ln - use logarithmic price changes
pct - use pct price changes
bbg - use logarithmic price changes but Bloomberg uses actual business days
:param rolling:{'simple', 'exp'}, if exp, use ewmstd. if simple, use rolling_std
:return:
"""
if model not in ('bbg', 'ln', 'pct'):
raise ValueError('model must be one of (bbg, ln, pct), not %s' % model)
if rolling not in ('simple', 'exp'):
raise ValueError('rolling must be one of (simple, exp), not %s' % rolling)
px = self.frame[which]
px = px if not freq else px.resample(freq, how='last')
if model == 'bbg' and periods_in_year(px) == 252:
# Bloomberg uses business days, so need to convert and reindex
orig = px.index
px = px.resample('B').ffill()
chg = np.log(px / px.shift(1))
chg[chg.index - orig] = np.nan
if rolling == 'simple':
vol = pd.rolling_std(chg, n, min_periods=min_periods).reindex(orig)
else:
vol = pd.ewmstd(chg, span=n, min_periods=n)
return vol if not ann else vol * np.sqrt(260)
else:
chg = px.pct_change() if model == 'pct' else np.log(px / px.shift(1))
if rolling == 'simple':
vol = pd.rolling_std(chg, n, min_periods=min_periods)
else:
vol = pd.ewmstd(chg, span=n, min_periods=n)
return vol if not ann else vol * np.sqrt(periods_in_year(vol))
|
[
"def",
"volatility",
"(",
"self",
",",
"n",
",",
"freq",
"=",
"None",
",",
"which",
"=",
"'close'",
",",
"ann",
"=",
"True",
",",
"model",
"=",
"'ln'",
",",
"min_periods",
"=",
"1",
",",
"rolling",
"=",
"'simple'",
")",
":",
"if",
"model",
"not",
"in",
"(",
"'bbg'",
",",
"'ln'",
",",
"'pct'",
")",
":",
"raise",
"ValueError",
"(",
"'model must be one of (bbg, ln, pct), not %s'",
"%",
"model",
")",
"if",
"rolling",
"not",
"in",
"(",
"'simple'",
",",
"'exp'",
")",
":",
"raise",
"ValueError",
"(",
"'rolling must be one of (simple, exp), not %s'",
"%",
"rolling",
")",
"px",
"=",
"self",
".",
"frame",
"[",
"which",
"]",
"px",
"=",
"px",
"if",
"not",
"freq",
"else",
"px",
".",
"resample",
"(",
"freq",
",",
"how",
"=",
"'last'",
")",
"if",
"model",
"==",
"'bbg'",
"and",
"periods_in_year",
"(",
"px",
")",
"==",
"252",
":",
"# Bloomberg uses business days, so need to convert and reindex",
"orig",
"=",
"px",
".",
"index",
"px",
"=",
"px",
".",
"resample",
"(",
"'B'",
")",
".",
"ffill",
"(",
")",
"chg",
"=",
"np",
".",
"log",
"(",
"px",
"/",
"px",
".",
"shift",
"(",
"1",
")",
")",
"chg",
"[",
"chg",
".",
"index",
"-",
"orig",
"]",
"=",
"np",
".",
"nan",
"if",
"rolling",
"==",
"'simple'",
":",
"vol",
"=",
"pd",
".",
"rolling_std",
"(",
"chg",
",",
"n",
",",
"min_periods",
"=",
"min_periods",
")",
".",
"reindex",
"(",
"orig",
")",
"else",
":",
"vol",
"=",
"pd",
".",
"ewmstd",
"(",
"chg",
",",
"span",
"=",
"n",
",",
"min_periods",
"=",
"n",
")",
"return",
"vol",
"if",
"not",
"ann",
"else",
"vol",
"*",
"np",
".",
"sqrt",
"(",
"260",
")",
"else",
":",
"chg",
"=",
"px",
".",
"pct_change",
"(",
")",
"if",
"model",
"==",
"'pct'",
"else",
"np",
".",
"log",
"(",
"px",
"/",
"px",
".",
"shift",
"(",
"1",
")",
")",
"if",
"rolling",
"==",
"'simple'",
":",
"vol",
"=",
"pd",
".",
"rolling_std",
"(",
"chg",
",",
"n",
",",
"min_periods",
"=",
"min_periods",
")",
"else",
":",
"vol",
"=",
"pd",
".",
"ewmstd",
"(",
"chg",
",",
"span",
"=",
"n",
",",
"min_periods",
"=",
"n",
")",
"return",
"vol",
"if",
"not",
"ann",
"else",
"vol",
"*",
"np",
".",
"sqrt",
"(",
"periods_in_year",
"(",
"vol",
")",
")"
] | 50.487179 | 20.769231 |
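pd.rolling_std, pd.ewmstd and resample(freq, how='last') come from old pandas releases and were later removed; under modern pandas the corresponding calls would be roughly (a sketch, not a drop-in patch for the snippet above):

px = px.resample(freq).last()                                # was px.resample(freq, how='last')
vol = chg.rolling(window=n, min_periods=min_periods).std()   # was pd.rolling_std(...)
vol = chg.ewm(span=n, min_periods=n).std()                   # was pd.ewmstd(...)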
def get_external_ip():
"""Returns the current external IP, based on http://icanhazip.com/. It will
probably fail if the network setup is too complicated or the service is
down.
"""
response = requests.get('http://icanhazip.com/')
if not response.ok:
raise RuntimeError('Failed to get external ip: %s' % response.content)
return response.content.strip()
|
[
"def",
"get_external_ip",
"(",
")",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"'http://icanhazip.com/'",
")",
"if",
"not",
"response",
".",
"ok",
":",
"raise",
"RuntimeError",
"(",
"'Failed to get external ip: %s'",
"%",
"response",
".",
"content",
")",
"return",
"response",
".",
"content",
".",
"strip",
"(",
")"
] | 42.222222 | 17.333333 |
def import_items(import_directives, style, quiet_load=False):
"""
Import the items in import_directives and return a list of the imported items
    Each item in import_directives should be one of the following forms:
* a tuple like ('module.submodule', ('classname1', 'classname2')), which indicates a 'from module.submodule import classname1, classname2'
* a tuple like ('module.submodule', 'classname1'), which indicates a 'from module.submodule import classname1'
* a tuple like ('module.submodule', '*'), which indicates a 'from module.submodule import *'
* a simple 'module.submodule' which indicates 'import module.submodule'.
Returns a dict mapping the names to the imported items
"""
imported_objects = {}
for directive in import_directives:
try:
# First try a straight import
if isinstance(directive, six.string_types):
imported_object = __import__(directive)
imported_objects[directive.split('.')[0]] = imported_object
if not quiet_load:
print(style.SQL_COLTYPE("import %s" % directive))
continue
elif isinstance(directive, (list, tuple)) and len(directive) == 2:
if not isinstance(directive[0], six.string_types):
if not quiet_load:
print(style.ERROR("Unable to import %r: module name must be of type string" % directive[0]))
continue
if isinstance(directive[1], (list, tuple)) and all(isinstance(e, six.string_types) for e in directive[1]):
# Try the ('module.submodule', ('classname1', 'classname2')) form
imported_object = __import__(directive[0], {}, {}, directive[1])
imported_names = []
for name in directive[1]:
try:
imported_objects[name] = getattr(imported_object, name)
except AttributeError:
if not quiet_load:
print(style.ERROR("Unable to import %r from %r: %r does not exist" % (name, directive[0], name)))
else:
imported_names.append(name)
if not quiet_load:
print(style.SQL_COLTYPE("from %s import %s" % (directive[0], ', '.join(imported_names))))
elif isinstance(directive[1], six.string_types):
                # It is a tuple, but the second item isn't a list, so we have something like ('module.submodule', 'classname1')
# Check for the special '*' to import all
if directive[1] == '*':
imported_object = __import__(directive[0], {}, {}, directive[1])
for k in dir(imported_object):
imported_objects[k] = getattr(imported_object, k)
if not quiet_load:
print(style.SQL_COLTYPE("from %s import *" % directive[0]))
else:
imported_object = getattr(__import__(directive[0], {}, {}, [directive[1]]), directive[1])
imported_objects[directive[1]] = imported_object
if not quiet_load:
print(style.SQL_COLTYPE("from %s import %s" % (directive[0], directive[1])))
else:
if not quiet_load:
print(style.ERROR("Unable to import %r from %r: names must be of type string" % (directive[1], directive[0])))
else:
if not quiet_load:
print(style.ERROR("Unable to import %r: names must be of type string" % directive))
except ImportError:
try:
if not quiet_load:
print(style.ERROR("Unable to import %r" % directive))
except TypeError:
if not quiet_load:
print(style.ERROR("Unable to import %r from %r" % directive))
return imported_objects
|
[
"def",
"import_items",
"(",
"import_directives",
",",
"style",
",",
"quiet_load",
"=",
"False",
")",
":",
"imported_objects",
"=",
"{",
"}",
"for",
"directive",
"in",
"import_directives",
":",
"try",
":",
"# First try a straight import",
"if",
"isinstance",
"(",
"directive",
",",
"six",
".",
"string_types",
")",
":",
"imported_object",
"=",
"__import__",
"(",
"directive",
")",
"imported_objects",
"[",
"directive",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"]",
"=",
"imported_object",
"if",
"not",
"quiet_load",
":",
"print",
"(",
"style",
".",
"SQL_COLTYPE",
"(",
"\"import %s\"",
"%",
"directive",
")",
")",
"continue",
"elif",
"isinstance",
"(",
"directive",
",",
"(",
"list",
",",
"tuple",
")",
")",
"and",
"len",
"(",
"directive",
")",
"==",
"2",
":",
"if",
"not",
"isinstance",
"(",
"directive",
"[",
"0",
"]",
",",
"six",
".",
"string_types",
")",
":",
"if",
"not",
"quiet_load",
":",
"print",
"(",
"style",
".",
"ERROR",
"(",
"\"Unable to import %r: module name must be of type string\"",
"%",
"directive",
"[",
"0",
"]",
")",
")",
"continue",
"if",
"isinstance",
"(",
"directive",
"[",
"1",
"]",
",",
"(",
"list",
",",
"tuple",
")",
")",
"and",
"all",
"(",
"isinstance",
"(",
"e",
",",
"six",
".",
"string_types",
")",
"for",
"e",
"in",
"directive",
"[",
"1",
"]",
")",
":",
"# Try the ('module.submodule', ('classname1', 'classname2')) form",
"imported_object",
"=",
"__import__",
"(",
"directive",
"[",
"0",
"]",
",",
"{",
"}",
",",
"{",
"}",
",",
"directive",
"[",
"1",
"]",
")",
"imported_names",
"=",
"[",
"]",
"for",
"name",
"in",
"directive",
"[",
"1",
"]",
":",
"try",
":",
"imported_objects",
"[",
"name",
"]",
"=",
"getattr",
"(",
"imported_object",
",",
"name",
")",
"except",
"AttributeError",
":",
"if",
"not",
"quiet_load",
":",
"print",
"(",
"style",
".",
"ERROR",
"(",
"\"Unable to import %r from %r: %r does not exist\"",
"%",
"(",
"name",
",",
"directive",
"[",
"0",
"]",
",",
"name",
")",
")",
")",
"else",
":",
"imported_names",
".",
"append",
"(",
"name",
")",
"if",
"not",
"quiet_load",
":",
"print",
"(",
"style",
".",
"SQL_COLTYPE",
"(",
"\"from %s import %s\"",
"%",
"(",
"directive",
"[",
"0",
"]",
",",
"', '",
".",
"join",
"(",
"imported_names",
")",
")",
")",
")",
"elif",
"isinstance",
"(",
"directive",
"[",
"1",
"]",
",",
"six",
".",
"string_types",
")",
":",
"# If it is a tuple, but the second item isn't a list, so we have something like ('module.submodule', 'classname1')",
"# Check for the special '*' to import all",
"if",
"directive",
"[",
"1",
"]",
"==",
"'*'",
":",
"imported_object",
"=",
"__import__",
"(",
"directive",
"[",
"0",
"]",
",",
"{",
"}",
",",
"{",
"}",
",",
"directive",
"[",
"1",
"]",
")",
"for",
"k",
"in",
"dir",
"(",
"imported_object",
")",
":",
"imported_objects",
"[",
"k",
"]",
"=",
"getattr",
"(",
"imported_object",
",",
"k",
")",
"if",
"not",
"quiet_load",
":",
"print",
"(",
"style",
".",
"SQL_COLTYPE",
"(",
"\"from %s import *\"",
"%",
"directive",
"[",
"0",
"]",
")",
")",
"else",
":",
"imported_object",
"=",
"getattr",
"(",
"__import__",
"(",
"directive",
"[",
"0",
"]",
",",
"{",
"}",
",",
"{",
"}",
",",
"[",
"directive",
"[",
"1",
"]",
"]",
")",
",",
"directive",
"[",
"1",
"]",
")",
"imported_objects",
"[",
"directive",
"[",
"1",
"]",
"]",
"=",
"imported_object",
"if",
"not",
"quiet_load",
":",
"print",
"(",
"style",
".",
"SQL_COLTYPE",
"(",
"\"from %s import %s\"",
"%",
"(",
"directive",
"[",
"0",
"]",
",",
"directive",
"[",
"1",
"]",
")",
")",
")",
"else",
":",
"if",
"not",
"quiet_load",
":",
"print",
"(",
"style",
".",
"ERROR",
"(",
"\"Unable to import %r from %r: names must be of type string\"",
"%",
"(",
"directive",
"[",
"1",
"]",
",",
"directive",
"[",
"0",
"]",
")",
")",
")",
"else",
":",
"if",
"not",
"quiet_load",
":",
"print",
"(",
"style",
".",
"ERROR",
"(",
"\"Unable to import %r: names must be of type string\"",
"%",
"directive",
")",
")",
"except",
"ImportError",
":",
"try",
":",
"if",
"not",
"quiet_load",
":",
"print",
"(",
"style",
".",
"ERROR",
"(",
"\"Unable to import %r\"",
"%",
"directive",
")",
")",
"except",
"TypeError",
":",
"if",
"not",
"quiet_load",
":",
"print",
"(",
"style",
".",
"ERROR",
"(",
"\"Unable to import %r from %r\"",
"%",
"directive",
")",
")",
"return",
"imported_objects"
] | 58.571429 | 29.914286 |
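A small usage sketch covering the three directive forms the docstring lists; the style object is a hypothetical no-color stand-in for Django's output style:

class PlainStyle:
    """Hypothetical stand-in: pass text through unstyled."""
    SQL_COLTYPE = staticmethod(lambda s: s)
    ERROR = staticmethod(lambda s: s)

imported = import_items(
    [
        'os.path',                       # import os.path
        ('collections', 'OrderedDict'),  # from collections import OrderedDict
        ('json', ('dumps', 'loads')),    # from json import dumps, loads
    ],
    PlainStyle(),
    quiet_load=True,
)
assert 'OrderedDict' in imported and 'dumps' in imported and 'loads' in imported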
def neg(self):
"""Negative value of all components."""
self.x = -self.x
self.y = -self.y
self.z = -self.z
|
[
"def",
"neg",
"(",
"self",
")",
":",
"self",
".",
"x",
"=",
"-",
"self",
".",
"x",
"self",
".",
"y",
"=",
"-",
"self",
".",
"y",
"self",
".",
"z",
"=",
"-",
"self",
".",
"z"
] | 26.6 | 14.8 |
def update_edit_menu(self):
"""Update edit menu"""
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
# Editor has focus and there is no file opened in it
if not console and not_readonly and not self.editor.is_file_opened():
return
# Disabling all actions to begin with
for child in self.edit_menu.actions():
child.setEnabled(False)
self.selectall_action.setEnabled(True)
# Undo, redo
self.undo_action.setEnabled( readwrite_editor \
and widget.document().isUndoAvailable() )
self.redo_action.setEnabled( readwrite_editor \
and widget.document().isRedoAvailable() )
# Copy, cut, paste, delete
has_selection = widget.has_selected_text()
self.copy_action.setEnabled(has_selection)
self.cut_action.setEnabled(has_selection and not_readonly)
self.paste_action.setEnabled(not_readonly)
# Comment, uncomment, indent, unindent...
if not console and not_readonly:
# This is the editor and current file is writable
for action in self.editor.edit_menu_actions:
action.setEnabled(True)
|
[
"def",
"update_edit_menu",
"(",
"self",
")",
":",
"widget",
",",
"textedit_properties",
"=",
"self",
".",
"get_focus_widget_properties",
"(",
")",
"if",
"textedit_properties",
"is",
"None",
":",
"# widget is not an editor/console\r",
"return",
"# !!! Below this line, widget is expected to be a QPlainTextEdit\r",
"# instance\r",
"console",
",",
"not_readonly",
",",
"readwrite_editor",
"=",
"textedit_properties",
"# Editor has focus and there is no file opened in it\r",
"if",
"not",
"console",
"and",
"not_readonly",
"and",
"not",
"self",
".",
"editor",
".",
"is_file_opened",
"(",
")",
":",
"return",
"# Disabling all actions to begin with\r",
"for",
"child",
"in",
"self",
".",
"edit_menu",
".",
"actions",
"(",
")",
":",
"child",
".",
"setEnabled",
"(",
"False",
")",
"self",
".",
"selectall_action",
".",
"setEnabled",
"(",
"True",
")",
"# Undo, redo\r",
"self",
".",
"undo_action",
".",
"setEnabled",
"(",
"readwrite_editor",
"and",
"widget",
".",
"document",
"(",
")",
".",
"isUndoAvailable",
"(",
")",
")",
"self",
".",
"redo_action",
".",
"setEnabled",
"(",
"readwrite_editor",
"and",
"widget",
".",
"document",
"(",
")",
".",
"isRedoAvailable",
"(",
")",
")",
"# Copy, cut, paste, delete\r",
"has_selection",
"=",
"widget",
".",
"has_selected_text",
"(",
")",
"self",
".",
"copy_action",
".",
"setEnabled",
"(",
"has_selection",
")",
"self",
".",
"cut_action",
".",
"setEnabled",
"(",
"has_selection",
"and",
"not_readonly",
")",
"self",
".",
"paste_action",
".",
"setEnabled",
"(",
"not_readonly",
")",
"# Comment, uncomment, indent, unindent...\r",
"if",
"not",
"console",
"and",
"not_readonly",
":",
"# This is the editor and current file is writable\r",
"for",
"action",
"in",
"self",
".",
"editor",
".",
"edit_menu_actions",
":",
"action",
".",
"setEnabled",
"(",
"True",
")"
] | 42.388889 | 21.194444 |
def _check_err(resp, url_suffix, data, allow_pagination):
"""
Raise DataServiceError if the response wasn't successful.
:param resp: requests.Response back from the request
:param url_suffix: str url to include in an error message
:param data: data payload we sent
:param allow_pagination: when False and response headers contains 'x-total-pages' raises an error.
:return: requests.Response containing the successful result
"""
total_pages = resp.headers.get('x-total-pages')
if not allow_pagination and total_pages:
raise UnexpectedPagingReceivedError()
if 200 <= resp.status_code < 300:
return resp
if resp.status_code == 404:
if resp.json().get("code") == "resource_not_consistent":
raise DSResourceNotConsistentError(resp, url_suffix, data)
raise DataServiceError(resp, url_suffix, data)
|
[
"def",
"_check_err",
"(",
"resp",
",",
"url_suffix",
",",
"data",
",",
"allow_pagination",
")",
":",
"total_pages",
"=",
"resp",
".",
"headers",
".",
"get",
"(",
"'x-total-pages'",
")",
"if",
"not",
"allow_pagination",
"and",
"total_pages",
":",
"raise",
"UnexpectedPagingReceivedError",
"(",
")",
"if",
"200",
"<=",
"resp",
".",
"status_code",
"<",
"300",
":",
"return",
"resp",
"if",
"resp",
".",
"status_code",
"==",
"404",
":",
"if",
"resp",
".",
"json",
"(",
")",
".",
"get",
"(",
"\"code\"",
")",
"==",
"\"resource_not_consistent\"",
":",
"raise",
"DSResourceNotConsistentError",
"(",
"resp",
",",
"url_suffix",
",",
"data",
")",
"raise",
"DataServiceError",
"(",
"resp",
",",
"url_suffix",
",",
"data",
")"
] | 51.666667 | 17.333333 |
def _throat_props(self):
r'''
Helper Function to calculate the throat normal vectors
'''
network = self.network
net_Ts = network.throats(self.name)
conns = network['throat.conns'][net_Ts]
p1 = conns[:, 0]
p2 = conns[:, 1]
coords = network['pore.coords']
normals = tr.unit_vector(coords[p2]-coords[p1])
self['throat.normal'] = normals
self['throat.centroid'] = (coords[p1] + coords[p2])/2
self['throat.incenter'] = self['throat.centroid']
|
[
"def",
"_throat_props",
"(",
"self",
")",
":",
"network",
"=",
"self",
".",
"network",
"net_Ts",
"=",
"network",
".",
"throats",
"(",
"self",
".",
"name",
")",
"conns",
"=",
"network",
"[",
"'throat.conns'",
"]",
"[",
"net_Ts",
"]",
"p1",
"=",
"conns",
"[",
":",
",",
"0",
"]",
"p2",
"=",
"conns",
"[",
":",
",",
"1",
"]",
"coords",
"=",
"network",
"[",
"'pore.coords'",
"]",
"normals",
"=",
"tr",
".",
"unit_vector",
"(",
"coords",
"[",
"p2",
"]",
"-",
"coords",
"[",
"p1",
"]",
")",
"self",
"[",
"'throat.normal'",
"]",
"=",
"normals",
"self",
"[",
"'throat.centroid'",
"]",
"=",
"(",
"coords",
"[",
"p1",
"]",
"+",
"coords",
"[",
"p2",
"]",
")",
"/",
"2",
"self",
"[",
"'throat.incenter'",
"]",
"=",
"self",
"[",
"'throat.centroid'",
"]"
] | 37.714286 | 14.428571 |
def cdf(self, y, f, var):
r"""
        Cumulative distribution function of the likelihood.
Parameters
----------
y: ndarray
query quantiles, i.e.\ :math:`P(Y \leq y)`.
f: ndarray
latent function from the GLM prior (:math:`\mathbf{f} =
\boldsymbol\Phi \mathbf{w}`)
var: float, ndarray, optional
The variance of the distribution, if not input, the initial value
of variance is used.
Returns
-------
cdf: ndarray
            Cumulative distribution function evaluated at y.
"""
var = self._check_param(var)
return norm.cdf(y, loc=f, scale=np.sqrt(var))
|
[
"def",
"cdf",
"(",
"self",
",",
"y",
",",
"f",
",",
"var",
")",
":",
"var",
"=",
"self",
".",
"_check_param",
"(",
"var",
")",
"return",
"norm",
".",
"cdf",
"(",
"y",
",",
"loc",
"=",
"f",
",",
"scale",
"=",
"np",
".",
"sqrt",
"(",
"var",
")",
")"
] | 30.772727 | 17.727273 |
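A quick numerical check against the standard normal (f = 0, var = 1): the CDF at 0 is 0.5, and the ±1 quantiles land at the familiar 15.9%/84.1%:

import numpy as np
from scipy.stats import norm

y = np.array([-1.0, 0.0, 1.0])
print(norm.cdf(y, loc=0.0, scale=np.sqrt(1.0)))
# [0.15865525 0.5        0.84134475]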
def part(self, channel, reason=''):
"""
Part a channel.
Required arguments:
* channel - Channel to part.
Optional arguments:
* reason='' - Reason for parting.
"""
with self.lock:
self.is_in_channel(channel)
self.send('PART %s :%s' % (channel, reason))
msg = self._recv(expected_replies=('PART',))
if msg[0] == 'PART':
del self.channels[msg[1]]
if not self.hide_called_events:
self.stepback()
|
[
"def",
"part",
"(",
"self",
",",
"channel",
",",
"reason",
"=",
"''",
")",
":",
"with",
"self",
".",
"lock",
":",
"self",
".",
"is_in_channel",
"(",
"channel",
")",
"self",
".",
"send",
"(",
"'PART %s :%s'",
"%",
"(",
"channel",
",",
"reason",
")",
")",
"msg",
"=",
"self",
".",
"_recv",
"(",
"expected_replies",
"=",
"(",
"'PART'",
",",
")",
")",
"if",
"msg",
"[",
"0",
"]",
"==",
"'PART'",
":",
"del",
"self",
".",
"channels",
"[",
"msg",
"[",
"1",
"]",
"]",
"if",
"not",
"self",
".",
"hide_called_events",
":",
"self",
".",
"stepback",
"(",
")"
] | 31.764706 | 9.647059 |
def remove_user(config, group, username):
"""Remove specified user from specified group."""
client = Client()
client.prepare_connection()
group_api = API(client)
try:
group_api.remove_user(group, username)
except ldap_tools.exceptions.NoGroupsFound: # pragma: no cover
print("Group ({}) not found".format(group))
except ldap_tools.exceptions.TooManyResults: # pragma: no cover
print("Query for group ({}) returned multiple results.".format(
group))
except ldap3.NO_SUCH_ATTRIBUTE: # pragma: no cover
print("{} does not exist in {}".format(username, group))
|
[
"def",
"remove_user",
"(",
"config",
",",
"group",
",",
"username",
")",
":",
"client",
"=",
"Client",
"(",
")",
"client",
".",
"prepare_connection",
"(",
")",
"group_api",
"=",
"API",
"(",
"client",
")",
"try",
":",
"group_api",
".",
"remove_user",
"(",
"group",
",",
"username",
")",
"except",
"ldap_tools",
".",
"exceptions",
".",
"NoGroupsFound",
":",
"# pragma: no cover",
"print",
"(",
"\"Group ({}) not found\"",
".",
"format",
"(",
"group",
")",
")",
"except",
"ldap_tools",
".",
"exceptions",
".",
"TooManyResults",
":",
"# pragma: no cover",
"print",
"(",
"\"Query for group ({}) returned multiple results.\"",
".",
"format",
"(",
"group",
")",
")",
"except",
"ldap3",
".",
"NO_SUCH_ATTRIBUTE",
":",
"# pragma: no cover",
"print",
"(",
"\"{} does not exist in {}\"",
".",
"format",
"(",
"username",
",",
"group",
")",
")"
] | 48.142857 | 17.5 |
def _generate_metrics(bam_fname, config_file, ref_file,
bait_file, target_file):
"""Run Picard commands to generate metrics files when missing.
"""
with open(config_file) as in_handle:
config = yaml.safe_load(in_handle)
broad_runner = broad.runner_from_config(config)
bam_fname = os.path.abspath(bam_fname)
path = os.path.dirname(bam_fname)
out_dir = os.path.join(path, "metrics")
utils.safe_makedir(out_dir)
with utils.chdir(out_dir):
with tx_tmpdir() as tmp_dir:
cur_bam = os.path.basename(bam_fname)
if not os.path.exists(cur_bam):
os.symlink(bam_fname, cur_bam)
gen_metrics = PicardMetrics(broad_runner, tmp_dir)
gen_metrics.report(cur_bam, ref_file,
_bam_is_paired(bam_fname),
bait_file, target_file)
return out_dir
|
[
"def",
"_generate_metrics",
"(",
"bam_fname",
",",
"config_file",
",",
"ref_file",
",",
"bait_file",
",",
"target_file",
")",
":",
"with",
"open",
"(",
"config_file",
")",
"as",
"in_handle",
":",
"config",
"=",
"yaml",
".",
"safe_load",
"(",
"in_handle",
")",
"broad_runner",
"=",
"broad",
".",
"runner_from_config",
"(",
"config",
")",
"bam_fname",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"bam_fname",
")",
"path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"bam_fname",
")",
"out_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"\"metrics\"",
")",
"utils",
".",
"safe_makedir",
"(",
"out_dir",
")",
"with",
"utils",
".",
"chdir",
"(",
"out_dir",
")",
":",
"with",
"tx_tmpdir",
"(",
")",
"as",
"tmp_dir",
":",
"cur_bam",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"bam_fname",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"cur_bam",
")",
":",
"os",
".",
"symlink",
"(",
"bam_fname",
",",
"cur_bam",
")",
"gen_metrics",
"=",
"PicardMetrics",
"(",
"broad_runner",
",",
"tmp_dir",
")",
"gen_metrics",
".",
"report",
"(",
"cur_bam",
",",
"ref_file",
",",
"_bam_is_paired",
"(",
"bam_fname",
")",
",",
"bait_file",
",",
"target_file",
")",
"return",
"out_dir"
] | 43.047619 | 7.952381 |
def array_to_encoded(array, dtype=None, encoding='base64'):
"""
Export a numpy array to a compact serializable dictionary.
Parameters
------------
array : array
Any numpy array
dtype : str or None
Optional dtype to encode array
encoding : str
'base64' or 'binary'
Returns
---------
encoded : dict
Has keys:
'dtype': str, of dtype
'shape': tuple of shape
'base64': str, base64 encoded string
"""
array = np.asanyarray(array)
shape = array.shape
# ravel also forces contiguous
flat = np.ravel(array)
if dtype is None:
dtype = array.dtype
encoded = {'dtype': np.dtype(dtype).str,
'shape': shape}
if encoding in ['base64', 'dict64']:
packed = base64.b64encode(flat.astype(dtype).tostring())
if hasattr(packed, 'decode'):
packed = packed.decode('utf-8')
encoded['base64'] = packed
elif encoding == 'binary':
encoded['binary'] = array.tostring(order='C')
else:
raise ValueError('encoding {} is not available!'.format(encoding))
return encoded
|
[
"def",
"array_to_encoded",
"(",
"array",
",",
"dtype",
"=",
"None",
",",
"encoding",
"=",
"'base64'",
")",
":",
"array",
"=",
"np",
".",
"asanyarray",
"(",
"array",
")",
"shape",
"=",
"array",
".",
"shape",
"# ravel also forces contiguous",
"flat",
"=",
"np",
".",
"ravel",
"(",
"array",
")",
"if",
"dtype",
"is",
"None",
":",
"dtype",
"=",
"array",
".",
"dtype",
"encoded",
"=",
"{",
"'dtype'",
":",
"np",
".",
"dtype",
"(",
"dtype",
")",
".",
"str",
",",
"'shape'",
":",
"shape",
"}",
"if",
"encoding",
"in",
"[",
"'base64'",
",",
"'dict64'",
"]",
":",
"packed",
"=",
"base64",
".",
"b64encode",
"(",
"flat",
".",
"astype",
"(",
"dtype",
")",
".",
"tostring",
"(",
")",
")",
"if",
"hasattr",
"(",
"packed",
",",
"'decode'",
")",
":",
"packed",
"=",
"packed",
".",
"decode",
"(",
"'utf-8'",
")",
"encoded",
"[",
"'base64'",
"]",
"=",
"packed",
"elif",
"encoding",
"==",
"'binary'",
":",
"encoded",
"[",
"'binary'",
"]",
"=",
"array",
".",
"tostring",
"(",
"order",
"=",
"'C'",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'encoding {} is not available!'",
".",
"format",
"(",
"encoding",
")",
")",
"return",
"encoded"
] | 27.575 | 16.825 |
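A round-trip sketch showing what the base64 form carries and how to decode it; the inline decoder below relies only on the dict produced above (trimesh also ships a matching decoder, not shown here — and note that ndarray.tostring, used above, was removed in NumPy 2.0, so this assumes an older NumPy):

import base64
import numpy as np

a = np.arange(6, dtype=np.int64).reshape(2, 3)
enc = array_to_encoded(a)    # {'dtype': '<i8', 'shape': (2, 3), 'base64': '...'}

raw = base64.b64decode(enc['base64'])
b = np.frombuffer(raw, dtype=enc['dtype']).reshape(enc['shape'])
assert (a == b).all()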
def prune_by_ngram_count(self, minimum=None, maximum=None, label=None):
"""Removes results rows whose total n-gram count (across all
works bearing this n-gram) is outside the range specified by
`minimum` and `maximum`.
For each text, the count used as part of the sum across all
works is the maximum count across the witnesses for that work.
If `label` is specified, the works checked are restricted to
those associated with `label`.
:param minimum: minimum n-gram count
:type minimum: `int`
:param maximum: maximum n-gram count
:type maximum: `int`
:param label: optional label to restrict requirement to
:type label: `str`
"""
self._logger.info('Pruning results by n-gram count')
def calculate_total(group):
work_grouped = group.groupby(constants.WORK_FIELDNAME, sort=False)
total_count = work_grouped[constants.COUNT_FIELDNAME].max().sum()
group['total_count'] = pd.Series([total_count] * len(group.index),
index=group.index)
return group
# self._matches may be empty, in which case not only is there
# no point trying to do the pruning, but it will raise an
# exception due to referencing the column 'total_count' which
# won't have been added. Therefore just return immediately.
if self._matches.empty:
return
matches = self._matches
if label is not None:
matches = matches[matches[constants.LABEL_FIELDNAME] == label]
matches = matches.groupby(
constants.NGRAM_FIELDNAME, sort=False).apply(calculate_total)
ngrams = None
if minimum:
ngrams = matches[matches['total_count'] >= minimum][
constants.NGRAM_FIELDNAME].unique()
if maximum:
max_ngrams = matches[matches['total_count'] <= maximum][
constants.NGRAM_FIELDNAME].unique()
if ngrams is None:
ngrams = max_ngrams
else:
ngrams = list(set(ngrams) & set(max_ngrams))
self._matches = self._matches[
self._matches[constants.NGRAM_FIELDNAME].isin(ngrams)]
|
[
"def",
"prune_by_ngram_count",
"(",
"self",
",",
"minimum",
"=",
"None",
",",
"maximum",
"=",
"None",
",",
"label",
"=",
"None",
")",
":",
"self",
".",
"_logger",
".",
"info",
"(",
"'Pruning results by n-gram count'",
")",
"def",
"calculate_total",
"(",
"group",
")",
":",
"work_grouped",
"=",
"group",
".",
"groupby",
"(",
"constants",
".",
"WORK_FIELDNAME",
",",
"sort",
"=",
"False",
")",
"total_count",
"=",
"work_grouped",
"[",
"constants",
".",
"COUNT_FIELDNAME",
"]",
".",
"max",
"(",
")",
".",
"sum",
"(",
")",
"group",
"[",
"'total_count'",
"]",
"=",
"pd",
".",
"Series",
"(",
"[",
"total_count",
"]",
"*",
"len",
"(",
"group",
".",
"index",
")",
",",
"index",
"=",
"group",
".",
"index",
")",
"return",
"group",
"# self._matches may be empty, in which case not only is there",
"# no point trying to do the pruning, but it will raise an",
"# exception due to referencing the column 'total_count' which",
"# won't have been added. Therefore just return immediately.",
"if",
"self",
".",
"_matches",
".",
"empty",
":",
"return",
"matches",
"=",
"self",
".",
"_matches",
"if",
"label",
"is",
"not",
"None",
":",
"matches",
"=",
"matches",
"[",
"matches",
"[",
"constants",
".",
"LABEL_FIELDNAME",
"]",
"==",
"label",
"]",
"matches",
"=",
"matches",
".",
"groupby",
"(",
"constants",
".",
"NGRAM_FIELDNAME",
",",
"sort",
"=",
"False",
")",
".",
"apply",
"(",
"calculate_total",
")",
"ngrams",
"=",
"None",
"if",
"minimum",
":",
"ngrams",
"=",
"matches",
"[",
"matches",
"[",
"'total_count'",
"]",
">=",
"minimum",
"]",
"[",
"constants",
".",
"NGRAM_FIELDNAME",
"]",
".",
"unique",
"(",
")",
"if",
"maximum",
":",
"max_ngrams",
"=",
"matches",
"[",
"matches",
"[",
"'total_count'",
"]",
"<=",
"maximum",
"]",
"[",
"constants",
".",
"NGRAM_FIELDNAME",
"]",
".",
"unique",
"(",
")",
"if",
"ngrams",
"is",
"None",
":",
"ngrams",
"=",
"max_ngrams",
"else",
":",
"ngrams",
"=",
"list",
"(",
"set",
"(",
"ngrams",
")",
"&",
"set",
"(",
"max_ngrams",
")",
")",
"self",
".",
"_matches",
"=",
"self",
".",
"_matches",
"[",
"self",
".",
"_matches",
"[",
"constants",
".",
"NGRAM_FIELDNAME",
"]",
".",
"isin",
"(",
"ngrams",
")",
"]"
] | 43.076923 | 21.057692 |
def extract(data, defs, byteoffset=0):
"""
Extract fields from data into a structure based on field definitions in defs.
byteoffset is added to each local byte offset to get the byte offset returned for each field.
defs is a list of lists comprising start, width in bits, format, nickname, description.
field start is either a byte number or a tuple with byte number and bit number.
Return a ListDict of Fields.
"""
retval = ListDict()
for fielddef in defs:
start, width, form, name, desc = fielddef
if form == "int":
if type(start) == type(0):
# It's a number. Convert it into a (bytenum,bitnum) tuple.
start = (start,7)
ix, bitnum = start
val = 0
while (width > 0):
if bitnum == 7 and width >= 8:
val = (val << 8) | ord(data[ix])
ix += 1
width -= 8
else:
lastbit = bitnum+1 - width
if lastbit < 0:
lastbit = 0
thiswidth = bitnum+1 - lastbit
val = (val << thiswidth) | ((ord(data[ix]) >> lastbit) & ((1<<thiswidth)-1))
bitnum = 7
ix += 1
width -= thiswidth
retval.append(Cmd.Field(val, byteoffset+start[0], name, desc), name)
elif form == "str":
assert(type(start) == type(0))
assert(width % 8 == 0)
retval.append(Cmd.Field(data[start:start+width/8], byteoffset+start, name, desc), name)
else:
# error in form
pass
return retval
|
[
"def",
"extract",
"(",
"data",
",",
"defs",
",",
"byteoffset",
"=",
"0",
")",
":",
"retval",
"=",
"ListDict",
"(",
")",
"for",
"fielddef",
"in",
"defs",
":",
"start",
",",
"width",
",",
"form",
",",
"name",
",",
"desc",
"=",
"fielddef",
"if",
"form",
"==",
"\"int\"",
":",
"if",
"type",
"(",
"start",
")",
"==",
"type",
"(",
"0",
")",
":",
"# It's a number. Convert it into a (bytenum,bitnum) tuple.",
"start",
"=",
"(",
"start",
",",
"7",
")",
"ix",
",",
"bitnum",
"=",
"start",
"val",
"=",
"0",
"while",
"(",
"width",
">",
"0",
")",
":",
"if",
"bitnum",
"==",
"7",
"and",
"width",
">=",
"8",
":",
"val",
"=",
"(",
"val",
"<<",
"8",
")",
"|",
"ord",
"(",
"data",
"[",
"ix",
"]",
")",
"ix",
"+=",
"1",
"width",
"-=",
"8",
"else",
":",
"lastbit",
"=",
"bitnum",
"+",
"1",
"-",
"width",
"if",
"lastbit",
"<",
"0",
":",
"lastbit",
"=",
"0",
"thiswidth",
"=",
"bitnum",
"+",
"1",
"-",
"lastbit",
"val",
"=",
"(",
"val",
"<<",
"thiswidth",
")",
"|",
"(",
"(",
"ord",
"(",
"data",
"[",
"ix",
"]",
")",
">>",
"lastbit",
")",
"&",
"(",
"(",
"1",
"<<",
"thiswidth",
")",
"-",
"1",
")",
")",
"bitnum",
"=",
"7",
"ix",
"+=",
"1",
"width",
"-=",
"thiswidth",
"retval",
".",
"append",
"(",
"Cmd",
".",
"Field",
"(",
"val",
",",
"byteoffset",
"+",
"start",
"[",
"0",
"]",
",",
"name",
",",
"desc",
")",
",",
"name",
")",
"elif",
"form",
"==",
"\"str\"",
":",
"assert",
"(",
"type",
"(",
"start",
")",
"==",
"type",
"(",
"0",
")",
")",
"assert",
"(",
"width",
"%",
"8",
"==",
"0",
")",
"retval",
".",
"append",
"(",
"Cmd",
".",
"Field",
"(",
"data",
"[",
"start",
":",
"start",
"+",
"width",
"/",
"8",
"]",
",",
"byteoffset",
"+",
"start",
",",
"name",
",",
"desc",
")",
",",
"name",
")",
"else",
":",
"# error in form",
"pass",
"return",
"retval"
] | 43.023256 | 18.232558 |
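The function is Python 2 (ord() over a byte string, integer width/8), but the bit-window arithmetic is easy to verify in isolation: the high nibble of 0xAB — start=(0, 7), width=4 — should come out as 0xA. A Python 3 sketch of just the inner loop:

data = bytes([0xAB])
ix, bitnum, width, val = 0, 7, 4, 0
while width > 0:
    if bitnum == 7 and width >= 8:
        val = (val << 8) | data[ix]     # consume a whole aligned byte
        ix += 1
        width -= 8
    else:
        lastbit = max(bitnum + 1 - width, 0)
        thiswidth = bitnum + 1 - lastbit
        val = (val << thiswidth) | ((data[ix] >> lastbit) & ((1 << thiswidth) - 1))
        bitnum, ix, width = 7, ix + 1, width - thiswidth
print(hex(val))   # 0xa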
def give_us_somethin_to_talk_about(self, message):
"""new topic: set the room topic to a random conversation starter."""
r = requests.get("http://www.chatoms.com/chatom.json?Normal=1&Fun=2&Philosophy=3&Out+There=4")
data = r.json()
self.set_topic(data["text"], message=message)
|
[
"def",
"give_us_somethin_to_talk_about",
"(",
"self",
",",
"message",
")",
":",
"r",
"=",
"requests",
".",
"get",
"(",
"\"http://www.chatoms.com/chatom.json?Normal=1&Fun=2&Philosophy=3&Out+There=4\"",
")",
"data",
"=",
"r",
".",
"json",
"(",
")",
"self",
".",
"set_topic",
"(",
"data",
"[",
"\"text\"",
"]",
",",
"message",
"=",
"message",
")"
] | 61 | 20.4 |
def remove(self):
"""
Remove a cursor.
Remove all `Selection`\\s, disconnect all callbacks, and allow the
cursor to be garbage collected.
"""
for disconnectors in self._disconnectors:
disconnectors()
for sel in self.selections:
self.remove_selection(sel)
for s in type(self)._keep_alive.values():
with suppress(KeyError):
s.remove(self)
|
[
"def",
"remove",
"(",
"self",
")",
":",
"for",
"disconnectors",
"in",
"self",
".",
"_disconnectors",
":",
"disconnectors",
"(",
")",
"for",
"sel",
"in",
"self",
".",
"selections",
":",
"self",
".",
"remove_selection",
"(",
"sel",
")",
"for",
"s",
"in",
"type",
"(",
"self",
")",
".",
"_keep_alive",
".",
"values",
"(",
")",
":",
"with",
"suppress",
"(",
"KeyError",
")",
":",
"s",
".",
"remove",
"(",
"self",
")"
] | 31.428571 | 11.857143 |
def identify(file_elements):
"""
Outputs an ordered sequence of instances of TopLevel types.
Elements start with an optional TableElement, followed by zero or more pairs of (TableHeaderElement, TableElement).
"""
if not file_elements:
return
_validate_file_elements(file_elements)
# An iterator over enumerate(the non-metadata) elements
iterator = PeekableIterator((element_i, element) for (element_i, element) in enumerate(file_elements)
if element.type != elements.TYPE_METADATA)
try:
_, first_element = iterator.peek()
if isinstance(first_element, TableElement):
iterator.next()
yield AnonymousTable(first_element)
except KeyError:
pass
except StopIteration:
return
for element_i, element in iterator:
if not isinstance(element, TableHeaderElement):
continue
# If TableHeader of a regular table, return Table following it
if not element.is_array_of_tables:
table_element_i, table_element = next(iterator)
yield Table(names=element.names, table_element=table_element)
# If TableHeader of an array of tables, do your thing
else:
table_element_i, table_element = next(iterator)
yield ArrayOfTables(names=element.names, table_element=table_element)
|
[
"def",
"identify",
"(",
"file_elements",
")",
":",
"if",
"not",
"file_elements",
":",
"return",
"_validate_file_elements",
"(",
"file_elements",
")",
"# An iterator over enumerate(the non-metadata) elements",
"iterator",
"=",
"PeekableIterator",
"(",
"(",
"element_i",
",",
"element",
")",
"for",
"(",
"element_i",
",",
"element",
")",
"in",
"enumerate",
"(",
"file_elements",
")",
"if",
"element",
".",
"type",
"!=",
"elements",
".",
"TYPE_METADATA",
")",
"try",
":",
"_",
",",
"first_element",
"=",
"iterator",
".",
"peek",
"(",
")",
"if",
"isinstance",
"(",
"first_element",
",",
"TableElement",
")",
":",
"iterator",
".",
"next",
"(",
")",
"yield",
"AnonymousTable",
"(",
"first_element",
")",
"except",
"KeyError",
":",
"pass",
"except",
"StopIteration",
":",
"return",
"for",
"element_i",
",",
"element",
"in",
"iterator",
":",
"if",
"not",
"isinstance",
"(",
"element",
",",
"TableHeaderElement",
")",
":",
"continue",
"# If TableHeader of a regular table, return Table following it",
"if",
"not",
"element",
".",
"is_array_of_tables",
":",
"table_element_i",
",",
"table_element",
"=",
"next",
"(",
"iterator",
")",
"yield",
"Table",
"(",
"names",
"=",
"element",
".",
"names",
",",
"table_element",
"=",
"table_element",
")",
"# If TableHeader of an array of tables, do your thing",
"else",
":",
"table_element_i",
",",
"table_element",
"=",
"next",
"(",
"iterator",
")",
"yield",
"ArrayOfTables",
"(",
"names",
"=",
"element",
".",
"names",
",",
"table_element",
"=",
"table_element",
")"
] | 34.025 | 25.425 |
def norm_package_version(version):
"""Normalize a version by removing extra spaces and parentheses."""
if version:
version = ','.join(v.strip() for v in version.split(',')).strip()
if version.startswith('(') and version.endswith(')'):
version = version[1:-1]
version = ''.join(v for v in version if v.strip())
else:
version = ''
return version
|
[
"def",
"norm_package_version",
"(",
"version",
")",
":",
"if",
"version",
":",
"version",
"=",
"','",
".",
"join",
"(",
"v",
".",
"strip",
"(",
")",
"for",
"v",
"in",
"version",
".",
"split",
"(",
"','",
")",
")",
".",
"strip",
"(",
")",
"if",
"version",
".",
"startswith",
"(",
"'('",
")",
"and",
"version",
".",
"endswith",
"(",
"')'",
")",
":",
"version",
"=",
"version",
"[",
"1",
":",
"-",
"1",
"]",
"version",
"=",
"''",
".",
"join",
"(",
"v",
"for",
"v",
"in",
"version",
"if",
"v",
".",
"strip",
"(",
")",
")",
"else",
":",
"version",
"=",
"''",
"return",
"version"
] | 30.307692 | 23.153846 |
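A quick usage sketch for the row above, assuming norm_package_version is importable from its module; the outputs follow directly from the split/strip/join logic.

# Usage sketch (assumes norm_package_version from the row above is in scope).
print(norm_package_version('( >= 1.0 , < 2.0 )'))  # -> '>=1.0,<2.0'
print(norm_package_version(None))                  # -> ''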
def cmd_ssh(self, argv, help):
"""Log into the instance with ssh using the automatically generated known hosts"""
parser = argparse.ArgumentParser(
prog="%s ssh" % self.progname,
description=help,
)
instances = self.get_instances(command='init_ssh_key')
parser.add_argument("instance", nargs=1,
metavar="instance",
help="Name of the instance from the config.",
choices=sorted(instances))
parser.add_argument("...", nargs=argparse.REMAINDER,
help="ssh options")
iargs = enumerate(argv)
sid_index = None
user = None
for i, arg in iargs:
if not arg.startswith('-'):
sid_index = i
break
if arg[1] in '1246AaCfgKkMNnqsTtVvXxYy':
continue
elif arg[1] in 'bcDeFiLlmOopRSw':
value = iargs.next()
if arg[1] == 'l':
user = value[1]
continue
# fake parsing for nice error messages
if sid_index is None:
parser.parse_args([])
else:
sid = argv[sid_index]
if '@' in sid:
user, sid = sid.split('@', 1)
parser.parse_args([sid])
instance = instances[sid]
if user is None:
user = instance.config.get('user')
try:
ssh_info = instance.init_ssh_key(user=user)
except (instance.paramiko.SSHException, socket.error) as e:
log.error("Couldn't validate fingerprint for ssh connection.")
log.error(unicode(e))
log.error("Is the instance finished starting up?")
sys.exit(1)
client = ssh_info['client']
client.get_transport().sock.close()
client.close()
argv[sid_index:sid_index + 1] = instance.ssh_args_from_info(ssh_info)
argv[0:0] = ['ssh']
os.execvp('ssh', argv)
|
[
"def",
"cmd_ssh",
"(",
"self",
",",
"argv",
",",
"help",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"prog",
"=",
"\"%s ssh\"",
"%",
"self",
".",
"progname",
",",
"description",
"=",
"help",
",",
")",
"instances",
"=",
"self",
".",
"get_instances",
"(",
"command",
"=",
"'init_ssh_key'",
")",
"parser",
".",
"add_argument",
"(",
"\"instance\"",
",",
"nargs",
"=",
"1",
",",
"metavar",
"=",
"\"instance\"",
",",
"help",
"=",
"\"Name of the instance from the config.\"",
",",
"choices",
"=",
"sorted",
"(",
"instances",
")",
")",
"parser",
".",
"add_argument",
"(",
"\"...\"",
",",
"nargs",
"=",
"argparse",
".",
"REMAINDER",
",",
"help",
"=",
"\"ssh options\"",
")",
"iargs",
"=",
"enumerate",
"(",
"argv",
")",
"sid_index",
"=",
"None",
"user",
"=",
"None",
"for",
"i",
",",
"arg",
"in",
"iargs",
":",
"if",
"not",
"arg",
".",
"startswith",
"(",
"'-'",
")",
":",
"sid_index",
"=",
"i",
"break",
"if",
"arg",
"[",
"1",
"]",
"in",
"'1246AaCfgKkMNnqsTtVvXxYy'",
":",
"continue",
"elif",
"arg",
"[",
"1",
"]",
"in",
"'bcDeFiLlmOopRSw'",
":",
"value",
"=",
"iargs",
".",
"next",
"(",
")",
"if",
"arg",
"[",
"1",
"]",
"==",
"'l'",
":",
"user",
"=",
"value",
"[",
"1",
"]",
"continue",
"# fake parsing for nice error messages",
"if",
"sid_index",
"is",
"None",
":",
"parser",
".",
"parse_args",
"(",
"[",
"]",
")",
"else",
":",
"sid",
"=",
"argv",
"[",
"sid_index",
"]",
"if",
"'@'",
"in",
"sid",
":",
"user",
",",
"sid",
"=",
"sid",
".",
"split",
"(",
"'@'",
",",
"1",
")",
"parser",
".",
"parse_args",
"(",
"[",
"sid",
"]",
")",
"instance",
"=",
"instances",
"[",
"sid",
"]",
"if",
"user",
"is",
"None",
":",
"user",
"=",
"instance",
".",
"config",
".",
"get",
"(",
"'user'",
")",
"try",
":",
"ssh_info",
"=",
"instance",
".",
"init_ssh_key",
"(",
"user",
"=",
"user",
")",
"except",
"(",
"instance",
".",
"paramiko",
".",
"SSHException",
",",
"socket",
".",
"error",
")",
"as",
"e",
":",
"log",
".",
"error",
"(",
"\"Couldn't validate fingerprint for ssh connection.\"",
")",
"log",
".",
"error",
"(",
"unicode",
"(",
"e",
")",
")",
"log",
".",
"error",
"(",
"\"Is the instance finished starting up?\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"client",
"=",
"ssh_info",
"[",
"'client'",
"]",
"client",
".",
"get_transport",
"(",
")",
".",
"sock",
".",
"close",
"(",
")",
"client",
".",
"close",
"(",
")",
"argv",
"[",
"sid_index",
":",
"sid_index",
"+",
"1",
"]",
"=",
"instance",
".",
"ssh_args_from_info",
"(",
"ssh_info",
")",
"argv",
"[",
"0",
":",
"0",
"]",
"=",
"[",
"'ssh'",
"]",
"os",
".",
"execvp",
"(",
"'ssh'",
",",
"argv",
")"
] | 39.137255 | 13.058824 |
def elements(self):
"""Return the BIC's Party Prefix, Country Code, Party Suffix and
Branch Code as a tuple."""
return (self.party_prefix, self.country_code, self.party_suffix,
self.branch_code)
|
[
"def",
"elements",
"(",
"self",
")",
":",
"return",
"(",
"self",
".",
"party_prefix",
",",
"self",
".",
"country_code",
",",
"self",
".",
"party_suffix",
",",
"self",
".",
"branch_code",
")"
] | 46 | 12 |
def load(self, path):
"""
Load yaml-formatted config file.
Parameters
----------
path : str
path to config file
"""
with open(path) as f:
self.config = full_load(f)
if self.config is None:
sys.stderr.write("Warning: config file is empty!\n")
self.config = {}
|
[
"def",
"load",
"(",
"self",
",",
"path",
")",
":",
"with",
"open",
"(",
"path",
")",
"as",
"f",
":",
"self",
".",
"config",
"=",
"full_load",
"(",
"f",
")",
"if",
"self",
".",
"config",
"is",
"None",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"Warning: config file is empty!\\n\"",
")",
"self",
".",
"config",
"=",
"{",
"}"
] | 26.5 | 13.428571 |
def get(self):
'''Called by the protocol consumer'''
if self._current:
return self._resume(self._current, False)
else:
return self._get(None)
|
[
"def",
"get",
"(",
"self",
")",
":",
"if",
"self",
".",
"_current",
":",
"return",
"self",
".",
"_resume",
"(",
"self",
".",
"_current",
",",
"False",
")",
"else",
":",
"return",
"self",
".",
"_get",
"(",
"None",
")"
] | 30.666667 | 15.333333 |
def start(self):
"""
Start the client. If it is already :attr:`running`,
:class:`RuntimeError` is raised.
While the client is running, it will try to keep an XMPP connection
open to the server associated with :attr:`local_jid`.
"""
if self.running:
raise RuntimeError("client already running")
self._main_task = asyncio.ensure_future(
self._main(),
loop=self._loop
)
self._main_task.add_done_callback(self._on_main_done)
|
[
"def",
"start",
"(",
"self",
")",
":",
"if",
"self",
".",
"running",
":",
"raise",
"RuntimeError",
"(",
"\"client already running\"",
")",
"self",
".",
"_main_task",
"=",
"asyncio",
".",
"ensure_future",
"(",
"self",
".",
"_main",
"(",
")",
",",
"loop",
"=",
"self",
".",
"_loop",
")",
"self",
".",
"_main_task",
".",
"add_done_callback",
"(",
"self",
".",
"_on_main_done",
")"
] | 32.6875 | 18.6875 |
def _copy_template_to_config(self, template_path,
config_path, overwrite=False):
"""Write the default config from a template.
:type template_path: str
:param template_path: The template config file path.
:type config_path: str
:param config_path: The user's config file path.
:type overwrite: bool
:param overwrite: (Optional) Determines whether to overwrite the
existing config file, if it exists.
:raises: :class:`OSError <exceptions.OSError>`
"""
config_path = os.path.expanduser(config_path)
if not overwrite and os.path.isfile(config_path):
return
else:
try:
config_path_dir_name = os.path.dirname(config_path)
os.makedirs(config_path_dir_name)
except OSError:
if not os.path.isdir(config_path_dir_name):
raise
shutil.copyfile(template_path, config_path)
|
[
"def",
"_copy_template_to_config",
"(",
"self",
",",
"template_path",
",",
"config_path",
",",
"overwrite",
"=",
"False",
")",
":",
"config_path",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"config_path",
")",
"if",
"not",
"overwrite",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"config_path",
")",
":",
"return",
"else",
":",
"try",
":",
"config_path_dir_name",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"config_path",
")",
"os",
".",
"makedirs",
"(",
"config_path_dir_name",
")",
"except",
"OSError",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"config_path_dir_name",
")",
":",
"raise",
"shutil",
".",
"copyfile",
"(",
"template_path",
",",
"config_path",
")"
] | 36.814815 | 18.925926 |
def strel_line(length, angle):
"""Create a line structuring element for morphological operations
length - distance between first and last pixels of the line, rounded down
angle - angle from the horizontal, counter-clockwise in degrees.
Note: uses draw_line's Bresenham algorithm to select points.
"""
angle = float(angle) * np.pi / 180.
x_off = int(np.finfo(float).eps + np.cos(angle) * length / 2)
# Y is flipped here because "up" is negative
y_off = -int(np.finfo(float).eps + np.sin(angle) * length / 2)
x_center = abs(x_off)
y_center = abs(y_off)
strel = np.zeros((y_center * 2 + 1,
x_center * 2 + 1), bool)
draw_line(strel,
(y_center - y_off, x_center - x_off),
(y_center, x_center), True)
draw_line(strel,
(y_center + y_off, x_center + x_off),
(y_center, x_center), True)
return strel
|
[
"def",
"strel_line",
"(",
"length",
",",
"angle",
")",
":",
"angle",
"=",
"float",
"(",
"angle",
")",
"*",
"np",
".",
"pi",
"/",
"180.",
"x_off",
"=",
"int",
"(",
"np",
".",
"finfo",
"(",
"float",
")",
".",
"eps",
"+",
"np",
".",
"cos",
"(",
"angle",
")",
"*",
"length",
"/",
"2",
")",
"# Y is flipped here because \"up\" is negative",
"y_off",
"=",
"-",
"int",
"(",
"np",
".",
"finfo",
"(",
"float",
")",
".",
"eps",
"+",
"np",
".",
"sin",
"(",
"angle",
")",
"*",
"length",
"/",
"2",
")",
"x_center",
"=",
"abs",
"(",
"x_off",
")",
"y_center",
"=",
"abs",
"(",
"y_off",
")",
"strel",
"=",
"np",
".",
"zeros",
"(",
"(",
"y_center",
"*",
"2",
"+",
"1",
",",
"x_center",
"*",
"2",
"+",
"1",
")",
",",
"bool",
")",
"draw_line",
"(",
"strel",
",",
"(",
"y_center",
"-",
"y_off",
",",
"x_center",
"-",
"x_off",
")",
",",
"(",
"y_center",
",",
"x_center",
")",
",",
"True",
")",
"draw_line",
"(",
"strel",
",",
"(",
"y_center",
"+",
"y_off",
",",
"x_center",
"+",
"x_off",
")",
",",
"(",
"y_center",
",",
"x_center",
")",
",",
"True",
")",
"return",
"strel"
] | 36.96 | 17.28 |
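strel_line depends on a draw_line helper that is not part of this row. The sketch below is a self-contained variant under the assumption that skimage's Bresenham line is an acceptable stand-in; it is not the original helper.

# Self-contained sketch; skimage.draw.line stands in for the assumed
# draw_line helper (same Bresenham point selection).
import numpy as np
from skimage.draw import line

def strel_line_demo(length, angle_deg):
    theta = np.deg2rad(angle_deg)
    x_off = int(np.finfo(float).eps + np.cos(theta) * length / 2)
    y_off = -int(np.finfo(float).eps + np.sin(theta) * length / 2)  # "up" is negative
    yc, xc = abs(y_off), abs(x_off)
    strel = np.zeros((2 * yc + 1, 2 * xc + 1), bool)
    rr, cc = line(yc - y_off, xc - x_off, yc + y_off, xc + x_off)
    strel[rr, cc] = True
    return strel

print(strel_line_demo(5, 45).astype(int))  # 3x3 anti-diagonal of ones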
def stop(vm_):
'''
Hard power down the virtual machine, this is equivalent to pulling the
power
CLI Example:
.. code-block:: bash
salt '*' virt.stop <vm name>
'''
with _get_xapi_session() as xapi:
vm_uuid = _get_label_uuid(xapi, 'VM', vm_)
if vm_uuid is False:
return False
try:
xapi.VM.hard_shutdown(vm_uuid)
return True
except Exception:
return False
|
[
"def",
"stop",
"(",
"vm_",
")",
":",
"with",
"_get_xapi_session",
"(",
")",
"as",
"xapi",
":",
"vm_uuid",
"=",
"_get_label_uuid",
"(",
"xapi",
",",
"'VM'",
",",
"vm_",
")",
"if",
"vm_uuid",
"is",
"False",
":",
"return",
"False",
"try",
":",
"xapi",
".",
"VM",
".",
"hard_shutdown",
"(",
"vm_uuid",
")",
"return",
"True",
"except",
"Exception",
":",
"return",
"False"
] | 22.6 | 22 |
def sqlinsert(table, row):
"""Generates SQL insert into table ...
Returns (sql, parameters)
>>> sqlinsert('mytable', {'field1': 2, 'field2': 'toto'})
('insert into mytable (field1, field2) values (%s, %s)', [2, 'toto'])
>>> sqlinsert('t2', {'id': 1, 'name': 'Toto'})
('insert into t2 (id, name) values (%s, %s)', [1, 'Toto'])
"""
validate_name(table)
fields = sorted(row.keys())
validate_names(fields)
values = [row[field] for field in fields]
sql = "insert into {} ({}) values ({})".format(
table, ', '.join(fields), ', '.join(['%s'] * len(fields)))
return sql, values
|
[
"def",
"sqlinsert",
"(",
"table",
",",
"row",
")",
":",
"validate_name",
"(",
"table",
")",
"fields",
"=",
"sorted",
"(",
"row",
".",
"keys",
"(",
")",
")",
"validate_names",
"(",
"fields",
")",
"values",
"=",
"[",
"row",
"[",
"field",
"]",
"for",
"field",
"in",
"fields",
"]",
"sql",
"=",
"\"insert into {} ({}) values ({})\"",
".",
"format",
"(",
"table",
",",
"', '",
".",
"join",
"(",
"fields",
")",
",",
"', '",
".",
"join",
"(",
"[",
"'%s'",
"]",
"*",
"len",
"(",
"fields",
")",
")",
")",
"return",
"sql",
",",
"values"
] | 38.4375 | 15.625 |
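An execution sketch: the %s placeholders match DB-API drivers whose paramstyle is 'format' or 'pyformat' (psycopg2, for example); sqlite3 would need '?' instead. The cursor below is hypothetical.

# Execution sketch (hypothetical cursor; %s placeholders suit e.g. psycopg2).
sql, values = sqlinsert('users', {'id': 1, 'name': 'Toto'})
# cursor.execute(sql, values)  # pass values separately, never interpolate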
def is_equal(self, other):
"""
Computes whether two Partial Orderings contain the same information
"""
if not (hasattr(other, 'get_domain') or hasattr(other, 'upper') or hasattr(other, 'lower')):
other = self.coerce(other)
if self.is_domain_equal(other) \
and len(self.upper.symmetric_difference(other.upper)) == 0 \
and len(self.lower.symmetric_difference(other.lower)) == 0:
return True
return False
|
[
"def",
"is_equal",
"(",
"self",
",",
"other",
")",
":",
"if",
"not",
"(",
"hasattr",
"(",
"other",
",",
"'get_domain'",
")",
"or",
"hasattr",
"(",
"other",
",",
"'upper'",
")",
"or",
"hasattr",
"(",
"other",
",",
"'lower'",
")",
")",
":",
"other",
"=",
"self",
".",
"coerce",
"(",
"other",
")",
"if",
"self",
".",
"is_domain_equal",
"(",
"other",
")",
"and",
"len",
"(",
"self",
".",
"upper",
".",
"symmetric_difference",
"(",
"other",
".",
"upper",
")",
")",
"==",
"0",
"and",
"len",
"(",
"self",
".",
"lower",
".",
"symmetric_difference",
"(",
"other",
".",
"lower",
")",
")",
"==",
"0",
":",
"return",
"True",
"return",
"False"
] | 44.363636 | 19.272727 |
def section(self, ctx, optional=False):
"""
Return section of the config for a specific context (sub-command).
Parameters:
ctx (Context): The Click context object.
optional (bool): If ``True``, return an empty config object when section is missing.
Returns:
Section: The configuration section belonging to
the active (sub-)command (based on ``ctx.info_name``).
"""
values = self.load()
try:
return values[ctx.info_name]
except KeyError:
if optional:
return configobj.ConfigObj({}, **self.DEFAULT_CONFIG_OPTS)
raise LoggedFailure("Configuration section '{}' not found!".format(ctx.info_name))
|
[
"def",
"section",
"(",
"self",
",",
"ctx",
",",
"optional",
"=",
"False",
")",
":",
"values",
"=",
"self",
".",
"load",
"(",
")",
"try",
":",
"return",
"values",
"[",
"ctx",
".",
"info_name",
"]",
"except",
"KeyError",
":",
"if",
"optional",
":",
"return",
"configobj",
".",
"ConfigObj",
"(",
"{",
"}",
",",
"*",
"*",
"self",
".",
"DEFAULT_CONFIG_OPTS",
")",
"raise",
"LoggedFailure",
"(",
"\"Configuration section '{}' not found!\"",
".",
"format",
"(",
"ctx",
".",
"info_name",
")",
")"
] | 38.55 | 24.45 |
def consume(self, word_ids: mx.nd.NDArray) -> None:
"""
Consumes a word for each trie, updating respective states.
:param word_ids: The set of word IDs.
"""
word_ids = word_ids.asnumpy().tolist()
for i, word_id in enumerate(word_ids):
if self.global_avoid_states:
self.global_avoid_states[i] = self.global_avoid_states[i].consume(word_id)
if self.local_avoid_states:
self.local_avoid_states[i] = self.local_avoid_states[i].consume(word_id)
|
[
"def",
"consume",
"(",
"self",
",",
"word_ids",
":",
"mx",
".",
"nd",
".",
"NDArray",
")",
"->",
"None",
":",
"word_ids",
"=",
"word_ids",
".",
"asnumpy",
"(",
")",
".",
"tolist",
"(",
")",
"for",
"i",
",",
"word_id",
"in",
"enumerate",
"(",
"word_ids",
")",
":",
"if",
"self",
".",
"global_avoid_states",
":",
"self",
".",
"global_avoid_states",
"[",
"i",
"]",
"=",
"self",
".",
"global_avoid_states",
"[",
"i",
"]",
".",
"consume",
"(",
"word_id",
")",
"if",
"self",
".",
"local_avoid_states",
":",
"self",
".",
"local_avoid_states",
"[",
"i",
"]",
"=",
"self",
".",
"local_avoid_states",
"[",
"i",
"]",
".",
"consume",
"(",
"word_id",
")"
] | 44.416667 | 16.083333 |
def pattern(head, *args, mode=1, wc_name=None, conditions=None, **kwargs) \
-> Pattern:
"""'Flat' constructor for the Pattern class
Positional and keyword arguments are mapped into `args` and `kwargs`,
respectively. Useful for defining rules that match an instantiated
Expression with specific arguments
"""
if len(args) == 0:
args = None
if len(kwargs) == 0:
kwargs = None
return Pattern(head, args, kwargs, mode=mode, wc_name=wc_name,
conditions=conditions)
|
[
"def",
"pattern",
"(",
"head",
",",
"*",
"args",
",",
"mode",
"=",
"1",
",",
"wc_name",
"=",
"None",
",",
"conditions",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
"->",
"Pattern",
":",
"if",
"len",
"(",
"args",
")",
"==",
"0",
":",
"args",
"=",
"None",
"if",
"len",
"(",
"kwargs",
")",
"==",
"0",
":",
"kwargs",
"=",
"None",
"return",
"Pattern",
"(",
"head",
",",
"args",
",",
"kwargs",
",",
"mode",
"=",
"mode",
",",
"wc_name",
"=",
"wc_name",
",",
"conditions",
"=",
"conditions",
")"
] | 37.285714 | 18.714286 |
def find_embedding(elt, embedding=None):
"""Try to get elt embedding elements.
:param embedding: embedding element. Must have a module.
:return: a list of [module [,class]*] embedding elements which define elt.
:rtype: list
"""
result = [] # result is empty in the worst case
# start to get module
module = getmodule(elt)
if module is not None: # if module exists
        visited = set() # cache to avoid visiting the same element twice
if embedding is None:
embedding = module
# list of compounds elements which construct the path to elt
compounds = [embedding]
while compounds: # while compounds elements exist
# get last compound
last_embedding = compounds[-1]
            # stop iterating over compounds when the last embedding is elt
if last_embedding == elt:
result = compounds # result is compounds
break
else:
# search among embedded elements
for name in dir(last_embedding):
# get embedded element
embedded = getattr(last_embedding, name)
try: # check if embedded has already been visited
if embedded not in visited:
visited.add(embedded) # set it as visited
else:
continue
except TypeError:
pass
else:
# get embedded module
embedded_module = getmodule(embedded)
# and compare it with elt module
if embedded_module is module:
# add embedded to compounds
compounds.append(embedded)
# end the second loop
break
else:
                # remove the last element if no compound element is found
compounds.pop(-1)
return result
|
[
"def",
"find_embedding",
"(",
"elt",
",",
"embedding",
"=",
"None",
")",
":",
"result",
"=",
"[",
"]",
"# result is empty in the worst case",
"# start to get module",
"module",
"=",
"getmodule",
"(",
"elt",
")",
"if",
"module",
"is",
"not",
"None",
":",
"# if module exists",
"visited",
"=",
"set",
"(",
")",
"# cache to avoid to visit twice same element",
"if",
"embedding",
"is",
"None",
":",
"embedding",
"=",
"module",
"# list of compounds elements which construct the path to elt",
"compounds",
"=",
"[",
"embedding",
"]",
"while",
"compounds",
":",
"# while compounds elements exist",
"# get last compound",
"last_embedding",
"=",
"compounds",
"[",
"-",
"1",
"]",
"# stop to iterate on compounds when last embedding is elt",
"if",
"last_embedding",
"==",
"elt",
":",
"result",
"=",
"compounds",
"# result is compounds",
"break",
"else",
":",
"# search among embedded elements",
"for",
"name",
"in",
"dir",
"(",
"last_embedding",
")",
":",
"# get embedded element",
"embedded",
"=",
"getattr",
"(",
"last_embedding",
",",
"name",
")",
"try",
":",
"# check if embedded has already been visited",
"if",
"embedded",
"not",
"in",
"visited",
":",
"visited",
".",
"add",
"(",
"embedded",
")",
"# set it as visited",
"else",
":",
"continue",
"except",
"TypeError",
":",
"pass",
"else",
":",
"# get embedded module",
"embedded_module",
"=",
"getmodule",
"(",
"embedded",
")",
"# and compare it with elt module",
"if",
"embedded_module",
"is",
"module",
":",
"# add embedded to compounds",
"compounds",
".",
"append",
"(",
"embedded",
")",
"# end the second loop",
"break",
"else",
":",
"# remove last element if no coumpound element is found",
"compounds",
".",
"pop",
"(",
"-",
"1",
")",
"return",
"result"
] | 32.03125 | 21.0625 |
def group_dict_by_value(d: dict) -> dict:
"""
Group a dictionary by values.
Parameters
----------
d : dict
Input dictionary
Returns
-------
dict
Output dictionary. The keys are the values of the initial dictionary
        and the values are given by the list of keys corresponding to that value.
>>> group_dict_by_value({2: 3, 1: 2, 3: 1})
{3: [2], 2: [1], 1: [3]}
>>> group_dict_by_value({2: 3, 1: 2, 3: 1, 10:1, 12: 3})
{3: [2, 12], 2: [1], 1: [3, 10]}
"""
d_out = {}
for k, v in d.items():
if v in d_out:
d_out[v].append(k)
else:
d_out[v] = [k]
return d_out
|
[
"def",
"group_dict_by_value",
"(",
"d",
":",
"dict",
")",
"->",
"dict",
":",
"d_out",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
":",
"if",
"v",
"in",
"d_out",
":",
"d_out",
"[",
"v",
"]",
".",
"append",
"(",
"k",
")",
"else",
":",
"d_out",
"[",
"v",
"]",
"=",
"[",
"k",
"]",
"return",
"d_out"
] | 23.321429 | 21.535714 |
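The same grouping can be written a little more compactly with collections.defaultdict; a minimal equivalent sketch:

# Equivalent sketch using collections.defaultdict.
from collections import defaultdict

def group_dict_by_value_dd(d: dict) -> dict:
    out = defaultdict(list)
    for k, v in d.items():
        out[v].append(k)
    return dict(out)

assert group_dict_by_value_dd({2: 3, 1: 2, 3: 1}) == {3: [2], 2: [1], 1: [3]}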
def pov(self, color: chess.Color) -> "Score":
"""Get the score from the point of view of the given *color*."""
return self.relative if self.turn == color else -self.relative
|
[
"def",
"pov",
"(",
"self",
",",
"color",
":",
"chess",
".",
"Color",
")",
"->",
"\"Score\"",
":",
"return",
"self",
".",
"relative",
"if",
"self",
".",
"turn",
"==",
"color",
"else",
"-",
"self",
".",
"relative"
] | 62.333333 | 11.666667 |
def separate(text):
'''Takes text and separates it into a list of words'''
alphabet = 'abcdefghijklmnopqrstuvwxyz'
words = text.split()
standardwords = []
for word in words:
newstr = ''
for char in word:
if char in alphabet or char in alphabet.upper():
newstr += char
if newstr != '':
standardwords.append(newstr)
return map(lambda x: x.lower(),standardwords)
|
[
"def",
"separate",
"(",
"text",
")",
":",
"alphabet",
"=",
"'abcdefghijklmnopqrstuvwxyz'",
"words",
"=",
"text",
".",
"split",
"(",
")",
"standardwords",
"=",
"[",
"]",
"for",
"word",
"in",
"words",
":",
"newstr",
"=",
"''",
"for",
"char",
"in",
"word",
":",
"if",
"char",
"in",
"alphabet",
"or",
"char",
"in",
"alphabet",
".",
"upper",
"(",
")",
":",
"newstr",
"+=",
"char",
"if",
"newstr",
"!=",
"''",
":",
"standardwords",
".",
"append",
"(",
"newstr",
")",
"return",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"lower",
"(",
")",
",",
"standardwords",
")"
] | 33.384615 | 14.307692 |
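On Python 3 the returned map object is lazy; a usage sketch, assuming separate is in scope:

# Usage sketch; materialize the map object on Python 3.
words = list(separate("Hello, world! It's 2024."))
# -> ['hello', 'world', 'its']  (the all-digit token is dropped)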
def custom_sort(param):
"""Custom Click(Command|Group).params sorter.
Case insensitive sort with capitals after lowercase. --version at the end since I can't sort --help.
:param click.core.Option param: Parameter to evaluate.
:return: Sort weight.
:rtype: int
"""
option = param.opts[0].lstrip('-')
if param.param_type_name != 'option':
return False,
return True, option == 'version', option.lower(), option.swapcase()
|
[
"def",
"custom_sort",
"(",
"param",
")",
":",
"option",
"=",
"param",
".",
"opts",
"[",
"0",
"]",
".",
"lstrip",
"(",
"'-'",
")",
"if",
"param",
".",
"param_type_name",
"!=",
"'option'",
":",
"return",
"False",
",",
"return",
"True",
",",
"option",
"==",
"'version'",
",",
"option",
".",
"lower",
"(",
")",
",",
"option",
".",
"swapcase",
"(",
")"
] | 35.142857 | 22.571429 |
def send_voice(self, voice, **options):
"""
Send an OPUS-encoded .ogg audio file.
:param voice: Object containing the audio data
:param options: Additional sendVoice options (see
https://core.telegram.org/bots/api#sendvoice)
:Example:
>>> with open("voice.ogg", "rb") as f:
>>> await chat.send_voice(f)
"""
return self.bot.api_call(
"sendVoice", chat_id=str(self.id), voice=voice, **options
)
|
[
"def",
"send_voice",
"(",
"self",
",",
"voice",
",",
"*",
"*",
"options",
")",
":",
"return",
"self",
".",
"bot",
".",
"api_call",
"(",
"\"sendVoice\"",
",",
"chat_id",
"=",
"str",
"(",
"self",
".",
"id",
")",
",",
"voice",
"=",
"voice",
",",
"*",
"*",
"options",
")"
] | 30.5 | 16.875 |
def register_post_execute(self, func):
"""Register a function for calling after code execution.
"""
if not callable(func):
raise ValueError('argument %s must be callable' % func)
self._post_execute[func] = True
|
[
"def",
"register_post_execute",
"(",
"self",
",",
"func",
")",
":",
"if",
"not",
"callable",
"(",
"func",
")",
":",
"raise",
"ValueError",
"(",
"'argument %s must be callable'",
"%",
"func",
")",
"self",
".",
"_post_execute",
"[",
"func",
"]",
"=",
"True"
] | 41.5 | 6.666667 |
def __AddAdditionalProperties(self, message, schema, properties):
"""Add an additionalProperties field to message."""
additional_properties_info = schema['additionalProperties']
entries_type_name = self.__AddAdditionalPropertyType(
message.name, additional_properties_info)
description = util.CleanDescription(
additional_properties_info.get('description'))
if description is None:
description = 'Additional properties of type %s' % message.name
attrs = {
'items': {
'$ref': entries_type_name,
},
'description': description,
'type': 'array',
}
field_name = 'additionalProperties'
message.fields.append(self.__FieldDescriptorFromProperties(
field_name, len(properties) + 1, attrs))
self.__AddImport('from %s import encoding' % self.__base_files_package)
message.decorators.append(
'encoding.MapUnrecognizedFields(%r)' % field_name)
|
[
"def",
"__AddAdditionalProperties",
"(",
"self",
",",
"message",
",",
"schema",
",",
"properties",
")",
":",
"additional_properties_info",
"=",
"schema",
"[",
"'additionalProperties'",
"]",
"entries_type_name",
"=",
"self",
".",
"__AddAdditionalPropertyType",
"(",
"message",
".",
"name",
",",
"additional_properties_info",
")",
"description",
"=",
"util",
".",
"CleanDescription",
"(",
"additional_properties_info",
".",
"get",
"(",
"'description'",
")",
")",
"if",
"description",
"is",
"None",
":",
"description",
"=",
"'Additional properties of type %s'",
"%",
"message",
".",
"name",
"attrs",
"=",
"{",
"'items'",
":",
"{",
"'$ref'",
":",
"entries_type_name",
",",
"}",
",",
"'description'",
":",
"description",
",",
"'type'",
":",
"'array'",
",",
"}",
"field_name",
"=",
"'additionalProperties'",
"message",
".",
"fields",
".",
"append",
"(",
"self",
".",
"__FieldDescriptorFromProperties",
"(",
"field_name",
",",
"len",
"(",
"properties",
")",
"+",
"1",
",",
"attrs",
")",
")",
"self",
".",
"__AddImport",
"(",
"'from %s import encoding'",
"%",
"self",
".",
"__base_files_package",
")",
"message",
".",
"decorators",
".",
"append",
"(",
"'encoding.MapUnrecognizedFields(%r)'",
"%",
"field_name",
")"
] | 46.409091 | 17 |
def process_path_part(part, parameters):
"""
Given a part of a path either:
- If it is a parameter:
parse it to a regex group
- Otherwise:
escape any special regex characters
"""
if PARAMETER_REGEX.match(part):
parameter_name = part.strip('{}')
try:
parameter = find_parameter(
parameters,
name=parameter_name,
in_=PATH
)
except ValueError:
pass
else:
return construct_parameter_pattern(parameter)
return escape_regex_special_chars(part)
|
[
"def",
"process_path_part",
"(",
"part",
",",
"parameters",
")",
":",
"if",
"PARAMETER_REGEX",
".",
"match",
"(",
"part",
")",
":",
"parameter_name",
"=",
"part",
".",
"strip",
"(",
"'{}'",
")",
"try",
":",
"parameter",
"=",
"find_parameter",
"(",
"parameters",
",",
"name",
"=",
"parameter_name",
",",
"in_",
"=",
"PATH",
")",
"except",
"ValueError",
":",
"pass",
"else",
":",
"return",
"construct_parameter_pattern",
"(",
"parameter",
")",
"return",
"escape_regex_special_chars",
"(",
"part",
")"
] | 28.809524 | 10.714286 |
def super_lm_base():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.hidden_size = 512
hparams.moe_hidden_sizes = "512"
hparams.batch_size = 16384
hparams.max_length = 0
# All hyperparameters ending in "dropout" are automatically set to 0.0
# when not in training mode.
hparams.layer_prepostprocess_dropout = 0.0
hparams.symbol_dropout = 0.1
hparams.add_hparam("attention_dropout", 0.0)
hparams.label_smoothing = 0.0
hparams.clip_grad_norm = 0. # i.e. no gradient clipping
hparams.optimizer = "Adafactor"
hparams.learning_rate_decay_scheme = "noam"
hparams.learning_rate = 0.1
hparams.learning_rate_warmup_steps = 8000
hparams.initializer_gain = 1.0
hparams.initializer = "uniform_unit_scaling"
hparams.weight_decay = 0.0
hparams.shared_embedding_and_softmax_weights = False
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
# we only want one data shard.
hparams.no_data_parallelism = True
# bypass the symbol modality so that we can use model parallelism.
hparams.bottom = {
"inputs": modalities.identity_bottom,
"targets": modalities.identity_bottom,
}
hparams.top = {
"targets": modalities.identity_top,
}
hparams.add_hparam("filter_size", 512)
hparams.add_hparam("mix_fraction", 0.5)
# attention-related flags
hparams.add_hparam("multihead_attention_num_heads", 4)
hparams.add_hparam("multihead_attention_key_channels", 0)
hparams.add_hparam("multihead_attention_value_channels", 0)
hparams.add_hparam("pos", "timing") # timing, none
hparams.add_hparam(
"layers", ("n,att,m,d,a," "n,ffn,m,d,a,") * 4 + "n,ffn,d")
# Number of model shards - each one has separate parameters.
# Changing this number invalidates checkpoints.
hparams.add_hparam("num_model_shards", 8)
hparams.add_hparam("diet_experts", False)
return hparams
|
[
"def",
"super_lm_base",
"(",
")",
":",
"hparams",
"=",
"common_hparams",
".",
"basic_params1",
"(",
")",
"hparams",
".",
"hidden_size",
"=",
"512",
"hparams",
".",
"moe_hidden_sizes",
"=",
"\"512\"",
"hparams",
".",
"batch_size",
"=",
"16384",
"hparams",
".",
"max_length",
"=",
"0",
"# All hyperparameters ending in \"dropout\" are automatically set to 0.0",
"# when not in training mode.",
"hparams",
".",
"layer_prepostprocess_dropout",
"=",
"0.0",
"hparams",
".",
"symbol_dropout",
"=",
"0.1",
"hparams",
".",
"add_hparam",
"(",
"\"attention_dropout\"",
",",
"0.0",
")",
"hparams",
".",
"label_smoothing",
"=",
"0.0",
"hparams",
".",
"clip_grad_norm",
"=",
"0.",
"# i.e. no gradient clipping",
"hparams",
".",
"optimizer",
"=",
"\"Adafactor\"",
"hparams",
".",
"learning_rate_decay_scheme",
"=",
"\"noam\"",
"hparams",
".",
"learning_rate",
"=",
"0.1",
"hparams",
".",
"learning_rate_warmup_steps",
"=",
"8000",
"hparams",
".",
"initializer_gain",
"=",
"1.0",
"hparams",
".",
"initializer",
"=",
"\"uniform_unit_scaling\"",
"hparams",
".",
"weight_decay",
"=",
"0.0",
"hparams",
".",
"shared_embedding_and_softmax_weights",
"=",
"False",
"hparams",
".",
"layer_preprocess_sequence",
"=",
"\"n\"",
"hparams",
".",
"layer_postprocess_sequence",
"=",
"\"da\"",
"# we only want one data shard.",
"hparams",
".",
"no_data_parallelism",
"=",
"True",
"# bypass the symbol modality so that we can use model parallelism.",
"hparams",
".",
"bottom",
"=",
"{",
"\"inputs\"",
":",
"modalities",
".",
"identity_bottom",
",",
"\"targets\"",
":",
"modalities",
".",
"identity_bottom",
",",
"}",
"hparams",
".",
"top",
"=",
"{",
"\"targets\"",
":",
"modalities",
".",
"identity_top",
",",
"}",
"hparams",
".",
"add_hparam",
"(",
"\"filter_size\"",
",",
"512",
")",
"hparams",
".",
"add_hparam",
"(",
"\"mix_fraction\"",
",",
"0.5",
")",
"# attention-related flags",
"hparams",
".",
"add_hparam",
"(",
"\"multihead_attention_num_heads\"",
",",
"4",
")",
"hparams",
".",
"add_hparam",
"(",
"\"multihead_attention_key_channels\"",
",",
"0",
")",
"hparams",
".",
"add_hparam",
"(",
"\"multihead_attention_value_channels\"",
",",
"0",
")",
"hparams",
".",
"add_hparam",
"(",
"\"pos\"",
",",
"\"timing\"",
")",
"# timing, none",
"hparams",
".",
"add_hparam",
"(",
"\"layers\"",
",",
"(",
"\"n,att,m,d,a,\"",
"\"n,ffn,m,d,a,\"",
")",
"*",
"4",
"+",
"\"n,ffn,d\"",
")",
"# Number of model shards - each one has separate parameters.",
"# Changing this number invalidates checkpoints.",
"hparams",
".",
"add_hparam",
"(",
"\"num_model_shards\"",
",",
"8",
")",
"hparams",
".",
"add_hparam",
"(",
"\"diet_experts\"",
",",
"False",
")",
"return",
"hparams"
] | 38.604167 | 12.083333 |
def simple_ensure_index(request, database_name, collection_name):
"""Ensure a MongoDB index on a particular field name"""
name = "Ensure a MongoDB index on a particular field name"
if request.method == 'POST':
form = EnsureIndexForm(request.POST)
if form.is_valid():
result = form.save(database_name, collection_name)
messages.success(request,
_("Index for %s created successfully" % result))
return HttpResponseRedirect(reverse('djmongo_show_dbs'))
else:
# The form is invalid
messages.error(
request, _("Please correct the errors in the form."))
return render(request,
'djmongo/console/generic/bootstrapform.html',
{'form': form, 'name': name})
else:
# this is a GET
context = {'name': name,
'form': EnsureIndexForm(
initial={"database_name": database_name,
"collection_name": collection_name})
}
return render(request, 'djmongo/console/generic/bootstrapform.html',
context)
|
[
"def",
"simple_ensure_index",
"(",
"request",
",",
"database_name",
",",
"collection_name",
")",
":",
"name",
"=",
"\"Ensure a MongoDB index on a particular field name\"",
"if",
"request",
".",
"method",
"==",
"'POST'",
":",
"form",
"=",
"EnsureIndexForm",
"(",
"request",
".",
"POST",
")",
"if",
"form",
".",
"is_valid",
"(",
")",
":",
"result",
"=",
"form",
".",
"save",
"(",
"database_name",
",",
"collection_name",
")",
"messages",
".",
"success",
"(",
"request",
",",
"_",
"(",
"\"Index for %s created successfully\"",
"%",
"result",
")",
")",
"return",
"HttpResponseRedirect",
"(",
"reverse",
"(",
"'djmongo_show_dbs'",
")",
")",
"else",
":",
"# The form is invalid",
"messages",
".",
"error",
"(",
"request",
",",
"_",
"(",
"\"Please correct the errors in the form.\"",
")",
")",
"return",
"render",
"(",
"request",
",",
"'djmongo/console/generic/bootstrapform.html'",
",",
"{",
"'form'",
":",
"form",
",",
"'name'",
":",
"name",
"}",
")",
"else",
":",
"# this is a GET",
"context",
"=",
"{",
"'name'",
":",
"name",
",",
"'form'",
":",
"EnsureIndexForm",
"(",
"initial",
"=",
"{",
"\"database_name\"",
":",
"database_name",
",",
"\"collection_name\"",
":",
"collection_name",
"}",
")",
"}",
"return",
"render",
"(",
"request",
",",
"'djmongo/console/generic/bootstrapform.html'",
",",
"context",
")"
] | 41.344828 | 20.206897 |
def __release_particle(self):
"""Pull a particle from the queue and add it to the active list."""
# Calculate a potential angle for the particle.
angle = random.randint(int(self.direction_range[0]), int(self.direction_range[1]))
velocity = Vector2.from_polar(angle, self.particle_speed)
physics = PhysicsObj(self.coords, Vector2(), velocity)
particle = self.particle_pool.request_object()
particle.initalize(physics)
self.particles.append(particle)
self.current_particle_count += 1
|
[
"def",
"__release_particle",
"(",
"self",
")",
":",
"# Calculate a potential angle for the particle.",
"angle",
"=",
"random",
".",
"randint",
"(",
"int",
"(",
"self",
".",
"direction_range",
"[",
"0",
"]",
")",
",",
"int",
"(",
"self",
".",
"direction_range",
"[",
"1",
"]",
")",
")",
"velocity",
"=",
"Vector2",
".",
"from_polar",
"(",
"angle",
",",
"self",
".",
"particle_speed",
")",
"physics",
"=",
"PhysicsObj",
"(",
"self",
".",
"coords",
",",
"Vector2",
"(",
")",
",",
"velocity",
")",
"particle",
"=",
"self",
".",
"particle_pool",
".",
"request_object",
"(",
")",
"particle",
".",
"initalize",
"(",
"physics",
")",
"self",
".",
"particles",
".",
"append",
"(",
"particle",
")",
"self",
".",
"current_particle_count",
"+=",
"1"
] | 49.454545 | 16.636364 |
def from_time(cls, source):
"""
datetime.time -> SubRipTime corresponding to time object
"""
return cls(hours=source.hour, minutes=source.minute,
seconds=source.second, milliseconds=source.microsecond // 1000)
|
[
"def",
"from_time",
"(",
"cls",
",",
"source",
")",
":",
"return",
"cls",
"(",
"hours",
"=",
"source",
".",
"hour",
",",
"minutes",
"=",
"source",
".",
"minute",
",",
"seconds",
"=",
"source",
".",
"second",
",",
"milliseconds",
"=",
"source",
".",
"microsecond",
"//",
"1000",
")"
] | 41.333333 | 15.333333 |
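A usage sketch, assuming this classmethod lives on pysrt's SubRipTime; the milliseconds come from the microseconds floor-divided by 1000.

# Usage sketch (assumes pysrt's SubRipTime).
from datetime import time
from pysrt import SubRipTime

t = SubRipTime.from_time(time(1, 2, 3, 456789))
# -> 01:02:03,456  (456789 // 1000 == 456)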
def is_pangram(string):
"""
Checks if the string is a pangram (https://en.wikipedia.org/wiki/Pangram).
:param string: String to check.
:type string: str
:return: True if the string is a pangram, False otherwise.
"""
return is_full_string(string) and set(SPACES_RE.sub('', string)).issuperset(letters_set)
|
[
"def",
"is_pangram",
"(",
"string",
")",
":",
"return",
"is_full_string",
"(",
"string",
")",
"and",
"set",
"(",
"SPACES_RE",
".",
"sub",
"(",
"''",
",",
"string",
")",
")",
".",
"issuperset",
"(",
"letters_set",
")"
] | 36.111111 | 21.444444 |
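SPACES_RE, letters_set and is_full_string are module-level helpers not shown in this row; the sketch below recreates them as assumptions so the check runs standalone (lowercasing is added to handle mixed-case input).

# Self-contained sketch; the helpers are recreated as assumptions.
import re
import string

SPACES_RE = re.compile(r'\s')
letters_set = set(string.ascii_lowercase)

def is_pangram_demo(s):
    # is_full_string(s) is approximated by a non-blank check here.
    return bool(s and s.strip()) and \
        set(SPACES_RE.sub('', s.lower())).issuperset(letters_set)

assert is_pangram_demo('The quick brown fox jumps over the lazy dog')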
def get_all(self, **kwargs):
"""
Get all keys currently stored in etcd.
:param keys_only: if True, retrieve only the keys, not the values
:returns: sequence of (value, metadata) tuples
"""
range_response = self.get_all_response(**kwargs)
for kv in range_response.kvs:
yield (kv.value, KVMetadata(kv, range_response.header))
|
[
"def",
"get_all",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"range_response",
"=",
"self",
".",
"get_all_response",
"(",
"*",
"*",
"kwargs",
")",
"for",
"kv",
"in",
"range_response",
".",
"kvs",
":",
"yield",
"(",
"kv",
".",
"value",
",",
"KVMetadata",
"(",
"kv",
",",
"range_response",
".",
"header",
")",
")"
] | 38.3 | 15.1 |
def _set_fabric_trunk(self, v, load=False):
"""
Setter method for fabric_trunk, mapped from YANG variable /interface/fortygigabitethernet/fabric/fabric_trunk (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_fabric_trunk is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_fabric_trunk() directly.
YANG Description: Configure the Fabric Protocol Trunk parameters
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=fabric_trunk.fabric_trunk, is_container='container', presence=False, yang_name="fabric-trunk", rest_name="trunk", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Fabric trunk status ', u'alt-name': u'trunk', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """fabric_trunk must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=fabric_trunk.fabric_trunk, is_container='container', presence=False, yang_name="fabric-trunk", rest_name="trunk", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Fabric trunk status ', u'alt-name': u'trunk', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='container', is_config=True)""",
})
self.__fabric_trunk = t
if hasattr(self, '_set'):
self._set()
|
[
"def",
"_set_fabric_trunk",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"fabric_trunk",
".",
"fabric_trunk",
",",
"is_container",
"=",
"'container'",
",",
"presence",
"=",
"False",
",",
"yang_name",
"=",
"\"fabric-trunk\"",
",",
"rest_name",
"=",
"\"trunk\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'Fabric trunk status '",
",",
"u'alt-name'",
":",
"u'trunk'",
",",
"u'cli-incomplete-no'",
":",
"None",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-fcoe'",
",",
"defining_module",
"=",
"'brocade-fcoe'",
",",
"yang_type",
"=",
"'container'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"fabric_trunk must be of a type compatible with container\"\"\"",
",",
"'defined-type'",
":",
"\"container\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=fabric_trunk.fabric_trunk, is_container='container', presence=False, yang_name=\"fabric-trunk\", rest_name=\"trunk\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Fabric trunk status ', u'alt-name': u'trunk', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='container', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__fabric_trunk",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] | 74.583333 | 36.583333 |
def validate(self, scorer=None, k=1, test_size=0.1, stratify=False, shuffle=True, seed=100, indices=None):
"""Evaluate score by cross-validation.
Parameters
----------
scorer : function(y_true,y_pred), default None
Scikit-learn like metric that returns a score.
k : int, default 1
The number of folds for validation.
        If k=1, X_train is randomly split into two parts; otherwise a K-fold approach is used.
test_size : float, default 0.1
Size of the test holdout if k=1.
stratify : bool, default False
shuffle : bool, default True
seed : int, default 100
indices : list(np.array,np.array), default None
Two numpy arrays that contain indices for train/test slicing. (train_index,test_index)
Returns
-------
y_true: list
Actual labels.
y_pred: list
Predicted labels.
Examples
--------
>>> # Custom indices
>>> train_index = np.array(range(250))
>>> test_index = np.array(range(250,333))
>>> res = model_rf.validate(mean_absolute_error,indices=(train_index,test_index))
"""
if self.use_cache:
pdict = {'k': k, 'stratify': stratify, 'shuffle': shuffle, 'seed': seed, 'test_size': test_size}
if indices is not None:
pdict['train_index'] = np_hash(indices[0])
pdict['test_index'] = np_hash(indices[1])
dhash = self._dhash(pdict)
c = Cache(dhash, prefix='v')
if c.available:
logger.info('Loading %s\'s validation results from cache.' % self._name)
elif (self.dataset.X_train is None) and (self.dataset.y_train is None):
self.dataset.load()
scores = []
y_true = []
y_pred = []
if k == 1:
X_train, y_train, X_test, y_test = self.dataset.split(test_size=test_size, stratify=stratify,
seed=seed, indices=indices)
if self.use_cache and c.available:
prediction = c.retrieve('0')
else:
prediction = self._predict(X_train, y_train, X_test, y_test)
if self.use_cache:
c.store('0', prediction)
if scorer is not None:
scores.append(scorer(y_test, prediction))
y_true.append(y_test)
y_pred.append(prediction)
else:
for i, fold in enumerate(self.dataset.kfold(k, stratify=stratify, seed=seed, shuffle=shuffle)):
X_train, y_train, X_test, y_test, train_index, test_index = fold
if self.use_cache and c.available:
prediction = c.retrieve(str(i))
else:
prediction = None
if prediction is None:
logger.info('Calculating %s\'s fold #%s' % (self._name, i + 1))
prediction = self._predict(X_train, y_train, X_test, y_test)
if self.use_cache:
c.store(str(i), prediction)
if scorer is not None:
scores.append(scorer(y_test, prediction))
y_true.append(y_test)
y_pred.append(prediction)
if scorer is not None:
report_score(scores, scorer)
return y_true, y_pred
|
[
"def",
"validate",
"(",
"self",
",",
"scorer",
"=",
"None",
",",
"k",
"=",
"1",
",",
"test_size",
"=",
"0.1",
",",
"stratify",
"=",
"False",
",",
"shuffle",
"=",
"True",
",",
"seed",
"=",
"100",
",",
"indices",
"=",
"None",
")",
":",
"if",
"self",
".",
"use_cache",
":",
"pdict",
"=",
"{",
"'k'",
":",
"k",
",",
"'stratify'",
":",
"stratify",
",",
"'shuffle'",
":",
"shuffle",
",",
"'seed'",
":",
"seed",
",",
"'test_size'",
":",
"test_size",
"}",
"if",
"indices",
"is",
"not",
"None",
":",
"pdict",
"[",
"'train_index'",
"]",
"=",
"np_hash",
"(",
"indices",
"[",
"0",
"]",
")",
"pdict",
"[",
"'test_index'",
"]",
"=",
"np_hash",
"(",
"indices",
"[",
"1",
"]",
")",
"dhash",
"=",
"self",
".",
"_dhash",
"(",
"pdict",
")",
"c",
"=",
"Cache",
"(",
"dhash",
",",
"prefix",
"=",
"'v'",
")",
"if",
"c",
".",
"available",
":",
"logger",
".",
"info",
"(",
"'Loading %s\\'s validation results from cache.'",
"%",
"self",
".",
"_name",
")",
"elif",
"(",
"self",
".",
"dataset",
".",
"X_train",
"is",
"None",
")",
"and",
"(",
"self",
".",
"dataset",
".",
"y_train",
"is",
"None",
")",
":",
"self",
".",
"dataset",
".",
"load",
"(",
")",
"scores",
"=",
"[",
"]",
"y_true",
"=",
"[",
"]",
"y_pred",
"=",
"[",
"]",
"if",
"k",
"==",
"1",
":",
"X_train",
",",
"y_train",
",",
"X_test",
",",
"y_test",
"=",
"self",
".",
"dataset",
".",
"split",
"(",
"test_size",
"=",
"test_size",
",",
"stratify",
"=",
"stratify",
",",
"seed",
"=",
"seed",
",",
"indices",
"=",
"indices",
")",
"if",
"self",
".",
"use_cache",
"and",
"c",
".",
"available",
":",
"prediction",
"=",
"c",
".",
"retrieve",
"(",
"'0'",
")",
"else",
":",
"prediction",
"=",
"self",
".",
"_predict",
"(",
"X_train",
",",
"y_train",
",",
"X_test",
",",
"y_test",
")",
"if",
"self",
".",
"use_cache",
":",
"c",
".",
"store",
"(",
"'0'",
",",
"prediction",
")",
"if",
"scorer",
"is",
"not",
"None",
":",
"scores",
".",
"append",
"(",
"scorer",
"(",
"y_test",
",",
"prediction",
")",
")",
"y_true",
".",
"append",
"(",
"y_test",
")",
"y_pred",
".",
"append",
"(",
"prediction",
")",
"else",
":",
"for",
"i",
",",
"fold",
"in",
"enumerate",
"(",
"self",
".",
"dataset",
".",
"kfold",
"(",
"k",
",",
"stratify",
"=",
"stratify",
",",
"seed",
"=",
"seed",
",",
"shuffle",
"=",
"shuffle",
")",
")",
":",
"X_train",
",",
"y_train",
",",
"X_test",
",",
"y_test",
",",
"train_index",
",",
"test_index",
"=",
"fold",
"if",
"self",
".",
"use_cache",
"and",
"c",
".",
"available",
":",
"prediction",
"=",
"c",
".",
"retrieve",
"(",
"str",
"(",
"i",
")",
")",
"else",
":",
"prediction",
"=",
"None",
"if",
"prediction",
"is",
"None",
":",
"logger",
".",
"info",
"(",
"'Calculating %s\\'s fold #%s'",
"%",
"(",
"self",
".",
"_name",
",",
"i",
"+",
"1",
")",
")",
"prediction",
"=",
"self",
".",
"_predict",
"(",
"X_train",
",",
"y_train",
",",
"X_test",
",",
"y_test",
")",
"if",
"self",
".",
"use_cache",
":",
"c",
".",
"store",
"(",
"str",
"(",
"i",
")",
",",
"prediction",
")",
"if",
"scorer",
"is",
"not",
"None",
":",
"scores",
".",
"append",
"(",
"scorer",
"(",
"y_test",
",",
"prediction",
")",
")",
"y_true",
".",
"append",
"(",
"y_test",
")",
"y_pred",
".",
"append",
"(",
"prediction",
")",
"if",
"scorer",
"is",
"not",
"None",
":",
"report_score",
"(",
"scores",
",",
"scorer",
")",
"return",
"y_true",
",",
"y_pred"
] | 39.08046 | 21.827586 |
def ekucec(handle, segno, recno, column, nvals, cvals, isnull):
"""
Update a character column entry in a specified EK record.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekucec_c.html
:param handle: EK file handle.
:type handle: int
:param segno: Index of segment containing record.
:type segno: int
:param recno: Record to which data is to be updated.
:type recno: int
:param column: Column name.
:type column: str
:param nvals: Number of values in new column entry.
:type nvals: int
:param cvals: Character values comprising new column entry.
:type cvals: list of str.
:param isnull: Flag indicating whether column entry is null.
:type isnull: bool
"""
handle = ctypes.c_int(handle)
segno = ctypes.c_int(segno)
recno = ctypes.c_int(recno)
column = stypes.stringToCharP(column)
nvals = ctypes.c_int(nvals)
vallen = ctypes.c_int(len(max(cvals, key=len)) + 1)
cvals = stypes.listToCharArrayPtr(cvals, xLen=vallen)
isnull = ctypes.c_int(isnull)
libspice.ekucec_c(handle, segno, recno, column, nvals, vallen, cvals, isnull)
|
[
"def",
"ekucec",
"(",
"handle",
",",
"segno",
",",
"recno",
",",
"column",
",",
"nvals",
",",
"cvals",
",",
"isnull",
")",
":",
"handle",
"=",
"ctypes",
".",
"c_int",
"(",
"handle",
")",
"segno",
"=",
"ctypes",
".",
"c_int",
"(",
"segno",
")",
"recno",
"=",
"ctypes",
".",
"c_int",
"(",
"recno",
")",
"column",
"=",
"stypes",
".",
"stringToCharP",
"(",
"column",
")",
"nvals",
"=",
"ctypes",
".",
"c_int",
"(",
"nvals",
")",
"vallen",
"=",
"ctypes",
".",
"c_int",
"(",
"len",
"(",
"max",
"(",
"cvals",
",",
"key",
"=",
"len",
")",
")",
"+",
"1",
")",
"cvals",
"=",
"stypes",
".",
"listToCharArrayPtr",
"(",
"cvals",
",",
"xLen",
"=",
"vallen",
")",
"isnull",
"=",
"ctypes",
".",
"c_int",
"(",
"isnull",
")",
"libspice",
".",
"ekucec_c",
"(",
"handle",
",",
"segno",
",",
"recno",
",",
"column",
",",
"nvals",
",",
"vallen",
",",
"cvals",
",",
"isnull",
")"
] | 37.1 | 16.833333 |
def _check_cygwin_installed(cyg_arch='x86_64'):
'''
Return True or False if cygwin is installed.
Use the cygcheck executable to check install. It is installed as part of
the base package, and we use it to check packages
'''
path_to_cygcheck = os.sep.join(['C:',
_get_cyg_dir(cyg_arch),
'bin', 'cygcheck.exe'])
LOG.debug('Path to cygcheck.exe: %s', path_to_cygcheck)
if not os.path.exists(path_to_cygcheck):
LOG.debug('Could not find cygcheck.exe')
return False
return True
|
[
"def",
"_check_cygwin_installed",
"(",
"cyg_arch",
"=",
"'x86_64'",
")",
":",
"path_to_cygcheck",
"=",
"os",
".",
"sep",
".",
"join",
"(",
"[",
"'C:'",
",",
"_get_cyg_dir",
"(",
"cyg_arch",
")",
",",
"'bin'",
",",
"'cygcheck.exe'",
"]",
")",
"LOG",
".",
"debug",
"(",
"'Path to cygcheck.exe: %s'",
",",
"path_to_cygcheck",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path_to_cygcheck",
")",
":",
"LOG",
".",
"debug",
"(",
"'Could not find cygcheck.exe'",
")",
"return",
"False",
"return",
"True"
] | 38.866667 | 19 |
def available(name):
'''
Returns ``True`` if the specified service is available, otherwise returns
``False``.
CLI Example:
.. code-block:: bash
salt '*' service.available sshd
'''
(enabled_services, disabled_services) = _get_service_list(include_enabled=True,
include_disabled=True)
return name in enabled_services or name in disabled_services
|
[
"def",
"available",
"(",
"name",
")",
":",
"(",
"enabled_services",
",",
"disabled_services",
")",
"=",
"_get_service_list",
"(",
"include_enabled",
"=",
"True",
",",
"include_disabled",
"=",
"True",
")",
"return",
"name",
"in",
"enabled_services",
"or",
"name",
"in",
"disabled_services"
] | 31.071429 | 30.071429 |
def property_list(self, property_list):
"""Setter method; for a description see the getter method."""
if property_list is not None:
msg = "The 'property_list' init parameter and attribute of " \
"CIMInstance is deprecated; Set only the desired properties " \
"instead."
if DEBUG_WARNING_ORIGIN:
msg += "\nTraceback:\n" + ''.join(traceback.format_stack())
warnings.warn(msg, DeprecationWarning,
stacklevel=_stacklevel_above_module(__name__))
property_list = [_ensure_unicode(x).lower()
for x in property_list]
# pylint: disable=attribute-defined-outside-init
self._property_list = property_list
|
[
"def",
"property_list",
"(",
"self",
",",
"property_list",
")",
":",
"if",
"property_list",
"is",
"not",
"None",
":",
"msg",
"=",
"\"The 'property_list' init parameter and attribute of \"",
"\"CIMInstance is deprecated; Set only the desired properties \"",
"\"instead.\"",
"if",
"DEBUG_WARNING_ORIGIN",
":",
"msg",
"+=",
"\"\\nTraceback:\\n\"",
"+",
"''",
".",
"join",
"(",
"traceback",
".",
"format_stack",
"(",
")",
")",
"warnings",
".",
"warn",
"(",
"msg",
",",
"DeprecationWarning",
",",
"stacklevel",
"=",
"_stacklevel_above_module",
"(",
"__name__",
")",
")",
"property_list",
"=",
"[",
"_ensure_unicode",
"(",
"x",
")",
".",
"lower",
"(",
")",
"for",
"x",
"in",
"property_list",
"]",
"# pylint: disable=attribute-defined-outside-init",
"self",
".",
"_property_list",
"=",
"property_list"
] | 50.866667 | 17.2 |