| column | type | observed size |
|---|---|---|
| nwo | string | 5–106 chars |
| sha | string | 40 chars |
| path | string | 4–174 chars |
| language | string (1 distinct value) | — |
| identifier | string | 1–140 chars |
| parameters | string | 0–87.7k chars |
| argument_list | string (1 distinct value) | — |
| return_statement | string | 0–426k chars |
| docstring | string | 0–64.3k chars |
| docstring_summary | string | 0–26.3k chars |
| docstring_tokens | list | — |
| function | string | 18–4.83M chars |
| function_tokens | list | — |
| url | string | 83–304 chars |

Sample rows follow, one cell per line, with `|` lines separating cells.
kubernetes-client/python
|
47b9da9de2d02b2b7a34fbe05afb44afd130d73a
|
kubernetes/client/api/batch_v1_api.py
|
python
|
BatchV1Api.read_namespaced_job_status_with_http_info
|
(self, name, namespace, **kwargs)
|
return self.api_client.call_api(
'/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
|
read_namespaced_job_status # noqa: E501
read status of the specified Job # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_status_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Job, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
|
read_namespaced_job_status # noqa: E501
|
[
"read_namespaced_job_status",
"#",
"noqa",
":",
"E501"
] |
def read_namespaced_job_status_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_job_status # noqa: E501
read status of the specified Job # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_status_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Job, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_job_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_job_status`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_job_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
|
[
"def",
"read_namespaced_job_status_with_http_info",
"(",
"self",
",",
"name",
",",
"namespace",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"local_var_params",
"=",
"locals",
"(",
")",
"all_params",
"=",
"[",
"'name'",
",",
"'namespace'",
",",
"'pretty'",
"]",
"all_params",
".",
"extend",
"(",
"[",
"'async_req'",
",",
"'_return_http_data_only'",
",",
"'_preload_content'",
",",
"'_request_timeout'",
"]",
")",
"for",
"key",
",",
"val",
"in",
"six",
".",
"iteritems",
"(",
"local_var_params",
"[",
"'kwargs'",
"]",
")",
":",
"if",
"key",
"not",
"in",
"all_params",
":",
"raise",
"ApiTypeError",
"(",
"\"Got an unexpected keyword argument '%s'\"",
"\" to method read_namespaced_job_status\"",
"%",
"key",
")",
"local_var_params",
"[",
"key",
"]",
"=",
"val",
"del",
"local_var_params",
"[",
"'kwargs'",
"]",
"# verify the required parameter 'name' is set",
"if",
"self",
".",
"api_client",
".",
"client_side_validation",
"and",
"(",
"'name'",
"not",
"in",
"local_var_params",
"or",
"# noqa: E501",
"local_var_params",
"[",
"'name'",
"]",
"is",
"None",
")",
":",
"# noqa: E501",
"raise",
"ApiValueError",
"(",
"\"Missing the required parameter `name` when calling `read_namespaced_job_status`\"",
")",
"# noqa: E501",
"# verify the required parameter 'namespace' is set",
"if",
"self",
".",
"api_client",
".",
"client_side_validation",
"and",
"(",
"'namespace'",
"not",
"in",
"local_var_params",
"or",
"# noqa: E501",
"local_var_params",
"[",
"'namespace'",
"]",
"is",
"None",
")",
":",
"# noqa: E501",
"raise",
"ApiValueError",
"(",
"\"Missing the required parameter `namespace` when calling `read_namespaced_job_status`\"",
")",
"# noqa: E501",
"collection_formats",
"=",
"{",
"}",
"path_params",
"=",
"{",
"}",
"if",
"'name'",
"in",
"local_var_params",
":",
"path_params",
"[",
"'name'",
"]",
"=",
"local_var_params",
"[",
"'name'",
"]",
"# noqa: E501",
"if",
"'namespace'",
"in",
"local_var_params",
":",
"path_params",
"[",
"'namespace'",
"]",
"=",
"local_var_params",
"[",
"'namespace'",
"]",
"# noqa: E501",
"query_params",
"=",
"[",
"]",
"if",
"'pretty'",
"in",
"local_var_params",
"and",
"local_var_params",
"[",
"'pretty'",
"]",
"is",
"not",
"None",
":",
"# noqa: E501",
"query_params",
".",
"append",
"(",
"(",
"'pretty'",
",",
"local_var_params",
"[",
"'pretty'",
"]",
")",
")",
"# noqa: E501",
"header_params",
"=",
"{",
"}",
"form_params",
"=",
"[",
"]",
"local_var_files",
"=",
"{",
"}",
"body_params",
"=",
"None",
"# HTTP header `Accept`",
"header_params",
"[",
"'Accept'",
"]",
"=",
"self",
".",
"api_client",
".",
"select_header_accept",
"(",
"[",
"'application/json'",
",",
"'application/yaml'",
",",
"'application/vnd.kubernetes.protobuf'",
"]",
")",
"# noqa: E501",
"# Authentication setting",
"auth_settings",
"=",
"[",
"'BearerToken'",
"]",
"# noqa: E501",
"return",
"self",
".",
"api_client",
".",
"call_api",
"(",
"'/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status'",
",",
"'GET'",
",",
"path_params",
",",
"query_params",
",",
"header_params",
",",
"body",
"=",
"body_params",
",",
"post_params",
"=",
"form_params",
",",
"files",
"=",
"local_var_files",
",",
"response_type",
"=",
"'V1Job'",
",",
"# noqa: E501",
"auth_settings",
"=",
"auth_settings",
",",
"async_req",
"=",
"local_var_params",
".",
"get",
"(",
"'async_req'",
")",
",",
"_return_http_data_only",
"=",
"local_var_params",
".",
"get",
"(",
"'_return_http_data_only'",
")",
",",
"# noqa: E501",
"_preload_content",
"=",
"local_var_params",
".",
"get",
"(",
"'_preload_content'",
",",
"True",
")",
",",
"_request_timeout",
"=",
"local_var_params",
".",
"get",
"(",
"'_request_timeout'",
")",
",",
"collection_formats",
"=",
"collection_formats",
")"
] |
https://github.com/kubernetes-client/python/blob/47b9da9de2d02b2b7a34fbe05afb44afd130d73a/kubernetes/client/api/batch_v1_api.py#L2757-L2856
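For orientation, a minimal usage sketch of the generated endpoint above via the official Python client; the cluster access, Job name, and namespace are assumptions for illustration only:

```python
# Illustrative sketch only; assumes a reachable cluster and an existing Job
# ("example-job"/"default" are hypothetical placeholders).
from kubernetes import client, config

config.load_kube_config()  # use config.load_incluster_config() inside a pod
batch = client.BatchV1Api()

# The *_with_http_info variant returns (V1Job, HTTP status code, headers);
# the plain read_namespaced_job_status wrapper returns only the V1Job.
job, status, headers = batch.read_namespaced_job_status_with_http_info(
    name="example-job", namespace="default"
)
print(status, job.status.succeeded)
```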
|
|
sqlfluff/sqlfluff
|
c2278f41f270a29ef5ffc6b179236abf32dc18e1
|
src/sqlfluff/core/linter/linted_file.py
|
python
|
LintedFile.ignore_masked_violations
|
(
cls, violations: List[SQLBaseError], ignore_mask: List[NoQaDirective]
)
|
return violations
|
Remove any violations specified by ignore_mask.
This involves two steps:
1. Filter out violations affected by single-line "noqa" directives.
2. Filter out violations affected by disable/enable "noqa" directives.
|
Remove any violations specified by ignore_mask.
|
[
"Remove",
"any",
"violations",
"specified",
"by",
"ignore_mask",
"."
] |
def ignore_masked_violations(
cls, violations: List[SQLBaseError], ignore_mask: List[NoQaDirective]
) -> List[SQLBaseError]:
"""Remove any violations specified by ignore_mask.
This involves two steps:
1. Filter out violations affected by single-line "noqa" directives.
2. Filter out violations affected by disable/enable "noqa" directives.
"""
ignore_specific = [ignore for ignore in ignore_mask if not ignore.action]
ignore_range = [ignore for ignore in ignore_mask if ignore.action]
violations = cls._ignore_masked_violations_single_line(
violations, ignore_specific
)
violations = cls._ignore_masked_violations_line_range(violations, ignore_range)
return violations
|
[
"def",
"ignore_masked_violations",
"(",
"cls",
",",
"violations",
":",
"List",
"[",
"SQLBaseError",
"]",
",",
"ignore_mask",
":",
"List",
"[",
"NoQaDirective",
"]",
")",
"->",
"List",
"[",
"SQLBaseError",
"]",
":",
"ignore_specific",
"=",
"[",
"ignore",
"for",
"ignore",
"in",
"ignore_mask",
"if",
"not",
"ignore",
".",
"action",
"]",
"ignore_range",
"=",
"[",
"ignore",
"for",
"ignore",
"in",
"ignore_mask",
"if",
"ignore",
".",
"action",
"]",
"violations",
"=",
"cls",
".",
"_ignore_masked_violations_single_line",
"(",
"violations",
",",
"ignore_specific",
")",
"violations",
"=",
"cls",
".",
"_ignore_masked_violations_line_range",
"(",
"violations",
",",
"ignore_range",
")",
"return",
"violations"
] |
https://github.com/sqlfluff/sqlfluff/blob/c2278f41f270a29ef5ffc6b179236abf32dc18e1/src/sqlfluff/core/linter/linted_file.py#L176-L191
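The classmethod above is internal to the linter; a minimal sketch of the user-facing behaviour it supports, using sqlfluff's public simple API (the SQL string is an arbitrary example):

```python
# Illustrative sketch only; an inline "-- noqa" comment produces the
# NoQaDirective entries that ignore_masked_violations filters against.
import sqlfluff

sql = "SELECT col_a, col_b FROM tbl  -- noqa\n"
violations = sqlfluff.lint(sql, dialect="ansi")
print(violations)  # violations masked by the noqa directive are removed
```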
|
|
tensorflow/models
|
6b8bb0cbeb3e10415c7a87448f08adc3c484c1d3
|
official/vision/beta/ops/augment.py
|
python
|
AutoAugment.distort
|
(self, image: tf.Tensor)
|
return image
|
See base class.
|
See base class.
|
[
"See",
"base",
"class",
"."
] |
def distort(self, image: tf.Tensor) -> tf.Tensor:
"""See base class."""
input_image_type = image.dtype
if input_image_type != tf.uint8:
image = tf.clip_by_value(image, 0.0, 255.0)
image = tf.cast(image, dtype=tf.uint8)
tf_policies = self._make_tf_policies()
image, _ = select_and_apply_random_policy(tf_policies, image, bboxes=None)
return image
|
[
"def",
"distort",
"(",
"self",
",",
"image",
":",
"tf",
".",
"Tensor",
")",
"->",
"tf",
".",
"Tensor",
":",
"input_image_type",
"=",
"image",
".",
"dtype",
"if",
"input_image_type",
"!=",
"tf",
".",
"uint8",
":",
"image",
"=",
"tf",
".",
"clip_by_value",
"(",
"image",
",",
"0.0",
",",
"255.0",
")",
"image",
"=",
"tf",
".",
"cast",
"(",
"image",
",",
"dtype",
"=",
"tf",
".",
"uint8",
")",
"tf_policies",
"=",
"self",
".",
"_make_tf_policies",
"(",
")",
"image",
",",
"_",
"=",
"select_and_apply_random_policy",
"(",
"tf_policies",
",",
"image",
",",
"bboxes",
"=",
"None",
")",
"return",
"image"
] |
https://github.com/tensorflow/models/blob/6b8bb0cbeb3e10415c7a87448f08adc3c484c1d3/official/vision/beta/ops/augment.py#L1657-L1666
|
|
celery/billiard
|
269ef67354a3a205cea780aa8ea451e0d17cd37c
|
billiard/managers.py
|
python
|
Server.serve_client
|
(self, conn)
|
Handle requests from the proxies in a particular process/thread
|
Handle requests from the proxies in a particular process/thread
|
[
"Handle",
"requests",
"from",
"the",
"proxies",
"in",
"a",
"particular",
"process",
"/",
"thread"
] |
def serve_client(self, conn):
'''
Handle requests from the proxies in a particular process/thread
'''
util.debug('starting server thread to service %r',
threading.current_thread().name)
recv = conn.recv
send = conn.send
id_to_obj = self.id_to_obj
while not self.stop_event.is_set():
try:
methodname = obj = None
request = recv()
ident, methodname, args, kwds = request
obj, exposed, gettypeid = id_to_obj[ident]
if methodname not in exposed:
raise AttributeError(
'method %r of %r object is not in exposed=%r' % (
methodname, type(obj), exposed)
)
function = getattr(obj, methodname)
try:
res = function(*args, **kwds)
except Exception as exc:
msg = ('#ERROR', exc)
else:
typeid = gettypeid and gettypeid.get(methodname, None)
if typeid:
rident, rexposed = self.create(conn, typeid, res)
token = Token(typeid, self.address, rident)
msg = ('#PROXY', (rexposed, token))
else:
msg = ('#RETURN', res)
except AttributeError:
if methodname is None:
msg = ('#TRACEBACK', format_exc())
else:
try:
fallback_func = self.fallback_mapping[methodname]
result = fallback_func(
self, conn, ident, obj, *args, **kwds
)
msg = ('#RETURN', result)
except Exception:
msg = ('#TRACEBACK', format_exc())
except EOFError:
util.debug('got EOF -- exiting thread serving %r',
threading.current_thread().name)
sys.exit(0)
except Exception:
msg = ('#TRACEBACK', format_exc())
try:
try:
send(msg)
except Exception:
send(('#UNSERIALIZABLE', repr(msg)))
except Exception as exc:
util.info('exception in thread serving %r',
threading.current_thread().name)
util.info(' ... message was %r', msg)
util.info(' ... exception was %r', exc)
conn.close()
sys.exit(1)
|
[
"def",
"serve_client",
"(",
"self",
",",
"conn",
")",
":",
"util",
".",
"debug",
"(",
"'starting server thread to service %r'",
",",
"threading",
".",
"current_thread",
"(",
")",
".",
"name",
")",
"recv",
"=",
"conn",
".",
"recv",
"send",
"=",
"conn",
".",
"send",
"id_to_obj",
"=",
"self",
".",
"id_to_obj",
"while",
"not",
"self",
".",
"stop_event",
".",
"is_set",
"(",
")",
":",
"try",
":",
"methodname",
"=",
"obj",
"=",
"None",
"request",
"=",
"recv",
"(",
")",
"ident",
",",
"methodname",
",",
"args",
",",
"kwds",
"=",
"request",
"obj",
",",
"exposed",
",",
"gettypeid",
"=",
"id_to_obj",
"[",
"ident",
"]",
"if",
"methodname",
"not",
"in",
"exposed",
":",
"raise",
"AttributeError",
"(",
"'method %r of %r object is not in exposed=%r'",
"%",
"(",
"methodname",
",",
"type",
"(",
"obj",
")",
",",
"exposed",
")",
")",
"function",
"=",
"getattr",
"(",
"obj",
",",
"methodname",
")",
"try",
":",
"res",
"=",
"function",
"(",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
"except",
"Exception",
"as",
"exc",
":",
"msg",
"=",
"(",
"'#ERROR'",
",",
"exc",
")",
"else",
":",
"typeid",
"=",
"gettypeid",
"and",
"gettypeid",
".",
"get",
"(",
"methodname",
",",
"None",
")",
"if",
"typeid",
":",
"rident",
",",
"rexposed",
"=",
"self",
".",
"create",
"(",
"conn",
",",
"typeid",
",",
"res",
")",
"token",
"=",
"Token",
"(",
"typeid",
",",
"self",
".",
"address",
",",
"rident",
")",
"msg",
"=",
"(",
"'#PROXY'",
",",
"(",
"rexposed",
",",
"token",
")",
")",
"else",
":",
"msg",
"=",
"(",
"'#RETURN'",
",",
"res",
")",
"except",
"AttributeError",
":",
"if",
"methodname",
"is",
"None",
":",
"msg",
"=",
"(",
"'#TRACEBACK'",
",",
"format_exc",
"(",
")",
")",
"else",
":",
"try",
":",
"fallback_func",
"=",
"self",
".",
"fallback_mapping",
"[",
"methodname",
"]",
"result",
"=",
"fallback_func",
"(",
"self",
",",
"conn",
",",
"ident",
",",
"obj",
",",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
"msg",
"=",
"(",
"'#RETURN'",
",",
"result",
")",
"except",
"Exception",
":",
"msg",
"=",
"(",
"'#TRACEBACK'",
",",
"format_exc",
"(",
")",
")",
"except",
"EOFError",
":",
"util",
".",
"debug",
"(",
"'got EOF -- exiting thread serving %r'",
",",
"threading",
".",
"current_thread",
"(",
")",
".",
"name",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"except",
"Exception",
":",
"msg",
"=",
"(",
"'#TRACEBACK'",
",",
"format_exc",
"(",
")",
")",
"try",
":",
"try",
":",
"send",
"(",
"msg",
")",
"except",
"Exception",
":",
"send",
"(",
"(",
"'#UNSERIALIZABLE'",
",",
"repr",
"(",
"msg",
")",
")",
")",
"except",
"Exception",
"as",
"exc",
":",
"util",
".",
"info",
"(",
"'exception in thread serving %r'",
",",
"threading",
".",
"current_thread",
"(",
")",
".",
"name",
")",
"util",
".",
"info",
"(",
"' ... message was %r'",
",",
"msg",
")",
"util",
".",
"info",
"(",
"' ... exception was %r'",
",",
"exc",
")",
"conn",
".",
"close",
"(",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] |
https://github.com/celery/billiard/blob/269ef67354a3a205cea780aa8ea451e0d17cd37c/billiard/managers.py#L232-L304
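billiard is a fork of the standard-library multiprocessing package, so the ordinary Manager/proxy workflow is what exercises this server loop; a minimal sketch with the stdlib names (which billiard mirrors):

```python
# Illustrative sketch only; each proxy operation becomes an
# (ident, methodname, args, kwds) request answered by serve_client
# with a '#RETURN' (or '#TRACEBACK'/'#ERROR') message.
from multiprocessing import Manager

if __name__ == "__main__":
    with Manager() as manager:
        shared = manager.dict()   # DictProxy backed by the manager server
        shared["answer"] = 42     # __setitem__ request to the server thread
        print(shared["answer"])   # __getitem__ request, '#RETURN' reply
```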
|
||
openedx/edx-platform
|
68dd185a0ab45862a2a61e0f803d7e03d2be71b5
|
common/djangoapps/edxmako/template.py
|
python
|
Template.render
|
(self, context=None, request=None)
|
return self.mako_template.render_unicode(**context_dictionary)
|
This takes a render call with a context (from Django) and translates
it to a render call on the mako template.
When rendering a large sequence of XBlocks, we may end up rendering
hundreds of small templates. Even if context processors aren't very
expensive individually, they will quickly add up in that situation. To
help guard against this, we do context processing once for a given
request and then cache it.
|
This takes a render call with a context (from Django) and translates
it to a render call on the mako template.
|
[
"This",
"takes",
"a",
"render",
"call",
"with",
"a",
"context",
"(",
"from",
"Django",
")",
"and",
"translates",
"it",
"to",
"a",
"render",
"call",
"on",
"the",
"mako",
"template",
"."
] |
def render(self, context=None, request=None):
"""
This takes a render call with a context (from Django) and translates
it to a render call on the mako template.
When rendering a large sequence of XBlocks, we may end up rendering
hundreds of small templates. Even if context processors aren't very
expensive individually, they will quickly add up in that situation. To
help guard against this, we do context processing once for a given
request and then cache it.
"""
context_object = self._get_context_object(request)
request_cache = RequestCache('context_processors')
cache_response = request_cache.get_cached_response('cp_output')
if cache_response.is_found:
context_dictionary = dict(cache_response.value)
else:
context_dictionary = self._get_context_processors_output_dict(context_object)
# The context_dictionary is later updated with template specific
# variables. There are potentially hundreds of calls to templates
# rendering and we don't want them to interfere with each other, so
# we make a copy from the output of the context processors and then
# recreate a new dict every time we pull from the cache.
request_cache.set('cp_output', dict(context_dictionary))
if isinstance(context, Context):
context_dictionary.update(context.flatten())
elif context is not None:
context_dictionary.update(context)
self._add_core_context(context_dictionary)
self._evaluate_lazy_csrf_tokens(context_dictionary)
return self.mako_template.render_unicode(**context_dictionary)
|
[
"def",
"render",
"(",
"self",
",",
"context",
"=",
"None",
",",
"request",
"=",
"None",
")",
":",
"context_object",
"=",
"self",
".",
"_get_context_object",
"(",
"request",
")",
"request_cache",
"=",
"RequestCache",
"(",
"'context_processors'",
")",
"cache_response",
"=",
"request_cache",
".",
"get_cached_response",
"(",
"'cp_output'",
")",
"if",
"cache_response",
".",
"is_found",
":",
"context_dictionary",
"=",
"dict",
"(",
"cache_response",
".",
"value",
")",
"else",
":",
"context_dictionary",
"=",
"self",
".",
"_get_context_processors_output_dict",
"(",
"context_object",
")",
"# The context_dictionary is later updated with template specific",
"# variables. There are potentially hundreds of calls to templates",
"# rendering and we don't want them to interfere with each other, so",
"# we make a copy from the output of the context processors and then",
"# recreate a new dict every time we pull from the cache.",
"request_cache",
".",
"set",
"(",
"'cp_output'",
",",
"dict",
"(",
"context_dictionary",
")",
")",
"if",
"isinstance",
"(",
"context",
",",
"Context",
")",
":",
"context_dictionary",
".",
"update",
"(",
"context",
".",
"flatten",
"(",
")",
")",
"elif",
"context",
"is",
"not",
"None",
":",
"context_dictionary",
".",
"update",
"(",
"context",
")",
"self",
".",
"_add_core_context",
"(",
"context_dictionary",
")",
"self",
".",
"_evaluate_lazy_csrf_tokens",
"(",
"context_dictionary",
")",
"return",
"self",
".",
"mako_template",
".",
"render_unicode",
"(",
"*",
"*",
"context_dictionary",
")"
] |
https://github.com/openedx/edx-platform/blob/68dd185a0ab45862a2a61e0f803d7e03d2be71b5/common/djangoapps/edxmako/template.py#L48-L82
|
|
TensorMSA/tensormsa
|
c36b565159cd934533636429add3c7d7263d622b
|
master/workflow/preprocess/workflow_feed_fr2wv.py
|
python
|
WorkflowFeedFr2Wv.get_column_list
|
(self)
|
return self.conf['col_list']
|
:param node_id:
:return:
|
[] |
def get_column_list(self):
"""
:param node_id:
:return:
"""
if('conf' not in self.__dict__) :
self.conf = self.get_view_obj(self.key)
return self.conf['col_list']
|
[
"def",
"get_column_list",
"(",
"self",
")",
":",
"if",
"(",
"'conf'",
"not",
"in",
"self",
".",
"__dict__",
")",
":",
"self",
".",
"conf",
"=",
"self",
".",
"get_view_obj",
"(",
"self",
".",
"key",
")",
"return",
"self",
".",
"conf",
"[",
"'col_list'",
"]"
] |
https://github.com/TensorMSA/tensormsa/blob/c36b565159cd934533636429add3c7d7263d622b/master/workflow/preprocess/workflow_feed_fr2wv.py#L18-L26
|
||
openedx/edx-platform
|
68dd185a0ab45862a2a61e0f803d7e03d2be71b5
|
lms/djangoapps/courseware/plugins.py
|
python
|
TextbooksCourseApp.set_enabled
|
(cls, course_key: CourseKey, enabled: bool, user: 'User')
|
The textbook app can be globally enabled/disabled.
Currently, it isn't possible to enable/disable this app on a per-course basis.
|
The textbook app can be globally enabled/disabled.
|
[
"The",
"textbook",
"app",
"can",
"be",
"globally",
"enabled",
"/",
"disabled",
"."
] |
def set_enabled(cls, course_key: CourseKey, enabled: bool, user: 'User') -> bool:
"""
The textbook app can be globally enabled/disabled.
Currently, it isn't possible to enable/disable this app on a per-course basis.
"""
raise ValueError("The textbook app can not be enabled/disabled for a single course.")
|
[
"def",
"set_enabled",
"(",
"cls",
",",
"course_key",
":",
"CourseKey",
",",
"enabled",
":",
"bool",
",",
"user",
":",
"'User'",
")",
"->",
"bool",
":",
"raise",
"ValueError",
"(",
"\"The textbook app can not be enabled/disabled for a single course.\"",
")"
] |
https://github.com/openedx/edx-platform/blob/68dd185a0ab45862a2a61e0f803d7e03d2be71b5/lms/djangoapps/courseware/plugins.py#L97-L103
|
||
renskiy/fabricio
|
030b1d6e8050628fcfce78615a056e27e8e56324
|
fabricio/docker/service.py
|
python
|
RemovableOption.__init__
|
(self, func=None, path=None, force_add=None, force_rm=None, **kwargs)
|
[] |
def __init__(self, func=None, path=None, force_add=None, force_rm=None, **kwargs): # noqa
super(RemovableOption, self).__init__(func=func, **kwargs)
self.path = path or self.path
self.force_add = force_add if force_add is not None else self.force_add
self.force_rm = force_rm if force_rm is not None else self.force_rm
|
[
"def",
"__init__",
"(",
"self",
",",
"func",
"=",
"None",
",",
"path",
"=",
"None",
",",
"force_add",
"=",
"None",
",",
"force_rm",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa",
"super",
"(",
"RemovableOption",
",",
"self",
")",
".",
"__init__",
"(",
"func",
"=",
"func",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"path",
"=",
"path",
"or",
"self",
".",
"path",
"self",
".",
"force_add",
"=",
"force_add",
"if",
"force_add",
"is",
"not",
"None",
"else",
"self",
".",
"force_add",
"self",
".",
"force_rm",
"=",
"force_rm",
"if",
"force_rm",
"is",
"not",
"None",
"else",
"self",
".",
"force_rm"
] |
https://github.com/renskiy/fabricio/blob/030b1d6e8050628fcfce78615a056e27e8e56324/fabricio/docker/service.py#L49-L53
|
||||
owtf/owtf
|
22d6d35fb2a232fcc56bf5ed504ec52fd65f15b6
|
owtf/managers/target.py
|
python
|
TargetManager.get_target_urls
|
(self)
|
return get_all_targets(self.session, "target_url")
|
Return target URLs
:return: List of target urls
:rtype: `list`
|
Return target URLs
:return: List of target urls
:rtype: `list`
|
[
"Return",
"target",
"URLs",
":",
"return",
":",
"List",
"of",
"target",
"urls",
":",
"rtype",
":",
"list"
] |
def get_target_urls(self):
"""Return target URLs
:return: List of target urls
:rtype: `list`
"""
return get_all_targets(self.session, "target_url")
|
[
"def",
"get_target_urls",
"(",
"self",
")",
":",
"return",
"get_all_targets",
"(",
"self",
".",
"session",
",",
"\"target_url\"",
")"
] |
https://github.com/owtf/owtf/blob/22d6d35fb2a232fcc56bf5ed504ec52fd65f15b6/owtf/managers/target.py#L186-L191
|
|
meijieru/AtomNAS
|
a3ea885c5e94269c6a95140e1ee30ed750b314e8
|
utils/rmsprop.py
|
python
|
RMSprop.compress_drop
|
(self, info, verbose=False)
|
Remove unused parameters for dynamic network shrinkage.
|
Remove unused parameters for dynamic network shrinkage.
|
[
"Remove",
"unused",
"parameters",
"for",
"dynamic",
"network",
"shrinkage",
"."
] |
def compress_drop(self, info, verbose=False):
"""Remove unused parameters for dynamic network shrinkage."""
var_old = info['var_old']
if verbose:
logging.info('RMSProp drop: {}'.format(info['var_old_name']))
assert info['type'] == 'variable'
found = False
for group in self.param_groups:
index = index_tensor_in(var_old, group['params'], raise_error=False)
found = index is not None
if found:
if check_tensor_in(var_old, self.state):
self.state.pop(var_old)
del group['params'][index]
assert found, 'Var: {} not in RMSProp'.format(info['var_old_name'])
|
[
"def",
"compress_drop",
"(",
"self",
",",
"info",
",",
"verbose",
"=",
"False",
")",
":",
"var_old",
"=",
"info",
"[",
"'var_old'",
"]",
"if",
"verbose",
":",
"logging",
".",
"info",
"(",
"'RMSProp drop: {}'",
".",
"format",
"(",
"info",
"[",
"'var_old_name'",
"]",
")",
")",
"assert",
"info",
"[",
"'type'",
"]",
"==",
"'variable'",
"found",
"=",
"False",
"for",
"group",
"in",
"self",
".",
"param_groups",
":",
"index",
"=",
"index_tensor_in",
"(",
"var_old",
",",
"group",
"[",
"'params'",
"]",
",",
"raise_error",
"=",
"False",
")",
"found",
"=",
"index",
"is",
"not",
"None",
"if",
"found",
":",
"if",
"check_tensor_in",
"(",
"var_old",
",",
"self",
".",
"state",
")",
":",
"self",
".",
"state",
".",
"pop",
"(",
"var_old",
")",
"del",
"group",
"[",
"'params'",
"]",
"[",
"index",
"]",
"assert",
"found",
",",
"'Var: {} not in RMSProp'",
".",
"format",
"(",
"info",
"[",
"'var_old_name'",
"]",
")"
] |
https://github.com/meijieru/AtomNAS/blob/a3ea885c5e94269c6a95140e1ee30ed750b314e8/utils/rmsprop.py#L167-L182
|
||
Parsl/parsl
|
af2535341152b2640fdd1a3b73b891992bf1b3ea
|
parsl/monitoring/db_manager.py
|
python
|
DatabaseManager.start
|
(self,
priority_queue: "queue.Queue[Tuple[MessageType, Dict[str, Any]]]",
node_queue: "queue.Queue[Dict[str, Any]]",
block_queue: "queue.Queue[Dict[str, Any]]",
resource_queue: "queue.Queue[Dict[str, Any]]")
|
maintain a set to track the tasks that are already INSERTed into database
to prevent race condition that the first resource message (indicate 'running' state)
arrives before the first task message. In such a case, the resource table
primary key would be violated.
If that happens, the message will be added to deferred_resource_messages and processed later.
|
maintain a set to track the tasks that are already INSERTed into database
to prevent race condition that the first resource message (indicate 'running' state)
arrives before the first task message. In such a case, the resource table
primary key would be violated.
If that happens, the message will be added to deferred_resource_messages and processed later.
|
[
"maintain",
"a",
"set",
"to",
"track",
"the",
"tasks",
"that",
"are",
"already",
"INSERTed",
"into",
"database",
"to",
"prevent",
"race",
"condition",
"that",
"the",
"first",
"resource",
"message",
"(",
"indicate",
"running",
"state",
")",
"arrives",
"before",
"the",
"first",
"task",
"message",
".",
"In",
"such",
"a",
"case",
"the",
"resource",
"table",
"primary",
"key",
"would",
"be",
"violated",
".",
"If",
"that",
"happens",
"the",
"message",
"will",
"be",
"added",
"to",
"deferred_resource_messages",
"and",
"processed",
"later",
"."
] |
def start(self,
priority_queue: "queue.Queue[Tuple[MessageType, Dict[str, Any]]]",
node_queue: "queue.Queue[Dict[str, Any]]",
block_queue: "queue.Queue[Dict[str, Any]]",
resource_queue: "queue.Queue[Dict[str, Any]]") -> None:
self._kill_event = threading.Event()
self._priority_queue_pull_thread = threading.Thread(target=self._migrate_logs_to_internal,
args=(
priority_queue, 'priority', self._kill_event,),
name="Monitoring-migrate-priority",
daemon=True,
)
self._priority_queue_pull_thread.start()
self._node_queue_pull_thread = threading.Thread(target=self._migrate_logs_to_internal,
args=(
node_queue, 'node', self._kill_event,),
name="Monitoring-migrate-node",
daemon=True,
)
self._node_queue_pull_thread.start()
self._block_queue_pull_thread = threading.Thread(target=self._migrate_logs_to_internal,
args=(
block_queue, 'block', self._kill_event,),
name="Monitoring-migrate-block",
daemon=True,
)
self._block_queue_pull_thread.start()
self._resource_queue_pull_thread = threading.Thread(target=self._migrate_logs_to_internal,
args=(
resource_queue, 'resource', self._kill_event,),
name="Monitoring-migrate-resource",
daemon=True,
)
self._resource_queue_pull_thread.start()
"""
maintain a set to track the tasks that are already INSERTed into database
to prevent race condition that the first resource message (indicate 'running' state)
arrives before the first task message. In such a case, the resource table
primary key would be violated.
If that happens, the message will be added to deferred_resource_messages and processed later.
"""
inserted_tasks = set() # type: Set[object]
"""
like inserted_tasks but for task,try tuples
"""
inserted_tries = set() # type: Set[Any]
# for any task ID, we can defer exactly one message, which is the
# assumed-to-be-unique first message (with first message flag set).
# The code prior to this patch will discard previous message in
# the case of multiple messages to defer.
deferred_resource_messages = {} # type: Dict[str, Any]
exception_happened = False
while (not self._kill_event.is_set() or
self.pending_priority_queue.qsize() != 0 or self.pending_resource_queue.qsize() != 0 or
self.pending_node_queue.qsize() != 0 or self.pending_block_queue.qsize() != 0 or
priority_queue.qsize() != 0 or resource_queue.qsize() != 0 or
node_queue.qsize() != 0 or block_queue.qsize() != 0):
"""
WORKFLOW_INFO and TASK_INFO messages (i.e. priority messages)
"""
try:
logger.debug("""Checking STOP conditions: {}, {}, {}, {}, {}, {}, {}, {}, {}""".format(
self._kill_event.is_set(),
self.pending_priority_queue.qsize() != 0, self.pending_resource_queue.qsize() != 0,
self.pending_node_queue.qsize() != 0, self.pending_block_queue.qsize() != 0,
priority_queue.qsize() != 0, resource_queue.qsize() != 0,
node_queue.qsize() != 0, block_queue.qsize() != 0))
# This is the list of resource messages which can be reprocessed as if they
# had just arrived because the corresponding first task message has been
# processed (corresponding by task id)
reprocessable_first_resource_messages = []
# Get a batch of priority messages
priority_messages = self._get_messages_in_batch(self.pending_priority_queue)
if priority_messages:
logger.debug(
"Got {} messages from priority queue".format(len(priority_messages)))
task_info_update_messages, task_info_insert_messages, task_info_all_messages = [], [], []
try_update_messages, try_insert_messages, try_all_messages = [], [], []
for msg_type, msg in priority_messages:
if msg_type == MessageType.WORKFLOW_INFO:
if "python_version" in msg: # workflow start message
logger.debug(
"Inserting workflow start info to WORKFLOW table")
self._insert(table=WORKFLOW, messages=[msg])
self.workflow_start_message = msg
else: # workflow end message
logger.debug(
"Updating workflow end info to WORKFLOW table")
self._update(table=WORKFLOW,
columns=['run_id', 'tasks_failed_count',
'tasks_completed_count', 'time_completed'],
messages=[msg])
self.workflow_end = True
elif msg_type == MessageType.TASK_INFO:
task_try_id = str(msg['task_id']) + "." + str(msg['try_id'])
task_info_all_messages.append(msg)
if msg['task_id'] in inserted_tasks:
task_info_update_messages.append(msg)
else:
inserted_tasks.add(msg['task_id'])
task_info_insert_messages.append(msg)
try_all_messages.append(msg)
if task_try_id in inserted_tries:
try_update_messages.append(msg)
else:
inserted_tries.add(task_try_id)
try_insert_messages.append(msg)
# check if there is a left_message for this task
if task_try_id in deferred_resource_messages:
reprocessable_first_resource_messages.append(
deferred_resource_messages.pop(task_try_id))
else:
raise RuntimeError("Unexpected message type {} received on priority queue".format(msg_type))
logger.debug("Updating and inserting TASK_INFO to all tables")
logger.debug("Updating {} TASK_INFO into workflow table".format(len(task_info_update_messages)))
self._update(table=WORKFLOW,
columns=['run_id', 'tasks_failed_count',
'tasks_completed_count'],
messages=task_info_all_messages)
if task_info_insert_messages:
self._insert(table=TASK, messages=task_info_insert_messages)
logger.debug(
"There are {} inserted task records".format(len(inserted_tasks)))
if task_info_update_messages:
logger.debug("Updating {} TASK_INFO into task table".format(len(task_info_update_messages)))
self._update(table=TASK,
columns=['task_time_invoked',
'task_time_returned',
'run_id', 'task_id',
'task_fail_count',
'task_fail_cost',
'task_hashsum'],
messages=task_info_update_messages)
logger.debug("Inserting {} task_info_all_messages into status table".format(len(task_info_all_messages)))
self._insert(table=STATUS, messages=task_info_all_messages)
if try_insert_messages:
logger.debug("Inserting {} TASK_INFO to try table".format(len(try_insert_messages)))
self._insert(table=TRY, messages=try_insert_messages)
logger.debug(
"There are {} inserted task records".format(len(inserted_tasks)))
if try_update_messages:
logger.debug("Updating {} TASK_INFO into try table".format(len(try_update_messages)))
self._update(table=TRY,
columns=['run_id', 'task_id', 'try_id',
'task_fail_history',
'task_try_time_launched',
'task_try_time_returned',
'task_joins'],
messages=try_update_messages)
"""
NODE_INFO messages
"""
node_info_messages = self._get_messages_in_batch(self.pending_node_queue)
if node_info_messages:
logger.debug(
"Got {} messages from node queue".format(len(node_info_messages)))
self._insert(table=NODE, messages=node_info_messages)
"""
BLOCK_INFO messages
"""
block_info_messages = self._get_messages_in_batch(self.pending_block_queue)
if block_info_messages:
logger.debug(
"Got {} messages from block queue".format(len(block_info_messages)))
# block_info_messages is possibly a nested list of dict (at different polling times)
# Each dict refers to the info of a job/block at one polling time
block_messages_to_insert = [] # type: List[Any]
for block_msg in block_info_messages:
block_messages_to_insert.extend(block_msg)
self._insert(table=BLOCK, messages=block_messages_to_insert)
"""
Resource info messages
"""
resource_messages = self._get_messages_in_batch(self.pending_resource_queue)
if resource_messages:
logger.debug(
"Got {} messages from resource queue, {} reprocessable".format(len(resource_messages), len(reprocessable_first_resource_messages)))
insert_resource_messages = []
for msg in resource_messages:
task_try_id = str(msg['task_id']) + "." + str(msg['try_id'])
if msg['first_msg']:
# Update the running time to try table if first message
msg['task_status_name'] = States.running.name
msg['task_try_time_running'] = msg['timestamp']
if task_try_id in inserted_tries: # TODO: needs to become task_id and try_id, and check against inserted_tries
reprocessable_first_resource_messages.append(msg)
else:
if task_try_id in deferred_resource_messages:
logger.error("Task {} already has a deferred resource message. Discarding previous message.".format(msg['task_id']))
deferred_resource_messages[task_try_id] = msg
else:
# Insert to resource table if not first message
insert_resource_messages.append(msg)
if insert_resource_messages:
self._insert(table=RESOURCE, messages=insert_resource_messages)
if reprocessable_first_resource_messages:
self._insert(table=STATUS, messages=reprocessable_first_resource_messages)
self._update(table=TRY,
columns=['task_try_time_running',
'run_id', 'task_id', 'try_id',
'block_id', 'hostname'],
messages=reprocessable_first_resource_messages)
except Exception:
logger.exception("Exception in db loop: this might have been a malformed message, or some other error. monitoring data may have been lost")
exception_happened = True
if exception_happened:
raise RuntimeError("An exception happened sometime during database processing and should have been logged in database_manager.log")
|
[
"def",
"start",
"(",
"self",
",",
"priority_queue",
":",
"\"queue.Queue[Tuple[MessageType, Dict[str, Any]]]\"",
",",
"node_queue",
":",
"\"queue.Queue[Dict[str, Any]]\"",
",",
"block_queue",
":",
"\"queue.Queue[Dict[str, Any]]\"",
",",
"resource_queue",
":",
"\"queue.Queue[Dict[str, Any]]\"",
")",
"->",
"None",
":",
"self",
".",
"_kill_event",
"=",
"threading",
".",
"Event",
"(",
")",
"self",
".",
"_priority_queue_pull_thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"_migrate_logs_to_internal",
",",
"args",
"=",
"(",
"priority_queue",
",",
"'priority'",
",",
"self",
".",
"_kill_event",
",",
")",
",",
"name",
"=",
"\"Monitoring-migrate-priority\"",
",",
"daemon",
"=",
"True",
",",
")",
"self",
".",
"_priority_queue_pull_thread",
".",
"start",
"(",
")",
"self",
".",
"_node_queue_pull_thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"_migrate_logs_to_internal",
",",
"args",
"=",
"(",
"node_queue",
",",
"'node'",
",",
"self",
".",
"_kill_event",
",",
")",
",",
"name",
"=",
"\"Monitoring-migrate-node\"",
",",
"daemon",
"=",
"True",
",",
")",
"self",
".",
"_node_queue_pull_thread",
".",
"start",
"(",
")",
"self",
".",
"_block_queue_pull_thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"_migrate_logs_to_internal",
",",
"args",
"=",
"(",
"block_queue",
",",
"'block'",
",",
"self",
".",
"_kill_event",
",",
")",
",",
"name",
"=",
"\"Monitoring-migrate-block\"",
",",
"daemon",
"=",
"True",
",",
")",
"self",
".",
"_block_queue_pull_thread",
".",
"start",
"(",
")",
"self",
".",
"_resource_queue_pull_thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"_migrate_logs_to_internal",
",",
"args",
"=",
"(",
"resource_queue",
",",
"'resource'",
",",
"self",
".",
"_kill_event",
",",
")",
",",
"name",
"=",
"\"Monitoring-migrate-resource\"",
",",
"daemon",
"=",
"True",
",",
")",
"self",
".",
"_resource_queue_pull_thread",
".",
"start",
"(",
")",
"inserted_tasks",
"=",
"set",
"(",
")",
"# type: Set[object]",
"\"\"\"\n like inserted_tasks but for task,try tuples\n \"\"\"",
"inserted_tries",
"=",
"set",
"(",
")",
"# type: Set[Any]",
"# for any task ID, we can defer exactly one message, which is the",
"# assumed-to-be-unique first message (with first message flag set).",
"# The code prior to this patch will discard previous message in",
"# the case of multiple messages to defer.",
"deferred_resource_messages",
"=",
"{",
"}",
"# type: Dict[str, Any]",
"exception_happened",
"=",
"False",
"while",
"(",
"not",
"self",
".",
"_kill_event",
".",
"is_set",
"(",
")",
"or",
"self",
".",
"pending_priority_queue",
".",
"qsize",
"(",
")",
"!=",
"0",
"or",
"self",
".",
"pending_resource_queue",
".",
"qsize",
"(",
")",
"!=",
"0",
"or",
"self",
".",
"pending_node_queue",
".",
"qsize",
"(",
")",
"!=",
"0",
"or",
"self",
".",
"pending_block_queue",
".",
"qsize",
"(",
")",
"!=",
"0",
"or",
"priority_queue",
".",
"qsize",
"(",
")",
"!=",
"0",
"or",
"resource_queue",
".",
"qsize",
"(",
")",
"!=",
"0",
"or",
"node_queue",
".",
"qsize",
"(",
")",
"!=",
"0",
"or",
"block_queue",
".",
"qsize",
"(",
")",
"!=",
"0",
")",
":",
"\"\"\"\n WORKFLOW_INFO and TASK_INFO messages (i.e. priority messages)\n\n \"\"\"",
"try",
":",
"logger",
".",
"debug",
"(",
"\"\"\"Checking STOP conditions: {}, {}, {}, {}, {}, {}, {}, {}, {}\"\"\"",
".",
"format",
"(",
"self",
".",
"_kill_event",
".",
"is_set",
"(",
")",
",",
"self",
".",
"pending_priority_queue",
".",
"qsize",
"(",
")",
"!=",
"0",
",",
"self",
".",
"pending_resource_queue",
".",
"qsize",
"(",
")",
"!=",
"0",
",",
"self",
".",
"pending_node_queue",
".",
"qsize",
"(",
")",
"!=",
"0",
",",
"self",
".",
"pending_block_queue",
".",
"qsize",
"(",
")",
"!=",
"0",
",",
"priority_queue",
".",
"qsize",
"(",
")",
"!=",
"0",
",",
"resource_queue",
".",
"qsize",
"(",
")",
"!=",
"0",
",",
"node_queue",
".",
"qsize",
"(",
")",
"!=",
"0",
",",
"block_queue",
".",
"qsize",
"(",
")",
"!=",
"0",
")",
")",
"# This is the list of resource messages which can be reprocessed as if they",
"# had just arrived because the corresponding first task message has been",
"# processed (corresponding by task id)",
"reprocessable_first_resource_messages",
"=",
"[",
"]",
"# Get a batch of priority messages",
"priority_messages",
"=",
"self",
".",
"_get_messages_in_batch",
"(",
"self",
".",
"pending_priority_queue",
")",
"if",
"priority_messages",
":",
"logger",
".",
"debug",
"(",
"\"Got {} messages from priority queue\"",
".",
"format",
"(",
"len",
"(",
"priority_messages",
")",
")",
")",
"task_info_update_messages",
",",
"task_info_insert_messages",
",",
"task_info_all_messages",
"=",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"try_update_messages",
",",
"try_insert_messages",
",",
"try_all_messages",
"=",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"for",
"msg_type",
",",
"msg",
"in",
"priority_messages",
":",
"if",
"msg_type",
"==",
"MessageType",
".",
"WORKFLOW_INFO",
":",
"if",
"\"python_version\"",
"in",
"msg",
":",
"# workflow start message",
"logger",
".",
"debug",
"(",
"\"Inserting workflow start info to WORKFLOW table\"",
")",
"self",
".",
"_insert",
"(",
"table",
"=",
"WORKFLOW",
",",
"messages",
"=",
"[",
"msg",
"]",
")",
"self",
".",
"workflow_start_message",
"=",
"msg",
"else",
":",
"# workflow end message",
"logger",
".",
"debug",
"(",
"\"Updating workflow end info to WORKFLOW table\"",
")",
"self",
".",
"_update",
"(",
"table",
"=",
"WORKFLOW",
",",
"columns",
"=",
"[",
"'run_id'",
",",
"'tasks_failed_count'",
",",
"'tasks_completed_count'",
",",
"'time_completed'",
"]",
",",
"messages",
"=",
"[",
"msg",
"]",
")",
"self",
".",
"workflow_end",
"=",
"True",
"elif",
"msg_type",
"==",
"MessageType",
".",
"TASK_INFO",
":",
"task_try_id",
"=",
"str",
"(",
"msg",
"[",
"'task_id'",
"]",
")",
"+",
"\".\"",
"+",
"str",
"(",
"msg",
"[",
"'try_id'",
"]",
")",
"task_info_all_messages",
".",
"append",
"(",
"msg",
")",
"if",
"msg",
"[",
"'task_id'",
"]",
"in",
"inserted_tasks",
":",
"task_info_update_messages",
".",
"append",
"(",
"msg",
")",
"else",
":",
"inserted_tasks",
".",
"add",
"(",
"msg",
"[",
"'task_id'",
"]",
")",
"task_info_insert_messages",
".",
"append",
"(",
"msg",
")",
"try_all_messages",
".",
"append",
"(",
"msg",
")",
"if",
"task_try_id",
"in",
"inserted_tries",
":",
"try_update_messages",
".",
"append",
"(",
"msg",
")",
"else",
":",
"inserted_tries",
".",
"add",
"(",
"task_try_id",
")",
"try_insert_messages",
".",
"append",
"(",
"msg",
")",
"# check if there is a left_message for this task",
"if",
"task_try_id",
"in",
"deferred_resource_messages",
":",
"reprocessable_first_resource_messages",
".",
"append",
"(",
"deferred_resource_messages",
".",
"pop",
"(",
"task_try_id",
")",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Unexpected message type {} received on priority queue\"",
".",
"format",
"(",
"msg_type",
")",
")",
"logger",
".",
"debug",
"(",
"\"Updating and inserting TASK_INFO to all tables\"",
")",
"logger",
".",
"debug",
"(",
"\"Updating {} TASK_INFO into workflow table\"",
".",
"format",
"(",
"len",
"(",
"task_info_update_messages",
")",
")",
")",
"self",
".",
"_update",
"(",
"table",
"=",
"WORKFLOW",
",",
"columns",
"=",
"[",
"'run_id'",
",",
"'tasks_failed_count'",
",",
"'tasks_completed_count'",
"]",
",",
"messages",
"=",
"task_info_all_messages",
")",
"if",
"task_info_insert_messages",
":",
"self",
".",
"_insert",
"(",
"table",
"=",
"TASK",
",",
"messages",
"=",
"task_info_insert_messages",
")",
"logger",
".",
"debug",
"(",
"\"There are {} inserted task records\"",
".",
"format",
"(",
"len",
"(",
"inserted_tasks",
")",
")",
")",
"if",
"task_info_update_messages",
":",
"logger",
".",
"debug",
"(",
"\"Updating {} TASK_INFO into task table\"",
".",
"format",
"(",
"len",
"(",
"task_info_update_messages",
")",
")",
")",
"self",
".",
"_update",
"(",
"table",
"=",
"TASK",
",",
"columns",
"=",
"[",
"'task_time_invoked'",
",",
"'task_time_returned'",
",",
"'run_id'",
",",
"'task_id'",
",",
"'task_fail_count'",
",",
"'task_fail_cost'",
",",
"'task_hashsum'",
"]",
",",
"messages",
"=",
"task_info_update_messages",
")",
"logger",
".",
"debug",
"(",
"\"Inserting {} task_info_all_messages into status table\"",
".",
"format",
"(",
"len",
"(",
"task_info_all_messages",
")",
")",
")",
"self",
".",
"_insert",
"(",
"table",
"=",
"STATUS",
",",
"messages",
"=",
"task_info_all_messages",
")",
"if",
"try_insert_messages",
":",
"logger",
".",
"debug",
"(",
"\"Inserting {} TASK_INFO to try table\"",
".",
"format",
"(",
"len",
"(",
"try_insert_messages",
")",
")",
")",
"self",
".",
"_insert",
"(",
"table",
"=",
"TRY",
",",
"messages",
"=",
"try_insert_messages",
")",
"logger",
".",
"debug",
"(",
"\"There are {} inserted task records\"",
".",
"format",
"(",
"len",
"(",
"inserted_tasks",
")",
")",
")",
"if",
"try_update_messages",
":",
"logger",
".",
"debug",
"(",
"\"Updating {} TASK_INFO into try table\"",
".",
"format",
"(",
"len",
"(",
"try_update_messages",
")",
")",
")",
"self",
".",
"_update",
"(",
"table",
"=",
"TRY",
",",
"columns",
"=",
"[",
"'run_id'",
",",
"'task_id'",
",",
"'try_id'",
",",
"'task_fail_history'",
",",
"'task_try_time_launched'",
",",
"'task_try_time_returned'",
",",
"'task_joins'",
"]",
",",
"messages",
"=",
"try_update_messages",
")",
"\"\"\"\n NODE_INFO messages\n\n \"\"\"",
"node_info_messages",
"=",
"self",
".",
"_get_messages_in_batch",
"(",
"self",
".",
"pending_node_queue",
")",
"if",
"node_info_messages",
":",
"logger",
".",
"debug",
"(",
"\"Got {} messages from node queue\"",
".",
"format",
"(",
"len",
"(",
"node_info_messages",
")",
")",
")",
"self",
".",
"_insert",
"(",
"table",
"=",
"NODE",
",",
"messages",
"=",
"node_info_messages",
")",
"\"\"\"\n BLOCK_INFO messages\n\n \"\"\"",
"block_info_messages",
"=",
"self",
".",
"_get_messages_in_batch",
"(",
"self",
".",
"pending_block_queue",
")",
"if",
"block_info_messages",
":",
"logger",
".",
"debug",
"(",
"\"Got {} messages from block queue\"",
".",
"format",
"(",
"len",
"(",
"block_info_messages",
")",
")",
")",
"# block_info_messages is possibly a nested list of dict (at different polling times)",
"# Each dict refers to the info of a job/block at one polling time",
"block_messages_to_insert",
"=",
"[",
"]",
"# type: List[Any]",
"for",
"block_msg",
"in",
"block_info_messages",
":",
"block_messages_to_insert",
".",
"extend",
"(",
"block_msg",
")",
"self",
".",
"_insert",
"(",
"table",
"=",
"BLOCK",
",",
"messages",
"=",
"block_messages_to_insert",
")",
"\"\"\"\n Resource info messages\n\n \"\"\"",
"resource_messages",
"=",
"self",
".",
"_get_messages_in_batch",
"(",
"self",
".",
"pending_resource_queue",
")",
"if",
"resource_messages",
":",
"logger",
".",
"debug",
"(",
"\"Got {} messages from resource queue, {} reprocessable\"",
".",
"format",
"(",
"len",
"(",
"resource_messages",
")",
",",
"len",
"(",
"reprocessable_first_resource_messages",
")",
")",
")",
"insert_resource_messages",
"=",
"[",
"]",
"for",
"msg",
"in",
"resource_messages",
":",
"task_try_id",
"=",
"str",
"(",
"msg",
"[",
"'task_id'",
"]",
")",
"+",
"\".\"",
"+",
"str",
"(",
"msg",
"[",
"'try_id'",
"]",
")",
"if",
"msg",
"[",
"'first_msg'",
"]",
":",
"# Update the running time to try table if first message",
"msg",
"[",
"'task_status_name'",
"]",
"=",
"States",
".",
"running",
".",
"name",
"msg",
"[",
"'task_try_time_running'",
"]",
"=",
"msg",
"[",
"'timestamp'",
"]",
"if",
"task_try_id",
"in",
"inserted_tries",
":",
"# TODO: needs to become task_id and try_id, and check against inserted_tries",
"reprocessable_first_resource_messages",
".",
"append",
"(",
"msg",
")",
"else",
":",
"if",
"task_try_id",
"in",
"deferred_resource_messages",
":",
"logger",
".",
"error",
"(",
"\"Task {} already has a deferred resource message. Discarding previous message.\"",
".",
"format",
"(",
"msg",
"[",
"'task_id'",
"]",
")",
")",
"deferred_resource_messages",
"[",
"task_try_id",
"]",
"=",
"msg",
"else",
":",
"# Insert to resource table if not first message",
"insert_resource_messages",
".",
"append",
"(",
"msg",
")",
"if",
"insert_resource_messages",
":",
"self",
".",
"_insert",
"(",
"table",
"=",
"RESOURCE",
",",
"messages",
"=",
"insert_resource_messages",
")",
"if",
"reprocessable_first_resource_messages",
":",
"self",
".",
"_insert",
"(",
"table",
"=",
"STATUS",
",",
"messages",
"=",
"reprocessable_first_resource_messages",
")",
"self",
".",
"_update",
"(",
"table",
"=",
"TRY",
",",
"columns",
"=",
"[",
"'task_try_time_running'",
",",
"'run_id'",
",",
"'task_id'",
",",
"'try_id'",
",",
"'block_id'",
",",
"'hostname'",
"]",
",",
"messages",
"=",
"reprocessable_first_resource_messages",
")",
"except",
"Exception",
":",
"logger",
".",
"exception",
"(",
"\"Exception in db loop: this might have been a malformed message, or some other error. monitoring data may have been lost\"",
")",
"exception_happened",
"=",
"True",
"if",
"exception_happened",
":",
"raise",
"RuntimeError",
"(",
"\"An exception happened sometime during database processing and should have been logged in database_manager.log\"",
")"
] |
https://github.com/Parsl/parsl/blob/af2535341152b2640fdd1a3b73b891992bf1b3ea/parsl/monitoring/db_manager.py#L281-L521
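The method above combines several daemon pull threads with a shared kill event; a reduced standalone sketch of that pattern (the names here are illustrative, not Parsl APIs):

```python
# Illustrative sketch only; mirrors the daemon-thread + kill-event shape
# of DatabaseManager.start without any Parsl internals.
import queue
import threading

def migrate(q: "queue.Queue", kill_event: threading.Event) -> None:
    while not kill_event.is_set():
        try:
            item = q.get(timeout=0.1)
        except queue.Empty:
            continue
        print("got", item)

kill = threading.Event()
q: "queue.Queue" = queue.Queue()
puller = threading.Thread(target=migrate, args=(q, kill),
                          name="migrate-demo", daemon=True)
puller.start()
q.put({"task_id": 1})
kill.set()
puller.join()
```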
|
||
flyyufelix/DenseNet-Keras
|
8c42d8092b2616a9fbf025c756b14c67be708685
|
densenet169.py
|
python
|
conv_block
|
(x, stage, branch, nb_filter, dropout_rate=None, weight_decay=1e-4)
|
return x
|
Apply BatchNorm, Relu, bottleneck 1x1 Conv2D, 3x3 Conv2D, and option dropout
# Arguments
x: input tensor
stage: index for dense block
branch: layer index within each dense block
nb_filter: number of filters
dropout_rate: dropout rate
weight_decay: weight decay factor
|
Apply BatchNorm, Relu, bottleneck 1x1 Conv2D, 3x3 Conv2D, and option dropout
# Arguments
x: input tensor
stage: index for dense block
branch: layer index within each dense block
nb_filter: number of filters
dropout_rate: dropout rate
weight_decay: weight decay factor
|
[
"Apply",
"BatchNorm",
"Relu",
"bottleneck",
"1x1",
"Conv2D",
"3x3",
"Conv2D",
"and",
"option",
"dropout",
"#",
"Arguments",
"x",
":",
"input",
"tensor",
"stage",
":",
"index",
"for",
"dense",
"block",
"branch",
":",
"layer",
"index",
"within",
"each",
"dense",
"block",
"nb_filter",
":",
"number",
"of",
"filters",
"dropout_rate",
":",
"dropout",
"rate",
"weight_decay",
":",
"weight",
"decay",
"factor"
] |
def conv_block(x, stage, branch, nb_filter, dropout_rate=None, weight_decay=1e-4):
'''Apply BatchNorm, Relu, bottleneck 1x1 Conv2D, 3x3 Conv2D, and option dropout
# Arguments
x: input tensor
stage: index for dense block
branch: layer index within each dense block
nb_filter: number of filters
dropout_rate: dropout rate
weight_decay: weight decay factor
'''
eps = 1.1e-5
conv_name_base = 'conv' + str(stage) + '_' + str(branch)
relu_name_base = 'relu' + str(stage) + '_' + str(branch)
# 1x1 Convolution (Bottleneck layer)
inter_channel = nb_filter * 4
x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base+'_x1_bn')(x)
x = Scale(axis=concat_axis, name=conv_name_base+'_x1_scale')(x)
x = Activation('relu', name=relu_name_base+'_x1')(x)
x = Convolution2D(inter_channel, 1, 1, name=conv_name_base+'_x1', bias=False)(x)
if dropout_rate:
x = Dropout(dropout_rate)(x)
# 3x3 Convolution
x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base+'_x2_bn')(x)
x = Scale(axis=concat_axis, name=conv_name_base+'_x2_scale')(x)
x = Activation('relu', name=relu_name_base+'_x2')(x)
x = ZeroPadding2D((1, 1), name=conv_name_base+'_x2_zeropadding')(x)
x = Convolution2D(nb_filter, 3, 3, name=conv_name_base+'_x2', bias=False)(x)
if dropout_rate:
x = Dropout(dropout_rate)(x)
return x
|
[
"def",
"conv_block",
"(",
"x",
",",
"stage",
",",
"branch",
",",
"nb_filter",
",",
"dropout_rate",
"=",
"None",
",",
"weight_decay",
"=",
"1e-4",
")",
":",
"eps",
"=",
"1.1e-5",
"conv_name_base",
"=",
"'conv'",
"+",
"str",
"(",
"stage",
")",
"+",
"'_'",
"+",
"str",
"(",
"branch",
")",
"relu_name_base",
"=",
"'relu'",
"+",
"str",
"(",
"stage",
")",
"+",
"'_'",
"+",
"str",
"(",
"branch",
")",
"# 1x1 Convolution (Bottleneck layer)",
"inter_channel",
"=",
"nb_filter",
"*",
"4",
"x",
"=",
"BatchNormalization",
"(",
"epsilon",
"=",
"eps",
",",
"axis",
"=",
"concat_axis",
",",
"name",
"=",
"conv_name_base",
"+",
"'_x1_bn'",
")",
"(",
"x",
")",
"x",
"=",
"Scale",
"(",
"axis",
"=",
"concat_axis",
",",
"name",
"=",
"conv_name_base",
"+",
"'_x1_scale'",
")",
"(",
"x",
")",
"x",
"=",
"Activation",
"(",
"'relu'",
",",
"name",
"=",
"relu_name_base",
"+",
"'_x1'",
")",
"(",
"x",
")",
"x",
"=",
"Convolution2D",
"(",
"inter_channel",
",",
"1",
",",
"1",
",",
"name",
"=",
"conv_name_base",
"+",
"'_x1'",
",",
"bias",
"=",
"False",
")",
"(",
"x",
")",
"if",
"dropout_rate",
":",
"x",
"=",
"Dropout",
"(",
"dropout_rate",
")",
"(",
"x",
")",
"# 3x3 Convolution",
"x",
"=",
"BatchNormalization",
"(",
"epsilon",
"=",
"eps",
",",
"axis",
"=",
"concat_axis",
",",
"name",
"=",
"conv_name_base",
"+",
"'_x2_bn'",
")",
"(",
"x",
")",
"x",
"=",
"Scale",
"(",
"axis",
"=",
"concat_axis",
",",
"name",
"=",
"conv_name_base",
"+",
"'_x2_scale'",
")",
"(",
"x",
")",
"x",
"=",
"Activation",
"(",
"'relu'",
",",
"name",
"=",
"relu_name_base",
"+",
"'_x2'",
")",
"(",
"x",
")",
"x",
"=",
"ZeroPadding2D",
"(",
"(",
"1",
",",
"1",
")",
",",
"name",
"=",
"conv_name_base",
"+",
"'_x2_zeropadding'",
")",
"(",
"x",
")",
"x",
"=",
"Convolution2D",
"(",
"nb_filter",
",",
"3",
",",
"3",
",",
"name",
"=",
"conv_name_base",
"+",
"'_x2'",
",",
"bias",
"=",
"False",
")",
"(",
"x",
")",
"if",
"dropout_rate",
":",
"x",
"=",
"Dropout",
"(",
"dropout_rate",
")",
"(",
"x",
")",
"return",
"x"
] |
https://github.com/flyyufelix/DenseNet-Keras/blob/8c42d8092b2616a9fbf025c756b14c67be708685/densenet169.py#L80-L114
|
|
eirannejad/pyRevit
|
49c0b7eb54eb343458ce1365425e6552d0c47d44
|
site-packages/sqlalchemy/sql/type_api.py
|
python
|
TypeDecorator._has_bind_processor
|
(self)
|
return self.__class__.process_bind_param.__code__ \
is not TypeDecorator.process_bind_param.__code__
|
memoized boolean, check if process_bind_param is implemented.
Allows the base process_bind_param to raise
NotImplementedError without needing to test an expensive
exception throw.
|
memoized boolean, check if process_bind_param is implemented.
|
[
"memoized",
"boolean",
"check",
"if",
"process_bind_param",
"is",
"implemented",
"."
] |
def _has_bind_processor(self):
"""memoized boolean, check if process_bind_param is implemented.
Allows the base process_bind_param to raise
NotImplementedError without needing to test an expensive
exception throw.
"""
return self.__class__.process_bind_param.__code__ \
is not TypeDecorator.process_bind_param.__code__
|
[
"def",
"_has_bind_processor",
"(",
"self",
")",
":",
"return",
"self",
".",
"__class__",
".",
"process_bind_param",
".",
"__code__",
"is",
"not",
"TypeDecorator",
".",
"process_bind_param",
".",
"__code__"
] |
https://github.com/eirannejad/pyRevit/blob/49c0b7eb54eb343458ce1365425e6552d0c47d44/site-packages/sqlalchemy/sql/type_api.py#L983-L993
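A minimal sketch of where the check above matters: a TypeDecorator subclass that overrides process_bind_param (the class name and behavior below are hypothetical, not taken from this record) has a __code__ object that differs from the base implementation, so _has_bind_processor() evaluates to True.
from sqlalchemy.types import TypeDecorator, String

class LowerCaseString(TypeDecorator):
    # Overriding process_bind_param is what makes _has_bind_processor() True.
    impl = String

    def process_bind_param(self, value, dialect):
        # Normalize values before they are bound to a SQL statement.
        return value.lower() if value is not None else None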
|
|
cloudbase/cloudbase-init
|
300dabb36e91502f4ae93640136ad7a804aebd88
|
cloudbaseinit/metadata/services/packet.py
|
python
|
PacketService.get_user_data
|
(self)
|
return self._get_cache_data("userdata")
|
Get the available user data for the current instance.
|
Get the available user data for the current instance.
|
[
"Get",
"the",
"available",
"user",
"data",
"for",
"the",
"current",
"instance",
"."
] |
def get_user_data(self):
"""Get the available user data for the current instance."""
return self._get_cache_data("userdata")
|
[
"def",
"get_user_data",
"(",
"self",
")",
":",
"return",
"self",
".",
"_get_cache_data",
"(",
"\"userdata\"",
")"
] |
https://github.com/cloudbase/cloudbase-init/blob/300dabb36e91502f4ae93640136ad7a804aebd88/cloudbaseinit/metadata/services/packet.py#L86-L88
|
|
hyperledger/aries-cloudagent-python
|
2f36776e99f6053ae92eed8123b5b1b2e891c02a
|
aries_cloudagent/protocols/issue_credential/v2_0/routes.py
|
python
|
credential_exchange_create_free_offer
|
(request: web.BaseRequest)
|
return web.json_response(result)
|
Request handler for creating free credential offer.
Unlike with `send-offer`, this credential exchange is not tied to a specific
connection. It must be dispatched out-of-band by the controller.
Args:
request: aiohttp request object
Returns:
The credential exchange record
|
Request handler for creating free credential offer.
|
[
"Request",
"handler",
"for",
"creating",
"free",
"credential",
"offer",
"."
] |
async def credential_exchange_create_free_offer(request: web.BaseRequest):
"""
Request handler for creating free credential offer.
Unlike with `send-offer`, this credential exchange is not tied to a specific
connection. It must be dispatched out-of-band by the controller.
Args:
request: aiohttp request object
Returns:
The credential exchange record
"""
r_time = get_timer()
context: AdminRequestContext = request["context"]
profile = context.profile
body = await request.json()
auto_issue = body.get(
"auto_issue", context.settings.get("debug.auto_respond_credential_request")
)
auto_remove = body.get("auto_remove")
comment = body.get("comment")
preview_spec = body.get("credential_preview")
filt_spec = body.get("filter")
if not filt_spec:
raise web.HTTPBadRequest(reason="Missing filter")
trace_msg = body.get("trace")
cred_ex_record = None
try:
(cred_ex_record, cred_offer_message) = await _create_free_offer(
profile=profile,
filt_spec=filt_spec,
auto_issue=auto_issue,
auto_remove=auto_remove,
preview_spec=preview_spec,
comment=comment,
trace_msg=trace_msg,
)
result = cred_ex_record.serialize()
except (
BaseModelError,
LedgerError,
V20CredFormatError,
V20CredManagerError,
) as err:
if cred_ex_record:
async with profile.session() as session:
await cred_ex_record.save_error_state(session, reason=err.roll_up)
raise web.HTTPBadRequest(reason=err.roll_up)
trace_event(
context.settings,
cred_offer_message,
outcome="credential_exchange_create_free_offer.END",
perf_counter=r_time,
)
return web.json_response(result)
|
[
"async",
"def",
"credential_exchange_create_free_offer",
"(",
"request",
":",
"web",
".",
"BaseRequest",
")",
":",
"r_time",
"=",
"get_timer",
"(",
")",
"context",
":",
"AdminRequestContext",
"=",
"request",
"[",
"\"context\"",
"]",
"profile",
"=",
"context",
".",
"profile",
"body",
"=",
"await",
"request",
".",
"json",
"(",
")",
"auto_issue",
"=",
"body",
".",
"get",
"(",
"\"auto_issue\"",
",",
"context",
".",
"settings",
".",
"get",
"(",
"\"debug.auto_respond_credential_request\"",
")",
")",
"auto_remove",
"=",
"body",
".",
"get",
"(",
"\"auto_remove\"",
")",
"comment",
"=",
"body",
".",
"get",
"(",
"\"comment\"",
")",
"preview_spec",
"=",
"body",
".",
"get",
"(",
"\"credential_preview\"",
")",
"filt_spec",
"=",
"body",
".",
"get",
"(",
"\"filter\"",
")",
"if",
"not",
"filt_spec",
":",
"raise",
"web",
".",
"HTTPBadRequest",
"(",
"reason",
"=",
"\"Missing filter\"",
")",
"trace_msg",
"=",
"body",
".",
"get",
"(",
"\"trace\"",
")",
"cred_ex_record",
"=",
"None",
"try",
":",
"(",
"cred_ex_record",
",",
"cred_offer_message",
")",
"=",
"await",
"_create_free_offer",
"(",
"profile",
"=",
"profile",
",",
"filt_spec",
"=",
"filt_spec",
",",
"auto_issue",
"=",
"auto_issue",
",",
"auto_remove",
"=",
"auto_remove",
",",
"preview_spec",
"=",
"preview_spec",
",",
"comment",
"=",
"comment",
",",
"trace_msg",
"=",
"trace_msg",
",",
")",
"result",
"=",
"cred_ex_record",
".",
"serialize",
"(",
")",
"except",
"(",
"BaseModelError",
",",
"LedgerError",
",",
"V20CredFormatError",
",",
"V20CredManagerError",
",",
")",
"as",
"err",
":",
"if",
"cred_ex_record",
":",
"async",
"with",
"profile",
".",
"session",
"(",
")",
"as",
"session",
":",
"await",
"cred_ex_record",
".",
"save_error_state",
"(",
"session",
",",
"reason",
"=",
"err",
".",
"roll_up",
")",
"raise",
"web",
".",
"HTTPBadRequest",
"(",
"reason",
"=",
"err",
".",
"roll_up",
")",
"trace_event",
"(",
"context",
".",
"settings",
",",
"cred_offer_message",
",",
"outcome",
"=",
"\"credential_exchange_create_free_offer.END\"",
",",
"perf_counter",
"=",
"r_time",
",",
")",
"return",
"web",
".",
"json_response",
"(",
"result",
")"
] |
https://github.com/hyperledger/aries-cloudagent-python/blob/2f36776e99f6053ae92eed8123b5b1b2e891c02a/aries_cloudagent/protocols/issue_credential/v2_0/routes.py#L820-L879
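For illustration, a request body accepted by this handler might look like the sketch below. The keys mirror exactly the body.get(...) calls in the code above; every value is a placeholder assumption, not a verified example from the project's documentation.
# Hypothetical payload; keys follow the handler above, values are placeholders.
example_body = {
    "filter": {"indy": {"cred_def_id": "<cred-def-id>"}},   # required: missing "filter" raises HTTP 400
    "credential_preview": {"attributes": [{"name": "score", "value": "94"}]},
    "comment": "free offer created out-of-band",
    "auto_issue": True,
    "auto_remove": False,
    "trace": False,
}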
|
|
caiiiac/Machine-Learning-with-Python
|
1a26c4467da41ca4ebc3d5bd789ea942ef79422f
|
MachineLearning/venv/lib/python3.5/site-packages/pandas/io/sql.py
|
python
|
read_sql
|
(sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, columns=None, chunksize=None)
|
Read SQL query or database table into a DataFrame.
Parameters
----------
sql : string SQL query or SQLAlchemy Selectable (select or text object)
to be executed, or database table name.
con : SQLAlchemy connectable(engine/connection) or database string URI
or DBAPI2 connection (fallback mode)
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex)
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
columns : list, default: None
List of column names to select from sql table (only used when reading
a table).
chunksize : int, default None
If specified, return an iterator where `chunksize` is the
number of rows to include in each chunk.
Returns
-------
DataFrame
Notes
-----
This function is a convenience wrapper around ``read_sql_table`` and
``read_sql_query`` (and for backward compatibility) and will delegate
to the specific function depending on the provided input (database
table name or sql query). The delegated function might have more specific
notes about their functionality not listed here.
See also
--------
read_sql_table : Read SQL database table into a DataFrame
read_sql_query : Read SQL query into a DataFrame
|
Read SQL query or database table into a DataFrame.
|
[
"Read",
"SQL",
"query",
"or",
"database",
"table",
"into",
"a",
"DataFrame",
"."
] |
def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
parse_dates=None, columns=None, chunksize=None):
"""
Read SQL query or database table into a DataFrame.
Parameters
----------
sql : string SQL query or SQLAlchemy Selectable (select or text object)
to be executed, or database table name.
con : SQLAlchemy connectable(engine/connection) or database string URI
or DBAPI2 connection (fallback mode)
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex)
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
columns : list, default: None
List of column names to select from sql table (only used when reading
a table).
chunksize : int, default None
If specified, return an iterator where `chunksize` is the
number of rows to include in each chunk.
Returns
-------
DataFrame
Notes
-----
This function is a convenience wrapper around ``read_sql_table`` and
``read_sql_query`` (and for backward compatibility) and will delegate
to the specific function depending on the provided input (database
table name or sql query). The delegated function might have more specific
notes about their functionality not listed here.
See also
--------
read_sql_table : Read SQL database table into a DataFrame
read_sql_query : Read SQL query into a DataFrame
"""
pandas_sql = pandasSQL_builder(con)
if isinstance(pandas_sql, SQLiteDatabase):
return pandas_sql.read_query(
sql, index_col=index_col, params=params,
coerce_float=coerce_float, parse_dates=parse_dates,
chunksize=chunksize)
try:
_is_table_name = pandas_sql.has_table(sql)
except:
_is_table_name = False
if _is_table_name:
pandas_sql.meta.reflect(only=[sql])
return pandas_sql.read_table(
sql, index_col=index_col, coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns, chunksize=chunksize)
else:
return pandas_sql.read_query(
sql, index_col=index_col, params=params,
coerce_float=coerce_float, parse_dates=parse_dates,
chunksize=chunksize)
|
[
"def",
"read_sql",
"(",
"sql",
",",
"con",
",",
"index_col",
"=",
"None",
",",
"coerce_float",
"=",
"True",
",",
"params",
"=",
"None",
",",
"parse_dates",
"=",
"None",
",",
"columns",
"=",
"None",
",",
"chunksize",
"=",
"None",
")",
":",
"pandas_sql",
"=",
"pandasSQL_builder",
"(",
"con",
")",
"if",
"isinstance",
"(",
"pandas_sql",
",",
"SQLiteDatabase",
")",
":",
"return",
"pandas_sql",
".",
"read_query",
"(",
"sql",
",",
"index_col",
"=",
"index_col",
",",
"params",
"=",
"params",
",",
"coerce_float",
"=",
"coerce_float",
",",
"parse_dates",
"=",
"parse_dates",
",",
"chunksize",
"=",
"chunksize",
")",
"try",
":",
"_is_table_name",
"=",
"pandas_sql",
".",
"has_table",
"(",
"sql",
")",
"except",
":",
"_is_table_name",
"=",
"False",
"if",
"_is_table_name",
":",
"pandas_sql",
".",
"meta",
".",
"reflect",
"(",
"only",
"=",
"[",
"sql",
"]",
")",
"return",
"pandas_sql",
".",
"read_table",
"(",
"sql",
",",
"index_col",
"=",
"index_col",
",",
"coerce_float",
"=",
"coerce_float",
",",
"parse_dates",
"=",
"parse_dates",
",",
"columns",
"=",
"columns",
",",
"chunksize",
"=",
"chunksize",
")",
"else",
":",
"return",
"pandas_sql",
".",
"read_query",
"(",
"sql",
",",
"index_col",
"=",
"index_col",
",",
"params",
"=",
"params",
",",
"coerce_float",
"=",
"coerce_float",
",",
"parse_dates",
"=",
"parse_dates",
",",
"chunksize",
"=",
"chunksize",
")"
] |
https://github.com/caiiiac/Machine-Learning-with-Python/blob/1a26c4467da41ca4ebc3d5bd789ea942ef79422f/MachineLearning/venv/lib/python3.5/site-packages/pandas/io/sql.py#L335-L416
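A minimal usage sketch of the function above, assuming a local SQLite file example.db containing a users table (both names are hypothetical); with a plain DBAPI2 connection the call takes the query path described in the docstring.
import sqlite3
import pandas as pd

con = sqlite3.connect("example.db")          # DBAPI2 fallback mode: SQL queries only
df = pd.read_sql(
    "SELECT * FROM users WHERE id > ?", con,
    params=(10,),                            # sqlite3 uses the '?' paramstyle
    parse_dates=["created_at"],              # parse this column as datetimes
)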
|
||
sagemath/sage
|
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
|
src/sage/combinat/posets/poset_examples.py
|
python
|
Posets.CoxeterGroupAbsoluteOrderPoset
|
(W, use_reduced_words=True)
|
return Poset({s: s.absolute_covers() for s in W})
|
r"""
Return the poset of elements of a Coxeter group with respect
to absolute order.
INPUT:
- ``W`` -- a Coxeter group
- ``use_reduced_words`` -- boolean (default: ``True``); if
``True``, then the elements are labeled by their lexicographically
minimal reduced word
EXAMPLES::
sage: W = CoxeterGroup(['B', 3])
sage: posets.CoxeterGroupAbsoluteOrderPoset(W)
Finite poset containing 48 elements
sage: W = WeylGroup(['B', 2], prefix='s')
sage: posets.CoxeterGroupAbsoluteOrderPoset(W, False)
Finite poset containing 8 elements
|
r"""
Return the poset of elements of a Coxeter group with respect
to absolute order.
|
[
"r",
"Return",
"the",
"poset",
"of",
"elements",
"of",
"a",
"Coxeter",
"group",
"with",
"respect",
"to",
"absolute",
"order",
"."
] |
def CoxeterGroupAbsoluteOrderPoset(W, use_reduced_words=True):
r"""
Return the poset of elements of a Coxeter group with respect
to absolute order.
INPUT:
- ``W`` -- a Coxeter group
- ``use_reduced_words`` -- boolean (default: ``True``); if
``True``, then the elements are labeled by their lexicographically
minimal reduced word
EXAMPLES::
sage: W = CoxeterGroup(['B', 3])
sage: posets.CoxeterGroupAbsoluteOrderPoset(W)
Finite poset containing 48 elements
sage: W = WeylGroup(['B', 2], prefix='s')
sage: posets.CoxeterGroupAbsoluteOrderPoset(W, False)
Finite poset containing 8 elements
"""
if use_reduced_words:
element_labels = {s: tuple(s.reduced_word()) for s in W}
return Poset({s: s.absolute_covers() for s in W}, element_labels)
return Poset({s: s.absolute_covers() for s in W})
|
[
"def",
"CoxeterGroupAbsoluteOrderPoset",
"(",
"W",
",",
"use_reduced_words",
"=",
"True",
")",
":",
"if",
"use_reduced_words",
":",
"element_labels",
"=",
"{",
"s",
":",
"tuple",
"(",
"s",
".",
"reduced_word",
"(",
")",
")",
"for",
"s",
"in",
"W",
"}",
"return",
"Poset",
"(",
"{",
"s",
":",
"s",
".",
"absolute_covers",
"(",
")",
"for",
"s",
"in",
"W",
"}",
",",
"element_labels",
")",
"return",
"Poset",
"(",
"{",
"s",
":",
"s",
".",
"absolute_covers",
"(",
")",
"for",
"s",
"in",
"W",
"}",
")"
] |
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/combinat/posets/poset_examples.py#L1285-L1310
|
|
iagcl/watchmen
|
d329b357e6fde3ad91e972988b160a33c12afc2a
|
elasticsearch/roll_indexes/packages/elasticsearch/client/cluster.py
|
python
|
ClusterClient.reroute
|
(self, body=None, params=None)
|
return self.transport.perform_request('POST', '/_cluster/reroute',
params=params, body=body)
|
Explicitly execute a cluster reroute allocation command including specific commands.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-reroute.html>`_
:arg body: The definition of `commands` to perform (`move`, `cancel`,
`allocate`)
:arg dry_run: Simulate the operation only and return the resulting state
:arg explain: Return an explanation of why the commands can or cannot be
executed
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg metric: Limit the information returned to the specified metrics.
Defaults to all but metadata, valid choices are: '_all', 'blocks',
'metadata', 'nodes', 'routing_table', 'master_node', 'version'
:arg retry_failed: Retries allocation of shards that are blocked due to
too many subsequent allocation failures
:arg timeout: Explicit operation timeout
|
Explicitly execute a cluster reroute allocation command including specific commands.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-reroute.html>`_
|
[
"Explicitly",
"execute",
"a",
"cluster",
"reroute",
"allocation",
"command",
"including",
"specific",
"commands",
".",
"<http",
":",
"//",
"www",
".",
"elastic",
".",
"co",
"/",
"guide",
"/",
"en",
"/",
"elasticsearch",
"/",
"reference",
"/",
"current",
"/",
"cluster",
"-",
"reroute",
".",
"html",
">",
"_"
] |
def reroute(self, body=None, params=None):
"""
Explicitly execute a cluster reroute allocation command including specific commands.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-reroute.html>`_
:arg body: The definition of `commands` to perform (`move`, `cancel`,
`allocate`)
:arg dry_run: Simulate the operation only and return the resulting state
:arg explain: Return an explanation of why the commands can or cannot be
executed
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg metric: Limit the information returned to the specified metrics.
Defaults to all but metadata, valid choices are: '_all', 'blocks',
'metadata', 'nodes', 'routing_table', 'master_node', 'version'
:arg retry_failed: Retries allocation of shards that are blocked due to
too many subsequent allocation failures
:arg timeout: Explicit operation timeout
"""
return self.transport.perform_request('POST', '/_cluster/reroute',
params=params, body=body)
|
[
"def",
"reroute",
"(",
"self",
",",
"body",
"=",
"None",
",",
"params",
"=",
"None",
")",
":",
"return",
"self",
".",
"transport",
".",
"perform_request",
"(",
"'POST'",
",",
"'/_cluster/reroute'",
",",
"params",
"=",
"params",
",",
"body",
"=",
"body",
")"
] |
https://github.com/iagcl/watchmen/blob/d329b357e6fde3ad91e972988b160a33c12afc2a/elasticsearch/roll_indexes/packages/elasticsearch/client/cluster.py#L102-L122
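An illustrative call to the method above. The body follows the documented cluster-reroute command format, but the client instance, index and node names are placeholders.
# Assumes an Elasticsearch client instance `es`; names are placeholders.
es.cluster.reroute(
    body={
        "commands": [
            {"move": {"index": "logs-2017", "shard": 0,
                      "from_node": "node-1", "to_node": "node-2"}}
        ]
    },
    params={"dry_run": "true", "explain": "true"},   # simulate and explain only
)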
|
|
vmware/vsphere-automation-sdk-python
|
ba7d4e0742f58a641dfed9538ecbbb1db4f3891e
|
samples/vsphere/logforwarding/log_forwarding.py
|
python
|
LogForwarding.__init__
|
(self)
|
[] |
def __init__(self):
self.loghost = None
self.protocol = None
self.port = None
self.stub_config = None
self.log_forwarding_client = None
|
[
"def",
"__init__",
"(",
"self",
")",
":",
"self",
".",
"loghost",
"=",
"None",
"self",
".",
"protocol",
"=",
"None",
"self",
".",
"port",
"=",
"None",
"self",
".",
"stub_config",
"=",
"None",
"self",
".",
"log_forwarding_client",
"=",
"None"
] |
https://github.com/vmware/vsphere-automation-sdk-python/blob/ba7d4e0742f58a641dfed9538ecbbb1db4f3891e/samples/vsphere/logforwarding/log_forwarding.py#L35-L40
|
||||
samgranger/EQGRP
|
fa0a1e1460767b8312839dc4be922f26ecdd250b
|
Firewall/EXPLOITS/EPBA/EPICBANANA/pexpect.py
|
python
|
spawn.__interact_writen
|
(self, fd, data)
|
This is used by the interact() method.
|
This is used by the interact() method.
|
[
"This",
"is",
"used",
"by",
"the",
"interact",
"()",
"method",
"."
] |
def __interact_writen(self, fd, data):
"""This is used by the interact() method.
"""
while data != '' and self.isalive():
n = os.write(fd, data)
data = data[n:]
|
[
"def",
"__interact_writen",
"(",
"self",
",",
"fd",
",",
"data",
")",
":",
"while",
"data",
"!=",
"''",
"and",
"self",
".",
"isalive",
"(",
")",
":",
"n",
"=",
"os",
".",
"write",
"(",
"fd",
",",
"data",
")",
"data",
"=",
"data",
"[",
"n",
":",
"]"
] |
https://github.com/samgranger/EQGRP/blob/fa0a1e1460767b8312839dc4be922f26ecdd250b/Firewall/EXPLOITS/EPBA/EPICBANANA/pexpect.py#L1496-L1503
|
||
sedthh/pyxelate
|
fbbcfbc2894c8bbf825b0667923dca45d617b523
|
pyxelate/pal.py
|
python
|
BasePalette.__len__
|
(self)
|
return len(self.value)
|
Number of colors in palette
|
Number of colors in palette
|
[
"Number",
"of",
"colors",
"in",
"palette"
] |
def __len__(self):
"""Number of colors in palette"""
return len(self.value)
|
[
"def",
"__len__",
"(",
"self",
")",
":",
"return",
"len",
"(",
"self",
".",
"value",
")"
] |
https://github.com/sedthh/pyxelate/blob/fbbcfbc2894c8bbf825b0667923dca45d617b523/pyxelate/pal.py#L7-L9
|
|
bendmorris/static-python
|
2e0f8c4d7ed5b359dc7d8a75b6fb37e6b6c5c473
|
Lib/idlelib/AutoComplete.py
|
python
|
AutoComplete.autocomplete_event
|
(self, event)
|
Happens when the user wants to complete his word, and if necessary,
open a completion list after that (if there is more than one
completion)
|
Happens when the user wants to complete his word, and if necessary,
open a completion list after that (if there is more than one
completion)
|
[
"Happens",
"when",
"the",
"user",
"wants",
"to",
"complete",
"his",
"word",
"and",
"if",
"necessary",
"open",
"a",
"completion",
"list",
"after",
"that",
"(",
"if",
"there",
"is",
"more",
"than",
"one",
"completion",
")"
] |
def autocomplete_event(self, event):
"""Happens when the user wants to complete his word, and if necessary,
open a completion list after that (if there is more than one
completion)
"""
if hasattr(event, "mc_state") and event.mc_state:
# A modifier was pressed along with the tab, continue as usual.
return
if self.autocompletewindow and self.autocompletewindow.is_active():
self.autocompletewindow.complete()
return "break"
else:
opened = self.open_completions(False, True, True)
if opened:
return "break"
|
[
"def",
"autocomplete_event",
"(",
"self",
",",
"event",
")",
":",
"if",
"hasattr",
"(",
"event",
",",
"\"mc_state\"",
")",
"and",
"event",
".",
"mc_state",
":",
"# A modifier was pressed along with the tab, continue as usual.",
"return",
"if",
"self",
".",
"autocompletewindow",
"and",
"self",
".",
"autocompletewindow",
".",
"is_active",
"(",
")",
":",
"self",
".",
"autocompletewindow",
".",
"complete",
"(",
")",
"return",
"\"break\"",
"else",
":",
"opened",
"=",
"self",
".",
"open_completions",
"(",
"False",
",",
"True",
",",
"True",
")",
"if",
"opened",
":",
"return",
"\"break\""
] |
https://github.com/bendmorris/static-python/blob/2e0f8c4d7ed5b359dc7d8a75b6fb37e6b6c5c473/Lib/idlelib/AutoComplete.py#L78-L92
|
||
AIChallenger/AI_Challenger_2017
|
52014e0defbbdd85bf94ab05d308300d5764022f
|
Baselines/caption_baseline/build_tfrecord.py
|
python
|
_process_dataset
|
(name, images, vocab, num_shards)
|
Processes a complete data set and saves it as a TFRecord.
Args:
name: Unique identifier specifying the dataset.
images: List of ImageMetadata.
vocab: A Vocabulary object.
num_shards: Integer number of shards for the output files.
|
Processes a complete data set and saves it as a TFRecord.
Args:
name: Unique identifier specifying the dataset.
images: List of ImageMetadata.
vocab: A Vocabulary object.
num_shards: Integer number of shards for the output files.
|
[
"Processes",
"a",
"complete",
"data",
"set",
"and",
"saves",
"it",
"as",
"a",
"TFRecord",
".",
"Args",
":",
"name",
":",
"Unique",
"identifier",
"specifying",
"the",
"dataset",
".",
"images",
":",
"List",
"of",
"ImageMetadata",
".",
"vocab",
":",
"A",
"Vocabulary",
"object",
".",
"num_shards",
":",
"Integer",
"number",
"of",
"shards",
"for",
"the",
"output",
"files",
"."
] |
def _process_dataset(name, images, vocab, num_shards):
"""Processes a complete data set and saves it as a TFRecord.
Args:
name: Unique identifier specifying the dataset.
images: List of ImageMetadata.
vocab: A Vocabulary object.
num_shards: Integer number of shards for the output files.
"""
# Break up each image into a separate entity for each caption.
images = [ImageMetadata(image.id, image.filename, [caption])
for image in images for caption in image.captions]
# Shuffle the ordering of images. Make the randomization repeatable.
random.seed(12345)
random.shuffle(images)
# Break the images into num_threads batches. Batch i is defined as
# images[ranges[i][0]:ranges[i][1]].
num_threads = min(num_shards, FLAGS.num_threads)
spacing = np.linspace(0, len(images), num_threads + 1).astype(np.int)
ranges = []
threads = []
for i in range(len(spacing) - 1):
ranges.append([spacing[i], spacing[i + 1]])
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a utility for decoding JPEG images to run sanity checks.
decoder = ImageDecoder()
# Launch a thread for each batch.
print("Launching %d threads for spacings: %s" % (num_threads, ranges))
for thread_index in range(len(ranges)):
args = (thread_index, ranges, name, images, decoder, vocab, num_shards)
t = threading.Thread(target=_process_image_files, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print("%s: Finished processing all %d image-caption pairs in data set '%s'." %
(datetime.now(), len(images), name))
|
[
"def",
"_process_dataset",
"(",
"name",
",",
"images",
",",
"vocab",
",",
"num_shards",
")",
":",
"# Break up each image into a separate entity for each caption.",
"images",
"=",
"[",
"ImageMetadata",
"(",
"image",
".",
"id",
",",
"image",
".",
"filename",
",",
"[",
"caption",
"]",
")",
"for",
"image",
"in",
"images",
"for",
"caption",
"in",
"image",
".",
"captions",
"]",
"# Shuffle the ordering of images. Make the randomization repeatable.",
"random",
".",
"seed",
"(",
"12345",
")",
"random",
".",
"shuffle",
"(",
"images",
")",
"# Break the images into num_threads batches. Batch i is defined as",
"# images[ranges[i][0]:ranges[i][1]].",
"num_threads",
"=",
"min",
"(",
"num_shards",
",",
"FLAGS",
".",
"num_threads",
")",
"spacing",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"len",
"(",
"images",
")",
",",
"num_threads",
"+",
"1",
")",
".",
"astype",
"(",
"np",
".",
"int",
")",
"ranges",
"=",
"[",
"]",
"threads",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"spacing",
")",
"-",
"1",
")",
":",
"ranges",
".",
"append",
"(",
"[",
"spacing",
"[",
"i",
"]",
",",
"spacing",
"[",
"i",
"+",
"1",
"]",
"]",
")",
"# Create a mechanism for monitoring when all threads are finished.",
"coord",
"=",
"tf",
".",
"train",
".",
"Coordinator",
"(",
")",
"# Create a utility for decoding JPEG images to run sanity checks.",
"decoder",
"=",
"ImageDecoder",
"(",
")",
"# Launch a thread for each batch.",
"print",
"(",
"\"Launching %d threads for spacings: %s\"",
"%",
"(",
"num_threads",
",",
"ranges",
")",
")",
"for",
"thread_index",
"in",
"range",
"(",
"len",
"(",
"ranges",
")",
")",
":",
"args",
"=",
"(",
"thread_index",
",",
"ranges",
",",
"name",
",",
"images",
",",
"decoder",
",",
"vocab",
",",
"num_shards",
")",
"t",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"_process_image_files",
",",
"args",
"=",
"args",
")",
"t",
".",
"start",
"(",
")",
"threads",
".",
"append",
"(",
"t",
")",
"# Wait for all the threads to terminate.",
"coord",
".",
"join",
"(",
"threads",
")",
"print",
"(",
"\"%s: Finished processing all %d image-caption pairs in data set '%s'.\"",
"%",
"(",
"datetime",
".",
"now",
"(",
")",
",",
"len",
"(",
"images",
")",
",",
"name",
")",
")"
] |
https://github.com/AIChallenger/AI_Challenger_2017/blob/52014e0defbbdd85bf94ab05d308300d5764022f/Baselines/caption_baseline/build_tfrecord.py#L228-L270
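The thread partitioning above is easiest to see with small numbers; the sketch below reproduces the spacing/ranges computation for 10 images and 4 threads (illustrative values only).
import numpy as np

num_images, num_threads = 10, 4
spacing = np.linspace(0, num_images, num_threads + 1).astype(int)
# spacing -> array([ 0,  2,  5,  7, 10])
ranges = [[spacing[i], spacing[i + 1]] for i in range(len(spacing) - 1)]
# ranges -> [[0, 2], [2, 5], [5, 7], [7, 10]]
# thread i then handles images[ranges[i][0]:ranges[i][1]]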
|
||
fake-name/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
WebMirror/management/rss_parser_funcs/feed_parse_extractCayuruyuBlogspotCom.py
|
python
|
extractCayuruyuBlogspotCom
|
(item)
|
return False
|
Parser for 'cayuruyu.blogspot.com'
|
Parser for 'cayuruyu.blogspot.com'
|
[
"Parser",
"for",
"cayuruyu",
".",
"blogspot",
".",
"com"
] |
def extractCayuruyuBlogspotCom(item):
'''
Parser for 'cayuruyu.blogspot.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
[
"def",
"extractCayuruyuBlogspotCom",
"(",
"item",
")",
":",
"vol",
",",
"chp",
",",
"frag",
",",
"postfix",
"=",
"extractVolChapterFragmentPostfix",
"(",
"item",
"[",
"'title'",
"]",
")",
"if",
"not",
"(",
"chp",
"or",
"vol",
")",
"or",
"\"preview\"",
"in",
"item",
"[",
"'title'",
"]",
".",
"lower",
"(",
")",
":",
"return",
"None",
"tagmap",
"=",
"[",
"(",
"'PRC'",
",",
"'PRC'",
",",
"'translated'",
")",
",",
"(",
"'Loiterous'",
",",
"'Loiterous'",
",",
"'oel'",
")",
",",
"]",
"for",
"tagname",
",",
"name",
",",
"tl_type",
"in",
"tagmap",
":",
"if",
"tagname",
"in",
"item",
"[",
"'tags'",
"]",
":",
"return",
"buildReleaseMessageWithType",
"(",
"item",
",",
"name",
",",
"vol",
",",
"chp",
",",
"frag",
"=",
"frag",
",",
"postfix",
"=",
"postfix",
",",
"tl_type",
"=",
"tl_type",
")",
"return",
"False"
] |
https://github.com/fake-name/ReadableWebProxy/blob/ed5c7abe38706acc2684a1e6cd80242a03c5f010/WebMirror/management/rss_parser_funcs/feed_parse_extractCayuruyuBlogspotCom.py#L2-L21
|
|
holzschu/Carnets
|
44effb10ddfc6aa5c8b0687582a724ba82c6b547
|
Library/lib/python3.7/site-packages/networkx/algorithms/connectivity/edge_kcomponents.py
|
python
|
general_k_edge_subgraphs
|
(G, k)
|
General algorithm to find all maximal k-edge-connected subgraphs in G.
Returns
-------
k_edge_subgraphs : a generator of nx.Graphs that are k-edge-subgraphs
Each k-edge-subgraph is a maximal set of nodes that defines a subgraph
of G that is k-edge-connected.
Notes
-----
Implementation of the basic algorithm from _[1]. The basic idea is to find
a global minimum cut of the graph. If the cut value is at least k, then the
graph is a k-edge-connected subgraph and can be added to the results.
Otherwise, the cut is used to split the graph in two and the procedure is
applied recursively. If the graph is just a single node, then it is also
added to the results. At the end, each result is either guaranteed to be
a single node or a subgraph of G that is k-edge-connected.
This implementation contains optimizations for reducing the number of calls
to max-flow, but there are other optimizations in _[1] that could be
implemented.
References
----------
.. [1] Zhou, Liu, et al. (2012) Finding maximal k-edge-connected subgraphs
from a large graph. ACM International Conference on Extending Database
Technology 2012 480–491.
https://openproceedings.org/2012/conf/edbt/ZhouLYLCL12.pdf
Example
-------
>>> from networkx.utils import pairwise
>>> paths = [
... (11, 12, 13, 14, 11, 13, 14, 12), # a 4-clique
... (21, 22, 23, 24, 21, 23, 24, 22), # another 4-clique
... # connect the cliques with high degree but low connectivity
... (50, 13),
... (12, 50, 22),
... (13, 102, 23),
... (14, 101, 24),
... ]
>>> G = nx.Graph(it.chain(*[pairwise(path) for path in paths]))
>>> sorted(map(len, k_edge_subgraphs(G, k=3)))
[1, 1, 1, 4, 4]
|
General algorithm to find all maximal k-edge-connected subgraphs in G.
|
[
"General",
"algorithm",
"to",
"find",
"all",
"maximal",
"k",
"-",
"edge",
"-",
"connected",
"subgraphs",
"in",
"G",
"."
] |
def general_k_edge_subgraphs(G, k):
"""General algorithm to find all maximal k-edge-connected subgraphs in G.
Returns
-------
k_edge_subgraphs : a generator of nx.Graphs that are k-edge-subgraphs
Each k-edge-subgraph is a maximal set of nodes that defines a subgraph
of G that is k-edge-connected.
Notes
-----
Implementation of the basic algorithm from _[1]. The basic idea is to find
a global minimum cut of the graph. If the cut value is at least k, then the
graph is a k-edge-connected subgraph and can be added to the results.
Otherwise, the cut is used to split the graph in two and the procedure is
applied recursively. If the graph is just a single node, then it is also
added to the results. At the end, each result is either guaranteed to be
a single node or a subgraph of G that is k-edge-connected.
This implementation contains optimizations for reducing the number of calls
to max-flow, but there are other optimizations in _[1] that could be
implemented.
References
----------
.. [1] Zhou, Liu, et al. (2012) Finding maximal k-edge-connected subgraphs
from a large graph. ACM International Conference on Extending Database
Technology 2012 480–491.
https://openproceedings.org/2012/conf/edbt/ZhouLYLCL12.pdf
Example
-------
>>> from networkx.utils import pairwise
>>> paths = [
... (11, 12, 13, 14, 11, 13, 14, 12), # a 4-clique
... (21, 22, 23, 24, 21, 23, 24, 22), # another 4-clique
... # connect the cliques with high degree but low connectivity
... (50, 13),
... (12, 50, 22),
... (13, 102, 23),
... (14, 101, 24),
... ]
>>> G = nx.Graph(it.chain(*[pairwise(path) for path in paths]))
>>> sorted(map(len, k_edge_subgraphs(G, k=3)))
[1, 1, 1, 4, 4]
"""
if k < 1:
raise ValueError('k cannot be less than 1')
# Node pruning optimization (incorporates early return)
# find_ccs is either connected_components/strongly_connected_components
find_ccs = partial(_high_degree_components, k=k)
# Quick return optimization
if G.number_of_nodes() < k:
for node in G.nodes():
yield G.subgraph([node]).copy()
return
# Intermediate results
R0 = {G.subgraph(cc).copy() for cc in find_ccs(G)}
# Subdivide CCs in the intermediate results until they are k-conn
while R0:
G1 = R0.pop()
if G1.number_of_nodes() == 1:
yield G1
else:
# Find a global minimum cut
cut_edges = nx.minimum_edge_cut(G1)
cut_value = len(cut_edges)
if cut_value < k:
# G1 is not k-edge-connected, so subdivide it
G1.remove_edges_from(cut_edges)
for cc in find_ccs(G1):
R0.add(G1.subgraph(cc).copy())
else:
# Otherwise we found a k-edge-connected subgraph
yield G1
|
[
"def",
"general_k_edge_subgraphs",
"(",
"G",
",",
"k",
")",
":",
"if",
"k",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"'k cannot be less than 1'",
")",
"# Node pruning optimization (incorporates early return)",
"# find_ccs is either connected_components/strongly_connected_components",
"find_ccs",
"=",
"partial",
"(",
"_high_degree_components",
",",
"k",
"=",
"k",
")",
"# Quick return optimization",
"if",
"G",
".",
"number_of_nodes",
"(",
")",
"<",
"k",
":",
"for",
"node",
"in",
"G",
".",
"nodes",
"(",
")",
":",
"yield",
"G",
".",
"subgraph",
"(",
"[",
"node",
"]",
")",
".",
"copy",
"(",
")",
"return",
"# Intermediate results",
"R0",
"=",
"{",
"G",
".",
"subgraph",
"(",
"cc",
")",
".",
"copy",
"(",
")",
"for",
"cc",
"in",
"find_ccs",
"(",
"G",
")",
"}",
"# Subdivide CCs in the intermediate results until they are k-conn",
"while",
"R0",
":",
"G1",
"=",
"R0",
".",
"pop",
"(",
")",
"if",
"G1",
".",
"number_of_nodes",
"(",
")",
"==",
"1",
":",
"yield",
"G1",
"else",
":",
"# Find a global minimum cut",
"cut_edges",
"=",
"nx",
".",
"minimum_edge_cut",
"(",
"G1",
")",
"cut_value",
"=",
"len",
"(",
"cut_edges",
")",
"if",
"cut_value",
"<",
"k",
":",
"# G1 is not k-edge-connected, so subdivide it",
"G1",
".",
"remove_edges_from",
"(",
"cut_edges",
")",
"for",
"cc",
"in",
"find_ccs",
"(",
"G1",
")",
":",
"R0",
".",
"add",
"(",
"G1",
".",
"subgraph",
"(",
"cc",
")",
".",
"copy",
"(",
")",
")",
"else",
":",
"# Otherwise we found a k-edge-connected subgraph",
"yield",
"G1"
] |
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/networkx/algorithms/connectivity/edge_kcomponents.py#L519-L596
|
||
pyparallel/pyparallel
|
11e8c6072d48c8f13641925d17b147bf36ee0ba3
|
Lib/site-packages/pandas-0.17.0-py3.3-win-amd64.egg/pandas/core/generic.py
|
python
|
NDFrame.to_clipboard
|
(self, excel=None, sep=None, **kwargs)
|
Attempt to write text representation of object to the system clipboard
This can be pasted into Excel, for example.
Parameters
----------
excel : boolean, defaults to True
if True, use the provided separator, writing in a csv
format for allowing easy pasting into excel.
if False, write a string representation of the object
to the clipboard
sep : optional, defaults to tab
other keywords are passed to to_csv
Notes
-----
Requirements for your platform
- Linux: xclip, or xsel (with gtk or PyQt4 modules)
- Windows: none
- OS X: none
|
Attempt to write text representation of object to the system clipboard
This can be pasted into Excel, for example.
|
[
"Attempt",
"to",
"write",
"text",
"representation",
"of",
"object",
"to",
"the",
"system",
"clipboard",
"This",
"can",
"be",
"pasted",
"into",
"Excel",
"for",
"example",
"."
] |
def to_clipboard(self, excel=None, sep=None, **kwargs):
"""
Attempt to write text representation of object to the system clipboard
This can be pasted into Excel, for example.
Parameters
----------
excel : boolean, defaults to True
if True, use the provided separator, writing in a csv
format for allowing easy pasting into excel.
if False, write a string representation of the object
to the clipboard
sep : optional, defaults to tab
other keywords are passed to to_csv
Notes
-----
Requirements for your platform
- Linux: xclip, or xsel (with gtk or PyQt4 modules)
- Windows: none
- OS X: none
"""
from pandas.io import clipboard
clipboard.to_clipboard(self, excel=excel, sep=sep, **kwargs)
|
[
"def",
"to_clipboard",
"(",
"self",
",",
"excel",
"=",
"None",
",",
"sep",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"pandas",
".",
"io",
"import",
"clipboard",
"clipboard",
".",
"to_clipboard",
"(",
"self",
",",
"excel",
"=",
"excel",
",",
"sep",
"=",
"sep",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/pyparallel/pyparallel/blob/11e8c6072d48c8f13641925d17b147bf36ee0ba3/Lib/site-packages/pandas-0.17.0-py3.3-win-amd64.egg/pandas/core/generic.py#L1016-L1039
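A minimal usage sketch of the method above (the DataFrame is illustrative); extra keywords such as index are forwarded to to_csv, and the call needs a clipboard backend per the platform notes in the docstring.
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
df.to_clipboard(excel=True, sep="\t", index=False)   # tab-separated, ready to paste into Excel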
|
||
aiidateam/aiida-core
|
c743a335480f8bb3a5e4ebd2463a31f9f3b9f9b2
|
aiida/cmdline/params/options/commands/computer.py
|
python
|
should_call_default_mpiprocs_per_machine
|
(ctx)
|
return job_resource_cls.accepts_default_mpiprocs_per_machine()
|
Return True if the scheduler can accept 'default_mpiprocs_per_machine',
False otherwise.
If there is a problem in determining the scheduler, return True to
avoid exceptions.
|
Return True if the scheduler can accept 'default_mpiprocs_per_machine',
False otherwise.
|
[
"Return",
"True",
"if",
"the",
"scheduler",
"can",
"accept",
"default_mpiprocs_per_machine",
"False",
"otherwise",
"."
] |
def should_call_default_mpiprocs_per_machine(ctx): # pylint: disable=invalid-name
"""
Return True if the scheduler can accept 'default_mpiprocs_per_machine',
False otherwise.
If there is a problem in determining the scheduler, return True to
avoid exceptions.
"""
from aiida.common.exceptions import ValidationError
scheduler_ep = ctx.params['scheduler']
if scheduler_ep is not None:
try:
scheduler_cls = scheduler_ep.load()
except ImportError:
raise ImportError(f"Unable to load the '{scheduler_ep.name}' scheduler")
else:
raise ValidationError(
'The should_call_... function should always be run (and prompted) AFTER asking for a scheduler'
)
job_resource_cls = scheduler_cls.job_resource_class
if job_resource_cls is None:
# Odd situation...
return False
return job_resource_cls.accepts_default_mpiprocs_per_machine()
|
[
"def",
"should_call_default_mpiprocs_per_machine",
"(",
"ctx",
")",
":",
"# pylint: disable=invalid-name",
"from",
"aiida",
".",
"common",
".",
"exceptions",
"import",
"ValidationError",
"scheduler_ep",
"=",
"ctx",
".",
"params",
"[",
"'scheduler'",
"]",
"if",
"scheduler_ep",
"is",
"not",
"None",
":",
"try",
":",
"scheduler_cls",
"=",
"scheduler_ep",
".",
"load",
"(",
")",
"except",
"ImportError",
":",
"raise",
"ImportError",
"(",
"f\"Unable to load the '{scheduler_ep.name}' scheduler\"",
")",
"else",
":",
"raise",
"ValidationError",
"(",
"'The should_call_... function should always be run (and prompted) AFTER asking for a scheduler'",
")",
"job_resource_cls",
"=",
"scheduler_cls",
".",
"job_resource_class",
"if",
"job_resource_cls",
"is",
"None",
":",
"# Odd situation...",
"return",
"False",
"return",
"job_resource_cls",
".",
"accepts_default_mpiprocs_per_machine",
"(",
")"
] |
https://github.com/aiidateam/aiida-core/blob/c743a335480f8bb3a5e4ebd2463a31f9f3b9f9b2/aiida/cmdline/params/options/commands/computer.py#L18-L44
|
|
chainer/chainercv
|
7159616642e0be7c5b3ef380b848e16b7e99355b
|
examples/instance_segmentation/eval_instance_segmentation.py
|
python
|
main
|
()
|
[] |
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', choices=('sbd', 'coco'))
parser.add_argument('--model', choices=sorted(models.keys()))
parser.add_argument('--pretrained-model')
parser.add_argument('--batchsize', type=int)
parser.add_argument('--gpu', type=int, default=-1)
args = parser.parse_args()
dataset, eval_, model, batchsize = setup(
args.dataset, args.model, args.pretrained_model, args.batchsize)
if args.gpu >= 0:
chainer.cuda.get_device_from_id(args.gpu).use()
model.to_gpu()
iterator = iterators.MultithreadIterator(
dataset, batchsize, repeat=False, shuffle=False)
in_values, out_values, rest_values = apply_to_iterator(
model.predict, iterator, hook=ProgressHook(len(dataset)))
# delete unused iterators explicitly
del in_values
eval_(out_values, rest_values)
|
[
"def",
"main",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"'--dataset'",
",",
"choices",
"=",
"(",
"'sbd'",
",",
"'coco'",
")",
")",
"parser",
".",
"add_argument",
"(",
"'--model'",
",",
"choices",
"=",
"sorted",
"(",
"models",
".",
"keys",
"(",
")",
")",
")",
"parser",
".",
"add_argument",
"(",
"'--pretrained-model'",
")",
"parser",
".",
"add_argument",
"(",
"'--batchsize'",
",",
"type",
"=",
"int",
")",
"parser",
".",
"add_argument",
"(",
"'--gpu'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"-",
"1",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"dataset",
",",
"eval_",
",",
"model",
",",
"batchsize",
"=",
"setup",
"(",
"args",
".",
"dataset",
",",
"args",
".",
"model",
",",
"args",
".",
"pretrained_model",
",",
"args",
".",
"batchsize",
")",
"if",
"args",
".",
"gpu",
">=",
"0",
":",
"chainer",
".",
"cuda",
".",
"get_device_from_id",
"(",
"args",
".",
"gpu",
")",
".",
"use",
"(",
")",
"model",
".",
"to_gpu",
"(",
")",
"iterator",
"=",
"iterators",
".",
"MultithreadIterator",
"(",
"dataset",
",",
"batchsize",
",",
"repeat",
"=",
"False",
",",
"shuffle",
"=",
"False",
")",
"in_values",
",",
"out_values",
",",
"rest_values",
"=",
"apply_to_iterator",
"(",
"model",
".",
"predict",
",",
"iterator",
",",
"hook",
"=",
"ProgressHook",
"(",
"len",
"(",
"dataset",
")",
")",
")",
"# delete unused iterators explicitly",
"del",
"in_values",
"eval_",
"(",
"out_values",
",",
"rest_values",
")"
] |
https://github.com/chainer/chainercv/blob/7159616642e0be7c5b3ef380b848e16b7e99355b/examples/instance_segmentation/eval_instance_segmentation.py#L96-L120
|
||||
mailpile/Mailpile
|
b5e4b85fd1e584951d6d13af362ab28821466eea
|
mailpile/spambayes/OptionsClass.py
|
python
|
OptionsClass.display_name
|
(self, sect, opt)
|
return self._options[sect, opt.lower()].display_name()
|
A name for the option suitable for display to a user.
|
A name for the option suitable for display to a user.
|
[
"A",
"name",
"for",
"the",
"option",
"suitable",
"for",
"display",
"to",
"a",
"user",
"."
] |
def display_name(self, sect, opt):
'''A name for the option suitable for display to a user.'''
return self._options[sect, opt.lower()].display_name()
|
[
"def",
"display_name",
"(",
"self",
",",
"sect",
",",
"opt",
")",
":",
"return",
"self",
".",
"_options",
"[",
"sect",
",",
"opt",
".",
"lower",
"(",
")",
"]",
".",
"display_name",
"(",
")"
] |
https://github.com/mailpile/Mailpile/blob/b5e4b85fd1e584951d6d13af362ab28821466eea/mailpile/spambayes/OptionsClass.py#L614-L616
|
|
python-acoustics/python-acoustics
|
af72e7f88003f0bba06934ea38c98e8993c4a6c6
|
acoustics/signal.py
|
python
|
phase_spectrum
|
(x, fs, N=None)
|
return f, np.unwrap(a)
|
Phase spectrum of instantaneous signal :math:`x(t)`.
:param x: Instantaneous signal :math:`x(t)`.
:param fs: Sample frequency :math:`f_s`.
:param N: Amount of FFT bins.
This function returns a single-sided unwrapped phase spectrum.
.. seealso:: :func:`angle_spectrum` for wrapped phase angle.
|
Phase spectrum of instantaneous signal :math:`x(t)`.
|
[
"Phase",
"spectrum",
"of",
"instantaneous",
"signal",
":",
"math",
":",
"x",
"(",
"t",
")",
"."
] |
def phase_spectrum(x, fs, N=None):
"""
Phase spectrum of instantaneous signal :math:`x(t)`.
:param x: Instantaneous signal :math:`x(t)`.
:param fs: Sample frequency :math:`f_s`.
:param N: Amount of FFT bins.
This function returns a single-sided unwrapped phase spectrum.
.. seealso:: :func:`angle_spectrum` for wrapped phase angle.
"""
f, a = angle_spectrum(x, fs, N=None)
return f, np.unwrap(a)
|
[
"def",
"phase_spectrum",
"(",
"x",
",",
"fs",
",",
"N",
"=",
"None",
")",
":",
"f",
",",
"a",
"=",
"angle_spectrum",
"(",
"x",
",",
"fs",
",",
"N",
"=",
"None",
")",
"return",
"f",
",",
"np",
".",
"unwrap",
"(",
"a",
")"
] |
https://github.com/python-acoustics/python-acoustics/blob/af72e7f88003f0bba06934ea38c98e8993c4a6c6/acoustics/signal.py#L677-L691
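A minimal usage sketch of the function above: the unwrapped phase of a 1 kHz tone sampled at 44.1 kHz. The import path is assumed from the file path shown in this record.
import numpy as np
from acoustics.signal import phase_spectrum

fs = 44100                             # sample frequency in Hz
t = np.arange(0, 0.1, 1.0 / fs)
x = np.sin(2 * np.pi * 1000.0 * t)     # 1 kHz tone
f, phase = phase_spectrum(x, fs)       # single-sided, unwrapped phase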
|
|
Tautulli/Tautulli
|
2410eb33805aaac4bd1c5dad0f71e4f15afaf742
|
lib/importlib_resources/_legacy.py
|
python
|
deprecated
|
(func)
|
return wrapper
|
[] |
def deprecated(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
warnings.warn(
f"{func.__name__} is deprecated. Use files() instead. "
"Refer to https://importlib-resources.readthedocs.io"
"/en/latest/using.html#migrating-from-legacy for migration advice.",
DeprecationWarning,
stacklevel=2,
)
return func(*args, **kwargs)
return wrapper
|
[
"def",
"deprecated",
"(",
"func",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"warnings",
".",
"warn",
"(",
"f\"{func.__name__} is deprecated. Use files() instead. \"",
"\"Refer to https://importlib-resources.readthedocs.io\"",
"\"/en/latest/using.html#migrating-from-legacy for migration advice.\"",
",",
"DeprecationWarning",
",",
"stacklevel",
"=",
"2",
",",
")",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper"
] |
https://github.com/Tautulli/Tautulli/blob/2410eb33805aaac4bd1c5dad0f71e4f15afaf742/lib/importlib_resources/_legacy.py#L15-L27
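An illustrative application of the decorator above; the function name and body are hypothetical stand-ins for the legacy accessors it wraps.
# Hypothetical legacy helper wrapped by the decorator above.
@deprecated
def read_text(package, resource):
    ...  # calling read_text(...) first emits the DeprecationWarning, then runs this body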
|
|||
napalm-automation/napalm
|
ad1ff72000d0de59f25c8847694f51a4ad5aca86
|
napalm/eos/utils/versions.py
|
python
|
EOSVersion._parse
|
(self, version)
|
Parse version string
:param version: str: version
:return: None
|
Parse version string
:param version: str: version
:return: None
|
[
"Parse",
"version",
"string",
":",
"param",
"version",
":",
"str",
":",
"version",
":",
"return",
":",
"None"
] |
def _parse(self, version):
"""
Parse version string
:param version: str: version
:return: None
"""
m = re.match(r"^(?P<numbers>\d[\d.]+\d)", version)
if m:
self.numbers = m.group("numbers").split(".")
|
[
"def",
"_parse",
"(",
"self",
",",
"version",
")",
":",
"m",
"=",
"re",
".",
"match",
"(",
"r\"^(?P<numbers>\\d[\\d.]+\\d)\"",
",",
"version",
")",
"if",
"m",
":",
"self",
".",
"numbers",
"=",
"m",
".",
"group",
"(",
"\"numbers\"",
")",
".",
"split",
"(",
"\".\"",
")"
] |
https://github.com/napalm-automation/napalm/blob/ad1ff72000d0de59f25c8847694f51a4ad5aca86/napalm/eos/utils/versions.py#L21-L30
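To make the regex above concrete, a standalone sketch of what it captures for a typical EOS version string (the version value is illustrative):
import re

m = re.match(r"^(?P<numbers>\d[\d.]+\d)", "4.21.7.1M")
numbers = m.group("numbers").split(".") if m else []
# numbers -> ['4', '21', '7', '1']; the trailing 'M' release marker is not captured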
|
||
Calysto/calysto_scheme
|
15bf81987870bcae1264e5a0a06feb9a8ee12b8b
|
calysto_scheme/scheme.py
|
python
|
cddr_hat
|
(asexp)
|
return cdr_hat(cdr_hat(asexp))
|
[] |
def cddr_hat(asexp):
return cdr_hat(cdr_hat(asexp))
|
[
"def",
"cddr_hat",
"(",
"asexp",
")",
":",
"return",
"cdr_hat",
"(",
"cdr_hat",
"(",
"asexp",
")",
")"
] |
https://github.com/Calysto/calysto_scheme/blob/15bf81987870bcae1264e5a0a06feb9a8ee12b8b/calysto_scheme/scheme.py#L6258-L6259
|
|||
nengo/keras-lmu
|
c871bb561c54f38e477fc4895f198ac157187c45
|
keras_lmu/layers.py
|
python
|
LMU.call
|
(self, inputs, training=None)
|
return self.layer.call(inputs, training=training)
|
Apply this layer to inputs.
Notes
-----
This method should not be called manually; rather, use the implicit layer
callable behaviour (like ``my_layer(inputs)``), which will apply this method
with some additional bookkeeping.
|
Apply this layer to inputs.
|
[
"Apply",
"this",
"layer",
"to",
"inputs",
"."
] |
def call(self, inputs, training=None):
"""
Apply this layer to inputs.
Notes
-----
This method should not be called manually; rather, use the implicit layer
callable behaviour (like ``my_layer(inputs)``), which will apply this method
with some additional bookkeeping.
"""
return self.layer.call(inputs, training=training)
|
[
"def",
"call",
"(",
"self",
",",
"inputs",
",",
"training",
"=",
"None",
")",
":",
"return",
"self",
".",
"layer",
".",
"call",
"(",
"inputs",
",",
"training",
"=",
"training",
")"
] |
https://github.com/nengo/keras-lmu/blob/c871bb561c54f38e477fc4895f198ac157187c45/keras_lmu/layers.py#L582-L593
|
|
makerbot/ReplicatorG
|
d6f2b07785a5a5f1e172fb87cb4303b17c575d5d
|
skein_engines/skeinforge-50/skeinforge_application/skeinforge_plugins/craft_plugins/cleave.py
|
python
|
writeOutput
|
(fileName, shouldAnalyze=True)
|
Cleave a GNU Triangulated Surface file.
|
Cleave a GNU Triangulated Surface file.
|
[
"Cleave",
"a",
"GNU",
"Triangulated",
"Surface",
"file",
"."
] |
def writeOutput(fileName, shouldAnalyze=True):
"Cleave a GNU Triangulated Surface file."
startTime = time.time()
print('File ' + archive.getSummarizedFileName(fileName) + ' is being cleaved.')
repository = CleaveRepository()
settings.getReadRepository(repository)
cleaveGcode = getCraftedText( fileName, '', repository )
if cleaveGcode == '':
return
suffixFileName = fileName[ : fileName.rfind('.') ] + '_cleave.svg'
suffixDirectoryName = os.path.dirname(suffixFileName)
suffixReplacedBaseName = os.path.basename(suffixFileName).replace(' ', '_')
suffixFileName = os.path.join( suffixDirectoryName, suffixReplacedBaseName )
archive.writeFileText( suffixFileName, cleaveGcode )
print('The cleaved file is saved as ' + archive.getSummarizedFileName(suffixFileName) )
print('It took %s to cleave the file.' % euclidean.getDurationString( time.time() - startTime ) )
if shouldAnalyze:
settings.openSVGPage( suffixFileName, repository.svgViewer.value )
|
[
"def",
"writeOutput",
"(",
"fileName",
",",
"shouldAnalyze",
"=",
"True",
")",
":",
"startTime",
"=",
"time",
".",
"time",
"(",
")",
"print",
"(",
"'File '",
"+",
"archive",
".",
"getSummarizedFileName",
"(",
"fileName",
")",
"+",
"' is being cleaved.'",
")",
"repository",
"=",
"CleaveRepository",
"(",
")",
"settings",
".",
"getReadRepository",
"(",
"repository",
")",
"cleaveGcode",
"=",
"getCraftedText",
"(",
"fileName",
",",
"''",
",",
"repository",
")",
"if",
"cleaveGcode",
"==",
"''",
":",
"return",
"suffixFileName",
"=",
"fileName",
"[",
":",
"fileName",
".",
"rfind",
"(",
"'.'",
")",
"]",
"+",
"'_cleave.svg'",
"suffixDirectoryName",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"suffixFileName",
")",
"suffixReplacedBaseName",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"suffixFileName",
")",
".",
"replace",
"(",
"' '",
",",
"'_'",
")",
"suffixFileName",
"=",
"os",
".",
"path",
".",
"join",
"(",
"suffixDirectoryName",
",",
"suffixReplacedBaseName",
")",
"archive",
".",
"writeFileText",
"(",
"suffixFileName",
",",
"cleaveGcode",
")",
"print",
"(",
"'The cleaved file is saved as '",
"+",
"archive",
".",
"getSummarizedFileName",
"(",
"suffixFileName",
")",
")",
"print",
"(",
"'It took %s to cleave the file.'",
"%",
"euclidean",
".",
"getDurationString",
"(",
"time",
".",
"time",
"(",
")",
"-",
"startTime",
")",
")",
"if",
"shouldAnalyze",
":",
"settings",
".",
"openSVGPage",
"(",
"suffixFileName",
",",
"repository",
".",
"svgViewer",
".",
"value",
")"
] |
https://github.com/makerbot/ReplicatorG/blob/d6f2b07785a5a5f1e172fb87cb4303b17c575d5d/skein_engines/skeinforge-50/skeinforge_application/skeinforge_plugins/craft_plugins/cleave.py#L121-L138
|
||
django-oscar/django-oscar
|
ffcc530844d40283b6b1552778a140536b904f5f
|
src/oscar/core/validators.py
|
python
|
URLDoesNotExistValidator.__call__
|
(self, value)
|
Validate that the URL does not already exist.
The URL will be verified first and raises ``ValidationError`` when
it is invalid. A valid URL is checked for existence and raises
``ValidationError`` if the URL already exists.
This validation uses two calls to ExtendedURLValidator which can
be slow. Be aware of this, when you use it.
Returns ``None`` if URL is valid and does not exist.
|
Validate that the URL does not already exist.
|
[
"Validate",
"that",
"the",
"URL",
"does",
"not",
"already",
"exist",
"."
] |
def __call__(self, value):
"""
Validate that the URL does not already exist.
The URL will be verified first and raises ``ValidationError`` when
it is invalid. A valid URL is checked for existence and raises
``ValidationError`` if the URL already exists.
This validation uses two calls to ExtendedURLValidator which can
be slow. Be aware of this, when you use it.
Returns ``None`` if URL is valid and does not exist.
"""
try:
self.validate_local_url(value)
except ValidationError:
# Page exists - that is what we want
return
raise ValidationError(
_('Specified page already exists!'), code='invalid')
|
[
"def",
"__call__",
"(",
"self",
",",
"value",
")",
":",
"try",
":",
"self",
".",
"validate_local_url",
"(",
"value",
")",
"except",
"ValidationError",
":",
"# Page exists - that is what we want",
"return",
"raise",
"ValidationError",
"(",
"_",
"(",
"'Specified page already exists!'",
")",
",",
"code",
"=",
"'invalid'",
")"
] |
https://github.com/django-oscar/django-oscar/blob/ffcc530844d40283b6b1552778a140536b904f5f/src/oscar/core/validators.py#L87-L106
|
||
junyanz/VON
|
2bd39d0c11dd318a45ecda7b2125caa1c0dd93e8
|
render_module/render_sketch.py
|
python
|
CroppingLayer.forward
|
(self, exp_sil, exp_depth)
|
return sil, depth
|
[] |
def forward(self, exp_sil, exp_depth):
sil, depth, _, _ = self.crop_depth_sil(exp_sil, exp_depth)
return sil, depth
|
[
"def",
"forward",
"(",
"self",
",",
"exp_sil",
",",
"exp_depth",
")",
":",
"sil",
",",
"depth",
",",
"_",
",",
"_",
"=",
"self",
".",
"crop_depth_sil",
"(",
"exp_sil",
",",
"exp_depth",
")",
"return",
"sil",
",",
"depth"
] |
https://github.com/junyanz/VON/blob/2bd39d0c11dd318a45ecda7b2125caa1c0dd93e8/render_module/render_sketch.py#L137-L140
|
|||
gem/oq-engine
|
1bdb88f3914e390abcbd285600bfd39477aae47c
|
openquake/server/db/actions.py
|
python
|
set_status
|
(db, job_id, status)
|
return cursor.rowcount
|
Set the status 'created', 'executing', 'complete', 'failed', 'aborted'
consistently with `is_running`.
:param db: a :class:`openquake.server.dbapi.Db` instance
:param job_id: ID of the current job
:param status: status string
|
Set the status 'created', 'executing', 'complete', 'failed', 'aborted'
consistently with `is_running`.
|
[
"Set",
"the",
"status",
"created",
"executing",
"complete",
"failed",
"aborted",
"consistently",
"with",
"is_running",
"."
] |
def set_status(db, job_id, status):
"""
Set the status 'created', 'executing', 'complete', 'failed', 'aborted'
consistently with `is_running`.
:param db: a :class:`openquake.server.dbapi.Db` instance
:param job_id: ID of the current job
:param status: status string
"""
assert status in (
'created', 'submitted', 'executing', 'complete', 'aborted', 'failed',
'deleted'), status
if status in ('created', 'complete', 'failed', 'aborted', 'deleted'):
is_running = 0
else: # 'executing'
is_running = 1
if job_id < 0:
rows = db('SELECT id FROM job ORDER BY id DESC LIMIT ?x', -job_id)
if not rows:
return 0
job_id = rows[-1].id
cursor = db('UPDATE job SET status=?x, is_running=?x WHERE id=?x',
status, is_running, job_id)
return cursor.rowcount
|
[
"def",
"set_status",
"(",
"db",
",",
"job_id",
",",
"status",
")",
":",
"assert",
"status",
"in",
"(",
"'created'",
",",
"'submitted'",
",",
"'executing'",
",",
"'complete'",
",",
"'aborted'",
",",
"'failed'",
",",
"'deleted'",
")",
",",
"status",
"if",
"status",
"in",
"(",
"'created'",
",",
"'complete'",
",",
"'failed'",
",",
"'aborted'",
",",
"'deleted'",
")",
":",
"is_running",
"=",
"0",
"else",
":",
"# 'executing'",
"is_running",
"=",
"1",
"if",
"job_id",
"<",
"0",
":",
"rows",
"=",
"db",
"(",
"'SELECT id FROM job ORDER BY id DESC LIMIT ?x'",
",",
"-",
"job_id",
")",
"if",
"not",
"rows",
":",
"return",
"0",
"job_id",
"=",
"rows",
"[",
"-",
"1",
"]",
".",
"id",
"cursor",
"=",
"db",
"(",
"'UPDATE job SET status=?x, is_running=?x WHERE id=?x'",
",",
"status",
",",
"is_running",
",",
"job_id",
")",
"return",
"cursor",
".",
"rowcount"
] |
https://github.com/gem/oq-engine/blob/1bdb88f3914e390abcbd285600bfd39477aae47c/openquake/server/db/actions.py#L64-L87
|
|
pantsbuild/pants
|
2e126e78ffc40cb108408316b90e8beebee1df9e
|
src/python/pants/util/contextutil.py
|
python
|
pushd
|
(directory: str)
|
A with-context that encapsulates pushd/popd.
|
A with-context that encapsulates pushd/popd.
|
[
"A",
"with",
"-",
"context",
"that",
"encapsulates",
"pushd",
"/",
"popd",
"."
] |
def pushd(directory: str) -> Iterator[str]:
"""A with-context that encapsulates pushd/popd."""
cwd = os.getcwd()
os.chdir(directory)
try:
yield directory
finally:
os.chdir(cwd)
|
[
"def",
"pushd",
"(",
"directory",
":",
"str",
")",
"->",
"Iterator",
"[",
"str",
"]",
":",
"cwd",
"=",
"os",
".",
"getcwd",
"(",
")",
"os",
".",
"chdir",
"(",
"directory",
")",
"try",
":",
"yield",
"directory",
"finally",
":",
"os",
".",
"chdir",
"(",
"cwd",
")"
] |
https://github.com/pantsbuild/pants/blob/2e126e78ffc40cb108408316b90e8beebee1df9e/src/python/pants/util/contextutil.py#L208-L215
|
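A quick usage sketch of the context manager above, assuming the usual @contextmanager decoration that the snippet's yield implies; the directory is a placeholder.

import os
from pants.util.contextutil import pushd  # module path taken from the entry above

with pushd('/tmp') as d:       # change into /tmp only for the duration of the block
    print(os.getcwd(), d)
print(os.getcwd())             # the previous working directory is restored on exit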
||
huggingface/transformers
|
623b4f7c63f60cce917677ee704d6c93ee960b4b
|
src/transformers/utils/dummy_pt_objects.py
|
python
|
LongformerForMaskedLM.forward
|
(self, *args, **kwargs)
|
[] |
def forward(self, *args, **kwargs):
requires_backends(self, ["torch"])
|
[
"def",
"forward",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"requires_backends",
"(",
"self",
",",
"[",
"\"torch\"",
"]",
")"
] |
https://github.com/huggingface/transformers/blob/623b4f7c63f60cce917677ee704d6c93ee960b4b/src/transformers/utils/dummy_pt_objects.py#L2960-L2961
|
||||
shiweibsw/Translation-Tools
|
2fbbf902364e557fa7017f9a74a8797b7440c077
|
venv/Lib/site-packages/xlwt/Worksheet.py
|
python
|
Worksheet.get_dialogue_sheet
|
(self)
|
return bool(self.__dialogue_sheet)
|
[] |
def get_dialogue_sheet(self):
return bool(self.__dialogue_sheet)
|
[
"def",
"get_dialogue_sheet",
"(",
"self",
")",
":",
"return",
"bool",
"(",
"self",
".",
"__dialogue_sheet",
")"
] |
https://github.com/shiweibsw/Translation-Tools/blob/2fbbf902364e557fa7017f9a74a8797b7440c077/venv/Lib/site-packages/xlwt/Worksheet.py#L509-L510
|
|||
Blizzard/heroprotocol
|
3d36eaf44fc4c8ff3331c2ae2f1dc08a94535f1c
|
heroprotocol/versions/protocol66182.py
|
python
|
decode_replay_attributes_events
|
(contents)
|
return attributes
|
Decodes and yields each attribute from the contents byte string.
|
Decodes and yields each attribute from the contents byte string.
|
[
"Decodes",
"and",
"yields",
"each",
"attribute",
"from",
"the",
"contents",
"byte",
"string",
"."
] |
def decode_replay_attributes_events(contents):
"""Decodes and yields each attribute from the contents byte string."""
buffer = BitPackedBuffer(contents, 'little')
attributes = {}
if not buffer.done():
attributes['source'] = buffer.read_bits(8)
attributes['mapNamespace'] = buffer.read_bits(32)
_ = buffer.read_bits(32)
attributes['scopes'] = {}
while not buffer.done():
value = {}
value['namespace'] = buffer.read_bits(32)
value['attrid'] = attrid = buffer.read_bits(32)
scope = buffer.read_bits(8)
value['value'] = buffer.read_aligned_bytes(4)[::-1].strip(b'\x00')
if not scope in attributes['scopes']:
attributes['scopes'][scope] = {}
if not attrid in attributes['scopes'][scope]:
attributes['scopes'][scope][attrid] = []
attributes['scopes'][scope][attrid].append(value)
return attributes
|
[
"def",
"decode_replay_attributes_events",
"(",
"contents",
")",
":",
"buffer",
"=",
"BitPackedBuffer",
"(",
"contents",
",",
"'little'",
")",
"attributes",
"=",
"{",
"}",
"if",
"not",
"buffer",
".",
"done",
"(",
")",
":",
"attributes",
"[",
"'source'",
"]",
"=",
"buffer",
".",
"read_bits",
"(",
"8",
")",
"attributes",
"[",
"'mapNamespace'",
"]",
"=",
"buffer",
".",
"read_bits",
"(",
"32",
")",
"_",
"=",
"buffer",
".",
"read_bits",
"(",
"32",
")",
"attributes",
"[",
"'scopes'",
"]",
"=",
"{",
"}",
"while",
"not",
"buffer",
".",
"done",
"(",
")",
":",
"value",
"=",
"{",
"}",
"value",
"[",
"'namespace'",
"]",
"=",
"buffer",
".",
"read_bits",
"(",
"32",
")",
"value",
"[",
"'attrid'",
"]",
"=",
"attrid",
"=",
"buffer",
".",
"read_bits",
"(",
"32",
")",
"scope",
"=",
"buffer",
".",
"read_bits",
"(",
"8",
")",
"value",
"[",
"'value'",
"]",
"=",
"buffer",
".",
"read_aligned_bytes",
"(",
"4",
")",
"[",
":",
":",
"-",
"1",
"]",
".",
"strip",
"(",
"b'\\x00'",
")",
"if",
"not",
"scope",
"in",
"attributes",
"[",
"'scopes'",
"]",
":",
"attributes",
"[",
"'scopes'",
"]",
"[",
"scope",
"]",
"=",
"{",
"}",
"if",
"not",
"attrid",
"in",
"attributes",
"[",
"'scopes'",
"]",
"[",
"scope",
"]",
":",
"attributes",
"[",
"'scopes'",
"]",
"[",
"scope",
"]",
"[",
"attrid",
"]",
"=",
"[",
"]",
"attributes",
"[",
"'scopes'",
"]",
"[",
"scope",
"]",
"[",
"attrid",
"]",
".",
"append",
"(",
"value",
")",
"return",
"attributes"
] |
https://github.com/Blizzard/heroprotocol/blob/3d36eaf44fc4c8ff3331c2ae2f1dc08a94535f1c/heroprotocol/versions/protocol66182.py#L450-L470
|
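A hedged sketch of calling the decoder above; the module path follows the entry, but extracting the 'replay.attributes.events' blob from a replay archive (e.g. with mpyq) is assumed and not shown.

from heroprotocol.versions import protocol66182 as protocol

# `contents` must be the raw bytes of the replay's 'replay.attributes.events' file,
# extracted beforehand from the .StormReplay archive (assumption: via mpyq or similar)
with open('replay.attributes.events', 'rb') as fh:
    contents = fh.read()
attrs = protocol.decode_replay_attributes_events(contents)
print(attrs.get('source'), list(attrs.get('scopes', {})))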
|
jgagneastro/coffeegrindsize
|
22661ebd21831dba4cf32bfc6ba59fe3d49f879c
|
App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/matplotlib/colorbar.py
|
python
|
ColorbarBase._extend_upper
|
(self)
|
return self.extend in ('both', 'max')
|
Returns whether the upper limit is open ended.
|
Returns whether the upper limit is open ended.
|
[
"Returns",
"whether",
"the",
"uper",
"limit",
"is",
"open",
"ended",
"."
] |
def _extend_upper(self):
"""Returns whether the uper limit is open ended."""
return self.extend in ('both', 'max')
|
[
"def",
"_extend_upper",
"(",
"self",
")",
":",
"return",
"self",
".",
"extend",
"in",
"(",
"'both'",
",",
"'max'",
")"
] |
https://github.com/jgagneastro/coffeegrindsize/blob/22661ebd21831dba4cf32bfc6ba59fe3d49f879c/App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/matplotlib/colorbar.py#L419-L421
|
|
DataBiosphere/toil
|
2e148eee2114ece8dcc3ec8a83f36333266ece0d
|
src/toil/provisioners/abstractProvisioner.py
|
python
|
AbstractProvisioner.addVolumesService
|
(self, config: InstanceConfiguration)
|
Add a service to prepare and mount local scratch volumes.
|
Add a service to prepare and mount local scratch volumes.
|
[
"Add",
"a",
"service",
"to",
"prepare",
"and",
"mount",
"local",
"scratch",
"volumes",
"."
] |
def addVolumesService(self, config: InstanceConfiguration):
"""
Add a service to prepare and mount local scratch volumes.
"""
config.addFile("/home/core/volumes.sh", contents=textwrap.dedent("""\
#!/bin/bash
set -x
ephemeral_count=0
drives=()
directories=(toil mesos docker kubelet cwl)
for drive in /dev/xvd{a..z} /dev/nvme{0..26}n1; do
echo "checking for ${drive}"
if [ -b $drive ]; then
echo "found it"
while [ "$(readlink -f "${drive}")" != "${drive}" ] ; do
drive="$(readlink -f "${drive}")"
echo "was a symlink to ${drive}"
done
seen=0
for other_drive in "${drives[@]}" ; do
if [ "${other_drive}" == "${drive}" ] ; then
seen=1
break
fi
done
if (( "${seen}" == "1" )) ; then
echo "already discovered via another name"
continue
fi
if mount | grep "^${drive}"; then
echo "already mounted, likely a root device"
else
ephemeral_count=$((ephemeral_count + 1 ))
drives+=("${drive}")
echo "increased ephemeral count by one"
fi
fi
done
if (("$ephemeral_count" == "0" )); then
echo "no ephemeral drive"
for directory in "${directories[@]}"; do
sudo mkdir -p /var/lib/$directory
done
exit 0
fi
sudo mkdir /mnt/ephemeral
if (("$ephemeral_count" == "1" )); then
echo "one ephemeral drive to mount"
sudo mkfs.ext4 -F "${drives[@]}"
sudo mount "${drives[@]}" /mnt/ephemeral
fi
if (("$ephemeral_count" > "1" )); then
echo "multiple drives"
for drive in "${drives[@]}"; do
sudo dd if=/dev/zero of=$drive bs=4096 count=1024
done
# determine force flag
sudo mdadm --create -f --verbose /dev/md0 --level=0 --raid-devices=$ephemeral_count "${drives[@]}"
sudo mkfs.ext4 -F /dev/md0
sudo mount /dev/md0 /mnt/ephemeral
fi
for directory in "${directories[@]}"; do
sudo mkdir -p /mnt/ephemeral/var/lib/$directory
sudo mkdir -p /var/lib/$directory
sudo mount --bind /mnt/ephemeral/var/lib/$directory /var/lib/$directory
done
"""))
config.addUnit("volume-mounting.service", contents=textwrap.dedent("""\
[Unit]
Description=mounts ephemeral volumes & bind mounts toil directories
Before=docker.service
[Service]
Type=oneshot
Restart=no
ExecStart=/usr/bin/bash /home/core/volumes.sh
[Install]
WantedBy=multi-user.target
"""))
|
[
"def",
"addVolumesService",
"(",
"self",
",",
"config",
":",
"InstanceConfiguration",
")",
":",
"config",
".",
"addFile",
"(",
"\"/home/core/volumes.sh\"",
",",
"contents",
"=",
"textwrap",
".",
"dedent",
"(",
"\"\"\"\\\n #!/bin/bash\n set -x\n ephemeral_count=0\n drives=()\n directories=(toil mesos docker kubelet cwl)\n for drive in /dev/xvd{a..z} /dev/nvme{0..26}n1; do\n echo \"checking for ${drive}\"\n if [ -b $drive ]; then\n echo \"found it\"\n while [ \"$(readlink -f \"${drive}\")\" != \"${drive}\" ] ; do\n drive=\"$(readlink -f \"${drive}\")\"\n echo \"was a symlink to ${drive}\"\n done\n seen=0\n for other_drive in \"${drives[@]}\" ; do\n if [ \"${other_drive}\" == \"${drive}\" ] ; then\n seen=1\n break\n fi\n done\n if (( \"${seen}\" == \"1\" )) ; then\n echo \"already discovered via another name\"\n continue\n fi\n if mount | grep \"^${drive}\"; then\n echo \"already mounted, likely a root device\"\n else\n ephemeral_count=$((ephemeral_count + 1 ))\n drives+=(\"${drive}\")\n echo \"increased ephemeral count by one\"\n fi\n fi\n done\n if ((\"$ephemeral_count\" == \"0\" )); then\n echo \"no ephemeral drive\"\n for directory in \"${directories[@]}\"; do\n sudo mkdir -p /var/lib/$directory\n done\n exit 0\n fi\n sudo mkdir /mnt/ephemeral\n if ((\"$ephemeral_count\" == \"1\" )); then\n echo \"one ephemeral drive to mount\"\n sudo mkfs.ext4 -F \"${drives[@]}\"\n sudo mount \"${drives[@]}\" /mnt/ephemeral\n fi\n if ((\"$ephemeral_count\" > \"1\" )); then\n echo \"multiple drives\"\n for drive in \"${drives[@]}\"; do\n sudo dd if=/dev/zero of=$drive bs=4096 count=1024\n done\n # determine force flag\n sudo mdadm --create -f --verbose /dev/md0 --level=0 --raid-devices=$ephemeral_count \"${drives[@]}\"\n sudo mkfs.ext4 -F /dev/md0\n sudo mount /dev/md0 /mnt/ephemeral\n fi\n for directory in \"${directories[@]}\"; do\n sudo mkdir -p /mnt/ephemeral/var/lib/$directory\n sudo mkdir -p /var/lib/$directory\n sudo mount --bind /mnt/ephemeral/var/lib/$directory /var/lib/$directory\n done\n \"\"\"",
")",
")",
"config",
".",
"addUnit",
"(",
"\"volume-mounting.service\"",
",",
"contents",
"=",
"textwrap",
".",
"dedent",
"(",
"\"\"\"\\\n [Unit]\n Description=mounts ephemeral volumes & bind mounts toil directories\n Before=docker.service\n\n [Service]\n Type=oneshot\n Restart=no\n ExecStart=/usr/bin/bash /home/core/volumes.sh\n\n [Install]\n WantedBy=multi-user.target\n \"\"\"",
")",
")"
] |
https://github.com/DataBiosphere/toil/blob/2e148eee2114ece8dcc3ec8a83f36333266ece0d/src/toil/provisioners/abstractProvisioner.py#L586-L665
|
||
python/cpython
|
e13cdca0f5224ec4e23bdd04bb3120506964bc8b
|
Lib/_pydecimal.py
|
python
|
Context.divmod
|
(self, a, b)
|
Return (a // b, a % b).
>>> ExtendedContext.divmod(Decimal(8), Decimal(3))
(Decimal('2'), Decimal('2'))
>>> ExtendedContext.divmod(Decimal(8), Decimal(4))
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(8, 4)
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(Decimal(8), 4)
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(8, Decimal(4))
(Decimal('2'), Decimal('0'))
|
Return (a // b, a % b).
|
[
"Return",
"(",
"a",
"//",
"b",
"a",
"%",
"b",
")",
"."
] |
def divmod(self, a, b):
"""Return (a // b, a % b).
>>> ExtendedContext.divmod(Decimal(8), Decimal(3))
(Decimal('2'), Decimal('2'))
>>> ExtendedContext.divmod(Decimal(8), Decimal(4))
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(8, 4)
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(Decimal(8), 4)
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(8, Decimal(4))
(Decimal('2'), Decimal('0'))
"""
a = _convert_other(a, raiseit=True)
r = a.__divmod__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
|
[
"def",
"divmod",
"(",
"self",
",",
"a",
",",
"b",
")",
":",
"a",
"=",
"_convert_other",
"(",
"a",
",",
"raiseit",
"=",
"True",
")",
"r",
"=",
"a",
".",
"__divmod__",
"(",
"b",
",",
"context",
"=",
"self",
")",
"if",
"r",
"is",
"NotImplemented",
":",
"raise",
"TypeError",
"(",
"\"Unable to convert %s to Decimal\"",
"%",
"b",
")",
"else",
":",
"return",
"r"
] |
https://github.com/python/cpython/blob/e13cdca0f5224ec4e23bdd04bb3120506964bc8b/Lib/_pydecimal.py#L4418-L4437
|
||
pyparallel/pyparallel
|
11e8c6072d48c8f13641925d17b147bf36ee0ba3
|
Lib/tarfile.py
|
python
|
TarFile._dbg
|
(self, level, msg)
|
Write debugging output to sys.stderr.
|
Write debugging output to sys.stderr.
|
[
"Write",
"debugging",
"output",
"to",
"sys",
".",
"stderr",
"."
] |
def _dbg(self, level, msg):
"""Write debugging output to sys.stderr.
"""
if level <= self.debug:
print(msg, file=sys.stderr)
|
[
"def",
"_dbg",
"(",
"self",
",",
"level",
",",
"msg",
")",
":",
"if",
"level",
"<=",
"self",
".",
"debug",
":",
"print",
"(",
"msg",
",",
"file",
"=",
"sys",
".",
"stderr",
")"
] |
https://github.com/pyparallel/pyparallel/blob/11e8c6072d48c8f13641925d17b147bf36ee0ba3/Lib/tarfile.py#L2377-L2381
|
||
pymedusa/Medusa
|
1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38
|
ext/feedparser/api.py
|
python
|
parse
|
(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=None, request_headers=None, response_headers=None, resolve_relative_uris=None, sanitize_html=None)
|
return result
|
Parse a feed from a URL, file, stream, or string.
:param url_file_stream_or_string:
File-like object, URL, file path, or string. Both byte and text strings
are accepted. If necessary, encoding will be derived from the response
headers or automatically detected.
Note that strings may trigger network I/O or filesystem access
depending on the value. Wrap an untrusted string in
a :class:`io.StringIO` or :class:`io.BytesIO` to avoid this. Do not
pass untrusted strings to this function.
When a URL is not passed the feed location to use in relative URL
resolution should be passed in the ``Content-Location`` response header
(see ``response_headers`` below).
:param str etag: HTTP ``ETag`` request header.
:param modified: HTTP ``Last-Modified`` request header.
:type modified: :class:`str`, :class:`time.struct_time` 9-tuple, or
:class:`datetime.datetime`
:param str agent: HTTP ``User-Agent`` request header, which defaults to
the value of :data:`feedparser.USER_AGENT`.
:param referrer: HTTP ``Referer`` [sic] request header.
:param request_headers:
A mapping of HTTP header name to HTTP header value to add to the
request, overriding internally generated values.
:type request_headers: :class:`dict` mapping :class:`str` to :class:`str`
:param response_headers:
A mapping of HTTP header name to HTTP header value. Multiple values may
be joined with a comma. If a HTTP request was made, these headers
override any matching headers in the response. Otherwise this specifies
the entirety of the response headers.
:type response_headers: :class:`dict` mapping :class:`str` to :class:`str`
:param bool resolve_relative_uris:
    Should feedparser attempt to resolve relative URIs to absolute ones within
HTML content? Defaults to the value of
:data:`feedparser.RESOLVE_RELATIVE_URIS`, which is ``True``.
:param bool sanitize_html:
Should feedparser skip HTML sanitization? Only disable this if you know
what you are doing! Defaults to the value of
:data:`feedparser.SANITIZE_HTML`, which is ``True``.
:return: A :class:`FeedParserDict`.
|
Parse a feed from a URL, file, stream, or string.
|
[
"Parse",
"a",
"feed",
"from",
"a",
"URL",
"file",
"stream",
"or",
"string",
"."
] |
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=None, request_headers=None, response_headers=None, resolve_relative_uris=None, sanitize_html=None):
"""Parse a feed from a URL, file, stream, or string.
:param url_file_stream_or_string:
File-like object, URL, file path, or string. Both byte and text strings
are accepted. If necessary, encoding will be derived from the response
headers or automatically detected.
Note that strings may trigger network I/O or filesystem access
depending on the value. Wrap an untrusted string in
a :class:`io.StringIO` or :class:`io.BytesIO` to avoid this. Do not
pass untrusted strings to this function.
When a URL is not passed the feed location to use in relative URL
resolution should be passed in the ``Content-Location`` response header
(see ``response_headers`` below).
:param str etag: HTTP ``ETag`` request header.
:param modified: HTTP ``Last-Modified`` request header.
:type modified: :class:`str`, :class:`time.struct_time` 9-tuple, or
:class:`datetime.datetime`
:param str agent: HTTP ``User-Agent`` request header, which defaults to
the value of :data:`feedparser.USER_AGENT`.
:param referrer: HTTP ``Referer`` [sic] request header.
:param request_headers:
A mapping of HTTP header name to HTTP header value to add to the
request, overriding internally generated values.
:type request_headers: :class:`dict` mapping :class:`str` to :class:`str`
:param response_headers:
A mapping of HTTP header name to HTTP header value. Multiple values may
be joined with a comma. If a HTTP request was made, these headers
override any matching headers in the response. Otherwise this specifies
the entirety of the response headers.
:type response_headers: :class:`dict` mapping :class:`str` to :class:`str`
:param bool resolve_relative_uris:
        Should feedparser attempt to resolve relative URIs to absolute ones within
HTML content? Defaults to the value of
:data:`feedparser.RESOLVE_RELATIVE_URIS`, which is ``True``.
:param bool sanitize_html:
Should feedparser skip HTML sanitization? Only disable this if you know
what you are doing! Defaults to the value of
:data:`feedparser.SANITIZE_HTML`, which is ``True``.
:return: A :class:`FeedParserDict`.
"""
if not agent or sanitize_html is None or resolve_relative_uris is None:
import feedparser
if not agent:
agent = feedparser.USER_AGENT
if sanitize_html is None:
sanitize_html = feedparser.SANITIZE_HTML
if resolve_relative_uris is None:
resolve_relative_uris = feedparser.RESOLVE_RELATIVE_URIS
result = FeedParserDict(
bozo=False,
entries=[],
feed=FeedParserDict(),
headers={},
)
data = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers, result)
if not data:
return result
# overwrite existing headers using response_headers
result['headers'].update(response_headers or {})
data = convert_to_utf8(result['headers'], data, result)
use_strict_parser = result['encoding'] and True or False
result['version'], data, entities = replace_doctype(data)
# Ensure that baseuri is an absolute URI using an acceptable URI scheme.
contentloc = result['headers'].get('content-location', '')
href = result.get('href', '')
baseuri = make_safe_absolute_uri(href, contentloc) or make_safe_absolute_uri(contentloc) or href
baselang = result['headers'].get('content-language', None)
if isinstance(baselang, bytes) and baselang is not None:
baselang = baselang.decode('utf-8', 'ignore')
if not _XML_AVAILABLE:
use_strict_parser = 0
if use_strict_parser:
# initialize the SAX parser
feedparser = StrictFeedParser(baseuri, baselang, 'utf-8')
feedparser.resolve_relative_uris = resolve_relative_uris
feedparser.sanitize_html = sanitize_html
saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
try:
# disable downloading external doctype references, if possible
saxparser.setFeature(xml.sax.handler.feature_external_ges, 0)
except xml.sax.SAXNotSupportedException:
pass
saxparser.setContentHandler(feedparser)
saxparser.setErrorHandler(feedparser)
source = xml.sax.xmlreader.InputSource()
source.setByteStream(io.BytesIO(data))
try:
saxparser.parse(source)
except xml.sax.SAXException as e:
result['bozo'] = 1
result['bozo_exception'] = feedparser.exc or e
use_strict_parser = 0
if not use_strict_parser:
feedparser = LooseFeedParser(baseuri, baselang, 'utf-8', entities)
feedparser.resolve_relative_uris = resolve_relative_uris
feedparser.sanitize_html = sanitize_html
feedparser.feed(data.decode('utf-8', 'replace'))
result['feed'] = feedparser.feeddata
result['entries'] = feedparser.entries
result['version'] = result['version'] or feedparser.version
result['namespaces'] = feedparser.namespaces_in_use
return result
|
[
"def",
"parse",
"(",
"url_file_stream_or_string",
",",
"etag",
"=",
"None",
",",
"modified",
"=",
"None",
",",
"agent",
"=",
"None",
",",
"referrer",
"=",
"None",
",",
"handlers",
"=",
"None",
",",
"request_headers",
"=",
"None",
",",
"response_headers",
"=",
"None",
",",
"resolve_relative_uris",
"=",
"None",
",",
"sanitize_html",
"=",
"None",
")",
":",
"if",
"not",
"agent",
"or",
"sanitize_html",
"is",
"None",
"or",
"resolve_relative_uris",
"is",
"None",
":",
"import",
"feedparser",
"if",
"not",
"agent",
":",
"agent",
"=",
"feedparser",
".",
"USER_AGENT",
"if",
"sanitize_html",
"is",
"None",
":",
"sanitize_html",
"=",
"feedparser",
".",
"SANITIZE_HTML",
"if",
"resolve_relative_uris",
"is",
"None",
":",
"resolve_relative_uris",
"=",
"feedparser",
".",
"RESOLVE_RELATIVE_URIS",
"result",
"=",
"FeedParserDict",
"(",
"bozo",
"=",
"False",
",",
"entries",
"=",
"[",
"]",
",",
"feed",
"=",
"FeedParserDict",
"(",
")",
",",
"headers",
"=",
"{",
"}",
",",
")",
"data",
"=",
"_open_resource",
"(",
"url_file_stream_or_string",
",",
"etag",
",",
"modified",
",",
"agent",
",",
"referrer",
",",
"handlers",
",",
"request_headers",
",",
"result",
")",
"if",
"not",
"data",
":",
"return",
"result",
"# overwrite existing headers using response_headers",
"result",
"[",
"'headers'",
"]",
".",
"update",
"(",
"response_headers",
"or",
"{",
"}",
")",
"data",
"=",
"convert_to_utf8",
"(",
"result",
"[",
"'headers'",
"]",
",",
"data",
",",
"result",
")",
"use_strict_parser",
"=",
"result",
"[",
"'encoding'",
"]",
"and",
"True",
"or",
"False",
"result",
"[",
"'version'",
"]",
",",
"data",
",",
"entities",
"=",
"replace_doctype",
"(",
"data",
")",
"# Ensure that baseuri is an absolute URI using an acceptable URI scheme.",
"contentloc",
"=",
"result",
"[",
"'headers'",
"]",
".",
"get",
"(",
"'content-location'",
",",
"''",
")",
"href",
"=",
"result",
".",
"get",
"(",
"'href'",
",",
"''",
")",
"baseuri",
"=",
"make_safe_absolute_uri",
"(",
"href",
",",
"contentloc",
")",
"or",
"make_safe_absolute_uri",
"(",
"contentloc",
")",
"or",
"href",
"baselang",
"=",
"result",
"[",
"'headers'",
"]",
".",
"get",
"(",
"'content-language'",
",",
"None",
")",
"if",
"isinstance",
"(",
"baselang",
",",
"bytes",
")",
"and",
"baselang",
"is",
"not",
"None",
":",
"baselang",
"=",
"baselang",
".",
"decode",
"(",
"'utf-8'",
",",
"'ignore'",
")",
"if",
"not",
"_XML_AVAILABLE",
":",
"use_strict_parser",
"=",
"0",
"if",
"use_strict_parser",
":",
"# initialize the SAX parser",
"feedparser",
"=",
"StrictFeedParser",
"(",
"baseuri",
",",
"baselang",
",",
"'utf-8'",
")",
"feedparser",
".",
"resolve_relative_uris",
"=",
"resolve_relative_uris",
"feedparser",
".",
"sanitize_html",
"=",
"sanitize_html",
"saxparser",
"=",
"xml",
".",
"sax",
".",
"make_parser",
"(",
"PREFERRED_XML_PARSERS",
")",
"saxparser",
".",
"setFeature",
"(",
"xml",
".",
"sax",
".",
"handler",
".",
"feature_namespaces",
",",
"1",
")",
"try",
":",
"# disable downloading external doctype references, if possible",
"saxparser",
".",
"setFeature",
"(",
"xml",
".",
"sax",
".",
"handler",
".",
"feature_external_ges",
",",
"0",
")",
"except",
"xml",
".",
"sax",
".",
"SAXNotSupportedException",
":",
"pass",
"saxparser",
".",
"setContentHandler",
"(",
"feedparser",
")",
"saxparser",
".",
"setErrorHandler",
"(",
"feedparser",
")",
"source",
"=",
"xml",
".",
"sax",
".",
"xmlreader",
".",
"InputSource",
"(",
")",
"source",
".",
"setByteStream",
"(",
"io",
".",
"BytesIO",
"(",
"data",
")",
")",
"try",
":",
"saxparser",
".",
"parse",
"(",
"source",
")",
"except",
"xml",
".",
"sax",
".",
"SAXException",
"as",
"e",
":",
"result",
"[",
"'bozo'",
"]",
"=",
"1",
"result",
"[",
"'bozo_exception'",
"]",
"=",
"feedparser",
".",
"exc",
"or",
"e",
"use_strict_parser",
"=",
"0",
"if",
"not",
"use_strict_parser",
":",
"feedparser",
"=",
"LooseFeedParser",
"(",
"baseuri",
",",
"baselang",
",",
"'utf-8'",
",",
"entities",
")",
"feedparser",
".",
"resolve_relative_uris",
"=",
"resolve_relative_uris",
"feedparser",
".",
"sanitize_html",
"=",
"sanitize_html",
"feedparser",
".",
"feed",
"(",
"data",
".",
"decode",
"(",
"'utf-8'",
",",
"'replace'",
")",
")",
"result",
"[",
"'feed'",
"]",
"=",
"feedparser",
".",
"feeddata",
"result",
"[",
"'entries'",
"]",
"=",
"feedparser",
".",
"entries",
"result",
"[",
"'version'",
"]",
"=",
"result",
"[",
"'version'",
"]",
"or",
"feedparser",
".",
"version",
"result",
"[",
"'namespaces'",
"]",
"=",
"feedparser",
".",
"namespaces_in_use",
"return",
"result"
] |
https://github.com/pymedusa/Medusa/blob/1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38/ext/feedparser/api.py#L151-L269
|
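For reference, a short call to the parse function documented above; this is ordinary feedparser usage, with a placeholder URL and an in-memory feed to avoid passing an untrusted string directly, as the docstring advises.

import io
import feedparser

xml = b"<rss version='2.0'><channel><title>demo</title></channel></rss>"
d = feedparser.parse(io.BytesIO(xml))          # wrap raw bytes instead of passing a string
print(d.feed.get('title'), d.bozo)             # -> demo False

# fetching over HTTP (placeholder URL), overriding the User-Agent request header
d = feedparser.parse('https://example.com/feed.xml', agent='my-reader/1.0')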
|
openai/universe
|
cc9ce6ec241821bfb0f3b85dd455bd36e4ee7a8c
|
universe/vncdriver/vendor/pydes.py
|
python
|
_baseDes.setPadding
|
(self, pad)
|
setPadding() -> bytes of length 1. Padding character.
|
setPadding() -> bytes of length 1. Padding character.
|
[
"setPadding",
"()",
"-",
">",
"bytes",
"of",
"length",
"1",
".",
"Padding",
"character",
"."
] |
def setPadding(self, pad):
"""setPadding() -> bytes of length 1. Padding character."""
if pad is not None:
pad = self._guardAgainstUnicode(pad)
self._padding = pad
|
[
"def",
"setPadding",
"(",
"self",
",",
"pad",
")",
":",
"if",
"pad",
"is",
"not",
"None",
":",
"pad",
"=",
"self",
".",
"_guardAgainstUnicode",
"(",
"pad",
")",
"self",
".",
"_padding",
"=",
"pad"
] |
https://github.com/openai/universe/blob/cc9ce6ec241821bfb0f3b85dd455bd36e4ee7a8c/universe/vncdriver/vendor/pydes.py#L147-L151
|
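A sketch of the padding setter above; the class name, constants, and encrypt/decrypt signatures follow upstream pyDes and are assumed to hold for this vendored copy.

from universe.vncdriver.vendor.pydes import des, PAD_NORMAL  # path from the entry; exports assumed

cipher = des(b'8bytekey')                          # DES requires an 8-byte key (placeholder)
cipher.setPadding(b'*')                            # pad character used with PAD_NORMAL
secret = cipher.encrypt(b'hello', padmode=PAD_NORMAL)
print(cipher.decrypt(secret, padmode=PAD_NORMAL))  # -> b'hello'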
||
marshmallow-code/marshmallow
|
58c2045b8f272c2f1842458aa79f5c079a01429f
|
src/marshmallow/fields.py
|
python
|
Field.context
|
(self)
|
return self.parent.context
|
The context dictionary for the parent :class:`Schema`.
|
The context dictionary for the parent :class:`Schema`.
|
[
"The",
"context",
"dictionary",
"for",
"the",
"parent",
":",
"class",
":",
"Schema",
"."
] |
def context(self):
"""The context dictionary for the parent :class:`Schema`."""
return self.parent.context
|
[
"def",
"context",
"(",
"self",
")",
":",
"return",
"self",
".",
"parent",
".",
"context"
] |
https://github.com/marshmallow-code/marshmallow/blob/58c2045b8f272c2f1842458aa79f5c079a01429f/src/marshmallow/fields.py#L430-L432
|
|
nortikin/sverchok
|
7b460f01317c15f2681bfa3e337c5e7346f3711b
|
utils/geom.py
|
python
|
CircleEquation3D.get_projections
|
(self, vertices)
|
return self.radius * normalized + self.center
|
Calculate projections of vertices to the
circle. This method works with 3D circles only
(i.e., requires `normal` to be specified).
input: list of 3-tuples, or list of Vectors, or np.ndarray of shape (n,3).
returns: np.ndarray of shape (n,3).
|
Calculate projections of vertices to the
circle. This method works with 3D circles only
(i.e., requires `normal` to be specified).
|
[
"Calculate",
"projections",
"of",
"vertices",
"to",
"the",
"circle",
".",
"This",
"method",
"works",
"with",
"3D",
"circles",
"only",
"(",
"i",
".",
"e",
".",
"requires",
"normal",
"to",
"be",
"specified",
")",
"."
] |
def get_projections(self, vertices):
"""
Calculate projections of vertices to the
circle. This method works with 3D circles only
(i.e., requires `normal` to be specified).
input: list of 3-tuples, or list of Vectors, or np.ndarray of shape (n,3).
returns: np.ndarray of shape (n,3).
"""
vertices = np.array(vertices)
plane = PlaneEquation.from_normal_and_point(self.normal, self.center)
projected = plane.projection_of_points(vertices)
centered = projected - self.center
norms = np.linalg.norm(centered, axis=1)[np.newaxis].T
normalized = centered / norms
return self.radius * normalized + self.center
|
[
"def",
"get_projections",
"(",
"self",
",",
"vertices",
")",
":",
"vertices",
"=",
"np",
".",
"array",
"(",
"vertices",
")",
"plane",
"=",
"PlaneEquation",
".",
"from_normal_and_point",
"(",
"self",
".",
"normal",
",",
"self",
".",
"center",
")",
"projected",
"=",
"plane",
".",
"projection_of_points",
"(",
"vertices",
")",
"centered",
"=",
"projected",
"-",
"self",
".",
"center",
"norms",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"centered",
",",
"axis",
"=",
"1",
")",
"[",
"np",
".",
"newaxis",
"]",
".",
"T",
"normalized",
"=",
"centered",
"/",
"norms",
"return",
"self",
".",
"radius",
"*",
"normalized",
"+",
"self",
".",
"center"
] |
https://github.com/nortikin/sverchok/blob/7b460f01317c15f2681bfa3e337c5e7346f3711b/utils/geom.py#L2235-L2250
|
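A hedged sketch of the projection method above; the import path and the way a circle is set up (center/radius/normal attributes) are assumptions read off the snippet, not documented API.

import numpy as np
from sverchok.utils.geom import CircleEquation3D  # module path assumed from the entry

circle = CircleEquation3D()                  # assumption: attributes can be filled in directly
circle.center = np.array([0.0, 0.0, 0.0])
circle.radius = 1.0
circle.normal = np.array([0.0, 0.0, 1.0])    # unit circle in the XY plane

points = [(2.0, 0.0, 5.0), (0.0, 3.0, -1.0)]
print(circle.get_projections(points))        # shape (2, 3): points snapped onto the circle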
|
KvasirSecurity/Kvasir
|
a5b3775184a8343240e1154a1f762f75df04dc0a
|
modules/xlsxwriter/format.py
|
python
|
Format.set_bold
|
(self, bold=1)
|
Set the Format bold property.
Args:
bold: Default is 1, turns property on.
Returns:
Nothing.
|
Set the Format bold property.
|
[
"Set",
"the",
"Format",
"bold",
"property",
"."
] |
def set_bold(self, bold=1):
"""
Set the Format bold property.
Args:
bold: Default is 1, turns property on.
Returns:
Nothing.
"""
self.bold = bold
|
[
"def",
"set_bold",
"(",
"self",
",",
"bold",
"=",
"1",
")",
":",
"self",
".",
"bold",
"=",
"bold"
] |
https://github.com/KvasirSecurity/Kvasir/blob/a5b3775184a8343240e1154a1f762f75df04dc0a/modules/xlsxwriter/format.py#L155-L166
|
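Typical use of the bold property setter above, using the standard XlsxWriter workbook API; the file name is a placeholder.

import xlsxwriter

workbook = xlsxwriter.Workbook('demo.xlsx')
bold = workbook.add_format()
bold.set_bold()                              # same as set_bold(1): turns the property on
worksheet = workbook.add_worksheet()
worksheet.write('A1', 'Heading', bold)
workbook.close()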
||
HymanLiuTS/flaskTs
|
286648286976e85d9b9a5873632331efcafe0b21
|
flasky/lib/python2.7/site-packages/pip/download.py
|
python
|
is_archive_file
|
(name)
|
return False
|
Return True if `name` is considered an archive file.
|
Return True if `name` is considered an archive file.
|
[
"Return",
"True",
"if",
"name",
"is",
"a",
"considered",
"as",
"an",
"archive",
"file",
"."
] |
def is_archive_file(name):
"""Return True if `name` is a considered as an archive file."""
ext = splitext(name)[1].lower()
if ext in ARCHIVE_EXTENSIONS:
return True
return False
|
[
"def",
"is_archive_file",
"(",
"name",
")",
":",
"ext",
"=",
"splitext",
"(",
"name",
")",
"[",
"1",
"]",
".",
"lower",
"(",
")",
"if",
"ext",
"in",
"ARCHIVE_EXTENSIONS",
":",
"return",
"True",
"return",
"False"
] |
https://github.com/HymanLiuTS/flaskTs/blob/286648286976e85d9b9a5873632331efcafe0b21/flasky/lib/python2.7/site-packages/pip/download.py#L469-L474
|
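The helper above is internal to pip, so rather than importing it, here is a self-contained sketch of the same check; the extension tuple is an illustrative subset, not pip's exact ARCHIVE_EXTENSIONS.

from os.path import splitext

ARCHIVE_EXTENSIONS = ('.zip', '.whl', '.tar', '.tgz')   # illustrative subset only

def is_archive_file(name):
    """Return True if `name` is considered an archive file."""
    return splitext(name)[1].lower() in ARCHIVE_EXTENSIONS

print(is_archive_file('package-1.0.TAR'))   # True (comparison is case-insensitive)
print(is_archive_file('module.py'))         # False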
|
django-nonrel/django-nonrel
|
4fbfe7344481a5eab8698f79207f09124310131b
|
django/db/backends/oracle/base.py
|
python
|
DatabaseOperations.combine_expression
|
(self, connector, sub_expressions)
|
return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)
|
Oracle requires special cases for %% and & operators in query expressions
|
Oracle requires special cases for %% and & operators in query expressions
|
[
"Oracle",
"requires",
"special",
"cases",
"for",
"%%",
"and",
"&",
"operators",
"in",
"query",
"expressions"
] |
def combine_expression(self, connector, sub_expressions):
"Oracle requires special cases for %% and & operators in query expressions"
if connector == '%%':
return 'MOD(%s)' % ','.join(sub_expressions)
elif connector == '&':
return 'BITAND(%s)' % ','.join(sub_expressions)
elif connector == '|':
raise NotImplementedError("Bit-wise or is not supported in Oracle.")
return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)
|
[
"def",
"combine_expression",
"(",
"self",
",",
"connector",
",",
"sub_expressions",
")",
":",
"if",
"connector",
"==",
"'%%'",
":",
"return",
"'MOD(%s)'",
"%",
"','",
".",
"join",
"(",
"sub_expressions",
")",
"elif",
"connector",
"==",
"'&'",
":",
"return",
"'BITAND(%s)'",
"%",
"','",
".",
"join",
"(",
"sub_expressions",
")",
"elif",
"connector",
"==",
"'|'",
":",
"raise",
"NotImplementedError",
"(",
"\"Bit-wise or is not supported in Oracle.\"",
")",
"return",
"super",
"(",
"DatabaseOperations",
",",
"self",
")",
".",
"combine_expression",
"(",
"connector",
",",
"sub_expressions",
")"
] |
https://github.com/django-nonrel/django-nonrel/blob/4fbfe7344481a5eab8698f79207f09124310131b/django/db/backends/oracle/base.py#L358-L366
|
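A sketch of what the Oracle backend method above emits per connector; `ops` stands for a DatabaseOperations instance and is a placeholder for illustration.

# `ops` is assumed to be an instance of the Oracle DatabaseOperations class shown above
print(ops.combine_expression('%%', ['"price"', '3']))   # -> MOD("price",3)
print(ops.combine_expression('&', ['"flags"', '7']))    # -> BITAND("flags",7)
ops.combine_expression('|', ['"flags"', '1'])           # raises NotImplementedError
# any other connector falls through to the default implementation via super()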
|
ronreiter/interactive-tutorials
|
d026d1ae58941863d60eb30a8a94a8650d2bd4bf
|
suds/xsd/sxbasic.py
|
python
|
Include.open
|
(self, options)
|
return result
|
Open and include the referenced schema.
@param options: An options dictionary.
@type options: L{options.Options}
@return: The referenced schema.
@rtype: L{Schema}
|
Open and include the referenced schema.
|
[
"Open",
"and",
"include",
"the",
"refrenced",
"schema",
"."
] |
def open(self, options):
"""
        Open and include the referenced schema.
@param options: An options dictionary.
@type options: L{options.Options}
@return: The referenced schema.
@rtype: L{Schema}
"""
if self.opened:
return
self.opened = True
log.debug('%s, including location="%s"', self.id, self.location)
result = self.download(options)
log.debug('included:\n%s', result)
return result
|
[
"def",
"open",
"(",
"self",
",",
"options",
")",
":",
"if",
"self",
".",
"opened",
":",
"return",
"self",
".",
"opened",
"=",
"True",
"log",
".",
"debug",
"(",
"'%s, including location=\"%s\"'",
",",
"self",
".",
"id",
",",
"self",
".",
"location",
")",
"result",
"=",
"self",
".",
"download",
"(",
"options",
")",
"log",
".",
"debug",
"(",
"'included:\\n%s'",
",",
"result",
")",
"return",
"result"
] |
https://github.com/ronreiter/interactive-tutorials/blob/d026d1ae58941863d60eb30a8a94a8650d2bd4bf/suds/xsd/sxbasic.py#L593-L607
|
|
larryhastings/gilectomy
|
4315ec3f1d6d4f813cc82ce27a24e7f784dbfc1a
|
Lib/_pydecimal.py
|
python
|
Decimal.is_canonical
|
(self)
|
return True
|
Return True if self is canonical; otherwise return False.
Currently, the encoding of a Decimal instance is always
canonical, so this method returns True for any Decimal.
|
Return True if self is canonical; otherwise return False.
|
[
"Return",
"True",
"if",
"self",
"is",
"canonical",
";",
"otherwise",
"return",
"False",
"."
] |
def is_canonical(self):
"""Return True if self is canonical; otherwise return False.
Currently, the encoding of a Decimal instance is always
canonical, so this method returns True for any Decimal.
"""
return True
|
[
"def",
"is_canonical",
"(",
"self",
")",
":",
"return",
"True"
] |
https://github.com/larryhastings/gilectomy/blob/4315ec3f1d6d4f813cc82ce27a24e7f784dbfc1a/Lib/_pydecimal.py#L3149-L3155
|
|
lululxvi/deepxde
|
730c97282636e86c845ce2ba3253482f2178469e
|
deepxde/nn/tensorflow_compat_v1/nn.py
|
python
|
NN.apply_output_transform
|
(self, transform)
|
Apply a transform to the network outputs, i.e.,
outputs = transform(inputs, outputs).
|
Apply a transform to the network outputs, i.e.,
outputs = transform(inputs, outputs).
|
[
"Apply",
"a",
"transform",
"to",
"the",
"network",
"outputs",
"i",
".",
"e",
".",
"outputs",
"=",
"transform",
"(",
"inputs",
"outputs",
")",
"."
] |
def apply_output_transform(self, transform):
"""Apply a transform to the network outputs, i.e.,
outputs = transform(inputs, outputs).
"""
self._output_transform = transform
|
[
"def",
"apply_output_transform",
"(",
"self",
",",
"transform",
")",
":",
"self",
".",
"_output_transform",
"=",
"transform"
] |
https://github.com/lululxvi/deepxde/blob/730c97282636e86c845ce2ba3253482f2178469e/deepxde/nn/tensorflow_compat_v1/nn.py#L68-L72
|
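A short sketch of registering an output transform as described above; `net` is a placeholder for any network object exposing this method, and only the transform signature (inputs, outputs) comes from the snippet.

def enforce_boundary(inputs, outputs):
    # example transform: force the prediction to vanish where the input is zero
    return inputs * outputs

# `net` is assumed to be an already-constructed deepxde network instance (placeholder)
net.apply_output_transform(enforce_boundary)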
||
devitocodes/devito
|
6abd441e3f5f091775ad332be6b95e017b8cbd16
|
devito/symbolics/search.py
|
python
|
Search.__init__
|
(self, query, mode, deep=False)
|
Search objects in an expression. This is much quicker than the more
general SymPy's find.
Parameters
----------
query
Any query from :mod:`queries`.
mode : str
Either 'unique' or 'all' (catch all instances).
deep : bool, optional
If True, propagate the search within an Indexed's indices. Defaults to False.
|
Search objects in an expression. This is much quicker than the more
general SymPy's find.
|
[
"Search",
"objects",
"in",
"an",
"expression",
".",
"This",
"is",
"much",
"quicker",
"than",
"the",
"more",
"general",
"SymPy",
"s",
"find",
"."
] |
def __init__(self, query, mode, deep=False):
"""
Search objects in an expression. This is much quicker than the more
general SymPy's find.
Parameters
----------
query
Any query from :mod:`queries`.
mode : str
Either 'unique' or 'all' (catch all instances).
deep : bool, optional
If True, propagate the search within an Indexed's indices. Defaults to False.
"""
self.query = query
self.collection = self.modes[mode]
self.deep = deep
|
[
"def",
"__init__",
"(",
"self",
",",
"query",
",",
"mode",
",",
"deep",
"=",
"False",
")",
":",
"self",
".",
"query",
"=",
"query",
"self",
".",
"collection",
"=",
"self",
".",
"modes",
"[",
"mode",
"]",
"self",
".",
"deep",
"=",
"deep"
] |
https://github.com/devitocodes/devito/blob/6abd441e3f5f091775ad332be6b95e017b8cbd16/devito/symbolics/search.py#L32-L48
|
||
holzschu/Carnets
|
44effb10ddfc6aa5c8b0687582a724ba82c6b547
|
Library/lib/python3.7/site-packages/astropy-4.0-py3.7-macosx-10.9-x86_64.egg/astropy/timeseries/periodograms/bls/core.py
|
python
|
BoxLeastSquares._format_results
|
(self, objective, period, results)
|
return BoxLeastSquaresResults(
objective, period, power, depth, depth_err, duration, transit_time,
depth_snr, log_likelihood)
|
A private method used to wrap and add units to the periodogram
Parameters
----------
objective : str
The name of the objective used in the optimization.
period : array_like or `~astropy.units.Quantity`
The set of trial periods.
results : tuple
The output of one of the periodogram implementations.
|
A private method used to wrap and add units to the periodogram
|
[
"A",
"private",
"method",
"used",
"to",
"wrap",
"and",
"add",
"units",
"to",
"the",
"periodogram"
] |
def _format_results(self, objective, period, results):
"""A private method used to wrap and add units to the periodogram
Parameters
----------
objective : str
The name of the objective used in the optimization.
period : array_like or `~astropy.units.Quantity`
The set of trial periods.
results : tuple
The output of one of the periodogram implementations.
"""
(power, depth, depth_err, duration, transit_time, depth_snr,
log_likelihood) = results
if has_units(self._trel):
transit_time = units.Quantity(transit_time, unit=self._trel.unit)
transit_time = self._as_absolute_time_if_needed('transit_time', transit_time)
duration = units.Quantity(duration, unit=self._trel.unit)
if has_units(self.y):
depth = units.Quantity(depth, unit=self.y.unit)
depth_err = units.Quantity(depth_err, unit=self.y.unit)
depth_snr = units.Quantity(depth_snr, unit=units.one)
if self.dy is None:
if objective == "likelihood":
power = units.Quantity(power, unit=self.y.unit**2)
else:
power = units.Quantity(power, unit=units.one)
log_likelihood = units.Quantity(log_likelihood,
unit=self.y.unit**2)
else:
power = units.Quantity(power, unit=units.one)
log_likelihood = units.Quantity(log_likelihood, unit=units.one)
return BoxLeastSquaresResults(
objective, period, power, depth, depth_err, duration, transit_time,
depth_snr, log_likelihood)
|
[
"def",
"_format_results",
"(",
"self",
",",
"objective",
",",
"period",
",",
"results",
")",
":",
"(",
"power",
",",
"depth",
",",
"depth_err",
",",
"duration",
",",
"transit_time",
",",
"depth_snr",
",",
"log_likelihood",
")",
"=",
"results",
"if",
"has_units",
"(",
"self",
".",
"_trel",
")",
":",
"transit_time",
"=",
"units",
".",
"Quantity",
"(",
"transit_time",
",",
"unit",
"=",
"self",
".",
"_trel",
".",
"unit",
")",
"transit_time",
"=",
"self",
".",
"_as_absolute_time_if_needed",
"(",
"'transit_time'",
",",
"transit_time",
")",
"duration",
"=",
"units",
".",
"Quantity",
"(",
"duration",
",",
"unit",
"=",
"self",
".",
"_trel",
".",
"unit",
")",
"if",
"has_units",
"(",
"self",
".",
"y",
")",
":",
"depth",
"=",
"units",
".",
"Quantity",
"(",
"depth",
",",
"unit",
"=",
"self",
".",
"y",
".",
"unit",
")",
"depth_err",
"=",
"units",
".",
"Quantity",
"(",
"depth_err",
",",
"unit",
"=",
"self",
".",
"y",
".",
"unit",
")",
"depth_snr",
"=",
"units",
".",
"Quantity",
"(",
"depth_snr",
",",
"unit",
"=",
"units",
".",
"one",
")",
"if",
"self",
".",
"dy",
"is",
"None",
":",
"if",
"objective",
"==",
"\"likelihood\"",
":",
"power",
"=",
"units",
".",
"Quantity",
"(",
"power",
",",
"unit",
"=",
"self",
".",
"y",
".",
"unit",
"**",
"2",
")",
"else",
":",
"power",
"=",
"units",
".",
"Quantity",
"(",
"power",
",",
"unit",
"=",
"units",
".",
"one",
")",
"log_likelihood",
"=",
"units",
".",
"Quantity",
"(",
"log_likelihood",
",",
"unit",
"=",
"self",
".",
"y",
".",
"unit",
"**",
"2",
")",
"else",
":",
"power",
"=",
"units",
".",
"Quantity",
"(",
"power",
",",
"unit",
"=",
"units",
".",
"one",
")",
"log_likelihood",
"=",
"units",
".",
"Quantity",
"(",
"log_likelihood",
",",
"unit",
"=",
"units",
".",
"one",
")",
"return",
"BoxLeastSquaresResults",
"(",
"objective",
",",
"period",
",",
"power",
",",
"depth",
",",
"depth_err",
",",
"duration",
",",
"transit_time",
",",
"depth_snr",
",",
"log_likelihood",
")"
] |
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/astropy-4.0-py3.7-macosx-10.9-x86_64.egg/astropy/timeseries/periodograms/bls/core.py#L701-L741
|
|
PyWavelets/pywt
|
9a72143be347481e2276371efb41ae0266b9c808
|
pywt/_mra.py
|
python
|
imra
|
(mra_coeffs)
|
return reduce(lambda x, y: x + y, mra_coeffs)
|
Inverse 1D multiresolution analysis via summation.
Parameters
----------
mra_coeffs : list of ndarray
Multiresolution analysis coefficients as returned by `mra`.
Returns
-------
rec : ndarray
The reconstructed signal.
See Also
--------
mra
References
----------
.. [1] Donald B. Percival and Harold O. Mofjeld. Analysis of Subtidal
Coastal Sea Level Fluctuations Using Wavelets. Journal of the American
Statistical Association Vol. 92, No. 439 (Sep., 1997), pp. 868-880.
https://doi.org/10.2307/2965551
|
Inverse 1D multiresolution analysis via summation.
|
[
"Inverse",
"1D",
"multiresolution",
"analysis",
"via",
"summation",
"."
] |
def imra(mra_coeffs):
"""Inverse 1D multiresolution analysis via summation.
Parameters
----------
mra_coeffs : list of ndarray
Multiresolution analysis coefficients as returned by `mra`.
Returns
-------
rec : ndarray
The reconstructed signal.
See Also
--------
mra
References
----------
.. [1] Donald B. Percival and Harold O. Mofjeld. Analysis of Subtidal
Coastal Sea Level Fluctuations Using Wavelets. Journal of the American
Statistical Association Vol. 92, No. 439 (Sep., 1997), pp. 868-880.
https://doi.org/10.2307/2965551
"""
return reduce(lambda x, y: x + y, mra_coeffs)
|
[
"def",
"imra",
"(",
"mra_coeffs",
")",
":",
"return",
"reduce",
"(",
"lambda",
"x",
",",
"y",
":",
"x",
"+",
"y",
",",
"mra_coeffs",
")"
] |
https://github.com/PyWavelets/pywt/blob/9a72143be347481e2276371efb41ae0266b9c808/pywt/_mra.py#L116-L140
|
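A round-trip sketch pairing imra with its forward counterpart mra; both are public in recent PyWavelets releases, though the exact mra signature used here is an assumption on older versions.

import numpy as np
import pywt

x = np.sin(np.linspace(0, 8 * np.pi, 256))
components = pywt.mra(x, 'db4', level=3)   # additive multiresolution components of x
rec = pywt.imra(components)                # the inverse is simply the sum of the components
print(np.allclose(x, rec))                 # True, up to floating-point error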
|
csuldw/AntSpider
|
d5fac11379c471079f13a1f159a3377a7eec27d6
|
scrapy/douban/pipelines.py
|
python
|
CoverPipeline.process_item
|
(self, item, spider)
|
return dfd.addCallback(self.item_completed, item, info)
|
[] |
def process_item(self, item, spider):
if 'meta' not in spider.name:
return item
info = self.spiderinfo
requests = arg_to_iter(self.get_media_requests(item, info))
dlist = [self._process_request(r, info) for r in requests]
dfd = DeferredList(dlist, consumeErrors=1)
return dfd.addCallback(self.item_completed, item, info)
|
[
"def",
"process_item",
"(",
"self",
",",
"item",
",",
"spider",
")",
":",
"if",
"'meta'",
"not",
"in",
"spider",
".",
"name",
":",
"return",
"item",
"info",
"=",
"self",
".",
"spiderinfo",
"requests",
"=",
"arg_to_iter",
"(",
"self",
".",
"get_media_requests",
"(",
"item",
",",
"info",
")",
")",
"dlist",
"=",
"[",
"self",
".",
"_process_request",
"(",
"r",
",",
"info",
")",
"for",
"r",
"in",
"requests",
"]",
"dfd",
"=",
"DeferredList",
"(",
"dlist",
",",
"consumeErrors",
"=",
"1",
")",
"return",
"dfd",
".",
"addCallback",
"(",
"self",
".",
"item_completed",
",",
"item",
",",
"info",
")"
] |
https://github.com/csuldw/AntSpider/blob/d5fac11379c471079f13a1f159a3377a7eec27d6/scrapy/douban/pipelines.py#L214-L221
|
|||
softlayer/softlayer-python
|
cdef7d63c66413197a9a97b0414de9f95887a82a
|
SoftLayer/managers/block.py
|
python
|
BlockStorageManager.create_or_update_lun_id
|
(self, volume_id, lun_id)
|
return self.client.call('Network_Storage', 'createOrUpdateLunId', lun_id, id=volume_id)
|
Set the LUN ID on a volume.
:param integer volume_id: The id of the volume
:param integer lun_id: LUN ID to set on the volume
:return: a SoftLayer_Network_Storage_Property object
|
Set the LUN ID on a volume.
|
[
"Set",
"the",
"LUN",
"ID",
"on",
"a",
"volume",
"."
] |
def create_or_update_lun_id(self, volume_id, lun_id):
"""Set the LUN ID on a volume.
:param integer volume_id: The id of the volume
:param integer lun_id: LUN ID to set on the volume
:return: a SoftLayer_Network_Storage_Property object
"""
return self.client.call('Network_Storage', 'createOrUpdateLunId', lun_id, id=volume_id)
|
[
"def",
"create_or_update_lun_id",
"(",
"self",
",",
"volume_id",
",",
"lun_id",
")",
":",
"return",
"self",
".",
"client",
".",
"call",
"(",
"'Network_Storage'",
",",
"'createOrUpdateLunId'",
",",
"lun_id",
",",
"id",
"=",
"volume_id",
")"
] |
https://github.com/softlayer/softlayer-python/blob/cdef7d63c66413197a9a97b0414de9f95887a82a/SoftLayer/managers/block.py#L184-L191
|
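A usage sketch for the manager call above; the client construction follows the usual SoftLayer pattern, and the credentials and ids are placeholders.

import SoftLayer

client = SoftLayer.create_client_from_env(username='demo', api_key='...')  # placeholders
block = SoftLayer.BlockStorageManager(client)
prop = block.create_or_update_lun_id(volume_id=12345678, lun_id=2)
print(prop)   # SoftLayer_Network_Storage_Property describing the LUN ID that was set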
|
rwth-i6/returnn
|
f2d718a197a280b0d5f0fd91a7fcb8658560dddb
|
returnn/tf/layers/basic.py
|
python
|
SeqLenMaskLayer.transform_config_dict
|
(cls, d, network, get_layer)
|
:param dict[str] d:
:param returnn.tf.network.TFNetwork network:
:param get_layer:
|
:param dict[str] d:
:param returnn.tf.network.TFNetwork network:
:param get_layer:
|
[
":",
"param",
"dict",
"[",
"str",
"]",
"d",
":",
":",
"param",
"returnn",
".",
"tf",
".",
"network",
".",
"TFNetwork",
"network",
":",
":",
"param",
"get_layer",
":"
] |
def transform_config_dict(cls, d, network, get_layer):
"""
:param dict[str] d:
:param returnn.tf.network.TFNetwork network:
:param get_layer:
"""
super(SeqLenMaskLayer, cls).transform_config_dict(d, network=network, get_layer=get_layer)
if d.get("seq_len_source", None):
d["seq_len_source"] = get_layer(d["seq_len_source"])
if d.get("start", None):
d["start"] = get_layer(d["start"])
if d.get("window_start", None):
d["window_start"] = get_layer(d["window_start"])
if d.get("window_size", None):
if isinstance(d["window_size"], str):
d["window_size"] = get_layer(d["window_size"])
|
[
"def",
"transform_config_dict",
"(",
"cls",
",",
"d",
",",
"network",
",",
"get_layer",
")",
":",
"super",
"(",
"SeqLenMaskLayer",
",",
"cls",
")",
".",
"transform_config_dict",
"(",
"d",
",",
"network",
"=",
"network",
",",
"get_layer",
"=",
"get_layer",
")",
"if",
"d",
".",
"get",
"(",
"\"seq_len_source\"",
",",
"None",
")",
":",
"d",
"[",
"\"seq_len_source\"",
"]",
"=",
"get_layer",
"(",
"d",
"[",
"\"seq_len_source\"",
"]",
")",
"if",
"d",
".",
"get",
"(",
"\"start\"",
",",
"None",
")",
":",
"d",
"[",
"\"start\"",
"]",
"=",
"get_layer",
"(",
"d",
"[",
"\"start\"",
"]",
")",
"if",
"d",
".",
"get",
"(",
"\"window_start\"",
",",
"None",
")",
":",
"d",
"[",
"\"window_start\"",
"]",
"=",
"get_layer",
"(",
"d",
"[",
"\"window_start\"",
"]",
")",
"if",
"d",
".",
"get",
"(",
"\"window_size\"",
",",
"None",
")",
":",
"if",
"isinstance",
"(",
"d",
"[",
"\"window_size\"",
"]",
",",
"str",
")",
":",
"d",
"[",
"\"window_size\"",
"]",
"=",
"get_layer",
"(",
"d",
"[",
"\"window_size\"",
"]",
")"
] |
https://github.com/rwth-i6/returnn/blob/f2d718a197a280b0d5f0fd91a7fcb8658560dddb/returnn/tf/layers/basic.py#L2191-L2206
|
||
aaPanel/BaoTa
|
9bb1336f31ae2893ab513af7a3efed633338c64b
|
class/pyotp/totp.py
|
python
|
TOTP.now
|
(self)
|
return self.generate_otp(self.timecode(datetime.datetime.now()))
|
Generate the current time OTP
:returns: OTP value
:rtype: str
|
Generate the current time OTP
|
[
"Generate",
"the",
"current",
"time",
"OTP"
] |
def now(self):
"""
Generate the current time OTP
:returns: OTP value
:rtype: str
"""
return self.generate_otp(self.timecode(datetime.datetime.now()))
|
[
"def",
"now",
"(",
"self",
")",
":",
"return",
"self",
".",
"generate_otp",
"(",
"self",
".",
"timecode",
"(",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
")",
")"
] |
https://github.com/aaPanel/BaoTa/blob/9bb1336f31ae2893ab513af7a3efed633338c64b/class/pyotp/totp.py#L37-L44
|
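Standard pyotp usage of the method above; the base32 secret is a placeholder, and the bundled copy in this project is assumed to behave like upstream pyotp.

import pyotp

totp = pyotp.TOTP('JBSWY3DPEHPK3PXP')    # placeholder base32 secret
code = totp.now()                        # OTP for the current 30-second time window
print(code, totp.verify(code))           # e.g. '492039 True'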
|
p2pool/p2pool
|
53c438bbada06b9d4a9a465bc13f7694a7a322b7
|
SOAPpy/Types.py
|
python
|
compoundType._aslist
|
(self, item=None)
|
[] |
def _aslist(self, item=None):
if item is not None:
return self.__dict__[self._keyord[item]]
else:
return map( lambda x: self.__dict__[x], self._keyord)
|
[
"def",
"_aslist",
"(",
"self",
",",
"item",
"=",
"None",
")",
":",
"if",
"item",
"is",
"not",
"None",
":",
"return",
"self",
".",
"__dict__",
"[",
"self",
".",
"_keyord",
"[",
"item",
"]",
"]",
"else",
":",
"return",
"map",
"(",
"lambda",
"x",
":",
"self",
".",
"__dict__",
"[",
"x",
"]",
",",
"self",
".",
"_keyord",
")"
] |
https://github.com/p2pool/p2pool/blob/53c438bbada06b9d4a9a465bc13f7694a7a322b7/SOAPpy/Types.py#L1260-L1264
|
||||
rlgraph/rlgraph
|
428fc136a9a075f29a397495b4226a491a287be2
|
rlgraph/graphs/__init__.py
|
python
|
backend_executor
|
()
|
Returns default class for backend.
Returns: Executioner for the specified backend.
|
Returns default class for backend.
Returns: Executioner for the specified backend.
|
[
"Returns",
"default",
"class",
"for",
"backend",
".",
"Returns",
":",
"Executioner",
"for",
"the",
"specified",
"backend",
"."
] |
def backend_executor():
"""
Returns default class for backend.
Returns: Executioner for the specified backend.
"""
if get_backend() == "tf":
return TensorFlowExecutor
elif get_backend() == "pytorch":
return PyTorchExecutor
|
[
"def",
"backend_executor",
"(",
")",
":",
"if",
"get_backend",
"(",
")",
"==",
"\"tf\"",
":",
"return",
"TensorFlowExecutor",
"elif",
"get_backend",
"(",
")",
"==",
"\"pytorch\"",
":",
"return",
"PyTorchExecutor"
] |
https://github.com/rlgraph/rlgraph/blob/428fc136a9a075f29a397495b4226a491a287be2/rlgraph/graphs/__init__.py#L29-L37
|
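A small sketch of picking the executor class through the helper above; the import path mirrors the entry, and a working TensorFlow or PyTorch backend is assumed to be installed.

from rlgraph.graphs import backend_executor  # module path taken from the entry above

ExecutorCls = backend_executor()   # TensorFlowExecutor or PyTorchExecutor, per get_backend()
print(ExecutorCls.__name__)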
||
ankitects/anki
|
4360fd16fb20b647ca2597d38129024d952f996e
|
qt/aqt/browser/table/model.py
|
python
|
DataModel.get_cached_row
|
(self, index: QModelIndex)
|
return self._rows.get(self.get_item(index))
|
Get row if it is cached, regardless of staleness.
|
Get row if it is cached, regardless of staleness.
|
[
"Get",
"row",
"if",
"it",
"is",
"cached",
"regardless",
"of",
"staleness",
"."
] |
def get_cached_row(self, index: QModelIndex) -> CellRow | None:
"""Get row if it is cached, regardless of staleness."""
return self._rows.get(self.get_item(index))
|
[
"def",
"get_cached_row",
"(",
"self",
",",
"index",
":",
"QModelIndex",
")",
"->",
"CellRow",
"|",
"None",
":",
"return",
"self",
".",
"_rows",
".",
"get",
"(",
"self",
".",
"get_item",
"(",
"index",
")",
")"
] |
https://github.com/ankitects/anki/blob/4360fd16fb20b647ca2597d38129024d952f996e/qt/aqt/browser/table/model.py#L121-L123
|
|
catap/namebench
|
9913a7a1a7955a3759eb18cbe73b421441a7a00f
|
nb_third_party/dns/resolver.py
|
python
|
Cache.flush
|
(self, key=None)
|
Flush the cache.
If I{key} is specified, only that item is flushed. Otherwise
the entire cache is flushed.
@param key: the key to flush
@type key: (dns.name.Name, int, int) tuple or None
|
Flush the cache.
|
[
"Flush",
"the",
"cache",
"."
] |
def flush(self, key=None):
"""Flush the cache.
If I{key} is specified, only that item is flushed. Otherwise
the entire cache is flushed.
@param key: the key to flush
@type key: (dns.name.Name, int, int) tuple or None
"""
if not key is None:
if self.data.has_key(key):
del self.data[key]
else:
self.data = {}
self.next_cleaning = time.time() + self.cleaning_interval
|
[
"def",
"flush",
"(",
"self",
",",
"key",
"=",
"None",
")",
":",
"if",
"not",
"key",
"is",
"None",
":",
"if",
"self",
".",
"data",
".",
"has_key",
"(",
"key",
")",
":",
"del",
"self",
".",
"data",
"[",
"key",
"]",
"else",
":",
"self",
".",
"data",
"=",
"{",
"}",
"self",
".",
"next_cleaning",
"=",
"time",
".",
"time",
"(",
")",
"+",
"self",
".",
"cleaning_interval"
] |
https://github.com/catap/namebench/blob/9913a7a1a7955a3759eb18cbe73b421441a7a00f/nb_third_party/dns/resolver.py#L226-L241
|
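A sketch of the cache API above using the Python 2-era dnspython bundled with namebench; the key tuple shape follows the docstring, and the looked-up name is a placeholder.

import dns.name
import dns.rdataclass
import dns.rdatatype
import dns.resolver

cache = dns.resolver.Cache(cleaning_interval=300.0)
key = (dns.name.from_text('example.com.'), dns.rdatatype.A, dns.rdataclass.IN)
cache.flush(key)   # drop a single cached answer, if present
cache.flush()      # drop everything and push back the next cleaning time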
||
andresriancho/w3af
|
cd22e5252243a87aaa6d0ddea47cf58dacfe00a9
|
w3af/plugins/attack/db/sqlmap/plugins/dbms/informix/enumeration.py
|
python
|
Enumeration.__init__
|
(self)
|
[] |
def __init__(self):
GenericEnumeration.__init__(self)
|
[
"def",
"__init__",
"(",
"self",
")",
":",
"GenericEnumeration",
".",
"__init__",
"(",
"self",
")"
] |
https://github.com/andresriancho/w3af/blob/cd22e5252243a87aaa6d0ddea47cf58dacfe00a9/w3af/plugins/attack/db/sqlmap/plugins/dbms/informix/enumeration.py#L12-L13
|
||||
collinsctk/PyQYT
|
7af3673955f94ff1b2df2f94220cd2dab2e252af
|
ExtentionPackages/tornado/concurrent.py
|
python
|
Future.running
|
(self)
|
return not self._done
|
Returns True if this operation is currently running.
|
Returns True if this operation is currently running.
|
[
"Returns",
"True",
"if",
"this",
"operation",
"is",
"currently",
"running",
"."
] |
def running(self):
"""Returns True if this operation is currently running."""
return not self._done
|
[
"def",
"running",
"(",
"self",
")",
":",
"return",
"not",
"self",
".",
"_done"
] |
https://github.com/collinsctk/PyQYT/blob/7af3673955f94ff1b2df2f94220cd2dab2e252af/ExtentionPackages/tornado/concurrent.py#L206-L208
|
|
google/coursebuilder-core
|
08f809db3226d9269e30d5edd0edd33bd22041f4
|
coursebuilder/controllers/sites.py
|
python
|
ApplicationRequestHandler._dispatch
|
(self, handler, verb, path)
|
Dispatch the verb, path to a given handler.
|
Dispatch the verb, path to a given handler.
|
[
"Dispatch",
"the",
"verb",
"path",
"to",
"a",
"given",
"handler",
"."
] |
def _dispatch(self, handler, verb, path):
"""Dispatch the verb, path to a given handler."""
# these need to be empty, or dispatch() will attempt to use them; we
        # don't want them to be set or used because routing phase is over by now
self.request.route_args = []
self.request.route_kwargs = {}
set_default_response_headers(handler)
self.before_method(handler, verb, path)
try:
status_code = None
try:
handler.dispatch()
status_code = handler.response.status_code
except exc.HTTPRedirection as e:
raise e
except Exception as e: # pylint: disable=broad-except
status_code = self.get_status_code_from_dispatch_exception(
verb, path, e)
self._finalize_namespaced_response(
handler.app_context,
handler.request, handler.response, status_code)
finally:
self.after_method(handler, verb, path)
|
[
"def",
"_dispatch",
"(",
"self",
",",
"handler",
",",
"verb",
",",
"path",
")",
":",
"# these need to be empty, or dispatch() will attempt to use them; we",
"# don't want them to be set or used because routing phase if over by now",
"self",
".",
"request",
".",
"route_args",
"=",
"[",
"]",
"self",
".",
"request",
".",
"route_kwargs",
"=",
"{",
"}",
"set_default_response_headers",
"(",
"handler",
")",
"self",
".",
"before_method",
"(",
"handler",
",",
"verb",
",",
"path",
")",
"try",
":",
"status_code",
"=",
"None",
"try",
":",
"handler",
".",
"dispatch",
"(",
")",
"status_code",
"=",
"handler",
".",
"response",
".",
"status_code",
"except",
"exc",
".",
"HTTPRedirection",
"as",
"e",
":",
"raise",
"e",
"except",
"Exception",
"as",
"e",
":",
"# pylint: disable=broad-except",
"status_code",
"=",
"self",
".",
"get_status_code_from_dispatch_exception",
"(",
"verb",
",",
"path",
",",
"e",
")",
"self",
".",
"_finalize_namespaced_response",
"(",
"handler",
".",
"app_context",
",",
"handler",
".",
"request",
",",
"handler",
".",
"response",
",",
"status_code",
")",
"finally",
":",
"self",
".",
"after_method",
"(",
"handler",
",",
"verb",
",",
"path",
")"
] |
https://github.com/google/coursebuilder-core/blob/08f809db3226d9269e30d5edd0edd33bd22041f4/coursebuilder/controllers/sites.py#L1546-L1569
|
||
ambujraj/hacktoberfest2018
|
53df2cac8b3404261131a873352ec4f2ffa3544d
|
MAC_changer/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/pyparsing.py
|
python
|
delimitedList
|
( expr, delim=",", combine=False )
|
Helper to define a delimited list of expressions - the delimiter defaults to ','.
By default, the list elements and delimiters can have intervening whitespace, and
comments, but this can be overridden by passing C{combine=True} in the constructor.
If C{combine} is set to C{True}, the matching tokens are returned as a single token
string, with the delimiters included; otherwise, the matching tokens are returned
as a list of tokens, with the delimiters suppressed.
Example::
delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
|
Helper to define a delimited list of expressions - the delimiter defaults to ','.
By default, the list elements and delimiters can have intervening whitespace, and
comments, but this can be overridden by passing C{combine=True} in the constructor.
If C{combine} is set to C{True}, the matching tokens are returned as a single token
string, with the delimiters included; otherwise, the matching tokens are returned
as a list of tokens, with the delimiters suppressed.
|
[
"Helper",
"to",
"define",
"a",
"delimited",
"list",
"of",
"expressions",
"-",
"the",
"delimiter",
"defaults",
"to",
".",
"By",
"default",
"the",
"list",
"elements",
"and",
"delimiters",
"can",
"have",
"intervening",
"whitespace",
"and",
"comments",
"but",
"this",
"can",
"be",
"overridden",
"by",
"passing",
"C",
"{",
"combine",
"=",
"True",
"}",
"in",
"the",
"constructor",
".",
"If",
"C",
"{",
"combine",
"}",
"is",
"set",
"to",
"C",
"{",
"True",
"}",
"the",
"matching",
"tokens",
"are",
"returned",
"as",
"a",
"single",
"token",
"string",
"with",
"the",
"delimiters",
"included",
";",
"otherwise",
"the",
"matching",
"tokens",
"are",
"returned",
"as",
"a",
"list",
"of",
"tokens",
"with",
"the",
"delimiters",
"suppressed",
"."
] |
def delimitedList( expr, delim=",", combine=False ):
"""
Helper to define a delimited list of expressions - the delimiter defaults to ','.
By default, the list elements and delimiters can have intervening whitespace, and
comments, but this can be overridden by passing C{combine=True} in the constructor.
If C{combine} is set to C{True}, the matching tokens are returned as a single token
string, with the delimiters included; otherwise, the matching tokens are returned
as a list of tokens, with the delimiters suppressed.
Example::
delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
"""
dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
if combine:
return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
else:
return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
|
[
"def",
"delimitedList",
"(",
"expr",
",",
"delim",
"=",
"\",\"",
",",
"combine",
"=",
"False",
")",
":",
"dlName",
"=",
"_ustr",
"(",
"expr",
")",
"+",
"\" [\"",
"+",
"_ustr",
"(",
"delim",
")",
"+",
"\" \"",
"+",
"_ustr",
"(",
"expr",
")",
"+",
"\"]...\"",
"if",
"combine",
":",
"return",
"Combine",
"(",
"expr",
"+",
"ZeroOrMore",
"(",
"delim",
"+",
"expr",
")",
")",
".",
"setName",
"(",
"dlName",
")",
"else",
":",
"return",
"(",
"expr",
"+",
"ZeroOrMore",
"(",
"Suppress",
"(",
"delim",
")",
"+",
"expr",
")",
")",
".",
"setName",
"(",
"dlName",
")"
] |
https://github.com/ambujraj/hacktoberfest2018/blob/53df2cac8b3404261131a873352ec4f2ffa3544d/MAC_changer/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/pyparsing.py#L4428-L4445
|
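A runnable usage sketch for the helper above (assumes a classic pyparsing installation; it replays the docstring examples and prints the token lists so the effect of combine is visible):

from pyparsing import Word, alphas, hexnums, delimitedList

# combine=False (default): delimiters are suppressed, elements come back separately
print(delimitedList(Word(alphas)).parseString("aa,bb,cc").asList())
# -> ['aa', 'bb', 'cc']

# combine=True: delimiters are kept and the whole match is a single token string
mac = delimitedList(Word(hexnums), delim=":", combine=True)
print(mac.parseString("AA:BB:CC:DD:EE").asList())
# -> ['AA:BB:CC:DD:EE']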
||
TencentCloud/tencentcloud-sdk-python
|
3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2
|
tencentcloud/ump/v20200918/models.py
|
python
|
CreateProgramStateResponse.__init__
|
(self)
|
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
|
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
|
[
"r",
":",
"param",
"RequestId",
":",
"唯一请求",
"ID,每次请求都会返回。定位问题时需要提供该次请求的",
"RequestId。",
":",
"type",
"RequestId",
":",
"str"
] |
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
|
[
"def",
"__init__",
"(",
"self",
")",
":",
"self",
".",
"RequestId",
"=",
"None"
] |
https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/ump/v20200918/models.py#L713-L718
|
||
carpedm20/SPIRAL-tensorflow
|
2cca458f5d0d856c7bb73e877eea24906eda522f
|
agent.py
|
python
|
Agent.prepare_local_network
|
(self)
|
[] |
def prepare_local_network(self):
self.local_network = models.Policy(
self.args, self.env, "local",
self.input_shape, self.action_sizes,
data_format='channels_last')
##########################
# Trajectory queue
##########################
self.trajectory_placeholders = {
name:tf.placeholder(
tf.float32, dict(self.queue_shapes)[name],
name="{}_in".format(name)) \
for name, shape in self.queue_shapes
}
self.trajectory_enqueues = self.trajectory_queue.enqueue(
{ name:self.trajectory_placeholders[name] \
for name, _ in self.queue_shapes })
##########################
# Replay queue
##########################
if self.args.loss == 'gan':
self.replay_placeholder = tf.placeholder(
tf.float32, self.input_shape,
name="replay_in")
self.replay_enqueue = self.replay_queue.enqueue(
self.replay_placeholder)
else:
self.replay_placeholder = None
self.replay_enqueue = None
###############################
# Thread dealing with queues
###############################
self.worker_thread = rl_utils.WorkerThread(
self.env,
self.local_network,
self.trajectory_enqueues,
self.trajectory_placeholders,
self.trajectory_queue_size_op,
self.replay_enqueue,
self.replay_placeholder,
self.replay_queue_size_op)
# copy weights from the parameter server to the local model
self.policy_sync = ut.tf.get_sync_op(
from_list=self.global_network.var_list,
to_list=self.local_network.var_list)
|
[
"def",
"prepare_local_network",
"(",
"self",
")",
":",
"self",
".",
"local_network",
"=",
"models",
".",
"Policy",
"(",
"self",
".",
"args",
",",
"self",
".",
"env",
",",
"\"local\"",
",",
"self",
".",
"input_shape",
",",
"self",
".",
"action_sizes",
",",
"data_format",
"=",
"'channels_last'",
")",
"##########################",
"# Trajectory queue",
"##########################",
"self",
".",
"trajectory_placeholders",
"=",
"{",
"name",
":",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"float32",
",",
"dict",
"(",
"self",
".",
"queue_shapes",
")",
"[",
"name",
"]",
",",
"name",
"=",
"\"{}_in\"",
".",
"format",
"(",
"name",
")",
")",
"for",
"name",
",",
"shape",
"in",
"self",
".",
"queue_shapes",
"}",
"self",
".",
"trajectory_enqueues",
"=",
"self",
".",
"trajectory_queue",
".",
"enqueue",
"(",
"{",
"name",
":",
"self",
".",
"trajectory_placeholders",
"[",
"name",
"]",
"for",
"name",
",",
"_",
"in",
"self",
".",
"queue_shapes",
"}",
")",
"##########################",
"# Replay queue",
"##########################",
"if",
"self",
".",
"args",
".",
"loss",
"==",
"'gan'",
":",
"self",
".",
"replay_placeholder",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"float32",
",",
"self",
".",
"input_shape",
",",
"name",
"=",
"\"replay_in\"",
")",
"self",
".",
"replay_enqueue",
"=",
"self",
".",
"replay_queue",
".",
"enqueue",
"(",
"self",
".",
"replay_placeholder",
")",
"else",
":",
"self",
".",
"replay_placeholder",
"=",
"None",
"self",
".",
"replay_enqueue",
"=",
"None",
"###############################",
"# Thread dealing with queues",
"###############################",
"self",
".",
"worker_thread",
"=",
"rl_utils",
".",
"WorkerThread",
"(",
"self",
".",
"env",
",",
"self",
".",
"local_network",
",",
"self",
".",
"trajectory_enqueues",
",",
"self",
".",
"trajectory_placeholders",
",",
"self",
".",
"trajectory_queue_size_op",
",",
"self",
".",
"replay_enqueue",
",",
"self",
".",
"replay_placeholder",
",",
"self",
".",
"replay_queue_size_op",
")",
"# copy weights from the parameter server to the local model",
"self",
".",
"policy_sync",
"=",
"ut",
".",
"tf",
".",
"get_sync_op",
"(",
"from_list",
"=",
"self",
".",
"global_network",
".",
"var_list",
",",
"to_list",
"=",
"self",
".",
"local_network",
".",
"var_list",
")"
] |
https://github.com/carpedm20/SPIRAL-tensorflow/blob/2cca458f5d0d856c7bb73e877eea24906eda522f/agent.py#L242-L290
|
||||
pillone/usntssearch
|
24b5e5bc4b6af2589d95121c4d523dc58cb34273
|
NZBmegasearch/beautifulsoup.py
|
python
|
Tag._convertEntities
|
(self, match)
|
Used in a call to re.sub to replace HTML, XML, and numeric
entities with the appropriate Unicode characters. If HTML
entities are being converted, any unrecognized entities are
escaped.
|
Used in a call to re.sub to replace HTML, XML, and numeric
entities with the appropriate Unicode characters. If HTML
entities are being converted, any unrecognized entities are
escaped.
|
[
"Used",
"in",
"a",
"call",
"to",
"re",
".",
"sub",
"to",
"replace",
"HTML",
"XML",
"and",
"numeric",
"entities",
"with",
"the",
"appropriate",
"Unicode",
"characters",
".",
"If",
"HTML",
"entities",
"are",
"being",
"converted",
"any",
"unrecognized",
"entities",
"are",
"escaped",
"."
] |
def _convertEntities(self, match):
"""Used in a call to re.sub to replace HTML, XML, and numeric
entities with the appropriate Unicode characters. If HTML
entities are being converted, any unrecognized entities are
escaped."""
x = match.group(1)
if self.convertHTMLEntities and x in name2codepoint:
return unichr(name2codepoint[x])
elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
if self.convertXMLEntities:
return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
else:
return u'&%s;' % x
elif len(x) > 0 and x[0] == '#':
# Handle numeric entities
if len(x) > 1 and x[1] == 'x':
return unichr(int(x[2:], 16))
else:
return unichr(int(x[1:]))
elif self.escapeUnrecognizedEntities:
return u'&%s;' % x
else:
return u'&%s;' % x
|
[
"def",
"_convertEntities",
"(",
"self",
",",
"match",
")",
":",
"x",
"=",
"match",
".",
"group",
"(",
"1",
")",
"if",
"self",
".",
"convertHTMLEntities",
"and",
"x",
"in",
"name2codepoint",
":",
"return",
"unichr",
"(",
"name2codepoint",
"[",
"x",
"]",
")",
"elif",
"x",
"in",
"self",
".",
"XML_ENTITIES_TO_SPECIAL_CHARS",
":",
"if",
"self",
".",
"convertXMLEntities",
":",
"return",
"self",
".",
"XML_ENTITIES_TO_SPECIAL_CHARS",
"[",
"x",
"]",
"else",
":",
"return",
"u'&%s;'",
"%",
"x",
"elif",
"len",
"(",
"x",
")",
">",
"0",
"and",
"x",
"[",
"0",
"]",
"==",
"'#'",
":",
"# Handle numeric entities",
"if",
"len",
"(",
"x",
")",
">",
"1",
"and",
"x",
"[",
"1",
"]",
"==",
"'x'",
":",
"return",
"unichr",
"(",
"int",
"(",
"x",
"[",
"2",
":",
"]",
",",
"16",
")",
")",
"else",
":",
"return",
"unichr",
"(",
"int",
"(",
"x",
"[",
"1",
":",
"]",
")",
")",
"elif",
"self",
".",
"escapeUnrecognizedEntities",
":",
"return",
"u'&%s;'",
"%",
"x",
"else",
":",
"return",
"u'&%s;'",
"%",
"x"
] |
https://github.com/pillone/usntssearch/blob/24b5e5bc4b6af2589d95121c4d523dc58cb34273/NZBmegasearch/beautifulsoup.py#L510-L533
|
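A self-contained Python 3 sketch of the same technique: re.sub with a replacement callback that handles named, decimal and hexadecimal entities. This is illustrative, not BeautifulSoup's code; the regex and the policy for unrecognized entities are simplified assumptions.

import re
from html.entities import name2codepoint

def convert_entities(text):
    def repl(match):
        x = match.group(1)
        if x.startswith("#"):                     # numeric entity
            if x[1:2] in ("x", "X"):
                return chr(int(x[2:], 16))        # hexadecimal, e.g. &#xE9;
            return chr(int(x[1:]))                # decimal, e.g. &#233;
        if x in name2codepoint:                   # named HTML entity, e.g. &eacute;
            return chr(name2codepoint[x])
        return "&%s;" % x                         # unrecognized: leave it as written
    return re.sub(r"&(#?[xX]?\w+);", repl, text)

print(convert_entities("caf&eacute; &#233; &#xE9; &unknown;"))
# -> café é é &unknown;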
||
securesystemslab/zippy
|
ff0e84ac99442c2c55fe1d285332cfd4e185e089
|
zippy/lib-python/3/platform.py
|
python
|
_syscmd_file
|
(target,default='')
|
Interface to the system's file command.
The function uses the -b option of the file command to have it
omit the filename in its output. Follow the symlinks. It returns
default in case the command should fail.
|
Interface to the system's file command.
|
[
"Interface",
"to",
"the",
"system",
"s",
"file",
"command",
"."
] |
def _syscmd_file(target,default=''):
""" Interface to the system's file command.
The function uses the -b option of the file command to have it
omit the filename in its output. Follow the symlinks. It returns
default in case the command should fail.
"""
if sys.platform in ('dos','win32','win16','os2'):
# XXX Others too ?
return default
target = _follow_symlinks(target).replace('"', '\\"')
try:
f = os.popen('file -b "%s" 2> %s' % (target, DEV_NULL))
except (AttributeError,os.error):
return default
output = f.read().strip()
rc = f.close()
if not output or rc:
return default
else:
return output
|
[
"def",
"_syscmd_file",
"(",
"target",
",",
"default",
"=",
"''",
")",
":",
"if",
"sys",
".",
"platform",
"in",
"(",
"'dos'",
",",
"'win32'",
",",
"'win16'",
",",
"'os2'",
")",
":",
"# XXX Others too ?",
"return",
"default",
"target",
"=",
"_follow_symlinks",
"(",
"target",
")",
".",
"replace",
"(",
"'\"'",
",",
"'\\\\\"'",
")",
"try",
":",
"f",
"=",
"os",
".",
"popen",
"(",
"'file -b \"%s\" 2> %s'",
"%",
"(",
"target",
",",
"DEV_NULL",
")",
")",
"except",
"(",
"AttributeError",
",",
"os",
".",
"error",
")",
":",
"return",
"default",
"output",
"=",
"f",
".",
"read",
"(",
")",
".",
"strip",
"(",
")",
"rc",
"=",
"f",
".",
"close",
"(",
")",
"if",
"not",
"output",
"or",
"rc",
":",
"return",
"default",
"else",
":",
"return",
"output"
] |
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/lib-python/3/platform.py#L986-L1008
|
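For comparison, a hedged modern sketch of the same interface built on subprocess instead of os.popen (not the stdlib implementation; -b and -L are the usual file(1) flags for omitting the filename and following symlinks):

import subprocess

def syscmd_file(target, default=""):
    """Ask the file(1) command to describe target; return default on any failure."""
    try:
        proc = subprocess.run(["file", "-b", "-L", target],
                              capture_output=True, text=True)
    except OSError:                               # file(1) missing or not executable
        return default
    output = proc.stdout.strip()
    return output if output and proc.returncode == 0 else default

# On a Unix-like system:
# print(syscmd_file("/bin/sh"))   # e.g. "POSIX shell script, ASCII text executable"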
||
ronreiter/interactive-tutorials
|
d026d1ae58941863d60eb30a8a94a8650d2bd4bf
|
suds/sax/element.py
|
python
|
PrefixNormalizer.skip
|
(self, ns)
|
return ns in (None, Namespace.default, Namespace.xsdns, Namespace.xsins, Namespace.xmlns)
|
Get whether the I{ns} is to B{not} be normalized.
@param ns: A namespace.
@type ns: (p,u)
@return: True if to be skipped.
@rtype: boolean
|
Get whether the I{ns} is to B{not} be normalized.
|
[
"Get",
"whether",
"the",
"I",
"{",
"ns",
"}",
"is",
"to",
"B",
"{",
"not",
"}",
"be",
"normalized",
"."
] |
def skip(self, ns):
"""
Get whether the I{ns} is to B{not} be normalized.
@param ns: A namespace.
@type ns: (p,u)
@return: True if to be skipped.
@rtype: boolean
"""
return ns in (None, Namespace.default, Namespace.xsdns, Namespace.xsins, Namespace.xmlns)
|
[
"def",
"skip",
"(",
"self",
",",
"ns",
")",
":",
"return",
"ns",
"in",
"(",
"None",
",",
"Namespace",
".",
"default",
",",
"Namespace",
".",
"xsdns",
",",
"Namespace",
".",
"xsins",
",",
"Namespace",
".",
"xmlns",
")"
] |
https://github.com/ronreiter/interactive-tutorials/blob/d026d1ae58941863d60eb30a8a94a8650d2bd4bf/suds/sax/element.py#L1138-L1146
|
|
nathanlopez/Stitch
|
8e22e91c94237959c02d521aab58dc7e3d994cea
|
Application/stitch_help.py
|
python
|
st_help_sudo
|
()
|
[] |
def st_help_sudo():
st_print("[*] Runs the preceding command with admin privileges.")
usage_sudo()
|
[
"def",
"st_help_sudo",
"(",
")",
":",
"st_print",
"(",
"\"[*] Runs the preceding command with admin priveleges.\"",
")",
"usage_sudo",
"(",
")"
] |
https://github.com/nathanlopez/Stitch/blob/8e22e91c94237959c02d521aab58dc7e3d994cea/Application/stitch_help.py#L382-L384
|
||||
rgerum/pylustrator
|
b01825bc3de75ac127291647729fa7b0e6f8b821
|
pylustrator/change_tracker.py
|
python
|
setFigureVariableNames
|
(figure: Figure)
|
get the global variable names that refer to the given figure
|
get the global variable names that refer to the given figure
|
[
"get",
"the",
"global",
"variable",
"names",
"that",
"refer",
"to",
"the",
"given",
"figure"
] |
def setFigureVariableNames(figure: Figure):
""" get the global variable names that refer to the given figure """
import inspect
mpl_figure = _pylab_helpers.Gcf.figs[figure].canvas.figure
calling_globals = inspect.stack()[2][0].f_globals
fig_names = [
name
for name, val in calling_globals.items()
if isinstance(val, mpl.figure.Figure) and hash(val) == hash(mpl_figure)
]
print("fig_names", fig_names)
if len(fig_names):
globals()[fig_names[0]] = mpl_figure
setattr(mpl_figure, "_variable_name", fig_names[0])
|
[
"def",
"setFigureVariableNames",
"(",
"figure",
":",
"Figure",
")",
":",
"import",
"inspect",
"mpl_figure",
"=",
"_pylab_helpers",
".",
"Gcf",
".",
"figs",
"[",
"figure",
"]",
".",
"canvas",
".",
"figure",
"calling_globals",
"=",
"inspect",
".",
"stack",
"(",
")",
"[",
"2",
"]",
"[",
"0",
"]",
".",
"f_globals",
"fig_names",
"=",
"[",
"name",
"for",
"name",
",",
"val",
"in",
"calling_globals",
".",
"items",
"(",
")",
"if",
"isinstance",
"(",
"val",
",",
"mpl",
".",
"figure",
".",
"Figure",
")",
"and",
"hash",
"(",
"val",
")",
"==",
"hash",
"(",
"mpl_figure",
")",
"]",
"print",
"(",
"\"fig_names\"",
",",
"fig_names",
")",
"if",
"len",
"(",
"fig_names",
")",
":",
"globals",
"(",
")",
"[",
"fig_names",
"[",
"0",
"]",
"]",
"=",
"mpl_figure",
"setattr",
"(",
"mpl_figure",
",",
"\"_variable_name\"",
",",
"fig_names",
"[",
"0",
"]",
")"
] |
https://github.com/rgerum/pylustrator/blob/b01825bc3de75ac127291647729fa7b0e6f8b821/pylustrator/change_tracker.py#L152-L165
|
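A stripped-down, self-contained sketch of the underlying trick: inspect the caller's global namespace and collect the names bound to a given object. pylustrator additionally matches matplotlib figures by hash; that part is omitted here.

import inspect

def variable_names_for(obj):
    """Return the names in the caller's globals that refer to obj."""
    caller_globals = inspect.stack()[1][0].f_globals
    return [name for name, val in caller_globals.items() if val is obj]

fig = object()                   # stands in for a matplotlib Figure
print(variable_names_for(fig))   # -> ['fig']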
||
Zulko/moviepy
|
8eaf3f02c5cf812e89f03e925cb2fa5e05b8d29a
|
setup.py
|
python
|
PyTest.initialize_options
|
(self)
|
Initialize the PyTest options.
|
Initialize the PyTest options.
|
[
"Initialize",
"the",
"PyTest",
"options",
"."
] |
def initialize_options(self):
"""Initialize the PyTest options."""
TestCommand.initialize_options(self)
self.pytest_args = ""
|
[
"def",
"initialize_options",
"(",
"self",
")",
":",
"TestCommand",
".",
"initialize_options",
"(",
"self",
")",
"self",
".",
"pytest_args",
"=",
"\"\""
] |
https://github.com/Zulko/moviepy/blob/8eaf3f02c5cf812e89f03e925cb2fa5e05b8d29a/setup.py#L31-L34
|
||
openshift/openshift-tools
|
1188778e728a6e4781acf728123e5b356380fe6f
|
openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_vendored_deps/library/oc_serviceaccount.py
|
python
|
OCServiceAccount.run_ansible
|
(params, check_mode)
|
return {'failed': True,
'changed': False,
'msg': 'Unknown state passed. %s' % state,
'state': 'unknown'}
|
run the oc_serviceaccount module
|
run the oc_serviceaccount module
|
[
"run",
"the",
"oc_serviceaccount",
"module"
] |
def run_ansible(params, check_mode):
'''run the oc_serviceaccount module'''
rconfig = ServiceAccountConfig(params['name'],
params['namespace'],
params['kubeconfig'],
params['secrets'],
params['image_pull_secrets'],
)
oc_sa = OCServiceAccount(rconfig,
verbose=params['debug'])
state = params['state']
api_rval = oc_sa.get()
#####
# Get
#####
if state == 'list':
return {'changed': False, 'results': api_rval['results'], 'state': 'list'}
########
# Delete
########
if state == 'absent':
if oc_sa.exists():
if check_mode:
return {'changed': True, 'msg': 'Would have performed a delete.'}
api_rval = oc_sa.delete()
return {'changed': True, 'results': api_rval, 'state': 'absent'}
return {'changed': False, 'state': 'absent'}
if state == 'present':
########
# Create
########
if not oc_sa.exists():
if check_mode:
return {'changed': True, 'msg': 'Would have performed a create.'}
# Create it here
api_rval = oc_sa.create()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
# return the created object
api_rval = oc_sa.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': 'present'}
########
# Update
########
if oc_sa.needs_update():
api_rval = oc_sa.update()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
# return the created object
api_rval = oc_sa.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': 'present'}
return {'changed': False, 'results': api_rval, 'state': 'present'}
return {'failed': True,
'changed': False,
'msg': 'Unknown state passed. %s' % state,
'state': 'unknown'}
|
[
"def",
"run_ansible",
"(",
"params",
",",
"check_mode",
")",
":",
"rconfig",
"=",
"ServiceAccountConfig",
"(",
"params",
"[",
"'name'",
"]",
",",
"params",
"[",
"'namespace'",
"]",
",",
"params",
"[",
"'kubeconfig'",
"]",
",",
"params",
"[",
"'secrets'",
"]",
",",
"params",
"[",
"'image_pull_secrets'",
"]",
",",
")",
"oc_sa",
"=",
"OCServiceAccount",
"(",
"rconfig",
",",
"verbose",
"=",
"params",
"[",
"'debug'",
"]",
")",
"state",
"=",
"params",
"[",
"'state'",
"]",
"api_rval",
"=",
"oc_sa",
".",
"get",
"(",
")",
"#####",
"# Get",
"#####",
"if",
"state",
"==",
"'list'",
":",
"return",
"{",
"'changed'",
":",
"False",
",",
"'results'",
":",
"api_rval",
"[",
"'results'",
"]",
",",
"'state'",
":",
"'list'",
"}",
"########",
"# Delete",
"########",
"if",
"state",
"==",
"'absent'",
":",
"if",
"oc_sa",
".",
"exists",
"(",
")",
":",
"if",
"check_mode",
":",
"return",
"{",
"'changed'",
":",
"True",
",",
"'msg'",
":",
"'Would have performed a delete.'",
"}",
"api_rval",
"=",
"oc_sa",
".",
"delete",
"(",
")",
"return",
"{",
"'changed'",
":",
"True",
",",
"'results'",
":",
"api_rval",
",",
"'state'",
":",
"'absent'",
"}",
"return",
"{",
"'changed'",
":",
"False",
",",
"'state'",
":",
"'absent'",
"}",
"if",
"state",
"==",
"'present'",
":",
"########",
"# Create",
"########",
"if",
"not",
"oc_sa",
".",
"exists",
"(",
")",
":",
"if",
"check_mode",
":",
"return",
"{",
"'changed'",
":",
"True",
",",
"'msg'",
":",
"'Would have performed a create.'",
"}",
"# Create it here",
"api_rval",
"=",
"oc_sa",
".",
"create",
"(",
")",
"if",
"api_rval",
"[",
"'returncode'",
"]",
"!=",
"0",
":",
"return",
"{",
"'failed'",
":",
"True",
",",
"'msg'",
":",
"api_rval",
"}",
"# return the created object",
"api_rval",
"=",
"oc_sa",
".",
"get",
"(",
")",
"if",
"api_rval",
"[",
"'returncode'",
"]",
"!=",
"0",
":",
"return",
"{",
"'failed'",
":",
"True",
",",
"'msg'",
":",
"api_rval",
"}",
"return",
"{",
"'changed'",
":",
"True",
",",
"'results'",
":",
"api_rval",
",",
"'state'",
":",
"'present'",
"}",
"########",
"# Update",
"########",
"if",
"oc_sa",
".",
"needs_update",
"(",
")",
":",
"api_rval",
"=",
"oc_sa",
".",
"update",
"(",
")",
"if",
"api_rval",
"[",
"'returncode'",
"]",
"!=",
"0",
":",
"return",
"{",
"'failed'",
":",
"True",
",",
"'msg'",
":",
"api_rval",
"}",
"# return the created object",
"api_rval",
"=",
"oc_sa",
".",
"get",
"(",
")",
"if",
"api_rval",
"[",
"'returncode'",
"]",
"!=",
"0",
":",
"return",
"{",
"'failed'",
":",
"True",
",",
"'msg'",
":",
"api_rval",
"}",
"return",
"{",
"'changed'",
":",
"True",
",",
"'results'",
":",
"api_rval",
",",
"'state'",
":",
"'present'",
"}",
"return",
"{",
"'changed'",
":",
"False",
",",
"'results'",
":",
"api_rval",
",",
"'state'",
":",
"'present'",
"}",
"return",
"{",
"'failed'",
":",
"True",
",",
"'changed'",
":",
"False",
",",
"'msg'",
":",
"'Unknown state passed. %s'",
"%",
"state",
",",
"'state'",
":",
"'unknown'",
"}"
] |
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_vendored_deps/library/oc_serviceaccount.py#L1671-L1755
|
|
whiteclover/white
|
633c5f0c7e9e9518f495b0afddf06786d4e229c4
|
white/lib/memoize.py
|
python
|
LRUCacheDict.__contains__
|
(self, key)
|
return self.__values.has_key(key)
|
This method should almost NEVER be used. The reason is that between the time
has_key is called, and the key is accessed, the key might vanish.
You should ALWAYS use a try: ... except KeyError: ... block.
|
This method should almost NEVER be used. The reason is that between the time
has_key is called, and the key is accessed, the key might vanish.
You should ALWAYS use a try: ... except KeyError: ... block.
|
[
"This",
"method",
"should",
"almost",
"NEVER",
"be",
"used",
".",
"The",
"reason",
"is",
"that",
"between",
"the",
"time",
"has_key",
"is",
"called",
"and",
"the",
"key",
"is",
"accessed",
"the",
"key",
"might",
"vanish",
".",
"You",
"should",
"ALWAYS",
"use",
"a",
"try",
":",
"...",
"except",
"KeyError",
":",
"...",
"block",
"."
] |
def __contains__(self, key):
"""
This method should almost NEVER be used. The reason is that between the time
has_key is called, and the key is accessed, the key might vanish.
You should ALWAYS use a try: ... except KeyError: ... block.
"""
return self.__values.has_key(key)
|
[
"def",
"__contains__",
"(",
"self",
",",
"key",
")",
":",
"return",
"self",
".",
"__values",
".",
"has_key",
"(",
"key",
")"
] |
https://github.com/whiteclover/white/blob/633c5f0c7e9e9518f495b0afddf06786d4e229c4/white/lib/memoize.py#L215-L221
|
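A short sketch of the access pattern the docstring recommends instead of check-then-use (a plain dict and a hypothetical load_user helper stand in for the real cache and data source):

def load_user(uid):
    return {"id": uid}            # hypothetical loader, for illustration only

cache = {}                        # stands in for an LRUCacheDict instance

# Discouraged: the key could vanish between the membership test and the access.
if "user:42" in cache:
    value = cache["user:42"]

# Recommended: attempt the access and handle the miss.
try:
    value = cache["user:42"]
except KeyError:
    value = load_user(42)
    cache["user:42"] = value

print(value)                      # -> {'id': 42}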
|
caiiiac/Machine-Learning-with-Python
|
1a26c4467da41ca4ebc3d5bd789ea942ef79422f
|
MachineLearning/venv/lib/python3.5/site-packages/pandas/core/indexes/multi.py
|
python
|
MultiIndex.is_monotonic_decreasing
|
(self)
|
return False
|
return if the index is monotonic decreasing (only equal or
decreasing) values.
|
return if the index is monotonic decreasing (only equal or
decreasing) values.
|
[
"return",
"if",
"the",
"index",
"is",
"monotonic",
"decreasing",
"(",
"only",
"equal",
"or",
"decreasing",
")",
"values",
"."
] |
def is_monotonic_decreasing(self):
"""
return if the index is monotonic decreasing (only equal or
decreasing) values.
"""
return False
|
[
"def",
"is_monotonic_decreasing",
"(",
"self",
")",
":",
"return",
"False"
] |
https://github.com/caiiiac/Machine-Learning-with-Python/blob/1a26c4467da41ca4ebc3d5bd789ea942ef79422f/MachineLearning/venv/lib/python3.5/site-packages/pandas/core/indexes/multi.py#L697-L702
|
|
ni/nidaqmx-python
|
62fc6b48cbbb330fe1bcc9aedadc86610a1269b6
|
nidaqmx/_task_modules/channels/ai_channel.py
|
python
|
AIChannel.ai_sound_pressure_units
|
(self)
|
return SoundPressureUnits(val.value)
|
:class:`nidaqmx.constants.SoundPressureUnits`: Specifies the
units to use to return sound pressure measurements from the
channel.
|
:class:`nidaqmx.constants.SoundPressureUnits`: Specifies the
units to use to return sound pressure measurements from the
channel.
|
[
":",
"class",
":",
"nidaqmx",
".",
"constants",
".",
"SoundPressureUnits",
":",
"Specifies",
"the",
"units",
"to",
"use",
"to",
"return",
"sound",
"pressure",
"measurements",
"from",
"the",
"channel",
"."
] |
def ai_sound_pressure_units(self):
"""
:class:`nidaqmx.constants.SoundPressureUnits`: Specifies the
units to use to return sound pressure measurements from the
channel.
"""
val = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxGetAISoundPressureUnits
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(ctypes.c_int)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return SoundPressureUnits(val.value)
|
[
"def",
"ai_sound_pressure_units",
"(",
"self",
")",
":",
"val",
"=",
"ctypes",
".",
"c_int",
"(",
")",
"cfunc",
"=",
"lib_importer",
".",
"windll",
".",
"DAQmxGetAISoundPressureUnits",
"if",
"cfunc",
".",
"argtypes",
"is",
"None",
":",
"with",
"cfunc",
".",
"arglock",
":",
"if",
"cfunc",
".",
"argtypes",
"is",
"None",
":",
"cfunc",
".",
"argtypes",
"=",
"[",
"lib_importer",
".",
"task_handle",
",",
"ctypes_byte_str",
",",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_int",
")",
"]",
"error_code",
"=",
"cfunc",
"(",
"self",
".",
"_handle",
",",
"self",
".",
"_name",
",",
"ctypes",
".",
"byref",
"(",
"val",
")",
")",
"check_for_error",
"(",
"error_code",
")",
"return",
"SoundPressureUnits",
"(",
"val",
".",
"value",
")"
] |
https://github.com/ni/nidaqmx-python/blob/62fc6b48cbbb330fe1bcc9aedadc86610a1269b6/nidaqmx/_task_modules/channels/ai_channel.py#L6848-L6868
|
|
AwesomeTTS/awesometts-anki-addon
|
c7c2c94479b610b9767ec44cdbb825002bc0c2b7
|
awesometts/service/base.py
|
python
|
Service.util_merge
|
(self, input_files, output_file)
|
Given several input files, dumbly merge together into a single
output file.
|
Given several input files, dumbly merge together into a single
output file.
|
[
"Given",
"several",
"input",
"files",
"dumbly",
"merge",
"together",
"into",
"a",
"single",
"output",
"file",
"."
] |
def util_merge(self, input_files, output_file):
"""
Given several input files, dumbly merge together into a single
output file.
"""
self._logger.debug("Merging %s into %s", input_files, output_file)
with open(output_file, 'wb') as output_stream:
for input_file in input_files:
with open(input_file, 'rb') as input_stream:
output_stream.write(input_stream.read())
|
[
"def",
"util_merge",
"(",
"self",
",",
"input_files",
",",
"output_file",
")",
":",
"self",
".",
"_logger",
".",
"debug",
"(",
"\"Merging %s into %s\"",
",",
"input_files",
",",
"output_file",
")",
"with",
"open",
"(",
"output_file",
",",
"'wb'",
")",
"as",
"output_stream",
":",
"for",
"input_file",
"in",
"input_files",
":",
"with",
"open",
"(",
"input_file",
",",
"'rb'",
")",
"as",
"input_stream",
":",
"output_stream",
".",
"write",
"(",
"input_stream",
".",
"read",
"(",
")",
")"
] |
https://github.com/AwesomeTTS/awesometts-anki-addon/blob/c7c2c94479b610b9767ec44cdbb825002bc0c2b7/awesometts/service/base.py#L790-L800
|
||
MeanEYE/Sunflower
|
1024bbdde3b8e202ddad3553b321a7b6230bffc9
|
sunflower/plugins/gvim_viewer/plugin.py
|
python
|
GVimViewer.__socket_realized
|
(self, widget, data=None)
|
Connect process when socket is realized
|
Connect process when socket is realized
|
[
"Connect",
"process",
"when",
"socket",
"is",
"realized"
] |
def __socket_realized(self, widget, data=None):
"""Connect process when socket is realized"""
socket_id = self._socket.get_id()
# generate command string
command = (
'gvim',
'--socketid', str(socket_id),
'-R', self._parent.path
)
# create new process
self._process = subprocess.Popen(command)
|
[
"def",
"__socket_realized",
"(",
"self",
",",
"widget",
",",
"data",
"=",
"None",
")",
":",
"socket_id",
"=",
"self",
".",
"_socket",
".",
"get_id",
"(",
")",
"# generate command string",
"command",
"=",
"(",
"'gvim'",
",",
"'--socketid'",
",",
"str",
"(",
"socket_id",
")",
",",
"'-R'",
",",
"self",
".",
"_parent",
".",
"path",
")",
"# create new process",
"self",
".",
"_process",
"=",
"subprocess",
".",
"Popen",
"(",
"command",
")"
] |
https://github.com/MeanEYE/Sunflower/blob/1024bbdde3b8e202ddad3553b321a7b6230bffc9/sunflower/plugins/gvim_viewer/plugin.py#L36-L48
|
||
pdpipe/pdpipe
|
69502436d2a4ce70c6123d7a9db02cbf10f56cf2
|
pdpipe/skintegrate.py
|
python
|
PdPipelineAndSklearnEstimator.fit
|
(self, X, y)
|
return self
|
A reference implementation of a fitting function.
Parameters
----------
X : pandas.DataFrame, shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,) or (n_samples, n_outputs)
The target values (class labels in classification, real numbers in
regression).
Returns
-------
self : object
Returns self.
|
A reference implementation of a fitting function.
|
[
"A",
"reference",
"implementation",
"of",
"a",
"fitting",
"function",
"."
] |
def fit(self, X, y):
"""A reference implementation of a fitting function.
Parameters
----------
X : pandas.DataFrame, shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,) or (n_samples, n_outputs)
The target values (class labels in classification, real numbers in
regression).
Returns
-------
self : object
Returns self.
"""
# X, y = check_X_y(X, y, accept_sparse=True)
post_X = self.pipeline.fit_transform(X=X, y=y)
self.estimator.fit(X=post_X.values, y=y.values)
self.is_fitted_ = True
return self
|
[
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
")",
":",
"# X, y = check_X_y(X, y, accept_sparse=True)",
"post_X",
"=",
"self",
".",
"pipeline",
".",
"fit_transform",
"(",
"X",
"=",
"X",
",",
"y",
"=",
"y",
")",
"self",
".",
"estimator",
".",
"fit",
"(",
"X",
"=",
"post_X",
".",
"values",
",",
"y",
"=",
"y",
".",
"values",
")",
"self",
".",
"is_fitted_",
"=",
"True",
"return",
"self"
] |
https://github.com/pdpipe/pdpipe/blob/69502436d2a4ce70c6123d7a9db02cbf10f56cf2/pdpipe/skintegrate.py#L183-L203
|
|
smokeleeteveryday/CTF_WRITEUPS
|
4683f0d41c92c4ed407cc3dd3b1760c68a05943f
|
2015/POLICTF/reversing/johnthepacker/solution/john_keygen.py
|
python
|
key_gen
|
()
|
return key
|
[] |
def key_gen():
magic_table_0 = "\x15\x00\x00\x00\x00\x80\x00\x00\x15\x00\x00\x00\x00\x00\x08\x00\x15\x00\x00\x00\x00\x00\x80\x00\x15\x00\x00\x00\x00\x80\x00\x00\x15\x00\x00\x00\x00\x00\x40\x00\x15\x00\x00\x00\x00\x80\x00\x00\x15\x00\x00\x00\x00\x00\x00\x00\x15\x00\x00\x00\x00\x00\x40\x00\x15\x00\x00\x00\x00\x00\x08\x00\x15\x00\x00\x00\x00\x00\x00\x80\x15\x00\x00\x00\x00\x80\x00\x00"
magic_table_1 = "\x44\x07\x43\x59\x1C\x5B\x1E\x19\x47\x00"
key = ""
# First 6 bytes
for i in xrange(6):
key += chr(pow_0(i+1))
# Next 11 bytes
for i in xrange(11):
index = (2 * i) * 4
magic_value = unpack('<Q', magic_table_0[index: index + 8])[0]
char_val = pow_1(magic_value)
# ( !(*(_BYTE *)(a5 + 17) & 1) )
if((i == 6) and (char_val & 1 == 0)):
# Wrap-around compensation
char_val -= 1
key += chr(char_val)
# Final 10 bytes
for i in xrange(10):
key += chr(ord(key[len(key)-1]) ^ ord(magic_table_1[i]))
return key
|
[
"def",
"key_gen",
"(",
")",
":",
"magic_table_0",
"=",
"\"\\x15\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x15\\x00\\x00\\x00\\x00\\x00\\x08\\x00\\x15\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x15\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x15\\x00\\x00\\x00\\x00\\x00\\x40\\x00\\x15\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x15\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x15\\x00\\x00\\x00\\x00\\x00\\x40\\x00\\x15\\x00\\x00\\x00\\x00\\x00\\x08\\x00\\x15\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x15\\x00\\x00\\x00\\x00\\x80\\x00\\x00\"",
"magic_table_1",
"=",
"\"\\x44\\x07\\x43\\x59\\x1C\\x5B\\x1E\\x19\\x47\\x00\"",
"key",
"=",
"\"\"",
"# First 6 bytes",
"for",
"i",
"in",
"xrange",
"(",
"6",
")",
":",
"key",
"+=",
"chr",
"(",
"pow_0",
"(",
"i",
"+",
"1",
")",
")",
"# Next 11 bytes",
"for",
"i",
"in",
"xrange",
"(",
"11",
")",
":",
"index",
"=",
"(",
"2",
"*",
"i",
")",
"*",
"4",
"magic_value",
"=",
"unpack",
"(",
"'<Q'",
",",
"magic_table_0",
"[",
"index",
":",
"index",
"+",
"8",
"]",
")",
"[",
"0",
"]",
"char_val",
"=",
"pow_1",
"(",
"magic_value",
")",
"# ( !(*(_BYTE *)(a5 + 17) & 1) )",
"if",
"(",
"(",
"i",
"==",
"6",
")",
"and",
"(",
"char_val",
"&",
"1",
"==",
"0",
")",
")",
":",
"# Wrap-around compensation",
"char_val",
"-=",
"1",
"key",
"+=",
"chr",
"(",
"char_val",
")",
"# Final 10 bytes",
"for",
"i",
"in",
"xrange",
"(",
"10",
")",
":",
"key",
"+=",
"chr",
"(",
"ord",
"(",
"key",
"[",
"len",
"(",
"key",
")",
"-",
"1",
"]",
")",
"^",
"ord",
"(",
"magic_table_1",
"[",
"i",
"]",
")",
")",
"return",
"key"
] |
https://github.com/smokeleeteveryday/CTF_WRITEUPS/blob/4683f0d41c92c4ed407cc3dd3b1760c68a05943f/2015/POLICTF/reversing/johnthepacker/solution/john_keygen.py#L28-L55
|
|||
galaxyproject/galaxy
|
4c03520f05062e0f4a1b3655dc0b7452fda69943
|
lib/galaxy/managers/sharable.py
|
python
|
SharableModelManager.get_sharing_extra_information
|
(
self, trans, item, users: Set[User], errors: Set[str], option: Optional[SharingOptions] = None
)
|
return None
|
Returns optional extra information about the shareability of the given item.
This function should be overridden in the particular manager class that wants
to provide the extra information, otherwise, it will be None by default.
|
Returns optional extra information about the shareability of the given item.
|
[
"Returns",
"optional",
"extra",
"information",
"about",
"the",
"shareability",
"of",
"the",
"given",
"item",
"."
] |
def get_sharing_extra_information(
self, trans, item, users: Set[User], errors: Set[str], option: Optional[SharingOptions] = None
) -> Optional[ShareWithExtra]:
"""Returns optional extra information about the shareability of the given item.
This function should be overridden in the particular manager class that wants
to provide the extra information, otherwise, it will be None by default."""
return None
|
[
"def",
"get_sharing_extra_information",
"(",
"self",
",",
"trans",
",",
"item",
",",
"users",
":",
"Set",
"[",
"User",
"]",
",",
"errors",
":",
"Set",
"[",
"str",
"]",
",",
"option",
":",
"Optional",
"[",
"SharingOptions",
"]",
"=",
"None",
")",
"->",
"Optional",
"[",
"ShareWithExtra",
"]",
":",
"return",
"None"
] |
https://github.com/galaxyproject/galaxy/blob/4c03520f05062e0f4a1b3655dc0b7452fda69943/lib/galaxy/managers/sharable.py#L238-L245
|
|
tendenci/tendenci
|
0f2c348cc0e7d41bc56f50b00ce05544b083bf1d
|
tendenci/apps/forms_builder/forms/forms.py
|
python
|
FormForForm.clean_pricing_option
|
(self)
|
return pricing_option
|
[] |
def clean_pricing_option(self):
pricing_pk = int(self.cleaned_data['pricing_option'])
[pricing_option] = self.form.pricing_set.filter(pk=pricing_pk)[:1] or [None]
custom_price = self.data.get('custom_price_%s' % pricing_pk)
# if not price set
if not pricing_option or pricing_option.price is None:
# then price is custom
if not custom_price: # custom price has a value
raise forms.ValidationError(_("Please set your price."))
try: # custom price is valid amount
custom_price = currency_check(custom_price)
except:
raise forms.ValidationError(_("Price must be a valid amount"))
self.cleaned_data['custom_price'] = custom_price
return pricing_option
|
[
"def",
"clean_pricing_option",
"(",
"self",
")",
":",
"pricing_pk",
"=",
"int",
"(",
"self",
".",
"cleaned_data",
"[",
"'pricing_option'",
"]",
")",
"[",
"pricing_option",
"]",
"=",
"self",
".",
"form",
".",
"pricing_set",
".",
"filter",
"(",
"pk",
"=",
"pricing_pk",
")",
"[",
":",
"1",
"]",
"or",
"[",
"None",
"]",
"custom_price",
"=",
"self",
".",
"data",
".",
"get",
"(",
"'custom_price_%s'",
"%",
"pricing_pk",
")",
"# if not price set",
"if",
"not",
"pricing_option",
"or",
"pricing_option",
".",
"price",
"is",
"None",
":",
"# then price is custom",
"if",
"not",
"custom_price",
":",
"# custom price has a value",
"raise",
"forms",
".",
"ValidationError",
"(",
"_",
"(",
"\"Please set your price.\"",
")",
")",
"try",
":",
"# custom price is valid amount",
"custom_price",
"=",
"currency_check",
"(",
"custom_price",
")",
"except",
":",
"raise",
"forms",
".",
"ValidationError",
"(",
"_",
"(",
"\"Price must be a valid amount\"",
")",
")",
"self",
".",
"cleaned_data",
"[",
"'custom_price'",
"]",
"=",
"custom_price",
"return",
"pricing_option"
] |
https://github.com/tendenci/tendenci/blob/0f2c348cc0e7d41bc56f50b00ce05544b083bf1d/tendenci/apps/forms_builder/forms/forms.py#L197-L215
|
|||
TencentCloud/tencentcloud-sdk-python
|
3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2
|
tencentcloud/lighthouse/v20200324/models.py
|
python
|
DescribeCcnAttachedInstancesResponse.__init__
|
(self)
|
r"""
:param CcnAttachedInstanceSet: 云联网关联的实例列表。
注意:此字段可能返回 null,表示取不到有效值。
:type CcnAttachedInstanceSet: list of CcnAttachedInstance
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
|
r"""
:param CcnAttachedInstanceSet: 云联网关联的实例列表。
注意:此字段可能返回 null,表示取不到有效值。
:type CcnAttachedInstanceSet: list of CcnAttachedInstance
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
|
[
"r",
":",
"param",
"CcnAttachedInstanceSet",
":",
"云联网关联的实例列表。",
"注意:此字段可能返回",
"null,表示取不到有效值。",
":",
"type",
"CcnAttachedInstanceSet",
":",
"list",
"of",
"CcnAttachedInstance",
":",
"param",
"RequestId",
":",
"唯一请求",
"ID,每次请求都会返回。定位问题时需要提供该次请求的",
"RequestId。",
":",
"type",
"RequestId",
":",
"str"
] |
def __init__(self):
r"""
:param CcnAttachedInstanceSet: 云联网关联的实例列表。
注意:此字段可能返回 null,表示取不到有效值。
:type CcnAttachedInstanceSet: list of CcnAttachedInstance
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.CcnAttachedInstanceSet = None
self.RequestId = None
|
[
"def",
"__init__",
"(",
"self",
")",
":",
"self",
".",
"CcnAttachedInstanceSet",
"=",
"None",
"self",
".",
"RequestId",
"=",
"None"
] |
https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/lighthouse/v20200324/models.py#L1276-L1285
|
||
slinderman/pyhawkes
|
0df433a40c5e6d8c1dcdb98ffc88fe3a403ac223
|
pyhawkes/internals/parents.py
|
python
|
DiscreteTimeParents._mf_update_Z
|
(self)
|
Update the mean field parameters for the latent parents
:return:
|
Update the mean field parameters for the latent parents
:return:
|
[
"Update",
"the",
"mean",
"field",
"parameters",
"for",
"the",
"latent",
"parents",
":",
"return",
":"
] |
def _mf_update_Z(self):
"""
Update the mean field parameters for the latent parents
:return:
"""
bias_model, weight_model, impulse_model = \
self.model.bias_model, self.model.weight_model, self.model.impulse_model
K, B = self.K, self.B
exp_E_log_lambda0 = np.exp(bias_model.expected_log_lambda0())
exp_E_log_W = np.exp(weight_model.expected_log_W())
exp_E_log_g = np.exp(impulse_model.expected_log_g())
for k2, (Sk, Fk, EZk) in enumerate(zip(self.Ss, self.Fs, self.EZ)):
mf_update_Z(k2, EZk, Sk,
exp_E_log_lambda0,
exp_E_log_W,
exp_E_log_g,
Fk)
self._check_EZ()
|
[
"def",
"_mf_update_Z",
"(",
"self",
")",
":",
"bias_model",
",",
"weight_model",
",",
"impulse_model",
"=",
"self",
".",
"model",
".",
"bias_model",
",",
"self",
".",
"model",
".",
"weight_model",
",",
"self",
".",
"model",
".",
"impulse_model",
"K",
",",
"B",
"=",
"self",
".",
"K",
",",
"self",
".",
"B",
"exp_E_log_lambda0",
"=",
"np",
".",
"exp",
"(",
"bias_model",
".",
"expected_log_lambda0",
"(",
")",
")",
"exp_E_log_W",
"=",
"np",
".",
"exp",
"(",
"weight_model",
".",
"expected_log_W",
"(",
")",
")",
"exp_E_log_g",
"=",
"np",
".",
"exp",
"(",
"impulse_model",
".",
"expected_log_g",
"(",
")",
")",
"for",
"k2",
",",
"(",
"Sk",
",",
"Fk",
",",
"EZk",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"self",
".",
"Ss",
",",
"self",
".",
"Fs",
",",
"self",
".",
"EZ",
")",
")",
":",
"mf_update_Z",
"(",
"k2",
",",
"EZk",
",",
"Sk",
",",
"exp_E_log_lambda0",
",",
"exp_E_log_W",
",",
"exp_E_log_g",
",",
"Fk",
")",
"self",
".",
"_check_EZ",
"(",
")"
] |
https://github.com/slinderman/pyhawkes/blob/0df433a40c5e6d8c1dcdb98ffc88fe3a403ac223/pyhawkes/internals/parents.py#L382-L402
|
||
cobbler/cobbler
|
eed8cdca3e970c8aa1d199e80b8c8f19b3f940cc
|
cobbler/cobbler_collections/images.py
|
python
|
Images.factory_produce
|
(self, api, item_dict)
|
return new_image
|
Return a Distro forged from item_dict
|
Return a Distro forged from item_dict
|
[
"Return",
"a",
"Distro",
"forged",
"from",
"item_dict"
] |
def factory_produce(self, api, item_dict):
"""
Return a Distro forged from item_dict
"""
new_image = image.Image(api)
new_image.from_dict(item_dict)
return new_image
|
[
"def",
"factory_produce",
"(",
"self",
",",
"api",
",",
"item_dict",
")",
":",
"new_image",
"=",
"image",
".",
"Image",
"(",
"api",
")",
"new_image",
".",
"from_dict",
"(",
"item_dict",
")",
"return",
"new_image"
] |
https://github.com/cobbler/cobbler/blob/eed8cdca3e970c8aa1d199e80b8c8f19b3f940cc/cobbler/cobbler_collections/images.py#L34-L40
|
|
ricequant/rqalpha-mod-ctp
|
bfd40801f9a182226a911cac74660f62993eb6db
|
rqalpha_mod_ctp/ctp/pyctp/linux64_35/__init__.py
|
python
|
TraderApi.ReqQryTrade
|
(self, pQryTrade, nRequestID)
|
return 0
|
请求查询成交
|
请求查询成交
|
[
"请求查询成交"
] |
def ReqQryTrade(self, pQryTrade, nRequestID):
"""请求查询成交"""
return 0
|
[
"def",
"ReqQryTrade",
"(",
"self",
",",
"pQryTrade",
",",
"nRequestID",
")",
":",
"return",
"0"
] |
https://github.com/ricequant/rqalpha-mod-ctp/blob/bfd40801f9a182226a911cac74660f62993eb6db/rqalpha_mod_ctp/ctp/pyctp/linux64_35/__init__.py#L313-L315
|
|
huawei-noah/vega
|
d9f13deede7f2b584e4b1d32ffdb833856129989
|
vega/networks/tensorflow/gcn/layers.py
|
python
|
get_a_cell
|
(hidden_size, keep_prob)
|
return drop
|
Get helper function to construct a gru cell.
:param hidden_size: int, dimension of the gru cell
:param keep_prob: placeholder, drop out probability
:return: tensor: rnn cell tensor
|
Get helper function to construct a gru cell.
|
[
"Get",
"helper",
"function",
"to",
"construct",
"a",
"gru",
"cell",
"."
] |
def get_a_cell(hidden_size, keep_prob):
"""Get helper function to construct a gru cell.
:param hidden_size: int, dimension of the gru cell
:param keep_prob: placeholder, drop out probability
:return: tensor: rnn cell tensor
"""
cell = tf.contrib.rnn.GRUCell(hidden_size)
drop = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=keep_prob)
return drop
|
[
"def",
"get_a_cell",
"(",
"hidden_size",
",",
"keep_prob",
")",
":",
"cell",
"=",
"tf",
".",
"contrib",
".",
"rnn",
".",
"GRUCell",
"(",
"hidden_size",
")",
"drop",
"=",
"tf",
".",
"nn",
".",
"rnn_cell",
".",
"DropoutWrapper",
"(",
"cell",
",",
"output_keep_prob",
"=",
"keep_prob",
")",
"return",
"drop"
] |
https://github.com/huawei-noah/vega/blob/d9f13deede7f2b584e4b1d32ffdb833856129989/vega/networks/tensorflow/gcn/layers.py#L220-L229
|
|
mozilla/kitsune
|
7c7cf9baed57aa776547aea744243ccad6ca91fb
|
kitsune/messages/context_processors.py
|
python
|
unread_message_count
|
(request)
|
return {"unread_message_count": count}
|
Adds the unread private messages count to the context.
* Returns 0 for anonymous users.
* Returns 0 if waffle flag is off.
|
Adds the unread private messages count to the context.
|
[
"Adds",
"the",
"unread",
"private",
"messages",
"count",
"to",
"the",
"context",
"."
] |
def unread_message_count(request):
"""Adds the unread private messages count to the context.
* Returns 0 for anonymous users.
* Returns 0 if waffle flag is off.
"""
count = 0
if hasattr(request, "user") and request.user.is_authenticated:
count = unread_count_for(request.user)
return {"unread_message_count": count}
|
[
"def",
"unread_message_count",
"(",
"request",
")",
":",
"count",
"=",
"0",
"if",
"hasattr",
"(",
"request",
",",
"\"user\"",
")",
"and",
"request",
".",
"user",
".",
"is_authenticated",
":",
"count",
"=",
"unread_count_for",
"(",
"request",
".",
"user",
")",
"return",
"{",
"\"unread_message_count\"",
":",
"count",
"}"
] |
https://github.com/mozilla/kitsune/blob/7c7cf9baed57aa776547aea744243ccad6ca91fb/kitsune/messages/context_processors.py#L4-L13
|
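To expose the value as {{ unread_message_count }} in templates, the function must be registered as a Django context processor. A settings sketch follows; the dotted path matches the module location shown above, while the rest of the TEMPLATES block is a generic assumption.

# settings.py (sketch)
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.contrib.auth.context_processors.auth",
                "kitsune.messages.context_processors.unread_message_count",
            ],
        },
    },
]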
|
pytransitions/transitions
|
9663094f4566c016b11563e7a7d6d3802593845c
|
transitions/extensions/nesting.py
|
python
|
HierarchicalMachine.__init__
|
(self, *args, **kwargs)
|
[] |
def __init__(self, *args, **kwargs):
assert issubclass(self.state_cls, NestedState)
assert issubclass(self.event_cls, NestedEvent)
assert issubclass(self.transition_cls, NestedTransition)
self._stack = []
self.prefix_path = []
self.scoped = self
_super(HierarchicalMachine, self).__init__(*args, **kwargs)
|
[
"def",
"__init__",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"issubclass",
"(",
"self",
".",
"state_cls",
",",
"NestedState",
")",
"assert",
"issubclass",
"(",
"self",
".",
"event_cls",
",",
"NestedEvent",
")",
"assert",
"issubclass",
"(",
"self",
".",
"transition_cls",
",",
"NestedTransition",
")",
"self",
".",
"_stack",
"=",
"[",
"]",
"self",
".",
"prefix_path",
"=",
"[",
"]",
"self",
".",
"scoped",
"=",
"self",
"_super",
"(",
"HierarchicalMachine",
",",
"self",
")",
".",
"__init__",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/pytransitions/transitions/blob/9663094f4566c016b11563e7a7d6d3802593845c/transitions/extensions/nesting.py#L386-L393
|
||||
sagemath/sage
|
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
|
src/sage/rings/asymptotic/asymptotic_ring.py
|
python
|
AsymptoticRingFunctor.__eq__
|
(self, other)
|
return (type(self) == type(other)
and self.growth_group == other.growth_group
and self._default_prec_ == other._default_prec_
and self._category_ == other._category_
and self.cls == other.cls)
|
r"""
Return whether this functor is equal to ``other``.
INPUT:
- ``other`` -- a functor.
OUTPUT:
A boolean.
EXAMPLES::
sage: X = AsymptoticRing(growth_group='x^ZZ', coefficient_ring=QQ)
sage: Y = AsymptoticRing(growth_group='y^ZZ', coefficient_ring=QQ)
sage: F_X = X.construction()[0]
sage: F_Y = Y.construction()[0]
sage: F_X == F_X
True
sage: F_X == F_Y
False
|
r"""
Return whether this functor is equal to ``other``.
|
[
"r",
"Return",
"whether",
"this",
"functor",
"is",
"equal",
"to",
"other",
"."
] |
def __eq__(self, other):
r"""
Return whether this functor is equal to ``other``.
INPUT:
- ``other`` -- a functor.
OUTPUT:
A boolean.
EXAMPLES::
sage: X = AsymptoticRing(growth_group='x^ZZ', coefficient_ring=QQ)
sage: Y = AsymptoticRing(growth_group='y^ZZ', coefficient_ring=QQ)
sage: F_X = X.construction()[0]
sage: F_Y = Y.construction()[0]
sage: F_X == F_X
True
sage: F_X == F_Y
False
"""
return (type(self) == type(other)
and self.growth_group == other.growth_group
and self._default_prec_ == other._default_prec_
and self._category_ == other._category_
and self.cls == other.cls)
|
[
"def",
"__eq__",
"(",
"self",
",",
"other",
")",
":",
"return",
"(",
"type",
"(",
"self",
")",
"==",
"type",
"(",
"other",
")",
"and",
"self",
".",
"growth_group",
"==",
"other",
".",
"growth_group",
"and",
"self",
".",
"_default_prec_",
"==",
"other",
".",
"_default_prec_",
"and",
"self",
".",
"_category_",
"==",
"other",
".",
"_category_",
"and",
"self",
".",
"cls",
"==",
"other",
".",
"cls",
")"
] |
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/rings/asymptotic/asymptotic_ring.py#L4974-L5001
|
|
makerbot/ReplicatorG
|
d6f2b07785a5a5f1e172fb87cb4303b17c575d5d
|
skein_engines/skeinforge-50/skeinforge_application/skeinforge_plugins/craft_plugins/export_plugins/gcode_time_segment.py
|
python
|
GcodeTimeSegmentSkein.__init__
|
(self)
|
Initialize.
|
Initialize.
|
[
"Initialize",
"."
] |
def __init__(self):
'Initialize.'
self.feedRateMinute = None
self.isExtruderActive = False
self.oldFeedRateString = None
self.oldLocation = None
self.oldZString = None
self.operatingFlowRate = None
self.output = cStringIO.StringIO()
|
[
"def",
"__init__",
"(",
"self",
")",
":",
"self",
".",
"feedRateMinute",
"=",
"None",
"self",
".",
"isExtruderActive",
"=",
"False",
"self",
".",
"oldFeedRateString",
"=",
"None",
"self",
".",
"oldLocation",
"=",
"None",
"self",
".",
"oldZString",
"=",
"None",
"self",
".",
"operatingFlowRate",
"=",
"None",
"self",
".",
"output",
"=",
"cStringIO",
".",
"StringIO",
"(",
")"
] |
https://github.com/makerbot/ReplicatorG/blob/d6f2b07785a5a5f1e172fb87cb4303b17c575d5d/skein_engines/skeinforge-50/skeinforge_application/skeinforge_plugins/craft_plugins/export_plugins/gcode_time_segment.py#L155-L163
|
||
open-io/oio-sds
|
16041950b6056a55d5ce7ca77795defe6dfa6c61
|
oio/common/redis_conn.py
|
python
|
RedisConnection.release_lock
|
(self, lockname, identifier)
|
return False
|
Release a previously acquired Lock
|
Release a previously acquired Lock
|
[
"Release",
"a",
"previously",
"acquired",
"Lock"
] |
def release_lock(self, lockname, identifier):
"""Release a previously acquired Lock"""
conn = self.conn
pipe = conn.pipeline(True)
lockname = 'lock:' + lockname
while True:
try:
pipe.watch(lockname)
cur_id = pipe.get(lockname)
if cur_id and cur_id.decode('utf-8') == identifier:
pipe.multi()
pipe.delete(lockname)
pipe.execute()
return True
pipe.unwatch()
break
except self.__redis_mod.exceptions.WatchError:
pass
return False
|
[
"def",
"release_lock",
"(",
"self",
",",
"lockname",
",",
"identifier",
")",
":",
"conn",
"=",
"self",
".",
"conn",
"pipe",
"=",
"conn",
".",
"pipeline",
"(",
"True",
")",
"lockname",
"=",
"'lock:'",
"+",
"lockname",
"while",
"True",
":",
"try",
":",
"pipe",
".",
"watch",
"(",
"lockname",
")",
"cur_id",
"=",
"pipe",
".",
"get",
"(",
"lockname",
")",
"if",
"cur_id",
"and",
"cur_id",
".",
"decode",
"(",
"'utf-8'",
")",
"==",
"identifier",
":",
"pipe",
".",
"multi",
"(",
")",
"pipe",
".",
"delete",
"(",
"lockname",
")",
"pipe",
".",
"execute",
"(",
")",
"return",
"True",
"pipe",
".",
"unwatch",
"(",
")",
"break",
"except",
"self",
".",
"__redis_mod",
".",
"exceptions",
".",
"WatchError",
":",
"pass",
"return",
"False"
] |
https://github.com/open-io/oio-sds/blob/16041950b6056a55d5ce7ca77795defe6dfa6c61/oio/common/redis_conn.py#L193-L215
|
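An illustrative counterpart for acquiring the lock that release_lock undoes: the usual SET NX EX pattern with a random identifier. This is a sketch only; it assumes the redis-py client and is not part of oio-sds.

import uuid
import redis   # redis-py client, assumed available

def acquire_lock(conn, lockname, timeout=10):
    """Set lock:<lockname> only if it is free; return the identifier on success."""
    identifier = str(uuid.uuid4())
    if conn.set("lock:" + lockname, identifier, nx=True, ex=timeout):
        return identifier          # pass this token back when releasing
    return None

# Usage sketch (release_lock above is a method; it is written here as a plain
# function taking the connection, purely for illustration):
# conn = redis.Redis()
# token = acquire_lock(conn, "rebuild")
# if token:
#     try:
#         pass                     # critical section
#     finally:
#         release_lock(conn, "rebuild", token)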
|
phimpme/phimpme-generator
|
ba6d11190b9016238f27672e1ad55e6a875b74a0
|
Phimpme/site-packages/nose/case.py
|
python
|
Test.run
|
(self, result)
|
Modified run for the test wrapper.
From here we don't call result.startTest or stopTest or
addSuccess. The wrapper calls addError/addFailure only if its
own setup or teardown fails, or running the wrapped test fails
(eg, if the wrapped "test" is not callable).
Two additional methods are called, beforeTest and
afterTest. These give plugins a chance to modify the wrapped
test before it is called and do cleanup after it is
called. They are called unconditionally.
|
Modified run for the test wrapper.
|
[
"Modified",
"run",
"for",
"the",
"test",
"wrapper",
"."
] |
def run(self, result):
"""Modified run for the test wrapper.
From here we don't call result.startTest or stopTest or
addSuccess. The wrapper calls addError/addFailure only if its
own setup or teardown fails, or running the wrapped test fails
(eg, if the wrapped "test" is not callable).
Two additional methods are called, beforeTest and
afterTest. These give plugins a chance to modify the wrapped
test before it is called and do cleanup after it is
called. They are called unconditionally.
"""
if self.resultProxy:
result = self.resultProxy(result, self)
try:
try:
self.beforeTest(result)
self.runTest(result)
except KeyboardInterrupt:
raise
except:
err = sys.exc_info()
result.addError(self, err)
finally:
self.afterTest(result)
|
[
"def",
"run",
"(",
"self",
",",
"result",
")",
":",
"if",
"self",
".",
"resultProxy",
":",
"result",
"=",
"self",
".",
"resultProxy",
"(",
"result",
",",
"self",
")",
"try",
":",
"try",
":",
"self",
".",
"beforeTest",
"(",
"result",
")",
"self",
".",
"runTest",
"(",
"result",
")",
"except",
"KeyboardInterrupt",
":",
"raise",
"except",
":",
"err",
"=",
"sys",
".",
"exc_info",
"(",
")",
"result",
".",
"addError",
"(",
"self",
",",
"err",
")",
"finally",
":",
"self",
".",
"afterTest",
"(",
"result",
")"
] |
https://github.com/phimpme/phimpme-generator/blob/ba6d11190b9016238f27672e1ad55e6a875b74a0/Phimpme/site-packages/nose/case.py#L115-L140
|