nwo stringlengths 5-106 | sha stringlengths 40 | path stringlengths 4-174 | language stringclasses (1 value) | identifier stringlengths 1-140 | parameters stringlengths 0-87.7k | argument_list stringclasses (1 value) | return_statement stringlengths 0-426k | docstring stringlengths 0-64.3k | docstring_summary stringlengths 0-26.3k | docstring_tokens list | function stringlengths 18-4.83M | function_tokens list | url stringlengths 83-304 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
espnet/espnet
|
ea411f3f627b8f101c211e107d0ff7053344ac80
|
espnet2/train/trainer.py
|
python
|
Trainer.build_options
|
(cls, args: argparse.Namespace)
|
return build_dataclass(TrainerOptions, args)
|
Build options consumed by train(), eval(), and plot_attention()
|
Build options consumed by train(), eval(), and plot_attention()
|
[
"Build",
"options",
"consumed",
"by",
"train",
"()",
"eval",
"()",
"and",
"plot_attention",
"()"
] |
def build_options(cls, args: argparse.Namespace) -> TrainerOptions:
"""Build options consumed by train(), eval(), and plot_attention()"""
assert check_argument_types()
return build_dataclass(TrainerOptions, args)
|
[
"def",
"build_options",
"(",
"cls",
",",
"args",
":",
"argparse",
".",
"Namespace",
")",
"->",
"TrainerOptions",
":",
"assert",
"check_argument_types",
"(",
")",
"return",
"build_dataclass",
"(",
"TrainerOptions",
",",
"args",
")"
] |
https://github.com/espnet/espnet/blob/ea411f3f627b8f101c211e107d0ff7053344ac80/espnet2/train/trainer.py#L118-L121
|
|
bruderstein/PythonScript
|
df9f7071ddf3a079e3a301b9b53a6dc78cf1208f
|
PythonLib/full/_pydecimal.py
|
python
|
_dexp
|
(c, e, p)
|
return _div_nearest(_iexp(rem, 10**p), 1000), quot - p + 3
|
Compute an approximation to exp(c*10**e), with p decimal places of
precision.
Returns integers d, f such that:
10**(p-1) <= d <= 10**p, and
(d-1)*10**f < exp(c*10**e) < (d+1)*10**f
In other words, d*10**f is an approximation to exp(c*10**e) with p
digits of precision, and with an error in d of at most 1. This is
almost, but not quite, the same as the error being < 1ulp: when d
= 10**(p-1) the error could be up to 10 ulp.
|
Compute an approximation to exp(c*10**e), with p decimal places of
precision.
|
[
"Compute",
"an",
"approximation",
"to",
"exp",
"(",
"c",
"*",
"10",
"**",
"e",
")",
"with",
"p",
"decimal",
"places",
"of",
"precision",
"."
] |
def _dexp(c, e, p):
"""Compute an approximation to exp(c*10**e), with p decimal places of
precision.
Returns integers d, f such that:
10**(p-1) <= d <= 10**p, and
(d-1)*10**f < exp(c*10**e) < (d+1)*10**f
In other words, d*10**f is an approximation to exp(c*10**e) with p
digits of precision, and with an error in d of at most 1. This is
almost, but not quite, the same as the error being < 1ulp: when d
= 10**(p-1) the error could be up to 10 ulp."""
# we'll call iexp with M = 10**(p+2), giving p+3 digits of precision
p += 2
# compute log(10) with extra precision = adjusted exponent of c*10**e
extra = max(0, e + len(str(c)) - 1)
q = p + extra
# compute quotient c*10**e/(log(10)) = c*10**(e+q)/(log(10)*10**q),
# rounding down
shift = e+q
if shift >= 0:
cshift = c*10**shift
else:
cshift = c//10**-shift
quot, rem = divmod(cshift, _log10_digits(q))
# reduce remainder back to original precision
rem = _div_nearest(rem, 10**extra)
# error in result of _iexp < 120; error after division < 0.62
return _div_nearest(_iexp(rem, 10**p), 1000), quot - p + 3
|
[
"def",
"_dexp",
"(",
"c",
",",
"e",
",",
"p",
")",
":",
"# we'll call iexp with M = 10**(p+2), giving p+3 digits of precision",
"p",
"+=",
"2",
"# compute log(10) with extra precision = adjusted exponent of c*10**e",
"extra",
"=",
"max",
"(",
"0",
",",
"e",
"+",
"len",
"(",
"str",
"(",
"c",
")",
")",
"-",
"1",
")",
"q",
"=",
"p",
"+",
"extra",
"# compute quotient c*10**e/(log(10)) = c*10**(e+q)/(log(10)*10**q),",
"# rounding down",
"shift",
"=",
"e",
"+",
"q",
"if",
"shift",
">=",
"0",
":",
"cshift",
"=",
"c",
"*",
"10",
"**",
"shift",
"else",
":",
"cshift",
"=",
"c",
"//",
"10",
"**",
"-",
"shift",
"quot",
",",
"rem",
"=",
"divmod",
"(",
"cshift",
",",
"_log10_digits",
"(",
"q",
")",
")",
"# reduce remainder back to original precision",
"rem",
"=",
"_div_nearest",
"(",
"rem",
",",
"10",
"**",
"extra",
")",
"# error in result of _iexp < 120; error after division < 0.62",
"return",
"_div_nearest",
"(",
"_iexp",
"(",
"rem",
",",
"10",
"**",
"p",
")",
",",
"1000",
")",
",",
"quot",
"-",
"p",
"+",
"3"
] |
https://github.com/bruderstein/PythonScript/blob/df9f7071ddf3a079e3a301b9b53a6dc78cf1208f/PythonLib/full/_pydecimal.py#L5924-L5958
|
|
tp4a/teleport
|
1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad
|
server/www/packages/packages-windows/x86/ldap3/strategy/mockBase.py
|
python
|
MockBaseStrategy._execute_search
|
(self, request)
|
return responses[:request['sizeLimit']] if request['sizeLimit'] > 0 else responses, result
|
[] |
def _execute_search(self, request):
responses = []
base = safe_dn(request['base'])
scope = request['scope']
attributes = request['attributes']
if '+' in attributes: # operational attributes requested
attributes.extend(self.operational_attributes)
attributes.remove('+')
attributes = [attr.lower() for attr in request['attributes']]
filter_root = parse_filter(request['filter'], self.connection.server.schema, auto_escape=True, auto_encode=False, validator=self.connection.server.custom_validator, check_names=self.connection.check_names)
candidates = []
if scope == 0: # base object
if base in self.connection.server.dit or base.lower() == 'cn=schema':
candidates.append(base)
elif scope == 1: # single level
for entry in self.connection.server.dit:
if entry.lower().endswith(base.lower()) and ',' not in entry[:-len(base) - 1]: # only leafs without commas in the remaining dn
candidates.append(entry)
elif scope == 2: # whole subtree
for entry in self.connection.server.dit:
if entry.lower().endswith(base.lower()):
candidates.append(entry)
if not candidates: # incorrect base
result_code = RESULT_NO_SUCH_OBJECT
message = 'incorrect base object'
else:
matched = self.evaluate_filter_node(filter_root, candidates)
if self.connection.raise_exceptions and 0 < request['sizeLimit'] < len(matched):
result_code = 4
message = 'size limit exceeded'
else:
for match in matched:
responses.append({
'object': match,
'attributes': [{'type': attribute,
'vals': [] if request['typesOnly'] else self.connection.server.dit[match][attribute]}
for attribute in self.connection.server.dit[match]
if attribute.lower() in attributes or ALL_ATTRIBUTES in attributes]
})
if '+' not in attributes: # remove operational attributes
for op_attr in self.operational_attributes:
for i, attr in enumerate(responses[len(responses)-1]['attributes']):
if attr['type'] == op_attr:
del responses[len(responses)-1]['attributes'][i]
result_code = 0
message = ''
result = {'resultCode': result_code,
'matchedDN': '',
'diagnosticMessage': to_unicode(message, SERVER_ENCODING),
'referral': None
}
return responses[:request['sizeLimit']] if request['sizeLimit'] > 0 else responses, result
|
[
"def",
"_execute_search",
"(",
"self",
",",
"request",
")",
":",
"responses",
"=",
"[",
"]",
"base",
"=",
"safe_dn",
"(",
"request",
"[",
"'base'",
"]",
")",
"scope",
"=",
"request",
"[",
"'scope'",
"]",
"attributes",
"=",
"request",
"[",
"'attributes'",
"]",
"if",
"'+'",
"in",
"attributes",
":",
"# operational attributes requested",
"attributes",
".",
"extend",
"(",
"self",
".",
"operational_attributes",
")",
"attributes",
".",
"remove",
"(",
"'+'",
")",
"attributes",
"=",
"[",
"attr",
".",
"lower",
"(",
")",
"for",
"attr",
"in",
"request",
"[",
"'attributes'",
"]",
"]",
"filter_root",
"=",
"parse_filter",
"(",
"request",
"[",
"'filter'",
"]",
",",
"self",
".",
"connection",
".",
"server",
".",
"schema",
",",
"auto_escape",
"=",
"True",
",",
"auto_encode",
"=",
"False",
",",
"validator",
"=",
"self",
".",
"connection",
".",
"server",
".",
"custom_validator",
",",
"check_names",
"=",
"self",
".",
"connection",
".",
"check_names",
")",
"candidates",
"=",
"[",
"]",
"if",
"scope",
"==",
"0",
":",
"# base object",
"if",
"base",
"in",
"self",
".",
"connection",
".",
"server",
".",
"dit",
"or",
"base",
".",
"lower",
"(",
")",
"==",
"'cn=schema'",
":",
"candidates",
".",
"append",
"(",
"base",
")",
"elif",
"scope",
"==",
"1",
":",
"# single level",
"for",
"entry",
"in",
"self",
".",
"connection",
".",
"server",
".",
"dit",
":",
"if",
"entry",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"base",
".",
"lower",
"(",
")",
")",
"and",
"','",
"not",
"in",
"entry",
"[",
":",
"-",
"len",
"(",
"base",
")",
"-",
"1",
"]",
":",
"# only leafs without commas in the remaining dn",
"candidates",
".",
"append",
"(",
"entry",
")",
"elif",
"scope",
"==",
"2",
":",
"# whole subtree",
"for",
"entry",
"in",
"self",
".",
"connection",
".",
"server",
".",
"dit",
":",
"if",
"entry",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"base",
".",
"lower",
"(",
")",
")",
":",
"candidates",
".",
"append",
"(",
"entry",
")",
"if",
"not",
"candidates",
":",
"# incorrect base",
"result_code",
"=",
"RESULT_NO_SUCH_OBJECT",
"message",
"=",
"'incorrect base object'",
"else",
":",
"matched",
"=",
"self",
".",
"evaluate_filter_node",
"(",
"filter_root",
",",
"candidates",
")",
"if",
"self",
".",
"connection",
".",
"raise_exceptions",
"and",
"0",
"<",
"request",
"[",
"'sizeLimit'",
"]",
"<",
"len",
"(",
"matched",
")",
":",
"result_code",
"=",
"4",
"message",
"=",
"'size limit exceeded'",
"else",
":",
"for",
"match",
"in",
"matched",
":",
"responses",
".",
"append",
"(",
"{",
"'object'",
":",
"match",
",",
"'attributes'",
":",
"[",
"{",
"'type'",
":",
"attribute",
",",
"'vals'",
":",
"[",
"]",
"if",
"request",
"[",
"'typesOnly'",
"]",
"else",
"self",
".",
"connection",
".",
"server",
".",
"dit",
"[",
"match",
"]",
"[",
"attribute",
"]",
"}",
"for",
"attribute",
"in",
"self",
".",
"connection",
".",
"server",
".",
"dit",
"[",
"match",
"]",
"if",
"attribute",
".",
"lower",
"(",
")",
"in",
"attributes",
"or",
"ALL_ATTRIBUTES",
"in",
"attributes",
"]",
"}",
")",
"if",
"'+'",
"not",
"in",
"attributes",
":",
"# remove operational attributes",
"for",
"op_attr",
"in",
"self",
".",
"operational_attributes",
":",
"for",
"i",
",",
"attr",
"in",
"enumerate",
"(",
"responses",
"[",
"len",
"(",
"responses",
")",
"-",
"1",
"]",
"[",
"'attributes'",
"]",
")",
":",
"if",
"attr",
"[",
"'type'",
"]",
"==",
"op_attr",
":",
"del",
"responses",
"[",
"len",
"(",
"responses",
")",
"-",
"1",
"]",
"[",
"'attributes'",
"]",
"[",
"i",
"]",
"result_code",
"=",
"0",
"message",
"=",
"''",
"result",
"=",
"{",
"'resultCode'",
":",
"result_code",
",",
"'matchedDN'",
":",
"''",
",",
"'diagnosticMessage'",
":",
"to_unicode",
"(",
"message",
",",
"SERVER_ENCODING",
")",
",",
"'referral'",
":",
"None",
"}",
"return",
"responses",
"[",
":",
"request",
"[",
"'sizeLimit'",
"]",
"]",
"if",
"request",
"[",
"'sizeLimit'",
"]",
">",
"0",
"else",
"responses",
",",
"result"
] |
https://github.com/tp4a/teleport/blob/1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad/server/www/packages/packages-windows/x86/ldap3/strategy/mockBase.py#L648-L704
|
|||
suragnair/alpha-zero-general
|
018f65ee1ef56b87c8a9049353d4130946d03a9a
|
rts/src/Board.py
|
python
|
Board._check_if_attack
|
(self, x, y, n_x, n_y)
|
return 0 <= n_x < self.n and 0 <= n_y < self.n and self[x][y][P_NAME_IDX] == -self[n_x][n_y][P_NAME_IDX] and self[n_x][n_y][A_TYPE_IDX] != d_a_type['Gold']
|
Check if actor on x,y can attack actor on n_x,n_y
:param x: actor on coordinate x
:param y: actor on coordinate y
:param n_x: can attack actor on coordinate n_x
:param n_y: can attack actor on coordinate n_y
:return: true/false
|
Check if actor on x,y can attack actor on n_x,n_y
:param x: actor on coordinate x
:param y: actor on coordinate y
:param n_x: can attack actor on coordinate n_x
:param n_y: can attack actor on coordinate n_y
:return: true/false
|
[
"Check",
"if",
"actor",
"on",
"x",
"y",
"can",
"attack",
"actor",
"on",
"n_x",
"n_y",
":",
"param",
"x",
":",
"actor",
"on",
"coordinate",
"x",
":",
"param",
"y",
":",
"actor",
"on",
"coordinate",
"y",
":",
"param",
"n_x",
":",
"can",
"attack",
"actor",
"on",
"coordinate",
"n_x",
":",
"param",
"n_y",
":",
"can",
"attack",
"actor",
"on",
"coordinate",
"n_y",
":",
"return",
":",
"true",
"/",
"false"
] |
def _check_if_attack(self, x, y, n_x, n_y):
"""
Check if actor on x,y can attack actor on n_x,n_y
:param x: actor on coordinate x
:param y: actor on coordinate y
:param n_x: can attack actor on coordinate n_x
:param n_y: can attack actor on coordinate n_y
:return: true/false
"""
return 0 <= n_x < self.n and 0 <= n_y < self.n and self[x][y][P_NAME_IDX] == -self[n_x][n_y][P_NAME_IDX] and self[n_x][n_y][A_TYPE_IDX] != d_a_type['Gold']
|
[
"def",
"_check_if_attack",
"(",
"self",
",",
"x",
",",
"y",
",",
"n_x",
",",
"n_y",
")",
":",
"return",
"0",
"<=",
"n_x",
"<",
"self",
".",
"n",
"and",
"0",
"<=",
"n_y",
"<",
"self",
".",
"n",
"and",
"self",
"[",
"x",
"]",
"[",
"y",
"]",
"[",
"P_NAME_IDX",
"]",
"==",
"-",
"self",
"[",
"n_x",
"]",
"[",
"n_y",
"]",
"[",
"P_NAME_IDX",
"]",
"and",
"self",
"[",
"n_x",
"]",
"[",
"n_y",
"]",
"[",
"A_TYPE_IDX",
"]",
"!=",
"d_a_type",
"[",
"'Gold'",
"]"
] |
https://github.com/suragnair/alpha-zero-general/blob/018f65ee1ef56b87c8a9049353d4130946d03a9a/rts/src/Board.py#L350-L359
|
|
n1nj4sec/pupy
|
a5d766ea81fdfe3bc2c38c9bdaf10e9b75af3b39
|
pupy/pupylib/PupyDnsCnc.py
|
python
|
PupyDnsCommandServerHandler._whitelist
|
(self, nodeid, cid, version)
|
return nodeid in set([x.strip().lower() for x in allowed_nodes.split(',')])
|
[] |
def _whitelist(self, nodeid, cid, version):
if not self.config.getboolean('dnscnc', 'whitelist'):
return True
if version == 1 and not self.config.getboolean('dnscnc', 'allow_v1'):
return False
if not cid or not nodeid:
return self.config.getboolean('dnscnc', 'allow_by_default')
nodeid = '{:012x}'.format(nodeid)
cid = '{:016x}'.format(cid)
allowed_nodes = self.config.get('cids', cid)
if not allowed_nodes:
if self.config.getboolean('dnscnc', 'allow_by_default'):
return True
return False
return nodeid in set([x.strip().lower() for x in allowed_nodes.split(',')])
|
[
"def",
"_whitelist",
"(",
"self",
",",
"nodeid",
",",
"cid",
",",
"version",
")",
":",
"if",
"not",
"self",
".",
"config",
".",
"getboolean",
"(",
"'dnscnc'",
",",
"'whitelist'",
")",
":",
"return",
"True",
"if",
"version",
"==",
"1",
"and",
"not",
"self",
".",
"config",
".",
"getboolean",
"(",
"'dnscnc'",
",",
"'allow_v1'",
")",
":",
"return",
"False",
"if",
"not",
"cid",
"or",
"not",
"nodeid",
":",
"return",
"self",
".",
"config",
".",
"getboolean",
"(",
"'dnscnc'",
",",
"'allow_by_default'",
")",
"nodeid",
"=",
"'{:012x}'",
".",
"format",
"(",
"nodeid",
")",
"cid",
"=",
"'{:016x}'",
".",
"format",
"(",
"cid",
")",
"allowed_nodes",
"=",
"self",
".",
"config",
".",
"get",
"(",
"'cids'",
",",
"cid",
")",
"if",
"not",
"allowed_nodes",
":",
"if",
"self",
".",
"config",
".",
"getboolean",
"(",
"'dnscnc'",
",",
"'allow_by_default'",
")",
":",
"return",
"True",
"return",
"False",
"return",
"nodeid",
"in",
"set",
"(",
"[",
"x",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"for",
"x",
"in",
"allowed_nodes",
".",
"split",
"(",
"','",
")",
"]",
")"
] |
https://github.com/n1nj4sec/pupy/blob/a5d766ea81fdfe3bc2c38c9bdaf10e9b75af3b39/pupy/pupylib/PupyDnsCnc.py#L95-L114
|
|||
IronLanguages/main
|
a949455434b1fda8c783289e897e78a9a0caabb5
|
External.LCA_RESTRICTED/Languages/CPython/27/Lib/lib2to3/fixer_util.py
|
python
|
Attr
|
(obj, attr)
|
return [obj, Node(syms.trailer, [Dot(), attr])]
|
A node tuple for obj.attr
|
A node tuple for obj.attr
|
[
"A",
"node",
"tuple",
"for",
"obj",
".",
"attr"
] |
def Attr(obj, attr):
"""A node tuple for obj.attr"""
return [obj, Node(syms.trailer, [Dot(), attr])]
|
[
"def",
"Attr",
"(",
"obj",
",",
"attr",
")",
":",
"return",
"[",
"obj",
",",
"Node",
"(",
"syms",
".",
"trailer",
",",
"[",
"Dot",
"(",
")",
",",
"attr",
"]",
")",
"]"
] |
https://github.com/IronLanguages/main/blob/a949455434b1fda8c783289e897e78a9a0caabb5/External.LCA_RESTRICTED/Languages/CPython/27/Lib/lib2to3/fixer_util.py#L42-L44
|
|
DataDog/integrations-core
|
934674b29d94b70ccc008f76ea172d0cdae05e1e
|
voltdb/datadog_checks/voltdb/config_models/instance.py
|
python
|
InstanceConfig._ensure_defaults
|
(cls, v, field)
|
return getattr(defaults, f'instance_{field.name}')(field, v)
|
[] |
def _ensure_defaults(cls, v, field):
if v is not None or field.required:
return v
return getattr(defaults, f'instance_{field.name}')(field, v)
|
[
"def",
"_ensure_defaults",
"(",
"cls",
",",
"v",
",",
"field",
")",
":",
"if",
"v",
"is",
"not",
"None",
"or",
"field",
".",
"required",
":",
"return",
"v",
"return",
"getattr",
"(",
"defaults",
",",
"f'instance_{field.name}'",
")",
"(",
"field",
",",
"v",
")"
] |
https://github.com/DataDog/integrations-core/blob/934674b29d94b70ccc008f76ea172d0cdae05e1e/voltdb/datadog_checks/voltdb/config_models/instance.py#L102-L106
|
|||
googlemaps/google-maps-services-python
|
25e26092cb19e998764cae37474dd4c35ff42473
|
googlemaps/places.py
|
python
|
places_autocomplete
|
(
client,
input_text,
session_token=None,
offset=None,
origin=None,
location=None,
radius=None,
language=None,
types=None,
components=None,
strict_bounds=False,
)
|
return _autocomplete(
client,
"",
input_text,
session_token=session_token,
offset=offset,
origin=origin,
location=location,
radius=radius,
language=language,
types=types,
components=components,
strict_bounds=strict_bounds,
)
|
Returns Place predictions given a textual search string and optional
geographic bounds.
:param input_text: The text string on which to search.
:type input_text: string
:param session_token: A random string which identifies an autocomplete
session for billing purposes.
:type session_token: string
:param offset: The position, in the input term, of the last character
that the service uses to match predictions. For example,
if the input is 'Google' and the offset is 3, the
service will match on 'Goo'.
:type offset: int
:param origin: The origin point from which to calculate straight-line distance
to the destination (returned as distance_meters).
If this value is omitted, straight-line distance will
not be returned.
:type origin: string, dict, list, or tuple
:param location: The latitude/longitude value for which you wish to obtain the
closest, human-readable address.
:type location: string, dict, list, or tuple
:param radius: Distance in meters within which to bias results.
:type radius: int
:param language: The language in which to return results.
:type language: string
:param types: Restricts the results to places matching the specified type.
The full list of supported types is available here:
https://developers.google.com/places/web-service/autocomplete#place_types
:type types: string
:param components: A component filter for which you wish to obtain a geocode.
Currently, you can use components to filter by up to 5 countries for
example: ``{'country': ['US', 'AU']}``
:type components: dict
:param strict_bounds: Returns only those places that are strictly within
the region defined by location and radius.
:type strict_bounds: bool
:rtype: list of predictions
|
Returns Place predictions given a textual search string and optional
geographic bounds.
|
[
"Returns",
"Place",
"predictions",
"given",
"a",
"textual",
"search",
"string",
"and",
"optional",
"geographic",
"bounds",
"."
] |
def places_autocomplete(
client,
input_text,
session_token=None,
offset=None,
origin=None,
location=None,
radius=None,
language=None,
types=None,
components=None,
strict_bounds=False,
):
"""
Returns Place predictions given a textual search string and optional
geographic bounds.
:param input_text: The text string on which to search.
:type input_text: string
:param session_token: A random string which identifies an autocomplete
session for billing purposes.
:type session_token: string
:param offset: The position, in the input term, of the last character
that the service uses to match predictions. For example,
if the input is 'Google' and the offset is 3, the
service will match on 'Goo'.
:type offset: int
:param origin: The origin point from which to calculate straight-line distance
to the destination (returned as distance_meters).
If this value is omitted, straight-line distance will
not be returned.
:type origin: string, dict, list, or tuple
:param location: The latitude/longitude value for which you wish to obtain the
closest, human-readable address.
:type location: string, dict, list, or tuple
:param radius: Distance in meters within which to bias results.
:type radius: int
:param language: The language in which to return results.
:type language: string
:param types: Restricts the results to places matching the specified type.
The full list of supported types is available here:
https://developers.google.com/places/web-service/autocomplete#place_types
:type types: string
:param components: A component filter for which you wish to obtain a geocode.
Currently, you can use components to filter by up to 5 countries for
example: ``{'country': ['US', 'AU']}``
:type components: dict
:param strict_bounds: Returns only those places that are strictly within
the region defined by location and radius.
:type strict_bounds: bool
:rtype: list of predictions
"""
return _autocomplete(
client,
"",
input_text,
session_token=session_token,
offset=offset,
origin=origin,
location=location,
radius=radius,
language=language,
types=types,
components=components,
strict_bounds=strict_bounds,
)
|
[
"def",
"places_autocomplete",
"(",
"client",
",",
"input_text",
",",
"session_token",
"=",
"None",
",",
"offset",
"=",
"None",
",",
"origin",
"=",
"None",
",",
"location",
"=",
"None",
",",
"radius",
"=",
"None",
",",
"language",
"=",
"None",
",",
"types",
"=",
"None",
",",
"components",
"=",
"None",
",",
"strict_bounds",
"=",
"False",
",",
")",
":",
"return",
"_autocomplete",
"(",
"client",
",",
"\"\"",
",",
"input_text",
",",
"session_token",
"=",
"session_token",
",",
"offset",
"=",
"offset",
",",
"origin",
"=",
"origin",
",",
"location",
"=",
"location",
",",
"radius",
"=",
"radius",
",",
"language",
"=",
"language",
",",
"types",
"=",
"types",
",",
"components",
"=",
"components",
",",
"strict_bounds",
"=",
"strict_bounds",
",",
")"
] |
https://github.com/googlemaps/google-maps-services-python/blob/25e26092cb19e998764cae37474dd4c35ff42473/googlemaps/places.py#L499-L575
|
|
StanfordVL/taskonomy
|
9f814867b5fe4165860862211e8e99b0f200144d
|
code/lib/data/task_data_loading.py
|
python
|
load_and_preprocess_img_fast
|
( filename_template, cfg, is_training=False )
|
return input_img, target_img, target_mask
|
Slightly worse quality images than the one above, but roughly twice as fast
due to the use of PIL instead of Skimage.
Args:
filename_template: the filename, with {domain} where the
domain should be interpolated
cfg: A config.py dict
Returns:
input_img: scaled between [0, 1]
target_img: scaled between [0, 1]
|
Slightly worse quality images than the one above, but roughly twice as fast
due to the use of PIL instead of Skimage.
Args:
filename_template: the filename, with {domain} where the
domain should be interpolated
cfg: A config.py dict
Returns:
input_img: scaled between [0, 1]
target_img: scaled between [0, 1]
|
[
"Slightly",
"worse",
"quality",
"images",
"than",
"the",
"one",
"above",
"but",
"roughly",
"twice",
"as",
"fast",
"due",
"to",
"the",
"use",
"of",
"PIL",
"instead",
"of",
"Skimage",
".",
"Args",
":",
"filename_template",
":",
"the",
"filename",
"with",
"{",
"domain",
"}",
"where",
"the",
"domain",
"should",
"be",
"interpolated",
"cfg",
":",
"A",
"config",
".",
"py",
"dict",
"Returns",
":",
"input_img",
":",
"scaled",
"between",
"[",
"0",
"1",
"]",
"target_img",
":",
"scaled",
"between",
"[",
"0",
"1",
"]"
] |
def load_and_preprocess_img_fast( filename_template, cfg, is_training=False ):
"""
Slightly worse quality images than the one above, but roughly twice as fast
due to the use of PIL instead of Skimage.
Args:
filename_template: the filename, with {domain} where the
domain should be interpolated
cfg: A config.py dict
Returns:
input_img: scaled between [0, 1]
target_img: scaled between [0, 1]
"""
if 'resize_interpolation_order' not in cfg:
cfg['resize_interpolation_order'] = Image.NEAREST
if 'is_discriminative' in cfg:
raise ValueError("Using 'load_and_preprocess_img_fast' for discriminative task is not adviced, this function hasn't been updated for discriminative tasks")
# inputs
input_img = Image.open(make_filename_for_domain(
filename_template, cfg['input_domain_name'] ) )
# targets
target_img = Image.open(make_filename_for_domain(
filename_template, cfg['target_domain_name'] ) )
input_img = input_img.resize( cfg[ 'input_dim'], Image.NEAREST ) # cfg['resize_interpolation_order']
input_img = rescale_image( np.array( input_img ), [-1, 1] )
if len( input_img.shape )== 2:
input_img = input_img[:,:,np.newaxis]
# apply mask to raw target img
target_mask = make_mask( input_img, target_img, cfg )
# process target
target_img_np = np.array(target_img)
    if len( target_img_np.shape ) == 2:
target_img_np = target_img_np[:,:,np.newaxis]
target_img = target_img.resize( cfg[ 'target_dim' ], Image.NEAREST )
target_img = rescale_image( np.array( target_img ), [-1, 1] )
if len( target_img.shape ) == 2:
target_img = target_img[:,:,np.newaxis]
# print( "Loaded", filename_template )
return input_img, target_img, target_mask
|
[
"def",
"load_and_preprocess_img_fast",
"(",
"filename_template",
",",
"cfg",
",",
"is_training",
"=",
"False",
")",
":",
"if",
"'resize_interpolation_order'",
"not",
"in",
"cfg",
":",
"cfg",
"[",
"'resize_interpolation_order'",
"]",
"=",
"Image",
".",
"NEAREST",
"if",
"'is_discriminative'",
"in",
"cfg",
":",
"raise",
"ValueError",
"(",
"\"Using 'load_and_preprocess_img_fast' for discriminative task is not adviced, this function hasn't been updated for discriminative tasks\"",
")",
"# inputs",
"input_img",
"=",
"Image",
".",
"open",
"(",
"make_filename_for_domain",
"(",
"filename_template",
",",
"cfg",
"[",
"'input_domain_name'",
"]",
")",
")",
"# targets",
"target_img",
"=",
"Image",
".",
"open",
"(",
"make_filename_for_domain",
"(",
"filename_template",
",",
"cfg",
"[",
"'target_domain_name'",
"]",
")",
")",
"input_img",
"=",
"input_img",
".",
"resize",
"(",
"cfg",
"[",
"'input_dim'",
"]",
",",
"Image",
".",
"NEAREST",
")",
"# cfg['resize_interpolation_order']",
"input_img",
"=",
"rescale_image",
"(",
"np",
".",
"array",
"(",
"input_img",
")",
",",
"[",
"-",
"1",
",",
"1",
"]",
")",
"if",
"len",
"(",
"input_img",
".",
"shape",
")",
"==",
"2",
":",
"input_img",
"=",
"input_img",
"[",
":",
",",
":",
",",
"np",
".",
"newaxis",
"]",
"# apply mask to raw target img",
"target_mask",
"=",
"make_mask",
"(",
"input_img",
",",
"target_img",
",",
"cfg",
")",
"# process target",
"target_img_np",
"=",
"np",
".",
"array",
"(",
"target_img",
")",
"if",
"len",
"(",
"targest_img_np",
".",
"shape",
")",
"==",
"2",
":",
"target_img_np",
"=",
"target_img_np",
"[",
":",
",",
":",
",",
"np",
".",
"newaxis",
"]",
"target_img",
"=",
"target_img",
".",
"resize",
"(",
"cfg",
"[",
"'target_dim'",
"]",
",",
"Image",
".",
"NEAREST",
")",
"target_img",
"=",
"rescale_image",
"(",
"np",
".",
"array",
"(",
"target_img",
")",
",",
"[",
"-",
"1",
",",
"1",
"]",
")",
"if",
"len",
"(",
"target_img",
".",
"shape",
")",
"==",
"2",
":",
"target_img",
"=",
"target_img",
"[",
":",
",",
":",
",",
"np",
".",
"newaxis",
"]",
"# print( \"Loaded\", filename_template )",
"return",
"input_img",
",",
"target_img",
",",
"target_mask"
] |
https://github.com/StanfordVL/taskonomy/blob/9f814867b5fe4165860862211e8e99b0f200144d/code/lib/data/task_data_loading.py#L453-L497
|
|
sympy/sympy
|
d822fcba181155b85ff2b29fe525adbafb22b448
|
sympy/polys/polyclasses.py
|
python
|
DMP.prem
|
(f, g)
|
return per(dmp_prem(F, G, lev, dom))
|
Polynomial pseudo-remainder of ``f`` and ``g``.
|
Polynomial pseudo-remainder of ``f`` and ``g``.
|
[
"Polynomial",
"pseudo",
"-",
"remainder",
"of",
"f",
"and",
"g",
"."
] |
def prem(f, g):
"""Polynomial pseudo-remainder of ``f`` and ``g``. """
lev, dom, per, F, G = f.unify(g)
return per(dmp_prem(F, G, lev, dom))
|
[
"def",
"prem",
"(",
"f",
",",
"g",
")",
":",
"lev",
",",
"dom",
",",
"per",
",",
"F",
",",
"G",
"=",
"f",
".",
"unify",
"(",
"g",
")",
"return",
"per",
"(",
"dmp_prem",
"(",
"F",
",",
"G",
",",
"lev",
",",
"dom",
")",
")"
] |
https://github.com/sympy/sympy/blob/d822fcba181155b85ff2b29fe525adbafb22b448/sympy/polys/polyclasses.py#L472-L475
|
|
kanzure/nanoengineer
|
874e4c9f8a9190f093625b267f9767e19f82e6c4
|
cad/src/dna/model/DnaSegment.py
|
python
|
DnaSegment.readmmp_info_opengroup_setitem
|
( self, key, val, interp )
|
return
|
[extends superclass method]
|
[extends superclass method]
|
[
"[",
"extends",
"superclass",
"method",
"]"
] |
def readmmp_info_opengroup_setitem( self, key, val, interp ):
"""
[extends superclass method]
"""
#bruce 080507 refactoring (split this out of the superclass method)
if key == ['dnaSegment-parameters']:
# val includes all the parameters, separated by commas.
basesPerTurn, duplexRise = val.split(",")
self.setBasesPerTurn(float(basesPerTurn))
self.setDuplexRise(float(duplexRise))
else:
_superclass.readmmp_info_opengroup_setitem( self, key, val, interp)
return
|
[
"def",
"readmmp_info_opengroup_setitem",
"(",
"self",
",",
"key",
",",
"val",
",",
"interp",
")",
":",
"#bruce 080507 refactoring (split this out of the superclass method)",
"if",
"key",
"==",
"[",
"'dnaSegment-parameters'",
"]",
":",
"# val includes all the parameters, separated by commas.",
"basesPerTurn",
",",
"duplexRise",
"=",
"val",
".",
"split",
"(",
"\",\"",
")",
"self",
".",
"setBasesPerTurn",
"(",
"float",
"(",
"basesPerTurn",
")",
")",
"self",
".",
"setDuplexRise",
"(",
"float",
"(",
"duplexRise",
")",
")",
"else",
":",
"_superclass",
".",
"readmmp_info_opengroup_setitem",
"(",
"self",
",",
"key",
",",
"val",
",",
"interp",
")",
"return"
] |
https://github.com/kanzure/nanoengineer/blob/874e4c9f8a9190f093625b267f9767e19f82e6c4/cad/src/dna/model/DnaSegment.py#L791-L804
|
|
taigaio/taiga-back
|
60ccd74c80e12056d0385b2900fd180d0826e21c
|
taiga/base/api/throttling.py
|
python
|
BaseThrottle.allow_request
|
(self, request, view)
|
Return `True` if the request should be allowed, `False` otherwise.
|
Return `True` if the request should be allowed, `False` otherwise.
|
[
"Return",
"True",
"if",
"the",
"request",
"should",
"be",
"allowed",
"False",
"otherwise",
"."
] |
def allow_request(self, request, view):
"""
Return `True` if the request should be allowed, `False` otherwise.
"""
raise NotImplementedError(".allow_request() must be overridden")
|
[
"def",
"allow_request",
"(",
"self",
",",
"request",
",",
"view",
")",
":",
"raise",
"NotImplementedError",
"(",
"\".allow_request() must be overridden\"",
")"
] |
https://github.com/taigaio/taiga-back/blob/60ccd74c80e12056d0385b2900fd180d0826e21c/taiga/base/api/throttling.py#L59-L63
|
||
AzureAD/microsoft-authentication-library-for-python
|
a18c2231896d8a050ad181461928f4dbd818049f
|
msal/application.py
|
python
|
ClientApplication.acquire_token_silent
|
(
self,
scopes, # type: List[str]
account, # type: Optional[Account]
authority=None, # See get_authorization_request_url()
force_refresh=False, # type: Optional[boolean]
claims_challenge=None,
**kwargs)
|
return result if result and "error" not in result else None
|
Acquire an access token for given account, without user interaction.
It is done either by finding a valid access token from cache,
or by finding a valid refresh token from cache and then automatically
use it to redeem a new access token.
This method will combine the cache empty and refresh error
into one return value, `None`.
If your app does not care about the exact token refresh error during
token cache look-up, then this method is easier and recommended.
Internally, this method calls :func:`~acquire_token_silent_with_error`.
:param claims_challenge:
The claims_challenge parameter requests specific claims requested by the resource provider
in the form of a claims_challenge directive in the www-authenticate header to be
returned from the UserInfo Endpoint and/or in the ID Token and/or Access Token.
It is a string of a JSON object which contains lists of claims being requested from these locations.
:return:
- A dict containing no "error" key,
and typically contains an "access_token" key,
if cache lookup succeeded.
- None when cache lookup does not yield a token.
|
Acquire an access token for given account, without user interaction.
|
[
"Acquire",
"an",
"access",
"token",
"for",
"given",
"account",
"without",
"user",
"interaction",
"."
] |
def acquire_token_silent(
self,
scopes, # type: List[str]
account, # type: Optional[Account]
authority=None, # See get_authorization_request_url()
force_refresh=False, # type: Optional[boolean]
claims_challenge=None,
**kwargs):
"""Acquire an access token for given account, without user interaction.
It is done either by finding a valid access token from cache,
or by finding a valid refresh token from cache and then automatically
use it to redeem a new access token.
This method will combine the cache empty and refresh error
into one return value, `None`.
If your app does not care about the exact token refresh error during
token cache look-up, then this method is easier and recommended.
Internally, this method calls :func:`~acquire_token_silent_with_error`.
:param claims_challenge:
The claims_challenge parameter requests specific claims requested by the resource provider
in the form of a claims_challenge directive in the www-authenticate header to be
returned from the UserInfo Endpoint and/or in the ID Token and/or Access Token.
It is a string of a JSON object which contains lists of claims being requested from these locations.
:return:
- A dict containing no "error" key,
and typically contains an "access_token" key,
if cache lookup succeeded.
- None when cache lookup does not yield a token.
"""
result = self.acquire_token_silent_with_error(
scopes, account, authority=authority, force_refresh=force_refresh,
claims_challenge=claims_challenge, **kwargs)
return result if result and "error" not in result else None
|
[
"def",
"acquire_token_silent",
"(",
"self",
",",
"scopes",
",",
"# type: List[str]",
"account",
",",
"# type: Optional[Account]",
"authority",
"=",
"None",
",",
"# See get_authorization_request_url()",
"force_refresh",
"=",
"False",
",",
"# type: Optional[boolean]",
"claims_challenge",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"result",
"=",
"self",
".",
"acquire_token_silent_with_error",
"(",
"scopes",
",",
"account",
",",
"authority",
"=",
"authority",
",",
"force_refresh",
"=",
"force_refresh",
",",
"claims_challenge",
"=",
"claims_challenge",
",",
"*",
"*",
"kwargs",
")",
"return",
"result",
"if",
"result",
"and",
"\"error\"",
"not",
"in",
"result",
"else",
"None"
] |
https://github.com/AzureAD/microsoft-authentication-library-for-python/blob/a18c2231896d8a050ad181461928f4dbd818049f/msal/application.py#L1045-L1081
|
|
lovelylain/pyctp
|
fd304de4b50c4ddc31a4190b1caaeb5dec66bc5d
|
example/ctp/futures/ApiStruct.py
|
python
|
VerifyCustInfo.__init__
|
(self, CustomerName='', IdCardType=ICT_EID, IdentifiedCardNo='', CustType=CUSTT_Person)
|
[] |
def __init__(self, CustomerName='', IdCardType=ICT_EID, IdentifiedCardNo='', CustType=CUSTT_Person):
        self.CustomerName = 'IndividualName' # customer name, char[51]
        self.IdCardType = '' # ID document type, char
        self.IdentifiedCardNo = '' # ID document number, char[51]
self.CustType = ''
|
[
"def",
"__init__",
"(",
"self",
",",
"CustomerName",
"=",
"''",
",",
"IdCardType",
"=",
"ICT_EID",
",",
"IdentifiedCardNo",
"=",
"''",
",",
"CustType",
"=",
"CUSTT_Person",
")",
":",
"self",
".",
"CustomerName",
"=",
"'IndividualName'",
"#客户姓名, char[51]",
"self",
".",
"IdCardType",
"=",
"''",
"#证件类型, char",
"self",
".",
"IdentifiedCardNo",
"=",
"''",
"#证件号码, char[51]",
"self",
".",
"CustType",
"=",
"''"
] |
https://github.com/lovelylain/pyctp/blob/fd304de4b50c4ddc31a4190b1caaeb5dec66bc5d/example/ctp/futures/ApiStruct.py#L5460-L5464
|
||||
asyml/texar-pytorch
|
b83d3ec17e19da08fc5f81996d02f91176e55e54
|
texar/torch/modules/pretrained/t5_utils.py
|
python
|
MultiheadRPRAttention.default_hparams
|
()
|
return {
'initializer': None,
'num_heads': 8,
'output_dim': 512,
'num_units': 512,
'dropout_rate': 0.1,
'use_bias': False,
'name': 'multihead_attention_rpr',
'is_decoder': False,
'relative_attention_num_buckets': 32
}
|
r"""Returns a dictionary of hyperparameters with default values.
.. code-block:: python
{
"initializer": None,
'num_heads': 8,
'output_dim': 512,
'num_units': 512,
'dropout_rate': 0.1,
'use_bias': False,
"name": "multihead_attention",
"is_decoder": False,
"relative_attention_num_buckets": 32
}
Here:
`"initializer"`: dict, optional
Hyperparameters of the default initializer that initializes
variables created in this module.
See :func:`~texar.torch.core.get_initializer` for details.
`"num_heads"`: int
Number of heads for attention calculation.
`"output_dim"`: int
Output dimension of the returned tensor.
`"num_units"`: int
Hidden dimension of the unsplit attention space.
Should be divisible by `"num_heads"`.
`"dropout_rate"`: float
Dropout rate in the attention.
`"use_bias"`: bool
Use bias when projecting the key, value and query.
`"name"`: str
Name of the module.
`"is_decoder"`: bool
To pass in if the attention is for a encoder or decoder block.
`"name"`: relative_attention_num_buckets
If the Attention mechnanism needs to use relative positional
attention bias, then this hparam stores the relative attention
num buckets.
|
r"""Returns a dictionary of hyperparameters with default values.
|
[
"r",
"Returns",
"a",
"dictionary",
"of",
"hyperparameters",
"with",
"default",
"values",
"."
] |
def default_hparams():
r"""Returns a dictionary of hyperparameters with default values.
.. code-block:: python
{
"initializer": None,
'num_heads': 8,
'output_dim': 512,
'num_units': 512,
'dropout_rate': 0.1,
'use_bias': False,
"name": "multihead_attention",
"is_decoder": False,
"relative_attention_num_buckets": 32
}
Here:
`"initializer"`: dict, optional
Hyperparameters of the default initializer that initializes
variables created in this module.
See :func:`~texar.torch.core.get_initializer` for details.
`"num_heads"`: int
Number of heads for attention calculation.
`"output_dim"`: int
Output dimension of the returned tensor.
`"num_units"`: int
Hidden dimension of the unsplit attention space.
Should be divisible by `"num_heads"`.
`"dropout_rate"`: float
Dropout rate in the attention.
`"use_bias"`: bool
Use bias when projecting the key, value and query.
`"name"`: str
Name of the module.
`"is_decoder"`: bool
To pass in if the attention is for a encoder or decoder block.
`"name"`: relative_attention_num_buckets
If the Attention mechnanism needs to use relative positional
attention bias, then this hparam stores the relative attention
num buckets.
"""
return {
'initializer': None,
'num_heads': 8,
'output_dim': 512,
'num_units': 512,
'dropout_rate': 0.1,
'use_bias': False,
'name': 'multihead_attention_rpr',
'is_decoder': False,
'relative_attention_num_buckets': 32
}
|
[
"def",
"default_hparams",
"(",
")",
":",
"return",
"{",
"'initializer'",
":",
"None",
",",
"'num_heads'",
":",
"8",
",",
"'output_dim'",
":",
"512",
",",
"'num_units'",
":",
"512",
",",
"'dropout_rate'",
":",
"0.1",
",",
"'use_bias'",
":",
"False",
",",
"'name'",
":",
"'multihead_attention_rpr'",
",",
"'is_decoder'",
":",
"False",
",",
"'relative_attention_num_buckets'",
":",
"32",
"}"
] |
https://github.com/asyml/texar-pytorch/blob/b83d3ec17e19da08fc5f81996d02f91176e55e54/texar/torch/modules/pretrained/t5_utils.py#L144-L205
|
|
scikit-image/scikit-image
|
ed642e2bc822f362504d24379dee94978d6fa9de
|
skimage/_build.py
|
python
|
_compiled_filename
|
(f)
|
Check for the presence of a .pyx[.in] file as a .c or .cpp.
|
Check for the presence of a .pyx[.in] file as a .c or .cpp.
|
[
"Check",
"for",
"the",
"presence",
"of",
"a",
".",
"pyx",
"[",
".",
"in",
"]",
"file",
"as",
"a",
".",
"c",
"or",
".",
"cpp",
"."
] |
def _compiled_filename(f):
"""Check for the presence of a .pyx[.in] file as a .c or .cpp."""
basename = f.replace('.in', '').replace('.pyx', '')
for ext in ('.c', '.cpp'):
filename = basename + ext
if os.path.exists(filename):
return filename
else:
raise RuntimeError('Cython >= %s is required to build '
'scikit-image from git checkout' %
CYTHON_VERSION)
|
[
"def",
"_compiled_filename",
"(",
"f",
")",
":",
"basename",
"=",
"f",
".",
"replace",
"(",
"'.in'",
",",
"''",
")",
".",
"replace",
"(",
"'.pyx'",
",",
"''",
")",
"for",
"ext",
"in",
"(",
"'.c'",
",",
"'.cpp'",
")",
":",
"filename",
"=",
"basename",
"+",
"ext",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"return",
"filename",
"else",
":",
"raise",
"RuntimeError",
"(",
"'Cython >= %s is required to build '",
"'scikit-image from git checkout'",
"%",
"CYTHON_VERSION",
")"
] |
https://github.com/scikit-image/scikit-image/blob/ed642e2bc822f362504d24379dee94978d6fa9de/skimage/_build.py#L16-L26
|
||
bsmali4/xssfork
|
515b45dfb0edb9263da544ad91fc1cb5f410bfd1
|
thirdparty/requests/packages/urllib3/packages/six.py
|
python
|
iteritems
|
(d)
|
return iter(getattr(d, _iteritems)())
|
Return an iterator over the (key, value) pairs of a dictionary.
|
Return an iterator over the (key, value) pairs of a dictionary.
|
[
"Return",
"an",
"iterator",
"over",
"the",
"(",
"key",
"value",
")",
"pairs",
"of",
"a",
"dictionary",
"."
] |
def iteritems(d):
"""Return an iterator over the (key, value) pairs of a dictionary."""
return iter(getattr(d, _iteritems)())
|
[
"def",
"iteritems",
"(",
"d",
")",
":",
"return",
"iter",
"(",
"getattr",
"(",
"d",
",",
"_iteritems",
")",
"(",
")",
")"
] |
https://github.com/bsmali4/xssfork/blob/515b45dfb0edb9263da544ad91fc1cb5f410bfd1/thirdparty/requests/packages/urllib3/packages/six.py#L271-L273
|
|
INK-USC/KagNet
|
b386661ac5841774b9d17cc132e991a7bef3c5ef
|
baselines/pytorch-pretrained-BERT/pytorch_pretrained_bert/modeling_gpt2.py
|
python
|
Attention.merge_heads
|
(self, x)
|
return x.view(*new_x_shape)
|
[] |
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape)
|
[
"def",
"merge_heads",
"(",
"self",
",",
"x",
")",
":",
"x",
"=",
"x",
".",
"permute",
"(",
"0",
",",
"2",
",",
"1",
",",
"3",
")",
".",
"contiguous",
"(",
")",
"new_x_shape",
"=",
"x",
".",
"size",
"(",
")",
"[",
":",
"-",
"2",
"]",
"+",
"(",
"x",
".",
"size",
"(",
"-",
"2",
")",
"*",
"x",
".",
"size",
"(",
"-",
"1",
")",
",",
")",
"return",
"x",
".",
"view",
"(",
"*",
"new_x_shape",
")"
] |
https://github.com/INK-USC/KagNet/blob/b386661ac5841774b9d17cc132e991a7bef3c5ef/baselines/pytorch-pretrained-BERT/pytorch_pretrained_bert/modeling_gpt2.py#L222-L225
|
|||
bruderstein/PythonScript
|
df9f7071ddf3a079e3a301b9b53a6dc78cf1208f
|
PythonLib/full/multiprocessing/pool.py
|
python
|
rebuild_exc
|
(exc, tb)
|
return exc
|
[] |
def rebuild_exc(exc, tb):
exc.__cause__ = RemoteTraceback(tb)
return exc
|
[
"def",
"rebuild_exc",
"(",
"exc",
",",
"tb",
")",
":",
"exc",
".",
"__cause__",
"=",
"RemoteTraceback",
"(",
"tb",
")",
"return",
"exc"
] |
https://github.com/bruderstein/PythonScript/blob/df9f7071ddf3a079e3a301b9b53a6dc78cf1208f/PythonLib/full/multiprocessing/pool.py#L72-L74
|
|||
Chaffelson/nipyapi
|
d3b186fd701ce308c2812746d98af9120955e810
|
nipyapi/nifi/models/versioned_controller_service.py
|
python
|
VersionedControllerService.group_identifier
|
(self, group_identifier)
|
Sets the group_identifier of this VersionedControllerService.
The ID of the Process Group that this component belongs to
:param group_identifier: The group_identifier of this VersionedControllerService.
:type: str
|
Sets the group_identifier of this VersionedControllerService.
The ID of the Process Group that this component belongs to
|
[
"Sets",
"the",
"group_identifier",
"of",
"this",
"VersionedControllerService",
".",
"The",
"ID",
"of",
"the",
"Process",
"Group",
"that",
"this",
"component",
"belongs",
"to"
] |
def group_identifier(self, group_identifier):
"""
Sets the group_identifier of this VersionedControllerService.
The ID of the Process Group that this component belongs to
:param group_identifier: The group_identifier of this VersionedControllerService.
:type: str
"""
self._group_identifier = group_identifier
|
[
"def",
"group_identifier",
"(",
"self",
",",
"group_identifier",
")",
":",
"self",
".",
"_group_identifier",
"=",
"group_identifier"
] |
https://github.com/Chaffelson/nipyapi/blob/d3b186fd701ce308c2812746d98af9120955e810/nipyapi/nifi/models/versioned_controller_service.py#L375-L384
|
||
timmahrt/praatIO
|
50dbff6efb33a93c675d993b6deb08505e45e094
|
praatio/praatio_scripts.py
|
python
|
_shiftTimes
|
(
tg: textgrid.Textgrid, timeV: float, newTimeV: float
)
|
return tg
|
Change all instances of timeV in the textgrid to newTimeV
These are meant to be small changes. No checks are done to see
if the new interval steps on other intervals
|
Change all instances of timeV in the textgrid to newTimeV
|
[
"Change",
"all",
"instances",
"of",
"timeV",
"in",
"the",
"textgrid",
"to",
"newTimeV"
] |
def _shiftTimes(
tg: textgrid.Textgrid, timeV: float, newTimeV: float
) -> textgrid.Textgrid:
"""Change all instances of timeV in the textgrid to newTimeV
These are meant to be small changes. No checks are done to see
if the new interval steps on other intervals
"""
tg = tg.new()
for tierName in tg.tierNameList:
tier = tg.tierDict[tierName]
if isinstance(tier, textgrid.IntervalTier):
entryList = [
entry
for entry in tier.entryList
if entry[0] == timeV or entry[1] == timeV
]
insertEntryList = []
for entry in entryList:
if entry[0] == timeV:
newStart, newStop = newTimeV, entry[1]
elif entry[1] == timeV:
newStart, newStop = entry[0], newTimeV
tier.deleteEntry(entry)
insertEntryList.append((newStart, newStop, entry[2]))
for entry in insertEntryList:
tier.insertEntry(entry)
elif isinstance(tier, textgrid.PointTier):
entryList = [entry for entry in tier.entryList if entry[0] == timeV]
for entry in entryList:
tier.deleteEntry(entry)
tier.insertEntry(Point(newTimeV, entry[1]))
return tg
|
[
"def",
"_shiftTimes",
"(",
"tg",
":",
"textgrid",
".",
"Textgrid",
",",
"timeV",
":",
"float",
",",
"newTimeV",
":",
"float",
")",
"->",
"textgrid",
".",
"Textgrid",
":",
"tg",
"=",
"tg",
".",
"new",
"(",
")",
"for",
"tierName",
"in",
"tg",
".",
"tierNameList",
":",
"tier",
"=",
"tg",
".",
"tierDict",
"[",
"tierName",
"]",
"if",
"isinstance",
"(",
"tier",
",",
"textgrid",
".",
"IntervalTier",
")",
":",
"entryList",
"=",
"[",
"entry",
"for",
"entry",
"in",
"tier",
".",
"entryList",
"if",
"entry",
"[",
"0",
"]",
"==",
"timeV",
"or",
"entry",
"[",
"1",
"]",
"==",
"timeV",
"]",
"insertEntryList",
"=",
"[",
"]",
"for",
"entry",
"in",
"entryList",
":",
"if",
"entry",
"[",
"0",
"]",
"==",
"timeV",
":",
"newStart",
",",
"newStop",
"=",
"newTimeV",
",",
"entry",
"[",
"1",
"]",
"elif",
"entry",
"[",
"1",
"]",
"==",
"timeV",
":",
"newStart",
",",
"newStop",
"=",
"entry",
"[",
"0",
"]",
",",
"newTimeV",
"tier",
".",
"deleteEntry",
"(",
"entry",
")",
"insertEntryList",
".",
"append",
"(",
"(",
"newStart",
",",
"newStop",
",",
"entry",
"[",
"2",
"]",
")",
")",
"for",
"entry",
"in",
"insertEntryList",
":",
"tier",
".",
"insertEntry",
"(",
"entry",
")",
"elif",
"isinstance",
"(",
"tier",
",",
"textgrid",
".",
"PointTier",
")",
":",
"entryList",
"=",
"[",
"entry",
"for",
"entry",
"in",
"tier",
".",
"entryList",
"if",
"entry",
"[",
"0",
"]",
"==",
"timeV",
"]",
"for",
"entry",
"in",
"entryList",
":",
"tier",
".",
"deleteEntry",
"(",
"entry",
")",
"tier",
".",
"insertEntry",
"(",
"Point",
"(",
"newTimeV",
",",
"entry",
"[",
"1",
"]",
")",
")",
"return",
"tg"
] |
https://github.com/timmahrt/praatIO/blob/50dbff6efb33a93c675d993b6deb08505e45e094/praatio/praatio_scripts.py#L30-L66
|
|
bigaidream-projects/drmad
|
a4bb6010595d956f29c5a42a095bab76a60b29eb
|
cpu_ver/hypergrad/transforms.py
|
python
|
translate
|
(Lx, Ly, nx, ny)
|
return (x_transform * y_transform).reshape([Lx * Ly, Lx * Ly])
|
[] |
def translate(Lx, Ly, nx, ny):
x_transform = np.diag(np.ones(Lx - abs(nx)), -nx).reshape([1, Lx, 1, Lx])
y_transform = np.diag(np.ones(Ly - abs(ny)), -ny).reshape([Ly, 1, Ly, 1])
return (x_transform * y_transform).reshape([Lx * Ly, Lx * Ly])
|
[
"def",
"translate",
"(",
"Lx",
",",
"Ly",
",",
"nx",
",",
"ny",
")",
":",
"x_transform",
"=",
"np",
".",
"diag",
"(",
"np",
".",
"ones",
"(",
"Lx",
"-",
"abs",
"(",
"nx",
")",
")",
",",
"-",
"nx",
")",
".",
"reshape",
"(",
"[",
"1",
",",
"Lx",
",",
"1",
",",
"Lx",
"]",
")",
"y_transform",
"=",
"np",
".",
"diag",
"(",
"np",
".",
"ones",
"(",
"Ly",
"-",
"abs",
"(",
"ny",
")",
")",
",",
"-",
"ny",
")",
".",
"reshape",
"(",
"[",
"Ly",
",",
"1",
",",
"Ly",
",",
"1",
"]",
")",
"return",
"(",
"x_transform",
"*",
"y_transform",
")",
".",
"reshape",
"(",
"[",
"Lx",
"*",
"Ly",
",",
"Lx",
"*",
"Ly",
"]",
")"
] |
https://github.com/bigaidream-projects/drmad/blob/a4bb6010595d956f29c5a42a095bab76a60b29eb/cpu_ver/hypergrad/transforms.py#L3-L6
|
|||
DrkSephy/django-hackathon-starter
|
2b8a57379271c42b78ac85ff35bae84c1e1c7771
|
hackathon_starter/hackathon/scripts/twitter.py
|
python
|
TwitterOauthClient.get_tweets
|
(self, tweet)
|
return content['statuses'], json.dumps(jsonlist)
|
Get tweets of relevant search query.
|
Get tweets of relevant search query.
|
[
"Get",
"tweets",
"of",
"relevant",
"search",
"query",
"."
] |
def get_tweets(self, tweet):
'''
Get tweets of relevant search query.
'''
method = 'get'
link = 'https://api.twitter.com/1.1/search/tweets.json'
linkParameters = {'q': tweet, 'count': '100', 'result_type': 'popular'}
oauthParameters = getOauthParameters(
self.consumer_key,
self.access_token
)
oauthParameters['oauth_signature'] = generateSignature(
method,
link,
linkParameters,
oauthParameters,
self.consumer_secret,
self.access_token_secret
)
headers = {'Authorization': createAuthHeader(oauthParameters)}
link += '?' + urllib.urlencode(linkParameters)
req = requests.get(link, headers=headers)
if int(req.status_code) != 200:
raise Exception('Invalid response %s' %req.status_code)
content = json2.loads(req.content)
jsonlist = {}
for contrib in content['statuses']:
for e in contrib:
if e == 'retweet_count':
if contrib['user']['screen_name'] in jsonlist:
jsonlist[contrib['user']['screen_name']][contrib[e]] = str(contrib['text'].encode('ascii', 'ignore'))
else:
jsonlist[contrib['user']['screen_name']] = { contrib[e]:str(contrib['text'].encode('ascii', 'ignore'))}
return content['statuses'], json.dumps(jsonlist)
|
[
"def",
"get_tweets",
"(",
"self",
",",
"tweet",
")",
":",
"method",
"=",
"'get'",
"link",
"=",
"'https://api.twitter.com/1.1/search/tweets.json'",
"linkParameters",
"=",
"{",
"'q'",
":",
"tweet",
",",
"'count'",
":",
"'100'",
",",
"'result_type'",
":",
"'popular'",
"}",
"oauthParameters",
"=",
"getOauthParameters",
"(",
"self",
".",
"consumer_key",
",",
"self",
".",
"access_token",
")",
"oauthParameters",
"[",
"'oauth_signature'",
"]",
"=",
"generateSignature",
"(",
"method",
",",
"link",
",",
"linkParameters",
",",
"oauthParameters",
",",
"self",
".",
"consumer_secret",
",",
"self",
".",
"access_token_secret",
")",
"headers",
"=",
"{",
"'Authorization'",
":",
"createAuthHeader",
"(",
"oauthParameters",
")",
"}",
"link",
"+=",
"'?'",
"+",
"urllib",
".",
"urlencode",
"(",
"linkParameters",
")",
"req",
"=",
"requests",
".",
"get",
"(",
"link",
",",
"headers",
"=",
"headers",
")",
"if",
"int",
"(",
"req",
".",
"status_code",
")",
"!=",
"200",
":",
"raise",
"Exception",
"(",
"'Invalid response %s'",
"%",
"req",
".",
"status_code",
")",
"content",
"=",
"json2",
".",
"loads",
"(",
"req",
".",
"content",
")",
"jsonlist",
"=",
"{",
"}",
"for",
"contrib",
"in",
"content",
"[",
"'statuses'",
"]",
":",
"for",
"e",
"in",
"contrib",
":",
"if",
"e",
"==",
"'retweet_count'",
":",
"if",
"contrib",
"[",
"'user'",
"]",
"[",
"'screen_name'",
"]",
"in",
"jsonlist",
":",
"jsonlist",
"[",
"contrib",
"[",
"'user'",
"]",
"[",
"'screen_name'",
"]",
"]",
"[",
"contrib",
"[",
"e",
"]",
"]",
"=",
"str",
"(",
"contrib",
"[",
"'text'",
"]",
".",
"encode",
"(",
"'ascii'",
",",
"'ignore'",
")",
")",
"else",
":",
"jsonlist",
"[",
"contrib",
"[",
"'user'",
"]",
"[",
"'screen_name'",
"]",
"]",
"=",
"{",
"contrib",
"[",
"e",
"]",
":",
"str",
"(",
"contrib",
"[",
"'text'",
"]",
".",
"encode",
"(",
"'ascii'",
",",
"'ignore'",
")",
")",
"}",
"return",
"content",
"[",
"'statuses'",
"]",
",",
"json",
".",
"dumps",
"(",
"jsonlist",
")"
] |
https://github.com/DrkSephy/django-hackathon-starter/blob/2b8a57379271c42b78ac85ff35bae84c1e1c7771/hackathon_starter/hackathon/scripts/twitter.py#L119-L162
|
|
007gzs/dingtalk-sdk
|
7979da2e259fdbc571728cae2425a04dbc65850a
|
dingtalk/client/api/taobao.py
|
python
|
TbJuShiTa.taobao_jds_refund_traces_get
|
(
self,
refund_id
)
|
return self._top_request(
"taobao.jds.refund.traces.get",
{
"refund_id": refund_id
}
)
|
Get the trace details of a single refund.
Gets the refund information for the full transaction chain shared through Jushita (聚石塔).
Documentation: https://open-doc.dingtalk.com/docs/api.htm?apiId=23845
:param refund_id: the Taobao refund ID
|
Get the trace details of a single refund.
Gets the refund information for the full transaction chain shared through Jushita (聚石塔).
Documentation: https://open-doc.dingtalk.com/docs/api.htm?apiId=23845
|
[
"获取单条退款跟踪详情",
"获取聚石塔数据共享的交易全链路的退款信息",
"文档地址:https",
":",
"//",
"open",
"-",
"doc",
".",
"dingtalk",
".",
"com",
"/",
"docs",
"/",
"api",
".",
"htm?apiId",
"=",
"23845"
] |
def taobao_jds_refund_traces_get(
self,
refund_id
):
"""
        Get the trace details of a single refund.
        Gets the refund information for the full transaction chain shared through Jushita (聚石塔).
        Documentation: https://open-doc.dingtalk.com/docs/api.htm?apiId=23845
        :param refund_id: the Taobao refund ID
"""
return self._top_request(
"taobao.jds.refund.traces.get",
{
"refund_id": refund_id
}
)
|
[
"def",
"taobao_jds_refund_traces_get",
"(",
"self",
",",
"refund_id",
")",
":",
"return",
"self",
".",
"_top_request",
"(",
"\"taobao.jds.refund.traces.get\"",
",",
"{",
"\"refund_id\"",
":",
"refund_id",
"}",
")"
] |
https://github.com/007gzs/dingtalk-sdk/blob/7979da2e259fdbc571728cae2425a04dbc65850a/dingtalk/client/api/taobao.py#L32734-L32750
|
|
hatRiot/zarp
|
2e772350a01c2aeed3f4da9685cd0cc5d6b3ecad
|
src/lib/libmproxy/contrib/pyparsing.py
|
python
|
srange
|
(s)
|
r"""Helper to easily define string ranges for use in Word construction. Borrows
syntax from regexp '[]' string range definitions::
srange("[0-9]") -> "0123456789"
srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
The input string must be enclosed in []'s, and the returned string is the expanded
character set joined into a single string.
The values enclosed in the []'s may be::
a single character
an escaped character with a leading backslash (such as \- or \])
an escaped hex character with a leading '\0x' (\0x21, which is a '!' character)
an escaped octal character with a leading '\0' (\041, which is a '!' character)
a range of any of the above, separated by a dash ('a-z', etc.)
any combination of the above ('aeiouy', 'a-zA-Z0-9_$', etc.)
|
r"""Helper to easily define string ranges for use in Word construction. Borrows
syntax from regexp '[]' string range definitions::
srange("[0-9]") -> "0123456789"
srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
The input string must be enclosed in []'s, and the returned string is the expanded
character set joined into a single string.
The values enclosed in the []'s may be::
a single character
an escaped character with a leading backslash (such as \- or \])
an escaped hex character with a leading '\0x' (\0x21, which is a '!' character)
an escaped octal character with a leading '\0' (\041, which is a '!' character)
a range of any of the above, separated by a dash ('a-z', etc.)
any combination of the above ('aeiouy', 'a-zA-Z0-9_$', etc.)
|
[
"r",
"Helper",
"to",
"easily",
"define",
"string",
"ranges",
"for",
"use",
"in",
"Word",
"construction",
".",
"Borrows",
"syntax",
"from",
"regexp",
"[]",
"string",
"range",
"definitions",
"::",
"srange",
"(",
"[",
"0",
"-",
"9",
"]",
")",
"-",
">",
"0123456789",
"srange",
"(",
"[",
"a",
"-",
"z",
"]",
")",
"-",
">",
"abcdefghijklmnopqrstuvwxyz",
"srange",
"(",
"[",
"a",
"-",
"z$_",
"]",
")",
"-",
">",
"abcdefghijklmnopqrstuvwxyz$_",
"The",
"input",
"string",
"must",
"be",
"enclosed",
"in",
"[]",
"s",
"and",
"the",
"returned",
"string",
"is",
"the",
"expanded",
"character",
"set",
"joined",
"into",
"a",
"single",
"string",
".",
"The",
"values",
"enclosed",
"in",
"the",
"[]",
"s",
"may",
"be",
"::",
"a",
"single",
"character",
"an",
"escaped",
"character",
"with",
"a",
"leading",
"backslash",
"(",
"such",
"as",
"\\",
"-",
"or",
"\\",
"]",
")",
"an",
"escaped",
"hex",
"character",
"with",
"a",
"leading",
"\\",
"0x",
"(",
"\\",
"0x21",
"which",
"is",
"a",
"!",
"character",
")",
"an",
"escaped",
"octal",
"character",
"with",
"a",
"leading",
"\\",
"0",
"(",
"\\",
"041",
"which",
"is",
"a",
"!",
"character",
")",
"a",
"range",
"of",
"any",
"of",
"the",
"above",
"separated",
"by",
"a",
"dash",
"(",
"a",
"-",
"z",
"etc",
".",
")",
"any",
"combination",
"of",
"the",
"above",
"(",
"aeiouy",
"a",
"-",
"zA",
"-",
"Z0",
"-",
"9_$",
"etc",
".",
")"
] |
def srange(s):
r"""Helper to easily define string ranges for use in Word construction. Borrows
syntax from regexp '[]' string range definitions::
srange("[0-9]") -> "0123456789"
srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
The input string must be enclosed in []'s, and the returned string is the expanded
character set joined into a single string.
The values enclosed in the []'s may be::
a single character
an escaped character with a leading backslash (such as \- or \])
an escaped hex character with a leading '\0x' (\0x21, which is a '!' character)
an escaped octal character with a leading '\0' (\041, which is a '!' character)
a range of any of the above, separated by a dash ('a-z', etc.)
any combination of the above ('aeiouy', 'a-zA-Z0-9_$', etc.)
"""
try:
return "".join([_expanded(part) for part in _reBracketExpr.parseString(s).body])
except:
return ""
|
[
"def",
"srange",
"(",
"s",
")",
":",
"try",
":",
"return",
"\"\"",
".",
"join",
"(",
"[",
"_expanded",
"(",
"part",
")",
"for",
"part",
"in",
"_reBracketExpr",
".",
"parseString",
"(",
"s",
")",
".",
"body",
"]",
")",
"except",
":",
"return",
"\"\""
] |
https://github.com/hatRiot/zarp/blob/2e772350a01c2aeed3f4da9685cd0cc5d6b3ecad/src/lib/libmproxy/contrib/pyparsing.py#L3310-L3329
|
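A quick usage sketch for the srange record above (a hedged illustration, assuming the pyparsing package is installed): it expands a regex-style character class into the literal character-set string, which is typically fed straight into Word.

from pyparsing import Word, srange

# srange expands a bracketed class into the literal set of characters
hex_chars = srange("[0-9a-fA-F]")            # -> "0123456789abcdefABCDEF"
hex_word = Word(hex_chars)                   # a token made only of those characters
print(hex_word.parseString("deadBEEF")[0])   # -> "deadBEEF"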
||
spectacles/CodeComplice
|
8ca8ee4236f72b58caa4209d2fbd5fa56bd31d62
|
libs/SilverCity/PostScript.py
|
python
|
PostScriptHandler.__init__
|
(self)
|
[] |
def __init__(self):
DispatchHandler.__init__(self, 'SCE_PS')
|
[
"def",
"__init__",
"(",
"self",
")",
":",
"DispatchHandler",
".",
"__init__",
"(",
"self",
",",
"'SCE_PS'",
")"
] |
https://github.com/spectacles/CodeComplice/blob/8ca8ee4236f72b58caa4209d2fbd5fa56bd31d62/libs/SilverCity/PostScript.py#L24-L25
|
||||
mandiant/flare-qdb
|
daf2dbf4cdc3b76f07203c44cd993d8a9a8c529f
|
flareqdb/__init__.py
|
python
|
Qdb._conout_pc
|
(self, s)
|
Output to the logging object that represents the console, including
the symbolic (if available) or hexadecimal program counter in the
output.
|
Output to the logging object that represents the console, including
the symbolic (if available) or hexadecimal program counter in the
output.
|
[
"Output",
"to",
"the",
"logging",
"object",
"that",
"represents",
"the",
"console",
"including",
"the",
"symbolic",
"(",
"if",
"available",
")",
"or",
"hexadecimal",
"program",
"counter",
"in",
"the",
"output",
"."
] |
def _conout_pc(self, s):
"""Output to the logging object that represents the console, including
the symbolic (if available) or hexadecimal program counter in the
output.
"""
if self._con:
prettypc = self._pretty_pc()
self._con.info('[' + prettypc + ']: ' + str(s))
|
[
"def",
"_conout_pc",
"(",
"self",
",",
"s",
")",
":",
"if",
"self",
".",
"_con",
":",
"prettypc",
"=",
"self",
".",
"_pretty_pc",
"(",
")",
"self",
".",
"_con",
".",
"info",
"(",
"'['",
"+",
"prettypc",
"+",
"']: '",
"+",
"str",
"(",
"s",
")",
")"
] |
https://github.com/mandiant/flare-qdb/blob/daf2dbf4cdc3b76f07203c44cd993d8a9a8c529f/flareqdb/__init__.py#L1926-L1933
|
||
sisl/MADRL
|
4a6d780e8cf111f312b757cca1b9f83441644958
|
madrl_environments/box_carrying.py
|
python
|
BoxCarrying._intelligent_leader_strategy
|
(self)
|
[] |
def _intelligent_leader_strategy(self):
def rotate(vec, angle):
assert vec.shape == (2,)
return np.array([np.cos(angle) * vec[0] - np.sin(angle) * vec[1],
np.sin(angle) * vec[0] + np.cos(angle) * vec[1]])
if self.stage == 0:
if self.count == TIME_INTERVAL:
self.force_NR_2 = self._init_force()
# Moved?
if self.count == TIME_INTERVAL - 2:
if np.linalg.norm(self.objv[-1]) > 0.02:
if all(self.robot_sum_force * self.objacc >= 0):
self.stage += 1
self._is_static = False
logger.info('Started Moving, initial_acc: {}'.format(self.objacc))
return
self._check_static_fric()
path_stage = 0
if path_stage == 0:
pos = np.array(self.obj.getPosition()[:2])
vn = pos - np.array([2.5, 0]) # Circle center?
v_to_path = vn
vn = (vn * 2.5) / np.linalg.norm(vn)
v_to_path = vn - v_to_path
v_path = rotate(vn, -np.pi / 2)
v_syn = v_path + 4. * v_to_path
v_syn /= np.linalg.norm(v_syn)
self._f_leader_set(np.linalg.norm(v_syn), self._vec_ang(v_syn))
if pos[0] >= 2.5:
path_stage = 1
|
[
"def",
"_intelligent_leader_strategy",
"(",
"self",
")",
":",
"def",
"rotate",
"(",
"vec",
",",
"angle",
")",
":",
"assert",
"vec",
".",
"shape",
"==",
"(",
"2",
",",
")",
"return",
"np",
".",
"array",
"(",
"[",
"np",
".",
"cos",
"(",
"angle",
")",
"*",
"vec",
"[",
"0",
"]",
"-",
"np",
".",
"sin",
"(",
"angle",
")",
"*",
"vec",
"[",
"1",
"]",
",",
"np",
".",
"sin",
"(",
"angle",
")",
"*",
"vec",
"[",
"0",
"]",
"+",
"np",
".",
"cos",
"(",
"angle",
")",
"*",
"vec",
"[",
"1",
"]",
"]",
")",
"if",
"self",
".",
"stage",
"==",
"0",
":",
"if",
"self",
".",
"count",
"==",
"TIME_INTERVAL",
":",
"self",
".",
"force_NR_2",
"=",
"self",
".",
"_init_force",
"(",
")",
"# Moved?",
"if",
"self",
".",
"count",
"==",
"TIME_INTERVAL",
"-",
"2",
":",
"if",
"np",
".",
"linalg",
".",
"norm",
"(",
"self",
".",
"objv",
"[",
"-",
"1",
"]",
")",
">",
"0.02",
":",
"if",
"all",
"(",
"self",
".",
"robot_sum_force",
"*",
"self",
".",
"objacc",
">=",
"0",
")",
":",
"self",
".",
"stage",
"+=",
"1",
"self",
".",
"_is_static",
"=",
"False",
"logger",
".",
"info",
"(",
"'Started Moving, initial_acc: {}'",
".",
"format",
"(",
"self",
".",
"objacc",
")",
")",
"return",
"self",
".",
"_check_static_fric",
"(",
")",
"path_stage",
"=",
"0",
"if",
"path_stage",
"==",
"0",
":",
"pos",
"=",
"np",
".",
"array",
"(",
"self",
".",
"obj",
".",
"getPosition",
"(",
")",
"[",
":",
"2",
"]",
")",
"vn",
"=",
"pos",
"-",
"np",
".",
"array",
"(",
"[",
"2.5",
",",
"0",
"]",
")",
"# Circle center?",
"v_to_path",
"=",
"vn",
"vn",
"=",
"(",
"vn",
"*",
"2.5",
")",
"/",
"np",
".",
"linalg",
".",
"norm",
"(",
"vn",
")",
"v_to_path",
"=",
"vn",
"-",
"v_to_path",
"v_path",
"=",
"rotate",
"(",
"vn",
",",
"-",
"np",
".",
"pi",
"/",
"2",
")",
"v_syn",
"=",
"v_path",
"+",
"4.",
"*",
"v_to_path",
"v_syn",
"/=",
"np",
".",
"linalg",
".",
"norm",
"(",
"v_syn",
")",
"self",
".",
"_f_leader_set",
"(",
"np",
".",
"linalg",
".",
"norm",
"(",
"v_syn",
")",
",",
"self",
".",
"_vec_ang",
"(",
"v_syn",
")",
")",
"if",
"pos",
"[",
"0",
"]",
">=",
"2.5",
":",
"path_stage",
"=",
"1"
] |
https://github.com/sisl/MADRL/blob/4a6d780e8cf111f312b757cca1b9f83441644958/madrl_environments/box_carrying.py#L515-L549
|
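The rotate helper nested in _intelligent_leader_strategy above is the standard 2D rotation matrix applied component-wise; a self-contained sketch of the same math:

import numpy as np

def rotate(vec, angle):
    # counter-clockwise rotation of a 2D vector by `angle` radians,
    # the same formula as the nested helper in the record above
    c, s = np.cos(angle), np.sin(angle)
    return np.array([c * vec[0] - s * vec[1],
                     s * vec[0] + c * vec[1]])

print(rotate(np.array([1.0, 0.0]), np.pi / 2))   # ~ [0., 1.]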
||||
microsoft/debugpy
|
be8dd607f6837244e0b565345e497aff7a0c08bf
|
src/debugpy/_vendored/pydevd/stubs/_django_manager_body.py
|
python
|
create
|
(self, *args, **kwargs)
|
Creates a new object with the given kwargs, saving it to the database
and returning the created object.
|
Creates a new object with the given kwargs, saving it to the database
and returning the created object.
|
[
"Creates",
"a",
"new",
"object",
"with",
"the",
"given",
"kwargs",
"saving",
"it",
"to",
"the",
"database",
"and",
"returning",
"the",
"created",
"object",
"."
] |
def create(self, *args, **kwargs):
"""
Creates a new object with the given kwargs, saving it to the database
and returning the created object.
"""
|
[
"def",
"create",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":"
] |
https://github.com/microsoft/debugpy/blob/be8dd607f6837244e0b565345e497aff7a0c08bf/src/debugpy/_vendored/pydevd/stubs/_django_manager_body.py#L145-L149
|
||
guotong1988/BERT-GPU
|
3ee2fb74549387d60870ce4f1c65bf1311e378f6
|
tokenization.py
|
python
|
_is_whitespace
|
(char)
|
return False
|
Checks whether `chars` is a whitespace character.
|
Checks whether `chars` is a whitespace character.
|
[
"Checks",
"whether",
"chars",
"is",
"a",
"whitespace",
"character",
"."
] |
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
|
[
"def",
"_is_whitespace",
"(",
"char",
")",
":",
"# \\t, \\n, and \\r are technically contorl characters but we treat them",
"# as whitespace since they are generally considered as such.",
"if",
"char",
"==",
"\" \"",
"or",
"char",
"==",
"\"\\t\"",
"or",
"char",
"==",
"\"\\n\"",
"or",
"char",
"==",
"\"\\r\"",
":",
"return",
"True",
"cat",
"=",
"unicodedata",
".",
"category",
"(",
"char",
")",
"if",
"cat",
"==",
"\"Zs\"",
":",
"return",
"True",
"return",
"False"
] |
https://github.com/guotong1988/BERT-GPU/blob/3ee2fb74549387d60870ce4f1c65bf1311e378f6/tokenization.py#L362-L371
|
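A standalone sketch of the whitespace check above: the explicit control characters are treated as whitespace, plus anything Unicode classifies as a space separator ("Zs").

import unicodedata

def is_whitespace(char):
    # mirrors the BERT tokenizer check in the record above
    if char in (" ", "\t", "\n", "\r"):
        return True
    return unicodedata.category(char) == "Zs"

print(is_whitespace("\u00a0"))   # no-break space, category Zs -> True
print(is_whitespace("\u200b"))   # zero-width space, category Cf -> False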
|
pallets/jinja
|
077b7918a7642ff6742fe48a32e54d7875140894
|
src/jinja2/filters.py
|
python
|
do_int
|
(value: t.Any, default: int = 0, base: int = 10)
|
Convert the value into an integer. If the
conversion doesn't work it will return ``0``. You can
override this default using the first parameter. You
can also override the default base (10) in the second
parameter, which handles input with prefixes such as
0b, 0o and 0x for bases 2, 8 and 16 respectively.
The base is ignored for decimal numbers and non-string values.
|
Convert the value into an integer. If the
conversion doesn't work it will return ``0``. You can
override this default using the first parameter. You
can also override the default base (10) in the second
parameter, which handles input with prefixes such as
0b, 0o and 0x for bases 2, 8 and 16 respectively.
The base is ignored for decimal numbers and non-string values.
|
[
"Convert",
"the",
"value",
"into",
"an",
"integer",
".",
"If",
"the",
"conversion",
"doesn",
"t",
"work",
"it",
"will",
"return",
"0",
".",
"You",
"can",
"override",
"this",
"default",
"using",
"the",
"first",
"parameter",
".",
"You",
"can",
"also",
"override",
"the",
"default",
"base",
"(",
"10",
")",
"in",
"the",
"second",
"parameter",
"which",
"handles",
"input",
"with",
"prefixes",
"such",
"as",
"0b",
"0o",
"and",
"0x",
"for",
"bases",
"2",
"8",
"and",
"16",
"respectively",
".",
"The",
"base",
"is",
"ignored",
"for",
"decimal",
"numbers",
"and",
"non",
"-",
"string",
"values",
"."
] |
def do_int(value: t.Any, default: int = 0, base: int = 10) -> int:
"""Convert the value into an integer. If the
conversion doesn't work it will return ``0``. You can
override this default using the first parameter. You
can also override the default base (10) in the second
parameter, which handles input with prefixes such as
0b, 0o and 0x for bases 2, 8 and 16 respectively.
The base is ignored for decimal numbers and non-string values.
"""
try:
if isinstance(value, str):
return int(value, base)
return int(value)
except (TypeError, ValueError):
# this quirk is necessary so that "42.23"|int gives 42.
try:
return int(float(value))
except (TypeError, ValueError):
return default
|
[
"def",
"do_int",
"(",
"value",
":",
"t",
".",
"Any",
",",
"default",
":",
"int",
"=",
"0",
",",
"base",
":",
"int",
"=",
"10",
")",
"->",
"int",
":",
"try",
":",
"if",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"return",
"int",
"(",
"value",
",",
"base",
")",
"return",
"int",
"(",
"value",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"# this quirk is necessary so that \"42.23\"|int gives 42.",
"try",
":",
"return",
"int",
"(",
"float",
"(",
"value",
")",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"return",
"default"
] |
https://github.com/pallets/jinja/blob/077b7918a7642ff6742fe48a32e54d7875140894/src/jinja2/filters.py#L914-L933
|
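do_int backs Jinja's |int filter; a quick sketch of the behaviors the docstring describes (base handling for string inputs, the float fallback for values like "42.23", and the overridable default):

from jinja2 import Template

print(Template("{{ '0x1A' | int(base=16) }}").render())   # -> 26
print(Template("{{ '42.23' | int }}").render())           # -> 42 (float fallback)
print(Template("{{ 'nope' | int(-1) }}").render())        # -> -1 (default)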
||
kmpm/nodemcu-uploader
|
6178f40fff2deadd56b5bc474f9b4475ef444b37
|
nodemcu_uploader/uploader.py
|
python
|
Uploader.file_do
|
(self, filename)
|
return res
|
Execute a file on the device using 'do'
|
Execute a file on the device using 'do'
|
[
"Execute",
"a",
"file",
"on",
"the",
"device",
"using",
"do"
] |
def file_do(self, filename):
"""Execute a file on the device using 'do'"""
log.info('Executing '+filename)
res = self.__exchange('dofile("'+filename+'")')
log.info(res)
return res
|
[
"def",
"file_do",
"(",
"self",
",",
"filename",
")",
":",
"log",
".",
"info",
"(",
"'Executing '",
"+",
"filename",
")",
"res",
"=",
"self",
".",
"__exchange",
"(",
"'dofile(\"'",
"+",
"filename",
"+",
"'\")'",
")",
"log",
".",
"info",
"(",
"res",
")",
"return",
"res"
] |
https://github.com/kmpm/nodemcu-uploader/blob/6178f40fff2deadd56b5bc474f9b4475ef444b37/nodemcu_uploader/uploader.py#L451-L456
|
|
scikit-image/scikit-image
|
ed642e2bc822f362504d24379dee94978d6fa9de
|
skimage/measure/fit.py
|
python
|
LineModelND.predict_y
|
(self, x, params=None)
|
return y
|
Predict y-coordinates for 2D lines using the estimated model.
Alias for::
predict(x, axis=0)[:, 1]
Parameters
----------
x : array
x-coordinates.
params : (2, ) array, optional
Optional custom parameter set in the form (`origin`, `direction`).
Returns
-------
y : array
Predicted y-coordinates.
|
Predict y-coordinates for 2D lines using the estimated model.
|
[
"Predict",
"y",
"-",
"coordinates",
"for",
"2D",
"lines",
"using",
"the",
"estimated",
"model",
"."
] |
def predict_y(self, x, params=None):
"""Predict y-coordinates for 2D lines using the estimated model.
Alias for::
predict(x, axis=0)[:, 1]
Parameters
----------
x : array
x-coordinates.
params : (2, ) array, optional
Optional custom parameter set in the form (`origin`, `direction`).
Returns
-------
y : array
Predicted y-coordinates.
"""
y = self.predict(x, axis=0, params=params)[:, 1]
return y
|
[
"def",
"predict_y",
"(",
"self",
",",
"x",
",",
"params",
"=",
"None",
")",
":",
"y",
"=",
"self",
".",
"predict",
"(",
"x",
",",
"axis",
"=",
"0",
",",
"params",
"=",
"params",
")",
"[",
":",
",",
"1",
"]",
"return",
"y"
] |
https://github.com/scikit-image/scikit-image/blob/ed642e2bc822f362504d24379dee94978d6fa9de/skimage/measure/fit.py#L194-L215
|
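A usage sketch for the predict_y record above (assuming scikit-image is installed): fit the line model to noisy 2D points, then predict y for new x values via the alias.

import numpy as np
from skimage.measure import LineModelND

rng = np.random.default_rng(0)
x = np.linspace(0, 10, 50)
points = np.column_stack([x, 2 * x + 1 + rng.normal(scale=0.1, size=x.size)])

model = LineModelND()
model.estimate(points)                                 # fit origin/direction
print(model.predict_y(np.array([0.0, 5.0, 10.0])))     # approx [1, 11, 21]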
|
tp4a/teleport
|
1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad
|
server/www/packages/packages-linux/x64/psutil/_pswindows.py
|
python
|
Process.oneshot_enter
|
(self)
|
[] |
def oneshot_enter(self):
self._proc_info.cache_activate(self)
self.exe.cache_activate(self)
|
[
"def",
"oneshot_enter",
"(",
"self",
")",
":",
"self",
".",
"_proc_info",
".",
"cache_activate",
"(",
"self",
")",
"self",
".",
"exe",
".",
"cache_activate",
"(",
"self",
")"
] |
https://github.com/tp4a/teleport/blob/1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad/server/www/packages/packages-linux/x64/psutil/_pswindows.py#L723-L725
|
||||
tapanpandita/pocket
|
ce6f316887873db7c9a9d56811afb6eba744a4cd
|
pocket.py
|
python
|
bulk_wrapper
|
(fn)
|
return wrapped
|
[] |
def bulk_wrapper(fn):
@wraps(fn)
def wrapped(self, *args, **kwargs):
arg_names = list(fn.__code__.co_varnames)
arg_names.remove('self')
kwargs.update(dict(zip(arg_names, args)))
wait = kwargs.get('wait', True)
query = dict(
[(k, v) for k, v in kwargs.items() if v is not None]
)
# TODO: Fix this hack
query['action'] = 'add' if fn.__name__ == 'bulk_add' else fn.__name__
if wait:
self.add_bulk_query(query)
return self
else:
url = self.api_endpoints['send']
payload = {
'actions': [query],
}
payload.update(self.get_payload())
return self.make_request(
url,
json.dumps(payload),
headers={'content-type': 'application/json'},
)
return wrapped
|
[
"def",
"bulk_wrapper",
"(",
"fn",
")",
":",
"@",
"wraps",
"(",
"fn",
")",
"def",
"wrapped",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"arg_names",
"=",
"list",
"(",
"fn",
".",
"__code__",
".",
"co_varnames",
")",
"arg_names",
".",
"remove",
"(",
"'self'",
")",
"kwargs",
".",
"update",
"(",
"dict",
"(",
"zip",
"(",
"arg_names",
",",
"args",
")",
")",
")",
"wait",
"=",
"kwargs",
".",
"get",
"(",
"'wait'",
",",
"True",
")",
"query",
"=",
"dict",
"(",
"[",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
"if",
"v",
"is",
"not",
"None",
"]",
")",
"# TODO: Fix this hack",
"query",
"[",
"'action'",
"]",
"=",
"'add'",
"if",
"fn",
".",
"__name__",
"==",
"'bulk_add'",
"else",
"fn",
".",
"__name__",
"if",
"wait",
":",
"self",
".",
"add_bulk_query",
"(",
"query",
")",
"return",
"self",
"else",
":",
"url",
"=",
"self",
".",
"api_endpoints",
"[",
"'send'",
"]",
"payload",
"=",
"{",
"'actions'",
":",
"[",
"query",
"]",
",",
"}",
"payload",
".",
"update",
"(",
"self",
".",
"get_payload",
"(",
")",
")",
"return",
"self",
".",
"make_request",
"(",
"url",
",",
"json",
".",
"dumps",
"(",
"payload",
")",
",",
"headers",
"=",
"{",
"'content-type'",
":",
"'application/json'",
"}",
",",
")",
"return",
"wrapped"
] |
https://github.com/tapanpandita/pocket/blob/ce6f316887873db7c9a9d56811afb6eba744a4cd/pocket.py#L62-L92
|
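The interesting trick in bulk_wrapper above is folding positional arguments into kwargs via the wrapped function's own parameter names; a minimal standalone sketch of just that pattern (kwargs_only and add are illustrative names, not part of the pocket library):

from functools import wraps

def kwargs_only(fn):
    @wraps(fn)
    def wrapped(*args, **kwargs):
        # map positional args onto the function's declared parameter names
        names = fn.__code__.co_varnames[:fn.__code__.co_argcount]
        kwargs.update(dict(zip(names, args)))
        return fn(**kwargs)
    return wrapped

@kwargs_only
def add(item_id, tags=None):
    return {"item_id": item_id, "tags": tags}

print(add(42, ["later"]))   # -> {'item_id': 42, 'tags': ['later']}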
|||
elastic/elasticsearch-py-async
|
9427ef9e7e9e85f8e2330a98a97bb0b180953fa0
|
elasticsearch_async/transport.py
|
python
|
AsyncTransport._get_sniff_data
|
(self, initial=False)
|
[] |
def _get_sniff_data(self, initial=False):
previous_sniff = self.last_sniff
# reset last_sniff timestamp
self.last_sniff = time.time()
# use small timeout for the sniffing request, should be a fast api call
timeout = self.sniff_timeout if not initial else None
tasks = [
c.perform_request('GET', '/_nodes/_all/http', timeout=timeout)
# go through all current connections as well as the
# seed_connections for good measure
for c in chain(self.connection_pool.connections, (c for c in self.seed_connections if c not in self.connection_pool.connections))
]
done = ()
try:
while tasks:
# execute sniff requests in parallel, wait for first to return
done, tasks = yield from asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED, loop=self.loop)
# go through all the finished tasks
for t in done:
try:
_, headers, node_info = t.result()
node_info = self.deserializer.loads(node_info, headers.get('content-type'))
except (ConnectionError, SerializationError) as e:
logger.warn('Sniffing request failed with %r', e)
continue
node_info = list(node_info['nodes'].values())
return node_info
else:
# no task has finished completely
raise TransportError("N/A", "Unable to sniff hosts.")
except:
# keep the previous value on error
self.last_sniff = previous_sniff
raise
finally:
# clean up pending futures
for t in chain(done, tasks):
t.cancel()
|
[
"def",
"_get_sniff_data",
"(",
"self",
",",
"initial",
"=",
"False",
")",
":",
"previous_sniff",
"=",
"self",
".",
"last_sniff",
"# reset last_sniff timestamp",
"self",
".",
"last_sniff",
"=",
"time",
".",
"time",
"(",
")",
"# use small timeout for the sniffing request, should be a fast api call",
"timeout",
"=",
"self",
".",
"sniff_timeout",
"if",
"not",
"initial",
"else",
"None",
"tasks",
"=",
"[",
"c",
".",
"perform_request",
"(",
"'GET'",
",",
"'/_nodes/_all/http'",
",",
"timeout",
"=",
"timeout",
")",
"# go through all current connections as well as the",
"# seed_connections for good measure",
"for",
"c",
"in",
"chain",
"(",
"self",
".",
"connection_pool",
".",
"connections",
",",
"(",
"c",
"for",
"c",
"in",
"self",
".",
"seed_connections",
"if",
"c",
"not",
"in",
"self",
".",
"connection_pool",
".",
"connections",
")",
")",
"]",
"done",
"=",
"(",
")",
"try",
":",
"while",
"tasks",
":",
"# execute sniff requests in parallel, wait for first to return",
"done",
",",
"tasks",
"=",
"yield",
"from",
"asyncio",
".",
"wait",
"(",
"tasks",
",",
"return_when",
"=",
"asyncio",
".",
"FIRST_COMPLETED",
",",
"loop",
"=",
"self",
".",
"loop",
")",
"# go through all the finished tasks",
"for",
"t",
"in",
"done",
":",
"try",
":",
"_",
",",
"headers",
",",
"node_info",
"=",
"t",
".",
"result",
"(",
")",
"node_info",
"=",
"self",
".",
"deserializer",
".",
"loads",
"(",
"node_info",
",",
"headers",
".",
"get",
"(",
"'content-type'",
")",
")",
"except",
"(",
"ConnectionError",
",",
"SerializationError",
")",
"as",
"e",
":",
"logger",
".",
"warn",
"(",
"'Sniffing request failed with %r'",
",",
"e",
")",
"continue",
"node_info",
"=",
"list",
"(",
"node_info",
"[",
"'nodes'",
"]",
".",
"values",
"(",
")",
")",
"return",
"node_info",
"else",
":",
"# no task has finished completely",
"raise",
"TransportError",
"(",
"\"N/A\"",
",",
"\"Unable to sniff hosts.\"",
")",
"except",
":",
"# keep the previous value on error",
"self",
".",
"last_sniff",
"=",
"previous_sniff",
"raise",
"finally",
":",
"# clean up pending futures",
"for",
"t",
"in",
"chain",
"(",
"done",
",",
"tasks",
")",
":",
"t",
".",
"cancel",
"(",
")"
] |
https://github.com/elastic/elasticsearch-py-async/blob/9427ef9e7e9e85f8e2330a98a97bb0b180953fa0/elasticsearch_async/transport.py#L72-L113
|
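The _get_sniff_data record above uses the legacy yield-from coroutine style; a sketch of the same fan-out, first-successful-result-wins pattern in modern async/await (first_successful is an illustrative name, not part of the library):

import asyncio

async def first_successful(coros):
    tasks = [asyncio.ensure_future(c) for c in coros]
    try:
        while tasks:
            # run concurrently, wake up as soon as any task finishes
            done, tasks = await asyncio.wait(
                tasks, return_when=asyncio.FIRST_COMPLETED)
            for t in done:
                try:
                    return t.result()
                except Exception:
                    continue   # this attempt failed; keep waiting on the rest
        raise RuntimeError("all attempts failed")
    finally:
        for t in tasks:
            t.cancel()         # clean up pending futures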
||||
Azure/azure-devops-cli-extension
|
11334cd55806bef0b99c3bee5a438eed71e44037
|
azure-devops/azext_devops/devops_sdk/v5_0/git/git_client_base.py
|
python
|
GitClientBase.get_likes
|
(self, repository_id, pull_request_id, thread_id, comment_id, project=None)
|
return self._deserialize('[IdentityRef]', self._unwrap_collection(response))
|
GetLikes.
[Preview API] Get likes for a comment.
:param str repository_id: The repository ID of the pull request's target branch.
:param int pull_request_id: ID of the pull request.
:param int thread_id: The ID of the thread that contains the comment.
:param int comment_id: The ID of the comment.
:param str project: Project ID or project name
:rtype: [IdentityRef]
|
GetLikes.
[Preview API] Get likes for a comment.
:param str repository_id: The repository ID of the pull request's target branch.
:param int pull_request_id: ID of the pull request.
:param int thread_id: The ID of the thread that contains the comment.
:param int comment_id: The ID of the comment.
:param str project: Project ID or project name
:rtype: [IdentityRef]
|
[
"GetLikes",
".",
"[",
"Preview",
"API",
"]",
"Get",
"likes",
"for",
"a",
"comment",
".",
":",
"param",
"str",
"repository_id",
":",
"The",
"repository",
"ID",
"of",
"the",
"pull",
"request",
"s",
"target",
"branch",
".",
":",
"param",
"int",
"pull_request_id",
":",
"ID",
"of",
"the",
"pull",
"request",
".",
":",
"param",
"int",
"thread_id",
":",
"The",
"ID",
"of",
"the",
"thread",
"that",
"contains",
"the",
"comment",
".",
":",
"param",
"int",
"comment_id",
":",
"The",
"ID",
"of",
"the",
"comment",
".",
":",
"param",
"str",
"project",
":",
"Project",
"ID",
"or",
"project",
"name",
":",
"rtype",
":",
"[",
"IdentityRef",
"]"
] |
def get_likes(self, repository_id, pull_request_id, thread_id, comment_id, project=None):
"""GetLikes.
[Preview API] Get likes for a comment.
:param str repository_id: The repository ID of the pull request's target branch.
:param int pull_request_id: ID of the pull request.
:param int thread_id: The ID of the thread that contains the comment.
:param int comment_id: The ID of the comment.
:param str project: Project ID or project name
:rtype: [IdentityRef]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if repository_id is not None:
route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str')
if pull_request_id is not None:
route_values['pullRequestId'] = self._serialize.url('pull_request_id', pull_request_id, 'int')
if thread_id is not None:
route_values['threadId'] = self._serialize.url('thread_id', thread_id, 'int')
if comment_id is not None:
route_values['commentId'] = self._serialize.url('comment_id', comment_id, 'int')
response = self._send(http_method='GET',
location_id='5f2e2851-1389-425b-a00b-fb2adb3ef31b',
version='5.0-preview.1',
route_values=route_values)
return self._deserialize('[IdentityRef]', self._unwrap_collection(response))
|
[
"def",
"get_likes",
"(",
"self",
",",
"repository_id",
",",
"pull_request_id",
",",
"thread_id",
",",
"comment_id",
",",
"project",
"=",
"None",
")",
":",
"route_values",
"=",
"{",
"}",
"if",
"project",
"is",
"not",
"None",
":",
"route_values",
"[",
"'project'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'project'",
",",
"project",
",",
"'str'",
")",
"if",
"repository_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'repositoryId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'repository_id'",
",",
"repository_id",
",",
"'str'",
")",
"if",
"pull_request_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'pullRequestId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'pull_request_id'",
",",
"pull_request_id",
",",
"'int'",
")",
"if",
"thread_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'threadId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'thread_id'",
",",
"thread_id",
",",
"'int'",
")",
"if",
"comment_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'commentId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'comment_id'",
",",
"comment_id",
",",
"'int'",
")",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'GET'",
",",
"location_id",
"=",
"'5f2e2851-1389-425b-a00b-fb2adb3ef31b'",
",",
"version",
"=",
"'5.0-preview.1'",
",",
"route_values",
"=",
"route_values",
")",
"return",
"self",
".",
"_deserialize",
"(",
"'[IdentityRef]'",
",",
"self",
".",
"_unwrap_collection",
"(",
"response",
")",
")"
] |
https://github.com/Azure/azure-devops-cli-extension/blob/11334cd55806bef0b99c3bee5a438eed71e44037/azure-devops/azext_devops/devops_sdk/v5_0/git/git_client_base.py#L1282-L1307
|
|
HymanLiuTS/flaskTs
|
286648286976e85d9b9a5873632331efcafe0b21
|
flasky/lib/python2.7/site-packages/pip/_vendor/ipaddress.py
|
python
|
_BaseNetwork.is_unspecified
|
(self)
|
return (self.network_address.is_unspecified and
self.broadcast_address.is_unspecified)
|
Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 2373 2.5.2.
|
Test if the address is unspecified.
|
[
"Test",
"if",
"the",
"address",
"is",
"unspecified",
"."
] |
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 2373 2.5.2.
"""
return (self.network_address.is_unspecified and
self.broadcast_address.is_unspecified)
|
[
"def",
"is_unspecified",
"(",
"self",
")",
":",
"return",
"(",
"self",
".",
"network_address",
".",
"is_unspecified",
"and",
"self",
".",
"broadcast_address",
".",
"is_unspecified",
")"
] |
https://github.com/HymanLiuTS/flaskTs/blob/286648286976e85d9b9a5873632331efcafe0b21/flasky/lib/python2.7/site-packages/pip/_vendor/ipaddress.py#L1176-L1185
|
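The stdlib ipaddress module exposes the same property; per the definition above, a network is unspecified only when both its network and broadcast addresses are.

import ipaddress

print(ipaddress.ip_network("0.0.0.0/32").is_unspecified)   # True
print(ipaddress.ip_network("0.0.0.0/8").is_unspecified)    # False: broadcast is 0.255.255.255
print(ipaddress.ip_network("::/128").is_unspecified)       # True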
|
googleads/google-ads-python
|
2a1d6062221f6aad1992a6bcca0e7e4a93d2db86
|
google/ads/googleads/v9/services/services/campaign_customizer_service/client.py
|
python
|
CampaignCustomizerServiceClient.common_folder_path
|
(folder: str,)
|
return "folders/{folder}".format(folder=folder,)
|
Return a fully-qualified folder string.
|
Return a fully-qualified folder string.
|
[
"Return",
"a",
"fully",
"-",
"qualified",
"folder",
"string",
"."
] |
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
|
[
"def",
"common_folder_path",
"(",
"folder",
":",
"str",
",",
")",
"->",
"str",
":",
"return",
"\"folders/{folder}\"",
".",
"format",
"(",
"folder",
"=",
"folder",
",",
")"
] |
https://github.com/googleads/google-ads-python/blob/2a1d6062221f6aad1992a6bcca0e7e4a93d2db86/google/ads/googleads/v9/services/services/campaign_customizer_service/client.py#L248-L250
|
|
eliben/pyelftools
|
8f7a0becaface09435c4374947548b7851e3d1a2
|
elftools/elf/dynamic.py
|
python
|
DynamicSegment.__init__
|
(self, header, stream, elffile)
|
[] |
def __init__(self, header, stream, elffile):
# The string table section to be used to resolve string names in
# the dynamic tag array is the one pointed at by the sh_link field
# of the dynamic section header.
# So we must look for the dynamic section contained in the dynamic
# segment, we do so by searching for the dynamic section whose content
# is located at the same offset as the dynamic segment
stringtable = None
for section in elffile.iter_sections():
if (isinstance(section, DynamicSection) and
section['sh_offset'] == header['p_offset']):
stringtable = elffile.get_section(section['sh_link'])
break
Segment.__init__(self, header, stream)
Dynamic.__init__(self, stream, elffile, stringtable, self['p_offset'],
self['p_filesz'] == 0)
self._symbol_size = self.elfstructs.Elf_Sym.sizeof()
self._num_symbols = None
self._symbol_name_map = None
|
[
"def",
"__init__",
"(",
"self",
",",
"header",
",",
"stream",
",",
"elffile",
")",
":",
"# The string table section to be used to resolve string names in",
"# the dynamic tag array is the one pointed at by the sh_link field",
"# of the dynamic section header.",
"# So we must look for the dynamic section contained in the dynamic",
"# segment, we do so by searching for the dynamic section whose content",
"# is located at the same offset as the dynamic segment",
"stringtable",
"=",
"None",
"for",
"section",
"in",
"elffile",
".",
"iter_sections",
"(",
")",
":",
"if",
"(",
"isinstance",
"(",
"section",
",",
"DynamicSection",
")",
"and",
"section",
"[",
"'sh_offset'",
"]",
"==",
"header",
"[",
"'p_offset'",
"]",
")",
":",
"stringtable",
"=",
"elffile",
".",
"get_section",
"(",
"section",
"[",
"'sh_link'",
"]",
")",
"break",
"Segment",
".",
"__init__",
"(",
"self",
",",
"header",
",",
"stream",
")",
"Dynamic",
".",
"__init__",
"(",
"self",
",",
"stream",
",",
"elffile",
",",
"stringtable",
",",
"self",
"[",
"'p_offset'",
"]",
",",
"self",
"[",
"'p_filesz'",
"]",
"==",
"0",
")",
"self",
".",
"_symbol_size",
"=",
"self",
".",
"elfstructs",
".",
"Elf_Sym",
".",
"sizeof",
"(",
")",
"self",
".",
"_num_symbols",
"=",
"None",
"self",
".",
"_symbol_name_map",
"=",
"None"
] |
https://github.com/eliben/pyelftools/blob/8f7a0becaface09435c4374947548b7851e3d1a2/elftools/elf/dynamic.py#L239-L257
|
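Once constructed as above, a DynamicSegment resolves tag names through that string table; a hedged usage sketch with pyelftools (the binary path is a placeholder):

from elftools.elf.elffile import ELFFile

with open("/bin/ls", "rb") as f:   # placeholder ELF binary
    elf = ELFFile(f)
    for segment in elf.iter_segments():
        if segment.header.p_type == "PT_DYNAMIC":
            for tag in segment.iter_tags():
                if tag.entry.d_tag == "DT_NEEDED":
                    print("needs", tag.needed)   # resolved via the string table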
||||
boto/boto
|
b2a6f08122b2f1b89888d2848e730893595cd001
|
boto/utils.py
|
python
|
setlocale
|
(name)
|
A context manager to set the locale in a threadsafe manner.
|
A context manager to set the locale in a threadsafe manner.
|
[
"A",
"context",
"manager",
"to",
"set",
"the",
"locale",
"in",
"a",
"threadsafe",
"manner",
"."
] |
def setlocale(name):
"""
A context manager to set the locale in a threadsafe manner.
"""
with LOCALE_LOCK:
saved = locale.setlocale(locale.LC_ALL)
try:
yield locale.setlocale(locale.LC_ALL, name)
finally:
locale.setlocale(locale.LC_ALL, saved)
|
[
"def",
"setlocale",
"(",
"name",
")",
":",
"with",
"LOCALE_LOCK",
":",
"saved",
"=",
"locale",
".",
"setlocale",
"(",
"locale",
".",
"LC_ALL",
")",
"try",
":",
"yield",
"locale",
".",
"setlocale",
"(",
"locale",
".",
"LC_ALL",
",",
"name",
")",
"finally",
":",
"locale",
".",
"setlocale",
"(",
"locale",
".",
"LC_ALL",
",",
"saved",
")"
] |
https://github.com/boto/boto/blob/b2a6f08122b2f1b89888d2848e730893595cd001/boto/utils.py#L453-L463
|
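A self-contained version of the setlocale helper (the record omits the @contextmanager decorator and the LOCALE_LOCK definition, so both are supplied here as assumptions), plus a typical use: formatting a timestamp in the C locale and restoring the saved locale afterwards.

import locale
import threading
import time
from contextlib import contextmanager

LOCALE_LOCK = threading.Lock()

@contextmanager
def setlocale(name):
    with LOCALE_LOCK:
        saved = locale.setlocale(locale.LC_ALL)
        try:
            yield locale.setlocale(locale.LC_ALL, name)
        finally:
            locale.setlocale(locale.LC_ALL, saved)

with setlocale("C"):
    print(time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()))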
||
pyload/pyload
|
4410827ca7711f1a3cf91a0b11e967b81bbbcaa2
|
src/pyload/plugins/base/account.py
|
python
|
BaseAccount.get_info
|
(self, refresh=True)
|
return self.info
|
Retrieve account infos for a user, do **not** overwrite this method! just use
it to retrieve infos in hoster plugins. see `grab_info`
:param user: username
:param relogin: reloads cached account information
:return: dictionary with information
|
Retrieve account infos for a user, do **not** overwrite this method! just use
it to retrieve infos in hoster plugins. see `grab_info`
|
[
"Retrieve",
"account",
"infos",
"for",
"an",
"user",
"do",
"**",
"not",
"**",
"overwrite",
"this",
"method!",
"just",
"use",
"it",
"to",
"retrieve",
"infos",
"in",
"hoster",
"plugins",
".",
"see",
"grab_info"
] |
def get_info(self, refresh=True):
"""
Retrieve account infos for a user, do **not** overwrite this method! just use
it to retrieve infos in hoster plugins. see `grab_info`
:param user: username
:param relogin: reloads cached account information
:return: dictionary with information
"""
if not self.logged:
if self.relogin():
refresh = True
else:
refresh = False
self.reset()
if refresh and self.info["login"]["valid"]:
self.log_info(
self._("Grabbing account info for user `{}`...").format(self.user)
)
self.info = self._grab_info()
self.syncback()
self.log_debug(
"Account info for user `{}`: {}".format(self.user, self.info)
)
return self.info
|
[
"def",
"get_info",
"(",
"self",
",",
"refresh",
"=",
"True",
")",
":",
"if",
"not",
"self",
".",
"logged",
":",
"if",
"self",
".",
"relogin",
"(",
")",
":",
"refresh",
"=",
"True",
"else",
":",
"refresh",
"=",
"False",
"self",
".",
"reset",
"(",
")",
"if",
"refresh",
"and",
"self",
".",
"info",
"[",
"\"login\"",
"]",
"[",
"\"valid\"",
"]",
":",
"self",
".",
"log_info",
"(",
"self",
".",
"_",
"(",
"\"Grabbing account info for user `{}`...\"",
")",
".",
"format",
"(",
"self",
".",
"user",
")",
")",
"self",
".",
"info",
"=",
"self",
".",
"_grab_info",
"(",
")",
"self",
".",
"syncback",
"(",
")",
"self",
".",
"log_debug",
"(",
"\"Account info for user `{}`: {}\"",
".",
"format",
"(",
"self",
".",
"user",
",",
"self",
".",
"info",
")",
")",
"return",
"self",
".",
"info"
] |
https://github.com/pyload/pyload/blob/4410827ca7711f1a3cf91a0b11e967b81bbbcaa2/src/pyload/plugins/base/account.py#L213-L241
|
|
tp4a/teleport
|
1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad
|
server/www/packages/packages-darwin/x64/tornado/ioloop.py
|
python
|
IOLoop.split_fd
|
(self, fd)
|
Returns an (fd, obj) pair from an ``fd`` parameter.
We accept both raw file descriptors and file-like objects as
input to `add_handler` and related methods. When a file-like
object is passed, we must retain the object itself so we can
close it correctly when the `IOLoop` shuts down, but the
poller interfaces favor file descriptors (they will accept
file-like objects and call ``fileno()`` for you, but they
always return the descriptor itself).
This method is provided for use by `IOLoop` subclasses and should
not generally be used by application code.
.. versionadded:: 4.0
|
Returns an (fd, obj) pair from an ``fd`` parameter.
|
[
"Returns",
"an",
"(",
"fd",
"obj",
")",
"pair",
"from",
"an",
"fd",
"parameter",
"."
] |
def split_fd(self, fd):
"""Returns an (fd, obj) pair from an ``fd`` parameter.
We accept both raw file descriptors and file-like objects as
input to `add_handler` and related methods. When a file-like
object is passed, we must retain the object itself so we can
close it correctly when the `IOLoop` shuts down, but the
poller interfaces favor file descriptors (they will accept
file-like objects and call ``fileno()`` for you, but they
always return the descriptor itself).
This method is provided for use by `IOLoop` subclasses and should
not generally be used by application code.
.. versionadded:: 4.0
"""
try:
return fd.fileno(), fd
except AttributeError:
return fd, fd
|
[
"def",
"split_fd",
"(",
"self",
",",
"fd",
")",
":",
"try",
":",
"return",
"fd",
".",
"fileno",
"(",
")",
",",
"fd",
"except",
"AttributeError",
":",
"return",
"fd",
",",
"fd"
] |
https://github.com/tp4a/teleport/blob/1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad/server/www/packages/packages-darwin/x64/tornado/ioloop.py#L803-L822
|
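The normalization in split_fd above is simple duck typing on fileno(); a standalone sketch showing both accepted inputs:

import socket

def split_fd(fd):
    # file-like objects keep the object around; raw ints come back twice
    try:
        return fd.fileno(), fd
    except AttributeError:
        return fd, fd

sock = socket.socket()
print(split_fd(sock) == (sock.fileno(), sock))   # True
print(split_fd(7))                               # (7, 7)
sock.close()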
||
yorikvanhavre/BIM_Workbench
|
1114096d1f6abe15ce93c6ca8fed568f52765753
|
BimViews.py
|
python
|
BIM_Views.delete
|
(self)
|
deletes the selected object
|
deletes the selected object
|
[
"deletes",
"the",
"selected",
"object"
] |
def delete(self):
"deletes the selected object"
vm = findWidget()
if vm:
if vm.tree.selectedItems():
FreeCAD.ActiveDocument.openTransaction("Delete")
for item in vm.tree.selectedItems():
obj = FreeCAD.ActiveDocument.getObject(item.toolTip(0))
if obj:
FreeCAD.ActiveDocument.removeObject(obj.Name)
FreeCAD.ActiveDocument.commitTransaction()
FreeCAD.ActiveDocument.recompute()
self.update(False)
|
[
"def",
"delete",
"(",
"self",
")",
":",
"vm",
"=",
"findWidget",
"(",
")",
"if",
"vm",
":",
"if",
"vm",
".",
"tree",
".",
"selectedItems",
"(",
")",
":",
"FreeCAD",
".",
"ActiveDocument",
".",
"openTransaction",
"(",
"\"Delete\"",
")",
"for",
"item",
"in",
"vm",
".",
"tree",
".",
"selectedItems",
"(",
")",
":",
"obj",
"=",
"FreeCAD",
".",
"ActiveDocument",
".",
"getObject",
"(",
"item",
".",
"toolTip",
"(",
"0",
")",
")",
"if",
"obj",
":",
"FreeCAD",
".",
"ActiveDocument",
".",
"removeObject",
"(",
"obj",
".",
"Name",
")",
"FreeCAD",
".",
"ActiveDocument",
".",
"commitTransaction",
"(",
")",
"FreeCAD",
".",
"ActiveDocument",
".",
"recompute",
"(",
")",
"self",
".",
"update",
"(",
"False",
")"
] |
https://github.com/yorikvanhavre/BIM_Workbench/blob/1114096d1f6abe15ce93c6ca8fed568f52765753/BimViews.py#L196-L210
|
||
aws-samples/aws-kube-codesuite
|
ab4e5ce45416b83bffb947ab8d234df5437f4fca
|
src/kubernetes/client/apis/core_v1_api.py
|
python
|
CoreV1Api.patch_namespaced_pod_with_http_info
|
(self, name, namespace, body, **kwargs)
|
return self.api_client.call_api(resource_path, 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Pod',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
partially update the specified Pod
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.patch_namespaced_pod_with_http_info(name, namespace, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: name of the Pod (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Pod
If the method is called asynchronously,
returns the request thread.
|
partially update the specified Pod
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.patch_namespaced_pod_with_http_info(name, namespace, body, callback=callback_function)
|
[
"partially",
"update",
"the",
"specified",
"Pod",
"This",
"method",
"makes",
"a",
"synchronous",
"HTTP",
"request",
"by",
"default",
".",
"To",
"make",
"an",
"asynchronous",
"HTTP",
"request",
"please",
"define",
"a",
"callback",
"function",
"to",
"be",
"invoked",
"when",
"receiving",
"the",
"response",
".",
">>>",
"def",
"callback_function",
"(",
"response",
")",
":",
">>>",
"pprint",
"(",
"response",
")",
">>>",
">>>",
"thread",
"=",
"api",
".",
"patch_namespaced_pod_with_http_info",
"(",
"name",
"namespace",
"body",
"callback",
"=",
"callback_function",
")"
] |
def patch_namespaced_pod_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update the specified Pod
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.patch_namespaced_pod_with_http_info(name, namespace, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: name of the Pod (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Pod
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_pod" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_pod`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_pod`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_pod`")
collection_formats = {}
resource_path = '/api/v1/namespaces/{namespace}/pods/{name}'.replace('{format}', 'json')
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = {}
if 'pretty' in params:
query_params['pretty'] = params['pretty']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api(resource_path, 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Pod',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
[
"def",
"patch_namespaced_pod_with_http_info",
"(",
"self",
",",
"name",
",",
"namespace",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
":",
"all_params",
"=",
"[",
"'name'",
",",
"'namespace'",
",",
"'body'",
",",
"'pretty'",
"]",
"all_params",
".",
"append",
"(",
"'callback'",
")",
"all_params",
".",
"append",
"(",
"'_return_http_data_only'",
")",
"all_params",
".",
"append",
"(",
"'_preload_content'",
")",
"all_params",
".",
"append",
"(",
"'_request_timeout'",
")",
"params",
"=",
"locals",
"(",
")",
"for",
"key",
",",
"val",
"in",
"iteritems",
"(",
"params",
"[",
"'kwargs'",
"]",
")",
":",
"if",
"key",
"not",
"in",
"all_params",
":",
"raise",
"TypeError",
"(",
"\"Got an unexpected keyword argument '%s'\"",
"\" to method patch_namespaced_pod\"",
"%",
"key",
")",
"params",
"[",
"key",
"]",
"=",
"val",
"del",
"params",
"[",
"'kwargs'",
"]",
"# verify the required parameter 'name' is set",
"if",
"(",
"'name'",
"not",
"in",
"params",
")",
"or",
"(",
"params",
"[",
"'name'",
"]",
"is",
"None",
")",
":",
"raise",
"ValueError",
"(",
"\"Missing the required parameter `name` when calling `patch_namespaced_pod`\"",
")",
"# verify the required parameter 'namespace' is set",
"if",
"(",
"'namespace'",
"not",
"in",
"params",
")",
"or",
"(",
"params",
"[",
"'namespace'",
"]",
"is",
"None",
")",
":",
"raise",
"ValueError",
"(",
"\"Missing the required parameter `namespace` when calling `patch_namespaced_pod`\"",
")",
"# verify the required parameter 'body' is set",
"if",
"(",
"'body'",
"not",
"in",
"params",
")",
"or",
"(",
"params",
"[",
"'body'",
"]",
"is",
"None",
")",
":",
"raise",
"ValueError",
"(",
"\"Missing the required parameter `body` when calling `patch_namespaced_pod`\"",
")",
"collection_formats",
"=",
"{",
"}",
"resource_path",
"=",
"'/api/v1/namespaces/{namespace}/pods/{name}'",
".",
"replace",
"(",
"'{format}'",
",",
"'json'",
")",
"path_params",
"=",
"{",
"}",
"if",
"'name'",
"in",
"params",
":",
"path_params",
"[",
"'name'",
"]",
"=",
"params",
"[",
"'name'",
"]",
"if",
"'namespace'",
"in",
"params",
":",
"path_params",
"[",
"'namespace'",
"]",
"=",
"params",
"[",
"'namespace'",
"]",
"query_params",
"=",
"{",
"}",
"if",
"'pretty'",
"in",
"params",
":",
"query_params",
"[",
"'pretty'",
"]",
"=",
"params",
"[",
"'pretty'",
"]",
"header_params",
"=",
"{",
"}",
"form_params",
"=",
"[",
"]",
"local_var_files",
"=",
"{",
"}",
"body_params",
"=",
"None",
"if",
"'body'",
"in",
"params",
":",
"body_params",
"=",
"params",
"[",
"'body'",
"]",
"# HTTP header `Accept`",
"header_params",
"[",
"'Accept'",
"]",
"=",
"self",
".",
"api_client",
".",
"select_header_accept",
"(",
"[",
"'application/json'",
",",
"'application/yaml'",
",",
"'application/vnd.kubernetes.protobuf'",
"]",
")",
"# HTTP header `Content-Type`",
"header_params",
"[",
"'Content-Type'",
"]",
"=",
"self",
".",
"api_client",
".",
"select_header_content_type",
"(",
"[",
"'application/json-patch+json'",
",",
"'application/merge-patch+json'",
",",
"'application/strategic-merge-patch+json'",
"]",
")",
"# Authentication setting",
"auth_settings",
"=",
"[",
"'BearerToken'",
"]",
"return",
"self",
".",
"api_client",
".",
"call_api",
"(",
"resource_path",
",",
"'PATCH'",
",",
"path_params",
",",
"query_params",
",",
"header_params",
",",
"body",
"=",
"body_params",
",",
"post_params",
"=",
"form_params",
",",
"files",
"=",
"local_var_files",
",",
"response_type",
"=",
"'V1Pod'",
",",
"auth_settings",
"=",
"auth_settings",
",",
"callback",
"=",
"params",
".",
"get",
"(",
"'callback'",
")",
",",
"_return_http_data_only",
"=",
"params",
".",
"get",
"(",
"'_return_http_data_only'",
")",
",",
"_preload_content",
"=",
"params",
".",
"get",
"(",
"'_preload_content'",
",",
"True",
")",
",",
"_request_timeout",
"=",
"params",
".",
"get",
"(",
"'_request_timeout'",
")",
",",
"collection_formats",
"=",
"collection_formats",
")"
] |
https://github.com/aws-samples/aws-kube-codesuite/blob/ab4e5ce45416b83bffb947ab8d234df5437f4fca/src/kubernetes/client/apis/core_v1_api.py#L16220-L16313
|
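In practice the *_with_http_info variant above is reached through the plain patch_namespaced_pod wrapper; a hedged usage sketch with the official kubernetes Python client (pod name, namespace, and label are placeholders, and a reachable cluster with a local kubeconfig is assumed):

from kubernetes import client, config

config.load_kube_config()          # assumes a local kubeconfig
v1 = client.CoreV1Api()

# strategic-merge patch body: add or overwrite one label
patch = {"metadata": {"labels": {"stage": "canary"}}}
pod = v1.patch_namespaced_pod(name="my-pod", namespace="default", body=patch)
print(pod.metadata.labels)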
|
deepinsight/insightface
|
c0b25f998a649f662c7136eb389abcacd7900e9d
|
recognition/partial_fc/mxnet/image_iter.py
|
python
|
FaceImageIter.check_data_shape
|
(self, data_shape)
|
Checks if the input data shape is valid
|
Checks if the input data shape is valid
|
[
"Checks",
"if",
"the",
"input",
"data",
"shape",
"is",
"valid"
] |
def check_data_shape(self, data_shape):
"""Checks if the input data shape is valid"""
if not len(data_shape) == 3:
raise ValueError(
'data_shape should have length 3, with dimensions CxHxW')
if not data_shape[0] == 3:
raise ValueError(
'This iterator expects inputs to have 3 channels.')
|
[
"def",
"check_data_shape",
"(",
"self",
",",
"data_shape",
")",
":",
"if",
"not",
"len",
"(",
"data_shape",
")",
"==",
"3",
":",
"raise",
"ValueError",
"(",
"'data_shape should have length 3, with dimensions CxHxW'",
")",
"if",
"not",
"data_shape",
"[",
"0",
"]",
"==",
"3",
":",
"raise",
"ValueError",
"(",
"'This iterator expects inputs to have 3 channels.'",
")"
] |
https://github.com/deepinsight/insightface/blob/c0b25f998a649f662c7136eb389abcacd7900e9d/recognition/partial_fc/mxnet/image_iter.py#L256-L263
|
||
PyFilesystem/pyfilesystem
|
7dfe14ae6c3b9c53543c1c3890232d9f37579f34
|
fs/expose/fuse/fuse.py
|
python
|
Operations.getattr
|
(self, path, fh=None)
|
return dict(st_mode=(S_IFDIR | 0755), st_nlink=2)
|
Returns a dictionary with keys identical to the stat C structure
of stat(2).
st_atime, st_mtime and st_ctime should be floats.
NOTE: There is an incompatibility between Linux and Mac OS X concerning
st_nlink of directories. Mac OS X counts all files inside the directory,
while Linux counts only the subdirectories.
|
Returns a dictionary with keys identical to the stat C structure
of stat(2).
st_atime, st_mtime and st_ctime should be floats.
NOTE: There is an incompatibility between Linux and Mac OS X concerning
st_nlink of directories. Mac OS X counts all files inside the directory,
while Linux counts only the subdirectories.
|
[
"Returns",
"a",
"dictionary",
"with",
"keys",
"identical",
"to",
"the",
"stat",
"C",
"structure",
"of",
"stat",
"(",
"2",
")",
".",
"st_atime",
"st_mtime",
"and",
"st_ctime",
"should",
"be",
"floats",
".",
"NOTE",
":",
"There",
"is",
"an",
"incombatibility",
"between",
"Linux",
"and",
"Mac",
"OS",
"X",
"concerning",
"st_nlink",
"of",
"directories",
".",
"Mac",
"OS",
"X",
"counts",
"all",
"files",
"inside",
"the",
"directory",
"while",
"Linux",
"counts",
"only",
"the",
"subdirectories",
"."
] |
def getattr(self, path, fh=None):
"""Returns a dictionary with keys identical to the stat C structure
of stat(2).
st_atime, st_mtime and st_ctime should be floats.
NOTE: There is an incompatibility between Linux and Mac OS X concerning
st_nlink of directories. Mac OS X counts all files inside the directory,
while Linux counts only the subdirectories."""
if path != '/':
raise FuseOSError(ENOENT)
return dict(st_mode=(S_IFDIR | 0755), st_nlink=2)
|
[
"def",
"getattr",
"(",
"self",
",",
"path",
",",
"fh",
"=",
"None",
")",
":",
"if",
"path",
"!=",
"'/'",
":",
"raise",
"FuseOSError",
"(",
"ENOENT",
")",
"return",
"dict",
"(",
"st_mode",
"=",
"(",
"S_IFDIR",
"|",
"0755",
")",
",",
"st_nlink",
"=",
"2",
")"
] |
https://github.com/PyFilesystem/pyfilesystem/blob/7dfe14ae6c3b9c53543c1c3890232d9f37579f34/fs/expose/fuse/fuse.py#L572-L582
|
|
SteveDoyle2/pyNastran
|
eda651ac2d4883d95a34951f8a002ff94f642a1a
|
pyNastran/dev/bdf_vectorized2/cards/nodes.py
|
python
|
GRIDv.make_current
|
(self)
|
creates an array of the GRID points
|
creates an array of the GRID points
|
[
"creates",
"an",
"array",
"of",
"the",
"GRID",
"points"
] |
def make_current(self):
"""creates an array of the GRID points"""
if not self.is_current:
nnid = len(self.nid)
if nnid > 0: # there are already nodes in self.nid
self.nid = np.hstack([self.nid, self._nid])
self.xyz = np.vstack([self.xyz, self._xyz])
self.cp = np.hstack([self.cp, self._cp])
self.cd = np.hstack([self.cd, self._cd])
self.ps = np.hstack([self.ps, self._ps])
self.seid = np.hstack([self.seid, self._seid])
# don't need to handle comments
else:
self.nid = np.array(self._nid, dtype='int32')
self.xyz = np.array(self._xyz, dtype='float64')
self.cp = np.array(self._cp, dtype='int32')
self.cd = np.array(self._cd, dtype='int32')
self.ps = np.array(self._ps, dtype='|U8')
self.seid = np.array(self._seid, dtype='int32')
unid = np.unique(self.nid)
if len(self.nid) != len(unid):
duplicate_nodes = duplicates(self.nid)
msg = ('there are duplicate nodes\n'
'nid =%s; n=%s\n'
'unid=%s; n=%s\n'
'duplicates=%s' % (
self.nid, len(self.nid),
unid, len(unid),
duplicate_nodes))
raise RuntimeError(msg)
isort = np.argsort(self.nid)
self.nid = self.nid[isort]
self.xyz = self.xyz[isort, :]
self.cp = self.cp[isort]
self.cd = self.cd[isort]
self.ps = self.ps[isort]
self.seid = self.seid[isort]
self._nid = []
self._xyz = []
self._cp = []
self._cd = []
self._ps = []
self._seid = []
self.is_current = True
|
[
"def",
"make_current",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_current",
":",
"nnid",
"=",
"len",
"(",
"self",
".",
"nid",
")",
"if",
"nnid",
">",
"0",
":",
"# there are already nodes in self.nid",
"self",
".",
"nid",
"=",
"np",
".",
"hstack",
"(",
"[",
"self",
".",
"nid",
",",
"self",
".",
"_nid",
"]",
")",
"self",
".",
"xyz",
"=",
"np",
".",
"vstack",
"(",
"[",
"self",
".",
"xyz",
",",
"self",
".",
"_xyz",
"]",
")",
"self",
".",
"cp",
"=",
"np",
".",
"hstack",
"(",
"[",
"self",
".",
"cp",
",",
"self",
".",
"_cp",
"]",
")",
"self",
".",
"cd",
"=",
"np",
".",
"hstack",
"(",
"[",
"self",
".",
"cd",
",",
"self",
".",
"_cd",
"]",
")",
"self",
".",
"ps",
"=",
"np",
".",
"hstack",
"(",
"[",
"self",
".",
"ps",
",",
"self",
".",
"_ps",
"]",
")",
"self",
".",
"seid",
"=",
"np",
".",
"hstack",
"(",
"[",
"self",
".",
"seid",
",",
"self",
".",
"_seid",
"]",
")",
"# don't need to handle comments",
"else",
":",
"self",
".",
"nid",
"=",
"np",
".",
"array",
"(",
"self",
".",
"_nid",
",",
"dtype",
"=",
"'int32'",
")",
"self",
".",
"xyz",
"=",
"np",
".",
"array",
"(",
"self",
".",
"_xyz",
",",
"dtype",
"=",
"'float64'",
")",
"self",
".",
"cp",
"=",
"np",
".",
"array",
"(",
"self",
".",
"_cp",
",",
"dtype",
"=",
"'int32'",
")",
"self",
".",
"cd",
"=",
"np",
".",
"array",
"(",
"self",
".",
"_cd",
",",
"dtype",
"=",
"'int32'",
")",
"self",
".",
"ps",
"=",
"np",
".",
"array",
"(",
"self",
".",
"_ps",
",",
"dtype",
"=",
"'|U8'",
")",
"self",
".",
"seid",
"=",
"np",
".",
"array",
"(",
"self",
".",
"_seid",
",",
"dtype",
"=",
"'int32'",
")",
"unid",
"=",
"np",
".",
"unique",
"(",
"self",
".",
"nid",
")",
"if",
"len",
"(",
"self",
".",
"nid",
")",
"!=",
"len",
"(",
"unid",
")",
":",
"duplicate_nodes",
"=",
"duplicates",
"(",
"self",
".",
"nid",
")",
"msg",
"=",
"(",
"'there are duplicate nodes\\n'",
"'nid =%s; n=%s\\n'",
"'unid=%s; n=%s\\n'",
"'duplicates=%s'",
"%",
"(",
"self",
".",
"nid",
",",
"len",
"(",
"self",
".",
"nid",
")",
",",
"unid",
",",
"len",
"(",
"unid",
")",
",",
"duplicate_nodes",
")",
")",
"raise",
"RuntimeError",
"(",
"msg",
")",
"isort",
"=",
"np",
".",
"argsort",
"(",
"self",
".",
"nid",
")",
"self",
".",
"nid",
"=",
"self",
".",
"nid",
"[",
"isort",
"]",
"self",
".",
"xyz",
"=",
"self",
".",
"xyz",
"[",
"isort",
",",
":",
"]",
"self",
".",
"cp",
"=",
"self",
".",
"cp",
"[",
"isort",
"]",
"self",
".",
"cd",
"=",
"self",
".",
"cd",
"[",
"isort",
"]",
"self",
".",
"ps",
"=",
"self",
".",
"ps",
"[",
"isort",
"]",
"self",
".",
"seid",
"=",
"self",
".",
"seid",
"[",
"isort",
"]",
"self",
".",
"_nid",
"=",
"[",
"]",
"self",
".",
"_xyz",
"=",
"[",
"]",
"self",
".",
"_cp",
"=",
"[",
"]",
"self",
".",
"_cd",
"=",
"[",
"]",
"self",
".",
"_ps",
"=",
"[",
"]",
"self",
".",
"_seid",
"=",
"[",
"]",
"self",
".",
"is_current",
"=",
"True"
] |
https://github.com/SteveDoyle2/pyNastran/blob/eda651ac2d4883d95a34951f8a002ff94f642a1a/pyNastran/dev/bdf_vectorized2/cards/nodes.py#L360-L406
|
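The make_current record above follows a staged-list to sorted-array pattern: append cheaply to Python lists, then batch-convert, reject duplicates, and argsort every parallel array by node id. A minimal numpy sketch of that pattern with illustrative data:

import numpy as np

staged_ids = [30, 10, 20]
staged_xyz = [[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]]

nid = np.array(staged_ids, dtype='int32')
xyz = np.array(staged_xyz, dtype='float64')

assert len(np.unique(nid)) == len(nid), "there are duplicate nodes"
order = np.argsort(nid)             # sort every parallel array by node id
nid, xyz = nid[order], xyz[order, :]
print(nid)        # [10 20 30]
print(xyz[0])     # coords of node 10 -> [1. 0. 0.]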
||
omz/PythonistaAppTemplate
|
f560f93f8876d82a21d108977f90583df08d55af
|
PythonistaAppTemplate/PythonistaKit.framework/pylib/site-packages/PIL/ImageFile.py
|
python
|
Parser.close
|
(self)
|
return self.image
|
(Consumer) Close the stream.
:returns: An image object.
:exception IOError: If the parser failed to parse the image file either
because it cannot be identified or cannot be
decoded.
|
(Consumer) Close the stream.
|
[
"(",
"Consumer",
")",
"Close",
"the",
"stream",
"."
] |
def close(self):
"""
(Consumer) Close the stream.
:returns: An image object.
:exception IOError: If the parser failed to parse the image file either
because it cannot be identified or cannot be
decoded.
"""
# finish decoding
if self.decoder:
# get rid of what's left in the buffers
self.feed(b"")
self.data = self.decoder = None
if not self.finished:
raise IOError("image was incomplete")
if not self.image:
raise IOError("cannot parse this image")
if self.data:
# incremental parsing not possible; reopen the file
# now that we have all data
try:
fp = io.BytesIO(self.data)
self.image = Image.open(fp)
finally:
self.image.load()
fp.close() # explicitly close the virtual file
return self.image
|
[
"def",
"close",
"(",
"self",
")",
":",
"# finish decoding",
"if",
"self",
".",
"decoder",
":",
"# get rid of what's left in the buffers",
"self",
".",
"feed",
"(",
"b\"\"",
")",
"self",
".",
"data",
"=",
"self",
".",
"decoder",
"=",
"None",
"if",
"not",
"self",
".",
"finished",
":",
"raise",
"IOError",
"(",
"\"image was incomplete\"",
")",
"if",
"not",
"self",
".",
"image",
":",
"raise",
"IOError",
"(",
"\"cannot parse this image\"",
")",
"if",
"self",
".",
"data",
":",
"# incremental parsing not possible; reopen the file",
"# not that we have all data",
"try",
":",
"fp",
"=",
"io",
".",
"BytesIO",
"(",
"self",
".",
"data",
")",
"self",
".",
"image",
"=",
"Image",
".",
"open",
"(",
"fp",
")",
"finally",
":",
"self",
".",
"image",
".",
"load",
"(",
")",
"fp",
".",
"close",
"(",
")",
"# explicitly close the virtual file",
"return",
"self",
".",
"image"
] |
https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib/site-packages/PIL/ImageFile.py#L421-L448
|
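The close() above is the tail end of PIL's incremental decoding protocol; a usage sketch of the full feed/close cycle ("example.png" is a hypothetical input file):

from PIL import ImageFile

parser = ImageFile.Parser()
with open("example.png", "rb") as f:
    while True:
        chunk = f.read(4096)
        if not chunk:
            break
        parser.feed(chunk)       # incremental decode as data arrives
image = parser.close()           # finish decoding, get the Image object
print(image.size, image.mode)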
|
microsoft/azure-devops-python-api
|
451cade4c475482792cbe9e522c1fee32393139e
|
azure-devops/azure/devops/released/git/git_client_base.py
|
python
|
GitClientBase.get_comments
|
(self, repository_id, pull_request_id, thread_id, project=None)
|
return self._deserialize('[Comment]', self._unwrap_collection(response))
|
GetComments.
Retrieve all comments associated with a specific thread in a pull request.
:param str repository_id: The repository ID of the pull request's target branch.
:param int pull_request_id: ID of the pull request.
:param int thread_id: ID of the thread.
:param str project: Project ID or project name
:rtype: [Comment]
|
GetComments.
Retrieve all comments associated with a specific thread in a pull request.
:param str repository_id: The repository ID of the pull request's target branch.
:param int pull_request_id: ID of the pull request.
:param int thread_id: ID of the thread.
:param str project: Project ID or project name
:rtype: [Comment]
|
[
"GetComments",
".",
"Retrieve",
"all",
"comments",
"associated",
"with",
"a",
"specific",
"thread",
"in",
"a",
"pull",
"request",
".",
":",
"param",
"str",
"repository_id",
":",
"The",
"repository",
"ID",
"of",
"the",
"pull",
"request",
"s",
"target",
"branch",
".",
":",
"param",
"int",
"pull_request_id",
":",
"ID",
"of",
"the",
"pull",
"request",
".",
":",
"param",
"int",
"thread_id",
":",
"ID",
"of",
"the",
"thread",
".",
":",
"param",
"str",
"project",
":",
"Project",
"ID",
"or",
"project",
"name",
":",
"rtype",
":",
"[",
"Comment",
"]"
] |
def get_comments(self, repository_id, pull_request_id, thread_id, project=None):
    """GetComments.
    Retrieve all comments associated with a specific thread in a pull request.
    :param str repository_id: The repository ID of the pull request's target branch.
    :param int pull_request_id: ID of the pull request.
    :param int thread_id: ID of the thread.
    :param str project: Project ID or project name
    :rtype: [Comment]
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if repository_id is not None:
        route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str')
    if pull_request_id is not None:
        route_values['pullRequestId'] = self._serialize.url('pull_request_id', pull_request_id, 'int')
    if thread_id is not None:
        route_values['threadId'] = self._serialize.url('thread_id', thread_id, 'int')
    response = self._send(http_method='GET',
                          location_id='965a3ec7-5ed8-455a-bdcb-835a5ea7fe7b',
                          version='5.1',
                          route_values=route_values)
    return self._deserialize('[Comment]', self._unwrap_collection(response))
|
[
"def",
"get_comments",
"(",
"self",
",",
"repository_id",
",",
"pull_request_id",
",",
"thread_id",
",",
"project",
"=",
"None",
")",
":",
"route_values",
"=",
"{",
"}",
"if",
"project",
"is",
"not",
"None",
":",
"route_values",
"[",
"'project'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'project'",
",",
"project",
",",
"'str'",
")",
"if",
"repository_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'repositoryId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'repository_id'",
",",
"repository_id",
",",
"'str'",
")",
"if",
"pull_request_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'pullRequestId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'pull_request_id'",
",",
"pull_request_id",
",",
"'int'",
")",
"if",
"thread_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'threadId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'thread_id'",
",",
"thread_id",
",",
"'int'",
")",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'GET'",
",",
"location_id",
"=",
"'965a3ec7-5ed8-455a-bdcb-835a5ea7fe7b'",
",",
"version",
"=",
"'5.1'",
",",
"route_values",
"=",
"route_values",
")",
"return",
"self",
".",
"_deserialize",
"(",
"'[Comment]'",
",",
"self",
".",
"_unwrap_collection",
"(",
"response",
")",
")"
] |
https://github.com/microsoft/azure-devops-python-api/blob/451cade4c475482792cbe9e522c1fee32393139e/azure-devops/azure/devops/released/git/git_client_base.py#L1350-L1372
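A hedged usage sketch for get_comments, following the azure-devops client's usual Connection setup; the organization URL, token, repository id and numeric ids below are all placeholders, not values from this record.

from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

# Placeholders: substitute a real organization URL and personal access token.
credentials = BasicAuthentication("", "<personal-access-token>")
connection = Connection(base_url="https://dev.azure.com/<organization>", creds=credentials)
git_client = connection.clients.get_git_client()

comments = git_client.get_comments("<repository-id>", pull_request_id=1, thread_id=1)
for comment in comments:
    print(comment.content)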
|
|
flozz/rivalcfg
|
3bec77db4185bba18a7d181eb366c0de93b6ab6a
|
rivalcfg/color_helpers.py
|
python
|
parse_color_gradient_string
|
(gradient)
|
return result
|
Parse a color gradient string.
:param str gradient: The gradient string.
:rtype: list
>>> parse_color_gradient_string("0%: red, 33%: #00ff00, 66: 00f")
[{'pos': 0, 'color': (255, 0, 0)}, {'pos': 33, 'color': (0, 255, 0)}, {'pos': 66, 'color': (0, 0, 255)}]
>>> parse_color_gradient_string("-1%: red")
Traceback (most recent call last):
...
ValueError: invalid color stop position '-1%'
>>> parse_color_gradient_string("150: red")
Traceback (most recent call last):
...
ValueError: invalid color stop position '150%'
>>> parse_color_gradient_string("42%: hello")
Traceback (most recent call last):
...
ValueError: invalid color 'hello'
>>> parse_color_gradient_string("hello")
Traceback (most recent call last):
...
ValueError: invalid color gradient 'hello'. ...
|
Parse a color gradient string.
|
[
"Parse",
"a",
"color",
"gradient",
"string",
"."
] |
def parse_color_gradient_string(gradient):
    """Parse a color gradient string.
    :param str gradient: The gradient string.
    :rtype: list
    >>> parse_color_gradient_string("0%: red, 33%: #00ff00, 66: 00f")
    [{'pos': 0, 'color': (255, 0, 0)}, {'pos': 33, 'color': (0, 255, 0)}, {'pos': 66, 'color': (0, 0, 255)}]
    >>> parse_color_gradient_string("-1%: red")
    Traceback (most recent call last):
    ...
    ValueError: invalid color stop position '-1%'
    >>> parse_color_gradient_string("150: red")
    Traceback (most recent call last):
    ...
    ValueError: invalid color stop position '150%'
    >>> parse_color_gradient_string("42%: hello")
    Traceback (most recent call last):
    ...
    ValueError: invalid color 'hello'
    >>> parse_color_gradient_string("hello")
    Traceback (most recent call last):
    ...
    ValueError: invalid color gradient 'hello'. ...
    """
    gradient = gradient.replace(" ", "").replace("%", "")
    if not re.match(r"[0-9-]+:[a-zA-Z0-9#]+(,[0-9]+:[a-zA-Z0-9#]+)*", gradient):
        raise ValueError(
            "invalid color gradient '%s'. It must look like '<POS1>:<COLOR1>,<POS2>:<COLOR2>,...'"
            % gradient
        )
    result = []
    for pos, color in [s.split(":") for s in gradient.split(",")]:
        pos = int(pos)
        if not 0 <= pos <= 100:
            raise ValueError("invalid color stop position '%i%%'" % pos)
        if not is_color(color):
            raise ValueError("invalid color '%s'" % color)
        result.append(
            {
                "pos": pos,
                "color": parse_color_string(color),
            }
        )
    return result
|
[
"def",
"parse_color_gradient_string",
"(",
"gradient",
")",
":",
"gradient",
"=",
"gradient",
".",
"replace",
"(",
"\" \"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"%\"",
",",
"\"\"",
")",
"if",
"not",
"re",
".",
"match",
"(",
"r\"[0-9-]+:[a-zA-Z0-9#]+(,[0-9]+:[a-zA-Z0-9#]+)*\"",
",",
"gradient",
")",
":",
"raise",
"ValueError",
"(",
"\"invalid color gradient '%s'. It must looks like '<POS1>:<COLOR1>,<POS2>:<COLOR2>,...'\"",
"%",
"gradient",
")",
"result",
"=",
"[",
"]",
"for",
"pos",
",",
"color",
"in",
"[",
"s",
".",
"split",
"(",
"\":\"",
")",
"for",
"s",
"in",
"gradient",
".",
"split",
"(",
"\",\"",
")",
"]",
":",
"pos",
"=",
"int",
"(",
"pos",
")",
"if",
"not",
"0",
"<=",
"pos",
"<=",
"100",
":",
"raise",
"ValueError",
"(",
"\"invalid color stop position '%i%%'\"",
"%",
"pos",
")",
"if",
"not",
"is_color",
"(",
"color",
")",
":",
"raise",
"ValueError",
"(",
"\"invalid color '%s'\"",
"%",
"color",
")",
"result",
".",
"append",
"(",
"{",
"\"pos\"",
":",
"pos",
",",
"\"color\"",
":",
"parse_color_string",
"(",
"color",
")",
",",
"}",
")",
"return",
"result"
] |
https://github.com/flozz/rivalcfg/blob/3bec77db4185bba18a7d181eb366c0de93b6ab6a/rivalcfg/color_helpers.py#L102-L152
|
|
edgedb/edgedb
|
872bf5abbb10f7c72df21f57635238ed27b9f280
|
edb/pgsql/metaschema.py
|
python
|
_describe_config
|
(
schema: s_schema.Schema,
source: str,
testmode: bool,
)
|
return query
|
Generate an EdgeQL query to render config as DDL.
|
Generate an EdgeQL query to render config as DDL.
|
[
"Generate",
"an",
"EdgeQL",
"query",
"to",
"render",
"config",
"as",
"DDL",
"."
] |
def _describe_config(
    schema: s_schema.Schema,
    source: str,
    testmode: bool,
) -> str:
    """Generate an EdgeQL query to render config as DDL."""
    if source == 'system override':
        scope = qltypes.ConfigScope.INSTANCE
        config_object_name = 'cfg::InstanceConfig'
    elif source == 'database':
        scope = qltypes.ConfigScope.DATABASE
        config_object_name = 'cfg::DatabaseConfig'
    else:
        raise AssertionError(f'unexpected configuration source: {source!r}')
    cfg = schema.get(config_object_name, type=s_objtypes.ObjectType)
    items = []
    for ptr_name, p in cfg.get_pointers(schema).items(schema):
        pn = str(ptr_name)
        if pn in ('id', '__type__'):
            continue
        is_internal = (
            p.get_annotation(
                schema,
                s_name.QualName('cfg', 'internal')
            ) == 'true'
        )
        if is_internal and not testmode:
            continue
        ptype = p.get_target(schema)
        assert ptype is not None
        ptr_card = p.get_cardinality(schema)
        mult = ptr_card.is_multi()
        if isinstance(ptype, s_objtypes.ObjectType):
            item = textwrap.indent(
                _render_config_object(
                    schema=schema,
                    valtype=ptype,
                    value_expr=str(ptype.get_name(schema)),
                    scope=scope,
                    join_term='',
                    level=1,
                ),
                ' ' * 4,
            )
        else:
            psource = f'{config_object_name}.{ qlquote.quote_ident(pn) }'
            renderer = _render_config_set if mult else _render_config_scalar
            item = textwrap.indent(
                renderer(
                    schema=schema,
                    valtype=ptype,
                    value_expr=psource,
                    name=pn,
                    scope=scope,
                    level=1,
                ),
                ' ' * 4,
            )
        condition = f'EXISTS json_get(conf, {ql(pn)})'
        if is_internal:
            condition = f'({condition}) AND testmode'
        items.append(f"(\n{item}\n IF {condition} ELSE ''\n )")
    testmode_check = (
        "<bool>json_get(cfg::get_config_json(),'__internal_testmode','value')"
        " ?? false"
    )
    query = (
        f"FOR conf IN {{cfg::get_config_json(sources := [{ql(source)}])}} "
        + "UNION (\n"
        + (f"FOR testmode IN {{{testmode_check}}} UNION (\n"
           if testmode else "")
        + "SELECT\n " + ' ++ '.join(items)
        + (")" if testmode else "")
        + ")"
    )
    return query
|
[
"def",
"_describe_config",
"(",
"schema",
":",
"s_schema",
".",
"Schema",
",",
"source",
":",
"str",
",",
"testmode",
":",
"bool",
",",
")",
"->",
"str",
":",
"if",
"source",
"==",
"'system override'",
":",
"scope",
"=",
"qltypes",
".",
"ConfigScope",
".",
"INSTANCE",
"config_object_name",
"=",
"'cfg::InstanceConfig'",
"elif",
"source",
"==",
"'database'",
":",
"scope",
"=",
"qltypes",
".",
"ConfigScope",
".",
"DATABASE",
"config_object_name",
"=",
"'cfg::DatabaseConfig'",
"else",
":",
"raise",
"AssertionError",
"(",
"f'unexpected configuration source: {source!r}'",
")",
"cfg",
"=",
"schema",
".",
"get",
"(",
"config_object_name",
",",
"type",
"=",
"s_objtypes",
".",
"ObjectType",
")",
"items",
"=",
"[",
"]",
"for",
"ptr_name",
",",
"p",
"in",
"cfg",
".",
"get_pointers",
"(",
"schema",
")",
".",
"items",
"(",
"schema",
")",
":",
"pn",
"=",
"str",
"(",
"ptr_name",
")",
"if",
"pn",
"in",
"(",
"'id'",
",",
"'__type__'",
")",
":",
"continue",
"is_internal",
"=",
"(",
"p",
".",
"get_annotation",
"(",
"schema",
",",
"s_name",
".",
"QualName",
"(",
"'cfg'",
",",
"'internal'",
")",
")",
"==",
"'true'",
")",
"if",
"is_internal",
"and",
"not",
"testmode",
":",
"continue",
"ptype",
"=",
"p",
".",
"get_target",
"(",
"schema",
")",
"assert",
"ptype",
"is",
"not",
"None",
"ptr_card",
"=",
"p",
".",
"get_cardinality",
"(",
"schema",
")",
"mult",
"=",
"ptr_card",
".",
"is_multi",
"(",
")",
"if",
"isinstance",
"(",
"ptype",
",",
"s_objtypes",
".",
"ObjectType",
")",
":",
"item",
"=",
"textwrap",
".",
"indent",
"(",
"_render_config_object",
"(",
"schema",
"=",
"schema",
",",
"valtype",
"=",
"ptype",
",",
"value_expr",
"=",
"str",
"(",
"ptype",
".",
"get_name",
"(",
"schema",
")",
")",
",",
"scope",
"=",
"scope",
",",
"join_term",
"=",
"''",
",",
"level",
"=",
"1",
",",
")",
",",
"' '",
"*",
"4",
",",
")",
"else",
":",
"psource",
"=",
"f'{config_object_name}.{ qlquote.quote_ident(pn) }'",
"renderer",
"=",
"_render_config_set",
"if",
"mult",
"else",
"_render_config_scalar",
"item",
"=",
"textwrap",
".",
"indent",
"(",
"renderer",
"(",
"schema",
"=",
"schema",
",",
"valtype",
"=",
"ptype",
",",
"value_expr",
"=",
"psource",
",",
"name",
"=",
"pn",
",",
"scope",
"=",
"scope",
",",
"level",
"=",
"1",
",",
")",
",",
"' '",
"*",
"4",
",",
")",
"condition",
"=",
"f'EXISTS json_get(conf, {ql(pn)})'",
"if",
"is_internal",
":",
"condition",
"=",
"f'({condition}) AND testmode'",
"items",
".",
"append",
"(",
"f\"(\\n{item}\\n IF {condition} ELSE ''\\n )\"",
")",
"testmode_check",
"=",
"(",
"\"<bool>json_get(cfg::get_config_json(),'__internal_testmode','value')\"",
"\" ?? false\"",
")",
"query",
"=",
"(",
"f\"FOR conf IN {{cfg::get_config_json(sources := [{ql(source)}])}} \"",
"+",
"\"UNION (\\n\"",
"+",
"(",
"f\"FOR testmode IN {{{testmode_check}}} UNION (\\n\"",
"if",
"testmode",
"else",
"\"\"",
")",
"+",
"\"SELECT\\n \"",
"+",
"' ++ '",
".",
"join",
"(",
"items",
")",
"+",
"(",
"\")\"",
"if",
"testmode",
"else",
"\"\"",
")",
"+",
"\")\"",
")",
"return",
"query"
] |
https://github.com/edgedb/edgedb/blob/872bf5abbb10f7c72df21f57635238ed27b9f280/edb/pgsql/metaschema.py#L4817-L4898
|
|
google-research/meta-dataset
|
c67dd2bb66fb2a4ce7e4e9906878e13d9b851eb5
|
meta_dataset/dataset_conversion/dataset_to_records.py
|
python
|
DatasetConverter.create_dataset_specification_and_records
|
(self)
|
Creates a DatasetSpecification and records for the dataset.
Specifically, the work that needs to be done here is twofold:
Firstly, the initial values of the following attributes need to be updated:
1) self.classes_per_split: a dict mapping each split to the number of
classes assigned to it
2) self.images_per_class: a dict mapping each class to its number of images
3) self.class_names: a dict mapping each class (e.g. 0) to its (string) name
if available.
This automatically results in updating self.dataset_spec as required.
Important note: Must assign class ids in a certain order:
lowest ones for training classes, then for validation classes and highest
ones for testing classes.
The reader data sources operate under this assumption.
Secondly, a tf.record needs to be created and written for each class. There
are some general functions at the top of this file that may be useful for
this (e.g. write_tfrecord_from_npy_single_channel,
write_tfrecord_from_image_files).
|
Creates a DatasetSpecification and records for the dataset.
|
[
"Creates",
"a",
"DatasetSpecification",
"and",
"records",
"for",
"the",
"dataset",
"."
] |
def create_dataset_specification_and_records(self):
    """Creates a DatasetSpecification and records for the dataset.
    Specifically, the work that needs to be done here is twofold:
    Firstly, the initial values of the following attributes need to be updated:
    1) self.classes_per_split: a dict mapping each split to the number of
    classes assigned to it
    2) self.images_per_class: a dict mapping each class to its number of images
    3) self.class_names: a dict mapping each class (e.g. 0) to its (string) name
    if available.
    This automatically results in updating self.dataset_spec as required.
    Important note: Must assign class ids in a certain order:
    lowest ones for training classes, then for validation classes and highest
    ones for testing classes.
    The reader data sources operate under this assumption.
    Secondly, a tf.record needs to be created and written for each class. There
    are some general functions at the top of this file that may be useful for
    this (e.g. write_tfrecord_from_npy_single_channel,
    write_tfrecord_from_image_files).
    """
    raise NotImplementedError('Must be implemented in each sub-class.')
|
[
"def",
"create_dataset_specification_and_records",
"(",
"self",
")",
":",
"raise",
"NotImplementedError",
"(",
"'Must be implemented in each sub-class.'",
")"
] |
https://github.com/google-research/meta-dataset/blob/c67dd2bb66fb2a4ce7e4e9906878e13d9b851eb5/meta_dataset/dataset_conversion/dataset_to_records.py#L615-L637
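A skeletal subclass sketch of the contract the docstring describes; the split keys, class names and counts below are invented for illustration, and a real converter would also write one tfrecord per class (e.g. via write_tfrecord_from_image_files).

class ToyDatasetConverter(DatasetConverter):
    def create_dataset_specification_and_records(self):
        # Class ids in the required order: train first, then valid, then test.
        self.classes_per_split = {'train': 2, 'valid': 1, 'test': 1}  # illustrative keys
        self.class_names = {0: 'cat', 1: 'dog', 2: 'bird', 3: 'fish'}
        self.images_per_class = {0: 100, 1: 100, 2: 50, 3: 50}
        # A real implementation would now write one tfrecord per class here.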
|
||
google-research/sound-separation
|
0b23ae22123b041b9538295f32a92151cb77bff9
|
datasets/fuss/make_ss_examples.py
|
python
|
Mixer.get_file_list
|
(self, subset, style)
|
return file_list
|
Get file list with relative paths to the desired subset.
|
Get file list with relative paths to the desired subset.
|
[
"Get",
"file",
"list",
"with",
"relative",
"paths",
"to",
"the",
"desired",
"subset",
"."
] |
def get_file_list(self, subset, style):
    """Get file list with relative paths to the desired subset."""
    list_name = os.path.join(self.fg_root, subset + '_' + style + '.txt')
    with open(list_name, 'r') as f:
        file_list = f.read().splitlines()
    # Pick the relative path wrt subset name.
    # This is required since scaper checks the parent folder of the wav files
    # and the parent folder name should be from the list of allowed labels.
    file_list = [os.path.relpath(f, subset) for f in file_list]
    np.random.shuffle(file_list)
    return file_list
|
[
"def",
"get_file_list",
"(",
"self",
",",
"subset",
",",
"style",
")",
":",
"list_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"fg_root",
",",
"subset",
"+",
"'_'",
"+",
"style",
"+",
"'.txt'",
")",
"with",
"open",
"(",
"list_name",
",",
"'r'",
")",
"as",
"f",
":",
"file_list",
"=",
"f",
".",
"read",
"(",
")",
".",
"splitlines",
"(",
")",
"# Pick the relative path wrt subset name.",
"# This is required since scaper checks the parent folder of the wav files",
"# and the parent folder name should be from the list of allowed labels.",
"file_list",
"=",
"[",
"os",
".",
"path",
".",
"relpath",
"(",
"f",
",",
"subset",
")",
"for",
"f",
"in",
"file_list",
"]",
"np",
".",
"random",
".",
"shuffle",
"(",
"file_list",
")",
"return",
"file_list"
] |
https://github.com/google-research/sound-separation/blob/0b23ae22123b041b9538295f32a92151cb77bff9/datasets/fuss/make_ss_examples.py#L91-L103
|
|
getavalon/core
|
31e8cb4760e00e3db64443f6f932b7fd8e96d41d
|
avalon/tools/sceneinventory/lib.py
|
python
|
walk_hierarchy
|
(node)
|
Recursively yield group node
|
Recursively yield group node
|
[
"Recursively",
"yield",
"group",
"node"
] |
def walk_hierarchy(node):
    """Recursively yield group node"""
    for child in node.children():
        if child.get("isGroupNode"):
            yield child
            for _child in walk_hierarchy(child):
                yield _child
|
[
"def",
"walk_hierarchy",
"(",
"node",
")",
":",
"for",
"child",
"in",
"node",
".",
"children",
"(",
")",
":",
"if",
"child",
".",
"get",
"(",
"\"isGroupNode\"",
")",
":",
"yield",
"child",
"for",
"_child",
"in",
"walk_hierarchy",
"(",
"child",
")",
":",
"yield",
"_child"
] |
https://github.com/getavalon/core/blob/31e8cb4760e00e3db64443f6f932b7fd8e96d41d/avalon/tools/sceneinventory/lib.py#L76-L83
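A quick self-contained check of the function above, using a stand-in node type that supplies the two pieces of API it relies on: children() and dict-style get("isGroupNode").

class FakeNode(dict):
    def __init__(self, name, children=(), **flags):
        super().__init__(flags)
        self.name = name
        self._children = list(children)

    def children(self):
        return self._children

leaf = FakeNode("leaf")
inner = FakeNode("inner", [leaf], isGroupNode=True)
root = FakeNode("root", [inner])
print([n.name for n in walk_hierarchy(root)])  # ['inner'] -- leaf is not a group node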
|
||
larryhastings/gilectomy
|
4315ec3f1d6d4f813cc82ce27a24e7f784dbfc1a
|
Lib/codecs.py
|
python
|
StreamReader.__getattr__
|
(self, name,
getattr=getattr)
|
return getattr(self.stream, name)
|
Inherit all other methods from the underlying stream.
|
Inherit all other methods from the underlying stream.
|
[
"Inherit",
"all",
"other",
"methods",
"from",
"the",
"underlying",
"stream",
"."
] |
def __getattr__(self, name,
                getattr=getattr):
    """ Inherit all other methods from the underlying stream.
    """
    return getattr(self.stream, name)
|
[
"def",
"__getattr__",
"(",
"self",
",",
"name",
",",
"getattr",
"=",
"getattr",
")",
":",
"return",
"getattr",
"(",
"self",
".",
"stream",
",",
"name",
")"
] |
https://github.com/larryhastings/gilectomy/blob/4315ec3f1d6d4f813cc82ce27a24e7f784dbfc1a/Lib/codecs.py#L650-L655
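The same delegation trick in a toy wrapper, runnable on its own; the getattr=getattr default argument simply binds the builtin locally, a small lookup optimization.

import io

class Wrapper:
    def __init__(self, stream):
        self.stream = stream

    def __getattr__(self, name, getattr=getattr):
        # Invoked only for attributes not found on Wrapper itself.
        return getattr(self.stream, name)

w = Wrapper(io.BytesIO(b"hello"))
print(w.read())  # b'hello' -- resolved on the wrapped stream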
|
|
openstack/manila
|
142990edc027e14839d5deaf4954dd6fc88de15e
|
manila/share/drivers/hitachi/hnas/driver.py
|
python
|
HitachiHNASDriver.manage_existing_snapshot
|
(self, snapshot, driver_options)
|
return output
|
Manages a snapshot that exists only in HNAS.
The snapshot to be managed should be in the path
/snapshots/SHARE_ID/SNAPSHOT_ID. Also, the size of the snapshot should be
provided as --driver_options size=<size>.
:param snapshot: snapshot that will be managed.
:param driver_options: expects only one key 'size'. It must be
provided in order to manage a snapshot.
:returns: Returns a dict with size of snapshot managed
|
Manages a snapshot that exists only in HNAS.
|
[
"Manages",
"a",
"snapshot",
"that",
"exists",
"only",
"in",
"HNAS",
"."
] |
def manage_existing_snapshot(self, snapshot, driver_options):
    """Manages a snapshot that exists only in HNAS.
    The snapshot to be managed should be in the path
    /snapshots/SHARE_ID/SNAPSHOT_ID. Also, the size of the snapshot should be
    provided as --driver_options size=<size>.
    :param snapshot: snapshot that will be managed.
    :param driver_options: expects only one key 'size'. It must be
        provided in order to manage a snapshot.
    :returns: Returns a dict with size of snapshot managed
    """
    try:
        snapshot_size = int(driver_options.get("size", 0))
    except (ValueError, TypeError):
        msg = _("The size in driver options to manage snapshot "
                "%(snap_id)s should be an integer, in format "
                "driver-options size=<SIZE>. Value passed: "
                "%(size)s.") % {'snap_id': snapshot['id'],
                                'size': driver_options.get("size")}
        raise exception.ManageInvalidShareSnapshot(reason=msg)
    if snapshot_size == 0:
        msg = _("Snapshot %(snap_id)s has no size specified for manage. "
                "Please, provide the size with parameter driver-options "
                "size=<SIZE>.") % {'snap_id': snapshot['id']}
        raise exception.ManageInvalidShareSnapshot(reason=msg)
    hnas_share_id = self._get_hnas_share_id(snapshot['share_id'])
    LOG.debug("Path provided to manage snapshot: %(path)s.",
              {'path': snapshot['provider_location']})
    path_info = snapshot['provider_location'].split('/')
    if len(path_info) == 4 and path_info[1] == 'snapshots':
        path_share_id = path_info[2]
        hnas_snapshot_id = path_info[3]
    else:
        msg = (_("Incorrect path %(path)s for manage snapshot "
                 "%(snap_id)s. It should have the following format: "
                 "/snapshots/SHARE_ID/SNAPSHOT_ID.") %
               {'path': snapshot['provider_location'],
                'snap_id': snapshot['id']})
        raise exception.ManageInvalidShareSnapshot(reason=msg)
    if hnas_share_id != path_share_id:
        msg = _("The snapshot %(snap_id)s does not belong to share "
                "%(share_id)s.") % {'snap_id': snapshot['id'],
                                    'share_id': snapshot['share_id']}
        raise exception.ManageInvalidShareSnapshot(reason=msg)
    if not self.hnas.check_directory(snapshot['provider_location']):
        msg = _("Snapshot %(snap_id)s does not exist in "
                "HNAS.") % {'snap_id': hnas_snapshot_id}
        raise exception.ManageInvalidShareSnapshot(reason=msg)
    try:
        self._ensure_snapshot(snapshot, hnas_snapshot_id)
    except exception.HNASItemNotFoundException:
        LOG.warning("Export does not exist for snapshot %s, "
                    "creating a new one.", snapshot['id'])
        self._create_export(hnas_share_id,
                            snapshot['share']['share_proto'],
                            snapshot_id=hnas_snapshot_id)
    output = {'size': snapshot_size}
    if snapshot['share'].get('mount_snapshot_support'):
        export_locations = self._get_export_locations(
            snapshot['share']['share_proto'],
            hnas_snapshot_id,
            is_snapshot=True)
        output['export_locations'] = export_locations
    LOG.info("Snapshot %(snap_path)s for share %(shr_id)s was "
             "successfully managed with ID %(snap_id)s.",
             {'snap_path': snapshot['provider_location'],
              'shr_id': snapshot['share_id'],
              'snap_id': snapshot['id']})
    return output
|
[
"def",
"manage_existing_snapshot",
"(",
"self",
",",
"snapshot",
",",
"driver_options",
")",
":",
"try",
":",
"snapshot_size",
"=",
"int",
"(",
"driver_options",
".",
"get",
"(",
"\"size\"",
",",
"0",
")",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"msg",
"=",
"_",
"(",
"\"The size in driver options to manage snapshot \"",
"\"%(snap_id)s should be an integer, in format \"",
"\"driver-options size=<SIZE>. Value passed: \"",
"\"%(size)s.\"",
")",
"%",
"{",
"'snap_id'",
":",
"snapshot",
"[",
"'id'",
"]",
",",
"'size'",
":",
"driver_options",
".",
"get",
"(",
"\"size\"",
")",
"}",
"raise",
"exception",
".",
"ManageInvalidShareSnapshot",
"(",
"reason",
"=",
"msg",
")",
"if",
"snapshot_size",
"==",
"0",
":",
"msg",
"=",
"_",
"(",
"\"Snapshot %(snap_id)s has no size specified for manage. \"",
"\"Please, provide the size with parameter driver-options \"",
"\"size=<SIZE>.\"",
")",
"%",
"{",
"'snap_id'",
":",
"snapshot",
"[",
"'id'",
"]",
"}",
"raise",
"exception",
".",
"ManageInvalidShareSnapshot",
"(",
"reason",
"=",
"msg",
")",
"hnas_share_id",
"=",
"self",
".",
"_get_hnas_share_id",
"(",
"snapshot",
"[",
"'share_id'",
"]",
")",
"LOG",
".",
"debug",
"(",
"\"Path provided to manage snapshot: %(path)s.\"",
",",
"{",
"'path'",
":",
"snapshot",
"[",
"'provider_location'",
"]",
"}",
")",
"path_info",
"=",
"snapshot",
"[",
"'provider_location'",
"]",
".",
"split",
"(",
"'/'",
")",
"if",
"len",
"(",
"path_info",
")",
"==",
"4",
"and",
"path_info",
"[",
"1",
"]",
"==",
"'snapshots'",
":",
"path_share_id",
"=",
"path_info",
"[",
"2",
"]",
"hnas_snapshot_id",
"=",
"path_info",
"[",
"3",
"]",
"else",
":",
"msg",
"=",
"(",
"_",
"(",
"\"Incorrect path %(path)s for manage snapshot \"",
"\"%(snap_id)s. It should have the following format: \"",
"\"/snapshots/SHARE_ID/SNAPSHOT_ID.\"",
")",
"%",
"{",
"'path'",
":",
"snapshot",
"[",
"'provider_location'",
"]",
",",
"'snap_id'",
":",
"snapshot",
"[",
"'id'",
"]",
"}",
")",
"raise",
"exception",
".",
"ManageInvalidShareSnapshot",
"(",
"reason",
"=",
"msg",
")",
"if",
"hnas_share_id",
"!=",
"path_share_id",
":",
"msg",
"=",
"_",
"(",
"\"The snapshot %(snap_id)s does not belong to share \"",
"\"%(share_id)s.\"",
")",
"%",
"{",
"'snap_id'",
":",
"snapshot",
"[",
"'id'",
"]",
",",
"'share_id'",
":",
"snapshot",
"[",
"'share_id'",
"]",
"}",
"raise",
"exception",
".",
"ManageInvalidShareSnapshot",
"(",
"reason",
"=",
"msg",
")",
"if",
"not",
"self",
".",
"hnas",
".",
"check_directory",
"(",
"snapshot",
"[",
"'provider_location'",
"]",
")",
":",
"msg",
"=",
"_",
"(",
"\"Snapshot %(snap_id)s does not exist in \"",
"\"HNAS.\"",
")",
"%",
"{",
"'snap_id'",
":",
"hnas_snapshot_id",
"}",
"raise",
"exception",
".",
"ManageInvalidShareSnapshot",
"(",
"reason",
"=",
"msg",
")",
"try",
":",
"self",
".",
"_ensure_snapshot",
"(",
"snapshot",
",",
"hnas_snapshot_id",
")",
"except",
"exception",
".",
"HNASItemNotFoundException",
":",
"LOG",
".",
"warning",
"(",
"\"Export does not exist for snapshot %s, \"",
"\"creating a new one.\"",
",",
"snapshot",
"[",
"'id'",
"]",
")",
"self",
".",
"_create_export",
"(",
"hnas_share_id",
",",
"snapshot",
"[",
"'share'",
"]",
"[",
"'share_proto'",
"]",
",",
"snapshot_id",
"=",
"hnas_snapshot_id",
")",
"output",
"=",
"{",
"'size'",
":",
"snapshot_size",
"}",
"if",
"snapshot",
"[",
"'share'",
"]",
".",
"get",
"(",
"'mount_snapshot_support'",
")",
":",
"export_locations",
"=",
"self",
".",
"_get_export_locations",
"(",
"snapshot",
"[",
"'share'",
"]",
"[",
"'share_proto'",
"]",
",",
"hnas_snapshot_id",
",",
"is_snapshot",
"=",
"True",
")",
"output",
"[",
"'export_locations'",
"]",
"=",
"export_locations",
"LOG",
".",
"info",
"(",
"\"Snapshot %(snap_path)s for share %(shr_id)s was \"",
"\"successfully managed with ID %(snap_id)s.\"",
",",
"{",
"'snap_path'",
":",
"snapshot",
"[",
"'provider_location'",
"]",
",",
"'shr_id'",
":",
"snapshot",
"[",
"'share_id'",
"]",
",",
"'snap_id'",
":",
"snapshot",
"[",
"'id'",
"]",
"}",
")",
"return",
"output"
] |
https://github.com/openstack/manila/blob/142990edc027e14839d5deaf4954dd6fc88de15e/manila/share/drivers/hitachi/hnas/driver.py#L1286-L1366
|
|
Autodesk/molecular-design-toolkit
|
5f45a47fea21d3603899a6366cb163024f0e2ec4
|
moldesign/helpers/pdb.py
|
python
|
get_conect_pairs
|
(mol)
|
return conects
|
Returns a dictionary of HETATM bonds for a PDB CONECT record
Note that this doesn't return the text records themselves, because they need
to reference a specific PDB sequence number
|
Returns a dictionary of HETATM bonds for a PDB CONECT record
|
[
"Returns",
"a",
"dicitonary",
"of",
"HETATM",
"bonds",
"for",
"a",
"PDB",
"CONECT",
"record"
] |
def get_conect_pairs(mol):
    """ Returns a dictionary of HETATM bonds for a PDB CONECT record
    Note that this doesn't return the text records themselves, because they need
    to reference a specific PDB sequence number
    """
    conects = collections.OrderedDict()
    for residue in mol.residues:
        # intra-residue bonds
        if not residue.is_standard_residue:
            for bond in residue.bonds:
                if bond.order <= 1:
                    order = 1
                else:
                    order = bond.order
                for i in range(order):
                    conects.setdefault(bond.a1, []).append(bond.a2)
        # inter-residue bonds
        try:
            r2 = residue.next_residue
        except (StopIteration, KeyError, NotImplementedError):
            continue
        if not (residue.is_standard_residue and r2.is_standard_residue):
            for bond in residue.bonds_to(r2):
                conects.setdefault(bond.a1, []).append(bond.a2)
    return conects
|
[
"def",
"get_conect_pairs",
"(",
"mol",
")",
":",
"conects",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"for",
"residue",
"in",
"mol",
".",
"residues",
":",
"# intra-residue bonds",
"if",
"not",
"residue",
".",
"is_standard_residue",
":",
"for",
"bond",
"in",
"residue",
".",
"bonds",
":",
"if",
"bond",
".",
"order",
"<=",
"1",
":",
"order",
"=",
"1",
"else",
":",
"order",
"=",
"bond",
".",
"order",
"for",
"i",
"in",
"range",
"(",
"order",
")",
":",
"conects",
".",
"setdefault",
"(",
"bond",
".",
"a1",
",",
"[",
"]",
")",
".",
"append",
"(",
"bond",
".",
"a2",
")",
"# inter-residue bonds",
"try",
":",
"r2",
"=",
"residue",
".",
"next_residue",
"except",
"(",
"StopIteration",
",",
"KeyError",
",",
"NotImplementedError",
")",
":",
"continue",
"if",
"not",
"(",
"residue",
".",
"is_standard_residue",
"and",
"r2",
".",
"is_standard_residue",
")",
":",
"for",
"bond",
"in",
"residue",
".",
"bonds_to",
"(",
"r2",
")",
":",
"conects",
".",
"setdefault",
"(",
"bond",
".",
"a1",
",",
"[",
"]",
")",
".",
"append",
"(",
"bond",
".",
"a2",
")",
"return",
"conects"
] |
https://github.com/Autodesk/molecular-design-toolkit/blob/5f45a47fea21d3603899a6366cb163024f0e2ec4/moldesign/helpers/pdb.py#L35-L63
|
|
oilshell/oil
|
94388e7d44a9ad879b12615f6203b38596b5a2d3
|
opy/tools/astgen.py
|
python
|
load_boilerplate
|
(file)
|
return pro, epi
|
[] |
def load_boilerplate(file):
    f = open(file)
    buf = f.read()
    f.close()
    i = buf.find('### ''PROLOGUE')
    j = buf.find('### ''EPILOGUE')
    pro = buf[i+12:j].strip()
    epi = buf[j+12:].strip()
    return pro, epi
|
[
"def",
"load_boilerplate",
"(",
"file",
")",
":",
"f",
"=",
"open",
"(",
"file",
")",
"buf",
"=",
"f",
".",
"read",
"(",
")",
"f",
".",
"close",
"(",
")",
"i",
"=",
"buf",
".",
"find",
"(",
"'### '",
"'PROLOGUE'",
")",
"j",
"=",
"buf",
".",
"find",
"(",
"'### '",
"'EPILOGUE'",
")",
"pro",
"=",
"buf",
"[",
"i",
"+",
"12",
":",
"j",
"]",
".",
"strip",
"(",
")",
"epi",
"=",
"buf",
"[",
"j",
"+",
"12",
":",
"]",
".",
"strip",
"(",
")",
"return",
"pro",
",",
"epi"
] |
https://github.com/oilshell/oil/blob/94388e7d44a9ad879b12615f6203b38596b5a2d3/opy/tools/astgen.py#L19-L27
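A usage sketch of load_boilerplate against a throwaway template; the index arithmetic works because '### PROLOGUE' and '### EPILOGUE' are both 12 characters long.

import os, tempfile

template = "header\n### PROLOGUE\nimport sys\n### EPILOGUE\nprint('done')\n"
with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as f:
    f.write(template)
    path = f.name

pro, epi = load_boilerplate(path)
print(repr(pro))  # 'import sys'
print(repr(epi))  # "print('done')"
os.unlink(path)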
|
|||
bendmorris/static-python
|
2e0f8c4d7ed5b359dc7d8a75b6fb37e6b6c5c473
|
Lib/datetime.py
|
python
|
datetime.ctime
|
(self)
|
return "%s %s %2d %02d:%02d:%02d %04d" % (
_DAYNAMES[weekday],
_MONTHNAMES[self._month],
self._day,
self._hour, self._minute, self._second,
self._year)
|
Return ctime() style string.
|
Return ctime() style string.
|
[
"Return",
"ctime",
"()",
"style",
"string",
"."
] |
def ctime(self):
    "Return ctime() style string."
    weekday = self.toordinal() % 7 or 7
    return "%s %s %2d %02d:%02d:%02d %04d" % (
        _DAYNAMES[weekday],
        _MONTHNAMES[self._month],
        self._day,
        self._hour, self._minute, self._second,
        self._year)
|
[
"def",
"ctime",
"(",
"self",
")",
":",
"weekday",
"=",
"self",
".",
"toordinal",
"(",
")",
"%",
"7",
"or",
"7",
"return",
"\"%s %s %2d %02d:%02d:%02d %04d\"",
"%",
"(",
"_DAYNAMES",
"[",
"weekday",
"]",
",",
"_MONTHNAMES",
"[",
"self",
".",
"_month",
"]",
",",
"self",
".",
"_day",
",",
"self",
".",
"_hour",
",",
"self",
".",
"_minute",
",",
"self",
".",
"_second",
",",
"self",
".",
"_year",
")"
] |
https://github.com/bendmorris/static-python/blob/2e0f8c4d7ed5b359dc7d8a75b6fb37e6b6c5c473/Lib/datetime.py#L1531-L1539
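For comparison, the stdlib datetime produces the same space-padded layout this pure-Python method emits:

from datetime import datetime

# Jan 2, 2024 fell on a Tuesday; note the %2d space-padded day field.
print(datetime(2024, 1, 2, 3, 4, 5).ctime())  # Tue Jan  2 03:04:05 2024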
|
|
krintoxi/NoobSec-Toolkit
|
38738541cbc03cedb9a3b3ed13b629f781ad64f6
|
NoobSecToolkit /tools/sqli/thirdparty/gprof2dot/gprof2dot.py
|
python
|
Theme.color
|
(self, weight)
|
return self.hsl_to_rgb(h, s, l)
|
[] |
def color(self, weight):
    weight = min(max(weight, 0.0), 1.0)
    hmin, smin, lmin = self.mincolor
    hmax, smax, lmax = self.maxcolor
    if self.skew < 0:
        raise ValueError("Skew must be greater than 0")
    elif self.skew == 1.0:
        h = hmin + weight*(hmax - hmin)
        s = smin + weight*(smax - smin)
        l = lmin + weight*(lmax - lmin)
    else:
        base = self.skew
        h = hmin + ((hmax-hmin)*(-1.0 + (base ** weight)) / (base - 1.0))
        s = smin + ((smax-smin)*(-1.0 + (base ** weight)) / (base - 1.0))
        l = lmin + ((lmax-lmin)*(-1.0 + (base ** weight)) / (base - 1.0))
    return self.hsl_to_rgb(h, s, l)
|
[
"def",
"color",
"(",
"self",
",",
"weight",
")",
":",
"weight",
"=",
"min",
"(",
"max",
"(",
"weight",
",",
"0.0",
")",
",",
"1.0",
")",
"hmin",
",",
"smin",
",",
"lmin",
"=",
"self",
".",
"mincolor",
"hmax",
",",
"smax",
",",
"lmax",
"=",
"self",
".",
"maxcolor",
"if",
"self",
".",
"skew",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Skew must be greater than 0\"",
")",
"elif",
"self",
".",
"skew",
"==",
"1.0",
":",
"h",
"=",
"hmin",
"+",
"weight",
"*",
"(",
"hmax",
"-",
"hmin",
")",
"s",
"=",
"smin",
"+",
"weight",
"*",
"(",
"smax",
"-",
"smin",
")",
"l",
"=",
"lmin",
"+",
"weight",
"*",
"(",
"lmax",
"-",
"lmin",
")",
"else",
":",
"base",
"=",
"self",
".",
"skew",
"h",
"=",
"hmin",
"+",
"(",
"(",
"hmax",
"-",
"hmin",
")",
"*",
"(",
"-",
"1.0",
"+",
"(",
"base",
"**",
"weight",
")",
")",
"/",
"(",
"base",
"-",
"1.0",
")",
")",
"s",
"=",
"smin",
"+",
"(",
"(",
"smax",
"-",
"smin",
")",
"*",
"(",
"-",
"1.0",
"+",
"(",
"base",
"**",
"weight",
")",
")",
"/",
"(",
"base",
"-",
"1.0",
")",
")",
"l",
"=",
"lmin",
"+",
"(",
"(",
"lmax",
"-",
"lmin",
")",
"*",
"(",
"-",
"1.0",
"+",
"(",
"base",
"**",
"weight",
")",
")",
"/",
"(",
"base",
"-",
"1.0",
")",
")",
"return",
"self",
".",
"hsl_to_rgb",
"(",
"h",
",",
"s",
",",
"l",
")"
] |
https://github.com/krintoxi/NoobSec-Toolkit/blob/38738541cbc03cedb9a3b3ed13b629f781ad64f6/NoobSecToolkit /tools/sqli/thirdparty/gprof2dot/gprof2dot.py#L2188-L2206
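The skew branch above interpolates with (base**w - 1)/(base - 1) instead of w itself; a standalone sketch of that easing curve, with an assumed base of 4.0:

def skewed(weight, base):
    # Reduces to plain `weight` as base approaches 1; bends below it for base > 1.
    return (base ** weight - 1.0) / (base - 1.0)

for w in (0.0, 0.25, 0.5, 0.75, 1.0):
    print(w, round(skewed(w, 4.0), 3))
# skewed values: 0.0, 0.138, 0.333, 0.609, 1.0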
|
|||
tokestermw/tensorflow-shakespeare
|
a6c23aa24a3f1d753743946b8d11bb4f4b292e1a
|
tensorshake/translate/seq2seq_model.py
|
python
|
Seq2SeqModel.step
|
(self, session, encoder_inputs, decoder_inputs, target_weights,
bucket_id, forward_only)
|
Run a step of the model feeding the given inputs.
Args:
session: tensorflow session to use.
encoder_inputs: list of numpy int vectors to feed as encoder inputs.
decoder_inputs: list of numpy int vectors to feed as decoder inputs.
target_weights: list of numpy float vectors to feed as target weights.
bucket_id: which bucket of the model to use.
forward_only: whether to do the backward step or only forward.
Returns:
A triple consisting of gradient norm (or None if we did not do backward),
average perplexity, and the outputs.
Raises:
ValueError: if length of encoder_inputs, decoder_inputs, or
target_weights disagrees with bucket size for the specified bucket_id.
|
Run a step of the model feeding the given inputs.
|
[
"Run",
"a",
"step",
"of",
"the",
"model",
"feeding",
"the",
"given",
"inputs",
"."
] |
def step(self, session, encoder_inputs, decoder_inputs, target_weights,
         bucket_id, forward_only):
    """Run a step of the model feeding the given inputs.
    Args:
      session: tensorflow session to use.
      encoder_inputs: list of numpy int vectors to feed as encoder inputs.
      decoder_inputs: list of numpy int vectors to feed as decoder inputs.
      target_weights: list of numpy float vectors to feed as target weights.
      bucket_id: which bucket of the model to use.
      forward_only: whether to do the backward step or only forward.
    Returns:
      A triple consisting of gradient norm (or None if we did not do backward),
      average perplexity, and the outputs.
    Raises:
      ValueError: if length of encoder_inputs, decoder_inputs, or
        target_weights disagrees with bucket size for the specified bucket_id.
    """
    # Check if the sizes match.
    encoder_size, decoder_size = self.buckets[bucket_id]
    if len(encoder_inputs) != encoder_size:
        raise ValueError("Encoder length must be equal to the one in bucket,"
                         " %d != %d." % (len(encoder_inputs), encoder_size))
    if len(decoder_inputs) != decoder_size:
        raise ValueError("Decoder length must be equal to the one in bucket,"
                         " %d != %d." % (len(decoder_inputs), decoder_size))
    if len(target_weights) != decoder_size:
        raise ValueError("Weights length must be equal to the one in bucket,"
                         " %d != %d." % (len(target_weights), decoder_size))
    # Input feed: encoder inputs, decoder inputs, target_weights, as provided.
    input_feed = {}
    for l in xrange(encoder_size):
        input_feed[self.encoder_inputs[l].name] = encoder_inputs[l]
    for l in xrange(decoder_size):
        input_feed[self.decoder_inputs[l].name] = decoder_inputs[l]
        input_feed[self.target_weights[l].name] = target_weights[l]
    # Since our targets are decoder inputs shifted by one, we need one more.
    last_target = self.decoder_inputs[decoder_size].name
    input_feed[last_target] = np.zeros([self.batch_size], dtype=np.int32)
    # Output feed: depends on whether we do a backward step or not.
    if not forward_only:
        output_feed = [self.updates[bucket_id],  # Update Op that does SGD.
                       self.gradient_norms[bucket_id],  # Gradient norm.
                       self.losses[bucket_id]]  # Loss for this batch.
    else:
        output_feed = [self.losses[bucket_id]]  # Loss for this batch.
        for l in xrange(decoder_size):  # Output logits.
            output_feed.append(self.outputs[bucket_id][l])
    outputs = session.run(output_feed, input_feed)
    if not forward_only:
        return outputs[1], outputs[2], None  # Gradient norm, loss, no outputs.
    else:
        return None, outputs[0], outputs[1:]
|
[
"def",
"step",
"(",
"self",
",",
"session",
",",
"encoder_inputs",
",",
"decoder_inputs",
",",
"target_weights",
",",
"bucket_id",
",",
"forward_only",
")",
":",
"# Check if the sizes match.",
"encoder_size",
",",
"decoder_size",
"=",
"self",
".",
"buckets",
"[",
"bucket_id",
"]",
"if",
"len",
"(",
"encoder_inputs",
")",
"!=",
"encoder_size",
":",
"raise",
"ValueError",
"(",
"\"Encoder length must be equal to the one in bucket,\"",
"\" %d != %d.\"",
"%",
"(",
"len",
"(",
"encoder_inputs",
")",
",",
"encoder_size",
")",
")",
"if",
"len",
"(",
"decoder_inputs",
")",
"!=",
"decoder_size",
":",
"raise",
"ValueError",
"(",
"\"Decoder length must be equal to the one in bucket,\"",
"\" %d != %d.\"",
"%",
"(",
"len",
"(",
"decoder_inputs",
")",
",",
"decoder_size",
")",
")",
"if",
"len",
"(",
"target_weights",
")",
"!=",
"decoder_size",
":",
"raise",
"ValueError",
"(",
"\"Weights length must be equal to the one in bucket,\"",
"\" %d != %d.\"",
"%",
"(",
"len",
"(",
"target_weights",
")",
",",
"decoder_size",
")",
")",
"# Input feed: encoder inputs, decoder inputs, target_weights, as provided.",
"input_feed",
"=",
"{",
"}",
"for",
"l",
"in",
"xrange",
"(",
"encoder_size",
")",
":",
"input_feed",
"[",
"self",
".",
"encoder_inputs",
"[",
"l",
"]",
".",
"name",
"]",
"=",
"encoder_inputs",
"[",
"l",
"]",
"for",
"l",
"in",
"xrange",
"(",
"decoder_size",
")",
":",
"input_feed",
"[",
"self",
".",
"decoder_inputs",
"[",
"l",
"]",
".",
"name",
"]",
"=",
"decoder_inputs",
"[",
"l",
"]",
"input_feed",
"[",
"self",
".",
"target_weights",
"[",
"l",
"]",
".",
"name",
"]",
"=",
"target_weights",
"[",
"l",
"]",
"# Since our targets are decoder inputs shifted by one, we need one more.",
"last_target",
"=",
"self",
".",
"decoder_inputs",
"[",
"decoder_size",
"]",
".",
"name",
"input_feed",
"[",
"last_target",
"]",
"=",
"np",
".",
"zeros",
"(",
"[",
"self",
".",
"batch_size",
"]",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"# Output feed: depends on whether we do a backward step or not.",
"if",
"not",
"forward_only",
":",
"output_feed",
"=",
"[",
"self",
".",
"updates",
"[",
"bucket_id",
"]",
",",
"# Update Op that does SGD.",
"self",
".",
"gradient_norms",
"[",
"bucket_id",
"]",
",",
"# Gradient norm.",
"self",
".",
"losses",
"[",
"bucket_id",
"]",
"]",
"# Loss for this batch.",
"else",
":",
"output_feed",
"=",
"[",
"self",
".",
"losses",
"[",
"bucket_id",
"]",
"]",
"# Loss for this batch.",
"for",
"l",
"in",
"xrange",
"(",
"decoder_size",
")",
":",
"# Output logits.",
"output_feed",
".",
"append",
"(",
"self",
".",
"outputs",
"[",
"bucket_id",
"]",
"[",
"l",
"]",
")",
"outputs",
"=",
"session",
".",
"run",
"(",
"output_feed",
",",
"input_feed",
")",
"if",
"not",
"forward_only",
":",
"return",
"outputs",
"[",
"1",
"]",
",",
"outputs",
"[",
"2",
"]",
",",
"None",
"# Gradient norm, loss, no outputs.",
"else",
":",
"return",
"None",
",",
"outputs",
"[",
"0",
"]",
",",
"outputs",
"[",
"1",
":",
"]"
] |
https://github.com/tokestermw/tensorflow-shakespeare/blob/a6c23aa24a3f1d753743946b8d11bb4f4b292e1a/tensorshake/translate/seq2seq_model.py#L156-L214
|
||
Azure/azure-cli
|
6c1b085a0910c6c2139006fcbd8ade44006eb6dd
|
src/azure-cli/azure/cli/command_modules/resource/custom.py
|
python
|
list_resource_groups
|
(cmd, tag=None)
|
return list(groups)
|
List resource groups, optionally filtered by a tag.
:param str tag: tag to filter by in 'key[=value]' format
|
List resource groups, optionally filtered by a tag.
:param str tag: tag to filter by in 'key[=value]' format
|
[
"List",
"resource",
"groups",
"optionally",
"filtered",
"by",
"a",
"tag",
".",
":",
"param",
"str",
"tag",
":",
"tag",
"to",
"filter",
"by",
"in",
"key",
"[",
"=",
"value",
"]",
"format"
] |
def list_resource_groups(cmd, tag=None):  # pylint: disable=no-self-use
    """ List resource groups, optionally filtered by a tag.
    :param str tag: tag to filter by in 'key[=value]' format
    """
    rcf = _resource_client_factory(cmd.cli_ctx)
    filters = []
    if tag:
        key = list(tag.keys())[0]
        filters.append("tagname eq '{}'".format(key))
        if tag[key]:
            filters.append("tagvalue eq '{}'".format(tag[key]))
    filter_text = ' and '.join(filters) if filters else None
    groups = rcf.resource_groups.list(filter=filter_text)
    return list(groups)
|
[
"def",
"list_resource_groups",
"(",
"cmd",
",",
"tag",
"=",
"None",
")",
":",
"# pylint: disable=no-self-use",
"rcf",
"=",
"_resource_client_factory",
"(",
"cmd",
".",
"cli_ctx",
")",
"filters",
"=",
"[",
"]",
"if",
"tag",
":",
"key",
"=",
"list",
"(",
"tag",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
"filters",
".",
"append",
"(",
"\"tagname eq '{}'\"",
".",
"format",
"(",
"key",
")",
")",
"if",
"tag",
"[",
"key",
"]",
":",
"filters",
".",
"append",
"(",
"\"tagvalue eq '{}'\"",
".",
"format",
"(",
"tag",
"[",
"key",
"]",
")",
")",
"filter_text",
"=",
"' and '",
".",
"join",
"(",
"filters",
")",
"if",
"filters",
"else",
"None",
"groups",
"=",
"rcf",
".",
"resource_groups",
".",
"list",
"(",
"filter",
"=",
"filter_text",
")",
"return",
"list",
"(",
"groups",
")"
] |
https://github.com/Azure/azure-cli/blob/6c1b085a0910c6c2139006fcbd8ade44006eb6dd/src/azure-cli/azure/cli/command_modules/resource/custom.py#L1283-L1299
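The OData filter assembly above is plain string formatting and can be exercised standalone; this mirrors that logic under a hypothetical helper name:

def build_tag_filter(tag):  # hypothetical extraction of the filter logic above
    filters = []
    if tag:
        key = list(tag.keys())[0]
        filters.append("tagname eq '{}'".format(key))
        if tag[key]:
            filters.append("tagvalue eq '{}'".format(tag[key]))
    return ' and '.join(filters) if filters else None

print(build_tag_filter({'env': 'prod'}))  # tagname eq 'env' and tagvalue eq 'prod'
print(build_tag_filter({'env': ''}))      # tagname eq 'env'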
|
|
wucng/TensorExpand
|
4ea58f64f5c5082b278229b799c9f679536510b7
|
TensorExpand/Object detection/darknet/change2tf/Data_interface.py
|
python
|
Data_interface.to_img
|
(self,data)
|
return imgs,np.asarray(labels,np.int64)
|
[] |
def to_img(self, data):
    imgs = []
    labels = []
    for da in data:
        img = cv2.imread(da)
        # img=cv2.resize(img,(self.width,self.hight))
        # img=img/255.-0.5 # normalize
        img = tf.image.resize_image_with_crop_or_pad(img, self.hight, self.width)
        img = tf.image.random_hue(img, 0.1)
        img = tf.image.random_flip_left_right(img)
        img = tf.image.random_contrast(img, 0.5, 0.75)
        img = tf.image.random_brightness(img, 0.75)
        label = os.path.basename(da).split('_')[-1].split('.')[0]
        label = int(self.classes2id[label])  # convert to id
        imgs.append(img)
        labels.append(label)
    return imgs, np.asarray(labels, np.int64)
|
[
"def",
"to_img",
"(",
"self",
",",
"data",
")",
":",
"imgs",
"=",
"[",
"]",
"labels",
"=",
"[",
"]",
"for",
"da",
"in",
"data",
":",
"img",
"=",
"cv2",
".",
"imread",
"(",
"da",
")",
"# img=cv2.resize(img,(self.width,self.hight))",
"# img=img/255.-0.5 # 归一化处理",
"img",
"=",
"tf",
".",
"image",
".",
"resize_image_with_crop_or_pad",
"(",
"img",
",",
"self",
".",
"hight",
",",
"self",
".",
"width",
")",
"img",
"=",
"tf",
".",
"image",
".",
"random_hue",
"(",
"img",
",",
"0.1",
")",
"img",
"=",
"tf",
".",
"image",
".",
"random_flip_left_right",
"(",
"img",
")",
"img",
"=",
"tf",
".",
"image",
".",
"random_contrast",
"(",
"img",
",",
"0.5",
",",
"0.75",
")",
"img",
"=",
"tf",
".",
"image",
".",
"random_brightness",
"(",
"img",
",",
"0.75",
")",
"label",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"da",
")",
".",
"split",
"(",
"'_'",
")",
"[",
"-",
"1",
"]",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"label",
"=",
"int",
"(",
"self",
".",
"classes2id",
"[",
"label",
"]",
")",
"# 换成 id",
"imgs",
".",
"append",
"(",
"img",
")",
"labels",
".",
"append",
"(",
"label",
")",
"return",
"imgs",
",",
"np",
".",
"asarray",
"(",
"labels",
",",
"np",
".",
"int64",
")"
] |
https://github.com/wucng/TensorExpand/blob/4ea58f64f5c5082b278229b799c9f679536510b7/TensorExpand/Object detection/darknet/change2tf/Data_interface.py#L64-L83
|
|||
openbmc/openbmc
|
5f4109adae05f4d6925bfe960007d52f98c61086
|
poky/scripts/lib/buildstats.py
|
python
|
BSTask.cputime
|
(self)
|
Sum of user and system time taken by the task
|
Sum of user and system time taken by the task
|
[
"Sum",
"of",
"user",
"and",
"system",
"time",
"taken",
"by",
"the",
"task"
] |
def cputime(self):
    """Sum of user and system time taken by the task"""
    rusage = self['rusage']['ru_stime'] + self['rusage']['ru_utime']
    if self['child_rusage']:
        # Child rusage may have been optimized out
        return rusage + self['child_rusage']['ru_stime'] + self['child_rusage']['ru_utime']
    else:
        return rusage
|
[
"def",
"cputime",
"(",
"self",
")",
":",
"rusage",
"=",
"self",
"[",
"'rusage'",
"]",
"[",
"'ru_stime'",
"]",
"+",
"self",
"[",
"'rusage'",
"]",
"[",
"'ru_utime'",
"]",
"if",
"self",
"[",
"'child_rusage'",
"]",
":",
"# Child rusage may have been optimized out",
"return",
"rusage",
"+",
"self",
"[",
"'child_rusage'",
"]",
"[",
"'ru_stime'",
"]",
"+",
"self",
"[",
"'child_rusage'",
"]",
"[",
"'ru_utime'",
"]",
"else",
":",
"return",
"rusage"
] |
https://github.com/openbmc/openbmc/blob/5f4109adae05f4d6925bfe960007d52f98c61086/poky/scripts/lib/buildstats.py#L39-L46
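BSTask indexes itself like a dict, so the arithmetic is easy to trace with a plain-dict stand-in:

task = {
    'rusage': {'ru_stime': 0.50, 'ru_utime': 1.25},
    'child_rusage': {'ru_stime': 0.10, 'ru_utime': 0.40},
}
cputime = task['rusage']['ru_stime'] + task['rusage']['ru_utime']
if task['child_rusage']:
    cputime += task['child_rusage']['ru_stime'] + task['child_rusage']['ru_utime']
print(cputime)  # 2.25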
|
||
wistbean/learn_python3_spider
|
73c873f4845f4385f097e5057407d03dd37a117b
|
stackoverflow/venv/lib/python3.6/site-packages/pymongo/read_preferences.py
|
python
|
_validate_max_staleness
|
(max_staleness)
|
return max_staleness
|
Validate max_staleness.
|
Validate max_staleness.
|
[
"Validate",
"max_staleness",
"."
] |
def _validate_max_staleness(max_staleness):
    """Validate max_staleness."""
    if max_staleness == -1:
        return -1
    if not isinstance(max_staleness, integer_types):
        raise TypeError(_invalid_max_staleness_msg(max_staleness))
    if max_staleness <= 0:
        raise ValueError(_invalid_max_staleness_msg(max_staleness))
    return max_staleness
|
[
"def",
"_validate_max_staleness",
"(",
"max_staleness",
")",
":",
"if",
"max_staleness",
"==",
"-",
"1",
":",
"return",
"-",
"1",
"if",
"not",
"isinstance",
"(",
"max_staleness",
",",
"integer_types",
")",
":",
"raise",
"TypeError",
"(",
"_invalid_max_staleness_msg",
"(",
"max_staleness",
")",
")",
"if",
"max_staleness",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"_invalid_max_staleness_msg",
"(",
"max_staleness",
")",
")",
"return",
"max_staleness"
] |
https://github.com/wistbean/learn_python3_spider/blob/73c873f4845f4385f097e5057407d03dd37a117b/stackoverflow/venv/lib/python3.6/site-packages/pymongo/read_preferences.py#L70-L81
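A self-contained exercise of the validator; integer_types and the message helper live elsewhere in pymongo, so stand-ins are defined here (the message text below is invented, not pymongo's):

integer_types = (int,)  # stand-in for pymongo's py2/py3 compat alias

def _invalid_max_staleness_msg(value):  # stand-in for pymongo's helper
    return "maxStalenessSeconds must be a positive integer or -1, got %r" % (value,)

print(_validate_max_staleness(120))  # 120
print(_validate_max_staleness(-1))   # -1, the "no bound" sentinel
try:
    _validate_max_staleness(0)       # zero and negatives are rejected
except ValueError as exc:
    print(exc)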
|
|
BYU-PRISM/GEKKO
|
e1f641c2e659a2ee3f87dbe0cab212d726828f19
|
gekko/gk_gui.py
|
python
|
FlaskThread.run
|
(self)
|
Starts up the Flask API
Called by FlaskThread.start() as that is how python threads work
|
Starts up the Flask API
|
[
"Starts",
"up",
"the",
"Flask",
"API"
] |
def run(self):
    """Starts up the Flask API
    Called by FlaskThread.start() as that is how python threads work
    """
    self.set_endpoints()
    # Debug in flask does not work when run on a separate thread
    app.run(debug=False, port=self.port, threaded=True)
    self.alarm.start()
|
[
"def",
"run",
"(",
"self",
")",
":",
"self",
".",
"set_endpoints",
"(",
")",
"# Debug in flask does not work when run on a separate thread",
"app",
".",
"run",
"(",
"debug",
"=",
"False",
",",
"port",
"=",
"self",
".",
"port",
",",
"threaded",
"=",
"True",
")",
"self",
".",
"alarm",
".",
"start",
"(",
")"
] |
https://github.com/BYU-PRISM/GEKKO/blob/e1f641c2e659a2ee3f87dbe0cab212d726828f19/gekko/gk_gui.py#L389-L397
|
||
yxgeee/MMT
|
057e1ea5d3054c9d7e5fa72c727298d8e4c5f668
|
mmt/utils/logging.py
|
python
|
Logger.__init__
|
(self, fpath=None)
|
[] |
def __init__(self, fpath=None):
    self.console = sys.stdout
    self.file = None
    if fpath is not None:
        mkdir_if_missing(os.path.dirname(fpath))
        self.file = open(fpath, 'w')
|
[
"def",
"__init__",
"(",
"self",
",",
"fpath",
"=",
"None",
")",
":",
"self",
".",
"console",
"=",
"sys",
".",
"stdout",
"self",
".",
"file",
"=",
"None",
"if",
"fpath",
"is",
"not",
"None",
":",
"mkdir_if_missing",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"fpath",
")",
")",
"self",
".",
"file",
"=",
"open",
"(",
"fpath",
",",
"'w'",
")"
] |
https://github.com/yxgeee/MMT/blob/057e1ea5d3054c9d7e5fa72c727298d8e4c5f668/mmt/utils/logging.py#L9-L14
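This __init__ is the front half of the usual tee-logger pattern; a minimal standalone version with the write/flush half filled in (a sketch, not MMT's exact code):

import os
import sys

class TeeLogger:
    def __init__(self, fpath=None):
        self.console = sys.stdout
        self.file = None
        if fpath is not None:
            os.makedirs(os.path.dirname(fpath) or '.', exist_ok=True)
            self.file = open(fpath, 'w')

    def write(self, msg):
        self.console.write(msg)
        if self.file is not None:
            self.file.write(msg)

    def flush(self):
        self.console.flush()
        if self.file is not None:
            self.file.flush()

# sys.stdout = TeeLogger('logs/run.log')  # every print() then also hits the file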
|
||||
inasafe/inasafe
|
355eb2ce63f516b9c26af0c86a24f99e53f63f87
|
safe/common/parameters/default_value_parameter_widget.py
|
python
|
DefaultValueParameterWidget.get_parameter
|
(self)
|
return self._parameter
|
Obtain list parameter object from the current widget state.
:returns: A DefaultValueParameter from the current state of widget
:rtype: DefaultValueParameter
|
Obtain list parameter object from the current widget state.
|
[
"Obtain",
"list",
"parameter",
"object",
"from",
"the",
"current",
"widget",
"state",
"."
] |
def get_parameter(self):
    """Obtain list parameter object from the current widget state.
    :returns: A DefaultValueParameter from the current state of widget
    :rtype: DefaultValueParameter
    """
    radio_button_checked_id = self.input_button_group.checkedId()
    # No radio button checked, then default value = None
    if radio_button_checked_id == -1:
        self._parameter.value = None
    # The last radio button (custom) is checked, get the value from the
    # line edit
    elif radio_button_checked_id == len(self._parameter.options) - 1:
        self._parameter.options[radio_button_checked_id] = \
            self.custom_value.value()
        self._parameter.value = self.custom_value.value()
    else:
        self._parameter.value = self._parameter.options[
            radio_button_checked_id]
    return self._parameter
|
[
"def",
"get_parameter",
"(",
"self",
")",
":",
"radio_button_checked_id",
"=",
"self",
".",
"input_button_group",
".",
"checkedId",
"(",
")",
"# No radio button checked, then default value = None",
"if",
"radio_button_checked_id",
"==",
"-",
"1",
":",
"self",
".",
"_parameter",
".",
"value",
"=",
"None",
"# The last radio button (custom) is checked, get the value from the",
"# line edit",
"elif",
"radio_button_checked_id",
"==",
"len",
"(",
"self",
".",
"_parameter",
".",
"options",
")",
"-",
"1",
":",
"self",
".",
"_parameter",
".",
"options",
"[",
"radio_button_checked_id",
"]",
"=",
"self",
".",
"custom_value",
".",
"value",
"(",
")",
"self",
".",
"_parameter",
".",
"value",
"=",
"self",
".",
"custom_value",
".",
"value",
"(",
")",
"else",
":",
"self",
".",
"_parameter",
".",
"value",
"=",
"self",
".",
"_parameter",
".",
"options",
"[",
"radio_button_checked_id",
"]",
"return",
"self",
".",
"_parameter"
] |
https://github.com/inasafe/inasafe/blob/355eb2ce63f516b9c26af0c86a24f99e53f63f87/safe/common/parameters/default_value_parameter_widget.py#L77-L97
|
|
DataBiosphere/toil
|
2e148eee2114ece8dcc3ec8a83f36333266ece0d
|
contrib/admin/remove_trailing_whitespace.py
|
python
|
strip_trailing_whitespace_from_all_files_in_dir
|
(dirname: str)
|
Strips trailing whitespace from all files in a directory, recursively.
Only strips files ending in one of the white-listed extensions in EXTENSIONS_TO_PROCESS.
Note: This includes things like "Dockerfile" that end in "Dockerfile".
|
Strips trailing whitespace from all files in a directory, recursively.
|
[
"Strips",
"trailing",
"whitespace",
"from",
"all",
"files",
"in",
"a",
"directory",
"recursively",
"."
] |
def strip_trailing_whitespace_from_all_files_in_dir(dirname: str) -> None:
    """
    Strips trailing whitespace from all files in a directory, recursively.
    Only strips files ending in one of the white-listed extensions in EXTENSIONS_TO_PROCESS.
    Note: This includes things like "Dockerfile" that end in "Dockerfile".
    """
    for dirpath, dirnames, filenames in os.walk(dirname):
        for f in filenames:
            for ext in EXTENSIONS_TO_PROCESS:
                if f.endswith(ext):
                    strip_trailing_whitespace_from_file(os.path.abspath(os.path.join(dirpath, f)))
                    break
        for d in dirnames:
            strip_trailing_whitespace_from_all_files_in_dir(os.path.abspath(os.path.join(dirpath, d)))
|
[
"def",
"strip_trailing_whitespace_from_all_files_in_dir",
"(",
"dirname",
":",
"str",
")",
"->",
"None",
":",
"for",
"dirpath",
",",
"dirnames",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"dirname",
")",
":",
"for",
"f",
"in",
"filenames",
":",
"for",
"ext",
"in",
"EXTENSIONS_TO_PROCESS",
":",
"if",
"f",
".",
"endswith",
"(",
"ext",
")",
":",
"strip_trailing_whitespace_from_file",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dirpath",
",",
"f",
")",
")",
")",
"break",
"for",
"d",
"in",
"dirnames",
":",
"strip_trailing_whitespace_from_all_files_in_dir",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dirpath",
",",
"d",
")",
")",
")"
] |
https://github.com/DataBiosphere/toil/blob/2e148eee2114ece8dcc3ec8a83f36333266ece0d/contrib/admin/remove_trailing_whitespace.py#L51-L65
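One design note: os.walk already descends into subdirectories, so the explicit recursion over dirnames revisits each subtree. A flat sketch of the same traversal, with a stand-in stripper since strip_trailing_whitespace_from_file is defined elsewhere in the script:

import os

EXTENSIONS = ('.py', '.rst', '.md')  # illustrative subset of EXTENSIONS_TO_PROCESS

def strip_file(path):  # stand-in for strip_trailing_whitespace_from_file
    with open(path) as f:
        lines = f.readlines()
    with open(path, 'w') as f:
        f.writelines(line.rstrip() + '\n' for line in lines)

def strip_dir(dirname):
    for dirpath, _dirnames, filenames in os.walk(dirname):  # walk is already recursive
        for name in filenames:
            if name.endswith(EXTENSIONS):  # endswith accepts a tuple of suffixes
                strip_file(os.path.join(dirpath, name))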
|
||
pypa/pipenv
|
b21baade71a86ab3ee1429f71fbc14d4f95fb75d
|
pipenv/vendor/dateutil/rrule.py
|
python
|
rrulebase.xafter
|
(self, dt, count=None, inc=False)
|
Generator which yields up to `count` recurrences after the given
datetime instance, equivalent to `after`.
:param dt:
The datetime at which to start generating recurrences.
:param count:
The maximum number of recurrences to generate. If `None` (default),
dates are generated until the recurrence rule is exhausted.
:param inc:
If `dt` is an instance of the rule and `inc` is `True`, it is
included in the output.
:yields: Yields a sequence of `datetime` objects.
|
Generator which yields up to `count` recurrences after the given
datetime instance, equivalent to `after`.
|
[
"Generator",
"which",
"yields",
"up",
"to",
"count",
"recurrences",
"after",
"the",
"given",
"datetime",
"instance",
"equivalent",
"to",
"after",
"."
] |
def xafter(self, dt, count=None, inc=False):
    """
    Generator which yields up to `count` recurrences after the given
    datetime instance, equivalent to `after`.
    :param dt:
        The datetime at which to start generating recurrences.
    :param count:
        The maximum number of recurrences to generate. If `None` (default),
        dates are generated until the recurrence rule is exhausted.
    :param inc:
        If `dt` is an instance of the rule and `inc` is `True`, it is
        included in the output.
    :yields: Yields a sequence of `datetime` objects.
    """
    if self._cache_complete:
        gen = self._cache
    else:
        gen = self
    # Select the comparison function
    if inc:
        comp = lambda dc, dtc: dc >= dtc
    else:
        comp = lambda dc, dtc: dc > dtc
    # Generate dates
    n = 0
    for d in gen:
        if comp(d, dt):
            if count is not None:
                n += 1
                if n > count:
                    break
            yield d
|
[
"def",
"xafter",
"(",
"self",
",",
"dt",
",",
"count",
"=",
"None",
",",
"inc",
"=",
"False",
")",
":",
"if",
"self",
".",
"_cache_complete",
":",
"gen",
"=",
"self",
".",
"_cache",
"else",
":",
"gen",
"=",
"self",
"# Select the comparison function",
"if",
"inc",
":",
"comp",
"=",
"lambda",
"dc",
",",
"dtc",
":",
"dc",
">=",
"dtc",
"else",
":",
"comp",
"=",
"lambda",
"dc",
",",
"dtc",
":",
"dc",
">",
"dtc",
"# Generate dates",
"n",
"=",
"0",
"for",
"d",
"in",
"gen",
":",
"if",
"comp",
"(",
"d",
",",
"dt",
")",
":",
"if",
"count",
"is",
"not",
"None",
":",
"n",
"+=",
"1",
"if",
"n",
">",
"count",
":",
"break",
"yield",
"d"
] |
https://github.com/pypa/pipenv/blob/b21baade71a86ab3ee1429f71fbc14d4f95fb75d/pipenv/vendor/dateutil/rrule.py#L230-L269
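Usage through dateutil's public rrule type, which inherits this generator:

from datetime import datetime
from dateutil.rrule import DAILY, rrule

rule = rrule(DAILY, dtstart=datetime(2024, 1, 1))
# Three occurrences strictly after Jan 10 (inc defaults to False):
for occurrence in rule.xafter(datetime(2024, 1, 10), count=3):
    print(occurrence)
# 2024-01-11 00:00:00
# 2024-01-12 00:00:00
# 2024-01-13 00:00:00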
|
||
matrix-org/synapse
|
8e57584a5859a9002759963eb546d523d2498a01
|
synapse/storage/databases/main/profile.py
|
python
|
ProfileWorkerStore.get_remote_profile_cache_entries_that_expire
|
(
self, last_checked: int
)
|
return await self.db_pool.runInteraction(
"get_remote_profile_cache_entries_that_expire",
_get_remote_profile_cache_entries_that_expire_txn,
)
|
Get all users who haven't been checked since `last_checked`
|
Get all users who haven't been checked since `last_checked`
|
[
"Get",
"all",
"users",
"who",
"haven",
"t",
"been",
"checked",
"since",
"last_checked"
] |
async def get_remote_profile_cache_entries_that_expire(
    self, last_checked: int
) -> List[Dict[str, str]]:
    """Get all users who haven't been checked since `last_checked`"""

    def _get_remote_profile_cache_entries_that_expire_txn(
        txn: LoggingTransaction,
    ) -> List[Dict[str, str]]:
        sql = """
            SELECT user_id, displayname, avatar_url
            FROM remote_profile_cache
            WHERE last_check < ?
        """
        txn.execute(sql, (last_checked,))
        return self.db_pool.cursor_to_dict(txn)

    return await self.db_pool.runInteraction(
        "get_remote_profile_cache_entries_that_expire",
        _get_remote_profile_cache_entries_that_expire_txn,
    )
|
[
"async",
"def",
"get_remote_profile_cache_entries_that_expire",
"(",
"self",
",",
"last_checked",
":",
"int",
")",
"->",
"List",
"[",
"Dict",
"[",
"str",
",",
"str",
"]",
"]",
":",
"def",
"_get_remote_profile_cache_entries_that_expire_txn",
"(",
"txn",
":",
"LoggingTransaction",
",",
")",
"->",
"List",
"[",
"Dict",
"[",
"str",
",",
"str",
"]",
"]",
":",
"sql",
"=",
"\"\"\"\n SELECT user_id, displayname, avatar_url\n FROM remote_profile_cache\n WHERE last_check < ?\n \"\"\"",
"txn",
".",
"execute",
"(",
"sql",
",",
"(",
"last_checked",
",",
")",
")",
"return",
"self",
".",
"db_pool",
".",
"cursor_to_dict",
"(",
"txn",
")",
"return",
"await",
"self",
".",
"db_pool",
".",
"runInteraction",
"(",
"\"get_remote_profile_cache_entries_that_expire\"",
",",
"_get_remote_profile_cache_entries_that_expire_txn",
",",
")"
] |
https://github.com/matrix-org/synapse/blob/8e57584a5859a9002759963eb546d523d2498a01/synapse/storage/databases/main/profile.py#L145-L166
|
|
aws/aws-parallelcluster
|
f1fe5679a01c524e7ea904c329bd6d17318c6cd9
|
cli/src/pcluster/utils.py
|
python
|
to_utc_datetime
|
(time_in, default_timezone=datetime.timezone.utc)
|
return time_.astimezone(datetime.timezone.utc)
|
Convert a given string, datetime or int into utc datetime.
:param time_in: Time in a format that may be parsed, integers are assumed to
be timestamps in UTC timezone.
:param default_timezone: Timezone to assume in the event that the time is
unspecified in the input parameter. This applies only for datetime and str inputs
:return: time as a datetime in UTC timezone
|
Convert a given string, datetime or int into utc datetime.
|
[
"Convert",
"a",
"given",
"string",
"datetime",
"or",
"int",
"into",
"utc",
"datetime",
"."
] |
def to_utc_datetime(time_in, default_timezone=datetime.timezone.utc) -> datetime.datetime:
"""
Convert a given string, datetime or int into utc datetime.
:param time_in: Time in a format that may be parsed, integers are assumed to
be timestamps in UTC timezone.
    :param default_timezone: Timezone to assume in the event that the time is
unspecified in the input parameter. This applies only for datetime and str inputs
:return: time as a datetime in UTC timezone
"""
if isinstance(time_in, int):
if time_in > 1e12:
time_in /= 1000
time_ = datetime.datetime.utcfromtimestamp(time_in)
time_ = time_.replace(tzinfo=datetime.timezone.utc)
elif isinstance(time_in, str):
time_ = dateutil.parser.parse(time_in)
elif isinstance(time_in, datetime.date):
time_ = time_in
else:
raise TypeError("to_utc_datetime object must be 'str', 'int' or 'datetime'.")
if time_.tzinfo is None:
time_ = time_.replace(tzinfo=default_timezone)
return time_.astimezone(datetime.timezone.utc)
|
[
"def",
"to_utc_datetime",
"(",
"time_in",
",",
"default_timezone",
"=",
"datetime",
".",
"timezone",
".",
"utc",
")",
"->",
"datetime",
".",
"datetime",
":",
"if",
"isinstance",
"(",
"time_in",
",",
"int",
")",
":",
"if",
"time_in",
">",
"1e12",
":",
"time_in",
"/=",
"1000",
"time_",
"=",
"datetime",
".",
"datetime",
".",
"utcfromtimestamp",
"(",
"time_in",
")",
"time_",
"=",
"time_",
".",
"replace",
"(",
"tzinfo",
"=",
"datetime",
".",
"timezone",
".",
"utc",
")",
"elif",
"isinstance",
"(",
"time_in",
",",
"str",
")",
":",
"time_",
"=",
"dateutil",
".",
"parser",
".",
"parse",
"(",
"time_in",
")",
"elif",
"isinstance",
"(",
"time_in",
",",
"datetime",
".",
"date",
")",
":",
"time_",
"=",
"time_in",
"else",
":",
"raise",
"TypeError",
"(",
"\"to_utc_datetime object must be 'str', 'int' or 'datetime'.\"",
")",
"if",
"time_",
".",
"tzinfo",
"is",
"None",
":",
"time_",
"=",
"time_",
".",
"replace",
"(",
"tzinfo",
"=",
"default_timezone",
")",
"return",
"time_",
".",
"astimezone",
"(",
"datetime",
".",
"timezone",
".",
"utc",
")"
] |
https://github.com/aws/aws-parallelcluster/blob/f1fe5679a01c524e7ea904c329bd6d17318c6cd9/cli/src/pcluster/utils.py#L127-L150
|
|
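The integer branch above uses a magnitude heuristic: anything above 1e12 is taken to be milliseconds (1e12 ms is roughly September 2001, while 1e12 seconds is tens of thousands of years away). A standalone sketch of just that heuristic, with invented sample values:

import datetime

def epoch_to_utc(ts):
    # Millisecond timestamps for any date after ~Sep 2001 exceed 1e12;
    # second-resolution timestamps stay far below it.
    if ts > 1e12:
        ts /= 1000
    return datetime.datetime.fromtimestamp(ts, tz=datetime.timezone.utc)

print(epoch_to_utc(1_700_000_000))      # seconds
print(epoch_to_utc(1_700_000_000_000))  # milliseconds, same instant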
saltstack/salt
|
fae5bc757ad0f1716483ce7ae180b451545c2058
|
salt/modules/macpackage.py
|
python
|
get_pkg_id
|
(pkg)
|
return package_ids
|
Attempt to get the package ID from a .pkg file
Args:
pkg (str): The location of the pkg file
Returns:
list: List of all of the package IDs
CLI Example:
.. code-block:: bash
salt '*' macpackage.get_pkg_id /tmp/test.pkg
|
Attempt to get the package ID from a .pkg file
|
[
"Attempt",
"to",
"get",
"the",
"package",
"ID",
"from",
"a",
".",
"pkg",
"file"
] |
def get_pkg_id(pkg):
"""
Attempt to get the package ID from a .pkg file
Args:
pkg (str): The location of the pkg file
Returns:
list: List of all of the package IDs
CLI Example:
.. code-block:: bash
salt '*' macpackage.get_pkg_id /tmp/test.pkg
"""
pkg = _quote(pkg)
package_ids = []
# Create temp directory
temp_dir = __salt__["temp.dir"](prefix="pkg-")
try:
# List all of the PackageInfo files
cmd = "xar -t -f {} | grep PackageInfo".format(pkg)
out = __salt__["cmd.run"](cmd, python_shell=True, output_loglevel="quiet")
files = out.split("\n")
if "Error opening" not in out:
# Extract the PackageInfo files
cmd = "xar -x -f {} {}".format(pkg, " ".join(files))
__salt__["cmd.run"](cmd, cwd=temp_dir, output_loglevel="quiet")
# Find our identifiers
for f in files:
i = _get_pkg_id_from_pkginfo(os.path.join(temp_dir, f))
if i:
package_ids.extend(i)
else:
package_ids = _get_pkg_id_dir(pkg)
finally:
# Clean up
__salt__["file.remove"](temp_dir)
return package_ids
|
[
"def",
"get_pkg_id",
"(",
"pkg",
")",
":",
"pkg",
"=",
"_quote",
"(",
"pkg",
")",
"package_ids",
"=",
"[",
"]",
"# Create temp directory",
"temp_dir",
"=",
"__salt__",
"[",
"\"temp.dir\"",
"]",
"(",
"prefix",
"=",
"\"pkg-\"",
")",
"try",
":",
"# List all of the PackageInfo files",
"cmd",
"=",
"\"xar -t -f {} | grep PackageInfo\"",
".",
"format",
"(",
"pkg",
")",
"out",
"=",
"__salt__",
"[",
"\"cmd.run\"",
"]",
"(",
"cmd",
",",
"python_shell",
"=",
"True",
",",
"output_loglevel",
"=",
"\"quiet\"",
")",
"files",
"=",
"out",
".",
"split",
"(",
"\"\\n\"",
")",
"if",
"\"Error opening\"",
"not",
"in",
"out",
":",
"# Extract the PackageInfo files",
"cmd",
"=",
"\"xar -x -f {} {}\"",
".",
"format",
"(",
"pkg",
",",
"\" \"",
".",
"join",
"(",
"files",
")",
")",
"__salt__",
"[",
"\"cmd.run\"",
"]",
"(",
"cmd",
",",
"cwd",
"=",
"temp_dir",
",",
"output_loglevel",
"=",
"\"quiet\"",
")",
"# Find our identifiers",
"for",
"f",
"in",
"files",
":",
"i",
"=",
"_get_pkg_id_from_pkginfo",
"(",
"os",
".",
"path",
".",
"join",
"(",
"temp_dir",
",",
"f",
")",
")",
"if",
"i",
":",
"package_ids",
".",
"extend",
"(",
"i",
")",
"else",
":",
"package_ids",
"=",
"_get_pkg_id_dir",
"(",
"pkg",
")",
"finally",
":",
"# Clean up",
"__salt__",
"[",
"\"file.remove\"",
"]",
"(",
"temp_dir",
")",
"return",
"package_ids"
] |
https://github.com/saltstack/salt/blob/fae5bc757ad0f1716483ce7ae180b451545c2058/salt/modules/macpackage.py#L203-L248
|
|
Matheus-Garbelini/sweyntooth_bluetooth_low_energy_attacks
|
40c985b9a9ff1189ddf278462440b120cf96b196
|
libs/scapy/layers/bluetooth.py
|
python
|
LowEnergyBeaconHelper.build_eir
|
(self)
|
Builds a list of EIR messages to wrap this frame.
Users of this helper must implement this method.
:returns: List of HCI_Hdr with payloads that describe this beacon type
:rtype: list[HCI_Hdr]
|
Builds a list of EIR messages to wrap this frame.
|
[
"Builds",
"a",
"list",
"of",
"EIR",
"messages",
"to",
"wrap",
"this",
"frame",
"."
] |
def build_eir(self):
"""
Builds a list of EIR messages to wrap this frame.
Users of this helper must implement this method.
:returns: List of HCI_Hdr with payloads that describe this beacon type
:rtype: list[HCI_Hdr]
"""
raise NotImplementedError("build_eir")
|
[
"def",
"build_eir",
"(",
"self",
")",
":",
"raise",
"NotImplementedError",
"(",
"\"build_eir\"",
")"
] |
https://github.com/Matheus-Garbelini/sweyntooth_bluetooth_low_energy_attacks/blob/40c985b9a9ff1189ddf278462440b120cf96b196/libs/scapy/layers/bluetooth.py#L1381-L1390
|
||
tomhartley/AirPi
|
572bcd2ae5d76a95f22e01cef076bb483ce668ee
|
sensors/Adafruit_I2C.py
|
python
|
Adafruit_I2C.readU8
|
(self, reg)
|
Read an unsigned byte from the I2C device
|
Read an unsigned byte from the I2C device
|
[
"Read",
"an",
"unsigned",
"byte",
"from",
"the",
"I2C",
"device"
] |
def readU8(self, reg):
"Read an unsigned byte from the I2C device"
try:
result = self.bus.read_byte_data(self.address, reg)
if (self.debug):
print "I2C: Device 0x%02X returned 0x%02X from reg 0x%02X" % (self.address, result & 0xFF, reg)
return result
except IOError, err:
print "Error accessing 0x%02X: Check your I2C address" % self.address
return -1
|
[
"def",
"readU8",
"(",
"self",
",",
"reg",
")",
":",
"try",
":",
"result",
"=",
"self",
".",
"bus",
".",
"read_byte_data",
"(",
"self",
".",
"address",
",",
"reg",
")",
"if",
"(",
"self",
".",
"debug",
")",
":",
"print",
"\"I2C: Device 0x%02X returned 0x%02X from reg 0x%02X\"",
"%",
"(",
"self",
".",
"address",
",",
"result",
"&",
"0xFF",
",",
"reg",
")",
"return",
"result",
"except",
"IOError",
",",
"err",
":",
"print",
"\"Error accessing 0x%02X: Check your I2C address\"",
"%",
"self",
".",
"address",
"return",
"-",
"1"
] |
https://github.com/tomhartley/AirPi/blob/572bcd2ae5d76a95f22e01cef076bb483ce668ee/sensors/Adafruit_I2C.py#L46-L55
|
||
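The method above is Python 2 (print statements, `except IOError, err`). A hedged Python 3 port, wrapped in a minimal class so it runs standalone; smbus is the library the original uses, but the class scaffolding here is invented:

import smbus  # same I2C library the original sensor code relies on

class I2CDevice:
    def __init__(self, address, busnum=1, debug=False):
        self.address = address
        self.bus = smbus.SMBus(busnum)
        self.debug = debug

    def readU8(self, reg):
        """Read an unsigned byte from the I2C device."""
        try:
            result = self.bus.read_byte_data(self.address, reg)
            if self.debug:
                print("I2C: Device 0x%02X returned 0x%02X from reg 0x%02X"
                      % (self.address, result & 0xFF, reg))
            return result
        except IOError:
            print("Error accessing 0x%02X: Check your I2C address" % self.address)
            return -1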
maraoz/proofofexistence
|
10703675824e989f59a8d36fd8c06394e71a2c25
|
pycoin/encoding.py
|
python
|
is_sec_compressed
|
(sec)
|
return sec[:1] in (b'\2', b'\3')
|
Return a boolean indicating if the sec represents a compressed public key.
|
Return a boolean indicating if the sec represents a compressed public key.
|
[
"Return",
"a",
"boolean",
"indicating",
"if",
"the",
"sec",
"represents",
"a",
"compressed",
"public",
"key",
"."
] |
def is_sec_compressed(sec):
"""Return a boolean indicating if the sec represents a compressed public key."""
return sec[:1] in (b'\2', b'\3')
|
[
"def",
"is_sec_compressed",
"(",
"sec",
")",
":",
"return",
"sec",
"[",
":",
"1",
"]",
"in",
"(",
"b'\\2'",
",",
"b'\\3'",
")"
] |
https://github.com/maraoz/proofofexistence/blob/10703675824e989f59a8d36fd8c06394e71a2c25/pycoin/encoding.py#L231-L233
|
|
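Context for the check above: SEC-encoded public keys carry a prefix byte, 0x02 or 0x03 for the 33-byte compressed form (encoding the parity of y) and 0x04 for the 65-byte uncompressed form. A self-contained sketch with dummy key bytes:

def is_sec_compressed(sec):
    """Return True if the SEC bytes encode a compressed public key."""
    return sec[:1] in (b'\x02', b'\x03')

print(is_sec_compressed(b'\x02' + b'\x00' * 32))  # True  (compressed, even y)
print(is_sec_compressed(b'\x03' + b'\x00' * 32))  # True  (compressed, odd y)
print(is_sec_compressed(b'\x04' + b'\x00' * 64))  # False (uncompressed)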
apprenticeharper/DeDRM_tools
|
776f146ca00d11b24575f4fd6e8202df30a2b7ea
|
DeDRM_plugin/aescbc.py
|
python
|
BlockCipher.decrypt
|
(self, cipherText, more = None)
|
return plainText
|
Decrypt a string and return a string
|
Decrypt a string and return a string
|
[
"Decrypt",
"a",
"string",
"and",
"return",
"a",
"string"
] |
def decrypt(self, cipherText, more = None):
""" Decrypt a string and return a string """
self.bytesToDecrypt += cipherText # append to any bytes from prior decrypt
numBlocks, numExtraBytes = divmod(len(self.bytesToDecrypt), self.blockSize)
if more == None: # no more calls to decrypt, should have all the data
if numExtraBytes != 0:
raise DecryptNotBlockAlignedError('Data not block aligned on decrypt')
# hold back some bytes in case last decrypt has zero len
if (more != None) and (numExtraBytes == 0) and (numBlocks >0) :
numBlocks -= 1
numExtraBytes = self.blockSize
plainText = ''
for i in range(numBlocks):
bStart = i*self.blockSize
ptBlock = self.decryptBlock(self.bytesToDecrypt[bStart : bStart+self.blockSize])
self.decryptBlockCount += 1
plainText += ptBlock
if numExtraBytes > 0: # save any bytes that are not block aligned
        self.bytesToDecrypt = self.bytesToDecrypt[-numExtraBytes:]
    else:
        self.bytesToDecrypt = ''
if more == None: # last decrypt remove padding
plainText = self.padding.removePad(plainText, self.blockSize)
self.resetDecrypt()
return plainText
|
[
"def",
"decrypt",
"(",
"self",
",",
"cipherText",
",",
"more",
"=",
"None",
")",
":",
"self",
".",
"bytesToDecrypt",
"+=",
"cipherText",
"# append to any bytes from prior decrypt",
"numBlocks",
",",
"numExtraBytes",
"=",
"divmod",
"(",
"len",
"(",
"self",
".",
"bytesToDecrypt",
")",
",",
"self",
".",
"blockSize",
")",
"if",
"more",
"==",
"None",
":",
"# no more calls to decrypt, should have all the data",
"if",
"numExtraBytes",
"!=",
"0",
":",
"raise",
"DecryptNotBlockAlignedError",
"(",
"'Data not block aligned on decrypt'",
")",
"# hold back some bytes in case last decrypt has zero len",
"if",
"(",
"more",
"!=",
"None",
")",
"and",
"(",
"numExtraBytes",
"==",
"0",
")",
"and",
"(",
"numBlocks",
">",
"0",
")",
":",
"numBlocks",
"-=",
"1",
"numExtraBytes",
"=",
"self",
".",
"blockSize",
"plainText",
"=",
"''",
"for",
"i",
"in",
"range",
"(",
"numBlocks",
")",
":",
"bStart",
"=",
"i",
"*",
"self",
".",
"blockSize",
"ptBlock",
"=",
"self",
".",
"decryptBlock",
"(",
"self",
".",
"bytesToDecrypt",
"[",
"bStart",
":",
"bStart",
"+",
"self",
".",
"blockSize",
"]",
")",
"self",
".",
"decryptBlockCount",
"+=",
"1",
"plainText",
"+=",
"ptBlock",
"if",
"numExtraBytes",
">",
"0",
":",
"# save any bytes that are not block aligned",
"self",
".",
"bytesToEncrypt",
"=",
"self",
".",
"bytesToEncrypt",
"[",
"-",
"numExtraBytes",
":",
"]",
"else",
":",
"self",
".",
"bytesToEncrypt",
"=",
"''",
"if",
"more",
"==",
"None",
":",
"# last decrypt remove padding",
"plainText",
"=",
"self",
".",
"padding",
".",
"removePad",
"(",
"plainText",
",",
"self",
".",
"blockSize",
")",
"self",
".",
"resetDecrypt",
"(",
")",
"return",
"plainText"
] |
https://github.com/apprenticeharper/DeDRM_tools/blob/776f146ca00d11b24575f4fd6e8202df30a2b7ea/DeDRM_plugin/aescbc.py#L100-L129
|
|
NoneGG/aredis
|
b46e67163692cd0796763e5c9e17394821d9280c
|
aredis/cache.py
|
python
|
BasicCache._pack
|
(self, content)
|
return content
|
Packs the content using serializer and compressor
|
Packs the content using serializer and compressor
|
[
"Packs",
"the",
"content",
"using",
"serializer",
"and",
"compressor"
] |
def _pack(self, content):
"""Packs the content using serializer and compressor"""
if self.serializer:
content = self.serializer.serialize(content)
if self.compressor:
content = self.compressor.compress(content)
return content
|
[
"def",
"_pack",
"(",
"self",
",",
"content",
")",
":",
"if",
"self",
".",
"serializer",
":",
"content",
"=",
"self",
".",
"serializer",
".",
"serialize",
"(",
"content",
")",
"if",
"self",
".",
"compressor",
":",
"content",
"=",
"self",
".",
"compressor",
".",
"compress",
"(",
"content",
")",
"return",
"content"
] |
https://github.com/NoneGG/aredis/blob/b46e67163692cd0796763e5c9e17394821d9280c/aredis/cache.py#L146-L152
|
|
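The helper above layers an optional serializer under an optional compressor. A standalone sketch of the same layering, with json and zlib standing in for the pluggable components (aredis's own serializer/compressor interfaces are not assumed here):

import json
import zlib

def pack(content, serialize=True, compress=True):
    # Serialize first so the compressor always sees a byte stream.
    if serialize:
        content = json.dumps(content).encode("utf-8")
    if compress:
        content = zlib.compress(content)
    return content

packed = pack({"user": "alice", "visits": 42})
print(len(packed), "bytes")
print(json.loads(zlib.decompress(packed)))  # round-trips back to the dict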
lovelylain/pyctp
|
fd304de4b50c4ddc31a4190b1caaeb5dec66bc5d
|
stock2/ctp/ApiStruct.py
|
python
|
BrokerWithdrawAlgorithm.__init__
|
(self, BrokerID='', WithdrawAlgorithm=AG_All, UsingRatio=0.0, IncludeCloseProfit=ICP_Include, AllWithoutTrade=AWT_Enable, AvailIncludeCloseProfit=ICP_Include, IsBrokerUserEvent=0)
|
[] |
def __init__(self, BrokerID='', WithdrawAlgorithm=AG_All, UsingRatio=0.0, IncludeCloseProfit=ICP_Include, AllWithoutTrade=AWT_Enable, AvailIncludeCloseProfit=ICP_Include, IsBrokerUserEvent=0):
self.BrokerID = '' #经纪公司代码, char[11]
self.WithdrawAlgorithm = 'Algorithm' #可提资金算法, char
self.UsingRatio = 'Ratio' #资金使用率, double
self.IncludeCloseProfit = '' #可提是否包含平仓盈利, char
self.AllWithoutTrade = '' #本日无仓且无成交客户是否受可提比例限制, char
self.AvailIncludeCloseProfit = 'IncludeCloseProfit' #可用是否包含平仓盈利, char
self.IsBrokerUserEvent = 'Bool'
|
[
"def",
"__init__",
"(",
"self",
",",
"BrokerID",
"=",
"''",
",",
"WithdrawAlgorithm",
"=",
"AG_All",
",",
"UsingRatio",
"=",
"0.0",
",",
"IncludeCloseProfit",
"=",
"ICP_Include",
",",
"AllWithoutTrade",
"=",
"AWT_Enable",
",",
"AvailIncludeCloseProfit",
"=",
"ICP_Include",
",",
"IsBrokerUserEvent",
"=",
"0",
")",
":",
"self",
".",
"BrokerID",
"=",
"''",
"#经纪公司代码, char[11]",
"self",
".",
"WithdrawAlgorithm",
"=",
"'Algorithm'",
"#可提资金算法, char",
"self",
".",
"UsingRatio",
"=",
"'Ratio'",
"#资金使用率, double",
"self",
".",
"IncludeCloseProfit",
"=",
"''",
"#可提是否包含平仓盈利, char",
"self",
".",
"AllWithoutTrade",
"=",
"''",
"#本日无仓且无成交客户是否受可提比例限制, char",
"self",
".",
"AvailIncludeCloseProfit",
"=",
"'IncludeCloseProfit'",
"#可用是否包含平仓盈利, char",
"self",
".",
"IsBrokerUserEvent",
"=",
"'Bool'"
] |
https://github.com/lovelylain/pyctp/blob/fd304de4b50c4ddc31a4190b1caaeb5dec66bc5d/stock2/ctp/ApiStruct.py#L2891-L2898
|
||||
viewflow/viewflow
|
2389bd379a2ab22cc277585df7c09514e273541d
|
viewflow/decorators.py
|
python
|
flow_func
|
(func)
|
return _wrapper
|
Decorator for flow functions.
Expects function that gets activation instance as the first parameter.
Returns function that expects task instance as the first parameter instead.
|
Decorator for flow functions.
|
[
"Decorator",
"for",
"flow",
"functions",
"."
] |
def flow_func(func):
"""
Decorator for flow functions.
Expects function that gets activation instance as the first parameter.
Returns function that expects task instance as the first parameter instead.
"""
@transaction.atomic
@functools.wraps(func)
def _wrapper(task, *args, **kwargs):
flow_task = task.flow_task
flow_class = flow_task.flow_class
lock = flow_class.lock_impl(flow_class.instance)
with lock(flow_class, task.process_id):
task = flow_class.task_class._default_manager.get(pk=task.pk, process_id=task.process_id)
activation = flow_task.activation_class()
activation.initialize(flow_task, task)
return func(activation, *args, **kwargs)
return _wrapper
|
[
"def",
"flow_func",
"(",
"func",
")",
":",
"@",
"transaction",
".",
"atomic",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"_wrapper",
"(",
"task",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"flow_task",
"=",
"task",
".",
"flow_task",
"flow_class",
"=",
"flow_task",
".",
"flow_class",
"lock",
"=",
"flow_class",
".",
"lock_impl",
"(",
"flow_class",
".",
"instance",
")",
"with",
"lock",
"(",
"flow_class",
",",
"task",
".",
"process_id",
")",
":",
"task",
"=",
"flow_class",
".",
"task_class",
".",
"_default_manager",
".",
"get",
"(",
"pk",
"=",
"task",
".",
"pk",
",",
"process_id",
"=",
"task",
".",
"process_id",
")",
"activation",
"=",
"flow_task",
".",
"activation_class",
"(",
")",
"activation",
".",
"initialize",
"(",
"flow_task",
",",
"task",
")",
"return",
"func",
"(",
"activation",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"_wrapper"
] |
https://github.com/viewflow/viewflow/blob/2389bd379a2ab22cc277585df7c09514e273541d/viewflow/decorators.py#L34-L53
|
|
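Stripped of the Django/viewflow machinery (locking, transactions, activation classes), the decorator above is the "adapt the first argument" pattern: the wrapper receives one object, resolves it to another, and calls the wrapped function with the resolved object. A minimal generic sketch with invented names:

import functools

def adapt_first_arg(resolve):
    """Decorator factory: resolve() translates the raw first argument."""
    def decorator(func):
        @functools.wraps(func)
        def _wrapper(raw, *args, **kwargs):
            return func(resolve(raw), *args, **kwargs)
        return _wrapper
    return decorator

@adapt_first_arg(str.upper)
def greet(name, punctuation="!"):
    return "Hello, %s%s" % (name, punctuation)

print(greet("ada"))  # Hello, ADA!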
microsoft/Oscar
|
4788a7425cd0f9861ea80fed79528abbb72eb169
|
oscar/utils/cider/pyciderevalcap/cider/cider_scorer.py
|
python
|
CiderScorer.__init__
|
(self, df_mode="corpus", test=None, refs=None, n=4, sigma=6.0)
|
singular instance
|
singular instance
|
[
"singular",
"instance"
] |
def __init__(self, df_mode="corpus", test=None, refs=None, n=4, sigma=6.0):
''' singular instance '''
self.n = n
self.sigma = sigma
self.crefs = []
self.ctest = []
self.df_mode = df_mode
self.ref_len = None
if self.df_mode != "corpus":
pkl_file = cPickle.load(open(os.path.join('data', df_mode + '.p'),'rb'), **(dict(encoding='latin1') if six.PY3 else {}))
self.ref_len = np.log(float(pkl_file['ref_len']))
self.document_frequency = pkl_file['document_frequency']
self.cook_append(test, refs)
|
[
"def",
"__init__",
"(",
"self",
",",
"df_mode",
"=",
"\"corpus\"",
",",
"test",
"=",
"None",
",",
"refs",
"=",
"None",
",",
"n",
"=",
"4",
",",
"sigma",
"=",
"6.0",
")",
":",
"self",
".",
"n",
"=",
"n",
"self",
".",
"sigma",
"=",
"sigma",
"self",
".",
"crefs",
"=",
"[",
"]",
"self",
".",
"ctest",
"=",
"[",
"]",
"self",
".",
"df_mode",
"=",
"df_mode",
"self",
".",
"ref_len",
"=",
"None",
"if",
"self",
".",
"df_mode",
"!=",
"\"corpus\"",
":",
"pkl_file",
"=",
"cPickle",
".",
"load",
"(",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"'data'",
",",
"df_mode",
"+",
"'.p'",
")",
",",
"'rb'",
")",
",",
"*",
"*",
"(",
"dict",
"(",
"encoding",
"=",
"'latin1'",
")",
"if",
"six",
".",
"PY3",
"else",
"{",
"}",
")",
")",
"self",
".",
"ref_len",
"=",
"np",
".",
"log",
"(",
"float",
"(",
"pkl_file",
"[",
"'ref_len'",
"]",
")",
")",
"self",
".",
"document_frequency",
"=",
"pkl_file",
"[",
"'document_frequency'",
"]",
"self",
".",
"cook_append",
"(",
"test",
",",
"refs",
")"
] |
https://github.com/microsoft/Oscar/blob/4788a7425cd0f9861ea80fed79528abbb72eb169/oscar/utils/cider/pyciderevalcap/cider/cider_scorer.py#L63-L75
|
||
chribsen/simple-machine-learning-examples
|
dc94e52a4cebdc8bb959ff88b81ff8cfeca25022
|
venv/lib/python2.7/site-packages/numpy/ma/mrecords.py
|
python
|
MaskedRecords.__setstate__
|
(self, state)
|
Restore the internal state of the masked array.
This is for pickling. ``state`` is typically the output of
``__getstate__``, and is a 5-tuple:
- class name
- a tuple giving the shape of the data
- a typecode for the data
- a binary string for the data
- a binary string for the mask.
|
Restore the internal state of the masked array.
|
[
"Restore",
"the",
"internal",
"state",
"of",
"the",
"masked",
"array",
"."
] |
def __setstate__(self, state):
"""
Restore the internal state of the masked array.
    This is for pickling. ``state`` is typically the output of
    ``__getstate__``, and is a 5-tuple:
- class name
- a tuple giving the shape of the data
- a typecode for the data
- a binary string for the data
- a binary string for the mask.
"""
(ver, shp, typ, isf, raw, msk, flv) = state
ndarray.__setstate__(self, (shp, typ, isf, raw))
mdtype = dtype([(k, bool_) for (k, _) in self.dtype.descr])
self.__dict__['_mask'].__setstate__((shp, mdtype, isf, msk))
self.fill_value = flv
|
[
"def",
"__setstate__",
"(",
"self",
",",
"state",
")",
":",
"(",
"ver",
",",
"shp",
",",
"typ",
",",
"isf",
",",
"raw",
",",
"msk",
",",
"flv",
")",
"=",
"state",
"ndarray",
".",
"__setstate__",
"(",
"self",
",",
"(",
"shp",
",",
"typ",
",",
"isf",
",",
"raw",
")",
")",
"mdtype",
"=",
"dtype",
"(",
"[",
"(",
"k",
",",
"bool_",
")",
"for",
"(",
"k",
",",
"_",
")",
"in",
"self",
".",
"dtype",
".",
"descr",
"]",
")",
"self",
".",
"__dict__",
"[",
"'_mask'",
"]",
".",
"__setstate__",
"(",
"(",
"shp",
",",
"mdtype",
",",
"isf",
",",
"msk",
")",
")",
"self",
".",
"fill_value",
"=",
"flv"
] |
https://github.com/chribsen/simple-machine-learning-examples/blob/dc94e52a4cebdc8bb959ff88b81ff8cfeca25022/venv/lib/python2.7/site-packages/numpy/ma/mrecords.py#L483-L501
|
||
jtriley/StarCluster
|
bc7c950e73f193eac9aab986b6764939cfdad978
|
starcluster/balancers/sge/__init__.py
|
python
|
SGEStats.slots_per_host
|
(self)
|
return single
|
Returns the number of slots per host. If for some reason the cluster is
inconsistent, this will return -1; for example, if you have m1.large and
m1.small in the same cluster
|
Returns the number of slots per host. If for some reason the cluster is
inconsistent, this will return -1; for example, if you have m1.large and
m1.small in the same cluster
|
[
"Returns",
"the",
"number",
"of",
"slots",
"per",
"host",
".",
"If",
"for",
"some",
"reason",
"the",
"cluster",
"is",
"inconsistent",
"this",
"will",
"return",
"-",
"1",
"for",
"example",
"if",
"you",
"have",
"m1",
".",
"large",
"and",
"m1",
".",
"small",
"in",
"the",
"same",
"cluster"
] |
def slots_per_host(self):
"""
Returns the number of slots per host. If for some reason the cluster is
    inconsistent, this will return -1; for example, if you have m1.large and
m1.small in the same cluster
"""
total = self.count_total_slots()
if total == 0:
return total
single = 0
for q in self.queues:
if q.startswith('all.q@'):
single = self.queues.get(q).get('slots')
break
if (total != (single * len(self.hosts))):
raise exception.BaseException(
"ERROR: Number of slots not consistent across cluster")
return single
|
[
"def",
"slots_per_host",
"(",
"self",
")",
":",
"total",
"=",
"self",
".",
"count_total_slots",
"(",
")",
"if",
"total",
"==",
"0",
":",
"return",
"total",
"single",
"=",
"0",
"for",
"q",
"in",
"self",
".",
"queues",
":",
"if",
"q",
".",
"startswith",
"(",
"'all.q@'",
")",
":",
"single",
"=",
"self",
".",
"queues",
".",
"get",
"(",
"q",
")",
".",
"get",
"(",
"'slots'",
")",
"break",
"if",
"(",
"total",
"!=",
"(",
"single",
"*",
"len",
"(",
"self",
".",
"hosts",
")",
")",
")",
":",
"raise",
"exception",
".",
"BaseException",
"(",
"\"ERROR: Number of slots not consistent across cluster\"",
")",
"return",
"single"
] |
https://github.com/jtriley/StarCluster/blob/bc7c950e73f193eac9aab986b6764939cfdad978/starcluster/balancers/sge/__init__.py#L227-L244
|
|
BlackLight/platypush
|
a6b552504e2ac327c94f3a28b607061b6b60cf36
|
platypush/plugins/zwave/__init__.py
|
python
|
ZwavePlugin.delete_button
|
(self, button_id: Union[int, str], node_id: Optional[int] = None, node_name: Optional[str] = None)
|
Delete a button association from a device. Only intended for bridge firmware controllers.
:param button_id: The ID of the button.
:param node_id: Filter by node_id.
:param node_name: Filter by current node name.
|
Delete a button association from a device. Only intended for bridge firmware controllers.
|
[
"Delete",
"a",
"button",
"association",
"from",
"a",
"device",
".",
"Only",
"intended",
"for",
"bridge",
"firmware",
"controllers",
"."
] |
def delete_button(self, button_id: Union[int, str], node_id: Optional[int] = None, node_name: Optional[str] = None):
"""
Delete a button association from a device. Only intended for bridge firmware controllers.
:param button_id: The ID of the button.
:param node_id: Filter by node_id.
:param node_name: Filter by current node name.
"""
node = self._get_node(node_id=node_id, node_name=node_name)
self._get_controller().delete_button(node.node_id, button_id)
self.write_config()
|
[
"def",
"delete_button",
"(",
"self",
",",
"button_id",
":",
"Union",
"[",
"int",
",",
"str",
"]",
",",
"node_id",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
"node_name",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
":",
"node",
"=",
"self",
".",
"_get_node",
"(",
"node_id",
"=",
"node_id",
",",
"node_name",
"=",
"node_name",
")",
"self",
".",
"_get_controller",
"(",
")",
".",
"delete_button",
"(",
"node",
".",
"node_id",
",",
"button_id",
")",
"self",
".",
"write_config",
"(",
")"
] |
https://github.com/BlackLight/platypush/blob/a6b552504e2ac327c94f3a28b607061b6b60cf36/platypush/plugins/zwave/__init__.py#L898-L908
|
||
miguelgrinberg/api-pycon2014
|
9a1e036d0851b93545b1d0bd0309d61cc01f3d89
|
api/v1_0/students.py
|
python
|
get_students
|
()
|
return Student.query
|
[] |
def get_students():
return Student.query
|
[
"def",
"get_students",
"(",
")",
":",
"return",
"Student",
".",
"query"
] |
https://github.com/miguelgrinberg/api-pycon2014/blob/9a1e036d0851b93545b1d0bd0309d61cc01f3d89/api/v1_0/students.py#L10-L11
|
|||
standardebooks/tools
|
f57af3c5938a9aeed9e97e82b2c130424f6033e5
|
se/easy_xml.py
|
python
|
EasyXmlElement.attrs
|
(self)
|
return self.lxml_element.attrib
|
Return a dict of attributes for this node
|
Return a dict of attributes for this node
|
[
"Return",
"a",
"dict",
"of",
"attributes",
"for",
"this",
"node"
] |
def attrs(self) -> Dict:
"""
Return a dict of attributes for this node
"""
return self.lxml_element.attrib
|
[
"def",
"attrs",
"(",
"self",
")",
"->",
"Dict",
":",
"return",
"self",
".",
"lxml_element",
".",
"attrib"
] |
https://github.com/standardebooks/tools/blob/f57af3c5938a9aeed9e97e82b2c130424f6033e5/se/easy_xml.py#L642-L647
|
|
glitchdotcom/WebPutty
|
4f5da5eb2b4668cbf3c15cf002feacd1d95d2ef7
|
libs/cssutils/css/csspagerule.py
|
python
|
CSSPageRule._getCssText
|
(self)
|
return cssutils.ser.do_CSSPageRule(self)
|
Return serialized property cssText.
|
Return serialized property cssText.
|
[
"Return",
"serialized",
"property",
"cssText",
"."
] |
def _getCssText(self):
"""Return serialized property cssText."""
return cssutils.ser.do_CSSPageRule(self)
|
[
"def",
"_getCssText",
"(",
"self",
")",
":",
"return",
"cssutils",
".",
"ser",
".",
"do_CSSPageRule",
"(",
"self",
")"
] |
https://github.com/glitchdotcom/WebPutty/blob/4f5da5eb2b4668cbf3c15cf002feacd1d95d2ef7/libs/cssutils/css/csspagerule.py#L156-L158
|
|
biolab/orange3
|
41685e1c7b1d1babe680113685a2d44bcc9fec0b
|
Orange/widgets/visualize/utils/view.py
|
python
|
ZoomableGraphicsView.central_widget_rect
|
(self)
|
return self.__central_widget.boundingRect().adjusted(*self.__padding)
|
Get the bounding box of the central widget.
If a central widget and padding are set, this method calculates the
rect containing both of them. This is useful because if the padding was
added directly onto the widget, the padding would be rescaled as well.
If the central widget is not set, return the scene rect instead.
Returns
-------
QtCore.QRectF
|
Get the bounding box of the central widget.
|
[
"Get",
"the",
"bounding",
"box",
"of",
"the",
"central",
"widget",
"."
] |
def central_widget_rect(self):
"""Get the bounding box of the central widget.
If a central widget and padding are set, this method calculates the
rect containing both of them. This is useful because if the padding was
added directly onto the widget, the padding would be rescaled as well.
If the central widget is not set, return the scene rect instead.
Returns
-------
QtCore.QRectF
"""
if self.__central_widget is None:
return self.scene().itemsBoundingRect().adjusted(*self.__padding)
return self.__central_widget.boundingRect().adjusted(*self.__padding)
|
[
"def",
"central_widget_rect",
"(",
"self",
")",
":",
"if",
"self",
".",
"__central_widget",
"is",
"None",
":",
"return",
"self",
".",
"scene",
"(",
")",
".",
"itemsBoundingRect",
"(",
")",
".",
"adjusted",
"(",
"*",
"self",
".",
"__padding",
")",
"return",
"self",
".",
"__central_widget",
".",
"boundingRect",
"(",
")",
".",
"adjusted",
"(",
"*",
"self",
".",
"__padding",
")"
] |
https://github.com/biolab/orange3/blob/41685e1c7b1d1babe680113685a2d44bcc9fec0b/Orange/widgets/visualize/utils/view.py#L128-L144
|
|
fkie/multimaster_fkie
|
3d23df29d25d71a75c66bbd3cc6e9cbb255724d8
|
fkie_node_manager/src/fkie_node_manager/master_view_proxy.py
|
python
|
MasterViewProxy.on_parameter_filter_changed
|
(self, text)
|
Filter the displayed parameter
|
Filter the displayed parameter
|
[
"Filter",
"the",
"displayed",
"parameter"
] |
def on_parameter_filter_changed(self, text):
'''
Filter the displayed parameter
'''
self.parameter_proxyModel.setFilterRegExp(QRegExp(text, Qt.CaseInsensitive, QRegExp.Wildcard))
|
[
"def",
"on_parameter_filter_changed",
"(",
"self",
",",
"text",
")",
":",
"self",
".",
"parameter_proxyModel",
".",
"setFilterRegExp",
"(",
"QRegExp",
"(",
"text",
",",
"Qt",
".",
"CaseInsensitive",
",",
"QRegExp",
".",
"Wildcard",
")",
")"
] |
https://github.com/fkie/multimaster_fkie/blob/3d23df29d25d71a75c66bbd3cc6e9cbb255724d8/fkie_node_manager/src/fkie_node_manager/master_view_proxy.py#L3376-L3380
|
||
google/ci_edit
|
ffaa52473673cc7ec2080bc59996d61414d662c9
|
app/buffer_manager.py
|
python
|
BufferManager.close_text_buffer
|
(self, textBuffer)
|
Warning: this will throw away the buffer. Please be sure the user is
ok with this before calling.
|
Warning: this will throw away the buffer. Please be sure the user is
ok with this before calling.
|
[
"Warning",
"this",
"will",
"throw",
"away",
"the",
"buffer",
".",
"Please",
"be",
"sure",
"the",
"user",
"is",
"ok",
"with",
"this",
"before",
"calling",
"."
] |
def close_text_buffer(self, textBuffer):
"""Warning this will throw away the buffer. Please be sure the user is
ok with this before calling."""
if app.config.strict_debug:
assert issubclass(self.__class__, BufferManager), self
assert issubclass(textBuffer.__class__, app.text_buffer.TextBuffer)
self.untrack_buffer_(textBuffer)
|
[
"def",
"close_text_buffer",
"(",
"self",
",",
"textBuffer",
")",
":",
"if",
"app",
".",
"config",
".",
"strict_debug",
":",
"assert",
"issubclass",
"(",
"self",
".",
"__class__",
",",
"BufferManager",
")",
",",
"self",
"assert",
"issubclass",
"(",
"textBuffer",
".",
"__class__",
",",
"app",
".",
"text_buffer",
".",
"TextBuffer",
")",
"self",
".",
"untrack_buffer_",
"(",
"textBuffer",
")"
] |
https://github.com/google/ci_edit/blob/ffaa52473673cc7ec2080bc59996d61414d662c9/app/buffer_manager.py#L50-L56
|
||
CouchPotato/CouchPotatoV1
|
135b3331d1b88ef645e29b76f2d4cc4a732c9232
|
library/sqlalchemy/types.py
|
python
|
PickleType.__init__
|
(self, protocol=pickle.HIGHEST_PROTOCOL,
pickler=None, mutable=True, comparator=None)
|
Construct a PickleType.
:param protocol: defaults to ``pickle.HIGHEST_PROTOCOL``.
:param pickler: defaults to cPickle.pickle or pickle.pickle if
cPickle is not available. May be any object with
pickle-compatible ``dumps`` and ``loads`` methods.
:param mutable: defaults to True; implements
:meth:`AbstractType.is_mutable`. When ``True``, incoming
objects should provide an ``__eq__()`` method which
performs the desired deep comparison of members, or the
``comparator`` argument must be present.
:param comparator: optional. a 2-arg callable predicate used
to compare values of this type. Otherwise,
the == operator is used to compare values.
|
Construct a PickleType.
|
[
"Construct",
"a",
"PickleType",
"."
] |
def __init__(self, protocol=pickle.HIGHEST_PROTOCOL,
pickler=None, mutable=True, comparator=None):
"""
Construct a PickleType.
:param protocol: defaults to ``pickle.HIGHEST_PROTOCOL``.
:param pickler: defaults to cPickle.pickle or pickle.pickle if
cPickle is not available. May be any object with
    pickle-compatible ``dumps`` and ``loads`` methods.
:param mutable: defaults to True; implements
:meth:`AbstractType.is_mutable`. When ``True``, incoming
objects should provide an ``__eq__()`` method which
performs the desired deep comparison of members, or the
``comparator`` argument must be present.
:param comparator: optional. a 2-arg callable predicate used
to compare values of this type. Otherwise,
the == operator is used to compare values.
"""
self.protocol = protocol
self.pickler = pickler or pickle
self.mutable = mutable
self.comparator = comparator
super(PickleType, self).__init__()
|
[
"def",
"__init__",
"(",
"self",
",",
"protocol",
"=",
"pickle",
".",
"HIGHEST_PROTOCOL",
",",
"pickler",
"=",
"None",
",",
"mutable",
"=",
"True",
",",
"comparator",
"=",
"None",
")",
":",
"self",
".",
"protocol",
"=",
"protocol",
"self",
".",
"pickler",
"=",
"pickler",
"or",
"pickle",
"self",
".",
"mutable",
"=",
"mutable",
"self",
".",
"comparator",
"=",
"comparator",
"super",
"(",
"PickleType",
",",
"self",
")",
".",
"__init__",
"(",
")"
] |
https://github.com/CouchPotato/CouchPotatoV1/blob/135b3331d1b88ef645e29b76f2d4cc4a732c9232/library/sqlalchemy/types.py#L1565-L1591
|
||
pantsbuild/pex
|
473c6ac732ed4bc338b4b20a9ec930d1d722c9b4
|
pex/common.py
|
python
|
Chroot.clone
|
(self, into=None)
|
return new_chroot
|
Clone this chroot.
:keyword into: (optional) An optional destination directory to clone the
Chroot into. If not specified, a temporary directory will be created.
.. versionchanged:: 0.8
The temporary directory created when ``into`` is not specified is now garbage collected on
interpreter exit.
|
Clone this chroot.
|
[
"Clone",
"this",
"chroot",
"."
] |
def clone(self, into=None):
"""Clone this chroot.
:keyword into: (optional) An optional destination directory to clone the
Chroot into. If not specified, a temporary directory will be created.
.. versionchanged:: 0.8
The temporary directory created when ``into`` is not specified is now garbage collected on
interpreter exit.
"""
into = into or safe_mkdtemp()
new_chroot = Chroot(into)
for label, fileset in self.filesets.items():
for fn in fileset:
new_chroot.link(os.path.join(self.chroot, fn), fn, label=label)
return new_chroot
|
[
"def",
"clone",
"(",
"self",
",",
"into",
"=",
"None",
")",
":",
"into",
"=",
"into",
"or",
"safe_mkdtemp",
"(",
")",
"new_chroot",
"=",
"Chroot",
"(",
"into",
")",
"for",
"label",
",",
"fileset",
"in",
"self",
".",
"filesets",
".",
"items",
"(",
")",
":",
"for",
"fn",
"in",
"fileset",
":",
"new_chroot",
".",
"link",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"chroot",
",",
"fn",
")",
",",
"fn",
",",
"label",
"=",
"label",
")",
"return",
"new_chroot"
] |
https://github.com/pantsbuild/pex/blob/473c6ac732ed4bc338b4b20a9ec930d1d722c9b4/pex/common.py#L603-L618
|
|
opencobra/optlang
|
47c0ded00abf115cfedfc2a0b8fbcd9c9cbfbde5
|
src/optlang/interface.py
|
python
|
Objective.value
|
(self)
|
return self._value
|
The objective value.
|
The objective value.
|
[
"The",
"objective",
"value",
"."
] |
def value(self):
"""The objective value."""
return self._value
|
[
"def",
"value",
"(",
"self",
")",
":",
"return",
"self",
".",
"_value"
] |
https://github.com/opencobra/optlang/blob/47c0ded00abf115cfedfc2a0b8fbcd9c9cbfbde5/src/optlang/interface.py#L888-L890
|
|
ambitioninc/django-query-builder
|
c7abba65e7c92c2943c8e4e7e0865e1b98a42c34
|
querybuilder/query.py
|
python
|
QueryWindow.build_partition_by_fields
|
(self)
|
return select_sql.replace('GROUP BY', 'PARTITION BY', 1)
|
Equivalent to ``self.build_groups()`` except for the GROUP BY
clause being named PARTITION BY
:return: The sql to be used in the PARTITION BY clause
:rtype: str
|
Equivalent to ``self.build_groups()`` except for the GROUP BY
clause being named PARTITION BY
|
[
"Equivalent",
"to",
"self",
".",
"build_groups",
"()",
"except",
"for",
"the",
"GROUP",
"BY",
"clause",
"being",
"named",
"PARTITION",
"BY"
] |
def build_partition_by_fields(self):
"""
Equivalent to ``self.build_groups()`` except for the GROUP BY
clause being named PARTITION BY
:return: The sql to be used in the PARTITION BY clause
:rtype: str
"""
select_sql = self.build_groups()
return select_sql.replace('GROUP BY', 'PARTITION BY', 1)
|
[
"def",
"build_partition_by_fields",
"(",
"self",
")",
":",
"select_sql",
"=",
"self",
".",
"build_groups",
"(",
")",
"return",
"select_sql",
".",
"replace",
"(",
"'GROUP BY'",
",",
"'PARTITION BY'",
",",
"1",
")"
] |
https://github.com/ambitioninc/django-query-builder/blob/c7abba65e7c92c2943c8e4e7e0865e1b98a42c34/querybuilder/query.py#L1984-L1993
|
|
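The method leans on str.replace with a count of 1, so only the leading GROUP BY keyword is rewritten even if the fragment mentioned the phrase again later. A tiny demonstration with an invented SQL fragment:

group_sql = "GROUP BY account_id, created_on"
partition_sql = group_sql.replace('GROUP BY', 'PARTITION BY', 1)
print(partition_sql)  # PARTITION BY account_id, created_on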
nick6918/MyDeepLearning
|
1316fbd244cb06cdc541c98389c5f97aad2be89c
|
lib/layers/fast_layers.py
|
python
|
max_pool_forward_fast
|
(x, pool_param)
|
return out, cache
|
A fast implementation of the forward pass for a max pooling layer.
This chooses between the reshape method and the im2col method. If the pooling
regions are square and tile the input image, then we can use the reshape
method which is very fast. Otherwise we fall back on the im2col method, which
is not much faster than the naive method.
|
A fast implementation of the forward pass for a max pooling layer.
|
[
"A",
"fast",
"implementation",
"of",
"the",
"forward",
"pass",
"for",
"a",
"max",
"pooling",
"layer",
"."
] |
def max_pool_forward_fast(x, pool_param):
"""
A fast implementation of the forward pass for a max pooling layer.
This chooses between the reshape method and the im2col method. If the pooling
regions are square and tile the input image, then we can use the reshape
method which is very fast. Otherwise we fall back on the im2col method, which
is not much faster than the naive method.
"""
N, C, H, W = x.shape
pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']
stride = pool_param['stride']
same_size = pool_height == pool_width == stride
tiles = H % pool_height == 0 and W % pool_width == 0
if same_size and tiles:
out, reshape_cache = max_pool_forward_reshape(x, pool_param)
cache = ('reshape', reshape_cache)
else:
out, im2col_cache = max_pool_forward_im2col(x, pool_param)
cache = ('im2col', im2col_cache)
return out, cache
|
[
"def",
"max_pool_forward_fast",
"(",
"x",
",",
"pool_param",
")",
":",
"N",
",",
"C",
",",
"H",
",",
"W",
"=",
"x",
".",
"shape",
"pool_height",
",",
"pool_width",
"=",
"pool_param",
"[",
"'pool_height'",
"]",
",",
"pool_param",
"[",
"'pool_width'",
"]",
"stride",
"=",
"pool_param",
"[",
"'stride'",
"]",
"same_size",
"=",
"pool_height",
"==",
"pool_width",
"==",
"stride",
"tiles",
"=",
"H",
"%",
"pool_height",
"==",
"0",
"and",
"W",
"%",
"pool_width",
"==",
"0",
"if",
"same_size",
"and",
"tiles",
":",
"out",
",",
"reshape_cache",
"=",
"max_pool_forward_reshape",
"(",
"x",
",",
"pool_param",
")",
"cache",
"=",
"(",
"'reshape'",
",",
"reshape_cache",
")",
"else",
":",
"out",
",",
"im2col_cache",
"=",
"max_pool_forward_im2col",
"(",
"x",
",",
"pool_param",
")",
"cache",
"=",
"(",
"'im2col'",
",",
"im2col_cache",
")",
"return",
"out",
",",
"cache"
] |
https://github.com/nick6918/MyDeepLearning/blob/1316fbd244cb06cdc541c98389c5f97aad2be89c/lib/layers/fast_layers.py#L133-L154
|
|
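The fast path works because a square pool that tiles the input can be expressed as one reshape plus two max reductions, with no Python loops. A NumPy sketch of that reshape trick for 2x2 pooling with stride 2 (shapes invented for the demo):

import numpy as np

x = np.arange(32.0).reshape(1, 2, 4, 4)  # (N, C, H, W); H and W tile evenly
N, C, H, W = x.shape
ph = pw = 2  # pool_height == pool_width == stride, the same_size condition

pooled = (x.reshape(N, C, H // ph, ph, W // pw, pw)
           .max(axis=3)    # reduce over pool rows
           .max(axis=4))   # then over pool columns
print(pooled.shape)  # (1, 2, 2, 2)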
emesene/emesene
|
4548a4098310e21b16437bb36223a7f632a4f7bc
|
emesene/gui/common/TrayIcon.py
|
python
|
TrayIcon._on_activate
|
(self, trayicon)
|
callback called when the status icon is activated
(includes clicking the icon)
|
callback called when the status icon is activated
(includes clicking the icon)
|
[
"callback",
"called",
"when",
"the",
"status",
"icon",
"is",
"activated",
"(",
"includes",
"clicking",
"the",
"icon",
")"
] |
def _on_activate(self, trayicon):
"""
callback called when the status icon is activated
(includes clicking the icon)
"""
if self.last_new_message is not None and (self.count != 0):
# show the tab with the latest message
cid = self.last_new_message
conv_manager = self.handler.session.get_conversation_manager(cid)
if conv_manager:
conversation = conv_manager.has_similar_conversation(cid)
conv_manager.present(conversation)
else:
self.handler.on_hide_show_mainwindow(self.main_window)
|
[
"def",
"_on_activate",
"(",
"self",
",",
"trayicon",
")",
":",
"if",
"self",
".",
"last_new_message",
"is",
"not",
"None",
"and",
"(",
"self",
".",
"count",
"!=",
"0",
")",
":",
"# show the tab with the latest message",
"cid",
"=",
"self",
".",
"last_new_message",
"conv_manager",
"=",
"self",
".",
"handler",
".",
"session",
".",
"get_conversation_manager",
"(",
"cid",
")",
"if",
"conv_manager",
":",
"conversation",
"=",
"conv_manager",
".",
"has_similar_conversation",
"(",
"cid",
")",
"conv_manager",
".",
"present",
"(",
"conversation",
")",
"else",
":",
"self",
".",
"handler",
".",
"on_hide_show_mainwindow",
"(",
"self",
".",
"main_window",
")"
] |
https://github.com/emesene/emesene/blob/4548a4098310e21b16437bb36223a7f632a4f7bc/emesene/gui/common/TrayIcon.py#L120-L134
|
||
beurtschipper/crackcoin
|
73ae99b8f6957f1df3f4549074beaf616e4588d7
|
crackcoin/wallets.py
|
python
|
printBasicInfo
|
()
|
Print basic wallet info
|
Print basic wallet info
|
[
"Print",
"basic",
"wallet",
"info"
] |
def printBasicInfo():
''' Print basic wallet info '''
outputs = crackcoin.db.doQuery("select distinct transactions_outputs.amount, transactions_outputs.address, transactions_outputs.outputHash from transactions_outputs LEFT JOIN transactions_inputs WHERE NOT EXISTS(SELECT * FROM transactions_inputs WHERE transactions_outputs.outputHash = transactions_inputs.previousOutput)", result='all')
wallets = crackcoin.db.doQuery("select * from wallets", result='all')
totalMoney = 0
for wallet in wallets:
ID, privateKey, publicKey, myAddress = wallet
print "Wallet address: %s " % myAddress
walletMoney = 0
for output in outputs:
amount, address, outputHash = output
if address == myAddress:
walletMoney += amount
print "Money in wallet: %s\n" % str(walletMoney)
totalMoney += walletMoney
print "Total money: %s\n" % str(totalMoney)
|
[
"def",
"printBasicInfo",
"(",
")",
":",
"outputs",
"=",
"crackcoin",
".",
"db",
".",
"doQuery",
"(",
"\"select distinct transactions_outputs.amount, transactions_outputs.address, transactions_outputs.outputHash from transactions_outputs LEFT JOIN transactions_inputs WHERE NOT EXISTS(SELECT * FROM transactions_inputs WHERE transactions_outputs.outputHash = transactions_inputs.previousOutput)\"",
",",
"result",
"=",
"'all'",
")",
"wallets",
"=",
"crackcoin",
".",
"db",
".",
"doQuery",
"(",
"\"select * from wallets\"",
",",
"result",
"=",
"'all'",
")",
"totalMoney",
"=",
"0",
"for",
"wallet",
"in",
"wallets",
":",
"ID",
",",
"privateKey",
",",
"publicKey",
",",
"myAddress",
"=",
"wallet",
"print",
"\"Wallet address: %s \"",
"%",
"myAddress",
"walletMoney",
"=",
"0",
"for",
"output",
"in",
"outputs",
":",
"amount",
",",
"address",
",",
"outputHash",
"=",
"output",
"if",
"address",
"==",
"myAddress",
":",
"walletMoney",
"+=",
"amount",
"print",
"\"Money in wallet: %s\\n\"",
"%",
"str",
"(",
"walletMoney",
")",
"totalMoney",
"+=",
"walletMoney",
"print",
"\"Total money: %s\\n\"",
"%",
"str",
"(",
"totalMoney",
")"
] |
https://github.com/beurtschipper/crackcoin/blob/73ae99b8f6957f1df3f4549074beaf616e4588d7/crackcoin/wallets.py#L21-L44
|
||
pyparallel/pyparallel
|
11e8c6072d48c8f13641925d17b147bf36ee0ba3
|
Lib/tkinter/__init__.py
|
python
|
Canvas.create_bitmap
|
(self, *args, **kw)
|
return self._create('bitmap', args, kw)
|
Create bitmap with coordinates x1,y1.
|
Create bitmap with coordinates x1,y1.
|
[
"Create",
"bitmap",
"with",
"coordinates",
"x1",
"y1",
"."
] |
def create_bitmap(self, *args, **kw):
"""Create bitmap with coordinates x1,y1."""
return self._create('bitmap', args, kw)
|
[
"def",
"create_bitmap",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"return",
"self",
".",
"_create",
"(",
"'bitmap'",
",",
"args",
",",
"kw",
")"
] |
https://github.com/pyparallel/pyparallel/blob/11e8c6072d48c8f13641925d17b147bf36ee0ba3/Lib/tkinter/__init__.py#L2332-L2334
|
|
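A usage sketch for the canvas method above, drawing one of Tk's stock bitmaps ('question') at the given anchor point; this needs a display to run:

import tkinter as tk

root = tk.Tk()
canvas = tk.Canvas(root, width=120, height=120)
canvas.pack()
# x1, y1 position the bitmap; it is centered on that point by default.
canvas.create_bitmap(60, 60, bitmap='question')
root.mainloop()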
angr/angr
|
4b04d56ace135018083d36d9083805be8146688b
|
angr/state_plugins/globals.py
|
python
|
SimStateGlobals.widen
|
(self, others)
|
return False
|
[] |
def widen(self, others): # pylint: disable=unused-argument
l.warning("Widening is unimplemented for globals")
return False
|
[
"def",
"widen",
"(",
"self",
",",
"others",
")",
":",
"# pylint: disable=unused-argument",
"l",
".",
"warning",
"(",
"\"Widening is unimplemented for globals\"",
")",
"return",
"False"
] |
https://github.com/angr/angr/blob/4b04d56ace135018083d36d9083805be8146688b/angr/state_plugins/globals.py#L26-L28
|