Dataset schema (one row per extracted function):

nwo — string, 5–106 chars (repository name with owner)
sha — string, 40 chars (commit hash)
path — string, 4–174 chars (file path within the repository)
language — string, 1 distinct value (python)
identifier — string, 1–140 chars (function or method name)
parameters — string, 0–87.7k chars
argument_list — string, 1 distinct value
return_statement — string, 0–426k chars
docstring — string, 0–64.3k chars
docstring_summary — string, 0–26.3k chars
docstring_tokens — list
function — string, 18–4.83M chars
function_tokens — list
url — string, 83–304 chars (source permalink)
nwo: hudson-and-thames/mlfinlab
sha: 79dcc7120ec84110578f75b025a75850eb72fc73
path: mlfinlab/multi_product/etf_trick.py
language: python
identifier: ETFTrick._update_cache
parameters: (self)
docstring:
    Updates cache (two previous rows) when new data batch is read into the memory. Cache is used to
    recalculate ETF trick value which corresponds to previous batch last row. That is why we need 2 previous rows
    for close price difference calculation
    :return: (dict): dictionary with open, close, alloc, costs and rates last 2 rows
docstring_summary:
    Updates cache (two previous rows) when new data batch is read into the memory. Cache is used to
    recalculate ETF trick value which corresponds to previous batch last row. That is why we need 2 previous rows
    for close price difference calculation
function:
def _update_cache(self):
"""
Updates cache (two previous rows) when new data batch is read into the memory. Cache is used to
recalculate ETF trick value which corresponds to previous batch last row. That is why we need 2 previous rows
for close price difference calculation
:return: (dict): dictionary with open, close, alloc, costs and rates last 2 rows
"""
pass
url: https://github.com/hudson-and-thames/mlfinlab/blob/79dcc7120ec84110578f75b025a75850eb72fc73/mlfinlab/multi_product/etf_trick.py#L72-L81
nwo: XingangPan/Switchable-Whitening
sha: dc8a9947ee27285ab123db1f152e18959e0e0861
path: utils/common_utils.py
language: python
identifier: accuracy
parameters: (output, target, topk=(1,))
return_statement: return res
docstring: Computes the precision@k for the specified values of k
docstring_summary: Computes the precision
function:
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
url: https://github.com/XingangPan/Switchable-Whitening/blob/dc8a9947ee27285ab123db1f152e18959e0e0861/utils/common_utils.py#L153-L166
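A minimal usage sketch for the accuracy function above, assuming PyTorch tensors; the batch of logits and labels here is made up for illustration:

    import torch

    # 8 samples, 10 classes: raw logits and integer class labels.
    output = torch.randn(8, 10)
    target = torch.randint(0, 10, (8,))

    top1, top5 = accuracy(output, target, topk=(1, 5))
    # Each result is a 1-element tensor holding the percentage of samples
    # whose true label appears among the top-k predictions.
    print(top1.item(), top5.item())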
nwo: zhl2008/awd-platform
sha: 0416b31abea29743387b10b3914581fbe8e7da5e
path: web_hxb2/lib/python3.5/site-packages/django/db/backends/base/schema.py
language: python
identifier: BaseDatabaseSchemaEditor._digest
parameters: (cls, *args)
return_statement: return h.hexdigest()[:8]
docstring:
    Generates a 32-bit digest of a set of arguments that can be used to
    shorten identifying names.
docstring_summary:
    Generates a 32-bit digest of a set of arguments that can be used to
    shorten identifying names.
function:
def _digest(cls, *args):
"""
Generates a 32-bit digest of a set of arguments that can be used to
shorten identifying names.
"""
h = hashlib.md5()
for arg in args:
h.update(force_bytes(arg))
return h.hexdigest()[:8]
url: https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/django/db/backends/base/schema.py#L118-L126
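A standalone sketch of what the _digest helper above computes; Django's force_bytes is approximated with str().encode() for illustration, and the argument values are made up:

    import hashlib

    def digest(*args):
        h = hashlib.md5()
        for arg in args:
            h.update(str(arg).encode('utf-8'))
        # First 8 hex characters = 32 bits of the MD5 digest.
        return h.hexdigest()[:8]

    print(digest('myapp_table', 'long_column_name'))  # an 8-character hex suffix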
nwo: leo-editor/leo-editor
sha: 383d6776d135ef17d73d935a2f0ecb3ac0e99494
path: leo/plugins/qt_gui.py
language: python
identifier: LeoQtGui.createLeoFrame
parameters: (self, c, title)
return_statement: return qt_frame.LeoQtFrame(c, title, gui=self)
docstring: Create a new Leo frame.
docstring_summary: Create a new Leo frame.
function:
def createLeoFrame(self, c, title):
"""Create a new Leo frame."""
return qt_frame.LeoQtFrame(c, title, gui=self)
url: https://github.com/leo-editor/leo-editor/blob/383d6776d135ef17d73d935a2f0ecb3ac0e99494/leo/plugins/qt_gui.py#L276-L278
nwo: HenriWahl/Nagstamon
sha: 16549c6860b51a93141d84881c6ad28c35d8581e
path: Nagstamon/Servers/Monitos3.py
language: python
identifier: Monitos3Server.get
parameters: (self, table, raw=[], headers={})
return_statement: return result
docstring: send data to livestatus socket, receive result, format as json
docstring_summary: send data to livestatus socket, receive result, format as json
function:
def get(self, table, raw=[], headers={}):
"""send data to livestatus socket, receive result, format as json"""
data = ['GET %s' % table, ]
headers['OutputFormat'] = 'json'
headers['ColumnHeaders'] = 'on'
for k, v in headers.items():
data.append('%s: %s' % (k, v))
for line in raw:
data.append(line)
result = self.communicate(data)
if result:
return json.loads(result)
return result
url: https://github.com/HenriWahl/Nagstamon/blob/16549c6860b51a93141d84881c6ad28c35d8581e/Nagstamon/Servers/Monitos3.py#L128-L140
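One detail worth flagging in the signature above: raw=[] and headers={} are mutable default arguments, created once and shared across calls (benign here because the method overwrites the same header keys every time, but a classic hazard). A minimal demonstration of the general pitfall, with made-up names:

    def append_to(item, bucket=[]):
        bucket.append(item)
        return bucket

    print(append_to(1))  # [1]
    print(append_to(2))  # [1, 2] -- the same default list object is reused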
nwo: exentriquesolutions/nip.io
sha: cf6c5be870b63f07ecdf9f56500e5d8e846f3593
path: nipio/backend.py
language: python
identifier: DynamicBackend.run
parameters: (self)
docstring:
    Run the pipe backend.
    This is a loop that runs forever.
docstring_summary: Run the pipe backend.
function:
def run(self) -> None:
"""Run the pipe backend.
This is a loop that runs forever.
"""
_log('starting up')
handshake = _get_next()
if handshake[1] != '5':
_log(f'Not version 5: {handshake}')
sys.exit(1)
_write('OK', 'nip.io backend - We are good')
_log('Done handshake')
while True:
cmd = _get_next()
if _is_debug():
_log(f"cmd: {cmd}")
if cmd[0] == "CMD":
_log(f"received command: {cmd}")
self.handle_command(cmd)
continue
if cmd[0] == "END":
_log("completing")
break
if len(cmd) < 6:
_log(f'did not understand: {cmd}')
_write('FAIL')
continue
qname = cmd[1].lower()
qtype = cmd[3]
if (qtype == 'A' or qtype == 'ANY') and qname.endswith(self.domain):
if qname == self.domain:
self.handle_self(self.domain)
elif qname in self.name_servers:
self.handle_nameservers(qname)
else:
self.handle_subdomains(qname)
elif qtype == 'SOA' and qname.endswith(self.domain):
self.handle_soa(qname)
else:
self.handle_unknown(qtype, qname)
url: https://github.com/exentriquesolutions/nip.io/blob/cf6c5be870b63f07ecdf9f56500e5d8e846f3593/nipio/backend.py#L174-L219
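The loop above speaks the PowerDNS pipe-backend protocol: tab-separated lines on stdin/stdout, opened by a HELO handshake carrying the ABI version (5 here). A sketch of how one query line would be parsed, with made-up field values:

    # Fields: tag, qname, qclass, qtype, id, remote-ip (later ABI versions add more).
    line = "Q\t10-0-0-1.nip.io\tIN\tA\t-1\t203.0.113.7"
    cmd = line.rstrip('\n').split('\t')
    qname, qtype = cmd[1].lower(), cmd[3]
    print(qname, qtype)  # 10-0-0-1.nip.io A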
nwo: ftramer/Steal-ML
sha: e37c67b2f74e42a85370ce431bc9d4e391b0ed8b
path: regression/aws_wrapper/regression.py
language: python
identifier: eps_round
parameters: (x, epsilon)
return_statement: return round(x / epsilon) * epsilon
docstring: Round a floating point value to the nearest multiple of eps
docstring_summary: Round a floating point value to the nearest multiple of eps
function:
def eps_round(x, epsilon):
"""
Round a floating point value to the nearest multiple of eps
"""
return round(x / epsilon) * epsilon
url: https://github.com/ftramer/Steal-ML/blob/e37c67b2f74e42a85370ce431bc9d4e391b0ed8b/regression/aws_wrapper/regression.py#L60-L64
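A worked example for eps_round above, rounding to the nearest multiple of 0.05 (values made up):

    # 0.537 / 0.05 = 10.74 -> round() gives 11 -> 11 * 0.05 = 0.55
    print(eps_round(0.537, 0.05))  # 0.55 (up to floating-point error)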
nwo: BMW-InnovationLab/BMW-TensorFlow-Training-GUI
sha: 4f10d1f00f9ac312ca833e5b28fd0f8952cfee17
path: training_api/research/object_detection/core/box_list_ops.py
language: python
identifier: _copy_extra_fields
parameters: (boxlist_to_copy_to, boxlist_to_copy_from)
return_statement: return boxlist_to_copy_to
docstring:
    Copies the extra fields of boxlist_to_copy_from to boxlist_to_copy_to.
    Args:
      boxlist_to_copy_to: BoxList to which extra fields are copied.
      boxlist_to_copy_from: BoxList from which fields are copied.
    Returns:
      boxlist_to_copy_to with extra fields.
docstring_summary: Copies the extra fields of boxlist_to_copy_from to boxlist_to_copy_to.
function:
def _copy_extra_fields(boxlist_to_copy_to, boxlist_to_copy_from):
"""Copies the extra fields of boxlist_to_copy_from to boxlist_to_copy_to.
Args:
boxlist_to_copy_to: BoxList to which extra fields are copied.
boxlist_to_copy_from: BoxList from which fields are copied.
Returns:
boxlist_to_copy_to with extra fields.
"""
for field in boxlist_to_copy_from.get_extra_fields():
boxlist_to_copy_to.add_field(field, boxlist_to_copy_from.get_field(field))
return boxlist_to_copy_to
url: https://github.com/BMW-InnovationLab/BMW-TensorFlow-Training-GUI/blob/4f10d1f00f9ac312ca833e5b28fd0f8952cfee17/training_api/research/object_detection/core/box_list_ops.py#L725-L737
nwo: pculture/miro
sha: d8e4594441939514dd2ac29812bf37087bb3aea5
path: tv/lib/config.py
language: python
identifier: set_theme
parameters: (theme)
docstring:
    Setup the theme to get config data from.
    This method exists because we need to create the config object ASAP,
    before we know the theme on some platforms. Therefore, we create the
    config object, then later on set the theme.
docstring_summary: Setup the theme to get config data from.
function:
def set_theme(theme):
"""Setup the theme to get config data from.
This method exists because we need to create the config object ASAP,
    before we know the theme on some platforms. Therefore, we create the
config object, then later on set the theme.
"""
app.configfile = AppConfig(theme)
url: https://github.com/pculture/miro/blob/d8e4594441939514dd2ac29812bf37087bb3aea5/tv/lib/config.py#L233-L240
nwo: makerbot/ReplicatorG
sha: d6f2b07785a5a5f1e172fb87cb4303b17c575d5d
path: skein_engines/skeinforge-50/fabmetheus_utilities/intercircle.py
language: python
identifier: getCircleNodesFromLoop
parameters: (loop, radius, thresholdRatio=0.9)
return_statement: return getCircleNodesFromPoints( points, radius )
docstring: Get the circle nodes from every point on a loop and between points.
docstring_summary: Get the circle nodes from every point on a loop and between points.
function:
def getCircleNodesFromLoop(loop, radius, thresholdRatio=0.9):
'Get the circle nodes from every point on a loop and between points.'
radius = abs(radius)
points = getPointsFromLoop( loop, radius, thresholdRatio )
return getCircleNodesFromPoints( points, radius )
url: https://github.com/makerbot/ReplicatorG/blob/d6f2b07785a5a5f1e172fb87cb4303b17c575d5d/skein_engines/skeinforge-50/fabmetheus_utilities/intercircle.py#L296-L300
nwo: SteveDoyle2/pyNastran
sha: eda651ac2d4883d95a34951f8a002ff94f642a1a
path: pyNastran/bdf/case_control_deck2.py
language: python
identifier: CaseControlDeck.convert_to_sol_200
parameters: (self, model: BDF)
docstring:
    Takes a case control deck and changes it from a SOL xxx to a SOL 200
    Parameters
    ----------
    model : BDF()
        the BDF object
    .. todo:: not done...
docstring_summary: Takes a case control deck and changes it from a SOL xxx to a SOL 200
function:
def convert_to_sol_200(self, model: BDF) -> None:
"""
Takes a case control deck and changes it from a SOL xxx to a SOL 200
Parameters
----------
model : BDF()
the BDF object
.. todo:: not done...
"""
analysis = model.rsolmap_to_str[model.sol]
model.sol = 200
subcase0 = self.subcases[0]
subcase0.add_parameter_to_global_subcase('ANALYSIS', analysis)
url: https://github.com/SteveDoyle2/pyNastran/blob/eda651ac2d4883d95a34951f8a002ff94f642a1a/pyNastran/bdf/case_control_deck2.py#L987-L1003
nwo: scrtlabs/catalyst
sha: 2e8029780f2381da7a0729f7b52505e5db5f535b
path: catalyst/assets/assets.py
language: python
identifier: _filter_kwargs
parameters: (names, dict_)
return_statement: return {k: v for k, v in dict_.items() if k in names and v is not None}
docstring:
    Filter out kwargs from a dictionary.
    Parameters
    ----------
    names : set[str]
        The names to select from ``dict_``.
    dict_ : dict[str, any]
        The dictionary to select from.
    Returns
    -------
    kwargs : dict[str, any]
        ``dict_`` where the keys intersect with ``names`` and the values are
        not None.
docstring_summary: Filter out kwargs from a dictionary.
function:
def _filter_kwargs(names, dict_):
"""Filter out kwargs from a dictionary.
Parameters
----------
names : set[str]
The names to select from ``dict_``.
dict_ : dict[str, any]
The dictionary to select from.
Returns
-------
kwargs : dict[str, any]
``dict_`` where the keys intersect with ``names`` and the values are
not None.
"""
return {k: v for k, v in dict_.items() if k in names and v is not None}
url: https://github.com/scrtlabs/catalyst/blob/2e8029780f2381da7a0729f7b52505e5db5f535b/catalyst/assets/assets.py#L165-L181
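An illustration of _filter_kwargs above with made-up values:

    kwargs = {'symbol': 'AAPL', 'exchange': None, 'start_date': '2016-01-04', 'extra': 1}
    print(_filter_kwargs({'symbol', 'exchange', 'start_date'}, kwargs))
    # {'symbol': 'AAPL', 'start_date': '2016-01-04'}
    # 'exchange' is dropped (value is None); 'extra' is dropped (key not in names).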
nwo: salabim/salabim
sha: e0de846b042daf2dc71aaf43d8adc6486b57f376
path: salabim.py
language: python
identifier: Queue.__radd__
parameters: (self, other)
return_statement: return self.union(other)
function:
def __radd__(self, other):
if other == 0: # to be able to use sum
return self
if not isinstance(other, Queue):
return NotImplemented
return self.union(other)
url: https://github.com/salabim/salabim/blob/e0de846b042daf2dc71aaf43d8adc6486b57f376/salabim.py#L4118-L4123
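The other == 0 branch above exists so the built-in sum() works on queues: sum starts from 0, which triggers __radd__ on the first element. A self-contained sketch of the same pattern with a made-up stand-in class:

    class Bag:
        def __init__(self, items):
            self.items = set(items)

        def union(self, other):
            return Bag(self.items | other.items)

        def __radd__(self, other):
            if other == 0:  # lets sum() start from its default 0
                return self
            if not isinstance(other, Bag):
                return NotImplemented
            return self.union(other)

    total = sum([Bag({1, 2}), Bag({2, 3}), Bag({4})])
    print(total.items)  # {1, 2, 3, 4}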
nwo: holzschu/Carnets
sha: 44effb10ddfc6aa5c8b0687582a724ba82c6b547
path: Library/lib/python3.7/site-packages/mpmath/libmp/six.py
language: python
identifier: iterkeys
parameters: (d)
return_statement: return iter(getattr(d, _iterkeys)())
docstring: Return an iterator over the keys of a dictionary.
docstring_summary: Return an iterator over the keys of a dictionary.
function:
def iterkeys(d):
"""Return an iterator over the keys of a dictionary."""
return iter(getattr(d, _iterkeys)())
url: https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/mpmath/libmp/six.py#L266-L268
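This is the six compatibility shim: _iterkeys is a module-level attribute name, 'keys' on Python 3 and 'iterkeys' on Python 2, so the same call works on both. On Python 3 the call above reduces to:

    d = {'a': 1, 'b': 2}
    assert list(iter(getattr(d, 'keys')())) == ['a', 'b']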
nwo: pypa/pipenv
sha: b21baade71a86ab3ee1429f71fbc14d4f95fb75d
path: pipenv/vendor/pyparsing.py
language: python
identifier: Forward.validate
parameters: (self, validateTrace=None)
function:
def validate(self, validateTrace=None):
if validateTrace is None:
validateTrace = []
if self not in validateTrace:
tmp = validateTrace[:] + [self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion([])
url: https://github.com/pypa/pipenv/blob/b21baade71a86ab3ee1429f71fbc14d4f95fb75d/pipenv/vendor/pyparsing.py#L5045-L5053
nwo: dropbox/dropbox-sdk-python
sha: 015437429be224732990041164a21a0501235db1
path: dropbox/team_log.py
language: python
identifier: ShowcaseDownloadPolicy.is_enabled
parameters: (self)
return_statement: return self._tag == 'enabled'
docstring:
    Check if the union tag is ``enabled``.
    :rtype: bool
docstring_summary: Check if the union tag is ``enabled``.
function:
def is_enabled(self):
"""
Check if the union tag is ``enabled``.
:rtype: bool
"""
return self._tag == 'enabled'
url: https://github.com/dropbox/dropbox-sdk-python/blob/015437429be224732990041164a21a0501235db1/dropbox/team_log.py#L64932-L64938
nwo: caiiiac/Machine-Learning-with-Python
sha: 1a26c4467da41ca4ebc3d5bd789ea942ef79422f
path: MachineLearning/venv/lib/python3.5/site-packages/scipy/spatial/_plotutils.py
language: python
identifier: convex_hull_plot_2d
parameters: (hull, ax=None)
return_statement: return ax.figure
docstring:
    Plot the given convex hull diagram in 2-D
    Parameters
    ----------
    hull : scipy.spatial.ConvexHull instance
        Convex hull to plot
    ax : matplotlib.axes.Axes instance, optional
        Axes to plot on
    Returns
    -------
    fig : matplotlib.figure.Figure instance
        Figure for the plot
    See Also
    --------
    ConvexHull
    Notes
    -----
    Requires Matplotlib.
docstring_summary: Plot the given convex hull diagram in 2-D
function:
def convex_hull_plot_2d(hull, ax=None):
"""
Plot the given convex hull diagram in 2-D
Parameters
----------
hull : scipy.spatial.ConvexHull instance
Convex hull to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
ConvexHull
Notes
-----
Requires Matplotlib.
"""
from matplotlib.collections import LineCollection
if hull.points.shape[1] != 2:
raise ValueError("Convex hull is not 2-D")
ax.plot(hull.points[:,0], hull.points[:,1], 'o')
line_segments = [hull.points[simplex] for simplex in hull.simplices]
ax.add_collection(LineCollection(line_segments,
colors='k',
linestyle='solid'))
_adjust_bounds(ax, hull.points)
return ax.figure
url: https://github.com/caiiiac/Machine-Learning-with-Python/blob/1a26c4467da41ca4ebc3d5bd789ea942ef79422f/MachineLearning/venv/lib/python3.5/site-packages/scipy/spatial/_plotutils.py#L78-L115
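A usage sketch for the copy above. As extracted here the function never creates an axes itself (it calls ax.plot directly), so ax must be passed explicitly; the random points are made up:

    import matplotlib.pyplot as plt
    import numpy as np
    from scipy.spatial import ConvexHull

    rng = np.random.default_rng(0)
    points = rng.random((30, 2))
    hull = ConvexHull(points)

    fig, ax = plt.subplots()
    convex_hull_plot_2d(hull, ax=ax)
    plt.show()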
nwo: AcidWeb/CurseBreaker
sha: 1a8cb60f4db0cc8b7e0702441e1adc0f1829003e
path: CurseBreaker.py
language: python
identifier: TUI.parse_args
parameters: (self, args)
return_statement: return sorted(parsed)
function:
def parse_args(self, args):
parsed = []
for addon in sorted(self.core.config['Addons'], key=lambda k: len(k['Name']), reverse=True):
if addon['Name'] in args or addon['URL'] in args:
parsed.append(addon['Name'])
args = args.replace(addon['Name'], '', 1)
return sorted(parsed)
url: https://github.com/AcidWeb/CurseBreaker/blob/1a8cb60f4db0cc8b7e0702441e1adc0f1829003e/CurseBreaker.py#L400-L406
nwo: galaxyproject/galaxy
sha: 4c03520f05062e0f4a1b3655dc0b7452fda69943
path: lib/galaxy/webapps/galaxy/services/datasets.py
language: python
identifier: DatasetsService.index
parameters:
    (
        self,
        trans: ProvidesHistoryContext,
        history_id: Optional[EncodedDatabaseIdField],
        serialization_params: SerializationParams,
        filter_query_params: FilterQueryParams,
    )
return_statement:
    return [
        self.serializer_by_type[content.history_content_type].serialize_to_view(content, user=user, trans=trans, view=view)
        for content in contents
    ]
docstring:
    Search datasets or collections using a query system and returns a list
    containing summary of dataset or dataset_collection information.
docstring_summary:
    Search datasets or collections using a query system and returns a list
    containing summary of dataset or dataset_collection information.
function:
def index(
self,
trans: ProvidesHistoryContext,
history_id: Optional[EncodedDatabaseIdField],
serialization_params: SerializationParams,
filter_query_params: FilterQueryParams,
) -> List[AnyHistoryContentItem]:
"""
Search datasets or collections using a query system and returns a list
containing summary of dataset or dataset_collection information.
"""
user = self.get_authenticated_user(trans)
filters = self.history_contents_filters.parse_query_filters(filter_query_params)
view = serialization_params.view or 'summary'
order_by = self.build_order_by(self.history_contents_manager, filter_query_params.order or "create_time-dsc")
container = None
if history_id:
container = self.history_manager.get_accessible(self.decode_id(history_id), user)
contents = self.history_contents_manager.contents(
container=container,
filters=filters,
limit=filter_query_params.limit or DEFAULT_LIMIT,
offset=filter_query_params.offset,
order_by=order_by,
user_id=user.id,
)
return [
self.serializer_by_type[content.history_content_type].serialize_to_view(content, user=user, trans=trans, view=view)
for content in contents
]
url: https://github.com/galaxyproject/galaxy/blob/4c03520f05062e0f4a1b3655dc0b7452fda69943/lib/galaxy/webapps/galaxy/services/datasets.py#L195-L224
nwo: subuser-security/subuser
sha: 8072271f8fc3dded60b048c2dee878f9840c126a
path: subuserlib/resolve.py
language: python
identifier: lookupRepositoryByPath
parameters: (user,path)
return_statement: return None
docstring: If a repository with this path exists, return that repository. Otherwise, return None.
docstring_summary: If a repository with this path exists, return that repository. Otherwise, return None.
function:
def lookupRepositoryByPath(user,path):
"""
If a repository with this path exists, return that repository. Otherwise, return None.
"""
for _,repository in user.registry.repositories.items():
if repository.isLocal and path == repository.repoPath:
return repository
return None
url: https://github.com/subuser-security/subuser/blob/8072271f8fc3dded60b048c2dee878f9840c126a/subuserlib/resolve.py#L116-L123
nwo: openstack/horizon
sha: 12bb9fe5184c9dd3329ba17b3d03c90887dbcc3d
path: horizon/tabs/views.py
language: python
identifier: TabView.get_tabs
parameters: (self, request, **kwargs)
return_statement: return self._tab_group
docstring: Returns the initialized tab group for this view.
docstring_summary: Returns the initialized tab group for this view.
function:
def get_tabs(self, request, **kwargs):
"""Returns the initialized tab group for this view."""
if self._tab_group is None:
self._tab_group = self.tab_group_class(request, **kwargs)
return self._tab_group
url: https://github.com/openstack/horizon/blob/12bb9fe5184c9dd3329ba17b3d03c90887dbcc3d/horizon/tabs/views.py#L40-L44
nwo: boredbird/woe
sha: 335e9ec2a521d3bbccb0ad5d915128119e4d0ca6
path: woe/feature_process.py
language: python
identifier: proc_woe_discrete
parameters: (df,var,global_bt,global_gt,min_sample,alpha=0.01)
return_statement: return civ
docstring:
    process woe transformation of discrete variables
    :param df:
    :param var:
    :param global_bt:
    :param global_gt:
    :param min_sample:
    :return:
docstring_summary:
    process woe transformation of discrete variables
    :param df:
    :param var:
    :param global_bt:
    :param global_gt:
    :param min_sample:
    :return:
function:
def proc_woe_discrete(df,var,global_bt,global_gt,min_sample,alpha=0.01):
'''
process woe transformation of discrete variables
:param df:
:param var:
:param global_bt:
:param global_gt:
:param min_sample:
:return:
'''
s = 'process discrete variable:'+str(var)
print(s.center(60, '-'))
df = df[[var,'target']]
div = DisInfoValue()
div.var_name = var
rdict = {}
cpvar = df[var]
# print('np.unique(df[var]):',np.unique(df[var]))
for var_value in np.unique(df[var]):
# Here come with a '==',in case type error you must do Nan filling process firstly
df_temp = df[df[var] == var_value]
gd = calulate_iv(df_temp,var,global_bt,global_gt)
woei, ivi = gd['woei'],gd['ivi']
div.origin_value.append(var_value)
div.woe_before.append(woei)
rdict[var_value] = woei
# print(var_value,woei,ivi)
cpvar = cpvar.map(rdict)
df[var] = cpvar
iv_tree = binning_data_split(df,var,global_bt,global_gt,min_sample,alpha)
# Traversal tree, get the segmentation point
split_list = []
search(iv_tree, split_list)
split_list = list(np.unique([1.0 * x for x in split_list if x is not None]))
split_list.sort()
# Segmentation point checking and processing
split_list = check_point(df, var, split_list, min_sample)
split_list.sort()
civ = format_iv_split(df, var, split_list,global_bt,global_gt)
civ.is_discrete = 1
split_list_temp = []
split_list_temp.append(float("-inf"))
split_list_temp.extend([i for i in split_list])
split_list_temp.append(float("inf"))
a = []
for i in range(split_list_temp.__len__() - 1):
temp = []
for j in range(div.origin_value.__len__()):
if (div.woe_before[j]>split_list_temp[i]) & (div.woe_before[j]<=split_list_temp[i+1]):
temp.append(div.origin_value[j])
if temp != [] :
a.append(temp)
civ.split_list = a
return civ
url: https://github.com/boredbird/woe/blob/335e9ec2a521d3bbccb0ad5d915128119e4d0ca6/woe/feature_process.py#L384-L448
nwo: eBay/accelerator
sha: 218d9a5e4451ac72b9e65df6c5b32e37d25136c8
path: accelerator/job.py
language: python
identifier: CurrentJob.input_filename
parameters: (self, filename)
return_statement: return os.path.join(self.input_directory, filename)
function:
def input_filename(self, filename):
return os.path.join(self.input_directory, filename)
url: https://github.com/eBay/accelerator/blob/218d9a5e4451ac72b9e65df6c5b32e37d25136c8/accelerator/job.py#L256-L257
nwo: isl-org/MultiObjectiveOptimization
sha: d45eb262ec61c0dafecebfb69027ff6de280dbb3
path: multi_task/min_norm_solvers.py
language: python
identifier: MinNormSolver.find_min_norm_element
parameters: (vecs)
docstring:
    Given a list of vectors (vecs), this method finds the minimum norm element in the convex hull
    as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1.
    It is quite geometric, and the main idea is the fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies in (0, d_{i,j})
    Hence, we find the best 2-task solution, and then run the projected gradient descent until convergence
docstring_summary:
    Given a list of vectors (vecs), this method finds the minimum norm element in the convex hull
    as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1.
    It is quite geometric, and the main idea is the fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies in (0, d_{i,j})
    Hence, we find the best 2-task solution, and then run the projected gradient descent until convergence
function:
def find_min_norm_element(vecs):
"""
Given a list of vectors (vecs), this method finds the minimum norm element in the convex hull
as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1.
It is quite geometric, and the main idea is the fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies in (0, d_{i,j})
Hence, we find the best 2-task solution, and then run the projected gradient descent until convergence
"""
# Solution lying at the combination of two points
dps = {}
init_sol, dps = MinNormSolver._min_norm_2d(vecs, dps)
n=len(vecs)
sol_vec = np.zeros(n)
sol_vec[init_sol[0][0]] = init_sol[1]
sol_vec[init_sol[0][1]] = 1 - init_sol[1]
if n < 3:
# This is optimal for n=2, so return the solution
return sol_vec , init_sol[2]
iter_count = 0
grad_mat = np.zeros((n,n))
for i in range(n):
for j in range(n):
grad_mat[i,j] = dps[(i, j)]
while iter_count < MinNormSolver.MAX_ITER:
grad_dir = -1.0*np.dot(grad_mat, sol_vec)
new_point = MinNormSolver._next_point(sol_vec, grad_dir, n)
# Re-compute the inner products for line search
v1v1 = 0.0
v1v2 = 0.0
v2v2 = 0.0
for i in range(n):
for j in range(n):
v1v1 += sol_vec[i]*sol_vec[j]*dps[(i,j)]
v1v2 += sol_vec[i]*new_point[j]*dps[(i,j)]
v2v2 += new_point[i]*new_point[j]*dps[(i,j)]
nc, nd = MinNormSolver._min_norm_element_from2(v1v1, v1v2, v2v2)
new_sol_vec = nc*sol_vec + (1-nc)*new_point
change = new_sol_vec - sol_vec
if np.sum(np.abs(change)) < MinNormSolver.STOP_CRIT:
return sol_vec, nd
sol_vec = new_sol_vec
url: https://github.com/isl-org/MultiObjectiveOptimization/blob/d45eb262ec61c0dafecebfb69027ff6de280dbb3/multi_task/min_norm_solvers.py#L92-L137
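A hand check of the two-vector case the docstring leans on: min over c of |c*v1 + (1-c)*v2|^2 is a quadratic in c, minimized at c* = <v2 - v1, v2> / |v1 - v2|^2, clipped to [0, 1] (this matches the role of _min_norm_element_from2 in the loop above). A small numpy sketch with made-up vectors:

    import numpy as np

    v1 = np.array([1.0, 0.0])
    v2 = np.array([0.0, 2.0])
    c = np.clip(np.dot(v2 - v1, v2) / np.dot(v1 - v2, v1 - v2), 0.0, 1.0)
    u = c * v1 + (1 - c) * v2
    print(c, u)                # 0.8 [0.8 0.4]
    print(np.dot(u, v1 - v2))  # ~0: at an interior optimum u is orthogonal to v1 - v2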
nwo: OpenMined/SyferText
sha: 1e9a6c1fbbe31d1b20e852242bf2f9ab9bcc1ce6
path: src/syfertext/data/units/text_doc.py
language: python
identifier: TextDoc.__len__
parameters: (self)
return_statement: return len(self.token_metas)
docstring: Return the number of tokens in the Doc.
docstring_summary: Return the number of tokens in the Doc.
function:
def __len__(self):
"""Return the number of tokens in the Doc."""
return len(self.token_metas)
url: https://github.com/OpenMined/SyferText/blob/1e9a6c1fbbe31d1b20e852242bf2f9ab9bcc1ce6/src/syfertext/data/units/text_doc.py#L59-L61
nwo: opps/opps
sha: fdc557a36ad0bca4e4ad339a6814f457c65e58c7
path: opps/core/filters.py
language: python
identifier: ChannelListFilter._get_descendant_count
parameters: (self, item, channel_list)
return_statement: return len(children)
docstring: Search item occurrences on channel_list
docstring_summary: Search item occurrences on channel_list
function:
def _get_descendant_count(self, item, channel_list):
"""
Search item occurrences on channel_list
"""
children = []
item_set = set(item.split('/'))
for channel in channel_list:
splt = set(channel.split('/'))
if item != channel and item_set.issubset(splt):
children.append(channel)
return len(children)
url: https://github.com/opps/opps/blob/fdc557a36ad0bca4e4ad339a6814f457c65e58c7/opps/core/filters.py#L55-L65
nwo: numba/numba
sha: bf480b9e0da858a65508c2b17759a72ee6a44c51
path: numba/core/interpreter.py
language: python
identifier: Interpreter.op_PRINT_ITEM
parameters: (self, inst, item, printvar, res)
function:
def op_PRINT_ITEM(self, inst, item, printvar, res):
item = self.get(item)
printgv = ir.Global("print", print, loc=self.loc)
self.store(value=printgv, name=printvar)
call = ir.Expr.call(self.get(printvar), (item,), (), loc=self.loc)
self.store(value=call, name=res)
url: https://github.com/numba/numba/blob/bf480b9e0da858a65508c2b17759a72ee6a44c51/numba/core/interpreter.py#L742-L747
nwo: JaniceWuo/MovieRecommend
sha: 4c86db64ca45598917d304f535413df3bc9fea65
path: movierecommend/venv1/Lib/site-packages/django/contrib/gis/utils/layermapping.py
language: python
identifier: LayerMapping.verify_geom
parameters: (self, geom, model_field)
return_statement: return g.wkt
docstring:
    Verifies the geometry -- will construct and return a GeometryCollection
    if necessary (for example if the model field is MultiPolygonField while
    the mapped shapefile only contains Polygons).
docstring_summary:
    Verifies the geometry -- will construct and return a GeometryCollection
    if necessary (for example if the model field is MultiPolygonField while
    the mapped shapefile only contains Polygons).
function:
def verify_geom(self, geom, model_field):
"""
Verifies the geometry -- will construct and return a GeometryCollection
if necessary (for example if the model field is MultiPolygonField while
the mapped shapefile only contains Polygons).
"""
# Downgrade a 3D geom to a 2D one, if necessary.
if self.coord_dim != geom.coord_dim:
geom.coord_dim = self.coord_dim
if self.make_multi(geom.geom_type, model_field):
# Constructing a multi-geometry type to contain the single geometry
multi_type = self.MULTI_TYPES[geom.geom_type.num]
g = OGRGeometry(multi_type)
g.add(geom)
else:
g = geom
# Transforming the geometry with our Coordinate Transformation object,
# but only if the class variable `transform` is set w/a CoordTransform
# object.
if self.transform:
g.transform(self.transform)
# Returning the WKT of the geometry.
return g.wkt
url: https://github.com/JaniceWuo/MovieRecommend/blob/4c86db64ca45598917d304f535413df3bc9fea65/movierecommend/venv1/Lib/site-packages/django/contrib/gis/utils/layermapping.py#L422-L447
nwo: tensorwerk/hangar-py
sha: a6deb22854a6c9e9709011b91c1c0eeda7f47bb0
path: src/hangar/records/commiting.py
language: python
identifier: number_commits_recorded
parameters: (refenv)
return_statement: return len(list_all_commits(refenv))
docstring: Returns the total number of commits made across all history.
docstring_summary: Returns the total number of commits made across all history.
function:
def number_commits_recorded(refenv) -> int:
"""Returns the total number of commits made across all history.
"""
return len(list_all_commits(refenv))
url: https://github.com/tensorwerk/hangar-py/blob/a6deb22854a6c9e9709011b91c1c0eeda7f47bb0/src/hangar/records/commiting.py#L700-L703
nwo: pypa/pipenv
sha: b21baade71a86ab3ee1429f71fbc14d4f95fb75d
path: pipenv/patched/notpip/_vendor/distlib/database.py
language: python
identifier: _Cache.__init__
parameters: (self)
docstring: Initialise an instance. There is normally one for each DistributionPath.
docstring_summary: Initialise an instance. There is normally one for each DistributionPath.
function:
def __init__(self):
"""
Initialise an instance. There is normally one for each DistributionPath.
"""
self.name = {}
self.path = {}
self.generated = False
url: https://github.com/pypa/pipenv/blob/b21baade71a86ab3ee1429f71fbc14d4f95fb75d/pipenv/patched/notpip/_vendor/distlib/database.py#L49-L55
nwo: isce-framework/isce2
sha: 0e5114a8bede3caf1d533d98e44dfe4b983e3f48
path: components/isceobj/TopsProc/runDenseOffsets.py
language: python
identifier: runDenseOffsetsCPU
parameters: (self)
docstring: Estimate dense offset field between merged reference bursts and secondary bursts.
docstring_summary: Estimate dense offset field between merged reference bursts and secondary bursts.
function:
def runDenseOffsetsCPU(self):
'''
Estimate dense offset field between merged reference bursts and secondary bursts.
'''
from mroipac.ampcor.DenseAmpcor import DenseAmpcor
os.environ['VRT_SHARED_SOURCE'] = "0"
print('\n============================================================')
print('Configuring DenseAmpcor object for processing...\n')
### Determine appropriate filenames
mf = 'reference.slc'
sf = 'secondary.slc'
if not ((self.numberRangeLooks == 1) and (self.numberAzimuthLooks==1)):
mf += '.full'
sf += '.full'
reference = os.path.join(self._insar.mergedDirname, mf)
secondary = os.path.join(self._insar.mergedDirname, sf)
####For this module currently, we need to create an actual file on disk
for infile in [reference,secondary]:
if os.path.isfile(infile):
continue
cmd = 'gdal_translate -of ENVI {0}.vrt {0}'.format(infile)
status = os.system(cmd)
if status:
raise Exception('{0} could not be executed'.format(status))
### Load the reference object
m = isceobj.createSlcImage()
m.load(reference + '.xml')
m.setAccessMode('READ')
# m.createImage()
### Load the secondary object
s = isceobj.createSlcImage()
s.load(secondary + '.xml')
s.setAccessMode('READ')
# s.createImage()
width = m.getWidth()
length = m.getLength()
objOffset = DenseAmpcor(name='dense')
objOffset.configure()
# objOffset.numberThreads = 1
### Configure dense Ampcor object
print('\nReference frame: %s' % (mf))
print('Secondary frame: %s' % (sf))
print('Main window size width: %d' % (self.winwidth))
print('Main window size height: %d' % (self.winhgt))
print('Search window size width: %d' % (self.srcwidth))
print('Search window size height: %d' % (self.srchgt))
print('Skip sample across: %d' % (self.skipwidth))
print('Skip sample down: %d' % (self.skiphgt))
print('Field margin: %d' % (self.margin))
print('Oversampling factor: %d' % (self.oversample))
print('Gross offset across: %d' % (self.rgshift))
print('Gross offset down: %d\n' % (self.azshift))
objOffset.setWindowSizeWidth(self.winwidth)
objOffset.setWindowSizeHeight(self.winhgt)
objOffset.setSearchWindowSizeWidth(self.srcwidth)
objOffset.setSearchWindowSizeHeight(self.srchgt)
objOffset.skipSampleAcross = self.skipwidth
objOffset.skipSampleDown = self.skiphgt
objOffset.oversamplingFactor = self.oversample
objOffset.setAcrossGrossOffset(self.rgshift)
objOffset.setDownGrossOffset(self.azshift)
objOffset.setFirstPRF(1.0)
objOffset.setSecondPRF(1.0)
if m.dataType.startswith('C'):
objOffset.setImageDataType1('mag')
else:
objOffset.setImageDataType1('real')
if s.dataType.startswith('C'):
objOffset.setImageDataType2('mag')
else:
objOffset.setImageDataType2('real')
objOffset.offsetImageName = os.path.join(self._insar.mergedDirname, self._insar.offsetfile)
objOffset.snrImageName = os.path.join(self._insar.mergedDirname, self._insar.snrfile)
objOffset.covImageName = os.path.join(self._insar.mergedDirname, self._insar.covfile)
print('Output dense offsets file name: %s' % (objOffset.offsetImageName))
print('Output SNR file name: %s' % (objOffset.snrImageName))
print('Output covariance file name: %s' % (objOffset.covImageName))
print('\n======================================')
print('Running dense ampcor...')
print('======================================\n')
objOffset.denseampcor(m, s) ### Where the magic happens...
### Store params for later
self._insar.offset_width = objOffset.offsetCols
self._insar.offset_length = objOffset.offsetLines
self._insar.offset_top = objOffset.locationDown[0][0]
self._insar.offset_left = objOffset.locationAcross[0][0]
url: https://github.com/isce-framework/isce2/blob/0e5114a8bede3caf1d533d98e44dfe4b983e3f48/components/isceobj/TopsProc/runDenseOffsets.py#L33-L136
pyvista/pyvista
|
012dbb95a9aae406c3cd4cd94fc8c477f871e426
|
pyvista/examples/downloads.py
|
python
|
download_crater_topo
|
(load=True)
|
return _download_and_read('Ruapehu_mag_dem_15m_NZTM.vtk', load=load)
|
Download crater dataset.
Parameters
----------
load : bool, optional
Load the dataset after downloading it when ``True``. Set this
to ``False`` and only the filename will be returned.
Returns
-------
pyvista.UniformGrid or str
DataSet or filename depending on ``load``.
Examples
--------
>>> from pyvista import examples
>>> dataset = examples.download_crater_topo()
>>> dataset.plot(cmap="gist_earth", cpos="xy")
This dataset is used in the following examples:
* :ref:`terrain_following_mesh_example`
* :ref:`ref_topo_map_example`
|
Download crater dataset.
|
[
"Download",
"crater",
"dataset",
"."
] |
def download_crater_topo(load=True): # pragma: no cover
"""Download crater dataset.
Parameters
----------
load : bool, optional
Load the dataset after downloading it when ``True``. Set this
to ``False`` and only the filename will be returned.
Returns
-------
pyvista.UniformGrid or str
DataSet or filename depending on ``load``.
Examples
--------
>>> from pyvista import examples
>>> dataset = examples.download_crater_topo()
>>> dataset.plot(cmap="gist_earth", cpos="xy")
This dataset is used in the following examples:
* :ref:`terrain_following_mesh_example`
* :ref:`ref_topo_map_example`
"""
return _download_and_read('Ruapehu_mag_dem_15m_NZTM.vtk', load=load)
|
[
"def",
"download_crater_topo",
"(",
"load",
"=",
"True",
")",
":",
"# pragma: no cover",
"return",
"_download_and_read",
"(",
"'Ruapehu_mag_dem_15m_NZTM.vtk'",
",",
"load",
"=",
"load",
")"
] |
https://github.com/pyvista/pyvista/blob/012dbb95a9aae406c3cd4cd94fc8c477f871e426/pyvista/examples/downloads.py#L2479-L2505
|
|
nschaetti/EchoTorch
|
cba209c49e0fda73172d2e853b85c747f9f5117e
|
echotorch/base_tensors.py
|
python
|
BaseTensor.__getattr__
|
(self, item)
|
r"""Override attribute getter and redirect unknown attributes to wrapper tensor.
|
r"""Override attribute getter and redirect unknown attributes to wrapper tensor.
|
[
"r",
"Override",
"attribute",
"getter",
"and",
"redirect",
"unknown",
"attributes",
"to",
"wrapper",
"tensor",
"."
] |
def __getattr__(self, item):
r"""Override attribute getter and redirect unknown attributes to wrapper tensor.
"""
if hasattr(self._tensor, item):
return getattr(self._tensor, item)
else:
raise AttributeError(
"AttributeError: Neither '{}' object nor its wrapped "
"tensor has no attribute '{}'".format(self.__class__.__name__, item)
)
|
[
"def",
"__getattr__",
"(",
"self",
",",
"item",
")",
":",
"if",
"hasattr",
"(",
"self",
".",
"_tensor",
",",
"item",
")",
":",
"return",
"getattr",
"(",
"self",
".",
"_tensor",
",",
"item",
")",
"else",
":",
"raise",
"AttributeError",
"(",
"\"AttributeError: Neither '{}' object nor its wrapped \"",
"\"tensor has no attribute '{}'\"",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"item",
")",
")"
] |
https://github.com/nschaetti/EchoTorch/blob/cba209c49e0fda73172d2e853b85c747f9f5117e/echotorch/base_tensors.py#L310-L319
|
||
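A minimal, library-free sketch of the delegation pattern used in BaseTensor.__getattr__ above: attribute lookups that fail on the wrapper fall through to the wrapped object, and anything else raises a clear AttributeError. The Wrapper class and names here are illustrative, not EchoTorch API.

class Wrapper:
    """Redirect unknown attribute lookups to a wrapped object."""

    def __init__(self, wrapped):
        # Write through __dict__ so __getattr__ is never triggered here.
        self.__dict__['_wrapped'] = wrapped

    def __getattr__(self, item):
        # Only called when normal lookup on the wrapper itself fails.
        if hasattr(self._wrapped, item):
            return getattr(self._wrapped, item)
        raise AttributeError(
            "Neither '{}' nor its wrapped object has attribute '{}'".format(
                self.__class__.__name__, item))

w = Wrapper([1, 2, 3])
print(w.count(2))  # delegated to the wrapped list -> 1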
JaniceWuo/MovieRecommend
|
4c86db64ca45598917d304f535413df3bc9fea65
|
movierecommend/venv1/Lib/site-packages/django/db/models/fields/__init__.py
|
python
|
CommaSeparatedIntegerField.formfield
|
(self, **kwargs)
|
return super(CommaSeparatedIntegerField, self).formfield(**defaults)
|
[] |
def formfield(self, **kwargs):
defaults = {
'error_messages': {
'invalid': _('Enter only digits separated by commas.'),
}
}
defaults.update(kwargs)
return super(CommaSeparatedIntegerField, self).formfield(**defaults)
|
[
"def",
"formfield",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"defaults",
"=",
"{",
"'error_messages'",
":",
"{",
"'invalid'",
":",
"_",
"(",
"'Enter only digits separated by commas.'",
")",
",",
"}",
"}",
"defaults",
".",
"update",
"(",
"kwargs",
")",
"return",
"super",
"(",
"CommaSeparatedIntegerField",
",",
"self",
")",
".",
"formfield",
"(",
"*",
"*",
"defaults",
")"
] |
https://github.com/JaniceWuo/MovieRecommend/blob/4c86db64ca45598917d304f535413df3bc9fea65/movierecommend/venv1/Lib/site-packages/django/db/models/fields/__init__.py#L1128-L1135
|
|||
MDudek-ICS/TRISIS-TRITON-HATMAN
|
15a00af7fd1040f0430729d024427601f84886a1
|
decompiled_code/library/random.py
|
python
|
Random.randint
|
(self, a, b)
|
return self.randrange(a, b + 1)
|
Return random integer in range [a, b], including both end points.
|
Return random integer in range [a, b], including both end points.
|
[
"Return",
"random",
"integer",
"in",
"range",
"[",
"a",
"b",
"]",
"including",
"both",
"end",
"points",
"."
] |
def randint(self, a, b):
"""Return random integer in range [a, b], including both end points.
"""
return self.randrange(a, b + 1)
|
[
"def",
"randint",
"(",
"self",
",",
"a",
",",
"b",
")",
":",
"return",
"self",
".",
"randrange",
"(",
"a",
",",
"b",
"+",
"1",
")"
] |
https://github.com/MDudek-ICS/TRISIS-TRITON-HATMAN/blob/15a00af7fd1040f0430729d024427601f84886a1/decompiled_code/library/random.py#L201-L204
|
|
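A quick worked check of the inclusive endpoints: randrange(a, b) is half-open, so randint delegates to randrange(a, b + 1) to make b itself reachable.

import random

rng = random.Random(0)
print({rng.randint(1, 3) for _ in range(1000)})    # {1, 2, 3} -- b is drawn
print({rng.randrange(1, 3) for _ in range(1000)})  # {1, 2} -- half-open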
portante/pycscope
|
d991da9d45c6d0a4c6617c267da238a5f1bd2bdf
|
pycscope/__init__.py
|
python
|
replaceNodeType
|
(treeList)
|
return treeList
|
Replaces the 0th element in the list with the name
that corresponds to its node value.
|
Replaces the 0th element in the list with the name
that corresponds to its node value.
|
[
"Replaces",
"the",
"0th",
"element",
"in",
"the",
"list",
"with",
"the",
"name",
"that",
"corresponds",
"to",
"its",
"node",
"value",
"."
] |
def replaceNodeType(treeList):
""" Replaces the 0th element in the list with the name
that corresponds to its node value.
"""
global nodeNames
# Replace node num with name
treeList[0] = nodeNames[treeList[0]]
# Recurse
for i in range(1, len(treeList)):
if type(treeList[i]) == tuple:
treeList[i] = list(treeList[i])
if type(treeList[i]) == list:
replaceNodeType(treeList[i])
return treeList
|
[
"def",
"replaceNodeType",
"(",
"treeList",
")",
":",
"global",
"nodeNames",
"# Replace node num with name",
"treeList",
"[",
"0",
"]",
"=",
"nodeNames",
"[",
"treeList",
"[",
"0",
"]",
"]",
"# Recurse",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"treeList",
")",
")",
":",
"if",
"type",
"(",
"treeList",
"[",
"i",
"]",
")",
"==",
"tuple",
":",
"treeList",
"[",
"i",
"]",
"=",
"list",
"(",
"treeList",
"[",
"i",
"]",
")",
"if",
"type",
"(",
"treeList",
"[",
"i",
"]",
")",
"==",
"list",
":",
"replaceNodeType",
"(",
"treeList",
"[",
"i",
"]",
")",
"return",
"treeList"
] |
https://github.com/portante/pycscope/blob/d991da9d45c6d0a4c6617c267da238a5f1bd2bdf/pycscope/__init__.py#L276-L291
|
|
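A self-contained sketch of the same in-place rewrite, with a hypothetical nodeNames table standing in for the one pycscope builds from the parser's node numbers: the head of every nested list is translated from a numeric node type to its name.

# Hypothetical node-number -> name table, for illustration only.
nodeNames = {257: 'file_input', 266: 'stmt', 1: 'NAME'}

def replace_node_type(tree):
    tree[0] = nodeNames[tree[0]]           # replace node num with name
    for i in range(1, len(tree)):
        if isinstance(tree[i], tuple):
            tree[i] = list(tree[i])        # tuples become mutable lists
        if isinstance(tree[i], list):
            replace_node_type(tree[i])     # recurse into subtrees
    return tree

print(replace_node_type([257, (266, (1, 'x'))]))
# -> ['file_input', ['stmt', ['NAME', 'x']]]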
stephenmcd/gnotty
|
bea3762dc9cbc3cb21a5ae7224091cf027273c40
|
gnotty/bots/events.py
|
python
|
on
|
(event, *args, **kwargs)
|
return wrapper
|
Event method wrapper for bot mixins. When a bot is constructed,
its metaclass inspects all members of all base classes, and
looks for methods marked with an event attribute which is assigned
via this wrapper. It then stores all the methods in a dict
that maps event names to lists of these methods, which are each
called when the event occurs.
|
Event method wrapper for bot mixins. When a bot is constructed,
its metaclass inspects all members of all base classes, and
looks for methods marked with an event attribute which is assigned
via this wrapper. It then stores all the methods in a dict
that maps event names to lists of these methods, which are each
called when the event occurs.
|
[
"Event",
"method",
"wrapper",
"for",
"bot",
"mixins",
".",
"When",
"a",
"bot",
"is",
"constructed",
"its",
"metaclass",
"inspects",
"all",
"members",
"of",
"all",
"base",
"classes",
"and",
"looks",
"for",
"methods",
"marked",
"with",
"an",
"event",
"attribute",
"which",
"is",
"assigned",
"via",
"this",
"wrapper",
".",
"It",
"then",
"stores",
"all",
"the",
"methods",
"in",
"a",
"dict",
"that",
"maps",
"event",
"names",
"to",
"lists",
"of",
"these",
"methods",
"which",
"are",
"each",
"called",
"when",
"the",
"event",
"occurs",
"."
] |
def on(event, *args, **kwargs):
"""
Event method wrapper for bot mixins. When a bot is constructed,
its metaclass inspects all members of all base classes, and
looks for methods marked with an event attribute which is assigned
via this wrapper. It then stores all the methods in a dict
that maps event names to lists of these methods, which are each
called when the event occurs.
"""
def wrapper(func):
        for i, arg in enumerate(args):
kwargs[i] = arg
func.event = Event(event, kwargs)
return func
return wrapper
|
[
"def",
"on",
"(",
"event",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"wrapper",
"(",
"func",
")",
":",
"for",
"i",
",",
"arg",
"in",
"args",
":",
"kwargs",
"[",
"i",
"]",
"=",
"arg",
"func",
".",
"event",
"=",
"Event",
"(",
"event",
",",
"kwargs",
")",
"return",
"func",
"return",
"wrapper"
] |
https://github.com/stephenmcd/gnotty/blob/bea3762dc9cbc3cb21a5ae7224091cf027273c40/gnotty/bots/events.py#L8-L22
|
|
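Usage sketch for the decorator, with the positional-argument loop written as enumerate(args) and a namedtuple standing in for gnotty's real Event class: the decorator only tags the method, and a metaclass later collects tagged methods by event name.

from collections import namedtuple

Event = namedtuple('Event', ['name', 'args'])  # stand-in, not gnotty's class

def on(event, *args, **kwargs):
    def wrapper(func):
        for i, arg in enumerate(args):   # fold positional args into kwargs
            kwargs[i] = arg
        func.event = Event(event, kwargs)
        return func
    return wrapper

class GreeterBot:
    @on('join')
    def welcome(self, nickname):
        return 'Welcome, %s!' % nickname

print(GreeterBot.welcome.event)  # Event(name='join', args={})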
holzschu/Carnets
|
44effb10ddfc6aa5c8b0687582a724ba82c6b547
|
Library/lib/python3.7/site-packages/Pillow-6.0.0-py3.7-macosx-10.9-x86_64.egg/PIL/Image.py
|
python
|
Image.toqpixmap
|
(self)
|
return ImageQt.toqpixmap(self)
|
Returns a QPixmap copy of this image
|
Returns a QPixmap copy of this image
|
[
"Returns",
"a",
"QPixmap",
"copy",
"of",
"this",
"image"
] |
def toqpixmap(self):
"""Returns a QPixmap copy of this image"""
from . import ImageQt
if not ImageQt.qt_is_installed:
raise ImportError("Qt bindings are not installed")
return ImageQt.toqpixmap(self)
|
[
"def",
"toqpixmap",
"(",
"self",
")",
":",
"from",
".",
"import",
"ImageQt",
"if",
"not",
"ImageQt",
".",
"qt_is_installed",
":",
"raise",
"ImportError",
"(",
"\"Qt bindings are not installed\"",
")",
"return",
"ImageQt",
".",
"toqpixmap",
"(",
"self",
")"
] |
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/Pillow-6.0.0-py3.7-macosx-10.9-x86_64.egg/PIL/Image.py#L2307-L2312
|
|
Esri/ArcREST
|
ab240fde2b0200f61d4a5f6df033516e53f2f416
|
src/arcrest/manageorg/_parameters.py
|
python
|
PortalParameters.canSignInIDP
|
(self)
|
return self._canSignInIDP
|
gets/sets the property value canSignInIDP
|
gets/sets the property value canSignInIDP
|
[
"gets",
"/",
"sets",
"the",
"property",
"value",
"canSignInIDP"
] |
def canSignInIDP(self):
"""gets/sets the property value canSignInIDP"""
return self._canSignInIDP
|
[
"def",
"canSignInIDP",
"(",
"self",
")",
":",
"return",
"self",
".",
"_canSignInIDP"
] |
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageorg/_parameters.py#L850-L852
|
|
svenkreiss/pysparkling
|
f0e8e8d039f3313c2693b7c7576cb1b7ba5a6d78
|
pysparkling/sql/functions.py
|
python
|
log1p
|
(e)
|
return col(Log1p(parse(e)))
|
:rtype: Column
|
:rtype: Column
|
[
":",
"rtype",
":",
"Column"
] |
def log1p(e):
"""
:rtype: Column
"""
return col(Log1p(parse(e)))
|
[
"def",
"log1p",
"(",
"e",
")",
":",
"return",
"col",
"(",
"Log1p",
"(",
"parse",
"(",
"e",
")",
")",
")"
] |
https://github.com/svenkreiss/pysparkling/blob/f0e8e8d039f3313c2693b7c7576cb1b7ba5a6d78/pysparkling/sql/functions.py#L1003-L1007
|
|
home-assistant/core
|
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
|
homeassistant/components/cloud/prefs.py
|
python
|
CloudPreferences._empty_config
|
(username)
|
return {
PREF_ALEXA_DEFAULT_EXPOSE: DEFAULT_EXPOSED_DOMAINS,
PREF_ALEXA_ENTITY_CONFIGS: {},
PREF_CLOUD_USER: None,
PREF_CLOUDHOOKS: {},
PREF_ENABLE_ALEXA: True,
PREF_ENABLE_GOOGLE: True,
PREF_ENABLE_REMOTE: False,
PREF_GOOGLE_DEFAULT_EXPOSE: DEFAULT_EXPOSED_DOMAINS,
PREF_GOOGLE_ENTITY_CONFIGS: {},
PREF_GOOGLE_LOCAL_WEBHOOK_ID: webhook.async_generate_id(),
PREF_GOOGLE_SECURE_DEVICES_PIN: None,
PREF_USERNAME: username,
}
|
Return an empty config.
|
Return an empty config.
|
[
"Return",
"an",
"empty",
"config",
"."
] |
def _empty_config(username):
"""Return an empty config."""
return {
PREF_ALEXA_DEFAULT_EXPOSE: DEFAULT_EXPOSED_DOMAINS,
PREF_ALEXA_ENTITY_CONFIGS: {},
PREF_CLOUD_USER: None,
PREF_CLOUDHOOKS: {},
PREF_ENABLE_ALEXA: True,
PREF_ENABLE_GOOGLE: True,
PREF_ENABLE_REMOTE: False,
PREF_GOOGLE_DEFAULT_EXPOSE: DEFAULT_EXPOSED_DOMAINS,
PREF_GOOGLE_ENTITY_CONFIGS: {},
PREF_GOOGLE_LOCAL_WEBHOOK_ID: webhook.async_generate_id(),
PREF_GOOGLE_SECURE_DEVICES_PIN: None,
PREF_USERNAME: username,
}
|
[
"def",
"_empty_config",
"(",
"username",
")",
":",
"return",
"{",
"PREF_ALEXA_DEFAULT_EXPOSE",
":",
"DEFAULT_EXPOSED_DOMAINS",
",",
"PREF_ALEXA_ENTITY_CONFIGS",
":",
"{",
"}",
",",
"PREF_CLOUD_USER",
":",
"None",
",",
"PREF_CLOUDHOOKS",
":",
"{",
"}",
",",
"PREF_ENABLE_ALEXA",
":",
"True",
",",
"PREF_ENABLE_GOOGLE",
":",
"True",
",",
"PREF_ENABLE_REMOTE",
":",
"False",
",",
"PREF_GOOGLE_DEFAULT_EXPOSE",
":",
"DEFAULT_EXPOSED_DOMAINS",
",",
"PREF_GOOGLE_ENTITY_CONFIGS",
":",
"{",
"}",
",",
"PREF_GOOGLE_LOCAL_WEBHOOK_ID",
":",
"webhook",
".",
"async_generate_id",
"(",
")",
",",
"PREF_GOOGLE_SECURE_DEVICES_PIN",
":",
"None",
",",
"PREF_USERNAME",
":",
"username",
",",
"}"
] |
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/cloud/prefs.py#L310-L325
|
|
devitocodes/devito
|
6abd441e3f5f091775ad332be6b95e017b8cbd16
|
examples/seismic/viscoacoustic/wavesolver.py
|
python
|
ViscoacousticWaveSolver.jacobian
|
(self, dmin, src=None, rec=None, p=None, P=None, rp=None, rP=None, v=None,
dv=None, model=None, **kwargs)
|
return rec, p, P, summary
|
Linearized Born modelling function that creates the necessary
data objects for running an adjoint modelling operator.
Parameters
----------
src : SparseTimeFunction or array_like, optional
Time series data for the injected source term.
rec : SparseTimeFunction or array_like, optional
The interpolated receiver data.
p : TimeFunction, optional
The forward wavefield.
P : TimeFunction, optional
The linearized wavefield.
rp : TimeFunction, optional
The computed attenuation memory variable.
rP : TimeFunction, optional
The computed attenuation memory variable.
v : VectorTimeFunction, optional
The computed particle velocity.
dv : VectorTimeFunction, optional
The computed particle velocity.
model : Model, optional
Object containing the physical parameters.
vp : Function or float, optional
The time-constant velocity.
qp : Function, optional
The P-wave quality factor.
b : Function, optional
The time-constant inverse density.
|
Linearized Born modelling function that creates the necessary
data objects for running an adjoint modelling operator.
|
[
"Linearized",
"Born",
"modelling",
"function",
"that",
"creates",
"the",
"necessary",
"data",
"objects",
"for",
"running",
"an",
"adjoint",
"modelling",
"operator",
"."
] |
def jacobian(self, dmin, src=None, rec=None, p=None, P=None, rp=None, rP=None, v=None,
dv=None, model=None, **kwargs):
"""
Linearized Born modelling function that creates the necessary
data objects for running an adjoint modelling operator.
Parameters
----------
src : SparseTimeFunction or array_like, optional
Time series data for the injected source term.
rec : SparseTimeFunction or array_like, optional
The interpolated receiver data.
p : TimeFunction, optional
The forward wavefield.
P : TimeFunction, optional
The linearized wavefield.
rp : TimeFunction, optional
The computed attenuation memory variable.
rP : TimeFunction, optional
The computed attenuation memory variable.
v : VectorTimeFunction, optional
The computed particle velocity.
dv : VectorTimeFunction, optional
The computed particle velocity.
model : Model, optional
Object containing the physical parameters.
vp : Function or float, optional
The time-constant velocity.
qp : Function, optional
The P-wave quality factor.
b : Function, optional
The time-constant inverse density.
"""
# Source term is read-only, so re-use the default
src = src or self.geometry.src
# Create a new receiver object to store the result
rec = rec or self.geometry.rec
# Create the forward wavefields u and U if not provided
p = p or TimeFunction(name='p', grid=self.model.grid,
time_order=self.time_order, space_order=self.space_order,
staggered=NODE)
P = P or TimeFunction(name='P', grid=self.model.grid,
time_order=self.time_order, space_order=self.space_order,
staggered=NODE)
# Memory variable:
rp = rp or TimeFunction(name='rp', grid=self.model.grid,
time_order=self.time_order,
space_order=self.space_order, staggered=NODE)
# Memory variable:
rP = rP or TimeFunction(name='rP', grid=self.model.grid,
time_order=self.time_order,
space_order=self.space_order, staggered=NODE)
if self.time_order == 1:
v = v or VectorTimeFunction(name="v", grid=self.model.grid,
time_order=self.time_order,
space_order=self.space_order)
kwargs.update({k.name: k for k in v})
dv = dv or VectorTimeFunction(name="dv", grid=self.model.grid,
time_order=self.time_order,
space_order=self.space_order)
kwargs.update({k.name: k for k in dv})
model = model or self.model
# Pick vp and physical parameters from model unless explicitly provided
kwargs.update(model.physical_params(**kwargs))
# Execute operator and return wavefield and receiver data
summary = self.op_born().apply(dm=dmin, p=p, P=P, src=src, rec=rec, rp=rp, rP=rP,
dt=kwargs.pop('dt', self.dt), **kwargs)
return rec, p, P, summary
|
[
"def",
"jacobian",
"(",
"self",
",",
"dmin",
",",
"src",
"=",
"None",
",",
"rec",
"=",
"None",
",",
"p",
"=",
"None",
",",
"P",
"=",
"None",
",",
"rp",
"=",
"None",
",",
"rP",
"=",
"None",
",",
"v",
"=",
"None",
",",
"dv",
"=",
"None",
",",
"model",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# Source term is read-only, so re-use the default",
"src",
"=",
"src",
"or",
"self",
".",
"geometry",
".",
"src",
"# Create a new receiver object to store the result",
"rec",
"=",
"rec",
"or",
"self",
".",
"geometry",
".",
"rec",
"# Create the forward wavefields u and U if not provided",
"p",
"=",
"p",
"or",
"TimeFunction",
"(",
"name",
"=",
"'p'",
",",
"grid",
"=",
"self",
".",
"model",
".",
"grid",
",",
"time_order",
"=",
"self",
".",
"time_order",
",",
"space_order",
"=",
"self",
".",
"space_order",
",",
"staggered",
"=",
"NODE",
")",
"P",
"=",
"P",
"or",
"TimeFunction",
"(",
"name",
"=",
"'P'",
",",
"grid",
"=",
"self",
".",
"model",
".",
"grid",
",",
"time_order",
"=",
"self",
".",
"time_order",
",",
"space_order",
"=",
"self",
".",
"space_order",
",",
"staggered",
"=",
"NODE",
")",
"# Memory variable:",
"rp",
"=",
"rp",
"or",
"TimeFunction",
"(",
"name",
"=",
"'rp'",
",",
"grid",
"=",
"self",
".",
"model",
".",
"grid",
",",
"time_order",
"=",
"self",
".",
"time_order",
",",
"space_order",
"=",
"self",
".",
"space_order",
",",
"staggered",
"=",
"NODE",
")",
"# Memory variable:",
"rP",
"=",
"rP",
"or",
"TimeFunction",
"(",
"name",
"=",
"'rP'",
",",
"grid",
"=",
"self",
".",
"model",
".",
"grid",
",",
"time_order",
"=",
"self",
".",
"time_order",
",",
"space_order",
"=",
"self",
".",
"space_order",
",",
"staggered",
"=",
"NODE",
")",
"if",
"self",
".",
"time_order",
"==",
"1",
":",
"v",
"=",
"v",
"or",
"VectorTimeFunction",
"(",
"name",
"=",
"\"v\"",
",",
"grid",
"=",
"self",
".",
"model",
".",
"grid",
",",
"time_order",
"=",
"self",
".",
"time_order",
",",
"space_order",
"=",
"self",
".",
"space_order",
")",
"kwargs",
".",
"update",
"(",
"{",
"k",
".",
"name",
":",
"k",
"for",
"k",
"in",
"v",
"}",
")",
"dv",
"=",
"dv",
"or",
"VectorTimeFunction",
"(",
"name",
"=",
"\"dv\"",
",",
"grid",
"=",
"self",
".",
"model",
".",
"grid",
",",
"time_order",
"=",
"self",
".",
"time_order",
",",
"space_order",
"=",
"self",
".",
"space_order",
")",
"kwargs",
".",
"update",
"(",
"{",
"k",
".",
"name",
":",
"k",
"for",
"k",
"in",
"dv",
"}",
")",
"model",
"=",
"model",
"or",
"self",
".",
"model",
"# Pick vp and physical parameters from model unless explicitly provided",
"kwargs",
".",
"update",
"(",
"model",
".",
"physical_params",
"(",
"*",
"*",
"kwargs",
")",
")",
"# Execute operator and return wavefield and receiver data",
"summary",
"=",
"self",
".",
"op_born",
"(",
")",
".",
"apply",
"(",
"dm",
"=",
"dmin",
",",
"p",
"=",
"p",
",",
"P",
"=",
"P",
",",
"src",
"=",
"src",
",",
"rec",
"=",
"rec",
",",
"rp",
"=",
"rp",
",",
"rP",
"=",
"rP",
",",
"dt",
"=",
"kwargs",
".",
"pop",
"(",
"'dt'",
",",
"self",
".",
"dt",
")",
",",
"*",
"*",
"kwargs",
")",
"return",
"rec",
",",
"p",
",",
"P",
",",
"summary"
] |
https://github.com/devitocodes/devito/blob/6abd441e3f5f091775ad332be6b95e017b8cbd16/examples/seismic/viscoacoustic/wavesolver.py#L324-L398
|
|
pventuzelo/octopus
|
e8b8c5a9d5f6d9c63605afe9ef1528ab481ec983
|
octopus/arch/wasm/instruction.py
|
python
|
WasmInstruction.__init__
|
(self, opcode, name, imm_struct, operand_size, insn_byte,
pops, pushes, description, operand_interpretation=None, offset=0)
|
TODO
|
TODO
|
[
"TODO"
] |
def __init__(self, opcode, name, imm_struct, operand_size, insn_byte,
pops, pushes, description, operand_interpretation=None, offset=0):
""" TODO """
self.opcode = opcode
self.offset = offset
self.name = name
self.description = description
self.operand_size = operand_size
if len(insn_byte) > 1:
self.operand = insn_byte[-operand_size:] # Immediate operand if any
else:
self.operand = None
# specific interpretation of operand value
self.operand_interpretation = operand_interpretation
self.insn_byte = insn_byte
self.pops = pops
self.pushes = pushes
self.imm_struct = imm_struct
self.xref = list()
self.ssa = None
|
[
"def",
"__init__",
"(",
"self",
",",
"opcode",
",",
"name",
",",
"imm_struct",
",",
"operand_size",
",",
"insn_byte",
",",
"pops",
",",
"pushes",
",",
"description",
",",
"operand_interpretation",
"=",
"None",
",",
"offset",
"=",
"0",
")",
":",
"self",
".",
"opcode",
"=",
"opcode",
"self",
".",
"offset",
"=",
"offset",
"self",
".",
"name",
"=",
"name",
"self",
".",
"description",
"=",
"description",
"self",
".",
"operand_size",
"=",
"operand_size",
"if",
"len",
"(",
"insn_byte",
")",
">",
"1",
":",
"self",
".",
"operand",
"=",
"insn_byte",
"[",
"-",
"operand_size",
":",
"]",
"# Immediate operand if any",
"else",
":",
"self",
".",
"operand",
"=",
"None",
"# specific interpretation of operand value",
"self",
".",
"operand_interpretation",
"=",
"operand_interpretation",
"self",
".",
"insn_byte",
"=",
"insn_byte",
"self",
".",
"pops",
"=",
"pops",
"self",
".",
"pushes",
"=",
"pushes",
"self",
".",
"imm_struct",
"=",
"imm_struct",
"self",
".",
"xref",
"=",
"list",
"(",
")",
"self",
".",
"ssa",
"=",
"None"
] |
https://github.com/pventuzelo/octopus/blob/e8b8c5a9d5f6d9c63605afe9ef1528ab481ec983/octopus/arch/wasm/instruction.py#L10-L29
|
||
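The operand extraction above is plain negative slicing on the raw instruction bytes; a tiny standalone check with made-up byte values (not a real Wasm decoder):

insn_byte = b'\x41\x2a'   # hypothetical opcode byte + 1-byte immediate
operand_size = 1

operand = insn_byte[-operand_size:] if len(insn_byte) > 1 else None
print(operand)  # b'*'  (0x2a) -- the trailing immediate bytes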
emesene/emesene
|
4548a4098310e21b16437bb36223a7f632a4f7bc
|
emesene/e3/cache/AvatarCache.py
|
python
|
AvatarCache.__add_entry
|
(self, hash_)
|
return time_info, hash_
|
add an entry to the information file with the current timestamp
and the hash_ of the file that was saved
return (stamp, hash)
|
add an entry to the information file with the current timestamp
and the hash_ of the file that was saved
return (stamp, hash)
|
[
"add",
"an",
"entry",
"to",
"the",
"information",
"file",
"with",
"the",
"current",
"timestamp",
"and",
"the",
"hash_",
"of",
"the",
"file",
"that",
"was",
"saved",
"return",
"(",
"stamp",
"hash",
")"
] |
def __add_entry(self, hash_):
'''add an entry to the information file with the current timestamp
and the hash_ of the file that was saved
return (stamp, hash)
'''
time_info = int(time.time())
handle = file(self.info_path, 'a')
handle.write('%s %s\n' % (str(time_info), hash_))
handle.close()
return time_info, hash_
|
[
"def",
"__add_entry",
"(",
"self",
",",
"hash_",
")",
":",
"time_info",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
"handle",
"=",
"file",
"(",
"self",
".",
"info_path",
",",
"'a'",
")",
"handle",
".",
"write",
"(",
"'%s %s\\n'",
"%",
"(",
"str",
"(",
"time_info",
")",
",",
"hash_",
")",
")",
"handle",
".",
"close",
"(",
")",
"return",
"time_info",
",",
"hash_"
] |
https://github.com/emesene/emesene/blob/4548a4098310e21b16437bb36223a7f632a4f7bc/emesene/e3/cache/AvatarCache.py#L117-L127
|
|
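The record above is Python 2 (the removed file() builtin). A Python 3 sketch of the same append-a-timestamped-line idea, written against a temporary path so it runs anywhere:

import os
import tempfile
import time

info_path = os.path.join(tempfile.mkdtemp(), 'info')

def add_entry(hash_):
    time_info = int(time.time())
    with open(info_path, 'a') as handle:   # open() replaces py2 file()
        handle.write('%s %s\n' % (time_info, hash_))
    return time_info, hash_

add_entry('d41d8cd98f00b204e9800998ecf8427e')
print(open(info_path).read())   # "<stamp> <hash>"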
omz/PythonistaAppTemplate
|
f560f93f8876d82a21d108977f90583df08d55af
|
PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/matplotlib/axis.py
|
python
|
Tick._get_text1
|
(self)
|
Get the default Text 1 instance
|
Get the default Text 1 instance
|
[
"Get",
"the",
"default",
"Text",
"1",
"instance"
] |
def _get_text1(self):
'Get the default Text 1 instance'
pass
|
[
"def",
"_get_text1",
"(",
"self",
")",
":",
"pass"
] |
https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/matplotlib/axis.py#L201-L203
|
||
flasgger/flasgger
|
beb9fa781fc6b063fe3f3081b9677dd70184a2da
|
flasgger/commands.py
|
python
|
generate_api_schema
|
(file, endpoint)
|
return spec
|
Generate the swagger schema for your api.
|
Generate the swagger schema for your api.
|
[
"Generate",
"the",
"swagger",
"schema",
"for",
"your",
"api",
"."
] |
def generate_api_schema(file, endpoint):
"""Generate the swagger schema for your api."""
try:
if endpoint is None:
endpoint = current_app.swag.config["specs"][0]["endpoint"]
spec = current_app.swag.get_apispecs(endpoint)
except RuntimeError as e:
click.echo(e, err=True)
click.echo(
"Possible values for endpoint are: {}".format(
", ".join(
[
spec["endpoint"]
for spec in current_app.swag.config["specs"]
if "endpoint" in spec
]
)
),
err=True,
)
raise click.Abort
# See also: https://github.com/flasgger/flasgger/issues/267
if is_openapi3(spec.get("openapi")):
if "definitions" in spec:
del spec["definitions"]
json.dump(spec, file, indent=4)
return spec
|
[
"def",
"generate_api_schema",
"(",
"file",
",",
"endpoint",
")",
":",
"try",
":",
"if",
"endpoint",
"is",
"None",
":",
"endpoint",
"=",
"current_app",
".",
"swag",
".",
"config",
"[",
"\"specs\"",
"]",
"[",
"0",
"]",
"[",
"\"endpoint\"",
"]",
"spec",
"=",
"current_app",
".",
"swag",
".",
"get_apispecs",
"(",
"endpoint",
")",
"except",
"RuntimeError",
"as",
"e",
":",
"click",
".",
"echo",
"(",
"e",
",",
"err",
"=",
"True",
")",
"click",
".",
"echo",
"(",
"\"Possible values for endpoint are: {}\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"[",
"spec",
"[",
"\"endpoint\"",
"]",
"for",
"spec",
"in",
"current_app",
".",
"swag",
".",
"config",
"[",
"\"specs\"",
"]",
"if",
"\"endpoint\"",
"in",
"spec",
"]",
")",
")",
",",
"err",
"=",
"True",
",",
")",
"raise",
"click",
".",
"Abort",
"# See also: https://github.com/flasgger/flasgger/issues/267",
"if",
"is_openapi3",
"(",
"spec",
".",
"get",
"(",
"\"openapi\"",
")",
")",
":",
"if",
"\"definitions\"",
"in",
"spec",
":",
"del",
"spec",
"[",
"\"definitions\"",
"]",
"json",
".",
"dump",
"(",
"spec",
",",
"file",
",",
"indent",
"=",
"4",
")",
"return",
"spec"
] |
https://github.com/flasgger/flasgger/blob/beb9fa781fc6b063fe3f3081b9677dd70184a2da/flasgger/commands.py#L14-L44
|
|
Qiskit/qiskit-terra
|
b66030e3b9192efdd3eb95cf25c6545fe0a13da4
|
qiskit/providers/models/backendconfiguration.py
|
python
|
GateConfig.to_dict
|
(self)
|
return out_dict
|
Return a dictionary format representation of the GateConfig.
Returns:
dict: The dictionary form of the GateConfig.
|
Return a dictionary format representation of the GateConfig.
|
[
"Return",
"a",
"dictionary",
"format",
"representation",
"of",
"the",
"GateConfig",
"."
] |
def to_dict(self):
"""Return a dictionary format representation of the GateConfig.
Returns:
dict: The dictionary form of the GateConfig.
"""
out_dict = {
"name": self.name,
"parameters": self.parameters,
"qasm_def": self.qasm_def,
}
if hasattr(self, "coupling_map"):
out_dict["coupling_map"] = self.coupling_map
if hasattr(self, "latency_map"):
out_dict["latency_map"] = self.latency_map
if hasattr(self, "conditional"):
out_dict["conditional"] = self.conditional
if hasattr(self, "description"):
out_dict["description"] = self.description
return out_dict
|
[
"def",
"to_dict",
"(",
"self",
")",
":",
"out_dict",
"=",
"{",
"\"name\"",
":",
"self",
".",
"name",
",",
"\"parameters\"",
":",
"self",
".",
"parameters",
",",
"\"qasm_def\"",
":",
"self",
".",
"qasm_def",
",",
"}",
"if",
"hasattr",
"(",
"self",
",",
"\"coupling_map\"",
")",
":",
"out_dict",
"[",
"\"coupling_map\"",
"]",
"=",
"self",
".",
"coupling_map",
"if",
"hasattr",
"(",
"self",
",",
"\"latency_map\"",
")",
":",
"out_dict",
"[",
"\"latency_map\"",
"]",
"=",
"self",
".",
"latency_map",
"if",
"hasattr",
"(",
"self",
",",
"\"conditional\"",
")",
":",
"out_dict",
"[",
"\"conditional\"",
"]",
"=",
"self",
".",
"conditional",
"if",
"hasattr",
"(",
"self",
",",
"\"description\"",
")",
":",
"out_dict",
"[",
"\"description\"",
"]",
"=",
"self",
".",
"description",
"return",
"out_dict"
] |
https://github.com/Qiskit/qiskit-terra/blob/b66030e3b9192efdd3eb95cf25c6545fe0a13da4/qiskit/providers/models/backendconfiguration.py#L102-L121
|
|
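The hasattr-guarded serialization in to_dict above is a reusable pattern: mandatory keys are always emitted, optional attributes only when they were actually set. A qiskit-free sketch:

class Config:
    def __init__(self, name, description=None):
        self.name = name
        if description is not None:       # optional attribute, set conditionally
            self.description = description

    def to_dict(self):
        out = {'name': self.name}
        if hasattr(self, 'description'):  # emit only if present
            out['description'] = self.description
        return out

print(Config('cx').to_dict())               # {'name': 'cx'}
print(Config('cx', 'CNOT gate').to_dict())  # adds 'description'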
pyqt/examples
|
843bb982917cecb2350b5f6d7f42c9b7fb142ec1
|
src/pyqt-official/designer/plugins/widgets/datetimeedit.py
|
python
|
PyDateEdit.setHorizontalHeaderFormat
|
(self, format)
|
[] |
def setHorizontalHeaderFormat(self, format):
if format != self.__horizontalHeaderFormat:
self.__horizontalHeaderFormat = format
if self.__cw:
self.__cw.setHorizontalHeaderFormat(format)
|
[
"def",
"setHorizontalHeaderFormat",
"(",
"self",
",",
"format",
")",
":",
"if",
"format",
"!=",
"self",
".",
"__horizontalHeaderFormat",
":",
"self",
".",
"__horizontalHeaderFormat",
"=",
"format",
"if",
"self",
".",
"__cw",
":",
"self",
".",
"__cw",
".",
"setHorizontalHeaderFormat",
"(",
"format",
")"
] |
https://github.com/pyqt/examples/blob/843bb982917cecb2350b5f6d7f42c9b7fb142ec1/src/pyqt-official/designer/plugins/widgets/datetimeedit.py#L130-L134
|
||||
aiqm/torchani
|
258e6c36cf2b35a3a672137ebe30cb923db75952
|
torchani/nn.py
|
python
|
Sequential.__init__
|
(self, *modules)
|
[] |
def __init__(self, *modules):
super().__init__(modules)
|
[
"def",
"__init__",
"(",
"self",
",",
"*",
"modules",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"modules",
")"
] |
https://github.com/aiqm/torchani/blob/258e6c36cf2b35a3a672137ebe30cb923db75952/torchani/nn.py#L104-L105
|
||||
igogo-x86/HexRaysPyTools
|
b8ebf757a92fda934c35c418fc55bfdd6fc8e67c
|
HexRaysPyTools/core/helper.py
|
python
|
get_ptr
|
(ea)
|
return ptr
|
Reads ptr at specified address.
|
Reads ptr at specified address.
|
[
"Reads",
"ptr",
"at",
"specified",
"address",
"."
] |
def get_ptr(ea):
""" Reads ptr at specified address. """
if const.EA64:
return idaapi.get_64bit(ea)
ptr = idaapi.get_32bit(ea)
if idaapi.cvar.inf.procname == "ARM":
ptr &= -2 # Clear thumb bit
return ptr
|
[
"def",
"get_ptr",
"(",
"ea",
")",
":",
"if",
"const",
".",
"EA64",
":",
"return",
"idaapi",
".",
"get_64bit",
"(",
"ea",
")",
"ptr",
"=",
"idaapi",
".",
"get_32bit",
"(",
"ea",
")",
"if",
"idaapi",
".",
"cvar",
".",
"inf",
".",
"procname",
"==",
"\"ARM\"",
":",
"ptr",
"&=",
"-",
"2",
"# Clear thumb bit",
"return",
"ptr"
] |
https://github.com/igogo-x86/HexRaysPyTools/blob/b8ebf757a92fda934c35c418fc55bfdd6fc8e67c/HexRaysPyTools/core/helper.py#L36-L43
|
|
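The ARM branch relies on ptr &= -2 clearing bit 0 (the Thumb bit) in two's complement; a standalone check with no IDA required:

thumb_target = 0x08001235          # odd address: Thumb bit set
print(hex(thumb_target & -2))      # 0x8001234 -- low bit cleared
print(hex(0x08001234 & -2))        # 0x8001234 -- even addresses unchanged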
google-research/tapas
|
a3e069b50c71f50b12a6e5bb3dad10fb51f6fe68
|
tapas/utils/span_prediction_utils.py
|
python
|
_gather_indexes
|
(
indexes,
flat_spans_2d,
start_or_end,
)
|
return span_index
|
Gathers indexes for start or end index.
Where flat_spans_2d is a data-structure built to work with tf.gather_nd.
It pairs a batch index with a start or end index.
Args:
indexes: <int32>[batch_size, seq_length].
flat_spans_2d: <int32>[batch_size, num_spans * 2, 2].
start_or_end: 0 for start index, 1 for end index.
Returns:
indexes: <int32>[batch_size, num_spans].
|
Gathers indexes for start or end index.
|
[
"Gathers",
"indexes",
"for",
"start",
"or",
"end",
"index",
"."
] |
def _gather_indexes(
indexes,
flat_spans_2d,
start_or_end,
):
"""Gathers indexes for start or end index.
Where flat_spans_2d is a data-structure built to work with tf.gather_nd.
It pairs a batch index with a start or end index.
Args:
indexes: <int32>[batch_size, seq_length].
flat_spans_2d: <int32>[batch_size, num_spans * 2, 2].
start_or_end: 0 for start index, 1 for end index.
Returns:
indexes: <int32>[batch_size, num_spans].
"""
shape = modeling.get_shape_list(flat_spans_2d, expected_rank=3)
batch_size = shape[0]
num_spans = shape[1] // 2
span_index = tf.gather_nd(params=indexes, indices=flat_spans_2d)
span_index = tf.reshape(span_index, shape=(batch_size, num_spans, 2))
span_index = tf.slice(
span_index, begin=[0, 0, start_or_end], size=[batch_size, num_spans, 1])
span_index = tf.squeeze(span_index, axis=2)
return span_index
|
[
"def",
"_gather_indexes",
"(",
"indexes",
",",
"flat_spans_2d",
",",
"start_or_end",
",",
")",
":",
"shape",
"=",
"modeling",
".",
"get_shape_list",
"(",
"flat_spans_2d",
",",
"expected_rank",
"=",
"3",
")",
"batch_size",
"=",
"shape",
"[",
"0",
"]",
"num_spans",
"=",
"shape",
"[",
"1",
"]",
"//",
"2",
"span_index",
"=",
"tf",
".",
"gather_nd",
"(",
"params",
"=",
"indexes",
",",
"indices",
"=",
"flat_spans_2d",
")",
"span_index",
"=",
"tf",
".",
"reshape",
"(",
"span_index",
",",
"shape",
"=",
"(",
"batch_size",
",",
"num_spans",
",",
"2",
")",
")",
"span_index",
"=",
"tf",
".",
"slice",
"(",
"span_index",
",",
"begin",
"=",
"[",
"0",
",",
"0",
",",
"start_or_end",
"]",
",",
"size",
"=",
"[",
"batch_size",
",",
"num_spans",
",",
"1",
"]",
")",
"span_index",
"=",
"tf",
".",
"squeeze",
"(",
"span_index",
",",
"axis",
"=",
"2",
")",
"return",
"span_index"
] |
https://github.com/google-research/tapas/blob/a3e069b50c71f50b12a6e5bb3dad10fb51f6fe68/tapas/utils/span_prediction_utils.py#L53-L79
|
|
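A NumPy re-enactment of the gather/reshape/slice pipeline on tiny shapes: tf.gather_nd with (batch, position) pairs is ordinary fancy indexing here, and the final slice/squeeze becomes a [..., k] selection.

import numpy as np

batch_size, seq_length, num_spans = 2, 5, 2
indexes = np.arange(batch_size * seq_length).reshape(batch_size, seq_length)

# (batch, position) pairs, interleaved start/end per span:
# span 0 = positions (0, 2), span 1 = positions (1, 4) in each batch row.
flat_spans_2d = np.array([[[0, 0], [0, 2], [0, 1], [0, 4]],
                          [[1, 0], [1, 2], [1, 1], [1, 4]]])

gathered = indexes[flat_spans_2d[..., 0], flat_spans_2d[..., 1]]
span_index = gathered.reshape(batch_size, num_spans, 2)
print(span_index[..., 0])  # starts: [[0 1] [5 6]]
print(span_index[..., 1])  # ends:   [[2 4] [7 9]]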
oilshell/oil
|
94388e7d44a9ad879b12615f6203b38596b5a2d3
|
pgen2/tokenize.py
|
python
|
Untokenizer.compat
|
(self, token, iterable)
|
[] |
def compat(self, token, iterable):
startline = False
indents = []
toks_append = self.tokens.append
toknum, tokval = token
if toknum in (NAME, NUMBER):
tokval += ' '
if toknum in (NEWLINE, NL):
startline = True
for tok in iterable:
toknum, tokval = tok[:2]
if toknum in (NAME, NUMBER, ASYNC, AWAIT):
tokval += ' '
if toknum == INDENT:
indents.append(tokval)
continue
elif toknum == DEDENT:
indents.pop()
continue
elif toknum in (NEWLINE, NL):
startline = True
elif startline and indents:
toks_append(indents[-1])
startline = False
toks_append(tokval)
|
[
"def",
"compat",
"(",
"self",
",",
"token",
",",
"iterable",
")",
":",
"startline",
"=",
"False",
"indents",
"=",
"[",
"]",
"toks_append",
"=",
"self",
".",
"tokens",
".",
"append",
"toknum",
",",
"tokval",
"=",
"token",
"if",
"toknum",
"in",
"(",
"NAME",
",",
"NUMBER",
")",
":",
"tokval",
"+=",
"' '",
"if",
"toknum",
"in",
"(",
"NEWLINE",
",",
"NL",
")",
":",
"startline",
"=",
"True",
"for",
"tok",
"in",
"iterable",
":",
"toknum",
",",
"tokval",
"=",
"tok",
"[",
":",
"2",
"]",
"if",
"toknum",
"in",
"(",
"NAME",
",",
"NUMBER",
",",
"ASYNC",
",",
"AWAIT",
")",
":",
"tokval",
"+=",
"' '",
"if",
"toknum",
"==",
"INDENT",
":",
"indents",
".",
"append",
"(",
"tokval",
")",
"continue",
"elif",
"toknum",
"==",
"DEDENT",
":",
"indents",
".",
"pop",
"(",
")",
"continue",
"elif",
"toknum",
"in",
"(",
"NEWLINE",
",",
"NL",
")",
":",
"startline",
"=",
"True",
"elif",
"startline",
"and",
"indents",
":",
"toks_append",
"(",
"indents",
"[",
"-",
"1",
"]",
")",
"startline",
"=",
"False",
"toks_append",
"(",
"tokval",
")"
] |
https://github.com/oilshell/oil/blob/94388e7d44a9ad879b12615f6203b38596b5a2d3/pgen2/tokenize.py#L213-L239
|
||||
LeapBeyond/scrubadub
|
ab199f0b3cc3ca11f646aabb05ebe124d2757ea5
|
scrubadub/detectors/base.py
|
python
|
Detector.iter_filth_documents
|
(self, document_list: Sequence[str],
document_names: Sequence[Optional[str]])
|
Yields discovered filth in a list of documents.
:param document_list: A list of documents to clean.
:type document_list: List[str]
:param document_names: A list containing the name of each document.
:type document_names: List[str]
:return: An iterator to the discovered :class:`Filth`
:rtype: Iterator[:class:`Filth`]
|
Yields discovered filth in a list of documents.
|
[
"Yields",
"discovered",
"filth",
"in",
"a",
"list",
"of",
"documents",
"."
] |
def iter_filth_documents(self, document_list: Sequence[str],
document_names: Sequence[Optional[str]]) -> Generator[Filth, None, None]:
"""Yields discovered filth in a list of documents.
:param document_list: A list of documents to clean.
:type document_list: List[str]
:param document_names: A list containing the name of each document.
:type document_names: List[str]
:return: An iterator to the discovered :class:`Filth`
:rtype: Iterator[:class:`Filth`]
"""
raise NotImplementedError('must be implemented in derived classes')
|
[
"def",
"iter_filth_documents",
"(",
"self",
",",
"document_list",
":",
"Sequence",
"[",
"str",
"]",
",",
"document_names",
":",
"Sequence",
"[",
"Optional",
"[",
"str",
"]",
"]",
")",
"->",
"Generator",
"[",
"Filth",
",",
"None",
",",
"None",
"]",
":",
"raise",
"NotImplementedError",
"(",
"'must be implemented in derived classes'",
")"
] |
https://github.com/LeapBeyond/scrubadub/blob/ab199f0b3cc3ca11f646aabb05ebe124d2757ea5/scrubadub/detectors/base.py#L81-L92
|
||
xgi/castero
|
766965fb1d3586d62ab6fd6dd144fa510c1e0ecb
|
castero/helpers.py
|
python
|
third
|
(n)
|
return int(n / 3)
|
Calculates one-third of a given value.
:param n: the integer to calculate one-third of
:returns int: one-third of n, rounded down
|
Calculates one-third of a given value.
|
[
"Calculates",
"one",
"-",
"third",
"of",
"a",
"given",
"value",
"."
] |
def third(n) -> int:
"""Calculates one-third of a given value.
    :param n: the integer to calculate one-third of
:returns int: one-third of n, rounded down
"""
return int(n / 3)
|
[
"def",
"third",
"(",
"n",
")",
"->",
"int",
":",
"return",
"int",
"(",
"n",
"/",
"3",
")"
] |
https://github.com/xgi/castero/blob/766965fb1d3586d62ab6fd6dd144fa510c1e0ecb/castero/helpers.py#L9-L15
|
|
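One nuance worth a worked check: int(n / 3) truncates toward zero, which is not the same as floor division once n is negative.

def third(n) -> int:
    return int(n / 3)

print(third(10))    # 3
print(third(-10))   # -3 -- truncated toward zero
print(-10 // 3)     # -4 -- floor division rounds down instead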
cloudera/hue
|
23f02102d4547c17c32bd5ea0eb24e9eadd657a4
|
desktop/core/ext-py/dnspython-1.15.0/dns/message.py
|
python
|
_TextReader._question_line
|
(self, section)
|
Process one line from the text format question section.
|
Process one line from the text format question section.
|
[
"Process",
"one",
"line",
"from",
"the",
"text",
"format",
"question",
"section",
"."
] |
def _question_line(self, section):
"""Process one line from the text format question section."""
token = self.tok.get(want_leading=True)
if not token.is_whitespace():
self.last_name = dns.name.from_text(token.value, None)
name = self.last_name
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
# Class
try:
rdclass = dns.rdataclass.from_text(token.value)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except dns.exception.SyntaxError:
raise dns.exception.SyntaxError
except Exception:
rdclass = dns.rdataclass.IN
# Type
rdtype = dns.rdatatype.from_text(token.value)
self.message.find_rrset(self.message.question, name,
rdclass, rdtype, create=True,
force_unique=True)
if self.updating:
self.zone_rdclass = rdclass
self.tok.get_eol()
|
[
"def",
"_question_line",
"(",
"self",
",",
"section",
")",
":",
"token",
"=",
"self",
".",
"tok",
".",
"get",
"(",
"want_leading",
"=",
"True",
")",
"if",
"not",
"token",
".",
"is_whitespace",
"(",
")",
":",
"self",
".",
"last_name",
"=",
"dns",
".",
"name",
".",
"from_text",
"(",
"token",
".",
"value",
",",
"None",
")",
"name",
"=",
"self",
".",
"last_name",
"token",
"=",
"self",
".",
"tok",
".",
"get",
"(",
")",
"if",
"not",
"token",
".",
"is_identifier",
"(",
")",
":",
"raise",
"dns",
".",
"exception",
".",
"SyntaxError",
"# Class",
"try",
":",
"rdclass",
"=",
"dns",
".",
"rdataclass",
".",
"from_text",
"(",
"token",
".",
"value",
")",
"token",
"=",
"self",
".",
"tok",
".",
"get",
"(",
")",
"if",
"not",
"token",
".",
"is_identifier",
"(",
")",
":",
"raise",
"dns",
".",
"exception",
".",
"SyntaxError",
"except",
"dns",
".",
"exception",
".",
"SyntaxError",
":",
"raise",
"dns",
".",
"exception",
".",
"SyntaxError",
"except",
"Exception",
":",
"rdclass",
"=",
"dns",
".",
"rdataclass",
".",
"IN",
"# Type",
"rdtype",
"=",
"dns",
".",
"rdatatype",
".",
"from_text",
"(",
"token",
".",
"value",
")",
"self",
".",
"message",
".",
"find_rrset",
"(",
"self",
".",
"message",
".",
"question",
",",
"name",
",",
"rdclass",
",",
"rdtype",
",",
"create",
"=",
"True",
",",
"force_unique",
"=",
"True",
")",
"if",
"self",
".",
"updating",
":",
"self",
".",
"zone_rdclass",
"=",
"rdclass",
"self",
".",
"tok",
".",
"get_eol",
"(",
")"
] |
https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/core/ext-py/dnspython-1.15.0/dns/message.py#L883-L910
|
||
kuri65536/python-for-android
|
26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891
|
python-modules/twisted/twisted/python/rebuild.py
|
python
|
latestFunction
|
(oldFunc)
|
return getattr(module, oldFunc.__name__)
|
Get the latest version of a function.
|
Get the latest version of a function.
|
[
"Get",
"the",
"latest",
"version",
"of",
"a",
"function",
"."
] |
def latestFunction(oldFunc):
"""
Get the latest version of a function.
"""
# This may be CPython specific, since I believe jython instantiates a new
# module upon reload.
dictID = id(oldFunc.func_globals)
module = _modDictIDMap.get(dictID)
if module is None:
return oldFunc
return getattr(module, oldFunc.__name__)
|
[
"def",
"latestFunction",
"(",
"oldFunc",
")",
":",
"# This may be CPython specific, since I believe jython instantiates a new",
"# module upon reload.",
"dictID",
"=",
"id",
"(",
"oldFunc",
".",
"func_globals",
")",
"module",
"=",
"_modDictIDMap",
".",
"get",
"(",
"dictID",
")",
"if",
"module",
"is",
"None",
":",
"return",
"oldFunc",
"return",
"getattr",
"(",
"module",
",",
"oldFunc",
".",
"__name__",
")"
] |
https://github.com/kuri65536/python-for-android/blob/26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891/python-modules/twisted/twisted/python/rebuild.py#L66-L76
|
|
NVIDIA/DeepLearningExamples
|
589604d49e016cd9ef4525f7abcc9c7b826cfc5e
|
TensorFlow/Translation/GNMT/model.py
|
python
|
BaseModel.__init__
|
(self, hparams, mode, features, scope=None, extra_args=None)
|
Create the model.
Args:
hparams: Hyperparameter configurations.
mode: TRAIN | EVAL | INFER
features: a dict of input features.
scope: scope of the model.
extra_args: model_helper.ExtraArgs, for passing customizable functions.
|
Create the model.
|
[
"Create",
"the",
"model",
"."
] |
def __init__(self, hparams, mode, features, scope=None, extra_args=None):
"""Create the model.
Args:
hparams: Hyperparameter configurations.
mode: TRAIN | EVAL | INFER
features: a dict of input features.
scope: scope of the model.
extra_args: model_helper.ExtraArgs, for passing customizable functions.
"""
self.hparams = hparams
# Set params
self._set_params_initializer(hparams, mode, features, scope, extra_args)
# Train graph
res = self.build_graph(hparams, scope=scope)
self._set_train_or_infer(res, hparams)
|
[
"def",
"__init__",
"(",
"self",
",",
"hparams",
",",
"mode",
",",
"features",
",",
"scope",
"=",
"None",
",",
"extra_args",
"=",
"None",
")",
":",
"self",
".",
"hparams",
"=",
"hparams",
"# Set params",
"self",
".",
"_set_params_initializer",
"(",
"hparams",
",",
"mode",
",",
"features",
",",
"scope",
",",
"extra_args",
")",
"# Train graph",
"res",
"=",
"self",
".",
"build_graph",
"(",
"hparams",
",",
"scope",
"=",
"scope",
")",
"self",
".",
"_set_train_or_infer",
"(",
"res",
",",
"hparams",
")"
] |
https://github.com/NVIDIA/DeepLearningExamples/blob/589604d49e016cd9ef4525f7abcc9c7b826cfc5e/TensorFlow/Translation/GNMT/model.py#L74-L91
|
||
kpe/bert-for-tf2
|
55f6a6fd5d8ea14f96ee19938b7a1bf0cb26aaea
|
bert/loader_albert.py
|
python
|
albert_params
|
(albert_model: str)
|
return params
|
Returns the ALBERT params for the specified TFHub model.
:param albert_model: either a model name or a checkpoint directory
containing an assets/albert_config.json
|
Returns the ALBERT params for the specified TFHub model.
|
[
"Returns",
"the",
"ALBERT",
"params",
"for",
"the",
"specified",
"TFHub",
"model",
"."
] |
def albert_params(albert_model: str):
"""Returns the ALBERT params for the specified TFHub model.
:param albert_model: either a model name or a checkpoint directory
containing an assets/albert_config.json
"""
if tf.io.gfile.isdir(albert_model):
config_file = os.path.join(albert_model, "assets", "albert_config.json") # google tfhub v2 weights
if not tf.io.gfile.exists(config_file):
config_file = os.path.join(albert_model, "albert_config.json") # google non-tfhub v2 weights
if tf.io.gfile.exists(config_file):
stock_config = loader.StockBertConfig.from_json_file(config_file)
else:
raise ValueError("No google-research ALBERT model found under:[{}] expecting albert_config.json or assets/albert_config.json".format(albert_model))
else:
if albert_model in albert_models_config: # google tfhub v1 weights
albert_config = albert_models_config[albert_model]
stock_config = loader.StockBertConfig.from_dict(albert_config, return_instance=True, return_unused=False)
else:
raise ValueError("ALBERT model with name:[{}] not one of tfhub/google-research albert models, try one of:{}".format(
albert_model, albert_models_tfhub))
params = loader.map_stock_config_to_params(stock_config)
return params
|
[
"def",
"albert_params",
"(",
"albert_model",
":",
"str",
")",
":",
"if",
"tf",
".",
"io",
".",
"gfile",
".",
"isdir",
"(",
"albert_model",
")",
":",
"config_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"albert_model",
",",
"\"assets\"",
",",
"\"albert_config.json\"",
")",
"# google tfhub v2 weights",
"if",
"not",
"tf",
".",
"io",
".",
"gfile",
".",
"exists",
"(",
"config_file",
")",
":",
"config_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"albert_model",
",",
"\"albert_config.json\"",
")",
"# google non-tfhub v2 weights",
"if",
"tf",
".",
"io",
".",
"gfile",
".",
"exists",
"(",
"config_file",
")",
":",
"stock_config",
"=",
"loader",
".",
"StockBertConfig",
".",
"from_json_file",
"(",
"config_file",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"No google-research ALBERT model found under:[{}] expecting albert_config.json or assets/albert_config.json\"",
".",
"format",
"(",
"albert_model",
")",
")",
"else",
":",
"if",
"albert_model",
"in",
"albert_models_config",
":",
"# google tfhub v1 weights",
"albert_config",
"=",
"albert_models_config",
"[",
"albert_model",
"]",
"stock_config",
"=",
"loader",
".",
"StockBertConfig",
".",
"from_dict",
"(",
"albert_config",
",",
"return_instance",
"=",
"True",
",",
"return_unused",
"=",
"False",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"ALBERT model with name:[{}] not one of tfhub/google-research albert models, try one of:{}\"",
".",
"format",
"(",
"albert_model",
",",
"albert_models_tfhub",
")",
")",
"params",
"=",
"loader",
".",
"map_stock_config_to_params",
"(",
"stock_config",
")",
"return",
"params"
] |
https://github.com/kpe/bert-for-tf2/blob/55f6a6fd5d8ea14f96ee19938b7a1bf0cb26aaea/bert/loader_albert.py#L142-L165
|
|
sqlalchemy/sqlalchemy
|
eb716884a4abcabae84a6aaba105568e925b7d27
|
lib/sqlalchemy/util/langhelpers.py
|
python
|
warn_limited
|
(msg, args)
|
Issue a warning with a parameterized string, limiting the number
of registrations.
|
Issue a warning with a parameterized string, limiting the number
of registrations.
|
[
"Issue",
"a",
"warning",
"with",
"a",
"parameterized",
"string",
"limiting",
"the",
"number",
"of",
"registrations",
"."
] |
def warn_limited(msg, args):
"""Issue a warning with a parameterized string, limiting the number
of registrations.
"""
if args:
msg = _hash_limit_string(msg, 10, args)
_warnings_warn(msg, exc.SAWarning)
|
[
"def",
"warn_limited",
"(",
"msg",
",",
"args",
")",
":",
"if",
"args",
":",
"msg",
"=",
"_hash_limit_string",
"(",
"msg",
",",
"10",
",",
"args",
")",
"_warnings_warn",
"(",
"msg",
",",
"exc",
".",
"SAWarning",
")"
] |
https://github.com/sqlalchemy/sqlalchemy/blob/eb716884a4abcabae84a6aaba105568e925b7d27/lib/sqlalchemy/util/langhelpers.py#L1575-L1582
|
||
mjwestcott/Goodrich
|
dc2516591bd28488516c0337a62e64248debe47c
|
ch07/linked_stack.py
|
python
|
LinkedStack.is_empty
|
(self)
|
return self._size == 0
|
Return True if the stack is empty.
|
Return True if the stack is empty.
|
[
"Return",
"True",
"if",
"the",
"stack",
"is",
"empty",
"."
] |
def is_empty(self):
"""Return True if the stack is empty."""
return self._size == 0
|
[
"def",
"is_empty",
"(",
"self",
")",
":",
"return",
"self",
".",
"_size",
"==",
"0"
] |
https://github.com/mjwestcott/Goodrich/blob/dc2516591bd28488516c0337a62e64248debe47c/ch07/linked_stack.py#L46-L48
|
|
jkkummerfeld/text2sql-data
|
2905ab815b4893d99ea061a20fb55860ecb1f92e
|
systems/sequence-to-sequence/seq2seq/graph_utils.py
|
python
|
get_dict_from_collection
|
(collection_name)
|
return dict(zip(keys, values))
|
Gets a dictionary from a graph collection.
Args:
collection_name: A collection name to read a dictionary from
Returns:
A dictionary with string keys and tensor values
|
Gets a dictionary from a graph collection.
|
[
"Gets",
"a",
"dictionary",
"from",
"a",
"graph",
"collection",
"."
] |
def get_dict_from_collection(collection_name):
"""Gets a dictionary from a graph collection.
Args:
collection_name: A collection name to read a dictionary from
Returns:
A dictionary with string keys and tensor values
"""
key_collection = collection_name + "_keys"
value_collection = collection_name + "_values"
keys = tf.get_collection(key_collection)
values = tf.get_collection(value_collection)
return dict(zip(keys, values))
|
[
"def",
"get_dict_from_collection",
"(",
"collection_name",
")",
":",
"key_collection",
"=",
"collection_name",
"+",
"\"_keys\"",
"value_collection",
"=",
"collection_name",
"+",
"\"_values\"",
"keys",
"=",
"tf",
".",
"get_collection",
"(",
"key_collection",
")",
"values",
"=",
"tf",
".",
"get_collection",
"(",
"value_collection",
")",
"return",
"dict",
"(",
"zip",
"(",
"keys",
",",
"values",
")",
")"
] |
https://github.com/jkkummerfeld/text2sql-data/blob/2905ab815b4893d99ea061a20fb55860ecb1f92e/systems/sequence-to-sequence/seq2seq/graph_utils.py#L59-L72
|
|
RodrigoGantier/Mask_R_CNN_Keypoints
|
6b22e72de01ae98eaa1acd8645e5dbe3a096459f
|
utils.py
|
python
|
extract_bboxes
|
(mask)
|
return boxes.astype(np.int32)
|
Compute bounding boxes from masks.
mask: [height, width, num_instances]. Mask pixels are either 1 or 0.
Returns: bbox array [num_instances, (y1, x1, y2, x2)].
|
Compute bounding boxes from masks.
mask: [height, width, num_instances]. Mask pixels are either 1 or 0.
|
[
"Compute",
"bounding",
"boxes",
"from",
"masks",
".",
"mask",
":",
"[",
"height",
"width",
"num_instances",
"]",
".",
"Mask",
"pixels",
"are",
"either",
"1",
"or",
"0",
"."
] |
def extract_bboxes(mask):
"""Compute bounding boxes from masks.
mask: [height, width, num_instances]. Mask pixels are either 1 or 0.
Returns: bbox array [num_instances, (y1, x1, y2, x2)].
"""
boxes = np.zeros([mask.shape[-1], 4], dtype=np.int32)
for i in range(mask.shape[-1]):
m = mask[:, :, i]
# Bounding box.
horizontal_indicies = np.where(np.any(m, axis=0))[0]
vertical_indicies = np.where(np.any(m, axis=1))[0]
if horizontal_indicies.shape[0]:
x1, x2 = horizontal_indicies[[0, -1]]
y1, y2 = vertical_indicies[[0, -1]]
# x2 and y2 should not be part of the box. Increment by 1.
x2 += 1
y2 += 1
else:
# No mask for this instance. Might happen due to
# resizing or cropping. Set bbox to zeros
x1, x2, y1, y2 = 0, 0, 0, 0
boxes[i] = np.array([y1, x1, y2, x2])
return boxes.astype(np.int32)
|
[
"def",
"extract_bboxes",
"(",
"mask",
")",
":",
"boxes",
"=",
"np",
".",
"zeros",
"(",
"[",
"mask",
".",
"shape",
"[",
"-",
"1",
"]",
",",
"4",
"]",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"for",
"i",
"in",
"range",
"(",
"mask",
".",
"shape",
"[",
"-",
"1",
"]",
")",
":",
"m",
"=",
"mask",
"[",
":",
",",
":",
",",
"i",
"]",
"# Bounding box.",
"horizontal_indicies",
"=",
"np",
".",
"where",
"(",
"np",
".",
"any",
"(",
"m",
",",
"axis",
"=",
"0",
")",
")",
"[",
"0",
"]",
"vertical_indicies",
"=",
"np",
".",
"where",
"(",
"np",
".",
"any",
"(",
"m",
",",
"axis",
"=",
"1",
")",
")",
"[",
"0",
"]",
"if",
"horizontal_indicies",
".",
"shape",
"[",
"0",
"]",
":",
"x1",
",",
"x2",
"=",
"horizontal_indicies",
"[",
"[",
"0",
",",
"-",
"1",
"]",
"]",
"y1",
",",
"y2",
"=",
"vertical_indicies",
"[",
"[",
"0",
",",
"-",
"1",
"]",
"]",
"# x2 and y2 should not be part of the box. Increment by 1.",
"x2",
"+=",
"1",
"y2",
"+=",
"1",
"else",
":",
"# No mask for this instance. Might happen due to",
"# resizing or cropping. Set bbox to zeros",
"x1",
",",
"x2",
",",
"y1",
",",
"y2",
"=",
"0",
",",
"0",
",",
"0",
",",
"0",
"boxes",
"[",
"i",
"]",
"=",
"np",
".",
"array",
"(",
"[",
"y1",
",",
"x1",
",",
"y2",
",",
"x2",
"]",
")",
"return",
"boxes",
".",
"astype",
"(",
"np",
".",
"int32",
")"
] |
https://github.com/RodrigoGantier/Mask_R_CNN_Keypoints/blob/6b22e72de01ae98eaa1acd8645e5dbe3a096459f/utils.py#L21-L44
|
|
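A toy run of the same box-extraction logic on a 5x5 mask with a single instance (NumPy only):

import numpy as np

mask = np.zeros((5, 5, 1), dtype=np.uint8)
mask[1:3, 2:4, 0] = 1                 # a 2x2 blob: rows 1-2, cols 2-3

m = mask[:, :, 0]
xs = np.where(np.any(m, axis=0))[0]   # occupied columns -> [2 3]
ys = np.where(np.any(m, axis=1))[0]   # occupied rows    -> [1 2]
x1, x2 = xs[[0, -1]]
y1, y2 = ys[[0, -1]]
x2 += 1; y2 += 1                      # right/bottom edges are exclusive
print([int(y1), int(x1), int(y2), int(x2)])  # [1, 2, 3, 4]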
oleg-yaroshevskiy/quest_qa_labeling
|
730a9632314e54584f69f909d5e2ef74d843e02c
|
packages/fairseq-hacked/fairseq/distributed_utils.py
|
python
|
suppress_output
|
(is_master)
|
Suppress printing on the current device. Force printing with `force=True`.
|
Suppress printing on the current device. Force printing with `force=True`.
|
[
"Suppress",
"printing",
"on",
"the",
"current",
"device",
".",
"Force",
"printing",
"with",
"force",
"=",
"True",
"."
] |
def suppress_output(is_master):
"""Suppress printing on the current device. Force printing with `force=True`."""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop("force", False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
|
[
"def",
"suppress_output",
"(",
"is_master",
")",
":",
"import",
"builtins",
"as",
"__builtin__",
"builtin_print",
"=",
"__builtin__",
".",
"print",
"def",
"print",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"force",
"=",
"kwargs",
".",
"pop",
"(",
"\"force\"",
",",
"False",
")",
"if",
"is_master",
"or",
"force",
":",
"builtin_print",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"__builtin__",
".",
"print",
"=",
"print"
] |
https://github.com/oleg-yaroshevskiy/quest_qa_labeling/blob/730a9632314e54584f69f909d5e2ef74d843e02c/packages/fairseq-hacked/fairseq/distributed_utils.py#L112-L123
|
||
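A self-contained re-enactment of the monkeypatch for a non-master rank, restoring the builtin afterwards: ordinary print() calls are dropped, while force=True still goes through.

import builtins

builtin_print = builtins.print

def quiet_print(*args, **kwargs):      # same idea as suppress_output's inner print
    if kwargs.pop('force', False):     # is_master is False in this sketch
        builtin_print(*args, **kwargs)

builtins.print = quiet_print
print('regular output')                # silently dropped
print('critical message', force=True)  # still printed
builtins.print = builtin_print         # restore the real print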
pydata/xarray
|
9226c7ac87b3eb246f7a7e49f8f0f23d68951624
|
xarray/core/indexes.py
|
python
|
default_indexes
|
(
coords: Mapping[Any, "Variable"], dims: Iterable
)
|
return {key: coords[key]._to_xindex() for key in dims if key in coords}
|
Default indexes for a Dataset/DataArray.
Parameters
----------
coords : Mapping[Any, xarray.Variable]
Coordinate variables from which to draw default indexes.
dims : iterable
Iterable of dimension names.
Returns
-------
Mapping from indexing keys (levels/dimension names) to indexes used for
indexing along that dimension.
|
Default indexes for a Dataset/DataArray.
|
[
"Default",
"indexes",
"for",
"a",
"Dataset",
"/",
"DataArray",
"."
] |
def default_indexes(
coords: Mapping[Any, "Variable"], dims: Iterable
) -> Dict[Hashable, Index]:
"""Default indexes for a Dataset/DataArray.
Parameters
----------
coords : Mapping[Any, xarray.Variable]
Coordinate variables from which to draw default indexes.
dims : iterable
Iterable of dimension names.
Returns
-------
Mapping from indexing keys (levels/dimension names) to indexes used for
indexing along that dimension.
"""
return {key: coords[key]._to_xindex() for key in dims if key in coords}
|
[
"def",
"default_indexes",
"(",
"coords",
":",
"Mapping",
"[",
"Any",
",",
"\"Variable\"",
"]",
",",
"dims",
":",
"Iterable",
")",
"->",
"Dict",
"[",
"Hashable",
",",
"Index",
"]",
":",
"return",
"{",
"key",
":",
"coords",
"[",
"key",
"]",
".",
"_to_xindex",
"(",
")",
"for",
"key",
"in",
"dims",
"if",
"key",
"in",
"coords",
"}"
] |
https://github.com/pydata/xarray/blob/9226c7ac87b3eb246f7a7e49f8f0f23d68951624/xarray/core/indexes.py#L479-L496
|
|
smicallef/spiderfoot
|
fd4bf9394c9ab3ecc90adc3115c56349fb23165b
|
modules/sfp_coinblocker.py
|
python
|
sfp_coinblocker.retrieveBlocklist
|
(self)
|
return self.parseBlocklist(res['content'])
|
[] |
def retrieveBlocklist(self):
blocklist = self.sf.cacheGet('coinblocker', self.opts.get('cacheperiod', 24))
if blocklist is not None:
return self.parseBlocklist(blocklist)
url = "https://zerodot1.gitlab.io/CoinBlockerLists/list.txt"
res = self.sf.fetchUrl(
url,
timeout=self.opts['_fetchtimeout'],
useragent=self.opts['_useragent'],
)
if res['code'] != "200":
self.error(f"Unexpected HTTP response code {res['code']} from {url}")
self.errorState = True
return None
if res['content'] is None:
self.error(f"Received no content from {url}")
self.errorState = True
return None
self.sf.cachePut("coinblocker", res['content'])
return self.parseBlocklist(res['content'])
|
[
"def",
"retrieveBlocklist",
"(",
"self",
")",
":",
"blocklist",
"=",
"self",
".",
"sf",
".",
"cacheGet",
"(",
"'coinblocker'",
",",
"self",
".",
"opts",
".",
"get",
"(",
"'cacheperiod'",
",",
"24",
")",
")",
"if",
"blocklist",
"is",
"not",
"None",
":",
"return",
"self",
".",
"parseBlocklist",
"(",
"blocklist",
")",
"url",
"=",
"\"https://zerodot1.gitlab.io/CoinBlockerLists/list.txt\"",
"res",
"=",
"self",
".",
"sf",
".",
"fetchUrl",
"(",
"url",
",",
"timeout",
"=",
"self",
".",
"opts",
"[",
"'_fetchtimeout'",
"]",
",",
"useragent",
"=",
"self",
".",
"opts",
"[",
"'_useragent'",
"]",
",",
")",
"if",
"res",
"[",
"'code'",
"]",
"!=",
"\"200\"",
":",
"self",
".",
"error",
"(",
"f\"Unexpected HTTP response code {res['code']} from {url}\"",
")",
"self",
".",
"errorState",
"=",
"True",
"return",
"None",
"if",
"res",
"[",
"'content'",
"]",
"is",
"None",
":",
"self",
".",
"error",
"(",
"f\"Received no content from {url}\"",
")",
"self",
".",
"errorState",
"=",
"True",
"return",
"None",
"self",
".",
"sf",
".",
"cachePut",
"(",
"\"coinblocker\"",
",",
"res",
"[",
"'content'",
"]",
")",
"return",
"self",
".",
"parseBlocklist",
"(",
"res",
"[",
"'content'",
"]",
")"
] |
https://github.com/smicallef/spiderfoot/blob/fd4bf9394c9ab3ecc90adc3115c56349fb23165b/modules/sfp_coinblocker.py#L93-L118
|
|||
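The record above follows a cache-first fetch pattern: check the cache, fetch on a miss, and refuse to cache failures. A generic sketch of the same flow, with an invented in-memory dict standing in for spiderfoot's sf.cacheGet/sf.cachePut:

import urllib.request

_cache = {}

def fetch_cached(url):
    if url in _cache:                        # cache hit: skip the network
        return _cache[url]
    try:
        with urllib.request.urlopen(url, timeout=10) as resp:
            content = resp.read()
    except OSError:
        return None                          # error path: failures are not cached
    _cache[url] = content                    # only successful bodies are cached
    return content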
Source-Python-Dev-Team/Source.Python
|
d0ffd8ccbd1e9923c9bc44936f20613c1c76b7fb
|
addons/source-python/Python3/logging/config.py
|
python
|
stopListening
|
()
|
Stop the listening server which was created with a call to listen().
|
Stop the listening server which was created with a call to listen().
|
[
"Stop",
"the",
"listening",
"server",
"which",
"was",
"created",
"with",
"a",
"call",
"to",
"listen",
"()",
"."
] |
def stopListening():
"""
Stop the listening server which was created with a call to listen().
"""
global _listener
logging._acquireLock()
try:
if _listener:
_listener.abort = 1
_listener = None
finally:
logging._releaseLock()
|
[
"def",
"stopListening",
"(",
")",
":",
"global",
"_listener",
"logging",
".",
"_acquireLock",
"(",
")",
"try",
":",
"if",
"_listener",
":",
"_listener",
".",
"abort",
"=",
"1",
"_listener",
"=",
"None",
"finally",
":",
"logging",
".",
"_releaseLock",
"(",
")"
] |
https://github.com/Source-Python-Dev-Team/Source.Python/blob/d0ffd8ccbd1e9923c9bc44936f20613c1c76b7fb/addons/source-python/Python3/logging/config.py#L922-L933
|
||
IronLanguages/ironpython3
|
7a7bb2a872eeab0d1009fc8a6e24dca43f65b693
|
Src/IronPython/Modules/unicodedata/genunicodedata.py
|
python
|
add_eawidths
|
(data)
|
return data
|
[] |
def add_eawidths(data):
for eawidth in readdatafile(EASTASIANWIDTHS):
if eawidth[0] in data:
data[eawidth[0]] += [eawidth[1]]
return data
|
[
"def",
"add_eawidths",
"(",
"data",
")",
":",
"for",
"eawidth",
"in",
"readdatafile",
"(",
"EASTASIANWIDTHS",
")",
":",
"if",
"eawidth",
"[",
"0",
"]",
"in",
"data",
":",
"data",
"[",
"eawidth",
"[",
"0",
"]",
"]",
"+=",
"[",
"eawidth",
"[",
"1",
"]",
"]",
"return",
"data"
] |
https://github.com/IronLanguages/ironpython3/blob/7a7bb2a872eeab0d1009fc8a6e24dca43f65b693/Src/IronPython/Modules/unicodedata/genunicodedata.py#L32-L37
|
|||
saltstack/salt
|
fae5bc757ad0f1716483ce7ae180b451545c2058
|
salt/crypt.py
|
python
|
_get_key_with_evict
|
(path, timestamp, passphrase)
|
return key
|
Load a private key from disk. `timestamp` above is intended to be the
timestamp of the file's last modification. This fn is memoized so if it is
called with the same path and timestamp (the file's last modified time) the
second time the result is returned from the memoization. If the file gets
modified then the params are different and the key is loaded from disk.
|
Load a private key from disk. `timestamp` above is intended to be the
timestamp of the file's last modification. This fn is memoized so if it is
called with the same path and timestamp (the file's last modified time) the
second time the result is returned from the memoization. If the file gets
modified then the params are different and the key is loaded from disk.
|
[
"Load",
"a",
"private",
"key",
"from",
"disk",
".",
"timestamp",
"above",
"is",
"intended",
"to",
"be",
"the",
"timestamp",
"of",
"the",
"file",
"s",
"last",
"modification",
".",
"This",
"fn",
"is",
"memoized",
"so",
"if",
"it",
"is",
"called",
"with",
"the",
"same",
"path",
"and",
"timestamp",
"(",
"the",
"file",
"s",
"last",
"modified",
"time",
")",
"the",
"second",
"time",
"the",
"result",
"is",
"returned",
"from",
"the",
"memoiziation",
".",
"If",
"the",
"file",
"gets",
"modified",
"then",
"the",
"params",
"are",
"different",
"and",
"the",
"key",
"is",
"loaded",
"from",
"disk",
"."
] |
def _get_key_with_evict(path, timestamp, passphrase):
"""
Load a private key from disk. `timestamp` above is intended to be the
timestamp of the file's last modification. This fn is memoized so if it is
called with the same path and timestamp (the file's last modified time) the
second time the result is returned from the memoization. If the file gets
modified then the params are different and the key is loaded from disk.
"""
log.debug("salt.crypt._get_key_with_evict: Loading private key")
if HAS_M2:
key = RSA.load_key(path, lambda x: bytes(passphrase))
else:
with salt.utils.files.fopen(path) as f:
key = RSA.importKey(f.read(), passphrase)
return key
|
[
"def",
"_get_key_with_evict",
"(",
"path",
",",
"timestamp",
",",
"passphrase",
")",
":",
"log",
".",
"debug",
"(",
"\"salt.crypt._get_key_with_evict: Loading private key\"",
")",
"if",
"HAS_M2",
":",
"key",
"=",
"RSA",
".",
"load_key",
"(",
"path",
",",
"lambda",
"x",
":",
"bytes",
"(",
"passphrase",
")",
")",
"else",
":",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"path",
")",
"as",
"f",
":",
"key",
"=",
"RSA",
".",
"importKey",
"(",
"f",
".",
"read",
"(",
")",
",",
"passphrase",
")",
"return",
"key"
] |
https://github.com/saltstack/salt/blob/fae5bc757ad0f1716483ce7ae180b451545c2058/salt/crypt.py#L183-L197
|
|
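The memoize-on-(path, mtime) trick the docstring above describes can be reproduced in isolation. This sketch uses functools.lru_cache rather than salt's own memoizer, so it illustrates the pattern only; the file-reading body is a placeholder for the RSA key load.

import os
import functools

@functools.lru_cache(maxsize=32)
def _load_with_evict(path, mtime):
    # mtime is part of the cache key: once the file changes, the new
    # (path, mtime) pair misses the cache and forces a fresh read.
    with open(path, "rb") as f:
        return f.read()

def load_cached(path):
    return _load_with_evict(path, os.path.getmtime(path))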
BigBrotherBot/big-brother-bot
|
848823c71413c86e7f1ff9584f43e08d40a7f2c0
|
b3/plugins/poweradminhf/__init__.py
|
python
|
PoweradminhfPlugin.cmd_pamatch
|
(self, data, client, cmd=None)
|
<on/off> - set server match mode on/off
(You can safely use the command without the 'pa' at the beginning)
|
<on/off> - set server match mode on/off
(You can safely use the command without the 'pa' at the beginning)
|
[
"<on",
"/",
"off",
">",
"-",
"set",
"server",
"match",
"mode",
"on",
"/",
"off",
"(",
"You",
"can",
"safely",
"use",
"the",
"command",
"without",
"the",
"pa",
"at",
"the",
"beginning",
")"
] |
def cmd_pamatch(self, data, client, cmd=None):
"""
<on/off> - set server match mode on/off
(You can safely use the command without the 'pa' at the beginning)
"""
if not data or str(data).lower() not in ('on','off'):
client.message('invalid or missing data, try !help pamatch')
else:
if data.lower() == 'on':
self._matchmode = True
self._enableTeamBalancer = False
for e in self._match_plugin_disable:
self.debug('disabling plugin %s' %e)
plugin = self.console.getPlugin(e)
if plugin:
plugin.disable()
client.message('plugin %s disabled' % e)
self.console.say('match mode: ON')
if self._matchManager:
self._matchManager.stop()
self._matchManager = MatchManager(self)
self._matchManager.initMatch()
elif data.lower() == 'off':
self._matchmode = False
if self._matchManager:
self._matchManager.stop()
self._matchManager = None
# enable plugins
for e in self._match_plugin_disable:
self.debug('enabling plugin %s' %e)
plugin = self.console.getPlugin(e)
if plugin:
plugin.enable()
client.message('plugin %s enabled' % e)
self.console.say('match mode: OFF')
|
[
"def",
"cmd_pamatch",
"(",
"self",
",",
"data",
",",
"client",
",",
"cmd",
"=",
"None",
")",
":",
"if",
"not",
"data",
"or",
"str",
"(",
"data",
")",
".",
"lower",
"(",
")",
"not",
"in",
"(",
"'on'",
",",
"'off'",
")",
":",
"client",
".",
"message",
"(",
"'invalid or missing data, try !help pamatch'",
")",
"else",
":",
"if",
"data",
".",
"lower",
"(",
")",
"==",
"'on'",
":",
"self",
".",
"_matchmode",
"=",
"True",
"self",
".",
"_enableTeamBalancer",
"=",
"False",
"for",
"e",
"in",
"self",
".",
"_match_plugin_disable",
":",
"self",
".",
"debug",
"(",
"'disabling plugin %s'",
"%",
"e",
")",
"plugin",
"=",
"self",
".",
"console",
".",
"getPlugin",
"(",
"e",
")",
"if",
"plugin",
":",
"plugin",
".",
"disable",
"(",
")",
"client",
".",
"message",
"(",
"'plugin %s disabled'",
"%",
"e",
")",
"self",
".",
"console",
".",
"say",
"(",
"'match mode: ON'",
")",
"if",
"self",
".",
"_matchManager",
":",
"self",
".",
"_matchManager",
".",
"stop",
"(",
")",
"self",
".",
"_matchManager",
"=",
"MatchManager",
"(",
"self",
")",
"self",
".",
"_matchManager",
".",
"initMatch",
"(",
")",
"elif",
"data",
".",
"lower",
"(",
")",
"==",
"'off'",
":",
"self",
".",
"_matchmode",
"=",
"False",
"if",
"self",
".",
"_matchManager",
":",
"self",
".",
"_matchManager",
".",
"stop",
"(",
")",
"self",
".",
"_matchManager",
"=",
"None",
"# enable plugins",
"for",
"e",
"in",
"self",
".",
"_match_plugin_disable",
":",
"self",
".",
"debug",
"(",
"'enabling plugin %s'",
"%",
"e",
")",
"plugin",
"=",
"self",
".",
"console",
".",
"getPlugin",
"(",
"e",
")",
"if",
"plugin",
":",
"plugin",
".",
"enable",
"(",
")",
"client",
".",
"message",
"(",
"'plugin %s enabled'",
"%",
"e",
")",
"self",
".",
"console",
".",
"say",
"(",
"'match mode: OFF'",
")"
] |
https://github.com/BigBrotherBot/big-brother-bot/blob/848823c71413c86e7f1ff9584f43e08d40a7f2c0/b3/plugins/poweradminhf/__init__.py#L399-L438
|
||
exaile/exaile
|
a7b58996c5c15b3aa7b9975ac13ee8f784ef4689
|
plugins/quickbuttons/__init__.py
|
python
|
qb_spinner._set_delay_value
|
(self, value: int)
|
Set the delay value in ms
|
Set the delay value in ms
|
[
"Set",
"the",
"delay",
"value",
"in",
"ms"
] |
def _set_delay_value(self, value: int) -> None:
"""
Set the delay value in ms
"""
value = value * 1000
settings.set_option("player/auto_advance_delay", value)
|
[
"def",
"_set_delay_value",
"(",
"self",
",",
"value",
":",
"int",
")",
"->",
"None",
":",
"value",
"=",
"value",
"*",
"1000",
"settings",
".",
"set_option",
"(",
"\"player/auto_advance_delay\"",
",",
"value",
")"
] |
https://github.com/exaile/exaile/blob/a7b58996c5c15b3aa7b9975ac13ee8f784ef4689/plugins/quickbuttons/__init__.py#L366-L371
|
||
nvbn/everpad
|
5db96c0f9b7c30ce4f900274f3826fdfa55cbaac
|
everpad/provider/service.py
|
python
|
ProviderService.find_notes
|
(
self, words, notebooks, tags, place,
limit=const.DEFAULT_LIMIT, order=const.ORDER_UPDATED,
pinnded=const.NOT_PINNDED,
)
|
return notes
|
Find notes by filters
|
Find notes by filters
|
[
"Find",
"notes",
"by",
"filters"
] |
def find_notes(
self, words, notebooks, tags, place,
limit=const.DEFAULT_LIMIT, order=const.ORDER_UPDATED,
pinnded=const.NOT_PINNDED,
):
"""Find notes by filters"""
notes = btype.Note.list >> NoteFilterer(self.session)\
.by_words(words)\
.by_notebooks(notebooks)\
.by_tags(tags)\
.by_place(place)\
.by_pinnded(pinnded)\
.order_by(order)\
.all()\
.limit(limit)
return notes
|
[
"def",
"find_notes",
"(",
"self",
",",
"words",
",",
"notebooks",
",",
"tags",
",",
"place",
",",
"limit",
"=",
"const",
".",
"DEFAULT_LIMIT",
",",
"order",
"=",
"const",
".",
"ORDER_UPDATED",
",",
"pinnded",
"=",
"const",
".",
"NOT_PINNDED",
",",
")",
":",
"notes",
"=",
"btype",
".",
"Note",
".",
"list",
">>",
"NoteFilterer",
"(",
"self",
".",
"session",
")",
".",
"by_words",
"(",
"words",
")",
".",
"by_notebooks",
"(",
"notebooks",
")",
".",
"by_tags",
"(",
"tags",
")",
".",
"by_place",
"(",
"place",
")",
".",
"by_pinnded",
"(",
"pinnded",
")",
".",
"order_by",
"(",
"order",
")",
".",
"all",
"(",
")",
".",
"limit",
"(",
"limit",
")",
"return",
"notes"
] |
https://github.com/nvbn/everpad/blob/5db96c0f9b7c30ce4f900274f3826fdfa55cbaac/everpad/provider/service.py#L163-L179
|
|
cloudera/hue
|
23f02102d4547c17c32bd5ea0eb24e9eadd657a4
|
desktop/core/ext-py/urllib3-1.25.8/dummyserver/handlers.py
|
python
|
TestingApp.upload
|
(self, request)
|
return Response()
|
Confirm that the uploaded file conforms to specification
|
Confirm that the uploaded file conforms to specification
|
[
"Confirm",
"that",
"the",
"uploaded",
"file",
"conforms",
"to",
"specification"
] |
def upload(self, request):
"Confirm that the uploaded file conforms to specification"
# FIXME: This is a huge broken mess
param = request.params.get("upload_param", b"myfile").decode("ascii")
filename = request.params.get("upload_filename", b"").decode("utf-8")
size = int(request.params.get("upload_size", "0"))
files_ = request.files.get(param)
if len(files_) != 1:
return Response(
"Expected 1 file for '%s', not %d" % (param, len(files_)),
status="400 Bad Request",
)
file_ = files_[0]
data = file_["body"]
if int(size) != len(data):
return Response(
"Wrong size: %d != %d" % (size, len(data)), status="400 Bad Request"
)
got_filename = file_["filename"]
if isinstance(got_filename, binary_type):
got_filename = got_filename.decode("utf-8")
# Tornado can leave the trailing \n in place on the filename.
if filename != got_filename:
return Response(
u"Wrong filename: %s != %s" % (filename, file_.filename),
status="400 Bad Request",
)
return Response()
|
[
"def",
"upload",
"(",
"self",
",",
"request",
")",
":",
"# FIXME: This is a huge broken mess",
"param",
"=",
"request",
".",
"params",
".",
"get",
"(",
"\"upload_param\"",
",",
"b\"myfile\"",
")",
".",
"decode",
"(",
"\"ascii\"",
")",
"filename",
"=",
"request",
".",
"params",
".",
"get",
"(",
"\"upload_filename\"",
",",
"b\"\"",
")",
".",
"decode",
"(",
"\"utf-8\"",
")",
"size",
"=",
"int",
"(",
"request",
".",
"params",
".",
"get",
"(",
"\"upload_size\"",
",",
"\"0\"",
")",
")",
"files_",
"=",
"request",
".",
"files",
".",
"get",
"(",
"param",
")",
"if",
"len",
"(",
"files_",
")",
"!=",
"1",
":",
"return",
"Response",
"(",
"\"Expected 1 file for '%s', not %d\"",
"%",
"(",
"param",
",",
"len",
"(",
"files_",
")",
")",
",",
"status",
"=",
"\"400 Bad Request\"",
",",
")",
"file_",
"=",
"files_",
"[",
"0",
"]",
"data",
"=",
"file_",
"[",
"\"body\"",
"]",
"if",
"int",
"(",
"size",
")",
"!=",
"len",
"(",
"data",
")",
":",
"return",
"Response",
"(",
"\"Wrong size: %d != %d\"",
"%",
"(",
"size",
",",
"len",
"(",
"data",
")",
")",
",",
"status",
"=",
"\"400 Bad Request\"",
")",
"got_filename",
"=",
"file_",
"[",
"\"filename\"",
"]",
"if",
"isinstance",
"(",
"got_filename",
",",
"binary_type",
")",
":",
"got_filename",
"=",
"got_filename",
".",
"decode",
"(",
"\"utf-8\"",
")",
"# Tornado can leave the trailing \\n in place on the filename.",
"if",
"filename",
"!=",
"got_filename",
":",
"return",
"Response",
"(",
"u\"Wrong filename: %s != %s\"",
"%",
"(",
"filename",
",",
"file_",
".",
"filename",
")",
",",
"status",
"=",
"\"400 Bad Request\"",
",",
")",
"return",
"Response",
"(",
")"
] |
https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/core/ext-py/urllib3-1.25.8/dummyserver/handlers.py#L145-L177
|
|
roglew/guppy-proxy
|
01df16be71dd9f23d7de415a315821659c29bc63
|
guppyproxy/decoder.py
|
python
|
decode_jwt
|
(s)
|
return ret
|
[] |
def decode_jwt(s):
# in case they paste the whole auth header or the token with "bearer"
s = s.strip()
fields = s.split(b' ')
s = fields[-1].strip()
parts = s.split(b'.')
ret = b''
for part in parts:
try:
ret += base64_decode_helper(part.decode()) + b'\n\n'
except:
ret += b"[error decoding]\n\n"
return ret
|
[
"def",
"decode_jwt",
"(",
"s",
")",
":",
"# in case they paste the whole auth header or the token with \"bearer\"",
"s",
"=",
"s",
".",
"strip",
"(",
")",
"fields",
"=",
"s",
".",
"split",
"(",
"b' '",
")",
"s",
"=",
"fields",
"[",
"-",
"1",
"]",
".",
"strip",
"(",
")",
"parts",
"=",
"s",
".",
"split",
"(",
"b'.'",
")",
"ret",
"=",
"b''",
"for",
"part",
"in",
"parts",
":",
"try",
":",
"ret",
"+=",
"base64_decode_helper",
"(",
"part",
".",
"decode",
"(",
")",
")",
"+",
"b'\\n\\n'",
"except",
":",
"ret",
"+=",
"b\"[error decoding]\\n\\n\"",
"return",
"ret"
] |
https://github.com/roglew/guppy-proxy/blob/01df16be71dd9f23d7de415a315821659c29bc63/guppyproxy/decoder.py#L63-L75
|
|||
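For context on the decode_jwt record above: a JWT is three base64url-encoded segments joined by dots, and base64_decode_helper is guppy's own padding-tolerant decoder. A self-contained stand-in, run on a made-up token:

import base64

def b64url_decode(segment):
    pad = -len(segment) % 4               # restore padding stripped by JWT encoding
    return base64.urlsafe_b64decode(segment + b"=" * pad)

token = (b"eyJhbGciOiJIUzI1NiJ9."         # header  {"alg":"HS256"}
         b"eyJzdWIiOiIxMjM0In0."          # payload {"sub":"1234"}
         b"sig")                          # signature (opaque bytes)
for part in token.split(b'.')[:2]:
    print(b64url_decode(part))            # prints the decoded header and payload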
coreemu/core
|
7e18a7a72023a69a92ad61d87461bd659ba27f7c
|
daemon/core/api/grpc/client.py
|
python
|
CoreGrpcClient.check_session
|
(self, session_id: int)
|
return self.stub.CheckSession(request)
|
Check if a session exists.
:param session_id: id of session to check for
:return: response with result if session was found
|
Check if a session exists.
|
[
"Check",
"if",
"a",
"session",
"exists",
"."
] |
def check_session(self, session_id: int) -> core_pb2.CheckSessionResponse:
"""
Check if a session exists.
:param session_id: id of session to check for
:return: response with result if session was found
"""
request = core_pb2.CheckSessionRequest(session_id=session_id)
return self.stub.CheckSession(request)
|
[
"def",
"check_session",
"(",
"self",
",",
"session_id",
":",
"int",
")",
"->",
"core_pb2",
".",
"CheckSessionResponse",
":",
"request",
"=",
"core_pb2",
".",
"CheckSessionRequest",
"(",
"session_id",
"=",
"session_id",
")",
"return",
"self",
".",
"stub",
".",
"CheckSession",
"(",
"request",
")"
] |
https://github.com/coreemu/core/blob/7e18a7a72023a69a92ad61d87461bd659ba27f7c/daemon/core/api/grpc/client.py#L278-L286
|
|
facebookresearch/mmf
|
fb6fe390287e1da12c3bd28d4ab43c5f7dcdfc9f
|
mmf/modules/attention.py
|
python
|
TopDownAttention.forward
|
(self, image_feat, question_embedding, image_locs=None)
|
return masked_attention
|
[] |
def forward(self, image_feat, question_embedding, image_locs=None):
# N x K x joint_dim
joint_feature = self.combination_layer(image_feat, question_embedding)
# N x K x n_att
raw_attn = self.transform(joint_feature)
if self.normalization.lower() == "softmax":
attention = nn.functional.softmax(raw_attn, dim=1)
if image_locs is not None:
masked_attention = self._mask_attentions(attention, image_locs)
masked_attention_sum = torch.sum(masked_attention, dim=1, keepdim=True)
masked_attention_sum += masked_attention_sum.eq(0).float() + self.EPS
masked_attention = masked_attention / masked_attention_sum
else:
masked_attention = attention
elif self.normalization.lower() == "sigmoid":
attention = torch.sigmoid(raw_attn)
masked_attention = attention
if image_locs is not None:
masked_attention = self._mask_attentions(attention, image_locs)
return masked_attention
|
[
"def",
"forward",
"(",
"self",
",",
"image_feat",
",",
"question_embedding",
",",
"image_locs",
"=",
"None",
")",
":",
"# N x K x joint_dim",
"joint_feature",
"=",
"self",
".",
"combination_layer",
"(",
"image_feat",
",",
"question_embedding",
")",
"# N x K x n_att",
"raw_attn",
"=",
"self",
".",
"transform",
"(",
"joint_feature",
")",
"if",
"self",
".",
"normalization",
".",
"lower",
"(",
")",
"==",
"\"softmax\"",
":",
"attention",
"=",
"nn",
".",
"functional",
".",
"softmax",
"(",
"raw_attn",
",",
"dim",
"=",
"1",
")",
"if",
"image_locs",
"is",
"not",
"None",
":",
"masked_attention",
"=",
"self",
".",
"_mask_attentions",
"(",
"attention",
",",
"image_locs",
")",
"masked_attention_sum",
"=",
"torch",
".",
"sum",
"(",
"masked_attention",
",",
"dim",
"=",
"1",
",",
"keepdim",
"=",
"True",
")",
"masked_attention_sum",
"+=",
"masked_attention_sum",
".",
"eq",
"(",
"0",
")",
".",
"float",
"(",
")",
"+",
"self",
".",
"EPS",
"masked_attention",
"=",
"masked_attention",
"/",
"masked_attention_sum",
"else",
":",
"masked_attention",
"=",
"attention",
"elif",
"self",
".",
"normalization",
".",
"lower",
"(",
")",
"==",
"\"sigmoid\"",
":",
"attention",
"=",
"torch",
".",
"sigmoid",
"(",
"raw_attn",
")",
"masked_attention",
"=",
"attention",
"if",
"image_locs",
"is",
"not",
"None",
":",
"masked_attention",
"=",
"self",
".",
"_mask_attentions",
"(",
"attention",
",",
"image_locs",
")",
"return",
"masked_attention"
] |
https://github.com/facebookresearch/mmf/blob/fb6fe390287e1da12c3bd28d4ab43c5f7dcdfc9f/mmf/modules/attention.py#L139-L161
|
|||
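The masked-softmax renormalization in the forward pass above can be checked in isolation; this sketch assumes only torch and fabricates a 1x3 attention row whose last location is padding (1e-8 stands in for self.EPS).

import torch

attention = torch.softmax(torch.tensor([[1.0, 2.0, 3.0]]), dim=1)
mask = torch.tensor([[1.0, 1.0, 0.0]])       # third location masked out
masked = attention * mask
total = masked.sum(dim=1, keepdim=True)
total = total + total.eq(0).float() + 1e-8   # guard against all-masked rows, as above
masked = masked / total
print(masked.sum().item())                   # ~1.0: surviving weights renormalize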
tp4a/teleport
|
1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad
|
server/www/packages/packages-windows/x86/ldap3/extend/operation.py
|
python
|
ExtendedOperation.decode_response
|
(self)
|
[] |
def decode_response(self):
if not self.result:
return None
if self.result['result'] not in [RESULT_SUCCESS]:
if self.connection.raise_exceptions:
raise LDAPExtensionError('extended operation error: ' + self.result['description'] + ' - ' + self.result['message'])
else:
return None
if not self.response_name or self.result['responseName'] == self.response_name:
if self.result['responseValue']:
if self.asn1_spec is not None:
decoded, unprocessed = decoder.decode(self.result['responseValue'], asn1Spec=self.asn1_spec)
if unprocessed:
raise LDAPExtensionError('error decoding extended response value')
self.decoded_response = decoded
else:
self.decoded_response = self.result['responseValue']
else:
raise LDAPExtensionError('invalid response name received')
|
[
"def",
"decode_response",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"result",
":",
"return",
"None",
"if",
"self",
".",
"result",
"[",
"'result'",
"]",
"not",
"in",
"[",
"RESULT_SUCCESS",
"]",
":",
"if",
"self",
".",
"connection",
".",
"raise_exceptions",
":",
"raise",
"LDAPExtensionError",
"(",
"'extended operation error: '",
"+",
"self",
".",
"result",
"[",
"'description'",
"]",
"+",
"' - '",
"+",
"self",
".",
"result",
"[",
"'message'",
"]",
")",
"else",
":",
"return",
"None",
"if",
"not",
"self",
".",
"response_name",
"or",
"self",
".",
"result",
"[",
"'responseName'",
"]",
"==",
"self",
".",
"response_name",
":",
"if",
"self",
".",
"result",
"[",
"'responseValue'",
"]",
":",
"if",
"self",
".",
"asn1_spec",
"is",
"not",
"None",
":",
"decoded",
",",
"unprocessed",
"=",
"decoder",
".",
"decode",
"(",
"self",
".",
"result",
"[",
"'responseValue'",
"]",
",",
"asn1Spec",
"=",
"self",
".",
"asn1_spec",
")",
"if",
"unprocessed",
":",
"raise",
"LDAPExtensionError",
"(",
"'error decoding extended response value'",
")",
"self",
".",
"decoded_response",
"=",
"decoded",
"else",
":",
"self",
".",
"decoded_response",
"=",
"self",
".",
"result",
"[",
"'responseValue'",
"]",
"else",
":",
"raise",
"LDAPExtensionError",
"(",
"'invalid response name received'",
")"
] |
https://github.com/tp4a/teleport/blob/1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad/server/www/packages/packages-windows/x86/ldap3/extend/operation.py#L66-L84
|
||||
numba/numba
|
bf480b9e0da858a65508c2b17759a72ee6a44c51
|
numba/cuda/cudaimpl.py
|
python
|
ptx_atomic_cas_tuple
|
(context, builder, sig, args)
|
[] |
def ptx_atomic_cas_tuple(context, builder, sig, args):
aryty, oldty, valty = sig.args
ary, old, val = args
dtype = aryty.dtype
lary = context.make_array(aryty)(context, builder, ary)
zero = context.get_constant(types.intp, 0)
ptr = cgutils.get_item_pointer(context, builder, aryty, lary, (zero,))
if aryty.dtype in (cuda.cudadecl.integer_numba_types):
lmod = builder.module
bitwidth = aryty.dtype.bitwidth
return nvvmutils.atomic_cmpxchg(builder, lmod, bitwidth, ptr, old, val)
else:
raise TypeError('Unimplemented atomic compare_and_swap '
'with %s array' % dtype)
|
[
"def",
"ptx_atomic_cas_tuple",
"(",
"context",
",",
"builder",
",",
"sig",
",",
"args",
")",
":",
"aryty",
",",
"oldty",
",",
"valty",
"=",
"sig",
".",
"args",
"ary",
",",
"old",
",",
"val",
"=",
"args",
"dtype",
"=",
"aryty",
".",
"dtype",
"lary",
"=",
"context",
".",
"make_array",
"(",
"aryty",
")",
"(",
"context",
",",
"builder",
",",
"ary",
")",
"zero",
"=",
"context",
".",
"get_constant",
"(",
"types",
".",
"intp",
",",
"0",
")",
"ptr",
"=",
"cgutils",
".",
"get_item_pointer",
"(",
"context",
",",
"builder",
",",
"aryty",
",",
"lary",
",",
"(",
"zero",
",",
")",
")",
"if",
"aryty",
".",
"dtype",
"in",
"(",
"cuda",
".",
"cudadecl",
".",
"integer_numba_types",
")",
":",
"lmod",
"=",
"builder",
".",
"module",
"bitwidth",
"=",
"aryty",
".",
"dtype",
".",
"bitwidth",
"return",
"nvvmutils",
".",
"atomic_cmpxchg",
"(",
"builder",
",",
"lmod",
",",
"bitwidth",
",",
"ptr",
",",
"old",
",",
"val",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'Unimplemented atomic compare_and_swap '",
"'with %s array'",
"%",
"dtype",
")"
] |
https://github.com/numba/numba/blob/bf480b9e0da858a65508c2b17759a72ee6a44c51/numba/cuda/cudaimpl.py#L944-L959
|
||||
gtaylor/python-colormath
|
4a076831fd5136f685aa7143db81eba27b2cd19a
|
colormath/color_conversions.py
|
python
|
LCHab_to_Lab
|
(cobj, *args, **kwargs)
|
return LabColor(
lab_l, lab_a, lab_b, illuminant=cobj.illuminant, observer=cobj.observer
)
|
Convert from LCH(ab) to Lab.
|
Convert from LCH(ab) to Lab.
|
[
"Convert",
"from",
"LCH",
"(",
"ab",
")",
"to",
"Lab",
"."
] |
def LCHab_to_Lab(cobj, *args, **kwargs):
"""
Convert from LCH(ab) to Lab.
"""
lab_l = cobj.lch_l
lab_a = math.cos(math.radians(cobj.lch_h)) * cobj.lch_c
lab_b = math.sin(math.radians(cobj.lch_h)) * cobj.lch_c
return LabColor(
lab_l, lab_a, lab_b, illuminant=cobj.illuminant, observer=cobj.observer
)
|
[
"def",
"LCHab_to_Lab",
"(",
"cobj",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"lab_l",
"=",
"cobj",
".",
"lch_l",
"lab_a",
"=",
"math",
".",
"cos",
"(",
"math",
".",
"radians",
"(",
"cobj",
".",
"lch_h",
")",
")",
"*",
"cobj",
".",
"lch_c",
"lab_b",
"=",
"math",
".",
"sin",
"(",
"math",
".",
"radians",
"(",
"cobj",
".",
"lch_h",
")",
")",
"*",
"cobj",
".",
"lch_c",
"return",
"LabColor",
"(",
"lab_l",
",",
"lab_a",
",",
"lab_b",
",",
"illuminant",
"=",
"cobj",
".",
"illuminant",
",",
"observer",
"=",
"cobj",
".",
"observer",
")"
] |
https://github.com/gtaylor/python-colormath/blob/4a076831fd5136f685aa7143db81eba27b2cd19a/colormath/color_conversions.py#L359-L368
|
|
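The conversion above is plain polar-to-Cartesian arithmetic: a* = C*cos(h) and b* = C*sin(h), with L carried through unchanged. A quick check without colormath, on invented values:

import math

L, C, h = 50.0, 30.0, 120.0           # made-up LCH(ab) coordinates
a = math.cos(math.radians(h)) * C     # -> -15.0
b = math.sin(math.radians(h)) * C     # -> ~25.98
print(L, a, b)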
dimagi/commcare-hq
|
d67ff1d3b4c51fa050c19e60c3253a79d3452a39
|
corehq/reports.py
|
python
|
_filter_reports
|
(report_set, reports)
|
[] |
def _filter_reports(report_set, reports):
if report_set:
return [r for r in reports if r.slug in report_set]
else:
return reports
|
[
"def",
"_filter_reports",
"(",
"report_set",
",",
"reports",
")",
":",
"if",
"report_set",
":",
"return",
"[",
"r",
"for",
"r",
"in",
"reports",
"if",
"r",
".",
"slug",
"in",
"report_set",
"]",
"else",
":",
"return",
"reports"
] |
https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/corehq/reports.py#L172-L176
|
||||
cea-hpc/clustershell
|
c421133ed4baa69e35ff76c476d4097201485344
|
lib/ClusterShell/Worker/Tree.py
|
python
|
TreeWorker._copy_remote
|
(self, source, dest, targets, gateway, timeout, reverse)
|
run a remote copy in tree mode (using gateway)
|
run a remote copy in tree mode (using gateway)
|
[
"run",
"a",
"remote",
"copy",
"in",
"tree",
"mode",
"(",
"using",
"gateway",
")"
] |
def _copy_remote(self, source, dest, targets, gateway, timeout, reverse):
"""run a remote copy in tree mode (using gateway)"""
self.logger.debug("_copy_remote gateway=%s source=%s dest=%s "
"reverse=%s", gateway, source, dest, reverse)
self._target_count += len(targets)
self.gwtargets.setdefault(str(gateway), NodeSet()).add(targets)
# tar commands are built here and launched on targets
if reverse:
# these weird replace calls aim to escape single quotes ' within ''
srcdir = dirname(source).replace("'", '\'\"\'\"\'')
srcbase = basename(normpath(self.source)).replace("'", '\'\"\'\"\'')
cmd = self.TAR_CMD_FMT % (srcdir, srcbase)
else:
cmd = self.UNTAR_CMD_FMT % dest.replace("'", '\'\"\'\"\'')
self.logger.debug('_copy_remote: tar cmd: %s', cmd)
pchan = self.task._pchannel(gateway, self)
pchan.shell(nodes=targets, command=cmd, worker=self, timeout=timeout,
stderr=self.stderr, gw_invoke_cmd=self.invoke_gateway,
remote=self.remote)
|
[
"def",
"_copy_remote",
"(",
"self",
",",
"source",
",",
"dest",
",",
"targets",
",",
"gateway",
",",
"timeout",
",",
"reverse",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"_copy_remote gateway=%s source=%s dest=%s \"",
"\"reverse=%s\"",
",",
"gateway",
",",
"source",
",",
"dest",
",",
"reverse",
")",
"self",
".",
"_target_count",
"+=",
"len",
"(",
"targets",
")",
"self",
".",
"gwtargets",
".",
"setdefault",
"(",
"str",
"(",
"gateway",
")",
",",
"NodeSet",
"(",
")",
")",
".",
"add",
"(",
"targets",
")",
"# tar commands are built here and launched on targets",
"if",
"reverse",
":",
"# these weird replace calls aim to escape single quotes ' within ''",
"srcdir",
"=",
"dirname",
"(",
"source",
")",
".",
"replace",
"(",
"\"'\"",
",",
"'\\'\\\"\\'\\\"\\''",
")",
"srcbase",
"=",
"basename",
"(",
"normpath",
"(",
"self",
".",
"source",
")",
")",
".",
"replace",
"(",
"\"'\"",
",",
"'\\'\\\"\\'\\\"\\''",
")",
"cmd",
"=",
"self",
".",
"TAR_CMD_FMT",
"%",
"(",
"srcdir",
",",
"srcbase",
")",
"else",
":",
"cmd",
"=",
"self",
".",
"UNTAR_CMD_FMT",
"%",
"dest",
".",
"replace",
"(",
"\"'\"",
",",
"'\\'\\\"\\'\\\"\\''",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'_copy_remote: tar cmd: %s'",
",",
"cmd",
")",
"pchan",
"=",
"self",
".",
"task",
".",
"_pchannel",
"(",
"gateway",
",",
"self",
")",
"pchan",
".",
"shell",
"(",
"nodes",
"=",
"targets",
",",
"command",
"=",
"cmd",
",",
"worker",
"=",
"self",
",",
"timeout",
"=",
"timeout",
",",
"stderr",
"=",
"self",
".",
"stderr",
",",
"gw_invoke_cmd",
"=",
"self",
".",
"invoke_gateway",
",",
"remote",
"=",
"self",
".",
"remote",
")"
] |
https://github.com/cea-hpc/clustershell/blob/c421133ed4baa69e35ff76c476d4097201485344/lib/ClusterShell/Worker/Tree.py#L332-L355
|
||
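The replace() calls in the record above implement the standard POSIX trick for embedding a single quote inside a single-quoted shell string: close the quote, splice in a double-quoted quote, reopen. A sketch of the same escape in isolation:

def shell_single_quote(s):
    # ' -> '"'"' : end quoting, emit a literal quote, resume quoting
    return "'" + s.replace("'", "'\"'\"'") + "'"

print(shell_single_quote("it's here"))   # -> 'it'"'"'s here'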
rainofmine/Face_Attention_Network
|
68393da155da02d365e50e4118ca428eb9d24eb7
|
csv_eval.py
|
python
|
evaluate
|
(
generator,
retinanet,
iou_threshold=0.5,
score_threshold=0.05,
max_detections=100,
save_path=None
)
|
return average_precisions
|
Evaluate a given dataset using a given retinanet.
# Arguments
generator : The generator that represents the dataset to evaluate.
retinanet : The retinanet to evaluate.
iou_threshold : The threshold used to decide when a detection is positive or negative.
score_threshold : The score confidence threshold to use for detections.
max_detections : The maximum number of detections to use per image.
save_path : The path to save images with visualized detections to.
# Returns
A dict mapping class names to mAP scores.
|
Evaluate a given dataset using a given retinanet.
# Arguments
generator : The generator that represents the dataset to evaluate.
retinanet : The retinanet to evaluate.
iou_threshold : The threshold used to decide when a detection is positive or negative.
score_threshold : The score confidence threshold to use for detections.
max_detections : The maximum number of detections to use per image.
save_path : The path to save images with visualized detections to.
# Returns
A dict mapping class names to mAP scores.
|
[
"Evaluate",
"a",
"given",
"dataset",
"using",
"a",
"given",
"retinanet",
".",
"#",
"Arguments",
"generator",
":",
"The",
"generator",
"that",
"represents",
"the",
"dataset",
"to",
"evaluate",
".",
"retinanet",
":",
"The",
"retinanet",
"to",
"evaluate",
".",
"iou_threshold",
":",
"The",
"threshold",
"used",
"to",
"consider",
"when",
"a",
"detection",
"is",
"positive",
"or",
"negative",
".",
"score_threshold",
":",
"The",
"score",
"confidence",
"threshold",
"to",
"use",
"for",
"detections",
".",
"max_detections",
":",
"The",
"maximum",
"number",
"of",
"detections",
"to",
"use",
"per",
"image",
".",
"save_path",
":",
"The",
"path",
"to",
"save",
"images",
"with",
"visualized",
"detections",
"to",
".",
"#",
"Returns",
"A",
"dict",
"mapping",
"class",
"names",
"to",
"mAP",
"scores",
"."
] |
def evaluate(
generator,
retinanet,
iou_threshold=0.5,
score_threshold=0.05,
max_detections=100,
save_path=None
):
""" Evaluate a given dataset using a given retinanet.
# Arguments
generator : The generator that represents the dataset to evaluate.
retinanet : The retinanet to evaluate.
iou_threshold : The threshold used to decide when a detection is positive or negative.
score_threshold : The score confidence threshold to use for detections.
max_detections : The maximum number of detections to use per image.
save_path : The path to save images with visualized detections to.
# Returns
A dict mapping class names to mAP scores.
"""
# gather all detections and annotations
all_detections = _get_detections(generator, retinanet, score_threshold=score_threshold, max_detections=max_detections, save_path=save_path)
all_annotations = _get_annotations(generator)
average_precisions = {}
for label in range(generator.num_classes()):
false_positives = np.zeros((0,))
true_positives = np.zeros((0,))
scores = np.zeros((0,))
num_annotations = 0.0
for i in range(len(generator)):
detections = all_detections[i][label]
annotations = all_annotations[i][label]
num_annotations += annotations.shape[0]
detected_annotations = []
for d in detections:
scores = np.append(scores, d[4])
if annotations.shape[0] == 0:
false_positives = np.append(false_positives, 1)
true_positives = np.append(true_positives, 0)
continue
overlaps = compute_overlap(np.expand_dims(d, axis=0), annotations)
assigned_annotation = np.argmax(overlaps, axis=1)
max_overlap = overlaps[0, assigned_annotation]
if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
false_positives = np.append(false_positives, 0)
true_positives = np.append(true_positives, 1)
detected_annotations.append(assigned_annotation)
else:
false_positives = np.append(false_positives, 1)
true_positives = np.append(true_positives, 0)
# no annotations -> AP for this class is 0 (is this correct?)
if num_annotations == 0:
average_precisions[label] = 0, 0
continue
# sort by score
indices = np.argsort(-scores)
false_positives = false_positives[indices]
true_positives = true_positives[indices]
# compute false positives and true positives
false_positives = np.cumsum(false_positives)
true_positives = np.cumsum(true_positives)
# compute recall and precision
recall = true_positives / num_annotations
precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)
# compute average precision
average_precision = _compute_ap(recall, precision)
average_precisions[label] = average_precision, num_annotations
print('\nmAP:')
for label in range(generator.num_classes()):
label_name = generator.label_to_name(label)
print('{}: {}'.format(label_name, average_precisions[label][0]))
return average_precisions
|
[
"def",
"evaluate",
"(",
"generator",
",",
"retinanet",
",",
"iou_threshold",
"=",
"0.5",
",",
"score_threshold",
"=",
"0.05",
",",
"max_detections",
"=",
"100",
",",
"save_path",
"=",
"None",
")",
":",
"# gather all detections and annotations",
"all_detections",
"=",
"_get_detections",
"(",
"generator",
",",
"retinanet",
",",
"score_threshold",
"=",
"score_threshold",
",",
"max_detections",
"=",
"max_detections",
",",
"save_path",
"=",
"save_path",
")",
"all_annotations",
"=",
"_get_annotations",
"(",
"generator",
")",
"average_precisions",
"=",
"{",
"}",
"for",
"label",
"in",
"range",
"(",
"generator",
".",
"num_classes",
"(",
")",
")",
":",
"false_positives",
"=",
"np",
".",
"zeros",
"(",
"(",
"0",
",",
")",
")",
"true_positives",
"=",
"np",
".",
"zeros",
"(",
"(",
"0",
",",
")",
")",
"scores",
"=",
"np",
".",
"zeros",
"(",
"(",
"0",
",",
")",
")",
"num_annotations",
"=",
"0.0",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"generator",
")",
")",
":",
"detections",
"=",
"all_detections",
"[",
"i",
"]",
"[",
"label",
"]",
"annotations",
"=",
"all_annotations",
"[",
"i",
"]",
"[",
"label",
"]",
"num_annotations",
"+=",
"annotations",
".",
"shape",
"[",
"0",
"]",
"detected_annotations",
"=",
"[",
"]",
"for",
"d",
"in",
"detections",
":",
"scores",
"=",
"np",
".",
"append",
"(",
"scores",
",",
"d",
"[",
"4",
"]",
")",
"if",
"annotations",
".",
"shape",
"[",
"0",
"]",
"==",
"0",
":",
"false_positives",
"=",
"np",
".",
"append",
"(",
"false_positives",
",",
"1",
")",
"true_positives",
"=",
"np",
".",
"append",
"(",
"true_positives",
",",
"0",
")",
"continue",
"overlaps",
"=",
"compute_overlap",
"(",
"np",
".",
"expand_dims",
"(",
"d",
",",
"axis",
"=",
"0",
")",
",",
"annotations",
")",
"assigned_annotation",
"=",
"np",
".",
"argmax",
"(",
"overlaps",
",",
"axis",
"=",
"1",
")",
"max_overlap",
"=",
"overlaps",
"[",
"0",
",",
"assigned_annotation",
"]",
"if",
"max_overlap",
">=",
"iou_threshold",
"and",
"assigned_annotation",
"not",
"in",
"detected_annotations",
":",
"false_positives",
"=",
"np",
".",
"append",
"(",
"false_positives",
",",
"0",
")",
"true_positives",
"=",
"np",
".",
"append",
"(",
"true_positives",
",",
"1",
")",
"detected_annotations",
".",
"append",
"(",
"assigned_annotation",
")",
"else",
":",
"false_positives",
"=",
"np",
".",
"append",
"(",
"false_positives",
",",
"1",
")",
"true_positives",
"=",
"np",
".",
"append",
"(",
"true_positives",
",",
"0",
")",
"# no annotations -> AP for this class is 0 (is this correct?)",
"if",
"num_annotations",
"==",
"0",
":",
"average_precisions",
"[",
"label",
"]",
"=",
"0",
",",
"0",
"continue",
"# sort by score",
"indices",
"=",
"np",
".",
"argsort",
"(",
"-",
"scores",
")",
"false_positives",
"=",
"false_positives",
"[",
"indices",
"]",
"true_positives",
"=",
"true_positives",
"[",
"indices",
"]",
"# compute false positives and true positives",
"false_positives",
"=",
"np",
".",
"cumsum",
"(",
"false_positives",
")",
"true_positives",
"=",
"np",
".",
"cumsum",
"(",
"true_positives",
")",
"# compute recall and precision",
"recall",
"=",
"true_positives",
"/",
"num_annotations",
"precision",
"=",
"true_positives",
"/",
"np",
".",
"maximum",
"(",
"true_positives",
"+",
"false_positives",
",",
"np",
".",
"finfo",
"(",
"np",
".",
"float64",
")",
".",
"eps",
")",
"# compute average precision",
"average_precision",
"=",
"_compute_ap",
"(",
"recall",
",",
"precision",
")",
"average_precisions",
"[",
"label",
"]",
"=",
"average_precision",
",",
"num_annotations",
"print",
"(",
"'\\nmAP:'",
")",
"for",
"label",
"in",
"range",
"(",
"generator",
".",
"num_classes",
"(",
")",
")",
":",
"label_name",
"=",
"generator",
".",
"label_to_name",
"(",
"label",
")",
"print",
"(",
"'{}: {}'",
".",
"format",
"(",
"label_name",
",",
"average_precisions",
"[",
"label",
"]",
"[",
"0",
"]",
")",
")",
"return",
"average_precisions"
] |
https://github.com/rainofmine/Face_Attention_Network/blob/68393da155da02d365e50e4118ca428eb9d24eb7/csv_eval.py#L150-L238
|
|
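The cumsum step at the heart of the evaluate record above is easy to sanity-check on a toy, already score-sorted detection list (values invented; _compute_ap lives elsewhere in that repo and is not reproduced here):

import numpy as np

tp = np.array([1, 1, 0, 1, 0], dtype=float)   # 1 = true positive per detection
fp = 1.0 - tp
num_annotations = 4.0

tp_cum = np.cumsum(tp)                        # [1 2 2 3 3]
fp_cum = np.cumsum(fp)                        # [0 0 1 1 2]
recall = tp_cum / num_annotations             # [0.25 0.5 0.5 0.75 0.75]
precision = tp_cum / np.maximum(tp_cum + fp_cum, np.finfo(np.float64).eps)
print(recall, precision)                      # precision: [1. 1. 0.667 0.75 0.6]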
openstack/magnum
|
fa298eeab19b1d87070d72c7c4fb26cd75b0781e
|
magnum/db/sqlalchemy/alembic/versions/461d798132c7_change_cluster_to_support_nodegroups.py
|
python
|
_handle_json_columns
|
(value, default=None)
|
return default
|
[] |
def _handle_json_columns(value, default=None):
if value is not None:
return jsonutils.loads(value)
return default
|
[
"def",
"_handle_json_columns",
"(",
"value",
",",
"default",
"=",
"None",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"return",
"jsonutils",
".",
"loads",
"(",
"value",
")",
"return",
"default"
] |
https://github.com/openstack/magnum/blob/fa298eeab19b1d87070d72c7c4fb26cd75b0781e/magnum/db/sqlalchemy/alembic/versions/461d798132c7_change_cluster_to_support_nodegroups.py#L39-L42
|
|||
axcore/tartube
|
36dd493642923fe8b9190a41db596c30c043ae90
|
tartube/mainwin.py
|
python
|
MainWin.results_list_update_tooltip
|
(self, video_obj)
|
Called by downloads.DownloadWorker.data_callback().
When downloading a video individually, the tooltips in the Results
List are only updated when the video file is actually downloaded. This
function is called to update the tooltips at the end of every download,
ensuring that any errors/warnings are visible in it.
Args:
video_obj (media.Video): The video which has just been downloaded
individually
|
Called by downloads.DownloadWorker.data_callback().
|
[
"Called",
"by",
"downloads",
".",
"DownloadWorker",
".",
"data_callback",
"()",
"."
] |
def results_list_update_tooltip(self, video_obj):
"""Called by downloads.DownloadWorker.data_callback().
When downloading a video individually, the tooltips in the Results
List are only updated when the video file is actually downloaded. This
function is called to update the tooltips at the end of every download,
ensuring that any errors/warnings are visible in it.
Args:
video_obj (media.Video): The video which has just been downloaded
individually
"""
if DEBUG_FUNC_FLAG:
utils.debug_time('mwn 10275 results_list_update_tooltip')
if video_obj.dbid in self.results_list_row_dict:
# Update the corresponding row in the Results List
row_num = self.results_list_row_dict[video_obj.dbid]
# New rows are being added to the top, so the real row number
# changes on every call to self.results_list_add_row()
if self.app_obj.results_list_reverse_flag:
row_num = self.results_list_row_count - 1 - row_num
tree_path = Gtk.TreePath(row_num)
row_iter = self.results_list_liststore.get_iter(tree_path)
self.results_list_liststore.set(
row_iter,
1,
html.escape(
video_obj.fetch_tooltip_text(
self.app_obj,
self.tooltip_max_len,
True, # Show errors/warnings
),
),
)
|
[
"def",
"results_list_update_tooltip",
"(",
"self",
",",
"video_obj",
")",
":",
"if",
"DEBUG_FUNC_FLAG",
":",
"utils",
".",
"debug_time",
"(",
"'mwn 10275 results_list_update_tooltip'",
")",
"if",
"video_obj",
".",
"dbid",
"in",
"self",
".",
"results_list_row_dict",
":",
"# Update the corresponding row in the Results List",
"row_num",
"=",
"self",
".",
"results_list_row_dict",
"[",
"video_obj",
".",
"dbid",
"]",
"# New rows are being added to the top, so the real row number",
"# changes on every call to self.results_list_add_row()",
"if",
"self",
".",
"app_obj",
".",
"results_list_reverse_flag",
":",
"row_num",
"=",
"self",
".",
"results_list_row_count",
"-",
"1",
"-",
"row_num",
"tree_path",
"=",
"Gtk",
".",
"TreePath",
"(",
"row_num",
")",
"row_iter",
"=",
"self",
".",
"results_list_liststore",
".",
"get_iter",
"(",
"tree_path",
")",
"self",
".",
"results_list_liststore",
".",
"set",
"(",
"row_iter",
",",
"1",
",",
"html",
".",
"escape",
"(",
"video_obj",
".",
"fetch_tooltip_text",
"(",
"self",
".",
"app_obj",
",",
"self",
".",
"tooltip_max_len",
",",
"True",
",",
"# Show errors/warnings",
")",
",",
")",
",",
")"
] |
https://github.com/axcore/tartube/blob/36dd493642923fe8b9190a41db596c30c043ae90/tartube/mainwin.py#L11179-L11220
|
||
google/textfsm
|
65ce6c13f0b0c798a6505366cf17dd54bf285d90
|
textfsm/clitable.py
|
python
|
CliTable.sort
|
(self, cmp=None, key=None, reverse=False)
|
Overrides sort func to use the KeyValue for the key.
|
Overrides sort func to use the KeyValue for the key.
|
[
"Overrides",
"sort",
"func",
"to",
"use",
"the",
"KeyValue",
"for",
"the",
"key",
"."
] |
def sort(self, cmp=None, key=None, reverse=False):
"""Overrides sort func to use the KeyValue for the key."""
if not key and self._keys:
key = self.KeyValue
super(CliTable, self).sort(cmp=cmp, key=key, reverse=reverse)
|
[
"def",
"sort",
"(",
"self",
",",
"cmp",
"=",
"None",
",",
"key",
"=",
"None",
",",
"reverse",
"=",
"False",
")",
":",
"if",
"not",
"key",
"and",
"self",
".",
"_keys",
":",
"key",
"=",
"self",
".",
"KeyValue",
"super",
"(",
"CliTable",
",",
"self",
")",
".",
"sort",
"(",
"cmp",
"=",
"cmp",
",",
"key",
"=",
"key",
",",
"reverse",
"=",
"reverse",
")"
] |
https://github.com/google/textfsm/blob/65ce6c13f0b0c798a6505366cf17dd54bf285d90/textfsm/clitable.py#L356-L360
|
||
caiiiac/Machine-Learning-with-Python
|
1a26c4467da41ca4ebc3d5bd789ea942ef79422f
|
MachineLearning/venv/lib/python3.5/site-packages/scipy/cluster/hierarchy.py
|
python
|
is_valid_im
|
(R, warning=False, throw=False, name=None)
|
return valid
|
Returns True if the inconsistency matrix passed is valid.
It must be a :math:`n` by 4 array of doubles. The standard
deviations ``R[:,1]`` must be nonnegative. The link counts
``R[:,2]`` must be positive and no greater than :math:`n-1`.
Parameters
----------
R : ndarray
The inconsistency matrix to check for validity.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
True if the inconsistency matrix is valid.
|
Returns True if the inconsistency matrix passed is valid.
|
[
"Returns",
"True",
"if",
"the",
"inconsistency",
"matrix",
"passed",
"is",
"valid",
"."
] |
def is_valid_im(R, warning=False, throw=False, name=None):
"""Returns True if the inconsistency matrix passed is valid.
It must be a :math:`n` by 4 array of doubles. The standard
deviations ``R[:,1]`` must be nonnegative. The link counts
``R[:,2]`` must be positive and no greater than :math:`n-1`.
Parameters
----------
R : ndarray
The inconsistency matrix to check for validity.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
True if the inconsistency matrix is valid.
"""
R = np.asarray(R, order='c')
valid = True
name_str = "%r " % name if name else ''
try:
if type(R) != np.ndarray:
raise TypeError('Variable %spassed as inconsistency matrix is not '
'a numpy array.' % name_str)
if R.dtype != np.double:
raise TypeError('Inconsistency matrix %smust contain doubles '
'(double).' % name_str)
if len(R.shape) != 2:
raise ValueError('Inconsistency matrix %smust have shape=2 (i.e. '
'be two-dimensional).' % name_str)
if R.shape[1] != 4:
raise ValueError('Inconsistency matrix %smust have 4 columns.' %
name_str)
if R.shape[0] < 1:
raise ValueError('Inconsistency matrix %smust have at least one '
'row.' % name_str)
if (R[:, 0] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'height means.' % name_str)
if (R[:, 1] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'height standard deviations.' % name_str)
if (R[:, 2] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'counts.' % name_str)
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
|
[
"def",
"is_valid_im",
"(",
"R",
",",
"warning",
"=",
"False",
",",
"throw",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"R",
"=",
"np",
".",
"asarray",
"(",
"R",
",",
"order",
"=",
"'c'",
")",
"valid",
"=",
"True",
"name_str",
"=",
"\"%r \"",
"%",
"name",
"if",
"name",
"else",
"''",
"try",
":",
"if",
"type",
"(",
"R",
")",
"!=",
"np",
".",
"ndarray",
":",
"raise",
"TypeError",
"(",
"'Variable %spassed as inconsistency matrix is not '",
"'a numpy array.'",
"%",
"name_str",
")",
"if",
"R",
".",
"dtype",
"!=",
"np",
".",
"double",
":",
"raise",
"TypeError",
"(",
"'Inconsistency matrix %smust contain doubles '",
"'(double).'",
"%",
"name_str",
")",
"if",
"len",
"(",
"R",
".",
"shape",
")",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"'Inconsistency matrix %smust have shape=2 (i.e. '",
"'be two-dimensional).'",
"%",
"name_str",
")",
"if",
"R",
".",
"shape",
"[",
"1",
"]",
"!=",
"4",
":",
"raise",
"ValueError",
"(",
"'Inconsistency matrix %smust have 4 columns.'",
"%",
"name_str",
")",
"if",
"R",
".",
"shape",
"[",
"0",
"]",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"'Inconsistency matrix %smust have at least one '",
"'row.'",
"%",
"name_str",
")",
"if",
"(",
"R",
"[",
":",
",",
"0",
"]",
"<",
"0",
")",
".",
"any",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'Inconsistency matrix %scontains negative link '",
"'height means.'",
"%",
"name_str",
")",
"if",
"(",
"R",
"[",
":",
",",
"1",
"]",
"<",
"0",
")",
".",
"any",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'Inconsistency matrix %scontains negative link '",
"'height standard deviations.'",
"%",
"name_str",
")",
"if",
"(",
"R",
"[",
":",
",",
"2",
"]",
"<",
"0",
")",
".",
"any",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'Inconsistency matrix %scontains negative link '",
"'counts.'",
"%",
"name_str",
")",
"except",
"Exception",
"as",
"e",
":",
"if",
"throw",
":",
"raise",
"if",
"warning",
":",
"_warning",
"(",
"str",
"(",
"e",
")",
")",
"valid",
"=",
"False",
"return",
"valid"
] |
https://github.com/caiiiac/Machine-Learning-with-Python/blob/1a26c4467da41ca4ebc3d5bd789ea942ef79422f/MachineLearning/venv/lib/python3.5/site-packages/scipy/cluster/hierarchy.py#L1342-L1404
|
|
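A quick exercise of the validator above against hand-built matrices (values invented for illustration):

import numpy as np

R = np.array([[1.0, 0.1, 2.0, 0.5]])      # [mean, std, count, inconsistency]
print(is_valid_im(R))                     # True
print(is_valid_im(R.astype(np.int64)))    # False: entries must be doubles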
ProjectQ-Framework/ProjectQ
|
0d32c1610ba4e9aefd7f19eb52dadb4fbe5f9005
|
projectq/meta/_dagger.py
|
python
|
Dagger.__exit__
|
(self, exc_type, exc_value, exc_traceback)
|
Context manager exit function.
|
Context manager exit function.
|
[
"Context",
"manager",
"exit",
"function",
"."
] |
def __exit__(self, exc_type, exc_value, exc_traceback):
"""Context manager exit function."""
# If an error happens in this context, qubits might not have been
# deallocated because that code section was not yet executed,
# so don't check and raise an additional error.
if exc_type is not None:
return
# run dagger engine
self._dagger_eng.run()
self._dagger_eng = None
# remove dagger handler from engine list (i.e. skip it)
drop_engine_after(self.engine)
|
[
"def",
"__exit__",
"(",
"self",
",",
"exc_type",
",",
"exc_value",
",",
"exc_traceback",
")",
":",
"# If an error happens in this context, qubits might not have been",
"# deallocated because that code section was not yet executed,",
"# so don't check and raise an additional error.",
"if",
"exc_type",
"is",
"not",
"None",
":",
"return",
"# run dagger engine",
"self",
".",
"_dagger_eng",
".",
"run",
"(",
")",
"self",
".",
"_dagger_eng",
"=",
"None",
"# remove dagger handler from engine list (i.e. skip it)",
"drop_engine_after",
"(",
"self",
".",
"engine",
")"
] |
https://github.com/ProjectQ-Framework/ProjectQ/blob/0d32c1610ba4e9aefd7f19eb52dadb4fbe5f9005/projectq/meta/_dagger.py#L130-L141
|
||
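The exc_type guard in the record above is the standard way for a context manager to skip its teardown when the body raised; a generic sketch of the same pattern, unrelated to ProjectQ's engines:

class RunOnCleanExit:
    def __init__(self, callback):
        self.callback = callback
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, exc_traceback):
        if exc_type is not None:
            return           # body raised: skip teardown, let the error propagate
        self.callback()      # only runs when the with-block completed normally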
OneDrive/onedrive-sdk-python
|
e5642f8cad8eea37a4f653c1a23dfcfc06c37110
|
src/onedrivesdk/model/quota.py
|
python
|
Quota.total
|
(self)
|
Gets and sets the total
Returns:
int:
The total
|
Gets and sets the total
Returns:
int:
The total
|
[
"Gets",
"and",
"sets",
"the",
"total",
"Returns",
":",
"int",
":",
"The",
"total"
] |
def total(self):
"""Gets and sets the total
Returns:
int:
The total
"""
if "total" in self._prop_dict:
return self._prop_dict["total"]
else:
return None
|
[
"def",
"total",
"(",
"self",
")",
":",
"if",
"\"total\"",
"in",
"self",
".",
"_prop_dict",
":",
"return",
"self",
".",
"_prop_dict",
"[",
"\"total\"",
"]",
"else",
":",
"return",
"None"
] |
https://github.com/OneDrive/onedrive-sdk-python/blob/e5642f8cad8eea37a4f653c1a23dfcfc06c37110/src/onedrivesdk/model/quota.py#L70-L80
|
||
CompVis/adaptive-style-transfer
|
51b4c90dbd998d9efd1dc821ad7a8df69bef61da
|
evaluation/feature_extractor/nets/vgg.py
|
python
|
vgg_16
|
(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.5,
spatial_squeeze=True,
scope='vgg_16',
add_classifier=True)
|
Oxford Net VGG 16-Layers version D Example.
Note: All the fully_connected layers have been transformed to conv2d layers.
To use in classification mode, resize input to 224x224.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes.
is_training: whether or not the model is being trained.
dropout_keep_prob: the probability that activations are kept in the dropout
layers during training.
spatial_squeeze: whether or not to squeeze the spatial dimensions of the
outputs. Useful to remove unnecessary dimensions for classification.
scope: Optional scope for the variables.
add_classifier: should construct softmax classifier on top or not
Returns:
the last op containing the log predictions and end_points dict.
|
Oxford Net VGG 16-Layers version D Example.
|
[
"Oxford",
"Net",
"VGG",
"16",
"-",
"Layers",
"version",
"D",
"Example",
"."
] |
def vgg_16(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.5,
spatial_squeeze=True,
scope='vgg_16',
add_classifier=True):
"""Oxford Net VGG 16-Layers version D Example.
Note: All the fully_connected layers have been transformed to conv2d layers.
To use in classification mode, resize input to 224x224.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes.
is_training: whether or not the model is being trained.
dropout_keep_prob: the probability that activations are kept in the dropout
layers during training.
spatial_squeeze: whether or not to squeeze the spatial dimensions of the
outputs. Useful to remove unnecessary dimensions for classification.
scope: Optional scope for the variables.
add_classifier: should construct softmax classifier on top or not
Returns:
the last op containing the log predictions and end_points dict.
"""
with tf.variable_scope(scope, 'vgg_16', [inputs]) as sc:
end_points_collection = sc.name + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d.
with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
outputs_collections=end_points_collection):
net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
net = slim.max_pool2d(net, [2, 2], scope='pool1')
net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
net = slim.max_pool2d(net, [2, 2], scope='pool3')
net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
net = slim.max_pool2d(net, [2, 2], scope='pool4')
net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
net = slim.max_pool2d(net, [2, 2], scope='pool5')
# Use conv2d instead of fully_connected layers.
net = slim.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6')
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='dropout6')
net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='dropout7')
if add_classifier:
net = slim.conv2d(net, num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
scope='fc8')
# Convert end_points_collection into an end_point dict.
end_points = slim.utils.convert_collection_to_dict(end_points_collection)
if add_classifier and spatial_squeeze:
net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
pos = net.name.find('fc8/squeezed')
prefix = net.name[:pos]
end_points[prefix + 'fc8'] = net
return net, end_points
|
[
"def",
"vgg_16",
"(",
"inputs",
",",
"num_classes",
"=",
"1000",
",",
"is_training",
"=",
"True",
",",
"dropout_keep_prob",
"=",
"0.5",
",",
"spatial_squeeze",
"=",
"True",
",",
"scope",
"=",
"'vgg_16'",
",",
"add_classifier",
"=",
"True",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"scope",
",",
"'vgg_16'",
",",
"[",
"inputs",
"]",
")",
"as",
"sc",
":",
"end_points_collection",
"=",
"sc",
".",
"name",
"+",
"'_end_points'",
"# Collect outputs for conv2d, fully_connected and max_pool2d.",
"with",
"slim",
".",
"arg_scope",
"(",
"[",
"slim",
".",
"conv2d",
",",
"slim",
".",
"fully_connected",
",",
"slim",
".",
"max_pool2d",
"]",
",",
"outputs_collections",
"=",
"end_points_collection",
")",
":",
"net",
"=",
"slim",
".",
"repeat",
"(",
"inputs",
",",
"2",
",",
"slim",
".",
"conv2d",
",",
"64",
",",
"[",
"3",
",",
"3",
"]",
",",
"scope",
"=",
"'conv1'",
")",
"net",
"=",
"slim",
".",
"max_pool2d",
"(",
"net",
",",
"[",
"2",
",",
"2",
"]",
",",
"scope",
"=",
"'pool1'",
")",
"net",
"=",
"slim",
".",
"repeat",
"(",
"net",
",",
"2",
",",
"slim",
".",
"conv2d",
",",
"128",
",",
"[",
"3",
",",
"3",
"]",
",",
"scope",
"=",
"'conv2'",
")",
"net",
"=",
"slim",
".",
"max_pool2d",
"(",
"net",
",",
"[",
"2",
",",
"2",
"]",
",",
"scope",
"=",
"'pool2'",
")",
"net",
"=",
"slim",
".",
"repeat",
"(",
"net",
",",
"3",
",",
"slim",
".",
"conv2d",
",",
"256",
",",
"[",
"3",
",",
"3",
"]",
",",
"scope",
"=",
"'conv3'",
")",
"net",
"=",
"slim",
".",
"max_pool2d",
"(",
"net",
",",
"[",
"2",
",",
"2",
"]",
",",
"scope",
"=",
"'pool3'",
")",
"net",
"=",
"slim",
".",
"repeat",
"(",
"net",
",",
"3",
",",
"slim",
".",
"conv2d",
",",
"512",
",",
"[",
"3",
",",
"3",
"]",
",",
"scope",
"=",
"'conv4'",
")",
"net",
"=",
"slim",
".",
"max_pool2d",
"(",
"net",
",",
"[",
"2",
",",
"2",
"]",
",",
"scope",
"=",
"'pool4'",
")",
"net",
"=",
"slim",
".",
"repeat",
"(",
"net",
",",
"3",
",",
"slim",
".",
"conv2d",
",",
"512",
",",
"[",
"3",
",",
"3",
"]",
",",
"scope",
"=",
"'conv5'",
")",
"net",
"=",
"slim",
".",
"max_pool2d",
"(",
"net",
",",
"[",
"2",
",",
"2",
"]",
",",
"scope",
"=",
"'pool5'",
")",
"# Use conv2d instead of fully_connected layers.",
"net",
"=",
"slim",
".",
"conv2d",
"(",
"net",
",",
"4096",
",",
"[",
"7",
",",
"7",
"]",
",",
"padding",
"=",
"'VALID'",
",",
"scope",
"=",
"'fc6'",
")",
"net",
"=",
"slim",
".",
"dropout",
"(",
"net",
",",
"dropout_keep_prob",
",",
"is_training",
"=",
"is_training",
",",
"scope",
"=",
"'dropout6'",
")",
"net",
"=",
"slim",
".",
"conv2d",
"(",
"net",
",",
"4096",
",",
"[",
"1",
",",
"1",
"]",
",",
"scope",
"=",
"'fc7'",
")",
"net",
"=",
"slim",
".",
"dropout",
"(",
"net",
",",
"dropout_keep_prob",
",",
"is_training",
"=",
"is_training",
",",
"scope",
"=",
"'dropout7'",
")",
"if",
"add_classifier",
":",
"net",
"=",
"slim",
".",
"conv2d",
"(",
"net",
",",
"num_classes",
",",
"[",
"1",
",",
"1",
"]",
",",
"activation_fn",
"=",
"None",
",",
"normalizer_fn",
"=",
"None",
",",
"scope",
"=",
"'fc8'",
")",
"# Convert end_points_collection into a end_point dict.",
"end_points",
"=",
"slim",
".",
"utils",
".",
"convert_collection_to_dict",
"(",
"end_points_collection",
")",
"if",
"add_classifier",
"and",
"spatial_squeeze",
":",
"net",
"=",
"tf",
".",
"squeeze",
"(",
"net",
",",
"[",
"1",
",",
"2",
"]",
",",
"name",
"=",
"'fc8/squeezed'",
")",
"pos",
"=",
"net",
".",
"name",
".",
"find",
"(",
"'fc8/squeezed'",
")",
"prefix",
"=",
"net",
".",
"name",
"[",
":",
"pos",
"]",
"end_points",
"[",
"prefix",
"+",
"'fc8'",
"]",
"=",
"net",
"return",
"net",
",",
"end_points"
] |
https://github.com/CompVis/adaptive-style-transfer/blob/51b4c90dbd998d9efd1dc821ad7a8df69bef61da/evaluation/feature_extractor/nets/vgg.py#L127-L188
|
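A minimal usage sketch for the vgg_16 builder above, assuming a TF 1.x environment with tf.contrib.slim available; the import path is an assumption based on the file path evaluation/feature_extractor/nets/vgg.py, not something this record states:

import tensorflow as tf
from nets import vgg  # assumed import path

# 224x224 inputs, as the docstring requires for classification mode.
images = tf.placeholder(tf.float32, [None, 224, 224, 3])
logits, end_points = vgg.vgg_16(images, num_classes=1000, is_training=False)
# end_points keys are the layer scope names (e.g. 'vgg_16/conv5/conv5_3'),
# so intermediate activations can be pulled out for feature extraction.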
||
mattrobenolt/django-sudo
|
abf3fdc5d34ed325722fd0252e48e3402cfdabaa
|
tasks.py
|
python
|
clean
|
(c)
|
Clean working directory
|
Clean working directory
|
[
"Clean",
"working",
"directory"
] |
def clean(c):
"Clean working directory"
run("rm -rf *.egg-info *.egg")
run("rm -rf dist build")
|
[
"def",
"clean",
"(",
"c",
")",
":",
"run",
"(",
"\"rm -rf *.egg-info *.egg\"",
")",
"run",
"(",
"\"rm -rf dist build\"",
")"
] |
https://github.com/mattrobenolt/django-sudo/blob/abf3fdc5d34ed325722fd0252e48e3402cfdabaa/tasks.py#L31-L34
|
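The task above receives an Invoke context `c` but calls the module-level run(); a hedged sketch of the same task in the form current Invoke releases document, using the context's own run method:

from invoke import task

@task
def clean(c):
    """Clean working directory."""
    c.run("rm -rf *.egg-info *.egg")
    c.run("rm -rf dist build")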
||
Jajcus/pyxmpp2
|
59e5fd7c8837991ac265dc6aad23a6bd256768a7
|
pyxmpp2/streamtls.py
|
python
|
StreamTLSHandler._make_tls_connection
|
(self)
|
Initiate TLS connection.
[initiating entity only]
|
Initiate TLS connection.
|
[
"Initiate",
"TLS",
"connection",
"."
] |
def _make_tls_connection(self):
"""Initiate TLS connection.
[initiating entity only]
"""
logger.debug("Preparing TLS connection")
if self.settings["tls_verify_peer"]:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
self.stream.transport.starttls(
keyfile = self.settings["tls_key_file"],
certfile = self.settings["tls_cert_file"],
server_side = not self.stream.initiator,
cert_reqs = cert_reqs,
ssl_version = ssl.PROTOCOL_TLSv1,
ca_certs = self.settings["tls_cacert_file"],
do_handshake_on_connect = False,
)
|
[
"def",
"_make_tls_connection",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"\"Preparing TLS connection\"",
")",
"if",
"self",
".",
"settings",
"[",
"\"tls_verify_peer\"",
"]",
":",
"cert_reqs",
"=",
"ssl",
".",
"CERT_REQUIRED",
"else",
":",
"cert_reqs",
"=",
"ssl",
".",
"CERT_NONE",
"self",
".",
"stream",
".",
"transport",
".",
"starttls",
"(",
"keyfile",
"=",
"self",
".",
"settings",
"[",
"\"tls_key_file\"",
"]",
",",
"certfile",
"=",
"self",
".",
"settings",
"[",
"\"tls_cert_file\"",
"]",
",",
"server_side",
"=",
"not",
"self",
".",
"stream",
".",
"initiator",
",",
"cert_reqs",
"=",
"cert_reqs",
",",
"ssl_version",
"=",
"ssl",
".",
"PROTOCOL_TLSv1",
",",
"ca_certs",
"=",
"self",
".",
"settings",
"[",
"\"tls_cacert_file\"",
"]",
",",
"do_handshake_on_connect",
"=",
"False",
",",
")"
] |
https://github.com/Jajcus/pyxmpp2/blob/59e5fd7c8837991ac265dc6aad23a6bd256768a7/pyxmpp2/streamtls.py#L166-L184
|
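The starttls() call above passes the pre-SSLContext ssl flags, and ssl.PROTOCOL_TLSv1 is deprecated in current Python. A minimal sketch of the equivalent client-side setup on modern Python, driven by the same settings keys used above:

import ssl

def make_client_context(settings):
    # PROTOCOL_TLS_CLIENT negotiates the best available TLS version and
    # enables certificate and hostname checks by default, replacing the
    # explicit CERT_REQUIRED / PROTOCOL_TLSv1 combination above.
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    if not settings["tls_verify_peer"]:
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
    if settings["tls_cacert_file"]:
        context.load_verify_locations(settings["tls_cacert_file"])
    if settings["tls_cert_file"]:
        context.load_cert_chain(settings["tls_cert_file"],
                                keyfile=settings["tls_key_file"])
    return context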
||
Esri/ArcREST
|
ab240fde2b0200f61d4a5f6df033516e53f2f416
|
src/arcrest/common/general.py
|
python
|
Feature.set_value
|
(self, field_name, value)
|
return True
|
sets an attribute value for a given field name
|
sets an attribute value for a given field name
|
[
"sets",
"an",
"attribute",
"value",
"for",
"a",
"given",
"field",
"name"
] |
def set_value(self, field_name, value):
""" sets an attribute value for a given field name """
if field_name in self.fields:
if not value is None:
self._dict['attributes'][field_name] = _unicode_convert(value)
else:
pass
elif field_name.upper() in ['SHAPE', 'SHAPE@', "GEOMETRY"]:
if isinstance(value, dict):
if 'geometry' in value:
self._dict['geometry'] = value['geometry']
elif any(k in value.keys() for k in ['x','y','points','paths','rings', 'spatialReference']):
self._dict['geometry'] = value
elif isinstance(value, AbstractGeometry):
self._dict['geometry'] = value.asDictionary
elif arcpyFound:
if isinstance(value, arcpy.Geometry) and \
value.type == self.geometryType:
self._dict['geometry']=json.loads(value.JSON)
self._geom = None
self._geom = self.geometry
else:
return False
self._json = json.dumps(self._dict, default=_date_handler)
return True
|
[
"def",
"set_value",
"(",
"self",
",",
"field_name",
",",
"value",
")",
":",
"if",
"field_name",
"in",
"self",
".",
"fields",
":",
"if",
"not",
"value",
"is",
"None",
":",
"self",
".",
"_dict",
"[",
"'attributes'",
"]",
"[",
"field_name",
"]",
"=",
"_unicode_convert",
"(",
"value",
")",
"else",
":",
"pass",
"elif",
"field_name",
".",
"upper",
"(",
")",
"in",
"[",
"'SHAPE'",
",",
"'SHAPE@'",
",",
"\"GEOMETRY\"",
"]",
":",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"if",
"'geometry'",
"in",
"value",
":",
"self",
".",
"_dict",
"[",
"'geometry'",
"]",
"=",
"value",
"[",
"'geometry'",
"]",
"elif",
"any",
"(",
"k",
"in",
"value",
".",
"keys",
"(",
")",
"for",
"k",
"in",
"[",
"'x'",
",",
"'y'",
",",
"'points'",
",",
"'paths'",
",",
"'rings'",
",",
"'spatialReference'",
"]",
")",
":",
"self",
".",
"_dict",
"[",
"'geometry'",
"]",
"=",
"value",
"elif",
"isinstance",
"(",
"value",
",",
"AbstractGeometry",
")",
":",
"self",
".",
"_dict",
"[",
"'geometry'",
"]",
"=",
"value",
".",
"asDictionary",
"elif",
"arcpyFound",
":",
"if",
"isinstance",
"(",
"value",
",",
"arcpy",
".",
"Geometry",
")",
"and",
"value",
".",
"type",
"==",
"self",
".",
"geometryType",
":",
"self",
".",
"_dict",
"[",
"'geometry'",
"]",
"=",
"json",
".",
"loads",
"(",
"value",
".",
"JSON",
")",
"self",
".",
"_geom",
"=",
"None",
"self",
".",
"_geom",
"=",
"self",
".",
"geometry",
"else",
":",
"return",
"False",
"self",
".",
"_json",
"=",
"json",
".",
"dumps",
"(",
"self",
".",
"_dict",
",",
"default",
"=",
"_date_handler",
")",
"return",
"True"
] |
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/common/general.py#L136-L160
|
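A usage sketch for Feature.set_value; `feat` is an assumed Feature instance, and the field name and coordinates are illustrative only:

feat.set_value('STATUS', 'approved')           # plain attribute on a known field
feat.set_value('SHAPE', {'x': -117.19, 'y': 34.05,
                         'spatialReference': {'wkid': 4326}})  # point geometry
# Returns True except when a SHAPE/SHAPE@/GEOMETRY value of an
# unrecognized type is passed, which hits the `return False` branch;
# unknown attribute names simply fall through without being stored.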
|
GoogleCloudPlatform/PerfKitBenchmarker
|
6e3412d7d5e414b8ca30ed5eaf970cef1d919a67
|
perfkitbenchmarker/linux_packages/node_js.py
|
python
|
_Uninstall
|
(vm)
|
Uninstalls the node.js package on the VM.
|
Uninstalls the node.js package on the VM.
|
[
"Uninstalls",
"the",
"node",
".",
"js",
"package",
"on",
"the",
"VM",
"."
] |
def _Uninstall(vm):
"""Uninstalls the node.js package on the VM."""
vm.RemoteCommand('cd {0} && sudo make uninstall'.format(NODE_DIR))
|
[
"def",
"_Uninstall",
"(",
"vm",
")",
":",
"vm",
".",
"RemoteCommand",
"(",
"'cd {0} && sudo make uninstall'",
".",
"format",
"(",
"NODE_DIR",
")",
")"
] |
https://github.com/GoogleCloudPlatform/PerfKitBenchmarker/blob/6e3412d7d5e414b8ca30ed5eaf970cef1d919a67/perfkitbenchmarker/linux_packages/node_js.py#L44-L46
|
||
pypa/pip
|
7f8a6844037fb7255cfd0d34ff8e8cf44f2598d4
|
src/pip/_vendor/distlib/database.py
|
python
|
DistributionPath.distinfo_dirname
|
(cls, name, version)
|
return '-'.join([name, version]) + DISTINFO_EXT
|
The *name* and *version* parameters are converted into their
filename-escaped form, i.e. any ``'-'`` characters are replaced
with ``'_'`` other than the one in ``'dist-info'`` and the one
separating the name from the version number.
:parameter name: is converted to a standard distribution name by replacing
any runs of non- alphanumeric characters with a single
``'-'``.
:type name: string
:parameter version: is converted to a standard version string. Spaces
become dots, and all other non-alphanumeric characters
(except dots) become dashes, with runs of multiple
dashes condensed to a single dash.
:type version: string
:returns: directory name
:rtype: string
|
The *name* and *version* parameters are converted into their
filename-escaped form, i.e. any ``'-'`` characters are replaced
with ``'_'`` other than the one in ``'dist-info'`` and the one
separating the name from the version number.
|
[
"The",
"*",
"name",
"*",
"and",
"*",
"version",
"*",
"parameters",
"are",
"converted",
"into",
"their",
"filename",
"-",
"escaped",
"form",
"i",
".",
"e",
".",
"any",
"-",
"characters",
"are",
"replaced",
"with",
"_",
"other",
"than",
"the",
"one",
"in",
"dist",
"-",
"info",
"and",
"the",
"one",
"separating",
"the",
"name",
"from",
"the",
"version",
"number",
"."
] |
def distinfo_dirname(cls, name, version):
"""
The *name* and *version* parameters are converted into their
filename-escaped form, i.e. any ``'-'`` characters are replaced
with ``'_'`` other than the one in ``'dist-info'`` and the one
separating the name from the version number.
:parameter name: is converted to a standard distribution name by replacing
any runs of non- alphanumeric characters with a single
``'-'``.
:type name: string
:parameter version: is converted to a standard version string. Spaces
become dots, and all other non-alphanumeric characters
(except dots) become dashes, with runs of multiple
dashes condensed to a single dash.
:type version: string
:returns: directory name
:rtype: string"""
name = name.replace('-', '_')
return '-'.join([name, version]) + DISTINFO_EXT
|
[
"def",
"distinfo_dirname",
"(",
"cls",
",",
"name",
",",
"version",
")",
":",
"name",
"=",
"name",
".",
"replace",
"(",
"'-'",
",",
"'_'",
")",
"return",
"'-'",
".",
"join",
"(",
"[",
"name",
",",
"version",
"]",
")",
"+",
"DISTINFO_EXT"
] |
https://github.com/pypa/pip/blob/7f8a6844037fb7255cfd0d34ff8e8cf44f2598d4/src/pip/_vendor/distlib/database.py#L179-L198
|
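A worked example of the escaping the docstring describes; in distlib, DISTINFO_EXT is '.dist-info', and the cls parameter indicates this is a classmethod on DistributionPath:

from pip._vendor.distlib.database import DistributionPath

DistributionPath.distinfo_dirname('pip-tools', '1.11.0')
# -> 'pip_tools-1.11.0.dist-info'
# Only the dashes inside the name become underscores; the dash separating
# name from version and the one in 'dist-info' are left alone.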
|
thinkle/gourmet
|
8af29c8ded24528030e5ae2ea3461f61c1e5a575
|
gourmet/plugins/import_export/website_import_plugins/cooksillustrated_plugin.py
|
python
|
LogInWebReader.read
|
(self)
|
[] |
def read (self):
self.emit('progress',0,_('Logging into %s')%'www.cooksillustrated.com')
global driver
if driver:
# Don't log in twice :)
self.d = driver
else:
#self.d = webdriver.Chrome()
self.d = webdriver.Firefox()
print('Logging in...')
driver = self.d
self.d.get('https://www.cooksillustrated.com/sign_in/')
username,pw = self.get_username_and_pw()
#un=self.d.find_element_by_xpath('//*[@name="user[email]"]')
un=self.d.find_element_by_xpath('//*[@id="email"]')
print('Got email element',un)
un.send_keys(username)
#pw_el = self.d.find_element_by_xpath('//*[@name="user[password]"]')
pw_el = self.d.find_element_by_xpath('//*[@id="password"]')
print('Got password element',pw_el)
pw_el.send_keys(pw+'\n')
# Now get URL
# First log in...
self.emit('progress',0.5,_('Logging into %s')%'www.cooksillustrated.com')
self.emit('progress',0.6,_('Retrieving %s')%self.url)
self.d.get(self.url)
self.emit('progress',1,_('Retrieving %s')%self.url)
self.content_type = 'text/html'
self.data = self.d.page_source
|
[
"def",
"read",
"(",
"self",
")",
":",
"self",
".",
"emit",
"(",
"'progress'",
",",
"0",
",",
"_",
"(",
"'Logging into %s'",
")",
"%",
"'www.cooksillustrated.com'",
")",
"global",
"driver",
"if",
"driver",
":",
"# Don't log in twice :)",
"self",
".",
"d",
"=",
"driver",
"else",
":",
"#self.d = webdriver.Chrome()",
"self",
".",
"d",
"=",
"webdriver",
".",
"Firefox",
"(",
")",
"print",
"(",
"'Logging in...'",
")",
"driver",
"=",
"self",
".",
"d",
"self",
".",
"d",
".",
"get",
"(",
"'https://www.cooksillustrated.com/sign_in/'",
")",
"username",
",",
"pw",
"=",
"self",
".",
"get_username_and_pw",
"(",
")",
"#un=self.d.find_element_by_xpath('//*[@name=\"user[email]\"]')",
"un",
"=",
"self",
".",
"d",
".",
"find_element_by_xpath",
"(",
"'//*[@id=\"email\"]'",
")",
"print",
"(",
"'Got email element'",
",",
"un",
")",
"un",
".",
"send_keys",
"(",
"username",
")",
"#pw_el = self.d.find_element_by_xpath('//*[@name=\"user[password]\"]')",
"pw_el",
"=",
"self",
".",
"d",
".",
"find_element_by_xpath",
"(",
"'//*[@id=\"password\"]'",
")",
"print",
"(",
"'Got password element'",
",",
"pw_el",
")",
"pw_el",
".",
"send_keys",
"(",
"pw",
"+",
"'\\n'",
")",
"# Now get URL",
"# First log in...",
"self",
".",
"emit",
"(",
"'progress'",
",",
"0.5",
",",
"_",
"(",
"'Logging into %s'",
")",
"%",
"'www.cooksillustrated.com'",
")",
"self",
".",
"emit",
"(",
"'progress'",
",",
"0.6",
",",
"_",
"(",
"'Retrieving %s'",
")",
"%",
"self",
".",
"url",
")",
"self",
".",
"d",
".",
"get",
"(",
"self",
".",
"url",
")",
"self",
".",
"emit",
"(",
"'progress'",
",",
"1",
",",
"_",
"(",
"'Retrieving %s'",
")",
"%",
"self",
".",
"url",
")",
"self",
".",
"content_type",
"=",
"'text/html'",
"self",
".",
"data",
"=",
"self",
".",
"d",
".",
"page_source"
] |
https://github.com/thinkle/gourmet/blob/8af29c8ded24528030e5ae2ea3461f61c1e5a575/gourmet/plugins/import_export/website_import_plugins/cooksillustrated_plugin.py#L59-L87
|
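The driver calls above use find_element_by_xpath, which was removed in Selenium 4; a hedged sketch of the same login lookups with the current locator API (the credentials are placeholders):

from selenium import webdriver
from selenium.webdriver.common.by import By

d = webdriver.Firefox()
d.get('https://www.cooksillustrated.com/sign_in/')
d.find_element(By.XPATH, '//*[@id="email"]').send_keys('user@example.com')
d.find_element(By.XPATH, '//*[@id="password"]').send_keys('secret\n')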
||||
flow-project/flow
|
a511c41c48e6b928bb2060de8ad1ef3c3e3d9554
|
flow/networks/traffic_light_grid.py
|
python
|
TrafficLightGridNetwork.specify_edges
|
(self, net_params)
|
return self._inner_edges + self._outer_edges
|
See parent class.
|
See parent class.
|
[
"See",
"parent",
"class",
"."
] |
def specify_edges(self, net_params):
"""See parent class."""
return self._inner_edges + self._outer_edges
|
[
"def",
"specify_edges",
"(",
"self",
",",
"net_params",
")",
":",
"return",
"self",
".",
"_inner_edges",
"+",
"self",
".",
"_outer_edges"
] |
https://github.com/flow-project/flow/blob/a511c41c48e6b928bb2060de8ad1ef3c3e3d9554/flow/networks/traffic_light_grid.py#L170-L172
|
|
ShuLiu1993/PANet
|
f055f716a21896dab8907c46c12e216323baefdb
|
lib/utils/blob.py
|
python
|
get_max_shape
|
(im_shapes)
|
return max_shape
|
Calculate max spatial size (h, w) for batching given a list of image shapes
|
Calculate max spatial size (h, w) for batching given a list of image shapes
|
[
"Calculate",
"max",
"spatial",
"size",
"(",
"h",
"w",
")",
"for",
"batching",
"given",
"a",
"list",
"of",
"image",
"shapes"
] |
def get_max_shape(im_shapes):
"""Calculate max spatial size (h, w) for batching given a list of image shapes
"""
max_shape = np.array(im_shapes).max(axis=0)
assert max_shape.size == 2
    # Pad the image so its dimensions are divisible by the stride
if cfg.FPN.FPN_ON:
stride = float(cfg.FPN.COARSEST_STRIDE)
max_shape[0] = int(np.ceil(max_shape[0] / stride) * stride)
max_shape[1] = int(np.ceil(max_shape[1] / stride) * stride)
return max_shape
|
[
"def",
"get_max_shape",
"(",
"im_shapes",
")",
":",
"max_shape",
"=",
"np",
".",
"array",
"(",
"im_shapes",
")",
".",
"max",
"(",
"axis",
"=",
"0",
")",
"assert",
"max_shape",
".",
"size",
"==",
"2",
"# Pad the image so they can be divisible by a stride",
"if",
"cfg",
".",
"FPN",
".",
"FPN_ON",
":",
"stride",
"=",
"float",
"(",
"cfg",
".",
"FPN",
".",
"COARSEST_STRIDE",
")",
"max_shape",
"[",
"0",
"]",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"max_shape",
"[",
"0",
"]",
"/",
"stride",
")",
"*",
"stride",
")",
"max_shape",
"[",
"1",
"]",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"max_shape",
"[",
"1",
"]",
"/",
"stride",
")",
"*",
"stride",
")",
"return",
"max_shape"
] |
https://github.com/ShuLiu1993/PANet/blob/f055f716a21896dab8907c46c12e216323baefdb/lib/utils/blob.py#L91-L101
|
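A worked example of the FPN padding above, assuming cfg.FPN.COARSEST_STRIDE = 32: for image shapes (480, 640) and (500, 600) the elementwise max is (500, 640), and each side is then rounded up to a multiple of the stride:

import numpy as np

stride = 32.0
max_shape = np.array([[480, 640], [500, 600]]).max(axis=0)   # [500, 640]
max_shape[0] = int(np.ceil(max_shape[0] / stride) * stride)  # 512
max_shape[1] = int(np.ceil(max_shape[1] / stride) * stride)  # 640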
|
WikidPad/WikidPad
|
558109638807bc76b4672922686e416ab2d5f79c
|
WikidPad/lib/pwiki/WikiTxtCtrl.py
|
python
|
ViHandler.GotoVisualLineStart
|
(self)
|
Move caret to start of the visual line
|
Move caret to start of the visual line
|
[
"Move",
"caret",
"to",
"start",
"of",
"the",
"visual",
"line"
] |
def GotoVisualLineStart(self):
"""
Move caret to start of the visual line
"""
self.ctrl.HomeDisplay()
|
[
"def",
"GotoVisualLineStart",
"(",
"self",
")",
":",
"self",
".",
"ctrl",
".",
"HomeDisplay",
"(",
")"
] |
https://github.com/WikidPad/WikidPad/blob/558109638807bc76b4672922686e416ab2d5f79c/WikidPad/lib/pwiki/WikiTxtCtrl.py#L7850-L7854
|
||
cszn/KAIR
|
72e93351bca41d1b1f6a4c3e1957f5bffccc7101
|
models/model_base.py
|
python
|
ModelBase.get_bare_model
|
(self, network)
|
return network
|
Get bare model, especially under wrapping with
DistributedDataParallel or DataParallel.
|
Get bare model, especially under wrapping with
DistributedDataParallel or DataParallel.
|
[
"Get",
"bare",
"model",
"especially",
"under",
"wrapping",
"with",
"DistributedDataParallel",
"or",
"DataParallel",
"."
] |
def get_bare_model(self, network):
"""Get bare model, especially under wrapping with
DistributedDataParallel or DataParallel.
"""
if isinstance(network, (DataParallel, DistributedDataParallel)):
network = network.module
return network
|
[
"def",
"get_bare_model",
"(",
"self",
",",
"network",
")",
":",
"if",
"isinstance",
"(",
"network",
",",
"(",
"DataParallel",
",",
"DistributedDataParallel",
")",
")",
":",
"network",
"=",
"network",
".",
"module",
"return",
"network"
] |
https://github.com/cszn/KAIR/blob/72e93351bca41d1b1f6a4c3e1957f5bffccc7101/models/model_base.py#L89-L95
|
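A usage sketch of the unwrapping above with a toy torch module; saving the bare module's state_dict avoids the 'module.' key prefix that DataParallel wrappers introduce:

import torch
import torch.nn as nn

net = nn.DataParallel(nn.Linear(4, 2))
bare = net.module if isinstance(net, (nn.DataParallel,
                                      nn.parallel.DistributedDataParallel)) else net
torch.save(bare.state_dict(), 'model.pth')  # keys like 'weight', not 'module.weight'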
|
mrJean1/PyGeodesy
|
7da5ca71aa3edb7bc49e219e0b8190686e1a7965
|
pygeodesy/named.py
|
python
|
_xjoined_
|
(prefix, name)
|
return _SPACE_(prefix, repr(name)) if name and prefix else (prefix or name)
|
(INTERNAL) Join C{pref} and non-empty C{name}.
|
(INTERNAL) Join C{pref} and non-empty C{name}.
|
[
"(",
"INTERNAL",
")",
"Join",
"C",
"{",
"pref",
"}",
"and",
"non",
"-",
"empty",
"C",
"{",
"name",
"}",
"."
] |
def _xjoined_(prefix, name):
'''(INTERNAL) Join C{pref} and non-empty C{name}.
'''
return _SPACE_(prefix, repr(name)) if name and prefix else (prefix or name)
|
[
"def",
"_xjoined_",
"(",
"prefix",
",",
"name",
")",
":",
"return",
"_SPACE_",
"(",
"prefix",
",",
"repr",
"(",
"name",
")",
")",
"if",
"name",
"and",
"prefix",
"else",
"(",
"prefix",
"or",
"name",
")"
] |
https://github.com/mrJean1/PyGeodesy/blob/7da5ca71aa3edb7bc49e219e0b8190686e1a7965/pygeodesy/named.py#L47-L50
|
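A behavior sketch for _xjoined_, assuming _SPACE_ joins its arguments with a single space when called (consistent with its use elsewhere in pygeodesy):

# _xjoined_('LatLon', 'home') -> "LatLon 'home'"   (name is repr()-quoted)
# _xjoined_('LatLon', '')     -> 'LatLon'          (empty name: prefix only)
# _xjoined_('', 'home')       -> 'home'            (no prefix: bare name)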
|
dimagi/commcare-hq
|
d67ff1d3b4c51fa050c19e60c3253a79d3452a39
|
corehq/ex-submodules/pillowtop/pillow/interface.py
|
python
|
ConstructedPillow.__init__
|
(self, name, checkpoint, change_feed, processor, process_num=0,
change_processed_event_handler=None, processor_chunk_size=0,
is_dedicated_migration_process=False)
|
[] |
def __init__(self, name, checkpoint, change_feed, processor, process_num=0,
change_processed_event_handler=None, processor_chunk_size=0,
is_dedicated_migration_process=False):
self._name = name
self._checkpoint = checkpoint
self._change_feed = change_feed
self.processor_chunk_size = processor_chunk_size
if isinstance(processor, list):
self.processors = processor
else:
self.processors = [processor]
self._change_processed_event_handler = change_processed_event_handler
self.is_dedicated_migration_process = is_dedicated_migration_process
|
[
"def",
"__init__",
"(",
"self",
",",
"name",
",",
"checkpoint",
",",
"change_feed",
",",
"processor",
",",
"process_num",
"=",
"0",
",",
"change_processed_event_handler",
"=",
"None",
",",
"processor_chunk_size",
"=",
"0",
",",
"is_dedicated_migration_process",
"=",
"False",
")",
":",
"self",
".",
"_name",
"=",
"name",
"self",
".",
"_checkpoint",
"=",
"checkpoint",
"self",
".",
"_change_feed",
"=",
"change_feed",
"self",
".",
"processor_chunk_size",
"=",
"processor_chunk_size",
"if",
"isinstance",
"(",
"processor",
",",
"list",
")",
":",
"self",
".",
"processors",
"=",
"processor",
"else",
":",
"self",
".",
"processors",
"=",
"[",
"processor",
"]",
"self",
".",
"_change_processed_event_handler",
"=",
"change_processed_event_handler",
"self",
".",
"is_dedicated_migration_process",
"=",
"is_dedicated_migration_process"
] |
https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/corehq/ex-submodules/pillowtop/pillow/interface.py#L430-L443
|
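A construction sketch; the checkpoint, change feed, and processor objects here are placeholders for whatever concrete pillowtop classes a deployment wires in:

pillow = ConstructedPillow(
    name='MyCasePillow',
    checkpoint=checkpoint,          # e.g. a pillow checkpoint instance
    change_feed=change_feed,        # e.g. a Kafka-backed change feed
    processor=[proc_a, proc_b],     # a single processor is also accepted;
                                    # __init__ normalizes it to a list
    processor_chunk_size=100,
)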
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.