nwo (string, 5-106 chars) | sha (string, 40 chars) | path (string, 4-174 chars) | language (string, 1 class) | identifier (string, 1-140 chars) | parameters (string, 0-87.7k chars) | argument_list (string, 1 class) | return_statement (string, 0-426k chars) | docstring (string, 0-64.3k chars) | docstring_summary (string, 0-26.3k chars) | docstring_tokens (list) | function (string, 18-4.83M chars) | function_tokens (list) | url (string, 83-304 chars)
---|---|---|---|---|---|---|---|---|---|---|---|---|---
SebKuzminsky/pycam
|
55e3129f518e470040e79bb00515b4bfcf36c172
|
pycam/Plugins/Tasks.py
|
python
|
Tasks._task_new
|
(self, widget=None, task_type="milling")
|
[] |
def _task_new(self, widget=None, task_type="milling"):
with merge_history_and_block_events(self.core):
params = {"type": task_type}
params.update(self.core.get("get_default_parameter_values")("task",
set_name=task_type))
new_task = pycam.workspace.data_models.Task(None, data=params)
new_task.set_application_value("name", self.get_non_conflicting_name("Task #%d"))
self.select(new_task)
|
[
"def",
"_task_new",
"(",
"self",
",",
"widget",
"=",
"None",
",",
"task_type",
"=",
"\"milling\"",
")",
":",
"with",
"merge_history_and_block_events",
"(",
"self",
".",
"core",
")",
":",
"params",
"=",
"{",
"\"type\"",
":",
"task_type",
"}",
"params",
".",
"update",
"(",
"self",
".",
"core",
".",
"get",
"(",
"\"get_default_parameter_values\"",
")",
"(",
"\"task\"",
",",
"set_name",
"=",
"task_type",
")",
")",
"new_task",
"=",
"pycam",
".",
"workspace",
".",
"data_models",
".",
"Task",
"(",
"None",
",",
"data",
"=",
"params",
")",
"new_task",
".",
"set_application_value",
"(",
"\"name\"",
",",
"self",
".",
"get_non_conflicting_name",
"(",
"\"Task #%d\"",
")",
")",
"self",
".",
"select",
"(",
"new_task",
")"
] |
https://github.com/SebKuzminsky/pycam/blob/55e3129f518e470040e79bb00515b4bfcf36c172/pycam/Plugins/Tasks.py#L220-L227
|
||||
KalleHallden/AutoTimer
|
2d954216700c4930baa154e28dbddc34609af7ce
|
env/lib/python2.7/site-packages/pip/_vendor/requests/cookies.py
|
python
|
MockResponse.info
|
(self)
|
return self._headers
|
[] |
def info(self):
return self._headers
|
[
"def",
"info",
"(",
"self",
")",
":",
"return",
"self",
".",
"_headers"
] |
https://github.com/KalleHallden/AutoTimer/blob/2d954216700c4930baa154e28dbddc34609af7ce/env/lib/python2.7/site-packages/pip/_vendor/requests/cookies.py#L111-L112
|
|||
wikimedia/pywikibot
|
81a01ffaec7271bf5b4b170f85a80388420a4e78
|
pywikibot/pagegenerators.py
|
python
|
GeneratorFactory._handle_interwiki
|
(self, value: str)
|
return LanguageLinksPageGenerator(page)
|
Handle `-interwiki` argument.
|
Handle `-interwiki` argument.
|
[
"Handle",
"-",
"interwiki",
"argument",
"."
] |
def _handle_interwiki(self, value: str) -> HANDLER_RETURN_TYPE:
"""Handle `-interwiki` argument."""
if not value:
value = i18n.input('pywikibot-enter-page-processing')
page = pywikibot.Page(pywikibot.Link(value, self.site))
return LanguageLinksPageGenerator(page)
|
[
"def",
"_handle_interwiki",
"(",
"self",
",",
"value",
":",
"str",
")",
"->",
"HANDLER_RETURN_TYPE",
":",
"if",
"not",
"value",
":",
"value",
"=",
"i18n",
".",
"input",
"(",
"'pywikibot-enter-page-processing'",
")",
"page",
"=",
"pywikibot",
".",
"Page",
"(",
"pywikibot",
".",
"Link",
"(",
"value",
",",
"self",
".",
"site",
")",
")",
"return",
"LanguageLinksPageGenerator",
"(",
"page",
")"
] |
https://github.com/wikimedia/pywikibot/blob/81a01ffaec7271bf5b4b170f85a80388420a4e78/pywikibot/pagegenerators.py#L917-L922
|
|
latenighttales/alcali
|
8939fc7901b4d8273c4106a242afdf94c0b05ea8
|
docker/saltconfig/salt/_returners/alcali.py
|
python
|
get_jids_filter
|
(count, filter_find_job=True)
|
Return a list of all job ids
:param int count: show not more than the count of most recent jobs
:param bool filter_find_jobs: filter out 'saltutil.find_job' jobs
|
Return a list of all job ids
:param int count: show not more than the count of most recent jobs
:param bool filter_find_jobs: filter out 'saltutil.find_job' jobs
|
[
"Return",
"a",
"list",
"of",
"all",
"job",
"ids",
":",
"param",
"int",
"count",
":",
"show",
"not",
"more",
"than",
"the",
"count",
"of",
"most",
"recent",
"jobs",
":",
"param",
"bool",
"filter_find_jobs",
":",
"filter",
"out",
"saltutil",
".",
"find_job",
"jobs"
] |
def get_jids_filter(count, filter_find_job=True):
"""
Return a list of all job ids
:param int count: show not more than the count of most recent jobs
:param bool filter_find_jobs: filter out 'saltutil.find_job' jobs
"""
with _get_serv(ret=None, commit=True) as cur:
sql = """SELECT * FROM (
SELECT DISTINCT `jid` ,`load` FROM `jids`
{0}
ORDER BY `jid` DESC limit {1}
) `tmp`
ORDER BY `jid`;"""
where = """WHERE `load` NOT LIKE '%"fun": "saltutil.find_job"%' """
cur.execute(sql.format(where if filter_find_job else "", count))
data = cur.fetchall()
ret = []
for jid in data:
ret.append(
salt.utils.jid.format_jid_instance_ext(
jid[0], salt.utils.json.loads(jid[1])
)
)
return ret
|
[
"def",
"get_jids_filter",
"(",
"count",
",",
"filter_find_job",
"=",
"True",
")",
":",
"with",
"_get_serv",
"(",
"ret",
"=",
"None",
",",
"commit",
"=",
"True",
")",
"as",
"cur",
":",
"sql",
"=",
"\"\"\"SELECT * FROM (\n SELECT DISTINCT `jid` ,`load` FROM `jids`\n {0}\n ORDER BY `jid` DESC limit {1}\n ) `tmp`\n ORDER BY `jid`;\"\"\"",
"where",
"=",
"\"\"\"WHERE `load` NOT LIKE '%\"fun\": \"saltutil.find_job\"%' \"\"\"",
"cur",
".",
"execute",
"(",
"sql",
".",
"format",
"(",
"where",
"if",
"filter_find_job",
"else",
"\"\"",
",",
"count",
")",
")",
"data",
"=",
"cur",
".",
"fetchall",
"(",
")",
"ret",
"=",
"[",
"]",
"for",
"jid",
"in",
"data",
":",
"ret",
".",
"append",
"(",
"salt",
".",
"utils",
".",
"jid",
".",
"format_jid_instance_ext",
"(",
"jid",
"[",
"0",
"]",
",",
"salt",
".",
"utils",
".",
"json",
".",
"loads",
"(",
"jid",
"[",
"1",
"]",
")",
")",
")",
"return",
"ret"
] |
https://github.com/latenighttales/alcali/blob/8939fc7901b4d8273c4106a242afdf94c0b05ea8/docker/saltconfig/salt/_returners/alcali.py#L469-L494
|
||
deepchem/deepchem
|
054eb4b2b082e3df8e1a8e77f36a52137ae6e375
|
contrib/atomicconv/splits/pdbbind_temporal_split.py
|
python
|
load_pdbbind_labels
|
(labels_file)
|
return contents_df
|
Loads pdbbind labels as dataframe
Parameters
----------
labels_file: str
Location of PDBbind datafile.
Returns
-------
contents_df: pd.DataFrame
Dataframe containing contents of PDBbind datafile.
|
Loads pdbbind labels as dataframe
|
[
"Loads",
"pdbbind",
"labels",
"as",
"dataframe"
] |
def load_pdbbind_labels(labels_file):
"""Loads pdbbind labels as dataframe
Parameters
----------
labels_file: str
Location of PDBbind datafile.
Returns
-------
contents_df: pd.DataFrame
Dataframe containing contents of PDBbind datafile.
"""
contents = []
with open(labels_file) as f:
for line in f:
if line.startswith("#"):
continue
else:
splitline = line.split()
if len(splitline) == 8:
contents.append(splitline)
else:
print("Incorrect data format")
print(splitline)
contents_df = pd.DataFrame(
contents,
columns=("PDB code", "resolution", "release year", "-logKd/Ki", "Kd/Ki",
"ignore-this-field", "reference", "ligand name"))
return contents_df
|
[
"def",
"load_pdbbind_labels",
"(",
"labels_file",
")",
":",
"contents",
"=",
"[",
"]",
"with",
"open",
"(",
"labels_file",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"if",
"line",
".",
"startswith",
"(",
"\"#\"",
")",
":",
"continue",
"else",
":",
"splitline",
"=",
"line",
".",
"split",
"(",
")",
"if",
"len",
"(",
"splitline",
")",
"==",
"8",
":",
"contents",
".",
"append",
"(",
"splitline",
")",
"else",
":",
"print",
"(",
"\"Incorrect data format\"",
")",
"print",
"(",
"splitline",
")",
"contents_df",
"=",
"pd",
".",
"DataFrame",
"(",
"contents",
",",
"columns",
"=",
"(",
"\"PDB code\"",
",",
"\"resolution\"",
",",
"\"release year\"",
",",
"\"-logKd/Ki\"",
",",
"\"Kd/Ki\"",
",",
"\"ignore-this-field\"",
",",
"\"reference\"",
",",
"\"ligand name\"",
")",
")",
"return",
"contents_df"
] |
https://github.com/deepchem/deepchem/blob/054eb4b2b082e3df8e1a8e77f36a52137ae6e375/contrib/atomicconv/splits/pdbbind_temporal_split.py#L8-L40
|
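A hedged usage sketch for the loader above; the index filename is a hypothetical example of a PDBbind data file, and pd is pandas, as the source module assumes:

import pandas as pd  # the function builds a pd.DataFrame, so pandas must be available

df = load_pdbbind_labels("INDEX_general_PL.2015")  # hypothetical PDBbind index file
# The temporal split in this script keys off the "release year" column:
print(df[["PDB code", "release year", "-logKd/Ki"]].head())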
|
securesystemslab/zippy
|
ff0e84ac99442c2c55fe1d285332cfd4e185e089
|
zippy/lib-python/3/xml/sax/saxutils.py
|
python
|
XMLFilterBase.startElementNS
|
(self, name, qname, attrs)
|
[] |
def startElementNS(self, name, qname, attrs):
self._cont_handler.startElementNS(name, qname, attrs)
|
[
"def",
"startElementNS",
"(",
"self",
",",
"name",
",",
"qname",
",",
"attrs",
")",
":",
"self",
".",
"_cont_handler",
".",
"startElementNS",
"(",
"name",
",",
"qname",
",",
"attrs",
")"
] |
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/lib-python/3/xml/sax/saxutils.py#L240-L241
|
||||
zhl2008/awd-platform
|
0416b31abea29743387b10b3914581fbe8e7da5e
|
web_flaskbb/Python-2.7.9/Lib/bsddb/dbshelve.py
|
python
|
DBShelfCursor.first
|
(self, flags=0)
|
return self.get_1(flags|db.DB_FIRST)
|
[] |
def first(self, flags=0): return self.get_1(flags|db.DB_FIRST)
|
[
"def",
"first",
"(",
"self",
",",
"flags",
"=",
"0",
")",
":",
"return",
"self",
".",
"get_1",
"(",
"flags",
"|",
"db",
".",
"DB_FIRST",
")"
] |
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/Python-2.7.9/Lib/bsddb/dbshelve.py#L334-L334
|
|||
blinktrade/bitex
|
a4896e7faef9c4aa0ca5325f18b77db67003764e
|
apps/trade/models.py
|
python
|
Position.get_positions_by_account_broker
|
(session, account_id, broker_id)
|
return session.query(Position).filter_by(account_id = account_id).filter_by(broker_id = broker_id )
|
[] |
def get_positions_by_account_broker(session, account_id, broker_id):
return session.query(Position).filter_by(account_id = account_id).filter_by(broker_id = broker_id )
|
[
"def",
"get_positions_by_account_broker",
"(",
"session",
",",
"account_id",
",",
"broker_id",
")",
":",
"return",
"session",
".",
"query",
"(",
"Position",
")",
".",
"filter_by",
"(",
"account_id",
"=",
"account_id",
")",
".",
"filter_by",
"(",
"broker_id",
"=",
"broker_id",
")"
] |
https://github.com/blinktrade/bitex/blob/a4896e7faef9c4aa0ca5325f18b77db67003764e/apps/trade/models.py#L736-L737
|
|||
zhl2008/awd-platform
|
0416b31abea29743387b10b3914581fbe8e7da5e
|
web_hxb2/lib/python3.5/site-packages/pip/_vendor/requests/utils.py
|
python
|
unquote_unreserved
|
(uri)
|
return ''.join(parts)
|
Un-escape any percent-escape sequences in a URI that are unreserved
characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
:rtype: str
|
Un-escape any percent-escape sequences in a URI that are unreserved
characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
|
[
"Un",
"-",
"escape",
"any",
"percent",
"-",
"escape",
"sequences",
"in",
"a",
"URI",
"that",
"are",
"unreserved",
"characters",
".",
"This",
"leaves",
"all",
"reserved",
"illegal",
"and",
"non",
"-",
"ASCII",
"bytes",
"encoded",
"."
] |
def unquote_unreserved(uri):
"""Un-escape any percent-escape sequences in a URI that are unreserved
characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
:rtype: str
"""
parts = uri.split('%')
for i in range(1, len(parts)):
h = parts[i][0:2]
if len(h) == 2 and h.isalnum():
try:
c = chr(int(h, 16))
except ValueError:
raise InvalidURL("Invalid percent-escape sequence: '%s'" % h)
if c in UNRESERVED_SET:
parts[i] = c + parts[i][2:]
else:
parts[i] = '%' + parts[i]
else:
parts[i] = '%' + parts[i]
return ''.join(parts)
|
[
"def",
"unquote_unreserved",
"(",
"uri",
")",
":",
"parts",
"=",
"uri",
".",
"split",
"(",
"'%'",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"parts",
")",
")",
":",
"h",
"=",
"parts",
"[",
"i",
"]",
"[",
"0",
":",
"2",
"]",
"if",
"len",
"(",
"h",
")",
"==",
"2",
"and",
"h",
".",
"isalnum",
"(",
")",
":",
"try",
":",
"c",
"=",
"chr",
"(",
"int",
"(",
"h",
",",
"16",
")",
")",
"except",
"ValueError",
":",
"raise",
"InvalidURL",
"(",
"\"Invalid percent-escape sequence: '%s'\"",
"%",
"h",
")",
"if",
"c",
"in",
"UNRESERVED_SET",
":",
"parts",
"[",
"i",
"]",
"=",
"c",
"+",
"parts",
"[",
"i",
"]",
"[",
"2",
":",
"]",
"else",
":",
"parts",
"[",
"i",
"]",
"=",
"'%'",
"+",
"parts",
"[",
"i",
"]",
"else",
":",
"parts",
"[",
"i",
"]",
"=",
"'%'",
"+",
"parts",
"[",
"i",
"]",
"return",
"''",
".",
"join",
"(",
"parts",
")"
] |
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/pip/_vendor/requests/utils.py#L438-L459
|
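A quick illustrative check of the documented behaviour (a sketch; it assumes a standard requests install, where this helper is importable from requests.utils):

from requests.utils import unquote_unreserved

# '%7E' decodes to '~', an unreserved character, so it is un-escaped:
print(unquote_unreserved("http://example.com/%7Euser"))  # http://example.com/~user
# '%2F' decodes to '/', a reserved character, so it stays encoded:
print(unquote_unreserved("a%2Fb"))  # a%2Fb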
|
dj-stripe/dj-stripe
|
cf15b07c754525077098e2b906108425a1f657e0
|
djstripe/models/billing.py
|
python
|
UsageRecordSummary.__str__
|
(self)
|
return f"Usage Summary for {self.subscription_item} ({self.invoice}) is {self.total_usage}"
|
[] |
def __str__(self):
return f"Usage Summary for {self.subscription_item} ({self.invoice}) is {self.total_usage}"
|
[
"def",
"__str__",
"(",
"self",
")",
":",
"return",
"f\"Usage Summary for {self.subscription_item} ({self.invoice}) is {self.total_usage}\""
] |
https://github.com/dj-stripe/dj-stripe/blob/cf15b07c754525077098e2b906108425a1f657e0/djstripe/models/billing.py#L2077-L2078
|
|||
linxid/Machine_Learning_Study_Path
|
558e82d13237114bbb8152483977806fc0c222af
|
Machine Learning In Action/Chapter5-LogisticRegression/venv/Lib/site-packages/pip/_vendor/lockfile/linklockfile.py
|
python
|
LinkLockFile.i_am_locking
|
(self)
|
return (self.is_locked() and
os.path.exists(self.unique_name) and
os.stat(self.unique_name).st_nlink == 2)
|
[] |
def i_am_locking(self):
return (self.is_locked() and
os.path.exists(self.unique_name) and
os.stat(self.unique_name).st_nlink == 2)
|
[
"def",
"i_am_locking",
"(",
"self",
")",
":",
"return",
"(",
"self",
".",
"is_locked",
"(",
")",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"unique_name",
")",
"and",
"os",
".",
"stat",
"(",
"self",
".",
"unique_name",
")",
".",
"st_nlink",
"==",
"2",
")"
] |
https://github.com/linxid/Machine_Learning_Study_Path/blob/558e82d13237114bbb8152483977806fc0c222af/Machine Learning In Action/Chapter5-LogisticRegression/venv/Lib/site-packages/pip/_vendor/lockfile/linklockfile.py#L66-L69
|
|||
edgedb/edgedb
|
872bf5abbb10f7c72df21f57635238ed27b9f280
|
edb/edgeql/parser/grammar/ddl.py
|
python
|
CreateObjectTypeStmt.reduce_CreateAbstractObjectTypeStmt
|
(self, *kids)
|
r"""%reduce \
CREATE ABSTRACT TYPE NodeName \
OptExtendingSimple OptCreateObjectTypeCommandsBlock \
|
r"""%reduce \
CREATE ABSTRACT TYPE NodeName \
OptExtendingSimple OptCreateObjectTypeCommandsBlock \
|
[
"r",
"%reduce",
"\\",
"CREATE",
"ABSTRACT",
"TYPE",
"NodeName",
"\\",
"OptExtendingSimple",
"OptCreateObjectTypeCommandsBlock",
"\\"
] |
def reduce_CreateAbstractObjectTypeStmt(self, *kids):
r"""%reduce \
CREATE ABSTRACT TYPE NodeName \
OptExtendingSimple OptCreateObjectTypeCommandsBlock \
"""
self.val = qlast.CreateObjectType(
name=kids[3].val,
bases=kids[4].val,
abstract=True,
commands=kids[5].val,
)
|
[
"def",
"reduce_CreateAbstractObjectTypeStmt",
"(",
"self",
",",
"*",
"kids",
")",
":",
"self",
".",
"val",
"=",
"qlast",
".",
"CreateObjectType",
"(",
"name",
"=",
"kids",
"[",
"3",
"]",
".",
"val",
",",
"bases",
"=",
"kids",
"[",
"4",
"]",
".",
"val",
",",
"abstract",
"=",
"True",
",",
"commands",
"=",
"kids",
"[",
"5",
"]",
".",
"val",
",",
")"
] |
https://github.com/edgedb/edgedb/blob/872bf5abbb10f7c72df21f57635238ed27b9f280/edb/edgeql/parser/grammar/ddl.py#L1751-L1761
|
||
theotherp/nzbhydra
|
4b03d7f769384b97dfc60dade4806c0fc987514e
|
libs/mailbox.py
|
python
|
Mailbox.__getitem__
|
(self, key)
|
Return the keyed message; raise KeyError if it doesn't exist.
|
Return the keyed message; raise KeyError if it doesn't exist.
|
[
"Return",
"the",
"keyed",
"message",
";",
"raise",
"KeyError",
"if",
"it",
"doesn",
"t",
"exist",
"."
] |
def __getitem__(self, key):
"""Return the keyed message; raise KeyError if it doesn't exist."""
if not self._factory:
return self.get_message(key)
else:
return self._factory(self.get_file(key))
|
[
"def",
"__getitem__",
"(",
"self",
",",
"key",
")",
":",
"if",
"not",
"self",
".",
"_factory",
":",
"return",
"self",
".",
"get_message",
"(",
"key",
")",
"else",
":",
"return",
"self",
".",
"_factory",
"(",
"self",
".",
"get_file",
"(",
"key",
")",
")"
] |
https://github.com/theotherp/nzbhydra/blob/4b03d7f769384b97dfc60dade4806c0fc987514e/libs/mailbox.py#L77-L82
|
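Because this is the stdlib mailbox API, a minimal usage sketch (the mbox path is hypothetical; add() returns the key that __getitem__ accepts):

import mailbox

mb = mailbox.mbox("inbox.mbox")      # hypothetical mailbox file; created if absent
key = mb.add(mailbox.mboxMessage())  # add() returns the new message's key
msg = mb[key]                        # __getitem__: return the keyed message
try:
    mb["no-such-key"]
except KeyError:
    print("missing keys raise KeyError, as documented")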
||
osmr/imgclsmob
|
f2993d3ce73a2f7ddba05da3891defb08547d504
|
pytorch/pytorchcv/models/pyramidnet_cifar.py
|
python
|
pyramidnet110_a84_cifar100
|
(num_classes=100, **kwargs)
|
return get_pyramidnet_cifar(
num_classes=num_classes,
blocks=110,
alpha=84,
bottleneck=False,
model_name="pyramidnet110_a84_cifar100",
**kwargs)
|
PyramidNet-110 (a=84) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
Parameters:
----------
num_classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
|
PyramidNet-110 (a=84) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
|
[
"PyramidNet",
"-",
"110",
"(",
"a",
"=",
"84",
")",
"model",
"for",
"CIFAR",
"-",
"100",
"from",
"Deep",
"Pyramidal",
"Residual",
"Networks",
"https",
":",
"//",
"arxiv",
".",
"org",
"/",
"abs",
"/",
"1610",
".",
"02915",
"."
] |
def pyramidnet110_a84_cifar100(num_classes=100, **kwargs):
"""
PyramidNet-110 (a=84) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
Parameters:
----------
num_classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
num_classes=num_classes,
blocks=110,
alpha=84,
bottleneck=False,
model_name="pyramidnet110_a84_cifar100",
**kwargs)
|
[
"def",
"pyramidnet110_a84_cifar100",
"(",
"num_classes",
"=",
"100",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"get_pyramidnet_cifar",
"(",
"num_classes",
"=",
"num_classes",
",",
"blocks",
"=",
"110",
",",
"alpha",
"=",
"84",
",",
"bottleneck",
"=",
"False",
",",
"model_name",
"=",
"\"pyramidnet110_a84_cifar100\"",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/osmr/imgclsmob/blob/f2993d3ce73a2f7ddba05da3891defb08547d504/pytorch/pytorchcv/models/pyramidnet_cifar.py#L250-L269
|
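A usage sketch through pytorchcv's documented model provider (assumes torch and pytorchcv are installed; no pretrained weights are downloaded):

import torch
from pytorchcv.model_provider import get_model as ptcv_get_model

net = ptcv_get_model("pyramidnet110_a84_cifar100", pretrained=False)
x = torch.randn(1, 3, 32, 32)  # CIFAR-sized input batch
y = net(x)
print(y.shape)  # torch.Size([1, 100]): one logit per CIFAR-100 class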
|
amundsen-io/amundsendatabuilder
|
a0af611350fde12438450d4bfd83b226ef220c3f
|
databuilder/extractor/csv_extractor.py
|
python
|
CsvTableColumnExtractor.extract
|
(self)
|
Yield the CSV result one at a time.
Convert the result to a model if a model_class is provided.
|
Yield the CSV result one at a time.
Convert the result to a model if a model_class is provided.
|
[
"Yield",
"the",
"csv",
"result",
"one",
"at",
"a",
"time",
".",
"convert",
"the",
"result",
"to",
"model",
"if",
"a",
"model_class",
"is",
"provided"
] |
def extract(self) -> Any:
"""
Yield the CSV result one at a time.
Convert the result to a model if a model_class is provided.
"""
try:
return next(self._iter)
except StopIteration:
return None
except Exception as e:
raise e
|
[
"def",
"extract",
"(",
"self",
")",
"->",
"Any",
":",
"try",
":",
"return",
"next",
"(",
"self",
".",
"_iter",
")",
"except",
"StopIteration",
":",
"return",
"None",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e"
] |
https://github.com/amundsen-io/amundsendatabuilder/blob/a0af611350fde12438450d4bfd83b226ef220c3f/databuilder/extractor/csv_extractor.py#L245-L255
|
||
krintoxi/NoobSec-Toolkit
|
38738541cbc03cedb9a3b3ed13b629f781ad64f6
|
NoobSecToolkit /tools/inject/tamper/uppercase.py
|
python
|
tamper
|
(payload, **kwargs)
|
return retVal
|
Replaces each keyword character with upper case value
Tested against:
* Microsoft SQL Server 2005
* MySQL 4, 5.0 and 5.5
* Oracle 10g
* PostgreSQL 8.3, 8.4, 9.0
Notes:
* Useful to bypass very weak and bespoke web application firewalls
that have poorly written permissive regular expressions
* This tamper script should work against all (?) databases
>>> tamper('insert')
'INSERT'
|
Replaces each keyword character with upper case value
|
[
"Replaces",
"each",
"keyword",
"character",
"with",
"upper",
"case",
"value"
] |
def tamper(payload, **kwargs):
"""
Replaces each keyword character with upper case value
Tested against:
* Microsoft SQL Server 2005
* MySQL 4, 5.0 and 5.5
* Oracle 10g
* PostgreSQL 8.3, 8.4, 9.0
Notes:
* Useful to bypass very weak and bespoke web application firewalls
that have poorly written permissive regular expressions
* This tamper script should work against all (?) databases
>>> tamper('insert')
'INSERT'
"""
retVal = payload
if payload:
for match in re.finditer(r"[A-Za-z_]+", retVal):
word = match.group()
if word.upper() in kb.keywords:
retVal = retVal.replace(word, word.upper())
return retVal
|
[
"def",
"tamper",
"(",
"payload",
",",
"*",
"*",
"kwargs",
")",
":",
"retVal",
"=",
"payload",
"if",
"payload",
":",
"for",
"match",
"in",
"re",
".",
"finditer",
"(",
"r\"[A-Za-z_]+\"",
",",
"retVal",
")",
":",
"word",
"=",
"match",
".",
"group",
"(",
")",
"if",
"word",
".",
"upper",
"(",
")",
"in",
"kb",
".",
"keywords",
":",
"retVal",
"=",
"retVal",
".",
"replace",
"(",
"word",
",",
"word",
".",
"upper",
"(",
")",
")",
"return",
"retVal"
] |
https://github.com/krintoxi/NoobSec-Toolkit/blob/38738541cbc03cedb9a3b3ed13b629f781ad64f6/NoobSecToolkit /tools/inject/tamper/uppercase.py#L18-L46
|
|
HeinleinSupport/check_mk_extensions
|
aa7d7389b812ed00f91dad61d66fb676284897d8
|
postfix_mailq_details/web/plugins/wato/postfix_mailq_details.py
|
python
|
_parameter_valuespec_postfix_mailq_details
|
()
|
return Transform(
Dictionary(
elements = [
('level',
Tuple(
help = _("These levels are applied to the number of Email that are "
"currently in the specified mail queue."),
elements = [
Integer(title = _("Warning at"), unit = _("mails"), default_value = 1000),
Integer(title = _("Critical at"), unit = _("mails"), default_value = 1500),
]
)),
],
optional_keys = [],
),
forth = lambda v: isinstance(v, tuple) and {
'level': v
} or v,
)
|
[] |
def _parameter_valuespec_postfix_mailq_details():
return Transform(
Dictionary(
elements = [
('level',
Tuple(
help = _("These levels are applied to the number of Email that are "
"currently in the specified mail queue."),
elements = [
Integer(title = _("Warning at"), unit = _("mails"), default_value = 1000),
Integer(title = _("Critical at"), unit = _("mails"), default_value = 1500),
]
)),
],
optional_keys = [],
),
forth = lambda v: isinstance(v, tuple) and {
'level': v
} or v,
)
|
[
"def",
"_parameter_valuespec_postfix_mailq_details",
"(",
")",
":",
"return",
"Transform",
"(",
"Dictionary",
"(",
"elements",
"=",
"[",
"(",
"'level'",
",",
"Tuple",
"(",
"help",
"=",
"_",
"(",
"\"These levels are applied to the number of Email that are \"",
"\"currently in the specified mail queue.\"",
")",
",",
"elements",
"=",
"[",
"Integer",
"(",
"title",
"=",
"_",
"(",
"\"Warning at\"",
")",
",",
"unit",
"=",
"_",
"(",
"\"mails\"",
")",
",",
"default_value",
"=",
"1000",
")",
",",
"Integer",
"(",
"title",
"=",
"_",
"(",
"\"Critical at\"",
")",
",",
"unit",
"=",
"_",
"(",
"\"mails\"",
")",
",",
"default_value",
"=",
"1500",
")",
",",
"]",
")",
")",
",",
"]",
",",
"optional_keys",
"=",
"[",
"]",
",",
")",
",",
"forth",
"=",
"lambda",
"v",
":",
"isinstance",
"(",
"v",
",",
"tuple",
")",
"and",
"{",
"'level'",
":",
"v",
"}",
"or",
"v",
",",
")"
] |
https://github.com/HeinleinSupport/check_mk_extensions/blob/aa7d7389b812ed00f91dad61d66fb676284897d8/postfix_mailq_details/web/plugins/wato/postfix_mailq_details.py#L39-L58
|
|||
quantumlib/Cirq
|
89f88b01d69222d3f1ec14d649b7b3a85ed9211f
|
cirq-core/cirq/contrib/qasm_import/_parser.py
|
python
|
QasmParser.p_term
|
(self, p)
|
term : NUMBER
| NATURAL_NUMBER
| PI
|
term : NUMBER
| NATURAL_NUMBER
| PI
|
[
"term",
":",
"NUMBER",
"|",
"NATURAL_NUMBER",
"|",
"PI"
] |
def p_term(self, p):
"""term : NUMBER
| NATURAL_NUMBER
| PI"""
p[0] = p[1]
|
[
"def",
"p_term",
"(",
"self",
",",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"p",
"[",
"1",
"]"
] |
https://github.com/quantumlib/Cirq/blob/89f88b01d69222d3f1ec14d649b7b3a85ed9211f/cirq-core/cirq/contrib/qasm_import/_parser.py#L401-L405
|
||
errbotio/errbot
|
66e1de8e1d7daa62be7f9ed1b2ac8832f09fa25d
|
errbot/flow.py
|
python
|
FlowExecutor._create_new_flow
|
(
flow_root, requestor: Identifier, initial_command
)
|
return None, None
|
Helper method to create a new Flow.
|
Helper method to create a new Flow.
|
[
"Helper",
"method",
"to",
"create",
"a",
"new",
"FLow",
"."
] |
def _create_new_flow(
flow_root, requestor: Identifier, initial_command
) -> Tuple[Optional[Flow], Optional[FlowNode]]:
"""
Helper method to create a new Flow.
"""
empty_context = {}
flow = Flow(flow_root, requestor, empty_context)
for possible_next_step in flow.next_steps():
if possible_next_step.command == initial_command:
# The predicate is good as we just executed manually the command.
return flow, possible_next_step
return None, None
|
[
"def",
"_create_new_flow",
"(",
"flow_root",
",",
"requestor",
":",
"Identifier",
",",
"initial_command",
")",
"->",
"Tuple",
"[",
"Optional",
"[",
"Flow",
"]",
",",
"Optional",
"[",
"FlowNode",
"]",
"]",
":",
"empty_context",
"=",
"{",
"}",
"flow",
"=",
"Flow",
"(",
"flow_root",
",",
"requestor",
",",
"empty_context",
")",
"for",
"possible_next_step",
"in",
"flow",
".",
"next_steps",
"(",
")",
":",
"if",
"possible_next_step",
".",
"command",
"==",
"initial_command",
":",
"# The predicate is good as we just executed manually the command.",
"return",
"flow",
",",
"possible_next_step",
"return",
"None",
",",
"None"
] |
https://github.com/errbotio/errbot/blob/66e1de8e1d7daa62be7f9ed1b2ac8832f09fa25d/errbot/flow.py#L372-L384
|
|
giantbranch/python-hacker-code
|
addbc8c73e7e6fb9e4fcadcec022fa1d3da4b96d
|
我手敲的代码(中文注释)/chapter11/volatility/cache.py
|
python
|
CacheDecorator._cachewrapper
|
(self, f, s, *args, **kwargs)
|
return result
|
Wrapper for caching function calls
|
Wrapper for caching function calls
|
[
"Wrapper",
"for",
"caching",
"function",
"calls"
] |
def _cachewrapper(self, f, s, *args, **kwargs):
"""Wrapper for caching function calls"""
## See if the path is callable:
if callable(self.path):
path = self.path(s, *args, **kwargs)
else:
path = self.path
## Check if the result can be retrieved
self.node = CACHE[path]
# If this test goes away, we need to change the set_payload exception check
# to act on dump instead of just the payload
if self.node:
payload = self.node.get_payload()
if payload:
return payload
result = f(s, *args, **kwargs)
## If the wrapped function is a generator we need to
## handle it especially
if isinstance(result, types.GeneratorType):
return self.generate(path, result)
self.dump(path, result)
return result
|
[
"def",
"_cachewrapper",
"(",
"self",
",",
"f",
",",
"s",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"## See if the path is callable:",
"if",
"callable",
"(",
"self",
".",
"path",
")",
":",
"path",
"=",
"self",
".",
"path",
"(",
"s",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"path",
"=",
"self",
".",
"path",
"## Check if the result can be retrieved",
"self",
".",
"node",
"=",
"CACHE",
"[",
"path",
"]",
"# If this test goes away, we need to change the set_payload exception check",
"# to act on dump instead of just the payload",
"if",
"self",
".",
"node",
":",
"payload",
"=",
"self",
".",
"node",
".",
"get_payload",
"(",
")",
"if",
"payload",
":",
"return",
"payload",
"result",
"=",
"f",
"(",
"s",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"## If the wrapped function is a generator we need to",
"## handle it especially",
"if",
"isinstance",
"(",
"result",
",",
"types",
".",
"GeneratorType",
")",
":",
"return",
"self",
".",
"generate",
"(",
"path",
",",
"result",
")",
"self",
".",
"dump",
"(",
"path",
",",
"result",
")",
"return",
"result"
] |
https://github.com/giantbranch/python-hacker-code/blob/addbc8c73e7e6fb9e4fcadcec022fa1d3da4b96d/我手敲的代码(中文注释)/chapter11/volatility/cache.py#L561-L586
|
|
shiyanlou/louplus-python
|
4c61697259e286e3d9116c3299f170d019ba3767
|
taobei/challenge-06/tbbuy/handlers/cart_product.py
|
python
|
cart_product_info
|
(id)
|
return json_response(cart_product=CartProductSchema().dump(cart_product))
|
Query a shopping cart product
|
Query a shopping cart product
|
[
"查询购物车商品"
] |
def cart_product_info(id):
"""查询购物车商品
"""
cart_product = CartProduct.query.filter(CartProduct.id == id).first()
if cart_product is None:
return json_response(ResponseCode.NOT_FOUND)
return json_response(cart_product=CartProductSchema().dump(cart_product))
|
[
"def",
"cart_product_info",
"(",
"id",
")",
":",
"cart_product",
"=",
"CartProduct",
".",
"query",
".",
"filter",
"(",
"CartProduct",
".",
"id",
"==",
"id",
")",
".",
"first",
"(",
")",
"if",
"cart_product",
"is",
"None",
":",
"return",
"json_response",
"(",
"ResponseCode",
".",
"NOT_FOUND",
")",
"return",
"json_response",
"(",
"cart_product",
"=",
"CartProductSchema",
"(",
")",
".",
"dump",
"(",
"cart_product",
")",
")"
] |
https://github.com/shiyanlou/louplus-python/blob/4c61697259e286e3d9116c3299f170d019ba3767/taobei/challenge-06/tbbuy/handlers/cart_product.py#L97-L105
|
|
gramps-project/gramps
|
04d4651a43eb210192f40a9f8c2bad8ee8fa3753
|
gramps/gen/lib/ldsord.py
|
python
|
LdsOrd.type2xml
|
(self)
|
return ""
|
Return type-representing string suitable for XML.
|
Return type-representing string suitable for XML.
|
[
"Return",
"type",
"-",
"representing",
"string",
"suitable",
"for",
"XML",
"."
] |
def type2xml(self):
"""
Return type-representing string suitable for XML.
"""
for item in LdsOrd._TYPE_MAP:
if item[0] == self.type:
return item[2]
return ""
|
[
"def",
"type2xml",
"(",
"self",
")",
":",
"for",
"item",
"in",
"LdsOrd",
".",
"_TYPE_MAP",
":",
"if",
"item",
"[",
"0",
"]",
"==",
"self",
".",
"type",
":",
"return",
"item",
"[",
"2",
"]",
"return",
"\"\""
] |
https://github.com/gramps-project/gramps/blob/04d4651a43eb210192f40a9f8c2bad8ee8fa3753/gramps/gen/lib/ldsord.py#L350-L357
|
|
irmen/synthesizer
|
9ee0969cb9756d63dffb3b4be2ff35e3ccfadbf9
|
synthplayer/sample.py
|
python
|
Sample.envelope
|
(self, attack: float, decay: float, sustainlevel: float, release: float)
|
return self
|
Apply an ADSR volume envelope. A,D,R are in seconds, Sustainlevel is a factor.
|
Apply an ADSR volume envelope. A,D,R are in seconds, Sustainlevel is a factor.
|
[
"Apply",
"an",
"ADSR",
"volume",
"envelope",
".",
"A",
"D",
"R",
"are",
"in",
"seconds",
"Sustainlevel",
"is",
"a",
"factor",
"."
] |
def envelope(self, attack: float, decay: float, sustainlevel: float, release: float) -> 'Sample':
"""Apply an ADSR volume envelope. A,D,R are in seconds, Sustainlevel is a factor."""
if self.__locked:
raise RuntimeError("cannot modify a locked sample")
assert attack >= 0 and decay >= 0 and release >= 0
assert 0 <= sustainlevel <= 1
D = self.split(attack) # self = A
S = D.split(decay)
if sustainlevel < 1:
S.amplify(sustainlevel) # apply the sustain level to S now so that R gets it as well
R = S.split(S.duration - release)
if attack > 0:
self.fadein(attack)
if decay > 0:
D.fadeout(decay, sustainlevel)
if release > 0:
R.fadeout(release)
self.join(D).join(S).join(R)
return self
|
[
"def",
"envelope",
"(",
"self",
",",
"attack",
":",
"float",
",",
"decay",
":",
"float",
",",
"sustainlevel",
":",
"float",
",",
"release",
":",
"float",
")",
"->",
"'Sample'",
":",
"if",
"self",
".",
"__locked",
":",
"raise",
"RuntimeError",
"(",
"\"cannot modify a locked sample\"",
")",
"assert",
"attack",
">=",
"0",
"and",
"decay",
">=",
"0",
"and",
"release",
">=",
"0",
"assert",
"0",
"<=",
"sustainlevel",
"<=",
"1",
"D",
"=",
"self",
".",
"split",
"(",
"attack",
")",
"# self = A",
"S",
"=",
"D",
".",
"split",
"(",
"decay",
")",
"if",
"sustainlevel",
"<",
"1",
":",
"S",
".",
"amplify",
"(",
"sustainlevel",
")",
"# apply the sustain level to S now so that R gets it as well",
"R",
"=",
"S",
".",
"split",
"(",
"S",
".",
"duration",
"-",
"release",
")",
"if",
"attack",
">",
"0",
":",
"self",
".",
"fadein",
"(",
"attack",
")",
"if",
"decay",
">",
"0",
":",
"D",
".",
"fadeout",
"(",
"decay",
",",
"sustainlevel",
")",
"if",
"release",
">",
"0",
":",
"R",
".",
"fadeout",
"(",
"release",
")",
"self",
".",
"join",
"(",
"D",
")",
".",
"join",
"(",
"S",
")",
".",
"join",
"(",
"R",
")",
"return",
"self"
] |
https://github.com/irmen/synthesizer/blob/9ee0969cb9756d63dffb3b4be2ff35e3ccfadbf9/synthplayer/sample.py#L779-L797
|
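A minimal sketch of applying the envelope, assuming a Sample can be loaded from a WAV file path (tone.wav is a hypothetical input):

from synthplayer.sample import Sample

s = Sample("tone.wav")  # hypothetical mono WAV file
# 50 ms attack, 200 ms decay, sustain at 60% amplitude, 500 ms release:
s.envelope(0.05, 0.2, 0.6, 0.5)
print(s.duration)  # total length is unchanged; only the amplitude is shaped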
|
aiortc/aioquic
|
9e360a57d6b8cf78844fba814d9968cd5f602648
|
src/aioquic/quic/connection.py
|
python
|
QuicConnection._handle_max_data_frame
|
(
self, context: QuicReceiveContext, frame_type: int, buf: Buffer
)
|
Handle a MAX_DATA frame.
This adjusts the total amount of data we can send to the peer.
|
Handle a MAX_DATA frame.
|
[
"Handle",
"a",
"MAX_DATA",
"frame",
"."
] |
def _handle_max_data_frame(
self, context: QuicReceiveContext, frame_type: int, buf: Buffer
) -> None:
"""
Handle a MAX_DATA frame.
This adjusts the total amount of data we can send to the peer.
"""
max_data = buf.pull_uint_var()
# log frame
if self._quic_logger is not None:
context.quic_logger_frames.append(
self._quic_logger.encode_connection_limit_frame(
frame_type=frame_type, maximum=max_data
)
)
if max_data > self._remote_max_data:
self._logger.debug("Remote max_data raised to %d", max_data)
self._remote_max_data = max_data
|
[
"def",
"_handle_max_data_frame",
"(",
"self",
",",
"context",
":",
"QuicReceiveContext",
",",
"frame_type",
":",
"int",
",",
"buf",
":",
"Buffer",
")",
"->",
"None",
":",
"max_data",
"=",
"buf",
".",
"pull_uint_var",
"(",
")",
"# log frame",
"if",
"self",
".",
"_quic_logger",
"is",
"not",
"None",
":",
"context",
".",
"quic_logger_frames",
".",
"append",
"(",
"self",
".",
"_quic_logger",
".",
"encode_connection_limit_frame",
"(",
"frame_type",
"=",
"frame_type",
",",
"maximum",
"=",
"max_data",
")",
")",
"if",
"max_data",
">",
"self",
".",
"_remote_max_data",
":",
"self",
".",
"_logger",
".",
"debug",
"(",
"\"Remote max_data raised to %d\"",
",",
"max_data",
")",
"self",
".",
"_remote_max_data",
"=",
"max_data"
] |
https://github.com/aiortc/aioquic/blob/9e360a57d6b8cf78844fba814d9968cd5f602648/src/aioquic/quic/connection.py#L1623-L1643
|
||
masa-su/pixyz
|
a9baf067730035d03351476f5c2e1e43016808ce
|
pixyz/losses/losses.py
|
python
|
Loss.__init__
|
(self, input_var=None)
|
Parameters
----------
input_var : :obj:`list` of :obj:`str`, defaults to None
Input variables of this loss function.
In general, users do not need to set them explicitly
because these depend on the given distributions and each loss function.
|
Parameters
----------
input_var : :obj:`list` of :obj:`str`, defaults to None
Input variables of this loss function.
In general, users do not need to set them explicitly
because these depend on the given distributions and each loss function.
|
[
"Parameters",
"----------",
"input_var",
":",
":",
"obj",
":",
"list",
"of",
":",
"obj",
":",
"str",
"defaults",
"to",
"None",
"Input",
"variables",
"of",
"this",
"loss",
"function",
".",
"In",
"general",
"users",
"do",
"not",
"need",
"to",
"set",
"them",
"explicitly",
"because",
"these",
"depend",
"on",
"the",
"given",
"distributions",
"and",
"each",
"loss",
"function",
"."
] |
def __init__(self, input_var=None):
"""
Parameters
----------
input_var : :obj:`list` of :obj:`str`, defaults to None
Input variables of this loss function.
In general, users do not need to set them explicitly
because these depend on the given distributions and each loss function.
"""
super().__init__()
self._input_var = deepcopy(input_var)
|
[
"def",
"__init__",
"(",
"self",
",",
"input_var",
"=",
"None",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
")",
"self",
".",
"_input_var",
"=",
"deepcopy",
"(",
"input_var",
")"
] |
https://github.com/masa-su/pixyz/blob/a9baf067730035d03351476f5c2e1e43016808ce/pixyz/losses/losses.py#L58-L69
|
||
1012598167/flask_mongodb_game
|
60c7e0351586656ec38f851592886338e50b4110
|
python_flask/venv/Lib/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/distlib/markers.py
|
python
|
Evaluator.evaluate
|
(self, expr, context)
|
return result
|
Evaluate a marker expression returned by the :func:`parse_requirement`
function in the specified context.
|
Evaluate a marker expression returned by the :func:`parse_requirement`
function in the specified context.
|
[
"Evaluate",
"a",
"marker",
"expression",
"returned",
"by",
"the",
":",
"func",
":",
"parse_requirement",
"function",
"in",
"the",
"specified",
"context",
"."
] |
def evaluate(self, expr, context):
"""
Evaluate a marker expression returned by the :func:`parse_requirement`
function in the specified context.
"""
if isinstance(expr, string_types):
if expr[0] in '\'"':
result = expr[1:-1]
else:
if expr not in context:
raise SyntaxError('unknown variable: %s' % expr)
result = context[expr]
else:
assert isinstance(expr, dict)
op = expr['op']
if op not in self.operations:
raise NotImplementedError('op not implemented: %s' % op)
elhs = expr['lhs']
erhs = expr['rhs']
if _is_literal(expr['lhs']) and _is_literal(expr['rhs']):
raise SyntaxError('invalid comparison: %s %s %s' % (elhs, op, erhs))
lhs = self.evaluate(elhs, context)
rhs = self.evaluate(erhs, context)
result = self.operations[op](lhs, rhs)
return result
|
[
"def",
"evaluate",
"(",
"self",
",",
"expr",
",",
"context",
")",
":",
"if",
"isinstance",
"(",
"expr",
",",
"string_types",
")",
":",
"if",
"expr",
"[",
"0",
"]",
"in",
"'\\'\"'",
":",
"result",
"=",
"expr",
"[",
"1",
":",
"-",
"1",
"]",
"else",
":",
"if",
"expr",
"not",
"in",
"context",
":",
"raise",
"SyntaxError",
"(",
"'unknown variable: %s'",
"%",
"expr",
")",
"result",
"=",
"context",
"[",
"expr",
"]",
"else",
":",
"assert",
"isinstance",
"(",
"expr",
",",
"dict",
")",
"op",
"=",
"expr",
"[",
"'op'",
"]",
"if",
"op",
"not",
"in",
"self",
".",
"operations",
":",
"raise",
"NotImplementedError",
"(",
"'op not implemented: %s'",
"%",
"op",
")",
"elhs",
"=",
"expr",
"[",
"'lhs'",
"]",
"erhs",
"=",
"expr",
"[",
"'rhs'",
"]",
"if",
"_is_literal",
"(",
"expr",
"[",
"'lhs'",
"]",
")",
"and",
"_is_literal",
"(",
"expr",
"[",
"'rhs'",
"]",
")",
":",
"raise",
"SyntaxError",
"(",
"'invalid comparison: %s %s %s'",
"%",
"(",
"elhs",
",",
"op",
",",
"erhs",
")",
")",
"lhs",
"=",
"self",
".",
"evaluate",
"(",
"elhs",
",",
"context",
")",
"rhs",
"=",
"self",
".",
"evaluate",
"(",
"erhs",
",",
"context",
")",
"result",
"=",
"self",
".",
"operations",
"[",
"op",
"]",
"(",
"lhs",
",",
"rhs",
")",
"return",
"result"
] |
https://github.com/1012598167/flask_mongodb_game/blob/60c7e0351586656ec38f851592886338e50b4110/python_flask/venv/Lib/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/distlib/markers.py#L50-L75
|
|
AIChallenger/AI_Challenger_2018
|
f0e4376152c8fe5a098ed92a973cec96b13e1a24
|
Baselines/autonomous_driving_perception208_baseline/detection/utils/np_box_list_ops.py
|
python
|
gather
|
(boxlist, indices, fields=None)
|
return subboxlist
|
Gather boxes from BoxList according to indices and return new BoxList.
By default, gather returns boxes corresponding to the input index list, as
well as all additional fields stored in the boxlist (indexing into the
first dimension). However one can optionally only gather from a
subset of fields.
Args:
boxlist: BoxList holding N boxes
indices: a 1-d numpy array of type int_
fields: (optional) list of fields to also gather from. If None (default),
all fields are gathered from. Pass an empty fields list to only gather
the box coordinates.
Returns:
subboxlist: a BoxList corresponding to the subset of the input BoxList
specified by indices
Raises:
ValueError: if specified field is not contained in boxlist or if the
indices are not of type int_
|
Gather boxes from BoxList according to indices and return new BoxList.
|
[
"Gather",
"boxes",
"from",
"BoxList",
"according",
"to",
"indices",
"and",
"return",
"new",
"BoxList",
"."
] |
def gather(boxlist, indices, fields=None):
"""Gather boxes from BoxList according to indices and return new BoxList.
By default, gather returns boxes corresponding to the input index list, as
well as all additional fields stored in the boxlist (indexing into the
first dimension). However one can optionally only gather from a
subset of fields.
Args:
boxlist: BoxList holding N boxes
indices: a 1-d numpy array of type int_
fields: (optional) list of fields to also gather from. If None (default),
all fields are gathered from. Pass an empty fields list to only gather
the box coordinates.
Returns:
subboxlist: a BoxList corresponding to the subset of the input BoxList
specified by indices
Raises:
ValueError: if specified field is not contained in boxlist or if the
indices are not of type int_
"""
if indices.size:
if np.amax(indices) >= boxlist.num_boxes() or np.amin(indices) < 0:
raise ValueError('indices are out of valid range.')
subboxlist = np_box_list.BoxList(boxlist.get()[indices, :])
if fields is None:
fields = boxlist.get_extra_fields()
for field in fields:
extra_field_data = boxlist.get_field(field)
subboxlist.add_field(field, extra_field_data[indices, ...])
return subboxlist
|
[
"def",
"gather",
"(",
"boxlist",
",",
"indices",
",",
"fields",
"=",
"None",
")",
":",
"if",
"indices",
".",
"size",
":",
"if",
"np",
".",
"amax",
"(",
"indices",
")",
">=",
"boxlist",
".",
"num_boxes",
"(",
")",
"or",
"np",
".",
"amin",
"(",
"indices",
")",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"'indices are out of valid range.'",
")",
"subboxlist",
"=",
"np_box_list",
".",
"BoxList",
"(",
"boxlist",
".",
"get",
"(",
")",
"[",
"indices",
",",
":",
"]",
")",
"if",
"fields",
"is",
"None",
":",
"fields",
"=",
"boxlist",
".",
"get_extra_fields",
"(",
")",
"for",
"field",
"in",
"fields",
":",
"extra_field_data",
"=",
"boxlist",
".",
"get_field",
"(",
"field",
")",
"subboxlist",
".",
"add_field",
"(",
"field",
",",
"extra_field_data",
"[",
"indices",
",",
"...",
"]",
")",
"return",
"subboxlist"
] |
https://github.com/AIChallenger/AI_Challenger_2018/blob/f0e4376152c8fe5a098ed92a973cec96b13e1a24/Baselines/autonomous_driving_perception208_baseline/detection/utils/np_box_list_ops.py#L95-L127
|
|
deepset-ai/haystack
|
79fdda8a7cf393d774803608a4874f2a6e63cf6f
|
haystack/nodes/retriever/dense.py
|
python
|
TableTextRetriever.load
|
(cls,
load_dir: Union[Path, str],
document_store: BaseDocumentStore,
max_seq_len_query: int = 64,
max_seq_len_passage: int = 256,
max_seq_len_table: int = 256,
use_gpu: bool = True,
batch_size: int = 16,
embed_meta_fields: List[str] = ["name", "section_title", "caption"],
use_fast_tokenizers: bool = True,
similarity_function: str = "dot_product",
query_encoder_dir: str = "query_encoder",
passage_encoder_dir: str = "passage_encoder",
table_encoder_dir: str = "table_encoder",
infer_tokenizer_classes: bool = False
)
|
return mm_retriever
|
Load TableTextRetriever from the specified directory.
|
Load TableTextRetriever from the specified directory.
|
[
"Load",
"TableTextRetriever",
"from",
"the",
"specified",
"directory",
"."
] |
def load(cls,
load_dir: Union[Path, str],
document_store: BaseDocumentStore,
max_seq_len_query: int = 64,
max_seq_len_passage: int = 256,
max_seq_len_table: int = 256,
use_gpu: bool = True,
batch_size: int = 16,
embed_meta_fields: List[str] = ["name", "section_title", "caption"],
use_fast_tokenizers: bool = True,
similarity_function: str = "dot_product",
query_encoder_dir: str = "query_encoder",
passage_encoder_dir: str = "passage_encoder",
table_encoder_dir: str = "table_encoder",
infer_tokenizer_classes: bool = False
):
"""
Load TableTextRetriever from the specified directory.
"""
load_dir = Path(load_dir)
mm_retriever = cls(
document_store=document_store,
query_embedding_model=Path(load_dir) / query_encoder_dir,
passage_embedding_model=Path(load_dir) / passage_encoder_dir,
table_embedding_model=Path(load_dir) / table_encoder_dir,
max_seq_len_query=max_seq_len_query,
max_seq_len_passage=max_seq_len_passage,
max_seq_len_table=max_seq_len_table,
use_gpu=use_gpu,
batch_size=batch_size,
embed_meta_fields=embed_meta_fields,
use_fast_tokenizers=use_fast_tokenizers,
similarity_function=similarity_function,
infer_tokenizer_classes=infer_tokenizer_classes
)
logger.info(f"TableTextRetriever model loaded from {load_dir}")
return mm_retriever
|
[
"def",
"load",
"(",
"cls",
",",
"load_dir",
":",
"Union",
"[",
"Path",
",",
"str",
"]",
",",
"document_store",
":",
"BaseDocumentStore",
",",
"max_seq_len_query",
":",
"int",
"=",
"64",
",",
"max_seq_len_passage",
":",
"int",
"=",
"256",
",",
"max_seq_len_table",
":",
"int",
"=",
"256",
",",
"use_gpu",
":",
"bool",
"=",
"True",
",",
"batch_size",
":",
"int",
"=",
"16",
",",
"embed_meta_fields",
":",
"List",
"[",
"str",
"]",
"=",
"[",
"\"name\"",
",",
"\"section_title\"",
",",
"\"caption\"",
"]",
",",
"use_fast_tokenizers",
":",
"bool",
"=",
"True",
",",
"similarity_function",
":",
"str",
"=",
"\"dot_product\"",
",",
"query_encoder_dir",
":",
"str",
"=",
"\"query_encoder\"",
",",
"passage_encoder_dir",
":",
"str",
"=",
"\"passage_encoder\"",
",",
"table_encoder_dir",
":",
"str",
"=",
"\"table_encoder\"",
",",
"infer_tokenizer_classes",
":",
"bool",
"=",
"False",
")",
":",
"load_dir",
"=",
"Path",
"(",
"load_dir",
")",
"mm_retriever",
"=",
"cls",
"(",
"document_store",
"=",
"document_store",
",",
"query_embedding_model",
"=",
"Path",
"(",
"load_dir",
")",
"/",
"query_encoder_dir",
",",
"passage_embedding_model",
"=",
"Path",
"(",
"load_dir",
")",
"/",
"passage_encoder_dir",
",",
"table_embedding_model",
"=",
"Path",
"(",
"load_dir",
")",
"/",
"table_encoder_dir",
",",
"max_seq_len_query",
"=",
"max_seq_len_query",
",",
"max_seq_len_passage",
"=",
"max_seq_len_passage",
",",
"max_seq_len_table",
"=",
"max_seq_len_table",
",",
"use_gpu",
"=",
"use_gpu",
",",
"batch_size",
"=",
"batch_size",
",",
"embed_meta_fields",
"=",
"embed_meta_fields",
",",
"use_fast_tokenizers",
"=",
"use_fast_tokenizers",
",",
"similarity_function",
"=",
"similarity_function",
",",
"infer_tokenizer_classes",
"=",
"infer_tokenizer_classes",
")",
"logger",
".",
"info",
"(",
"f\"TableTextRetriever model loaded from {load_dir}\"",
")",
"return",
"mm_retriever"
] |
https://github.com/deepset-ai/haystack/blob/79fdda8a7cf393d774803608a4874f2a6e63cf6f/haystack/nodes/retriever/dense.py#L921-L959
|
|
spectacles/CodeComplice
|
8ca8ee4236f72b58caa4209d2fbd5fa56bd31d62
|
libs/codeintel2/langintel.py
|
python
|
LangIntel.cb_function_detail_from_elem
|
(self, elem)
|
[] |
def cb_function_detail_from_elem(self, elem):
# by default (some languages may choose to override)
sig = elem.get("signature")
if sig:
return sig
else:
return elem.get("name")+"(...)"
|
[
"def",
"cb_function_detail_from_elem",
"(",
"self",
",",
"elem",
")",
":",
"# by default (some languages may choose to override)",
"sig",
"=",
"elem",
".",
"get",
"(",
"\"signature\"",
")",
"if",
"sig",
":",
"return",
"sig",
"else",
":",
"return",
"elem",
".",
"get",
"(",
"\"name\"",
")",
"+",
"\"(...)\""
] |
https://github.com/spectacles/CodeComplice/blob/8ca8ee4236f72b58caa4209d2fbd5fa56bd31d62/libs/codeintel2/langintel.py#L153-L159
|
||||
openhatch/oh-mainline
|
ce29352a034e1223141dcc2f317030bbc3359a51
|
vendor/packages/celery/celery/concurrency/processes/pool.py
|
python
|
Pool.apply_async
|
(self, func, args=(), kwds={},
callback=None, accept_callback=None, timeout_callback=None,
waitforslot=False, error_callback=None,
soft_timeout=None, timeout=None)
|
return result
|
Asynchronous equivalent of `apply()` builtin.
Callback is called when the function's return value is ready.
The accept callback is called when the job is accepted to be executed.
Simplified, the flow is like this:
>>> if accept_callback:
... accept_callback()
>>> retval = func(*args, **kwds)
>>> if callback:
... callback(retval)
|
Asynchronous equivalent of `apply()` builtin.
|
[
"Asynchronous",
"equivalent",
"of",
"apply",
"()",
"builtin",
"."
] |
def apply_async(self, func, args=(), kwds={},
callback=None, accept_callback=None, timeout_callback=None,
waitforslot=False, error_callback=None,
soft_timeout=None, timeout=None):
'''
Asynchronous equivalent of `apply()` builtin.
Callback is called when the function's return value is ready.
The accept callback is called when the job is accepted to be executed.
Simplified, the flow is like this:
>>> if accept_callback:
... accept_callback()
>>> retval = func(*args, **kwds)
>>> if callback:
... callback(retval)
'''
assert self._state == RUN
if soft_timeout and SIG_SOFT_TIMEOUT is None:
warnings.warn(UserWarning("Soft timeouts are not supported: "
"on this platform: It does not have the SIGUSR1 signal."))
soft_timeout = None
result = ApplyResult(self._cache, callback,
accept_callback, timeout_callback,
error_callback, soft_timeout, timeout)
if waitforslot and self._putlock is not None:
self._putlock.acquire()
if self._state != RUN:
return
if timeout or soft_timeout:
# start the timeout handler thread when required.
self._start_timeout_handler()
self._taskqueue.put(([(result._job, None, func, args, kwds)], None))
return result
|
[
"def",
"apply_async",
"(",
"self",
",",
"func",
",",
"args",
"=",
"(",
")",
",",
"kwds",
"=",
"{",
"}",
",",
"callback",
"=",
"None",
",",
"accept_callback",
"=",
"None",
",",
"timeout_callback",
"=",
"None",
",",
"waitforslot",
"=",
"False",
",",
"error_callback",
"=",
"None",
",",
"soft_timeout",
"=",
"None",
",",
"timeout",
"=",
"None",
")",
":",
"assert",
"self",
".",
"_state",
"==",
"RUN",
"if",
"soft_timeout",
"and",
"SIG_SOFT_TIMEOUT",
"is",
"None",
":",
"warnings",
".",
"warn",
"(",
"UserWarning",
"(",
"\"Soft timeouts are not supported: \"",
"\"on this platform: It does not have the SIGUSR1 signal.\"",
")",
")",
"soft_timeout",
"=",
"None",
"result",
"=",
"ApplyResult",
"(",
"self",
".",
"_cache",
",",
"callback",
",",
"accept_callback",
",",
"timeout_callback",
",",
"error_callback",
",",
"soft_timeout",
",",
"timeout",
")",
"if",
"waitforslot",
"and",
"self",
".",
"_putlock",
"is",
"not",
"None",
":",
"self",
".",
"_putlock",
".",
"acquire",
"(",
")",
"if",
"self",
".",
"_state",
"!=",
"RUN",
":",
"return",
"if",
"timeout",
"or",
"soft_timeout",
":",
"# start the timeout handler thread when required.",
"self",
".",
"_start_timeout_handler",
"(",
")",
"self",
".",
"_taskqueue",
".",
"put",
"(",
"(",
"[",
"(",
"result",
".",
"_job",
",",
"None",
",",
"func",
",",
"args",
",",
"kwds",
")",
"]",
",",
"None",
")",
")",
"return",
"result"
] |
https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/celery/celery/concurrency/processes/pool.py#L770-L806
|
|
home-assistant/core
|
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
|
homeassistant/components/mpchc/media_player.py
|
python
|
MpcHcDevice.media_play
|
(self)
|
Send play command.
|
Send play command.
|
[
"Send",
"play",
"command",
"."
] |
def media_play(self):
"""Send play command."""
self._send_command(887)
|
[
"def",
"media_play",
"(",
"self",
")",
":",
"self",
".",
"_send_command",
"(",
"887",
")"
] |
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/mpchc/media_player.py#L181-L183
|
||
apache/tvm
|
6eb4ed813ebcdcd9558f0906a1870db8302ff1e0
|
python/tvm/relay/op/contrib/cutlass.py
|
python
|
partition_for_cutlass
|
(mod, params=None)
|
return seq(mod)
|
Partition the input module into CUTLASS-supported subgraphs.
|
Partition the input module into CUTLASS-supported subgraphs.
|
[
"Partition",
"the",
"input",
"module",
"into",
"CUTLASS",
"-",
"supported",
"subgraphs",
"."
] |
def partition_for_cutlass(mod, params=None):
"""Partition the input module into CUTLASS-supported subgraphs."""
dense_pat = ("cutlass.dense", make_gemm_pattern(False, None), check_gemm)
dense_bias_pat = ("cutlass.dense_bias", make_gemm_pattern(True, None), check_gemm)
dense_bias_relu_pat = ("cutlass.dense_bias_relu", make_gemm_pattern(True, "relu"), check_gemm)
dense_bias_gelu_fp16_pat = (
"cutlass.dense_bias_gelu_fp16",
make_gemm_pattern(True, "gelu"),
check_gemm,
)
dense_bias_gelu_fp32_pat = (
"cutlass.dense_bias_gelu_fp32",
make_gemm_pattern(True, "gelu", out_dtype="float32"),
check_gemm,
)
dense_patterns = [
dense_bias_gelu_fp16_pat,
dense_bias_gelu_fp32_pat,
dense_bias_relu_pat,
dense_bias_pat,
dense_pat,
("cutlass.batch_matmul", make_batch_matmul_pattern(), check_batch_matmul),
]
conv2d_patterns = [
(
"cutlass.conv2d_bias_hardswish",
make_conv2d_pattern(with_bias=True, with_act="hardswish"),
check_conv2d,
),
(
"cutlass.conv2d_bias_silu",
make_conv2d_pattern(with_bias=True, with_act="silu"),
check_conv2d,
),
(
"cutlass.conv2d_bias_relu",
make_conv2d_pattern(with_bias=True, with_act="relu"),
check_conv2d,
),
(
"cutlass.conv2d_bias_sigmoid",
make_conv2d_pattern(with_bias=True, with_act="sigmoid"),
check_conv2d,
),
("cutlass.conv2d_bias", make_conv2d_pattern(with_bias=True), check_conv2d),
("cutlass.conv2d", make_conv2d_pattern(), check_conv2d),
]
residual_block_patterns = []
for with_act, postfix in [("relu", "_relu"), (None, "")]:
for name, pat, _ in conv2d_patterns[:-1]:
for bin_op in ["add", "multiply"]:
residual_block_patterns.append(
(
name + "_residual_" + bin_op + postfix,
make_residual_block_pattern(pat, bin_op, with_act=with_act),
partial(check_conv2d_residual, binary_op=bin_op),
)
)
cutlass_patterns = residual_block_patterns + dense_patterns + conv2d_patterns
if params is not None:
mod["main"] = bind_params_by_name(mod["main"], params)
remove_bn_pass = Sequential(
[
transform.InferType(),
transform.SimplifyInference(),
transform.FoldConstant(),
transform.FoldScaleAxis(),
]
)
with PassContext(opt_level=3):
mod = remove_bn_pass(mod)
seq = Sequential(
[
transform.InferType(),
transform.MergeComposite(cutlass_patterns),
transform.AnnotateTarget(["cutlass"], include_non_call_ops=False),
transform.PartitionGraph(bind_constants=False),
]
)
return seq(mod)
|
[
"def",
"partition_for_cutlass",
"(",
"mod",
",",
"params",
"=",
"None",
")",
":",
"dense_pat",
"=",
"(",
"\"cutlass.dense\"",
",",
"make_gemm_pattern",
"(",
"False",
",",
"None",
")",
",",
"check_gemm",
")",
"dense_bias_pat",
"=",
"(",
"\"cutlass.dense_bias\"",
",",
"make_gemm_pattern",
"(",
"True",
",",
"None",
")",
",",
"check_gemm",
")",
"dense_bias_relu_pat",
"=",
"(",
"\"cutlass.dense_bias_relu\"",
",",
"make_gemm_pattern",
"(",
"True",
",",
"\"relu\"",
")",
",",
"check_gemm",
")",
"dense_bias_gelu_fp16_pat",
"=",
"(",
"\"cutlass.dense_bias_gelu_fp16\"",
",",
"make_gemm_pattern",
"(",
"True",
",",
"\"gelu\"",
")",
",",
"check_gemm",
",",
")",
"dense_bias_gelu_fp32_pat",
"=",
"(",
"\"cutlass.dense_bias_gelu_fp32\"",
",",
"make_gemm_pattern",
"(",
"True",
",",
"\"gelu\"",
",",
"out_dtype",
"=",
"\"float32\"",
")",
",",
"check_gemm",
",",
")",
"dense_patterns",
"=",
"[",
"dense_bias_gelu_fp16_pat",
",",
"dense_bias_gelu_fp32_pat",
",",
"dense_bias_relu_pat",
",",
"dense_bias_pat",
",",
"dense_pat",
",",
"(",
"\"cutlass.batch_matmul\"",
",",
"make_batch_matmul_pattern",
"(",
")",
",",
"check_batch_matmul",
")",
",",
"]",
"conv2d_patterns",
"=",
"[",
"(",
"\"cutlass.conv2d_bias_hardswish\"",
",",
"make_conv2d_pattern",
"(",
"with_bias",
"=",
"True",
",",
"with_act",
"=",
"\"hardswish\"",
")",
",",
"check_conv2d",
",",
")",
",",
"(",
"\"cutlass.conv2d_bias_silu\"",
",",
"make_conv2d_pattern",
"(",
"with_bias",
"=",
"True",
",",
"with_act",
"=",
"\"silu\"",
")",
",",
"check_conv2d",
",",
")",
",",
"(",
"\"cutlass.conv2d_bias_relu\"",
",",
"make_conv2d_pattern",
"(",
"with_bias",
"=",
"True",
",",
"with_act",
"=",
"\"relu\"",
")",
",",
"check_conv2d",
",",
")",
",",
"(",
"\"cutlass.conv2d_bias_sigmoid\"",
",",
"make_conv2d_pattern",
"(",
"with_bias",
"=",
"True",
",",
"with_act",
"=",
"\"sigmoid\"",
")",
",",
"check_conv2d",
",",
")",
",",
"(",
"\"cutlass.conv2d_bias\"",
",",
"make_conv2d_pattern",
"(",
"with_bias",
"=",
"True",
")",
",",
"check_conv2d",
")",
",",
"(",
"\"cutlass.conv2d\"",
",",
"make_conv2d_pattern",
"(",
")",
",",
"check_conv2d",
")",
",",
"]",
"residual_block_patterns",
"=",
"[",
"]",
"for",
"with_act",
",",
"postfix",
"in",
"[",
"(",
"\"relu\"",
",",
"\"_relu\"",
")",
",",
"(",
"None",
",",
"\"\"",
")",
"]",
":",
"for",
"name",
",",
"pat",
",",
"_",
"in",
"conv2d_patterns",
"[",
":",
"-",
"1",
"]",
":",
"for",
"bin_op",
"in",
"[",
"\"add\"",
",",
"\"multiply\"",
"]",
":",
"residual_block_patterns",
".",
"append",
"(",
"(",
"name",
"+",
"\"_residual_\"",
"+",
"bin_op",
"+",
"postfix",
",",
"make_residual_block_pattern",
"(",
"pat",
",",
"bin_op",
",",
"with_act",
"=",
"with_act",
")",
",",
"partial",
"(",
"check_conv2d_residual",
",",
"binary_op",
"=",
"bin_op",
")",
",",
")",
")",
"cutlass_patterns",
"=",
"residual_block_patterns",
"+",
"dense_patterns",
"+",
"conv2d_patterns",
"if",
"params",
"is",
"not",
"None",
":",
"mod",
"[",
"\"main\"",
"]",
"=",
"bind_params_by_name",
"(",
"mod",
"[",
"\"main\"",
"]",
",",
"params",
")",
"remove_bn_pass",
"=",
"Sequential",
"(",
"[",
"transform",
".",
"InferType",
"(",
")",
",",
"transform",
".",
"SimplifyInference",
"(",
")",
",",
"transform",
".",
"FoldConstant",
"(",
")",
",",
"transform",
".",
"FoldScaleAxis",
"(",
")",
",",
"]",
")",
"with",
"PassContext",
"(",
"opt_level",
"=",
"3",
")",
":",
"mod",
"=",
"remove_bn_pass",
"(",
"mod",
")",
"seq",
"=",
"Sequential",
"(",
"[",
"transform",
".",
"InferType",
"(",
")",
",",
"transform",
".",
"MergeComposite",
"(",
"cutlass_patterns",
")",
",",
"transform",
".",
"AnnotateTarget",
"(",
"[",
"\"cutlass\"",
"]",
",",
"include_non_call_ops",
"=",
"False",
")",
",",
"transform",
".",
"PartitionGraph",
"(",
"bind_constants",
"=",
"False",
")",
",",
"]",
")",
"return",
"seq",
"(",
"mod",
")"
] |
https://github.com/apache/tvm/blob/6eb4ed813ebcdcd9558f0906a1870db8302ff1e0/python/tvm/relay/op/contrib/cutlass.py#L178-L265
|
|
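A minimal usage sketch for the partition_for_cutlass record above, assuming a TVM build with the Relay frontend; the tensor shapes and dtypes are illustrative, not taken from the repo.

import tvm
from tvm import relay
from tvm.relay.op.contrib.cutlass import partition_for_cutlass

# Build a small fp16 dense workload and hand it to the partitioner; dense,
# conv2d and fused-epilogue patterns are split into "cutlass" composite
# functions, everything else stays in the main module.
x = relay.var("x", shape=(16, 64), dtype="float16")
w = relay.var("w", shape=(32, 64), dtype="float16")
mod = tvm.IRModule.from_expr(relay.nn.dense(x, w))
mod = partition_for_cutlass(mod)
print(mod)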
blawar/nut
|
2cf351400418399a70164987e28670309f6c9cb5
|
Fs/driver/curl.py
|
python
|
FileContext.read
|
(self, sz=None)
|
return output.getvalue()
|
[] |
def read(self, sz=None):
curl = pycurl.Curl()
curl.setopt(pycurl.URL, self.url)
curl.setopt(pycurl.CAINFO, certifi.where())
output = io.BytesIO()
curl.setopt(pycurl.WRITEFUNCTION, output.write)
self.setup(curl, None, sz)
curl.perform()
return output.getvalue()
|
[
"def",
"read",
"(",
"self",
",",
"sz",
"=",
"None",
")",
":",
"curl",
"=",
"pycurl",
".",
"Curl",
"(",
")",
"curl",
".",
"setopt",
"(",
"pycurl",
".",
"URL",
",",
"self",
".",
"url",
")",
"curl",
".",
"setopt",
"(",
"pycurl",
".",
"CAINFO",
",",
"certifi",
".",
"where",
"(",
")",
")",
"output",
"=",
"io",
".",
"BytesIO",
"(",
")",
"curl",
".",
"setopt",
"(",
"pycurl",
".",
"WRITEFUNCTION",
",",
"output",
".",
"write",
")",
"self",
".",
"setup",
"(",
"curl",
",",
"None",
",",
"sz",
")",
"curl",
".",
"perform",
"(",
")",
"return",
"output",
".",
"getvalue",
"(",
")"
] |
https://github.com/blawar/nut/blob/2cf351400418399a70164987e28670309f6c9cb5/Fs/driver/curl.py#L31-L40
|
|||
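A self-contained sketch of the same buffered-download pattern as FileContext.read above; the repo-specific setup() hook is omitted and the URL is a placeholder.

import io

import certifi
import pycurl

def fetch(url):
    # Same pattern as FileContext.read: stream the response body into an
    # in-memory buffer via WRITEFUNCTION and return the collected bytes.
    curl = pycurl.Curl()
    curl.setopt(pycurl.URL, url)
    curl.setopt(pycurl.CAINFO, certifi.where())
    output = io.BytesIO()
    curl.setopt(pycurl.WRITEFUNCTION, output.write)
    curl.perform()
    curl.close()
    return output.getvalue()

print(len(fetch("https://example.com")))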
sonyisme/keras-recommendation
|
e84fab6706faf8405f4bfc75cf50ecff6433eb77
|
keras-master/keras/initializations.py
|
python
|
orthogonal
|
(shape, scale=1.1)
|
return sharedX(scale * q[:shape[0], :shape[1]])
|
From Lasagne
|
From Lasagne
|
[
"From",
"Lasagne"
] |
def orthogonal(shape, scale=1.1):
''' From Lasagne
'''
flat_shape = (shape[0], np.prod(shape[1:]))
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
q = u if u.shape == flat_shape else v # pick the one with the correct shape
q = q.reshape(shape)
return sharedX(scale * q[:shape[0], :shape[1]])
|
[
"def",
"orthogonal",
"(",
"shape",
",",
"scale",
"=",
"1.1",
")",
":",
"flat_shape",
"=",
"(",
"shape",
"[",
"0",
"]",
",",
"np",
".",
"prod",
"(",
"shape",
"[",
"1",
":",
"]",
")",
")",
"a",
"=",
"np",
".",
"random",
".",
"normal",
"(",
"0.0",
",",
"1.0",
",",
"flat_shape",
")",
"u",
",",
"_",
",",
"v",
"=",
"np",
".",
"linalg",
".",
"svd",
"(",
"a",
",",
"full_matrices",
"=",
"False",
")",
"q",
"=",
"u",
"if",
"u",
".",
"shape",
"==",
"flat_shape",
"else",
"v",
"# pick the one with the correct shape",
"q",
"=",
"q",
".",
"reshape",
"(",
"shape",
")",
"return",
"sharedX",
"(",
"scale",
"*",
"q",
"[",
":",
"shape",
"[",
"0",
"]",
",",
":",
"shape",
"[",
"1",
"]",
"]",
")"
] |
https://github.com/sonyisme/keras-recommendation/blob/e84fab6706faf8405f4bfc75cf50ecff6433eb77/keras-master/keras/initializations.py#L52-L60
|
|
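A NumPy-only sketch of the orthogonal record above; sharedX in the repo wraps the result in a Theano shared variable, so this version returns the raw array to make the orthogonality property easy to verify.

import numpy as np

def orthogonal_np(shape, scale=1.1):
    # SVD of a random Gaussian matrix yields orthonormal factors; pick the
    # one whose shape matches, exactly as in the record.
    flat_shape = (shape[0], int(np.prod(shape[1:])))
    a = np.random.normal(0.0, 1.0, flat_shape)
    u, _, v = np.linalg.svd(a, full_matrices=False)
    q = u if u.shape == flat_shape else v
    return scale * q.reshape(shape)

w = orthogonal_np((64, 64), scale=1.0)
print(np.allclose(w @ w.T, np.eye(64), atol=1e-6))  # rows are orthonormal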
mdiazcl/fuzzbunch-debian
|
2b76c2249ade83a389ae3badb12a1bd09901fd2c
|
windows/Resources/Python/Core/Lib/tarfile.py
|
python
|
TarIter.next
|
(self)
|
return tarinfo
|
Return the next item using TarFile's next() method.
When all members have been read, set TarFile as _loaded.
|
Return the next item using TarFile's next() method.
When all members have been read, set TarFile as _loaded.
|
[
"Return",
"the",
"next",
"item",
"using",
"TarFile",
"s",
"next",
"()",
"method",
".",
"When",
"all",
"members",
"have",
"been",
"read",
"set",
"TarFile",
"as",
"_loaded",
"."
] |
def next(self):
"""Return the next item using TarFile's next() method.
When all members have been read, set TarFile as _loaded.
"""
if not self.tarfile._loaded:
tarinfo = self.tarfile.next()
if not tarinfo:
self.tarfile._loaded = True
raise StopIteration
else:
try:
tarinfo = self.tarfile.members[self.index]
except IndexError:
raise StopIteration
self.index += 1
return tarinfo
|
[
"def",
"next",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"tarfile",
".",
"_loaded",
":",
"tarinfo",
"=",
"self",
".",
"tarfile",
".",
"next",
"(",
")",
"if",
"not",
"tarinfo",
":",
"self",
".",
"tarfile",
".",
"_loaded",
"=",
"True",
"raise",
"StopIteration",
"else",
":",
"try",
":",
"tarinfo",
"=",
"self",
".",
"tarfile",
".",
"members",
"[",
"self",
".",
"index",
"]",
"except",
"IndexError",
":",
"raise",
"StopIteration",
"self",
".",
"index",
"+=",
"1",
"return",
"tarinfo"
] |
https://github.com/mdiazcl/fuzzbunch-debian/blob/2b76c2249ade83a389ae3badb12a1bd09901fd2c/windows/Resources/Python/Core/Lib/tarfile.py#L2171-L2187
|
|
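TarIter above is what backs iteration over a TarFile; a small standard-library round trip that exercises it:

import io
import tarfile

# Write a one-member archive into memory, then iterate it; the for-loop
# drives TarIter.next(), which lazily reads members on the first pass.
buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode="w") as tf:
    data = b"hello"
    info = tarfile.TarInfo(name="hello.txt")
    info.size = len(data)
    tf.addfile(info, io.BytesIO(data))
buf.seek(0)
with tarfile.open(fileobj=buf, mode="r") as tf:
    for member in tf:
        print(member.name, member.size)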
hitchtest/hitch
|
7ccfb878566d51259f448fab7d7ae5bdb92025bf
|
hitch/commandline.py
|
python
|
upgrade
|
()
|
Upgrade all installed hitch packages.
|
Upgrade all installed hitch packages.
|
[
"Upgrade",
"all",
"installed",
"hitch",
"packages",
"."
] |
def upgrade():
"""Upgrade all installed hitch packages."""
hitchdir.check_hitch_directory_integrity()
update_requirements()
pip = get_pip()
package_list = [
p for p in check_output([pip, "freeze"]).decode('utf8').split('\n')
if p != "" and "==" in p
]
version_fixed_package_list = [p.split("==")[0] for p in package_list]
for package in version_fixed_package_list:
call([pip, "install", package, "-U", ])
pip_freeze = check_output([pip, "freeze"]).decode('utf8')
with open("hitchreqs.txt", "w") as hitchreqs_handle:
hitchreqs_handle.write(pip_freeze)
installpackages()
|
[
"def",
"upgrade",
"(",
")",
":",
"hitchdir",
".",
"check_hitch_directory_integrity",
"(",
")",
"update_requirements",
"(",
")",
"pip",
"=",
"get_pip",
"(",
")",
"package_list",
"=",
"[",
"p",
"for",
"p",
"in",
"check_output",
"(",
"[",
"pip",
",",
"\"freeze\"",
"]",
")",
".",
"decode",
"(",
"'utf8'",
")",
".",
"split",
"(",
"'\\n'",
")",
"if",
"p",
"!=",
"\"\"",
"and",
"\"==\"",
"in",
"p",
"]",
"version_fixed_package_list",
"=",
"[",
"p",
".",
"split",
"(",
"\"==\"",
")",
"[",
"0",
"]",
"for",
"p",
"in",
"package_list",
"]",
"for",
"package",
"in",
"version_fixed_package_list",
":",
"call",
"(",
"[",
"pip",
",",
"\"install\"",
",",
"package",
",",
"\"-U\"",
",",
"]",
")",
"pip_freeze",
"=",
"check_output",
"(",
"[",
"pip",
",",
"\"freeze\"",
"]",
")",
".",
"decode",
"(",
"'utf8'",
")",
"with",
"open",
"(",
"\"hitchreqs.txt\"",
",",
"\"w\"",
")",
"as",
"hitchreqs_handle",
":",
"hitchreqs_handle",
".",
"write",
"(",
"pip_freeze",
")",
"installpackages",
"(",
")"
] |
https://github.com/hitchtest/hitch/blob/7ccfb878566d51259f448fab7d7ae5bdb92025bf/hitch/commandline.py#L222-L243
|
||
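The core of upgrade() is the pip-freeze parse; a minimal sketch, assuming a pip on PATH (the record resolves its own interpreter-local pip via get_pip()):

import subprocess

# Parse `pip freeze` into bare package names by dropping the ==version pins,
# exactly as the record does before reinstalling each package with -U.
frozen = subprocess.check_output(["pip", "freeze"]).decode("utf8")
pinned = [line for line in frozen.split("\n") if line and "==" in line]
names = [line.split("==")[0] for line in pinned]
print(names)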
Nicotine-Plus/nicotine-plus
|
6583532193e132206bb2096c77c6ad1ce96c21fa
|
pynicotine/pynicotine.py
|
python
|
NicotineCore.room_ticker_remove
|
(self, msg)
|
Server code: 115
|
Server code: 115
|
[
"Server",
"code",
":",
"115"
] |
def room_ticker_remove(self, msg):
""" Server code: 115 """
log.add_msg_contents(msg)
self.chatrooms.ticker_remove(msg)
|
[
"def",
"room_ticker_remove",
"(",
"self",
",",
"msg",
")",
":",
"log",
".",
"add_msg_contents",
"(",
"msg",
")",
"self",
".",
"chatrooms",
".",
"ticker_remove",
"(",
"msg",
")"
] |
https://github.com/Nicotine-Plus/nicotine-plus/blob/6583532193e132206bb2096c77c6ad1ce96c21fa/pynicotine/pynicotine.py#L836-L840
|
||
dagster-io/dagster
|
b27d569d5fcf1072543533a0c763815d96f90b8f
|
python_modules/dagster/dagster/core/storage/event_log/sql_event_log.py
|
python
|
SqlEventLogStorage.update_event_log_record
|
(self, record_id, event)
|
Utility method for migration scripts to update SQL representation of event records.
|
Utility method for migration scripts to update SQL representation of event records.
|
[
"Utility",
"method",
"for",
"migration",
"scripts",
"to",
"update",
"SQL",
"representation",
"of",
"event",
"records",
"."
] |
def update_event_log_record(self, record_id, event):
"""Utility method for migration scripts to update SQL representation of event records."""
check.int_param(record_id, "record_id")
check.inst_param(event, "event", EventLogEntry)
dagster_event_type = None
asset_key_str = None
if event.is_dagster_event:
dagster_event_type = event.dagster_event.event_type_value
if event.dagster_event.asset_key:
check.inst_param(event.dagster_event.asset_key, "asset_key", AssetKey)
asset_key_str = event.dagster_event.asset_key.to_string()
with self.run_connection(run_id=event.run_id) as conn:
conn.execute(
SqlEventLogStorageTable.update() # pylint: disable=no-value-for-parameter
.where(SqlEventLogStorageTable.c.id == record_id)
.values(
event=serialize_dagster_namedtuple(event),
dagster_event_type=dagster_event_type,
timestamp=datetime.utcfromtimestamp(event.timestamp),
step_key=event.step_key,
asset_key=asset_key_str,
)
)
|
[
"def",
"update_event_log_record",
"(",
"self",
",",
"record_id",
",",
"event",
")",
":",
"check",
".",
"int_param",
"(",
"record_id",
",",
"\"record_id\"",
")",
"check",
".",
"inst_param",
"(",
"event",
",",
"\"event\"",
",",
"EventLogEntry",
")",
"dagster_event_type",
"=",
"None",
"asset_key_str",
"=",
"None",
"if",
"event",
".",
"is_dagster_event",
":",
"dagster_event_type",
"=",
"event",
".",
"dagster_event",
".",
"event_type_value",
"if",
"event",
".",
"dagster_event",
".",
"asset_key",
":",
"check",
".",
"inst_param",
"(",
"event",
".",
"dagster_event",
".",
"asset_key",
",",
"\"asset_key\"",
",",
"AssetKey",
")",
"asset_key_str",
"=",
"event",
".",
"dagster_event",
".",
"asset_key",
".",
"to_string",
"(",
")",
"with",
"self",
".",
"run_connection",
"(",
"run_id",
"=",
"event",
".",
"run_id",
")",
"as",
"conn",
":",
"conn",
".",
"execute",
"(",
"SqlEventLogStorageTable",
".",
"update",
"(",
")",
"# pylint: disable=no-value-for-parameter",
".",
"where",
"(",
"SqlEventLogStorageTable",
".",
"c",
".",
"id",
"==",
"record_id",
")",
".",
"values",
"(",
"event",
"=",
"serialize_dagster_namedtuple",
"(",
"event",
")",
",",
"dagster_event_type",
"=",
"dagster_event_type",
",",
"timestamp",
"=",
"datetime",
".",
"utcfromtimestamp",
"(",
"event",
".",
"timestamp",
")",
",",
"step_key",
"=",
"event",
".",
"step_key",
",",
"asset_key",
"=",
"asset_key_str",
",",
")",
")"
] |
https://github.com/dagster-io/dagster/blob/b27d569d5fcf1072543533a0c763815d96f90b8f/python_modules/dagster/dagster/core/storage/event_log/sql_event_log.py#L457-L480
|
||
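The update().where().values() chain in the record is plain SQLAlchemy Core; a runnable sketch against a throwaway SQLite table (table and column names are illustrative, not dagster's schema):

import sqlalchemy as sa

metadata = sa.MetaData()
events = sa.Table(
    "event_logs", metadata,
    sa.Column("id", sa.Integer, primary_key=True),
    sa.Column("event", sa.Text),
)
engine = sa.create_engine("sqlite://")
metadata.create_all(engine)
with engine.connect() as conn:
    conn.execute(events.insert().values(id=1, event="old"))
    # Same shape as the record: update one row by primary key.
    conn.execute(events.update().where(events.c.id == 1).values(event="migrated"))
    print(conn.execute(events.select()).fetchall())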
beeware/ouroboros
|
a29123c6fab6a807caffbb7587cf548e0c370296
|
ouroboros/turtle.py
|
python
|
__methodDict
|
(cls, _dict)
|
helper function for Scrolled Canvas
|
helper function for Scrolled Canvas
|
[
"helper",
"function",
"for",
"Scrolled",
"Canvas"
] |
def __methodDict(cls, _dict):
"""helper function for Scrolled Canvas"""
baseList = list(cls.__bases__)
baseList.reverse()
for _super in baseList:
__methodDict(_super, _dict)
for key, value in cls.__dict__.items():
if type(value) == types.FunctionType:
_dict[key] = value
|
[
"def",
"__methodDict",
"(",
"cls",
",",
"_dict",
")",
":",
"baseList",
"=",
"list",
"(",
"cls",
".",
"__bases__",
")",
"baseList",
".",
"reverse",
"(",
")",
"for",
"_super",
"in",
"baseList",
":",
"__methodDict",
"(",
"_super",
",",
"_dict",
")",
"for",
"key",
",",
"value",
"in",
"cls",
".",
"__dict__",
".",
"items",
"(",
")",
":",
"if",
"type",
"(",
"value",
")",
"==",
"types",
".",
"FunctionType",
":",
"_dict",
"[",
"key",
"]",
"=",
"value"
] |
https://github.com/beeware/ouroboros/blob/a29123c6fab6a807caffbb7587cf548e0c370296/ouroboros/turtle.py#L288-L296
|
||
evennia/evennia
|
fa79110ba6b219932f22297838e8ac72ebc0be0e
|
evennia/server/portal/amp_server.py
|
python
|
AMPServerProtocol.data_to_server
|
(self, command, sessid, **kwargs)
|
Send data across the wire to the Server.
Args:
command (AMP Command): A protocol send command.
sessid (int): A unique Session id.
kwargs (any): Data to send. This will be pickled.
Returns:
deferred (deferred or None): A deferred with an errback.
Notes:
Data will be sent across the wire pickled as a tuple
(sessid, kwargs).
|
Send data across the wire to the Server.
|
[
"Send",
"data",
"across",
"the",
"wire",
"to",
"the",
"Server",
"."
] |
def data_to_server(self, command, sessid, **kwargs):
"""
Send data across the wire to the Server.
Args:
command (AMP Command): A protocol send command.
sessid (int): A unique Session id.
kwargs (any): Data to send. This will be pickled.
Returns:
deferred (deferred or None): A deferred with an errback.
Notes:
Data will be sent across the wire pickled as a tuple
(sessid, kwargs).
"""
# print("portal data_to_server: {}, {}, {}".format(command, sessid, kwargs))
if self.factory.server_connection:
return self.factory.server_connection.callRemote(
command, packed_data=amp.dumps((sessid, kwargs))
).addErrback(self.errback, command.key)
else:
# if no server connection is available, broadcast
return self.broadcast(command, sessid, packed_data=amp.dumps((sessid, kwargs)))
|
[
"def",
"data_to_server",
"(",
"self",
",",
"command",
",",
"sessid",
",",
"*",
"*",
"kwargs",
")",
":",
"# print(\"portal data_to_server: {}, {}, {}\".format(command, sessid, kwargs))",
"if",
"self",
".",
"factory",
".",
"server_connection",
":",
"return",
"self",
".",
"factory",
".",
"server_connection",
".",
"callRemote",
"(",
"command",
",",
"packed_data",
"=",
"amp",
".",
"dumps",
"(",
"(",
"sessid",
",",
"kwargs",
")",
")",
")",
".",
"addErrback",
"(",
"self",
".",
"errback",
",",
"command",
".",
"key",
")",
"else",
":",
"# if no server connection is available, broadcast",
"return",
"self",
".",
"broadcast",
"(",
"command",
",",
"sessid",
",",
"packed_data",
"=",
"amp",
".",
"dumps",
"(",
"(",
"sessid",
",",
"kwargs",
")",
")",
")"
] |
https://github.com/evennia/evennia/blob/fa79110ba6b219932f22297838e8ac72ebc0be0e/evennia/server/portal/amp_server.py#L126-L150
|
||
jantman/awslimitchecker
|
411ad9e734ddb16d87720ff5b994f19f47b8b098
|
awslimitchecker/services/vpc.py
|
python
|
_VpcService._find_usage_network_interfaces
|
(self)
|
find usage of network interfaces
|
find usage of network interfaces
|
[
"find",
"usage",
"of",
"network",
"interfaces"
] |
def _find_usage_network_interfaces(self):
"""find usage of network interfaces"""
enis = paginate_dict(
self.conn.describe_network_interfaces,
alc_marker_path=['NextToken'],
alc_data_path=['NetworkInterfaces'],
alc_marker_param='NextToken',
Filters=[{'Name': 'owner-id', 'Values': [self.current_account_id]}]
)
self.limits['Network interfaces per Region']._add_current_usage(
len(enis['NetworkInterfaces']),
aws_type='AWS::EC2::NetworkInterface'
)
|
[
"def",
"_find_usage_network_interfaces",
"(",
"self",
")",
":",
"enis",
"=",
"paginate_dict",
"(",
"self",
".",
"conn",
".",
"describe_network_interfaces",
",",
"alc_marker_path",
"=",
"[",
"'NextToken'",
"]",
",",
"alc_data_path",
"=",
"[",
"'NetworkInterfaces'",
"]",
",",
"alc_marker_param",
"=",
"'NextToken'",
",",
"Filters",
"=",
"[",
"{",
"'Name'",
":",
"'owner-id'",
",",
"'Values'",
":",
"[",
"self",
".",
"current_account_id",
"]",
"}",
"]",
")",
"self",
".",
"limits",
"[",
"'Network interfaces per Region'",
"]",
".",
"_add_current_usage",
"(",
"len",
"(",
"enis",
"[",
"'NetworkInterfaces'",
"]",
")",
",",
"aws_type",
"=",
"'AWS::EC2::NetworkInterface'",
")"
] |
https://github.com/jantman/awslimitchecker/blob/411ad9e734ddb16d87720ff5b994f19f47b8b098/awslimitchecker/services/vpc.py#L238-L251
|
||
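The same ENI count can be taken with boto3's built-in paginator instead of awslimitchecker's paginate_dict helper; a hedged sketch where credentials and region come from the usual boto3 environment and the owner-id is a placeholder:

import boto3

ec2 = boto3.client("ec2")
paginator = ec2.get_paginator("describe_network_interfaces")
count = 0
for page in paginator.paginate(
        Filters=[{"Name": "owner-id", "Values": ["123456789012"]}]):
    # Each page mirrors one describe_network_interfaces response.
    count += len(page["NetworkInterfaces"])
print("ENIs in region:", count)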
rockstor/rockstor-core
|
81a0d5f5e0a6dfe5a922199828f66eeab0253e65
|
src/rockstor/system/ssh.py
|
python
|
update_sftp_config
|
(input_map)
|
Fetch sftp-related customization settings from database
and writes them to SSHD_CONFIG.
:param input_map: dictionary of user,directory pairs.
:return:
|
Fetch sftp-related customization settings from database
and writes them to SSHD_CONFIG.
:param input_map: dictionary of user,directory pairs.
:return:
|
[
"Fetch",
"sftp",
"-",
"related",
"customization",
"settings",
"from",
"database",
"and",
"writes",
"them",
"to",
"SSHD_CONFIG",
".",
":",
"param",
"input_map",
":",
"dictionary",
"of",
"user",
"directory",
"pairs",
".",
":",
"return",
":"
] |
def update_sftp_config(input_map):
"""
Fetch sftp-related customization settings from database
and writes them to SSHD_CONFIG.
:param input_map: dictionary of user,directory pairs.
:return:
"""
fo, npath = mkstemp()
userstr = "AllowUsers root {}".format(" ".join(input_map.keys()))
with open(SSHD_CONFIG) as sfo, open(npath, "w") as tfo:
for line in sfo.readlines():
if re.match(settings.SSHD_HEADER, line) is None:
tfo.write(line)
else:
break
tfo.write("{}\n".format(settings.SSHD_HEADER))
# Detect sftp service status and ensure we maintain it
if is_sftp_running():
tfo.write("{}\n".format(settings.SFTP_STR))
tfo.write("{}\n".format(userstr))
# Set options for each user according to openSUSE's defaults:
# https://en.opensuse.org/SDB:SFTP_server_with_Chroot#Match_rule_block
# TODO: implement webUI element to re-enable rsync over ssh by omitting
# the `ForceCommand internal sftp` line below.
for user in input_map:
tfo.write("Match User {}\n".format(user))
tfo.write("\tForceCommand internal-sftp\n")
tfo.write("\tChrootDirectory {}\n".format(input_map[user]))
tfo.write("\tX11Forwarding no\n")
tfo.write("\tAllowTcpForwarding no\n")
move(npath, SSHD_CONFIG)
try:
systemctl("sshd", "reload")
except:
return systemctl("sshd", "restart")
|
[
"def",
"update_sftp_config",
"(",
"input_map",
")",
":",
"fo",
",",
"npath",
"=",
"mkstemp",
"(",
")",
"userstr",
"=",
"\"AllowUsers root {}\"",
".",
"format",
"(",
"\" \"",
".",
"join",
"(",
"input_map",
".",
"keys",
"(",
")",
")",
")",
"with",
"open",
"(",
"SSHD_CONFIG",
")",
"as",
"sfo",
",",
"open",
"(",
"npath",
",",
"\"w\"",
")",
"as",
"tfo",
":",
"for",
"line",
"in",
"sfo",
".",
"readlines",
"(",
")",
":",
"if",
"re",
".",
"match",
"(",
"settings",
".",
"SSHD_HEADER",
",",
"line",
")",
"is",
"None",
":",
"tfo",
".",
"write",
"(",
"line",
")",
"else",
":",
"break",
"tfo",
".",
"write",
"(",
"\"{}\\n\"",
".",
"format",
"(",
"settings",
".",
"SSHD_HEADER",
")",
")",
"# Detect sftp service status and ensure we maintain it",
"if",
"is_sftp_running",
"(",
")",
":",
"tfo",
".",
"write",
"(",
"\"{}\\n\"",
".",
"format",
"(",
"settings",
".",
"SFTP_STR",
")",
")",
"tfo",
".",
"write",
"(",
"\"{}\\n\"",
".",
"format",
"(",
"userstr",
")",
")",
"# Set options for each user according to openSUSE's defaults:",
"# https://en.opensuse.org/SDB:SFTP_server_with_Chroot#Match_rule_block",
"# TODO: implement webUI element to re-enable rsync over ssh by omitting",
"# the `ForceCommand internal sftp` line below.",
"for",
"user",
"in",
"input_map",
":",
"tfo",
".",
"write",
"(",
"\"Match User {}\\n\"",
".",
"format",
"(",
"user",
")",
")",
"tfo",
".",
"write",
"(",
"\"\\tForceCommand internal-sftp\\n\"",
")",
"tfo",
".",
"write",
"(",
"\"\\tChrootDirectory {}\\n\"",
".",
"format",
"(",
"input_map",
"[",
"user",
"]",
")",
")",
"tfo",
".",
"write",
"(",
"\"\\tX11Forwarding no\\n\"",
")",
"tfo",
".",
"write",
"(",
"\"\\tAllowTcpForwarding no\\n\"",
")",
"move",
"(",
"npath",
",",
"SSHD_CONFIG",
")",
"try",
":",
"systemctl",
"(",
"\"sshd\"",
",",
"\"reload\"",
")",
"except",
":",
"return",
"systemctl",
"(",
"\"sshd\"",
",",
"\"restart\"",
")"
] |
https://github.com/rockstor/rockstor-core/blob/81a0d5f5e0a6dfe5a922199828f66eeab0253e65/src/rockstor/system/ssh.py#L36-L71
|
||
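update_sftp_config relies on the classic temp-file-then-move rewrite; a generic sketch of just that pattern (unlike the record, it also closes the mkstemp descriptor that the record leaves open as fo):

import os
from shutil import move
from tempfile import mkstemp

def rewrite_config(path, extra_lines):
    # Write the new contents to a temp file, then swap it into place with a
    # single move so a crash mid-write never leaves a half-written config.
    fd, npath = mkstemp()
    os.close(fd)
    with open(path) as src, open(npath, "w") as dst:
        for line in src:
            dst.write(line)
        for line in extra_lines:
            dst.write(line + "\n")
    move(npath, path)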
danecjensen/subscribely
|
4d6ac60358b5fe26f0c01be68f1ba063df3b1ea0
|
src/httplib2/__init__.py
|
python
|
Http.__init__
|
(self, cache=None, timeout=None,
proxy_info=ProxyInfo.from_environment,
ca_certs=None, disable_ssl_certificate_validation=False)
|
If 'cache' is a string then it is used as a directory name for
a disk cache. Otherwise it must be an object that supports the
same interface as FileCache.
All timeouts are in seconds. If None is passed for timeout
then Python's default timeout for sockets will be used. See
for example the docs of socket.setdefaulttimeout():
http://docs.python.org/library/socket.html#socket.setdefaulttimeout
`proxy_info` may be:
- a callable that takes the http scheme ('http' or 'https') and
returns a ProxyInfo instance per request. By default, uses
ProxyInfo.from_environment.
- a ProxyInfo instance (static proxy config).
- None (proxy disabled).
ca_certs is the path of a file containing root CA certificates for SSL
server certificate validation. By default, a CA cert file bundled with
httplib2 is used.
If disable_ssl_certificate_validation is true, SSL cert validation will
not be performed.
|
If 'cache' is a string then it is used as a directory name for
a disk cache. Otherwise it must be an object that supports the
same interface as FileCache.
|
[
"If",
"cache",
"is",
"a",
"string",
"then",
"it",
"is",
"used",
"as",
"a",
"directory",
"name",
"for",
"a",
"disk",
"cache",
".",
"Otherwise",
"it",
"must",
"be",
"an",
"object",
"that",
"supports",
"the",
"same",
"interface",
"as",
"FileCache",
"."
] |
def __init__(self, cache=None, timeout=None,
proxy_info=ProxyInfo.from_environment,
ca_certs=None, disable_ssl_certificate_validation=False):
"""
If 'cache' is a string then it is used as a directory name for
a disk cache. Otherwise it must be an object that supports the
same interface as FileCache.
All timeouts are in seconds. If None is passed for timeout
then Python's default timeout for sockets will be used. See
for example the docs of socket.setdefaulttimeout():
http://docs.python.org/library/socket.html#socket.setdefaulttimeout
`proxy_info` may be:
- a callable that takes the http scheme ('http' or 'https') and
returns a ProxyInfo instance per request. By default, uses
ProxyInfo.from_environment.
- a ProxyInfo instance (static proxy config).
- None (proxy disabled).
ca_certs is the path of a file containing root CA certificates for SSL
server certificate validation. By default, a CA cert file bundled with
httplib2 is used.
If disable_ssl_certificate_validation is true, SSL cert validation will
not be performed.
"""
self.proxy_info = proxy_info
self.ca_certs = ca_certs
self.disable_ssl_certificate_validation = \
disable_ssl_certificate_validation
# Map domain name to an httplib connection
self.connections = {}
# The location of the cache, for now a directory
# where cached responses are held.
if cache and isinstance(cache, basestring):
self.cache = FileCache(cache)
else:
self.cache = cache
# Name/password
self.credentials = Credentials()
# Key/cert
self.certificates = KeyCerts()
# authorization objects
self.authorizations = []
# If set to False then no redirects are followed, even safe ones.
self.follow_redirects = True
# Which HTTP methods do we apply optimistic concurrency to, i.e.
# which methods get an "if-match:" etag header added to them.
self.optimistic_concurrency_methods = ["PUT", "PATCH"]
# If 'follow_redirects' is True, and this is set to True then
# all redirects are followed, including unsafe ones.
self.follow_all_redirects = False
self.ignore_etag = False
self.force_exception_to_status_code = False
self.timeout = timeout
|
[
"def",
"__init__",
"(",
"self",
",",
"cache",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"proxy_info",
"=",
"ProxyInfo",
".",
"from_environment",
",",
"ca_certs",
"=",
"None",
",",
"disable_ssl_certificate_validation",
"=",
"False",
")",
":",
"self",
".",
"proxy_info",
"=",
"proxy_info",
"self",
".",
"ca_certs",
"=",
"ca_certs",
"self",
".",
"disable_ssl_certificate_validation",
"=",
"disable_ssl_certificate_validation",
"# Map domain name to an httplib connection",
"self",
".",
"connections",
"=",
"{",
"}",
"# The location of the cache, for now a directory",
"# where cached responses are held.",
"if",
"cache",
"and",
"isinstance",
"(",
"cache",
",",
"basestring",
")",
":",
"self",
".",
"cache",
"=",
"FileCache",
"(",
"cache",
")",
"else",
":",
"self",
".",
"cache",
"=",
"cache",
"# Name/password",
"self",
".",
"credentials",
"=",
"Credentials",
"(",
")",
"# Key/cert",
"self",
".",
"certificates",
"=",
"KeyCerts",
"(",
")",
"# authorization objects",
"self",
".",
"authorizations",
"=",
"[",
"]",
"# If set to False then no redirects are followed, even safe ones.",
"self",
".",
"follow_redirects",
"=",
"True",
"# Which HTTP methods do we apply optimistic concurrency to, i.e.",
"# which methods get an \"if-match:\" etag header added to them.",
"self",
".",
"optimistic_concurrency_methods",
"=",
"[",
"\"PUT\"",
",",
"\"PATCH\"",
"]",
"# If 'follow_redirects' is True, and this is set to True then",
"# all redirecs are followed, including unsafe ones.",
"self",
".",
"follow_all_redirects",
"=",
"False",
"self",
".",
"ignore_etag",
"=",
"False",
"self",
".",
"force_exception_to_status_code",
"=",
"False",
"self",
".",
"timeout",
"=",
"timeout"
] |
https://github.com/danecjensen/subscribely/blob/4d6ac60358b5fe26f0c01be68f1ba063df3b1ea0/src/httplib2/__init__.py#L1133-L1198
|
||
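Typical use of the constructor documented above; a short sketch (the URL is a placeholder):

import httplib2

# A disk cache directory plus a socket timeout, then one GET; Http.request
# returns a (response, content) pair.
h = httplib2.Http(cache=".cache", timeout=10)
response, content = h.request("http://example.com/", "GET")
print(response.status, len(content))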
neulab/xnmt
|
d93f8f3710f986f36eb54e9ff3976a6b683da2a4
|
xnmt/transducers/base.py
|
python
|
SeqTransducer.transduce
|
(self, seq: 'expression_seqs.ExpressionSequence')
|
Parameters should be :class:`expression_seqs.ExpressionSequence` objects wherever appropriate
Args:
seq: An expression sequence representing the input to the transduction
Returns:
result of transduction, an expression sequence
|
Parameters should be :class:`expression_seqs.ExpressionSequence` objects wherever appropriate
|
[
"Parameters",
"should",
"be",
":",
"class",
":",
"expression_seqs",
".",
"ExpressionSequence",
"objects",
"wherever",
"appropriate"
] |
def transduce(self, seq: 'expression_seqs.ExpressionSequence') -> 'expression_seqs.ExpressionSequence':
"""
Parameters should be :class:`expression_seqs.ExpressionSequence` objects wherever appropriate
Args:
seq: An expression sequence representing the input to the transduction
Returns:
result of transduction, an expression sequence
"""
raise NotImplementedError("SeqTransducer.transduce() must be implemented by SeqTransducer sub-classes")
|
[
"def",
"transduce",
"(",
"self",
",",
"seq",
":",
"'expression_seqs.ExpressionSequence'",
")",
"->",
"'expression_seqs.ExpressionSequence'",
":",
"raise",
"NotImplementedError",
"(",
"\"SeqTransducer.transduce() must be implemented by SeqTransducer sub-classes\"",
")"
] |
https://github.com/neulab/xnmt/blob/d93f8f3710f986f36eb54e9ff3976a6b683da2a4/xnmt/transducers/base.py#L46-L56
|
||
hyperledger/fabric-sdk-py
|
8ee33a8981887e37950dc0f36a7ec63b3a5ba5c3
|
hfc/fabric/channel/instantiation.py
|
python
|
Instantiation.handle
|
(self, tran_prop_req, scheduler=None)
|
return _instantiate_chaincode(self._chain, tran_prop_req, scheduler)
|
Execute chaincode instantiation transaction proposal request.
:param tran_prop_req: chaincode instantiation transaction proposal request
:param scheduler: see rx.Scheduler, defaults to None
:return: An rx.Observer wrapper of chaincode instantiation response
|
Execute chaincode instantiation transaction proposal request.
|
[
"Execute",
"chaincode",
"instantiation",
"transaction",
"proposal",
"request",
"."
] |
def handle(self, tran_prop_req, scheduler=None):
"""Execute chaincode instantiation transaction proposal request.
:param tran_prop_req: chaincode instantiation transaction proposal request
:param scheduler: see rx.Scheduler, defaults to None
:return: An rx.Observer wrapper of chaincode instantiation response
"""
return _instantiate_chaincode(self._chain, tran_prop_req, scheduler)
|
[
"def",
"handle",
"(",
"self",
",",
"tran_prop_req",
",",
"scheduler",
"=",
"None",
")",
":",
"return",
"_instantiate_chaincode",
"(",
"self",
".",
"_chain",
",",
"tran_prop_req",
",",
"scheduler",
")"
] |
https://github.com/hyperledger/fabric-sdk-py/blob/8ee33a8981887e37950dc0f36a7ec63b3a5ba5c3/hfc/fabric/channel/instantiation.py#L36-L43
|
|
hubblestack/hubble
|
763142474edcecdec5fd25591dc29c3536e8f969
|
hubblestack/audit/readfile.py
|
python
|
validate_params
|
(block_id, block_dict, extra_args=None)
|
Validate all mandatory params required for this module
:param block_id:
id of the block
:param block_dict:
parameter for this module
:param extra_args:
Extra argument dictionary, (If any)
Example: {'chaining_args': {'result': '/some/path', 'status': True},
'caller': 'Audit'}
Raises:
HubbleCheckValidationError: For any validation error
|
Validate all mandatory params required for this module
|
[
"Validate",
"all",
"mandatory",
"params",
"required",
"for",
"this",
"module"
] |
def validate_params(block_id, block_dict, extra_args=None):
"""
Validate all mandatory params required for this module
:param block_id:
id of the block
:param block_dict:
parameter for this module
:param extra_args:
Extra argument dictionary, (If any)
Example: {'chaining_args': {'result': '/some/path', 'status': True},
'caller': 'Audit'}
Raises:
HubbleCheckValidationError: For any validation error
"""
log.debug('Module: readfile Start validating params for check-id: {0}'.format(block_id))
filepath = runner_utils.get_param_for_module(block_id, block_dict, 'path')
file_format = runner_utils.get_param_for_module(block_id, block_dict, 'format')
error = {}
if not filepath:
error['path'] = 'No filepath provided'
if not file_format:
error['format'] = 'No file format provided'
if error:
raise HubbleCheckValidationError(error)
log.debug('Validation success for check-id: {0}'.format(block_id))
|
[
"def",
"validate_params",
"(",
"block_id",
",",
"block_dict",
",",
"extra_args",
"=",
"None",
")",
":",
"log",
".",
"debug",
"(",
"'Module: readfile Start validating params for check-id: {0}'",
".",
"format",
"(",
"block_id",
")",
")",
"filepath",
"=",
"runner_utils",
".",
"get_param_for_module",
"(",
"block_id",
",",
"block_dict",
",",
"'path'",
")",
"file_format",
"=",
"runner_utils",
".",
"get_param_for_module",
"(",
"block_id",
",",
"block_dict",
",",
"'format'",
")",
"error",
"=",
"{",
"}",
"if",
"not",
"filepath",
":",
"error",
"[",
"'path'",
"]",
"=",
"'No filepath provided'",
"if",
"not",
"file_format",
":",
"error",
"[",
"'format'",
"]",
"=",
"'No file format provided'",
"if",
"error",
":",
"raise",
"HubbleCheckValidationError",
"(",
"error",
")",
"log",
".",
"debug",
"(",
"'Validation success for check-id: {0}'",
".",
"format",
"(",
"block_id",
")",
")"
] |
https://github.com/hubblestack/hubble/blob/763142474edcecdec5fd25591dc29c3536e8f969/hubblestack/audit/readfile.py#L238-L268
|
||
sagemath/sage
|
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
|
src/sage/parallel/parallelism.py
|
python
|
Parallelism.set
|
(self, field=None, nproc=None)
|
r"""
Set the number of processes to be launched for parallel computations
regarding some specific field.
INPUT:
- ``field`` -- (default: ``None``) string specifying the computational
field for which the number of parallel processes is to be set; if
``None``, all fields are considered
- ``nproc`` -- (default: ``None``) number of processes to be used for
parallelization; if ``None``, the number of processes will be set to
the default value, which, unless redefined by :meth:`set_default`,
is the total number of cores found on the computer.
EXAMPLES:
The default is a single processor (no parallelization)::
sage: Parallelism()
Number of processes for parallelization:
- linbox computations: 1
- tensor computations: 1
Asking for parallelization on 4 cores in tensor algebra::
sage: Parallelism().set('tensor', nproc=4)
sage: Parallelism()
Number of processes for parallelization:
- linbox computations: 1
- tensor computations: 4
Using all the cores available on the computer::
sage: Parallelism().set('tensor')
sage: Parallelism() # random (depends on the computer)
Number of processes for parallelization:
- linbox computations: 1
- tensor computations: 8
Using 6 cores in all parallelizations::
sage: Parallelism().set(nproc=6)
sage: Parallelism()
Number of processes for parallelization:
- linbox computations: 6
- tensor computations: 6
Using all the cores available on the computer in all parallelizations::
sage: Parallelism().set()
sage: Parallelism() # random (depends on the computer)
Number of processes for parallelization:
- linbox computations: 8
- tensor computations: 8
Switching off the parallelization::
sage: Parallelism().set(nproc=1)
sage: Parallelism()
Number of processes for parallelization:
- linbox computations: 1
- tensor computations: 1
|
r"""
Set the number of processes to be launched for parallel computations
regarding some specific field.
|
[
"r",
"Set",
"the",
"number",
"of",
"processes",
"to",
"be",
"launched",
"for",
"parallel",
"computations",
"regarding",
"some",
"specific",
"field",
"."
] |
def set(self, field=None, nproc=None):
r"""
Set the number of processes to be launched for parallel computations
regarding some specific field.
INPUT:
- ``field`` -- (default: ``None``) string specifying the computational
field for which the number of parallel processes is to be set; if
``None``, all fields are considered
- ``nproc`` -- (default: ``None``) number of processes to be used for
parallelization; if ``None``, the number of processes will be set to
the default value, which, unless redefined by :meth:`set_default`,
is the total number of cores found on the computer.
EXAMPLES:
The default is a single processor (no parallelization)::
sage: Parallelism()
Number of processes for parallelization:
- linbox computations: 1
- tensor computations: 1
Asking for parallelization on 4 cores in tensor algebra::
sage: Parallelism().set('tensor', nproc=4)
sage: Parallelism()
Number of processes for parallelization:
- linbox computations: 1
- tensor computations: 4
Using all the cores available on the computer::
sage: Parallelism().set('tensor')
sage: Parallelism() # random (depends on the computer)
Number of processes for parallelization:
- linbox computations: 1
- tensor computations: 8
Using 6 cores in all parallelizations::
sage: Parallelism().set(nproc=6)
sage: Parallelism()
Number of processes for parallelization:
- linbox computations: 6
- tensor computations: 6
Using all the cores available on the computer in all parallelizations::
sage: Parallelism().set()
sage: Parallelism() # random (depends on the computer)
Number of processes for parallelization:
- linbox computations: 8
- tensor computations: 8
Switching off the parallelization::
sage: Parallelism().set(nproc=1)
sage: Parallelism()
Number of processes for parallelization:
- linbox computations: 1
- tensor computations: 1
"""
if field is None:
for fi in self._nproc:
self.set(field=fi, nproc=nproc)
else:
if field not in self._nproc:
raise KeyError("entry for field {} is not ".format(field) +
"implemented in Parallelism")
if nproc is None:
self._nproc[field] = self._default
else:
if not isinstance(nproc, (int,Integer)):
raise TypeError("nproc must be integer")
self._nproc[field] = nproc
|
[
"def",
"set",
"(",
"self",
",",
"field",
"=",
"None",
",",
"nproc",
"=",
"None",
")",
":",
"if",
"field",
"is",
"None",
":",
"for",
"fi",
"in",
"self",
".",
"_nproc",
":",
"self",
".",
"set",
"(",
"field",
"=",
"fi",
",",
"nproc",
"=",
"nproc",
")",
"else",
":",
"if",
"field",
"not",
"in",
"self",
".",
"_nproc",
":",
"raise",
"KeyError",
"(",
"\"entry for field {} is not \"",
".",
"format",
"(",
"field",
")",
"+",
"\"implemented in Parallelism\"",
")",
"if",
"nproc",
"is",
"None",
":",
"self",
".",
"_nproc",
"[",
"field",
"]",
"=",
"self",
".",
"_default",
"else",
":",
"if",
"not",
"isinstance",
"(",
"nproc",
",",
"(",
"int",
",",
"Integer",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"nproc must be integer\"",
")",
"self",
".",
"_nproc",
"[",
"field",
"]",
"=",
"nproc"
] |
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/parallel/parallelism.py#L170-L247
|
||
HealthCatalyst/healthcareai-py
|
cb82b94990fb3046edccb3740ae5653adce70940
|
healthcareai/common/cardinality_checks.py
|
python
|
calculate_cardinality
|
(dataframe)
|
return results
|
Find cardinality of columns in a dataframe.
This function counts the number of rows in the dataframe, counts the unique
values in each column and sorts by the ratio of unique values relative to
the number of rows.
This is useful for profiling training data.
Args:
dataframe (pandas.core.frame.DataFrame):
Returns:
pandas.core.frame.DataFrame: dataframe sorted by cardinality (unique
count ratio)
|
Find cardinality of columns in a dataframe.
|
[
"Find",
"cardinality",
"of",
"columns",
"in",
"a",
"dataframe",
"."
] |
def calculate_cardinality(dataframe):
"""
Find cardinality of columns in a dataframe.
This function counts the number of rows in the dataframe, counts the unique
values in each column and sorts by the ratio of unique values relative to
the number of rows.
This is useful for profiling training data.
Args:
dataframe (pandas.core.frame.DataFrame):
Returns:
pandas.core.frame.DataFrame: dataframe sorted by cardinality (unique
count ratio)
"""
record_count = len(dataframe)
result_list = []
for column in dataframe:
count = len(dataframe[column].unique())
ordinal_ratio = count / record_count
result_list.append([column, count, ordinal_ratio])
results = pd.DataFrame(result_list)
results.columns = ['Feature Name', 'unique_value_count', 'unique_ratio']
results.sort_values('unique_ratio', ascending=False, inplace=True)
results.reset_index(inplace=True)
return results
|
[
"def",
"calculate_cardinality",
"(",
"dataframe",
")",
":",
"record_count",
"=",
"len",
"(",
"dataframe",
")",
"result_list",
"=",
"[",
"]",
"for",
"column",
"in",
"dataframe",
":",
"count",
"=",
"len",
"(",
"dataframe",
"[",
"column",
"]",
".",
"unique",
"(",
")",
")",
"ordinal_ratio",
"=",
"count",
"/",
"record_count",
"result_list",
".",
"append",
"(",
"[",
"column",
",",
"count",
",",
"ordinal_ratio",
"]",
")",
"results",
"=",
"pd",
".",
"DataFrame",
"(",
"result_list",
")",
"results",
".",
"columns",
"=",
"[",
"'Feature Name'",
",",
"'unique_value_count'",
",",
"'unique_ratio'",
"]",
"results",
".",
"sort_values",
"(",
"'unique_ratio'",
",",
"ascending",
"=",
"False",
",",
"inplace",
"=",
"True",
")",
"results",
".",
"reset_index",
"(",
"inplace",
"=",
"True",
")",
"return",
"results"
] |
https://github.com/HealthCatalyst/healthcareai-py/blob/cb82b94990fb3046edccb3740ae5653adce70940/healthcareai/common/cardinality_checks.py#L9-L40
|
|
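A quick demo of calculate_cardinality's idea on a toy frame; pandas' nunique() computes the same per-column unique counts in one call:

import pandas as pd

df = pd.DataFrame({
    "id": [1, 2, 3, 4],                 # unique per row -> ratio 1.0
    "state": ["WA", "WA", "OR", "OR"],  # ratio 0.5
    "flag": [True, True, True, True],   # ratio 0.25
})
# Same numbers the record builds with len(df[col].unique()) in a loop.
ratios = (df.nunique() / len(df)).sort_values(ascending=False)
print(ratios)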
pysmt/pysmt
|
ade4dc2a825727615033a96d31c71e9f53ce4764
|
pysmt/solvers/msat.py
|
python
|
MathSATOptions.__init__
|
(self, **base_options)
|
[] |
def __init__(self, **base_options):
SolverOptions.__init__(self, **base_options)
|
[
"def",
"__init__",
"(",
"self",
",",
"*",
"*",
"base_options",
")",
":",
"SolverOptions",
".",
"__init__",
"(",
"self",
",",
"*",
"*",
"base_options",
")"
] |
https://github.com/pysmt/pysmt/blob/ade4dc2a825727615033a96d31c71e9f53ce4764/pysmt/solvers/msat.py#L139-L140
|
||||
rcorcs/NatI
|
fdf014f4292afdc95250add7b6658468043228e1
|
en/wordnet/wntools.py
|
python
|
hypernyms
|
(source)
|
return closure(source, HYPERNYM)
|
Return source and its hypernyms. If source is a Word, return
the union of the hypernyms of its senses.
|
Return source and its hypernyms. If source is a Word, return
the union of the hypernyms of its senses.
|
[
"Return",
"source",
"and",
"its",
"hypernyms",
".",
"If",
"source",
"is",
"a",
"Word",
"return",
"the",
"union",
"of",
"the",
"hypernyms",
"of",
"its",
"senses",
"."
] |
def hypernyms(source):
"""Return source and its hypernyms. If source is a Word, return
the union of the hypernyms of its senses."""
return closure(source, HYPERNYM)
|
[
"def",
"hypernyms",
"(",
"source",
")",
":",
"return",
"closure",
"(",
"source",
",",
"HYPERNYM",
")"
] |
https://github.com/rcorcs/NatI/blob/fdf014f4292afdc95250add7b6658468043228e1/en/wordnet/wntools.py#L96-L100
|
|
replit-archive/empythoned
|
977ec10ced29a3541a4973dc2b59910805695752
|
cpython/Tools/gdb/libpython.py
|
python
|
PyObjectPtr.is_optimized_out
|
(self)
|
return self._gdbval.is_optimized_out
|
Is the value of the underlying PyObject* visible to the debugger?
This can vary with the precise version of the compiler used to build
Python, and the precise version of gdb.
See e.g. https://bugzilla.redhat.com/show_bug.cgi?id=556975 with
PyEval_EvalFrameEx's "f"
|
Is the value of the underlying PyObject* visible to the debugger?
|
[
"Is",
"the",
"value",
"of",
"the",
"underlying",
"PyObject",
"*",
"visible",
"to",
"the",
"debugger?"
] |
def is_optimized_out(self):
'''
Is the value of the underlying PyObject* visible to the debugger?
This can vary with the precise version of the compiler used to build
Python, and the precise version of gdb.
See e.g. https://bugzilla.redhat.com/show_bug.cgi?id=556975 with
PyEval_EvalFrameEx's "f"
'''
return self._gdbval.is_optimized_out
|
[
"def",
"is_optimized_out",
"(",
"self",
")",
":",
"return",
"self",
".",
"_gdbval",
".",
"is_optimized_out"
] |
https://github.com/replit-archive/empythoned/blob/977ec10ced29a3541a4973dc2b59910805695752/cpython/Tools/gdb/libpython.py#L198-L208
|
|
CvvT/dumpDex
|
92ab3b7e996194a06bf1dd5538a4954e8a5ee9c1
|
python/idc.py
|
python
|
GetStringType
|
(ea)
|
Get string type
@param ea: linear address
@return: One of ASCSTR_... constants
|
Get string type
|
[
"Get",
"string",
"type"
] |
def GetStringType(ea):
"""
Get string type
@param ea: linear address
@return: One of ASCSTR_... constants
"""
ti = idaapi.opinfo_t()
if idaapi.get_opinfo(ea, 0, GetFlags(ea), ti):
return ti.strtype
else:
return None
|
[
"def",
"GetStringType",
"(",
"ea",
")",
":",
"ti",
"=",
"idaapi",
".",
"opinfo_t",
"(",
")",
"if",
"idaapi",
".",
"get_opinfo",
"(",
"ea",
",",
"0",
",",
"GetFlags",
"(",
"ea",
")",
",",
"ti",
")",
":",
"return",
"ti",
".",
"strtype",
"else",
":",
"return",
"None"
] |
https://github.com/CvvT/dumpDex/blob/92ab3b7e996194a06bf1dd5538a4954e8a5ee9c1/python/idc.py#L2448-L2461
|
||
harvey1673/pyktrader
|
9e4f9211cb716786d443f3800010d901bc261610
|
agent.py
|
python
|
Agent.__init__
|
(self, name, trader, cuser, instruments, strategies = [], tday=datetime.date.today(), config = {})
|
trader is the trading object
tday is the current day; 0 means use today
|
trader is the trading object
tday is the current day; 0 means use today
|
[
"trader为交易对象",
"tday为当前日",
"为0则为当日"
] |
def __init__(self, name, trader, cuser, instruments, strategies = [], tday=datetime.date.today(), config = {}):
'''
trader is the trading object
tday is the current day; 0 means use today
'''
self.tick_id = 0
self.timer_count = 0
self.request_id = 1
folder = 'C:\\dev\\src\\ktlib\\pythonctp\\pyctp\\'
if 'folder' in config:
folder = config['folder']
daily_data_days = 60
if 'daily_data_days' in config:
daily_data_days = config['daily_data_days']
min_data_days = 5
if 'min_data_days' in config:
min_data_days = config['min_data_days']
live_trading = False
if 'live_trading' in config:
live_trading = config['live_trading']
self.logger = logging.getLogger('ctp.agent')
self.mdapis = []
self.trader = trader
self.name = name
self.folder = folder + self.name + os.path.sep
self.cuser = cuser
self.initialized = False
self.scur_day = tday
# flag for saving minute data
self.save_flag = False # not saved by default
self.live_trading = live_trading
self.tick_db_table = 'fut_tick'
self.min_db_table = 'fut_min'
self.daily_db_table = 'fut_daily'
self.eod_flag = False
# market data
self.daily_data_days = daily_data_days
self.min_data_days = min_data_days
self.instruments = {}
self.tick_data = {}
self.day_data = {}
self.min_data = {}
self.cur_min = {}
self.cur_day = {}
self.positions= {}
self.qry_pos = {}
self.day_data_func = {}
self.min_data_func = {}
self.inst2strat = {}
self.add_instruments(instruments, self.scur_day)
self.strategies = {}
self.strat_list = []
for strat in strategies:
self.add_strategy(strat)
### trading
self.ref2order = {} #orderref==>order
self.ref2trade = {}
#self.queued_orders = [] # orders waiting to be sent due to margin constraints (contract, strategy family, base price, base time (to the second))
# current capital / positions
self.available = 0 # available funds
self.locked_margin = 0
self.used_margin = 0
self.margin_cap = 1500000
self.pnl_total = 0.0
self.curr_capital = 1000000.0
self.prev_capital = 1000000.0
self.ctp_orders = []
self.eventEngine = EventEngine(1)
self.eventEngine.register(EVENT_LOG, self.log_handler)
self.eventEngine.register(EVENT_MARKETDATA, self.rtn_tick)
self.eventEngine.register(EVENT_TIMER, self.check_qry_commands)
self.eventEngine.register(EVENT_DAYSWITCH, self.day_switch)
self.cancel_protect_period = 200
self.market_order_tick_multiple = 5
self.order_stats = dict([(inst,{'submitted': 0, 'cancelled':0, 'failed': 0, 'status': True }) for inst in self.instruments])
self.total_submitted = 0
self.total_cancelled = 0
self.total_submitted_limit = 1000
self.submitted_limit_per_inst = 100
self.failed_order_limit = 200
## query command queue
self.qry_commands = [] # each element is a query command, used during initialization to fetch related data
self.init_init() # the init within init, for subclass-specific handling
# settlement statement
self.isSettlementInfoConfirmed = False
|
[
"def",
"__init__",
"(",
"self",
",",
"name",
",",
"trader",
",",
"cuser",
",",
"instruments",
",",
"strategies",
"=",
"[",
"]",
",",
"tday",
"=",
"datetime",
".",
"date",
".",
"today",
"(",
")",
",",
"config",
"=",
"{",
"}",
")",
":",
"self",
".",
"tick_id",
"=",
"0",
"self",
".",
"timer_count",
"=",
"0",
"self",
".",
"request_id",
"=",
"1",
"folder",
"=",
"'C:\\\\dev\\\\src\\\\ktlib\\\\pythonctp\\\\pyctp\\\\'",
"if",
"'folder'",
"in",
"config",
":",
"folder",
"=",
"config",
"[",
"'folder'",
"]",
"daily_data_days",
"=",
"60",
"if",
"'daily_data_days'",
"in",
"config",
":",
"daily_data_days",
"=",
"config",
"[",
"'daily_data_days'",
"]",
"min_data_days",
"=",
"5",
"if",
"'min_data_days'",
"in",
"config",
":",
"min_data_days",
"=",
"config",
"[",
"'min_data_days'",
"]",
"live_trading",
"=",
"False",
"if",
"'live_trading'",
"in",
"config",
":",
"live_trading",
"=",
"config",
"[",
"'live_trading'",
"]",
"self",
".",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"'ctp.agent'",
")",
"self",
".",
"mdapis",
"=",
"[",
"]",
"self",
".",
"trader",
"=",
"trader",
"self",
".",
"name",
"=",
"name",
"self",
".",
"folder",
"=",
"folder",
"+",
"self",
".",
"name",
"+",
"os",
".",
"path",
".",
"sep",
"self",
".",
"cuser",
"=",
"cuser",
"self",
".",
"initialized",
"=",
"False",
"self",
".",
"scur_day",
"=",
"tday",
"#保存分钟数据标志",
"self",
".",
"save_flag",
"=",
"False",
"#默认不保存",
"self",
".",
"live_trading",
"=",
"live_trading",
"self",
".",
"tick_db_table",
"=",
"'fut_tick'",
"self",
".",
"min_db_table",
"=",
"'fut_min'",
"self",
".",
"daily_db_table",
"=",
"'fut_daily'",
"self",
".",
"eod_flag",
"=",
"False",
"# market data",
"self",
".",
"daily_data_days",
"=",
"daily_data_days",
"self",
".",
"min_data_days",
"=",
"min_data_days",
"self",
".",
"instruments",
"=",
"{",
"}",
"self",
".",
"tick_data",
"=",
"{",
"}",
"self",
".",
"day_data",
"=",
"{",
"}",
"self",
".",
"min_data",
"=",
"{",
"}",
"self",
".",
"cur_min",
"=",
"{",
"}",
"self",
".",
"cur_day",
"=",
"{",
"}",
"self",
".",
"positions",
"=",
"{",
"}",
"self",
".",
"qry_pos",
"=",
"{",
"}",
"self",
".",
"day_data_func",
"=",
"{",
"}",
"self",
".",
"min_data_func",
"=",
"{",
"}",
"self",
".",
"inst2strat",
"=",
"{",
"}",
"self",
".",
"add_instruments",
"(",
"instruments",
",",
"self",
".",
"scur_day",
")",
"self",
".",
"strategies",
"=",
"{",
"}",
"self",
".",
"strat_list",
"=",
"[",
"]",
"for",
"strat",
"in",
"strategies",
":",
"self",
".",
"add_strategy",
"(",
"strat",
")",
"###交易",
"self",
".",
"ref2order",
"=",
"{",
"}",
"#orderref==>order",
"self",
".",
"ref2trade",
"=",
"{",
"}",
"#self.queued_orders = [] #因为保证金原因等待发出的指令(合约、策略族、基准价、基准时间(到秒))",
"#当前资金/持仓",
"self",
".",
"available",
"=",
"0",
"#可用资金",
"self",
".",
"locked_margin",
"=",
"0",
"self",
".",
"used_margin",
"=",
"0",
"self",
".",
"margin_cap",
"=",
"1500000",
"self",
".",
"pnl_total",
"=",
"0.0",
"self",
".",
"curr_capital",
"=",
"1000000.0",
"self",
".",
"prev_capital",
"=",
"1000000.0",
"self",
".",
"ctp_orders",
"=",
"[",
"]",
"self",
".",
"eventEngine",
"=",
"EventEngine",
"(",
"1",
")",
"self",
".",
"eventEngine",
".",
"register",
"(",
"EVENT_LOG",
",",
"self",
".",
"log_handler",
")",
"self",
".",
"eventEngine",
".",
"register",
"(",
"EVENT_MARKETDATA",
",",
"self",
".",
"rtn_tick",
")",
"self",
".",
"eventEngine",
".",
"register",
"(",
"EVENT_TIMER",
",",
"self",
".",
"check_qry_commands",
")",
"self",
".",
"eventEngine",
".",
"register",
"(",
"EVENT_DAYSWITCH",
",",
"self",
".",
"day_switch",
")",
"self",
".",
"cancel_protect_period",
"=",
"200",
"self",
".",
"market_order_tick_multiple",
"=",
"5",
"self",
".",
"order_stats",
"=",
"dict",
"(",
"[",
"(",
"inst",
",",
"{",
"'submitted'",
":",
"0",
",",
"'cancelled'",
":",
"0",
",",
"'failed'",
":",
"0",
",",
"'status'",
":",
"True",
"}",
")",
"for",
"inst",
"in",
"self",
".",
"instruments",
"]",
")",
"self",
".",
"total_submitted",
"=",
"0",
"self",
".",
"total_cancelled",
"=",
"0",
"self",
".",
"total_submitted_limit",
"=",
"1000",
"self",
".",
"submitted_limit_per_inst",
"=",
"100",
"self",
".",
"failed_order_limit",
"=",
"200",
"##查询命令队列",
"self",
".",
"qry_commands",
"=",
"[",
"]",
"#每个元素为查询命令,用于初始化时查询相关数据",
"self",
".",
"init_init",
"(",
")",
"#init中的init,用于子类的处理",
"#结算单",
"self",
".",
"isSettlementInfoConfirmed",
"=",
"False"
] |
https://github.com/harvey1673/pyktrader/blob/9e4f9211cb716786d443f3800010d901bc261610/agent.py#L651-L736
|
||
CLUEbenchmark/CLUE
|
5bd39732734afecb490cf18a5212e692dbf2c007
|
baselines/models/bert_wwm_ext/tokenization.py
|
python
|
_is_control
|
(char)
|
return False
|
Checks whether `chars` is a control character.
|
Checks whether `chars` is a control character.
|
[
"Checks",
"whether",
"chars",
"is",
"a",
"control",
"character",
"."
] |
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat in ("Cc", "Cf"):
return True
return False
|
[
"def",
"_is_control",
"(",
"char",
")",
":",
"# These are technically control characters but we count them as whitespace",
"# characters.",
"if",
"char",
"==",
"\"\\t\"",
"or",
"char",
"==",
"\"\\n\"",
"or",
"char",
"==",
"\"\\r\"",
":",
"return",
"False",
"cat",
"=",
"unicodedata",
".",
"category",
"(",
"char",
")",
"if",
"cat",
"in",
"(",
"\"Cc\"",
",",
"\"Cf\"",
")",
":",
"return",
"True",
"return",
"False"
] |
https://github.com/CLUEbenchmark/CLUE/blob/5bd39732734afecb490cf18a5212e692dbf2c007/baselines/models/bert_wwm_ext/tokenization.py#L374-L383
|
|
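The Unicode categories behind _is_control, shown directly: "Cc" is Other/Control and "Cf" is Other/Format, while tab and newline are Cc but deliberately whitelisted as whitespace.

import unicodedata

for ch in ["\t", "\n", "\x00", "\u200d", "a"]:
    # \t, \n -> Cc (whitelisted); \x00 -> Cc; \u200d (zero-width joiner) -> Cf;
    # "a" -> Ll, so only the middle two count as control characters here.
    print(repr(ch), unicodedata.category(ch))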
cuthbertLab/music21
|
bd30d4663e52955ed922c10fdf541419d8c67671
|
music21/romanText/translate.py
|
python
|
_copyMultipleMeasures
|
(t, p, kCurrent)
|
return measures, kCurrent
|
Given a RomanText token for a RTMeasure, a
Part used as the current container, and the current Key,
return a Measure range copied from the past of the Part.
This is used for cases such as:
m23-25 = m20-22
|
Given a RomanText token for a RTMeasure, a
Part used as the current container, and the current Key,
return a Measure range copied from the past of the Part.
|
[
"Given",
"a",
"RomanText",
"token",
"for",
"a",
"RTMeasure",
"a",
"Part",
"used",
"as",
"the",
"current",
"container",
"and",
"the",
"current",
"Key",
"return",
"a",
"Measure",
"range",
"copied",
"from",
"the",
"past",
"of",
"the",
"Part",
"."
] |
def _copyMultipleMeasures(t, p, kCurrent):
'''
Given a RomanText token for a RTMeasure, a
Part used as the current container, and the current Key,
return a Measure range copied from the past of the Part.
This is used for cases such as:
m23-25 = m20-22
'''
# the key provided needs to be the current key
# environLocal.printDebug(['calling _copyMultipleMeasures()'])
targetNumbers, unused_targetRepeat = t.getCopyTarget()
if len(targetNumbers) == 1: # pragma: no cover
# this is an encoding error
raise RomanTextTranslateException('a multiple measure range cannot copy a single measure')
# TODO: ignoring repeat letters
targetStart = targetNumbers[0]
targetEnd = targetNumbers[1]
if t.number[1] - t.number[0] != targetEnd - targetStart: # pragma: no cover
raise RomanTextTranslateException(
'both the source and destination sections need to have the same number of measures')
if t.number[0] < targetEnd: # pragma: no cover
raise RomanTextTranslateException(
'the source section cannot overlap with the destination section')
measures = []
for mPast in p.getElementsByClass('Measure'):
if mPast.number in range(targetStart, targetEnd + 1):
try:
m = copy.deepcopy(mPast)
except TypeError: # pragma: no cover
raise RomanTextTranslateException(
'Failed to copy measure {0} to measure range {1}-{2}: '.format(
mPast.number, targetStart, targetEnd)
+ 'did you perhaps parse an RTOpus object with romanTextToStreamScore '
+ 'instead of romanTextToStreamOpus?')
m.number = t.number[0] + mPast.number - targetStart
measures.append(m)
# update all keys
allRNs = list(m.getElementsByClass('RomanNumeral'))
for rnPast in allRNs:
if kCurrent is None: # pragma: no cover
# should not happen
raise RomanTextTranslateException(
'attempting to copy a measure but no past key definitions are found')
if rnPast.editorial.get('followsKeyChange'):
kCurrent = rnPast.key
elif rnPast.pivotChord is not None:
kCurrent = rnPast.pivotChord.key
else:
rnPast.key = kCurrent
if rnPast.secondaryRomanNumeral is not None:
newRN = roman.RomanNumeral(rnPast.figure, kCurrent)
newRN.duration = copy.deepcopy(rnPast.duration)
newRN.lyrics = copy.deepcopy(rnPast.lyrics)
m.replace(rnPast, newRN)
if mPast.number == targetEnd:
break
return measures, kCurrent
|
[
"def",
"_copyMultipleMeasures",
"(",
"t",
",",
"p",
",",
"kCurrent",
")",
":",
"# the key provided needs to be the current key",
"# environLocal.printDebug(['calling _copyMultipleMeasures()'])",
"targetNumbers",
",",
"unused_targetRepeat",
"=",
"t",
".",
"getCopyTarget",
"(",
")",
"if",
"len",
"(",
"targetNumbers",
")",
"==",
"1",
":",
"# pragma: no cover",
"# this is an encoding error",
"raise",
"RomanTextTranslateException",
"(",
"'a multiple measure range cannot copy a single measure'",
")",
"# TODO: ignoring repeat letters",
"targetStart",
"=",
"targetNumbers",
"[",
"0",
"]",
"targetEnd",
"=",
"targetNumbers",
"[",
"1",
"]",
"if",
"t",
".",
"number",
"[",
"1",
"]",
"-",
"t",
".",
"number",
"[",
"0",
"]",
"!=",
"targetEnd",
"-",
"targetStart",
":",
"# pragma: no cover",
"raise",
"RomanTextTranslateException",
"(",
"'both the source and destination sections need to have the same number of measures'",
")",
"if",
"t",
".",
"number",
"[",
"0",
"]",
"<",
"targetEnd",
":",
"# pragma: no cover",
"raise",
"RomanTextTranslateException",
"(",
"'the source section cannot overlap with the destination section'",
")",
"measures",
"=",
"[",
"]",
"for",
"mPast",
"in",
"p",
".",
"getElementsByClass",
"(",
"'Measure'",
")",
":",
"if",
"mPast",
".",
"number",
"in",
"range",
"(",
"targetStart",
",",
"targetEnd",
"+",
"1",
")",
":",
"try",
":",
"m",
"=",
"copy",
".",
"deepcopy",
"(",
"mPast",
")",
"except",
"TypeError",
":",
"# pragma: no cover",
"raise",
"RomanTextTranslateException",
"(",
"'Failed to copy measure {0} to measure range {1}-{2}: '",
".",
"format",
"(",
"mPast",
".",
"number",
",",
"targetStart",
",",
"targetEnd",
")",
"+",
"'did you perhaps parse an RTOpus object with romanTextToStreamScore '",
"+",
"'instead of romanTextToStreamOpus?'",
")",
"m",
".",
"number",
"=",
"t",
".",
"number",
"[",
"0",
"]",
"+",
"mPast",
".",
"number",
"-",
"targetStart",
"measures",
".",
"append",
"(",
"m",
")",
"# update all keys",
"allRNs",
"=",
"list",
"(",
"m",
".",
"getElementsByClass",
"(",
"'RomanNumeral'",
")",
")",
"for",
"rnPast",
"in",
"allRNs",
":",
"if",
"kCurrent",
"is",
"None",
":",
"# pragma: no cover",
"# should not happen",
"raise",
"RomanTextTranslateException",
"(",
"'attempting to copy a measure but no past key definitions are found'",
")",
"if",
"rnPast",
".",
"editorial",
".",
"get",
"(",
"'followsKeyChange'",
")",
":",
"kCurrent",
"=",
"rnPast",
".",
"key",
"elif",
"rnPast",
".",
"pivotChord",
"is",
"not",
"None",
":",
"kCurrent",
"=",
"rnPast",
".",
"pivotChord",
".",
"key",
"else",
":",
"rnPast",
".",
"key",
"=",
"kCurrent",
"if",
"rnPast",
".",
"secondaryRomanNumeral",
"is",
"not",
"None",
":",
"newRN",
"=",
"roman",
".",
"RomanNumeral",
"(",
"rnPast",
".",
"figure",
",",
"kCurrent",
")",
"newRN",
".",
"duration",
"=",
"copy",
".",
"deepcopy",
"(",
"rnPast",
".",
"duration",
")",
"newRN",
".",
"lyrics",
"=",
"copy",
".",
"deepcopy",
"(",
"rnPast",
".",
"lyrics",
")",
"m",
".",
"replace",
"(",
"rnPast",
",",
"newRN",
")",
"if",
"mPast",
".",
"number",
"==",
"targetEnd",
":",
"break",
"return",
"measures",
",",
"kCurrent"
] |
https://github.com/cuthbertLab/music21/blob/bd30d4663e52955ed922c10fdf541419d8c67671/music21/romanText/translate.py#L224-L286
|
|
DataBiosphere/toil
|
2e148eee2114ece8dcc3ec8a83f36333266ece0d
|
src/toil/deferred.py
|
python
|
DeferredFunctionManager._runAllDeferredFunctions
|
(self, fileObj)
|
Read and run deferred functions until EOF from the given open file.
|
Read and run deferred functions until EOF from the given open file.
|
[
"Read",
"and",
"run",
"deferred",
"functions",
"until",
"EOF",
"from",
"the",
"given",
"open",
"file",
"."
] |
def _runAllDeferredFunctions(self, fileObj):
"""
Read and run deferred functions until EOF from the given open file.
"""
try:
while True:
# Load each function
deferredFunction = dill.load(fileObj)
logger.debug("Loaded deferred function %s" % repr(deferredFunction))
# Run it
self._runDeferredFunction(deferredFunction)
except EOFError as e:
# This is expected and means we read all the complete entries.
logger.debug("Out of deferred functions!")
|
[
"def",
"_runAllDeferredFunctions",
"(",
"self",
",",
"fileObj",
")",
":",
"try",
":",
"while",
"True",
":",
"# Load each function",
"deferredFunction",
"=",
"dill",
".",
"load",
"(",
"fileObj",
")",
"logger",
".",
"debug",
"(",
"\"Loaded deferred function %s\"",
"%",
"repr",
"(",
"deferredFunction",
")",
")",
"# Run it",
"self",
".",
"_runDeferredFunction",
"(",
"deferredFunction",
")",
"except",
"EOFError",
"as",
"e",
":",
"# This is expected and means we read all the complete entries.",
"logger",
".",
"debug",
"(",
"\"Out of deferred functions!\"",
")"
] |
https://github.com/DataBiosphere/toil/blob/2e148eee2114ece8dcc3ec8a83f36333266ece0d/src/toil/deferred.py#L235-L249
|
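The loop above leans entirely on dill's framing: each dump() appends one self-delimiting record, and load() raises EOFError once no complete record remains. A minimal sketch of that round trip, using an in-memory buffer instead of Toil's state file (an assumption for the demo):

import io
import dill

buf = io.BytesIO()
for fn in (lambda: print("first"), lambda: print("second")):
    dill.dump(fn, buf)             # one pickled callable per record

buf.seek(0)
try:
    while True:
        deferred = dill.load(buf)  # read the next complete record
        deferred()                 # run it, as _runDeferredFunction would
except EOFError:
    pass                           # all complete entries consumed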
||
kerlomz/captcha_platform
|
f7d719bd1239a987996e266bd7fe35c96003b378
|
sdk/onnx/sdk.py
|
python
|
Pretreatment.preprocessing_by_func
|
(exec_map, key, src_arr)
|
return cv2.cvtColor(target_arr, cv2.COLOR_BGR2RGB)
|
[] |
def preprocessing_by_func(exec_map, key, src_arr):
if not exec_map:
return src_arr
target_arr = cv2.cvtColor(src_arr, cv2.COLOR_RGB2BGR)
for sentence in exec_map.get(key):
if sentence.startswith("@@"):
target_arr = eval(sentence[2:])
elif sentence.startswith("$$"):
exec(sentence[2:])
return cv2.cvtColor(target_arr, cv2.COLOR_BGR2RGB)
|
[
"def",
"preprocessing_by_func",
"(",
"exec_map",
",",
"key",
",",
"src_arr",
")",
":",
"if",
"not",
"exec_map",
":",
"return",
"src_arr",
"target_arr",
"=",
"cv2",
".",
"cvtColor",
"(",
"src_arr",
",",
"cv2",
".",
"COLOR_RGB2BGR",
")",
"for",
"sentence",
"in",
"exec_map",
".",
"get",
"(",
"key",
")",
":",
"if",
"sentence",
".",
"startswith",
"(",
"\"@@\"",
")",
":",
"target_arr",
"=",
"eval",
"(",
"sentence",
"[",
"2",
":",
"]",
")",
"elif",
"sentence",
".",
"startswith",
"(",
"\"$$\"",
")",
":",
"exec",
"(",
"sentence",
"[",
"2",
":",
"]",
")",
"return",
"cv2",
".",
"cvtColor",
"(",
"target_arr",
",",
"cv2",
".",
"COLOR_BGR2RGB",
")"
] |
https://github.com/kerlomz/captcha_platform/blob/f7d719bd1239a987996e266bd7fe35c96003b378/sdk/onnx/sdk.py#L506-L515
|
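Usage hinges on the sentence prefixes: "@@" entries are eval'd expressions whose result replaces target_arr, while "$$" entries are exec'd statements. A hedged example, assuming the method is exposed as a staticmethod (its signature carries no self) and using a hypothetical "trainer" key:

import cv2
import numpy as np

# "@@" expression: its value becomes the new target_arr inside the function.
exec_map = {"trainer": ["@@cv2.GaussianBlur(target_arr, (5, 5), 0)"]}
src = np.zeros((32, 96, 3), dtype=np.uint8)   # dummy RGB captcha image

out = Pretreatment.preprocessing_by_func(exec_map, "trainer", src)

Since the sentences go straight through eval/exec, the map must come from trusted configuration only.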
|||
triaquae/triaquae
|
bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9
|
TriAquae/models/Centos_5.9/paramiko/channel.py
|
python
|
Channel._request_success
|
(self, m)
|
return
|
[] |
def _request_success(self, m):
self._log(DEBUG, 'Sesch channel %d request ok' % self.chanid)
self.event_ready = True
self.event.set()
return
|
[
"def",
"_request_success",
"(",
"self",
",",
"m",
")",
":",
"self",
".",
"_log",
"(",
"DEBUG",
",",
"'Sesch channel %d request ok'",
"%",
"self",
".",
"chanid",
")",
"self",
".",
"event_ready",
"=",
"True",
"self",
".",
"event",
".",
"set",
"(",
")",
"return"
] |
https://github.com/triaquae/triaquae/blob/bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9/TriAquae/models/Centos_5.9/paramiko/channel.py#L955-L959
|
|||
progrium/duplex
|
d076a972fc21e19dff1f0a619e29dc6c5b25cc1f
|
python/duplex/sync.py
|
python
|
Channel.close
|
(self)
|
[] |
def close(self):
self.peer.close()
|
[
"def",
"close",
"(",
"self",
")",
":",
"self",
".",
"peer",
".",
"close",
"(",
")"
] |
https://github.com/progrium/duplex/blob/d076a972fc21e19dff1f0a619e29dc6c5b25cc1f/python/duplex/sync.py#L26-L27
|
||||
microsoft/ptvsd
|
99c8513921021d2cc7cd82e132b65c644c256768
|
src/ptvsd/_vendored/pydevd/_pydev_bundle/_pydev_completer.py
|
python
|
extract_token_and_qualifier
|
(text, line=0, column=0)
|
return TokenAndQualifier(token, qualifier)
|
Extracts the token and qualifier from the text given the line/column
(see test_extract_token_and_qualifier for examples).
:param unicode text:
:param int line: 0-based
:param int column: 0-based
|
Extracts the token and qualifier from the text given the line/column
(see test_extract_token_and_qualifier for examples).
|
[
"Extracts",
"the",
"token",
"a",
"qualifier",
"from",
"the",
"text",
"given",
"the",
"line",
"/",
"colum",
"(",
"see",
"test_extract_token_and_qualifier",
"for",
"examples",
")",
"."
] |
def extract_token_and_qualifier(text, line=0, column=0):
'''
Extracts the token a qualifier from the text given the line/colum
(see test_extract_token_and_qualifier for examples).
:param unicode text:
:param int line: 0-based
:param int column: 0-based
'''
# Note: not using the tokenize module because text should be unicode and
# line/column refer to the unicode text (otherwise we'd have to know
# those ranges after converted to bytes).
if line < 0:
line = 0
if column < 0:
column = 0
if isinstance(text, bytes):
text = text.decode('utf-8')
lines = text.splitlines()
try:
text = lines[line]
except IndexError:
return TokenAndQualifier(u'', u'')
if column >= len(text):
column = len(text)
text = text[:column]
token = u''
qualifier = u''
temp_token = []
for i in range(column - 1, -1, -1):
c = text[i]
if c in identifier_part or isidentifier(c) or c == u'.':
temp_token.append(c)
else:
break
temp_token = u''.join(reversed(temp_token))
if u'.' in temp_token:
temp_token = temp_token.split(u'.')
token = u'.'.join(temp_token[:-1])
qualifier = temp_token[-1]
else:
qualifier = temp_token
return TokenAndQualifier(token, qualifier)
|
[
"def",
"extract_token_and_qualifier",
"(",
"text",
",",
"line",
"=",
"0",
",",
"column",
"=",
"0",
")",
":",
"# Note: not using the tokenize module because text should be unicode and",
"# line/column refer to the unicode text (otherwise we'd have to know",
"# those ranges after converted to bytes).",
"if",
"line",
"<",
"0",
":",
"line",
"=",
"0",
"if",
"column",
"<",
"0",
":",
"column",
"=",
"0",
"if",
"isinstance",
"(",
"text",
",",
"bytes",
")",
":",
"text",
"=",
"text",
".",
"decode",
"(",
"'utf-8'",
")",
"lines",
"=",
"text",
".",
"splitlines",
"(",
")",
"try",
":",
"text",
"=",
"lines",
"[",
"line",
"]",
"except",
"IndexError",
":",
"return",
"TokenAndQualifier",
"(",
"u''",
",",
"u''",
")",
"if",
"column",
">=",
"len",
"(",
"text",
")",
":",
"column",
"=",
"len",
"(",
"text",
")",
"text",
"=",
"text",
"[",
":",
"column",
"]",
"token",
"=",
"u''",
"qualifier",
"=",
"u''",
"temp_token",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"column",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"c",
"=",
"text",
"[",
"i",
"]",
"if",
"c",
"in",
"identifier_part",
"or",
"isidentifier",
"(",
"c",
")",
"or",
"c",
"==",
"u'.'",
":",
"temp_token",
".",
"append",
"(",
"c",
")",
"else",
":",
"break",
"temp_token",
"=",
"u''",
".",
"join",
"(",
"reversed",
"(",
"temp_token",
")",
")",
"if",
"u'.'",
"in",
"temp_token",
":",
"temp_token",
"=",
"temp_token",
".",
"split",
"(",
"u'.'",
")",
"token",
"=",
"u'.'",
".",
"join",
"(",
"temp_token",
"[",
":",
"-",
"1",
"]",
")",
"qualifier",
"=",
"temp_token",
"[",
"-",
"1",
"]",
"else",
":",
"qualifier",
"=",
"temp_token",
"return",
"TokenAndQualifier",
"(",
"token",
",",
"qualifier",
")"
] |
https://github.com/microsoft/ptvsd/blob/99c8513921021d2cc7cd82e132b65c644c256768/src/ptvsd/_vendored/pydevd/_pydev_bundle/_pydev_completer.py#L243-L291
|
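The split rule: walk left from the cursor over identifier characters and dots; everything before the last dot is the token, the trailing fragment the qualifier. A small illustration (inputs are hypothetical):

text = "import os\nos.path.jo"

# Cursor at line 1, column 10 -- right after "jo".
result = extract_token_and_qualifier(text, line=1, column=10)
# result == TokenAndQualifier(token='os.path', qualifier='jo')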
|
wistbean/learn_python3_spider
|
73c873f4845f4385f097e5057407d03dd37a117b
|
stackoverflow/venv/lib/python3.6/site-packages/twisted/internet/interfaces.py
|
python
|
IResolver.query
|
(query, timeout=None)
|
Dispatch C{query} to the method which can handle its type.
@type query: L{twisted.names.dns.Query}
@param query: The DNS query being issued, to which a response is to be
generated.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
|
Dispatch C{query} to the method which can handle its type.
|
[
"Dispatch",
"C",
"{",
"query",
"}",
"to",
"the",
"method",
"which",
"can",
"handle",
"its",
"type",
"."
] |
def query(query, timeout=None):
"""
Dispatch C{query} to the method which can handle its type.
@type query: L{twisted.names.dns.Query}
@param query: The DNS query being issued, to which a response is to be
generated.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
|
[
"def",
"query",
"(",
"query",
",",
"timeout",
"=",
"None",
")",
":"
] |
https://github.com/wistbean/learn_python3_spider/blob/73c873f4845f4385f097e5057407d03dd37a117b/stackoverflow/venv/lib/python3.6/site-packages/twisted/internet/interfaces.py#L192-L212
|
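A sketch of driving this interface through twisted.names' standard client resolver, which implements IResolver; createResolver() and dns.Query are real twisted.names APIs, the domain is illustrative:

from twisted.internet import reactor
from twisted.names import client, dns

resolver = client.createResolver()
d = resolver.query(dns.Query(b'example.com', dns.A, dns.IN))
# Fires with (answers, authority, additional) per the docstring above.
d.addCallback(lambda result: print(result[0]))
d.addBoth(lambda _: reactor.stop())
reactor.run()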
||
cloudmatrix/esky
|
6fde3201f0335064931a6c7f7847fc5ad39001b4
|
esky/sudo/__init__.py
|
python
|
_get_sudo_argtypes
|
(obj,methname)
|
return None
|
Get the argtypes list for the given method.
This searches the base classes of obj if the given method is not declared
allowed_from_sudo, so that people don't have to constantly re-apply the
decorator.
|
Get the argtypes list for the given method.
|
[
"Get",
"the",
"argtypes",
"list",
"for",
"the",
"given",
"method",
"."
] |
def _get_sudo_argtypes(obj,methname):
"""Get the argtypes list for the given method.
This searches the base classes of obj if the given method is not declared
allowed_from_sudo, so that people don't have to constantly re-apply the
decorator.
"""
for base in _get_mro(obj):
try:
argtypes = base.__dict__[methname]._esky_sudo_argtypes
except (KeyError,AttributeError):
pass
else:
return argtypes
return None
|
[
"def",
"_get_sudo_argtypes",
"(",
"obj",
",",
"methname",
")",
":",
"for",
"base",
"in",
"_get_mro",
"(",
"obj",
")",
":",
"try",
":",
"argtypes",
"=",
"base",
".",
"__dict__",
"[",
"methname",
"]",
".",
"_esky_sudo_argtypes",
"except",
"(",
"KeyError",
",",
"AttributeError",
")",
":",
"pass",
"else",
":",
"return",
"argtypes",
"return",
"None"
] |
https://github.com/cloudmatrix/esky/blob/6fde3201f0335064931a6c7f7847fc5ad39001b4/esky/sudo/__init__.py#L279-L293
|
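The lookup pairs with a decorator that stamps _esky_sudo_argtypes onto the function; the sketch below shows that pattern and why the MRO walk matters for subclasses. Only the attribute name and the decorator name come from the code and docstring above -- the decorator body is an assumption:

def allowed_from_sudo(*argtypes):
    def decorator(func):
        func._esky_sudo_argtypes = argtypes   # the attribute the walk looks up
        return func
    return decorator

class Base:
    @allowed_from_sudo(str, int)
    def frobnicate(self, name, count):
        pass

class Child(Base):
    pass  # no re-decoration needed

# _get_sudo_argtypes(Child, "frobnicate") finds (str, int) on Base.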
|
readthedocs/sphinx-autoapi
|
71c6ceebe0b02c34027fcd3d56c8641e9b94c7af
|
autoapi/directives.py
|
python
|
NestedParse.run
|
(self)
|
return node.children
|
[] |
def run(self):
node = nodes.container()
node.document = self.state.document
nested_parse_with_titles(self.state, self.content, node)
try:
title_node = node[0][0]
if isinstance(title_node, nodes.title):
del node[0][0]
except IndexError:
pass
return node.children
|
[
"def",
"run",
"(",
"self",
")",
":",
"node",
"=",
"nodes",
".",
"container",
"(",
")",
"node",
".",
"document",
"=",
"self",
".",
"state",
".",
"document",
"nested_parse_with_titles",
"(",
"self",
".",
"state",
",",
"self",
".",
"content",
",",
"node",
")",
"try",
":",
"title_node",
"=",
"node",
"[",
"0",
"]",
"[",
"0",
"]",
"if",
"isinstance",
"(",
"title_node",
",",
"nodes",
".",
"title",
")",
":",
"del",
"node",
"[",
"0",
"]",
"[",
"0",
"]",
"except",
"IndexError",
":",
"pass",
"return",
"node",
".",
"children"
] |
https://github.com/readthedocs/sphinx-autoapi/blob/71c6ceebe0b02c34027fcd3d56c8641e9b94c7af/autoapi/directives.py#L53-L63
|
|||
kwotsin/mimicry
|
70ce919b0684b14af264881cc6acf4eccaff42b2
|
torch_mimicry/metrics/fid/fid_utils.py
|
python
|
calculate_activation_statistics
|
(images, sess, batch_size=50, verbose=True)
|
return mu, sigma
|
Calculation of the statistics used by the FID.
Args:
images (ndarray): Numpy array of shape (N, H, W, 3) and values in
the range [0, 255].
sess (Session): TensorFlow session object.
batch_size (int): Batch size for inference.
verbose (bool): If True, prints out logging information.
Returns:
ndarray: Mean of inception features from samples.
ndarray: Covariance of inception features from samples.
|
Calculation of the statistics used by the FID.
|
[
"Calculation",
"of",
"the",
"statistics",
"used",
"by",
"the",
"FID",
"."
] |
def calculate_activation_statistics(images, sess, batch_size=50, verbose=True):
"""
Calculation of the statistics used by the FID.
Args:
images (ndarray): Numpy array of shape (N, H, W, 3) and values in
the range [0, 255].
sess (Session): TensorFlow session object.
batch_size (int): Batch size for inference.
verbose (bool): If True, prints out logging information.
Returns:
ndarray: Mean of inception features from samples.
ndarray: Covariance of inception features from samples.
"""
act = inception_utils.get_activations(images, sess, batch_size, verbose)
mu = np.mean(act, axis=0)
sigma = np.cov(act, rowvar=False)
return mu, sigma
|
[
"def",
"calculate_activation_statistics",
"(",
"images",
",",
"sess",
",",
"batch_size",
"=",
"50",
",",
"verbose",
"=",
"True",
")",
":",
"act",
"=",
"inception_utils",
".",
"get_activations",
"(",
"images",
",",
"sess",
",",
"batch_size",
",",
"verbose",
")",
"mu",
"=",
"np",
".",
"mean",
"(",
"act",
",",
"axis",
"=",
"0",
")",
"sigma",
"=",
"np",
".",
"cov",
"(",
"act",
",",
"rowvar",
"=",
"False",
")",
"return",
"mu",
",",
"sigma"
] |
https://github.com/kwotsin/mimicry/blob/70ce919b0684b14af264881cc6acf4eccaff42b2/torch_mimicry/metrics/fid/fid_utils.py#L70-L89
|
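These (mu, sigma) pairs feed the Fréchet distance between two Gaussians. A sketch of that final step under the standard FID formula, written independently of mimicry's own combining helper:

import numpy as np
from scipy import linalg

def frechet_distance(mu1, sigma1, mu2, sigma2):
    diff = mu1 - mu2
    covmean = linalg.sqrtm(sigma1.dot(sigma2))   # matrix square root
    if np.iscomplexobj(covmean):                 # clip numerical noise
        covmean = covmean.real
    return diff.dot(diff) + np.trace(sigma1 + sigma2 - 2.0 * covmean)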
|
lucadelu/pyModis
|
de86ccf28fffcb759d18b4b5b5a601304ec4fd14
|
pymodis/convertmodis.py
|
python
|
processModis.__init__
|
(self, hdfname, confile, mrtpath)
|
Function to initialize the object
|
Function to initialize the object
|
[
"Function",
"to",
"initialize",
"the",
"object"
] |
def __init__(self, hdfname, confile, mrtpath):
"""Function to initialize the object"""
# check if the hdf file exists
if os.path.exists(hdfname):
self.name = hdfname
else:
raise Exception('%s does not exist' % hdfname)
# check if confile exists
if os.path.exists(confile):
self.conf = confile
else:
raise Exception('%s does not exist' % confile)
# check if mrtpath and subdirectories exists and set environment
# variables
self.mrtpathbin, self.mrtpathdata = checkMRTpath(mrtpath)
|
[
"def",
"__init__",
"(",
"self",
",",
"hdfname",
",",
"confile",
",",
"mrtpath",
")",
":",
"# check if the hdf file exists",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"hdfname",
")",
":",
"self",
".",
"name",
"=",
"hdfname",
"else",
":",
"raise",
"Exception",
"(",
"'%s does not exist'",
"%",
"hdfname",
")",
"# check if confile exists",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"confile",
")",
":",
"self",
".",
"conf",
"=",
"confile",
"else",
":",
"raise",
"Exception",
"(",
"'%s does not exist'",
"%",
"confile",
")",
"# check if mrtpath and subdirectories exists and set environment",
"# variables",
"self",
".",
"mrtpathbin",
",",
"self",
".",
"mrtpathdata",
"=",
"checkMRTpath",
"(",
"mrtpath",
")"
] |
https://github.com/lucadelu/pyModis/blob/de86ccf28fffcb759d18b4b5b5a601304ec4fd14/pymodis/convertmodis.py#L210-L224
|
||
pyparallel/pyparallel
|
11e8c6072d48c8f13641925d17b147bf36ee0ba3
|
Lib/site-packages/pip-7.1.2-py3.3.egg/pip/_vendor/distlib/_backport/shutil.py
|
python
|
_check_unpack_options
|
(extensions, function, extra_args)
|
Checks what gets registered as an unpacker.
|
Checks what gets registered as an unpacker.
|
[
"Checks",
"what",
"gets",
"registered",
"as",
"an",
"unpacker",
"."
] |
def _check_unpack_options(extensions, function, extra_args):
"""Checks what gets registered as an unpacker."""
# first make sure no other unpacker is registered for this extension
existing_extensions = {}
for name, info in _UNPACK_FORMATS.items():
for ext in info[0]:
existing_extensions[ext] = name
for extension in extensions:
if extension in existing_extensions:
msg = '%s is already registered for "%s"'
raise RegistryError(msg % (extension,
existing_extensions[extension]))
if not isinstance(function, collections.Callable):
raise TypeError('The registered function must be a callable')
|
[
"def",
"_check_unpack_options",
"(",
"extensions",
",",
"function",
",",
"extra_args",
")",
":",
"# first make sure no other unpacker is registered for this extension",
"existing_extensions",
"=",
"{",
"}",
"for",
"name",
",",
"info",
"in",
"_UNPACK_FORMATS",
".",
"items",
"(",
")",
":",
"for",
"ext",
"in",
"info",
"[",
"0",
"]",
":",
"existing_extensions",
"[",
"ext",
"]",
"=",
"name",
"for",
"extension",
"in",
"extensions",
":",
"if",
"extension",
"in",
"existing_extensions",
":",
"msg",
"=",
"'%s is already registered for \"%s\"'",
"raise",
"RegistryError",
"(",
"msg",
"%",
"(",
"extension",
",",
"existing_extensions",
"[",
"extension",
"]",
")",
")",
"if",
"not",
"isinstance",
"(",
"function",
",",
"collections",
".",
"Callable",
")",
":",
"raise",
"TypeError",
"(",
"'The registered function must be a callable'",
")"
] |
https://github.com/pyparallel/pyparallel/blob/11e8c6072d48c8f13641925d17b147bf36ee0ba3/Lib/site-packages/pip-7.1.2-py3.3.egg/pip/_vendor/distlib/_backport/shutil.py#L610-L625
|
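The check runs whenever a new unpacker is registered. The same registry ships in the modern stdlib shutil, so the behaviour can be exercised directly (the format name and callback are illustrative):

import shutil

def unpack_foo(filename, extract_dir):
    pass  # would extract `filename` into `extract_dir`

shutil.register_unpack_format('foo', ['.foo'], unpack_foo)
# Registering '.foo' again now raises shutil.RegistryError.
shutil.unregister_unpack_format('foo')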
||
zhl2008/awd-platform
|
0416b31abea29743387b10b3914581fbe8e7da5e
|
web_hxb2/lib/python3.5/site-packages/wagtail/wagtailsearch/backends/elasticsearch.py
|
python
|
ElasticsearchIndexRebuilder.reset_index
|
(self)
|
[] |
def reset_index(self):
self.index.reset()
|
[
"def",
"reset_index",
"(",
"self",
")",
":",
"self",
".",
"index",
".",
"reset",
"(",
")"
] |
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/wagtail/wagtailsearch/backends/elasticsearch.py#L615-L616
|
||||
mesalock-linux/mesapy
|
ed546d59a21b36feb93e2309d5c6b75aa0ad95c9
|
rpython/jit/metainterp/optimizeopt/virtualstate.py
|
python
|
AbstractVirtualStateInfo.generate_guards
|
(self, other, op, runtime_op, state)
|
generate guards (output in the list extra_guards) that make runtime
values of the shape other match the shape of self. if that's not
possible, VirtualStatesCantMatch is thrown and bad gets keys set indicating
which parts of the state are the problem.
the function can peek into the information about the op, as well
as runtime value (passed in runtime_op)
as a guiding heuristic whether making such guards makes
sense. if None is passed in for op, no guard is ever generated, and
this function degenerates to a generalization check.
|
generate guards (output in the list extra_guards) that make runtime
values of the shape other match the shape of self. if that's not
possible, VirtualStatesCantMatch is thrown and bad gets keys set indicating
which parts of the state are the problem.
|
[
"generate",
"guards",
"(",
"output",
"in",
"the",
"list",
"extra_guards",
")",
"that",
"make",
"runtime",
"values",
"of",
"the",
"shape",
"other",
"match",
"the",
"shape",
"of",
"self",
".",
"if",
"that",
"s",
"not",
"possible",
"VirtualStatesCantMatch",
"is",
"thrown",
"and",
"bad",
"gets",
"keys",
"set",
"which",
"parts",
"of",
"the",
"state",
"are",
"the",
"problem",
"."
] |
def generate_guards(self, other, op, runtime_op, state):
""" generate guards (output in the list extra_guards) that make runtime
values of the shape other match the shape of self. if that's not
possible, VirtualStatesCantMatch is thrown and bad gets keys set which
parts of the state are the problem.
the function can peek into the information about the op, as well
as runtime value (passed in runtime_op)
as a guiding heuristic whether making such guards makes
sense. if None is passed in for op, no guard is ever generated, and
this function degenerates to a generalization check."""
assert self.position != -1
if self.position in state.renum:
if state.renum[self.position] != other.position:
state.bad[self] = state.bad[other] = None
raise VirtualStatesCantMatch(
'The numbering of the virtual states does not ' +
'match. This means that two virtual fields ' +
'have been set to the same Box in one of the ' +
'virtual states but not in the other.',
state)
else:
state.renum[self.position] = other.position
try:
self._generate_guards(other, op, runtime_op, state)
except VirtualStatesCantMatch as e:
state.bad[self] = state.bad[other] = None
if e.state is None:
e.state = state
raise e
|
[
"def",
"generate_guards",
"(",
"self",
",",
"other",
",",
"op",
",",
"runtime_op",
",",
"state",
")",
":",
"assert",
"self",
".",
"position",
"!=",
"-",
"1",
"if",
"self",
".",
"position",
"in",
"state",
".",
"renum",
":",
"if",
"state",
".",
"renum",
"[",
"self",
".",
"position",
"]",
"!=",
"other",
".",
"position",
":",
"state",
".",
"bad",
"[",
"self",
"]",
"=",
"state",
".",
"bad",
"[",
"other",
"]",
"=",
"None",
"raise",
"VirtualStatesCantMatch",
"(",
"'The numbering of the virtual states does not '",
"+",
"'match. This means that two virtual fields '",
"+",
"'have been set to the same Box in one of the '",
"+",
"'virtual states but not in the other.'",
",",
"state",
")",
"else",
":",
"state",
".",
"renum",
"[",
"self",
".",
"position",
"]",
"=",
"other",
".",
"position",
"try",
":",
"self",
".",
"_generate_guards",
"(",
"other",
",",
"op",
",",
"runtime_op",
",",
"state",
")",
"except",
"VirtualStatesCantMatch",
"as",
"e",
":",
"state",
".",
"bad",
"[",
"self",
"]",
"=",
"state",
".",
"bad",
"[",
"other",
"]",
"=",
"None",
"if",
"e",
".",
"state",
"is",
"None",
":",
"e",
".",
"state",
"=",
"state",
"raise",
"e"
] |
https://github.com/mesalock-linux/mesapy/blob/ed546d59a21b36feb93e2309d5c6b75aa0ad95c9/rpython/jit/metainterp/optimizeopt/virtualstate.py#L70-L99
|
||
stratosphereips/Manati
|
20e55d49edf00f8503807c62397d02a0dad9ddff
|
manati/api_manager/core/modules_manager.py
|
python
|
ModulesManager.get_filtered_weblogs_json
|
(**kwargs)
|
return json.dumps(weblogs_json)
|
[] |
def get_filtered_weblogs_json(**kwargs):
weblogs_qs = Weblog.objects.filter(Q(**kwargs))
weblogs_json = WeblogSerializer(weblogs_qs, many=True).data
return json.dumps(weblogs_json)
|
[
"def",
"get_filtered_weblogs_json",
"(",
"*",
"*",
"kwargs",
")",
":",
"weblogs_qs",
"=",
"Weblog",
".",
"objects",
".",
"filter",
"(",
"Q",
"(",
"*",
"*",
"kwargs",
")",
")",
"weblogs_json",
"=",
"WeblogSerializer",
"(",
"weblogs_qs",
",",
"many",
"=",
"True",
")",
".",
"data",
"return",
"json",
".",
"dumps",
"(",
"weblogs_json",
")"
] |
https://github.com/stratosphereips/Manati/blob/20e55d49edf00f8503807c62397d02a0dad9ddff/manati/api_manager/core/modules_manager.py#L150-L153
|
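Because the kwargs are forwarded into a django Q object, callers can use any Weblog field lookup. A hedged example, assuming the method is exposed as a staticmethod (its bare signature suggests so) and with a hypothetical field name:

import json

payload = ModulesManager.get_filtered_weblogs_json(verdict='malicious')
weblogs = json.loads(payload)   # list of serialized weblog dicts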
|||
aws-samples/aws-kube-codesuite
|
ab4e5ce45416b83bffb947ab8d234df5437f4fca
|
src/click/utils.py
|
python
|
get_binary_stream
|
(name)
|
return opener()
|
Returns a system stream for byte processing. This essentially
returns the stream from the sys module with the given name but it
solves some compatibility issues between different Python versions.
Primarily this function is necessary for getting binary streams on
Python 3.
:param name: the name of the stream to open. Valid names are ``'stdin'``,
``'stdout'`` and ``'stderr'``
|
Returns a system stream for byte processing. This essentially
returns the stream from the sys module with the given name but it
solves some compatibility issues between different Python versions.
Primarily this function is necessary for getting binary streams on
Python 3.
|
[
"Returns",
"a",
"system",
"stream",
"for",
"byte",
"processing",
".",
"This",
"essentially",
"returns",
"the",
"stream",
"from",
"the",
"sys",
"module",
"with",
"the",
"given",
"name",
"but",
"it",
"solves",
"some",
"compatibility",
"issues",
"between",
"different",
"Python",
"versions",
".",
"Primarily",
"this",
"function",
"is",
"necessary",
"for",
"getting",
"binary",
"streams",
"on",
"Python",
"3",
"."
] |
def get_binary_stream(name):
"""Returns a system stream for byte processing. This essentially
returns the stream from the sys module with the given name but it
solves some compatibility issues between different Python versions.
Primarily this function is necessary for getting binary streams on
Python 3.
:param name: the name of the stream to open. Valid names are ``'stdin'``,
``'stdout'`` and ``'stderr'``
"""
opener = binary_streams.get(name)
if opener is None:
raise TypeError('Unknown standard stream %r' % name)
return opener()
|
[
"def",
"get_binary_stream",
"(",
"name",
")",
":",
"opener",
"=",
"binary_streams",
".",
"get",
"(",
"name",
")",
"if",
"opener",
"is",
"None",
":",
"raise",
"TypeError",
"(",
"'Unknown standard stream %r'",
"%",
"name",
")",
"return",
"opener",
"(",
")"
] |
https://github.com/aws-samples/aws-kube-codesuite/blob/ab4e5ce45416b83bffb947ab8d234df5437f4fca/src/click/utils.py#L263-L276
|
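Typical use: grab stdout as a byte stream and write raw bytes without any text-mode encoding step (this is click's public API):

import click

stdout = click.get_binary_stream('stdout')
stdout.write(b'raw bytes, no encoding layer\n')
stdout.flush()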
|
enthought/traitsui
|
b7c38c7a47bf6ae7971f9ddab70c8a358647dd25
|
traitsui/qt4/list_editor.py
|
python
|
NotebookEditor.update_page_name
|
(self, object, name, old, new)
|
Handles the trait defining a particular page's name being changed.
|
Handles the trait defining a particular page's name being changed.
|
[
"Handles",
"the",
"trait",
"defining",
"a",
"particular",
"page",
"s",
"name",
"being",
"changed",
"."
] |
def update_page_name(self, object, name, old, new):
"""Handles the trait defining a particular page's name being changed."""
for i, value in enumerate(self._uis):
page, ui, _, _ = value
if object is ui.info.object:
name = None
handler = getattr(
self.ui.handler,
"%s_%s_page_name" % (self.object_name, self.name),
None,
)
if handler is not None:
name = handler(self.ui.info, object)
if name is None:
name = str(
xgetattr(object, self.factory.page_name[1:], "???")
)
self.control.setTabText(self.control.indexOf(page), name)
break
|
[
"def",
"update_page_name",
"(",
"self",
",",
"object",
",",
"name",
",",
"old",
",",
"new",
")",
":",
"for",
"i",
",",
"value",
"in",
"enumerate",
"(",
"self",
".",
"_uis",
")",
":",
"page",
",",
"ui",
",",
"_",
",",
"_",
"=",
"value",
"if",
"object",
"is",
"ui",
".",
"info",
".",
"object",
":",
"name",
"=",
"None",
"handler",
"=",
"getattr",
"(",
"self",
".",
"ui",
".",
"handler",
",",
"\"%s_%s_page_name\"",
"%",
"(",
"self",
".",
"object_name",
",",
"self",
".",
"name",
")",
",",
"None",
",",
")",
"if",
"handler",
"is",
"not",
"None",
":",
"name",
"=",
"handler",
"(",
"self",
".",
"ui",
".",
"info",
",",
"object",
")",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"str",
"(",
"xgetattr",
"(",
"object",
",",
"self",
".",
"factory",
".",
"page_name",
"[",
"1",
":",
"]",
",",
"\"???\"",
")",
")",
"self",
".",
"control",
".",
"setTabText",
"(",
"self",
".",
"control",
".",
"indexOf",
"(",
"page",
")",
",",
"name",
")",
"break"
] |
https://github.com/enthought/traitsui/blob/b7c38c7a47bf6ae7971f9ddab70c8a358647dd25/traitsui/qt4/list_editor.py#L626-L646
|
||
DxCx/plugin.video.9anime
|
34358c2f701e5ddf19d3276926374a16f63f7b6a
|
resources/lib/ui/js2py/legecy_translators/nparser.py
|
python
|
parseExpression
|
()
|
return delegate.markEndIf(expr)
|
[] |
def parseExpression():
expr = None
delegate.markStart()
expr = parseAssignmentExpression()
if match(","):
expr = delegate.createSequenceExpression([expr])
while index < length:
if not match(","):
break
lex()
expr.expressions.append(parseAssignmentExpression())
return delegate.markEndIf(expr)
|
[
"def",
"parseExpression",
"(",
")",
":",
"expr",
"=",
"None",
"delegate",
".",
"markStart",
"(",
")",
"expr",
"=",
"parseAssignmentExpression",
"(",
")",
"if",
"match",
"(",
"\",\"",
")",
":",
"expr",
"=",
"delegate",
".",
"createSequenceExpression",
"(",
"[",
"expr",
"]",
")",
"while",
"index",
"<",
"length",
":",
"if",
"not",
"match",
"(",
"\",\"",
")",
":",
"break",
"lex",
"(",
")",
"expr",
".",
"expressions",
".",
"append",
"(",
"parseAssignmentExpression",
"(",
")",
")",
"return",
"delegate",
".",
"markEndIf",
"(",
"expr",
")"
] |
https://github.com/DxCx/plugin.video.9anime/blob/34358c2f701e5ddf19d3276926374a16f63f7b6a/resources/lib/ui/js2py/legecy_translators/nparser.py#L1937-L1948
|
|||
facebookresearch/mmf
|
fb6fe390287e1da12c3bd28d4ab43c5f7dcdfc9f
|
mmf/utils/logger.py
|
python
|
_find_caller
|
()
|
Returns:
str: module name of the caller
tuple: a hashable key to be used to identify different callers
|
Returns:
str: module name of the caller
tuple: a hashable key to be used to identify different callers
|
[
"Returns",
":",
"str",
":",
"module",
"name",
"of",
"the",
"caller",
"tuple",
":",
"a",
"hashable",
"key",
"to",
"be",
"used",
"to",
"identify",
"different",
"callers"
] |
def _find_caller():
"""
Returns:
str: module name of the caller
tuple: a hashable key to be used to identify different callers
"""
frame = sys._getframe(2)
while frame:
code = frame.f_code
if os.path.join("utils", "logger.") not in code.co_filename:
mod_name = frame.f_globals["__name__"]
if mod_name == "__main__":
mod_name = "mmf"
return mod_name, (code.co_filename, frame.f_lineno, code.co_name)
frame = frame.f_back
|
[
"def",
"_find_caller",
"(",
")",
":",
"frame",
"=",
"sys",
".",
"_getframe",
"(",
"2",
")",
"while",
"frame",
":",
"code",
"=",
"frame",
".",
"f_code",
"if",
"os",
".",
"path",
".",
"join",
"(",
"\"utils\"",
",",
"\"logger.\"",
")",
"not",
"in",
"code",
".",
"co_filename",
":",
"mod_name",
"=",
"frame",
".",
"f_globals",
"[",
"\"__name__\"",
"]",
"if",
"mod_name",
"==",
"\"__main__\"",
":",
"mod_name",
"=",
"\"mmf\"",
"return",
"mod_name",
",",
"(",
"code",
".",
"co_filename",
",",
"frame",
".",
"f_lineno",
",",
"code",
".",
"co_name",
")",
"frame",
"=",
"frame",
".",
"f_back"
] |
https://github.com/facebookresearch/mmf/blob/fb6fe390287e1da12c3bd28d4ab43c5f7dcdfc9f/mmf/utils/logger.py#L196-L210
|
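The pattern generalises: walk sys._getframe() parents until the filename falls outside the helper module itself. A stripped-down sketch of the same walk (names are illustrative):

import sys

def find_caller(skip_substring):
    frame = sys._getframe(1)
    while frame:
        code = frame.f_code
        if skip_substring not in code.co_filename:
            # first frame that does not belong to the helper module
            return frame.f_globals["__name__"], (
                code.co_filename, frame.f_lineno, code.co_name)
        frame = frame.f_back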
||
yt-project/yt
|
dc7b24f9b266703db4c843e329c6c8644d47b824
|
yt/frontends/owls/data_structures.py
|
python
|
OWLSDataset._set_code_unit_attributes
|
(self)
|
[] |
def _set_code_unit_attributes(self):
self._set_owls_eagle_units()
|
[
"def",
"_set_code_unit_attributes",
"(",
"self",
")",
":",
"self",
".",
"_set_owls_eagle_units",
"(",
")"
] |
https://github.com/yt-project/yt/blob/dc7b24f9b266703db4c843e329c6c8644d47b824/yt/frontends/owls/data_structures.py#L30-L31
|
||||
Dentosal/python-sc2
|
e816cce83772d1aee1291b86b300b69405aa96b4
|
sc2/bot_ai.py
|
python
|
BotAI.in_pathing_grid
|
(self, pos: Union[Point2, Point3, Unit])
|
return self._game_info.pathing_grid[pos] == 0
|
Returns True if a unit can pass through a grid point.
|
Returns True if a unit can pass through a grid point.
|
[
"Returns",
"True",
"if",
"a",
"unit",
"can",
"pass",
"through",
"a",
"grid",
"point",
"."
] |
def in_pathing_grid(self, pos: Union[Point2, Point3, Unit]) -> bool:
""" Returns True if a unit can pass through a grid point. """
assert isinstance(pos, (Point2, Point3, Unit))
pos = pos.position.to2.rounded
return self._game_info.pathing_grid[pos] == 0
|
[
"def",
"in_pathing_grid",
"(",
"self",
",",
"pos",
":",
"Union",
"[",
"Point2",
",",
"Point3",
",",
"Unit",
"]",
")",
"->",
"bool",
":",
"assert",
"isinstance",
"(",
"pos",
",",
"(",
"Point2",
",",
"Point3",
",",
"Unit",
")",
")",
"pos",
"=",
"pos",
".",
"position",
".",
"to2",
".",
"rounded",
"return",
"self",
".",
"_game_info",
".",
"pathing_grid",
"[",
"pos",
"]",
"==",
"0"
] |
https://github.com/Dentosal/python-sc2/blob/e816cce83772d1aee1291b86b300b69405aa96b4/sc2/bot_ai.py#L482-L486
|
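A sketch of using the check inside a bot's step loop; attributes like self.workers and self.start_location are real python-sc2 API, but the command-issuing style (await self.do) varies across library versions:

import sc2

class PathAwareBot(sc2.BotAI):
    async def on_step(self, iteration):
        target = self.start_location.towards(self.enemy_start_locations[0], 5)
        if self.in_pathing_grid(target):       # skip cliffs and void
            for worker in self.workers:
                await self.do(worker.move(target))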
|
koaning/scikit-lego
|
028597fd0ba9ac387b9faa6f06050a7ee05e6cba
|
sklego/naive_bayes.py
|
python
|
BayesianGaussianMixtureNB.fit
|
(self, X: np.array, y: np.array)
|
return self
|
Fit the model using X, y as training data.
:param X: array-like, shape=(n_samples, n_columns, ) training data.
:param y: array-like, shape=(n_samples, ) training data.
:return: Returns an instance of self.
|
Fit the model using X, y as training data.
|
[
"Fit",
"the",
"model",
"using",
"X",
"y",
"as",
"training",
"data",
"."
] |
def fit(self, X: np.array, y: np.array) -> "BayesianGaussianMixtureNB":
"""
Fit the model using X, y as training data.
:param X: array-like, shape=(n_columns, n_samples, ) training data.
:param y: array-like, shape=(n_samples, ) training data.
:return: Returns an instance of self.
"""
X, y = check_X_y(X, y, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
X = np.expand_dims(X, 1)
self.gmms_ = {}
self.classes_ = unique_labels(y)
self.num_fit_cols_ = X.shape[1]
for c in self.classes_:
subset_x, subset_y = X[y == c], y[y == c]
self.gmms_[c] = [
BayesianGaussianMixture(
n_components=self.n_components,
covariance_type=self.covariance_type,
tol=self.tol,
reg_covar=self.reg_covar,
max_iter=self.max_iter,
n_init=self.n_init,
init_params=self.init_params,
weight_concentration_prior_type=self.weight_concentration_prior_type,
weight_concentration_prior=self.weight_concentration_prior,
mean_precision_prior=self.mean_precision_prior,
mean_prior=self.mean_prior,
degrees_of_freedom_prior=self.degrees_of_freedom_prior,
covariance_prior=self.covariance_prior,
random_state=self.random_state,
warm_start=self.warm_start,
verbose=self.verbose,
verbose_interval=self.verbose_interval,
).fit(subset_x[:, i].reshape(-1, 1), subset_y)
for i in range(X.shape[1])
]
return self
|
[
"def",
"fit",
"(",
"self",
",",
"X",
":",
"np",
".",
"array",
",",
"y",
":",
"np",
".",
"array",
")",
"->",
"\"BayesianGaussianMixtureNB\"",
":",
"X",
",",
"y",
"=",
"check_X_y",
"(",
"X",
",",
"y",
",",
"estimator",
"=",
"self",
",",
"dtype",
"=",
"FLOAT_DTYPES",
")",
"if",
"X",
".",
"ndim",
"==",
"1",
":",
"X",
"=",
"np",
".",
"expand_dims",
"(",
"X",
",",
"1",
")",
"self",
".",
"gmms_",
"=",
"{",
"}",
"self",
".",
"classes_",
"=",
"unique_labels",
"(",
"y",
")",
"self",
".",
"num_fit_cols_",
"=",
"X",
".",
"shape",
"[",
"1",
"]",
"for",
"c",
"in",
"self",
".",
"classes_",
":",
"subset_x",
",",
"subset_y",
"=",
"X",
"[",
"y",
"==",
"c",
"]",
",",
"y",
"[",
"y",
"==",
"c",
"]",
"self",
".",
"gmms_",
"[",
"c",
"]",
"=",
"[",
"BayesianGaussianMixture",
"(",
"n_components",
"=",
"self",
".",
"n_components",
",",
"covariance_type",
"=",
"self",
".",
"covariance_type",
",",
"tol",
"=",
"self",
".",
"tol",
",",
"reg_covar",
"=",
"self",
".",
"reg_covar",
",",
"max_iter",
"=",
"self",
".",
"max_iter",
",",
"n_init",
"=",
"self",
".",
"n_init",
",",
"init_params",
"=",
"self",
".",
"init_params",
",",
"weight_concentration_prior_type",
"=",
"self",
".",
"weight_concentration_prior_type",
",",
"weight_concentration_prior",
"=",
"self",
".",
"weight_concentration_prior",
",",
"mean_precision_prior",
"=",
"self",
".",
"mean_precision_prior",
",",
"mean_prior",
"=",
"self",
".",
"mean_prior",
",",
"degrees_of_freedom_prior",
"=",
"self",
".",
"degrees_of_freedom_prior",
",",
"covariance_prior",
"=",
"self",
".",
"covariance_prior",
",",
"random_state",
"=",
"self",
".",
"random_state",
",",
"warm_start",
"=",
"self",
".",
"warm_start",
",",
"verbose",
"=",
"self",
".",
"verbose",
",",
"verbose_interval",
"=",
"self",
".",
"verbose_interval",
",",
")",
".",
"fit",
"(",
"subset_x",
"[",
":",
",",
"i",
"]",
".",
"reshape",
"(",
"-",
"1",
",",
"1",
")",
",",
"subset_y",
")",
"for",
"i",
"in",
"range",
"(",
"X",
".",
"shape",
"[",
"1",
"]",
")",
"]",
"return",
"self"
] |
https://github.com/koaning/scikit-lego/blob/028597fd0ba9ac387b9faa6f06050a7ee05e6cba/sklego/naive_bayes.py#L156-L195
|
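Fitted like any scikit-learn classifier: one Bayesian mixture per class per column. A toy run -- the data is illustrative, and predict is assumed to come from the estimator's shared naive-Bayes interface:

import numpy as np
from sklego.naive_bayes import BayesianGaussianMixtureNB

rng = np.random.RandomState(0)
X = rng.normal(size=(100, 2))
y = (X[:, 0] > 0).astype(int)

model = BayesianGaussianMixtureNB(n_components=2).fit(X, y)
preds = model.predict(X)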
|
chapmanb/bcbb
|
dbfb52711f0bfcc1d26c5a5b53c9ff4f50dc0027
|
nextgen/bcbio/utils.py
|
python
|
replace_suffix
|
(filename, suffix)
|
return base + suffix
|
replace the suffix of filename with suffix
example: replace_suffix("/path/to/test.sam", ".bam") ->
"/path/to/test.bam"
|
replace the suffix of filename with suffix
example: replace_suffix("/path/to/test.sam", ".bam") ->
"/path/to/test.bam"
|
[
"replace",
"the",
"suffix",
"of",
"filename",
"with",
"suffix",
"example",
":",
"replace_suffix",
"(",
"/",
"path",
"/",
"to",
"/",
"test",
".",
"sam",
".",
"bam",
")",
"-",
">",
"/",
"path",
"/",
"to",
"/",
"test",
".",
"bam"
] |
def replace_suffix(filename, suffix):
"""
replace the suffix of filename with suffix
example: replace_suffix("/path/to/test.sam", ".bam") ->
"/path/to/test.bam"
"""
(base, _) = os.path.splitext(filename)
return base + suffix
|
[
"def",
"replace_suffix",
"(",
"filename",
",",
"suffix",
")",
":",
"(",
"base",
",",
"_",
")",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"return",
"base",
"+",
"suffix"
] |
https://github.com/chapmanb/bcbb/blob/dbfb52711f0bfcc1d26c5a5b53c9ff4f50dc0027/nextgen/bcbio/utils.py#L273-L281
|
|
aws-samples/aws-kube-codesuite
|
ab4e5ce45416b83bffb947ab8d234df5437f4fca
|
src/pkg_resources/__init__.py
|
python
|
ResourceManager.postprocess
|
(self, tempname, filename)
|
Perform any platform-specific postprocessing of `tempname`
This is where Mac header rewrites should be done; other platforms don't
have anything special they should do.
Resource providers should call this method ONLY after successfully
extracting a compressed resource. They must NOT call it on resources
that are already in the filesystem.
`tempname` is the current (temporary) name of the file, and `filename`
is the name it will be renamed to by the caller after this routine
returns.
|
Perform any platform-specific postprocessing of `tempname`
|
[
"Perform",
"any",
"platform",
"-",
"specific",
"postprocessing",
"of",
"tempname"
] |
def postprocess(self, tempname, filename):
"""Perform any platform-specific postprocessing of `tempname`
This is where Mac header rewrites should be done; other platforms don't
have anything special they should do.
Resource providers should call this method ONLY after successfully
extracting a compressed resource. They must NOT call it on resources
that are already in the filesystem.
`tempname` is the current (temporary) name of the file, and `filename`
is the name it will be renamed to by the caller after this routine
returns.
"""
if os.name == 'posix':
# Make the resource executable
mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
os.chmod(tempname, mode)
|
[
"def",
"postprocess",
"(",
"self",
",",
"tempname",
",",
"filename",
")",
":",
"if",
"os",
".",
"name",
"==",
"'posix'",
":",
"# Make the resource executable",
"mode",
"=",
"(",
"(",
"os",
".",
"stat",
"(",
"tempname",
")",
".",
"st_mode",
")",
"|",
"0o555",
")",
"&",
"0o7777",
"os",
".",
"chmod",
"(",
"tempname",
",",
"mode",
")"
] |
https://github.com/aws-samples/aws-kube-codesuite/blob/ab4e5ce45416b83bffb947ab8d234df5437f4fca/src/pkg_resources/__init__.py#L1319-L1337
|
||
nosmokingbandit/Watcher3
|
0217e75158b563bdefc8e01c3be7620008cf3977
|
lib/sqlalchemy/ext/mutable.py
|
python
|
MutableDict.__setitem__
|
(self, key, value)
|
Detect dictionary set events and emit change events.
|
Detect dictionary set events and emit change events.
|
[
"Detect",
"dictionary",
"set",
"events",
"and",
"emit",
"change",
"events",
"."
] |
def __setitem__(self, key, value):
"""Detect dictionary set events and emit change events."""
dict.__setitem__(self, key, value)
self.changed()
|
[
"def",
"__setitem__",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"dict",
".",
"__setitem__",
"(",
"self",
",",
"key",
",",
"value",
")",
"self",
".",
"changed",
"(",
")"
] |
https://github.com/nosmokingbandit/Watcher3/blob/0217e75158b563bdefc8e01c3be7620008cf3977/lib/sqlalchemy/ext/mutable.py#L660-L663
|
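The hook is what makes in-place dict mutation visible to the unit of work. Standard usage wraps a column type with MutableDict.as_mutable (real SQLAlchemy API; the model is illustrative):

from sqlalchemy import Column, Integer, JSON
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.mutable import MutableDict

Base = declarative_base()

class Account(Base):
    __tablename__ = 'account'
    id = Column(Integer, primary_key=True)
    data = Column(MutableDict.as_mutable(JSON))

# account.data['plan'] = 'pro'  # routes through __setitem__ -> changed()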
||
google/deepvariant
|
9cf1c7b0e2342d013180aa153cba3c9331c9aef7
|
deepvariant/vcf_stats.py
|
python
|
_tstv
|
(variant, vtype)
|
return is_transition, is_transversion
|
Returns a pair of bools indicating Transition, Transversion status.
|
Returns a pair of bools indicating Transition, Transversion status.
|
[
"Returns",
"a",
"pair",
"of",
"bools",
"indicating",
"Transition",
"Transversion",
"status",
"."
] |
def _tstv(variant, vtype):
"""Returns a pair of bools indicating Transition, Transversion status."""
if vtype == BIALLELIC_SNP:
is_transition = variant_utils.is_transition(variant.reference_bases,
variant.alternate_bases[0])
is_transversion = not is_transition
else:
is_transition = is_transversion = False
return is_transition, is_transversion
|
[
"def",
"_tstv",
"(",
"variant",
",",
"vtype",
")",
":",
"if",
"vtype",
"==",
"BIALLELIC_SNP",
":",
"is_transition",
"=",
"variant_utils",
".",
"is_transition",
"(",
"variant",
".",
"reference_bases",
",",
"variant",
".",
"alternate_bases",
"[",
"0",
"]",
")",
"is_transversion",
"=",
"not",
"is_transition",
"else",
":",
"is_transition",
"=",
"is_transversion",
"=",
"False",
"return",
"is_transition",
",",
"is_transversion"
] |
https://github.com/google/deepvariant/blob/9cf1c7b0e2342d013180aa153cba3c9331c9aef7/deepvariant/vcf_stats.py#L89-L98
|
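The underlying rule: transitions swap within purines (A<->G) or within pyrimidines (C<->T); everything else is a transversion. A self-contained sketch of the check that variant_utils performs, written independently of deepvariant:

PURINES = {'A', 'G'}
PYRIMIDINES = {'C', 'T'}

def is_transition(ref, alt):
    return {ref, alt} == PURINES or {ref, alt} == PYRIMIDINES

assert is_transition('A', 'G')        # purine <-> purine
assert not is_transition('A', 'C')    # transversion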
|
pymedusa/Medusa
|
1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38
|
ext/pint/compat/tokenize.py
|
python
|
_get_normal_name
|
(orig_enc)
|
return orig_enc
|
Imitates get_normal_name in tokenizer.c.
|
Imitates get_normal_name in tokenizer.c.
|
[
"Imitates",
"get_normal_name",
"in",
"tokenizer",
".",
"c",
"."
] |
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
|
[
"def",
"_get_normal_name",
"(",
"orig_enc",
")",
":",
"# Only care about the first 12 characters.",
"enc",
"=",
"orig_enc",
"[",
":",
"12",
"]",
".",
"lower",
"(",
")",
".",
"replace",
"(",
"\"_\"",
",",
"\"-\"",
")",
"if",
"enc",
"==",
"\"utf-8\"",
"or",
"enc",
".",
"startswith",
"(",
"\"utf-8-\"",
")",
":",
"return",
"\"utf-8\"",
"if",
"enc",
"in",
"(",
"\"latin-1\"",
",",
"\"iso-8859-1\"",
",",
"\"iso-latin-1\"",
")",
"or",
"enc",
".",
"startswith",
"(",
"(",
"\"latin-1-\"",
",",
"\"iso-8859-1-\"",
",",
"\"iso-latin-1-\"",
")",
")",
":",
"return",
"\"iso-8859-1\"",
"return",
"orig_enc"
] |
https://github.com/pymedusa/Medusa/blob/1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38/ext/pint/compat/tokenize.py#L343-L352
|
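Behaviour at a glance: case-fold, unify "_" and "-", collapse the common aliases onto canonical names, and pass unknown encodings through untouched:

assert _get_normal_name('UTF_8') == 'utf-8'
assert _get_normal_name('Latin-1') == 'iso-8859-1'
assert _get_normal_name('cp1252') == 'cp1252'   # unknown: returned as-is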
|
asappresearch/sru
|
9ddc8da12f067125c2cfdd4f3b28a87c02889681
|
sru/modules.py
|
python
|
SRUCell.forward
|
(self,
input: Tensor,
c0: Optional[Tensor] = None,
mask_pad: Optional[Tensor] = None)
|
return h, c
|
The forward method of the SRU layer.
|
The forward method of the SRU layer.
|
[
"The",
"forward",
"method",
"of",
"the",
"SRU",
"layer",
"."
] |
def forward(self,
input: Tensor,
c0: Optional[Tensor] = None,
mask_pad: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]:
"""The forward method of the SRU layer.
"""
if input.dim() != 2 and input.dim() != 3:
raise ValueError("Input must be 2 or 3 dimensional")
batch_size = input.size(-2)
if c0 is None:
c0 = torch.zeros(batch_size, self.output_size, dtype=input.dtype,
device=input.device)
# apply layer norm before activation (i.e. before SRU computation)
residual = input
if self.layer_norm is not None and not self.normalize_after:
input = self.layer_norm(input)
# apply dropout for multiplication
if self.training and (self.rnn_dropout > 0):
mask = self.get_dropout_mask_((batch_size, input.size(-1)), self.rnn_dropout)
input = input * mask.expand_as(input)
# get the scaling constant; scale_x is a scalar
scale_val: Optional[Tensor] = None
scale_val = self.scale_x if self.rescale else None
# get dropout mask
mask_c: Optional[Tensor] = None
if self.training and (self.dropout > 0):
mask_c = self.get_dropout_mask_((batch_size, self.output_size),
self.dropout)
# compute U, V
# U is (length, batch_size, output_size * num_matrices)
# V is (output_size*2,) or (length, batch_size, output_size * 2) if provided
U, V = self.compute_UV(input, c0, mask_pad)
# apply elementwise recurrence to get hidden states h and c
h, c = self.apply_recurrence(U, V, residual, c0, scale_val, mask_c, mask_pad)
if self.layer_norm is not None and self.normalize_after:
h = self.layer_norm(h)
return h, c
|
[
"def",
"forward",
"(",
"self",
",",
"input",
":",
"Tensor",
",",
"c0",
":",
"Optional",
"[",
"Tensor",
"]",
"=",
"None",
",",
"mask_pad",
":",
"Optional",
"[",
"Tensor",
"]",
"=",
"None",
")",
"->",
"Tuple",
"[",
"Tensor",
",",
"Tensor",
"]",
":",
"if",
"input",
".",
"dim",
"(",
")",
"!=",
"2",
"and",
"input",
".",
"dim",
"(",
")",
"!=",
"3",
":",
"raise",
"ValueError",
"(",
"\"Input must be 2 or 3 dimensional\"",
")",
"batch_size",
"=",
"input",
".",
"size",
"(",
"-",
"2",
")",
"if",
"c0",
"is",
"None",
":",
"c0",
"=",
"torch",
".",
"zeros",
"(",
"batch_size",
",",
"self",
".",
"output_size",
",",
"dtype",
"=",
"input",
".",
"dtype",
",",
"device",
"=",
"input",
".",
"device",
")",
"# apply layer norm before activation (i.e. before SRU computation)",
"residual",
"=",
"input",
"if",
"self",
".",
"layer_norm",
"is",
"not",
"None",
"and",
"not",
"self",
".",
"normalize_after",
":",
"input",
"=",
"self",
".",
"layer_norm",
"(",
"input",
")",
"# apply dropout for multiplication",
"if",
"self",
".",
"training",
"and",
"(",
"self",
".",
"rnn_dropout",
">",
"0",
")",
":",
"mask",
"=",
"self",
".",
"get_dropout_mask_",
"(",
"(",
"batch_size",
",",
"input",
".",
"size",
"(",
"-",
"1",
")",
")",
",",
"self",
".",
"rnn_dropout",
")",
"input",
"=",
"input",
"*",
"mask",
".",
"expand_as",
"(",
"input",
")",
"# get the scaling constant; scale_x is a scalar",
"scale_val",
":",
"Optional",
"[",
"Tensor",
"]",
"=",
"None",
"scale_val",
"=",
"self",
".",
"scale_x",
"if",
"self",
".",
"rescale",
"else",
"None",
"# get dropout mask",
"mask_c",
":",
"Optional",
"[",
"Tensor",
"]",
"=",
"None",
"if",
"self",
".",
"training",
"and",
"(",
"self",
".",
"dropout",
">",
"0",
")",
":",
"mask_c",
"=",
"self",
".",
"get_dropout_mask_",
"(",
"(",
"batch_size",
",",
"self",
".",
"output_size",
")",
",",
"self",
".",
"dropout",
")",
"# compute U, V",
"# U is (length, batch_size, output_size * num_matrices)",
"# V is (output_size*2,) or (length, batch_size, output_size * 2) if provided",
"U",
",",
"V",
"=",
"self",
".",
"compute_UV",
"(",
"input",
",",
"c0",
",",
"mask_pad",
")",
"# apply elementwise recurrence to get hidden states h and c",
"h",
",",
"c",
"=",
"self",
".",
"apply_recurrence",
"(",
"U",
",",
"V",
",",
"residual",
",",
"c0",
",",
"scale_val",
",",
"mask_c",
",",
"mask_pad",
")",
"if",
"self",
".",
"layer_norm",
"is",
"not",
"None",
"and",
"self",
".",
"normalize_after",
":",
"h",
"=",
"self",
".",
"layer_norm",
"(",
"h",
")",
"return",
"h",
",",
"c"
] |
https://github.com/asappresearch/sru/blob/9ddc8da12f067125c2cfdd4f3b28a87c02889681/sru/modules.py#L235-L281
|
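A minimal forward pass, assuming the documented (length, batch, input_size) layout and an SRUCell(input_size, hidden_size) constructor -- the sizes are illustrative:

import torch
from sru import SRUCell

cell = SRUCell(128, 128)
x = torch.randn(20, 4, 128)   # (length, batch, input_size)
h, c = cell(x)                # h per time step, c is the final state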
|
silverapp/silver
|
a59dbc7216733ab49dca2fae525d229bdba04420
|
silver/api/views/payment_method_views.py
|
python
|
PaymentMethodList.get_queryset
|
(self)
|
return PaymentMethod.objects.filter(customer=self.customer)
|
[] |
def get_queryset(self):
return PaymentMethod.objects.filter(customer=self.customer)
|
[
"def",
"get_queryset",
"(",
"self",
")",
":",
"return",
"PaymentMethod",
".",
"objects",
".",
"filter",
"(",
"customer",
"=",
"self",
".",
"customer",
")"
] |
https://github.com/silverapp/silver/blob/a59dbc7216733ab49dca2fae525d229bdba04420/silver/api/views/payment_method_views.py#L61-L62
|
|||
mesonbuild/meson
|
a22d0f9a0a787df70ce79b05d0c45de90a970048
|
mesonbuild/backend/vs2010backend.py
|
python
|
Vs2010Backend.get_vcvars_command
|
(self)
|
return ''
|
[] |
def get_vcvars_command(self):
has_arch_values = 'VSCMD_ARG_TGT_ARCH' in os.environ and 'VSCMD_ARG_HOST_ARCH' in os.environ
# Use vcvarsall.bat if we found it.
if 'VCINSTALLDIR' in os.environ:
vs_version = os.environ['VisualStudioVersion'] \
if 'VisualStudioVersion' in os.environ else None
relative_path = 'Auxiliary\\Build\\' if vs_version is not None and vs_version >= '15.0' else ''
script_path = os.environ['VCINSTALLDIR'] + relative_path + 'vcvarsall.bat'
if os.path.exists(script_path):
if has_arch_values:
target_arch = os.environ['VSCMD_ARG_TGT_ARCH']
host_arch = os.environ['VSCMD_ARG_HOST_ARCH']
else:
target_arch = os.environ.get('Platform', 'x86')
host_arch = target_arch
arch = host_arch + '_' + target_arch if host_arch != target_arch else target_arch
return f'"{script_path}" {arch}'
# Otherwise try the VS2017 Developer Command Prompt.
if 'VS150COMNTOOLS' in os.environ and has_arch_values:
script_path = os.environ['VS150COMNTOOLS'] + 'VsDevCmd.bat'
if os.path.exists(script_path):
return '"%s" -arch=%s -host_arch=%s' % \
(script_path, os.environ['VSCMD_ARG_TGT_ARCH'], os.environ['VSCMD_ARG_HOST_ARCH'])
return ''
|
[
"def",
"get_vcvars_command",
"(",
"self",
")",
":",
"has_arch_values",
"=",
"'VSCMD_ARG_TGT_ARCH'",
"in",
"os",
".",
"environ",
"and",
"'VSCMD_ARG_HOST_ARCH'",
"in",
"os",
".",
"environ",
"# Use vcvarsall.bat if we found it.",
"if",
"'VCINSTALLDIR'",
"in",
"os",
".",
"environ",
":",
"vs_version",
"=",
"os",
".",
"environ",
"[",
"'VisualStudioVersion'",
"]",
"if",
"'VisualStudioVersion'",
"in",
"os",
".",
"environ",
"else",
"None",
"relative_path",
"=",
"'Auxiliary\\\\Build\\\\'",
"if",
"vs_version",
"is",
"not",
"None",
"and",
"vs_version",
">=",
"'15.0'",
"else",
"''",
"script_path",
"=",
"os",
".",
"environ",
"[",
"'VCINSTALLDIR'",
"]",
"+",
"relative_path",
"+",
"'vcvarsall.bat'",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"script_path",
")",
":",
"if",
"has_arch_values",
":",
"target_arch",
"=",
"os",
".",
"environ",
"[",
"'VSCMD_ARG_TGT_ARCH'",
"]",
"host_arch",
"=",
"os",
".",
"environ",
"[",
"'VSCMD_ARG_HOST_ARCH'",
"]",
"else",
":",
"target_arch",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'Platform'",
",",
"'x86'",
")",
"host_arch",
"=",
"target_arch",
"arch",
"=",
"host_arch",
"+",
"'_'",
"+",
"target_arch",
"if",
"host_arch",
"!=",
"target_arch",
"else",
"target_arch",
"return",
"f'\"{script_path}\" {arch}'",
"# Otherwise try the VS2017 Developer Command Prompt.",
"if",
"'VS150COMNTOOLS'",
"in",
"os",
".",
"environ",
"and",
"has_arch_values",
":",
"script_path",
"=",
"os",
".",
"environ",
"[",
"'VS150COMNTOOLS'",
"]",
"+",
"'VsDevCmd.bat'",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"script_path",
")",
":",
"return",
"'\"%s\" -arch=%s -host_arch=%s'",
"%",
"(",
"script_path",
",",
"os",
".",
"environ",
"[",
"'VSCMD_ARG_TGT_ARCH'",
"]",
",",
"os",
".",
"environ",
"[",
"'VSCMD_ARG_HOST_ARCH'",
"]",
")",
"return",
"''"
] |
https://github.com/mesonbuild/meson/blob/a22d0f9a0a787df70ce79b05d0c45de90a970048/mesonbuild/backend/vs2010backend.py#L239-L264
|
|||
ranahanocka/MeshCNN
|
15b83cc6a4db968baf6cf595df78995a9c2dcee3
|
models/layers/mesh_prepare.py
|
python
|
build_gemm
|
(mesh, faces, face_areas)
|
gemm_edges: array (#E x 4) of the 4 one-ring neighbors for each edge
sides: array (#E x 4) indices (values of: 0,1,2,3) indicating where an edge is in the gemm_edge entry of the 4 neighboring edges
for example edge i -> gemm_edges[gemm_edges[i], sides[i]] == [i, i, i, i]
|
gemm_edges: array (#E x 4) of the 4 one-ring neighbors for each edge
sides: array (#E x 4) indices (values of: 0,1,2,3) indicating where an edge is in the gemm_edge entry of the 4 neighboring edges
for example edge i -> gemm_edges[gemm_edges[i], sides[i]] == [i, i, i, i]
|
[
"gemm_edges",
":",
"array",
"(",
"#E",
"x",
"4",
")",
"of",
"the",
"4",
"one",
"-",
"ring",
"neighbors",
"for",
"each",
"edge",
"sides",
":",
"array",
"(",
"#E",
"x",
"4",
")",
"indices",
"(",
"values",
"of",
":",
"0",
"1",
"2",
"3",
")",
"indicating",
"where",
"an",
"edge",
"is",
"in",
"the",
"gemm_edge",
"entry",
"of",
"the",
"4",
"neighboring",
"edges",
"for",
"example",
"edge",
"i",
"-",
">",
"gemm_edges",
"[",
"gemm_edges",
"[",
"i",
"]",
"sides",
"[",
"i",
"]]",
"==",
"[",
"i",
"i",
"i",
"i",
"]"
] |
def build_gemm(mesh, faces, face_areas):
"""
gemm_edges: array (#E x 4) of the 4 one-ring neighbors for each edge
sides: array (#E x 4) indices (values of: 0,1,2,3) indicating where an edge is in the gemm_edge entry of the 4 neighboring edges
for example edge i -> gemm_edges[gemm_edges[i], sides[i]] == [i, i, i, i]
"""
mesh.ve = [[] for _ in mesh.vs]
edge_nb = []
sides = []
edge2key = dict()
edges = []
edges_count = 0
nb_count = []
for face_id, face in enumerate(faces):
faces_edges = []
for i in range(3):
cur_edge = (face[i], face[(i + 1) % 3])
faces_edges.append(cur_edge)
for idx, edge in enumerate(faces_edges):
edge = tuple(sorted(list(edge)))
faces_edges[idx] = edge
if edge not in edge2key:
edge2key[edge] = edges_count
edges.append(list(edge))
edge_nb.append([-1, -1, -1, -1])
sides.append([-1, -1, -1, -1])
mesh.ve[edge[0]].append(edges_count)
mesh.ve[edge[1]].append(edges_count)
mesh.edge_areas.append(0)
nb_count.append(0)
edges_count += 1
mesh.edge_areas[edge2key[edge]] += face_areas[face_id] / 3
for idx, edge in enumerate(faces_edges):
edge_key = edge2key[edge]
edge_nb[edge_key][nb_count[edge_key]] = edge2key[faces_edges[(idx + 1) % 3]]
edge_nb[edge_key][nb_count[edge_key] + 1] = edge2key[faces_edges[(idx + 2) % 3]]
nb_count[edge_key] += 2
for idx, edge in enumerate(faces_edges):
edge_key = edge2key[edge]
sides[edge_key][nb_count[edge_key] - 2] = nb_count[edge2key[faces_edges[(idx + 1) % 3]]] - 1
sides[edge_key][nb_count[edge_key] - 1] = nb_count[edge2key[faces_edges[(idx + 2) % 3]]] - 2
mesh.edges = np.array(edges, dtype=np.int32)
mesh.gemm_edges = np.array(edge_nb, dtype=np.int64)
mesh.sides = np.array(sides, dtype=np.int64)
mesh.edges_count = edges_count
mesh.edge_areas = np.array(mesh.edge_areas, dtype=np.float32) / np.sum(face_areas)
|
[
"def",
"build_gemm",
"(",
"mesh",
",",
"faces",
",",
"face_areas",
")",
":",
"mesh",
".",
"ve",
"=",
"[",
"[",
"]",
"for",
"_",
"in",
"mesh",
".",
"vs",
"]",
"edge_nb",
"=",
"[",
"]",
"sides",
"=",
"[",
"]",
"edge2key",
"=",
"dict",
"(",
")",
"edges",
"=",
"[",
"]",
"edges_count",
"=",
"0",
"nb_count",
"=",
"[",
"]",
"for",
"face_id",
",",
"face",
"in",
"enumerate",
"(",
"faces",
")",
":",
"faces_edges",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"3",
")",
":",
"cur_edge",
"=",
"(",
"face",
"[",
"i",
"]",
",",
"face",
"[",
"(",
"i",
"+",
"1",
")",
"%",
"3",
"]",
")",
"faces_edges",
".",
"append",
"(",
"cur_edge",
")",
"for",
"idx",
",",
"edge",
"in",
"enumerate",
"(",
"faces_edges",
")",
":",
"edge",
"=",
"tuple",
"(",
"sorted",
"(",
"list",
"(",
"edge",
")",
")",
")",
"faces_edges",
"[",
"idx",
"]",
"=",
"edge",
"if",
"edge",
"not",
"in",
"edge2key",
":",
"edge2key",
"[",
"edge",
"]",
"=",
"edges_count",
"edges",
".",
"append",
"(",
"list",
"(",
"edge",
")",
")",
"edge_nb",
".",
"append",
"(",
"[",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
"]",
")",
"sides",
".",
"append",
"(",
"[",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
"]",
")",
"mesh",
".",
"ve",
"[",
"edge",
"[",
"0",
"]",
"]",
".",
"append",
"(",
"edges_count",
")",
"mesh",
".",
"ve",
"[",
"edge",
"[",
"1",
"]",
"]",
".",
"append",
"(",
"edges_count",
")",
"mesh",
".",
"edge_areas",
".",
"append",
"(",
"0",
")",
"nb_count",
".",
"append",
"(",
"0",
")",
"edges_count",
"+=",
"1",
"mesh",
".",
"edge_areas",
"[",
"edge2key",
"[",
"edge",
"]",
"]",
"+=",
"face_areas",
"[",
"face_id",
"]",
"/",
"3",
"for",
"idx",
",",
"edge",
"in",
"enumerate",
"(",
"faces_edges",
")",
":",
"edge_key",
"=",
"edge2key",
"[",
"edge",
"]",
"edge_nb",
"[",
"edge_key",
"]",
"[",
"nb_count",
"[",
"edge_key",
"]",
"]",
"=",
"edge2key",
"[",
"faces_edges",
"[",
"(",
"idx",
"+",
"1",
")",
"%",
"3",
"]",
"]",
"edge_nb",
"[",
"edge_key",
"]",
"[",
"nb_count",
"[",
"edge_key",
"]",
"+",
"1",
"]",
"=",
"edge2key",
"[",
"faces_edges",
"[",
"(",
"idx",
"+",
"2",
")",
"%",
"3",
"]",
"]",
"nb_count",
"[",
"edge_key",
"]",
"+=",
"2",
"for",
"idx",
",",
"edge",
"in",
"enumerate",
"(",
"faces_edges",
")",
":",
"edge_key",
"=",
"edge2key",
"[",
"edge",
"]",
"sides",
"[",
"edge_key",
"]",
"[",
"nb_count",
"[",
"edge_key",
"]",
"-",
"2",
"]",
"=",
"nb_count",
"[",
"edge2key",
"[",
"faces_edges",
"[",
"(",
"idx",
"+",
"1",
")",
"%",
"3",
"]",
"]",
"]",
"-",
"1",
"sides",
"[",
"edge_key",
"]",
"[",
"nb_count",
"[",
"edge_key",
"]",
"-",
"1",
"]",
"=",
"nb_count",
"[",
"edge2key",
"[",
"faces_edges",
"[",
"(",
"idx",
"+",
"2",
")",
"%",
"3",
"]",
"]",
"]",
"-",
"2",
"mesh",
".",
"edges",
"=",
"np",
".",
"array",
"(",
"edges",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"mesh",
".",
"gemm_edges",
"=",
"np",
".",
"array",
"(",
"edge_nb",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
"mesh",
".",
"sides",
"=",
"np",
".",
"array",
"(",
"sides",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
"mesh",
".",
"edges_count",
"=",
"edges_count",
"mesh",
".",
"edge_areas",
"=",
"np",
".",
"array",
"(",
"mesh",
".",
"edge_areas",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"/",
"np",
".",
"sum",
"(",
"face_areas",
")"
] |
https://github.com/ranahanocka/MeshCNN/blob/15b83cc6a4db968baf6cf595df78995a9c2dcee3/models/layers/mesh_prepare.py#L116-L161
|
||
openshift/openshift-tools
|
1188778e728a6e4781acf728123e5b356380fe6f
|
openshift/installer/vendored/openshift-ansible-3.9.40/roles/lib_utils/src/generate.py
|
python
|
main
|
()
|
combine the necessary files to create the ansible module
|
combine the necessary files to create the ansible module
|
[
"combine",
"the",
"necessary",
"files",
"to",
"create",
"the",
"ansible",
"module"
] |
def main():
''' combine the necessary files to create the ansible module '''
args = parse_args()
if args.verify:
verify()
for fname, parts in get_sources().items():
data = generate(parts)
fname = os.path.join(LIBRARY, fname)
with open(fname, 'w') as afd:
afd.seek(0)
afd.write(data.getvalue())
|
[
"def",
"main",
"(",
")",
":",
"args",
"=",
"parse_args",
"(",
")",
"if",
"args",
".",
"verify",
":",
"verify",
"(",
")",
"for",
"fname",
",",
"parts",
"in",
"get_sources",
"(",
")",
".",
"items",
"(",
")",
":",
"data",
"=",
"generate",
"(",
"parts",
")",
"fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"LIBRARY",
",",
"fname",
")",
"with",
"open",
"(",
"fname",
",",
"'w'",
")",
"as",
"afd",
":",
"afd",
".",
"seek",
"(",
"0",
")",
"afd",
".",
"write",
"(",
"data",
".",
"getvalue",
"(",
")",
")"
] |
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.9.40/roles/lib_utils/src/generate.py#L95-L106
|
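A minimal sketch of the generate/get_sources contract implied by main() above (the .getvalue() call suggests generate returns an io.StringIO buffer; the output directory and parts below are hypothetical):

import io
import os

def generate(parts):
    # concatenate source fragments into one buffer, mirroring data.getvalue() in main()
    data = io.StringIO()
    for part in parts:
        data.write(part)
        data.write("\n")
    return data

LIBRARY = "library"  # hypothetical output directory
os.makedirs(LIBRARY, exist_ok=True)
with open(os.path.join(LIBRARY, "module.py"), "w") as afd:
    afd.write(generate(["# header", "def run(): pass"]).getvalue())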
||
zzzeek/sqlalchemy
|
fc5c54fcd4d868c2a4c7ac19668d72f506fe821e
|
lib/sqlalchemy/dialects/__init__.py
|
python
|
_auto_fn
|
(name)
|
default dialect importer.
plugs into the :class:`.PluginLoader`
as a first-hit system.
|
default dialect importer.
|
[
"default",
"dialect",
"importer",
"."
] |
def _auto_fn(name):
"""default dialect importer.
plugs into the :class:`.PluginLoader`
as a first-hit system.
"""
if "." in name:
dialect, driver = name.split(".")
else:
dialect = name
driver = "base"
try:
if dialect == "mariadb":
# it's "OK" for us to hardcode here since _auto_fn is already
# hardcoded. if mysql / mariadb etc were third party dialects
# they would just publish all the entrypoints, which would actually
# look much nicer.
module = __import__(
"sqlalchemy.dialects.mysql.mariadb"
).dialects.mysql.mariadb
return module.loader(driver)
else:
module = __import__("sqlalchemy.dialects.%s" % (dialect,)).dialects
module = getattr(module, dialect)
except ImportError:
return None
if hasattr(module, driver):
module = getattr(module, driver)
return lambda: module.dialect
else:
return None
|
[
"def",
"_auto_fn",
"(",
"name",
")",
":",
"if",
"\".\"",
"in",
"name",
":",
"dialect",
",",
"driver",
"=",
"name",
".",
"split",
"(",
"\".\"",
")",
"else",
":",
"dialect",
"=",
"name",
"driver",
"=",
"\"base\"",
"try",
":",
"if",
"dialect",
"==",
"\"mariadb\"",
":",
"# it's \"OK\" for us to hardcode here since _auto_fn is already",
"# hardcoded. if mysql / mariadb etc were third party dialects",
"# they would just publish all the entrypoints, which would actually",
"# look much nicer.",
"module",
"=",
"__import__",
"(",
"\"sqlalchemy.dialects.mysql.mariadb\"",
")",
".",
"dialects",
".",
"mysql",
".",
"mariadb",
"return",
"module",
".",
"loader",
"(",
"driver",
")",
"else",
":",
"module",
"=",
"__import__",
"(",
"\"sqlalchemy.dialects.%s\"",
"%",
"(",
"dialect",
",",
")",
")",
".",
"dialects",
"module",
"=",
"getattr",
"(",
"module",
",",
"dialect",
")",
"except",
"ImportError",
":",
"return",
"None",
"if",
"hasattr",
"(",
"module",
",",
"driver",
")",
":",
"module",
"=",
"getattr",
"(",
"module",
",",
"driver",
")",
"return",
"lambda",
":",
"module",
".",
"dialect",
"else",
":",
"return",
"None"
] |
https://github.com/zzzeek/sqlalchemy/blob/fc5c54fcd4d868c2a4c7ac19668d72f506fe821e/lib/sqlalchemy/dialects/__init__.py#L14-L47
|
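The same first-hit, lazy-import pattern in isolation (a sketch, not SQLAlchemy's loader; importlib.import_module replaces the nested __import__/getattr dance, and returning None lets the next loader in a plugin chain try the name):

import importlib

def auto_fn(name, package="sqlalchemy.dialects"):
    dialect, _, driver = name.partition(".")
    driver = driver or "base"
    try:
        module = importlib.import_module(f"{package}.{dialect}")
    except ImportError:
        return None  # first-hit: another registered loader may handle this name
    sub = getattr(module, driver, None)
    return (lambda: sub.dialect) if sub is not None else None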
||
meduza-corp/interstellar
|
40a801ccd7856491726f5a126621d9318cabe2e1
|
gsutil/third_party/boto/boto/s3/bucketlistresultset.py
|
python
|
bucket_lister
|
(bucket, prefix='', delimiter='', marker='', headers=None,
encoding_type=None)
|
A generator function for listing keys in a bucket.
|
A generator function for listing keys in a bucket.
|
[
"A",
"generator",
"function",
"for",
"listing",
"keys",
"in",
"a",
"bucket",
"."
] |
def bucket_lister(bucket, prefix='', delimiter='', marker='', headers=None,
encoding_type=None):
"""
A generator function for listing keys in a bucket.
"""
more_results = True
k = None
while more_results:
rs = bucket.get_all_keys(prefix=prefix, marker=marker,
delimiter=delimiter, headers=headers,
encoding_type=encoding_type)
for k in rs:
yield k
if k:
marker = rs.next_marker or k.name
more_results = rs.is_truncated
|
[
"def",
"bucket_lister",
"(",
"bucket",
",",
"prefix",
"=",
"''",
",",
"delimiter",
"=",
"''",
",",
"marker",
"=",
"''",
",",
"headers",
"=",
"None",
",",
"encoding_type",
"=",
"None",
")",
":",
"more_results",
"=",
"True",
"k",
"=",
"None",
"while",
"more_results",
":",
"rs",
"=",
"bucket",
".",
"get_all_keys",
"(",
"prefix",
"=",
"prefix",
",",
"marker",
"=",
"marker",
",",
"delimiter",
"=",
"delimiter",
",",
"headers",
"=",
"headers",
",",
"encoding_type",
"=",
"encoding_type",
")",
"for",
"k",
"in",
"rs",
":",
"yield",
"k",
"if",
"k",
":",
"marker",
"=",
"rs",
".",
"next_marker",
"or",
"k",
".",
"name",
"more_results",
"=",
"rs",
".",
"is_truncated"
] |
https://github.com/meduza-corp/interstellar/blob/40a801ccd7856491726f5a126621d9318cabe2e1/gsutil/third_party/boto/boto/s3/bucketlistresultset.py#L22-L37
|
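The generator above is the classic marker-based pagination loop. A self-contained sketch of the same contract, with fetch_page as a hypothetical stand-in for bucket.get_all_keys():

def paginate(fetch_page, marker=""):
    truncated = True
    while truncated:
        page = fetch_page(marker)
        for item in page["items"]:
            yield item
        if page["items"]:
            # resume from the server-supplied marker, else the last key seen
            marker = page.get("next_marker") or page["items"][-1]
        truncated = page["is_truncated"]

pages = [{"items": ["a", "b"], "is_truncated": True},
         {"items": ["c"], "is_truncated": False}]
print(list(paginate(lambda marker: pages.pop(0))))  # ['a', 'b', 'c']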
||
zhl2008/awd-platform
|
0416b31abea29743387b10b3914581fbe8e7da5e
|
web_hxb2/lib/python3.5/site-packages/django/utils/deprecation.py
|
python
|
RenameMethodsBase.__new__
|
(cls, name, bases, attrs)
|
return new_class
|
[] |
def __new__(cls, name, bases, attrs):
new_class = super(RenameMethodsBase, cls).__new__(cls, name, bases, attrs)
for base in inspect.getmro(new_class):
class_name = base.__name__
for renamed_method in cls.renamed_methods:
old_method_name = renamed_method[0]
old_method = base.__dict__.get(old_method_name)
new_method_name = renamed_method[1]
new_method = base.__dict__.get(new_method_name)
deprecation_warning = renamed_method[2]
wrapper = warn_about_renamed_method(class_name, *renamed_method)
# Define the new method if missing and complain about it
if not new_method and old_method:
warnings.warn(
"`%s.%s` method should be renamed `%s`." %
(class_name, old_method_name, new_method_name),
deprecation_warning, 2)
setattr(base, new_method_name, old_method)
setattr(base, old_method_name, wrapper(old_method))
# Define the old method as a wrapped call to the new method.
if not old_method and new_method:
setattr(base, old_method_name, wrapper(new_method))
return new_class
|
[
"def",
"__new__",
"(",
"cls",
",",
"name",
",",
"bases",
",",
"attrs",
")",
":",
"new_class",
"=",
"super",
"(",
"RenameMethodsBase",
",",
"cls",
")",
".",
"__new__",
"(",
"cls",
",",
"name",
",",
"bases",
",",
"attrs",
")",
"for",
"base",
"in",
"inspect",
".",
"getmro",
"(",
"new_class",
")",
":",
"class_name",
"=",
"base",
".",
"__name__",
"for",
"renamed_method",
"in",
"cls",
".",
"renamed_methods",
":",
"old_method_name",
"=",
"renamed_method",
"[",
"0",
"]",
"old_method",
"=",
"base",
".",
"__dict__",
".",
"get",
"(",
"old_method_name",
")",
"new_method_name",
"=",
"renamed_method",
"[",
"1",
"]",
"new_method",
"=",
"base",
".",
"__dict__",
".",
"get",
"(",
"new_method_name",
")",
"deprecation_warning",
"=",
"renamed_method",
"[",
"2",
"]",
"wrapper",
"=",
"warn_about_renamed_method",
"(",
"class_name",
",",
"*",
"renamed_method",
")",
"# Define the new method if missing and complain about it",
"if",
"not",
"new_method",
"and",
"old_method",
":",
"warnings",
".",
"warn",
"(",
"\"`%s.%s` method should be renamed `%s`.\"",
"%",
"(",
"class_name",
",",
"old_method_name",
",",
"new_method_name",
")",
",",
"deprecation_warning",
",",
"2",
")",
"setattr",
"(",
"base",
",",
"new_method_name",
",",
"old_method",
")",
"setattr",
"(",
"base",
",",
"old_method_name",
",",
"wrapper",
"(",
"old_method",
")",
")",
"# Define the old method as a wrapped call to the new method.",
"if",
"not",
"old_method",
"and",
"new_method",
":",
"setattr",
"(",
"base",
",",
"old_method_name",
",",
"wrapper",
"(",
"new_method",
")",
")",
"return",
"new_class"
] |
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/django/utils/deprecation.py#L46-L72
|
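A usage sketch for the metaclass above (names are illustrative, and it must run in the namespace of django.utils.deprecation, since it relies on the module's warn_about_renamed_method decorator, elided here, which warns and delegates): subclasses declare a renamed_methods tuple, and classes built with them accept both spellings.

class RenameWidgetMethods(RenameMethodsBase):
    renamed_methods = (("render_old", "render", DeprecationWarning),)

class Widget(metaclass=RenameWidgetMethods):
    def render(self):
        return "ok"

print(Widget().render_old())  # "ok", emitted after a DeprecationWarning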
|||
JacquesLucke/animation_nodes
|
b1e3ace8dcb0a771fd882fc3ac4e490b009fa0d1
|
animation_nodes/ui/selection_pie.py
|
python
|
SelectionPie.drawTop
|
(self, layout)
|
[] |
def drawTop(self, layout):
layout.operator("an.frame_active_network")
|
[
"def",
"drawTop",
"(",
"self",
",",
"layout",
")",
":",
"layout",
".",
"operator",
"(",
"\"an.frame_active_network\"",
")"
] |
https://github.com/JacquesLucke/animation_nodes/blob/b1e3ace8dcb0a771fd882fc3ac4e490b009fa0d1/animation_nodes/ui/selection_pie.py#L24-L25
|
||||
AstusRush/AMaDiA
|
e2ad87318d9dd30bc24428e05c29cb32a29c83aa
|
External_Libraries/keyboard_master/keyboard/__init__.py
|
python
|
on_release
|
(callback, suppress=False)
|
return hook(lambda e: e.event_type == KEY_DOWN or callback(e), suppress=suppress)
|
Invokes `callback` for every KEY_UP event. For details see `hook`.
|
Invokes `callback` for every KEY_UP event. For details see `hook`.
|
[
"Invokes",
"callback",
"for",
"every",
"KEY_UP",
"event",
".",
"For",
"details",
"see",
"hook",
"."
] |
def on_release(callback, suppress=False):
"""
Invokes `callback` for every KEY_UP event. For details see `hook`.
"""
return hook(lambda e: e.event_type == KEY_DOWN or callback(e), suppress=suppress)
|
[
"def",
"on_release",
"(",
"callback",
",",
"suppress",
"=",
"False",
")",
":",
"return",
"hook",
"(",
"lambda",
"e",
":",
"e",
".",
"event_type",
"==",
"KEY_DOWN",
"or",
"callback",
"(",
"e",
")",
",",
"suppress",
"=",
"suppress",
")"
] |
https://github.com/AstusRush/AMaDiA/blob/e2ad87318d9dd30bc24428e05c29cb32a29c83aa/External_Libraries/keyboard_master/keyboard/__init__.py#L476-L480
|
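Typical usage, assuming the top-level API of the vendored keyboard module (wait and unhook live in the same file; the returned handle from hook can be passed back to unhook):

import keyboard

handle = keyboard.on_release(lambda e: print(e.name, "released"))
keyboard.wait("esc")      # block until Esc, printing each key release
keyboard.unhook(handle)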
|
cloudera/impyla
|
0c736af4cad2bade9b8e313badc08ec50e81c948
|
impala/_thrift_gen/hive_metastore/ThriftHiveMetastore.py
|
python
|
drop_partition_args.write
|
(self, oprot)
|
[] |
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('drop_partition_args')
if self.db_name is not None:
oprot.writeFieldBegin('db_name', TType.STRING, 1)
oprot.writeString(self.db_name)
oprot.writeFieldEnd()
if self.tbl_name is not None:
oprot.writeFieldBegin('tbl_name', TType.STRING, 2)
oprot.writeString(self.tbl_name)
oprot.writeFieldEnd()
if self.part_vals is not None:
oprot.writeFieldBegin('part_vals', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.part_vals))
for iter742 in self.part_vals:
oprot.writeString(iter742)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.deleteData is not None:
oprot.writeFieldBegin('deleteData', TType.BOOL, 4)
oprot.writeBool(self.deleteData)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
|
[
"def",
"write",
"(",
"self",
",",
"oprot",
")",
":",
"if",
"oprot",
".",
"_fast_encode",
"is",
"not",
"None",
"and",
"self",
".",
"thrift_spec",
"is",
"not",
"None",
":",
"oprot",
".",
"trans",
".",
"write",
"(",
"oprot",
".",
"_fast_encode",
"(",
"self",
",",
"[",
"self",
".",
"__class__",
",",
"self",
".",
"thrift_spec",
"]",
")",
")",
"return",
"oprot",
".",
"writeStructBegin",
"(",
"'drop_partition_args'",
")",
"if",
"self",
".",
"db_name",
"is",
"not",
"None",
":",
"oprot",
".",
"writeFieldBegin",
"(",
"'db_name'",
",",
"TType",
".",
"STRING",
",",
"1",
")",
"oprot",
".",
"writeString",
"(",
"self",
".",
"db_name",
")",
"oprot",
".",
"writeFieldEnd",
"(",
")",
"if",
"self",
".",
"tbl_name",
"is",
"not",
"None",
":",
"oprot",
".",
"writeFieldBegin",
"(",
"'tbl_name'",
",",
"TType",
".",
"STRING",
",",
"2",
")",
"oprot",
".",
"writeString",
"(",
"self",
".",
"tbl_name",
")",
"oprot",
".",
"writeFieldEnd",
"(",
")",
"if",
"self",
".",
"part_vals",
"is",
"not",
"None",
":",
"oprot",
".",
"writeFieldBegin",
"(",
"'part_vals'",
",",
"TType",
".",
"LIST",
",",
"3",
")",
"oprot",
".",
"writeListBegin",
"(",
"TType",
".",
"STRING",
",",
"len",
"(",
"self",
".",
"part_vals",
")",
")",
"for",
"iter742",
"in",
"self",
".",
"part_vals",
":",
"oprot",
".",
"writeString",
"(",
"iter742",
")",
"oprot",
".",
"writeListEnd",
"(",
")",
"oprot",
".",
"writeFieldEnd",
"(",
")",
"if",
"self",
".",
"deleteData",
"is",
"not",
"None",
":",
"oprot",
".",
"writeFieldBegin",
"(",
"'deleteData'",
",",
"TType",
".",
"BOOL",
",",
"4",
")",
"oprot",
".",
"writeBool",
"(",
"self",
".",
"deleteData",
")",
"oprot",
".",
"writeFieldEnd",
"(",
")",
"oprot",
".",
"writeFieldStop",
"(",
")",
"oprot",
".",
"writeStructEnd",
"(",
")"
] |
https://github.com/cloudera/impyla/blob/0c736af4cad2bade9b8e313badc08ec50e81c948/impala/_thrift_gen/hive_metastore/ThriftHiveMetastore.py#L18131-L18156
|
||||
scikit-hep/awkward-0.x
|
dd885bef15814f588b58944d2505296df4aaae0e
|
awkward0/array/base.py
|
python
|
AwkwardArray.BitMaskedArray
|
(self)
|
return awkward0.array.masked.BitMaskedArray
|
[] |
def BitMaskedArray(self):
import awkward0.array.masked
return awkward0.array.masked.BitMaskedArray
|
[
"def",
"BitMaskedArray",
"(",
"self",
")",
":",
"import",
"awkward0",
".",
"array",
".",
"masked",
"return",
"awkward0",
".",
"array",
".",
"masked",
".",
"BitMaskedArray"
] |
https://github.com/scikit-hep/awkward-0.x/blob/dd885bef15814f588b58944d2505296df4aaae0e/awkward0/array/base.py#L337-L339
|
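In the upstream file this accessor is declared as a property; importing inside the body defers the submodule import to attribute-access time, which breaks the circular import between the base and concrete array modules. A minimal runnable stand-in for the pattern (collections stands in for the real submodule):

class Base:
    @property
    def MaskedArray(self):
        # stand-in for `import awkward0.array.masked` at access time
        import collections
        return collections.OrderedDict

print(Base().MaskedArray)  # the class object, resolved only on first access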
|||
volatilityfoundation/volatility
|
a438e768194a9e05eb4d9ee9338b881c0fa25937
|
volatility/obj.py
|
python
|
Profile.clear
|
(self)
|
Clears out the input vtypes and object_classes, leaving only the base object types
|
Clears out the input vtypes and object_classes, leaving only the base object types
|
[
"Clears",
"out",
"the",
"input",
"vtypes",
"and",
"object_classes",
"and",
"only",
"the",
"base",
"object",
"types"
] |
def clear(self):
""" Clears out the input vtypes and object_classes, and only the base object types """
# Prepopulate object_classes with base classes
self.object_classes = {'BitField': BitField,
'Pointer': Pointer,
'Pointer32':Pointer32,
'Void': Void,
'Array': Array,
'CType': CType,
'VolatilityMagic': VolatilityMagic}
# Ensure VOLATILITY_MAGIC is always present in vtypes
self.vtypes = {'VOLATILITY_MAGIC' : [0x0, {}]}
# Clear out the ordering that modifications were applied (since now, none were)
self._mods = []
|
[
"def",
"clear",
"(",
"self",
")",
":",
"# Prepopulate object_classes with base classes",
"self",
".",
"object_classes",
"=",
"{",
"'BitField'",
":",
"BitField",
",",
"'Pointer'",
":",
"Pointer",
",",
"'Pointer32'",
":",
"Pointer32",
",",
"'Void'",
":",
"Void",
",",
"'Array'",
":",
"Array",
",",
"'CType'",
":",
"CType",
",",
"'VolatilityMagic'",
":",
"VolatilityMagic",
"}",
"# Ensure VOLATILITY_MAGIC is always present in vtypes",
"self",
".",
"vtypes",
"=",
"{",
"'VOLATILITY_MAGIC'",
":",
"[",
"0x0",
",",
"{",
"}",
"]",
"}",
"# Clear out the ordering that modifications were applied (since now, none were)",
"self",
".",
"_mods",
"=",
"[",
"]"
] |
https://github.com/volatilityfoundation/volatility/blob/a438e768194a9e05eb4d9ee9338b881c0fa25937/volatility/obj.py#L868-L881
|
||
algorhythms/LeetCode
|
3fb14aeea62a960442e47dfde9f964c7ffce32be
|
354 Russian Doll Envelopes.py
|
python
|
Solution.maxEnvelopesTLE
|
(self, A)
|
return max(F)
|
LIS
O(n^2)
:type A: List[List[int]]
:rtype: int
|
LIS
O(n^2)
:type A: List[List[int]]
:rtype: int
|
[
"LIS",
"O",
"(",
"n^2",
")",
":",
"type",
"A",
":",
"List",
"[",
"List",
"[",
"int",
"]]",
":",
"rtype",
":",
"int"
] |
def maxEnvelopesTLE(self, A):
"""
LIS
O(n^2)
:type A: List[List[int]]
:rtype: int
"""
if not A: return 0
predicate = lambda a, b: b[0] > a[0] and b[1] > a[1]
A.sort()
n = len(A)
F = [1 for _ in xrange(n)]
for i in xrange(1, n):
for j in xrange(i):
if predicate(A[j], A[i]):
F[i] = max(F[i], 1 + F[j])
return max(F)
|
[
"def",
"maxEnvelopesTLE",
"(",
"self",
",",
"A",
")",
":",
"if",
"not",
"A",
":",
"return",
"0",
"predicate",
"=",
"lambda",
"a",
",",
"b",
":",
"b",
"[",
"0",
"]",
">",
"a",
"[",
"0",
"]",
"and",
"b",
"[",
"1",
"]",
">",
"a",
"[",
"1",
"]",
"A",
".",
"sort",
"(",
")",
"n",
"=",
"len",
"(",
"A",
")",
"F",
"=",
"[",
"1",
"for",
"_",
"in",
"xrange",
"(",
"n",
")",
"]",
"for",
"i",
"in",
"xrange",
"(",
"1",
",",
"n",
")",
":",
"for",
"j",
"in",
"xrange",
"(",
"i",
")",
":",
"if",
"predicate",
"(",
"A",
"[",
"j",
"]",
",",
"A",
"[",
"i",
"]",
")",
":",
"F",
"[",
"i",
"]",
"=",
"max",
"(",
"F",
"[",
"i",
"]",
",",
"1",
"+",
"F",
"[",
"j",
"]",
")",
"return",
"max",
"(",
"F",
")"
] |
https://github.com/algorhythms/LeetCode/blob/3fb14aeea62a960442e47dfde9f964c7ffce32be/354 Russian Doll Envelopes.py#L41-L59
|
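The TLE suffix marks this as the quadratic baseline. The standard O(n log n) refinement sorts by width ascending and height descending (so envelopes of equal width cannot chain), then runs a patience-style LIS over the heights with bisect:

from bisect import bisect_left

def max_envelopes(envelopes):
    envelopes.sort(key=lambda e: (e[0], -e[1]))
    tails = []  # tails[i] = smallest ending height of an increasing chain of length i+1
    for _, h in envelopes:
        i = bisect_left(tails, h)
        if i == len(tails):
            tails.append(h)
        else:
            tails[i] = h
    return len(tails)

print(max_envelopes([[5, 4], [6, 4], [6, 7], [2, 3]]))  # 3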
|
pywren/pywren
|
d898af6418a2d915c3984152de07d95e32c9789a
|
pywren/storage/storage.py
|
python
|
Storage.put_func
|
(self, key, func)
|
return self.backend_handler.put_object(key, func)
|
Put serialized function into storage.
:param key: function key
:param func: serialized function
:return: None
|
Put serialized function into storage.
:param key: function key
:param func: serialized function
:return: None
|
[
"Put",
"serialized",
"function",
"into",
"storage",
".",
":",
"param",
"key",
":",
"function",
"key",
":",
"param",
"func",
":",
"serialized",
"function",
":",
"return",
":",
"None"
] |
def put_func(self, key, func):
"""
Put serialized function into storage.
:param key: function key
:param func: serialized function
:return: None
"""
return self.backend_handler.put_object(key, func)
|
[
"def",
"put_func",
"(",
"self",
",",
"key",
",",
"func",
")",
":",
"return",
"self",
".",
"backend_handler",
".",
"put_object",
"(",
"key",
",",
"func",
")"
] |
https://github.com/pywren/pywren/blob/d898af6418a2d915c3984152de07d95e32c9789a/pywren/storage/storage.py#L62-L69
|
|
androguard/androguard
|
8d091cbb309c0c50bf239f805cc1e0931b8dcddc
|
androguard/core/bytecodes/dvm.py
|
python
|
ClassDefItem.get_static_values_off
|
(self)
|
return self.static_values_off
|
Return the offset from the start of the file to the list of initial values for static fields,
or 0 if there are none (and all static fields are to be initialized with 0 or null)
:rtype: int
|
Return the offset from the start of the file to the list of initial values for static fields,
or 0 if there are none (and all static fields are to be initialized with 0 or null)
|
[
"Return",
"the",
"offset",
"from",
"the",
"start",
"of",
"the",
"file",
"to",
"the",
"list",
"of",
"initial",
"values",
"for",
"static",
"fields",
"or",
"0",
"if",
"there",
"are",
"none",
"(",
"and",
"all",
"static",
"fields",
"are",
"to",
"be",
"initialized",
"with",
"0",
"or",
"null",
")"
] |
def get_static_values_off(self):
"""
Return the offset from the start of the file to the list of initial values for static fields,
or 0 if there are none (and all static fields are to be initialized with 0 or null)
:rtype: int
"""
return self.static_values_off
|
[
"def",
"get_static_values_off",
"(",
"self",
")",
":",
"return",
"self",
".",
"static_values_off"
] |
https://github.com/androguard/androguard/blob/8d091cbb309c0c50bf239f805cc1e0931b8dcddc/androguard/core/bytecodes/dvm.py#L3753-L3760
|
|
oaubert/python-vlc
|
908ffdbd0844dc1849728c456e147788798c99da
|
generated/3.0/vlc.py
|
python
|
Instance.playlist_play
|
(self, i_id, i_options, ppsz_options)
|
return libvlc_playlist_play(self, i_id, i_options, ppsz_options)
|
Start playing (if there is any item in the playlist).
Additional playlist item options can be specified for addition to the
item before it is played.
@param i_id: the item to play. If this is a negative number, the next item will be selected. Otherwise, the item with the given ID will be played.
@param i_options: the number of options to add to the item.
@param ppsz_options: the options to add to the item.
|
Start playing (if there is any item in the playlist).
Additional playlist item options can be specified for addition to the
item before it is played.
|
[
"Start",
"playing",
"(",
"if",
"there",
"is",
"any",
"item",
"in",
"the",
"playlist",
")",
".",
"Additionnal",
"playlist",
"item",
"options",
"can",
"be",
"specified",
"for",
"addition",
"to",
"the",
"item",
"before",
"it",
"is",
"played",
"."
] |
def playlist_play(self, i_id, i_options, ppsz_options):
'''Start playing (if there is any item in the playlist).
Additional playlist item options can be specified for addition to the
item before it is played.
@param i_id: the item to play. If this is a negative number, the next item will be selected. Otherwise, the item with the given ID will be played.
@param i_options: the number of options to add to the item.
@param ppsz_options: the options to add to the item.
'''
return libvlc_playlist_play(self, i_id, i_options, ppsz_options)
|
[
"def",
"playlist_play",
"(",
"self",
",",
"i_id",
",",
"i_options",
",",
"ppsz_options",
")",
":",
"return",
"libvlc_playlist_play",
"(",
"self",
",",
"i_id",
",",
"i_options",
",",
"ppsz_options",
")"
] |
https://github.com/oaubert/python-vlc/blob/908ffdbd0844dc1849728c456e147788798c99da/generated/3.0/vlc.py#L1976-L1984
|
|
evennia/evennia
|
fa79110ba6b219932f22297838e8ac72ebc0be0e
|
evennia/locks/lockhandler.py
|
python
|
LockHandler.get
|
(self, access_type=None)
|
return str(self)
|
Get the full lockstring or the lockstring of a particular
access type.
Args:
access_type (str, optional): The access type whose lockstring to return.
Returns:
lockstring (str): The matched lockstring, or the full
lockstring if no access_type was given.
|
Get the full lockstring or the lockstring of a particular
access type.
|
[
"Get",
"the",
"full",
"lockstring",
"or",
"the",
"lockstring",
"of",
"a",
"particular",
"access",
"type",
"."
] |
def get(self, access_type=None):
"""
Get the full lockstring or the lockstring of a particular
access type.
Args:
access_type (str, optional): The access type whose lockstring to return.
Returns:
lockstring (str): The matched lockstring, or the full
lockstring if no access_type was given.
"""
if access_type:
return self.locks.get(access_type, ["", "", ""])[2]
return str(self)
|
[
"def",
"get",
"(",
"self",
",",
"access_type",
"=",
"None",
")",
":",
"if",
"access_type",
":",
"return",
"self",
".",
"locks",
".",
"get",
"(",
"access_type",
",",
"[",
"\"\"",
",",
"\"\"",
",",
"\"\"",
"]",
")",
"[",
"2",
"]",
"return",
"str",
"(",
"self",
")"
] |
https://github.com/evennia/evennia/blob/fa79110ba6b219932f22297838e8ac72ebc0be0e/evennia/locks/lockhandler.py#L417-L432
|
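A self-contained sketch of the lookup contract (not Evennia itself; the three-slot tuple layout is inferred from the [2] index in the code above):

class MiniLockHandler:
    def __init__(self):
        # access_type -> (func_tuple, raw_definition, lockstring); slots 0-1 elided
        self.locks = {"edit": (None, None, "edit:perm(Admin)")}
    def __str__(self):
        return ";".join(v[2] for v in self.locks.values())
    def get(self, access_type=None):
        if access_type:
            return self.locks.get(access_type, ["", "", ""])[2]
        return str(self)

h = MiniLockHandler()
print(h.get("edit"))    # 'edit:perm(Admin)'
print(h.get("nosuch"))  # '' (the ["", "", ""] default's slot 2)
print(h.get())          # the full lockstring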
|
hacktoolspack/hack-tools
|
c2b1e324f4b24a2c5b4f111e7ef84e9c547159c2
|
wifite2-master/wifite/util/process.py
|
python
|
Process.exists
|
(program)
|
return True
|
Checks if program is installed on this system
|
Checks if program is installed on this system
|
[
"Checks",
"if",
"program",
"is",
"installed",
"on",
"this",
"system"
] |
def exists(program):
''' Checks if program is installed on this system '''
p = Process(['which', program])
stdout = p.stdout().strip()
stderr = p.stderr().strip()
if stdout == '' and stderr == '':
return False
return True
|
[
"def",
"exists",
"(",
"program",
")",
":",
"p",
"=",
"Process",
"(",
"[",
"'which'",
",",
"program",
"]",
")",
"stdout",
"=",
"p",
".",
"stdout",
"(",
")",
".",
"strip",
"(",
")",
"stderr",
"=",
"p",
".",
"stderr",
"(",
")",
".",
"strip",
"(",
")",
"if",
"stdout",
"==",
"''",
"and",
"stderr",
"==",
"''",
":",
"return",
"False",
"return",
"True"
] |
https://github.com/hacktoolspack/hack-tools/blob/c2b1e324f4b24a2c5b4f111e7ef84e9c547159c2/wifite2-master/wifite/util/process.py#L55-L64
|
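On Python 3 the same check is available without spawning a `which` subprocess; a dependency-free sketch using the standard library (shutil.which, Python 3.3+), matching the behaviour above for executables on PATH:

import shutil

def exists(program):
    '''Checks if program is installed on this system'''
    return shutil.which(program) is not None

print(exists("ls"))                            # True on most Unix systems
print(exists("definitely-not-a-real-binary"))  # False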