Dataset schema (column name, type, and the viewer's min–max string lengths): nwo (string, 5–106) · sha (string, 40) · path (string, 4–174) · language (1 class: python) · identifier (string, 1–140) · parameters (string, 0–87.7k) · argument_list (1 class) · return_statement (string, 0–426k) · docstring (string, 0–64.3k) · docstring_summary (string, 0–26.3k) · docstring_tokens (list) · function (string, 18–4.83M) · function_tokens (list) · url (string, 83–304)

nwo | sha | path | language | identifier | parameters | argument_list | return_statement | docstring | docstring_summary | docstring_tokens | function | function_tokens | url
---|---|---|---|---|---|---|---|---|---|---|---|---|---
postgres/pgadmin4
|
374c5e952fa594d749fadf1f88076c1cba8c5f64
|
web/pgadmin/browser/server_groups/servers/databases/schemas/__init__.py
|
python
|
SchemaView.msql
|
(self, gid, sid, did, scid=None)
|
This function will generate modified sql for schema object based on
the input from the user. This route is used by the SQL tab in the
edit/create dialog.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID (When working with existing schema node)
|
This function will generate modified sql for schema object based on
the input from the user. This route is used by the SQL tab in the
edit/create dialog.
|
[
"This",
"function",
"will",
"generate",
"modified",
"sql",
"for",
"schema",
"object",
"based",
"on",
"the",
"input",
"from",
"the",
"user",
".",
"This",
"route",
"is",
"used",
"by",
"the",
"SQL",
"tab",
"in",
"the",
"edit",
"/",
"create",
"dialog",
"."
] |
def msql(self, gid, sid, did, scid=None):
"""
This function will generate modified sql for schema object based on
the input from the user. This route is used by the SQL tab in the
edit/create dialog.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID (When working with existing schema node)
"""
data = dict()
for k, v in request.args.items():
try:
# comments should be taken as is because if user enters a
# json comment it is parsed by loads which should not happen
if k in ('description',):
data[k] = v
else:
data[k] = json.loads(v, encoding='utf-8')
except ValueError:
data[k] = v
try:
SQL, name = self.get_sql(gid, sid, data, scid)
if SQL and SQL.strip('\n') and SQL.strip(' '):
return make_json_response(
data=SQL.strip('\n'),
status=200
)
except Exception as e:
return internal_server_error(errormsg=str(e))
|
[
"def",
"msql",
"(",
"self",
",",
"gid",
",",
"sid",
",",
"did",
",",
"scid",
"=",
"None",
")",
":",
"data",
"=",
"dict",
"(",
")",
"for",
"k",
",",
"v",
"in",
"request",
".",
"args",
".",
"items",
"(",
")",
":",
"try",
":",
"# comments should be taken as is because if user enters a",
"# json comment it is parsed by loads which should not happen",
"if",
"k",
"in",
"(",
"'description'",
",",
")",
":",
"data",
"[",
"k",
"]",
"=",
"v",
"else",
":",
"data",
"[",
"k",
"]",
"=",
"json",
".",
"loads",
"(",
"v",
",",
"encoding",
"=",
"'utf-8'",
")",
"except",
"ValueError",
":",
"data",
"[",
"k",
"]",
"=",
"v",
"try",
":",
"SQL",
",",
"name",
"=",
"self",
".",
"get_sql",
"(",
"gid",
",",
"sid",
",",
"data",
",",
"scid",
")",
"if",
"SQL",
"and",
"SQL",
".",
"strip",
"(",
"'\\n'",
")",
"and",
"SQL",
".",
"strip",
"(",
"' '",
")",
":",
"return",
"make_json_response",
"(",
"data",
"=",
"SQL",
".",
"strip",
"(",
"'\\n'",
")",
",",
"status",
"=",
"200",
")",
"except",
"Exception",
"as",
"e",
":",
"return",
"internal_server_error",
"(",
"errormsg",
"=",
"str",
"(",
"e",
")",
")"
] |
https://github.com/postgres/pgadmin4/blob/374c5e952fa594d749fadf1f88076c1cba8c5f64/web/pgadmin/browser/server_groups/servers/databases/schemas/__init__.py#L745-L777
|
||
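A note on the decode pattern in `msql` above: each query-string value is tried as JSON and kept as a raw string on failure, with `description` exempted so user text is never parsed. A minimal standalone sketch of that pattern (the sample input is hypothetical; Python 3's `json.loads` takes no `encoding` argument):

```python
import json

def decode_args(args, raw_keys=('description',)):
    """JSON-decode each value, falling back to the raw string."""
    data = {}
    for k, v in args.items():
        if k in raw_keys:
            data[k] = v          # taken as-is, never parsed
            continue
        try:
            data[k] = json.loads(v)
        except ValueError:
            data[k] = v          # not valid JSON: keep the string
    return data

print(decode_args({'name': '"public"', 'description': '{"kept": "raw"}'}))
# {'name': 'public', 'description': '{"kept": "raw"}'}
```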
shiyanhui/FileHeader
|
f347cc134021fb0b710694b71c57742476f5fd2b
|
jinja2/filters.py
|
python
|
do_indent
|
(s, width=4, indentfirst=False)
|
return rv
|
Return a copy of the passed string, each line indented by
4 spaces. The first line is not indented. If you want to
change the number of spaces or indent the first line too
you can pass additional parameters to the filter:
.. sourcecode:: jinja
{{ mytext|indent(2, true) }}
indent by two spaces and indent the first line too.
|
Return a copy of the passed string, each line indented by
4 spaces. The first line is not indented. If you want to
change the number of spaces or indent the first line too
you can pass additional parameters to the filter:
|
[
"Return",
"a",
"copy",
"of",
"the",
"passed",
"string",
"each",
"line",
"indented",
"by",
"4",
"spaces",
".",
"The",
"first",
"line",
"is",
"not",
"indented",
".",
"If",
"you",
"want",
"to",
"change",
"the",
"number",
"of",
"spaces",
"or",
"indent",
"the",
"first",
"line",
"too",
"you",
"can",
"pass",
"additional",
"parameters",
"to",
"the",
"filter",
":"
] |
def do_indent(s, width=4, indentfirst=False):
"""Return a copy of the passed string, each line indented by
4 spaces. The first line is not indented. If you want to
change the number of spaces or indent the first line too
you can pass additional parameters to the filter:
.. sourcecode:: jinja
{{ mytext|indent(2, true) }}
indent by two spaces and indent the first line too.
"""
indention = u' ' * width
rv = (u'\n' + indention).join(s.splitlines())
if indentfirst:
rv = indention + rv
return rv
|
[
"def",
"do_indent",
"(",
"s",
",",
"width",
"=",
"4",
",",
"indentfirst",
"=",
"False",
")",
":",
"indention",
"=",
"u' '",
"*",
"width",
"rv",
"=",
"(",
"u'\\n'",
"+",
"indention",
")",
".",
"join",
"(",
"s",
".",
"splitlines",
"(",
")",
")",
"if",
"indentfirst",
":",
"rv",
"=",
"indention",
"+",
"rv",
"return",
"rv"
] |
https://github.com/shiyanhui/FileHeader/blob/f347cc134021fb0b710694b71c57742476f5fd2b/jinja2/filters.py#L430-L445
|
|
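For reference, calling the filter function directly (outside a Jinja template, assuming the `do_indent` above is in scope) behaves like this:

```python
text = "first line\nsecond line"
print(do_indent(text, width=2))
# first line
#   second line
print(do_indent(text, width=2, indentfirst=True))
#   first line
#   second line
```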
qibinlou/SinaWeibo-Emotion-Classification
|
f336fc104abd68b0ec4180fe2ed80fafe49cb790
|
nltk/sem/evaluate.py
|
python
|
arity
|
(rel)
|
return len(list(rel)[0])
|
Check the arity of a relation.
:type rel: set of tuples
:rtype: int of tuple of str
|
Check the arity of a relation.
:type rel: set of tuples
:rtype: int of tuple of str
|
[
"Check",
"the",
"arity",
"of",
"a",
"relation",
".",
":",
"type",
"rel",
":",
"set",
"of",
"tuples",
":",
"rtype",
":",
"int",
"of",
"tuple",
"of",
"str"
] |
def arity(rel):
"""
Check the arity of a relation.
:type rel: set of tuples
:rtype: int of tuple of str
"""
if len(rel) == 0:
return 0
return len(list(rel)[0])
|
[
"def",
"arity",
"(",
"rel",
")",
":",
"if",
"len",
"(",
"rel",
")",
"==",
"0",
":",
"return",
"0",
"return",
"len",
"(",
"list",
"(",
"rel",
")",
"[",
"0",
"]",
")"
] |
https://github.com/qibinlou/SinaWeibo-Emotion-Classification/blob/f336fc104abd68b0ec4180fe2ed80fafe49cb790/nltk/sem/evaluate.py#L85-L93
|
|
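A quick worked example for `arity` above (hypothetical relation; assumes the function is in scope):

```python
rel = {('alice', 'bob'), ('carol', 'dave')}  # a binary relation
print(arity(rel))    # 2 -- the length of one member tuple
print(arity(set()))  # 0 -- empty relation
```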
holzschu/Carnets
|
44effb10ddfc6aa5c8b0687582a724ba82c6b547
|
Library/lib/python3.7/site-packages/matplotlib-3.0.3-py3.7-macosx-10.9-x86_64.egg/matplotlib/font_manager.py
|
python
|
FontManager.score_size
|
(self, size1, size2)
|
return abs(sizeval1 - sizeval2) / 72.0
|
Returns a match score between *size1* and *size2*.
If *size2* (the size specified in the font file) is 'scalable', this
function always returns 0.0, since any font size can be generated.
Otherwise, the result is the absolute distance between *size1* and
*size2*, normalized so that the usual range of font sizes (6pt -
72pt) will lie between 0.0 and 1.0.
|
Returns a match score between *size1* and *size2*.
|
[
"Returns",
"a",
"match",
"score",
"between",
"*",
"size1",
"*",
"and",
"*",
"size2",
"*",
"."
] |
def score_size(self, size1, size2):
"""
Returns a match score between *size1* and *size2*.
If *size2* (the size specified in the font file) is 'scalable', this
function always returns 0.0, since any font size can be generated.
Otherwise, the result is the absolute distance between *size1* and
*size2*, normalized so that the usual range of font sizes (6pt -
72pt) will lie between 0.0 and 1.0.
"""
if size2 == 'scalable':
return 0.0
# Size value should have already been
try:
sizeval1 = float(size1)
except ValueError:
sizeval1 = self.default_size * font_scalings[size1]
try:
sizeval2 = float(size2)
except ValueError:
return 1.0
return abs(sizeval1 - sizeval2) / 72.0
|
[
"def",
"score_size",
"(",
"self",
",",
"size1",
",",
"size2",
")",
":",
"if",
"size2",
"==",
"'scalable'",
":",
"return",
"0.0",
"# Size value should have already been",
"try",
":",
"sizeval1",
"=",
"float",
"(",
"size1",
")",
"except",
"ValueError",
":",
"sizeval1",
"=",
"self",
".",
"default_size",
"*",
"font_scalings",
"[",
"size1",
"]",
"try",
":",
"sizeval2",
"=",
"float",
"(",
"size2",
")",
"except",
"ValueError",
":",
"return",
"1.0",
"return",
"abs",
"(",
"sizeval1",
"-",
"sizeval2",
")",
"/",
"72.0"
] |
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/matplotlib-3.0.3-py3.7-macosx-10.9-x86_64.egg/matplotlib/font_manager.py#L1139-L1161
|
|
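The normalization above means a plain numeric mismatch scores `abs(size1 - size2) / 72`. A simplified standalone sketch of that rule (it drops the `font_scalings` lookup for named sizes, which needs the real matplotlib module):

```python
def score_size(size1, size2):
    """Simplified sketch of the scoring rule above."""
    if size2 == 'scalable':
        return 0.0                      # scalable fonts match any size
    try:
        return abs(float(size1) - float(size2)) / 72.0
    except ValueError:
        return 1.0                      # named sizes need font_scalings

print(score_size(12, 10))          # 0.02777... -> small penalty for a 2pt mismatch
print(score_size(12, 'scalable'))  # 0.0
```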
krintoxi/NoobSec-Toolkit
|
38738541cbc03cedb9a3b3ed13b629f781ad64f6
|
NoobSecToolkit /tools/sqli/thirdparty/bottle/bottle.py
|
python
|
tob
|
(s, enc='utf8')
|
return s.encode(enc) if isinstance(s, unicode) else bytes(s)
|
[] |
def tob(s, enc='utf8'):
return s.encode(enc) if isinstance(s, unicode) else bytes(s)
|
[
"def",
"tob",
"(",
"s",
",",
"enc",
"=",
"'utf8'",
")",
":",
"return",
"s",
".",
"encode",
"(",
"enc",
")",
"if",
"isinstance",
"(",
"s",
",",
"unicode",
")",
"else",
"bytes",
"(",
"s",
")"
] |
https://github.com/krintoxi/NoobSec-Toolkit/blob/38738541cbc03cedb9a3b3ed13b629f781ad64f6/NoobSecToolkit /tools/sqli/thirdparty/bottle/bottle.py#L112-L113
|
|||
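`tob` above is Python 2 code (it references the `unicode` type). A hedged Python 3 rendering of the same helper:

```python
def tob(s, enc='utf8'):
    """Python 3 equivalent of the helper above (py3 has no `unicode`)."""
    return s.encode(enc) if isinstance(s, str) else bytes(s)

print(tob('héllo'))          # b'h\xc3\xa9llo'
print(tob(bytearray(b'x')))  # b'x'
```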
nodesign/weio
|
1d67d705a5c36a2e825ad13feab910b0aca9a2e8
|
openWrt/files/usr/lib/python2.7/site-packages/tornado/ioloop.py
|
python
|
IOLoop.configurable_base
|
(cls)
|
return IOLoop
|
[] |
def configurable_base(cls):
return IOLoop
|
[
"def",
"configurable_base",
"(",
"cls",
")",
":",
"return",
"IOLoop"
] |
https://github.com/nodesign/weio/blob/1d67d705a5c36a2e825ad13feab910b0aca9a2e8/openWrt/files/usr/lib/python2.7/site-packages/tornado/ioloop.py#L198-L199
|
|||
TarrySingh/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials
|
5bb97d7e3ffd913abddb4cfa7d78a1b4c868890e
|
tensorflow_dl_models/research/slim/datasets/download_and_convert_flowers.py
|
python
|
_convert_dataset
|
(split_name, filenames, class_names_to_ids, dataset_dir)
|
Converts the given filenames to a TFRecord dataset.
Args:
split_name: The name of the dataset, either 'train' or 'validation'.
filenames: A list of absolute paths to png or jpg images.
class_names_to_ids: A dictionary from class names (strings) to ids
(integers).
dataset_dir: The directory where the converted datasets are stored.
|
Converts the given filenames to a TFRecord dataset.
|
[
"Converts",
"the",
"given",
"filenames",
"to",
"a",
"TFRecord",
"dataset",
"."
] |
def _convert_dataset(split_name, filenames, class_names_to_ids, dataset_dir):
"""Converts the given filenames to a TFRecord dataset.
Args:
split_name: The name of the dataset, either 'train' or 'validation'.
filenames: A list of absolute paths to png or jpg images.
class_names_to_ids: A dictionary from class names (strings) to ids
(integers).
dataset_dir: The directory where the converted datasets are stored.
"""
assert split_name in ['train', 'validation']
num_per_shard = int(math.ceil(len(filenames) / float(_NUM_SHARDS)))
with tf.Graph().as_default():
image_reader = ImageReader()
with tf.Session('') as sess:
for shard_id in range(_NUM_SHARDS):
output_filename = _get_dataset_filename(
dataset_dir, split_name, shard_id)
with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
start_ndx = shard_id * num_per_shard
end_ndx = min((shard_id+1) * num_per_shard, len(filenames))
for i in range(start_ndx, end_ndx):
sys.stdout.write('\r>> Converting image %d/%d shard %d' % (
i+1, len(filenames), shard_id))
sys.stdout.flush()
# Read the filename:
image_data = tf.gfile.FastGFile(filenames[i], 'rb').read()
height, width = image_reader.read_image_dims(sess, image_data)
class_name = os.path.basename(os.path.dirname(filenames[i]))
class_id = class_names_to_ids[class_name]
example = dataset_utils.image_to_tfexample(
image_data, b'jpg', height, width, class_id)
tfrecord_writer.write(example.SerializeToString())
sys.stdout.write('\n')
sys.stdout.flush()
|
[
"def",
"_convert_dataset",
"(",
"split_name",
",",
"filenames",
",",
"class_names_to_ids",
",",
"dataset_dir",
")",
":",
"assert",
"split_name",
"in",
"[",
"'train'",
",",
"'validation'",
"]",
"num_per_shard",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"len",
"(",
"filenames",
")",
"/",
"float",
"(",
"_NUM_SHARDS",
")",
")",
")",
"with",
"tf",
".",
"Graph",
"(",
")",
".",
"as_default",
"(",
")",
":",
"image_reader",
"=",
"ImageReader",
"(",
")",
"with",
"tf",
".",
"Session",
"(",
"''",
")",
"as",
"sess",
":",
"for",
"shard_id",
"in",
"range",
"(",
"_NUM_SHARDS",
")",
":",
"output_filename",
"=",
"_get_dataset_filename",
"(",
"dataset_dir",
",",
"split_name",
",",
"shard_id",
")",
"with",
"tf",
".",
"python_io",
".",
"TFRecordWriter",
"(",
"output_filename",
")",
"as",
"tfrecord_writer",
":",
"start_ndx",
"=",
"shard_id",
"*",
"num_per_shard",
"end_ndx",
"=",
"min",
"(",
"(",
"shard_id",
"+",
"1",
")",
"*",
"num_per_shard",
",",
"len",
"(",
"filenames",
")",
")",
"for",
"i",
"in",
"range",
"(",
"start_ndx",
",",
"end_ndx",
")",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"'\\r>> Converting image %d/%d shard %d'",
"%",
"(",
"i",
"+",
"1",
",",
"len",
"(",
"filenames",
")",
",",
"shard_id",
")",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"# Read the filename:",
"image_data",
"=",
"tf",
".",
"gfile",
".",
"FastGFile",
"(",
"filenames",
"[",
"i",
"]",
",",
"'rb'",
")",
".",
"read",
"(",
")",
"height",
",",
"width",
"=",
"image_reader",
".",
"read_image_dims",
"(",
"sess",
",",
"image_data",
")",
"class_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"filenames",
"[",
"i",
"]",
")",
")",
"class_id",
"=",
"class_names_to_ids",
"[",
"class_name",
"]",
"example",
"=",
"dataset_utils",
".",
"image_to_tfexample",
"(",
"image_data",
",",
"b'jpg'",
",",
"height",
",",
"width",
",",
"class_id",
")",
"tfrecord_writer",
".",
"write",
"(",
"example",
".",
"SerializeToString",
"(",
")",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"'\\n'",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")"
] |
https://github.com/TarrySingh/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials/blob/5bb97d7e3ffd913abddb4cfa7d78a1b4c868890e/tensorflow_dl_models/research/slim/datasets/download_and_convert_flowers.py#L107-L150
|
||
unknown-horizons/unknown-horizons
|
7397fb333006d26c3d9fe796c7bd9cb8c3b43a49
|
horizons/gui/ingamegui.py
|
python
|
IngameGui._on_new_disaster
|
(self, message)
|
Called when a building is 'infected' with a disaster.
|
Called when a building is 'infected' with a disaster.
|
[
"Called",
"when",
"a",
"building",
"is",
"infected",
"with",
"a",
"disaster",
"."
] |
def _on_new_disaster(self, message):
"""Called when a building is 'infected' with a disaster."""
if message.building.owner.is_local_player and len(message.disaster._affected_buildings) == 1:
pos = message.building.position.center
self.message_widget.add(point=pos, string_id=message.disaster_class.NOTIFICATION_TYPE)
|
[
"def",
"_on_new_disaster",
"(",
"self",
",",
"message",
")",
":",
"if",
"message",
".",
"building",
".",
"owner",
".",
"is_local_player",
"and",
"len",
"(",
"message",
".",
"disaster",
".",
"_affected_buildings",
")",
"==",
"1",
":",
"pos",
"=",
"message",
".",
"building",
".",
"position",
".",
"center",
"self",
".",
"message_widget",
".",
"add",
"(",
"point",
"=",
"pos",
",",
"string_id",
"=",
"message",
".",
"disaster_class",
".",
"NOTIFICATION_TYPE",
")"
] |
https://github.com/unknown-horizons/unknown-horizons/blob/7397fb333006d26c3d9fe796c7bd9cb8c3b43a49/horizons/gui/ingamegui.py#L630-L634
|
||
rgerum/pylustrator
|
b01825bc3de75ac127291647729fa7b0e6f8b821
|
pylustrator/drag_helper.py
|
python
|
GrabbableRectangleSelection.update_grabber
|
(self)
|
update the position of the grabber elements
|
update the position of the grabber elements
|
[
"update",
"the",
"position",
"of",
"the",
"grabber",
"elements"
] |
def update_grabber(self):
""" update the position of the grabber elements """
if self.do_target_scale():
for grabber in self.grabbers:
grabber.updatePos()
else:
self.hide_grabber()
|
[
"def",
"update_grabber",
"(",
"self",
")",
":",
"if",
"self",
".",
"do_target_scale",
"(",
")",
":",
"for",
"grabber",
"in",
"self",
".",
"grabbers",
":",
"grabber",
".",
"updatePos",
"(",
")",
"else",
":",
"self",
".",
"hide_grabber",
"(",
")"
] |
https://github.com/rgerum/pylustrator/blob/b01825bc3de75ac127291647729fa7b0e6f8b821/pylustrator/drag_helper.py#L289-L295
|
||
ustayready/CredKing
|
68b612e4cdf01d2b65b14ab2869bb8a5531056ee
|
plugins/gmail/lxml/html/__init__.py
|
python
|
HtmlMixin.head
|
(self)
|
return self.xpath('//head|//x:head', namespaces={'x':XHTML_NAMESPACE})[0]
|
Returns the <head> element. Can be called from a child
element to get the document's head.
|
Returns the <head> element. Can be called from a child
element to get the document's head.
|
[
"Returns",
"the",
"<head",
">",
"element",
".",
"Can",
"be",
"called",
"from",
"a",
"child",
"element",
"to",
"get",
"the",
"document",
"s",
"head",
"."
] |
def head(self):
"""
Returns the <head> element. Can be called from a child
element to get the document's head.
"""
return self.xpath('//head|//x:head', namespaces={'x':XHTML_NAMESPACE})[0]
|
[
"def",
"head",
"(",
"self",
")",
":",
"return",
"self",
".",
"xpath",
"(",
"'//head|//x:head'",
",",
"namespaces",
"=",
"{",
"'x'",
":",
"XHTML_NAMESPACE",
"}",
")",
"[",
"0",
"]"
] |
https://github.com/ustayready/CredKing/blob/68b612e4cdf01d2b65b14ab2869bb8a5531056ee/plugins/gmail/lxml/html/__init__.py#L293-L298
|
|
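Usage sketch for the accessor above (assumes the real `lxml` package is installed, where `head` is exposed as a property on HTML elements):

```python
from lxml import html

doc = html.fromstring('<html><head><title>T</title></head>'
                      '<body><p>hi</p></body></html>')
p = doc.find('.//p')
print(p.head.tag)                # 'head' -- resolved from a child element
print(p.head.findtext('title'))  # 'T'
```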
transferwise/pipelinewise
|
6934b3851512dbdd4280790bf253a0a13ab65684
|
pipelinewise/fastsync/commons/tap_mongodb.py
|
python
|
get_connection_string
|
(config: Dict)
|
return connection_string
|
Generates a MongoClientConnectionString based on configuration
Args:
config: DB config
Returns: A MongoClient connection string
|
Generates a MongoClientConnectionString based on configuration
Args:
config: DB config
|
[
"Generates",
"a",
"MongoClientConnectionString",
"based",
"on",
"configuration",
"Args",
":",
"config",
":",
"DB",
"config"
] |
def get_connection_string(config: Dict):
"""
Generates a MongoClientConnectionString based on configuration
Args:
config: DB config
Returns: A MongoClient connection string
"""
srv = config.get('srv') == 'true'
# Default SSL verify mode to true, give option to disable
verify_mode = config.get('verify_mode', 'true') == 'true'
use_ssl = config.get('ssl') == 'true'
connection_query = {
'readPreference': 'secondaryPreferred',
'authSource': config['auth_database'],
}
if config.get('replica_set'):
connection_query['replicaSet'] = config['replica_set']
if use_ssl:
connection_query['ssl'] = 'true'
# NB: "sslAllowInvalidCertificates" must ONLY be supplied if `SSL` is true.
if not verify_mode and use_ssl:
connection_query['tlsAllowInvalidCertificates'] = 'true'
query_string = parse.urlencode(connection_query)
connection_string = '{protocol}://{user}:{password}@{host}{port}/{database}?{query_string}'.format(
protocol='mongodb+srv' if srv else 'mongodb',
user=config['user'],
password=config['password'],
host=config['host'],
port='' if srv else ':{port}'.format(port=int(config['port'])),
database=config['database'],
query_string=query_string
)
return connection_string
|
[
"def",
"get_connection_string",
"(",
"config",
":",
"Dict",
")",
":",
"srv",
"=",
"config",
".",
"get",
"(",
"'srv'",
")",
"==",
"'true'",
"# Default SSL verify mode to true, give option to disable",
"verify_mode",
"=",
"config",
".",
"get",
"(",
"'verify_mode'",
",",
"'true'",
")",
"==",
"'true'",
"use_ssl",
"=",
"config",
".",
"get",
"(",
"'ssl'",
")",
"==",
"'true'",
"connection_query",
"=",
"{",
"'readPreference'",
":",
"'secondaryPreferred'",
",",
"'authSource'",
":",
"config",
"[",
"'auth_database'",
"]",
",",
"}",
"if",
"config",
".",
"get",
"(",
"'replica_set'",
")",
":",
"connection_query",
"[",
"'replicaSet'",
"]",
"=",
"config",
"[",
"'replica_set'",
"]",
"if",
"use_ssl",
":",
"connection_query",
"[",
"'ssl'",
"]",
"=",
"'true'",
"# NB: \"sslAllowInvalidCertificates\" must ONLY be supplied if `SSL` is true.",
"if",
"not",
"verify_mode",
"and",
"use_ssl",
":",
"connection_query",
"[",
"'tlsAllowInvalidCertificates'",
"]",
"=",
"'true'",
"query_string",
"=",
"parse",
".",
"urlencode",
"(",
"connection_query",
")",
"connection_string",
"=",
"'{protocol}://{user}:{password}@{host}{port}/{database}?{query_string}'",
".",
"format",
"(",
"protocol",
"=",
"'mongodb+srv'",
"if",
"srv",
"else",
"'mongodb'",
",",
"user",
"=",
"config",
"[",
"'user'",
"]",
",",
"password",
"=",
"config",
"[",
"'password'",
"]",
",",
"host",
"=",
"config",
"[",
"'host'",
"]",
",",
"port",
"=",
"''",
"if",
"srv",
"else",
"':{port}'",
".",
"format",
"(",
"port",
"=",
"int",
"(",
"config",
"[",
"'port'",
"]",
")",
")",
",",
"database",
"=",
"config",
"[",
"'database'",
"]",
",",
"query_string",
"=",
"query_string",
")",
"return",
"connection_string"
] |
https://github.com/transferwise/pipelinewise/blob/6934b3851512dbdd4280790bf253a0a13ab65684/pipelinewise/fastsync/commons/tap_mongodb.py#L156-L197
|
|
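With a minimal (hypothetical) config and no SSL/SRV options, the function above produces a plain `mongodb://` URI (assumes the function and its `from urllib import parse` import are in scope):

```python
config = {
    'user': 'tap', 'password': 's3cret', 'host': 'mongo.example.com',
    'port': '27017', 'database': 'mydb', 'auth_database': 'admin',
}
print(get_connection_string(config))
# mongodb://tap:s3cret@mongo.example.com:27017/mydb?readPreference=secondaryPreferred&authSource=admin
```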
google/trax
|
d6cae2067dedd0490b78d831033607357e975015
|
trax/layers/research/efficient_attention.py
|
python
|
LSHSelfAttention.backward
|
(self, inputs, output, grad, weights, state, new_state, rng=None,
**kwargs)
|
return inputs_grad, weights_grad
|
Custom backward pass, for efficiency (see forward_and_or_backward).
|
Custom backward pass, for efficiency (see forward_and_or_backward).
|
[
"Custom",
"backward",
"pass",
"for",
"efficiency",
"(",
"see",
"forward_and_or_backward",
")",
"."
] |
def backward(self, inputs, output, grad, weights, state, new_state, rng=None,
**kwargs):
"""Custom backward pass, for efficiency (see forward_and_or_backward)."""
assert not self._use_reference_code
del output, state, kwargs
_, _, inputs_grad, weights_grad = self.forward_and_or_backward(
inputs, weights, new_state, rng, output_grad=grad,
compute_output=False, update_state=False)
return inputs_grad, weights_grad
|
[
"def",
"backward",
"(",
"self",
",",
"inputs",
",",
"output",
",",
"grad",
",",
"weights",
",",
"state",
",",
"new_state",
",",
"rng",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"not",
"self",
".",
"_use_reference_code",
"del",
"output",
",",
"state",
",",
"kwargs",
"_",
",",
"_",
",",
"inputs_grad",
",",
"weights_grad",
"=",
"self",
".",
"forward_and_or_backward",
"(",
"inputs",
",",
"weights",
",",
"new_state",
",",
"rng",
",",
"output_grad",
"=",
"grad",
",",
"compute_output",
"=",
"False",
",",
"update_state",
"=",
"False",
")",
"return",
"inputs_grad",
",",
"weights_grad"
] |
https://github.com/google/trax/blob/d6cae2067dedd0490b78d831033607357e975015/trax/layers/research/efficient_attention.py#L2252-L2260
|
|
aws-ia/taskcat
|
67d19abdc1ad2070296925c297b614bbe7caaa76
|
taskcat/_cli_modules/lint.py
|
python
|
Lint.__init__
|
(
self,
input_file: str = ".taskcat.yml",
project_root: str = "./",
strict: bool = False,
)
|
:param input_file: path to project config or CloudFormation template
:param project_root: base path for project
:param strict: fail on lint warnings as well as errors
|
:param input_file: path to project config or CloudFormation template
:param project_root: base path for project
:param strict: fail on lint warnings as well as errors
|
[
":",
"param",
"input_file",
":",
"path",
"to",
"project",
"config",
"or",
"CloudFormation",
"template",
":",
"param",
"project_root",
":",
"base",
"path",
"for",
"project",
":",
"param",
"strict",
":",
"fail",
"on",
"lint",
"warnings",
"as",
"well",
"as",
"errors"
] |
def __init__(
self,
input_file: str = ".taskcat.yml",
project_root: str = "./",
strict: bool = False,
):
"""
:param input_file: path to project config or CloudFormation template
:param project_root: base path for project
:param strict: fail on lint warnings as well as errors
"""
project_root_path: Path = Path(project_root).expanduser().resolve()
input_file_path: Path = project_root_path / input_file
config = Config.create(
project_root=project_root_path, project_config_path=input_file_path
)
templates = config.get_templates()
lint = TaskCatLint(config, templates, strict)
errors = lint.lints[1]
lint.output_results()
if errors or not lint.passed:
raise TaskCatException("Lint failed with errors")
|
[
"def",
"__init__",
"(",
"self",
",",
"input_file",
":",
"str",
"=",
"\".taskcat.yml\"",
",",
"project_root",
":",
"str",
"=",
"\"./\"",
",",
"strict",
":",
"bool",
"=",
"False",
",",
")",
":",
"project_root_path",
":",
"Path",
"=",
"Path",
"(",
"project_root",
")",
".",
"expanduser",
"(",
")",
".",
"resolve",
"(",
")",
"input_file_path",
":",
"Path",
"=",
"project_root_path",
"/",
"input_file",
"config",
"=",
"Config",
".",
"create",
"(",
"project_root",
"=",
"project_root_path",
",",
"project_config_path",
"=",
"input_file_path",
")",
"templates",
"=",
"config",
".",
"get_templates",
"(",
")",
"lint",
"=",
"TaskCatLint",
"(",
"config",
",",
"templates",
",",
"strict",
")",
"errors",
"=",
"lint",
".",
"lints",
"[",
"1",
"]",
"lint",
".",
"output_results",
"(",
")",
"if",
"errors",
"or",
"not",
"lint",
".",
"passed",
":",
"raise",
"TaskCatException",
"(",
"\"Lint failed with errors\"",
")"
] |
https://github.com/aws-ia/taskcat/blob/67d19abdc1ad2070296925c297b614bbe7caaa76/taskcat/_cli_modules/lint.py#L14-L37
|
||
Calysto/metakernel
|
9815c0e8b3f9c427105b5d094e9041a303302469
|
metakernel/_metakernel.py
|
python
|
MetaKernel.reload_magics
|
(self)
|
Reload all of the line and cell magics.
|
Reload all of the line and cell magics.
|
[
"Reload",
"all",
"of",
"the",
"line",
"and",
"cell",
"magics",
"."
] |
def reload_magics(self):
"""Reload all of the line and cell magics."""
self.line_magics = {}
self.cell_magics = {}
# get base magic files and those relative to the current class
# directory
magic_files = []
# Make a metakernel/magics if it doesn't exist:
local_magics_dir = get_local_magics_dir()
# Search all of the places there could be magics:
try:
paths = [os.path.join(os.path.dirname(
os.path.abspath(inspect.getfile(self.__class__))), "magics")]
except:
paths = []
paths += [local_magics_dir,
os.path.join(os.path.dirname(os.path.abspath(__file__)), "magics")]
for magic_dir in paths:
sys.path.append(magic_dir)
magic_files.extend(glob.glob(os.path.join(magic_dir, "*.py")))
for magic in magic_files:
basename = os.path.basename(magic)
if basename == "__init__.py":
continue
try:
module = __import__(os.path.splitext(basename)[0])
importlib.reload(module)
module.register_magics(self)
except Exception as e:
self.log.error("Can't load '%s': error: %s" % (magic, e))
|
[
"def",
"reload_magics",
"(",
"self",
")",
":",
"self",
".",
"line_magics",
"=",
"{",
"}",
"self",
".",
"cell_magics",
"=",
"{",
"}",
"# get base magic files and those relative to the current class",
"# directory",
"magic_files",
"=",
"[",
"]",
"# Make a metakernel/magics if it doesn't exist:",
"local_magics_dir",
"=",
"get_local_magics_dir",
"(",
")",
"# Search all of the places there could be magics:",
"try",
":",
"paths",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"inspect",
".",
"getfile",
"(",
"self",
".",
"__class__",
")",
")",
")",
",",
"\"magics\"",
")",
"]",
"except",
":",
"paths",
"=",
"[",
"]",
"paths",
"+=",
"[",
"local_magics_dir",
",",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"__file__",
")",
")",
",",
"\"magics\"",
")",
"]",
"for",
"magic_dir",
"in",
"paths",
":",
"sys",
".",
"path",
".",
"append",
"(",
"magic_dir",
")",
"magic_files",
".",
"extend",
"(",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"magic_dir",
",",
"\"*.py\"",
")",
")",
")",
"for",
"magic",
"in",
"magic_files",
":",
"basename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"magic",
")",
"if",
"basename",
"==",
"\"__init__.py\"",
":",
"continue",
"try",
":",
"module",
"=",
"__import__",
"(",
"os",
".",
"path",
".",
"splitext",
"(",
"basename",
")",
"[",
"0",
"]",
")",
"importlib",
".",
"reload",
"(",
"module",
")",
"module",
".",
"register_magics",
"(",
"self",
")",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"log",
".",
"error",
"(",
"\"Can't load '%s': error: %s\"",
"%",
"(",
"magic",
",",
"e",
")",
")"
] |
https://github.com/Calysto/metakernel/blob/9815c0e8b3f9c427105b5d094e9041a303302469/metakernel/_metakernel.py#L737-L768
|
||
sagemath/sage
|
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
|
src/sage/combinat/dyck_word.py
|
python
|
DyckWords_size.__iter__
|
(self)
|
r"""
Return an iterator for Dyck words with ``k1`` opening and ``k2``
closing parentheses.
EXAMPLES::
sage: list(DyckWords(0))
[[]]
sage: list(DyckWords(1))
[[1, 0]]
sage: list(DyckWords(2))
[[1, 0, 1, 0], [1, 1, 0, 0]]
sage: len(DyckWords(5))
42
sage: list(DyckWords(3,2))
[[1, 0, 1, 0, 1],
[1, 0, 1, 1, 0],
[1, 1, 0, 0, 1],
[1, 1, 0, 1, 0],
[1, 1, 1, 0, 0]]
|
r"""
Return an iterator for Dyck words with ``k1`` opening and ``k2``
closing parentheses.
|
[
"r",
"Return",
"an",
"iterator",
"for",
"Dyck",
"words",
"with",
"k1",
"opening",
"and",
"k2",
"closing",
"parentheses",
"."
] |
def __iter__(self):
r"""
Return an iterator for Dyck words with ``k1`` opening and ``k2``
closing parentheses.
EXAMPLES::
sage: list(DyckWords(0))
[[]]
sage: list(DyckWords(1))
[[1, 0]]
sage: list(DyckWords(2))
[[1, 0, 1, 0], [1, 1, 0, 0]]
sage: len(DyckWords(5))
42
sage: list(DyckWords(3,2))
[[1, 0, 1, 0, 1],
[1, 0, 1, 1, 0],
[1, 1, 0, 0, 1],
[1, 1, 0, 1, 0],
[1, 1, 1, 0, 0]]
"""
if self.k1 == 0:
yield self.element_class(self, [])
elif self.k2 == 0:
yield self.element_class(self, [open_symbol] * self.k1)
else:
for w in DyckWordBacktracker(self.k1, self.k2):
yield self.element_class(self, w)
|
[
"def",
"__iter__",
"(",
"self",
")",
":",
"if",
"self",
".",
"k1",
"==",
"0",
":",
"yield",
"self",
".",
"element_class",
"(",
"self",
",",
"[",
"]",
")",
"elif",
"self",
".",
"k2",
"==",
"0",
":",
"yield",
"self",
".",
"element_class",
"(",
"self",
",",
"[",
"open_symbol",
"]",
"*",
"self",
".",
"k1",
")",
"else",
":",
"for",
"w",
"in",
"DyckWordBacktracker",
"(",
"self",
".",
"k1",
",",
"self",
".",
"k2",
")",
":",
"yield",
"self",
".",
"element_class",
"(",
"self",
",",
"w",
")"
] |
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/combinat/dyck_word.py#L3677-L3705
|
||
sagemath/sage
|
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
|
src/sage/combinat/set_partition_ordered.py
|
python
|
OrderedSetPartition.base_set_cardinality
|
(self)
|
return sum(len(x) for x in self)
|
Return the cardinality of the base set of ``self``, which is the sum
of the sizes of the parts of ``self``.
This is also known as the *size* (sometimes the *weight*) of
an ordered set partition.
EXAMPLES::
sage: OrderedSetPartition([[1], [2,3], [4]]).base_set_cardinality()
4
sage: OrderedSetPartition([[1,2,3,4]]).base_set_cardinality()
4
|
Return the cardinality of the base set of ``self``, which is the sum
of the sizes of the parts of ``self``.
|
[
"Return",
"the",
"cardinality",
"of",
"the",
"base",
"set",
"of",
"self",
"which",
"is",
"the",
"sum",
"of",
"the",
"sizes",
"of",
"the",
"parts",
"of",
"self",
"."
] |
def base_set_cardinality(self):
"""
Return the cardinality of the base set of ``self``, which is the sum
of the sizes of the parts of ``self``.
This is also known as the *size* (sometimes the *weight*) of
an ordered set partition.
EXAMPLES::
sage: OrderedSetPartition([[1], [2,3], [4]]).base_set_cardinality()
4
sage: OrderedSetPartition([[1,2,3,4]]).base_set_cardinality()
4
"""
return sum(len(x) for x in self)
|
[
"def",
"base_set_cardinality",
"(",
"self",
")",
":",
"return",
"sum",
"(",
"len",
"(",
"x",
")",
"for",
"x",
"in",
"self",
")"
] |
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/combinat/set_partition_ordered.py#L263-L278
|
|
openshift/openshift-tools
|
1188778e728a6e4781acf728123e5b356380fe6f
|
openshift/installer/vendored/openshift-ansible-3.9.40/roles/lib_utils/filter_plugins/openshift_hosted_filters.py
|
python
|
FilterModule.filters
|
(self)
|
return {'get_router_replicas': self.get_router_replicas}
|
returns a mapping of filters to methods
|
returns a mapping of filters to methods
|
[
"returns",
"a",
"mapping",
"of",
"filters",
"to",
"methods"
] |
def filters(self):
''' returns a mapping of filters to methods '''
return {'get_router_replicas': self.get_router_replicas}
|
[
"def",
"filters",
"(",
"self",
")",
":",
"return",
"{",
"'get_router_replicas'",
":",
"self",
".",
"get_router_replicas",
"}"
] |
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.9.40/roles/lib_utils/filter_plugins/openshift_hosted_filters.py#L40-L42
|
|
techwithtim/Sudoku-GUI-Solver
|
d02ece82f114f120d4632815d8276a0787775cd2
|
solver.py
|
python
|
print_board
|
(bo)
|
[] |
def print_board(bo):
for i in range(len(bo)):
if i % 3 == 0 and i != 0:
print("- - - - - - - - - - - - - ")
for j in range(len(bo[0])):
if j % 3 == 0 and j != 0:
print(" | ", end="")
if j == 8:
print(bo[i][j])
else:
print(str(bo[i][j]) + " ", end="")
|
[
"def",
"print_board",
"(",
"bo",
")",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"bo",
")",
")",
":",
"if",
"i",
"%",
"3",
"==",
"0",
"and",
"i",
"!=",
"0",
":",
"print",
"(",
"\"- - - - - - - - - - - - - \"",
")",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"bo",
"[",
"0",
"]",
")",
")",
":",
"if",
"j",
"%",
"3",
"==",
"0",
"and",
"j",
"!=",
"0",
":",
"print",
"(",
"\" | \"",
",",
"end",
"=",
"\"\"",
")",
"if",
"j",
"==",
"8",
":",
"print",
"(",
"bo",
"[",
"i",
"]",
"[",
"j",
"]",
")",
"else",
":",
"print",
"(",
"str",
"(",
"bo",
"[",
"i",
"]",
"[",
"j",
"]",
")",
"+",
"\" \"",
",",
"end",
"=",
"\"\"",
")"
] |
https://github.com/techwithtim/Sudoku-GUI-Solver/blob/d02ece82f114f120d4632815d8276a0787775cd2/solver.py#L45-L57
|
||||
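Quick usage sketch for `print_board` above (hypothetical grid, 0 for empty cells; assumes the function is in scope):

```python
board = [[0] * 9 for _ in range(9)]
board[0][0] = 5
print_board(board)
# prints 9 rows; a '- - -' rule after rows 3 and 6 and ' | ' after columns 3 and 6
```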
Kozea/pygal
|
8267b03535ff55789a30bf66b798302adad88623
|
pygal/colors.py
|
python
|
saturate
|
(color, percent)
|
return adjust(color, 1, percent)
|
Saturate a color by increasing its saturation by percent
|
Saturate a color by increasing its saturation by percent
|
[
"Saturate",
"a",
"color",
"by",
"increasing",
"its",
"saturation",
"by",
"percent"
] |
def saturate(color, percent):
"""Saturate a color by increasing its saturation by percent"""
return adjust(color, 1, percent)
|
[
"def",
"saturate",
"(",
"color",
",",
"percent",
")",
":",
"return",
"adjust",
"(",
"color",
",",
"1",
",",
"percent",
")"
] |
https://github.com/Kozea/pygal/blob/8267b03535ff55789a30bf66b798302adad88623/pygal/colors.py#L191-L193
|
|
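Usage sketch (requires the real pygal package; `#884488` sits near 33% saturation, so adding 25 points lands around 58%):

```python
from pygal.colors import saturate

print(saturate('#884488', 25))  # a noticeably more saturated purple
```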
securesystemslab/zippy
|
ff0e84ac99442c2c55fe1d285332cfd4e185e089
|
zippy/benchmarks/src/benchmarks/pymaging-bench.py
|
python
|
draw_lines
|
(img)
|
return img
|
[] |
def draw_lines(img):
topleft_bottomright = Line(0, 0, 999, 999)
bottomright_topleft = Line(999, 999, 0, 0)
bottomleft_topright = Line(0, 999, 999, 0)
topright_bottomleft = Line(999, 0, 0, 999)
img.draw(topleft_bottomright, White)
img.draw(bottomright_topleft, White)
img.draw(bottomleft_topright, White)
img.draw(topright_bottomleft, White)
slop1 = Line(100, 0, 899, 999)
slop2 = Line(899, 999, 100, 0)
slop3 = Line(0, 899, 899, 0)
slop4 = Line(899, 0, 0, 899)
img.draw(slop1, Yellow)
img.draw(slop2, Yellow)
img.draw(slop3, Yellow)
img.draw(slop4, Yellow)
blue1 = Line(10, 30, 500, 600)
blue2 = Line(700, 900, 100, 20)
blue3 = Line(0, 300, 666, 33)
blue4 = Line(876, 0, 20, 717)
img.draw(blue1, SlateBlue)
img.draw(blue2, SlateBlue)
img.draw(blue3, SlateBlue)
img.draw(blue4, SlateBlue)
return img
|
[
"def",
"draw_lines",
"(",
"img",
")",
":",
"topleft_bottomright",
"=",
"Line",
"(",
"0",
",",
"0",
",",
"999",
",",
"999",
")",
"bottomright_topleft",
"=",
"Line",
"(",
"999",
",",
"999",
",",
"0",
",",
"0",
")",
"bottomleft_topright",
"=",
"Line",
"(",
"0",
",",
"999",
",",
"999",
",",
"0",
")",
"topright_bottomleft",
"=",
"Line",
"(",
"999",
",",
"0",
",",
"0",
",",
"999",
")",
"img",
".",
"draw",
"(",
"topleft_bottomright",
",",
"White",
")",
"img",
".",
"draw",
"(",
"bottomright_topleft",
",",
"White",
")",
"img",
".",
"draw",
"(",
"bottomleft_topright",
",",
"White",
")",
"img",
".",
"draw",
"(",
"topright_bottomleft",
",",
"White",
")",
"slop1",
"=",
"Line",
"(",
"100",
",",
"0",
",",
"899",
",",
"999",
")",
"slop2",
"=",
"Line",
"(",
"899",
",",
"999",
",",
"100",
",",
"0",
")",
"slop3",
"=",
"Line",
"(",
"0",
",",
"899",
",",
"899",
",",
"0",
")",
"slop4",
"=",
"Line",
"(",
"899",
",",
"0",
",",
"0",
",",
"899",
")",
"img",
".",
"draw",
"(",
"slop1",
",",
"Yellow",
")",
"img",
".",
"draw",
"(",
"slop2",
",",
"Yellow",
")",
"img",
".",
"draw",
"(",
"slop3",
",",
"Yellow",
")",
"img",
".",
"draw",
"(",
"slop4",
",",
"Yellow",
")",
"blue1",
"=",
"Line",
"(",
"10",
",",
"30",
",",
"500",
",",
"600",
")",
"blue2",
"=",
"Line",
"(",
"700",
",",
"900",
",",
"100",
",",
"20",
")",
"blue3",
"=",
"Line",
"(",
"0",
",",
"300",
",",
"666",
",",
"33",
")",
"blue4",
"=",
"Line",
"(",
"876",
",",
"0",
",",
"20",
",",
"717",
")",
"img",
".",
"draw",
"(",
"blue1",
",",
"SlateBlue",
")",
"img",
".",
"draw",
"(",
"blue2",
",",
"SlateBlue",
")",
"img",
".",
"draw",
"(",
"blue3",
",",
"SlateBlue",
")",
"img",
".",
"draw",
"(",
"blue4",
",",
"SlateBlue",
")",
"return",
"img"
] |
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/benchmarks/src/benchmarks/pymaging-bench.py#L19-L50
|
|||
zhl2008/awd-platform
|
0416b31abea29743387b10b3914581fbe8e7da5e
|
web_flaskbb/flaskbb/user/forms.py
|
python
|
ChangeEmailForm.__init__
|
(self, user, *args, **kwargs)
|
[] |
def __init__(self, user, *args, **kwargs):
self.user = user
kwargs['obj'] = self.user
super(ChangeEmailForm, self).__init__(*args, **kwargs)
|
[
"def",
"__init__",
"(",
"self",
",",
"user",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"user",
"=",
"user",
"kwargs",
"[",
"'obj'",
"]",
"=",
"self",
".",
"user",
"super",
"(",
"ChangeEmailForm",
",",
"self",
")",
".",
"__init__",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/flaskbb/user/forms.py#L53-L56
|
||||
Ultimaker/Cura
|
a1622c77ea7259ecb956acd6de07b7d34b7ac52b
|
cura/Settings/MachineManager.py
|
python
|
MachineManager.variantBuildplateUsable
|
(self)
|
return result
|
The selected buildplate is usable if it is usable for all materials OR it is compatible for one but not compatible
for the other material but the buildplate is still usable
|
The selected buildplate is usable if it is usable for all materials OR it is compatible for one but not compatible
|
[
"The",
"selected",
"buildplate",
"is",
"usable",
"if",
"it",
"is",
"usable",
"for",
"all",
"materials",
"OR",
"it",
"is",
"compatible",
"for",
"one",
"but",
"not",
"compatible"
] |
def variantBuildplateUsable(self) -> bool:
"""The selected buildplate is usable if it is usable for all materials OR it is compatible for one but not compatible
for the other material but the buildplate is still usable
"""
if not self._global_container_stack:
return True
# Here the next formula is being calculated:
# result = (not (material_left_compatible and material_right_compatible)) and
# (material_left_compatible or material_left_usable) and
# (material_right_compatible or material_right_usable)
result = not self.variantBuildplateCompatible
for stack in self._global_container_stack.extruderList:
material_container = stack.material
if material_container == empty_material_container:
continue
buildplate_compatible = material_container.getMetaDataEntry("buildplate_compatible")[self.activeVariantBuildplateName] if material_container.getMetaDataEntry("buildplate_compatible") else True
buildplate_usable = material_container.getMetaDataEntry("buildplate_recommended")[self.activeVariantBuildplateName] if material_container.getMetaDataEntry("buildplate_recommended") else True
result = result and (buildplate_compatible or buildplate_usable)
return result
|
[
"def",
"variantBuildplateUsable",
"(",
"self",
")",
"->",
"bool",
":",
"if",
"not",
"self",
".",
"_global_container_stack",
":",
"return",
"True",
"# Here the next formula is being calculated:",
"# result = (not (material_left_compatible and material_right_compatible)) and",
"# (material_left_compatible or material_left_usable) and",
"# (material_right_compatible or material_right_usable)",
"result",
"=",
"not",
"self",
".",
"variantBuildplateCompatible",
"for",
"stack",
"in",
"self",
".",
"_global_container_stack",
".",
"extruderList",
":",
"material_container",
"=",
"stack",
".",
"material",
"if",
"material_container",
"==",
"empty_material_container",
":",
"continue",
"buildplate_compatible",
"=",
"material_container",
".",
"getMetaDataEntry",
"(",
"\"buildplate_compatible\"",
")",
"[",
"self",
".",
"activeVariantBuildplateName",
"]",
"if",
"material_container",
".",
"getMetaDataEntry",
"(",
"\"buildplate_compatible\"",
")",
"else",
"True",
"buildplate_usable",
"=",
"material_container",
".",
"getMetaDataEntry",
"(",
"\"buildplate_recommended\"",
")",
"[",
"self",
".",
"activeVariantBuildplateName",
"]",
"if",
"material_container",
".",
"getMetaDataEntry",
"(",
"\"buildplate_recommended\"",
")",
"else",
"True",
"result",
"=",
"result",
"and",
"(",
"buildplate_compatible",
"or",
"buildplate_usable",
")",
"return",
"result"
] |
https://github.com/Ultimaker/Cura/blob/a1622c77ea7259ecb956acd6de07b7d34b7ac52b/cura/Settings/MachineManager.py#L786-L809
|
|
BRML/climin
|
2215b1abb5906a98ba95b868072b5f4f66b11679
|
climin/mathadapt.py
|
python
|
where
|
(x, *args)
|
Delegate to gnumpy.where or numpy.where depending on the type of `x`.
|
Delegate to gnumpy.where or numpy.where depending on the type of `x`.
|
[
"Delegate",
"to",
"gnumpy",
".",
"where",
"or",
"numpy",
".",
"where",
"depending",
"on",
"the",
"type",
"of",
"x",
"."
] |
def where(x, *args):
"""Delegate to gnumpy.where or numpy.where depending on the type of `x`."""
if not isinstance(x, np.ndarray):
return gp.where(x, *args)
else:
return np.where(x, *args)
|
[
"def",
"where",
"(",
"x",
",",
"*",
"args",
")",
":",
"if",
"not",
"isinstance",
"(",
"x",
",",
"np",
".",
"ndarray",
")",
":",
"return",
"gp",
".",
"where",
"(",
"x",
",",
"*",
"args",
")",
"else",
":",
"return",
"np",
".",
"where",
"(",
"x",
",",
"*",
"args",
")"
] |
https://github.com/BRML/climin/blob/2215b1abb5906a98ba95b868072b5f4f66b11679/climin/mathadapt.py#L58-L63
|
||
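With a plain `ndarray`, the dispatcher above goes straight to `numpy.where`, so it can be exercised without gnumpy installed (assumes the `where` above is in scope):

```python
import numpy as np

x = np.array([1.0, -2.0, 3.0])
print(where(x > 0, x, 0.0))  # [1. 0. 3.] -- the np.where branch
```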
OfflineIMAP/offlineimap
|
e70d3992a0e9bb0fcdf3c94e1edf25a4124dfcd2
|
offlineimap/CustomConfig.py
|
python
|
ConfigHelperMixin.getconfint
|
(self, option, default = CustomConfigDefault)
|
return self._confighelper_runner(option, default,
self.getconfig().getdefaultint,
self.getconfig().getint)
|
Retrieves integer value from the configuration.
Arguments:
- option: option name whose value is to be retrieved;
- default: default return value if no such option
exists.
|
Retrieves integer value from the configuration.
|
[
"Retrieves",
"integer",
"value",
"from",
"the",
"configuration",
"."
] |
def getconfint(self, option, default = CustomConfigDefault):
"""
Retrieves integer value from the configuration.
Arguments:
- option: option name whose value is to be retrieved;
- default: default return value if no such option
exists.
"""
return self._confighelper_runner(option, default,
self.getconfig().getdefaultint,
self.getconfig().getint)
|
[
"def",
"getconfint",
"(",
"self",
",",
"option",
",",
"default",
"=",
"CustomConfigDefault",
")",
":",
"return",
"self",
".",
"_confighelper_runner",
"(",
"option",
",",
"default",
",",
"self",
".",
"getconfig",
"(",
")",
".",
"getdefaultint",
",",
"self",
".",
"getconfig",
"(",
")",
".",
"getint",
")"
] |
https://github.com/OfflineIMAP/offlineimap/blob/e70d3992a0e9bb0fcdf3c94e1edf25a4124dfcd2/offlineimap/CustomConfig.py#L270-L283
|
|
EnableSecurity/wafw00f
|
3257c48d45ffb2f6504629aa3c5d529f1b886c1b
|
wafw00f/plugins/siteground.py
|
python
|
is_waf
|
(self)
|
return False
|
[] |
def is_waf(self):
schemes = [
self.matchContent(r"Our system thinks you might be a robot!"),
self.matchContent(r'access is restricted due to a security rule')
]
if any(i for i in schemes):
return True
return False
|
[
"def",
"is_waf",
"(",
"self",
")",
":",
"schemes",
"=",
"[",
"self",
".",
"matchContent",
"(",
"r\"Our system thinks you might be a robot!\"",
")",
",",
"self",
".",
"matchContent",
"(",
"r'access is restricted due to a security rule'",
")",
"]",
"if",
"any",
"(",
"i",
"for",
"i",
"in",
"schemes",
")",
":",
"return",
"True",
"return",
"False"
] |
https://github.com/EnableSecurity/wafw00f/blob/3257c48d45ffb2f6504629aa3c5d529f1b886c1b/wafw00f/plugins/siteground.py#L10-L17
|
|||
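The `any(...)` check above is a generic match-any-signature pattern; a standalone sketch with `re` (the page bodies are hypothetical):

```python
import re

SIGNATURES = (
    r"Our system thinks you might be a robot!",
    r'access is restricted due to a security rule',
)

def is_waf(body):
    return any(re.search(sig, body) for sig in SIGNATURES)

print(is_waf('... access is restricted due to a security rule ...'))  # True
print(is_waf('plain page'))                                           # False
```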
OpnTec/open-event-server
|
a48f7e4c6002db6fb4dc06bac6508536a0dc585e
|
app/api/helpers/jwt.py
|
python
|
jwt_authenticate
|
(email, password)
|
helper function to authenticate user if credentials are correct
:param email:
:param password:
:return:
|
helper function to authenticate user if credentials are correct
:param email:
:param password:
:return:
|
[
"helper",
"function",
"to",
"authenticate",
"user",
"if",
"credentials",
"are",
"correct",
":",
"param",
"email",
":",
":",
"param",
"password",
":",
":",
"return",
":"
] |
def jwt_authenticate(email, password):
"""
helper function to authenticate user if credentials are correct
:param email:
:param password:
:return:
"""
user = User.query.filter_by(email=email).first()
if user is None:
return None
auth_ok = check_password_hash(
password.encode('utf-8'),
user.password.encode('utf-8'),
user.salt
)
if auth_ok:
return user
else:
return None
|
[
"def",
"jwt_authenticate",
"(",
"email",
",",
"password",
")",
":",
"user",
"=",
"User",
".",
"query",
".",
"filter_by",
"(",
"email",
"=",
"email",
")",
".",
"first",
"(",
")",
"if",
"user",
"is",
"None",
":",
"return",
"None",
"auth_ok",
"=",
"check_password_hash",
"(",
"password",
".",
"encode",
"(",
"'utf-8'",
")",
",",
"user",
".",
"password",
".",
"encode",
"(",
"'utf-8'",
")",
",",
"user",
".",
"salt",
")",
"if",
"auth_ok",
":",
"return",
"user",
"else",
":",
"return",
"None"
] |
https://github.com/OpnTec/open-event-server/blob/a48f7e4c6002db6fb4dc06bac6508536a0dc585e/app/api/helpers/jwt.py#L7-L25
|
||
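The contract above is lookup-then-verify: return the user on success, `None` otherwise. A toy sketch of that contract with an in-memory store (hypothetical; the real code checks a salted hash, not plaintext):

```python
USERS = {'a@example.com': 'hunter2'}  # hypothetical plaintext store

def jwt_authenticate(email, password):
    stored = USERS.get(email)
    if stored is None:
        return None                 # unknown email
    return email if stored == password else None

print(jwt_authenticate('a@example.com', 'hunter2'))  # 'a@example.com'
print(jwt_authenticate('a@example.com', 'nope'))     # None
```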
mrkipling/maraschino
|
c6be9286937783ae01df2d6d8cebfc8b2734a7d7
|
lib/pastebin/pastebin.py
|
python
|
PastebinAPI.trending
|
(self, api_dev_key)
|
return response
|
Returns the top trending paste details.
Usage Example::
>>> from pastebin import PastebinAPI
>>> x = PastebinAPI()
>>> details = x.trending('453a994e0e2f1efae07f8759e59e075b')
>>> print details
<paste>
<paste_key>jjMRFDH6</paste_key>
<paste_date>1333230838</paste_date>
<paste_title></paste_title>
<paste_size>6416</paste_size>
<paste_expire_date>0</paste_expire_date>
<paste_private>0</paste_private>
<paste_format_long>None</paste_format_long>
<paste_format_short>text</paste_format_short>
<paste_url>http://pastebin.com/jjMRFDH6</paste_url>
<paste_hits>6384</paste_hits>
</paste>
Note: Returns multiple trending pastes, not just 1.
@type api_dev_key: string
@param api_dev_key: The API Developer Key of a registered U{http://pastebin.com} account.
@rtype: string
@return: Returns the string (XML formatted) containing the top trending pastes.
|
Returns the top trending paste details.
|
[
"Returns",
"the",
"top",
"trending",
"paste",
"details",
"."
] |
def trending(self, api_dev_key):
"""Returns the top trending paste details.
Usage Example::
>>> from pastebin import PastebinAPI
>>> x = PastebinAPI()
>>> details = x.trending('453a994e0e2f1efae07f8759e59e075b')
>>> print details
<paste>
<paste_key>jjMRFDH6</paste_key>
<paste_date>1333230838</paste_date>
<paste_title></paste_title>
<paste_size>6416</paste_size>
<paste_expire_date>0</paste_expire_date>
<paste_private>0</paste_private>
<paste_format_long>None</paste_format_long>
<paste_format_short>text</paste_format_short>
<paste_url>http://pastebin.com/jjMRFDH6</paste_url>
<paste_hits>6384</paste_hits>
</paste>
Note: Returns multiple trending pastes, not just 1.
@type api_dev_key: string
@param api_dev_key: The API Developer Key of a registered U{http://pastebin.com} account.
@rtype: string
@return: Returns the string (XML formatted) containing the top trending pastes.
"""
# Valid api developer key
argv = {'api_dev_key' : str(api_dev_key) }
# Valid API option - 'trends' returns trending pastes
argv['api_option'] = str('trends')
# let's try to read the URL that we've just built.
request_string = urllib.urlopen(self._api_url, urllib.urlencode(argv))
response = request_string.read()
# do some basic error checking here so we can gracefully handle any errors we are likely to encounter
if response.startswith(self._bad_request):
raise PastebinError(response)
elif not response.startswith('<paste>'):
raise PastebinError(response)
return response
|
[
"def",
"trending",
"(",
"self",
",",
"api_dev_key",
")",
":",
"# Valid api developer key",
"argv",
"=",
"{",
"'api_dev_key'",
":",
"str",
"(",
"api_dev_key",
")",
"}",
"# Valid API option - 'trends' is returns trending pastes",
"argv",
"[",
"'api_option'",
"]",
"=",
"str",
"(",
"'trends'",
")",
"# lets try to read the URL that we've just built.",
"request_string",
"=",
"urllib",
".",
"urlopen",
"(",
"self",
".",
"_api_url",
",",
"urllib",
".",
"urlencode",
"(",
"argv",
")",
")",
"response",
"=",
"request_string",
".",
"read",
"(",
")",
"# do some basic error checking here so we can gracefully handle any errors we are likely to encounter",
"if",
"response",
".",
"startswith",
"(",
"self",
".",
"_bad_request",
")",
":",
"raise",
"PastebinError",
"(",
"response",
")",
"elif",
"not",
"response",
".",
"startswith",
"(",
"'<paste>'",
")",
":",
"raise",
"PastebinError",
"(",
"response",
")",
"return",
"response"
] |
https://github.com/mrkipling/maraschino/blob/c6be9286937783ae01df2d6d8cebfc8b2734a7d7/lib/pastebin/pastebin.py#L401-L451
|
|
shiyanhui/FileHeader
|
f347cc134021fb0b710694b71c57742476f5fd2b
|
jinja2/filters.py
|
python
|
do_attr
|
(environment, obj, name)
|
return environment.undefined(obj=obj, name=name)
|
Get an attribute of an object. ``foo|attr("bar")`` works like
``foo["bar"]`` just that always an attribute is returned and items are not
looked up.
See :ref:`Notes on subscriptions <notes-on-subscriptions>` for more details.
|
Get an attribute of an object. ``foo|attr("bar")`` works like
``foo["bar"]`` just that always an attribute is returned and items are not
looked up.
|
[
"Get",
"an",
"attribute",
"of",
"an",
"object",
".",
"foo|attr",
"(",
"bar",
")",
"works",
"like",
"foo",
"[",
"bar",
"]",
"just",
"that",
"always",
"an",
"attribute",
"is",
"returned",
"and",
"items",
"are",
"not",
"looked",
"up",
"."
] |
def do_attr(environment, obj, name):
"""Get an attribute of an object. ``foo|attr("bar")`` works like
``foo["bar"]`` just that always an attribute is returned and items are not
looked up.
See :ref:`Notes on subscriptions <notes-on-subscriptions>` for more details.
"""
try:
name = str(name)
except UnicodeError:
pass
else:
try:
value = getattr(obj, name)
except AttributeError:
pass
else:
if environment.sandboxed and not \
environment.is_safe_attribute(obj, name, value):
return environment.unsafe_undefined(obj, name)
return value
return environment.undefined(obj=obj, name=name)
|
[
"def",
"do_attr",
"(",
"environment",
",",
"obj",
",",
"name",
")",
":",
"try",
":",
"name",
"=",
"str",
"(",
"name",
")",
"except",
"UnicodeError",
":",
"pass",
"else",
":",
"try",
":",
"value",
"=",
"getattr",
"(",
"obj",
",",
"name",
")",
"except",
"AttributeError",
":",
"pass",
"else",
":",
"if",
"environment",
".",
"sandboxed",
"and",
"not",
"environment",
".",
"is_safe_attribute",
"(",
"obj",
",",
"name",
",",
"value",
")",
":",
"return",
"environment",
".",
"unsafe_undefined",
"(",
"obj",
",",
"name",
")",
"return",
"value",
"return",
"environment",
".",
"undefined",
"(",
"obj",
"=",
"obj",
",",
"name",
"=",
"name",
")"
] |
https://github.com/shiyanhui/FileHeader/blob/f347cc134021fb0b710694b71c57742476f5fd2b/jinja2/filters.py#L773-L794
|
|
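The core of `do_attr` above is "getattr, else return an undefined marker". A standalone sketch without a Jinja environment (the `Missing` class is hypothetical):

```python
class Missing:
    def __repr__(self):
        return 'Undefined'

def attr(obj, name, undefined=Missing()):
    try:
        return getattr(obj, str(name))
    except (AttributeError, UnicodeError):
        return undefined

print(attr(3 + 4j, 'imag'))  # 4.0
print(attr(3 + 4j, 'nope'))  # Undefined
```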
allenai/allennlp
|
a3d71254fcc0f3615910e9c3d48874515edf53e0
|
allennlp/commands/predict.py
|
python
|
_PredictManager._get_json_data
|
(self)
|
[] |
def _get_json_data(self) -> Iterator[JsonDict]:
if self._input_file == "-":
for line in sys.stdin:
if not line.isspace():
yield self._predictor.load_line(line)
else:
input_file = cached_path(self._input_file)
with open(input_file, "r") as file_input:
for line in file_input:
if not line.isspace():
yield self._predictor.load_line(line)
|
[
"def",
"_get_json_data",
"(",
"self",
")",
"->",
"Iterator",
"[",
"JsonDict",
"]",
":",
"if",
"self",
".",
"_input_file",
"==",
"\"-\"",
":",
"for",
"line",
"in",
"sys",
".",
"stdin",
":",
"if",
"not",
"line",
".",
"isspace",
"(",
")",
":",
"yield",
"self",
".",
"_predictor",
".",
"load_line",
"(",
"line",
")",
"else",
":",
"input_file",
"=",
"cached_path",
"(",
"self",
".",
"_input_file",
")",
"with",
"open",
"(",
"input_file",
",",
"\"r\"",
")",
"as",
"file_input",
":",
"for",
"line",
"in",
"file_input",
":",
"if",
"not",
"line",
".",
"isspace",
"(",
")",
":",
"yield",
"self",
".",
"_predictor",
".",
"load_line",
"(",
"line",
")"
] |
https://github.com/allenai/allennlp/blob/a3d71254fcc0f3615910e9c3d48874515edf53e0/allennlp/commands/predict.py#L206-L216
|
||||
cloudera/hue
|
23f02102d4547c17c32bd5ea0eb24e9eadd657a4
|
desktop/core/ext-py/Django-1.11.29/django/contrib/gis/gdal/field.py
|
python
|
Field.type
|
(self)
|
return capi.get_field_type(self.ptr)
|
Returns the OGR type of this Field.
|
Returns the OGR type of this Field.
|
[
"Returns",
"the",
"OGR",
"type",
"of",
"this",
"Field",
"."
] |
def type(self):
"Returns the OGR type of this Field."
return capi.get_field_type(self.ptr)
|
[
"def",
"type",
"(",
"self",
")",
":",
"return",
"capi",
".",
"get_field_type",
"(",
"self",
".",
"ptr",
")"
] |
https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/core/ext-py/Django-1.11.29/django/contrib/gis/gdal/field.py#L88-L90
|
|
Tencent/tencent-ml-images
|
182631879cdb3d44d594d13d3f29a98bf7acdf81
|
models/resnet.py
|
python
|
ResNet._batch_norm
|
(self, name, x, is_training=True)
|
Batch normalization.
Considering the performance, we use batch_normalization in contrib/layers/python/layers/layers.py
instead of tf.nn.batch_normalization and set fused=True
Args:
x: input tensor
is_training: Whether to return the output in training mode or in inference mode, use the argument
in finetune
|
Batch normalization.
Considering the performance, we use batch_normalization in contrib/layers/python/layers/layers.py
instead of tf.nn.batch_normalization and set fused=True
Args:
x: input tensor
is_training: Whether to return the output in training mode or in inference mode, use the argument
in finetune
|
[
"Batch",
"normalization",
".",
"Considering",
"the",
"performance",
"we",
"use",
"batch_normalization",
"in",
"contrib",
"/",
"layers",
"/",
"python",
"/",
"layers",
"/",
"layers",
".",
"py",
"instead",
"of",
"tf",
".",
"nn",
".",
"batch_normalization",
"and",
"set",
"fused",
"=",
"True",
"Args",
":",
"x",
":",
"input",
"tensor",
"is_training",
":",
"Whether",
"to",
"return",
"the",
"output",
"in",
"training",
"mode",
"or",
"in",
"inference",
"mode",
"use",
"the",
"argment",
"in",
"finetune"
] |
def _batch_norm(self, name, x, is_training=True):
"""Batch normalization.
Considering the performance, we use batch_normalization in contrib/layers/python/layers/layers.py
instead of tf.nn.batch_normalization and set fused=True
Args:
x: input tensor
is_training: Whether to return the output in training mode or in inference mode, use the argument
in finetune
"""
with tf.variable_scope(name):
return tf.layers.batch_normalization(
inputs=x,
axis=1 if self.data_format == 'NCHW' else 3,
momentum = FLAGS.batch_norm_decay,
epsilon = FLAGS.batch_norm_epsilon,
center=True,
scale=True,
training=is_training,
fused=True
)
|
[
"def",
"_batch_norm",
"(",
"self",
",",
"name",
",",
"x",
",",
"is_training",
"=",
"True",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
")",
":",
"return",
"tf",
".",
"layers",
".",
"batch_normalization",
"(",
"inputs",
"=",
"x",
",",
"axis",
"=",
"1",
"if",
"self",
".",
"data_format",
"==",
"'NCHW'",
"else",
"3",
",",
"momentum",
"=",
"FLAGS",
".",
"batch_norm_decay",
",",
"epsilon",
"=",
"FLAGS",
".",
"batch_norm_epsilon",
",",
"center",
"=",
"True",
",",
"scale",
"=",
"True",
",",
"training",
"=",
"is_training",
",",
"fused",
"=",
"True",
")"
] |
https://github.com/Tencent/tencent-ml-images/blob/182631879cdb3d44d594d13d3f29a98bf7acdf81/models/resnet.py#L113-L132
|
||
eirannejad/pyRevit
|
49c0b7eb54eb343458ce1365425e6552d0c47d44
|
site-packages/sqlalchemy/util/queue.py
|
python
|
Queue.put
|
(self, item, block=True, timeout=None)
|
Put an item into the queue.
If optional args `block` is True and `timeout` is None (the
default), block if necessary until a free slot is
available. If `timeout` is a positive number, it blocks at
most `timeout` seconds and raises the ``Full`` exception if no
free slot was available within that time. Otherwise (`block`
is false), put an item on the queue if a free slot is
immediately available, else raise the ``Full`` exception
(`timeout` is ignored in that case).
|
Put an item into the queue.
|
[
"Put",
"an",
"item",
"into",
"the",
"queue",
"."
] |
def put(self, item, block=True, timeout=None):
"""Put an item into the queue.
If optional args `block` is True and `timeout` is None (the
default), block if necessary until a free slot is
available. If `timeout` is a positive number, it blocks at
most `timeout` seconds and raises the ``Full`` exception if no
free slot was available within that time. Otherwise (`block`
is false), put an item on the queue if a free slot is
immediately available, else raise the ``Full`` exception
(`timeout` is ignored in that case).
"""
self.not_full.acquire()
try:
if not block:
if self._full():
raise Full
elif timeout is None:
while self._full():
self.not_full.wait()
else:
if timeout < 0:
raise ValueError("'timeout' must be a positive number")
endtime = _time() + timeout
while self._full():
remaining = endtime - _time()
if remaining <= 0.0:
raise Full
self.not_full.wait(remaining)
self._put(item)
self.not_empty.notify()
finally:
self.not_full.release()
|
[
"def",
"put",
"(",
"self",
",",
"item",
",",
"block",
"=",
"True",
",",
"timeout",
"=",
"None",
")",
":",
"self",
".",
"not_full",
".",
"acquire",
"(",
")",
"try",
":",
"if",
"not",
"block",
":",
"if",
"self",
".",
"_full",
"(",
")",
":",
"raise",
"Full",
"elif",
"timeout",
"is",
"None",
":",
"while",
"self",
".",
"_full",
"(",
")",
":",
"self",
".",
"not_full",
".",
"wait",
"(",
")",
"else",
":",
"if",
"timeout",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"'timeout' must be a positive number\"",
")",
"endtime",
"=",
"_time",
"(",
")",
"+",
"timeout",
"while",
"self",
".",
"_full",
"(",
")",
":",
"remaining",
"=",
"endtime",
"-",
"_time",
"(",
")",
"if",
"remaining",
"<=",
"0.0",
":",
"raise",
"Full",
"self",
".",
"not_full",
".",
"wait",
"(",
"remaining",
")",
"self",
".",
"_put",
"(",
"item",
")",
"self",
".",
"not_empty",
".",
"notify",
"(",
")",
"finally",
":",
"self",
".",
"not_full",
".",
"release",
"(",
")"
] |
https://github.com/eirannejad/pyRevit/blob/49c0b7eb54eb343458ce1365425e6552d0c47d44/site-packages/sqlalchemy/util/queue.py#L87-L120
|
||
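The vendored Queue above mirrors the standard library's semantics, so the three block/timeout modes its docstring describes can be sketched against stdlib queue.Queue (not the SQLAlchemy class itself):

import queue

q = queue.Queue(maxsize=1)
q.put("a")                               # a free slot is available: returns at once
try:
    q.put("b", block=True, timeout=0.1)  # blocks up to 0.1s, then raises Full
except queue.Full:
    print("no slot freed within the timeout")
try:
    q.put("c", block=False)              # non-blocking: raises Full immediately
except queue.Full:
    print("queue is full")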
MongoEngine/django-mongoengine
|
e8a75e8e5860545ecfbadaf1b1285495022bd7cb
|
django_mongoengine/forms/widgets.py
|
python
|
Dictionary.__init__
|
(self, schema=None, no_schema=1, max_depth=None,
flags=None, sub_attrs=None, attrs=None, verbose_dict=None,
verbose_field=None)
|
:param schema: A dictionary representing the future schema of
the Dictionary widget. It is responsible for the
creation of subwidgets.
:param no_schema: An integer that can take 4 values: 0, 1, 2, 3.
0 means that no schema was passed.
1 means that the schema passed was the default
one. This is the default value.
2 means that the schema passed was given
by a parent widget, and that it actually
represents data for rendering.
3 means that the schema was rebuilt after
retrieving form data.
:param max_depth: An integer representing the max depth of
sub-dicts. If passed, the system will
prevent to save dictionaries with depths
superior to this parameter.
:param flags: A list of flags. Available values :
- 'FORCE_SCHEMA' : would force dictionaries
to keep a certain schema. Only Pair fields
could be added.
:param sub_attrs: A dictionary that contains the classes
for the keys (key.class) and the values
(value.class) of each pair
:param verbose_field: verbose for 'Add field'
:param verbose_dict: verbose for 'Add dict'
|
:param schema: A dictionary representing the future schema of
the Dictionary widget. It is responsible for the
creation of subwidgets.
:param no_schema: An integer that can take 4 values: 0, 1, 2, 3.
0 means that no schema was passed.
1 means that the schema passed was the default
one. This is the default value.
2 means that the schema passed was given
by a parent widget, and that it actually
represents data for rendering.
3 means that the schema was rebuilt after
retrieving form data.
:param max_depth: An integer representing the max depth of
sub-dicts. If passed, the system will
prevent to save dictionaries with depths
superior to this parameter.
:param flags: A list of flags. Available values :
- 'FORCE_SCHEMA' : would force dictionaries
to keep a certain schema. Only Pair fields
could be added.
:param sub_attrs: A dictionary that contains the classes
for the keys (key.class) and the values
(value.class) of each pair
:param verbose_field: verbose for 'Add field'
:param verbose_dict: verbose for 'Add dict'
|
[
":",
"param",
"schema",
":",
"A",
"dictionary",
"representing",
"the",
"future",
"schema",
"of",
"the",
"Dictionary",
"widget",
".",
"It",
"is",
"responsible",
"for",
"the",
"creation",
"of",
"subwidgets",
".",
":",
"param",
"no_schema",
":",
"An",
"integer",
"that",
"can",
"take",
"3",
"values",
":",
"0",
"1",
"2",
".",
"0",
"means",
"that",
"no",
"schema",
"was",
"passed",
".",
"1",
"means",
"that",
"the",
"schema",
"passed",
"was",
"the",
"default",
"one",
".",
"This",
"is",
"the",
"default",
"value",
".",
"2",
"means",
"that",
"the",
"schema",
"passed",
"was",
"given",
"by",
"a",
"parent",
"widget",
"and",
"that",
"it",
"actually",
"represent",
"data",
"for",
"rendering",
".",
"3",
"means",
"that",
"the",
"schema",
"was",
"rebuilt",
"after",
"retrieving",
"form",
"data",
".",
":",
"param",
"max_depth",
":",
"An",
"integer",
"representing",
"the",
"max",
"depth",
"of",
"sub",
"-",
"dicts",
".",
"If",
"passed",
"the",
"system",
"will",
"prevent",
"to",
"save",
"dictionaries",
"with",
"depths",
"superior",
"to",
"this",
"parameter",
".",
":",
"param",
"flags",
":",
"A",
"list",
"of",
"flags",
".",
"Available",
"values",
":",
"-",
"FORCE_SCHEMA",
":",
"would",
"force",
"dictionaries",
"to",
"keep",
"a",
"certain",
"schema",
".",
"Only",
"Pair",
"fields",
"could",
"be",
"added",
".",
":",
"param",
"sub_attrs",
":",
"A",
"dictionary",
"that",
"contains",
"the",
"classes",
"for",
"the",
"keys",
"(",
"key",
".",
"class",
")",
"and",
"the",
"values",
"(",
"value",
".",
"class",
")",
"of",
"each",
"pair",
":",
"param",
"verbose_field",
":",
"verbose",
"for",
"Add",
"field",
":",
"param",
"verbose_dict",
":",
"verbose",
"for",
"Add",
"dict"
] |
def __init__(self, schema=None, no_schema=1, max_depth=None,
flags=None, sub_attrs=None, attrs=None, verbose_dict=None,
verbose_field=None):
"""
:param schema: A dictionary representing the future schema of
the Dictionary widget. It is responsible for the
creation of subwidgets.
:param no_schema: An integer that can take 4 values: 0, 1, 2, 3.
0 means that no schema was passed.
1 means that the schema passed was the default
one. This is the default value.
2 means that the schema passed was given
by a parent widget, and that it actually
represents data for rendering.
3 means that the schema was rebuilt after
retrieving form data.
:param max_depth: An integer representing the max depth of
sub-dicts. If passed, the system will
prevent to save dictionaries with depths
superior to this parameter.
:param flags: A list of flags. Available values :
- 'FORCE_SCHEMA' : would force dictionaries
to keep a certain schema. Only Pair fields
could be added.
:param sub_attrs: A dictionary that contains the classes
for the keys (key.class) and the values
(value.class) of each pair
:param verbose_field: verbose for 'Add field'
:param verbose_dict: verbose for 'Add dict'
"""
self.verbose_field = verbose_field or ADD_FIELD_VERBOSE
self.verbose_dict = verbose_dict or ADD_DICT_VERBOSE
self.no_schema = no_schema
self.max_depth = (max_depth if max_depth and max_depth >= 0 else None)
self.flags = flags or []
self.sub_attrs = sub_attrs or {}
if flags is not None and 'FORCE_SCHEMA' in flags:
self.pair = StaticPair
self.subdict = StaticSubDictionary
else:
self.pair = Pair
self.subdict = SubDictionary
widget_object = []
if isinstance(schema, dict) and self.no_schema > 0:
for key in schema:
if isinstance(schema[key], dict):
widget_object.append(self.subdict(key_value=key, schema=schema[key],
max_depth=max_depth, sub_attrs=self.sub_attrs,
attrs=attrs, verbose_field=self.verbose_field,
verbose_dict=self.verbose_dict))
else:
widget_object.append(self.pair(key_value=key, sub_attrs=self.sub_attrs, attrs=attrs))
else:
widget_object.append(self.pair(sub_attrs=self.sub_attrs, sattrs=attrs))
super(Dictionary, self).__init__(widget_object, attrs)
|
[
"def",
"__init__",
"(",
"self",
",",
"schema",
"=",
"None",
",",
"no_schema",
"=",
"1",
",",
"max_depth",
"=",
"None",
",",
"flags",
"=",
"None",
",",
"sub_attrs",
"=",
"None",
",",
"attrs",
"=",
"None",
",",
"verbose_dict",
"=",
"None",
",",
"verbose_field",
"=",
"None",
")",
":",
"self",
".",
"verbose_field",
"=",
"verbose_field",
"or",
"ADD_FIELD_VERBOSE",
"self",
".",
"verbose_dict",
"=",
"verbose_dict",
"or",
"ADD_DICT_VERBOSE",
"self",
".",
"no_schema",
"=",
"no_schema",
"self",
".",
"max_depth",
"=",
"(",
"max_depth",
"if",
"max_depth",
"and",
"max_depth",
">=",
"0",
"else",
"None",
")",
"self",
".",
"flags",
"=",
"flags",
"or",
"[",
"]",
"self",
".",
"sub_attrs",
"=",
"sub_attrs",
"or",
"{",
"}",
"if",
"flags",
"is",
"not",
"None",
"and",
"'FORCE_SCHEMA'",
"in",
"flags",
":",
"self",
".",
"pair",
"=",
"StaticPair",
"self",
".",
"subdict",
"=",
"StaticSubDictionary",
"else",
":",
"self",
".",
"pair",
"=",
"Pair",
"self",
".",
"subdict",
"=",
"SubDictionary",
"widget_object",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"schema",
",",
"dict",
")",
"and",
"self",
".",
"no_schema",
">",
"0",
":",
"for",
"key",
"in",
"schema",
":",
"if",
"isinstance",
"(",
"schema",
"[",
"key",
"]",
",",
"dict",
")",
":",
"widget_object",
".",
"append",
"(",
"self",
".",
"subdict",
"(",
"key_value",
"=",
"key",
",",
"schema",
"=",
"schema",
"[",
"key",
"]",
",",
"max_depth",
"=",
"max_depth",
",",
"sub_attrs",
"=",
"self",
".",
"sub_attrs",
",",
"attrs",
"=",
"attrs",
",",
"verbose_field",
"=",
"self",
".",
"verbose_field",
",",
"verbose_dict",
"=",
"self",
".",
"verbose_dict",
")",
")",
"else",
":",
"widget_object",
".",
"append",
"(",
"self",
".",
"pair",
"(",
"key_value",
"=",
"key",
",",
"sub_attrs",
"=",
"self",
".",
"sub_attrs",
",",
"attrs",
"=",
"attrs",
")",
")",
"else",
":",
"widget_object",
".",
"append",
"(",
"self",
".",
"pair",
"(",
"sub_attrs",
"=",
"self",
".",
"sub_attrs",
",",
"sattrs",
"=",
"attrs",
")",
")",
"super",
"(",
"Dictionary",
",",
"self",
")",
".",
"__init__",
"(",
"widget_object",
",",
"attrs",
")"
] |
https://github.com/MongoEngine/django-mongoengine/blob/e8a75e8e5860545ecfbadaf1b1285495022bd7cb/django_mongoengine/forms/widgets.py#L19-L76
|
||
securesystemslab/zippy
|
ff0e84ac99442c2c55fe1d285332cfd4e185e089
|
zippy/lib-python/3/xmlrpc/client.py
|
python
|
ServerProxy.__close
|
(self)
|
[] |
def __close(self):
self.__transport.close()
|
[
"def",
"__close",
"(",
"self",
")",
":",
"self",
".",
"__transport",
".",
"close",
"(",
")"
] |
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/lib-python/3/xmlrpc/client.py#L1390-L1391
|
||||
OpenEndedGroup/Field
|
4f7c8edfb01bb0ccc927b78d3c500f018a4ae37c
|
Contents/lib/python/mailbox.py
|
python
|
_singlefileMailbox.lock
|
(self)
|
Lock the mailbox.
|
Lock the mailbox.
|
[
"Lock",
"the",
"mailbox",
"."
] |
def lock(self):
"""Lock the mailbox."""
if not self._locked:
_lock_file(self._file)
self._locked = True
|
[
"def",
"lock",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_locked",
":",
"_lock_file",
"(",
"self",
".",
"_file",
")",
"self",
".",
"_locked",
"=",
"True"
] |
https://github.com/OpenEndedGroup/Field/blob/4f7c8edfb01bb0ccc927b78d3c500f018a4ae37c/Contents/lib/python/mailbox.py#L560-L564
|
||
ddbourgin/numpy-ml
|
b0359af5285fbf9699d64fd5ec059493228af03e
|
numpy_ml/neural_nets/layers/layers.py
|
python
|
LayerNorm1D.__init__
|
(self, epsilon=1e-5, optimizer=None)
|
A layer normalization layer for 1D inputs.
Notes
-----
In contrast to :class:`BatchNorm1D`, the LayerNorm layer calculates the
mean and variance across *features* rather than examples in the batch
ensuring that the mean and variance estimates are independent of batch
size and permitting straightforward application in RNNs.
Equations [train & test]::
Y = scaler * norm(X) + intercept
norm(X) = (X - mean(X)) / sqrt(var(X) + epsilon)
Also in contrast to :class:`BatchNorm1D`, `scaler` and `intercept` are applied
*elementwise* to ``norm(X)``.
Parameters
----------
epsilon : float
A small smoothing constant to use during computation of ``norm(X)``
to avoid divide-by-zero errors. Default is 1e-5.
optimizer : str, :doc:`Optimizer <numpy_ml.neural_nets.optimizers>` object, or None
The optimization strategy to use when performing gradient updates
within the :meth:`update` method. If None, use the :class:`SGD
<numpy_ml.neural_nets.optimizers.SGD>` optimizer with
default parameters. Default is None.
Attributes
----------
X : list
Running list of inputs to the :meth:`forward <numpy_ml.neural_nets.LayerBase.forward>` method since the last call to :meth:`update <numpy_ml.neural_nets.LayerBase.update>`. Only updated if the `retain_derived` argument was set to True.
gradients : dict
Dictionary of loss gradients with regard to the layer parameters
parameters : dict
Dictionary of layer parameters
hyperparameters : dict
Dictionary of layer hyperparameters
derived_variables : dict
Dictionary of any intermediate values computed during
forward/backward propagation.
|
A layer normalization layer for 1D inputs.
|
[
"A",
"layer",
"normalization",
"layer",
"for",
"1D",
"inputs",
"."
] |
def __init__(self, epsilon=1e-5, optimizer=None):
"""
A layer normalization layer for 1D inputs.
Notes
-----
In contrast to :class:`BatchNorm1D`, the LayerNorm layer calculates the
mean and variance across *features* rather than examples in the batch
ensuring that the mean and variance estimates are independent of batch
size and permitting straightforward application in RNNs.
Equations [train & test]::
Y = scaler * norm(X) + intercept
norm(X) = (X - mean(X)) / sqrt(var(X) + epsilon)
Also in contrast to :class:`BatchNorm1D`, `scaler` and `intercept` are applied
*elementwise* to ``norm(X)``.
Parameters
----------
epsilon : float
A small smoothing constant to use during computation of ``norm(X)``
to avoid divide-by-zero errors. Default is 1e-5.
optimizer : str, :doc:`Optimizer <numpy_ml.neural_nets.optimizers>` object, or None
The optimization strategy to use when performing gradient updates
within the :meth:`update` method. If None, use the :class:`SGD
<numpy_ml.neural_nets.optimizers.SGD>` optimizer with
default parameters. Default is None.
Attributes
----------
X : list
Running list of inputs to the :meth:`forward <numpy_ml.neural_nets.LayerBase.forward>` method since the last call to :meth:`update <numpy_ml.neural_nets.LayerBase.update>`. Only updated if the `retain_derived` argument was set to True.
gradients : dict
Dictionary of loss gradients with regard to the layer parameters
parameters : dict
Dictionary of layer parameters
hyperparameters : dict
Dictionary of layer hyperparameters
derived_variables : dict
Dictionary of any intermediate values computed during
forward/backward propagation.
""" # noqa: E501
super().__init__(optimizer)
self.n_in = None
self.n_out = None
self.epsilon = epsilon
self.parameters = {"scaler": None, "intercept": None}
self.is_initialized = False
|
[
"def",
"__init__",
"(",
"self",
",",
"epsilon",
"=",
"1e-5",
",",
"optimizer",
"=",
"None",
")",
":",
"# noqa: E501",
"super",
"(",
")",
".",
"__init__",
"(",
"optimizer",
")",
"self",
".",
"n_in",
"=",
"None",
"self",
".",
"n_out",
"=",
"None",
"self",
".",
"epsilon",
"=",
"epsilon",
"self",
".",
"parameters",
"=",
"{",
"\"scaler\"",
":",
"None",
",",
"\"intercept\"",
":",
"None",
"}",
"self",
".",
"is_initialized",
"=",
"False"
] |
https://github.com/ddbourgin/numpy-ml/blob/b0359af5285fbf9699d64fd5ec059493228af03e/numpy_ml/neural_nets/layers/layers.py#L1635-L1685
|
||
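The LayerNorm1D docstring above states the train/test equations in closed form; a minimal NumPy sketch of exactly those two lines (the feature axis and the parameter shapes are assumptions, not taken from the record):

import numpy as np

def layer_norm_1d(X, scaler, intercept, epsilon=1e-5):
    # norm(X) = (X - mean(X)) / sqrt(var(X) + epsilon), computed per example
    # across the feature axis; scaler and intercept are applied elementwise.
    mean = X.mean(axis=1, keepdims=True)
    var = X.var(axis=1, keepdims=True)
    return scaler * (X - mean) / np.sqrt(var + epsilon) + intercept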
laspy/laspy
|
c9d9b9c0e8d84288134c02bf4ecec3964f5afa29
|
laspy/header.py
|
python
|
LasHeader.y_offset
|
(self)
|
return self.offsets[1]
|
[] |
def y_offset(self) -> float:
return self.offsets[1]
|
[
"def",
"y_offset",
"(",
"self",
")",
"->",
"float",
":",
"return",
"self",
".",
"offsets",
"[",
"1",
"]"
] |
https://github.com/laspy/laspy/blob/c9d9b9c0e8d84288134c02bf4ecec3964f5afa29/laspy/header.py#L314-L315
|
|||
AwesomeTTS/awesometts-anki-addon
|
c7c2c94479b610b9767ec44cdbb825002bc0c2b7
|
awesometts/router.py
|
python
|
Router.get_options
|
(self, svc_id)
|
return service['options']
|
Returns a list of options that should be displayed for the
service, with defaults highlighted.
|
Returns a list of options that should be displayed for the
service, with defaults highlighted.
|
[
"Returns",
"a",
"list",
"of",
"options",
"that",
"should",
"be",
"displayed",
"for",
"the",
"service",
"with",
"defaults",
"highlighted",
"."
] |
def get_options(self, svc_id):
"""
Returns a list of options that should be displayed for the
service, with defaults highlighted.
"""
svc_id, service = self._fetch_options_and_extras(svc_id)
return service['options']
|
[
"def",
"get_options",
"(",
"self",
",",
"svc_id",
")",
":",
"svc_id",
",",
"service",
"=",
"self",
".",
"_fetch_options_and_extras",
"(",
"svc_id",
")",
"return",
"service",
"[",
"'options'",
"]"
] |
https://github.com/AwesomeTTS/awesometts-anki-addon/blob/c7c2c94479b610b9767ec44cdbb825002bc0c2b7/awesometts/router.py#L212-L219
|
|
kivy/python-for-android
|
4ecaa5fe01aa25e3bc8cadc52ae481645754f955
|
pythonforandroid/recipe.py
|
python
|
CythonRecipe.build_arch
|
(self, arch)
|
Build any cython components, then install the Python module by
calling setup.py install with the target Python dir.
|
Build any cython components, then install the Python module by
calling setup.py install with the target Python dir.
|
[
"Build",
"any",
"cython",
"components",
"then",
"install",
"the",
"Python",
"module",
"by",
"calling",
"setup",
".",
"py",
"install",
"with",
"the",
"target",
"Python",
"dir",
"."
] |
def build_arch(self, arch):
'''Build any cython components, then install the Python module by
calling setup.py install with the target Python dir.
'''
Recipe.build_arch(self, arch)
self.build_cython_components(arch)
self.install_python_package(arch)
|
[
"def",
"build_arch",
"(",
"self",
",",
"arch",
")",
":",
"Recipe",
".",
"build_arch",
"(",
"self",
",",
"arch",
")",
"self",
".",
"build_cython_components",
"(",
"arch",
")",
"self",
".",
"install_python_package",
"(",
"arch",
")"
] |
https://github.com/kivy/python-for-android/blob/4ecaa5fe01aa25e3bc8cadc52ae481645754f955/pythonforandroid/recipe.py#L1054-L1060
|
||
firedrakeproject/firedrake
|
06ab4975c14c0d4dcb79be55821f8b9e41554125
|
firedrake/slate/slate.py
|
python
|
Tensor._output_string
|
(self, prec=None)
|
return ["S", "V", "M"][self.rank] + "_%d" % self.id
|
Creates a string representation of the tensor.
|
Creates a string representation of the tensor.
|
[
"Creates",
"a",
"string",
"representation",
"of",
"the",
"tensor",
"."
] |
def _output_string(self, prec=None):
"""Creates a string representation of the tensor."""
return ["S", "V", "M"][self.rank] + "_%d" % self.id
|
[
"def",
"_output_string",
"(",
"self",
",",
"prec",
"=",
"None",
")",
":",
"return",
"[",
"\"S\"",
",",
"\"V\"",
",",
"\"M\"",
"]",
"[",
"self",
".",
"rank",
"]",
"+",
"\"_%d\"",
"%",
"self",
".",
"id"
] |
https://github.com/firedrakeproject/firedrake/blob/06ab4975c14c0d4dcb79be55821f8b9e41554125/firedrake/slate/slate.py#L870-L872
|
|
albertogeniola/MerossIot
|
35abe51dbc97f1aadcba7ec52b58b88711a6a0ef
|
meross_iot/controller/mixins/roller_shutter.py
|
python
|
RollerShutterTimerMixin.get_position
|
(self, channel: int = 0, *args, **kwargs)
|
return self._roller_shutter_position_by_channel.get(channel)
|
The current roller shutter position. Returns 100 if the given roller shutter is open, 0 if it is closed, -1 if it is stopped.
:param channel: channel of which status is needed
:return: 100 if the given roller shutter is open, 0 if it is closed, -1 if it is stopped.
|
The current roller shutter position. Returns 100 if the given roller shutter is open, 0 if it is closed, -1 if it is stopped.
|
[
"The",
"current",
"roller",
"shutter",
"position",
".",
"Returns",
"100",
"if",
"the",
"given",
"roller",
"shutter",
"is",
"open",
"0",
"if",
"it",
"is",
"close",
"-",
"1",
"if",
"it",
"is",
"stop",
"."
] |
def get_position(self, channel: int = 0, *args, **kwargs) -> Optional[int]:
"""
The current roller shutter position. Returns 100 if the given roller shutter is open, 0 if it is closed, -1 if it is stopped.
:param channel: channel of which status is needed
:return: 100 if the given roller shutter is open, 0 if it is closed, -1 if it is stopped.
"""
self.check_full_update_done()
return self._roller_shutter_position_by_channel.get(channel)
|
[
"def",
"get_position",
"(",
"self",
",",
"channel",
":",
"int",
"=",
"0",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"->",
"Optional",
"[",
"int",
"]",
":",
"self",
".",
"check_full_update_done",
"(",
")",
"return",
"self",
".",
"_roller_shutter_position_by_channel",
".",
"get",
"(",
"channel",
")"
] |
https://github.com/albertogeniola/MerossIot/blob/35abe51dbc97f1aadcba7ec52b58b88711a6a0ef/meross_iot/controller/mixins/roller_shutter.py#L133-L142
|
|
DataDog/integrations-core
|
934674b29d94b70ccc008f76ea172d0cdae05e1e
|
coredns/datadog_checks/coredns/config_models/defaults.py
|
python
|
instance_tls_cert
|
(field, value)
|
return get_default_field_value(field, value)
|
[] |
def instance_tls_cert(field, value):
return get_default_field_value(field, value)
|
[
"def",
"instance_tls_cert",
"(",
"field",
",",
"value",
")",
":",
"return",
"get_default_field_value",
"(",
"field",
",",
"value",
")"
] |
https://github.com/DataDog/integrations-core/blob/934674b29d94b70ccc008f76ea172d0cdae05e1e/coredns/datadog_checks/coredns/config_models/defaults.py#L309-L310
|
|||
PaddlePaddle/models
|
511e2e282960ed4c7440c3f1d1e62017acb90e11
|
tutorials/mobilenetv3_prod/Step6/paddlevision/datasets/vision.py
|
python
|
StandardTransform._format_transform_repr
|
(self, transform: Callable,
head: str)
|
return (["{}{}".format(head, lines[0])] +
["{}{}".format(" " * len(head), line) for line in lines[1:]])
|
[] |
def _format_transform_repr(self, transform: Callable,
head: str) -> List[str]:
lines = transform.__repr__().splitlines()
return (["{}{}".format(head, lines[0])] +
["{}{}".format(" " * len(head), line) for line in lines[1:]])
|
[
"def",
"_format_transform_repr",
"(",
"self",
",",
"transform",
":",
"Callable",
",",
"head",
":",
"str",
")",
"->",
"List",
"[",
"str",
"]",
":",
"lines",
"=",
"transform",
".",
"__repr__",
"(",
")",
".",
"splitlines",
"(",
")",
"return",
"(",
"[",
"\"{}{}\"",
".",
"format",
"(",
"head",
",",
"lines",
"[",
"0",
"]",
")",
"]",
"+",
"[",
"\"{}{}\"",
".",
"format",
"(",
"\" \"",
"*",
"len",
"(",
"head",
")",
",",
"line",
")",
"for",
"line",
"in",
"lines",
"[",
"1",
":",
"]",
"]",
")"
] |
https://github.com/PaddlePaddle/models/blob/511e2e282960ed4c7440c3f1d1e62017acb90e11/tutorials/mobilenetv3_prod/Step6/paddlevision/datasets/vision.py#L99-L103
|
|||
tensorflow/benchmarks
|
16af178ad312e8c1213efb27a5f227044228bfdf
|
scripts/tf_cnn_benchmarks/preprocessing.py
|
python
|
Cifar10ImagePreprocessor.preprocess
|
(self, raw_image)
|
return tf.cast(normalized, self.dtype)
|
Preprocessing raw image.
|
Preprocessing raw image.
|
[
"Preprocessing",
"raw",
"image",
"."
] |
def preprocess(self, raw_image):
"""Preprocessing raw image."""
if self.summary_verbosity >= 3:
tf.summary.image('raw.image', tf.expand_dims(raw_image, 0))
if self.train and self.distortions:
image = self._distort_image(raw_image)
else:
image = self._eval_image(raw_image)
normalized = normalized_image(image)
return tf.cast(normalized, self.dtype)
|
[
"def",
"preprocess",
"(",
"self",
",",
"raw_image",
")",
":",
"if",
"self",
".",
"summary_verbosity",
">=",
"3",
":",
"tf",
".",
"summary",
".",
"image",
"(",
"'raw.image'",
",",
"tf",
".",
"expand_dims",
"(",
"raw_image",
",",
"0",
")",
")",
"if",
"self",
".",
"train",
"and",
"self",
".",
"distortions",
":",
"image",
"=",
"self",
".",
"_distort_image",
"(",
"raw_image",
")",
"else",
":",
"image",
"=",
"self",
".",
"_eval_image",
"(",
"raw_image",
")",
"normalized",
"=",
"normalized_image",
"(",
"image",
")",
"return",
"tf",
".",
"cast",
"(",
"normalized",
",",
"self",
".",
"dtype",
")"
] |
https://github.com/tensorflow/benchmarks/blob/16af178ad312e8c1213efb27a5f227044228bfdf/scripts/tf_cnn_benchmarks/preprocessing.py#L854-L863
|
|
nosmokingbandit/watcher
|
dadacd21a5790ee609058a98a17fcc8954d24439
|
lib/sqlalchemy/sql/type_api.py
|
python
|
TypeEngine.result_processor
|
(self, dialect, coltype)
|
return None
|
Return a conversion function for processing result row values.
Returns a callable which will receive a result row column
value as the sole positional argument and will return a value
to return to the user.
If processing is not necessary, the method should return ``None``.
:param dialect: Dialect instance in use.
:param coltype: DBAPI coltype argument received in cursor.description.
|
Return a conversion function for processing result row values.
|
[
"Return",
"a",
"conversion",
"function",
"for",
"processing",
"result",
"row",
"values",
"."
] |
def result_processor(self, dialect, coltype):
"""Return a conversion function for processing result row values.
Returns a callable which will receive a result row column
value as the sole positional argument and will return a value
to return to the user.
If processing is not necessary, the method should return ``None``.
:param dialect: Dialect instance in use.
:param coltype: DBAPI coltype argument received in cursor.description.
"""
return None
|
[
"def",
"result_processor",
"(",
"self",
",",
"dialect",
",",
"coltype",
")",
":",
"return",
"None"
] |
https://github.com/nosmokingbandit/watcher/blob/dadacd21a5790ee609058a98a17fcc8954d24439/lib/sqlalchemy/sql/type_api.py#L261-L275
|
|
smart-mobile-software/gitstack
|
d9fee8f414f202143eb6e620529e8e5539a2af56
|
python/Lib/site-packages/setuptools/sandbox.py
|
python
|
run_setup
|
(setup_script, args)
|
Run a distutils setup script, sandboxed in its directory
|
Run a distutils setup script, sandboxed in its directory
|
[
"Run",
"a",
"distutils",
"setup",
"script",
"sandboxed",
"in",
"its",
"directory"
] |
def run_setup(setup_script, args):
"""Run a distutils setup script, sandboxed in its directory"""
old_dir = os.getcwd()
save_argv = sys.argv[:]
save_path = sys.path[:]
setup_dir = os.path.abspath(os.path.dirname(setup_script))
temp_dir = os.path.join(setup_dir,'temp')
if not os.path.isdir(temp_dir): os.makedirs(temp_dir)
save_tmp = tempfile.tempdir
save_modules = sys.modules.copy()
pr_state = pkg_resources.__getstate__()
try:
tempfile.tempdir = temp_dir; os.chdir(setup_dir)
try:
sys.argv[:] = [setup_script]+list(args)
sys.path.insert(0, setup_dir)
# reset to include setup dir, w/clean callback list
working_set.__init__()
working_set.callbacks.append(lambda dist:dist.activate())
DirectorySandbox(setup_dir).run(
lambda: execfile(
"setup.py",
{'__file__':setup_script, '__name__':'__main__'}
)
)
except SystemExit, v:
if v.args and v.args[0]:
raise
# Normal exit, just return
finally:
pkg_resources.__setstate__(pr_state)
sys.modules.update(save_modules)
for key in list(sys.modules):
if key not in save_modules: del sys.modules[key]
os.chdir(old_dir)
sys.path[:] = save_path
sys.argv[:] = save_argv
tempfile.tempdir = save_tmp
|
[
"def",
"run_setup",
"(",
"setup_script",
",",
"args",
")",
":",
"old_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"save_argv",
"=",
"sys",
".",
"argv",
"[",
":",
"]",
"save_path",
"=",
"sys",
".",
"path",
"[",
":",
"]",
"setup_dir",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"setup_script",
")",
")",
"temp_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"setup_dir",
",",
"'temp'",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"temp_dir",
")",
":",
"os",
".",
"makedirs",
"(",
"temp_dir",
")",
"save_tmp",
"=",
"tempfile",
".",
"tempdir",
"save_modules",
"=",
"sys",
".",
"modules",
".",
"copy",
"(",
")",
"pr_state",
"=",
"pkg_resources",
".",
"__getstate__",
"(",
")",
"try",
":",
"tempfile",
".",
"tempdir",
"=",
"temp_dir",
"os",
".",
"chdir",
"(",
"setup_dir",
")",
"try",
":",
"sys",
".",
"argv",
"[",
":",
"]",
"=",
"[",
"setup_script",
"]",
"+",
"list",
"(",
"args",
")",
"sys",
".",
"path",
".",
"insert",
"(",
"0",
",",
"setup_dir",
")",
"# reset to include setup dir, w/clean callback list",
"working_set",
".",
"__init__",
"(",
")",
"working_set",
".",
"callbacks",
".",
"append",
"(",
"lambda",
"dist",
":",
"dist",
".",
"activate",
"(",
")",
")",
"DirectorySandbox",
"(",
"setup_dir",
")",
".",
"run",
"(",
"lambda",
":",
"execfile",
"(",
"\"setup.py\"",
",",
"{",
"'__file__'",
":",
"setup_script",
",",
"'__name__'",
":",
"'__main__'",
"}",
")",
")",
"except",
"SystemExit",
",",
"v",
":",
"if",
"v",
".",
"args",
"and",
"v",
".",
"args",
"[",
"0",
"]",
":",
"raise",
"# Normal exit, just return",
"finally",
":",
"pkg_resources",
".",
"__setstate__",
"(",
"pr_state",
")",
"sys",
".",
"modules",
".",
"update",
"(",
"save_modules",
")",
"for",
"key",
"in",
"list",
"(",
"sys",
".",
"modules",
")",
":",
"if",
"key",
"not",
"in",
"save_modules",
":",
"del",
"sys",
".",
"modules",
"[",
"key",
"]",
"os",
".",
"chdir",
"(",
"old_dir",
")",
"sys",
".",
"path",
"[",
":",
"]",
"=",
"save_path",
"sys",
".",
"argv",
"[",
":",
"]",
"=",
"save_argv",
"tempfile",
".",
"tempdir",
"=",
"save_tmp"
] |
https://github.com/smart-mobile-software/gitstack/blob/d9fee8f414f202143eb6e620529e8e5539a2af56/python/Lib/site-packages/setuptools/sandbox.py#L42-L79
|
||
PaddlePaddle/Research
|
2da0bd6c72d60e9df403aff23a7802779561c4a1
|
NLP/EMNLP2019-MAL/src/train.py
|
python
|
prepare_batch_input
|
(insts, data_input_names, src_pad_idx, trg_pad_idx,
n_head, d_model)
|
return data_input_dict, np.asarray([num_token], dtype="float32")
|
Put all padded data needed by training into a dict.
|
Put all padded data needed by training into a dict.
|
[
"Put",
"all",
"padded",
"data",
"needed",
"by",
"training",
"into",
"a",
"dict",
"."
] |
def prepare_batch_input(insts, data_input_names, src_pad_idx, trg_pad_idx,
n_head, d_model):
"""
Put all padded data needed by training into a dict.
"""
src_word, src_pos, src_slf_attn_bias, src_max_len = pad_batch_data(
[inst[0] for inst in insts], src_pad_idx, n_head, is_target=False)
src_word = src_word.reshape(-1, src_max_len, 1)
src_pos = src_pos.reshape(-1, src_max_len, 1)
trg_word, trg_pos, trg_slf_attn_bias, trg_max_len = pad_batch_data(
[inst[1] for inst in insts], trg_pad_idx, n_head, is_target=True)
trg_word = trg_word.reshape(-1, trg_max_len, 1)
trg_word = trg_word[:, 1:, :]
trg_pos = trg_pos.reshape(-1, trg_max_len, 1)
trg_src_attn_bias = np.tile(src_slf_attn_bias[:, :, ::src_max_len, :],
[1, 1, trg_max_len, 1]).astype("float32")
lbl_word, lbl_weight, num_token = pad_batch_data(
[inst[2] for inst in insts],
trg_pad_idx,
n_head,
is_target=False,
is_label=True,
return_attn_bias=False,
return_max_len=False,
return_num_token=True)
# reverse_target
reverse_trg_word, _, _, _ = pad_batch_data(
[inst[3] for inst in insts], trg_pad_idx, n_head, is_target=True)
reverse_trg_word = reverse_trg_word.reshape(-1, trg_max_len, 1)
reverse_trg_word = reverse_trg_word[:, 1:, :]
reverse_lbl_word, _, _ = pad_batch_data(
[inst[4] for inst in insts],
trg_pad_idx,
n_head,
is_target=False,
is_label=True,
return_attn_bias=False,
return_max_len=False,
return_num_token=True)
eos_position = []
meet_eos = False
for word_id in reverse_lbl_word:
if word_id[0] == 1 and not meet_eos:
meet_eos = True
eos_position.append([1])
elif word_id[0] == 1 and meet_eos:
eos_position.append([0])
else:
meet_eos = False
eos_position.append([0])
data_input_dict = dict(
zip(data_input_names, [
src_word, src_pos, src_slf_attn_bias, trg_word, reverse_trg_word, trg_pos,
trg_slf_attn_bias, trg_src_attn_bias, lbl_word, lbl_weight, reverse_lbl_word, np.asarray(eos_position, dtype = "int64")
]))
return data_input_dict, np.asarray([num_token], dtype="float32")
|
[
"def",
"prepare_batch_input",
"(",
"insts",
",",
"data_input_names",
",",
"src_pad_idx",
",",
"trg_pad_idx",
",",
"n_head",
",",
"d_model",
")",
":",
"src_word",
",",
"src_pos",
",",
"src_slf_attn_bias",
",",
"src_max_len",
"=",
"pad_batch_data",
"(",
"[",
"inst",
"[",
"0",
"]",
"for",
"inst",
"in",
"insts",
"]",
",",
"src_pad_idx",
",",
"n_head",
",",
"is_target",
"=",
"False",
")",
"src_word",
"=",
"src_word",
".",
"reshape",
"(",
"-",
"1",
",",
"src_max_len",
",",
"1",
")",
"src_pos",
"=",
"src_pos",
".",
"reshape",
"(",
"-",
"1",
",",
"src_max_len",
",",
"1",
")",
"trg_word",
",",
"trg_pos",
",",
"trg_slf_attn_bias",
",",
"trg_max_len",
"=",
"pad_batch_data",
"(",
"[",
"inst",
"[",
"1",
"]",
"for",
"inst",
"in",
"insts",
"]",
",",
"trg_pad_idx",
",",
"n_head",
",",
"is_target",
"=",
"True",
")",
"trg_word",
"=",
"trg_word",
".",
"reshape",
"(",
"-",
"1",
",",
"trg_max_len",
",",
"1",
")",
"trg_word",
"=",
"trg_word",
"[",
":",
",",
"1",
":",
",",
":",
"]",
"trg_pos",
"=",
"trg_pos",
".",
"reshape",
"(",
"-",
"1",
",",
"trg_max_len",
",",
"1",
")",
"trg_src_attn_bias",
"=",
"np",
".",
"tile",
"(",
"src_slf_attn_bias",
"[",
":",
",",
":",
",",
":",
":",
"src_max_len",
",",
":",
"]",
",",
"[",
"1",
",",
"1",
",",
"trg_max_len",
",",
"1",
"]",
")",
".",
"astype",
"(",
"\"float32\"",
")",
"lbl_word",
",",
"lbl_weight",
",",
"num_token",
"=",
"pad_batch_data",
"(",
"[",
"inst",
"[",
"2",
"]",
"for",
"inst",
"in",
"insts",
"]",
",",
"trg_pad_idx",
",",
"n_head",
",",
"is_target",
"=",
"False",
",",
"is_label",
"=",
"True",
",",
"return_attn_bias",
"=",
"False",
",",
"return_max_len",
"=",
"False",
",",
"return_num_token",
"=",
"True",
")",
"# reverse_target",
"reverse_trg_word",
",",
"_",
",",
"_",
",",
"_",
"=",
"pad_batch_data",
"(",
"[",
"inst",
"[",
"3",
"]",
"for",
"inst",
"in",
"insts",
"]",
",",
"trg_pad_idx",
",",
"n_head",
",",
"is_target",
"=",
"True",
")",
"reverse_trg_word",
"=",
"reverse_trg_word",
".",
"reshape",
"(",
"-",
"1",
",",
"trg_max_len",
",",
"1",
")",
"reverse_trg_word",
"=",
"reverse_trg_word",
"[",
":",
",",
"1",
":",
",",
":",
"]",
"reverse_lbl_word",
",",
"_",
",",
"_",
"=",
"pad_batch_data",
"(",
"[",
"inst",
"[",
"4",
"]",
"for",
"inst",
"in",
"insts",
"]",
",",
"trg_pad_idx",
",",
"n_head",
",",
"is_target",
"=",
"False",
",",
"is_label",
"=",
"True",
",",
"return_attn_bias",
"=",
"False",
",",
"return_max_len",
"=",
"False",
",",
"return_num_token",
"=",
"True",
")",
"eos_position",
"=",
"[",
"]",
"meet_eos",
"=",
"False",
"for",
"word_id",
"in",
"reverse_lbl_word",
":",
"if",
"word_id",
"[",
"0",
"]",
"==",
"1",
"and",
"not",
"meet_eos",
":",
"meet_eos",
"=",
"True",
"eos_position",
".",
"append",
"(",
"[",
"1",
"]",
")",
"elif",
"word_id",
"[",
"0",
"]",
"==",
"1",
"and",
"meet_eos",
":",
"eos_position",
".",
"append",
"(",
"[",
"0",
"]",
")",
"else",
":",
"meet_eos",
"=",
"False",
"eos_position",
".",
"append",
"(",
"[",
"0",
"]",
")",
"data_input_dict",
"=",
"dict",
"(",
"zip",
"(",
"data_input_names",
",",
"[",
"src_word",
",",
"src_pos",
",",
"src_slf_attn_bias",
",",
"trg_word",
",",
"reverse_trg_word",
",",
"trg_pos",
",",
"trg_slf_attn_bias",
",",
"trg_src_attn_bias",
",",
"lbl_word",
",",
"lbl_weight",
",",
"reverse_lbl_word",
",",
"np",
".",
"asarray",
"(",
"eos_position",
",",
"dtype",
"=",
"\"int64\"",
")",
"]",
")",
")",
"return",
"data_input_dict",
",",
"np",
".",
"asarray",
"(",
"[",
"num_token",
"]",
",",
"dtype",
"=",
"\"float32\"",
")"
] |
https://github.com/PaddlePaddle/Research/blob/2da0bd6c72d60e9df403aff23a7802779561c4a1/NLP/EMNLP2019-MAL/src/train.py#L238-L301
|
|
wistbean/fxxkpython
|
88e16d79d8dd37236ba6ecd0d0ff11d63143968c
|
vip/qyxuan/projects/Snake/venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/html5lib/treeadapters/sax.py
|
python
|
to_sax
|
(walker, handler)
|
Call SAX-like content handler based on treewalker walker
:arg walker: the treewalker to use to walk the tree to convert it
:arg handler: SAX handler to use
|
Call SAX-like content handler based on treewalker walker
|
[
"Call",
"SAX",
"-",
"like",
"content",
"handler",
"based",
"on",
"treewalker",
"walker"
] |
def to_sax(walker, handler):
"""Call SAX-like content handler based on treewalker walker
:arg walker: the treewalker to use to walk the tree to convert it
:arg handler: SAX handler to use
"""
handler.startDocument()
for prefix, namespace in prefix_mapping.items():
handler.startPrefixMapping(prefix, namespace)
for token in walker:
type = token["type"]
if type == "Doctype":
continue
elif type in ("StartTag", "EmptyTag"):
attrs = AttributesNSImpl(token["data"],
unadjustForeignAttributes)
handler.startElementNS((token["namespace"], token["name"]),
token["name"],
attrs)
if type == "EmptyTag":
handler.endElementNS((token["namespace"], token["name"]),
token["name"])
elif type == "EndTag":
handler.endElementNS((token["namespace"], token["name"]),
token["name"])
elif type in ("Characters", "SpaceCharacters"):
handler.characters(token["data"])
elif type == "Comment":
pass
else:
assert False, "Unknown token type"
for prefix, namespace in prefix_mapping.items():
handler.endPrefixMapping(prefix)
handler.endDocument()
|
[
"def",
"to_sax",
"(",
"walker",
",",
"handler",
")",
":",
"handler",
".",
"startDocument",
"(",
")",
"for",
"prefix",
",",
"namespace",
"in",
"prefix_mapping",
".",
"items",
"(",
")",
":",
"handler",
".",
"startPrefixMapping",
"(",
"prefix",
",",
"namespace",
")",
"for",
"token",
"in",
"walker",
":",
"type",
"=",
"token",
"[",
"\"type\"",
"]",
"if",
"type",
"==",
"\"Doctype\"",
":",
"continue",
"elif",
"type",
"in",
"(",
"\"StartTag\"",
",",
"\"EmptyTag\"",
")",
":",
"attrs",
"=",
"AttributesNSImpl",
"(",
"token",
"[",
"\"data\"",
"]",
",",
"unadjustForeignAttributes",
")",
"handler",
".",
"startElementNS",
"(",
"(",
"token",
"[",
"\"namespace\"",
"]",
",",
"token",
"[",
"\"name\"",
"]",
")",
",",
"token",
"[",
"\"name\"",
"]",
",",
"attrs",
")",
"if",
"type",
"==",
"\"EmptyTag\"",
":",
"handler",
".",
"endElementNS",
"(",
"(",
"token",
"[",
"\"namespace\"",
"]",
",",
"token",
"[",
"\"name\"",
"]",
")",
",",
"token",
"[",
"\"name\"",
"]",
")",
"elif",
"type",
"==",
"\"EndTag\"",
":",
"handler",
".",
"endElementNS",
"(",
"(",
"token",
"[",
"\"namespace\"",
"]",
",",
"token",
"[",
"\"name\"",
"]",
")",
",",
"token",
"[",
"\"name\"",
"]",
")",
"elif",
"type",
"in",
"(",
"\"Characters\"",
",",
"\"SpaceCharacters\"",
")",
":",
"handler",
".",
"characters",
"(",
"token",
"[",
"\"data\"",
"]",
")",
"elif",
"type",
"==",
"\"Comment\"",
":",
"pass",
"else",
":",
"assert",
"False",
",",
"\"Unknown token type\"",
"for",
"prefix",
",",
"namespace",
"in",
"prefix_mapping",
".",
"items",
"(",
")",
":",
"handler",
".",
"endPrefixMapping",
"(",
"prefix",
")",
"handler",
".",
"endDocument",
"(",
")"
] |
https://github.com/wistbean/fxxkpython/blob/88e16d79d8dd37236ba6ecd0d0ff11d63143968c/vip/qyxuan/projects/Snake/venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/html5lib/treeadapters/sax.py#L13-L50
|
||
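A short driver for the to_sax record above, assuming html5lib is installed; the stdlib ContentHandler accepts every event as a no-op, so this simply exercises the walker-to-SAX bridge:

import html5lib
from html5lib.treeadapters import sax
from xml.sax.handler import ContentHandler

tree = html5lib.parse("<p>hello</p>")       # etree-based tree by default
walker = html5lib.getTreeWalker("etree")    # treewalker matching that tree type
sax.to_sax(walker(tree), ContentHandler())  # emits SAX events for the document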
pymedusa/Medusa
|
1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38
|
ext/github/View.py
|
python
|
View.timestamp
|
(self)
|
return self._timestamp.value
|
:type: datetime.datetime
|
:type: datetime.datetime
|
[
":",
"type",
":",
"datetime",
".",
"datetime"
] |
def timestamp(self):
"""
:type: datetime.datetime
"""
return self._timestamp.value
|
[
"def",
"timestamp",
"(",
"self",
")",
":",
"return",
"self",
".",
"_timestamp",
".",
"value"
] |
https://github.com/pymedusa/Medusa/blob/1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38/ext/github/View.py#L50-L54
|
|
Qiskit/qiskit-terra
|
b66030e3b9192efdd3eb95cf25c6545fe0a13da4
|
qiskit/algorithms/optimizers/gsls.py
|
python
|
GSLS.ls_optimize
|
(
self,
n: int,
obj_fun: Callable,
initial_point: np.ndarray,
var_lb: np.ndarray,
var_ub: np.ndarray,
)
|
return x, x_value, n_evals, grad_norm
|
Run the line search optimization.
Args:
n: Dimension of the problem.
obj_fun: Objective function.
initial_point: Initial point.
var_lb: Vector of lower bounds on the decision variables. Vector elements can be -np.inf
if the corresponding variable is unbounded from below.
var_ub: Vector of upper bounds on the decision variables. Vector elements can be np.inf
if the corresponding variable is unbounded from above.
Returns:
Final iterate as a vector, corresponding objective function value,
number of evaluations, and norm of the gradient estimate.
Raises:
ValueError: If the number of dimensions mismatches the size of the initial point or
the length of the lower or upper bound.
|
Run the line search optimization.
|
[
"Run",
"the",
"line",
"search",
"optimization",
"."
] |
def ls_optimize(
self,
n: int,
obj_fun: Callable,
initial_point: np.ndarray,
var_lb: np.ndarray,
var_ub: np.ndarray,
) -> Tuple[np.ndarray, float, int, float]:
"""Run the line search optimization.
Args:
n: Dimension of the problem.
obj_fun: Objective function.
initial_point: Initial point.
var_lb: Vector of lower bounds on the decision variables. Vector elements can be -np.inf
if the corresponding variable is unbounded from below.
var_ub: Vector of upper bounds on the decision variables. Vector elements can be np.inf
if the corresponding variable is unbounded from above.
Returns:
Final iterate as a vector, corresponding objective function value,
number of evaluations, and norm of the gradient estimate.
Raises:
ValueError: If the number of dimensions mismatches the size of the initial point or
the length of the lower or upper bound.
"""
if len(initial_point) != n:
raise ValueError("Size of the initial point mismatches the number of dimensions.")
if len(var_lb) != n:
raise ValueError("Length of the lower bound mismatches the number of dimensions.")
if len(var_ub) != n:
raise ValueError("Length of the upper bound mismatches the number of dimensions.")
# Initialize counters and data
iter_count = 0
n_evals = 0
prev_iter_successful = True
prev_directions, prev_sample_set_x, prev_sample_set_y = None, None, None
consecutive_fail_iter = 0
alpha = self._options["initial_step_size"]
grad_norm = np.inf
sample_set_size = int(round(self._options["sample_size_factor"] * n))
# Initial point
x = initial_point
x_value = obj_fun(x)
n_evals += 1
while iter_count < self._options["maxiter"] and n_evals < self._options["max_eval"]:
# Determine set of sample points
directions, sample_set_x = self.sample_set(n, x, var_lb, var_ub, sample_set_size)
if n_evals + len(sample_set_x) + 1 >= self._options["max_eval"]:
# The evaluation budget is too small to allow for
# another full iteration; we therefore exit now
break
sample_set_y = np.array([obj_fun(point) for point in sample_set_x])
n_evals += len(sample_set_x)
# Expand sample set if we could not improve
if not prev_iter_successful:
directions = np.vstack((prev_directions, directions))
sample_set_x = np.vstack((prev_sample_set_x, sample_set_x))
sample_set_y = np.hstack((prev_sample_set_y, sample_set_y))
# Find gradient approximation and candidate point
grad = self.gradient_approximation(
n, x, x_value, directions, sample_set_x, sample_set_y
)
grad_norm = np.linalg.norm(grad)
new_x = np.clip(x - alpha * grad, var_lb, var_ub)
new_x_value = obj_fun(new_x)
n_evals += 1
# Print information
if self._options["disp"]:
print(f"Iter {iter_count:d}")
print(f"Point {x} obj {x_value}")
print(f"Gradient {grad}")
print(f"Grad norm {grad_norm} new_x_value {new_x_value} step_size {alpha}")
print(f"Direction {directions}")
# Test Armijo condition for sufficient decrease
if new_x_value <= x_value - self._options["armijo_parameter"] * alpha * grad_norm:
# Accept point
x, x_value = new_x, new_x_value
alpha /= 2 * self._options["step_size_multiplier"]
prev_iter_successful = True
consecutive_fail_iter = 0
# Reset sample set
prev_directions = None
prev_sample_set_x = None
prev_sample_set_y = None
else:
# Do not accept point
alpha *= self._options["step_size_multiplier"]
prev_iter_successful = False
consecutive_fail_iter += 1
# Store sample set to enlarge it
prev_directions = directions
prev_sample_set_x, prev_sample_set_y = sample_set_x, sample_set_y
iter_count += 1
# Check termination criterion
if (
grad_norm <= self._options["min_gradient_norm"]
or alpha <= self._options["min_step_size"]
):
break
return x, x_value, n_evals, grad_norm
|
[
"def",
"ls_optimize",
"(",
"self",
",",
"n",
":",
"int",
",",
"obj_fun",
":",
"Callable",
",",
"initial_point",
":",
"np",
".",
"ndarray",
",",
"var_lb",
":",
"np",
".",
"ndarray",
",",
"var_ub",
":",
"np",
".",
"ndarray",
",",
")",
"->",
"Tuple",
"[",
"np",
".",
"ndarray",
",",
"float",
",",
"int",
",",
"float",
"]",
":",
"if",
"len",
"(",
"initial_point",
")",
"!=",
"n",
":",
"raise",
"ValueError",
"(",
"\"Size of the initial point mismatches the number of dimensions.\"",
")",
"if",
"len",
"(",
"var_lb",
")",
"!=",
"n",
":",
"raise",
"ValueError",
"(",
"\"Length of the lower bound mismatches the number of dimensions.\"",
")",
"if",
"len",
"(",
"var_ub",
")",
"!=",
"n",
":",
"raise",
"ValueError",
"(",
"\"Length of the upper bound mismatches the number of dimensions.\"",
")",
"# Initialize counters and data",
"iter_count",
"=",
"0",
"n_evals",
"=",
"0",
"prev_iter_successful",
"=",
"True",
"prev_directions",
",",
"prev_sample_set_x",
",",
"prev_sample_set_y",
"=",
"None",
",",
"None",
",",
"None",
"consecutive_fail_iter",
"=",
"0",
"alpha",
"=",
"self",
".",
"_options",
"[",
"\"initial_step_size\"",
"]",
"grad_norm",
"=",
"np",
".",
"inf",
"sample_set_size",
"=",
"int",
"(",
"round",
"(",
"self",
".",
"_options",
"[",
"\"sample_size_factor\"",
"]",
"*",
"n",
")",
")",
"# Initial point",
"x",
"=",
"initial_point",
"x_value",
"=",
"obj_fun",
"(",
"x",
")",
"n_evals",
"+=",
"1",
"while",
"iter_count",
"<",
"self",
".",
"_options",
"[",
"\"maxiter\"",
"]",
"and",
"n_evals",
"<",
"self",
".",
"_options",
"[",
"\"max_eval\"",
"]",
":",
"# Determine set of sample points",
"directions",
",",
"sample_set_x",
"=",
"self",
".",
"sample_set",
"(",
"n",
",",
"x",
",",
"var_lb",
",",
"var_ub",
",",
"sample_set_size",
")",
"if",
"n_evals",
"+",
"len",
"(",
"sample_set_x",
")",
"+",
"1",
">=",
"self",
".",
"_options",
"[",
"\"max_eval\"",
"]",
":",
"# The evaluation budget is too small to allow for",
"# another full iteration; we therefore exit now",
"break",
"sample_set_y",
"=",
"np",
".",
"array",
"(",
"[",
"obj_fun",
"(",
"point",
")",
"for",
"point",
"in",
"sample_set_x",
"]",
")",
"n_evals",
"+=",
"len",
"(",
"sample_set_x",
")",
"# Expand sample set if we could not improve",
"if",
"not",
"prev_iter_successful",
":",
"directions",
"=",
"np",
".",
"vstack",
"(",
"(",
"prev_directions",
",",
"directions",
")",
")",
"sample_set_x",
"=",
"np",
".",
"vstack",
"(",
"(",
"prev_sample_set_x",
",",
"sample_set_x",
")",
")",
"sample_set_y",
"=",
"np",
".",
"hstack",
"(",
"(",
"prev_sample_set_y",
",",
"sample_set_y",
")",
")",
"# Find gradient approximation and candidate point",
"grad",
"=",
"self",
".",
"gradient_approximation",
"(",
"n",
",",
"x",
",",
"x_value",
",",
"directions",
",",
"sample_set_x",
",",
"sample_set_y",
")",
"grad_norm",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"grad",
")",
"new_x",
"=",
"np",
".",
"clip",
"(",
"x",
"-",
"alpha",
"*",
"grad",
",",
"var_lb",
",",
"var_ub",
")",
"new_x_value",
"=",
"obj_fun",
"(",
"new_x",
")",
"n_evals",
"+=",
"1",
"# Print information",
"if",
"self",
".",
"_options",
"[",
"\"disp\"",
"]",
":",
"print",
"(",
"f\"Iter {iter_count:d}\"",
")",
"print",
"(",
"f\"Point {x} obj {x_value}\"",
")",
"print",
"(",
"f\"Gradient {grad}\"",
")",
"print",
"(",
"f\"Grad norm {grad_norm} new_x_value {new_x_value} step_size {alpha}\"",
")",
"print",
"(",
"f\"Direction {directions}\"",
")",
"# Test Armijo condition for sufficient decrease",
"if",
"new_x_value",
"<=",
"x_value",
"-",
"self",
".",
"_options",
"[",
"\"armijo_parameter\"",
"]",
"*",
"alpha",
"*",
"grad_norm",
":",
"# Accept point",
"x",
",",
"x_value",
"=",
"new_x",
",",
"new_x_value",
"alpha",
"/=",
"2",
"*",
"self",
".",
"_options",
"[",
"\"step_size_multiplier\"",
"]",
"prev_iter_successful",
"=",
"True",
"consecutive_fail_iter",
"=",
"0",
"# Reset sample set",
"prev_directions",
"=",
"None",
"prev_sample_set_x",
"=",
"None",
"prev_sample_set_y",
"=",
"None",
"else",
":",
"# Do not accept point",
"alpha",
"*=",
"self",
".",
"_options",
"[",
"\"step_size_multiplier\"",
"]",
"prev_iter_successful",
"=",
"False",
"consecutive_fail_iter",
"+=",
"1",
"# Store sample set to enlarge it",
"prev_directions",
"=",
"directions",
"prev_sample_set_x",
",",
"prev_sample_set_y",
"=",
"sample_set_x",
",",
"sample_set_y",
"iter_count",
"+=",
"1",
"# Check termination criterion",
"if",
"(",
"grad_norm",
"<=",
"self",
".",
"_options",
"[",
"\"min_gradient_norm\"",
"]",
"or",
"alpha",
"<=",
"self",
".",
"_options",
"[",
"\"min_step_size\"",
"]",
")",
":",
"break",
"return",
"x",
",",
"x_value",
",",
"n_evals",
",",
"grad_norm"
] |
https://github.com/Qiskit/qiskit-terra/blob/b66030e3b9192efdd3eb95cf25c6545fe0a13da4/qiskit/algorithms/optimizers/gsls.py#L153-L268
|
|
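The step acceptance inside ls_optimize above hinges on one inequality, the Armijo sufficient-decrease test; isolated here as a sketch (the default parameter value is illustrative, not taken from the record):

def armijo_accept(x_value, new_x_value, alpha, grad_norm, armijo_parameter=0.1):
    # Accept the candidate point only if the objective dropped by at least
    # armijo_parameter * step_size * ||gradient estimate||.
    return new_x_value <= x_value - armijo_parameter * alpha * grad_norm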
sagemath/sage
|
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
|
src/sage/functions/orthogonal_polys.py
|
python
|
Func_assoc_legendre_P._derivative_
|
(self, n, m, x, *args, **kwds)
|
Return the derivative of ``gen_legendre_P(n,m,x)``.
EXAMPLES::
sage: (m,n) = var('m,n')
sage: derivative(gen_legendre_P(n,m,x), x)
-((n + 1)*x*gen_legendre_P(n, m, x) + (m - n - 1)*gen_legendre_P(n + 1, m, x))/(x^2 - 1)
sage: gen_legendre_P(3,2,x,hold=True).diff(x).expand().simplify_full()
-45*x^2 + 15
sage: derivative(gen_legendre_P(n,m,x), n)
Traceback (most recent call last):
...
NotImplementedError: Derivative w.r.t. to the index is not supported.
|
Return the derivative of ``gen_legendre_P(n,m,x)``.
|
[
"Return",
"the",
"derivative",
"of",
"gen_legendre_P",
"(",
"n",
"m",
"x",
")",
"."
] |
def _derivative_(self, n, m, x, *args, **kwds):
"""
Return the derivative of ``gen_legendre_P(n,m,x)``.
EXAMPLES::
sage: (m,n) = var('m,n')
sage: derivative(gen_legendre_P(n,m,x), x)
-((n + 1)*x*gen_legendre_P(n, m, x) + (m - n - 1)*gen_legendre_P(n + 1, m, x))/(x^2 - 1)
sage: gen_legendre_P(3,2,x,hold=True).diff(x).expand().simplify_full()
-45*x^2 + 15
sage: derivative(gen_legendre_P(n,m,x), n)
Traceback (most recent call last):
...
NotImplementedError: Derivative w.r.t. to the index is not supported.
"""
diff_param = kwds['diff_param']
if diff_param == 0:
raise NotImplementedError("Derivative w.r.t. to the index is not supported.")
else:
# https://dlmf.nist.gov/14.10#E4
return ((m-n-1)*gen_legendre_P(n+1, m, x) + (n+1)*x*gen_legendre_P(n, m, x))/(1 - x**2)
|
[
"def",
"_derivative_",
"(",
"self",
",",
"n",
",",
"m",
",",
"x",
",",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
":",
"diff_param",
"=",
"kwds",
"[",
"'diff_param'",
"]",
"if",
"diff_param",
"==",
"0",
":",
"raise",
"NotImplementedError",
"(",
"\"Derivative w.r.t. to the index is not supported.\"",
")",
"else",
":",
"# https://dlmf.nist.gov/14.10#E4",
"return",
"(",
"(",
"m",
"-",
"n",
"-",
"1",
")",
"*",
"gen_legendre_P",
"(",
"n",
"+",
"1",
",",
"m",
",",
"x",
")",
"+",
"(",
"n",
"+",
"1",
")",
"*",
"x",
"*",
"gen_legendre_P",
"(",
"n",
",",
"m",
",",
"x",
")",
")",
"/",
"(",
"1",
"-",
"x",
"**",
"2",
")"
] |
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/functions/orthogonal_polys.py#L1715-L1737
|
||
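The branch the _derivative_ record above actually takes is the identity its comment cites (DLMF 14.10.E4), rearranged for the derivative; in LaTeX:

\frac{d}{dx} P_n^m(x) = \frac{(m - n - 1)\, P_{n+1}^m(x) + (n + 1)\, x\, P_n^m(x)}{1 - x^2}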
evennia/evennia
|
fa79110ba6b219932f22297838e8ac72ebc0be0e
|
evennia/contrib/clothing.py
|
python
|
Clothing.at_get
|
(self, getter)
|
Makes absolutely sure clothes aren't already set as 'worn'
when they're picked up, in case they've somehow had their
location changed without getting removed.
|
Makes absolutely sure clothes aren't already set as 'worn'
when they're picked up, in case they've somehow had their
location changed without getting removed.
|
[
"Makes",
"absolutely",
"sure",
"clothes",
"aren",
"t",
"already",
"set",
"as",
"worn",
"when",
"they",
"re",
"picked",
"up",
"in",
"case",
"they",
"ve",
"somehow",
"had",
"their",
"location",
"changed",
"without",
"getting",
"removed",
"."
] |
def at_get(self, getter):
"""
Makes absolutely sure clothes aren't already set as 'worn'
when they're picked up, in case they've somehow had their
location changed without getting removed.
"""
self.db.worn = False
|
[
"def",
"at_get",
"(",
"self",
",",
"getter",
")",
":",
"self",
".",
"db",
".",
"worn",
"=",
"False"
] |
https://github.com/evennia/evennia/blob/fa79110ba6b219932f22297838e8ac72ebc0be0e/evennia/contrib/clothing.py#L302-L308
|
||
hellohaptik/chatbot_ner
|
742104790170ae5b73c583c94db6786549337dc4
|
ner_v2/detectors/temporal/date/date_detection.py
|
python
|
DateAdvancedDetector._detect_return_date
|
(self)
|
return date_dict_list
|
Finds return type dates in the given text by matching a few keywords like 'coming back', 'return date',
'leaving on', 'returning on', 'returning at', 'arriving', 'arrive'. It detects dates in the part of the text
to the right of these keywords.
Args:
Returns:
The list of dictionaries, one for each date detected as a return date.
For a return date the key "to" will be set to True.
|
Finds return type dates in the given text by matching a few keywords like 'coming back', 'return date',
'leaving on', 'returning on', 'returning at', 'arriving', 'arrive'. It detects dates in the part of the text
to the right of these keywords.
|
[
"Finds",
"return",
"type",
"dates",
"in",
"the",
"given",
"text",
"by",
"matching",
"few",
"keywords",
"like",
"coming",
"back",
"return",
"date",
"leaving",
"on",
"returning",
"on",
"returning",
"at",
"arriving",
"arrive",
".",
"It",
"detects",
"dates",
"in",
"the",
"part",
"of",
"text",
"right",
"to",
"these",
"keywords",
"."
] |
def _detect_return_date(self):
"""
Finds return type dates in the given text by matching a few keywords like 'coming back', 'return date',
'leaving on', 'returning on', 'returning at', 'arriving', 'arrive'. It detects dates in the part of the text
to the right of these keywords.
Args:
Returns:
The list of dictionaries, one for each date detected as a return date.
For a return date the key "to" will be set to True.
"""
date_dict_list = []
regex_pattern_1 = re.compile(r'\b'
r'(?:check(?:\s|\-)?out date (?:is|\:)?|'
r'coming back|return date\s?(?:\:|\-)?|returning on|returning at|'
r'arriving|arrive|return|back)'
r'\s+(.+?)(?:\band|&|(?<!\d)\.|$)', flags=re.UNICODE)
regex_pattern_2 = re.compile(r'(.+?)\s+(?:ko?\s+)?(?:aana|ana|aunga|aaun)', flags=re.UNICODE)
matches = None
matches_1 = regex_pattern_1.findall(self.processed_text)
matches_2 = regex_pattern_2.findall(self.processed_text)
if matches_1:
matches = matches_1
elif matches_2:
matches = matches_2
matches = matches or []
for match in matches:
date_dict_list.extend(self._date_dict_from_text(text=match, to_property=True))
return date_dict_list
|
[
"def",
"_detect_return_date",
"(",
"self",
")",
":",
"date_dict_list",
"=",
"[",
"]",
"regex_pattern_1",
"=",
"re",
".",
"compile",
"(",
"r'\\b'",
"r'(?:check(?:\\s|\\-)?out date (?:is|\\:)?|'",
"r'coming back|return date\\s?(?:\\:|\\-)?|returning on|returning at|'",
"r'arriving|arrive|return|back)'",
"r'\\s+(.+?)(?:\\band|&|(?<!\\d)\\.|$)'",
",",
"flags",
"=",
"re",
".",
"UNICODE",
")",
"regex_pattern_2",
"=",
"re",
".",
"compile",
"(",
"r'(.+?)\\s+(?:ko?\\s+)?(?:aana|ana|aunga|aaun)'",
",",
"flags",
"=",
"re",
".",
"UNICODE",
")",
"matches",
"=",
"None",
"matches_1",
"=",
"regex_pattern_1",
".",
"findall",
"(",
"self",
".",
"processed_text",
")",
"matches_2",
"=",
"regex_pattern_2",
".",
"findall",
"(",
"self",
".",
"processed_text",
")",
"if",
"matches_1",
":",
"matches",
"=",
"matches_1",
"elif",
"matches_2",
":",
"matches",
"=",
"matches_2",
"matches",
"=",
"matches",
"or",
"[",
"]",
"for",
"match",
"in",
"matches",
":",
"date_dict_list",
".",
"extend",
"(",
"self",
".",
"_date_dict_from_text",
"(",
"text",
"=",
"match",
",",
"to_property",
"=",
"True",
")",
")",
"return",
"date_dict_list"
] |
https://github.com/hellohaptik/chatbot_ner/blob/742104790170ae5b73c583c94db6786549337dc4/ner_v2/detectors/temporal/date/date_detection.py#L323-L355
|
|
ahmetcemturan/SFACT
|
7576e29ba72b33e5058049b77b7b558875542747
|
fabmetheus_utilities/archive.py
|
python
|
getAbsoluteFrozenFolderPath
|
(filePath, folderName='')
|
return getAbsoluteFolderPath(filePath, folderName)
|
Get the absolute frozen folder path.
|
Get the absolute frozen folder path.
|
[
"Get",
"the",
"absolute",
"frozen",
"folder",
"path",
"."
] |
def getAbsoluteFrozenFolderPath(filePath, folderName=''):
'Get the absolute frozen folder path.'
if hasattr(sys, 'frozen'):
if '.py' in filePath:
filePath = ''.join(filePath.rpartition('\\')[: 2])
filePath = os.path.join(filePath, 'skeinforge_application')
return getAbsoluteFolderPath(filePath, folderName)
|
[
"def",
"getAbsoluteFrozenFolderPath",
"(",
"filePath",
",",
"folderName",
"=",
"''",
")",
":",
"if",
"hasattr",
"(",
"sys",
",",
"'frozen'",
")",
":",
"if",
"'.py'",
"in",
"filePath",
":",
"filePath",
"=",
"''",
".",
"join",
"(",
"filePath",
".",
"rpartition",
"(",
"'\\\\'",
")",
"[",
":",
"2",
"]",
")",
"filePath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"filePath",
",",
"'skeinforge_application'",
")",
"return",
"getAbsoluteFolderPath",
"(",
"filePath",
",",
"folderName",
")"
] |
https://github.com/ahmetcemturan/SFACT/blob/7576e29ba72b33e5058049b77b7b558875542747/fabmetheus_utilities/archive.py#L39-L45
|
|
hakril/PythonForWindows
|
61e027a678d5b87aa64fcf8a37a6661a86236589
|
windows/native_exec/simple_x64.py
|
python
|
create_displacement
|
(base=None, index=None, scale=None, disp=0, prefix=None)
|
return mem_access(base, index, scale, disp, prefix)
|
[] |
def create_displacement(base=None, index=None, scale=None, disp=0, prefix=None):
if index is not None and scale is None:
scale = 1
if scale and index is None:
raise ValueError("Cannot create displacement with scale and no index")
if scale and index.upper() == "RSP":
raise ValueError("Cannot create displacement with index == RSP")
return mem_access(base, index, scale, disp, prefix)
|
[
"def",
"create_displacement",
"(",
"base",
"=",
"None",
",",
"index",
"=",
"None",
",",
"scale",
"=",
"None",
",",
"disp",
"=",
"0",
",",
"prefix",
"=",
"None",
")",
":",
"if",
"index",
"is",
"not",
"None",
"and",
"scale",
"is",
"None",
":",
"scale",
"=",
"1",
"if",
"scale",
"and",
"index",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Cannot create displacement with scale and no index\"",
")",
"if",
"scale",
"and",
"index",
".",
"upper",
"(",
")",
"==",
"\"RSP\"",
":",
"raise",
"ValueError",
"(",
"\"Cannot create displacement with index == RSP\"",
")",
"return",
"mem_access",
"(",
"base",
",",
"index",
",",
"scale",
",",
"disp",
",",
"prefix",
")"
] |
https://github.com/hakril/PythonForWindows/blob/61e027a678d5b87aa64fcf8a37a6661a86236589/windows/native_exec/simple_x64.py#L197-L204
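Hypothetical call sketches for the helper above, assuming the surrounding simple_x64 module (mem_access and friends) is imported; register names and the bracketed notation in the comments are assumptions:
create_displacement(base="RAX", disp=0x10)             # [RAX + 0x10]
create_displacement(base="RBX", index="RCX", disp=8)   # scale defaults to 1
try:
    create_displacement(index="RSP", scale=2)          # index RSP is rejected
except ValueError as e:
    print(e)  # Cannot create displacement with index == RSP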
|
|||
sagemath/sage
|
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
|
src/sage/combinat/root_system/type_marked.py
|
python
|
CartanType_affine.special_node
|
(self)
|
return self._type.special_node()
|
r"""
Return the special node of the Cartan type.
.. SEEALSO:: :meth:`~sage.combinat.root_system.CartanType_affine.special_node`
It is the special node of the non-marked Cartan type.
EXAMPLES::
sage: CartanType(['B', 3, 1]).marked_nodes([1,3]).special_node()
0
|
r"""
Return the special node of the Cartan type.
|
[
"r",
"Return",
"the",
"special",
"node",
"of",
"the",
"Cartan",
"type",
"."
] |
def special_node(self):
r"""
Return the special node of the Cartan type.
.. SEEALSO:: :meth:`~sage.combinat.root_system.CartanType_affine.special_node`
It is the special node of the non-marked Cartan type.
EXAMPLES::
sage: CartanType(['B', 3, 1]).marked_nodes([1,3]).special_node()
0
"""
return self._type.special_node()
|
[
"def",
"special_node",
"(",
"self",
")",
":",
"return",
"self",
".",
"_type",
".",
"special_node",
"(",
")"
] |
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/combinat/root_system/type_marked.py#L712-L725
|
|
zhang-can/ECO-pytorch
|
355c3866b35cdaa5d451263c1f3291c150e22eeb
|
tf_model_zoo/models/neural_gpu/data_utils.py
|
python
|
to_id
|
(s)
|
return int(s) + 1
|
Convert text to ids.
|
Convert text to ids.
|
[
"Covert",
"text",
"to",
"ids",
"."
] |
def to_id(s):
"""Covert text to ids."""
if s == "+": return 11
if s == "*": return 12
return int(s) + 1
|
[
"def",
"to_id",
"(",
"s",
")",
":",
"if",
"s",
"==",
"\"+\"",
":",
"return",
"11",
"if",
"s",
"==",
"\"*\"",
":",
"return",
"12",
"return",
"int",
"(",
"s",
")",
"+",
"1"
] |
https://github.com/zhang-can/ECO-pytorch/blob/355c3866b35cdaa5d451263c1f3291c150e22eeb/tf_model_zoo/models/neural_gpu/data_utils.py#L214-L218
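Worked examples of the mapping above: digits are shifted up by one, and the two operators take the ids just past '9':
assert to_id("0") == 1
assert to_id("9") == 10
assert to_id("+") == 11
assert to_id("*") == 12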
|
|
runawayhorse001/LearningApacheSpark
|
67f3879dce17553195f094f5728b94a01badcf24
|
pyspark/rdd.py
|
python
|
RDD.values
|
(self)
|
return self.map(lambda x: x[1])
|
Return an RDD with the values of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).values()
>>> m.collect()
[2, 4]
|
Return an RDD with the values of each tuple.
|
[
"Return",
"an",
"RDD",
"with",
"the",
"values",
"of",
"each",
"tuple",
"."
] |
def values(self):
"""
Return an RDD with the values of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).values()
>>> m.collect()
[2, 4]
"""
return self.map(lambda x: x[1])
|
[
"def",
"values",
"(",
"self",
")",
":",
"return",
"self",
".",
"map",
"(",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
")"
] |
https://github.com/runawayhorse001/LearningApacheSpark/blob/67f3879dce17553195f094f5728b94a01badcf24/pyspark/rdd.py#L1599-L1607
|
|
mrlesmithjr/Ansible
|
d44f0dc0d942bdf3bf7334b307e6048f0ee16e36
|
roles/ansible-vsphere-management/scripts/pdns/lib/python2.7/site-packages/wheel/metadata.py
|
python
|
pkginfo_unicode
|
(pkg_info, field)
|
return text
|
Hack to coax Unicode out of an email Message() - Python 3.3+
|
Hack to coax Unicode out of an email Message() - Python 3.3+
|
[
"Hack",
"to",
"coax",
"Unicode",
"out",
"of",
"an",
"email",
"Message",
"()",
"-",
"Python",
"3",
".",
"3",
"+"
] |
def pkginfo_unicode(pkg_info, field):
"""Hack to coax Unicode out of an email Message() - Python 3.3+"""
text = pkg_info[field]
field = field.lower()
if not isinstance(text, str):
if not hasattr(pkg_info, 'raw_items'): # Python 3.2
return str(text)
for item in pkg_info.raw_items():
if item[0].lower() == field:
text = item[1].encode('ascii', 'surrogateescape') \
.decode('utf-8')
break
return text
|
[
"def",
"pkginfo_unicode",
"(",
"pkg_info",
",",
"field",
")",
":",
"text",
"=",
"pkg_info",
"[",
"field",
"]",
"field",
"=",
"field",
".",
"lower",
"(",
")",
"if",
"not",
"isinstance",
"(",
"text",
",",
"str",
")",
":",
"if",
"not",
"hasattr",
"(",
"pkg_info",
",",
"'raw_items'",
")",
":",
"# Python 3.2",
"return",
"str",
"(",
"text",
")",
"for",
"item",
"in",
"pkg_info",
".",
"raw_items",
"(",
")",
":",
"if",
"item",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"==",
"field",
":",
"text",
"=",
"item",
"[",
"1",
"]",
".",
"encode",
"(",
"'ascii'",
",",
"'surrogateescape'",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"break",
"return",
"text"
] |
https://github.com/mrlesmithjr/Ansible/blob/d44f0dc0d942bdf3bf7334b307e6048f0ee16e36/roles/ansible-vsphere-management/scripts/pdns/lib/python2.7/site-packages/wheel/metadata.py#L290-L303
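A minimal sketch of the happy path, using the stdlib email parser to stand in for a parsed PKG-INFO message (the field values are made up):
from email.parser import Parser
pkg_info = Parser().parsestr("Name: example\nAuthor: J\u00f8rgen\n")
# 'Jørgen' is already a str under Python 3, so it is returned as-is
print(pkginfo_unicode(pkg_info, "Author"))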
|
|
zhl2008/awd-platform
|
0416b31abea29743387b10b3914581fbe8e7da5e
|
web_flaskbb/lib/python2.7/site-packages/whoosh/searching.py
|
python
|
Results.upgrade_and_extend
|
(self, results)
|
Combines the effects of extend() and upgrade(): hits that are also
in 'results' are raised. Then any hits from the other results object
that are not in this results object are appended to the end.
:param results: another results object.
|
Combines the effects of extend() and upgrade(): hits that are also
in 'results' are raised. Then any hits from the other results object
that are not in this results object are appended to the end.
|
[
"Combines",
"the",
"effects",
"of",
"extend",
"()",
"and",
"upgrade",
"()",
":",
"hits",
"that",
"are",
"also",
"in",
"results",
"are",
"raised",
".",
"Then",
"any",
"hits",
"from",
"the",
"other",
"results",
"object",
"that",
"are",
"not",
"in",
"this",
"results",
"object",
"are",
"appended",
"to",
"the",
"end",
"."
] |
def upgrade_and_extend(self, results):
"""Combines the effects of extend() and upgrade(): hits that are also
in 'results' are raised. Then any hits from the other results object
that are not in this results object are appended to the end.
:param results: another results object.
"""
if not len(results):
return
docs = self.docs()
otherdocs = results.docs()
arein = [item for item in self.top_n if item[1] in otherdocs]
notin = [item for item in self.top_n if item[1] not in otherdocs]
other = [item for item in results.top_n if item[1] not in docs]
self.docset = docs | otherdocs
self.top_n = arein + notin + other
|
[
"def",
"upgrade_and_extend",
"(",
"self",
",",
"results",
")",
":",
"if",
"not",
"len",
"(",
"results",
")",
":",
"return",
"docs",
"=",
"self",
".",
"docs",
"(",
")",
"otherdocs",
"=",
"results",
".",
"docs",
"(",
")",
"arein",
"=",
"[",
"item",
"for",
"item",
"in",
"self",
".",
"top_n",
"if",
"item",
"[",
"1",
"]",
"in",
"otherdocs",
"]",
"notin",
"=",
"[",
"item",
"for",
"item",
"in",
"self",
".",
"top_n",
"if",
"item",
"[",
"1",
"]",
"not",
"in",
"otherdocs",
"]",
"other",
"=",
"[",
"item",
"for",
"item",
"in",
"results",
".",
"top_n",
"if",
"item",
"[",
"1",
"]",
"not",
"in",
"docs",
"]",
"self",
".",
"docset",
"=",
"docs",
"|",
"otherdocs",
"self",
".",
"top_n",
"=",
"arein",
"+",
"notin",
"+",
"other"
] |
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/whoosh/searching.py#L1326-L1345
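A minimal sketch of the three-way partition above with plain (score, docnum) pairs; the Results internals are simplified assumptions:
docs      = {1, 2, 3}                      # this object's doc numbers
otherdocs = {2, 3, 4}                      # the other results' doc numbers
top_n     = [(0.9, 1), (0.8, 2), (0.7, 3)]
other_top = [(0.6, 2), (0.5, 4)]
arein = [item for item in top_n if item[1] in otherdocs]      # raised hits
notin = [item for item in top_n if item[1] not in otherdocs]  # kept hits
other = [item for item in other_top if item[1] not in docs]   # appended hits
print(arein + notin + other)  # [(0.8, 2), (0.7, 3), (0.9, 1), (0.5, 4)]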
|
||
carbonblack/cbapi-python
|
24d677ffd99aee911c2c76ecb5528e4e9320c7cc
|
src/cbapi/live_response_api.py
|
python
|
LiveResponseJobScheduler._cleanup_idle_workers
|
(self, max=None)
|
[] |
def _cleanup_idle_workers(self, max=None):
if not max:
max = self._max_workers
for sensor in list(self._idle_workers)[:max]:
log.debug("asking worker for sensor id {0} to exit".format(sensor))
self._job_workers[sensor].job_queue.put(None)
|
[
"def",
"_cleanup_idle_workers",
"(",
"self",
",",
"max",
"=",
"None",
")",
":",
"if",
"not",
"max",
":",
"max",
"=",
"self",
".",
"_max_workers",
"for",
"sensor",
"in",
"list",
"(",
"self",
".",
"_idle_workers",
")",
"[",
":",
"max",
"]",
":",
"log",
".",
"debug",
"(",
"\"asking worker for sensor id {0} to exit\"",
".",
"format",
"(",
"sensor",
")",
")",
"self",
".",
"_job_workers",
"[",
"sensor",
"]",
".",
"job_queue",
".",
"put",
"(",
"None",
")"
] |
https://github.com/carbonblack/cbapi-python/blob/24d677ffd99aee911c2c76ecb5528e4e9320c7cc/src/cbapi/live_response_api.py#L942-L948
|
||||
CouchPotato/CouchPotatoServer
|
7260c12f72447ddb6f062367c6dfbda03ecd4e9c
|
libs/suds/xsd/doctor.py
|
python
|
Import.__init__
|
(self, ns, location=None)
|
@param ns: An import namespace.
@type ns: str
@param location: An optional I{schemaLocation}.
@type location: str
|
[] |
def __init__(self, ns, location=None):
"""
@param ns: An import namespace.
@type ns: str
@param location: An optional I{schemaLocation}.
@type location: str
"""
self.ns = ns
self.location = location
self.filter = TnsFilter()
|
[
"def",
"__init__",
"(",
"self",
",",
"ns",
",",
"location",
"=",
"None",
")",
":",
"self",
".",
"ns",
"=",
"ns",
"self",
".",
"location",
"=",
"location",
"self",
".",
"filter",
"=",
"TnsFilter",
"(",
")"
] |
https://github.com/CouchPotato/CouchPotatoServer/blob/7260c12f72447ddb6f062367c6dfbda03ecd4e9c/libs/suds/xsd/doctor.py#L123-L132
|
|||
spack/spack
|
675210bd8bd1c5d32ad1cc83d898fb43b569ed74
|
lib/spack/spack/build_systems/waf.py
|
python
|
WafPackage.build_test
|
(self)
|
Run unit tests after build.
By default, does nothing. Override this if you want to
add package-specific tests.
|
Run unit tests after build.
|
[
"Run",
"unit",
"tests",
"after",
"build",
"."
] |
def build_test(self):
"""Run unit tests after build.
By default, does nothing. Override this if you want to
add package-specific tests.
"""
pass
|
[
"def",
"build_test",
"(",
"self",
")",
":",
"pass"
] |
https://github.com/spack/spack/blob/675210bd8bd1c5d32ad1cc83d898fb43b569ed74/lib/spack/spack/build_systems/waf.py#L109-L115
|
||
FPGAwars/apio
|
6a3451549f94223a99878b9b67c8e52b9dfcb200
|
apio/util.py
|
python
|
get_bin_dir_table
|
(base_dir)
|
return bin_dir
|
Return a table with the package name and the folder where
the executable files are stored
* Input: Table with the package base_dir
|
Return a table with the package name and the folder where
the executable files are stored
* Input: Table with the package base_dir
|
[
"Return",
"a",
"table",
"with",
"the",
"package",
"name",
"and",
"the",
"folder",
"were",
"the",
"executable",
"files",
"are",
"stored",
"*",
"Input",
":",
"Table",
"with",
"the",
"package",
"base_dir"
] |
def get_bin_dir_table(base_dir):
"""Return a table with the package name and the folder were
the executable files are stored
* Input: Table with the package base_dir
"""
bin_dir = {
OSS_CAD_SUITE: str(Path(base_dir.get(OSS_CAD_SUITE)) / BIN),
SCONS: str(Path(base_dir.get(SCONS)) / "script"),
YOSYS: str(Path(base_dir.get(YOSYS)) / BIN),
ICE40: str(Path(base_dir.get(ICE40)) / BIN),
ECP5: str(Path(base_dir.get(ECP5)) / BIN),
IVERILOG: str(Path(base_dir.get(IVERILOG)) / BIN),
VERILATOR: str(Path(base_dir.get(VERILATOR)) / BIN),
GTKWAVE: str(Path(base_dir.get(GTKWAVE)) / BIN),
FUJPROG: str(Path(base_dir.get(FUJPROG)) / BIN),
ICESPROG: str(Path(base_dir.get(ICESPROG)) / BIN),
DFU: str(Path(base_dir.get(DFU)) / BIN),
# -- Obsolete package
SYSTEM: str(Path(base_dir.get(SYSTEM)) / BIN),
}
return bin_dir
|
[
"def",
"get_bin_dir_table",
"(",
"base_dir",
")",
":",
"bin_dir",
"=",
"{",
"OSS_CAD_SUITE",
":",
"str",
"(",
"Path",
"(",
"base_dir",
".",
"get",
"(",
"OSS_CAD_SUITE",
")",
")",
"/",
"BIN",
")",
",",
"SCONS",
":",
"str",
"(",
"Path",
"(",
"base_dir",
".",
"get",
"(",
"SCONS",
")",
")",
"/",
"\"script\"",
")",
",",
"YOSYS",
":",
"str",
"(",
"Path",
"(",
"base_dir",
".",
"get",
"(",
"YOSYS",
")",
")",
"/",
"BIN",
")",
",",
"ICE40",
":",
"str",
"(",
"Path",
"(",
"base_dir",
".",
"get",
"(",
"ICE40",
")",
")",
"/",
"BIN",
")",
",",
"ECP5",
":",
"str",
"(",
"Path",
"(",
"base_dir",
".",
"get",
"(",
"ECP5",
")",
")",
"/",
"BIN",
")",
",",
"IVERILOG",
":",
"str",
"(",
"Path",
"(",
"base_dir",
".",
"get",
"(",
"IVERILOG",
")",
")",
"/",
"BIN",
")",
",",
"VERILATOR",
":",
"str",
"(",
"Path",
"(",
"base_dir",
".",
"get",
"(",
"VERILATOR",
")",
")",
"/",
"BIN",
")",
",",
"GTKWAVE",
":",
"str",
"(",
"Path",
"(",
"base_dir",
".",
"get",
"(",
"GTKWAVE",
")",
")",
"/",
"BIN",
")",
",",
"FUJPROG",
":",
"str",
"(",
"Path",
"(",
"base_dir",
".",
"get",
"(",
"FUJPROG",
")",
")",
"/",
"BIN",
")",
",",
"ICESPROG",
":",
"str",
"(",
"Path",
"(",
"base_dir",
".",
"get",
"(",
"ICESPROG",
")",
")",
"/",
"BIN",
")",
",",
"DFU",
":",
"str",
"(",
"Path",
"(",
"base_dir",
".",
"get",
"(",
"DFU",
")",
")",
"/",
"BIN",
")",
",",
"# -- Obsolete package",
"SYSTEM",
":",
"str",
"(",
"Path",
"(",
"base_dir",
".",
"get",
"(",
"SYSTEM",
")",
")",
"/",
"BIN",
")",
",",
"}",
"return",
"bin_dir"
] |
https://github.com/FPGAwars/apio/blob/6a3451549f94223a99878b9b67c8e52b9dfcb200/apio/util.py#L485-L507
|
|
kuri65536/python-for-android
|
26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891
|
python3-alpha/python-libs/gdata/finance/service.py
|
python
|
FinanceService.DeletePosition
|
(self, position_entry=None,
portfolio_id=None, ticker_id=None, transaction_feed=None)
|
return True
|
A position is deleted by deleting all its transactions.
Args:
position_entry: PositionEntry (optional; see Notes)
portfolio_id: string (optional; see Notes) This may be obtained
from a PortfolioEntry's portfolio_id attribute.
ticker_id: string (optional; see Notes) This may be obtained from
a PositionEntry's ticker_id attribute. Alternatively it can
be constructed using the security's exchange and symbol,
e.g. 'NASDAQ:GOOG'
transaction_feed: TransactionFeed (optional; see Notes)
Notes:
Either a PositionEntry OR (a portfolio ID AND ticker ID) OR
a TransactionFeed must be provided.
|
A position is deleted by deleting all its transactions.
|
[
"A",
"position",
"is",
"deleted",
"by",
"deleting",
"all",
"its",
"transactions",
"."
] |
def DeletePosition(self, position_entry=None,
portfolio_id=None, ticker_id=None, transaction_feed=None):
"""A position is deleted by deleting all its transactions.
Args:
position_entry: PositionEntry (optional; see Notes)
portfolio_id: string (optional; see Notes) This may be obtained
from a PortfolioEntry's portfolio_id attribute.
ticker_id: string (optional; see Notes) This may be obtained from
a PositionEntry's ticker_id attribute. Alternatively it can
be constructed using the security's exchange and symbol,
e.g. 'NASDAQ:GOOG'
transaction_feed: TransactionFeed (optional; see Notes)
Notes:
Either a PositionEntry OR (a portfolio ID AND ticker ID) OR
a TransactionFeed must be provided.
"""
if transaction_feed:
feed = transaction_feed
else:
if position_entry:
feed = self.GetTransactionFeed(position_entry=position_entry)
elif portfolio_id and ticker_id:
feed = self.GetTransactionFeed(
portfolio_id=portfolio_id, ticker_id=ticker_id)
for txn in feed.entry:
self.DeleteTransaction(txn)
return True
|
[
"def",
"DeletePosition",
"(",
"self",
",",
"position_entry",
"=",
"None",
",",
"portfolio_id",
"=",
"None",
",",
"ticker_id",
"=",
"None",
",",
"transaction_feed",
"=",
"None",
")",
":",
"if",
"transaction_feed",
":",
"feed",
"=",
"transaction_feed",
"else",
":",
"if",
"position_entry",
":",
"feed",
"=",
"self",
".",
"GetTransactionFeed",
"(",
"position_entry",
"=",
"position_entry",
")",
"elif",
"portfolio_id",
"and",
"ticker_id",
":",
"feed",
"=",
"self",
".",
"GetTransactionFeed",
"(",
"portfolio_id",
"=",
"portfolio_id",
",",
"ticker_id",
"=",
"ticker_id",
")",
"for",
"txn",
"in",
"feed",
".",
"entry",
":",
"self",
".",
"DeleteTransaction",
"(",
"txn",
")",
"return",
"True"
] |
https://github.com/kuri65536/python-for-android/blob/26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891/python3-alpha/python-libs/gdata/finance/service.py#L172-L200
|
|
QCoDeS/Qcodes
|
3cda2cef44812e2aa4672781f2423bf5f816f9f9
|
qcodes/instrument_drivers/tektronix/Keithley_2450.py
|
python
|
Sense2450.auto_zero_once
|
(self)
|
This command causes the instrument to refresh the reference and zero
measurements once.
|
This command causes the instrument to refresh the reference and zero
measurements once.
|
[
"This",
"command",
"causes",
"the",
"instrument",
"to",
"refresh",
"the",
"reference",
"and",
"zero",
"measurements",
"once",
"."
] |
def auto_zero_once(self) -> None:
"""
This command causes the instrument to refresh the reference and zero
measurements once.
"""
self.write(":SENSe:AZERo:ONCE")
|
[
"def",
"auto_zero_once",
"(",
"self",
")",
"->",
"None",
":",
"self",
".",
"write",
"(",
"\":SENSe:AZERo:ONCE\"",
")"
] |
https://github.com/QCoDeS/Qcodes/blob/3cda2cef44812e2aa4672781f2423bf5f816f9f9/qcodes/instrument_drivers/tektronix/Keithley_2450.py#L336-L341
|
||
w3h/isf
|
6faf0a3df185465ec17369c90ccc16e2a03a1870
|
lib/thirdparty/bitstring.py
|
python
|
Bits.split
|
(self, delimiter, start=None, end=None, count=None,
bytealigned=None)
|
return
|
Return bitstring generator by splitting using a delimiter.
The first item returned is the initial bitstring before the delimiter,
which may be an empty bitstring.
delimiter -- The bitstring used as the divider.
start -- The bit position to start the split. Defaults to 0.
end -- The bit position one past the last bit to use in the split.
Defaults to self.len.
count -- If specified then at most count items are generated.
Default is to split as many times as possible.
bytealigned -- If True splits will only occur on byte boundaries.
Raises ValueError if the delimiter is empty.
|
Return bitstring generator by splitting using a delimiter.
|
[
"Return",
"bitstring",
"generator",
"by",
"splittling",
"using",
"a",
"delimiter",
"."
] |
def split(self, delimiter, start=None, end=None, count=None,
bytealigned=None):
"""Return bitstring generator by splittling using a delimiter.
The first item returned is the initial bitstring before the delimiter,
which may be an empty bitstring.
delimiter -- The bitstring used as the divider.
start -- The bit position to start the split. Defaults to 0.
end -- The bit position one past the last bit to use in the split.
Defaults to self.len.
count -- If specified then at most count items are generated.
Default is to split as many times as possible.
bytealigned -- If True splits will only occur on byte boundaries.
Raises ValueError if the delimiter is empty.
"""
delimiter = Bits(delimiter)
if not delimiter.len:
raise ValueError("split delimiter cannot be empty.")
start, end = self._validate_slice(start, end)
if bytealigned is None:
bytealigned = globals()['bytealigned']
if count is not None and count < 0:
raise ValueError("Cannot split - count must be >= 0.")
if count == 0:
return
if bytealigned and not delimiter.len % 8 and not self._datastore.offset:
# Use the quick find method
f = self._findbytes
x = delimiter._getbytes()
else:
f = self._findregex
x = re.compile(delimiter._getbin())
found = f(x, start, end, bytealigned)
if not found:
# Initial bits are the whole bitstring being searched
yield self._slice(start, end)
return
# yield the bytes before the first occurrence of the delimiter, even if empty
yield self._slice(start, found[0])
startpos = pos = found[0]
c = 1
while count is None or c < count:
pos += delimiter.len
found = f(x, pos, end, bytealigned)
if not found:
# No more occurrences, so return the rest of the bitstring
yield self._slice(startpos, end)
return
c += 1
yield self._slice(startpos, found[0])
startpos = pos = found[0]
# Have generated count bitstrings, so time to quit.
return
|
[
"def",
"split",
"(",
"self",
",",
"delimiter",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"count",
"=",
"None",
",",
"bytealigned",
"=",
"None",
")",
":",
"delimiter",
"=",
"Bits",
"(",
"delimiter",
")",
"if",
"not",
"delimiter",
".",
"len",
":",
"raise",
"ValueError",
"(",
"\"split delimiter cannot be empty.\"",
")",
"start",
",",
"end",
"=",
"self",
".",
"_validate_slice",
"(",
"start",
",",
"end",
")",
"if",
"bytealigned",
"is",
"None",
":",
"bytealigned",
"=",
"globals",
"(",
")",
"[",
"'bytealigned'",
"]",
"if",
"count",
"is",
"not",
"None",
"and",
"count",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Cannot split - count must be >= 0.\"",
")",
"if",
"count",
"==",
"0",
":",
"return",
"if",
"bytealigned",
"and",
"not",
"delimiter",
".",
"len",
"%",
"8",
"and",
"not",
"self",
".",
"_datastore",
".",
"offset",
":",
"# Use the quick find method",
"f",
"=",
"self",
".",
"_findbytes",
"x",
"=",
"delimiter",
".",
"_getbytes",
"(",
")",
"else",
":",
"f",
"=",
"self",
".",
"_findregex",
"x",
"=",
"re",
".",
"compile",
"(",
"delimiter",
".",
"_getbin",
"(",
")",
")",
"found",
"=",
"f",
"(",
"x",
",",
"start",
",",
"end",
",",
"bytealigned",
")",
"if",
"not",
"found",
":",
"# Initial bits are the whole bitstring being searched",
"yield",
"self",
".",
"_slice",
"(",
"start",
",",
"end",
")",
"return",
"# yield the bytes before the first occurrence of the delimiter, even if empty",
"yield",
"self",
".",
"_slice",
"(",
"start",
",",
"found",
"[",
"0",
"]",
")",
"startpos",
"=",
"pos",
"=",
"found",
"[",
"0",
"]",
"c",
"=",
"1",
"while",
"count",
"is",
"None",
"or",
"c",
"<",
"count",
":",
"pos",
"+=",
"delimiter",
".",
"len",
"found",
"=",
"f",
"(",
"x",
",",
"pos",
",",
"end",
",",
"bytealigned",
")",
"if",
"not",
"found",
":",
"# No more occurrences, so return the rest of the bitstring",
"yield",
"self",
".",
"_slice",
"(",
"startpos",
",",
"end",
")",
"return",
"c",
"+=",
"1",
"yield",
"self",
".",
"_slice",
"(",
"startpos",
",",
"found",
"[",
"0",
"]",
")",
"startpos",
"=",
"pos",
"=",
"found",
"[",
"0",
"]",
"# Have generated count bitstrings, so time to quit.",
"return"
] |
https://github.com/w3h/isf/blob/6faf0a3df185465ec17369c90ccc16e2a03a1870/lib/thirdparty/bitstring.py#L2567-L2622
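Hedged usage sketch, assuming the public bitstring API; note the first yielded item is the (possibly empty) prefix before the first delimiter:
from bitstring import BitArray
s = BitArray('0x4700004711472222')
parts = list(s.split('0x47', bytealigned=True))
print([p.hex for p in parts])  # ['', '470000', '4711', '472222']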
|
|
SeuTao/TGS-Salt-Identification-Challenge-2018-_4th_place_solution
|
aae45f7040464e0dd2f85825b99ca90821f2caeb
|
loss/lovasz_losses.py
|
python
|
flatten_binary_scores
|
(scores, labels, ignore=None)
|
return vscores, vlabels
|
Flattens predictions in the batch (binary case)
Remove labels equal to 'ignore'
|
Flattens predictions in the batch (binary case)
Remove labels equal to 'ignore'
|
[
"Flattens",
"predictions",
"in",
"the",
"batch",
"(",
"binary",
"case",
")",
"Remove",
"labels",
"equal",
"to",
"ignore"
] |
def flatten_binary_scores(scores, labels, ignore=None):
"""
Flattens predictions in the batch (binary case)
Remove labels equal to 'ignore'
"""
scores = scores.view(-1)
labels = labels.view(-1)
if ignore is None:
return scores, labels
valid = (labels != ignore)
vscores = scores[valid]
vlabels = labels[valid]
return vscores, vlabels
|
[
"def",
"flatten_binary_scores",
"(",
"scores",
",",
"labels",
",",
"ignore",
"=",
"None",
")",
":",
"scores",
"=",
"scores",
".",
"view",
"(",
"-",
"1",
")",
"labels",
"=",
"labels",
".",
"view",
"(",
"-",
"1",
")",
"if",
"ignore",
"is",
"None",
":",
"return",
"scores",
",",
"labels",
"valid",
"=",
"(",
"labels",
"!=",
"ignore",
")",
"vscores",
"=",
"scores",
"[",
"valid",
"]",
"vlabels",
"=",
"labels",
"[",
"valid",
"]",
"return",
"vscores",
",",
"vlabels"
] |
https://github.com/SeuTao/TGS-Salt-Identification-Challenge-2018-_4th_place_solution/blob/aae45f7040464e0dd2f85825b99ca90821f2caeb/loss/lovasz_losses.py#L117-L129
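Minimal usage sketch, assuming PyTorch tensors and 255 as the ignore label:
import torch
scores = torch.tensor([[0.9, 0.1], [0.4, 0.8]])
labels = torch.tensor([[1, 255], [0, 1]])
vscores, vlabels = flatten_binary_scores(scores, labels, ignore=255)
print(vscores)  # tensor([0.9000, 0.4000, 0.8000])
print(vlabels)  # tensor([1, 0, 1])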
|
|
holzschu/Carnets
|
44effb10ddfc6aa5c8b0687582a724ba82c6b547
|
Library/lib/python3.7/site-packages/matplotlib-3.0.3-py3.7-macosx-10.9-x86_64.egg/matplotlib/axes/_base.py
|
python
|
_AxesBase.has_data
|
(self)
|
return (
len(self.collections) +
len(self.images) +
len(self.lines) +
len(self.patches)) > 0
|
Return *True* if any artists have been added to axes.
This should not be used to determine whether the *dataLim*
need to be updated, and may not actually be useful for
anything.
|
Return *True* if any artists have been added to axes.
|
[
"Return",
"*",
"True",
"*",
"if",
"any",
"artists",
"have",
"been",
"added",
"to",
"axes",
"."
] |
def has_data(self):
"""
Return *True* if any artists have been added to axes.
This should not be used to determine whether the *dataLim*
need to be updated, and may not actually be useful for
anything.
"""
return (
len(self.collections) +
len(self.images) +
len(self.lines) +
len(self.patches)) > 0
|
[
"def",
"has_data",
"(",
"self",
")",
":",
"return",
"(",
"len",
"(",
"self",
".",
"collections",
")",
"+",
"len",
"(",
"self",
".",
"images",
")",
"+",
"len",
"(",
"self",
".",
"lines",
")",
"+",
"len",
"(",
"self",
".",
"patches",
")",
")",
">",
"0"
] |
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/matplotlib-3.0.3-py3.7-macosx-10.9-x86_64.egg/matplotlib/axes/_base.py#L1786-L1798
|
|
oilshell/oil
|
94388e7d44a9ad879b12615f6203b38596b5a2d3
|
Python-2.7.13/Lib/lib-tk/ttk.py
|
python
|
Treeview.identify_element
|
(self, x, y)
|
return self.identify("element", x, y)
|
Returns the element at position x, y.
* Availability: Tk 8.6
|
Returns the element at position x, y.
|
[
"Returns",
"the",
"element",
"at",
"position",
"x",
"y",
"."
] |
def identify_element(self, x, y):
"""Returns the element at position x, y.
* Availability: Tk 8.6"""
return self.identify("element", x, y)
|
[
"def",
"identify_element",
"(",
"self",
",",
"x",
",",
"y",
")",
":",
"return",
"self",
".",
"identify",
"(",
"\"element\"",
",",
"x",
",",
"y",
")"
] |
https://github.com/oilshell/oil/blob/94388e7d44a9ad879b12615f6203b38596b5a2d3/Python-2.7.13/Lib/lib-tk/ttk.py#L1308-L1312
|
|
nipy/nibabel
|
4703f4d8e32be4cec30e829c2d93ebe54759bb62
|
nibabel/ecat.py
|
python
|
EcatImage.to_file_map
|
(self, file_map=None)
|
Write ECAT7 image to `file_map` or contained ``self.file_map``
The format consists of:
- A main header (512L) with dictionary entries in the form
[numAvail, nextDir, previousDir, numUsed]
- For every frame (3D volume in 4D data)
- A subheader (size = frame_offset)
- Frame data (3D volume)
|
Write ECAT7 image to `file_map` or contained ``self.file_map``
|
[
"Write",
"ECAT7",
"image",
"to",
"file_map",
"or",
"contained",
"self",
".",
"file_map"
] |
def to_file_map(self, file_map=None):
""" Write ECAT7 image to `file_map` or contained ``self.file_map``
The format consists of:
- A main header (512L) with dictionary entries in the form
[numAvail, nextDir, previousDir, numUsed]
- For every frame (3D volume in 4D data)
- A subheader (size = frame_offset)
- Frame data (3D volume)
"""
if file_map is None:
file_map = self.file_map
# It appears to be necessary to load the data before saving even if the
# data itself is not used.
self.get_fdata()
hdr = self.header
mlist = self._mlist
subheaders = self.get_subheaders()
dir_pos = 512
entry_pos = dir_pos + 16 # 528
current_dir = self._get_empty_dir()
hdr_fh, img_fh = self._get_fileholders(file_map)
hdrf = hdr_fh.get_prepare_fileobj(mode='wb')
imgf = hdrf
# Write main header
hdr.write_to(hdrf)
# Write every frame
for index in range(0, self.header['num_frames']):
# Move to subheader offset
frame_offset = subheaders._get_frame_offset(index) - 512
imgf.seek(frame_offset)
# Write subheader
subhdr = subheaders.subheaders[index]
imgf.write(subhdr.tobytes())
# Seek to the next image block
pos = imgf.tell()
imgf.seek(pos + 2)
# Get frame
image = self._subheader.raw_data_from_fileobj(index)
# Write frame images
self._write_data(image, imgf, pos + 2, endianness='>')
# Move to dictionary offset and write dictionary entry
self._write_data(mlist[index], imgf, entry_pos, endianness='>')
entry_pos = entry_pos + 16
current_dir[0] = current_dir[0] - 1
current_dir[3] = current_dir[3] + 1
# Create a new directory if the previous one is full
if current_dir[0] == 0:
# self._write_dir(current_dir, imgf, dir_pos)
self._write_data(current_dir, imgf, dir_pos)
current_dir = self._get_empty_dir()
current_dir[3] = dir_pos / 512
dir_pos = mlist[index][2] + 1
entry_pos = dir_pos + 16
tmp_avail = current_dir[0]
tmp_used = current_dir[3]
# Fill directory with empty data until directory is full
while current_dir[0] > 0:
entry_pos = dir_pos + 16 + (16 * current_dir[3])
self._write_data(np.zeros(4, dtype=np.int32), imgf, entry_pos)
current_dir[0] = current_dir[0] - 1
current_dir[3] = current_dir[3] + 1
current_dir[0] = tmp_avail
current_dir[3] = tmp_used
# Write directory index
self._write_data(current_dir, imgf, dir_pos, endianness='>')
|
[
"def",
"to_file_map",
"(",
"self",
",",
"file_map",
"=",
"None",
")",
":",
"if",
"file_map",
"is",
"None",
":",
"file_map",
"=",
"self",
".",
"file_map",
"# It appears to be necessary to load the data before saving even if the",
"# data itself is not used.",
"self",
".",
"get_fdata",
"(",
")",
"hdr",
"=",
"self",
".",
"header",
"mlist",
"=",
"self",
".",
"_mlist",
"subheaders",
"=",
"self",
".",
"get_subheaders",
"(",
")",
"dir_pos",
"=",
"512",
"entry_pos",
"=",
"dir_pos",
"+",
"16",
"# 528",
"current_dir",
"=",
"self",
".",
"_get_empty_dir",
"(",
")",
"hdr_fh",
",",
"img_fh",
"=",
"self",
".",
"_get_fileholders",
"(",
"file_map",
")",
"hdrf",
"=",
"hdr_fh",
".",
"get_prepare_fileobj",
"(",
"mode",
"=",
"'wb'",
")",
"imgf",
"=",
"hdrf",
"# Write main header",
"hdr",
".",
"write_to",
"(",
"hdrf",
")",
"# Write every frames",
"for",
"index",
"in",
"range",
"(",
"0",
",",
"self",
".",
"header",
"[",
"'num_frames'",
"]",
")",
":",
"# Move to subheader offset",
"frame_offset",
"=",
"subheaders",
".",
"_get_frame_offset",
"(",
"index",
")",
"-",
"512",
"imgf",
".",
"seek",
"(",
"frame_offset",
")",
"# Write subheader",
"subhdr",
"=",
"subheaders",
".",
"subheaders",
"[",
"index",
"]",
"imgf",
".",
"write",
"(",
"subhdr",
".",
"tobytes",
"(",
")",
")",
"# Seek to the next image block",
"pos",
"=",
"imgf",
".",
"tell",
"(",
")",
"imgf",
".",
"seek",
"(",
"pos",
"+",
"2",
")",
"# Get frame",
"image",
"=",
"self",
".",
"_subheader",
".",
"raw_data_from_fileobj",
"(",
"index",
")",
"# Write frame images",
"self",
".",
"_write_data",
"(",
"image",
",",
"imgf",
",",
"pos",
"+",
"2",
",",
"endianness",
"=",
"'>'",
")",
"# Move to dictionary offset and write dictionary entry",
"self",
".",
"_write_data",
"(",
"mlist",
"[",
"index",
"]",
",",
"imgf",
",",
"entry_pos",
",",
"endianness",
"=",
"'>'",
")",
"entry_pos",
"=",
"entry_pos",
"+",
"16",
"current_dir",
"[",
"0",
"]",
"=",
"current_dir",
"[",
"0",
"]",
"-",
"1",
"current_dir",
"[",
"3",
"]",
"=",
"current_dir",
"[",
"3",
"]",
"+",
"1",
"# Create a new directory is previous one is full",
"if",
"current_dir",
"[",
"0",
"]",
"==",
"0",
":",
"# self._write_dir(current_dir, imgf, dir_pos)",
"self",
".",
"_write_data",
"(",
"current_dir",
",",
"imgf",
",",
"dir_pos",
")",
"current_dir",
"=",
"self",
".",
"_get_empty_dir",
"(",
")",
"current_dir",
"[",
"3",
"]",
"=",
"dir_pos",
"/",
"512",
"dir_pos",
"=",
"mlist",
"[",
"index",
"]",
"[",
"2",
"]",
"+",
"1",
"entry_pos",
"=",
"dir_pos",
"+",
"16",
"tmp_avail",
"=",
"current_dir",
"[",
"0",
"]",
"tmp_used",
"=",
"current_dir",
"[",
"3",
"]",
"# Fill directory with empty data until directory is full",
"while",
"current_dir",
"[",
"0",
"]",
">",
"0",
":",
"entry_pos",
"=",
"dir_pos",
"+",
"16",
"+",
"(",
"16",
"*",
"current_dir",
"[",
"3",
"]",
")",
"self",
".",
"_write_data",
"(",
"np",
".",
"zeros",
"(",
"4",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
",",
"imgf",
",",
"entry_pos",
")",
"current_dir",
"[",
"0",
"]",
"=",
"current_dir",
"[",
"0",
"]",
"-",
"1",
"current_dir",
"[",
"3",
"]",
"=",
"current_dir",
"[",
"3",
"]",
"+",
"1",
"current_dir",
"[",
"0",
"]",
"=",
"tmp_avail",
"current_dir",
"[",
"3",
"]",
"=",
"tmp_used",
"# Write directory index",
"self",
".",
"_write_data",
"(",
"current_dir",
",",
"imgf",
",",
"dir_pos",
",",
"endianness",
"=",
"'>'",
")"
] |
https://github.com/nipy/nibabel/blob/4703f4d8e32be4cec30e829c2d93ebe54759bb62/nibabel/ecat.py#L941-L1023
|
||
Source-Python-Dev-Team/Source.Python
|
d0ffd8ccbd1e9923c9bc44936f20613c1c76b7fb
|
addons/source-python/Python3/datetime.py
|
python
|
date.isoweekday
|
(self)
|
return self.toordinal() % 7 or 7
|
Return day of the week, where Monday == 1 ... Sunday == 7.
|
Return day of the week, where Monday == 1 ... Sunday == 7.
|
[
"Return",
"day",
"of",
"the",
"week",
"where",
"Monday",
"==",
"1",
"...",
"Sunday",
"==",
"7",
"."
] |
def isoweekday(self):
"Return day of the week, where Monday == 1 ... Sunday == 7."
# 1-Jan-0001 is a Monday
return self.toordinal() % 7 or 7
|
[
"def",
"isoweekday",
"(",
"self",
")",
":",
"# 1-Jan-0001 is a Monday",
"return",
"self",
".",
"toordinal",
"(",
")",
"%",
"7",
"or",
"7"
] |
https://github.com/Source-Python-Dev-Team/Source.Python/blob/d0ffd8ccbd1e9923c9bc44936f20613c1c76b7fb/addons/source-python/Python3/datetime.py#L900-L903
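A worked example of the modular trick above: ordinal 1 is Monday, so ordinal % 7 maps Monday..Saturday to 1..6 and the `or 7` clause turns Sunday's 0 into 7 (the stdlib date type behaves the same way):
from datetime import date
d = date(2021, 1, 3)        # a Sunday
print(d.toordinal() % 7)    # 0
print(d.isoweekday())       # 7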
|
|
asdf-format/asdf
|
c1f6cf915409da5372c47ac725dc922b4bd52f7d
|
asdf/extension/_converter.py
|
python
|
Converter.tags
|
(self)
|
Get the YAML tags that this converter is capable of
handling. URI patterns are permitted, see
`asdf.util.uri_match` for details.
Returns
-------
iterable of str
Tag URIs or URI patterns.
|
Get the YAML tags that this converter is capable of
handling. URI patterns are permitted, see
`asdf.util.uri_match` for details.
|
[
"Get",
"the",
"YAML",
"tags",
"that",
"this",
"converter",
"is",
"capable",
"of",
"handling",
".",
"URI",
"patterns",
"are",
"permitted",
"see",
"asdf",
".",
"util",
".",
"uri_match",
"for",
"details",
"."
] |
def tags(self):
"""
Get the YAML tags that this converter is capable of
handling. URI patterns are permitted, see
`asdf.util.uri_match` for details.
Returns
-------
iterable of str
Tag URIs or URI patterns.
"""
pass
|
[
"def",
"tags",
"(",
"self",
")",
":",
"pass"
] |
https://github.com/asdf-format/asdf/blob/c1f6cf915409da5372c47ac725dc922b4bd52f7d/asdf/extension/_converter.py#L29-L40
|
||
pandas-dev/pandas
|
5ba7d714014ae8feaccc0dd4a98890828cf2832d
|
pandas/io/formats/format.py
|
python
|
FloatArrayFormatter._value_formatter
|
(
self,
float_format: FloatFormatType | None = None,
threshold: float | int | None = None,
)
|
return formatter
|
Returns a function to be applied on each value to format it
|
Returns a function to be applied on each value to format it
|
[
"Returns",
"a",
"function",
"to",
"be",
"applied",
"on",
"each",
"value",
"to",
"format",
"it"
] |
def _value_formatter(
self,
float_format: FloatFormatType | None = None,
threshold: float | int | None = None,
) -> Callable:
"""Returns a function to be applied on each value to format it"""
# the float_format parameter supersedes self.float_format
if float_format is None:
float_format = self.float_format
# we are going to compose different functions, to first convert to
# a string, then replace the decimal symbol, and finally chop according
# to the threshold
# when there is no float_format, we use str instead of '%g'
# because str(0.0) = '0.0' while '%g' % 0.0 = '0'
if float_format:
def base_formatter(v):
assert float_format is not None # for mypy
# error: "str" not callable
# error: Unexpected keyword argument "value" for "__call__" of
# "EngFormatter"
return (
float_format(value=v) # type: ignore[operator,call-arg]
if notna(v)
else self.na_rep
)
else:
def base_formatter(v):
return str(v) if notna(v) else self.na_rep
if self.decimal != ".":
def decimal_formatter(v):
return base_formatter(v).replace(".", self.decimal, 1)
else:
decimal_formatter = base_formatter
if threshold is None:
return decimal_formatter
def formatter(value):
if notna(value):
if abs(value) > threshold:
return decimal_formatter(value)
else:
return decimal_formatter(0.0)
else:
return self.na_rep
return formatter
|
[
"def",
"_value_formatter",
"(",
"self",
",",
"float_format",
":",
"FloatFormatType",
"|",
"None",
"=",
"None",
",",
"threshold",
":",
"float",
"|",
"int",
"|",
"None",
"=",
"None",
",",
")",
"->",
"Callable",
":",
"# the float_format parameter supersedes self.float_format",
"if",
"float_format",
"is",
"None",
":",
"float_format",
"=",
"self",
".",
"float_format",
"# we are going to compose different functions, to first convert to",
"# a string, then replace the decimal symbol, and finally chop according",
"# to the threshold",
"# when there is no float_format, we use str instead of '%g'",
"# because str(0.0) = '0.0' while '%g' % 0.0 = '0'",
"if",
"float_format",
":",
"def",
"base_formatter",
"(",
"v",
")",
":",
"assert",
"float_format",
"is",
"not",
"None",
"# for mypy",
"# error: \"str\" not callable",
"# error: Unexpected keyword argument \"value\" for \"__call__\" of",
"# \"EngFormatter\"",
"return",
"(",
"float_format",
"(",
"value",
"=",
"v",
")",
"# type: ignore[operator,call-arg]",
"if",
"notna",
"(",
"v",
")",
"else",
"self",
".",
"na_rep",
")",
"else",
":",
"def",
"base_formatter",
"(",
"v",
")",
":",
"return",
"str",
"(",
"v",
")",
"if",
"notna",
"(",
"v",
")",
"else",
"self",
".",
"na_rep",
"if",
"self",
".",
"decimal",
"!=",
"\".\"",
":",
"def",
"decimal_formatter",
"(",
"v",
")",
":",
"return",
"base_formatter",
"(",
"v",
")",
".",
"replace",
"(",
"\".\"",
",",
"self",
".",
"decimal",
",",
"1",
")",
"else",
":",
"decimal_formatter",
"=",
"base_formatter",
"if",
"threshold",
"is",
"None",
":",
"return",
"decimal_formatter",
"def",
"formatter",
"(",
"value",
")",
":",
"if",
"notna",
"(",
"value",
")",
":",
"if",
"abs",
"(",
"value",
")",
">",
"threshold",
":",
"return",
"decimal_formatter",
"(",
"value",
")",
"else",
":",
"return",
"decimal_formatter",
"(",
"0.0",
")",
"else",
":",
"return",
"self",
".",
"na_rep",
"return",
"formatter"
] |
https://github.com/pandas-dev/pandas/blob/5ba7d714014ae8feaccc0dd4a98890828cf2832d/pandas/io/formats/format.py#L1438-L1492
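A standalone sketch of the same composition idea (base formatter, then decimal swap, then threshold chop); this is an illustration, not the pandas internals:
def make_formatter(decimal=",", threshold=1e-6):
    def fmt(v):
        # format, then swap the decimal symbol once
        s = f"{v:g}".replace(".", decimal, 1)
        # values at or below the threshold collapse to a formatted zero
        return s if abs(v) > threshold else f"{0.0:g}".replace(".", decimal, 1)
    return fmt
fmt = make_formatter()
print(fmt(3.14))   # 3,14
print(fmt(1e-9))   # 0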
|
|
carlospolop/legion
|
df23a348663dcf29d20114ce475988e114e0f135
|
lib/interact.py
|
python
|
LegionPrompt.do_run
|
(self, _)
|
Execute the configured protocol attack
|
Execute the configured protocol attack
|
[
"Execute",
"the",
"confiured",
"protocol",
"attack"
] |
def do_run(self, _):
'''Execute the configured protocol attack'''
self.priv_values["protohelp"] = False
self.update_executed()
warrior = self.initW()
if warrior != -1: # If -1, then something went wrong creating the warrior
self.ws.append(warrior)
thread = Thread(target=warrior.run)
thread.start()
else:
print_error("Something went wrong, nothing is going to be executed")
|
[
"def",
"do_run",
"(",
"self",
",",
"_",
")",
":",
"self",
".",
"priv_values",
"[",
"\"protohelp\"",
"]",
"=",
"False",
"self",
".",
"update_executed",
"(",
")",
"warrior",
"=",
"self",
".",
"initW",
"(",
")",
"if",
"warrior",
"!=",
"-",
"1",
":",
"# If -1, then something went wrong creating the warrior",
"self",
".",
"ws",
".",
"append",
"(",
"warrior",
")",
"thread",
"=",
"Thread",
"(",
"target",
"=",
"warrior",
".",
"run",
")",
"thread",
".",
"start",
"(",
")",
"else",
":",
"print_error",
"(",
"\"Something went wrong, nothing is going to be executed\"",
")"
] |
https://github.com/carlospolop/legion/blob/df23a348663dcf29d20114ce475988e114e0f135/lib/interact.py#L160-L171
|
||
zetaops/ulakbus
|
bcc05abf17bbd6dbeec93809e4ad30885e94e83e
|
ulakbus/services/common/banka.py
|
python
|
authenticate
|
(func)
|
return auth
|
Decorator function for bank authorization control.
When a request arrives at the bank services, this function is triggered first and
the bank user credentials are checked.
If the credentials match the ones in the system, processing continues.
Otherwise an AuthException error is raised.
Args:
func (function): The function that must pass through the authorization check.
|
Decorator function for bank authorization control.
|
[
"Banka",
"yetkilendirme",
"kontrolü",
"için",
"decorator",
"fonksiyon",
"."
] |
def authenticate(func):
"""
Decorator function for bank authorization control.
When a request arrives at the bank services, this function is triggered first and
the bank user credentials are checked.
If the credentials match the ones in the system, processing continues.
Otherwise an AuthException error is raised.
Args:
func (function): The function that must pass through the authorization check.
"""
def auth(self):
try:
self.banka = Banka.objects.get(kod=str(self.request.input.banka_kodu))
BankaAuth.objects.get(username=self.request.input.bank_username,
password=self.request.input.bank_password,
banka=self.banka)
self.logger.info("Authentication completed successfully.")
except Exception as e:
raise AuthException("Authentication failed. %s" % e)
return func(self)
return auth
|
[
"def",
"authenticate",
"(",
"func",
")",
":",
"def",
"auth",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"banka",
"=",
"Banka",
".",
"objects",
".",
"get",
"(",
"kod",
"=",
"str",
"(",
"self",
".",
"request",
".",
"input",
".",
"banka_kodu",
")",
")",
"BankaAuth",
".",
"objects",
".",
"get",
"(",
"username",
"=",
"self",
".",
"request",
".",
"input",
".",
"bank_username",
",",
"password",
"=",
"self",
".",
"request",
".",
"input",
".",
"bank_password",
",",
"banka",
"=",
"self",
".",
"banka",
")",
"self",
".",
"logger",
".",
"info",
"(",
"\"Authentication completed successfully.\"",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"AuthException",
"(",
"\"Authentication failed. %s\"",
"%",
"e",
")",
"return",
"func",
"(",
"self",
")",
"return",
"auth"
] |
https://github.com/zetaops/ulakbus/blob/bcc05abf17bbd6dbeec93809e4ad30885e94e83e/ulakbus/services/common/banka.py#L29-L55
|
|
Latitude-Archives/AIDungeon
|
591f318091f46306d01a5307505534a32bd18024
|
generator/gpt2/src/model.py
|
python
|
shape_list
|
(x)
|
return [dynamic[i] if s is None else s for i, s in enumerate(static)]
|
Deal with dynamic shape in tensorflow cleanly.
|
Deal with dynamic shape in tensorflow cleanly.
|
[
"Deal",
"with",
"dynamic",
"shape",
"in",
"tensorflow",
"cleanly",
"."
] |
def shape_list(x):
"""Deal with dynamic shape in tensorflow cleanly."""
static = x.shape.as_list()
dynamic = tf.shape(x)
return [dynamic[i] if s is None else s for i, s in enumerate(static)]
|
[
"def",
"shape_list",
"(",
"x",
")",
":",
"static",
"=",
"x",
".",
"shape",
".",
"as_list",
"(",
")",
"dynamic",
"=",
"tf",
".",
"shape",
"(",
"x",
")",
"return",
"[",
"dynamic",
"[",
"i",
"]",
"if",
"s",
"is",
"None",
"else",
"s",
"for",
"i",
",",
"s",
"in",
"enumerate",
"(",
"static",
")",
"]"
] |
https://github.com/Latitude-Archives/AIDungeon/blob/591f318091f46306d01a5307505534a32bd18024/generator/gpt2/src/model.py#L11-L15
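Hedged usage sketch, assuming the TF1-style graph mode this GPT-2 code runs in; the placeholder shape is an arbitrary example:
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
x = tf.compat.v1.placeholder(tf.float32, [None, 1024, 768])
# the unknown batch dim stays a dynamic tensor; the rest are plain ints
print(shape_list(x))  # [<tf.Tensor 'strided_slice:0' ...>, 1024, 768]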
|
|
n1nj4sec/pupy
|
a5d766ea81fdfe3bc2c38c9bdaf10e9b75af3b39
|
pupy/pupylib/utils/rpyc_utils.py
|
python
|
redirected_stdio
|
(module, stdout=None, stderr=None)
|
r"""
Redirects the other party's ``stdin``, ``stdout`` and ``stderr`` to
those of the local party, so remote IO will occur locally.
Example usage::
with redirected_stdio(conn):
conn.modules.sys.stdout.write("hello\n") # will be printed locally
|
r"""
Redirects the other party's ``stdin``, ``stdout`` and ``stderr`` to
those of the local party, so remote IO will occur locally.
|
[
"r",
"Redirects",
"the",
"other",
"party",
"s",
"stdin",
"stdout",
"and",
"stderr",
"to",
"those",
"of",
"the",
"local",
"party",
"so",
"remote",
"IO",
"will",
"occur",
"locally",
"."
] |
def redirected_stdio(module, stdout=None, stderr=None):
r"""
Redirects the other party's ``stdin``, ``stdout`` and ``stderr`` to
those of the local party, so remote IO will occur locally.
Example usage::
with redirected_stdio(conn):
conn.modules.sys.stdout.write("hello\n") # will be printed locally
"""
ns = module.client.conn.namespace
stdin = sys.stdin
if stdout is None:
stdout = module.stdout
if stderr is None:
stderr = module.stdout
try:
ns['redirect_stdio'](
restricted(
stdin, ['softspace', 'write', 'readline', 'encoding', 'close']),
restricted(
stdout, ['softspace', 'write', 'readline', 'encoding', 'close']),
restricted(
stderr, ['softspace', 'write', 'readline', 'encoding', 'close']))
module.client.conn.register_remote_cleanup(ns['reset_stdio'])
yield
finally:
ns['reset_stdio']()
module.client.conn.unregister_remote_cleanup(ns['reset_stdio'])
|
[
"def",
"redirected_stdio",
"(",
"module",
",",
"stdout",
"=",
"None",
",",
"stderr",
"=",
"None",
")",
":",
"ns",
"=",
"module",
".",
"client",
".",
"conn",
".",
"namespace",
"stdin",
"=",
"sys",
".",
"stdin",
"if",
"stdout",
"is",
"None",
":",
"stdout",
"=",
"module",
".",
"stdout",
"if",
"stderr",
"is",
"None",
":",
"stderr",
"=",
"module",
".",
"stdout",
"try",
":",
"ns",
"[",
"'redirect_stdio'",
"]",
"(",
"restricted",
"(",
"stdin",
",",
"[",
"'softspace'",
",",
"'write'",
",",
"'readline'",
",",
"'encoding'",
",",
"'close'",
"]",
")",
",",
"restricted",
"(",
"stdout",
",",
"[",
"'softspace'",
",",
"'write'",
",",
"'readline'",
",",
"'encoding'",
",",
"'close'",
"]",
")",
",",
"restricted",
"(",
"stderr",
",",
"[",
"'softspace'",
",",
"'write'",
",",
"'readline'",
",",
"'encoding'",
",",
"'close'",
"]",
")",
")",
"module",
".",
"client",
".",
"conn",
".",
"register_remote_cleanup",
"(",
"ns",
"[",
"'reset_stdio'",
"]",
")",
"yield",
"finally",
":",
"ns",
"[",
"'reset_stdio'",
"]",
"(",
")",
"module",
".",
"client",
".",
"conn",
".",
"unregister_remote_cleanup",
"(",
"ns",
"[",
"'reset_stdio'",
"]",
")"
] |
https://github.com/n1nj4sec/pupy/blob/a5d766ea81fdfe3bc2c38c9bdaf10e9b75af3b39/pupy/pupylib/utils/rpyc_utils.py#L91-L127
|
||
IronLanguages/main
|
a949455434b1fda8c783289e897e78a9a0caabb5
|
External.LCA_RESTRICTED/Languages/IronPython/27/Lib/logging/__init__.py
|
python
|
BufferingFormatter.__init__
|
(self, linefmt=None)
|
Optionally specify a formatter which will be used to format each
individual record.
|
Optionally specify a formatter which will be used to format each
individual record.
|
[
"Optionally",
"specify",
"a",
"formatter",
"which",
"will",
"be",
"used",
"to",
"format",
"each",
"individual",
"record",
"."
] |
def __init__(self, linefmt=None):
"""
Optionally specify a formatter which will be used to format each
individual record.
"""
if linefmt:
self.linefmt = linefmt
else:
self.linefmt = _defaultFormatter
|
[
"def",
"__init__",
"(",
"self",
",",
"linefmt",
"=",
"None",
")",
":",
"if",
"linefmt",
":",
"self",
".",
"linefmt",
"=",
"linefmt",
"else",
":",
"self",
".",
"linefmt",
"=",
"_defaultFormatter"
] |
https://github.com/IronLanguages/main/blob/a949455434b1fda8c783289e897e78a9a0caabb5/External.LCA_RESTRICTED/Languages/IronPython/27/Lib/logging/__init__.py#L507-L515
|
||
researchmm/tasn
|
5dba8ccc096cedc63913730eeea14a9647911129
|
tasn-mxnet/python/mxnet/operator.py
|
python
|
PythonOp.infer_shape
|
(self, in_shape)
|
return in_shape, [in_shape[0]]
|
Interface for ``infer_shape``. Can override when creating new operators.
Parameters
----------
in_shape : list
List of argument shapes in the same order as
declared in list_arguments.
Returns
-------
in_shape : list
List of argument shapes. Can be modified from in_shape.
out_shape : list
List of output shapes calculated from in_shape,
in the same order as declared in list_arguments.
|
Interface for ``infer_shape``. Can override when creating new operators.
|
[
"Interface",
"for",
"infer_shape",
".",
"Can",
"override",
"when",
"creating",
"new",
"operators",
"."
] |
def infer_shape(self, in_shape):
"""Interface for ``infer_shape``. Can override when creating new operators.
Parameters
----------
in_shape : list
List of argument shapes in the same order as
declared in list_arguments.
Returns
-------
in_shape : list
List of argument shapes. Can be modified from in_shape.
out_shape : list
List of output shapes calculated from in_shape,
in the same order as declared in list_arguments.
"""
return in_shape, [in_shape[0]]
|
[
"def",
"infer_shape",
"(",
"self",
",",
"in_shape",
")",
":",
"return",
"in_shape",
",",
"[",
"in_shape",
"[",
"0",
"]",
"]"
] |
https://github.com/researchmm/tasn/blob/5dba8ccc096cedc63913730eeea14a9647911129/tasn-mxnet/python/mxnet/operator.py#L99-L116
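A worked example of the default contract above: the single output is assumed to share the first input's shape, so a (batch, classes) input yields the same shapes back (instantiating a real PythonOp is omitted since it needs the mxnet runtime):
in_shape = [(128, 10)]              # one input: batch of 128, 10 classes
result = (in_shape, [in_shape[0]])  # what the default infer_shape returns
print(result)                       # ([(128, 10)], [(128, 10)])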
|
|
libtcod/python-tcod
|
e12c4172baa9efdfd74aff6ee9bab8454a835248
|
tcod/sdl.py
|
python
|
Window.size
|
(self)
|
return xy[0], xy[1]
|
Return the pixel (width, height) of the window.
This attribute can be set to change the size of the window but the
given size must be greater than (1, 1) or else an exception will be
raised.
|
Return the pixel (width, height) of the window.
|
[
"Return",
"the",
"pixel",
"(",
"width",
"height",
")",
"of",
"the",
"window",
"."
] |
def size(self) -> Tuple[int, int]:
"""Return the pixel (width, height) of the window.
This attribute can be set to change the size of the window but the
given size must be greater than (1, 1) or else an exception will be
raised.
"""
xy = ffi.new("int[2]")
lib.SDL_GetWindowSize(self.p, xy, xy + 1)
return xy[0], xy[1]
|
[
"def",
"size",
"(",
"self",
")",
"->",
"Tuple",
"[",
"int",
",",
"int",
"]",
":",
"xy",
"=",
"ffi",
".",
"new",
"(",
"\"int[2]\"",
")",
"lib",
".",
"SDL_GetWindowSize",
"(",
"self",
".",
"p",
",",
"xy",
",",
"xy",
"+",
"1",
")",
"return",
"xy",
"[",
"0",
"]",
",",
"xy",
"[",
"1",
"]"
] |
https://github.com/libtcod/python-tcod/blob/e12c4172baa9efdfd74aff6ee9bab8454a835248/tcod/sdl.py#L99-L108
|
|
Python-Markdown/markdown
|
af38c42706f8dff93694d4a7572003dbd8b0ddc0
|
markdown/extensions/smarty.py
|
python
|
SubstituteTextPattern.__init__
|
(self, pattern, replace, md)
|
Replaces matches with some text.
|
Replaces matches with some text.
|
[
"Replaces",
"matches",
"with",
"some",
"text",
"."
] |
def __init__(self, pattern, replace, md):
""" Replaces matches with some text. """
HtmlInlineProcessor.__init__(self, pattern)
self.replace = replace
self.md = md
|
[
"def",
"__init__",
"(",
"self",
",",
"pattern",
",",
"replace",
",",
"md",
")",
":",
"HtmlInlineProcessor",
".",
"__init__",
"(",
"self",
",",
"pattern",
")",
"self",
".",
"replace",
"=",
"replace",
"self",
".",
"md",
"=",
"md"
] |
https://github.com/Python-Markdown/markdown/blob/af38c42706f8dff93694d4a7572003dbd8b0ddc0/markdown/extensions/smarty.py#L152-L156
|
||
meduza-corp/interstellar
|
40a801ccd7856491726f5a126621d9318cabe2e1
|
gsutil/third_party/python-gflags/gflags.py
|
python
|
FlagValues.ShortestUniquePrefixes
|
(self, fl)
|
return shortest_matches
|
Returns: dictionary; maps flag names to their shortest unique prefix.
|
Returns: dictionary; maps flag names to their shortest unique prefix.
|
[
"Returns",
":",
"dictionary",
";",
"maps",
"flag",
"names",
"to",
"their",
"shortest",
"unique",
"prefix",
"."
] |
def ShortestUniquePrefixes(self, fl):
"""Returns: dictionary; maps flag names to their shortest unique prefix."""
# Sort the list of flag names
sorted_flags = []
for name, flag in fl.items():
sorted_flags.append(name)
if flag.boolean:
sorted_flags.append('no%s' % name)
sorted_flags.sort()
# For each name in the sorted list, determine the shortest unique
# prefix by comparing itself to the next name and to the previous
# name (the latter check uses cached info from the previous loop).
shortest_matches = {}
prev_idx = 0
for flag_idx in range(len(sorted_flags)):
curr = sorted_flags[flag_idx]
if flag_idx == (len(sorted_flags) - 1):
next = None
else:
next = sorted_flags[flag_idx+1]
next_len = len(next)
for curr_idx in range(len(curr)):
if (next is None
or curr_idx >= next_len
or curr[curr_idx] != next[curr_idx]):
# curr longer than next or no more chars in common
shortest_matches[curr] = curr[:max(prev_idx, curr_idx) + 1]
prev_idx = curr_idx
break
else:
# curr shorter than (or equal to) next
shortest_matches[curr] = curr
prev_idx = curr_idx + 1 # next will need at least one more char
return shortest_matches
|
[
"def",
"ShortestUniquePrefixes",
"(",
"self",
",",
"fl",
")",
":",
"# Sort the list of flag names",
"sorted_flags",
"=",
"[",
"]",
"for",
"name",
",",
"flag",
"in",
"fl",
".",
"items",
"(",
")",
":",
"sorted_flags",
".",
"append",
"(",
"name",
")",
"if",
"flag",
".",
"boolean",
":",
"sorted_flags",
".",
"append",
"(",
"'no%s'",
"%",
"name",
")",
"sorted_flags",
".",
"sort",
"(",
")",
"# For each name in the sorted list, determine the shortest unique",
"# prefix by comparing itself to the next name and to the previous",
"# name (the latter check uses cached info from the previous loop).",
"shortest_matches",
"=",
"{",
"}",
"prev_idx",
"=",
"0",
"for",
"flag_idx",
"in",
"range",
"(",
"len",
"(",
"sorted_flags",
")",
")",
":",
"curr",
"=",
"sorted_flags",
"[",
"flag_idx",
"]",
"if",
"flag_idx",
"==",
"(",
"len",
"(",
"sorted_flags",
")",
"-",
"1",
")",
":",
"next",
"=",
"None",
"else",
":",
"next",
"=",
"sorted_flags",
"[",
"flag_idx",
"+",
"1",
"]",
"next_len",
"=",
"len",
"(",
"next",
")",
"for",
"curr_idx",
"in",
"range",
"(",
"len",
"(",
"curr",
")",
")",
":",
"if",
"(",
"next",
"is",
"None",
"or",
"curr_idx",
">=",
"next_len",
"or",
"curr",
"[",
"curr_idx",
"]",
"!=",
"next",
"[",
"curr_idx",
"]",
")",
":",
"# curr longer than next or no more chars in common",
"shortest_matches",
"[",
"curr",
"]",
"=",
"curr",
"[",
":",
"max",
"(",
"prev_idx",
",",
"curr_idx",
")",
"+",
"1",
"]",
"prev_idx",
"=",
"curr_idx",
"break",
"else",
":",
"# curr shorter than (or equal to) next",
"shortest_matches",
"[",
"curr",
"]",
"=",
"curr",
"prev_idx",
"=",
"curr_idx",
"+",
"1",
"# next will need at least one more char",
"return",
"shortest_matches"
] |
https://github.com/meduza-corp/interstellar/blob/40a801ccd7856491726f5a126621d9318cabe2e1/gsutil/third_party/python-gflags/gflags.py#L1487-L1521
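A worked example of the prefix computation; the Flag stand-in and the bare FlagValues() instantiation are assumptions, since only the .boolean attribute is consulted above:
class _Flag:
    boolean = False
fl = {"verbose": _Flag(), "version": _Flag(), "quiet": _Flag()}
# sorted names: quiet, verbose, version; 'ver' is shared, so the result is
print(FlagValues().ShortestUniquePrefixes(fl))
# {'quiet': 'q', 'verbose': 'verb', 'version': 'vers'}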
|
|
krintoxi/NoobSec-Toolkit
|
38738541cbc03cedb9a3b3ed13b629f781ad64f6
|
NoobSecToolkit - MAC OSX/tools/inject/plugins/generic/filesystem.py
|
python
|
Filesystem.writeFile
|
(self, localFile, remoteFile, fileType=None, forceCheck=False)
|
return written
|
[] |
def writeFile(self, localFile, remoteFile, fileType=None, forceCheck=False):
written = False
checkFile(localFile)
self.checkDbmsOs()
if localFile.endswith('_'):
localFile = decloakToTemp(localFile)
if conf.direct or isStackingAvailable():
if isStackingAvailable():
debugMsg = "going to upload the file '%s' with " % fileType
debugMsg += "stacked query SQL injection technique"
logger.debug(debugMsg)
written = self.stackedWriteFile(localFile, remoteFile, fileType, forceCheck)
self.cleanup(onlyFileTbl=True)
elif isTechniqueAvailable(PAYLOAD.TECHNIQUE.UNION) and Backend.isDbms(DBMS.MYSQL):
debugMsg = "going to upload the file '%s' with " % fileType
debugMsg += "UNION query SQL injection technique"
logger.debug(debugMsg)
written = self.unionWriteFile(localFile, remoteFile, fileType, forceCheck)
else:
errMsg = "none of the SQL injection techniques detected can "
errMsg += "be used to write files to the underlying file "
errMsg += "system of the back-end %s server" % Backend.getDbms()
logger.error(errMsg)
return None
return written
|
[
"def",
"writeFile",
"(",
"self",
",",
"localFile",
",",
"remoteFile",
",",
"fileType",
"=",
"None",
",",
"forceCheck",
"=",
"False",
")",
":",
"written",
"=",
"False",
"checkFile",
"(",
"localFile",
")",
"self",
".",
"checkDbmsOs",
"(",
")",
"if",
"localFile",
".",
"endswith",
"(",
"'_'",
")",
":",
"localFile",
"=",
"decloakToTemp",
"(",
"localFile",
")",
"if",
"conf",
".",
"direct",
"or",
"isStackingAvailable",
"(",
")",
":",
"if",
"isStackingAvailable",
"(",
")",
":",
"debugMsg",
"=",
"\"going to upload the file '%s' with \"",
"%",
"fileType",
"debugMsg",
"+=",
"\"stacked query SQL injection technique\"",
"logger",
".",
"debug",
"(",
"debugMsg",
")",
"written",
"=",
"self",
".",
"stackedWriteFile",
"(",
"localFile",
",",
"remoteFile",
",",
"fileType",
",",
"forceCheck",
")",
"self",
".",
"cleanup",
"(",
"onlyFileTbl",
"=",
"True",
")",
"elif",
"isTechniqueAvailable",
"(",
"PAYLOAD",
".",
"TECHNIQUE",
".",
"UNION",
")",
"and",
"Backend",
".",
"isDbms",
"(",
"DBMS",
".",
"MYSQL",
")",
":",
"debugMsg",
"=",
"\"going to upload the file '%s' with \"",
"%",
"fileType",
"debugMsg",
"+=",
"\"UNION query SQL injection technique\"",
"logger",
".",
"debug",
"(",
"debugMsg",
")",
"written",
"=",
"self",
".",
"unionWriteFile",
"(",
"localFile",
",",
"remoteFile",
",",
"fileType",
",",
"forceCheck",
")",
"else",
":",
"errMsg",
"=",
"\"none of the SQL injection techniques detected can \"",
"errMsg",
"+=",
"\"be used to write files to the underlying file \"",
"errMsg",
"+=",
"\"system of the back-end %s server\"",
"%",
"Backend",
".",
"getDbms",
"(",
")",
"logger",
".",
"error",
"(",
"errMsg",
")",
"return",
"None",
"return",
"written"
] |
https://github.com/krintoxi/NoobSec-Toolkit/blob/38738541cbc03cedb9a3b3ed13b629f781ad64f6/NoobSecToolkit - MAC OSX/tools/inject/plugins/generic/filesystem.py#L267-L299
|
|||
zhl2008/awd-platform
|
0416b31abea29743387b10b3914581fbe8e7da5e
|
web_flaskbb/Python-2.7.9/Lib/asynchat.py
|
python
|
async_chat.writable
|
(self)
|
return self.producer_fifo or (not self.connected)
|
predicate for inclusion in the writable for select()
|
predicate for inclusion in the writable for select()
|
[
"predicate",
"for",
"inclusion",
"in",
"the",
"writable",
"for",
"select",
"()"
] |
def writable (self):
"predicate for inclusion in the writable for select()"
return self.producer_fifo or (not self.connected)
|
[
"def",
"writable",
"(",
"self",
")",
":",
"return",
"self",
".",
"producer_fifo",
"or",
"(",
"not",
"self",
".",
"connected",
")"
] |
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/Python-2.7.9/Lib/asynchat.py#L207-L209
|
|
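A minimal sketch of how writable() above drives the event loop, assuming the legacy asynchat/asyncore stack (deprecated since Python 3.6, removed in 3.12): push() queues bytes on producer_fifo, which makes writable() truthy until the data is flushed.

    import asynchat

    class EchoHandler(asynchat.async_chat):
        def __init__(self, sock):
            asynchat.async_chat.__init__(self, sock=sock)
            self.set_terminator(b"\n")
            self._buffer = []

        def collect_incoming_data(self, data):
            self._buffer.append(data)

        def found_terminator(self):
            # push() appends to producer_fifo, so writable() now returns
            # a truthy value and select() starts polling for writability
            self.push(b"".join(self._buffer) + b"\n")
            self._buffer = []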
CoinAlpha/hummingbot
|
36f6149c1644c07cd36795b915f38b8f49b798e7
|
hummingbot/connector/exchange/wazirx/wazirx_exchange.py
|
python
|
WazirxExchange.tracking_states
|
(self)
|
return {
key: value.to_json()
for key, value in self._in_flight_orders.items()
if not value.is_done
}
|
:return active in-flight orders in json format, is used to save in sqlite db.
|
:return active in-flight orders in json format, is used to save in sqlite db.
|
[
":",
"return",
"active",
"in",
"-",
"flight",
"orders",
"in",
"json",
"format",
"is",
"used",
"to",
"save",
"in",
"sqlite",
"db",
"."
] |
def tracking_states(self) -> Dict[str, any]:
"""
:return active in-flight orders in json format, is used to save in sqlite db.
"""
return {
key: value.to_json()
for key, value in self._in_flight_orders.items()
if not value.is_done
}
|
[
"def",
"tracking_states",
"(",
"self",
")",
"->",
"Dict",
"[",
"str",
",",
"any",
"]",
":",
"return",
"{",
"key",
":",
"value",
".",
"to_json",
"(",
")",
"for",
"key",
",",
"value",
"in",
"self",
".",
"_in_flight_orders",
".",
"items",
"(",
")",
"if",
"not",
"value",
".",
"is_done",
"}"
] |
https://github.com/CoinAlpha/hummingbot/blob/36f6149c1644c07cd36795b915f38b8f49b798e7/hummingbot/connector/exchange/wazirx/wazirx_exchange.py#L139-L147
|
|
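A hedged sketch of the intended round-trip for the dictionary returned above: persist it, then rebuild the orders on restart. The name restore_tracking_states is an assumption based on the usual hummingbot connector layout and is not confirmed by this snippet.

    # Hypothetical save/restore cycle (restore_tracking_states is assumed):
    saved = exchange.tracking_states          # {client_order_id: json_dict}
    # ... persist `saved` to sqlite, restart the bot, load it back ...
    exchange.restore_tracking_states(saved)   # assumed inverse, rebuilding
                                              # each order via from_json()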
TheSouthFrog/stylealign
|
910632d2fccc9db61b00c265ae18a88913113c1d
|
pytorch_code/util/visualizer.py
|
python
|
Visualizer.reset
|
(self)
|
[] |
def reset(self):
self.saved = False
|
[
"def",
"reset",
"(",
"self",
")",
":",
"self",
".",
"saved",
"=",
"False"
] |
https://github.com/TheSouthFrog/stylealign/blob/910632d2fccc9db61b00c265ae18a88913113c1d/pytorch_code/util/visualizer.py#L54-L55
|
||||
pyenchant/pyenchant
|
fc2a4a3fca6a55d510d01455b814aa27cdfc961e
|
enchant/checker/__init__.py
|
python
|
SpellChecker.leading_context
|
(self, chars: int)
|
return self._array_to_string(context)
|
Get `chars` characters of leading context.
This method returns up to `chars` characters of leading
context - the text that occurs in the string immediately
before the current erroneous word.
|
Get `chars` characters of leading context.
|
[
"Get",
"chars",
"characters",
"of",
"leading",
"context",
"."
] |
def leading_context(self, chars: int) -> str:
"""Get `chars` characters of leading context.
This method returns up to `chars` characters of leading
context - the text that occurs in the string immediately
before the current erroneous word.
"""
start = max(self.wordpos - chars, 0)
context = self._text[start : self.wordpos]
return self._array_to_string(context)
|
[
"def",
"leading_context",
"(",
"self",
",",
"chars",
":",
"int",
")",
"->",
"str",
":",
"start",
"=",
"max",
"(",
"self",
".",
"wordpos",
"-",
"chars",
",",
"0",
")",
"context",
"=",
"self",
".",
"_text",
"[",
"start",
":",
"self",
".",
"wordpos",
"]",
"return",
"self",
".",
"_array_to_string",
"(",
"context",
")"
] |
https://github.com/pyenchant/pyenchant/blob/fc2a4a3fca6a55d510d01455b814aa27cdfc961e/enchant/checker/__init__.py#L377-L386
|
|
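A small usage sketch, assuming an installed en_US dictionary, showing leading_context() while iterating over spelling errors:

    from enchant.checker import SpellChecker

    chkr = SpellChecker("en_US")              # assumes en_US is installed
    chkr.set_text("This is sme text with an errror in it.")
    for err in chkr:
        # up to 12 characters of text just before the misspelled word
        print(err.word, "preceded by:", repr(err.leading_context(12)))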
khanhnamle1994/natural-language-processing
|
01d450d5ac002b0156ef4cf93a07cb508c1bcdc5
|
assignment1/.env/lib/python2.7/site-packages/numpy/lib/arraysetops.py
|
python
|
unique
|
(ar, return_index=False, return_inverse=False, return_counts=False)
|
return ret
|
Find the unique elements of an array.
Returns the sorted unique elements of an array. There are two optional
outputs in addition to the unique elements: the indices of the input array
that give the unique values, and the indices of the unique array that
reconstruct the input array.
Parameters
----------
ar : array_like
Input array. This will be flattened if it is not already 1-D.
return_index : bool, optional
If True, also return the indices of `ar` that result in the unique
array.
return_inverse : bool, optional
If True, also return the indices of the unique array that can be used
to reconstruct `ar`.
return_counts : bool, optional
.. versionadded:: 1.9.0
If True, also return the number of times each unique value comes up
in `ar`.
Returns
-------
unique : ndarray
The sorted unique values.
unique_indices : ndarray, optional
The indices of the first occurrences of the unique values in the
(flattened) original array. Only provided if `return_index` is True.
unique_inverse : ndarray, optional
The indices to reconstruct the (flattened) original array from the
unique array. Only provided if `return_inverse` is True.
unique_counts : ndarray, optional
.. versionadded:: 1.9.0
The number of times each of the unique values comes up in the
original array. Only provided if `return_counts` is True.
See Also
--------
numpy.lib.arraysetops : Module with a number of other functions for
performing set operations on arrays.
Examples
--------
>>> np.unique([1, 1, 2, 2, 3, 3])
array([1, 2, 3])
>>> a = np.array([[1, 1], [2, 3]])
>>> np.unique(a)
array([1, 2, 3])
Return the indices of the original array that give the unique values:
>>> a = np.array(['a', 'b', 'b', 'c', 'a'])
>>> u, indices = np.unique(a, return_index=True)
>>> u
array(['a', 'b', 'c'],
dtype='|S1')
>>> indices
array([0, 1, 3])
>>> a[indices]
array(['a', 'b', 'c'],
dtype='|S1')
Reconstruct the input array from the unique values:
>>> a = np.array([1, 2, 6, 4, 2, 3, 2])
>>> u, indices = np.unique(a, return_inverse=True)
>>> u
array([1, 2, 3, 4, 6])
>>> indices
array([0, 1, 4, 3, 1, 2, 1])
>>> u[indices]
array([1, 2, 6, 4, 2, 3, 2])
|
Find the unique elements of an array.
|
[
"Find",
"the",
"unique",
"elements",
"of",
"an",
"array",
"."
] |
def unique(ar, return_index=False, return_inverse=False, return_counts=False):
"""
Find the unique elements of an array.
Returns the sorted unique elements of an array. There are two optional
outputs in addition to the unique elements: the indices of the input array
that give the unique values, and the indices of the unique array that
reconstruct the input array.
Parameters
----------
ar : array_like
Input array. This will be flattened if it is not already 1-D.
return_index : bool, optional
If True, also return the indices of `ar` that result in the unique
array.
return_inverse : bool, optional
If True, also return the indices of the unique array that can be used
to reconstruct `ar`.
return_counts : bool, optional
.. versionadded:: 1.9.0
If True, also return the number of times each unique value comes up
in `ar`.
Returns
-------
unique : ndarray
The sorted unique values.
unique_indices : ndarray, optional
The indices of the first occurrences of the unique values in the
(flattened) original array. Only provided if `return_index` is True.
unique_inverse : ndarray, optional
The indices to reconstruct the (flattened) original array from the
unique array. Only provided if `return_inverse` is True.
unique_counts : ndarray, optional
.. versionadded:: 1.9.0
The number of times each of the unique values comes up in the
original array. Only provided if `return_counts` is True.
See Also
--------
numpy.lib.arraysetops : Module with a number of other functions for
performing set operations on arrays.
Examples
--------
>>> np.unique([1, 1, 2, 2, 3, 3])
array([1, 2, 3])
>>> a = np.array([[1, 1], [2, 3]])
>>> np.unique(a)
array([1, 2, 3])
Return the indices of the original array that give the unique values:
>>> a = np.array(['a', 'b', 'b', 'c', 'a'])
>>> u, indices = np.unique(a, return_index=True)
>>> u
array(['a', 'b', 'c'],
dtype='|S1')
>>> indices
array([0, 1, 3])
>>> a[indices]
array(['a', 'b', 'c'],
dtype='|S1')
Reconstruct the input array from the unique values:
>>> a = np.array([1, 2, 6, 4, 2, 3, 2])
>>> u, indices = np.unique(a, return_inverse=True)
>>> u
array([1, 2, 3, 4, 6])
>>> indices
array([0, 1, 4, 3, 1, 2, 1])
>>> u[indices]
array([1, 2, 6, 4, 2, 3, 2])
"""
ar = np.asanyarray(ar).flatten()
optional_indices = return_index or return_inverse
optional_returns = optional_indices or return_counts
if ar.size == 0:
if not optional_returns:
ret = ar
else:
ret = (ar,)
if return_index:
ret += (np.empty(0, np.bool),)
if return_inverse:
ret += (np.empty(0, np.bool),)
if return_counts:
ret += (np.empty(0, np.intp),)
return ret
if optional_indices:
perm = ar.argsort(kind='mergesort' if return_index else 'quicksort')
aux = ar[perm]
else:
ar.sort()
aux = ar
flag = np.concatenate(([True], aux[1:] != aux[:-1]))
if not optional_returns:
ret = aux[flag]
else:
ret = (aux[flag],)
if return_index:
ret += (perm[flag],)
if return_inverse:
iflag = np.cumsum(flag) - 1
iperm = perm.argsort()
ret += (np.take(iflag, iperm),)
if return_counts:
idx = np.concatenate(np.nonzero(flag) + ([ar.size],))
ret += (np.diff(idx),)
return ret
|
[
"def",
"unique",
"(",
"ar",
",",
"return_index",
"=",
"False",
",",
"return_inverse",
"=",
"False",
",",
"return_counts",
"=",
"False",
")",
":",
"ar",
"=",
"np",
".",
"asanyarray",
"(",
"ar",
")",
".",
"flatten",
"(",
")",
"optional_indices",
"=",
"return_index",
"or",
"return_inverse",
"optional_returns",
"=",
"optional_indices",
"or",
"return_counts",
"if",
"ar",
".",
"size",
"==",
"0",
":",
"if",
"not",
"optional_returns",
":",
"ret",
"=",
"ar",
"else",
":",
"ret",
"=",
"(",
"ar",
",",
")",
"if",
"return_index",
":",
"ret",
"+=",
"(",
"np",
".",
"empty",
"(",
"0",
",",
"np",
".",
"bool",
")",
",",
")",
"if",
"return_inverse",
":",
"ret",
"+=",
"(",
"np",
".",
"empty",
"(",
"0",
",",
"np",
".",
"bool",
")",
",",
")",
"if",
"return_counts",
":",
"ret",
"+=",
"(",
"np",
".",
"empty",
"(",
"0",
",",
"np",
".",
"intp",
")",
",",
")",
"return",
"ret",
"if",
"optional_indices",
":",
"perm",
"=",
"ar",
".",
"argsort",
"(",
"kind",
"=",
"'mergesort'",
"if",
"return_index",
"else",
"'quicksort'",
")",
"aux",
"=",
"ar",
"[",
"perm",
"]",
"else",
":",
"ar",
".",
"sort",
"(",
")",
"aux",
"=",
"ar",
"flag",
"=",
"np",
".",
"concatenate",
"(",
"(",
"[",
"True",
"]",
",",
"aux",
"[",
"1",
":",
"]",
"!=",
"aux",
"[",
":",
"-",
"1",
"]",
")",
")",
"if",
"not",
"optional_returns",
":",
"ret",
"=",
"aux",
"[",
"flag",
"]",
"else",
":",
"ret",
"=",
"(",
"aux",
"[",
"flag",
"]",
",",
")",
"if",
"return_index",
":",
"ret",
"+=",
"(",
"perm",
"[",
"flag",
"]",
",",
")",
"if",
"return_inverse",
":",
"iflag",
"=",
"np",
".",
"cumsum",
"(",
"flag",
")",
"-",
"1",
"iperm",
"=",
"perm",
".",
"argsort",
"(",
")",
"ret",
"+=",
"(",
"np",
".",
"take",
"(",
"iflag",
",",
"iperm",
")",
",",
")",
"if",
"return_counts",
":",
"idx",
"=",
"np",
".",
"concatenate",
"(",
"np",
".",
"nonzero",
"(",
"flag",
")",
"+",
"(",
"[",
"ar",
".",
"size",
"]",
",",
")",
")",
"ret",
"+=",
"(",
"np",
".",
"diff",
"(",
"idx",
")",
",",
")",
"return",
"ret"
] |
https://github.com/khanhnamle1994/natural-language-processing/blob/01d450d5ac002b0156ef4cf93a07cb508c1bcdc5/assignment1/.env/lib/python2.7/site-packages/numpy/lib/arraysetops.py#L96-L212
|
|
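Beyond the docstring examples, return_counts (NumPy >= 1.9) pairs each unique value with its multiplicity; a quick illustration:

    import numpy as np

    a = np.array([1, 2, 6, 4, 2, 3, 2])
    values, counts = np.unique(a, return_counts=True)
    # values -> array([1, 2, 3, 4, 6])
    # counts -> array([1, 3, 1, 1, 1])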
aws-quickstart/quickstart-redhat-openshift
|
2b87dd38b72e7e4c439a606c5a9ea458d72da612
|
functions/source/DeleteBucketContents/requests/models.py
|
python
|
Response.ok
|
(self)
|
return True
|
Returns True if :attr:`status_code` is less than 400, False if not.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
|
Returns True if :attr:`status_code` is less than 400, False if not.
|
[
"Returns",
"True",
"if",
":",
"attr",
":",
"status_code",
"is",
"less",
"than",
"400",
"False",
"if",
"not",
"."
] |
def ok(self):
"""Returns True if :attr:`status_code` is less than 400, False if not.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
try:
self.raise_for_status()
except HTTPError:
return False
return True
|
[
"def",
"ok",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"raise_for_status",
"(",
")",
"except",
"HTTPError",
":",
"return",
"False",
"return",
"True"
] |
https://github.com/aws-quickstart/quickstart-redhat-openshift/blob/2b87dd38b72e7e4c439a606c5a9ea458d72da612/functions/source/DeleteBucketContents/requests/models.py#L693-L705
|
|
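Since ok is a property (note the absence of parentheses in calling code), a typical guard looks like this; the URL is a placeholder:

    import requests

    resp = requests.get("https://example.com/")   # placeholder endpoint
    if resp.ok:                    # any status code below 400, not just 200
        print(resp.status_code)
    else:
        resp.raise_for_status()    # re-raises the swallowed HTTPError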
holzschu/Carnets
|
44effb10ddfc6aa5c8b0687582a724ba82c6b547
|
Library/lib/python3.7/site-packages/pip/_vendor/ipaddress.py
|
python
|
_BaseNetwork._get_networks_key
|
(self)
|
return (self._version, self.network_address, self.netmask)
|
Network-only key function.
Returns an object that identifies this address' network and
netmask. This function is a suitable "key" argument for sorted()
and list.sort().
|
Network-only key function.
|
[
"Network",
"-",
"only",
"key",
"function",
"."
] |
def _get_networks_key(self):
"""Network-only key function.
Returns an object that identifies this address' network and
netmask. This function is a suitable "key" argument for sorted()
and list.sort().
"""
return (self._version, self.network_address, self.netmask)
|
[
"def",
"_get_networks_key",
"(",
"self",
")",
":",
"return",
"(",
"self",
".",
"_version",
",",
"self",
".",
"network_address",
",",
"self",
".",
"netmask",
")"
] |
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/pip/_vendor/ipaddress.py#L986-L994
|
|
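Because this key orders by (version, network_address, netmask), plain sorted() on network objects yields a nesting-aware order, with a supernet sorting before its subnets:

    import ipaddress

    nets = [ipaddress.ip_network(n)
            for n in ("10.0.1.0/24", "10.0.0.0/16", "10.0.0.0/24")]
    print(sorted(nets))
    # [IPv4Network('10.0.0.0/16'), IPv4Network('10.0.0.0/24'),
    #  IPv4Network('10.0.1.0/24')]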
mgear-dev/mgear
|
06ddc26c5adb5eab07ca470c7fafa77404c8a1de
|
scripts/mgear/maya/shifter/__init__.py
|
python
|
Rig.findControlRelative
|
(self, guideName)
|
return self.components[comp_name].getControlRelation(relative_name)
|
Return the control objects in the rig matching the guide object.
Args:
guideName (str): Name of the guide object.
Returns:
transform: The relative control object
|
Return the control objects in the rig matching the guide object.
|
[
"Return",
"the",
"control",
"objects",
"in",
"the",
"rig",
"matching",
"the",
"guide",
"object",
"."
] |
def findControlRelative(self, guideName):
"""Return the control objects in the rig matching the guide object.
Args:
guideName (str): Name of the guide object.
Returns:
transform: The relative control object
"""
if guideName is None:
return self.global_ctl
# localName = self.getLocalName(guideName)
comp_name = self.getComponentName(guideName)
relative_name = self.getRelativeName(guideName)
if comp_name not in self.components.keys():
return self.global_ctl
return self.components[comp_name].getControlRelation(relative_name)
|
[
"def",
"findControlRelative",
"(",
"self",
",",
"guideName",
")",
":",
"if",
"guideName",
"is",
"None",
":",
"return",
"self",
".",
"global_ctl",
"# localName = self.getLocalName(guideName)",
"comp_name",
"=",
"self",
".",
"getComponentName",
"(",
"guideName",
")",
"relative_name",
"=",
"self",
".",
"getRelativeName",
"(",
"guideName",
")",
"if",
"comp_name",
"not",
"in",
"self",
".",
"components",
".",
"keys",
"(",
")",
":",
"return",
"self",
".",
"global_ctl",
"return",
"self",
".",
"components",
"[",
"comp_name",
"]",
".",
"getControlRelation",
"(",
"relative_name",
")"
] |
https://github.com/mgear-dev/mgear/blob/06ddc26c5adb5eab07ca470c7fafa77404c8a1de/scripts/mgear/maya/shifter/__init__.py#L568-L588
|
|
AppScale/gts
|
46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9
|
AppServer/google/appengine/tools/devappserver2/api_server.py
|
python
|
create_api_server
|
(request_info, storage_path, options, app_id, app_root,
request_context=None)
|
return APIServer('localhost', options.api_port, app_id, request_context)
|
Creates an API server.
Args:
request_info: An apiproxy_stub.RequestInfo instance used by the stubs to
lookup information about the request associated with an API call.
storage_path: A string directory for storing API stub data.
options: An instance of argparse.Namespace containing command line flags.
app_id: String representing an application ID, used for configuring paths
and string constants in API stubs.
app_root: The path to the directory containing the user's
application e.g. "/home/joe/myapp", used for locating application yaml
files, eg index.yaml for the datastore stub.
request_context: Callback for starting requests
Returns:
An instance of APIServer.
|
Creates an API server.
|
[
"Creates",
"an",
"API",
"server",
"."
] |
def create_api_server(request_info, storage_path, options, app_id, app_root,
request_context=None):
"""Creates an API server.
Args:
request_info: An apiproxy_stub.RequestInfo instance used by the stubs to
lookup information about the request associated with an API call.
storage_path: A string directory for storing API stub data.
options: An instance of argparse.Namespace containing command line flags.
app_id: String representing an application ID, used for configuring paths
and string constants in API stubs.
app_root: The path to the directory containing the user's
application e.g. "/home/joe/myapp", used for locating application yaml
files, eg index.yaml for the datastore stub.
request_context: Callback for starting requests
Returns:
An instance of APIServer.
"""
datastore_path = options.datastore_path or os.path.join(
storage_path, 'datastore.db')
logs_path = options.logs_path or os.path.join(storage_path, 'logs.db')
search_index_path = options.search_indexes_path or os.path.join(
storage_path, 'search_indexes')
prospective_search_path = options.prospective_search_path or os.path.join(
storage_path, 'prospective-search')
blobstore_path = options.blobstore_path or os.path.join(
storage_path, 'blobs')
if options.clear_datastore:
_clear_datastore_storage(datastore_path)
if options.clear_prospective_search:
_clear_prospective_search_storage(prospective_search_path)
if options.clear_search_indexes:
_clear_search_indexes_storage(search_index_path)
if options.auto_id_policy == datastore_stub_util.SEQUENTIAL:
logging.warn("--auto_id_policy='sequential' is deprecated. This option "
"will be removed in a future release.")
application_address = '%s' % options.host
if options.port and options.port != 80:
application_address += ':' + str(options.port)
user_login_url = '/%s?%s=%%s' % (
login.LOGIN_URL_RELATIVE, login.CONTINUE_PARAM)
user_logout_url = '%s&%s=%s' % (
user_login_url, login.ACTION_PARAM, login.LOGOUT_ACTION)
if options.datastore_consistency_policy == 'time':
consistency = datastore_stub_util.TimeBasedHRConsistencyPolicy()
elif options.datastore_consistency_policy == 'random':
consistency = datastore_stub_util.PseudoRandomHRConsistencyPolicy()
elif options.datastore_consistency_policy == 'consistent':
consistency = datastore_stub_util.PseudoRandomHRConsistencyPolicy(1.0)
else:
assert 0, ('unknown consistency policy: %r' %
options.datastore_consistency_policy)
app_identity_location = None
if options.external_api_port:
app_identity_location = ':'.join(['localhost',
str(options.external_api_port)])
maybe_convert_datastore_file_stub_data_to_sqlite(app_id, datastore_path)
setup_stubs(
request_data=request_info,
app_id=app_id,
application_root=app_root,
# The "trusted" flag is only relevant for Google administrative
# applications.
trusted=getattr(options, 'trusted', False),
blobstore_path=blobstore_path,
datastore_path=datastore_path,
datastore_consistency=consistency,
datastore_require_indexes=options.require_indexes,
datastore_auto_id_policy=options.auto_id_policy,
images_host_prefix='http://%s' % application_address,
logs_path=logs_path,
mail_smtp_host=options.smtp_host,
mail_smtp_port=options.smtp_port,
mail_smtp_user=options.smtp_user,
mail_smtp_password=options.smtp_password,
mail_enable_sendmail=options.enable_sendmail,
mail_show_mail_body=options.show_mail_body,
matcher_prospective_search_path=prospective_search_path,
search_index_path=search_index_path,
taskqueue_auto_run_tasks=options.enable_task_running,
taskqueue_default_http_server=application_address,
user_login_url=user_login_url,
user_logout_url=user_logout_url,
default_gcs_bucket_name=options.default_gcs_bucket_name,
uaserver_path=options.uaserver_path,
xmpp_path=options.xmpp_path,
xmpp_domain=options.login_server,
app_identity_location=app_identity_location)
# The APIServer must bind to localhost because that is what the runtime
# instances talk to.
return APIServer('localhost', options.api_port, app_id, request_context)
|
[
"def",
"create_api_server",
"(",
"request_info",
",",
"storage_path",
",",
"options",
",",
"app_id",
",",
"app_root",
",",
"request_context",
"=",
"None",
")",
":",
"datastore_path",
"=",
"options",
".",
"datastore_path",
"or",
"os",
".",
"path",
".",
"join",
"(",
"storage_path",
",",
"'datastore.db'",
")",
"logs_path",
"=",
"options",
".",
"logs_path",
"or",
"os",
".",
"path",
".",
"join",
"(",
"storage_path",
",",
"'logs.db'",
")",
"search_index_path",
"=",
"options",
".",
"search_indexes_path",
"or",
"os",
".",
"path",
".",
"join",
"(",
"storage_path",
",",
"'search_indexes'",
")",
"prospective_search_path",
"=",
"options",
".",
"prospective_search_path",
"or",
"os",
".",
"path",
".",
"join",
"(",
"storage_path",
",",
"'prospective-search'",
")",
"blobstore_path",
"=",
"options",
".",
"blobstore_path",
"or",
"os",
".",
"path",
".",
"join",
"(",
"storage_path",
",",
"'blobs'",
")",
"if",
"options",
".",
"clear_datastore",
":",
"_clear_datastore_storage",
"(",
"datastore_path",
")",
"if",
"options",
".",
"clear_prospective_search",
":",
"_clear_prospective_search_storage",
"(",
"prospective_search_path",
")",
"if",
"options",
".",
"clear_search_indexes",
":",
"_clear_search_indexes_storage",
"(",
"search_index_path",
")",
"if",
"options",
".",
"auto_id_policy",
"==",
"datastore_stub_util",
".",
"SEQUENTIAL",
":",
"logging",
".",
"warn",
"(",
"\"--auto_id_policy='sequential' is deprecated. This option \"",
"\"will be removed in a future release.\"",
")",
"application_address",
"=",
"'%s'",
"%",
"options",
".",
"host",
"if",
"options",
".",
"port",
"and",
"options",
".",
"port",
"!=",
"80",
":",
"application_address",
"+=",
"':'",
"+",
"str",
"(",
"options",
".",
"port",
")",
"user_login_url",
"=",
"'/%s?%s=%%s'",
"%",
"(",
"login",
".",
"LOGIN_URL_RELATIVE",
",",
"login",
".",
"CONTINUE_PARAM",
")",
"user_logout_url",
"=",
"'%s&%s=%s'",
"%",
"(",
"user_login_url",
",",
"login",
".",
"ACTION_PARAM",
",",
"login",
".",
"LOGOUT_ACTION",
")",
"if",
"options",
".",
"datastore_consistency_policy",
"==",
"'time'",
":",
"consistency",
"=",
"datastore_stub_util",
".",
"TimeBasedHRConsistencyPolicy",
"(",
")",
"elif",
"options",
".",
"datastore_consistency_policy",
"==",
"'random'",
":",
"consistency",
"=",
"datastore_stub_util",
".",
"PseudoRandomHRConsistencyPolicy",
"(",
")",
"elif",
"options",
".",
"datastore_consistency_policy",
"==",
"'consistent'",
":",
"consistency",
"=",
"datastore_stub_util",
".",
"PseudoRandomHRConsistencyPolicy",
"(",
"1.0",
")",
"else",
":",
"assert",
"0",
",",
"(",
"'unknown consistency policy: %r'",
"%",
"options",
".",
"datastore_consistency_policy",
")",
"app_identity_location",
"=",
"None",
"if",
"options",
".",
"external_api_port",
":",
"app_identity_location",
"=",
"':'",
".",
"join",
"(",
"[",
"'localhost'",
",",
"str",
"(",
"options",
".",
"external_api_port",
")",
"]",
")",
"maybe_convert_datastore_file_stub_data_to_sqlite",
"(",
"app_id",
",",
"datastore_path",
")",
"setup_stubs",
"(",
"request_data",
"=",
"request_info",
",",
"app_id",
"=",
"app_id",
",",
"application_root",
"=",
"app_root",
",",
"# The \"trusted\" flag is only relevant for Google administrative",
"# applications.",
"trusted",
"=",
"getattr",
"(",
"options",
",",
"'trusted'",
",",
"False",
")",
",",
"blobstore_path",
"=",
"blobstore_path",
",",
"datastore_path",
"=",
"datastore_path",
",",
"datastore_consistency",
"=",
"consistency",
",",
"datastore_require_indexes",
"=",
"options",
".",
"require_indexes",
",",
"datastore_auto_id_policy",
"=",
"options",
".",
"auto_id_policy",
",",
"images_host_prefix",
"=",
"'http://%s'",
"%",
"application_address",
",",
"logs_path",
"=",
"logs_path",
",",
"mail_smtp_host",
"=",
"options",
".",
"smtp_host",
",",
"mail_smtp_port",
"=",
"options",
".",
"smtp_port",
",",
"mail_smtp_user",
"=",
"options",
".",
"smtp_user",
",",
"mail_smtp_password",
"=",
"options",
".",
"smtp_password",
",",
"mail_enable_sendmail",
"=",
"options",
".",
"enable_sendmail",
",",
"mail_show_mail_body",
"=",
"options",
".",
"show_mail_body",
",",
"matcher_prospective_search_path",
"=",
"prospective_search_path",
",",
"search_index_path",
"=",
"search_index_path",
",",
"taskqueue_auto_run_tasks",
"=",
"options",
".",
"enable_task_running",
",",
"taskqueue_default_http_server",
"=",
"application_address",
",",
"user_login_url",
"=",
"user_login_url",
",",
"user_logout_url",
"=",
"user_logout_url",
",",
"default_gcs_bucket_name",
"=",
"options",
".",
"default_gcs_bucket_name",
",",
"uaserver_path",
"=",
"options",
".",
"uaserver_path",
",",
"xmpp_path",
"=",
"options",
".",
"xmpp_path",
",",
"xmpp_domain",
"=",
"options",
".",
"login_server",
",",
"app_identity_location",
"=",
"app_identity_location",
")",
"# The APIServer must bind to localhost because that is what the runtime",
"# instances talk to.",
"return",
"APIServer",
"(",
"'localhost'",
",",
"options",
".",
"api_port",
",",
"app_id",
",",
"request_context",
")"
] |
https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AppServer/google/appengine/tools/devappserver2/api_server.py#L234-L333
|
|
tp4a/teleport
|
1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad
|
server/www/packages/packages-linux/x64/cffi/backend_ctypes.py
|
python
|
CTypesData._create_ctype_obj
|
(cls, init)
|
[] |
def _create_ctype_obj(cls, init):
if init is None:
return cls._arg_to_ctypes()
else:
return cls._arg_to_ctypes(init)
|
[
"def",
"_create_ctype_obj",
"(",
"cls",
",",
"init",
")",
":",
"if",
"init",
"is",
"None",
":",
"return",
"cls",
".",
"_arg_to_ctypes",
"(",
")",
"else",
":",
"return",
"cls",
".",
"_arg_to_ctypes",
"(",
"init",
")"
] |
https://github.com/tp4a/teleport/blob/1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad/server/www/packages/packages-linux/x64/cffi/backend_ctypes.py#L47-L51
|
||||
chubin/cheat.sh
|
46d1a5f73c6b88da15d809154245dbf234e9479e
|
lib/fmt/comments.py
|
python
|
_language_name
|
(name)
|
return VIM_NAME.get(name, name)
|
[] |
def _language_name(name):
return VIM_NAME.get(name, name)
|
[
"def",
"_language_name",
"(",
"name",
")",
":",
"return",
"VIM_NAME",
".",
"get",
"(",
"name",
",",
"name",
")"
] |
https://github.com/chubin/cheat.sh/blob/46d1a5f73c6b88da15d809154245dbf234e9479e/lib/fmt/comments.py#L41-L42
|
|||
CouchPotato/CouchPotatoV1
|
135b3331d1b88ef645e29b76f2d4cc4a732c9232
|
library/hachoir_parser/parser_list.py
|
python
|
ParserList.print_
|
(self, title=None, out=None, verbose=False, format="one-line")
|
Display a list of parser with its title
* out: output file
* title : title of the list to display
* format: "rest", "trac", "file-ext", "mime" or "one_line" (default)
|
Display a list of parser with its title
* out: output file
* title : title of the list to display
* format: "rest", "trac", "file-ext", "mime" or "one_line" (default)
|
[
"Display",
"a",
"list",
"of",
"parser",
"with",
"its",
"title",
"*",
"out",
":",
"output",
"file",
"*",
"title",
":",
"title",
"of",
"the",
"list",
"to",
"display",
"*",
"format",
":",
"rest",
"trac",
"file",
"-",
"ext",
"mime",
"or",
"one_line",
"(",
"default",
")"
] |
def print_(self, title=None, out=None, verbose=False, format="one-line"):
"""Display a list of parser with its title
* out: output file
* title : title of the list to display
* format: "rest", "trac", "file-ext", "mime" or "one_line" (default)
"""
if out is None:
out = sys.stdout
if format in ("file-ext", "mime"):
# Create file extension set
extensions = set()
for parser in self:
file_ext = parser.getParserTags().get(format, ())
file_ext = list(file_ext)
try:
file_ext.remove("")
except ValueError:
pass
extensions |= set(file_ext)
# Remove empty extension
extensions -= set(('',))
# Convert to list and sort by ASCII order
extensions = list(extensions)
extensions.sort()
# Print list
text = ", ".join( str(item) for item in extensions )
if format == "file-ext":
print >>out, "File extensions: %s." % text
print >>out
print >>out, "Total: %s file extensions." % len(extensions)
else:
print >>out, "MIME types: %s." % text
print >>out
print >>out, "Total: %s MIME types." % len(extensions)
return
if format == "trac":
print >>out, "== List of parsers =="
print >>out
print >>out, "Total: %s parsers" % len(self.parser_list)
print >>out
elif format == "one_line":
if title:
print >>out, title
else:
print >>out, _("List of Hachoir parsers.")
print >>out
# Create parser list sorted by module
bycategory = self.bytag["category"]
for category in sorted(bycategory.iterkeys()):
if format == "one_line":
parser_list = [ parser.PARSER_TAGS["id"] for parser in bycategory[category] ]
parser_list.sort()
print >>out, "- %s: %s" % (category.title(), ", ".join(parser_list))
else:
if format == "rest":
print >>out, category.replace("_", " ").title()
print >>out, "-" * len(category)
print >>out
elif format == "trac":
print >>out, "=== %s ===" % category.replace("_", " ").title()
print >>out
else:
print >>out, "[%s]" % category
parser_list = sorted(bycategory[category],
key=lambda parser: parser.PARSER_TAGS["id"])
if format == "rest":
for parser in parser_list:
tags = parser.getParserTags()
print >>out, "* %s: %s" % (tags["id"], tags["description"])
elif format == "trac":
for parser in parser_list:
tags = parser.getParserTags()
desc = tags["description"]
desc = re.sub(r"([A-Z][a-z]+[A-Z][^ ]+)", r"!\1", desc)
print >>out, " * %s: %s" % (tags["id"], desc)
else:
for parser in parser_list:
parser.print_(out, verbose)
print >>out
if format != "trac":
print >>out, "Total: %s parsers" % len(self.parser_list)
|
[
"def",
"print_",
"(",
"self",
",",
"title",
"=",
"None",
",",
"out",
"=",
"None",
",",
"verbose",
"=",
"False",
",",
"format",
"=",
"\"one-line\"",
")",
":",
"if",
"out",
"is",
"None",
":",
"out",
"=",
"sys",
".",
"stdout",
"if",
"format",
"in",
"(",
"\"file-ext\"",
",",
"\"mime\"",
")",
":",
"# Create file extension set",
"extensions",
"=",
"set",
"(",
")",
"for",
"parser",
"in",
"self",
":",
"file_ext",
"=",
"parser",
".",
"getParserTags",
"(",
")",
".",
"get",
"(",
"format",
",",
"(",
")",
")",
"file_ext",
"=",
"list",
"(",
"file_ext",
")",
"try",
":",
"file_ext",
".",
"remove",
"(",
"\"\"",
")",
"except",
"ValueError",
":",
"pass",
"extensions",
"|=",
"set",
"(",
"file_ext",
")",
"# Remove empty extension",
"extensions",
"-=",
"set",
"(",
"(",
"''",
",",
")",
")",
"# Convert to list and sort by ASCII order",
"extensions",
"=",
"list",
"(",
"extensions",
")",
"extensions",
".",
"sort",
"(",
")",
"# Print list",
"text",
"=",
"\", \"",
".",
"join",
"(",
"str",
"(",
"item",
")",
"for",
"item",
"in",
"extensions",
")",
"if",
"format",
"==",
"\"file-ext\"",
":",
"print",
">>",
"out",
",",
"\"File extensions: %s.\"",
"%",
"text",
"print",
">>",
"out",
"print",
">>",
"out",
",",
"\"Total: %s file extensions.\"",
"%",
"len",
"(",
"extensions",
")",
"else",
":",
"print",
">>",
"out",
",",
"\"MIME types: %s.\"",
"%",
"text",
"print",
">>",
"out",
"print",
">>",
"out",
",",
"\"Total: %s MIME types.\"",
"%",
"len",
"(",
"extensions",
")",
"return",
"if",
"format",
"==",
"\"trac\"",
":",
"print",
">>",
"out",
",",
"\"== List of parsers ==\"",
"print",
">>",
"out",
"print",
">>",
"out",
",",
"\"Total: %s parsers\"",
"%",
"len",
"(",
"self",
".",
"parser_list",
")",
"print",
">>",
"out",
"elif",
"format",
"==",
"\"one_line\"",
":",
"if",
"title",
":",
"print",
">>",
"out",
",",
"title",
"else",
":",
"print",
">>",
"out",
",",
"_",
"(",
"\"List of Hachoir parsers.\"",
")",
"print",
">>",
"out",
"# Create parser list sorted by module",
"bycategory",
"=",
"self",
".",
"bytag",
"[",
"\"category\"",
"]",
"for",
"category",
"in",
"sorted",
"(",
"bycategory",
".",
"iterkeys",
"(",
")",
")",
":",
"if",
"format",
"==",
"\"one_line\"",
":",
"parser_list",
"=",
"[",
"parser",
".",
"PARSER_TAGS",
"[",
"\"id\"",
"]",
"for",
"parser",
"in",
"bycategory",
"[",
"category",
"]",
"]",
"parser_list",
".",
"sort",
"(",
")",
"print",
">>",
"out",
",",
"\"- %s: %s\"",
"%",
"(",
"category",
".",
"title",
"(",
")",
",",
"\", \"",
".",
"join",
"(",
"parser_list",
")",
")",
"else",
":",
"if",
"format",
"==",
"\"rest\"",
":",
"print",
">>",
"out",
",",
"category",
".",
"replace",
"(",
"\"_\"",
",",
"\" \"",
")",
".",
"title",
"(",
")",
"print",
">>",
"out",
",",
"\"-\"",
"*",
"len",
"(",
"category",
")",
"print",
">>",
"out",
"elif",
"format",
"==",
"\"trac\"",
":",
"print",
">>",
"out",
",",
"\"=== %s ===\"",
"%",
"category",
".",
"replace",
"(",
"\"_\"",
",",
"\" \"",
")",
".",
"title",
"(",
")",
"print",
">>",
"out",
"else",
":",
"print",
">>",
"out",
",",
"\"[%s]\"",
"%",
"category",
"parser_list",
"=",
"sorted",
"(",
"bycategory",
"[",
"category",
"]",
",",
"key",
"=",
"lambda",
"parser",
":",
"parser",
".",
"PARSER_TAGS",
"[",
"\"id\"",
"]",
")",
"if",
"format",
"==",
"\"rest\"",
":",
"for",
"parser",
"in",
"parser_list",
":",
"tags",
"=",
"parser",
".",
"getParserTags",
"(",
")",
"print",
">>",
"out",
",",
"\"* %s: %s\"",
"%",
"(",
"tags",
"[",
"\"id\"",
"]",
",",
"tags",
"[",
"\"description\"",
"]",
")",
"elif",
"format",
"==",
"\"trac\"",
":",
"for",
"parser",
"in",
"parser_list",
":",
"tags",
"=",
"parser",
".",
"getParserTags",
"(",
")",
"desc",
"=",
"tags",
"[",
"\"description\"",
"]",
"desc",
"=",
"re",
".",
"sub",
"(",
"r\"([A-Z][a-z]+[A-Z][^ ]+)\"",
",",
"r\"!\\1\"",
",",
"desc",
")",
"print",
">>",
"out",
",",
"\" * %s: %s\"",
"%",
"(",
"tags",
"[",
"\"id\"",
"]",
",",
"desc",
")",
"else",
":",
"for",
"parser",
"in",
"parser_list",
":",
"parser",
".",
"print_",
"(",
"out",
",",
"verbose",
")",
"print",
">>",
"out",
"if",
"format",
"!=",
"\"trac\"",
":",
"print",
">>",
"out",
",",
"\"Total: %s parsers\"",
"%",
"len",
"(",
"self",
".",
"parser_list",
")"
] |
https://github.com/CouchPotato/CouchPotatoV1/blob/135b3331d1b88ef645e29b76f2d4cc4a732c9232/library/hachoir_parser/parser_list.py#L88-L174
|
||
jazzband/sorl-thumbnail
|
4fcb55df2cbebb0464c9de6c00e3d089273d687a
|
sorl/thumbnail/engines/base.py
|
python
|
EngineBase.blur
|
(self, image, geometry, options)
|
return image
|
Wrapper for ``_blur``
|
Wrapper for ``_blur``
|
[
"Wrapper",
"for",
"_blur"
] |
def blur(self, image, geometry, options):
"""
Wrapper for ``_blur``
"""
if options.get('blur'):
return self._blur(image, int(options.get('blur')))
return image
|
[
"def",
"blur",
"(",
"self",
",",
"image",
",",
"geometry",
",",
"options",
")",
":",
"if",
"options",
".",
"get",
"(",
"'blur'",
")",
":",
"return",
"self",
".",
"_blur",
"(",
"image",
",",
"int",
"(",
"options",
".",
"get",
"(",
"'blur'",
")",
")",
")",
"return",
"image"
] |
https://github.com/jazzband/sorl-thumbnail/blob/4fcb55df2cbebb0464c9de6c00e3d089273d687a/sorl/thumbnail/engines/base.py#L118-L124
|
|
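A sketch of how the blur option reaches this wrapper from user code, assuming the standard sorl-thumbnail get_thumbnail entry point and a configured engine; `photo` is a hypothetical model instance:

    from sorl.thumbnail import get_thumbnail

    # `blur=8` is forwarded through EngineBase.blur to the engine's _blur
    im = get_thumbnail(photo.image, '200x200', blur=8)   # photo is assumed
    print(im.url)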
jython/frozen-mirror
|
b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99
|
lib-python/2.7/plat-mac/lib-scriptpackages/StdSuites/Standard_Suite.py
|
python
|
Standard_Suite_Events.duplicate
|
(self, _object, _attributes={}, **_arguments)
|
duplicate: Duplicate one or more objects
Required argument: the object(s) to duplicate
Keyword argument to: the new location for the object(s)
Keyword argument with_properties: the initial values for properties of the new object that are to be different from the original
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: to the duplicated object(s)
|
duplicate: Duplicate one or more objects
Required argument: the object(s) to duplicate
Keyword argument to: the new location for the object(s)
Keyword argument with_properties: the initial values for properties of the new object that are to be different from the original
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: to the duplicated object(s)
|
[
"duplicate",
":",
"Duplicate",
"one",
"or",
"more",
"objects",
"Required",
"argument",
":",
"the",
"object",
"(",
"s",
")",
"to",
"duplicate",
"Keyword",
"argument",
"to",
":",
"the",
"new",
"location",
"for",
"the",
"object",
"(",
"s",
")",
"Keyword",
"argument",
"with_properties",
":",
"the",
"initial",
"values",
"for",
"properties",
"of",
"the",
"new",
"object",
"that",
"are",
"to",
"be",
"different",
"from",
"the",
"original",
"Keyword",
"argument",
"_attributes",
":",
"AppleEvent",
"attribute",
"dictionary",
"Returns",
":",
"to",
"the",
"duplicated",
"object",
"(",
"s",
")"
] |
def duplicate(self, _object, _attributes={}, **_arguments):
"""duplicate: Duplicate one or more objects
Required argument: the object(s) to duplicate
Keyword argument to: the new location for the object(s)
Keyword argument with_properties: the initial values for properties of the new object that are to be different from the original
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: to the duplicated object(s)
"""
_code = 'core'
_subcode = 'clon'
aetools.keysubst(_arguments, self._argmap_duplicate)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
|
[
"def",
"duplicate",
"(",
"self",
",",
"_object",
",",
"_attributes",
"=",
"{",
"}",
",",
"*",
"*",
"_arguments",
")",
":",
"_code",
"=",
"'core'",
"_subcode",
"=",
"'clon'",
"aetools",
".",
"keysubst",
"(",
"_arguments",
",",
"self",
".",
"_argmap_duplicate",
")",
"_arguments",
"[",
"'----'",
"]",
"=",
"_object",
"_reply",
",",
"_arguments",
",",
"_attributes",
"=",
"self",
".",
"send",
"(",
"_code",
",",
"_subcode",
",",
"_arguments",
",",
"_attributes",
")",
"if",
"_arguments",
".",
"get",
"(",
"'errn'",
",",
"0",
")",
":",
"raise",
"aetools",
".",
"Error",
",",
"aetools",
".",
"decodeerror",
"(",
"_arguments",
")",
"# XXXX Optionally decode result",
"if",
"_arguments",
".",
"has_key",
"(",
"'----'",
")",
":",
"return",
"_arguments",
"[",
"'----'",
"]"
] |
https://github.com/jython/frozen-mirror/blob/b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99/lib-python/2.7/plat-mac/lib-scriptpackages/StdSuites/Standard_Suite.py#L147-L168
|
||
blogbar/blogbar
|
4d3703af1a9bdc6d0db97907bcea65432ace49aa
|
application/controllers/site.py
|
python
|
wiki
|
()
|
return render_template('site/wiki.html')
|
帮助
|
帮助
|
[
"帮助"
] |
def wiki():
"""帮助"""
return render_template('site/wiki.html')
|
[
"def",
"wiki",
"(",
")",
":",
"return",
"render_template",
"(",
"'site/wiki.html'",
")"
] |
https://github.com/blogbar/blogbar/blob/4d3703af1a9bdc6d0db97907bcea65432ace49aa/application/controllers/site.py#L66-L68
|
|
espnet/espnet
|
ea411f3f627b8f101c211e107d0ff7053344ac80
|
espnet/nets/asr_interface.py
|
python
|
ASRInterface.forward
|
(self, xs, ilens, ys)
|
Compute loss for training.
:param xs:
For pytorch, batch of padded source sequences torch.Tensor (B, Tmax, idim)
For chainer, list of source sequences chainer.Variable
:param ilens: batch of lengths of source sequences (B)
For pytorch, torch.Tensor
For chainer, list of int
:param ys:
For pytorch, batch of padded source sequences torch.Tensor (B, Lmax)
For chainer, list of source sequences chainer.Variable
:return: loss value
:rtype: torch.Tensor for pytorch, chainer.Variable for chainer
|
Compute loss for training.
|
[
"Compute",
"loss",
"for",
"training",
"."
] |
def forward(self, xs, ilens, ys):
"""Compute loss for training.
:param xs:
For pytorch, batch of padded source sequences torch.Tensor (B, Tmax, idim)
For chainer, list of source sequences chainer.Variable
:param ilens: batch of lengths of source sequences (B)
For pytorch, torch.Tensor
For chainer, list of int
:param ys:
For pytorch, batch of padded source sequences torch.Tensor (B, Lmax)
For chainer, list of source sequences chainer.Variable
:return: loss value
:rtype: torch.Tensor for pytorch, chainer.Variable for chainer
"""
raise NotImplementedError("forward method is not implemented")
|
[
"def",
"forward",
"(",
"self",
",",
"xs",
",",
"ilens",
",",
"ys",
")",
":",
"raise",
"NotImplementedError",
"(",
"\"forward method is not implemented\"",
")"
] |
https://github.com/espnet/espnet/blob/ea411f3f627b8f101c211e107d0ff7053344ac80/espnet/nets/asr_interface.py#L38-L53
|
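A minimal subclass sketch of the contract above (pytorch backend assumed, names and shapes illustrative): a concrete model overrides forward() to return a scalar training loss.

    import torch

    class ToyASR(ASRInterface, torch.nn.Module):   # ASRInterface from above
        def __init__(self):
            torch.nn.Module.__init__(self)

        def forward(self, xs, ilens, ys):
            # a real model would encode xs/ilens and score ys; here we only
            # show the required contract: return a scalar loss tensor
            return torch.zeros((), requires_grad=True)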