Column summary for this dataset preview (one row per function):

| column | dtype | range / classes |
|---|---|---|
| repo | string | lengths 7–55 |
| path | string | lengths 4–223 |
| url | string | lengths 87–315 |
| code | string | lengths 75–104k |
| code_tokens | list | tokenized `code` |
| docstring | string | lengths 1–46.9k |
| docstring_tokens | list | tokenized `docstring` |
| language | string | 1 class (`python`) |
| partition | string | 3 values (`train`, `valid`, `test`) |
| avg_line_len | float64 | 7.91–980 |
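If this preview comes from a dataset hosted on the Hugging Face Hub, rows with this schema can be iterated with the `datasets` library. A minimal sketch; the dataset path below is a placeholder, not the actual source of this dump:

```python
from datasets import load_dataset

# Hypothetical dataset path -- substitute the real one for this dump.
ds = load_dataset("some-org/python-code-docstrings", split="train")
for row in ds.select(range(3)):
    # Each row pairs a function's source with its extracted docstring.
    print(row["repo"], row["path"], row["partition"], row["avg_line_len"])
    print(row["docstring"][:80])
```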
---
repo: django-salesforce/django-salesforce
path: salesforce/backend/utils.py
url: https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/backend/utils.py#L389-L399
code:
def id_request(self):
    """The Force.com Identity Service (return type dict of text_type)"""
    # https://developer.salesforce.com/page/Digging_Deeper_into_OAuth_2.0_at_Salesforce.com?language=en&language=en#The_Force.com_Identity_Service
    if 'id' in self.oauth:
        url = self.oauth['id']
    else:
        # dynamic auth without 'id' parameter
        url = self.urls_request()['identity']
    ret = self.handle_api_exceptions('GET', url)  # TODO
    return ret.json()
docstring:
The Force.com Identity Service (return type dict of text_type)
language: python | partition: train | avg_line_len: 46.090909
---
repo: AkihikoITOH/capybara
path: capybara/virtualenv/lib/python2.7/site-packages/flask/blueprints.py
url: https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/flask/blueprints.py#L174-L186
code:
def endpoint(self, endpoint):
    """Like :meth:`Flask.endpoint` but for a blueprint.  This does not
    prefix the endpoint with the blueprint name, this has to be done
    explicitly by the user of this method.  If the endpoint is prefixed
    with a `.` it will be registered to the current blueprint, otherwise
    it's an application independent endpoint.
    """
    def decorator(f):
        def register_endpoint(state):
            state.app.view_functions[endpoint] = f
        self.record_once(register_endpoint)
        return f
    return decorator
docstring:
Like :meth:`Flask.endpoint` but for a blueprint. This does not
prefix the endpoint with the blueprint name, this has to be done
explicitly by the user of this method. If the endpoint is prefixed
with a `.` it will be registered to the current blueprint, otherwise
it's an application independent endpoint.
language: python | partition: test | avg_line_len: 45.923077
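A minimal usage sketch of the decorator above (hypothetical app and endpoint names): the URL rule is declared on the application, and the blueprint only attaches the view function to the application-wide endpoint name.

```python
from flask import Flask, Blueprint

app = Flask(__name__)
bp = Blueprint("admin", __name__)

# Declare the route on the app with a bare endpoint name, no view function yet.
app.add_url_rule("/", endpoint="index")

@bp.endpoint("index")  # unprefixed name -> application independent endpoint
def index():
    return "Hello"

# Registration runs the deferred record_once callback, wiring 'index' to index().
app.register_blueprint(bp)
```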
---
repo: raags/passdb
path: passdb/manage_passdb.py
url: https://github.com/raags/passdb/blob/7fd6665e291bbfb4db59c1230ae0257e48ad503e/passdb/manage_passdb.py#L82-L92
code:
def delete_user(self, recipient_email):
    """
    Remove user from encryption
    """
    emailid_list = self.list_user_emails()
    if recipient_email not in emailid_list:
        raise Exception("User {0} not present!".format(recipient_email))
    else:
        emailid_list.remove(recipient_email)
        self.y = self.decrypt()
        self.encrypt(emailid_list=emailid_list)
docstring:
Remove user from encryption
language: python | partition: train | avg_line_len: 37.454545
---
repo: tanghaibao/jcvi
path: jcvi/formats/bed.py
url: https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/bed.py#L1154-L1195
code:
def some(args):
    """
    %prog some bedfile idsfile > newbedfile
    Retrieve a subset of bed features given a list of ids.
    """
    from jcvi.formats.base import SetFile
    from jcvi.utils.cbook import gene_name
    p = OptionParser(some.__doc__)
    p.add_option("-v", dest="inverse", default=False, action="store_true",
                 help="Get the inverse, like grep -v [default: %default]")
    p.set_outfile()
    p.set_stripnames()
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    bedfile, idsfile = args
    inverse = opts.inverse
    ostrip = opts.strip_names
    fw = must_open(opts.outfile, "w")
    ids = SetFile(idsfile)
    if ostrip:
        ids = set(gene_name(x) for x in ids)
    bed = Bed(bedfile)
    ntotal = nkeep = 0
    for b in bed:
        ntotal += 1
        keep = b.accn in ids
        if inverse:
            keep = not keep
        if keep:
            nkeep += 1
            print(b, file=fw)
    fw.close()
    logging.debug("Stats: {0} features kept.".\
                  format(percentage(nkeep, ntotal)))
docstring:
%prog some bedfile idsfile > newbedfile
Retrieve a subset of bed features given a list of ids.
language: python | partition: train | avg_line_len: 25.428571
---
repo: saltstack/salt
path: salt/states/vagrant.py
url: https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/vagrant.py#L130-L194
code:
def running(name, **kwargs):
    r'''
    Defines and starts a new VM with specified arguments, or restart a
    VM (or group of VMs). (Runs ``vagrant up``.)
    :param name: the Salt_id node name you wish your VM to have.
    If ``name`` contains a "?" or "*" then it will re-start a group of VMs
    which have been paused or stopped.
    Each machine must be initially started individually using this function
    or the vagrant.init execution module call.
    \[NOTE:\] Keyword arguments are silently ignored when re-starting an existing VM.
    Possible keyword arguments:
    - cwd: The directory (path) containing the Vagrantfile
    - machine: ('') the name of the machine (in the Vagrantfile) if not default
    - vagrant_runas: ('root') the username who owns the vagrantbox file
    - vagrant_provider: the provider to run the VM (usually 'virtualbox')
    - vm: ({}) a dictionary containing these or other keyword arguments
    .. code-block:: yaml
        node_name:
          vagrant.running
    .. code-block:: yaml
        node_name:
          vagrant.running:
          - cwd: /projects/my_project
          - vagrant_runas: my_username
          - machine: machine1
    '''
    if '*' in name or '?' in name:
        return _vagrant_call(name, 'start', 'restarted',
                             "Machine has been restarted", "running")
    else:
        ret = {'name': name,
               'changes': {},
               'result': True,
               'comment': '{0} is already running'.format(name)
               }
        try:
            info = __salt__['vagrant.vm_state'](name)
            if info[0]['state'] != 'running':
                __salt__['vagrant.start'](name)
                ret['changes'][name] = 'Machine started'
                ret['comment'] = 'Node {0} started'.format(name)
        except (SaltInvocationError, CommandExecutionError):
            # there was no viable existing machine to start
            ret, kwargs = _find_init_change(name, ret, **kwargs)
            kwargs['start'] = True
            __salt__['vagrant.init'](name, **kwargs)
            ret['changes'][name] = 'Node defined and started'
            ret['comment'] = 'Node {0} defined and started'.format(name)
        return ret
docstring:
r'''
Defines and starts a new VM with specified arguments, or restart a
VM (or group of VMs). (Runs ``vagrant up``.)
:param name: the Salt_id node name you wish your VM to have.
If ``name`` contains a "?" or "*" then it will re-start a group of VMs
which have been paused or stopped.
Each machine must be initially started individually using this function
or the vagrant.init execution module call.
\[NOTE:\] Keyword arguments are silently ignored when re-starting an existing VM.
Possible keyword arguments:
- cwd: The directory (path) containing the Vagrantfile
- machine: ('') the name of the machine (in the Vagrantfile) if not default
- vagrant_runas: ('root') the username who owns the vagrantbox file
- vagrant_provider: the provider to run the VM (usually 'virtualbox')
- vm: ({}) a dictionary containing these or other keyword arguments
.. code-block:: yaml
node_name:
vagrant.running
.. code-block:: yaml
node_name:
vagrant.running:
- cwd: /projects/my_project
- vagrant_runas: my_username
- machine: machine1
language: python | partition: train | avg_line_len: 33.907692
---
repo: konstantint/matplotlib-venn
path: matplotlib_venn/_venn2.py
url: https://github.com/konstantint/matplotlib-venn/blob/c26796c9925bdac512edf48387452fbd1848c791/matplotlib_venn/_venn2.py#L57-L86
code:
def solve_venn2_circles(venn_areas):
    '''
    Given the list of "venn areas" (as output from compute_venn2_areas, i.e. [A, B, AB]),
    finds the positions and radii of the two circles.
    The return value is a tuple (coords, radii), where coords is a 2x2 array of coordinates and
    radii is a 2x1 array of circle radii.
    Assumes the input values to be nonnegative and not all zero.
    In particular, the first two values must be positive.
    >>> c, r = solve_venn2_circles((1, 1, 0))
    >>> np.round(r, 3)
    array([ 0.564,  0.564])
    >>> c, r = solve_venn2_circles(compute_venn2_areas((1, 2, 3)))
    >>> np.round(r, 3)
    array([ 0.461,  0.515])
    '''
    (A_a, A_b, A_ab) = list(map(float, venn_areas))
    r_a, r_b = np.sqrt(A_a / np.pi), np.sqrt(A_b / np.pi)
    radii = np.array([r_a, r_b])
    if A_ab > tol:
        # Nonzero intersection
        coords = np.zeros((2, 2))
        coords[1][0] = find_distance_by_area(radii[0], radii[1], A_ab)
    else:
        # Zero intersection
        coords = np.zeros((2, 2))
        coords[1][0] = radii[0] + radii[1] + max(np.mean(radii) * 1.1, 0.2)  # The max here is needed for the case r_a = r_b = 0
    coords = normalize_by_center_of_mass(coords, radii)
    return (coords, radii)
docstring:
Given the list of "venn areas" (as output from compute_venn2_areas, i.e. [A, B, AB]),
finds the positions and radii of the two circles.
The return value is a tuple (coords, radii), where coords is a 2x2 array of coordinates and
radii is a 2x1 array of circle radii.
Assumes the input values to be nonnegative and not all zero.
In particular, the first two values must be positive.
>>> c, r = solve_venn2_circles((1, 1, 0))
>>> np.round(r, 3)
array([ 0.564, 0.564])
>>> c, r = solve_venn2_circles(compute_venn2_areas((1, 2, 3)))
>>> np.round(r, 3)
array([ 0.461, 0.515])
language: python | partition: train | avg_line_len: 40.933333
---
repo: miguelgrinberg/python-socketio
path: socketio/namespace.py
url: https://github.com/miguelgrinberg/python-socketio/blob/c0c1bf8d21e3597389b18938550a0724dd9676b7/socketio/namespace.py#L52-L62
code:
def send(self, data, room=None, skip_sid=None, namespace=None,
         callback=None):
    """Send a message to one or more connected clients.
    The only difference with the :func:`socketio.Server.send` method is
    that when the ``namespace`` argument is not given the namespace
    associated with the class is used.
    """
    return self.server.send(data, room=room, skip_sid=skip_sid,
                            namespace=namespace or self.namespace,
                            callback=callback)
docstring:
Send a message to one or more connected clients.
The only difference with the :func:`socketio.Server.send` method is
that when the ``namespace`` argument is not given the namespace
associated with the class is used.
language: python | partition: train | avg_line_len: 48.636364
---
repo: bitcraft/PyTMX
path: pytmx/util_pygame.py
url: https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/util_pygame.py#L144-L208
code:
def build_rects(tmxmap, layer, tileset=None, real_gid=None):
    """generate a set of non-overlapping rects that represents the distribution
    of the specified gid.
    useful for generating rects for use in collision detection
    Use at your own risk: this is experimental...will change in future
    GID Note: You will need to add 1 to the GID reported by Tiled.
    :param tmxmap: TiledMap object
    :param layer: int or string name of layer
    :param tileset: int or string name of tileset
    :param real_gid: Tiled GID of the tile + 1 (see note)
    :return: List of pygame Rect objects
    """
    if isinstance(tileset, int):
        try:
            tileset = tmxmap.tilesets[tileset]
        except IndexError:
            msg = "Tileset #{0} not found in map {1}."
            logger.debug(msg.format(tileset, tmxmap))
            raise IndexError
    elif isinstance(tileset, str):
        try:
            tileset = [t for t in tmxmap.tilesets if t.name == tileset].pop()
        except IndexError:
            msg = "Tileset \"{0}\" not found in map {1}."
            logger.debug(msg.format(tileset, tmxmap))
            raise ValueError
    elif tileset:
        msg = "Tileset must be either a int or string. got: {0}"
        logger.debug(msg.format(type(tileset)))
        raise TypeError
    gid = None
    if real_gid:
        try:
            gid, flags = tmxmap.map_gid(real_gid)[0]
        except IndexError:
            msg = "GID #{0} not found"
            logger.debug(msg.format(real_gid))
            raise ValueError
    if isinstance(layer, int):
        layer_data = tmxmap.get_layer_data(layer)
    elif isinstance(layer, str):
        try:
            layer = [l for l in tmxmap.layers if l.name == layer].pop()
            layer_data = layer.data
        except IndexError:
            msg = "Layer \"{0}\" not found in map {1}."
            logger.debug(msg.format(layer, tmxmap))
            raise ValueError
    p = itertools.product(range(tmxmap.width), range(tmxmap.height))
    if gid:
        points = [(x, y) for (x, y) in p if layer_data[y][x] == gid]
    else:
        points = [(x, y) for (x, y) in p if layer_data[y][x]]
    rects = simplify(points, tmxmap.tilewidth, tmxmap.tileheight)
    return rects
docstring:
generate a set of non-overlapping rects that represents the distribution
of the specified gid.
useful for generating rects for use in collision detection
Use at your own risk: this is experimental...will change in future
GID Note: You will need to add 1 to the GID reported by Tiled.
:param tmxmap: TiledMap object
:param layer: int or string name of layer
:param tileset: int or string name of tileset
:param real_gid: Tiled GID of the tile + 1 (see note)
:return: List of pygame Rect objects
language: python | partition: train | avg_line_len: 33.923077
---
repo: its-rigs/Trolly
path: trolly/checklist.py
url: https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/checklist.py#L71-L80
code:
def add_item(self, query_params=None):
    '''
    Add an item to this checklist. Returns a dictionary of values of new
    item.
    '''
    return self.fetch_json(
        uri_path=self.base_uri + '/checkItems',
        http_method='POST',
        query_params=query_params or {}
    )
docstring:
Add an item to this checklist. Returns a dictionary of values of new
item.
language: python | partition: test | avg_line_len: 31.4
---
repo: andresriancho/splunk-logger
path: splunk_logger/utils.py
url: https://github.com/andresriancho/splunk-logger/blob/448d5ba54464fc355786ffb64f11fd6367792381/splunk_logger/utils.py#L27-L51
code:
def _parse_config_file_impl(filename):
    """
    Format for the file is:
        credentials:
            project_id: ...
            access_token: ...
            api_domain: ...
    :param filename: The filename to parse
    :return: A tuple with:
        - project_id
        - access_token
        - api_domain
    """
    try:
        doc = yaml.load(file(filename).read())
        project_id = doc["credentials"]["project_id"]
        access_token = doc["credentials"]["access_token"]
        api_domain = doc["credentials"]["api_domain"]
        return project_id, access_token, api_domain
    except:
        return None, None, None
docstring:
Format for the file is:
credentials:
project_id: ...
access_token: ...
api_domain: ...
:param filename: The filename to parse
:return: A tuple with:
- project_id
- access_token
- api_domain
language: python | partition: valid | avg_line_len: 26.96
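The snippet above is Python 2 (the `file()` builtin) and calls `yaml.load` without a loader, behind a bare `except`. A hedged Python 3 equivalent that keeps the same return contract:

```python
import yaml

def parse_config_file(filename):
    """Python 3 sketch of _parse_config_file_impl (same fields, same fallback)."""
    try:
        with open(filename) as f:           # open() replaces the Py2 file() builtin
            doc = yaml.safe_load(f.read())  # safe_load avoids executing YAML tags
        creds = doc["credentials"]
        return creds["project_id"], creds["access_token"], creds["api_domain"]
    except (OSError, KeyError, TypeError, yaml.YAMLError):
        return None, None, None             # same (None, None, None) fallback
```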
---
repo: log2timeline/plaso
path: plaso/storage/sqlite/sqlite_file.py
url: https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/storage/sqlite/sqlite_file.py#L614-L627
code:
def AddEventSource(self, event_source):
    """Adds an event source.
    Args:
      event_source (EventSource): event source.
    Raises:
      IOError: when the storage file is closed or read-only.
      OSError: when the storage file is closed or read-only.
    """
    self._RaiseIfNotWritable()
    self._AddAttributeContainer(
        self._CONTAINER_TYPE_EVENT_SOURCE, event_source)
docstring:
Adds an event source.
Args:
event_source (EventSource): event source.
Raises:
IOError: when the storage file is closed or read-only.
OSError: when the storage file is closed or read-only.
language: python | partition: train | avg_line_len: 27.071429
---
repo: tensorflow/mesh
path: mesh_tensorflow/ops.py
url: https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L1017-L1036
code:
def shift_by_n_processors(self, x, mesh_axis, offset, wrap):
    """Receive the slice from processor pcoord - offset.
    Args:
      x: a LaidOutTensor
      mesh_axis: an integer
      offset: an integer
      wrap: a boolean. If True, then wrap around. Otherwise, pad with zeros.
    """
    n = self.shape[mesh_axis].size
    source_pcoord = []
    for i in xrange(n):
        c = i - offset
        if c != c % n:
            if wrap:
                c = c % n
            else:
                c = None
        source_pcoord.append(c)
    return self.receive(x, mesh_axis, source_pcoord)
docstring:
Receive the slice from processor pcoord - offset.
Args:
x: a LaidOutTensor
mesh_axis: an integer
offset: an integer
wrap: a boolean. If True, then wrap around. Otherwise, pad with zeros.
language: python | partition: train | avg_line_len: 27.45
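A standalone trace of the `source_pcoord` computation above, with hypothetical values (4 processors on the mesh axis, `offset=1`), showing how `wrap` chooses between wrapping and `None` padding:

```python
n, offset = 4, 1  # hypothetical mesh-axis size and shift
for wrap in (True, False):
    source_pcoord = []
    for i in range(n):
        c = i - offset
        if c != c % n:              # c falls outside [0, n)
            c = c % n if wrap else None
        source_pcoord.append(c)
    print(wrap, source_pcoord)
# True  [3, 0, 1, 2]    -- wraps around the axis
# False [None, 0, 1, 2] -- None slots are padded with zeros by receive()
```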
---
repo: steveYeah/PyBomb
path: pybomb/clients/base_client.py
url: https://github.com/steveYeah/PyBomb/blob/54045d74e642f8a1c4366c24bd6a330ae3da6257/pybomb/clients/base_client.py#L117-L128
code:
def _create_search_filter(filter_by):
    """
    :param filter_by:
    :return: dict
    """
    return ",".join(
        [
            "{0}:{1}".format(key, value)
            for key, value in filter_by.items()
            if value is not None
        ]
    )
docstring:
:param filter_by:
:return: dict
language: python | partition: train | avg_line_len: 24.583333
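The same join/format idiom in isolation, with hypothetical input. Note the docstring says `dict`, but the expression actually returns a comma-joined string, and `None`-valued filters are dropped:

```python
filter_by = {"platform": 146, "name": "mario", "rating": None}  # hypothetical
search_filter = ",".join(
    "{0}:{1}".format(key, value)
    for key, value in filter_by.items()
    if value is not None  # None-valued filters are skipped
)
print(search_filter)  # platform:146,name:mario (insertion order on Python 3.7+)
```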
---
repo: robotools/fontParts
path: Lib/fontParts/base/glyph.py
url: https://github.com/robotools/fontParts/blob/d2ff106fe95f9d566161d936a645157626568712/Lib/fontParts/base/glyph.py#L1988-L1995
code:
def _get_area(self):
    """
    Subclasses may override this method.
    """
    from fontTools.pens.areaPen import AreaPen
    pen = AreaPen(self.layer)
    self.draw(pen)
    return abs(pen.value)
docstring:
Subclasses may override this method.
language: python | partition: train | avg_line_len: 27.5
---
repo: williamgilpin/pypdb
path: pypdb/pypdb.py
url: https://github.com/williamgilpin/pypdb/blob/bfb9e1b15b4ad097c5add50c4c176ac6cb28ee15/pypdb/pypdb.py#L844-L873
code:
def get_pfam(pdb_id):
    """Return PFAM annotations of given PDB_ID
    Parameters
    ----------
    pdb_id : string
        A 4 character string giving a pdb entry of interest
    Returns
    -------
    out : dict
        A dictionary containing the PFAM annotations for the specified PDB ID
    Examples
    --------
    >>> pfam_info = get_pfam('2LME')
    >>> print(pfam_info)
    {'pfamHit': {'@pfamAcc': 'PF03895.10', '@pfamName': 'YadA_anchor',
    '@structureId': '2LME', '@pdbResNumEnd': '105', '@pdbResNumStart': '28',
    '@pfamDesc': 'YadA-like C-terminal region', '@eValue': '5.0E-22', '@chainId': 'A'}}
    """
    out = get_info(pdb_id, url_root = 'http://www.rcsb.org/pdb/rest/hmmer?structureId=')
    out = to_dict(out)
    if not out['hmmer3']:
        return dict()
    return remove_at_sign(out['hmmer3'])
docstring:
Return PFAM annotations of given PDB_ID
Parameters
----------
pdb_id : string
A 4 character string giving a pdb entry of interest
Returns
-------
out : dict
A dictionary containing the PFAM annotations for the specified PDB ID
Examples
--------
>>> pfam_info = get_pfam('2LME')
>>> print(pfam_info)
{'pfamHit': {'@pfamAcc': 'PF03895.10', '@pfamName': 'YadA_anchor',
'@structureId': '2LME', '@pdbResNumEnd': '105', '@pdbResNumStart': '28',
'@pfamDesc': 'YadA-like C-terminal region', '@eValue': '5.0E-22', '@chainId': 'A'}}
language: python | partition: train | avg_line_len: 26.866667
---
repo: TissueMAPS/TmDeploy
path: tmdeploy/config.py
url: https://github.com/TissueMAPS/TmDeploy/blob/f891b4ffb21431988bc4a063ae871da3bf284a45/tmdeploy/config.py#L655-L661
code:
def db_group(self):
    '''str: database system group (defaults to
    :attr:`db_user <tmdeploy.config.AnsibleHostVariableSection.db_user>`)
    '''
    if self._db_group is None:
        self._db_group = self.db_user
    return self._db_group
docstring:
str: database system group (defaults to
:attr:`db_user <tmdeploy.config.AnsibleHostVariableSection.db_user>`)
language: python | partition: train | avg_line_len: 37.285714
---
repo: CellProfiler/centrosome
path: centrosome/cpmorphology.py
url: https://github.com/CellProfiler/centrosome/blob/7bd9350a2d4ae1b215b81eabcecfe560bbb1f32a/centrosome/cpmorphology.py#L2860-L2881
code:
def openlines(image, linelength=10, dAngle=10, mask=None):
    """
    Do a morphological opening along lines of different angles.
    Return difference between max and min response to different angles for each pixel.
    This effectively removes dots and only keeps lines.
    image - pixel image to operate on
    length - length of the structural element
    angluar_resolution - angle step for the rotating lines
    mask - if present, only use unmasked pixels for operations
    """
    nAngles = 180//dAngle
    openingstack = np.zeros((nAngles,image.shape[0],image.shape[1]),image.dtype)
    for iAngle in range(nAngles):
        angle = dAngle * iAngle
        se = strel_line(linelength,angle)
        openingstack[iAngle,:,:] = opening(image, mask=mask, footprint=se)
    imLines = np.max(openingstack,axis=0) - np.min(openingstack,axis=0)
    return imLines
docstring:
Do a morphological opening along lines of different angles.
Return difference between max and min response to different angles for each pixel.
This effectively removes dots and only keeps lines.
image - pixel image to operate on
length - length of the structural element
angluar_resolution - angle step for the rotating lines
mask - if present, only use unmasked pixels for operations
language: python | partition: train | avg_line_len: 38.681818
---
repo: glomex/gcdt
path: gcdt/ramuda_wire.py
url: https://github.com/glomex/gcdt/blob/cd67cf416371337b83cb9ca3f696277125703339/gcdt/ramuda_wire.py#L84-L106
code:
def _get_event_source_obj(awsclient, evt_source):
    """
    Given awsclient, event_source dictionary item
    create an event_source object of the appropriate event type
    to schedule this event, and return the object.
    """
    event_source_map = {
        'dynamodb': event_source.dynamodb_stream.DynamoDBStreamEventSource,
        'kinesis': event_source.kinesis.KinesisEventSource,
        's3': event_source.s3.S3EventSource,
        'sns': event_source.sns.SNSEventSource,
        'events': event_source.cloudwatch.CloudWatchEventSource,
        'cloudfront': event_source.cloudfront.CloudFrontEventSource,
        'cloudwatch_logs': event_source.cloudwatch_logs.CloudWatchLogsEventSource,
    }
    evt_type = _get_event_type(evt_source)
    event_source_func = event_source_map.get(evt_type, None)
    if not event_source:
        raise ValueError('Unknown event source: {0}'.format(
            evt_source['arn']))
    return event_source_func(awsclient, evt_source)
docstring:
Given awsclient, event_source dictionary item
create an event_source object of the appropriate event type
to schedule this event, and return the object.
language: python | partition: train | avg_line_len: 41.782609
---
repo: synw/dataswim
path: dataswim/charts/altair.py
url: https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/charts/altair.py#L38-L48
code:
def _altair_line_num_(self, xfield, yfield, opts, style, encode):
    """
    Get a line + text number chart
    """
    try:
        c = self._altair_chart_num_("line", xfield,
                                    yfield, opts, style, encode)
    except Exception as e:
        self.err(e, "Can not draw a line num chart")
        return
    return c
docstring:
Get a line + text number chart
language: python | partition: train | avg_line_len: 34.818182
---
repo: tensorflow/tensor2tensor
path: tensor2tensor/trax/trax.py
url: https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/trax.py#L255-L277
code:
def epochs(steps=None, epoch_steps=1):
    """Iterator over epochs until steps is reached. 1-indexed.
    Args:
      steps: int, total number of steps. Infinite if None.
      epoch_steps: int, number of steps per epoch. Can also be an iterable<int> to
        enable variable length epochs.
    Yields:
      (epoch: int, epoch id, epoch_steps: int, number of steps in this epoch)
    """
    try:
        iter(epoch_steps)
    except TypeError:
        epoch_steps = itertools.repeat(epoch_steps)
    step = 0
    for epoch, epoch_steps in enumerate(epoch_steps):
        epoch_steps = min(epoch_steps, steps - step)
        yield (epoch + 1, epoch_steps)
        step += epoch_steps
        if steps and step >= steps:
            break
docstring:
Iterator over epochs until steps is reached. 1-indexed.
Args:
steps: int, total number of steps. Infinite if None.
epoch_steps: int, number of steps per epoch. Can also be an iterable<int> to
enable variable length epochs.
Yields:
(epoch: int, epoch id, epoch_steps: int, number of steps in this epoch)
language: python | partition: train | avg_line_len: 29
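A self-contained run of `epochs` above (body copied from the record) showing the 1-indexed pairs it yields; note the final epoch is truncated to the steps remaining:

```python
import itertools

def epochs(steps=None, epoch_steps=1):
    try:
        iter(epoch_steps)
    except TypeError:
        epoch_steps = itertools.repeat(epoch_steps)
    step = 0
    for epoch, epoch_steps in enumerate(epoch_steps):
        epoch_steps = min(epoch_steps, steps - step)  # truncate the last epoch
        yield (epoch + 1, epoch_steps)
        step += epoch_steps
        if steps and step >= steps:
            break

print(list(epochs(steps=7, epoch_steps=3)))  # [(1, 3), (2, 3), (3, 1)]
```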
---
repo: robertpeteuil/multi-cloud-control
path: mcc/core.py
url: https://github.com/robertpeteuil/multi-cloud-control/blob/f1565af1c0b6ed465ff312d3ccc592ba0609f4a2/mcc/core.py#L103-L112
code:
def config_prov(config):
    """Read providers from configfile and de-duplicate it."""
    try:
        providers = [e.strip() for e in (config['info']
                                         ['providers']).split(',')]
    except KeyError as e:
        print("Error reading config item: {}".format(e))
        sys.exit()
    providers = list(OrderedDict.fromkeys(providers))
    return providers
docstring:
Read providers from configfile and de-duplicate it.
language: python | partition: train | avg_line_len: 38.7
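The de-duplication idiom used above, in isolation with a hypothetical config value: `OrderedDict.fromkeys` keeps the first occurrence of each provider and preserves order.

```python
from collections import OrderedDict

raw = "aws, azure, aws, gcp"  # hypothetical config['info']['providers'] value
providers = [e.strip() for e in raw.split(",")]
providers = list(OrderedDict.fromkeys(providers))  # ordered de-duplication
print(providers)  # ['aws', 'azure', 'gcp']
```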
---
repo: twilio/twilio-python
path: twilio/rest/autopilot/v1/assistant/field_type/__init__.py
url: https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/autopilot/v1/assistant/field_type/__init__.py#L451-L461
code:
def update(self, friendly_name=values.unset, unique_name=values.unset):
    """
    Update the FieldTypeInstance
    :param unicode friendly_name: A string to describe the resource
    :param unicode unique_name: An application-defined string that uniquely identifies the resource
    :returns: Updated FieldTypeInstance
    :rtype: twilio.rest.autopilot.v1.assistant.field_type.FieldTypeInstance
    """
    return self._proxy.update(friendly_name=friendly_name, unique_name=unique_name, )
docstring:
Update the FieldTypeInstance
:param unicode friendly_name: A string to describe the resource
:param unicode unique_name: An application-defined string that uniquely identifies the resource
:returns: Updated FieldTypeInstance
:rtype: twilio.rest.autopilot.v1.assistant.field_type.FieldTypeInstance
language: python | partition: train | avg_line_len: 46.727273
---
repo: noahbenson/neuropythy
path: neuropythy/util/core.py
url: https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/util/core.py#L716-L741
code:
def czdivide(a, b, null=0):
    '''
    czdivide(a, b) returns the quotient a / b as a numpy array object. Like numpy's divide function
    or a/b syntax, czdivide will thread over the latest dimension possible. Unlike numpy's divide,
    czdivide works with sparse matrices. Additionally, czdivide multiplies a by the zinv of b, so
    divide-by-zero entries are replaced with 0 in the result.
    The optional argument null (default: 0) may be given to specify that zeros in the arary b should
    instead be replaced with the given value in the result. Note that if this value is not equal to
    0, then any sparse array passed as argument b must be reified.
    The czdivide function never raises an error due to divide-by-zero; if you desire this behavior,
    use the cdivide function instead.
    '''
    if null == 0: return a.multiply(zinv(b)) if sps.issparse(a) else a * zinv(b)
    elif sps.issparse(b): b = b.toarray()
    else: b = np.asarray(b)
    z = np.isclose(b, 0)
    q = np.logical_not(z)
    zi = q / (b + z)
    if sps.issparse(a):
        r = a.multiply(zi).tocsr()
    else:
        r = np.asarray(a) * zi
    r[np.ones(a.shape, dtype=np.bool)*z] = null
    return r
docstring:
czdivide(a, b) returns the quotient a / b as a numpy array object. Like numpy's divide function
or a/b syntax, czdivide will thread over the latest dimension possible. Unlike numpy's divide,
czdivide works with sparse matrices. Additionally, czdivide multiplies a by the zinv of b, so
divide-by-zero entries are replaced with 0 in the result.
The optional argument null (default: 0) may be given to specify that zeros in the arary b should
instead be replaced with the given value in the result. Note that if this value is not equal to
0, then any sparse array passed as argument b must be reified.
The czdivide function never raises an error due to divide-by-zero; if you desire this behavior,
use the cdivide function instead.
language: python | partition: train | avg_line_len: 46.115385
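A standalone sketch of the dense `null != 0` path above (behavior inferred from the docstring; `zinv` and the sparse branches are omitted): zeros in `b` yield `null` instead of a division error.

```python
import numpy as np

a = np.array([1.0, 2.0, 3.0])
b = np.array([2.0, 0.0, 4.0])
null = -1.0
z = np.isclose(b, 0)
zi = np.logical_not(z) / (b + z)  # elementwise 1/b, but 0 where b == 0
r = a * zi
r[z] = null                       # replace divide-by-zero slots with null
print(r)                          # [ 0.5  -1.    0.75]
```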
---
repo: poldracklab/niworkflows
path: niworkflows/interfaces/masks.py
url: https://github.com/poldracklab/niworkflows/blob/254f4b4fcc5e6ecb29d2f4602a30786b913ecce5/niworkflows/interfaces/masks.py#L148-L161
code:
def _post_run_hook(self, runtime):
    ''' generates a report showing slices from each axis of an arbitrary
    volume of in_file, with the resulting binary brain mask overlaid '''
    self._anat_file = self.inputs.in_file
    self._mask_file = self.aggregate_outputs(runtime=runtime).mask_file
    self._seg_files = [self._mask_file]
    self._masked = True
    NIWORKFLOWS_LOG.info(
        'Generating report for nilearn.compute_epi_mask. file "%s", and mask file "%s"',
        self._anat_file, self._mask_file)
    return super(ComputeEPIMask, self)._post_run_hook(runtime)
docstring:
generates a report showing slices from each axis of an arbitrary
volume of in_file, with the resulting binary brain mask overlaid
language: python | partition: train | avg_line_len: 43.428571
---
repo: juju/charm-helpers
path: charmhelpers/contrib/openstack/context.py
url: https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/openstack/context.py#L1736-L1759
code:
def _determine_ctxt(self):
    """Determines the Volume API endpoint information.
    Determines the appropriate version of the API that should be used
    as well as the catalog_info string that would be supplied. Returns
    a dict containing the volume_api_version and the volume_catalog_info.
    """
    rel = os_release(self.pkg, base='icehouse')
    version = '2'
    if CompareOpenStackReleases(rel) >= 'pike':
        version = '3'
    service_type = 'volumev{version}'.format(version=version)
    service_name = 'cinderv{version}'.format(version=version)
    endpoint_type = 'publicURL'
    if config('use-internal-endpoints'):
        endpoint_type = 'internalURL'
    catalog_info = '{type}:{name}:{endpoint}'.format(
        type=service_type, name=service_name, endpoint=endpoint_type)
    return {
        'volume_api_version': version,
        'volume_catalog_info': catalog_info,
    }
docstring:
Determines the Volume API endpoint information.
Determines the appropriate version of the API that should be used
as well as the catalog_info string that would be supplied. Returns
a dict containing the volume_api_version and the volume_catalog_info.
language: python | partition: train | avg_line_len: 40.083333
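A worked trace of the strings built above for a hypothetical release at or past pike with public endpoints:

```python
version = "3"  # CompareOpenStackReleases(rel) >= 'pike'
service_type = "volumev{version}".format(version=version)  # 'volumev3'
service_name = "cinderv{version}".format(version=version)  # 'cinderv3'
catalog_info = "{type}:{name}:{endpoint}".format(
    type=service_type, name=service_name, endpoint="publicURL")
print(catalog_info)  # volumev3:cinderv3:publicURL
```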
---
repo: ejhigson/nestcheck
path: nestcheck/data_processing.py
url: https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/data_processing.py#L594-L610
code:
def sample_less_than_condition(choices_in, condition):
    """Creates a random sample from choices without replacement, subject to the
    condition that each element of the output is greater than the corresponding
    element of the condition array.
    condition should be in ascending order.
    """
    output = np.zeros(min(condition.shape[0], choices_in.shape[0]))
    choices = copy.deepcopy(choices_in)
    for i, _ in enumerate(output):
        # randomly select one of the choices which meets condition
        avail_inds = np.where(choices < condition[i])[0]
        selected_ind = np.random.choice(avail_inds)
        output[i] = choices[selected_ind]
        # remove the chosen value
        choices = np.delete(choices, selected_ind)
    return output
docstring:
Creates a random sample from choices without replacement, subject to the
condition that each element of the output is greater than the corresponding
element of the condition array.
condition should be in ascending order.
language: python | partition: train | avg_line_len: 44.176471
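A self-contained demo (function body copied from the record; inputs hypothetical). Per the `choices < condition[i]` test, each draw lands strictly below the matching condition entry:

```python
import copy
import numpy as np

def sample_less_than_condition(choices_in, condition):
    output = np.zeros(min(condition.shape[0], choices_in.shape[0]))
    choices = copy.deepcopy(choices_in)
    for i, _ in enumerate(output):
        avail_inds = np.where(choices < condition[i])[0]  # indices meeting condition
        selected_ind = np.random.choice(avail_inds)
        output[i] = choices[selected_ind]
        choices = np.delete(choices, selected_ind)        # sample without replacement
    return output

draws = sample_less_than_condition(np.array([0.1, 0.5, 0.9]),
                                   np.array([0.3, 0.8, 1.0]))
print(draws)  # [0.1 0.5 0.9] -- each entry below its condition value
```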
---
repo: numenta/htmresearch
path: projects/speech_commands/data/process_dataset.py
url: https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/speech_commands/data/process_dataset.py#L48-L82
code:
def transform_folder(args):
    """
    Transform all the files in the source dataset for the given command and save
    the results as a single pickle file in the destination dataset
    :param args: tuple with the following arguments:
        - the command name: 'zero', 'one', 'two', ...
        - transforms to apply to wav file
        - full path of the source dataset
        - full path of the destination dataset
    """
    command, (transform, src, dest) = args
    try:
        print(progress.value, "remaining")
        # Apply transformations to all files
        data = []
        data_dir = os.path.join(src, command)
        for filename in os.listdir(data_dir):
            path = os.path.join(data_dir, filename)
            data.append(transform({'path': path}))
        # Save results
        pickleFile = os.path.join(dest, "{}.pkl".format(command))
        gc.disable()
        with open(pickleFile, "wb") as f:
            pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
        gc.enable()
        # Update progress
        with progress.get_lock():
            progress.value -= 1
    except Exception as e:
        print(command, e, file=sys.stderr)
        traceback.print_exc()
docstring:
Transform all the files in the source dataset for the given command and save
the results as a single pickle file in the destination dataset
:param args: tuple with the following arguments:
- the command name: 'zero', 'one', 'two', ...
- transforms to apply to wav file
- full path of the source dataset
- full path of the destination dataset
language: python | partition: train | avg_line_len: 31.6
---
repo: StackStorm/pybind
path: pybind/slxos/v17r_1_01a/mpls_state/__init__.py
url: https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_1_01a/mpls_state/__init__.py#L1187-L1210
code:
def _set_statistics_oam(self, v, load=False):
    """
    Setter method for statistics_oam, mapped from YANG variable /mpls_state/statistics_oam (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_statistics_oam is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_statistics_oam() directly.
    YANG Description: OAM packet statistics
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=statistics_oam.statistics_oam, is_container='container', presence=False, yang_name="statistics-oam", rest_name="statistics-oam", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-statistics-oam', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """statistics_oam must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=statistics_oam.statistics_oam, is_container='container', presence=False, yang_name="statistics-oam", rest_name="statistics-oam", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-statistics-oam', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""",
        })
    self.__statistics_oam = t
    if hasattr(self, '_set'):
        self._set()
docstring:
Setter method for statistics_oam, mapped from YANG variable /mpls_state/statistics_oam (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_statistics_oam is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_statistics_oam() directly.
YANG Description: OAM packet statistics
|
[
"Setter",
"method",
"for",
"statistics_oam",
"mapped",
"from",
"YANG",
"variable",
"/",
"mpls_state",
"/",
"statistics_oam",
"(",
"container",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"file",
"then",
"_set_statistics_oam",
"is",
"considered",
"as",
"a",
"private",
"method",
".",
"Backends",
"looking",
"to",
"populate",
"this",
"variable",
"should",
"do",
"so",
"via",
"calling",
"thisObj",
".",
"_set_statistics_oam",
"()",
"directly",
"."
] |
python
|
train
| 75.166667 |
openstack/python-monascaclient
|
monascaclient/v2_0/notifications.py
|
https://github.com/openstack/python-monascaclient/blob/03b07534145928eb2debad938da033c232dda105/monascaclient/v2_0/notifications.py#L43-L47
|
def delete(self, **kwargs):
"""Delete a notification."""
url = self.base_url + '/%s' % kwargs['notification_id']
resp = self.client.delete(url=url)
return resp
|
[
"def",
"delete",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"url",
"=",
"self",
".",
"base_url",
"+",
"'/%s'",
"%",
"kwargs",
"[",
"'notification_id'",
"]",
"resp",
"=",
"self",
".",
"client",
".",
"delete",
"(",
"url",
"=",
"url",
")",
"return",
"resp"
] |
Delete a notification.
|
[
"Delete",
"a",
"notification",
"."
] |
python
|
train
| 37.4 |
apache/airflow
|
airflow/utils/log/wasb_task_handler.py
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/log/wasb_task_handler.py#L68-L95
|
def close(self):
"""
Close and upload local log file to remote storage Wasb.
"""
# When application exit, system shuts down all handlers by
# calling close method. Here we check if logger is already
# closed to prevent uploading the log to remote storage multiple
# times when `logging.shutdown` is called.
if self.closed:
return
super().close()
if not self.upload_on_close:
return
local_loc = os.path.join(self.local_base, self.log_relative_path)
remote_loc = os.path.join(self.remote_base, self.log_relative_path)
if os.path.exists(local_loc):
# read log and remove old logs to get just the latest additions
with open(local_loc, 'r') as logfile:
log = logfile.read()
self.wasb_write(log, remote_loc, append=True)
if self.delete_local_copy:
shutil.rmtree(os.path.dirname(local_loc))
# Mark closed so we don't double write if close is called twice
self.closed = True
|
[
"def",
"close",
"(",
"self",
")",
":",
"# When application exit, system shuts down all handlers by",
"# calling close method. Here we check if logger is already",
"# closed to prevent uploading the log to remote storage multiple",
"# times when `logging.shutdown` is called.",
"if",
"self",
".",
"closed",
":",
"return",
"super",
"(",
")",
".",
"close",
"(",
")",
"if",
"not",
"self",
".",
"upload_on_close",
":",
"return",
"local_loc",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"local_base",
",",
"self",
".",
"log_relative_path",
")",
"remote_loc",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"remote_base",
",",
"self",
".",
"log_relative_path",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"local_loc",
")",
":",
"# read log and remove old logs to get just the latest additions",
"with",
"open",
"(",
"local_loc",
",",
"'r'",
")",
"as",
"logfile",
":",
"log",
"=",
"logfile",
".",
"read",
"(",
")",
"self",
".",
"wasb_write",
"(",
"log",
",",
"remote_loc",
",",
"append",
"=",
"True",
")",
"if",
"self",
".",
"delete_local_copy",
":",
"shutil",
".",
"rmtree",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"local_loc",
")",
")",
"# Mark closed so we don't double write if close is called twice",
"self",
".",
"closed",
"=",
"True"
] |
Close and upload local log file to remote storage Wasb.
|
[
"Close",
"and",
"upload",
"local",
"log",
"file",
"to",
"remote",
"storage",
"Wasb",
"."
] |
python
|
test
| 38.107143 |
openstax/cnx-publishing
|
cnxpublishing/db.py
|
https://github.com/openstax/cnx-publishing/blob/f55b4a2c45d8618737288f1b74b4139d5ac74154/cnxpublishing/db.py#L119-L160
|
def upsert_pending_licensors(cursor, document_id):
"""Update or insert records for pending license acceptors."""
cursor.execute("""\
SELECT "uuid", "metadata"
FROM pending_documents
WHERE id = %s""", (document_id,))
uuid_, metadata = cursor.fetchone()
acceptors = set([uid for uid, type_ in _dissect_roles(metadata)])
# Acquire a list of existing acceptors.
cursor.execute("""\
SELECT "user_id", "accepted"
FROM license_acceptances
WHERE uuid = %s""", (uuid_,))
existing_acceptors_mapping = dict(cursor.fetchall())
# Who's not in the existing list?
existing_acceptors = set(existing_acceptors_mapping.keys())
new_acceptors = acceptors.difference(existing_acceptors)
# Insert the new licensor acceptors.
for acceptor in new_acceptors:
cursor.execute("""\
INSERT INTO license_acceptances
("uuid", "user_id", "accepted")
VALUES (%s, %s, NULL)""", (uuid_, acceptor,))
# Has everyone already accepted?
cursor.execute("""\
SELECT user_id
FROM license_acceptances
WHERE
uuid = %s
AND
(accepted is UNKNOWN OR accepted is FALSE)""", (uuid_,))
defectors = set(cursor.fetchall())
if not defectors:
# Update the pending document license acceptance state.
cursor.execute("""\
update pending_documents set license_accepted = 't'
where id = %s""", (document_id,))
|
[
"def",
"upsert_pending_licensors",
"(",
"cursor",
",",
"document_id",
")",
":",
"cursor",
".",
"execute",
"(",
"\"\"\"\\\nSELECT \"uuid\", \"metadata\"\nFROM pending_documents\nWHERE id = %s\"\"\"",
",",
"(",
"document_id",
",",
")",
")",
"uuid_",
",",
"metadata",
"=",
"cursor",
".",
"fetchone",
"(",
")",
"acceptors",
"=",
"set",
"(",
"[",
"uid",
"for",
"uid",
",",
"type_",
"in",
"_dissect_roles",
"(",
"metadata",
")",
"]",
")",
"# Acquire a list of existing acceptors.",
"cursor",
".",
"execute",
"(",
"\"\"\"\\\nSELECT \"user_id\", \"accepted\"\nFROM license_acceptances\nWHERE uuid = %s\"\"\"",
",",
"(",
"uuid_",
",",
")",
")",
"existing_acceptors_mapping",
"=",
"dict",
"(",
"cursor",
".",
"fetchall",
"(",
")",
")",
"# Who's not in the existing list?",
"existing_acceptors",
"=",
"set",
"(",
"existing_acceptors_mapping",
".",
"keys",
"(",
")",
")",
"new_acceptors",
"=",
"acceptors",
".",
"difference",
"(",
"existing_acceptors",
")",
"# Insert the new licensor acceptors.",
"for",
"acceptor",
"in",
"new_acceptors",
":",
"cursor",
".",
"execute",
"(",
"\"\"\"\\\nINSERT INTO license_acceptances\n (\"uuid\", \"user_id\", \"accepted\")\nVALUES (%s, %s, NULL)\"\"\"",
",",
"(",
"uuid_",
",",
"acceptor",
",",
")",
")",
"# Has everyone already accepted?",
"cursor",
".",
"execute",
"(",
"\"\"\"\\\nSELECT user_id\nFROM license_acceptances\nWHERE\n uuid = %s\n AND\n (accepted is UNKNOWN OR accepted is FALSE)\"\"\"",
",",
"(",
"uuid_",
",",
")",
")",
"defectors",
"=",
"set",
"(",
"cursor",
".",
"fetchall",
"(",
")",
")",
"if",
"not",
"defectors",
":",
"# Update the pending document license acceptance state.",
"cursor",
".",
"execute",
"(",
"\"\"\"\\\nupdate pending_documents set license_accepted = 't'\nwhere id = %s\"\"\"",
",",
"(",
"document_id",
",",
")",
")"
] |
Update or insert records for pending license acceptors.
|
[
"Update",
"or",
"insert",
"records",
"for",
"pending",
"license",
"acceptors",
"."
] |
python
|
valid
| 31.142857 |
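The set-difference insert pattern above reduces to a runnable sqlite3 sketch; the table and column names are borrowed from the record, while the in-memory database and sample users are illustrative:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE license_acceptances (uuid TEXT, user_id TEXT, accepted INTEGER)")

def upsert_acceptors(conn, uuid_, acceptors):
    rows = conn.execute(
        "SELECT user_id FROM license_acceptances WHERE uuid = ?", (uuid_,)).fetchall()
    existing = {user_id for (user_id,) in rows}
    for acceptor in set(acceptors) - existing:  # insert only the new ones
        conn.execute(
            "INSERT INTO license_acceptances (uuid, user_id, accepted) "
            "VALUES (?, ?, NULL)", (uuid_, acceptor))

upsert_acceptors(conn, "doc-1", ["alice", "bob"])
upsert_acceptors(conn, "doc-1", ["bob", "carol"])  # adds only "carol"
print(conn.execute("SELECT COUNT(*) FROM license_acceptances").fetchone()[0])  # 3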
openstack/pyghmi
|
pyghmi/ipmi/private/util.py
|
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/private/util.py#L71-L77
|
def get_ipv4(hostname):
"""Get list of ipv4 addresses for hostname
"""
addrinfo = socket.getaddrinfo(hostname, None, socket.AF_INET,
socket.SOCK_STREAM)
return [addrinfo[x][4][0] for x in range(len(addrinfo))]
|
[
"def",
"get_ipv4",
"(",
"hostname",
")",
":",
"addrinfo",
"=",
"socket",
".",
"getaddrinfo",
"(",
"hostname",
",",
"None",
",",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_STREAM",
")",
"return",
"[",
"addrinfo",
"[",
"x",
"]",
"[",
"4",
"]",
"[",
"0",
"]",
"for",
"x",
"in",
"range",
"(",
"len",
"(",
"addrinfo",
")",
")",
"]"
] |
Get list of ipv4 addresses for hostname
|
[
"Get",
"list",
"of",
"ipv4",
"addresses",
"for",
"hostname"
] |
python
|
train
| 36.285714 |
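The `[4][0]` indexing above is easier to read once the `getaddrinfo()` tuple layout is spelled out; a self-contained equivalent (the "localhost" probe is illustrative):

import socket

def get_ipv4(hostname):
    # Each getaddrinfo() entry is (family, type, proto, canonname, sockaddr);
    # for AF_INET the sockaddr is (ip_address, port), hence entry[4][0].
    addrinfo = socket.getaddrinfo(hostname, None, socket.AF_INET,
                                  socket.SOCK_STREAM)
    return [entry[4][0] for entry in addrinfo]

print(get_ipv4("localhost"))  # e.g. ['127.0.0.1']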
iotile/coretools
|
iotilebuild/iotile/build/tilebus/block.py
|
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/tilebus/block.py#L69-L81
|
def set_api_version(self, major, minor):
"""Set the API version this module was designed for.
Each module must declare the mib12 API version it was compiled with as a
2 byte major.minor number. This information is used by the pic12_executive
to decide whether the application is compatible.
"""
if not self._is_byte(major) or not self._is_byte(minor):
raise ArgumentError("Invalid API version number with component that does not fit in 1 byte",
major=major, minor=minor)
self.api_version = (major, minor)
|
[
"def",
"set_api_version",
"(",
"self",
",",
"major",
",",
"minor",
")",
":",
"if",
"not",
"self",
".",
"_is_byte",
"(",
"major",
")",
"or",
"not",
"self",
".",
"_is_byte",
"(",
"minor",
")",
":",
"raise",
"ArgumentError",
"(",
"\"Invalid API version number with component that does not fit in 1 byte\"",
",",
"major",
"=",
"major",
",",
"minor",
"=",
"minor",
")",
"self",
".",
"api_version",
"=",
"(",
"major",
",",
"minor",
")"
] |
Set the API version this module was designed for.
Each module must declare the mib12 API version it was compiled with as a
2 byte major.minor number. This information is used by the pic12_executive
to decide whether the application is compatible.
|
[
"Set",
"the",
"API",
"version",
"this",
"module",
"was",
"designed",
"for",
"."
] |
python
|
train
| 45.846154 |
mapeveri/django-endless-pagination-vue
|
endless_pagination/templatetags/endless.py
|
https://github.com/mapeveri/django-endless-pagination-vue/blob/3faa79a51b11d7ae0bd431abf8c38ecaf9180704/endless_pagination/templatetags/endless.py#L327-L366
|
def show_more(context, label=None, loading=settings.LOADING):
"""Show the link to get the next page in a Twitter-like pagination.
Usage::
{% show_more %}
Alternatively you can override the label passed to the default template::
{% show_more "even more" %}
You can override the loading text too::
{% show_more "even more" "working" %}
Must be called after ``{% paginate objects %}``.
"""
# This template tag could raise a PaginationError: you have to call
# *paginate* or *lazy_paginate* before including the showmore template.
data = utils.get_data_from_context(context)
page = data['page']
# show the template only if there is a next page
if page.has_next():
request = context['request']
page_number = page.next_page_number()
# Generate the querystring.
querystring_key = data['querystring_key']
querystring = utils.get_querystring_for_page(
request, page_number, querystring_key,
default_number=data['default_number'])
return {
'label': label,
'loading': loading,
'path': iri_to_uri(data['override_path'] or request.path),
'querystring': querystring,
'querystring_key': querystring_key,
'request': request,
}
# No next page, nothing to see.
return {}
|
[
"def",
"show_more",
"(",
"context",
",",
"label",
"=",
"None",
",",
"loading",
"=",
"settings",
".",
"LOADING",
")",
":",
"# This template tag could raise a PaginationError: you have to call",
"# *paginate* or *lazy_paginate* before including the showmore template.",
"data",
"=",
"utils",
".",
"get_data_from_context",
"(",
"context",
")",
"page",
"=",
"data",
"[",
"'page'",
"]",
"# show the template only if there is a next page",
"if",
"page",
".",
"has_next",
"(",
")",
":",
"request",
"=",
"context",
"[",
"'request'",
"]",
"page_number",
"=",
"page",
".",
"next_page_number",
"(",
")",
"# Generate the querystring.",
"querystring_key",
"=",
"data",
"[",
"'querystring_key'",
"]",
"querystring",
"=",
"utils",
".",
"get_querystring_for_page",
"(",
"request",
",",
"page_number",
",",
"querystring_key",
",",
"default_number",
"=",
"data",
"[",
"'default_number'",
"]",
")",
"return",
"{",
"'label'",
":",
"label",
",",
"'loading'",
":",
"loading",
",",
"'path'",
":",
"iri_to_uri",
"(",
"data",
"[",
"'override_path'",
"]",
"or",
"request",
".",
"path",
")",
",",
"'querystring'",
":",
"querystring",
",",
"'querystring_key'",
":",
"querystring_key",
",",
"'request'",
":",
"request",
",",
"}",
"# No next page, nothing to see.",
"return",
"{",
"}"
] |
Show the link to get the next page in a Twitter-like pagination.
Usage::
{% show_more %}
Alternatively you can override the label passed to the default template::
{% show_more "even more" %}
You can override the loading text too::
{% show_more "even more" "working" %}
Must be called after ``{% paginate objects %}``.
|
[
"Show",
"the",
"link",
"to",
"get",
"the",
"next",
"page",
"in",
"a",
"Twitter",
"-",
"like",
"pagination",
"."
] |
python
|
train
| 33.8 |
atlassian-api/atlassian-python-api
|
atlassian/jira.py
|
https://github.com/atlassian-api/atlassian-python-api/blob/540d269905c3e7547b666fe30c647b2d512cf358/atlassian/jira.py#L465-L483
|
def remove_group(self, name, swap_group=None):
"""
Delete a group by given group parameter
If you delete a group and content is restricted to that group, the content will be hidden from all users
To prevent this, use this parameter to specify a different group to transfer the restrictions
(comments and worklogs only) to
:param name: str
:param swap_group: str
:return:
"""
log.warning('Removing group...')
url = 'rest/api/2/group'
if swap_group is not None:
params = {'groupname': name, 'swapGroup': swap_group}
else:
params = {'groupname': name}
return self.delete(url, params=params)
|
[
"def",
"remove_group",
"(",
"self",
",",
"name",
",",
"swap_group",
"=",
"None",
")",
":",
"log",
".",
"warning",
"(",
"'Removing group...'",
")",
"url",
"=",
"'rest/api/2/group'",
"if",
"swap_group",
"is",
"not",
"None",
":",
"params",
"=",
"{",
"'groupname'",
":",
"name",
",",
"'swapGroup'",
":",
"swap_group",
"}",
"else",
":",
"params",
"=",
"{",
"'groupname'",
":",
"name",
"}",
"return",
"self",
".",
"delete",
"(",
"url",
",",
"params",
"=",
"params",
")"
] |
Delete a group by given group parameter
If you delete a group and content is restricted to that group, the content will be hidden from all users
To prevent this, use this parameter to specify a different group to transfer the restrictions
(comments and worklogs only) to
:param name: str
:param swap_group: str
:return:
|
[
"Delete",
"a",
"group",
"by",
"given",
"group",
"parameter",
"If",
"you",
"delete",
"a",
"group",
"and",
"content",
"is",
"restricted",
"to",
"that",
"group",
"the",
"content",
"will",
"be",
"hidden",
"from",
"all",
"users",
"To",
"prevent",
"this",
"use",
"this",
"parameter",
"to",
"specify",
"a",
"different",
"group",
"to",
"transfer",
"the",
"restrictions",
"(",
"comments",
"and",
"worklogs",
"only",
")",
"to"
] |
python
|
train
| 37.210526 |
fabioz/PyDev.Debugger
|
pydevd_attach_to_process/winappdbg/breakpoint.py
|
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/breakpoint.py#L3263-L3291
|
def enable_process_breakpoints(self, dwProcessId):
"""
Enables all disabled breakpoints for the given process.
@type dwProcessId: int
@param dwProcessId: Process global ID.
"""
# enable code breakpoints
for bp in self.get_process_code_breakpoints(dwProcessId):
if bp.is_disabled():
self.enable_code_breakpoint(dwProcessId, bp.get_address())
# enable page breakpoints
for bp in self.get_process_page_breakpoints(dwProcessId):
if bp.is_disabled():
self.enable_page_breakpoint(dwProcessId, bp.get_address())
# enable hardware breakpoints
if self.system.has_process(dwProcessId):
aProcess = self.system.get_process(dwProcessId)
else:
aProcess = Process(dwProcessId)
aProcess.scan_threads()
for aThread in aProcess.iter_threads():
dwThreadId = aThread.get_tid()
for bp in self.get_thread_hardware_breakpoints(dwThreadId):
if bp.is_disabled():
self.enable_hardware_breakpoint(dwThreadId, bp.get_address())
|
[
"def",
"enable_process_breakpoints",
"(",
"self",
",",
"dwProcessId",
")",
":",
"# enable code breakpoints",
"for",
"bp",
"in",
"self",
".",
"get_process_code_breakpoints",
"(",
"dwProcessId",
")",
":",
"if",
"bp",
".",
"is_disabled",
"(",
")",
":",
"self",
".",
"enable_code_breakpoint",
"(",
"dwProcessId",
",",
"bp",
".",
"get_address",
"(",
")",
")",
"# enable page breakpoints",
"for",
"bp",
"in",
"self",
".",
"get_process_page_breakpoints",
"(",
"dwProcessId",
")",
":",
"if",
"bp",
".",
"is_disabled",
"(",
")",
":",
"self",
".",
"enable_page_breakpoint",
"(",
"dwProcessId",
",",
"bp",
".",
"get_address",
"(",
")",
")",
"# enable hardware breakpoints",
"if",
"self",
".",
"system",
".",
"has_process",
"(",
"dwProcessId",
")",
":",
"aProcess",
"=",
"self",
".",
"system",
".",
"get_process",
"(",
"dwProcessId",
")",
"else",
":",
"aProcess",
"=",
"Process",
"(",
"dwProcessId",
")",
"aProcess",
".",
"scan_threads",
"(",
")",
"for",
"aThread",
"in",
"aProcess",
".",
"iter_threads",
"(",
")",
":",
"dwThreadId",
"=",
"aThread",
".",
"get_tid",
"(",
")",
"for",
"bp",
"in",
"self",
".",
"get_thread_hardware_breakpoints",
"(",
"dwThreadId",
")",
":",
"if",
"bp",
".",
"is_disabled",
"(",
")",
":",
"self",
".",
"enable_hardware_breakpoint",
"(",
"dwThreadId",
",",
"bp",
".",
"get_address",
"(",
")",
")"
] |
Enables all disabled breakpoints for the given process.
@type dwProcessId: int
@param dwProcessId: Process global ID.
|
[
"Enables",
"all",
"disabled",
"breakpoints",
"for",
"the",
"given",
"process",
"."
] |
python
|
train
| 39.034483 |
panosl/django-currencies
|
currencies/management/commands/updatecurrencies.py
|
https://github.com/panosl/django-currencies/blob/8d4c6c202ad7c4cc06263ab2c1b1f969bbe99acd/currencies/management/commands/updatecurrencies.py#L23-L40
|
def get_base(self, option):
"""
Parse the base command option. Can be supplied as a 3 character code or a settings variable name
If base is not supplied, looks for settings CURRENCIES_BASE and SHOP_DEFAULT_CURRENCY
"""
if option:
if option.isupper():
if len(option) > 3:
return getattr(settings, option), True
elif len(option) == 3:
return option, True
raise ImproperlyConfigured("Invalid currency code found: %s" % option)
for attr in ('CURRENCIES_BASE', 'SHOP_DEFAULT_CURRENCY'):
try:
return getattr(settings, attr), True
except AttributeError:
continue
return 'USD', False
|
[
"def",
"get_base",
"(",
"self",
",",
"option",
")",
":",
"if",
"option",
":",
"if",
"option",
".",
"isupper",
"(",
")",
":",
"if",
"len",
"(",
"option",
")",
">",
"3",
":",
"return",
"getattr",
"(",
"settings",
",",
"option",
")",
",",
"True",
"elif",
"len",
"(",
"option",
")",
"==",
"3",
":",
"return",
"option",
",",
"True",
"raise",
"ImproperlyConfigured",
"(",
"\"Invalid currency code found: %s\"",
"%",
"option",
")",
"for",
"attr",
"in",
"(",
"'CURRENCIES_BASE'",
",",
"'SHOP_DEFAULT_CURRENCY'",
")",
":",
"try",
":",
"return",
"getattr",
"(",
"settings",
",",
"attr",
")",
",",
"True",
"except",
"AttributeError",
":",
"continue",
"return",
"'USD'",
",",
"False"
] |
Parse the base command option. Can be supplied as a 3 character code or a settings variable name
If base is not supplied, looks for settings CURRENCIES_BASE and SHOP_DEFAULT_CURRENCY
|
[
"Parse",
"the",
"base",
"command",
"option",
".",
"Can",
"be",
"supplied",
"as",
"a",
"3",
"character",
"code",
"or",
"a",
"settings",
"variable",
"name",
"If",
"base",
"is",
"not",
"supplied",
"looks",
"for",
"settings",
"CURRENCIES_BASE",
"and",
"SHOP_DEFAULT_CURRENCY"
] |
python
|
train
| 42.555556 |
RudolfCardinal/pythonlib
|
cardinal_pythonlib/sqlalchemy/orm_inspect.py
|
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/orm_inspect.py#L523-L557
|
def gen_columns(obj) -> Generator[Tuple[str, Column], None, None]:
"""
Asks a SQLAlchemy ORM object: "what are your SQLAlchemy columns?"
Yields tuples of ``(attr_name, Column)`` from an SQLAlchemy ORM object
instance. Also works with the corresponding SQLAlchemy ORM class. Examples:
.. code-block:: python
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql.schema import Column
from sqlalchemy.sql.sqltypes import Integer
Base = declarative_base()
class MyClass(Base):
__tablename__ = "mytable"
pk = Column("pk", Integer, primary_key=True, autoincrement=True)
a = Column("a", Integer)
x = MyClass()
list(gen_columns(x))
list(gen_columns(MyClass))
"""
mapper = obj.__mapper__ # type: Mapper
assert mapper, "gen_columns called on {!r} which is not an " \
"SQLAlchemy ORM object".format(obj)
colmap = mapper.columns # type: OrderedProperties
if not colmap:
return
for attrname, column in colmap.items():
# NB: column.name is the SQL column name, not the attribute name
yield attrname, column
|
[
"def",
"gen_columns",
"(",
"obj",
")",
"->",
"Generator",
"[",
"Tuple",
"[",
"str",
",",
"Column",
"]",
",",
"None",
",",
"None",
"]",
":",
"mapper",
"=",
"obj",
".",
"__mapper__",
"# type: Mapper",
"assert",
"mapper",
",",
"\"gen_columns called on {!r} which is not an \"",
"\"SQLAlchemy ORM object\"",
".",
"format",
"(",
"obj",
")",
"colmap",
"=",
"mapper",
".",
"columns",
"# type: OrderedProperties",
"if",
"not",
"colmap",
":",
"return",
"for",
"attrname",
",",
"column",
"in",
"colmap",
".",
"items",
"(",
")",
":",
"# NB: column.name is the SQL column name, not the attribute name",
"yield",
"attrname",
",",
"column"
] |
Asks a SQLAlchemy ORM object: "what are your SQLAlchemy columns?"
Yields tuples of ``(attr_name, Column)`` from an SQLAlchemy ORM object
instance. Also works with the corresponding SQLAlchemy ORM class. Examples:
.. code-block:: python
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql.schema import Column
from sqlalchemy.sql.sqltypes import Integer
Base = declarative_base()
class MyClass(Base):
__tablename__ = "mytable"
pk = Column("pk", Integer, primary_key=True, autoincrement=True)
a = Column("a", Integer)
x = MyClass()
list(gen_columns(x))
list(gen_columns(MyClass))
|
[
"Asks",
"a",
"SQLAlchemy",
"ORM",
"object",
":",
"what",
"are",
"your",
"SQLAlchemy",
"columns?"
] |
python
|
train
| 33.628571 |
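The doctest in the record above can be exercised directly against `mapper.columns`; a runnable sketch under SQLAlchemy 1.x, where the `a_col` SQL name is an illustrative addition showing that attribute and column names can differ:

from sqlalchemy import Column, Integer
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class MyClass(Base):
    __tablename__ = "mytable"
    pk = Column("pk", Integer, primary_key=True, autoincrement=True)
    a = Column("a_col", Integer)  # attribute 'a', SQL column 'a_col'

# mapper.columns is keyed by attribute name, which is what gen_columns yields.
for attrname, column in MyClass.__mapper__.columns.items():
    print(attrname, "->", column.name)  # pk -> pk, a -> a_col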
MartinThoma/hwrt
|
hwrt/datasets/crohme_eval.py
|
https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/datasets/crohme_eval.py#L68-L95
|
def generate_output_csv(evaluation_results, filename='results.csv'):
"""Generate the evaluation results in the format
Parameters
----------
evaluation_results : list of dictionaries
Each dictionary contains the keys 'filename' and 'results', where
'results' itself is a list of dictionaries. Each of the results has
the keys 'latex' and 'probability'
Examples
--------
MfrDB3907_85801, a, b, c, d, e, f, g, h, i, j
scores, 1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1
MfrDB3907_85802, 1, |, l, COMMA, junk, x, X, \times
scores, 10, 8.001, 2, 0.5, 0.1, 0,-0.5, -1, -100
"""
with open(filename, 'w') as f:
for result in evaluation_results:
for i, entry in enumerate(result['results']):
if entry['semantics'] == ',':
                result['results'][i]['semantics'] = 'COMMA'
f.write("%s, " % result['filename'])
f.write(", ".join([entry['semantics'] for entry in result['results']]))
f.write("\n")
f.write("%s, " % "scores")
f.write(", ".join([str(entry['probability']) for entry in result['results']]))
f.write("\n")
|
[
"def",
"generate_output_csv",
"(",
"evaluation_results",
",",
"filename",
"=",
"'results.csv'",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'w'",
")",
"as",
"f",
":",
"for",
"result",
"in",
"evaluation_results",
":",
"for",
"i",
",",
"entry",
"in",
"enumerate",
"(",
"result",
"[",
"'results'",
"]",
")",
":",
"if",
"entry",
"[",
"'semantics'",
"]",
"==",
"','",
":",
"result",
"[",
"'results'",
"]",
"[",
"'semantics'",
"]",
"=",
"'COMMA'",
"f",
".",
"write",
"(",
"\"%s, \"",
"%",
"result",
"[",
"'filename'",
"]",
")",
"f",
".",
"write",
"(",
"\", \"",
".",
"join",
"(",
"[",
"entry",
"[",
"'semantics'",
"]",
"for",
"entry",
"in",
"result",
"[",
"'results'",
"]",
"]",
")",
")",
"f",
".",
"write",
"(",
"\"\\n\"",
")",
"f",
".",
"write",
"(",
"\"%s, \"",
"%",
"\"scores\"",
")",
"f",
".",
"write",
"(",
"\", \"",
".",
"join",
"(",
"[",
"str",
"(",
"entry",
"[",
"'probability'",
"]",
")",
"for",
"entry",
"in",
"result",
"[",
"'results'",
"]",
"]",
")",
")",
"f",
".",
"write",
"(",
"\"\\n\"",
")"
] |
Generate the evaluation results in the format
Parameters
----------
evaluation_results : list of dictionaries
Each dictionary contains the keys 'filename' and 'results', where
'results' itself is a list of dictionaries. Each of the results has
the keys 'latex' and 'probability'
Examples
--------
MfrDB3907_85801, a, b, c, d, e, f, g, h, i, j
scores, 1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1
MfrDB3907_85802, 1, |, l, COMMA, junk, x, X, \times
scores, 10, 8.001, 2, 0.5, 0.1, 0,-0.5, -1, -100
|
[
"Generate",
"the",
"evaluation",
"results",
"in",
"the",
"format"
] |
python
|
train
| 41.928571 |
Ex-Mente/auxi.0
|
auxi/modelling/process/materials/thermo.py
|
https://github.com/Ex-Mente/auxi.0/blob/2dcdae74154f136f8ca58289fe5b20772f215046/auxi/modelling/process/materials/thermo.py#L1535-L1544
|
def T(self, T):
"""
Set the temperature of the stream to the specified value, and
        recalculate its enthalpy.
:param T: Temperature. [°C]
"""
self._T = T
self._Hfr = self._calculate_Hfr(T)
|
[
"def",
"T",
"(",
"self",
",",
"T",
")",
":",
"self",
".",
"_T",
"=",
"T",
"self",
".",
"_Hfr",
"=",
"self",
".",
"_calculate_Hfr",
"(",
"T",
")"
] |
Set the temperature of the stream to the specified value, and
recalculate its enthalpy.
:param T: Temperature. [°C]
|
[
"Set",
"the",
"temperature",
"of",
"the",
"stream",
"to",
"the",
"specified",
"value",
"and",
"recalculate",
"it",
"s",
"enthalpy",
"."
] |
python
|
valid
| 23.6 |
SatelliteQE/nailgun
|
nailgun/entities.py
|
https://github.com/SatelliteQE/nailgun/blob/c36d8c20862e87bf6975bd48ac1ca40a9e634eaa/nailgun/entities.py#L1116-L1121
|
def update_payload(self, fields=None):
"""Wrap submitted data within an extra dict."""
payload = super(DiscoveryRule, self).update_payload(fields)
if 'search_' in payload:
payload['search'] = payload.pop('search_')
return {u'discovery_rule': payload}
|
[
"def",
"update_payload",
"(",
"self",
",",
"fields",
"=",
"None",
")",
":",
"payload",
"=",
"super",
"(",
"DiscoveryRule",
",",
"self",
")",
".",
"update_payload",
"(",
"fields",
")",
"if",
"'search_'",
"in",
"payload",
":",
"payload",
"[",
"'search'",
"]",
"=",
"payload",
".",
"pop",
"(",
"'search_'",
")",
"return",
"{",
"u'discovery_rule'",
":",
"payload",
"}"
] |
Wrap submitted data within an extra dict.
|
[
"Wrap",
"submitted",
"data",
"within",
"an",
"extra",
"dict",
"."
] |
python
|
train
| 48.166667 |
aconrad/pycobertura
|
pycobertura/cobertura.py
|
https://github.com/aconrad/pycobertura/blob/26e472f1424f5cd499c42232dc5ee12e4042806f/pycobertura/cobertura.py#L93-L103
|
def line_rate(self, filename=None):
"""
Return the global line rate of the coverage report. If the
`filename` file is given, return the line rate of the file.
"""
if filename is None:
el = self.xml
else:
el = self._get_class_element_by_filename(filename)
return float(el.attrib['line-rate'])
|
[
"def",
"line_rate",
"(",
"self",
",",
"filename",
"=",
"None",
")",
":",
"if",
"filename",
"is",
"None",
":",
"el",
"=",
"self",
".",
"xml",
"else",
":",
"el",
"=",
"self",
".",
"_get_class_element_by_filename",
"(",
"filename",
")",
"return",
"float",
"(",
"el",
".",
"attrib",
"[",
"'line-rate'",
"]",
")"
] |
Return the global line rate of the coverage report. If the
`filename` file is given, return the line rate of the file.
|
[
"Return",
"the",
"global",
"line",
"rate",
"of",
"the",
"coverage",
"report",
".",
"If",
"the",
"filename",
"file",
"is",
"given",
"return",
"the",
"line",
"rate",
"of",
"the",
"file",
"."
] |
python
|
train
| 32.909091 |
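A small illustration of the same lookup with the standard library, using a made-up two-line Cobertura report (real reports put a global `line-rate` on `<coverage>` and a per-class one on each `<class>` element):

import xml.etree.ElementTree as ET

REPORT = """<coverage line-rate="0.75">
  <packages><package><classes>
    <class filename="foo.py" line-rate="0.5"/>
  </classes></package></packages>
</coverage>"""

xml = ET.fromstring(REPORT)
print(float(xml.attrib["line-rate"]))   # 0.75 -- global rate
cls = xml.find(".//class[@filename='foo.py']")
print(float(cls.attrib["line-rate"]))   # 0.5 -- per-file rate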
bitesofcode/projex
|
projex/scaffold.py
|
https://github.com/bitesofcode/projex/blob/d31743ec456a41428709968ab11a2cf6c6c76247/projex/scaffold.py#L432-L459
|
def uifile(self):
"""
Returns the uifile for this scaffold.
:return <str>
"""
output = ''
# build from a zip file
if zipfile.is_zipfile(self.source()):
zfile = zipfile.ZipFile(self.source(), 'r')
if 'properties.ui' in zfile.namelist():
tempdir = tempfile.gettempdir()
output = os.path.join(tempdir,
'{0}_properties.ui'.format(self.name()))
f = open(output, 'w')
f.write(zfile.read('properties.ui'))
f.close()
zfile.close()
else:
uifile = os.path.join(os.path.dirname(self.source()),
'properties.ui')
if os.path.exists(uifile):
output = uifile
return output
|
[
"def",
"uifile",
"(",
"self",
")",
":",
"output",
"=",
"''",
"# build from a zip file",
"if",
"zipfile",
".",
"is_zipfile",
"(",
"self",
".",
"source",
"(",
")",
")",
":",
"zfile",
"=",
"zipfile",
".",
"ZipFile",
"(",
"self",
".",
"source",
"(",
")",
",",
"'r'",
")",
"if",
"'properties.ui'",
"in",
"zfile",
".",
"namelist",
"(",
")",
":",
"tempdir",
"=",
"tempfile",
".",
"gettempdir",
"(",
")",
"output",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tempdir",
",",
"'{0}_properties.ui'",
".",
"format",
"(",
"self",
".",
"name",
"(",
")",
")",
")",
"f",
"=",
"open",
"(",
"output",
",",
"'w'",
")",
"f",
".",
"write",
"(",
"zfile",
".",
"read",
"(",
"'properties.ui'",
")",
")",
"f",
".",
"close",
"(",
")",
"zfile",
".",
"close",
"(",
")",
"else",
":",
"uifile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"self",
".",
"source",
"(",
")",
")",
",",
"'properties.ui'",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"uifile",
")",
":",
"output",
"=",
"uifile",
"return",
"output"
] |
Returns the uifile for this scaffold.
:return <str>
|
[
"Returns",
"the",
"uifile",
"for",
"this",
"scaffold",
".",
":",
"return",
"<str",
">"
] |
python
|
train
| 30.214286 |
pyvisa/pyvisa
|
pyvisa/resources/resource.py
|
https://github.com/pyvisa/pyvisa/blob/b8b2d4371e1f00782856aa9176ff1ced6bcb3798/pyvisa/resources/resource.py#L293-L301
|
def uninstall_handler(self, event_type, handler, user_handle=None):
"""Uninstalls handlers for events in this resource.
:param event_type: Logical event identifier.
:param handler: Interpreted as a valid reference to a handler to be uninstalled by a client application.
:param user_handle: The user handle (ctypes object or None) returned by install_handler.
"""
self.visalib.uninstall_visa_handler(self.session, event_type, handler, user_handle)
|
[
"def",
"uninstall_handler",
"(",
"self",
",",
"event_type",
",",
"handler",
",",
"user_handle",
"=",
"None",
")",
":",
"self",
".",
"visalib",
".",
"uninstall_visa_handler",
"(",
"self",
".",
"session",
",",
"event_type",
",",
"handler",
",",
"user_handle",
")"
] |
Uninstalls handlers for events in this resource.
:param event_type: Logical event identifier.
:param handler: Interpreted as a valid reference to a handler to be uninstalled by a client application.
:param user_handle: The user handle (ctypes object or None) returned by install_handler.
|
[
"Uninstalls",
"handlers",
"for",
"events",
"in",
"this",
"resource",
"."
] |
python
|
train
| 54.222222 |
higlass/higlass-python
|
higlass/viewer.py
|
https://github.com/higlass/higlass-python/blob/0a5bf2759cc0020844aefbf0df4f9e8f9137a0b7/higlass/viewer.py#L45-L73
|
def view(tilesets):
'''
Create a higlass viewer that displays the specified tilesets
Parameters:
-----------
Returns
-------
    The created View object (with its server attached)
'''
from .server import Server
from .client import View
curr_view = View()
server = Server()
server.start(tilesets)
for ts in tilesets:
if (ts.track_type is not None
and ts.track_position is not None):
curr_view.add_track(ts.track_type,
ts.track_position,
api_url=server.api_address,
tileset_uuid=ts.uuid,
)
curr_view.server = server
return curr_view
|
[
"def",
"view",
"(",
"tilesets",
")",
":",
"from",
".",
"server",
"import",
"Server",
"from",
".",
"client",
"import",
"View",
"curr_view",
"=",
"View",
"(",
")",
"server",
"=",
"Server",
"(",
")",
"server",
".",
"start",
"(",
"tilesets",
")",
"for",
"ts",
"in",
"tilesets",
":",
"if",
"(",
"ts",
".",
"track_type",
"is",
"not",
"None",
"and",
"ts",
".",
"track_position",
"is",
"not",
"None",
")",
":",
"curr_view",
".",
"add_track",
"(",
"ts",
".",
"track_type",
",",
"ts",
".",
"track_position",
",",
"api_url",
"=",
"server",
".",
"api_address",
",",
"tileset_uuid",
"=",
"ts",
".",
"uuid",
",",
")",
"curr_view",
".",
"server",
"=",
"server",
"return",
"curr_view"
] |
Create a higlass viewer that displays the specified tilesets
Parameters:
-----------
Returns
-------
The created View object (with its server attached)
|
[
"Create",
"a",
"higlass",
"viewer",
"that",
"displays",
"the",
"specified",
"tilesets"
] |
python
|
train
| 22.068966 |
oanda/v20-python
|
src/v20/user.py
|
https://github.com/oanda/v20-python/blob/f28192f4a31bce038cf6dfa302f5878bec192fe5/src/v20/user.py#L140-L228
|
def get_info(
self,
userSpecifier,
**kwargs
):
"""
Fetch the user information for the specified user. This endpoint is
intended to be used by the user themself to obtain their own
information.
Args:
userSpecifier:
The User Specifier
Returns:
v20.response.Response containing the results from submitting the
request
"""
request = Request(
'GET',
'/v3/users/{userSpecifier}'
)
request.set_path_param(
'userSpecifier',
userSpecifier
)
response = self.ctx.request(request)
if response.content_type is None:
return response
if not response.content_type.startswith("application/json"):
return response
jbody = json.loads(response.raw_body)
parsed_body = {}
#
# Parse responses as defined by the API specification
#
if str(response.status) == "200":
if jbody.get('userInfo') is not None:
parsed_body['userInfo'] = \
self.ctx.user.UserInfo.from_dict(
jbody['userInfo'],
self.ctx
)
elif str(response.status) == "401":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
elif str(response.status) == "403":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
elif str(response.status) == "405":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
#
# Unexpected response status
#
else:
parsed_body = jbody
response.body = parsed_body
return response
|
[
"def",
"get_info",
"(",
"self",
",",
"userSpecifier",
",",
"*",
"*",
"kwargs",
")",
":",
"request",
"=",
"Request",
"(",
"'GET'",
",",
"'/v3/users/{userSpecifier}'",
")",
"request",
".",
"set_path_param",
"(",
"'userSpecifier'",
",",
"userSpecifier",
")",
"response",
"=",
"self",
".",
"ctx",
".",
"request",
"(",
"request",
")",
"if",
"response",
".",
"content_type",
"is",
"None",
":",
"return",
"response",
"if",
"not",
"response",
".",
"content_type",
".",
"startswith",
"(",
"\"application/json\"",
")",
":",
"return",
"response",
"jbody",
"=",
"json",
".",
"loads",
"(",
"response",
".",
"raw_body",
")",
"parsed_body",
"=",
"{",
"}",
"#",
"# Parse responses as defined by the API specification",
"#",
"if",
"str",
"(",
"response",
".",
"status",
")",
"==",
"\"200\"",
":",
"if",
"jbody",
".",
"get",
"(",
"'userInfo'",
")",
"is",
"not",
"None",
":",
"parsed_body",
"[",
"'userInfo'",
"]",
"=",
"self",
".",
"ctx",
".",
"user",
".",
"UserInfo",
".",
"from_dict",
"(",
"jbody",
"[",
"'userInfo'",
"]",
",",
"self",
".",
"ctx",
")",
"elif",
"str",
"(",
"response",
".",
"status",
")",
"==",
"\"401\"",
":",
"if",
"jbody",
".",
"get",
"(",
"'errorCode'",
")",
"is",
"not",
"None",
":",
"parsed_body",
"[",
"'errorCode'",
"]",
"=",
"jbody",
".",
"get",
"(",
"'errorCode'",
")",
"if",
"jbody",
".",
"get",
"(",
"'errorMessage'",
")",
"is",
"not",
"None",
":",
"parsed_body",
"[",
"'errorMessage'",
"]",
"=",
"jbody",
".",
"get",
"(",
"'errorMessage'",
")",
"elif",
"str",
"(",
"response",
".",
"status",
")",
"==",
"\"403\"",
":",
"if",
"jbody",
".",
"get",
"(",
"'errorCode'",
")",
"is",
"not",
"None",
":",
"parsed_body",
"[",
"'errorCode'",
"]",
"=",
"jbody",
".",
"get",
"(",
"'errorCode'",
")",
"if",
"jbody",
".",
"get",
"(",
"'errorMessage'",
")",
"is",
"not",
"None",
":",
"parsed_body",
"[",
"'errorMessage'",
"]",
"=",
"jbody",
".",
"get",
"(",
"'errorMessage'",
")",
"elif",
"str",
"(",
"response",
".",
"status",
")",
"==",
"\"405\"",
":",
"if",
"jbody",
".",
"get",
"(",
"'errorCode'",
")",
"is",
"not",
"None",
":",
"parsed_body",
"[",
"'errorCode'",
"]",
"=",
"jbody",
".",
"get",
"(",
"'errorCode'",
")",
"if",
"jbody",
".",
"get",
"(",
"'errorMessage'",
")",
"is",
"not",
"None",
":",
"parsed_body",
"[",
"'errorMessage'",
"]",
"=",
"jbody",
".",
"get",
"(",
"'errorMessage'",
")",
"#",
"# Unexpected response status",
"#",
"else",
":",
"parsed_body",
"=",
"jbody",
"response",
".",
"body",
"=",
"parsed_body",
"return",
"response"
] |
Fetch the user information for the specified user. This endpoint is
intended to be used by the user themself to obtain their own
information.
Args:
userSpecifier:
The User Specifier
Returns:
v20.response.Response containing the results from submitting the
request
|
[
"Fetch",
"the",
"user",
"information",
"for",
"the",
"specified",
"user",
".",
"This",
"endpoint",
"is",
"intended",
"to",
"be",
"used",
"by",
"the",
"user",
"themself",
"to",
"obtain",
"their",
"own",
"information",
"."
] |
python
|
train
| 26.831461 |
adfinis-sygroup/freeze
|
freeze/xfreeze.py
|
https://github.com/adfinis-sygroup/freeze/blob/61b4fab8a90ed76d685448723baaa57e2bbd5ef9/freeze/xfreeze.py#L752-L823
|
def tree_diff(a, b, n=5, sort=False):
"""Dump any data-structure or object, traverse
it depth-first in-order and apply a unified diff.
    Depth-first in-order is just like the structure would be printed.
:param a: data_structure a
:param b: data_structure b
:param n: lines of context
:type n: int
:param sort: sort the data-structure
ATTENTION: Sorting means changing the data-structure. The test-result may
differ. But in case of dictionaries the results become comparable because
the sorting negates the hash-algorithms "de-sorting".
>>> a = recursive_sort(freeze([
... 'a',
... [3, 4],
... {'a': [3, {'w' : set([4, '3', frozenset([3,5,2])])}]},
... []
... ]))
>>> b = recursive_sort(freeze([
... 'a',
... [7, 3],
... {'a': [3, {'w' : set([4, '3', frozenset([2,5,3])])}]},
... []
... ]))
>>> transparent_repr("\\n".join(tree_diff(a, b).split("\\n")[2:]))
@@ -7,6 +7,6 @@
'w'),),
3),
'a'),),
'a',
(3,
- 4))
+ 7))
>>> a = [
... 'a',
... [3, 4],
... {'a': [3, {'w' : set([4, '3', frozenset([3,5,2])])}]},
... []
... ]
>>> b = [
... 'a',
... [7, 3],
... {'a': [3, {'w' : set([4, '3', frozenset([2,5,3])])}]},
... []
... ]
>>> transparent_repr("\\n".join(
... tree_diff(a, b, sort=True
... ).split("\\n")[2:]))
@@ -11,6 +11,6 @@
'3',
4)]),)],
3)),)],
'a',
(3,
- 4))
+ 7))
"""
a = dump(a)
b = dump(b)
if not sort:
a = vformat(a).split("\n")
b = vformat(b).split("\n")
else:
a = vformat(recursive_sort(a)).split("\n")
b = vformat(recursive_sort(b)).split("\n")
return "\n".join(difflib.unified_diff(a, b, n=n, lineterm=""))
|
[
"def",
"tree_diff",
"(",
"a",
",",
"b",
",",
"n",
"=",
"5",
",",
"sort",
"=",
"False",
")",
":",
"a",
"=",
"dump",
"(",
"a",
")",
"b",
"=",
"dump",
"(",
"b",
")",
"if",
"not",
"sort",
":",
"a",
"=",
"vformat",
"(",
"a",
")",
".",
"split",
"(",
"\"\\n\"",
")",
"b",
"=",
"vformat",
"(",
"b",
")",
".",
"split",
"(",
"\"\\n\"",
")",
"else",
":",
"a",
"=",
"vformat",
"(",
"recursive_sort",
"(",
"a",
")",
")",
".",
"split",
"(",
"\"\\n\"",
")",
"b",
"=",
"vformat",
"(",
"recursive_sort",
"(",
"b",
")",
")",
".",
"split",
"(",
"\"\\n\"",
")",
"return",
"\"\\n\"",
".",
"join",
"(",
"difflib",
".",
"unified_diff",
"(",
"a",
",",
"b",
",",
"n",
"=",
"n",
",",
"lineterm",
"=",
"\"\"",
")",
")"
] |
Dump any data-structure or object, traverse
it depth-first in-order and apply a unified diff.
Depth-first in-order is just like the structure would be printed.
:param a: data_structure a
:param b: data_structure b
:param n: lines of context
:type n: int
:param sort: sort the data-structure
ATTENTION: Sorting means changing the data-structure. The test-result may
differ. But in case of dictionaries the results become comparable because
the sorting negates the hash-algorithms "de-sorting".
>>> a = recursive_sort(freeze([
... 'a',
... [3, 4],
... {'a': [3, {'w' : set([4, '3', frozenset([3,5,2])])}]},
... []
... ]))
>>> b = recursive_sort(freeze([
... 'a',
... [7, 3],
... {'a': [3, {'w' : set([4, '3', frozenset([2,5,3])])}]},
... []
... ]))
>>> transparent_repr("\\n".join(tree_diff(a, b).split("\\n")[2:]))
@@ -7,6 +7,6 @@
'w'),),
3),
'a'),),
'a',
(3,
- 4))
+ 7))
>>> a = [
... 'a',
... [3, 4],
... {'a': [3, {'w' : set([4, '3', frozenset([3,5,2])])}]},
... []
... ]
>>> b = [
... 'a',
... [7, 3],
... {'a': [3, {'w' : set([4, '3', frozenset([2,5,3])])}]},
... []
... ]
>>> transparent_repr("\\n".join(
... tree_diff(a, b, sort=True
... ).split("\\n")[2:]))
@@ -11,6 +11,6 @@
'3',
4)]),)],
3)),)],
'a',
(3,
- 4))
+ 7))
|
[
"Dump",
"any",
"data",
"-",
"structure",
"or",
"object",
"traverse",
"it",
"depth",
"-",
"first",
"in",
"-",
"order",
"and",
"apply",
"a",
"unified",
"diff",
"."
] |
python
|
train
| 26.458333 |
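The diff-of-pretty-printed-lines idea above works with just the standard library; a sketch using `pprint` in place of the package's `vformat`/`recursive_sort` helpers (the `dict_diff` name is illustrative):

import difflib
import pprint

def dict_diff(a, b, n=5):
    # pformat() sorts dict keys, giving both sides a stable line layout
    # before the unified diff is applied.
    a_lines = pprint.pformat(a).split("\n")
    b_lines = pprint.pformat(b).split("\n")
    return "\n".join(difflib.unified_diff(a_lines, b_lines, n=n, lineterm=""))

print(dict_diff({"a": [3, 4], "w": {2, 3, 5}},
                {"a": [7, 3], "w": {2, 3, 5}}))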
jpscaletti/moar
|
moar/optimage.py
|
https://github.com/jpscaletti/moar/blob/22694e5671b6adaccc4c9c87db7bdd701d20e734/moar/optimage.py#L67-L77
|
def _temporary_filenames(total):
"""Context manager to create temporary files and remove them after use."""
temp_files = [_get_temporary_filename('optimage-') for i in range(total)]
yield temp_files
for temp_file in temp_files:
try:
os.remove(temp_file)
except OSError:
# Continue in case we could not remove the file. One reason is that
            # the file was never created.
pass
|
[
"def",
"_temporary_filenames",
"(",
"total",
")",
":",
"temp_files",
"=",
"[",
"_get_temporary_filename",
"(",
"'optimage-'",
")",
"for",
"i",
"in",
"range",
"(",
"total",
")",
"]",
"yield",
"temp_files",
"for",
"temp_file",
"in",
"temp_files",
":",
"try",
":",
"os",
".",
"remove",
"(",
"temp_file",
")",
"except",
"OSError",
":",
"# Continue in case we could not remove the file. One reason is that",
"# the fail was never created.",
"pass"
] |
Context manager to create temporary files and remove them after use.
|
[
"Context",
"manager",
"to",
"create",
"temporary",
"files",
"and",
"remove",
"them",
"after",
"use",
"."
] |
python
|
train
| 40.181818 |
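Presumably the function above is decorated with `contextlib.contextmanager` (only the body appears in the record); a self-contained version of the same create-then-always-remove pattern, with `tempfile.mkstemp` standing in for the package's private `_get_temporary_filename` helper:

import contextlib
import os
import tempfile

@contextlib.contextmanager
def temporary_filenames(total):
    """Yield `total` temp file paths and remove them after use."""
    paths = []
    for _ in range(total):
        fd, path = tempfile.mkstemp(prefix="optimage-")
        os.close(fd)  # only the name is needed, not an open handle
        paths.append(path)
    try:
        yield paths
    finally:
        for path in paths:
            try:
                os.remove(path)
            except OSError:
                pass  # tolerate files a consumer removed or never wrote

with temporary_filenames(2) as (src, dst):
    print(src, dst)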
Ex-Mente/auxi.0
|
auxi/modelling/process/materials/thermo.py
|
https://github.com/Ex-Mente/auxi.0/blob/2dcdae74154f136f8ca58289fe5b20772f215046/auxi/modelling/process/materials/thermo.py#L1642-L1653
|
def get_compound_afrs(self):
"""
Determine the amount flow rates of all the compounds.
:returns: List of amount flow rates. [kmol/h]
"""
result = self._compound_mfrs * 1.0
for compound in self.material.compounds:
index = self.material.get_compound_index(compound)
result[index] = stoich.amount(compound, result[index])
return result
|
[
"def",
"get_compound_afrs",
"(",
"self",
")",
":",
"result",
"=",
"self",
".",
"_compound_mfrs",
"*",
"1.0",
"for",
"compound",
"in",
"self",
".",
"material",
".",
"compounds",
":",
"index",
"=",
"self",
".",
"material",
".",
"get_compound_index",
"(",
"compound",
")",
"result",
"[",
"index",
"]",
"=",
"stoich",
".",
"amount",
"(",
"compound",
",",
"result",
"[",
"index",
"]",
")",
"return",
"result"
] |
Determine the amount flow rates of all the compounds.
:returns: List of amount flow rates. [kmol/h]
|
[
"Determine",
"the",
"amount",
"flow",
"rates",
"of",
"all",
"the",
"compounds",
"."
] |
python
|
valid
| 33.583333 |
androguard/androguard
|
androguard/core/bytecodes/dvm.py
|
https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/bytecodes/dvm.py#L6621-L6628
|
def show(self):
"""
Display (with a pretty print) this object
"""
off = 0
for n, i in enumerate(self.get_instructions()):
print("{:8d} (0x{:08x}) {:04x} {:30} {}".format(n, off, i.get_op_value(), i.get_name(), i.get_output(self.idx)))
off += i.get_length()
|
[
"def",
"show",
"(",
"self",
")",
":",
"off",
"=",
"0",
"for",
"n",
",",
"i",
"in",
"enumerate",
"(",
"self",
".",
"get_instructions",
"(",
")",
")",
":",
"print",
"(",
"\"{:8d} (0x{:08x}) {:04x} {:30} {}\"",
".",
"format",
"(",
"n",
",",
"off",
",",
"i",
".",
"get_op_value",
"(",
")",
",",
"i",
".",
"get_name",
"(",
")",
",",
"i",
".",
"get_output",
"(",
"self",
".",
"idx",
")",
")",
")",
"off",
"+=",
"i",
".",
"get_length",
"(",
")"
] |
Display (with a pretty print) this object
|
[
"Display",
"(",
"with",
"a",
"pretty",
"print",
")",
"this",
"object"
] |
python
|
train
| 39.125 |
tdegeus/GooseMPL
|
GooseMPL/__init__.py
|
https://github.com/tdegeus/GooseMPL/blob/16e1e06cbcf7131ac98c03ca7251ce83734ef905/GooseMPL/__init__.py#L1049-L1076
|
def histogram_cumulative(data,**kwargs):
r'''
Compute cumulative histogram.
    See `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_
:extra options:
**return_edges** ([``True``] | [``False``])
Return the bin edges if set to ``True``, return their midpoints otherwise.
**normalize** ([``False``] | ``True``)
Normalize such that the final probability is one. In this case the function returns the (binned)
cumulative probability density.
'''
return_edges = kwargs.pop('return_edges', True)
norm = kwargs.pop('normalize', False)
P, edges = np.histogram(data, **kwargs)
P = np.cumsum(P)
if norm: P = P/P[-1]
if not return_edges: edges = np.diff(edges) / 2. + edges[:-1]
return P, edges
|
[
"def",
"histogram_cumulative",
"(",
"data",
",",
"*",
"*",
"kwargs",
")",
":",
"return_edges",
"=",
"kwargs",
".",
"pop",
"(",
"'return_edges'",
",",
"True",
")",
"norm",
"=",
"kwargs",
".",
"pop",
"(",
"'normalize'",
",",
"False",
")",
"P",
",",
"edges",
"=",
"np",
".",
"histogram",
"(",
"data",
",",
"*",
"*",
"kwargs",
")",
"P",
"=",
"np",
".",
"cumsum",
"(",
"P",
")",
"if",
"norm",
":",
"P",
"=",
"P",
"/",
"P",
"[",
"-",
"1",
"]",
"if",
"not",
"return_edges",
":",
"edges",
"=",
"np",
".",
"diff",
"(",
"edges",
")",
"/",
"2.",
"+",
"edges",
"[",
":",
"-",
"1",
"]",
"return",
"P",
",",
"edges"
] |
r'''
Compute cumulative histogram.
See `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_
:extra options:
**return_edges** ([``True``] | [``False``])
Return the bin edges if set to ``True``, return their midpoints otherwise.
**normalize** ([``False``] | ``True``)
Normalize such that the final probability is one. In this case the function returns the (binned)
cumulative probability density.
|
[
"r",
"Compute",
"cumulative",
"histogram",
".",
"See",
"numpy",
".",
"histrogram",
"<https",
":",
"//",
"docs",
".",
"scipy",
".",
"org",
"/",
"doc",
"/",
"numpy",
"/",
"reference",
"/",
"generated",
"/",
"numpy",
".",
"histogram",
".",
"html",
">",
"_"
] |
python
|
train
| 26.464286 |
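The same computation with plain numpy, outside the wrapper; the bin count and the normal sample are arbitrary:

import numpy as np

data = np.random.default_rng(0).normal(size=1000)

counts, edges = np.histogram(data, bins=20)
P = np.cumsum(counts)
P = P / P[-1]                              # normalize=True: final probability is 1
mids = np.diff(edges) / 2.0 + edges[:-1]   # return_edges=False: bin midpoints

print(P[-1], len(mids) == len(P))  # 1.0 True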
pyQode/pyqode.cobol
|
pyqode/cobol/modes/indenter.py
|
https://github.com/pyQode/pyqode.cobol/blob/eedae4e320a4b2d0c44abb2c3061091321648fb7/pyqode/cobol/modes/indenter.py#L149-L178
|
def unindent(self):
"""
Un-indents text at cursor position.
"""
_logger().debug('unindent')
cursor = self.editor.textCursor()
_logger().debug('cursor has selection %r', cursor.hasSelection())
if cursor.hasSelection():
cursor.beginEditBlock()
self.unindent_selection(cursor)
cursor.endEditBlock()
self.editor.setTextCursor(cursor)
else:
tab_len = self.editor.tab_length
indentation = cursor.positionInBlock()
indentation -= self.min_column
if indentation == 0:
return
max_spaces = indentation % tab_len
if max_spaces == 0:
max_spaces = tab_len
spaces = self.count_deletable_spaces(cursor, max_spaces)
_logger().info('deleting %d space before cursor' % spaces)
cursor.beginEditBlock()
for _ in range(spaces):
cursor.deletePreviousChar()
cursor.endEditBlock()
self.editor.setTextCursor(cursor)
_logger().debug(cursor.block().text())
|
[
"def",
"unindent",
"(",
"self",
")",
":",
"_logger",
"(",
")",
".",
"debug",
"(",
"'unindent'",
")",
"cursor",
"=",
"self",
".",
"editor",
".",
"textCursor",
"(",
")",
"_logger",
"(",
")",
".",
"debug",
"(",
"'cursor has selection %r'",
",",
"cursor",
".",
"hasSelection",
"(",
")",
")",
"if",
"cursor",
".",
"hasSelection",
"(",
")",
":",
"cursor",
".",
"beginEditBlock",
"(",
")",
"self",
".",
"unindent_selection",
"(",
"cursor",
")",
"cursor",
".",
"endEditBlock",
"(",
")",
"self",
".",
"editor",
".",
"setTextCursor",
"(",
"cursor",
")",
"else",
":",
"tab_len",
"=",
"self",
".",
"editor",
".",
"tab_length",
"indentation",
"=",
"cursor",
".",
"positionInBlock",
"(",
")",
"indentation",
"-=",
"self",
".",
"min_column",
"if",
"indentation",
"==",
"0",
":",
"return",
"max_spaces",
"=",
"indentation",
"%",
"tab_len",
"if",
"max_spaces",
"==",
"0",
":",
"max_spaces",
"=",
"tab_len",
"spaces",
"=",
"self",
".",
"count_deletable_spaces",
"(",
"cursor",
",",
"max_spaces",
")",
"_logger",
"(",
")",
".",
"info",
"(",
"'deleting %d space before cursor'",
"%",
"spaces",
")",
"cursor",
".",
"beginEditBlock",
"(",
")",
"for",
"_",
"in",
"range",
"(",
"spaces",
")",
":",
"cursor",
".",
"deletePreviousChar",
"(",
")",
"cursor",
".",
"endEditBlock",
"(",
")",
"self",
".",
"editor",
".",
"setTextCursor",
"(",
"cursor",
")",
"_logger",
"(",
")",
".",
"debug",
"(",
"cursor",
".",
"block",
"(",
")",
".",
"text",
"(",
")",
")"
] |
Un-indents text at cursor position.
|
[
"Un",
"-",
"indents",
"text",
"at",
"cursor",
"position",
"."
] |
python
|
train
| 37.233333 |
housecanary/hc-api-python
|
housecanary/output.py
|
https://github.com/housecanary/hc-api-python/blob/2bb9e2208b34e8617575de45934357ee33b8531c/housecanary/output.py#L61-L89
|
def process_json_response(self, response):
"""For a json response, check if there was any error and throw exception.
Otherwise, create a housecanary.response.Response."""
response_json = response.json()
# handle errors
code_key = "code"
if code_key in response_json and response_json[code_key] != constants.HTTP_CODE_OK:
code = response_json[code_key]
message = response_json
if "message" in response_json:
message = response_json["message"]
elif "code_description" in response_json:
message = response_json["code_description"]
if code == constants.HTTP_FORBIDDEN:
raise housecanary.exceptions.UnauthorizedException(code, message)
if code == constants.HTTP_TOO_MANY_REQUESTS:
raise housecanary.exceptions.RateLimitException(code, message, response)
else:
raise housecanary.exceptions.RequestException(code, message)
request_url = response.request.url
endpoint_name = self._parse_endpoint_name_from_url(request_url)
return Response.create(endpoint_name, response_json, response)
|
[
"def",
"process_json_response",
"(",
"self",
",",
"response",
")",
":",
"response_json",
"=",
"response",
".",
"json",
"(",
")",
"# handle errors",
"code_key",
"=",
"\"code\"",
"if",
"code_key",
"in",
"response_json",
"and",
"response_json",
"[",
"code_key",
"]",
"!=",
"constants",
".",
"HTTP_CODE_OK",
":",
"code",
"=",
"response_json",
"[",
"code_key",
"]",
"message",
"=",
"response_json",
"if",
"\"message\"",
"in",
"response_json",
":",
"message",
"=",
"response_json",
"[",
"\"message\"",
"]",
"elif",
"\"code_description\"",
"in",
"response_json",
":",
"message",
"=",
"response_json",
"[",
"\"code_description\"",
"]",
"if",
"code",
"==",
"constants",
".",
"HTTP_FORBIDDEN",
":",
"raise",
"housecanary",
".",
"exceptions",
".",
"UnauthorizedException",
"(",
"code",
",",
"message",
")",
"if",
"code",
"==",
"constants",
".",
"HTTP_TOO_MANY_REQUESTS",
":",
"raise",
"housecanary",
".",
"exceptions",
".",
"RateLimitException",
"(",
"code",
",",
"message",
",",
"response",
")",
"else",
":",
"raise",
"housecanary",
".",
"exceptions",
".",
"RequestException",
"(",
"code",
",",
"message",
")",
"request_url",
"=",
"response",
".",
"request",
".",
"url",
"endpoint_name",
"=",
"self",
".",
"_parse_endpoint_name_from_url",
"(",
"request_url",
")",
"return",
"Response",
".",
"create",
"(",
"endpoint_name",
",",
"response_json",
",",
"response",
")"
] |
For a json response, check if there was any error and throw exception.
Otherwise, create a housecanary.response.Response.
|
[
"For",
"a",
"json",
"response",
"check",
"if",
"there",
"was",
"any",
"error",
"and",
"throw",
"exception",
".",
"Otherwise",
"create",
"a",
"housecanary",
".",
"response",
".",
"Response",
"."
] |
python
|
train
| 41.103448 |
codelv/enaml-native
|
src/enamlnative/android/android_dialog.py
|
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/android/android_dialog.py#L96-L108
|
def destroy(self):
""" A reimplemented destructor that cancels
the dialog before destroying.
"""
dialog = self.dialog
if dialog:
#: Clear the dismiss listener
#: (or we get an error during the callback)
dialog.setOnDismissListener(None)
dialog.dismiss()
del self.dialog
super(AndroidDialog, self).destroy()
|
[
"def",
"destroy",
"(",
"self",
")",
":",
"dialog",
"=",
"self",
".",
"dialog",
"if",
"dialog",
":",
"#: Clear the dismiss listener",
"#: (or we get an error during the callback)",
"dialog",
".",
"setOnDismissListener",
"(",
"None",
")",
"dialog",
".",
"dismiss",
"(",
")",
"del",
"self",
".",
"dialog",
"super",
"(",
"AndroidDialog",
",",
"self",
")",
".",
"destroy",
"(",
")"
] |
A reimplemented destructor that cancels
the dialog before destroying.
|
[
"A",
"reimplemented",
"destructor",
"that",
"cancels",
"the",
"dialog",
"before",
"destroying",
"."
] |
python
|
train
| 31.769231 |
reingart/gui2py
|
gui/component.py
|
https://github.com/reingart/gui2py/blob/aca0a05f6fcde55c94ad7cc058671a06608b01a4/gui/component.py#L392-L398
|
def _get_fully_qualified_name(self):
"return full parents name + self name (useful as key)"
parent_name = self._get_parent_name()
if not parent_name:
return self._name
else:
return "%s.%s" % (parent_name, self._name)
|
[
"def",
"_get_fully_qualified_name",
"(",
"self",
")",
":",
"parent_name",
"=",
"self",
".",
"_get_parent_name",
"(",
")",
"if",
"not",
"parent_name",
":",
"return",
"self",
".",
"_name",
"else",
":",
"return",
"\"%s.%s\"",
"%",
"(",
"parent_name",
",",
"self",
".",
"_name",
")"
] |
return full parent's name + self name (useful as key)
|
[
"return",
"full",
"parents",
"name",
"+",
"self",
"name",
"(",
"useful",
"as",
"key",
")"
] |
python
|
test
| 38.857143 |
tobami/littlechef
|
littlechef/lib.py
|
https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/lib.py#L31-L39
|
def _resolve_hostname(name):
"""Returns resolved hostname using the ssh config"""
if env.ssh_config is None:
return name
elif not os.path.exists(os.path.join("nodes", name + ".json")):
resolved_name = env.ssh_config.lookup(name)['hostname']
if os.path.exists(os.path.join("nodes", resolved_name + ".json")):
name = resolved_name
return name
|
[
"def",
"_resolve_hostname",
"(",
"name",
")",
":",
"if",
"env",
".",
"ssh_config",
"is",
"None",
":",
"return",
"name",
"elif",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"\"nodes\"",
",",
"name",
"+",
"\".json\"",
")",
")",
":",
"resolved_name",
"=",
"env",
".",
"ssh_config",
".",
"lookup",
"(",
"name",
")",
"[",
"'hostname'",
"]",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"\"nodes\"",
",",
"resolved_name",
"+",
"\".json\"",
")",
")",
":",
"name",
"=",
"resolved_name",
"return",
"name"
] |
Returns resolved hostname using the ssh config
|
[
"Returns",
"resolved",
"hostname",
"using",
"the",
"ssh",
"config"
] |
python
|
train
| 42.666667 |
fastai/fastai
|
fastai/torch_core.py
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/torch_core.py#L216-L221
|
def set_bn_eval(m:nn.Module)->None:
"Set bn layers in eval mode for all recursive children of `m`."
for l in m.children():
if isinstance(l, bn_types) and not next(l.parameters()).requires_grad:
l.eval()
set_bn_eval(l)
|
[
"def",
"set_bn_eval",
"(",
"m",
":",
"nn",
".",
"Module",
")",
"->",
"None",
":",
"for",
"l",
"in",
"m",
".",
"children",
"(",
")",
":",
"if",
"isinstance",
"(",
"l",
",",
"bn_types",
")",
"and",
"not",
"next",
"(",
"l",
".",
"parameters",
"(",
")",
")",
".",
"requires_grad",
":",
"l",
".",
"eval",
"(",
")",
"set_bn_eval",
"(",
"l",
")"
] |
Set bn layers in eval mode for all recursive children of `m`.
|
[
"Set",
"bn",
"layers",
"in",
"eval",
"mode",
"for",
"all",
"recursive",
"children",
"of",
"m",
"."
] |
python
|
train
| 41.333333 |
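A note on the set_bn_eval record above: it implements a common fine-tuning trick in which frozen BatchNorm layers are forced into eval mode so their running statistics stop updating. Below is a minimal self-contained sketch of the same pattern in plain PyTorch; the tiny model is hypothetical, and bn_types is assumed to be the usual BatchNorm classes.

import torch.nn as nn

bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)

def set_bn_eval(m):
    # Recurse over children; frozen BN layers are switched to eval mode
    # so running_mean/running_var stay fixed while the rest trains.
    for l in m.children():
        if isinstance(l, bn_types) and not next(l.parameters()).requires_grad:
            l.eval()
        set_bn_eval(l)

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
for p in model[1].parameters():
    p.requires_grad = False      # freeze the BN affine parameters
model.train()
set_bn_eval(model)
print(model[1].training)         # False: BN stays in eval mode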
chaoss/grimoirelab-perceval
|
perceval/backends/core/gitlab.py
|
https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backends/core/gitlab.py#L311-L322
|
def __get_award_emoji(self, item_type, item_id):
"""Get award emojis for issue/merge request"""
emojis = []
group_emojis = self.client.emojis(item_type, item_id)
for raw_emojis in group_emojis:
for emoji in json.loads(raw_emojis):
emojis.append(emoji)
return emojis
|
[
"def",
"__get_award_emoji",
"(",
"self",
",",
"item_type",
",",
"item_id",
")",
":",
"emojis",
"=",
"[",
"]",
"group_emojis",
"=",
"self",
".",
"client",
".",
"emojis",
"(",
"item_type",
",",
"item_id",
")",
"for",
"raw_emojis",
"in",
"group_emojis",
":",
"for",
"emoji",
"in",
"json",
".",
"loads",
"(",
"raw_emojis",
")",
":",
"emojis",
".",
"append",
"(",
"emoji",
")",
"return",
"emojis"
] |
Get award emojis for issue/merge request
|
[
"Get",
"award",
"emojis",
"for",
"issue",
"/",
"merge",
"request"
] |
python
|
test
| 27.166667 |
jjkester/django-auditlog
|
src/auditlog/middleware.py
|
https://github.com/jjkester/django-auditlog/blob/a22978e05b7ed43b87e4b6109550b86c738578fe/src/auditlog/middleware.py#L49-L56
|
def process_response(self, request, response):
"""
Disconnects the signal receiver to prevent it from staying active.
"""
if hasattr(threadlocal, 'auditlog'):
pre_save.disconnect(sender=LogEntry, dispatch_uid=threadlocal.auditlog['signal_duid'])
return response
|
[
"def",
"process_response",
"(",
"self",
",",
"request",
",",
"response",
")",
":",
"if",
"hasattr",
"(",
"threadlocal",
",",
"'auditlog'",
")",
":",
"pre_save",
".",
"disconnect",
"(",
"sender",
"=",
"LogEntry",
",",
"dispatch_uid",
"=",
"threadlocal",
".",
"auditlog",
"[",
"'signal_duid'",
"]",
")",
"return",
"response"
] |
Disconnects the signal receiver to prevent it from staying active.
|
[
"Disconnects",
"the",
"signal",
"receiver",
"to",
"prevent",
"it",
"from",
"staying",
"active",
"."
] |
python
|
train
| 38.375 |
hall-lab/svtyper
|
svtyper/parsers.py
|
https://github.com/hall-lab/svtyper/blob/5fc30763fd3025793ee712a563de800c010f6bea/svtyper/parsers.py#L1089-L1101
|
def get_reference_end_from_cigar(reference_start, cigar):
'''
This returns the coordinate just past the last aligned base.
This matches the behavior of pysam's reference_end method
'''
reference_end = reference_start
# iterate through cigartuple
for i in xrange(len(cigar)):
k, n = cigar[i]
if k in (0,2,3,7,8): # M, D, N, =, X
reference_end += n
return reference_end
|
[
"def",
"get_reference_end_from_cigar",
"(",
"reference_start",
",",
"cigar",
")",
":",
"reference_end",
"=",
"reference_start",
"# iterate through cigartuple",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"cigar",
")",
")",
":",
"k",
",",
"n",
"=",
"cigar",
"[",
"i",
"]",
"if",
"k",
"in",
"(",
"0",
",",
"2",
",",
"3",
",",
"7",
",",
"8",
")",
":",
"# M, D, N, =, X",
"reference_end",
"+=",
"n",
"return",
"reference_end"
] |
This returns the coordinate just past the last aligned base.
This matches the behavior of pysam's reference_end method
|
[
"This",
"returns",
"the",
"coordinate",
"just",
"past",
"the",
"last",
"aligned",
"base",
".",
"This",
"matches",
"the",
"behavior",
"of",
"pysam",
"s",
"reference_end",
"method"
] |
python
|
train
| 36 |
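For the get_reference_end_from_cigar record above, a quick worked example with a pysam-style cigartuple list makes the arithmetic concrete; the alignment below is made up.

# Hypothetical alignment: 10M 2I 3D 5M starting at reference position 100.
cigar = [(0, 10), (1, 2), (2, 3), (0, 5)]
reference_start = 100

reference_end = reference_start
for k, n in cigar:
    if k in (0, 2, 3, 7, 8):     # M, D, N, =, X consume reference bases
        reference_end += n       # insertions (op 1) and clips do not
print(reference_end)             # 118 == 100 + 10 + 3 + 5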
batiste/django-page-cms
|
pages/views.py
|
https://github.com/batiste/django-page-cms/blob/3c72111eb7c3997a63c462c1776ffd8ce8c50a5d/pages/views.py#L134-L143
|
def resolve_redirection(self, request, context):
"""Check for redirections."""
current_page = context['current_page']
lang = context['lang']
if current_page.redirect_to_url:
return HttpResponsePermanentRedirect(current_page.redirect_to_url)
if current_page.redirect_to:
return HttpResponsePermanentRedirect(
current_page.redirect_to.get_url_path(lang))
|
[
"def",
"resolve_redirection",
"(",
"self",
",",
"request",
",",
"context",
")",
":",
"current_page",
"=",
"context",
"[",
"'current_page'",
"]",
"lang",
"=",
"context",
"[",
"'lang'",
"]",
"if",
"current_page",
".",
"redirect_to_url",
":",
"return",
"HttpResponsePermanentRedirect",
"(",
"current_page",
".",
"redirect_to_url",
")",
"if",
"current_page",
".",
"redirect_to",
":",
"return",
"HttpResponsePermanentRedirect",
"(",
"current_page",
".",
"redirect_to",
".",
"get_url_path",
"(",
"lang",
")",
")"
] |
Check for redirections.
|
[
"Check",
"for",
"redirections",
"."
] |
python
|
train
| 42.4 |
ffcalculator/fantasydata-python
|
fantasy_data/FantasyData.py
|
https://github.com/ffcalculator/fantasydata-python/blob/af90cac1e80d8356cffaa80621ee513201f6c661/fantasy_data/FantasyData.py#L168-L173
|
def get_projected_player_game_stats_by_team(self, season, week, team_id):
"""
Projected Player Game Stats by Team
"""
result = self._method_call("PlayerGameProjectionStatsByTeam/{season}/{week}/{team_id}", "projections", season=season, week=week, team_id=team_id)
return result
|
[
"def",
"get_projected_player_game_stats_by_team",
"(",
"self",
",",
"season",
",",
"week",
",",
"team_id",
")",
":",
"result",
"=",
"self",
".",
"_method_call",
"(",
"\"PlayerGameProjectionStatsByTeam/{season}/{week}/{team_id}\"",
",",
"\"projections\"",
",",
"season",
"=",
"season",
",",
"week",
"=",
"week",
",",
"team_id",
"=",
"team_id",
")",
"return",
"result"
] |
Projected Player Game Stats by Team
|
[
"Projected",
"Player",
"Game",
"Stats",
"by",
"Team"
] |
python
|
train
| 52 |
albahnsen/CostSensitiveClassification
|
costcla/utils/cross_validation.py
|
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/utils/cross_validation.py#L1311-L1320
|
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
|
[
"def",
"_shuffle",
"(",
"y",
",",
"labels",
",",
"random_state",
")",
":",
"if",
"labels",
"is",
"None",
":",
"ind",
"=",
"random_state",
".",
"permutation",
"(",
"len",
"(",
"y",
")",
")",
"else",
":",
"ind",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"labels",
")",
")",
"for",
"label",
"in",
"np",
".",
"unique",
"(",
"labels",
")",
":",
"this_mask",
"=",
"(",
"labels",
"==",
"label",
")",
"ind",
"[",
"this_mask",
"]",
"=",
"random_state",
".",
"permutation",
"(",
"ind",
"[",
"this_mask",
"]",
")",
"return",
"y",
"[",
"ind",
"]"
] |
Return a shuffled copy of y, optionally shuffling only among identical labels.
|
[
"Return",
"a",
"shuffled",
"copy",
"of",
"y",
"eventually",
"shuffle",
"among",
"same",
"labels",
"."
] |
python
|
train
| 39.2 |
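To make the label-constrained permutation in the _shuffle record above concrete, here is a self-contained run on made-up arrays; indices are permuted only within one label group at a time.

import numpy as np

y = np.array([10, 11, 12, 20, 21])
labels = np.array([0, 0, 0, 1, 1])
random_state = np.random.RandomState(42)

ind = np.arange(len(labels))
for label in np.unique(labels):
    this_mask = (labels == label)
    ind[this_mask] = random_state.permutation(ind[this_mask])

# Values 10-12 are shuffled among themselves, as are 20-21;
# the two groups never mix.
print(y[ind])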
pytroll/pyspectral
|
pyspectral/radiance_tb_conversion.py
|
https://github.com/pytroll/pyspectral/blob/fd296c0e0bdf5364fa180134a1292665d6bc50a3/pyspectral/radiance_tb_conversion.py#L249-L259
|
def radiance2tb(rad, wavelength):
"""
Get the Tb from the radiance using the Planck function
rad:
Radiance in SI units
wavelength:
Wavelength in SI units (meter)
"""
from pyspectral.blackbody import blackbody_rad2temp as rad2temp
return rad2temp(wavelength, rad)
|
[
"def",
"radiance2tb",
"(",
"rad",
",",
"wavelength",
")",
":",
"from",
"pyspectral",
".",
"blackbody",
"import",
"blackbody_rad2temp",
"as",
"rad2temp",
"return",
"rad2temp",
"(",
"wavelength",
",",
"rad",
")"
] |
Get the Tb from the radiance using the Planck function
rad:
Radiance in SI units
wavelength:
Wavelength in SI units (meter)
|
[
"Get",
"the",
"Tb",
"from",
"the",
"radiance",
"using",
"the",
"Planck",
"function"
] |
python
|
train
| 27 |
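The radiance2tb record above delegates to pyspectral's blackbody_rad2temp. As a sanity check of the SI units involved, the same inversion of Planck's law can be written directly; constants come from scipy and the radiance value is illustrative.

import numpy as np
from scipy.constants import h, c, k

def rad2temp(wavelength, rad):
    # Invert Planck's law for spectral radiance per wavelength:
    # T = (h*c / (k*wl)) / ln(1 + 2*h*c**2 / (wl**5 * L))
    return (h * c / (k * wavelength)) / np.log(1.0 + 2.0 * h * c**2
                                               / (wavelength**5 * rad))

wl = 10.8e-6       # 10.8 micrometre IR window channel, in metres
rad = 8.3e7        # spectral radiance in W m-2 m-1 sr-1 (illustrative)
print(rad2temp(wl, rad))   # roughly 290 K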
nugget/python-insteonplm
|
insteonplm/states/onOff.py
|
https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/onOff.py#L526-L532
|
def led_changed(self, addr, group, val):
"""Capture a change to the LED for this button."""
_LOGGER.debug("Button %d LED changed from %d to %d",
self._group, self._value, val)
led_on = bool(val)
if led_on != bool(self._value):
self._update_subscribers(int(led_on))
|
[
"def",
"led_changed",
"(",
"self",
",",
"addr",
",",
"group",
",",
"val",
")",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Button %d LED changed from %d to %d\"",
",",
"self",
".",
"_group",
",",
"self",
".",
"_value",
",",
"val",
")",
"led_on",
"=",
"bool",
"(",
"val",
")",
"if",
"led_on",
"!=",
"bool",
"(",
"self",
".",
"_value",
")",
":",
"self",
".",
"_update_subscribers",
"(",
"int",
"(",
"led_on",
")",
")"
] |
Capture a change to the LED for this button.
|
[
"Capture",
"a",
"change",
"to",
"the",
"LED",
"for",
"this",
"button",
"."
] |
python
|
train
| 46.285714 |
saltstack/salt
|
salt/modules/dracr.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/dracr.py#L872-L901
|
def server_poweroff(host=None,
admin_username=None,
admin_password=None,
module=None):
'''
Powers down the managed server.
host
The chassis host.
admin_username
The username used to access the chassis.
admin_password
The password used to access the chassis.
module
The element to power off on the chassis such as a blade.
If not provided, the chassis will be powered off.
CLI Example:
.. code-block:: bash
salt dell dracr.server_poweroff
salt dell dracr.server_poweroff module=server-1
'''
return __execute_cmd('serveraction powerdown',
host=host, admin_username=admin_username,
admin_password=admin_password, module=module)
|
[
"def",
"server_poweroff",
"(",
"host",
"=",
"None",
",",
"admin_username",
"=",
"None",
",",
"admin_password",
"=",
"None",
",",
"module",
"=",
"None",
")",
":",
"return",
"__execute_cmd",
"(",
"'serveraction powerdown'",
",",
"host",
"=",
"host",
",",
"admin_username",
"=",
"admin_username",
",",
"admin_password",
"=",
"admin_password",
",",
"module",
"=",
"module",
")"
] |
Powers down the managed server.
host
The chassis host.
admin_username
The username used to access the chassis.
admin_password
The password used to access the chassis.
module
The element to power off on the chassis such as a blade.
If not provided, the chassis will be powered off.
CLI Example:
.. code-block:: bash
salt dell dracr.server_poweroff
salt dell dracr.server_poweroff module=server-1
|
[
"Powers",
"down",
"the",
"managed",
"server",
"."
] |
python
|
train
| 26.933333 |
facelessuser/backrefs
|
backrefs/bregex.py
|
https://github.com/facelessuser/backrefs/blob/3b3d60f5d57b02044f880aa29c9c5add0e31a34f/backrefs/bregex.py#L121-L130
|
def _apply_replace_backrefs(m, repl=None, flags=0):
"""Expand with either the `ReplaceTemplate` or compile on the fly, or return None."""
if m is None:
raise ValueError("Match is None!")
else:
if isinstance(repl, ReplaceTemplate):
return repl.expand(m)
elif isinstance(repl, (str, bytes)):
return _bregex_parse._ReplaceParser().parse(m.re, repl, bool(flags & FORMAT)).expand(m)
|
[
"def",
"_apply_replace_backrefs",
"(",
"m",
",",
"repl",
"=",
"None",
",",
"flags",
"=",
"0",
")",
":",
"if",
"m",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Match is None!\"",
")",
"else",
":",
"if",
"isinstance",
"(",
"repl",
",",
"ReplaceTemplate",
")",
":",
"return",
"repl",
".",
"expand",
"(",
"m",
")",
"elif",
"isinstance",
"(",
"repl",
",",
"(",
"str",
",",
"bytes",
")",
")",
":",
"return",
"_bregex_parse",
".",
"_ReplaceParser",
"(",
")",
".",
"parse",
"(",
"m",
".",
"re",
",",
"repl",
",",
"bool",
"(",
"flags",
"&",
"FORMAT",
")",
")",
".",
"expand",
"(",
"m",
")"
] |
Expand with either the `ReplaceTemplate` or compile on the fly, or return None.
|
[
"Expand",
"with",
"either",
"the",
"ReplaceTemplate",
"or",
"compile",
"on",
"the",
"fly",
"or",
"return",
"None",
"."
] |
python
|
train
| 42.9 |
spacetelescope/drizzlepac
|
drizzlepac/buildmask.py
|
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/buildmask.py#L88-L153
|
def buildMaskImage(rootname, bitvalue, output, extname='DQ', extver=1):
""" Builds mask image from rootname's DQ array
    If there is no valid 'DQ' array in image, a mask of
    all ones is created instead. Returns None on failure.
"""
# If no bitvalue is set or rootname given, assume no mask is desired
# However, this name would be useful as the output mask from
# other processing, such as MultiDrizzle, so return it anyway.
#if bitvalue == None or rootname == None:
# return None
# build output name
maskname = output
# If an old version of the maskfile was present, remove it and rebuild it.
if fileutil.findFile(maskname):
fileutil.removeFile(maskname)
# Open input file with DQ array
fdq = fileutil.openImage(rootname, mode='readonly', memmap=False)
try:
_extn = fileutil.findExtname(fdq, extname, extver=extver)
if _extn is not None:
# Read in DQ array
dqarr = fdq[_extn].data
else:
dqarr = None
# For the case where there is no DQ array,
# create a mask image of all ones.
if dqarr is None:
# We need to get the dimensions of the output DQ array
# Since the DQ array is non-existent, look for the SCI extension
_sci_extn = fileutil.findExtname(fdq,'SCI',extver=extver)
if _sci_extn is not None:
_shape = fdq[_sci_extn].data.shape
dqarr = np.zeros(_shape,dtype=np.uint16)
else:
raise Exception
# Build mask array from DQ array
maskarr = buildMask(dqarr,bitvalue)
#Write out the mask file as simple FITS file
fmask = fits.open(maskname, mode='append', memmap=False)
maskhdu = fits.PrimaryHDU(data = maskarr)
fmask.append(maskhdu)
#Close files
fmask.close()
del fmask
fdq.close()
del fdq
except:
fdq.close()
del fdq
# Safeguard against leaving behind an incomplete file
if fileutil.findFile(maskname):
os.remove(maskname)
_errstr = "\nWarning: Problem creating MASK file for "+rootname+".\n"
#raise IOError, _errstr
print(_errstr)
return None
# Return the name of the mask image written out
return maskname
|
[
"def",
"buildMaskImage",
"(",
"rootname",
",",
"bitvalue",
",",
"output",
",",
"extname",
"=",
"'DQ'",
",",
"extver",
"=",
"1",
")",
":",
"# If no bitvalue is set or rootname given, assume no mask is desired",
"# However, this name would be useful as the output mask from",
"# other processing, such as MultiDrizzle, so return it anyway.",
"#if bitvalue == None or rootname == None:",
"# return None",
"# build output name",
"maskname",
"=",
"output",
"# If an old version of the maskfile was present, remove it and rebuild it.",
"if",
"fileutil",
".",
"findFile",
"(",
"maskname",
")",
":",
"fileutil",
".",
"removeFile",
"(",
"maskname",
")",
"# Open input file with DQ array",
"fdq",
"=",
"fileutil",
".",
"openImage",
"(",
"rootname",
",",
"mode",
"=",
"'readonly'",
",",
"memmap",
"=",
"False",
")",
"try",
":",
"_extn",
"=",
"fileutil",
".",
"findExtname",
"(",
"fdq",
",",
"extname",
",",
"extver",
"=",
"extver",
")",
"if",
"_extn",
"is",
"not",
"None",
":",
"# Read in DQ array",
"dqarr",
"=",
"fdq",
"[",
"_extn",
"]",
".",
"data",
"else",
":",
"dqarr",
"=",
"None",
"# For the case where there is no DQ array,",
"# create a mask image of all ones.",
"if",
"dqarr",
"is",
"None",
":",
"# We need to get the dimensions of the output DQ array",
"# Since the DQ array is non-existent, look for the SCI extension",
"_sci_extn",
"=",
"fileutil",
".",
"findExtname",
"(",
"fdq",
",",
"'SCI'",
",",
"extver",
"=",
"extver",
")",
"if",
"_sci_extn",
"is",
"not",
"None",
":",
"_shape",
"=",
"fdq",
"[",
"_sci_extn",
"]",
".",
"data",
".",
"shape",
"dqarr",
"=",
"np",
".",
"zeros",
"(",
"_shape",
",",
"dtype",
"=",
"np",
".",
"uint16",
")",
"else",
":",
"raise",
"Exception",
"# Build mask array from DQ array",
"maskarr",
"=",
"buildMask",
"(",
"dqarr",
",",
"bitvalue",
")",
"#Write out the mask file as simple FITS file",
"fmask",
"=",
"fits",
".",
"open",
"(",
"maskname",
",",
"mode",
"=",
"'append'",
",",
"memmap",
"=",
"False",
")",
"maskhdu",
"=",
"fits",
".",
"PrimaryHDU",
"(",
"data",
"=",
"maskarr",
")",
"fmask",
".",
"append",
"(",
"maskhdu",
")",
"#Close files",
"fmask",
".",
"close",
"(",
")",
"del",
"fmask",
"fdq",
".",
"close",
"(",
")",
"del",
"fdq",
"except",
":",
"fdq",
".",
"close",
"(",
")",
"del",
"fdq",
"# Safeguard against leaving behind an incomplete file",
"if",
"fileutil",
".",
"findFile",
"(",
"maskname",
")",
":",
"os",
".",
"remove",
"(",
"maskname",
")",
"_errstr",
"=",
"\"\\nWarning: Problem creating MASK file for \"",
"+",
"rootname",
"+",
"\".\\n\"",
"#raise IOError, _errstr",
"print",
"(",
"_errstr",
")",
"return",
"None",
"# Return the name of the mask image written out",
"return",
"maskname"
] |
Builds mask image from rootname's DQ array
If there is no valid 'DQ' array in image, a mask of
all ones is created instead. Returns None on failure.
|
[
"Builds",
"mask",
"image",
"from",
"rootname",
"s",
"DQ",
"array",
"If",
"there",
"is",
"no",
"valid",
"DQ",
"array",
"in",
"image",
"then",
"return",
"an",
"empty",
"string",
"."
] |
python
|
train
| 34.318182 |
shexSpec/grammar
|
parsers/python/pyshexc/parser_impl/shex_oneofshape_parser.py
|
https://github.com/shexSpec/grammar/blob/4497cd1f73fa6703bca6e2cb53ba9c120f22e48c/parsers/python/pyshexc/parser_impl/shex_oneofshape_parser.py#L72-L77
|
def visitEncapsulatedShape(self, ctx: ShExDocParser.EncapsulatedShapeContext):
""" encapsulatedShape: '(' innerShape ')' cardinality? annotation* semanticActions """
enc_shape = ShexOneOfShapeParser(self.context)
enc_shape.visit(ctx.innerShape())
self.expression = enc_shape.expression
self._card_annotations_and_semacts(ctx)
|
[
"def",
"visitEncapsulatedShape",
"(",
"self",
",",
"ctx",
":",
"ShExDocParser",
".",
"EncapsulatedShapeContext",
")",
":",
"enc_shape",
"=",
"ShexOneOfShapeParser",
"(",
"self",
".",
"context",
")",
"enc_shape",
".",
"visit",
"(",
"ctx",
".",
"innerShape",
"(",
")",
")",
"self",
".",
"expression",
"=",
"enc_shape",
".",
"expression",
"self",
".",
"_card_annotations_and_semacts",
"(",
"ctx",
")"
] |
encapsulatedShape: '(' innerShape ')' cardinality? annotation* semanticActions
|
[
"encapsulatedShape",
":",
"(",
"innerShape",
")",
"cardinality?",
"annotation",
"*",
"semanticActions"
] |
python
|
train
| 60 |
gitpython-developers/GitPython
|
git/index/base.py
|
https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/index/base.py#L437-L450
|
def iter_blobs(self, predicate=lambda t: True):
"""
:return: Iterator yielding tuples of Blob objects and stages, tuple(stage, Blob)
:param predicate:
Function(t) returning True if tuple(stage, Blob) should be yielded by the
iterator. A default filter, the BlobFilter, allows you to yield blobs
only if they match a given list of paths. """
for entry in mviter(self.entries):
blob = entry.to_blob(self.repo)
blob.size = entry.size
output = (entry.stage, blob)
if predicate(output):
yield output
|
[
"def",
"iter_blobs",
"(",
"self",
",",
"predicate",
"=",
"lambda",
"t",
":",
"True",
")",
":",
"for",
"entry",
"in",
"mviter",
"(",
"self",
".",
"entries",
")",
":",
"blob",
"=",
"entry",
".",
"to_blob",
"(",
"self",
".",
"repo",
")",
"blob",
".",
"size",
"=",
"entry",
".",
"size",
"output",
"=",
"(",
"entry",
".",
"stage",
",",
"blob",
")",
"if",
"predicate",
"(",
"output",
")",
":",
"yield",
"output"
] |
:return: Iterator yielding tuples of Blob objects and stages, tuple(stage, Blob)
:param predicate:
Function(t) returning True if tuple(stage, Blob) should be yielded by the
iterator. A default filter, the BlobFilter, allows you to yield blobs
only if they match a given list of paths.
|
[
":",
"return",
":",
"Iterator",
"yielding",
"tuples",
"of",
"Blob",
"objects",
"and",
"stages",
"tuple",
"(",
"stage",
"Blob",
")"
] |
python
|
train
| 43.857143 |
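A brief usage sketch for the iter_blobs record above; the repository path is hypothetical and GitPython is assumed to be installed.

from git import Repo

repo = Repo('/path/to/repo')       # hypothetical working copy
index = repo.index

# Each yielded item is a (stage, Blob) tuple; the predicate filters them.
for stage, blob in index.iter_blobs(lambda t: t[1].path.endswith('.py')):
    print(stage, blob.path, blob.size)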
polysquare/polysquare-generic-file-linter
|
polysquarelinter/linter.py
|
https://github.com/polysquare/polysquare-generic-file-linter/blob/cfc88771acd3d5551c28fa5d917bb0aeb584c4cc/polysquarelinter/linter.py#L847-L894
|
def main(arguments=None): # suppress(unused-function)
"""Entry point for the linter."""
result = _parse_arguments(arguments)
linter_funcs = _ordered(linter_functions_from_filters,
result.whitelist,
result.blacklist)
global_options = vars(result)
tool_options = tool_options_from_global(global_options, len(result.files))
any_would_run = _any_would_run(_run_lint_on_file_exceptions,
result.files,
result.stamp_file_path,
result.log_technical_terms_to,
linter_funcs,
tool_options,
result.fix_what_you_can)
if any_would_run:
for linter_function in linter_funcs.values():
if linter_function.before_all:
linter_function.before_all(global_options, tool_options)
use_multiprocessing = _should_use_multiprocessing(len(result.files))
else:
use_multiprocessing = False
if use_multiprocessing:
mapper = parmap.map
else:
# suppress(E731)
mapper = lambda f, i, *a: [f(*((x, ) + a)) for x in i]
errors = list(itertools.chain(*mapper(_run_lint_on_file_stamped,
result.files,
result.stamp_file_path,
result.log_technical_terms_to,
linter_funcs,
tool_options,
result.fix_what_you_can)))
for error in sorted(errors):
_report_lint_error(error.failure, os.path.relpath(error.absolute_path))
if any_would_run:
for linter_funcs in linter_funcs.values():
if linter_funcs.after_all:
linter_funcs.after_all(global_options, tool_options)
return len(errors)
|
[
"def",
"main",
"(",
"arguments",
"=",
"None",
")",
":",
"# suppress(unused-function)",
"result",
"=",
"_parse_arguments",
"(",
"arguments",
")",
"linter_funcs",
"=",
"_ordered",
"(",
"linter_functions_from_filters",
",",
"result",
".",
"whitelist",
",",
"result",
".",
"blacklist",
")",
"global_options",
"=",
"vars",
"(",
"result",
")",
"tool_options",
"=",
"tool_options_from_global",
"(",
"global_options",
",",
"len",
"(",
"result",
".",
"files",
")",
")",
"any_would_run",
"=",
"_any_would_run",
"(",
"_run_lint_on_file_exceptions",
",",
"result",
".",
"files",
",",
"result",
".",
"stamp_file_path",
",",
"result",
".",
"log_technical_terms_to",
",",
"linter_funcs",
",",
"tool_options",
",",
"result",
".",
"fix_what_you_can",
")",
"if",
"any_would_run",
":",
"for",
"linter_function",
"in",
"linter_funcs",
".",
"values",
"(",
")",
":",
"if",
"linter_function",
".",
"before_all",
":",
"linter_function",
".",
"before_all",
"(",
"global_options",
",",
"tool_options",
")",
"use_multiprocessing",
"=",
"_should_use_multiprocessing",
"(",
"len",
"(",
"result",
".",
"files",
")",
")",
"else",
":",
"use_multiprocessing",
"=",
"False",
"if",
"use_multiprocessing",
":",
"mapper",
"=",
"parmap",
".",
"map",
"else",
":",
"# suppress(E731)",
"mapper",
"=",
"lambda",
"f",
",",
"i",
",",
"*",
"a",
":",
"[",
"f",
"(",
"*",
"(",
"(",
"x",
",",
")",
"+",
"a",
")",
")",
"for",
"x",
"in",
"i",
"]",
"errors",
"=",
"list",
"(",
"itertools",
".",
"chain",
"(",
"*",
"mapper",
"(",
"_run_lint_on_file_stamped",
",",
"result",
".",
"files",
",",
"result",
".",
"stamp_file_path",
",",
"result",
".",
"log_technical_terms_to",
",",
"linter_funcs",
",",
"tool_options",
",",
"result",
".",
"fix_what_you_can",
")",
")",
")",
"for",
"error",
"in",
"sorted",
"(",
"errors",
")",
":",
"_report_lint_error",
"(",
"error",
".",
"failure",
",",
"os",
".",
"path",
".",
"relpath",
"(",
"error",
".",
"absolute_path",
")",
")",
"if",
"any_would_run",
":",
"for",
"linter_funcs",
"in",
"linter_funcs",
".",
"values",
"(",
")",
":",
"if",
"linter_funcs",
".",
"after_all",
":",
"linter_funcs",
".",
"after_all",
"(",
"global_options",
",",
"tool_options",
")",
"return",
"len",
"(",
"errors",
")"
] |
Entry point for the linter.
|
[
"Entry",
"point",
"for",
"the",
"linter",
"."
] |
python
|
train
| 41.375 |
idlesign/django-sitemessage
|
sitemessage/shortcuts.py
|
https://github.com/idlesign/django-sitemessage/blob/25b179b798370354c5988042ec209e255d23793f/sitemessage/shortcuts.py#L57-L65
|
def schedule_telegram_message(message, to, sender=None, priority=None):
"""Schedules Telegram message for delivery.
:param str message: text to send.
:param list|str|unicode to: recipients addresses or Django User model heir instances with `telegram` attributes.
:param User sender: User model heir instance
:param int priority: number describing message priority. If set overrides priority provided with message type.
"""
schedule_messages(message, recipients('telegram', to), sender=sender, priority=priority)
|
[
"def",
"schedule_telegram_message",
"(",
"message",
",",
"to",
",",
"sender",
"=",
"None",
",",
"priority",
"=",
"None",
")",
":",
"schedule_messages",
"(",
"message",
",",
"recipients",
"(",
"'telegram'",
",",
"to",
")",
",",
"sender",
"=",
"sender",
",",
"priority",
"=",
"priority",
")"
] |
Schedules Telegram message for delivery.
:param str message: text to send.
:param list|str|unicode to: recipients addresses or Django User model heir instances with `telegram` attributes.
:param User sender: User model heir instance
:param int priority: number describing message priority. If set overrides priority provided with message type.
|
[
"Schedules",
"Telegram",
"message",
"for",
"delivery",
"."
] |
python
|
train
| 59.111111 |
apple/turicreate
|
src/unity/python/turicreate/toolkits/classifier/random_forest_classifier.py
|
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/classifier/random_forest_classifier.py#L360-L406
|
def classify(self, dataset, missing_value_action='auto'):
"""
Return a classification, for each example in the ``dataset``, using the
trained random forest model. The output SFrame contains predictions
    as class labels (0 or 1) and probabilities associated with the example.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : SFrame
        An SFrame with model predictions, i.e. class labels and probabilities
associated with each of the class labels.
See Also
----------
create, evaluate, predict
Examples
----------
>>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> data['is_expensive'] = data['price'] > 30000
>>> model = turicreate.random_forest_classifier.create(data,
>>> target='is_expensive',
>>> features=['bath', 'bedroom', 'size'])
>>> classes = model.classify(data)
"""
return super(RandomForestClassifier, self).classify(dataset,
missing_value_action=missing_value_action)
|
[
"def",
"classify",
"(",
"self",
",",
"dataset",
",",
"missing_value_action",
"=",
"'auto'",
")",
":",
"return",
"super",
"(",
"RandomForestClassifier",
",",
"self",
")",
".",
"classify",
"(",
"dataset",
",",
"missing_value_action",
"=",
"missing_value_action",
")"
] |
Return a classification, for each example in the ``dataset``, using the
trained random forest model. The output SFrame contains predictions
as class labels (0 or 1) and probabilities associated with the example.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : SFrame
    An SFrame with model predictions, i.e. class labels and probabilities
associated with each of the class labels.
See Also
----------
create, evaluate, predict
Examples
----------
>>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> data['is_expensive'] = data['price'] > 30000
>>> model = turicreate.random_forest_classifier.create(data,
>>> target='is_expensive',
>>> features=['bath', 'bedroom', 'size'])
>>> classes = model.classify(data)
|
[
"Return",
"a",
"classification",
"for",
"each",
"example",
"in",
"the",
"dataset",
"using",
"the",
"trained",
"random",
"forest",
"model",
".",
"The",
"output",
"SFrame",
"contains",
"predictions",
"as",
"class",
"labels",
"(",
"0",
"or",
"1",
")",
"and",
"probabilities",
"associated",
"with",
"the",
"the",
"example",
"."
] |
python
|
train
| 43.468085 |
tonioo/sievelib
|
sievelib/parser.py
|
https://github.com/tonioo/sievelib/blob/88822d1f1daf30ef3dd9ac74911301b0773ef3c8/sievelib/parser.py#L188-L228
|
def __check_command_completion(self, testsemicolon=True):
"""Check for command(s) completion
This function should be called each time a new argument is
seen by the parser in order to check a command is complete. As
not only one command can be ended when receiving a new
argument (nested commands case), we apply the same work to
parent commands.
:param testsemicolon: if True, indicates that the next
expected token must be a semicolon (for commands that need one)
:return: True if command is
considered as complete, False otherwise.
"""
if not self.__curcommand.iscomplete():
return True
ctype = self.__curcommand.get_type()
if ctype == "action" or \
(ctype == "control" and
not self.__curcommand.accept_children):
if testsemicolon:
self.__set_expected("semicolon")
return True
while self.__curcommand.parent:
cmd = self.__curcommand
self.__curcommand = self.__curcommand.parent
if self.__curcommand.get_type() in ["control", "test"]:
if self.__curcommand.iscomplete():
if self.__curcommand.get_type() == "control":
break
continue
if not self.__curcommand.check_next_arg("test", cmd, add=False):
return False
if not self.__curcommand.iscomplete():
if self.__curcommand.variable_args_nb:
self.__set_expected("comma", "right_parenthesis")
break
return True
|
[
"def",
"__check_command_completion",
"(",
"self",
",",
"testsemicolon",
"=",
"True",
")",
":",
"if",
"not",
"self",
".",
"__curcommand",
".",
"iscomplete",
"(",
")",
":",
"return",
"True",
"ctype",
"=",
"self",
".",
"__curcommand",
".",
"get_type",
"(",
")",
"if",
"ctype",
"==",
"\"action\"",
"or",
"(",
"ctype",
"==",
"\"control\"",
"and",
"not",
"self",
".",
"__curcommand",
".",
"accept_children",
")",
":",
"if",
"testsemicolon",
":",
"self",
".",
"__set_expected",
"(",
"\"semicolon\"",
")",
"return",
"True",
"while",
"self",
".",
"__curcommand",
".",
"parent",
":",
"cmd",
"=",
"self",
".",
"__curcommand",
"self",
".",
"__curcommand",
"=",
"self",
".",
"__curcommand",
".",
"parent",
"if",
"self",
".",
"__curcommand",
".",
"get_type",
"(",
")",
"in",
"[",
"\"control\"",
",",
"\"test\"",
"]",
":",
"if",
"self",
".",
"__curcommand",
".",
"iscomplete",
"(",
")",
":",
"if",
"self",
".",
"__curcommand",
".",
"get_type",
"(",
")",
"==",
"\"control\"",
":",
"break",
"continue",
"if",
"not",
"self",
".",
"__curcommand",
".",
"check_next_arg",
"(",
"\"test\"",
",",
"cmd",
",",
"add",
"=",
"False",
")",
":",
"return",
"False",
"if",
"not",
"self",
".",
"__curcommand",
".",
"iscomplete",
"(",
")",
":",
"if",
"self",
".",
"__curcommand",
".",
"variable_args_nb",
":",
"self",
".",
"__set_expected",
"(",
"\"comma\"",
",",
"\"right_parenthesis\"",
")",
"break",
"return",
"True"
] |
Check for command(s) completion
This function should be called each time a new argument is
seen by the parser in order to check a command is complete. As
not only one command can be ended when receiving a new
argument (nested commands case), we apply the same work to
parent commands.
:param testsemicolon: if True, indicates that the next
expected token must be a semicolon (for commands that need one)
:return: True if command is
considered as complete, False otherwise.
|
[
"Check",
"for",
"command",
"(",
"s",
")",
"completion"
] |
python
|
train
| 40.609756 |
wakatime/wakatime
|
wakatime/packages/pygments/util.py
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/util.py#L218-L254
|
def unirange(a, b):
"""Returns a regular expression string to match the given non-BMP range."""
if b < a:
raise ValueError("Bad character range")
if a < 0x10000 or b < 0x10000:
raise ValueError("unirange is only defined for non-BMP ranges")
if sys.maxunicode > 0xffff:
# wide build
return u'[%s-%s]' % (unichr(a), unichr(b))
else:
# narrow build stores surrogates, and the 're' module handles them
# (incorrectly) as characters. Since there is still ordering among
# these characters, expand the range to one that it understands. Some
# background in http://bugs.python.org/issue3665 and
# http://bugs.python.org/issue12749
#
# Additionally, the lower constants are using unichr rather than
# literals because jython [which uses the wide path] can't load this
# file if they are literals.
ah, al = _surrogatepair(a)
bh, bl = _surrogatepair(b)
if ah == bh:
return u'(?:%s[%s-%s])' % (unichr(ah), unichr(al), unichr(bl))
else:
buf = []
buf.append(u'%s[%s-%s]' %
(unichr(ah), unichr(al),
ah == bh and unichr(bl) or unichr(0xdfff)))
            if bh - ah > 1:
                buf.append(u'[%s-%s][%s-%s]' %
                           (unichr(ah+1), unichr(bh-1), unichr(0xdc00), unichr(0xdfff)))
if ah != bh:
buf.append(u'%s[%s-%s]' %
(unichr(bh), unichr(0xdc00), unichr(bl)))
return u'(?:' + u'|'.join(buf) + u')'
|
[
"def",
"unirange",
"(",
"a",
",",
"b",
")",
":",
"if",
"b",
"<",
"a",
":",
"raise",
"ValueError",
"(",
"\"Bad character range\"",
")",
"if",
"a",
"<",
"0x10000",
"or",
"b",
"<",
"0x10000",
":",
"raise",
"ValueError",
"(",
"\"unirange is only defined for non-BMP ranges\"",
")",
"if",
"sys",
".",
"maxunicode",
">",
"0xffff",
":",
"# wide build",
"return",
"u'[%s-%s]'",
"%",
"(",
"unichr",
"(",
"a",
")",
",",
"unichr",
"(",
"b",
")",
")",
"else",
":",
"# narrow build stores surrogates, and the 're' module handles them",
"# (incorrectly) as characters. Since there is still ordering among",
"# these characters, expand the range to one that it understands. Some",
"# background in http://bugs.python.org/issue3665 and",
"# http://bugs.python.org/issue12749",
"#",
"# Additionally, the lower constants are using unichr rather than",
"# literals because jython [which uses the wide path] can't load this",
"# file if they are literals.",
"ah",
",",
"al",
"=",
"_surrogatepair",
"(",
"a",
")",
"bh",
",",
"bl",
"=",
"_surrogatepair",
"(",
"b",
")",
"if",
"ah",
"==",
"bh",
":",
"return",
"u'(?:%s[%s-%s])'",
"%",
"(",
"unichr",
"(",
"ah",
")",
",",
"unichr",
"(",
"al",
")",
",",
"unichr",
"(",
"bl",
")",
")",
"else",
":",
"buf",
"=",
"[",
"]",
"buf",
".",
"append",
"(",
"u'%s[%s-%s]'",
"%",
"(",
"unichr",
"(",
"ah",
")",
",",
"unichr",
"(",
"al",
")",
",",
"ah",
"==",
"bh",
"and",
"unichr",
"(",
"bl",
")",
"or",
"unichr",
"(",
"0xdfff",
")",
")",
")",
"if",
"ah",
"-",
"bh",
">",
"1",
":",
"buf",
".",
"append",
"(",
"u'[%s-%s][%s-%s]'",
"%",
"unichr",
"(",
"ah",
"+",
"1",
")",
",",
"unichr",
"(",
"bh",
"-",
"1",
")",
",",
"unichr",
"(",
"0xdc00",
")",
",",
"unichr",
"(",
"0xdfff",
")",
")",
"if",
"ah",
"!=",
"bh",
":",
"buf",
".",
"append",
"(",
"u'%s[%s-%s]'",
"%",
"(",
"unichr",
"(",
"bh",
")",
",",
"unichr",
"(",
"0xdc00",
")",
",",
"unichr",
"(",
"bl",
")",
")",
")",
"return",
"u'(?:'",
"+",
"u'|'",
".",
"join",
"(",
"buf",
")",
"+",
"u')'"
] |
Returns a regular expression string to match the given non-BMP range.
|
[
"Returns",
"a",
"regular",
"expression",
"string",
"to",
"match",
"the",
"given",
"non",
"-",
"BMP",
"range",
"."
] |
python
|
train
| 42.783784 |
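With the dead-branch condition in the unirange record corrected to bh - ah > 1 (and the formatting tuple parenthesized, so the middle alternative no longer raises when reached), the narrow-build path can actually emit its three-part alternation. The surrogate-pair arithmetic it relies on is small enough to check by hand; the helper below mirrors the usual definition of _surrogatepair, which is an assumption since that helper sits outside this record.

import re

def _surrogatepair(cp):
    # Split a non-BMP code point into a UTF-16 high/low surrogate pair.
    return (0xd7c0 + (cp >> 10), 0xdc00 + (cp & 0x3ff))

print([hex(v) for v in _surrogatepair(0x10400)])   # ['0xd801', '0xdc00']

# On a wide build (all of Python 3) unirange reduces to a plain class:
pattern = re.compile(u'[%s-%s]' % (chr(0x10000), chr(0x10fff)))
print(bool(pattern.match(chr(0x10400))))           # True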
gem/oq-engine
|
openquake/hmtk/strain/shift.py
|
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/strain/shift.py#L279-L367
|
def calculate_activity_rate(self, strain_data, cumulative=False,
in_seconds=False):
'''
Main function to calculate the activity rate (for each of the
magnitudes in target_magnitudes) for all of the cells specified in
the input strain model file
:param strain_data:
Strain model as an instance of :class:
openquake.hmtk.strain.geodetic_strain.GeodeticStrain
:param bool cumulative:
Set to true if the cumulative rate is required, False for
incremental
:param bool in_seconds:
Returns the activity rate in seconds (True) or else as an annual
activity rate
'''
self.strain = strain_data
self.strain.target_magnitudes = self.target_magnitudes
# Adjust strain rates from annual to seconds (SI)
for key in STRAIN_VARIABLES:
self.strain.data[key] = self.strain.data[key] / SECS_PER_YEAR
if 'region' not in self.strain.data:
        raise ValueError('Cannot implement SHIFT methodology without '
'definition of regionalisation')
else:
self._reclassify_Bird_regions_with_data()
# Initially all seismicity rates assigned to background rate
self.strain.seismicity_rate = np.tile(
self.base_rate,
[self.strain.get_number_observations(), 1])
regionalisation_zones = (
np.unique(self.strain.data['region'])).tolist()
for region in regionalisation_zones:
id0 = self.strain.data['region'] == region
if b'IPL' in region:
            # For intra-plate seismicity everything is referred to
# the background rate
continue
elif b'OSR_special_1' in region:
# Special case 1 - normal and transform faulting
calculated_rate = self.get_rate_osr_normal_transform(
self.threshold_moment, id0)
elif b'OSR_special_2' in region:
# Special case 2 - convergent and transform faulting
calculated_rate = self.get_rate_osr_convergent_transform(
self.threshold_moment, id0)
else:
region = region.decode('utf-8')
calculated_rate = \
self.regionalisation[region]['adjustment_factor'] * \
self.continuum_seismicity(self.threshold_moment,
self.strain.data['e1h'][id0],
self.strain.data['e2h'][id0],
self.strain.data['err'][id0],
self.regionalisation[region])
for jloc, iloc in enumerate(np.where(id0)[0]):
# Where the calculated rate exceeds the base rate then becomes
# calculated rate. In this version the magnitudes are treated
# independently (i.e. if Rate(M < 7) > Base Rate (M < 7) but
# Rate (M > 7) < Base Rate (M > 7) then returned Rate (M < 7)
# = Rate (M < 7) and returned Rate (M > 7) = Base Rate (M > 7)
id1 = calculated_rate[jloc] > self.base_rate
self.strain.seismicity_rate[iloc, id1] = calculated_rate[jloc,
id1]
if not cumulative and self.number_magnitudes > 1:
# Seismicity rates are currently cumulative - need to turn them
# into discrete
for iloc in range(0, self.number_magnitudes - 1):
self.strain.seismicity_rate[:, iloc] = \
self.strain.seismicity_rate[:, iloc] -\
self.strain.seismicity_rate[:, iloc + 1]
if not in_seconds:
self.strain.seismicity_rate = self.strain.seismicity_rate * \
SECS_PER_YEAR
for key in STRAIN_VARIABLES:
self.strain.data[key] = self.strain.data[key] * SECS_PER_YEAR
|
[
"def",
"calculate_activity_rate",
"(",
"self",
",",
"strain_data",
",",
"cumulative",
"=",
"False",
",",
"in_seconds",
"=",
"False",
")",
":",
"self",
".",
"strain",
"=",
"strain_data",
"self",
".",
"strain",
".",
"target_magnitudes",
"=",
"self",
".",
"target_magnitudes",
"# Adjust strain rates from annual to seconds (SI)",
"for",
"key",
"in",
"STRAIN_VARIABLES",
":",
"self",
".",
"strain",
".",
"data",
"[",
"key",
"]",
"=",
"self",
".",
"strain",
".",
"data",
"[",
"key",
"]",
"/",
"SECS_PER_YEAR",
"if",
"'region'",
"not",
"in",
"self",
".",
"strain",
".",
"data",
":",
"raise",
"ValueError",
"(",
"'Cannot implment SHIFT methodology without '",
"'definition of regionalisation'",
")",
"else",
":",
"self",
".",
"_reclassify_Bird_regions_with_data",
"(",
")",
"# Initially all seismicity rates assigned to background rate",
"self",
".",
"strain",
".",
"seismicity_rate",
"=",
"np",
".",
"tile",
"(",
"self",
".",
"base_rate",
",",
"[",
"self",
".",
"strain",
".",
"get_number_observations",
"(",
")",
",",
"1",
"]",
")",
"regionalisation_zones",
"=",
"(",
"np",
".",
"unique",
"(",
"self",
".",
"strain",
".",
"data",
"[",
"'region'",
"]",
")",
")",
".",
"tolist",
"(",
")",
"for",
"region",
"in",
"regionalisation_zones",
":",
"id0",
"=",
"self",
".",
"strain",
".",
"data",
"[",
"'region'",
"]",
"==",
"region",
"if",
"b'IPL'",
"in",
"region",
":",
"# For intra-plate seismicity everything is refered to",
"# the background rate",
"continue",
"elif",
"b'OSR_special_1'",
"in",
"region",
":",
"# Special case 1 - normal and transform faulting",
"calculated_rate",
"=",
"self",
".",
"get_rate_osr_normal_transform",
"(",
"self",
".",
"threshold_moment",
",",
"id0",
")",
"elif",
"b'OSR_special_2'",
"in",
"region",
":",
"# Special case 2 - convergent and transform faulting",
"calculated_rate",
"=",
"self",
".",
"get_rate_osr_convergent_transform",
"(",
"self",
".",
"threshold_moment",
",",
"id0",
")",
"else",
":",
"region",
"=",
"region",
".",
"decode",
"(",
"'utf-8'",
")",
"calculated_rate",
"=",
"self",
".",
"regionalisation",
"[",
"region",
"]",
"[",
"'adjustment_factor'",
"]",
"*",
"self",
".",
"continuum_seismicity",
"(",
"self",
".",
"threshold_moment",
",",
"self",
".",
"strain",
".",
"data",
"[",
"'e1h'",
"]",
"[",
"id0",
"]",
",",
"self",
".",
"strain",
".",
"data",
"[",
"'e2h'",
"]",
"[",
"id0",
"]",
",",
"self",
".",
"strain",
".",
"data",
"[",
"'err'",
"]",
"[",
"id0",
"]",
",",
"self",
".",
"regionalisation",
"[",
"region",
"]",
")",
"for",
"jloc",
",",
"iloc",
"in",
"enumerate",
"(",
"np",
".",
"where",
"(",
"id0",
")",
"[",
"0",
"]",
")",
":",
"# Where the calculated rate exceeds the base rate then becomes",
"# calculated rate. In this version the magnitudes are treated",
"# independently (i.e. if Rate(M < 7) > Base Rate (M < 7) but",
"# Rate (M > 7) < Base Rate (M > 7) then returned Rate (M < 7)",
"# = Rate (M < 7) and returned Rate (M > 7) = Base Rate (M > 7)",
"id1",
"=",
"calculated_rate",
"[",
"jloc",
"]",
">",
"self",
".",
"base_rate",
"self",
".",
"strain",
".",
"seismicity_rate",
"[",
"iloc",
",",
"id1",
"]",
"=",
"calculated_rate",
"[",
"jloc",
",",
"id1",
"]",
"if",
"not",
"cumulative",
"and",
"self",
".",
"number_magnitudes",
">",
"1",
":",
"# Seismicity rates are currently cumulative - need to turn them",
"# into discrete",
"for",
"iloc",
"in",
"range",
"(",
"0",
",",
"self",
".",
"number_magnitudes",
"-",
"1",
")",
":",
"self",
".",
"strain",
".",
"seismicity_rate",
"[",
":",
",",
"iloc",
"]",
"=",
"self",
".",
"strain",
".",
"seismicity_rate",
"[",
":",
",",
"iloc",
"]",
"-",
"self",
".",
"strain",
".",
"seismicity_rate",
"[",
":",
",",
"iloc",
"+",
"1",
"]",
"if",
"not",
"in_seconds",
":",
"self",
".",
"strain",
".",
"seismicity_rate",
"=",
"self",
".",
"strain",
".",
"seismicity_rate",
"*",
"SECS_PER_YEAR",
"for",
"key",
"in",
"STRAIN_VARIABLES",
":",
"self",
".",
"strain",
".",
"data",
"[",
"key",
"]",
"=",
"self",
".",
"strain",
".",
"data",
"[",
"key",
"]",
"*",
"SECS_PER_YEAR"
] |
Main function to calculate the activity rate (for each of the
magnitudes in target_magnitudes) for all of the cells specified in
the input strain model file
:param strain_data:
Strain model as an instance of :class:
openquake.hmtk.strain.geodetic_strain.GeodeticStrain
:param bool cumulative:
Set to true if the cumulative rate is required, False for
incremental
:param bool in_seconds:
Returns the activity rate in seconds (True) or else as an annual
activity rate
|
[
"Main",
"function",
"to",
"calculate",
"the",
"activity",
"rate",
"(",
"for",
"each",
"of",
"the",
"magnitudes",
"in",
"target_magnitudes",
")",
"for",
"all",
"of",
"the",
"cells",
"specified",
"in",
"the",
"input",
"strain",
"model",
"file"
] |
python
|
train
| 45.606742 |
sengupta/twss
|
twss/twsslib.py
|
https://github.com/sengupta/twss/blob/69269b58bc1c388f53b181ecb7c5d6ee5ee8c03f/twss/twsslib.py#L73-L79
|
def is_twss(self, phrase):
"""
    The magic function - this accepts a phrase and tells you if it
classifies as an entendre
"""
featureset = self.extract_features(phrase)
return self.classifier.classify(featureset)
|
[
"def",
"is_twss",
"(",
"self",
",",
"phrase",
")",
":",
"featureset",
"=",
"self",
".",
"extract_features",
"(",
"phrase",
")",
"return",
"self",
".",
"classifier",
".",
"classify",
"(",
"featureset",
")"
] |
The magic function - this accepts a phrase and tells you if it
classifies as an entendre
|
[
"The",
"magic",
"function",
"-",
"this",
"accepts",
"a",
"phrase",
"and",
"tells",
"you",
"if",
"it",
"classifies",
"as",
"an",
"entendre"
] |
python
|
train
| 35.857143 |
benley/butcher
|
butcher/main.py
|
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/main.py#L416-L420
|
def stub_main():
"""setuptools blah: it still can't run a module as a script entry_point"""
from google.apputils import run_script_module
import butcher.main
run_script_module.RunScriptModule(butcher.main)
|
[
"def",
"stub_main",
"(",
")",
":",
"from",
"google",
".",
"apputils",
"import",
"run_script_module",
"import",
"butcher",
".",
"main",
"run_script_module",
".",
"RunScriptModule",
"(",
"butcher",
".",
"main",
")"
] |
setuptools blah: it still can't run a module as a script entry_point
|
[
"setuptools",
"blah",
":",
"it",
"still",
"can",
"t",
"run",
"a",
"module",
"as",
"a",
"script",
"entry_point"
] |
python
|
train
| 43.4 |
googleapis/google-cloud-python
|
logging/google/cloud/logging/sink.py
|
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/logging/google/cloud/logging/sink.py#L182-L206
|
def update(self, client=None, unique_writer_identity=False):
"""API call: update sink configuration via a PUT request
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/update
:type client: :class:`~google.cloud.logging.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current sink.
:type unique_writer_identity: bool
:param unique_writer_identity: (Optional) determines the kind of
IAM identity returned as
writer_identity in the new sink.
"""
client = self._require_client(client)
resource = client.sinks_api.sink_update(
self.project,
self.name,
self.filter_,
self.destination,
unique_writer_identity=unique_writer_identity,
)
self._update_from_api_repr(resource)
|
[
"def",
"update",
"(",
"self",
",",
"client",
"=",
"None",
",",
"unique_writer_identity",
"=",
"False",
")",
":",
"client",
"=",
"self",
".",
"_require_client",
"(",
"client",
")",
"resource",
"=",
"client",
".",
"sinks_api",
".",
"sink_update",
"(",
"self",
".",
"project",
",",
"self",
".",
"name",
",",
"self",
".",
"filter_",
",",
"self",
".",
"destination",
",",
"unique_writer_identity",
"=",
"unique_writer_identity",
",",
")",
"self",
".",
"_update_from_api_repr",
"(",
"resource",
")"
] |
API call: update sink configuration via a PUT request
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/update
:type client: :class:`~google.cloud.logging.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current sink.
:type unique_writer_identity: bool
:param unique_writer_identity: (Optional) determines the kind of
IAM identity returned as
writer_identity in the new sink.
|
[
"API",
"call",
":",
"update",
"sink",
"configuration",
"via",
"a",
"PUT",
"request"
] |
python
|
train
| 40.84 |
jkenlooper/chill
|
src/chill/api.py
|
https://github.com/jkenlooper/chill/blob/35360c17c2a3b769ecb5406c6dabcf4cc70bd76f/src/chill/api.py#L99-L118
|
def _template(node_id, value=None):
"Check if a template is assigned to it and render that with the value"
result = []
select_template_from_node = fetch_query_string('select_template_from_node.sql')
try:
result = db.execute(text(select_template_from_node), node_id=node_id)
template_result = result.fetchone()
result.close()
if template_result and template_result['name']:
template = template_result['name']
if isinstance(value, dict):
return render_template(template, **value)
else:
return render_template(template, value=value)
except DatabaseError as err:
current_app.logger.error("DatabaseError: %s", err)
# No template assigned to this node so just return the value
return value
|
[
"def",
"_template",
"(",
"node_id",
",",
"value",
"=",
"None",
")",
":",
"result",
"=",
"[",
"]",
"select_template_from_node",
"=",
"fetch_query_string",
"(",
"'select_template_from_node.sql'",
")",
"try",
":",
"result",
"=",
"db",
".",
"execute",
"(",
"text",
"(",
"select_template_from_node",
")",
",",
"node_id",
"=",
"node_id",
")",
"template_result",
"=",
"result",
".",
"fetchone",
"(",
")",
"result",
".",
"close",
"(",
")",
"if",
"template_result",
"and",
"template_result",
"[",
"'name'",
"]",
":",
"template",
"=",
"template_result",
"[",
"'name'",
"]",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"return",
"render_template",
"(",
"template",
",",
"*",
"*",
"value",
")",
"else",
":",
"return",
"render_template",
"(",
"template",
",",
"value",
"=",
"value",
")",
"except",
"DatabaseError",
"as",
"err",
":",
"current_app",
".",
"logger",
".",
"error",
"(",
"\"DatabaseError: %s\"",
",",
"err",
")",
"# No template assigned to this node so just return the value",
"return",
"value"
] |
Check if a template is assigned to it and render that with the value
|
[
"Check",
"if",
"a",
"template",
"is",
"assigned",
"to",
"it",
"and",
"render",
"that",
"with",
"the",
"value"
] |
python
|
train
| 40.1 |
robotools/fontParts
|
Lib/fontParts/base/bPoint.py
|
https://github.com/robotools/fontParts/blob/d2ff106fe95f9d566161d936a645157626568712/Lib/fontParts/base/bPoint.py#L193-L218
|
def _set_bcpIn(self, value):
"""
Subclasses may override this method.
"""
x, y = absoluteBCPIn(self.anchor, value)
segment = self._segment
if segment.type == "move" and value != (0, 0):
raise FontPartsError(("Cannot set the bcpIn for the first "
"point in an open contour.")
)
else:
offCurves = segment.offCurve
if offCurves:
# if the two off curves are located at the anchor
# coordinates we can switch to a line segment type.
if value == (0, 0) and self.bcpOut == (0, 0):
segment.type = "line"
segment.smooth = False
else:
offCurves[-1].x = x
offCurves[-1].y = y
elif value != (0, 0):
segment.type = "curve"
offCurves = segment.offCurve
offCurves[-1].x = x
offCurves[-1].y = y
|
[
"def",
"_set_bcpIn",
"(",
"self",
",",
"value",
")",
":",
"x",
",",
"y",
"=",
"absoluteBCPIn",
"(",
"self",
".",
"anchor",
",",
"value",
")",
"segment",
"=",
"self",
".",
"_segment",
"if",
"segment",
".",
"type",
"==",
"\"move\"",
"and",
"value",
"!=",
"(",
"0",
",",
"0",
")",
":",
"raise",
"FontPartsError",
"(",
"(",
"\"Cannot set the bcpIn for the first \"",
"\"point in an open contour.\"",
")",
")",
"else",
":",
"offCurves",
"=",
"segment",
".",
"offCurve",
"if",
"offCurves",
":",
"# if the two off curves are located at the anchor",
"# coordinates we can switch to a line segment type.",
"if",
"value",
"==",
"(",
"0",
",",
"0",
")",
"and",
"self",
".",
"bcpOut",
"==",
"(",
"0",
",",
"0",
")",
":",
"segment",
".",
"type",
"=",
"\"line\"",
"segment",
".",
"smooth",
"=",
"False",
"else",
":",
"offCurves",
"[",
"-",
"1",
"]",
".",
"x",
"=",
"x",
"offCurves",
"[",
"-",
"1",
"]",
".",
"y",
"=",
"y",
"elif",
"value",
"!=",
"(",
"0",
",",
"0",
")",
":",
"segment",
".",
"type",
"=",
"\"curve\"",
"offCurves",
"=",
"segment",
".",
"offCurve",
"offCurves",
"[",
"-",
"1",
"]",
".",
"x",
"=",
"x",
"offCurves",
"[",
"-",
"1",
"]",
".",
"y",
"=",
"y"
] |
Subclasses may override this method.
|
[
"Subclasses",
"may",
"override",
"this",
"method",
"."
] |
python
|
train
| 39.692308 |
saltstack/salt
|
salt/sdb/memcached.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/sdb/memcached.py#L62-L68
|
def set_(key, value, profile=None):
'''
Set a key/value pair in memcached
'''
conn = salt.utils.memcached.get_conn(profile)
time = profile.get('expire', DEFAULT_EXPIRATION)
return salt.utils.memcached.set_(conn, key, value, time=time)
|
[
"def",
"set_",
"(",
"key",
",",
"value",
",",
"profile",
"=",
"None",
")",
":",
"conn",
"=",
"salt",
".",
"utils",
".",
"memcached",
".",
"get_conn",
"(",
"profile",
")",
"time",
"=",
"profile",
".",
"get",
"(",
"'expire'",
",",
"DEFAULT_EXPIRATION",
")",
"return",
"salt",
".",
"utils",
".",
"memcached",
".",
"set_",
"(",
"conn",
",",
"key",
",",
"value",
",",
"time",
"=",
"time",
")"
] |
Set a key/value pair in memcached
|
[
"Set",
"a",
"key",
"/",
"value",
"pair",
"in",
"memcached"
] |
python
|
train
| 36 |
iotile/coretools
|
transport_plugins/websocket/iotile_transport_websocket/device_adapter.py
|
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/websocket/iotile_transport_websocket/device_adapter.py#L84-L95
|
async def connect(self, conn_id, connection_string):
"""Connect to a device.
See :meth:`AbstractDeviceAdapter.connect`.
"""
self._ensure_connection(conn_id, False)
msg = dict(connection_string=connection_string)
await self._send_command(OPERATIONS.CONNECT, msg, COMMANDS.ConnectResponse)
self._setup_connection(conn_id, connection_string)
|
[
"async",
"def",
"connect",
"(",
"self",
",",
"conn_id",
",",
"connection_string",
")",
":",
"self",
".",
"_ensure_connection",
"(",
"conn_id",
",",
"False",
")",
"msg",
"=",
"dict",
"(",
"connection_string",
"=",
"connection_string",
")",
"await",
"self",
".",
"_send_command",
"(",
"OPERATIONS",
".",
"CONNECT",
",",
"msg",
",",
"COMMANDS",
".",
"ConnectResponse",
")",
"self",
".",
"_setup_connection",
"(",
"conn_id",
",",
"connection_string",
")"
] |
Connect to a device.
See :meth:`AbstractDeviceAdapter.connect`.
|
[
"Connect",
"to",
"a",
"device",
"."
] |
python
|
train
| 32.25 |
gawel/irc3
|
irc3/plugins/async.py
|
https://github.com/gawel/irc3/blob/cd27840a5809a1f803dc620860fe75d83d2a2ec8/irc3/plugins/async.py#L92-L100
|
def process_results(self, results=None, **value):
"""take results list of all events and put them in a dict"""
channels = []
for res in results:
channels.extend(res.pop('channels', '').split())
value.update(res)
value['channels'] = channels
value['success'] = value.get('retcode') == '318'
return value
|
[
"def",
"process_results",
"(",
"self",
",",
"results",
"=",
"None",
",",
"*",
"*",
"value",
")",
":",
"channels",
"=",
"[",
"]",
"for",
"res",
"in",
"results",
":",
"channels",
".",
"extend",
"(",
"res",
".",
"pop",
"(",
"'channels'",
",",
"''",
")",
".",
"split",
"(",
")",
")",
"value",
".",
"update",
"(",
"res",
")",
"value",
"[",
"'channels'",
"]",
"=",
"channels",
"value",
"[",
"'success'",
"]",
"=",
"value",
".",
"get",
"(",
"'retcode'",
")",
"==",
"'318'",
"return",
"value"
] |
take results list of all events and put them in a dict
|
[
"take",
"results",
"list",
"of",
"all",
"events",
"and",
"put",
"them",
"in",
"a",
"dict"
] |
python
|
train
| 40.666667 |
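To see what the merge in the process_results record above produces, the same logic can be run standalone on hand-made WHOIS-style replies; the values are illustrative (318 is IRC's RPL_ENDOFWHOIS).

def process_results(results, **value):
    channels = []
    for res in results:
        channels.extend(res.pop('channels', '').split())
        value.update(res)
    value['channels'] = channels
    value['success'] = value.get('retcode') == '318'
    return value

results = [
    {'channels': '#irc3 #python', 'username': 'gawel'},
    {'retcode': '318'},
]
print(process_results(results))
# {'username': 'gawel', 'retcode': '318',
#  'channels': ['#irc3', '#python'], 'success': True}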
cloud9ers/gurumate
|
environment/lib/python2.7/site-packages/IPython/nbformat/v3/nbbase.py
|
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/nbformat/v3/nbbase.py#L53-L92
|
def new_output(output_type=None, output_text=None, output_png=None,
output_html=None, output_svg=None, output_latex=None, output_json=None,
output_javascript=None, output_jpeg=None, prompt_number=None,
etype=None, evalue=None, traceback=None):
"""Create a new code cell with input and output"""
output = NotebookNode()
if output_type is not None:
output.output_type = unicode(output_type)
if output_type != 'pyerr':
if output_text is not None:
output.text = unicode(output_text)
if output_png is not None:
output.png = bytes(output_png)
if output_jpeg is not None:
output.jpeg = bytes(output_jpeg)
if output_html is not None:
output.html = unicode(output_html)
if output_svg is not None:
output.svg = unicode(output_svg)
if output_latex is not None:
output.latex = unicode(output_latex)
if output_json is not None:
output.json = unicode(output_json)
if output_javascript is not None:
output.javascript = unicode(output_javascript)
if output_type == u'pyout':
if prompt_number is not None:
output.prompt_number = int(prompt_number)
if output_type == u'pyerr':
if etype is not None:
output.etype = unicode(etype)
if evalue is not None:
output.evalue = unicode(evalue)
if traceback is not None:
output.traceback = [unicode(frame) for frame in list(traceback)]
return output
|
[
"def",
"new_output",
"(",
"output_type",
"=",
"None",
",",
"output_text",
"=",
"None",
",",
"output_png",
"=",
"None",
",",
"output_html",
"=",
"None",
",",
"output_svg",
"=",
"None",
",",
"output_latex",
"=",
"None",
",",
"output_json",
"=",
"None",
",",
"output_javascript",
"=",
"None",
",",
"output_jpeg",
"=",
"None",
",",
"prompt_number",
"=",
"None",
",",
"etype",
"=",
"None",
",",
"evalue",
"=",
"None",
",",
"traceback",
"=",
"None",
")",
":",
"output",
"=",
"NotebookNode",
"(",
")",
"if",
"output_type",
"is",
"not",
"None",
":",
"output",
".",
"output_type",
"=",
"unicode",
"(",
"output_type",
")",
"if",
"output_type",
"!=",
"'pyerr'",
":",
"if",
"output_text",
"is",
"not",
"None",
":",
"output",
".",
"text",
"=",
"unicode",
"(",
"output_text",
")",
"if",
"output_png",
"is",
"not",
"None",
":",
"output",
".",
"png",
"=",
"bytes",
"(",
"output_png",
")",
"if",
"output_jpeg",
"is",
"not",
"None",
":",
"output",
".",
"jpeg",
"=",
"bytes",
"(",
"output_jpeg",
")",
"if",
"output_html",
"is",
"not",
"None",
":",
"output",
".",
"html",
"=",
"unicode",
"(",
"output_html",
")",
"if",
"output_svg",
"is",
"not",
"None",
":",
"output",
".",
"svg",
"=",
"unicode",
"(",
"output_svg",
")",
"if",
"output_latex",
"is",
"not",
"None",
":",
"output",
".",
"latex",
"=",
"unicode",
"(",
"output_latex",
")",
"if",
"output_json",
"is",
"not",
"None",
":",
"output",
".",
"json",
"=",
"unicode",
"(",
"output_json",
")",
"if",
"output_javascript",
"is",
"not",
"None",
":",
"output",
".",
"javascript",
"=",
"unicode",
"(",
"output_javascript",
")",
"if",
"output_type",
"==",
"u'pyout'",
":",
"if",
"prompt_number",
"is",
"not",
"None",
":",
"output",
".",
"prompt_number",
"=",
"int",
"(",
"prompt_number",
")",
"if",
"output_type",
"==",
"u'pyerr'",
":",
"if",
"etype",
"is",
"not",
"None",
":",
"output",
".",
"etype",
"=",
"unicode",
"(",
"etype",
")",
"if",
"evalue",
"is",
"not",
"None",
":",
"output",
".",
"evalue",
"=",
"unicode",
"(",
"evalue",
")",
"if",
"traceback",
"is",
"not",
"None",
":",
"output",
".",
"traceback",
"=",
"[",
"unicode",
"(",
"frame",
")",
"for",
"frame",
"in",
"list",
"(",
"traceback",
")",
"]",
"return",
"output"
] |
Create a new code cell with input and output
|
[
"Create",
"a",
"new",
"code",
"cell",
"with",
"input",
"and",
"output"
] |
python
|
test
| 38.075 |
pywbem/pywbem
|
attic/cimxml_parse.py
|
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/attic/cimxml_parse.py#L633-L659
|
def parse_qualifier(parser, event, node): #pylint: disable=unused-argument
"""Parse CIM/XML QUALIFIER element and return CIMQualifier"""
name = _get_required_attribute(node, 'NAME')
cim_type = _get_required_attribute(node, 'TYPE')
# TODO 2/16 KS: Why is propagated not used?
propagated = _get_attribute(node, 'PROPAGATED')
(next_event, next_node) = six.next(parser)
if _is_end(next_event, next_node, 'QUALIFIER'):
return CIMQualifier(name, None, type=cim_type)
if _is_start(next_event, next_node, 'VALUE'):
value = parse_value(parser, next_event, next_node)
elif _is_start(next_event, next_node, 'VALUE.ARRAY'):
#pylint: disable=redefined-variable-type
# redefined from str to list.
value = parse_value_array(parser, next_event, next_node)
else:
raise ParseError('Expecting (VALUE | VALUE.ARRAY)')
result = CIMQualifier(name, tocimobj(cim_type, value))
_get_end_event(parser, 'QUALIFIER')
return result
|
[
"def",
"parse_qualifier",
"(",
"parser",
",",
"event",
",",
"node",
")",
":",
"#pylint: disable=unused-argument",
"name",
"=",
"_get_required_attribute",
"(",
"node",
",",
"'NAME'",
")",
"cim_type",
"=",
"_get_required_attribute",
"(",
"node",
",",
"'TYPE'",
")",
"# TODO 2/16 KS: Why is propagated not used?",
"propagated",
"=",
"_get_attribute",
"(",
"node",
",",
"'PROPAGATED'",
")",
"(",
"next_event",
",",
"next_node",
")",
"=",
"six",
".",
"next",
"(",
"parser",
")",
"if",
"_is_end",
"(",
"next_event",
",",
"next_node",
",",
"'QUALIFIER'",
")",
":",
"return",
"CIMQualifier",
"(",
"name",
",",
"None",
",",
"type",
"=",
"cim_type",
")",
"if",
"_is_start",
"(",
"next_event",
",",
"next_node",
",",
"'VALUE'",
")",
":",
"value",
"=",
"parse_value",
"(",
"parser",
",",
"next_event",
",",
"next_node",
")",
"elif",
"_is_start",
"(",
"next_event",
",",
"next_node",
",",
"'VALUE.ARRAY'",
")",
":",
"#pylint: disable=redefined-variable-type",
"# redefined from str to list.",
"value",
"=",
"parse_value_array",
"(",
"parser",
",",
"next_event",
",",
"next_node",
")",
"else",
":",
"raise",
"ParseError",
"(",
"'Expecting (VALUE | VALUE.ARRAY)'",
")",
"result",
"=",
"CIMQualifier",
"(",
"name",
",",
"tocimobj",
"(",
"cim_type",
",",
"value",
")",
")",
"_get_end_event",
"(",
"parser",
",",
"'QUALIFIER'",
")",
"return",
"result"
] |
Parse CIM/XML QUALIFIER element and return CIMQualifier
|
[
"Parse",
"CIM",
"/",
"XML",
"QUALIFIER",
"element",
"and",
"return",
"CIMQualifier"
] |
python
|
train
| 36.407407 |
piface/pifacedigitalio
|
pifacedigitalio/core.py
|
https://github.com/piface/pifacedigitalio/blob/d231a82bdb55d5f57f44ba7aec00bfd6c0b9a9d4/pifacedigitalio/core.py#L216-L233
|
def digital_write(pin_num, value, hardware_addr=0):
"""Writes the value to the input pin specified.
    .. note:: This function is for familiarity with users of other types of
IO board. Consider accessing the ``output_pins`` attribute of a
PiFaceDigital object:
>>> pfd = PiFaceDigital(hardware_addr)
>>> pfd.output_pins[pin_num].value = 1
:param pin_num: The pin number to write to.
:type pin_num: int
:param value: The value to write.
:type value: int
:param hardware_addr: The board to read from (default: 0)
:type hardware_addr: int
"""
_get_pifacedigital(hardware_addr).output_pins[pin_num].value = value
|
[
"def",
"digital_write",
"(",
"pin_num",
",",
"value",
",",
"hardware_addr",
"=",
"0",
")",
":",
"_get_pifacedigital",
"(",
"hardware_addr",
")",
".",
"output_pins",
"[",
"pin_num",
"]",
".",
"value",
"=",
"value"
] |
Writes the value to the input pin specified.
.. note:: This function is for familiarity with users of other types of
IO board. Consider accessing the ``output_pins`` attribute of a
PiFaceDigital object:
>>> pfd = PiFaceDigital(hardware_addr)
>>> pfd.output_pins[pin_num].value = 1
:param pin_num: The pin number to write to.
:type pin_num: int
:param value: The value to write.
:type value: int
:param hardware_addr: The board to read from (default: 0)
:type hardware_addr: int
|
[
"Writes",
"the",
"value",
"to",
"the",
"input",
"pin",
"specified",
"."
] |
python
|
train
| 36.722222 |
bitesofcode/projexui
|
projexui/widgets/xchart/xchartdataset.py
|
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xchart/xchartdataset.py#L39-L47
|
def color(self, key=None):
"""
Returns the color for this data set.
:return <QColor>
"""
if key is not None:
return self._colorMap.get(nativestring(key), self._color)
return self._color
|
[
"def",
"color",
"(",
"self",
",",
"key",
"=",
"None",
")",
":",
"if",
"key",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_colorMap",
".",
"get",
"(",
"nativestring",
"(",
"key",
")",
",",
"self",
".",
"_color",
")",
"return",
"self",
".",
"_color"
] |
Returns the color for this data set.
:return <QColor>
|
[
"Returns",
"the",
"color",
"for",
"this",
"data",
"set",
".",
":",
"return",
"<QColor",
">"
] |
python
|
train
| 28.666667 |
coursera-dl/coursera-dl
|
coursera/downloaders.py
|
https://github.com/coursera-dl/coursera-dl/blob/9b434bcf3c4011bf3181429fe674633ae5fb7d4d/coursera/downloaders.py#L389-L406
|
def get_downloader(session, class_name, args):
"""
Decides which downloader to use.
"""
external = {
'wget': WgetDownloader,
'curl': CurlDownloader,
'aria2': Aria2Downloader,
'axel': AxelDownloader,
}
for bin, class_ in iteritems(external):
if getattr(args, bin):
return class_(session, bin=getattr(args, bin),
downloader_arguments=args.downloader_arguments)
return NativeDownloader(session)
|
[
"def",
"get_downloader",
"(",
"session",
",",
"class_name",
",",
"args",
")",
":",
"external",
"=",
"{",
"'wget'",
":",
"WgetDownloader",
",",
"'curl'",
":",
"CurlDownloader",
",",
"'aria2'",
":",
"Aria2Downloader",
",",
"'axel'",
":",
"AxelDownloader",
",",
"}",
"for",
"bin",
",",
"class_",
"in",
"iteritems",
"(",
"external",
")",
":",
"if",
"getattr",
"(",
"args",
",",
"bin",
")",
":",
"return",
"class_",
"(",
"session",
",",
"bin",
"=",
"getattr",
"(",
"args",
",",
"bin",
")",
",",
"downloader_arguments",
"=",
"args",
".",
"downloader_arguments",
")",
"return",
"NativeDownloader",
"(",
"session",
")"
] |
Decides which downloader to use.
|
[
"Decides",
"which",
"downloader",
"to",
"use",
"."
] |
python
|
train
| 26.833333 |
IdentityPython/oidcendpoint
|
src/oidcendpoint/oidc/registration.py
|
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/oidc/registration.py#L288-L322
|
def _verify_sector_identifier(self, request):
"""
Verify `sector_identifier_uri` is reachable and that it contains
`redirect_uri`s.
:param request: Provider registration request
:return: si_redirects, sector_id
:raises: InvalidSectorIdentifier
"""
si_url = request["sector_identifier_uri"]
try:
res = self.endpoint_context.httpc.get(si_url)
except Exception as err:
logger.error(err)
res = None
if not res:
raise InvalidSectorIdentifier("Couldn't read from sector_identifier_uri")
logger.debug("sector_identifier_uri => %s", sanitize(res.text))
try:
si_redirects = json.loads(res.text)
except ValueError:
raise InvalidSectorIdentifier(
"Error deserializing sector_identifier_uri content")
if "redirect_uris" in request:
logger.debug("redirect_uris: %s", request["redirect_uris"])
for uri in request["redirect_uris"]:
if uri not in si_redirects:
raise InvalidSectorIdentifier(
"redirect_uri missing from sector_identifiers")
return si_redirects, si_url
|
[
"def",
"_verify_sector_identifier",
"(",
"self",
",",
"request",
")",
":",
"si_url",
"=",
"request",
"[",
"\"sector_identifier_uri\"",
"]",
"try",
":",
"res",
"=",
"self",
".",
"endpoint_context",
".",
"httpc",
".",
"get",
"(",
"si_url",
")",
"except",
"Exception",
"as",
"err",
":",
"logger",
".",
"error",
"(",
"err",
")",
"res",
"=",
"None",
"if",
"not",
"res",
":",
"raise",
"InvalidSectorIdentifier",
"(",
"\"Couldn't read from sector_identifier_uri\"",
")",
"logger",
".",
"debug",
"(",
"\"sector_identifier_uri => %s\"",
",",
"sanitize",
"(",
"res",
".",
"text",
")",
")",
"try",
":",
"si_redirects",
"=",
"json",
".",
"loads",
"(",
"res",
".",
"text",
")",
"except",
"ValueError",
":",
"raise",
"InvalidSectorIdentifier",
"(",
"\"Error deserializing sector_identifier_uri content\"",
")",
"if",
"\"redirect_uris\"",
"in",
"request",
":",
"logger",
".",
"debug",
"(",
"\"redirect_uris: %s\"",
",",
"request",
"[",
"\"redirect_uris\"",
"]",
")",
"for",
"uri",
"in",
"request",
"[",
"\"redirect_uris\"",
"]",
":",
"if",
"uri",
"not",
"in",
"si_redirects",
":",
"raise",
"InvalidSectorIdentifier",
"(",
"\"redirect_uri missing from sector_identifiers\"",
")",
"return",
"si_redirects",
",",
"si_url"
] |
Verify `sector_identifier_uri` is reachable and that it contains
`redirect_uri`s.
:param request: Provider registration request
:return: si_redirects, sector_id
:raises: InvalidSectorIdentifier
|
[
"Verify",
"sector_identifier_uri",
"is",
"reachable",
"and",
"that",
"it",
"contains",
"redirect_uri",
"s",
"."
] |
python
|
train
| 34.971429 |
biolink/ontobio
|
ontobio/ontol.py
|
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L543-L572
|
def traverse_nodes(self, qids, up=True, down=False, **args):
"""
Traverse (optionally) up and (optionally) down from an input set of nodes
Arguments
---------
qids : list[str]
list of seed node IDs to start from
up : bool
if True, include ancestors
down : bool
if True, include descendants
relations : list[str]
list of relations used to filter
Return
------
list[str]
nodes reachable from qids
"""
g = self.get_filtered_graph(**args)
nodes = set()
for id in qids:
# reflexive - always add self
nodes.add(id)
if down:
nodes.update(nx.descendants(g, id))
if up:
nodes.update(nx.ancestors(g, id))
return nodes
|
[
"def",
"traverse_nodes",
"(",
"self",
",",
"qids",
",",
"up",
"=",
"True",
",",
"down",
"=",
"False",
",",
"*",
"*",
"args",
")",
":",
"g",
"=",
"self",
".",
"get_filtered_graph",
"(",
"*",
"*",
"args",
")",
"nodes",
"=",
"set",
"(",
")",
"for",
"id",
"in",
"qids",
":",
"# reflexive - always add self",
"nodes",
".",
"add",
"(",
"id",
")",
"if",
"down",
":",
"nodes",
".",
"update",
"(",
"nx",
".",
"descendants",
"(",
"g",
",",
"id",
")",
")",
"if",
"up",
":",
"nodes",
".",
"update",
"(",
"nx",
".",
"ancestors",
"(",
"g",
",",
"id",
")",
")",
"return",
"nodes"
] |
Traverse (optionally) up and (optionally) down from an input set of nodes
Arguments
---------
qids : list[str]
list of seed node IDs to start from
up : bool
if True, include ancestors
down : bool
if True, include descendants
relations : list[str]
list of relations used to filter
Return
------
list[str]
nodes reachable from qids
|
[
"Traverse",
"(",
"optionally",
")",
"up",
"and",
"(",
"optionally",
")",
"down",
"from",
"an",
"input",
"set",
"of",
"nodes"
] |
python
|
train
| 28.266667 |
annoviko/pyclustering
|
pyclustering/cluster/__init__.py
|
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/__init__.py#L618-L653
|
def __draw_canvas_cluster(self, ax, dimension, cluster_descr):
"""!
@brief Draw canvas cluster descriptor.
@param[in] ax (Axis): Axis of the canvas where canvas cluster descriptor should be displayed.
@param[in] dimension (uint): Canvas dimension.
@param[in] cluster_descr (canvas_cluster_descr): Canvas cluster descriptor that should be displayed.
@return (fig) Figure where clusters are shown.
"""
cluster = cluster_descr.cluster
data = cluster_descr.data
marker = cluster_descr.marker
markersize = cluster_descr.markersize
color = cluster_descr.color
for item in cluster:
if dimension == 1:
if data is None:
ax.plot(item[0], 0.0, color = color, marker = marker, markersize = markersize)
else:
ax.plot(data[item][0], 0.0, color = color, marker = marker, markersize = markersize)
elif dimension == 2:
if data is None:
ax.plot(item[0], item[1], color = color, marker = marker, markersize = markersize)
else:
ax.plot(data[item][0], data[item][1], color = color, marker = marker, markersize = markersize)
elif dimension == 3:
if data is None:
ax.scatter(item[0], item[1], item[2], c = color, marker = marker, s = markersize)
else:
ax.scatter(data[item][0], data[item][1], data[item][2], c = color, marker = marker, s = markersize)
|
[
"def",
"__draw_canvas_cluster",
"(",
"self",
",",
"ax",
",",
"dimension",
",",
"cluster_descr",
")",
":",
"cluster",
"=",
"cluster_descr",
".",
"cluster",
"data",
"=",
"cluster_descr",
".",
"data",
"marker",
"=",
"cluster_descr",
".",
"marker",
"markersize",
"=",
"cluster_descr",
".",
"markersize",
"color",
"=",
"cluster_descr",
".",
"color",
"for",
"item",
"in",
"cluster",
":",
"if",
"dimension",
"==",
"1",
":",
"if",
"data",
"is",
"None",
":",
"ax",
".",
"plot",
"(",
"item",
"[",
"0",
"]",
",",
"0.0",
",",
"color",
"=",
"color",
",",
"marker",
"=",
"marker",
",",
"markersize",
"=",
"markersize",
")",
"else",
":",
"ax",
".",
"plot",
"(",
"data",
"[",
"item",
"]",
"[",
"0",
"]",
",",
"0.0",
",",
"color",
"=",
"color",
",",
"marker",
"=",
"marker",
",",
"markersize",
"=",
"markersize",
")",
"elif",
"dimension",
"==",
"2",
":",
"if",
"data",
"is",
"None",
":",
"ax",
".",
"plot",
"(",
"item",
"[",
"0",
"]",
",",
"item",
"[",
"1",
"]",
",",
"color",
"=",
"color",
",",
"marker",
"=",
"marker",
",",
"markersize",
"=",
"markersize",
")",
"else",
":",
"ax",
".",
"plot",
"(",
"data",
"[",
"item",
"]",
"[",
"0",
"]",
",",
"data",
"[",
"item",
"]",
"[",
"1",
"]",
",",
"color",
"=",
"color",
",",
"marker",
"=",
"marker",
",",
"markersize",
"=",
"markersize",
")",
"elif",
"dimension",
"==",
"3",
":",
"if",
"data",
"is",
"None",
":",
"ax",
".",
"scatter",
"(",
"item",
"[",
"0",
"]",
",",
"item",
"[",
"1",
"]",
",",
"item",
"[",
"2",
"]",
",",
"c",
"=",
"color",
",",
"marker",
"=",
"marker",
",",
"s",
"=",
"markersize",
")",
"else",
":",
"ax",
".",
"scatter",
"(",
"data",
"[",
"item",
"]",
"[",
"0",
"]",
",",
"data",
"[",
"item",
"]",
"[",
"1",
"]",
",",
"data",
"[",
"item",
"]",
"[",
"2",
"]",
",",
"c",
"=",
"color",
",",
"marker",
"=",
"marker",
",",
"s",
"=",
"markersize",
")"
] |
!
@brief Draw canvas cluster descriptor.
@param[in] ax (Axis): Axis of the canvas where canvas cluster descriptor should be displayed.
@param[in] dimension (uint): Canvas dimension.
@param[in] cluster_descr (canvas_cluster_descr): Canvas cluster descriptor that should be displayed.
@return (fig) Figure where clusters are shown.
|
[
"!"
] |
python
|
valid
| 44.638889 |
SheffieldML/GPy
|
GPy/models/sparse_gp_classification.py
|
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/models/sparse_gp_classification.py#L76-L89
|
def from_dict(input_dict, data=None):
"""
Instantiate an SparseGPClassification object using the information
in input_dict (built by the to_dict method).
:param data: It is used to provide X and Y for the case when the model
was saved using save_data=False in to_dict method.
:type data: tuple(:class:`np.ndarray`, :class:`np.ndarray`)
"""
import GPy
m = GPy.core.model.Model.from_dict(input_dict, data)
from copy import deepcopy
sparse_gp = deepcopy(m)
return SparseGPClassification(sparse_gp.X, sparse_gp.Y, sparse_gp.Z, sparse_gp.kern, sparse_gp.likelihood, sparse_gp.inference_method, sparse_gp.mean_function, name='sparse_gp_classification')
|
[
"def",
"from_dict",
"(",
"input_dict",
",",
"data",
"=",
"None",
")",
":",
"import",
"GPy",
"m",
"=",
"GPy",
".",
"core",
".",
"model",
".",
"Model",
".",
"from_dict",
"(",
"input_dict",
",",
"data",
")",
"from",
"copy",
"import",
"deepcopy",
"sparse_gp",
"=",
"deepcopy",
"(",
"m",
")",
"return",
"SparseGPClassification",
"(",
"sparse_gp",
".",
"X",
",",
"sparse_gp",
".",
"Y",
",",
"sparse_gp",
".",
"Z",
",",
"sparse_gp",
".",
"kern",
",",
"sparse_gp",
".",
"likelihood",
",",
"sparse_gp",
".",
"inference_method",
",",
"sparse_gp",
".",
"mean_function",
",",
"name",
"=",
"'sparse_gp_classification'",
")"
] |
Instantiate an SparseGPClassification object using the information
in input_dict (built by the to_dict method).
:param data: It is used to provide X and Y for the case when the model
was saved using save_data=False in to_dict method.
:type data: tuple(:class:`np.ndarray`, :class:`np.ndarray`)
|
[
"Instantiate",
"an",
"SparseGPClassification",
"object",
"using",
"the",
"information",
"in",
"input_dict",
"(",
"built",
"by",
"the",
"to_dict",
"method",
")",
"."
] |
python
|
train
| 52.428571 |
mitsei/dlkit
|
dlkit/handcar/repository/sessions.py
|
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/repository/sessions.py#L1905-L1945
|
def update_repository(self, repository_form=None):
"""Updates an existing repository.
:param repository_form: the form containing the elements to be updated
:type repository_form: ``osid.repository.RepositoryForm``
:raise: ``IllegalState`` -- ``repository_form`` already used in an update transaction
:raise: ``InvalidArgument`` -- the form contains an invalid value
:raise: ``NullArgument`` -- ``repository_form`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
:raise: ``Unsupported`` -- ``repository_form`` did not originate from ``get_repository_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
"""
if repository_form is None:
raise NullArgument()
if not isinstance(repository_form, abc_repository_objects.RepositoryForm):
raise InvalidArgument('argument type is not a RepositoryForm')
if not repository_form.is_for_update():
raise InvalidArgument('form is for create only, not update')
# Check for "sandbox" genus type. Hardcoded for now:
if repository_form._my_map['genusTypeId'] != 'mc3-objectivebank%3Amc3.learning.objectivebank.sandbox%40MIT-OEIT':
raise PermissionDenied('Handcar only supports updating \'sandbox\' type Repositories')
try:
if self._forms[repository_form.get_id().get_identifier()] == UPDATED:
raise IllegalState('form already used in an update transaction')
except KeyError:
raise Unsupported('form did not originate from this session')
if not repository_form.is_valid():
raise InvalidArgument('one or more of the form elements is invalid')
url_path = construct_url('objective_banks')
try:
result = self._put_request(url_path, repository_form._my_map)
except Exception:
raise # OperationFailed
self._forms[repository_form.get_id().get_identifier()] = UPDATED
return objects.Repository(result)
|
[
"def",
"update_repository",
"(",
"self",
",",
"repository_form",
"=",
"None",
")",
":",
"if",
"repository_form",
"is",
"None",
":",
"raise",
"NullArgument",
"(",
")",
"if",
"not",
"isinstance",
"(",
"repository_form",
",",
"abc_repository_objects",
".",
"RepositoryForm",
")",
":",
"raise",
"InvalidArgument",
"(",
"'argument type is not a RepositoryForm'",
")",
"if",
"not",
"repository_form",
".",
"is_for_update",
"(",
")",
":",
"raise",
"InvalidArgument",
"(",
"'form is for create only, not update'",
")",
"# Check for \"sandbox\" genus type. Hardcoded for now:",
"if",
"repository_form",
".",
"_my_map",
"[",
"'genusTypeId'",
"]",
"!=",
"'mc3-objectivebank%3Amc3.learning.objectivebank.sandbox%40MIT-OEIT'",
":",
"raise",
"PermissionDenied",
"(",
"'Handcar only supports updating \\'sandbox\\' type Repositories'",
")",
"try",
":",
"if",
"self",
".",
"_forms",
"[",
"repository_form",
".",
"get_id",
"(",
")",
".",
"get_identifier",
"(",
")",
"]",
"==",
"UPDATED",
":",
"raise",
"IllegalState",
"(",
"'form already used in an update transaction'",
")",
"except",
"KeyError",
":",
"raise",
"Unsupported",
"(",
"'form did not originate from this session'",
")",
"if",
"not",
"repository_form",
".",
"is_valid",
"(",
")",
":",
"raise",
"InvalidArgument",
"(",
"'one or more of the form elements is invalid'",
")",
"url_path",
"=",
"construct_url",
"(",
"'objective_banks'",
")",
"try",
":",
"result",
"=",
"self",
".",
"_put_request",
"(",
"url_path",
",",
"repository_form",
".",
"_my_map",
")",
"except",
"Exception",
":",
"raise",
"# OperationFailed",
"self",
".",
"_forms",
"[",
"repository_form",
".",
"get_id",
"(",
")",
".",
"get_identifier",
"(",
")",
"]",
"=",
"UPDATED",
"return",
"objects",
".",
"Repository",
"(",
"result",
")"
] |
Updates an existing repository.
:param repository_form: the form containing the elements to be updated
:type repository_form: ``osid.repository.RepositoryForm``
:raise: ``IllegalState`` -- ``repository_form`` already used in an update transaction
:raise: ``InvalidArgument`` -- the form contains an invalid value
:raise: ``NullArgument`` -- ``repository_form`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
:raise: ``Unsupported`` -- ``repository_form`` did not originate from ``get_repository_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
|
[
"Updates",
"an",
"existing",
"repository",
"."
] |
python
|
train
| 51.463415 |
ottogroup/palladium
|
palladium/eval.py
|
https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/eval.py#L82-L97
|
def list_cmd(argv=sys.argv[1:]): # pragma: no cover
"""\
List information about available models.
Uses the 'model_persister' from the configuration to display a list of
models and their metadata.
Usage:
pld-list [options]
Options:
-h --help Show this screen.
"""
docopt(list_cmd.__doc__, argv=argv)
initialize_config(__mode__='fit')
list()
|
[
"def",
"list_cmd",
"(",
"argv",
"=",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
")",
":",
"# pragma: no cover",
"docopt",
"(",
"list_cmd",
".",
"__doc__",
",",
"argv",
"=",
"argv",
")",
"initialize_config",
"(",
"__mode__",
"=",
"'fit'",
")",
"list",
"(",
")"
] |
\
List information about available models.
Uses the 'model_persister' from the configuration to display a list of
models and their metadata.
Usage:
pld-list [options]
Options:
-h --help Show this screen.
|
[
"\\",
"List",
"information",
"about",
"available",
"models",
"."
] |
python
|
train
| 22.8125 |
spyder-ide/spyder
|
spyder/widgets/mixins.py
|
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/widgets/mixins.py#L482-L487
|
def extend_selection_to_next(self, what='word', direction='left'):
"""
Extend selection to next *what* ('word' or 'character')
toward *direction* ('left' or 'right')
"""
self.__move_cursor_anchor(what, direction, QTextCursor.KeepAnchor)
|
[
"def",
"extend_selection_to_next",
"(",
"self",
",",
"what",
"=",
"'word'",
",",
"direction",
"=",
"'left'",
")",
":",
"self",
".",
"__move_cursor_anchor",
"(",
"what",
",",
"direction",
",",
"QTextCursor",
".",
"KeepAnchor",
")"
] |
Extend selection to next *what* ('word' or 'character')
toward *direction* ('left' or 'right')
|
[
"Extend",
"selection",
"to",
"next",
"*",
"what",
"*",
"(",
"word",
"or",
"character",
")",
"toward",
"*",
"direction",
"*",
"(",
"left",
"or",
"right",
")"
] |
python
|
train
| 46 |
Mindwerks/worldengine
|
worldengine/drawing_functions.py
|
https://github.com/Mindwerks/worldengine/blob/64dff8eb7824ce46b5b6cb8006bcef21822ef144/worldengine/drawing_functions.py#L39-L52
|
def draw_rivers_on_image(world, target, factor=1):
"""Draw only the rivers, it expect the background to be in place
"""
for y in range(world.height):
for x in range(world.width):
if world.is_land((x, y)) and (world.layers['river_map'].data[y, x] > 0.0):
for dx in range(factor):
for dy in range(factor):
target.set_pixel(x * factor + dx, y * factor + dy, (0, 0, 128, 255))
if world.is_land((x, y)) and (world.layers['lake_map'].data[y, x] != 0):
for dx in range(factor):
for dy in range(factor):
target.set_pixel(x * factor + dx, y * factor + dy, (0, 100, 128, 255))
|
[
"def",
"draw_rivers_on_image",
"(",
"world",
",",
"target",
",",
"factor",
"=",
"1",
")",
":",
"for",
"y",
"in",
"range",
"(",
"world",
".",
"height",
")",
":",
"for",
"x",
"in",
"range",
"(",
"world",
".",
"width",
")",
":",
"if",
"world",
".",
"is_land",
"(",
"(",
"x",
",",
"y",
")",
")",
"and",
"(",
"world",
".",
"layers",
"[",
"'river_map'",
"]",
".",
"data",
"[",
"y",
",",
"x",
"]",
">",
"0.0",
")",
":",
"for",
"dx",
"in",
"range",
"(",
"factor",
")",
":",
"for",
"dy",
"in",
"range",
"(",
"factor",
")",
":",
"target",
".",
"set_pixel",
"(",
"x",
"*",
"factor",
"+",
"dx",
",",
"y",
"*",
"factor",
"+",
"dy",
",",
"(",
"0",
",",
"0",
",",
"128",
",",
"255",
")",
")",
"if",
"world",
".",
"is_land",
"(",
"(",
"x",
",",
"y",
")",
")",
"and",
"(",
"world",
".",
"layers",
"[",
"'lake_map'",
"]",
".",
"data",
"[",
"y",
",",
"x",
"]",
"!=",
"0",
")",
":",
"for",
"dx",
"in",
"range",
"(",
"factor",
")",
":",
"for",
"dy",
"in",
"range",
"(",
"factor",
")",
":",
"target",
".",
"set_pixel",
"(",
"x",
"*",
"factor",
"+",
"dx",
",",
"y",
"*",
"factor",
"+",
"dy",
",",
"(",
"0",
",",
"100",
",",
"128",
",",
"255",
")",
")"
] |
Draw only the rivers, it expects the background to be in place
|
[
"Draw",
"only",
"the",
"rivers",
"it",
"expect",
"the",
"background",
"to",
"be",
"in",
"place"
] |
python
|
train
| 51.285714 |
boriel/zxbasic
|
arch/zx48k/backend/__16bit.py
|
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/backend/__16bit.py#L894-L930
|
def _shru16(ins):
''' Logical right shift 16bit unsigned integer.
The result is pushed onto the stack.
Optimizations:
* If 2nd op is 0 then
do nothing
* If 2nd op is 1
Shift Right Arithmetic
'''
op1, op2 = tuple(ins.quad[2:])
if is_int(op2):
op = int16(op2)
if op == 0:
return []
output = _16bit_oper(op1)
if op == 1:
output.append('srl h')
output.append('rr l')
output.append('push hl')
return output
output.append('ld b, %i' % op)
else:
output = _8bit_oper(op2)
output.append('ld b, a')
output.extend(_16bit_oper(op1))
label = tmp_label()
output.append('%s:' % label)
output.append('srl h')
output.append('rr l')
output.append('djnz %s' % label)
output.append('push hl')
return output
|
[
"def",
"_shru16",
"(",
"ins",
")",
":",
"op1",
",",
"op2",
"=",
"tuple",
"(",
"ins",
".",
"quad",
"[",
"2",
":",
"]",
")",
"if",
"is_int",
"(",
"op2",
")",
":",
"op",
"=",
"int16",
"(",
"op2",
")",
"if",
"op",
"==",
"0",
":",
"return",
"[",
"]",
"output",
"=",
"_16bit_oper",
"(",
"op1",
")",
"if",
"op",
"==",
"1",
":",
"output",
".",
"append",
"(",
"'srl h'",
")",
"output",
".",
"append",
"(",
"'rr l'",
")",
"output",
".",
"append",
"(",
"'push hl'",
")",
"return",
"output",
"output",
".",
"append",
"(",
"'ld b, %i'",
"%",
"op",
")",
"else",
":",
"output",
"=",
"_8bit_oper",
"(",
"op2",
")",
"output",
".",
"append",
"(",
"'ld b, a'",
")",
"output",
".",
"extend",
"(",
"_16bit_oper",
"(",
"op1",
")",
")",
"label",
"=",
"tmp_label",
"(",
")",
"output",
".",
"append",
"(",
"'%s:'",
"%",
"label",
")",
"output",
".",
"append",
"(",
"'srl h'",
")",
"output",
".",
"append",
"(",
"'rr l'",
")",
"output",
".",
"append",
"(",
"'djnz %s'",
"%",
"label",
")",
"output",
".",
"append",
"(",
"'push hl'",
")",
"return",
"output"
] |
Logical right shift 16bit unsigned integer.
The result is pushed onto the stack.
Optimizations:
* If 2nd op is 0 then
do nothing
* If 2nd op is 1
Shift Right Arithmetic
|
[
"Logical",
"right",
"shift",
"16bit",
"unsigned",
"integer",
".",
"The",
"result",
"is",
"pushed",
"onto",
"the",
"stack",
"."
] |
python
|
train
| 23.324324 |
matthewdeanmartin/find_known_secrets
|
find_known_secrets/searcher.py
|
https://github.com/matthewdeanmartin/find_known_secrets/blob/f25735c1ab4512bad85ade33af7021f6fac1d13b/find_known_secrets/searcher.py#L58-L80
|
def append_known_secrets(self): # type: () -> None
"""
Read key-value pair files with secrets. For example, .conf and .ini files.
:return:
"""
for file_name in self.files:
if "~" in file_name:
file_name = os.path.expanduser(file_name)
if not os.path.isfile(file_name):
print(
"Don't have "
+ Back.BLACK
+ Fore.YELLOW
+ file_name
+ ", won't use."
)
continue
with open(os.path.expanduser(file_name), "r") as file:
for line in file:
if line and "=" in line:
possible = line.split("=")[1].strip(" \"'\n")
if len(possible) > 4 and possible not in self.false_positives:
self.secrets.append(possible)
|
[
"def",
"append_known_secrets",
"(",
"self",
")",
":",
"# type: () -> None",
"for",
"file_name",
"in",
"self",
".",
"files",
":",
"if",
"\"~\"",
"in",
"file_name",
":",
"file_name",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"file_name",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"file_name",
")",
":",
"print",
"(",
"\"Don't have \"",
"+",
"Back",
".",
"BLACK",
"+",
"Fore",
".",
"YELLOW",
"+",
"file_name",
"+",
"\", won't use.\"",
")",
"continue",
"with",
"open",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"file_name",
")",
",",
"\"r\"",
")",
"as",
"file",
":",
"for",
"line",
"in",
"file",
":",
"if",
"line",
"and",
"\"=\"",
"in",
"line",
":",
"possible",
"=",
"line",
".",
"split",
"(",
"\"=\"",
")",
"[",
"1",
"]",
".",
"strip",
"(",
"\" \\\"'\\n\"",
")",
"if",
"len",
"(",
"possible",
")",
">",
"4",
"and",
"possible",
"not",
"in",
"self",
".",
"false_positives",
":",
"self",
".",
"secrets",
".",
"append",
"(",
"possible",
")"
] |
Read key-value pair files with secrets. For example, .conf and .ini files.
:return:
|
[
"Read",
"key",
"-",
"value",
"pair",
"files",
"with",
"secrets",
".",
"For",
"example",
".",
"conf",
"and",
".",
"ini",
"files",
".",
":",
"return",
":"
] |
python
|
test
| 40.173913 |
boriel/zxbasic
|
zxbparser.py
|
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/zxbparser.py#L1677-L1698
|
def p_do_loop_while(p):
""" statement : do_start program_co label_loop WHILE expr
| do_start label_loop WHILE expr
| DO label_loop WHILE expr
"""
if len(p) == 6:
q = make_block(p[2], p[3])
r = p[5]
else:
q = p[2]
r = p[4]
if p[1] == 'DO':
gl.LOOPS.append(('DO',))
p[0] = make_sentence('DO_WHILE', r, q)
gl.LOOPS.pop()
if is_number(r):
api.errmsg.warning_condition_is_always(p.lineno(3), bool(r.value))
if q is None:
api.errmsg.warning_empty_loop(p.lineno(3))
|
[
"def",
"p_do_loop_while",
"(",
"p",
")",
":",
"if",
"len",
"(",
"p",
")",
"==",
"6",
":",
"q",
"=",
"make_block",
"(",
"p",
"[",
"2",
"]",
",",
"p",
"[",
"3",
"]",
")",
"r",
"=",
"p",
"[",
"5",
"]",
"else",
":",
"q",
"=",
"p",
"[",
"2",
"]",
"r",
"=",
"p",
"[",
"4",
"]",
"if",
"p",
"[",
"1",
"]",
"==",
"'DO'",
":",
"gl",
".",
"LOOPS",
".",
"append",
"(",
"(",
"'DO'",
",",
")",
")",
"p",
"[",
"0",
"]",
"=",
"make_sentence",
"(",
"'DO_WHILE'",
",",
"r",
",",
"q",
")",
"gl",
".",
"LOOPS",
".",
"pop",
"(",
")",
"if",
"is_number",
"(",
"r",
")",
":",
"api",
".",
"errmsg",
".",
"warning_condition_is_always",
"(",
"p",
".",
"lineno",
"(",
"3",
")",
",",
"bool",
"(",
"r",
".",
"value",
")",
")",
"if",
"q",
"is",
"None",
":",
"api",
".",
"errmsg",
".",
"warning_empty_loop",
"(",
"p",
".",
"lineno",
"(",
"3",
")",
")"
] |
statement : do_start program_co label_loop WHILE expr
| do_start label_loop WHILE expr
| DO label_loop WHILE expr
|
[
"statement",
":",
"do_start",
"program_co",
"label_loop",
"WHILE",
"expr",
"|",
"do_start",
"label_loop",
"WHILE",
"expr",
"|",
"DO",
"label_loop",
"WHILE",
"expr"
] |
python
|
train
| 25.818182 |