repo (string, lengths 7-55) | path (string, lengths 4-223) | url (string, lengths 87-315) | code (string, lengths 75-104k) | code_tokens (list) | docstring (string, lengths 1-46.9k) | docstring_tokens (list) | language (string, 1 class) | partition (string, 3 classes) | avg_line_len (float64, 7.91-980)
---|---|---|---|---|---|---|---|---|---|
fprimex/zdesk
|
zdesk/zdesk_api.py
|
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L4399-L4402
|
def views_preview_count(self, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/views#preview-count"
api_path = "/api/v2/views/preview/count.json"
return self.call(api_path, method="POST", data=data, **kwargs)
|
[
"def",
"views_preview_count",
"(",
"self",
",",
"data",
",",
"*",
"*",
"kwargs",
")",
":",
"api_path",
"=",
"\"/api/v2/views/preview/count.json\"",
"return",
"self",
".",
"call",
"(",
"api_path",
",",
"method",
"=",
"\"POST\"",
",",
"data",
"=",
"data",
",",
"*",
"*",
"kwargs",
")"
] |
https://developer.zendesk.com/rest_api/docs/core/views#preview-count
|
[
"https",
":",
"//",
"developer",
".",
"zendesk",
".",
"com",
"/",
"rest_api",
"/",
"docs",
"/",
"core",
"/",
"views#preview",
"-",
"count"
] |
python
|
train
| 61.75 |
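A minimal usage sketch for the `views_preview_count` wrapper in the record above. The `Zdesk` client class, the credentials, and the view payload are illustrative assumptions; only the method name and the endpoint it posts to come from the record.

```python
# Hypothetical usage; URL, e-mail, and token are placeholders.
from zdesk import Zdesk  # assumed client class exported by fprimex/zdesk

client = Zdesk('https://example.zendesk.com', 'agent@example.com', 'api-token', True)

# Ask Zendesk how many tickets a view definition would match before saving it.
view_definition = {'view': {'all': [{'field': 'status', 'operator': 'is', 'value': 'open'}]}}
response = client.views_preview_count(data=view_definition)  # POST /api/v2/views/preview/count.json
print(response)
```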
tdryer/hangups
|
hangups/conversation.py
|
https://github.com/tdryer/hangups/blob/85c0bf0a57698d077461283895707260f9dbf931/hangups/conversation.py#L836-L872
|
async def _on_state_update(self, state_update):
"""Receive a StateUpdate and fan out to Conversations.
Args:
state_update: hangouts_pb2.StateUpdate instance
"""
# The state update will include some type of notification:
notification_type = state_update.WhichOneof('state_update')
# If conversation fields have been updated, the state update will have
# a conversation containing changed fields. Handle updating the
# conversation from this delta:
if state_update.HasField('conversation'):
try:
await self._handle_conversation_delta(
state_update.conversation
)
except exceptions.NetworkError:
logger.warning(
'Discarding %s for %s: Failed to fetch conversation',
notification_type.replace('_', ' '),
state_update.conversation.conversation_id.id
)
return
if notification_type == 'typing_notification':
await self._handle_set_typing_notification(
state_update.typing_notification
)
elif notification_type == 'watermark_notification':
await self._handle_watermark_notification(
state_update.watermark_notification
)
elif notification_type == 'event_notification':
await self._on_event(
state_update.event_notification.event
)
|
[
"async",
"def",
"_on_state_update",
"(",
"self",
",",
"state_update",
")",
":",
"# The state update will include some type of notification:",
"notification_type",
"=",
"state_update",
".",
"WhichOneof",
"(",
"'state_update'",
")",
"# If conversation fields have been updated, the state update will have",
"# a conversation containing changed fields. Handle updating the",
"# conversation from this delta:",
"if",
"state_update",
".",
"HasField",
"(",
"'conversation'",
")",
":",
"try",
":",
"await",
"self",
".",
"_handle_conversation_delta",
"(",
"state_update",
".",
"conversation",
")",
"except",
"exceptions",
".",
"NetworkError",
":",
"logger",
".",
"warning",
"(",
"'Discarding %s for %s: Failed to fetch conversation'",
",",
"notification_type",
".",
"replace",
"(",
"'_'",
",",
"' '",
")",
",",
"state_update",
".",
"conversation",
".",
"conversation_id",
".",
"id",
")",
"return",
"if",
"notification_type",
"==",
"'typing_notification'",
":",
"await",
"self",
".",
"_handle_set_typing_notification",
"(",
"state_update",
".",
"typing_notification",
")",
"elif",
"notification_type",
"==",
"'watermark_notification'",
":",
"await",
"self",
".",
"_handle_watermark_notification",
"(",
"state_update",
".",
"watermark_notification",
")",
"elif",
"notification_type",
"==",
"'event_notification'",
":",
"await",
"self",
".",
"_on_event",
"(",
"state_update",
".",
"event_notification",
".",
"event",
")"
] |
Receive a StateUpdate and fan out to Conversations.
Args:
state_update: hangouts_pb2.StateUpdate instance
|
[
"Receive",
"a",
"StateUpdate",
"and",
"fan",
"out",
"to",
"Conversations",
"."
] |
python
|
valid
| 40.567568 |
ebu/PlugIt
|
plugit_proxy/views.py
|
https://github.com/ebu/PlugIt/blob/de5f1e870f67caaef7a4a58e4bb1ed54d9c5dc53/plugit_proxy/views.py#L36-L59
|
def getPlugItObject(hproPk):
"""Return the plugit object and the baseURI to use if not in standalone mode"""
from hprojects.models import HostedProject
try:
hproject = HostedProject.objects.get(pk=hproPk)
except (HostedProject.DoesNotExist, ValueError):
try:
hproject = HostedProject.objects.get(plugItCustomUrlKey=hproPk)
except HostedProject.DoesNotExist:
raise Http404
if hproject.plugItURI == '' and not hproject.runURI:
raise Http404
plugIt = PlugIt(hproject.plugItURI)
# Test if we should use custom key
if hasattr(hproject, 'plugItCustomUrlKey') and hproject.plugItCustomUrlKey:
baseURI = reverse('plugIt.views.main', args=(hproject.plugItCustomUrlKey, ''))
else:
baseURI = reverse('plugIt.views.main', args=(hproject.pk, ''))
return (plugIt, baseURI, hproject)
|
[
"def",
"getPlugItObject",
"(",
"hproPk",
")",
":",
"from",
"hprojects",
".",
"models",
"import",
"HostedProject",
"try",
":",
"hproject",
"=",
"HostedProject",
".",
"objects",
".",
"get",
"(",
"pk",
"=",
"hproPk",
")",
"except",
"(",
"HostedProject",
".",
"DoesNotExist",
",",
"ValueError",
")",
":",
"try",
":",
"hproject",
"=",
"HostedProject",
".",
"objects",
".",
"get",
"(",
"plugItCustomUrlKey",
"=",
"hproPk",
")",
"except",
"HostedProject",
".",
"DoesNotExist",
":",
"raise",
"Http404",
"if",
"hproject",
".",
"plugItURI",
"==",
"''",
"and",
"not",
"hproject",
".",
"runURI",
":",
"raise",
"Http404",
"plugIt",
"=",
"PlugIt",
"(",
"hproject",
".",
"plugItURI",
")",
"# Test if we should use custom key",
"if",
"hasattr",
"(",
"hproject",
",",
"'plugItCustomUrlKey'",
")",
"and",
"hproject",
".",
"plugItCustomUrlKey",
":",
"baseURI",
"=",
"reverse",
"(",
"'plugIt.views.main'",
",",
"args",
"=",
"(",
"hproject",
".",
"plugItCustomUrlKey",
",",
"''",
")",
")",
"else",
":",
"baseURI",
"=",
"reverse",
"(",
"'plugIt.views.main'",
",",
"args",
"=",
"(",
"hproject",
".",
"pk",
",",
"''",
")",
")",
"return",
"(",
"plugIt",
",",
"baseURI",
",",
"hproject",
")"
] |
Return the plugit object and the baseURI to use if not in standalone mode
|
[
"Return",
"the",
"plugit",
"object",
"and",
"the",
"baseURI",
"to",
"use",
"if",
"not",
"in",
"standalone",
"mode"
] |
python
|
train
| 35.916667 |
saltstack/salt
|
salt/modules/boto_apigateway.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_apigateway.py#L1642-L1664
|
def attach_usage_plan_to_apis(plan_id, apis, region=None, key=None, keyid=None, profile=None):
'''
Attaches given usage plan to each of the apis provided in a list of apiId and stage values
.. versionadded:: 2017.7.0
apis
a list of dictionaries, where each dictionary contains the following:
apiId
a string, which is the id of the created API in AWS ApiGateway
stage
a string, which is the stage that the created API is deployed to.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.attach_usage_plan_to_apis plan_id='usage plan id' apis='[{"apiId": "some id 1", "stage": "some stage 1"}]'
'''
return _update_usage_plan_apis(plan_id, apis, 'add', region=region, key=key, keyid=keyid, profile=profile)
|
[
"def",
"attach_usage_plan_to_apis",
"(",
"plan_id",
",",
"apis",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"return",
"_update_usage_plan_apis",
"(",
"plan_id",
",",
"apis",
",",
"'add'",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")"
] |
Attaches given usage plan to each of the apis provided in a list of apiId and stage values
.. versionadded:: 2017.7.0
apis
a list of dictionaries, where each dictionary contains the following:
apiId
a string, which is the id of the created API in AWS ApiGateway
stage
a string, which is the stage that the created API is deployed to.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.attach_usage_plan_to_apis plan_id='usage plan id' apis='[{"apiId": "some id 1", "stage": "some stage 1"}]'
|
[
"Attaches",
"given",
"usage",
"plan",
"to",
"each",
"of",
"the",
"apis",
"provided",
"in",
"a",
"list",
"of",
"apiId",
"and",
"stage",
"values"
] |
python
|
train
| 34.26087 |
hotdoc/hotdoc
|
hotdoc/core/config.py
|
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/core/config.py#L31-L52
|
def load_config_json(conf_file):
"""Banana?"""
try:
with open(conf_file) as _:
try:
json_conf = json.load(_)
except ValueError as ze_error:
error('invalid-config',
'The provided configuration file %s is not valid json.\n'
'The exact error was %s.\n'
'This often happens because of missing or extra commas, '
'but it may be something else, please fix it!\n' %
(conf_file, str(ze_error)))
except FileNotFoundError:
json_conf = {}
except IOError as _err:
error('setup-issue',
'Passed config file %s could not be opened (%s)' %
(conf_file, _err))
return json_conf
|
[
"def",
"load_config_json",
"(",
"conf_file",
")",
":",
"try",
":",
"with",
"open",
"(",
"conf_file",
")",
"as",
"_",
":",
"try",
":",
"json_conf",
"=",
"json",
".",
"load",
"(",
"_",
")",
"except",
"ValueError",
"as",
"ze_error",
":",
"error",
"(",
"'invalid-config'",
",",
"'The provided configuration file %s is not valid json.\\n'",
"'The exact error was %s.\\n'",
"'This often happens because of missing or extra commas, '",
"'but it may be something else, please fix it!\\n'",
"%",
"(",
"conf_file",
",",
"str",
"(",
"ze_error",
")",
")",
")",
"except",
"FileNotFoundError",
":",
"json_conf",
"=",
"{",
"}",
"except",
"IOError",
"as",
"_err",
":",
"error",
"(",
"'setup-issue'",
",",
"'Passed config file %s could not be opened (%s)'",
"%",
"(",
"conf_file",
",",
"_err",
")",
")",
"return",
"json_conf"
] |
Banana?
|
[
"Banana?"
] |
python
|
train
| 35.363636 |
glue-viz/glue-vispy-viewers
|
glue_vispy_viewers/extern/vispy/util/config.py
|
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/util/config.py#L188-L224
|
def _get_vispy_app_dir():
"""Helper to get the default directory for storing vispy data"""
# Define default user directory
user_dir = os.path.expanduser('~')
# Get system app data dir
path = None
if sys.platform.startswith('win'):
path1, path2 = os.getenv('LOCALAPPDATA'), os.getenv('APPDATA')
path = path1 or path2
elif sys.platform.startswith('darwin'):
path = os.path.join(user_dir, 'Library', 'Application Support')
# On Linux and as fallback
if not (path and os.path.isdir(path)):
path = user_dir
# Maybe we should store things local to the executable (in case of a
# portable distro or a frozen application that wants to be portable)
prefix = sys.prefix
if getattr(sys, 'frozen', None): # See application_dir() function
prefix = os.path.abspath(os.path.dirname(sys.path[0]))
for reldir in ('settings', '../settings'):
localpath = os.path.abspath(os.path.join(prefix, reldir))
if os.path.isdir(localpath):
try:
open(os.path.join(localpath, 'test.write'), 'wb').close()
os.remove(os.path.join(localpath, 'test.write'))
except IOError:
pass # We cannot write in this directory
else:
path = localpath
break
# Get path specific for this app
appname = '.vispy' if path == user_dir else 'vispy'
path = os.path.join(path, appname)
return path
|
[
"def",
"_get_vispy_app_dir",
"(",
")",
":",
"# Define default user directory",
"user_dir",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~'",
")",
"# Get system app data dir",
"path",
"=",
"None",
"if",
"sys",
".",
"platform",
".",
"startswith",
"(",
"'win'",
")",
":",
"path1",
",",
"path2",
"=",
"os",
".",
"getenv",
"(",
"'LOCALAPPDATA'",
")",
",",
"os",
".",
"getenv",
"(",
"'APPDATA'",
")",
"path",
"=",
"path1",
"or",
"path2",
"elif",
"sys",
".",
"platform",
".",
"startswith",
"(",
"'darwin'",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"user_dir",
",",
"'Library'",
",",
"'Application Support'",
")",
"# On Linux and as fallback",
"if",
"not",
"(",
"path",
"and",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
")",
":",
"path",
"=",
"user_dir",
"# Maybe we should store things local to the executable (in case of a",
"# portable distro or a frozen application that wants to be portable)",
"prefix",
"=",
"sys",
".",
"prefix",
"if",
"getattr",
"(",
"sys",
",",
"'frozen'",
",",
"None",
")",
":",
"# See application_dir() function",
"prefix",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"sys",
".",
"path",
"[",
"0",
"]",
")",
")",
"for",
"reldir",
"in",
"(",
"'settings'",
",",
"'../settings'",
")",
":",
"localpath",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"prefix",
",",
"reldir",
")",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"localpath",
")",
":",
"try",
":",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"localpath",
",",
"'test.write'",
")",
",",
"'wb'",
")",
".",
"close",
"(",
")",
"os",
".",
"remove",
"(",
"os",
".",
"path",
".",
"join",
"(",
"localpath",
",",
"'test.write'",
")",
")",
"except",
"IOError",
":",
"pass",
"# We cannot write in this directory",
"else",
":",
"path",
"=",
"localpath",
"break",
"# Get path specific for this app",
"appname",
"=",
"'.vispy'",
"if",
"path",
"==",
"user_dir",
"else",
"'vispy'",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"appname",
")",
"return",
"path"
] |
Helper to get the default directory for storing vispy data
|
[
"Helper",
"to",
"get",
"the",
"default",
"directory",
"for",
"storing",
"vispy",
"data"
] |
python
|
train
| 39.27027 |
inveniosoftware/invenio-search
|
invenio_search/cli.py
|
https://github.com/inveniosoftware/invenio-search/blob/19c073d608d4c811f1c5aecb6622402d39715228/invenio_search/cli.py#L68-L81
|
def init(force):
"""Initialize registered aliases and mappings."""
click.secho('Creating indexes...', fg='green', bold=True, file=sys.stderr)
with click.progressbar(
current_search.create(ignore=[400] if force else None),
length=current_search.number_of_indexes) as bar:
for name, response in bar:
bar.label = name
click.secho('Putting templates...', fg='green', bold=True, file=sys.stderr)
with click.progressbar(
current_search.put_templates(ignore=[400] if force else None),
length=len(current_search.templates.keys())) as bar:
for response in bar:
bar.label = response
|
[
"def",
"init",
"(",
"force",
")",
":",
"click",
".",
"secho",
"(",
"'Creating indexes...'",
",",
"fg",
"=",
"'green'",
",",
"bold",
"=",
"True",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"with",
"click",
".",
"progressbar",
"(",
"current_search",
".",
"create",
"(",
"ignore",
"=",
"[",
"400",
"]",
"if",
"force",
"else",
"None",
")",
",",
"length",
"=",
"current_search",
".",
"number_of_indexes",
")",
"as",
"bar",
":",
"for",
"name",
",",
"response",
"in",
"bar",
":",
"bar",
".",
"label",
"=",
"name",
"click",
".",
"secho",
"(",
"'Putting templates...'",
",",
"fg",
"=",
"'green'",
",",
"bold",
"=",
"True",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"with",
"click",
".",
"progressbar",
"(",
"current_search",
".",
"put_templates",
"(",
"ignore",
"=",
"[",
"400",
"]",
"if",
"force",
"else",
"None",
")",
",",
"length",
"=",
"len",
"(",
"current_search",
".",
"templates",
".",
"keys",
"(",
")",
")",
")",
"as",
"bar",
":",
"for",
"response",
"in",
"bar",
":",
"bar",
".",
"label",
"=",
"response"
] |
Initialize registered aliases and mappings.
|
[
"Initialize",
"registered",
"aliases",
"and",
"mappings",
"."
] |
python
|
train
| 47.642857 |
inveniosoftware-contrib/invenio-workflows
|
invenio_workflows/models.py
|
https://github.com/inveniosoftware-contrib/invenio-workflows/blob/9c09fd29509a3db975ac2aba337e6760d8cfd3c2/invenio_workflows/models.py#L96-L99
|
def delete(cls, uuid):
"""Delete a workflow."""
to_delete = Workflow.query.get(uuid)
db.session.delete(to_delete)
|
[
"def",
"delete",
"(",
"cls",
",",
"uuid",
")",
":",
"to_delete",
"=",
"Workflow",
".",
"query",
".",
"get",
"(",
"uuid",
")",
"db",
".",
"session",
".",
"delete",
"(",
"to_delete",
")"
] |
Delete a workflow.
|
[
"Delete",
"a",
"workflow",
"."
] |
python
|
train
| 33.5 |
pymc-devs/pymc
|
pymc/examples/disaster_model_gof.py
|
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/examples/disaster_model_gof.py#L50-L55
|
def disasters_sim(early_mean=early_mean,
late_mean=late_mean,
switchpoint=switchpoint):
"""Coal mining disasters sampled from the posterior predictive distribution"""
return concatenate((pm.rpoisson(early_mean, size=switchpoint), pm.rpoisson(
late_mean, size=n - switchpoint)))
|
[
"def",
"disasters_sim",
"(",
"early_mean",
"=",
"early_mean",
",",
"late_mean",
"=",
"late_mean",
",",
"switchpoint",
"=",
"switchpoint",
")",
":",
"return",
"concatenate",
"(",
"(",
"pm",
".",
"rpoisson",
"(",
"early_mean",
",",
"size",
"=",
"switchpoint",
")",
",",
"pm",
".",
"rpoisson",
"(",
"late_mean",
",",
"size",
"=",
"n",
"-",
"switchpoint",
")",
")",
")"
] |
Coal mining disasters sampled from the posterior predictive distribution
|
[
"Coal",
"mining",
"disasters",
"sampled",
"from",
"the",
"posterior",
"predictive",
"distribution"
] |
python
|
train
| 54 |
coreGreenberet/homematicip-rest-api
|
homematicip/home.py
|
https://github.com/coreGreenberet/homematicip-rest-api/blob/d4c8df53281577e01709f75cacb78b1a5a1d00db/homematicip/home.py#L559-L578
|
def set_pin(self, newPin: str, oldPin: str = None) -> dict:
""" sets a new pin for the home
Args:
newPin(str): the new pin
oldPin(str): optional, if there is currently a pin active it must be given here.
Otherwise it will not be possible to set the new pin
Returns:
the result of the call
"""
if newPin == None:
newPin = ""
data = {"pin": newPin}
if oldPin:
self._connection.headers["PIN"] = str(oldPin)
result = self._restCall("home/setPin", body=json.dumps(data))
if oldPin:
del self._connection.headers["PIN"]
return result
|
[
"def",
"set_pin",
"(",
"self",
",",
"newPin",
":",
"str",
",",
"oldPin",
":",
"str",
"=",
"None",
")",
"->",
"dict",
":",
"if",
"newPin",
"==",
"None",
":",
"newPin",
"=",
"\"\"",
"data",
"=",
"{",
"\"pin\"",
":",
"newPin",
"}",
"if",
"oldPin",
":",
"self",
".",
"_connection",
".",
"headers",
"[",
"\"PIN\"",
"]",
"=",
"str",
"(",
"oldPin",
")",
"result",
"=",
"self",
".",
"_restCall",
"(",
"\"home/setPin\"",
",",
"body",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
")",
"if",
"oldPin",
":",
"del",
"self",
".",
"_connection",
".",
"headers",
"[",
"\"PIN\"",
"]",
"return",
"result"
] |
sets a new pin for the home
Args:
newPin(str): the new pin
oldPin(str): optional, if there is currently a pin active it must be given here.
Otherwise it will not be possible to set the new pin
Returns:
the result of the call
|
[
"sets",
"a",
"new",
"pin",
"for",
"the",
"home",
"Args",
":",
"newPin",
"(",
"str",
")",
":",
"the",
"new",
"pin",
"oldPin",
"(",
"str",
")",
":",
"optional",
"if",
"there",
"is",
"currently",
"a",
"pin",
"active",
"it",
"must",
"be",
"given",
"here",
".",
"Otherwise",
"it",
"will",
"not",
"be",
"possible",
"to",
"set",
"the",
"new",
"pin",
"Returns",
":",
"the",
"result",
"of",
"the",
"call"
] |
python
|
train
| 35.2 |
astropy/photutils
|
photutils/psf/models.py
|
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/models.py#L708-L742
|
def evaluate(self, x, y, flux, x_0, y_0):
"""
Evaluate the `GriddedPSFModel` for the input parameters.
"""
# NOTE: this is needed because the PSF photometry routines input
# length-1 values instead of scalars. TODO: fix the photometry
# routines.
if not np.isscalar(x_0):
x_0 = x_0[0]
if not np.isscalar(y_0):
y_0 = y_0[0]
if (x_0 < self._xgrid_min or x_0 > self._xgrid_max or
y_0 < self._ygrid_min or y_0 > self._ygrid_max):
# position is outside of the grid, so simply use the
# closest reference PSF
self._ref_indices = np.argsort(np.hypot(self._grid_xpos - x_0,
self._grid_ypos - y_0))[0]
self._psf_interp = self.data[self._ref_indices, :, :]
else:
# find the four bounding reference PSFs and interpolate
self._ref_indices = self._find_bounding_points(x_0, y_0)
xyref = np.array(self.grid_xypos)[self._ref_indices]
psfs = self.data[self._ref_indices, :, :]
self._psf_interp = self._bilinear_interp(xyref, psfs, x_0, y_0)
# now evaluate the PSF at the (x_0, y_0) subpixel position on
# the input (x, y) values
psfmodel = FittableImageModel(self._psf_interp,
oversampling=self.oversampling)
return psfmodel.evaluate(x, y, flux, x_0, y_0)
|
[
"def",
"evaluate",
"(",
"self",
",",
"x",
",",
"y",
",",
"flux",
",",
"x_0",
",",
"y_0",
")",
":",
"# NOTE: this is needed because the PSF photometry routines input",
"# length-1 values instead of scalars. TODO: fix the photometry",
"# routines.",
"if",
"not",
"np",
".",
"isscalar",
"(",
"x_0",
")",
":",
"x_0",
"=",
"x_0",
"[",
"0",
"]",
"if",
"not",
"np",
".",
"isscalar",
"(",
"y_0",
")",
":",
"y_0",
"=",
"y_0",
"[",
"0",
"]",
"if",
"(",
"x_0",
"<",
"self",
".",
"_xgrid_min",
"or",
"x_0",
">",
"self",
".",
"_xgrid_max",
"or",
"y_0",
"<",
"self",
".",
"_ygrid_min",
"or",
"y_0",
">",
"self",
".",
"_ygrid_max",
")",
":",
"# position is outside of the grid, so simply use the",
"# closest reference PSF",
"self",
".",
"_ref_indices",
"=",
"np",
".",
"argsort",
"(",
"np",
".",
"hypot",
"(",
"self",
".",
"_grid_xpos",
"-",
"x_0",
",",
"self",
".",
"_grid_ypos",
"-",
"y_0",
")",
")",
"[",
"0",
"]",
"self",
".",
"_psf_interp",
"=",
"self",
".",
"data",
"[",
"self",
".",
"_ref_indices",
",",
":",
",",
":",
"]",
"else",
":",
"# find the four bounding reference PSFs and interpolate",
"self",
".",
"_ref_indices",
"=",
"self",
".",
"_find_bounding_points",
"(",
"x_0",
",",
"y_0",
")",
"xyref",
"=",
"np",
".",
"array",
"(",
"self",
".",
"grid_xypos",
")",
"[",
"self",
".",
"_ref_indices",
"]",
"psfs",
"=",
"self",
".",
"data",
"[",
"self",
".",
"_ref_indices",
",",
":",
",",
":",
"]",
"self",
".",
"_psf_interp",
"=",
"self",
".",
"_bilinear_interp",
"(",
"xyref",
",",
"psfs",
",",
"x_0",
",",
"y_0",
")",
"# now evaluate the PSF at the (x_0, y_0) subpixel position on",
"# the input (x, y) values",
"psfmodel",
"=",
"FittableImageModel",
"(",
"self",
".",
"_psf_interp",
",",
"oversampling",
"=",
"self",
".",
"oversampling",
")",
"return",
"psfmodel",
".",
"evaluate",
"(",
"x",
",",
"y",
",",
"flux",
",",
"x_0",
",",
"y_0",
")"
] |
Evaluate the `GriddedPSFModel` for the input parameters.
|
[
"Evaluate",
"the",
"GriddedPSFModel",
"for",
"the",
"input",
"parameters",
"."
] |
python
|
train
| 41.771429 |
RudolfCardinal/pythonlib
|
cardinal_pythonlib/rnc_db.py
|
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/rnc_db.py#L907-L920
|
def get_sql_select_all_fields_by_key(
table: str,
fieldlist: Sequence[str],
keyname: str,
delims: Tuple[str, str] = ("", "")) -> str:
"""Returns SQL:
SELECT [all fields in the fieldlist] WHERE [keyname] = ?
"""
return (
"SELECT " +
",".join([delimit(x, delims) for x in fieldlist]) +
" FROM " + delimit(table, delims) +
" WHERE " + delimit(keyname, delims) + "=?"
)
|
[
"def",
"get_sql_select_all_fields_by_key",
"(",
"table",
":",
"str",
",",
"fieldlist",
":",
"Sequence",
"[",
"str",
"]",
",",
"keyname",
":",
"str",
",",
"delims",
":",
"Tuple",
"[",
"str",
",",
"str",
"]",
"=",
"(",
"\"\"",
",",
"\"\"",
")",
")",
"->",
"str",
":",
"return",
"(",
"\"SELECT \"",
"+",
"\",\"",
".",
"join",
"(",
"[",
"delimit",
"(",
"x",
",",
"delims",
")",
"for",
"x",
"in",
"fieldlist",
"]",
")",
"+",
"\" FROM \"",
"+",
"delimit",
"(",
"table",
",",
"delims",
")",
"+",
"\" WHERE \"",
"+",
"delimit",
"(",
"keyname",
",",
"delims",
")",
"+",
"\"=?\"",
")"
] |
Returns SQL:
SELECT [all fields in the fieldlist] WHERE [keyname] = ?
|
[
"Returns",
"SQL",
":",
"SELECT",
"[",
"all",
"fields",
"in",
"the",
"fieldlist",
"]",
"WHERE",
"[",
"keyname",
"]",
"=",
"?"
] |
python
|
train
| 31.428571 |
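A short sketch of the SQL builder in the record above. It assumes the package's `delimit` helper returns identifiers unchanged when the default empty delimiters are used, so the expected output shown is an assumption rather than a verified result.

```python
# Sketch, assuming cardinal_pythonlib is installed and delimit('x', ('', '')) == 'x'.
from cardinal_pythonlib.rnc_db import get_sql_select_all_fields_by_key

sql = get_sql_select_all_fields_by_key(
    table='patient',
    fieldlist=['forename', 'surname', 'dob'],
    keyname='patient_id',
)
print(sql)  # expected: SELECT forename,surname,dob FROM patient WHERE patient_id=?
```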
bwohlberg/sporco
|
sporco/cnvrep.py
|
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/cnvrep.py#L655-L677
|
def normalise(v, dimN=2):
r"""Normalise vectors, corresponding to slices along specified number
of initial spatial dimensions of an array, to have unit
:math:`\ell_2` norm. The remaining axes enumerate the distinct
vectors to be normalised.
Parameters
----------
v : array_like
Array with components to be normalised
dimN : int, optional (default 2)
Number of initial dimensions over which norm should be computed
Returns
-------
vnrm : ndarray
Normalised array
"""
axisN = tuple(range(0, dimN))
vn = np.sqrt(np.sum(v**2, axisN, keepdims=True))
vn[vn == 0] = 1.0
return np.asarray(v / vn, dtype=v.dtype)
|
[
"def",
"normalise",
"(",
"v",
",",
"dimN",
"=",
"2",
")",
":",
"axisN",
"=",
"tuple",
"(",
"range",
"(",
"0",
",",
"dimN",
")",
")",
"vn",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"sum",
"(",
"v",
"**",
"2",
",",
"axisN",
",",
"keepdims",
"=",
"True",
")",
")",
"vn",
"[",
"vn",
"==",
"0",
"]",
"=",
"1.0",
"return",
"np",
".",
"asarray",
"(",
"v",
"/",
"vn",
",",
"dtype",
"=",
"v",
".",
"dtype",
")"
] |
r"""Normalise vectors, corresponding to slices along specified number
of initial spatial dimensions of an array, to have unit
:math:`\ell_2` norm. The remaining axes enumerate the distinct
vectors to be normalised.
Parameters
----------
v : array_like
Array with components to be normalised
dimN : int, optional (default 2)
Number of initial dimensions over which norm should be computed
Returns
-------
vnrm : ndarray
Normalised array
|
[
"r",
"Normalise",
"vectors",
"corresponding",
"to",
"slices",
"along",
"specified",
"number",
"of",
"initial",
"spatial",
"dimensions",
"of",
"an",
"array",
"to",
"have",
"unit",
":",
"math",
":",
"\\",
"ell_2",
"norm",
".",
"The",
"remaining",
"axes",
"enumerate",
"the",
"distinct",
"vectors",
"to",
"be",
"normalised",
"."
] |
python
|
train
| 28.956522 |
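A small sketch of the `normalise` helper in the record above, applied to a random filter array; the shapes are arbitrary and the import path is inferred from the record's file path (`sporco/cnvrep.py`).

```python
# Sketch: give each 8x8 spatial slice unit l2 norm (assumes sporco is installed).
import numpy as np
from sporco.cnvrep import normalise

v = np.random.randn(8, 8, 4)   # four filters, each 8x8
vn = normalise(v, dimN=2)      # norm computed over the first two (spatial) axes

print(np.sqrt((vn ** 2).sum(axis=(0, 1))))  # approximately [1. 1. 1. 1.]
```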
SylvanasSun/FishFishJump
|
fish_core/simhash.py
|
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_core/simhash.py#L103-L119
|
def simhash(self, content):
"""
Select policies for simhash on the different types of content.
"""
if content is None:
self.hash = -1
return
if isinstance(content, str):
features = self.tokenizer_func(content, self.keyword_weight_pari)
self.hash = self.build_from_features(features)
elif isinstance(content, collections.Iterable):
self.hash = self.build_from_features(content)
elif isinstance(content, int):
self.hash = content
else:
raise Exception("Unsupported parameter type %s" % type(content))
|
[
"def",
"simhash",
"(",
"self",
",",
"content",
")",
":",
"if",
"content",
"is",
"None",
":",
"self",
".",
"hash",
"=",
"-",
"1",
"return",
"if",
"isinstance",
"(",
"content",
",",
"str",
")",
":",
"features",
"=",
"self",
".",
"tokenizer_func",
"(",
"content",
",",
"self",
".",
"keyword_weight_pari",
")",
"self",
".",
"hash",
"=",
"self",
".",
"build_from_features",
"(",
"features",
")",
"elif",
"isinstance",
"(",
"content",
",",
"collections",
".",
"Iterable",
")",
":",
"self",
".",
"hash",
"=",
"self",
".",
"build_from_features",
"(",
"content",
")",
"elif",
"isinstance",
"(",
"content",
",",
"int",
")",
":",
"self",
".",
"hash",
"=",
"content",
"else",
":",
"raise",
"Exception",
"(",
"\"Unsupported parameter type %s\"",
"%",
"type",
"(",
"content",
")",
")"
] |
Select policies for simhash on the different types of content.
|
[
"Select",
"policies",
"for",
"simhash",
"on",
"the",
"different",
"types",
"of",
"content",
"."
] |
python
|
train
| 37.117647 |
CalebBell/thermo
|
thermo/safety.py
|
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/safety.py#L1179-L1229
|
def Crowl_Louvar_UFL(atoms):
r'''Calculates upper flammability limit, using the Crowl-Louvar [1]_
correlation. Uses molecular formula only.
The upper flammability limit of a gas in air is:
.. math::
C_mH_xO_y + zO_2 \to mCO_2 + \frac{x}{2}H_2O
\text{UFL} = \frac{3.5}{4.76m + 1.19x - 2.38y + 1}
Parameters
----------
atoms : dict
Dictionary of atoms and atom counts
Returns
-------
UFL : float
Upper flammability limit, mole fraction
Notes
-----
Coefficient of 3.5 taken from [2]_
Examples
--------
Hexane, example from [1]_, lit. 7.5 %
>>> Crowl_Louvar_UFL({'H': 14, 'C': 6})
0.07572479446127219
References
----------
.. [1] Crowl, Daniel A., and Joseph F. Louvar. Chemical Process Safety:
Fundamentals with Applications. 2E. Upper Saddle River, N.J:
Prentice Hall, 2001.
.. [2] Jones, G. W. "Inflammation Limits and Their Practical Application
in Hazardous Industrial Operations." Chemical Reviews 22, no. 1
(February 1, 1938): 1-26. doi:10.1021/cr60071a001
'''
nC, nH, nO = 0, 0, 0
if 'C' in atoms and atoms['C']:
nC = atoms['C']
else:
return None
if 'H' in atoms:
nH = atoms['H']
if 'O' in atoms:
nO = atoms['O']
return 3.5/(4.76*nC + 1.19*nH - 2.38*nO + 1.)
|
[
"def",
"Crowl_Louvar_UFL",
"(",
"atoms",
")",
":",
"nC",
",",
"nH",
",",
"nO",
"=",
"0",
",",
"0",
",",
"0",
"if",
"'C'",
"in",
"atoms",
"and",
"atoms",
"[",
"'C'",
"]",
":",
"nC",
"=",
"atoms",
"[",
"'C'",
"]",
"else",
":",
"return",
"None",
"if",
"'H'",
"in",
"atoms",
":",
"nH",
"=",
"atoms",
"[",
"'H'",
"]",
"if",
"'O'",
"in",
"atoms",
":",
"nO",
"=",
"atoms",
"[",
"'O'",
"]",
"return",
"3.5",
"/",
"(",
"4.76",
"*",
"nC",
"+",
"1.19",
"*",
"nH",
"-",
"2.38",
"*",
"nO",
"+",
"1.",
")"
] |
r'''Calculates upper flammability limit, using the Crowl-Louvar [1]_
correlation. Uses molecular formula only.
The upper flammability limit of a gas in air is:
.. math::
C_mH_xO_y + zO_2 \to mCO_2 + \frac{x}{2}H_2O
\text{UFL} = \frac{3.5}{4.76m + 1.19x - 2.38y + 1}
Parameters
----------
atoms : dict
Dictionary of atoms and atom counts
Returns
-------
UFL : float
Upper flammability limit, mole fraction
Notes
-----
Coefficient of 3.5 taken from [2]_
Examples
--------
Hexane, example from [1]_, lit. 7.5 %
>>> Crowl_Louvar_UFL({'H': 14, 'C': 6})
0.07572479446127219
References
----------
.. [1] Crowl, Daniel A., and Joseph F. Louvar. Chemical Process Safety:
Fundamentals with Applications. 2E. Upper Saddle River, N.J:
Prentice Hall, 2001.
.. [2] Jones, G. W. "Inflammation Limits and Their Practical Application
in Hazardous Industrial Operations." Chemical Reviews 22, no. 1
(February 1, 1938): 1-26. doi:10.1021/cr60071a001
|
[
"r",
"Calculates",
"upper",
"flammability",
"limit",
"using",
"the",
"Crowl",
"-",
"Louvar",
"[",
"1",
"]",
"_",
"correlation",
".",
"Uses",
"molecular",
"formula",
"only",
"."
] |
python
|
valid
| 26.058824 |
uw-it-aca/uw-restclients-graderoster
|
uw_sws_graderoster/__init__.py
|
https://github.com/uw-it-aca/uw-restclients-graderoster/blob/1e41553eb7363765af60e87223ca9d22cf6c9187/uw_sws_graderoster/__init__.py#L34-L56
|
def update_graderoster(graderoster, requestor):
"""
Updates the graderoster resource for the passed restclients.GradeRoster
model. A new restclients.GradeRoster is returned, representing the
document returned from the update request.
"""
label = graderoster.graderoster_label()
url = "{}/{}".format(graderoster_url, encode_section_label(label))
headers = {"Content-Type": "application/xhtml+xml",
"Connection": "keep-alive",
"X-UW-Act-as": requestor.uwnetid}
body = graderoster.xhtml()
response = SWS_GradeRoster_DAO().putURL(url, headers, body)
if response.status != 200:
root = etree.fromstring(response.data)
msg = root.find(".//*[@class='status_description']").text.strip()
raise DataFailureException(url, response.status, msg)
return GradeRoster(data=etree.fromstring(response.data.strip()),
section=graderoster.section,
instructor=graderoster.instructor)
|
[
"def",
"update_graderoster",
"(",
"graderoster",
",",
"requestor",
")",
":",
"label",
"=",
"graderoster",
".",
"graderoster_label",
"(",
")",
"url",
"=",
"\"{}/{}\"",
".",
"format",
"(",
"graderoster_url",
",",
"encode_section_label",
"(",
"label",
")",
")",
"headers",
"=",
"{",
"\"Content-Type\"",
":",
"\"application/xhtml+xml\"",
",",
"\"Connection\"",
":",
"\"keep-alive\"",
",",
"\"X-UW-Act-as\"",
":",
"requestor",
".",
"uwnetid",
"}",
"body",
"=",
"graderoster",
".",
"xhtml",
"(",
")",
"response",
"=",
"SWS_GradeRoster_DAO",
"(",
")",
".",
"putURL",
"(",
"url",
",",
"headers",
",",
"body",
")",
"if",
"response",
".",
"status",
"!=",
"200",
":",
"root",
"=",
"etree",
".",
"fromstring",
"(",
"response",
".",
"data",
")",
"msg",
"=",
"root",
".",
"find",
"(",
"\".//*[@class='status_description']\"",
")",
".",
"text",
".",
"strip",
"(",
")",
"raise",
"DataFailureException",
"(",
"url",
",",
"response",
".",
"status",
",",
"msg",
")",
"return",
"GradeRoster",
"(",
"data",
"=",
"etree",
".",
"fromstring",
"(",
"response",
".",
"data",
".",
"strip",
"(",
")",
")",
",",
"section",
"=",
"graderoster",
".",
"section",
",",
"instructor",
"=",
"graderoster",
".",
"instructor",
")"
] |
Updates the graderoster resource for the passed restclients.GradeRoster
model. A new restclients.GradeRoster is returned, representing the
document returned from the update request.
|
[
"Updates",
"the",
"graderoster",
"resource",
"for",
"the",
"passed",
"restclients",
".",
"GradeRoster",
"model",
".",
"A",
"new",
"restclients",
".",
"GradeRoster",
"is",
"returned",
"representing",
"the",
"document",
"returned",
"from",
"the",
"update",
"request",
"."
] |
python
|
train
| 43 |
google/budou
|
budou/nlapisegmenter.py
|
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/nlapisegmenter.py#L203-L221
|
def _group_chunks_by_entities(self, chunks, entities):
"""Groups chunks by entities retrieved from NL API Entity Analysis.
Args:
chunks (:obj:`budou.chunk.ChunkList`): List of chunks to be processed.
entities (:obj:`list` of :obj:`dict`): List of entities.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`)
"""
for entity in entities:
chunks_to_concat = chunks.get_overlaps(
entity['beginOffset'], len(entity['content']))
if not chunks_to_concat:
continue
new_chunk_word = u''.join([chunk.word for chunk in chunks_to_concat])
new_chunk = Chunk(new_chunk_word)
chunks.swap(chunks_to_concat, new_chunk)
return chunks
|
[
"def",
"_group_chunks_by_entities",
"(",
"self",
",",
"chunks",
",",
"entities",
")",
":",
"for",
"entity",
"in",
"entities",
":",
"chunks_to_concat",
"=",
"chunks",
".",
"get_overlaps",
"(",
"entity",
"[",
"'beginOffset'",
"]",
",",
"len",
"(",
"entity",
"[",
"'content'",
"]",
")",
")",
"if",
"not",
"chunks_to_concat",
":",
"continue",
"new_chunk_word",
"=",
"u''",
".",
"join",
"(",
"[",
"chunk",
".",
"word",
"for",
"chunk",
"in",
"chunks_to_concat",
"]",
")",
"new_chunk",
"=",
"Chunk",
"(",
"new_chunk_word",
")",
"chunks",
".",
"swap",
"(",
"chunks_to_concat",
",",
"new_chunk",
")",
"return",
"chunks"
] |
Groups chunks by entities retrieved from NL API Entity Analysis.
Args:
chunks (:obj:`budou.chunk.ChunkList`): List of chunks to be processed.
entities (:obj:`list` of :obj:`dict`): List of entities.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`)
|
[
"Groups",
"chunks",
"by",
"entities",
"retrieved",
"from",
"NL",
"API",
"Entity",
"Analysis",
"."
] |
python
|
train
| 36.421053 |
bjmorgan/vasppy
|
vasppy/procar.py
|
https://github.com/bjmorgan/vasppy/blob/cc2d1449697b17ee1c43715a02cddcb1139a6834/vasppy/procar.py#L38-L56
|
def points_are_in_a_straight_line( points, tolerance=1e-7 ):
"""
Check whether a set of points fall on a straight line.
Calculates the areas of triangles formed by triplets of the points.
Returns False if any of these areas are larger than the tolerance.
Args:
points (list(np.array)): list of Cartesian coordinates for each point.
tolerance (optional:float): the maximum triangle size for these points to be considered colinear. Default is 1e-7.
Returns:
(bool): True if all points fall on a straight line (within the allowed tolerance).
"""
a = points[0]
b = points[1]
for c in points[2:]:
if area_of_a_triangle_in_cartesian_space( a, b, c ) > tolerance:
return False
return True
|
[
"def",
"points_are_in_a_straight_line",
"(",
"points",
",",
"tolerance",
"=",
"1e-7",
")",
":",
"a",
"=",
"points",
"[",
"0",
"]",
"b",
"=",
"points",
"[",
"1",
"]",
"for",
"c",
"in",
"points",
"[",
"2",
":",
"]",
":",
"if",
"area_of_a_triangle_in_cartesian_space",
"(",
"a",
",",
"b",
",",
"c",
")",
">",
"tolerance",
":",
"return",
"False",
"return",
"True"
] |
Check whether a set of points fall on a straight line.
Calculates the areas of triangles formed by triplets of the points.
Returns False if any of these areas are larger than the tolerance.
Args:
points (list(np.array)): list of Cartesian coordinates for each point.
tolerance (optional:float): the maximum triangle size for these points to be considered colinear. Default is 1e-7.
Returns:
(bool): True if all points fall on a straight line (within the allowed tolerance).
|
[
"Check",
"whether",
"a",
"set",
"of",
"points",
"fall",
"on",
"a",
"straight",
"line",
".",
"Calculates",
"the",
"areas",
"of",
"triangles",
"formed",
"by",
"triplets",
"of",
"the",
"points",
".",
"Returns",
"False",
"is",
"any",
"of",
"these",
"areas",
"are",
"larger",
"than",
"the",
"tolerance",
"."
] |
python
|
train
| 39.631579 |
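A quick sketch exercising `points_are_in_a_straight_line` from the record above on one collinear and one non-collinear triplet; the import path is inferred from the record's file path (`vasppy/procar.py`).

```python
# Sketch, assuming vasppy is installed.
import numpy as np
from vasppy.procar import points_are_in_a_straight_line

collinear = [np.array([0.0, 0.0, 0.0]),
             np.array([1.0, 1.0, 1.0]),
             np.array([2.0, 2.0, 2.0])]
bent = [np.array([0.0, 0.0, 0.0]),
        np.array([1.0, 1.0, 1.0]),
        np.array([2.0, 2.0, 2.5])]

print(points_are_in_a_straight_line(collinear))  # True
print(points_are_in_a_straight_line(bent))       # False
```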
shaypal5/strct
|
strct/dicts/_dict.py
|
https://github.com/shaypal5/strct/blob/f3a301692d052ddb79331230b3c00625db1d83fc/strct/dicts/_dict.py#L609-L643
|
def sum_num_dicts(dicts, normalize=False):
"""Sums the given dicts into a single dict mapping each key to the sum
of its mappings in all given dicts.
Parameters
----------
dicts : list
A list of dict objects mapping each key to a numeric value.
normalize : bool, default False
Indicates whether to normalize all values by value sum.
Returns
-------
dict
A dict where each key is mapped to the sum of its mappings in all
given dicts.
Example
-------
>>> dict1 = {'a': 3, 'b': 2}
>>> dict2 = {'a':7, 'c': 8}
>>> result = sum_num_dicts([dict1, dict2])
>>> print(sorted(result.items()))
[('a', 10), ('b', 2), ('c', 8)]
>>> result = sum_num_dicts([dict1, dict2], normalize=True)
>>> print(sorted(result.items()))
[('a', 0.5), ('b', 0.1), ('c', 0.4)]
"""
sum_dict = {}
for dicti in dicts:
for key in dicti:
sum_dict[key] = sum_dict.get(key, 0) + dicti[key]
if normalize:
return norm_int_dict(sum_dict)
return sum_dict
|
[
"def",
"sum_num_dicts",
"(",
"dicts",
",",
"normalize",
"=",
"False",
")",
":",
"sum_dict",
"=",
"{",
"}",
"for",
"dicti",
"in",
"dicts",
":",
"for",
"key",
"in",
"dicti",
":",
"sum_dict",
"[",
"key",
"]",
"=",
"sum_dict",
".",
"get",
"(",
"key",
",",
"0",
")",
"+",
"dicti",
"[",
"key",
"]",
"if",
"normalize",
":",
"return",
"norm_int_dict",
"(",
"sum_dict",
")",
"return",
"sum_dict"
] |
Sums the given dicts into a single dict mapping each key to the sum
of its mappings in all given dicts.
Parameters
----------
dicts : list
A list of dict objects mapping each key to a numeric value.
normalize : bool, default False
Indicates whether to normalize all values by value sum.
Returns
-------
dict
A dict where each key is mapped to the sum of its mappings in all
given dicts.
Example
-------
>>> dict1 = {'a': 3, 'b': 2}
>>> dict2 = {'a':7, 'c': 8}
>>> result = sum_num_dicts([dict1, dict2])
>>> print(sorted(result.items()))
[('a', 10), ('b', 2), ('c', 8)]
>>> result = sum_num_dicts([dict1, dict2], normalize=True)
>>> print(sorted(result.items()))
[('a', 0.5), ('b', 0.1), ('c', 0.4)]
|
[
"Sums",
"the",
"given",
"dicts",
"into",
"a",
"single",
"dict",
"mapping",
"each",
"key",
"to",
"the",
"sum",
"of",
"its",
"mappings",
"in",
"all",
"given",
"dicts",
"."
] |
python
|
train
| 29.628571 |
andreikop/qutepart
|
qutepart/indenter/scheme.py
|
https://github.com/andreikop/qutepart/blob/109d76b239751318bcef06f39b2fbbf18687a40b/qutepart/indenter/scheme.py#L25-L34
|
def _lastWord(self, text):
"""Move backward to the start of the word at the end of a string.
Return the word
"""
for index, char in enumerate(text[::-1]):
if char.isspace() or \
char in ('(', ')'):
return text[len(text) - index :]
else:
return text
|
[
"def",
"_lastWord",
"(",
"self",
",",
"text",
")",
":",
"for",
"index",
",",
"char",
"in",
"enumerate",
"(",
"text",
"[",
":",
":",
"-",
"1",
"]",
")",
":",
"if",
"char",
".",
"isspace",
"(",
")",
"or",
"char",
"in",
"(",
"'('",
",",
"')'",
")",
":",
"return",
"text",
"[",
"len",
"(",
"text",
")",
"-",
"index",
":",
"]",
"else",
":",
"return",
"text"
] |
Move backward to the start of the word at the end of a string.
Return the word
|
[
"Move",
"backward",
"to",
"the",
"start",
"of",
"the",
"word",
"at",
"the",
"end",
"of",
"a",
"string",
".",
"Return",
"the",
"word"
] |
python
|
train
| 33.4 |
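The `_lastWord` helper in the record above relies on Python's `for ... else` construct: the `else` branch only runs when no whitespace or parenthesis is found. Below is a standalone sketch with the body reproduced (minus `self`) so it runs without qutepart.

```python
# Reproduction of the _lastWord logic for demonstration purposes.
def last_word(text):
    """Return the word at the end of `text`, delimited by whitespace or parentheses."""
    for index, char in enumerate(text[::-1]):
        if char.isspace() or char in ('(', ')'):
            return text[len(text) - index:]
    else:  # loop finished without finding a delimiter
        return text

print(last_word('(define my-func'))  # 'my-func'
print(last_word('atom'))             # 'atom'
```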
samgiles/slumber
|
slumber/utils.py
|
https://github.com/samgiles/slumber/blob/af0f9ef7bd8df8bde6b47088630786c737869bce/slumber/utils.py#L9-L16
|
def url_join(base, *args):
"""
Helper function to join an arbitrary number of url segments together.
"""
scheme, netloc, path, query, fragment = urlsplit(base)
path = path if len(path) else "/"
path = posixpath.join(path, *[('%s' % x) for x in args])
return urlunsplit([scheme, netloc, path, query, fragment])
|
[
"def",
"url_join",
"(",
"base",
",",
"*",
"args",
")",
":",
"scheme",
",",
"netloc",
",",
"path",
",",
"query",
",",
"fragment",
"=",
"urlsplit",
"(",
"base",
")",
"path",
"=",
"path",
"if",
"len",
"(",
"path",
")",
"else",
"\"/\"",
"path",
"=",
"posixpath",
".",
"join",
"(",
"path",
",",
"*",
"[",
"(",
"'%s'",
"%",
"x",
")",
"for",
"x",
"in",
"args",
"]",
")",
"return",
"urlunsplit",
"(",
"[",
"scheme",
",",
"netloc",
",",
"path",
",",
"query",
",",
"fragment",
"]",
")"
] |
Helper function to join an arbitrary number of url segments together.
|
[
"Helper",
"function",
"to",
"join",
"an",
"arbitrary",
"number",
"of",
"url",
"segments",
"together",
"."
] |
python
|
train
| 41.25 |
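A brief sketch of `url_join` from the record above; the expected outputs follow from the `urlsplit`/`posixpath.join` calls in the record, and the import path is inferred from the file path (`slumber/utils.py`).

```python
# Sketch, assuming slumber is installed.
from slumber.utils import url_join

print(url_join('http://example.com/api', 'v1', 'users', 42))
# -> http://example.com/api/v1/users/42
print(url_join('http://example.com'))
# -> http://example.com/  (an empty path is replaced by '/')
```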
ligyxy/DictMySQL
|
dictmysql.py
|
https://github.com/ligyxy/DictMySQL/blob/f40d649193ccf58d1c7933189be1042b37afbe31/dictmysql.py#L355-L372
|
def select_page(self, limit, offset=0, **kwargs):
"""
:type limit: int
:param limit: The max row number for each page
:type offset: int
:param offset: The starting position of the page
:return:
"""
start = offset
while True:
result = self.select(limit=[start, limit], **kwargs)
start += limit
if result:
yield result
else:
break
if self.debug:
break
|
[
"def",
"select_page",
"(",
"self",
",",
"limit",
",",
"offset",
"=",
"0",
",",
"*",
"*",
"kwargs",
")",
":",
"start",
"=",
"offset",
"while",
"True",
":",
"result",
"=",
"self",
".",
"select",
"(",
"limit",
"=",
"[",
"start",
",",
"limit",
"]",
",",
"*",
"*",
"kwargs",
")",
"start",
"+=",
"limit",
"if",
"result",
":",
"yield",
"result",
"else",
":",
"break",
"if",
"self",
".",
"debug",
":",
"break"
] |
:type limit: int
:param limit: The max row number for each page
:type offset: int
:param offset: The starting position of the page
:return:
|
[
":",
"type",
"limit",
":",
"int",
":",
"param",
"limit",
":",
"The",
"max",
"row",
"number",
"for",
"each",
"page",
":",
"type",
"offset",
":",
"int",
":",
"param",
"offset",
":",
"The",
"starting",
"position",
"of",
"the",
"page",
":",
"return",
":"
] |
python
|
train
| 28.444444 |
nephila/djangocms-apphook-setup
|
djangocms_apphook_setup/base.py
|
https://github.com/nephila/djangocms-apphook-setup/blob/e82c0afdf966f859fe13dc80fcd417b44080f460/djangocms_apphook_setup/base.py#L22-L57
|
def _create_page(cls, page, lang, auto_title, cms_app=None, parent=None, namespace=None,
site=None, set_home=False):
"""
Create a single page or titles
:param page: Page instance
:param lang: language code
:param auto_title: title text for the newly created title
:param cms_app: Apphook Class to be attached to the page
:param parent: parent page (None when creating the home page)
:param namespace: application instance name (as provided to the ApphookConfig)
:param set_home: mark as home page (on django CMS 3.5 only)
:return: draft copy of the created page
"""
from cms.api import create_page, create_title
from cms.utils.conf import get_templates
default_template = get_templates()[0][0]
if page is None:
page = create_page(
auto_title, language=lang, parent=parent, site=site,
template=default_template, in_navigation=True, published=True
)
page.application_urls = cms_app
page.application_namespace = namespace
page.save()
page.publish(lang)
elif lang not in page.get_languages():
create_title(
language=lang, title=auto_title, page=page
)
page.publish(lang)
if set_home:
page.set_as_homepage()
return page.get_draft_object()
|
[
"def",
"_create_page",
"(",
"cls",
",",
"page",
",",
"lang",
",",
"auto_title",
",",
"cms_app",
"=",
"None",
",",
"parent",
"=",
"None",
",",
"namespace",
"=",
"None",
",",
"site",
"=",
"None",
",",
"set_home",
"=",
"False",
")",
":",
"from",
"cms",
".",
"api",
"import",
"create_page",
",",
"create_title",
"from",
"cms",
".",
"utils",
".",
"conf",
"import",
"get_templates",
"default_template",
"=",
"get_templates",
"(",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"if",
"page",
"is",
"None",
":",
"page",
"=",
"create_page",
"(",
"auto_title",
",",
"language",
"=",
"lang",
",",
"parent",
"=",
"parent",
",",
"site",
"=",
"site",
",",
"template",
"=",
"default_template",
",",
"in_navigation",
"=",
"True",
",",
"published",
"=",
"True",
")",
"page",
".",
"application_urls",
"=",
"cms_app",
"page",
".",
"application_namespace",
"=",
"namespace",
"page",
".",
"save",
"(",
")",
"page",
".",
"publish",
"(",
"lang",
")",
"elif",
"lang",
"not",
"in",
"page",
".",
"get_languages",
"(",
")",
":",
"create_title",
"(",
"language",
"=",
"lang",
",",
"title",
"=",
"auto_title",
",",
"page",
"=",
"page",
")",
"page",
".",
"publish",
"(",
"lang",
")",
"if",
"set_home",
":",
"page",
".",
"set_as_homepage",
"(",
")",
"return",
"page",
".",
"get_draft_object",
"(",
")"
] |
Create a single page or titles
:param page: Page instance
:param lang: language code
:param auto_title: title text for the newly created title
:param cms_app: Apphook Class to be attached to the page
:param parent: parent page (None when creating the home page)
:param namespace: application instance name (as provided to the ApphookConfig)
:param set_home: mark as home page (on django CMS 3.5 only)
:return: draft copy of the created page
|
[
"Create",
"a",
"single",
"page",
"or",
"titles"
] |
python
|
train
| 39.833333 |
log2timeline/plaso
|
plaso/parsers/chrome_preferences.py
|
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/chrome_preferences.py#L156-L189
|
def _ExtractContentSettingsExceptions(self, exceptions_dict, parser_mediator):
"""Extracts site specific events.
Args:
exceptions_dict (dict): Permission exceptions data from Preferences file.
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
"""
for permission in exceptions_dict:
if permission not in self._EXCEPTIONS_KEYS:
continue
exception_dict = exceptions_dict.get(permission, {})
for urls, url_dict in exception_dict.items():
last_used = url_dict.get('last_used', None)
if not last_used:
continue
# If secondary_url is '*', the permission applies to primary_url.
# If secondary_url is a valid URL, the permission applies to
# elements loaded from secondary_url being embedded in primary_url.
primary_url, secondary_url = urls.split(',')
event_data = ChromeContentSettingsExceptionsEventData()
event_data.permission = permission
event_data.primary_url = primary_url
event_data.secondary_url = secondary_url
timestamp = int(last_used * 1000000)
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
[
"def",
"_ExtractContentSettingsExceptions",
"(",
"self",
",",
"exceptions_dict",
",",
"parser_mediator",
")",
":",
"for",
"permission",
"in",
"exceptions_dict",
":",
"if",
"permission",
"not",
"in",
"self",
".",
"_EXCEPTIONS_KEYS",
":",
"continue",
"exception_dict",
"=",
"exceptions_dict",
".",
"get",
"(",
"permission",
",",
"{",
"}",
")",
"for",
"urls",
",",
"url_dict",
"in",
"exception_dict",
".",
"items",
"(",
")",
":",
"last_used",
"=",
"url_dict",
".",
"get",
"(",
"'last_used'",
",",
"None",
")",
"if",
"not",
"last_used",
":",
"continue",
"# If secondary_url is '*', the permission applies to primary_url.",
"# If secondary_url is a valid URL, the permission applies to",
"# elements loaded from secondary_url being embedded in primary_url.",
"primary_url",
",",
"secondary_url",
"=",
"urls",
".",
"split",
"(",
"','",
")",
"event_data",
"=",
"ChromeContentSettingsExceptionsEventData",
"(",
")",
"event_data",
".",
"permission",
"=",
"permission",
"event_data",
".",
"primary_url",
"=",
"primary_url",
"event_data",
".",
"secondary_url",
"=",
"secondary_url",
"timestamp",
"=",
"int",
"(",
"last_used",
"*",
"1000000",
")",
"date_time",
"=",
"dfdatetime_posix_time",
".",
"PosixTimeInMicroseconds",
"(",
"timestamp",
"=",
"timestamp",
")",
"event",
"=",
"time_events",
".",
"DateTimeValuesEvent",
"(",
"date_time",
",",
"definitions",
".",
"TIME_DESCRIPTION_LAST_VISITED",
")",
"parser_mediator",
".",
"ProduceEventWithEventData",
"(",
"event",
",",
"event_data",
")"
] |
Extracts site specific events.
Args:
exceptions_dict (dict): Permission exceptions data from Preferences file.
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
|
[
"Extracts",
"site",
"specific",
"events",
"."
] |
python
|
train
| 42.323529 |
rameshg87/pyremotevbox
|
pyremotevbox/ZSI/parse.py
|
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/parse.py#L328-L333
|
def WhatMustIUnderstand(self):
'''Return a list of (uri,localname) tuples for all elements in the
header that have mustUnderstand set.
'''
return [ ( E.namespaceURI, E.localName )
for E in self.header_elements if _find_mu(E) == "1" ]
|
[
"def",
"WhatMustIUnderstand",
"(",
"self",
")",
":",
"return",
"[",
"(",
"E",
".",
"namespaceURI",
",",
"E",
".",
"localName",
")",
"for",
"E",
"in",
"self",
".",
"header_elements",
"if",
"_find_mu",
"(",
"E",
")",
"==",
"\"1\"",
"]"
] |
Return a list of (uri,localname) tuples for all elements in the
header that have mustUnderstand set.
|
[
"Return",
"a",
"list",
"of",
"(",
"uri",
"localname",
")",
"tuples",
"for",
"all",
"elements",
"in",
"the",
"header",
"that",
"have",
"mustUnderstand",
"set",
"."
] |
python
|
train
| 46 |
pytroll/trollsift
|
trollsift/parser.py
|
https://github.com/pytroll/trollsift/blob/d0e5b6006e248974d806d0dd8e20cc6641d778fb/trollsift/parser.py#L354-L362
|
def get_convert_dict(fmt):
"""Retrieve parse definition from the format string `fmt`."""
convdef = {}
for literal_text, field_name, format_spec, conversion in formatter.parse(fmt):
if field_name is None:
continue
# XXX: Do I need to include 'conversion'?
convdef[field_name] = format_spec
return convdef
|
[
"def",
"get_convert_dict",
"(",
"fmt",
")",
":",
"convdef",
"=",
"{",
"}",
"for",
"literal_text",
",",
"field_name",
",",
"format_spec",
",",
"conversion",
"in",
"formatter",
".",
"parse",
"(",
"fmt",
")",
":",
"if",
"field_name",
"is",
"None",
":",
"continue",
"# XXX: Do I need to include 'conversion'?",
"convdef",
"[",
"field_name",
"]",
"=",
"format_spec",
"return",
"convdef"
] |
Retrieve parse definition from the format string `fmt`.
|
[
"Retrieve",
"parse",
"definition",
"from",
"the",
"format",
"string",
"fmt",
"."
] |
python
|
train
| 38.555556 |
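A short sketch of `get_convert_dict` from the record above; the format string is a made-up example and the import path is inferred from the record's file path (`trollsift/parser.py`).

```python
# Sketch, assuming trollsift is installed.
from trollsift.parser import get_convert_dict

fmt = '{platform_name}_{start_time:%Y%m%d_%H%M}.nc'
print(get_convert_dict(fmt))
# -> {'platform_name': '', 'start_time': '%Y%m%d_%H%M'}
```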
ChristianTremblay/BAC0
|
BAC0/core/devices/Points.py
|
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/devices/Points.py#L902-L912
|
def value(self):
"""
Take last known value as the value
"""
try:
value = self.lastValue
except IndexError:
value = "NaN"
except ValueError:
value = "NaN"
return value
|
[
"def",
"value",
"(",
"self",
")",
":",
"try",
":",
"value",
"=",
"self",
".",
"lastValue",
"except",
"IndexError",
":",
"value",
"=",
"\"NaN\"",
"except",
"ValueError",
":",
"value",
"=",
"\"NaN\"",
"return",
"value"
] |
Take last known value as the value
|
[
"Take",
"last",
"known",
"value",
"as",
"the",
"value"
] |
python
|
train
| 22.545455 |
linkedin/luminol
|
src/luminol/correlator.py
|
https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/correlator.py#L113-L118
|
def is_correlated(self, threshold=0):
"""
Compare with a threshold to determine whether two timeseries correlate to each other.
:return: a CorrelationResult object if two time series correlate otherwise false.
"""
return self.correlation_result if self.correlation_result.coefficient >= threshold else False
|
[
"def",
"is_correlated",
"(",
"self",
",",
"threshold",
"=",
"0",
")",
":",
"return",
"self",
".",
"correlation_result",
"if",
"self",
".",
"correlation_result",
".",
"coefficient",
">=",
"threshold",
"else",
"False"
] |
Compare with a threshold to determine whether two timeseries correlate to each other.
:return: a CorrelationResult object if two time series correlate otherwise false.
|
[
"Compare",
"with",
"a",
"threshold",
"to",
"determine",
"whether",
"two",
"timeseries",
"correlate",
"to",
"each",
"other",
".",
":",
"return",
":",
"a",
"CorrelationResult",
"object",
"if",
"two",
"time",
"series",
"correlate",
"otherwise",
"false",
"."
] |
python
|
train
| 57 |
google/openhtf
|
openhtf/util/console_output.py
|
https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/console_output.py#L78-L109
|
def banner_print(msg, color='', width=60, file=sys.stdout, logger=_LOG):
"""Print the message as a banner with a fixed width.
Also logs the message (un-bannered) to the given logger at the debug level.
Args:
msg: The message to print.
color: Optional colorama color string to be applied to the message. You can
concatenate colorama color strings together in order to get any set of
effects you want.
width: Total width for the resulting banner.
file: A file object to which the banner text will be written. Intended for
use with CLI output file objects like sys.stdout.
logger: A logger to use, or None to disable logging.
Example:
>>> banner_print('Foo Bar Baz')
======================== Foo Bar Baz =======================
"""
if logger:
logger.debug(ANSI_ESC_RE.sub('', msg))
if CLI_QUIET:
return
lpad = int(math.ceil((width - _printed_len(msg) - 2) / 2.0)) * '='
rpad = int(math.floor((width - _printed_len(msg) - 2) / 2.0)) * '='
file.write('{sep}{color}{lpad} {msg} {rpad}{reset}{sep}{sep}'.format(
sep=_linesep_for_file(file), color=color, lpad=lpad, msg=msg, rpad=rpad,
reset=colorama.Style.RESET_ALL))
file.flush()
|
[
"def",
"banner_print",
"(",
"msg",
",",
"color",
"=",
"''",
",",
"width",
"=",
"60",
",",
"file",
"=",
"sys",
".",
"stdout",
",",
"logger",
"=",
"_LOG",
")",
":",
"if",
"logger",
":",
"logger",
".",
"debug",
"(",
"ANSI_ESC_RE",
".",
"sub",
"(",
"''",
",",
"msg",
")",
")",
"if",
"CLI_QUIET",
":",
"return",
"lpad",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"(",
"width",
"-",
"_printed_len",
"(",
"msg",
")",
"-",
"2",
")",
"/",
"2.0",
")",
")",
"*",
"'='",
"rpad",
"=",
"int",
"(",
"math",
".",
"floor",
"(",
"(",
"width",
"-",
"_printed_len",
"(",
"msg",
")",
"-",
"2",
")",
"/",
"2.0",
")",
")",
"*",
"'='",
"file",
".",
"write",
"(",
"'{sep}{color}{lpad} {msg} {rpad}{reset}{sep}{sep}'",
".",
"format",
"(",
"sep",
"=",
"_linesep_for_file",
"(",
"file",
")",
",",
"color",
"=",
"color",
",",
"lpad",
"=",
"lpad",
",",
"msg",
"=",
"msg",
",",
"rpad",
"=",
"rpad",
",",
"reset",
"=",
"colorama",
".",
"Style",
".",
"RESET_ALL",
")",
")",
"file",
".",
"flush",
"(",
")"
] |
Print the message as a banner with a fixed width.
Also logs the message (un-bannered) to the given logger at the debug level.
Args:
msg: The message to print.
color: Optional colorama color string to be applied to the message. You can
concatenate colorama color strings together in order to get any set of
effects you want.
width: Total width for the resulting banner.
file: A file object to which the banner text will be written. Intended for
use with CLI output file objects like sys.stdout.
logger: A logger to use, or None to disable logging.
Example:
>>> banner_print('Foo Bar Baz')
======================== Foo Bar Baz =======================
|
[
"Print",
"the",
"message",
"as",
"a",
"banner",
"with",
"a",
"fixed",
"width",
"."
] |
python
|
train
| 37.25 |
wright-group/WrightTools
|
WrightTools/data/_data.py
|
https://github.com/wright-group/WrightTools/blob/80d3ddd5074d8d5c1bc03fd5a0e0f10d4b424aeb/WrightTools/data/_data.py#L211-L229
|
def _on_axes_updated(self):
"""Method to run when axes are changed in any way.
Propagates updated axes properly.
"""
# update attrs
self.attrs["axes"] = [a.identity.encode() for a in self._axes]
# remove old attributes
while len(self._current_axis_identities_in_natural_namespace) > 0:
key = self._current_axis_identities_in_natural_namespace.pop(0)
try:
delattr(self, key)
except AttributeError:
pass # already gone
# populate new attributes
for a in self._axes:
key = a.natural_name
setattr(self, key, a)
self._current_axis_identities_in_natural_namespace.append(key)
|
[
"def",
"_on_axes_updated",
"(",
"self",
")",
":",
"# update attrs",
"self",
".",
"attrs",
"[",
"\"axes\"",
"]",
"=",
"[",
"a",
".",
"identity",
".",
"encode",
"(",
")",
"for",
"a",
"in",
"self",
".",
"_axes",
"]",
"# remove old attributes",
"while",
"len",
"(",
"self",
".",
"_current_axis_identities_in_natural_namespace",
")",
">",
"0",
":",
"key",
"=",
"self",
".",
"_current_axis_identities_in_natural_namespace",
".",
"pop",
"(",
"0",
")",
"try",
":",
"delattr",
"(",
"self",
",",
"key",
")",
"except",
"AttributeError",
":",
"pass",
"# already gone",
"# populate new attributes",
"for",
"a",
"in",
"self",
".",
"_axes",
":",
"key",
"=",
"a",
".",
"natural_name",
"setattr",
"(",
"self",
",",
"key",
",",
"a",
")",
"self",
".",
"_current_axis_identities_in_natural_namespace",
".",
"append",
"(",
"key",
")"
] |
Method to run when axes are changed in any way.
Propagates updated axes properly.
|
[
"Method",
"to",
"run",
"when",
"axes",
"are",
"changed",
"in",
"any",
"way",
"."
] |
python
|
train
| 38.368421 |
ToucanToco/toucan-data-sdk
|
toucan_data_sdk/utils/decorators.py
|
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/decorators.py#L185-L213
|
def domain(domain_name):
"""
Allow to apply a function f(df: DataFrame) -> DataFrame) on dfs by specifying the key
E.g instead of writing:
def process_domain1(dfs):
df = dfs['domain1']
# actual process
dfs['domain1'] = df
return dfs
You can write:
@domain('domain1')
def process_domain1(df):
#actual process
return df
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
dfs, *args = args
if not isinstance(dfs, dict):
raise TypeError(f'{dfs} is not a dict')
df = dfs.pop(domain_name)
df = func(df, *args, **kwargs)
return {domain_name: df, **dfs}
return wrapper
return decorator
|
[
"def",
"domain",
"(",
"domain_name",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"dfs",
",",
"",
"*",
"args",
"=",
"args",
"if",
"not",
"isinstance",
"(",
"dfs",
",",
"dict",
")",
":",
"raise",
"TypeError",
"(",
"f'{dfs} is not a dict'",
")",
"df",
"=",
"dfs",
".",
"pop",
"(",
"domain_name",
")",
"df",
"=",
"func",
"(",
"df",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"{",
"domain_name",
":",
"df",
",",
"*",
"*",
"dfs",
"}",
"return",
"wrapper",
"return",
"decorator"
] |
Allow to apply a function f(df: DataFrame) -> DataFrame) on dfs by specifying the key
E.g instead of writing:
def process_domain1(dfs):
df = dfs['domain1']
# actual process
dfs['domain1'] = df
return dfs
You can write:
@domain('domain1')
def process_domain1(df):
#actual process
return df
|
[
"Allow",
"to",
"apply",
"a",
"function",
"f",
"(",
"df",
":",
"DataFrame",
")",
"-",
">",
"DataFrame",
")",
"on",
"dfs",
"by",
"specifying",
"the",
"key",
"E",
".",
"g",
"instead",
"of",
"writing",
":",
"def",
"process_domain1",
"(",
"dfs",
")",
":",
"df",
"=",
"dfs",
"[",
"domain1",
"]",
"#",
"actual",
"process",
"dfs",
"[",
"domain1",
"]",
"=",
"df",
"return",
"dfs"
] |
python
|
test
| 27.344828 |
bapakode/OmMongo
|
ommongo/fields/fields.py
|
https://github.com/bapakode/OmMongo/blob/52b5a5420516dc709f2d2eb065818c7973991ce3/ommongo/fields/fields.py#L366-L375
|
def unwrap(self, value, session=None):
''' Unwrap value using the unwrap function from ``EnumField.item_type``.
Since unwrap validation could not happen in is_valid_wrap, it
happens in this function.'''
self.validate_unwrap(value)
value = self.item_type.unwrap(value, session=session)
for val in self.values:
if val == value:
return val
self._fail_validation(value, 'Value was not in the enum values')
|
[
"def",
"unwrap",
"(",
"self",
",",
"value",
",",
"session",
"=",
"None",
")",
":",
"self",
".",
"validate_unwrap",
"(",
"value",
")",
"value",
"=",
"self",
".",
"item_type",
".",
"unwrap",
"(",
"value",
",",
"session",
"=",
"session",
")",
"for",
"val",
"in",
"self",
".",
"values",
":",
"if",
"val",
"==",
"value",
":",
"return",
"val",
"self",
".",
"_fail_validation",
"(",
"value",
",",
"'Value was not in the enum values'",
")"
] |
Unwrap value using the unwrap function from ``EnumField.item_type``.
Since unwrap validation could not happen in is_valid_wrap, it
happens in this function.
|
[
"Unwrap",
"value",
"using",
"the",
"unwrap",
"function",
"from",
"EnumField",
".",
"item_type",
".",
"Since",
"unwrap",
"validation",
"could",
"not",
"happen",
"in",
"is_valid_wrap",
"it",
"happens",
"in",
"this",
"function",
"."
] |
python
|
train
| 48.4 |
bakwc/PySyncObj
|
pysyncobj/batteries.py
|
https://github.com/bakwc/PySyncObj/blob/be3b0aaa932d5156f5df140c23c962430f51b7b8/pysyncobj/batteries.py#L496-L509
|
def tryAcquire(self, lockID, callback=None, sync=False, timeout=None):
"""Attempt to acquire lock.
:param lockID: unique lock identifier.
:type lockID: str
:param sync: True - to wait until lock is acquired or failed to acquire.
:type sync: bool
:param callback: if sync is False - callback will be called with operation result.
:type callback: func(opResult, error)
:param timeout: max operation time (default - unlimited)
:type timeout: float
:return True if acquired, False - somebody else already acquired lock
"""
return self.__lockImpl.acquire(lockID, self.__selfID, time.time(), callback=callback, sync=sync, timeout=timeout)
|
[
"def",
"tryAcquire",
"(",
"self",
",",
"lockID",
",",
"callback",
"=",
"None",
",",
"sync",
"=",
"False",
",",
"timeout",
"=",
"None",
")",
":",
"return",
"self",
".",
"__lockImpl",
".",
"acquire",
"(",
"lockID",
",",
"self",
".",
"__selfID",
",",
"time",
".",
"time",
"(",
")",
",",
"callback",
"=",
"callback",
",",
"sync",
"=",
"sync",
",",
"timeout",
"=",
"timeout",
")"
] |
Attempt to acquire lock.
:param lockID: unique lock identifier.
:type lockID: str
:param sync: True - to wait until lock is acquired or failed to acquire.
:type sync: bool
:param callback: if sync is False - callback will be called with operation result.
:type callback: func(opResult, error)
:param timeout: max operation time (default - unlimited)
:type timeout: float
:return True if acquired, False - somebody else already acquired lock
|
[
"Attempt",
"to",
"acquire",
"lock",
"."
] |
python
|
test
| 51.142857 |
ska-sa/montblanc
|
montblanc/util/parsing.py
|
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/util/parsing.py#L12-L122
|
def parse_python_assigns(assign_str):
"""
Parses a string, containing assign statements
into a dictionary.
.. code-block:: python
h5 = katdal.open('123456789.h5')
kwargs = parse_python_assigns("spw=3; scans=[1,2];"
"targets='bpcal,radec';"
"channels=slice(0,2048)")
h5.select(**kwargs)
Parameters
----------
assign_str: str
Assignment string. Should only contain assignment statements
assigning python literals or builtin function calls, to variable names.
Multiple assignment statements should be separated by semi-colons.
Returns
-------
dict
Dictionary { name: value } containing
assignment results.
"""
if not assign_str:
return {}
def _eval_value(stmt_value):
# If the statement value is a call to a builtin, try evaluate it
if isinstance(stmt_value, ast.Call):
func_name = stmt_value.func.id
if func_name not in _BUILTIN_WHITELIST:
raise ValueError("Function '%s' in '%s' is not builtin. "
"Available builtins: '%s'"
% (func_name, assign_str, list(_BUILTIN_WHITELIST)))
# Recursively pass arguments through this same function
if stmt_value.args is not None:
args = tuple(_eval_value(a) for a in stmt_value.args)
else:
args = ()
# Recursively pass keyword arguments through this same function
if stmt_value.keywords is not None:
kwargs = {kw.arg : _eval_value(kw.value) for kw
in stmt_value.keywords}
else:
kwargs = {}
return getattr(__builtin__, func_name)(*args, **kwargs)
# Try a literal eval
else:
return ast.literal_eval(stmt_value)
# Variable dictionary
variables = {}
# Parse the assignment string
stmts = ast.parse(assign_str, mode='single').body
for i, stmt in enumerate(stmts):
if not isinstance(stmt, ast.Assign):
raise ValueError("Statement %d in '%s' is not a "
"variable assignment." % (i, assign_str))
# Evaluate assignment lhs
values = _eval_value(stmt.value)
# "a = b = c" => targets 'a' and 'b' with 'c' as result
for target in stmt.targets:
# a = 2
if isinstance(target, ast.Name):
variables[target.id] = values
# Tuple/List unpacking case
# (a, b) = 2
elif isinstance(target, (ast.Tuple, ast.List)):
# Require all tuple/list elements to be variable names,
# although anything else is probably a syntax error
if not all(isinstance(e, ast.Name) for e in target.elts):
raise ValueError("Tuple unpacking in assignment %d "
"in expression '%s' failed as not all "
"tuple contents are variable names.")
# Promote for zip and length checking
if not isinstance(values, (tuple, list)):
elements = (values,)
else:
elements = values
if not len(target.elts) == len(elements):
raise ValueError("Unpacking '%s' into a tuple/list in "
"assignment %d of expression '%s' failed. "
"The number of tuple elements did not match "
"the number of values."
% (values, i, assign_str))
# Unpack
for variable, value in zip(target.elts, elements):
variables[variable.id] = value
else:
raise TypeError("'%s' types are not supported"
"as assignment targets." % type(target))
return variables
|
[
"def",
"parse_python_assigns",
"(",
"assign_str",
")",
":",
"if",
"not",
"assign_str",
":",
"return",
"{",
"}",
"def",
"_eval_value",
"(",
"stmt_value",
")",
":",
"# If the statement value is a call to a builtin, try evaluate it",
"if",
"isinstance",
"(",
"stmt_value",
",",
"ast",
".",
"Call",
")",
":",
"func_name",
"=",
"stmt_value",
".",
"func",
".",
"id",
"if",
"func_name",
"not",
"in",
"_BUILTIN_WHITELIST",
":",
"raise",
"ValueError",
"(",
"\"Function '%s' in '%s' is not builtin. \"",
"\"Available builtins: '%s'\"",
"%",
"(",
"func_name",
",",
"assign_str",
",",
"list",
"(",
"_BUILTIN_WHITELIST",
")",
")",
")",
"# Recursively pass arguments through this same function",
"if",
"stmt_value",
".",
"args",
"is",
"not",
"None",
":",
"args",
"=",
"tuple",
"(",
"_eval_value",
"(",
"a",
")",
"for",
"a",
"in",
"stmt_value",
".",
"args",
")",
"else",
":",
"args",
"=",
"(",
")",
"# Recursively pass keyword arguments through this same function",
"if",
"stmt_value",
".",
"keywords",
"is",
"not",
"None",
":",
"kwargs",
"=",
"{",
"kw",
".",
"arg",
":",
"_eval_value",
"(",
"kw",
".",
"value",
")",
"for",
"kw",
"in",
"stmt_value",
".",
"keywords",
"}",
"else",
":",
"kwargs",
"=",
"{",
"}",
"return",
"getattr",
"(",
"__builtin__",
",",
"func_name",
")",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# Try a literal eval",
"else",
":",
"return",
"ast",
".",
"literal_eval",
"(",
"stmt_value",
")",
"# Variable dictionary",
"variables",
"=",
"{",
"}",
"# Parse the assignment string",
"stmts",
"=",
"ast",
".",
"parse",
"(",
"assign_str",
",",
"mode",
"=",
"'single'",
")",
".",
"body",
"for",
"i",
",",
"stmt",
"in",
"enumerate",
"(",
"stmts",
")",
":",
"if",
"not",
"isinstance",
"(",
"stmt",
",",
"ast",
".",
"Assign",
")",
":",
"raise",
"ValueError",
"(",
"\"Statement %d in '%s' is not a \"",
"\"variable assignment.\"",
"%",
"(",
"i",
",",
"assign_str",
")",
")",
"# Evaluate assignment lhs",
"values",
"=",
"_eval_value",
"(",
"stmt",
".",
"value",
")",
"# \"a = b = c\" => targets 'a' and 'b' with 'c' as result",
"for",
"target",
"in",
"stmt",
".",
"targets",
":",
"# a = 2",
"if",
"isinstance",
"(",
"target",
",",
"ast",
".",
"Name",
")",
":",
"variables",
"[",
"target",
".",
"id",
"]",
"=",
"values",
"# Tuple/List unpacking case",
"# (a, b) = 2",
"elif",
"isinstance",
"(",
"target",
",",
"(",
"ast",
".",
"Tuple",
",",
"ast",
".",
"List",
")",
")",
":",
"# Require all tuple/list elements to be variable names,",
"# although anything else is probably a syntax error",
"if",
"not",
"all",
"(",
"isinstance",
"(",
"e",
",",
"ast",
".",
"Name",
")",
"for",
"e",
"in",
"target",
".",
"elts",
")",
":",
"raise",
"ValueError",
"(",
"\"Tuple unpacking in assignment %d \"",
"\"in expression '%s' failed as not all \"",
"\"tuple contents are variable names.\"",
")",
"# Promote for zip and length checking",
"if",
"not",
"isinstance",
"(",
"values",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"elements",
"=",
"(",
"values",
",",
")",
"else",
":",
"elements",
"=",
"values",
"if",
"not",
"len",
"(",
"target",
".",
"elts",
")",
"==",
"len",
"(",
"elements",
")",
":",
"raise",
"ValueError",
"(",
"\"Unpacking '%s' into a tuple/list in \"",
"\"assignment %d of expression '%s' failed. \"",
"\"The number of tuple elements did not match \"",
"\"the number of values.\"",
"%",
"(",
"values",
",",
"i",
",",
"assign_str",
")",
")",
"# Unpack",
"for",
"variable",
",",
"value",
"in",
"zip",
"(",
"target",
".",
"elts",
",",
"elements",
")",
":",
"variables",
"[",
"variable",
".",
"id",
"]",
"=",
"value",
"else",
":",
"raise",
"TypeError",
"(",
"\"'%s' types are not supported\"",
"\"as assignment targets.\"",
"%",
"type",
"(",
"target",
")",
")",
"return",
"variables"
] |
Parses a string, containing assign statements
into a dictionary.
.. code-block:: python
h5 = katdal.open('123456789.h5')
kwargs = parse_python_assigns("spw=3; scans=[1,2];"
"targets='bpcal,radec';"
"channels=slice(0,2048)")
h5.select(**kwargs)
Parameters
----------
assign_str: str
Assignment string. Should only contain assignment statements
assigning python literals or builtin function calls, to variable names.
Multiple assignment statements should be separated by semi-colons.
Returns
-------
dict
Dictionary { name: value } containing
assignment results.
|
[
"Parses",
"a",
"string",
"containing",
"assign",
"statements",
"into",
"a",
"dictionary",
"."
] |
python
|
train
| 36.504505 |
jaijuneja/PyTLDR
|
pytldr/nlp/tokenizer.py
|
https://github.com/jaijuneja/PyTLDR/blob/4ba2ab88dbbb1318a86bf4483264ab213e166b6b/pytldr/nlp/tokenizer.py#L61-L66
|
def stem(self, word):
"""Perform stemming on an input word."""
if self.stemmer:
return unicode_to_ascii(self._stemmer.stem(word))
else:
return word
|
[
"def",
"stem",
"(",
"self",
",",
"word",
")",
":",
"if",
"self",
".",
"stemmer",
":",
"return",
"unicode_to_ascii",
"(",
"self",
".",
"_stemmer",
".",
"stem",
"(",
"word",
")",
")",
"else",
":",
"return",
"word"
] |
Perform stemming on an input word.
|
[
"Perform",
"stemming",
"on",
"an",
"input",
"word",
"."
] |
python
|
train
| 31.666667 |
kislyuk/aegea
|
aegea/packages/github3/users.py
|
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/users.py#L395-L418
|
def update(self, name=None, email=None, blog=None, company=None,
location=None, hireable=False, bio=None):
"""If authenticated as this user, update the information with
the information provided in the parameters.
:param str name: e.g., 'John Smith', not login name
:param str email: e.g., '[email protected]'
:param str blog: e.g., 'http://www.example.com/jsmith/blog'
:param str company:
:param str location:
:param bool hireable: defaults to False
:param str bio: GitHub flavored markdown
:returns: bool
"""
user = {'name': name, 'email': email, 'blog': blog,
'company': company, 'location': location,
'hireable': hireable, 'bio': bio}
self._remove_none(user)
url = self._build_url('user')
json = self._json(self._patch(url, data=dumps(user)), 200)
if json:
self._update_(json)
return True
return False
|
[
"def",
"update",
"(",
"self",
",",
"name",
"=",
"None",
",",
"email",
"=",
"None",
",",
"blog",
"=",
"None",
",",
"company",
"=",
"None",
",",
"location",
"=",
"None",
",",
"hireable",
"=",
"False",
",",
"bio",
"=",
"None",
")",
":",
"user",
"=",
"{",
"'name'",
":",
"name",
",",
"'email'",
":",
"email",
",",
"'blog'",
":",
"blog",
",",
"'company'",
":",
"company",
",",
"'location'",
":",
"location",
",",
"'hireable'",
":",
"hireable",
",",
"'bio'",
":",
"bio",
"}",
"self",
".",
"_remove_none",
"(",
"user",
")",
"url",
"=",
"self",
".",
"_build_url",
"(",
"'user'",
")",
"json",
"=",
"self",
".",
"_json",
"(",
"self",
".",
"_patch",
"(",
"url",
",",
"data",
"=",
"dumps",
"(",
"user",
")",
")",
",",
"200",
")",
"if",
"json",
":",
"self",
".",
"_update_",
"(",
"json",
")",
"return",
"True",
"return",
"False"
] |
If authenticated as this user, update the information with
the information provided in the parameters.
:param str name: e.g., 'John Smith', not login name
:param str email: e.g., '[email protected]'
:param str blog: e.g., 'http://www.example.com/jsmith/blog'
:param str company:
:param str location:
:param bool hireable: defaults to False
:param str bio: GitHub flavored markdown
:returns: bool
|
[
"If",
"authenticated",
"as",
"this",
"user",
"update",
"the",
"information",
"with",
"the",
"information",
"provided",
"in",
"the",
"parameters",
"."
] |
python
|
train
| 41.416667 |
QUANTAXIS/QUANTAXIS
|
QUANTAXIS/QAIndicator/hurst.py
|
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAIndicator/hurst.py#L55-L65
|
def deviation(self, series, start, limit, mean):
'''
:type start: int
:type limit: int
:type mean: int
:rtype: list()
'''
d = []
for x in range(start, limit):
d.append(float(series[x] - mean))
return d
|
[
"def",
"deviation",
"(",
"self",
",",
"series",
",",
"start",
",",
"limit",
",",
"mean",
")",
":",
"d",
"=",
"[",
"]",
"for",
"x",
"in",
"range",
"(",
"start",
",",
"limit",
")",
":",
"d",
".",
"append",
"(",
"float",
"(",
"series",
"[",
"x",
"]",
"-",
"mean",
")",
")",
"return",
"d"
] |
:type start: int
:type limit: int
:type mean: int
:rtype: list()
|
[
":",
"type",
"start",
":",
"int",
":",
"type",
"limit",
":",
"int",
":",
"type",
"mean",
":",
"int",
":",
"rtype",
":",
"list",
"()"
] |
python
|
train
| 25 |
theonion/django-bulbs
|
bulbs/content/models.py
|
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/content/models.py#L278-L291
|
def first_image(self):
"""Ready-only attribute that provides the value of the first non-none image that's
not the thumbnail override field.
"""
# loop through image fields and grab the first non-none one
for model_field in self._meta.fields:
if isinstance(model_field, ImageField):
if model_field.name is not 'thumbnail_override':
field_value = getattr(self, model_field.name)
if field_value.id is not None:
return field_value
# no non-none images, return None
return None
|
[
"def",
"first_image",
"(",
"self",
")",
":",
"# loop through image fields and grab the first non-none one",
"for",
"model_field",
"in",
"self",
".",
"_meta",
".",
"fields",
":",
"if",
"isinstance",
"(",
"model_field",
",",
"ImageField",
")",
":",
"if",
"model_field",
".",
"name",
"is",
"not",
"'thumbnail_override'",
":",
"field_value",
"=",
"getattr",
"(",
"self",
",",
"model_field",
".",
"name",
")",
"if",
"field_value",
".",
"id",
"is",
"not",
"None",
":",
"return",
"field_value",
"# no non-none images, return None",
"return",
"None"
] |
Read-only attribute that provides the value of the first non-none image that's
not the thumbnail override field.
|
[
"Ready",
"-",
"only",
"attribute",
"that",
"provides",
"the",
"value",
"of",
"the",
"first",
"non",
"-",
"none",
"image",
"that",
"s",
"not",
"the",
"thumbnail",
"override",
"field",
"."
] |
python
|
train
| 43.428571 |
sixty-north/cosmic-ray
|
src/cosmic_ray/work_db.py
|
https://github.com/sixty-north/cosmic-ray/blob/c654e074afbb7b7fcbc23359083c1287c0d3e991/src/cosmic_ray/work_db.py#L128-L133
|
def results(self):
"An iterable of all `(job-id, WorkResult)`s."
cur = self._conn.cursor()
rows = cur.execute("SELECT * FROM results")
for row in rows:
yield (row['job_id'], _row_to_work_result(row))
|
[
"def",
"results",
"(",
"self",
")",
":",
"cur",
"=",
"self",
".",
"_conn",
".",
"cursor",
"(",
")",
"rows",
"=",
"cur",
".",
"execute",
"(",
"\"SELECT * FROM results\"",
")",
"for",
"row",
"in",
"rows",
":",
"yield",
"(",
"row",
"[",
"'job_id'",
"]",
",",
"_row_to_work_result",
"(",
"row",
")",
")"
] |
An iterable of all `(job-id, WorkResult)`s.
|
[
"An",
"iterable",
"of",
"all",
"(",
"job",
"-",
"id",
"WorkResult",
")",
"s",
"."
] |
python
|
train
| 39.666667 |
jmgilman/Neolib
|
neolib/pyamf/remoting/gateway/twisted.py
|
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/remoting/gateway/twisted.py#L238-L281
|
def render_POST(self, request):
"""
Read remoting request from the client.
@type request: The HTTP Request.
@param request: C{twisted.web.http.Request}
"""
def handleDecodeError(failure):
"""
Return HTTP 400 Bad Request.
"""
errMesg = "%s: %s" % (failure.type, failure.getErrorMessage())
if self.logger:
self.logger.error(errMesg)
self.logger.error(failure.getTraceback())
body = "400 Bad Request\n\nThe request body was unable to " \
"be successfully decoded."
if self.debug:
body += "\n\nTraceback:\n\n%s" % failure.getTraceback()
self._finaliseRequest(request, 400, body)
request.content.seek(0, 0)
timezone_offset = self._get_timezone_offset()
d = threads.deferToThread(remoting.decode, request.content.read(),
strict=self.strict, logger=self.logger,
timezone_offset=timezone_offset)
def cb(amf_request):
if self.logger:
self.logger.debug("AMF Request: %r" % amf_request)
x = self.getResponse(request, amf_request)
x.addCallback(self.sendResponse, request)
# Process the request
d.addCallback(cb).addErrback(handleDecodeError)
return server.NOT_DONE_YET
|
[
"def",
"render_POST",
"(",
"self",
",",
"request",
")",
":",
"def",
"handleDecodeError",
"(",
"failure",
")",
":",
"\"\"\"\n Return HTTP 400 Bad Request.\n \"\"\"",
"errMesg",
"=",
"\"%s: %s\"",
"%",
"(",
"failure",
".",
"type",
",",
"failure",
".",
"getErrorMessage",
"(",
")",
")",
"if",
"self",
".",
"logger",
":",
"self",
".",
"logger",
".",
"error",
"(",
"errMesg",
")",
"self",
".",
"logger",
".",
"error",
"(",
"failure",
".",
"getTraceback",
"(",
")",
")",
"body",
"=",
"\"400 Bad Request\\n\\nThe request body was unable to \"",
"\"be successfully decoded.\"",
"if",
"self",
".",
"debug",
":",
"body",
"+=",
"\"\\n\\nTraceback:\\n\\n%s\"",
"%",
"failure",
".",
"getTraceback",
"(",
")",
"self",
".",
"_finaliseRequest",
"(",
"request",
",",
"400",
",",
"body",
")",
"request",
".",
"content",
".",
"seek",
"(",
"0",
",",
"0",
")",
"timezone_offset",
"=",
"self",
".",
"_get_timezone_offset",
"(",
")",
"d",
"=",
"threads",
".",
"deferToThread",
"(",
"remoting",
".",
"decode",
",",
"request",
".",
"content",
".",
"read",
"(",
")",
",",
"strict",
"=",
"self",
".",
"strict",
",",
"logger",
"=",
"self",
".",
"logger",
",",
"timezone_offset",
"=",
"timezone_offset",
")",
"def",
"cb",
"(",
"amf_request",
")",
":",
"if",
"self",
".",
"logger",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"AMF Request: %r\"",
"%",
"amf_request",
")",
"x",
"=",
"self",
".",
"getResponse",
"(",
"request",
",",
"amf_request",
")",
"x",
".",
"addCallback",
"(",
"self",
".",
"sendResponse",
",",
"request",
")",
"# Process the request",
"d",
".",
"addCallback",
"(",
"cb",
")",
".",
"addErrback",
"(",
"handleDecodeError",
")",
"return",
"server",
".",
"NOT_DONE_YET"
] |
Read remoting request from the client.
@type request: The HTTP Request.
@param request: C{twisted.web.http.Request}
|
[
"Read",
"remoting",
"request",
"from",
"the",
"client",
"."
] |
python
|
train
| 31.045455 |
saltstack/salt
|
salt/minion.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2681-L2698
|
def _fallback_cleanups(self):
'''
Fallback cleanup routines, attempting to fix leaked processes, threads, etc.
'''
# Add an extra fallback in case a forked process leaks through
multiprocessing.active_children()
# Cleanup Windows threads
if not salt.utils.platform.is_windows():
return
for thread in self.win_proc:
if not thread.is_alive():
thread.join()
try:
self.win_proc.remove(thread)
del thread
except (ValueError, NameError):
pass
|
[
"def",
"_fallback_cleanups",
"(",
"self",
")",
":",
"# Add an extra fallback in case a forked process leaks through",
"multiprocessing",
".",
"active_children",
"(",
")",
"# Cleanup Windows threads",
"if",
"not",
"salt",
".",
"utils",
".",
"platform",
".",
"is_windows",
"(",
")",
":",
"return",
"for",
"thread",
"in",
"self",
".",
"win_proc",
":",
"if",
"not",
"thread",
".",
"is_alive",
"(",
")",
":",
"thread",
".",
"join",
"(",
")",
"try",
":",
"self",
".",
"win_proc",
".",
"remove",
"(",
"thread",
")",
"del",
"thread",
"except",
"(",
"ValueError",
",",
"NameError",
")",
":",
"pass"
] |
Fallback cleanup routines, attempting to fix leaked processes, threads, etc.
|
[
"Fallback",
"cleanup",
"routines",
"attempting",
"to",
"fix",
"leaked",
"processes",
"threads",
"etc",
"."
] |
python
|
train
| 34.222222 |
saulpw/visidata
|
visidata/vdtui.py
|
https://github.com/saulpw/visidata/blob/32771e0cea6c24fc7902683d14558391395c591f/visidata/vdtui.py#L1537-L1543
|
def unselect(self, rows, status=True, progress=True):
"Unselect given rows. Don't show progress if progress=False; don't show status if status=False."
before = len(self._selectedRows)
for r in (Progress(rows, 'unselecting') if progress else rows):
self.unselectRow(r)
if status:
vd().status('unselected %s/%s %s' % (before-len(self._selectedRows), before, self.rowtype))
|
[
"def",
"unselect",
"(",
"self",
",",
"rows",
",",
"status",
"=",
"True",
",",
"progress",
"=",
"True",
")",
":",
"before",
"=",
"len",
"(",
"self",
".",
"_selectedRows",
")",
"for",
"r",
"in",
"(",
"Progress",
"(",
"rows",
",",
"'unselecting'",
")",
"if",
"progress",
"else",
"rows",
")",
":",
"self",
".",
"unselectRow",
"(",
"r",
")",
"if",
"status",
":",
"vd",
"(",
")",
".",
"status",
"(",
"'unselected %s/%s %s'",
"%",
"(",
"before",
"-",
"len",
"(",
"self",
".",
"_selectedRows",
")",
",",
"before",
",",
"self",
".",
"rowtype",
")",
")"
] |
Unselect given rows. Don't show progress if progress=False; don't show status if status=False.
|
[
"Unselect",
"given",
"rows",
".",
"Don",
"t",
"show",
"progress",
"if",
"progress",
"=",
"False",
";",
"don",
"t",
"show",
"status",
"if",
"status",
"=",
"False",
"."
] |
python
|
train
| 60 |
seomoz/qless-py
|
qless/workers/forking.py
|
https://github.com/seomoz/qless-py/blob/3eda4ffcd4c0016c9a7e44f780d6155e1a354dda/qless/workers/forking.py#L112-L116
|
def handler(self, signum, frame): # pragma: no cover
'''Signal handler for this process'''
if signum in (signal.SIGTERM, signal.SIGINT, signal.SIGQUIT):
self.stop(signum)
os._exit(0)
|
[
"def",
"handler",
"(",
"self",
",",
"signum",
",",
"frame",
")",
":",
"# pragma: no cover",
"if",
"signum",
"in",
"(",
"signal",
".",
"SIGTERM",
",",
"signal",
".",
"SIGINT",
",",
"signal",
".",
"SIGQUIT",
")",
":",
"self",
".",
"stop",
"(",
"signum",
")",
"os",
".",
"_exit",
"(",
"0",
")"
] |
Signal handler for this process
|
[
"Signal",
"handler",
"for",
"this",
"process"
] |
python
|
train
| 43.8 |
rigetti/pyquil
|
pyquil/gate_matrices.py
|
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/gate_matrices.py#L225-L231
|
def relaxation_operators(p):
"""
Return the amplitude damping Kraus operators
"""
k0 = np.array([[1.0, 0.0], [0.0, np.sqrt(1 - p)]])
k1 = np.array([[0.0, np.sqrt(p)], [0.0, 0.0]])
return k0, k1
|
[
"def",
"relaxation_operators",
"(",
"p",
")",
":",
"k0",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"1.0",
",",
"0.0",
"]",
",",
"[",
"0.0",
",",
"np",
".",
"sqrt",
"(",
"1",
"-",
"p",
")",
"]",
"]",
")",
"k1",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"0.0",
",",
"np",
".",
"sqrt",
"(",
"p",
")",
"]",
",",
"[",
"0.0",
",",
"0.0",
"]",
"]",
")",
"return",
"k0",
",",
"k1"
] |
Return the amplitude damping Kraus operators
|
[
"Return",
"the",
"amplitude",
"damping",
"Kraus",
"operators"
] |
python
|
train
| 30.142857 |
nicolargo/glances
|
glances/outputs/glances_curses.py
|
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/outputs/glances_curses.py#L769-L845
|
def display_popup(self, message,
size_x=None, size_y=None,
duration=3,
is_input=False,
input_size=30,
input_value=None):
"""
Display a centered popup.
If is_input is False:
Display a centered popup with the given message during duration seconds
If size_x and size_y: set the popup size
else set it automatically
Return True if the popup could be displayed
If is_input is True:
Display a centered popup with the given message and an input field
If size_x and size_y: set the popup size
else set it automatically
Return the input string or None if the field is empty
"""
# Center the popup
sentence_list = message.split('\n')
if size_x is None:
size_x = len(max(sentence_list, key=len)) + 4
# Add space for the input field
if is_input:
size_x += input_size
if size_y is None:
size_y = len(sentence_list) + 4
screen_x = self.screen.getmaxyx()[1]
screen_y = self.screen.getmaxyx()[0]
if size_x > screen_x or size_y > screen_y:
# No size to display the popup => abort
return False
pos_x = int((screen_x - size_x) / 2)
pos_y = int((screen_y - size_y) / 2)
# Create the popup
popup = curses.newwin(size_y, size_x, pos_y, pos_x)
# Fill the popup
popup.border()
# Add the message
for y, m in enumerate(message.split('\n')):
popup.addnstr(2 + y, 2, m, len(m))
if is_input and not WINDOWS:
# Create a subwindow for the text field
subpop = popup.derwin(1, input_size, 2, 2 + len(m))
subpop.attron(self.colors_list['FILTER'])
# Init the field with the current value
if input_value is not None:
subpop.addnstr(0, 0, input_value, len(input_value))
# Display the popup
popup.refresh()
subpop.refresh()
# Create the textbox inside the subwindows
self.set_cursor(2)
self.term_window.keypad(1)
textbox = GlancesTextbox(subpop, insert_mode=False)
textbox.edit()
self.set_cursor(0)
self.term_window.keypad(0)
if textbox.gather() != '':
logger.debug(
"User enters the following string: %s" % textbox.gather())
return textbox.gather()[:-1]
else:
logger.debug("User centers an empty string")
return None
else:
# Display the popup
popup.refresh()
self.wait(duration * 1000)
return True
|
[
"def",
"display_popup",
"(",
"self",
",",
"message",
",",
"size_x",
"=",
"None",
",",
"size_y",
"=",
"None",
",",
"duration",
"=",
"3",
",",
"is_input",
"=",
"False",
",",
"input_size",
"=",
"30",
",",
"input_value",
"=",
"None",
")",
":",
"# Center the popup",
"sentence_list",
"=",
"message",
".",
"split",
"(",
"'\\n'",
")",
"if",
"size_x",
"is",
"None",
":",
"size_x",
"=",
"len",
"(",
"max",
"(",
"sentence_list",
",",
"key",
"=",
"len",
")",
")",
"+",
"4",
"# Add space for the input field",
"if",
"is_input",
":",
"size_x",
"+=",
"input_size",
"if",
"size_y",
"is",
"None",
":",
"size_y",
"=",
"len",
"(",
"sentence_list",
")",
"+",
"4",
"screen_x",
"=",
"self",
".",
"screen",
".",
"getmaxyx",
"(",
")",
"[",
"1",
"]",
"screen_y",
"=",
"self",
".",
"screen",
".",
"getmaxyx",
"(",
")",
"[",
"0",
"]",
"if",
"size_x",
">",
"screen_x",
"or",
"size_y",
">",
"screen_y",
":",
"# No size to display the popup => abord",
"return",
"False",
"pos_x",
"=",
"int",
"(",
"(",
"screen_x",
"-",
"size_x",
")",
"/",
"2",
")",
"pos_y",
"=",
"int",
"(",
"(",
"screen_y",
"-",
"size_y",
")",
"/",
"2",
")",
"# Create the popup",
"popup",
"=",
"curses",
".",
"newwin",
"(",
"size_y",
",",
"size_x",
",",
"pos_y",
",",
"pos_x",
")",
"# Fill the popup",
"popup",
".",
"border",
"(",
")",
"# Add the message",
"for",
"y",
",",
"m",
"in",
"enumerate",
"(",
"message",
".",
"split",
"(",
"'\\n'",
")",
")",
":",
"popup",
".",
"addnstr",
"(",
"2",
"+",
"y",
",",
"2",
",",
"m",
",",
"len",
"(",
"m",
")",
")",
"if",
"is_input",
"and",
"not",
"WINDOWS",
":",
"# Create a subwindow for the text field",
"subpop",
"=",
"popup",
".",
"derwin",
"(",
"1",
",",
"input_size",
",",
"2",
",",
"2",
"+",
"len",
"(",
"m",
")",
")",
"subpop",
".",
"attron",
"(",
"self",
".",
"colors_list",
"[",
"'FILTER'",
"]",
")",
"# Init the field with the current value",
"if",
"input_value",
"is",
"not",
"None",
":",
"subpop",
".",
"addnstr",
"(",
"0",
",",
"0",
",",
"input_value",
",",
"len",
"(",
"input_value",
")",
")",
"# Display the popup",
"popup",
".",
"refresh",
"(",
")",
"subpop",
".",
"refresh",
"(",
")",
"# Create the textbox inside the subwindows",
"self",
".",
"set_cursor",
"(",
"2",
")",
"self",
".",
"term_window",
".",
"keypad",
"(",
"1",
")",
"textbox",
"=",
"GlancesTextbox",
"(",
"subpop",
",",
"insert_mode",
"=",
"False",
")",
"textbox",
".",
"edit",
"(",
")",
"self",
".",
"set_cursor",
"(",
"0",
")",
"self",
".",
"term_window",
".",
"keypad",
"(",
"0",
")",
"if",
"textbox",
".",
"gather",
"(",
")",
"!=",
"''",
":",
"logger",
".",
"debug",
"(",
"\"User enters the following string: %s\"",
"%",
"textbox",
".",
"gather",
"(",
")",
")",
"return",
"textbox",
".",
"gather",
"(",
")",
"[",
":",
"-",
"1",
"]",
"else",
":",
"logger",
".",
"debug",
"(",
"\"User centers an empty string\"",
")",
"return",
"None",
"else",
":",
"# Display the popup",
"popup",
".",
"refresh",
"(",
")",
"self",
".",
"wait",
"(",
"duration",
"*",
"1000",
")",
"return",
"True"
] |
Display a centered popup.
If is_input is False:
Display a centered popup with the given message during duration seconds
If size_x and size_y: set the popup size
else set it automatically
Return True if the popup could be displayed
If is_input is True:
Display a centered popup with the given message and an input field
If size_x and size_y: set the popup size
else set it automatically
Return the input string or None if the field is empty
|
[
"Display",
"a",
"centered",
"popup",
"."
] |
python
|
train
| 36.350649 |
SuperCowPowers/workbench
|
workbench/workers/pcap_http_graph.py
|
https://github.com/SuperCowPowers/workbench/blob/710232756dd717f734253315e3d0b33c9628dafb/workbench/workers/pcap_http_graph.py#L68-L86
|
def http_log_graph(self, stream):
''' Build up a graph (nodes and edges from a Bro http.log) '''
print 'Entering http_log_graph...'
for row in list(stream):
# Skip '-' hosts
if (row['id.orig_h'] == '-'):
continue
# Add the originating host
self.add_node(row['id.orig_h'], row['id.orig_h'], ['host', 'origin'])
# Add the response host and response ip
self.add_node(row['host'], row['host'], ['host'])
self.add_node(row['id.resp_h'], row['id.resp_h'], ['host'])
# Add the http request relationships
self.add_rel(row['id.orig_h'], row['host'], 'http_request')
self.add_rel(row['host'], row['id.resp_h'], 'A')
|
[
"def",
"http_log_graph",
"(",
"self",
",",
"stream",
")",
":",
"print",
"'Entering http_log_graph...'",
"for",
"row",
"in",
"list",
"(",
"stream",
")",
":",
"# Skip '-' hosts",
"if",
"(",
"row",
"[",
"'id.orig_h'",
"]",
"==",
"'-'",
")",
":",
"continue",
"# Add the originating host",
"self",
".",
"add_node",
"(",
"row",
"[",
"'id.orig_h'",
"]",
",",
"row",
"[",
"'id.orig_h'",
"]",
",",
"[",
"'host'",
",",
"'origin'",
"]",
")",
"# Add the response host and reponse ip",
"self",
".",
"add_node",
"(",
"row",
"[",
"'host'",
"]",
",",
"row",
"[",
"'host'",
"]",
",",
"[",
"'host'",
"]",
")",
"self",
".",
"add_node",
"(",
"row",
"[",
"'id.resp_h'",
"]",
",",
"row",
"[",
"'id.resp_h'",
"]",
",",
"[",
"'host'",
"]",
")",
"# Add the http request relationships",
"self",
".",
"add_rel",
"(",
"row",
"[",
"'id.orig_h'",
"]",
",",
"row",
"[",
"'host'",
"]",
",",
"'http_request'",
")",
"self",
".",
"add_rel",
"(",
"row",
"[",
"'host'",
"]",
",",
"row",
"[",
"'id.resp_h'",
"]",
",",
"'A'",
")"
] |
Build up a graph (nodes and edges from a Bro http.log)
|
[
"Build",
"up",
"a",
"graph",
"(",
"nodes",
"and",
"edges",
"from",
"a",
"Bro",
"http",
".",
"log",
")"
] |
python
|
train
| 39.473684 |
Metatab/metapack
|
metapack/index.py
|
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/index.py#L140-L148
|
def update(self,o):
"""Update from another index or index dict"""
self.open()
try:
self._db.update(o._db)
except AttributeError:
self._db.update(o)
|
[
"def",
"update",
"(",
"self",
",",
"o",
")",
":",
"self",
".",
"open",
"(",
")",
"try",
":",
"self",
".",
"_db",
".",
"update",
"(",
"o",
".",
"_db",
")",
"except",
"AttributeError",
":",
"self",
".",
"_db",
".",
"update",
"(",
"o",
")"
] |
Update from another index or index dict
|
[
"Update",
"from",
"another",
"index",
"or",
"index",
"dict"
] |
python
|
train
| 21.888889 |
santoshphilip/eppy
|
eppy/EPlusInterfaceFunctions/mylib2.py
|
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/EPlusInterfaceFunctions/mylib2.py#L307-L310
|
def fsliceafter(astr, sub):
"""Return the slice after at sub in string astr"""
findex = astr.find(sub)
return astr[findex + len(sub):]
|
[
"def",
"fsliceafter",
"(",
"astr",
",",
"sub",
")",
":",
"findex",
"=",
"astr",
".",
"find",
"(",
"sub",
")",
"return",
"astr",
"[",
"findex",
"+",
"len",
"(",
"sub",
")",
":",
"]"
] |
Return the slice after at sub in string astr
|
[
"Return",
"the",
"slice",
"after",
"at",
"sub",
"in",
"string",
"astr"
] |
python
|
train
| 35.75 |
a1ezzz/wasp-general
|
wasp_general/task/dependency.py
|
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/task/dependency.py#L122-L151
|
def dependency_check(self, task_cls, skip_unresolved=False):
""" Check dependency of task for irresolvable conflicts (like task to task mutual dependency)
:param task_cls: task to check
:param skip_unresolved: flag controls this method behaviour for tasks that could not be found. \
When False, method will raise an exception if task tag was set in dependency and the related task \
wasn't found in registry. When True that unresolvable task will be omitted
:return: None
"""
def check(check_task_cls, global_dependencies):
if check_task_cls.__registry_tag__ in global_dependencies:
raise RuntimeError('Recursion dependencies for %s' % task_cls.__registry_tag__)
dependencies = global_dependencies.copy()
dependencies.append(check_task_cls.__registry_tag__)
for dependency in check_task_cls.__dependency__:
dependent_task = self.tasks_by_tag(dependency)
if dependent_task is None and skip_unresolved is False:
raise RuntimeError(
"Task '%s' dependency unresolved (%s)" %
(task_cls.__registry_tag__, dependency)
)
if dependent_task is not None:
check(dependent_task, dependencies)
check(task_cls, [])
|
[
"def",
"dependency_check",
"(",
"self",
",",
"task_cls",
",",
"skip_unresolved",
"=",
"False",
")",
":",
"def",
"check",
"(",
"check_task_cls",
",",
"global_dependencies",
")",
":",
"if",
"check_task_cls",
".",
"__registry_tag__",
"in",
"global_dependencies",
":",
"raise",
"RuntimeError",
"(",
"'Recursion dependencies for %s'",
"%",
"task_cls",
".",
"__registry_tag__",
")",
"dependencies",
"=",
"global_dependencies",
".",
"copy",
"(",
")",
"dependencies",
".",
"append",
"(",
"check_task_cls",
".",
"__registry_tag__",
")",
"for",
"dependency",
"in",
"check_task_cls",
".",
"__dependency__",
":",
"dependent_task",
"=",
"self",
".",
"tasks_by_tag",
"(",
"dependency",
")",
"if",
"dependent_task",
"is",
"None",
"and",
"skip_unresolved",
"is",
"False",
":",
"raise",
"RuntimeError",
"(",
"\"Task '%s' dependency unresolved (%s)\"",
"%",
"(",
"task_cls",
".",
"__registry_tag__",
",",
"dependency",
")",
")",
"if",
"dependent_task",
"is",
"not",
"None",
":",
"check",
"(",
"dependent_task",
",",
"dependencies",
")",
"check",
"(",
"task_cls",
",",
"[",
"]",
")"
] |
Check dependency of task for irresolvable conflicts (like task to task mutual dependency)
:param task_cls: task to check
:param skip_unresolved: flag controls this method behaviour for tasks that could not be found. \
When False, method will raise an exception if task tag was set in dependency and the related task \
wasn't found in registry. When True that unresolvable task will be omitted
:return: None
|
[
"Check",
"dependency",
"of",
"task",
"for",
"irresolvable",
"conflicts",
"(",
"like",
"task",
"to",
"task",
"mutual",
"dependency",
")"
] |
python
|
train
| 38.333333 |
Phyks/libbmc
|
libbmc/tools.py
|
https://github.com/Phyks/libbmc/blob/9ef1a29d2514157d1edd6c13ecbd61b07ae9315e/libbmc/tools.py#L39-L54
|
def map_or_apply(function, param):
"""
Map the function on ``param``, or apply it, depending whether ``param`` \
is a list or an item.
:param function: The function to apply.
:param param: The parameter to feed the function with (list or item).
:returns: The computed value or ``None``.
"""
try:
if isinstance(param, list):
return [next(iter(function(i))) for i in param]
else:
return next(iter(function(param)))
except StopIteration:
return None
|
[
"def",
"map_or_apply",
"(",
"function",
",",
"param",
")",
":",
"try",
":",
"if",
"isinstance",
"(",
"param",
",",
"list",
")",
":",
"return",
"[",
"next",
"(",
"iter",
"(",
"function",
"(",
"i",
")",
")",
")",
"for",
"i",
"in",
"param",
"]",
"else",
":",
"return",
"next",
"(",
"iter",
"(",
"function",
"(",
"param",
")",
")",
")",
"except",
"StopIteration",
":",
"return",
"None"
] |
Map the function on ``param``, or apply it, depending whether ``param`` \
is a list or an item.
:param function: The function to apply.
:param param: The parameter to feed the function with (list or item).
:returns: The computed value or ``None``.
|
[
"Map",
"the",
"function",
"on",
"param",
"or",
"apply",
"it",
"depending",
"whether",
"param",
"\\",
"is",
"a",
"list",
"or",
"an",
"item",
"."
] |
python
|
train
| 32.75 |
Azure/azure-sdk-for-python
|
azure-mgmt-storage/azure/mgmt/storage/storage_management_client.py
|
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-mgmt-storage/azure/mgmt/storage/storage_management_client.py#L108-L144
|
def models(cls, api_version=DEFAULT_API_VERSION):
"""Module depends on the API version:
* 2015-06-15: :mod:`v2015_06_15.models<azure.mgmt.storage.v2015_06_15.models>`
* 2016-01-01: :mod:`v2016_01_01.models<azure.mgmt.storage.v2016_01_01.models>`
* 2016-12-01: :mod:`v2016_12_01.models<azure.mgmt.storage.v2016_12_01.models>`
* 2017-06-01: :mod:`v2017_06_01.models<azure.mgmt.storage.v2017_06_01.models>`
* 2017-10-01: :mod:`v2017_10_01.models<azure.mgmt.storage.v2017_10_01.models>`
* 2018-02-01: :mod:`v2018_02_01.models<azure.mgmt.storage.v2018_02_01.models>`
* 2018-03-01-preview: :mod:`v2018_03_01_preview.models<azure.mgmt.storage.v2018_03_01_preview.models>`
* 2018-07-01: :mod:`v2018_07_01.models<azure.mgmt.storage.v2018_07_01.models>`
"""
if api_version == '2015-06-15':
from .v2015_06_15 import models
return models
elif api_version == '2016-01-01':
from .v2016_01_01 import models
return models
elif api_version == '2016-12-01':
from .v2016_12_01 import models
return models
elif api_version == '2017-06-01':
from .v2017_06_01 import models
return models
elif api_version == '2017-10-01':
from .v2017_10_01 import models
return models
elif api_version == '2018-02-01':
from .v2018_02_01 import models
return models
elif api_version == '2018-03-01-preview':
from .v2018_03_01_preview import models
return models
elif api_version == '2018-07-01':
from .v2018_07_01 import models
return models
raise NotImplementedError("APIVersion {} is not available".format(api_version))
|
[
"def",
"models",
"(",
"cls",
",",
"api_version",
"=",
"DEFAULT_API_VERSION",
")",
":",
"if",
"api_version",
"==",
"'2015-06-15'",
":",
"from",
".",
"v2015_06_15",
"import",
"models",
"return",
"models",
"elif",
"api_version",
"==",
"'2016-01-01'",
":",
"from",
".",
"v2016_01_01",
"import",
"models",
"return",
"models",
"elif",
"api_version",
"==",
"'2016-12-01'",
":",
"from",
".",
"v2016_12_01",
"import",
"models",
"return",
"models",
"elif",
"api_version",
"==",
"'2017-06-01'",
":",
"from",
".",
"v2017_06_01",
"import",
"models",
"return",
"models",
"elif",
"api_version",
"==",
"'2017-10-01'",
":",
"from",
".",
"v2017_10_01",
"import",
"models",
"return",
"models",
"elif",
"api_version",
"==",
"'2018-02-01'",
":",
"from",
".",
"v2018_02_01",
"import",
"models",
"return",
"models",
"elif",
"api_version",
"==",
"'2018-03-01-preview'",
":",
"from",
".",
"v2018_03_01_preview",
"import",
"models",
"return",
"models",
"elif",
"api_version",
"==",
"'2018-07-01'",
":",
"from",
".",
"v2018_07_01",
"import",
"models",
"return",
"models",
"raise",
"NotImplementedError",
"(",
"\"APIVersion {} is not available\"",
".",
"format",
"(",
"api_version",
")",
")"
] |
Module depends on the API version:
* 2015-06-15: :mod:`v2015_06_15.models<azure.mgmt.storage.v2015_06_15.models>`
* 2016-01-01: :mod:`v2016_01_01.models<azure.mgmt.storage.v2016_01_01.models>`
* 2016-12-01: :mod:`v2016_12_01.models<azure.mgmt.storage.v2016_12_01.models>`
* 2017-06-01: :mod:`v2017_06_01.models<azure.mgmt.storage.v2017_06_01.models>`
* 2017-10-01: :mod:`v2017_10_01.models<azure.mgmt.storage.v2017_10_01.models>`
* 2018-02-01: :mod:`v2018_02_01.models<azure.mgmt.storage.v2018_02_01.models>`
* 2018-03-01-preview: :mod:`v2018_03_01_preview.models<azure.mgmt.storage.v2018_03_01_preview.models>`
* 2018-07-01: :mod:`v2018_07_01.models<azure.mgmt.storage.v2018_07_01.models>`
|
[
"Module",
"depends",
"on",
"the",
"API",
"version",
":"
] |
python
|
test
| 49.027027 |
GetmeUK/MongoFrames
|
mongoframes/frames.py
|
https://github.com/GetmeUK/MongoFrames/blob/7d2bd792235dfa77a9deecab5366f5f73480823d/mongoframes/frames.py#L305-L328
|
def insert_many(cls, documents):
"""Insert a list of documents"""
from mongoframes.queries import to_refs
# Ensure all documents have been converted to frames
frames = cls._ensure_frames(documents)
# Send insert signal
signal('insert').send(cls, frames=frames)
# Prepare the documents to be inserted
documents = [to_refs(f._document) for f in frames]
# Bulk insert
ids = cls.get_collection().insert_many(documents).inserted_ids
# Apply the Ids to the frames
for i, id in enumerate(ids):
frames[i]._id = id
# Send inserted signal
signal('inserted').send(cls, frames=frames)
return frames
|
[
"def",
"insert_many",
"(",
"cls",
",",
"documents",
")",
":",
"from",
"mongoframes",
".",
"queries",
"import",
"to_refs",
"# Ensure all documents have been converted to frames",
"frames",
"=",
"cls",
".",
"_ensure_frames",
"(",
"documents",
")",
"# Send insert signal",
"signal",
"(",
"'insert'",
")",
".",
"send",
"(",
"cls",
",",
"frames",
"=",
"frames",
")",
"# Prepare the documents to be inserted",
"documents",
"=",
"[",
"to_refs",
"(",
"f",
".",
"_document",
")",
"for",
"f",
"in",
"frames",
"]",
"# Bulk insert",
"ids",
"=",
"cls",
".",
"get_collection",
"(",
")",
".",
"insert_many",
"(",
"documents",
")",
".",
"inserted_ids",
"# Apply the Ids to the frames",
"for",
"i",
",",
"id",
"in",
"enumerate",
"(",
"ids",
")",
":",
"frames",
"[",
"i",
"]",
".",
"_id",
"=",
"id",
"# Send inserted signal",
"signal",
"(",
"'inserted'",
")",
".",
"send",
"(",
"cls",
",",
"frames",
"=",
"frames",
")",
"return",
"frames"
] |
Insert a list of documents
|
[
"Insert",
"a",
"list",
"of",
"documents"
] |
python
|
train
| 29.25 |
Clinical-Genomics/scout
|
scout/commands/load/variants.py
|
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/commands/load/variants.py#L27-L104
|
def variants(context, case_id, institute, force, cancer, cancer_research, sv,
sv_research, snv, snv_research, str_clinical, chrom, start, end, hgnc_id,
hgnc_symbol, rank_treshold):
"""Upload variants to a case
Note that the files have to be linked with the case,
if they are not, use 'scout update case'.
"""
LOG.info("Running scout load variants")
adapter = context.obj['adapter']
if institute:
case_id = "{0}-{1}".format(institute, case_id)
else:
institute = case_id.split('-')[0]
case_obj = adapter.case(case_id=case_id)
if case_obj is None:
LOG.info("No matching case found")
context.abort()
files = [
{'category': 'cancer', 'variant_type': 'clinical', 'upload': cancer},
{'category': 'cancer', 'variant_type': 'research', 'upload': cancer_research},
{'category': 'sv', 'variant_type': 'clinical', 'upload': sv},
{'category': 'sv', 'variant_type': 'research', 'upload': sv_research},
{'category': 'snv', 'variant_type': 'clinical', 'upload': snv},
{'category': 'snv', 'variant_type': 'research', 'upload': snv_research},
{'category': 'str', 'variant_type': 'clinical', 'upload': str_clinical},
]
gene_obj = None
if (hgnc_id or hgnc_symbol):
if hgnc_id:
gene_obj = adapter.hgnc_gene(hgnc_id)
if hgnc_symbol:
for res in adapter.gene_by_alias(hgnc_symbol):
gene_obj = res
if not gene_obj:
LOG.warning("The gene could not be found")
context.abort()
i = 0
for file_type in files:
variant_type = file_type['variant_type']
category = file_type['category']
if file_type['upload']:
i += 1
if variant_type == 'research':
if not (force or case_obj['research_requested']):
LOG.warn("research not requested, use '--force'")
context.abort()
LOG.info("Delete {0} {1} variants for case {2}".format(
variant_type, category, case_id))
adapter.delete_variants(case_id=case_obj['_id'],
variant_type=variant_type,
category=category)
LOG.info("Load {0} {1} variants for case {2}".format(
variant_type, category, case_id))
try:
adapter.load_variants(
case_obj=case_obj,
variant_type=variant_type,
category=category,
rank_threshold=rank_treshold,
chrom=chrom,
start=start,
end=end,
gene_obj=gene_obj
)
except Exception as e:
LOG.warning(e)
context.abort()
if i == 0:
LOG.info("No files where specified to upload variants from")
|
[
"def",
"variants",
"(",
"context",
",",
"case_id",
",",
"institute",
",",
"force",
",",
"cancer",
",",
"cancer_research",
",",
"sv",
",",
"sv_research",
",",
"snv",
",",
"snv_research",
",",
"str_clinical",
",",
"chrom",
",",
"start",
",",
"end",
",",
"hgnc_id",
",",
"hgnc_symbol",
",",
"rank_treshold",
")",
":",
"LOG",
".",
"info",
"(",
"\"Running scout load variants\"",
")",
"adapter",
"=",
"context",
".",
"obj",
"[",
"'adapter'",
"]",
"if",
"institute",
":",
"case_id",
"=",
"\"{0}-{1}\"",
".",
"format",
"(",
"institute",
",",
"case_id",
")",
"else",
":",
"institute",
"=",
"case_id",
".",
"split",
"(",
"'-'",
")",
"[",
"0",
"]",
"case_obj",
"=",
"adapter",
".",
"case",
"(",
"case_id",
"=",
"case_id",
")",
"if",
"case_obj",
"is",
"None",
":",
"LOG",
".",
"info",
"(",
"\"No matching case found\"",
")",
"context",
".",
"abort",
"(",
")",
"files",
"=",
"[",
"{",
"'category'",
":",
"'cancer'",
",",
"'variant_type'",
":",
"'clinical'",
",",
"'upload'",
":",
"cancer",
"}",
",",
"{",
"'category'",
":",
"'cancer'",
",",
"'variant_type'",
":",
"'research'",
",",
"'upload'",
":",
"cancer_research",
"}",
",",
"{",
"'category'",
":",
"'sv'",
",",
"'variant_type'",
":",
"'clinical'",
",",
"'upload'",
":",
"sv",
"}",
",",
"{",
"'category'",
":",
"'sv'",
",",
"'variant_type'",
":",
"'research'",
",",
"'upload'",
":",
"sv_research",
"}",
",",
"{",
"'category'",
":",
"'snv'",
",",
"'variant_type'",
":",
"'clinical'",
",",
"'upload'",
":",
"snv",
"}",
",",
"{",
"'category'",
":",
"'snv'",
",",
"'variant_type'",
":",
"'research'",
",",
"'upload'",
":",
"snv_research",
"}",
",",
"{",
"'category'",
":",
"'str'",
",",
"'variant_type'",
":",
"'clinical'",
",",
"'upload'",
":",
"str_clinical",
"}",
",",
"]",
"gene_obj",
"=",
"None",
"if",
"(",
"hgnc_id",
"or",
"hgnc_symbol",
")",
":",
"if",
"hgnc_id",
":",
"gene_obj",
"=",
"adapter",
".",
"hgnc_gene",
"(",
"hgnc_id",
")",
"if",
"hgnc_symbol",
":",
"for",
"res",
"in",
"adapter",
".",
"gene_by_alias",
"(",
"hgnc_symbol",
")",
":",
"gene_obj",
"=",
"res",
"if",
"not",
"gene_obj",
":",
"LOG",
".",
"warning",
"(",
"\"The gene could not be found\"",
")",
"context",
".",
"abort",
"(",
")",
"i",
"=",
"0",
"for",
"file_type",
"in",
"files",
":",
"variant_type",
"=",
"file_type",
"[",
"'variant_type'",
"]",
"category",
"=",
"file_type",
"[",
"'category'",
"]",
"if",
"file_type",
"[",
"'upload'",
"]",
":",
"i",
"+=",
"1",
"if",
"variant_type",
"==",
"'research'",
":",
"if",
"not",
"(",
"force",
"or",
"case_obj",
"[",
"'research_requested'",
"]",
")",
":",
"LOG",
".",
"warn",
"(",
"\"research not requested, use '--force'\"",
")",
"context",
".",
"abort",
"(",
")",
"LOG",
".",
"info",
"(",
"\"Delete {0} {1} variants for case {2}\"",
".",
"format",
"(",
"variant_type",
",",
"category",
",",
"case_id",
")",
")",
"adapter",
".",
"delete_variants",
"(",
"case_id",
"=",
"case_obj",
"[",
"'_id'",
"]",
",",
"variant_type",
"=",
"variant_type",
",",
"category",
"=",
"category",
")",
"LOG",
".",
"info",
"(",
"\"Load {0} {1} variants for case {2}\"",
".",
"format",
"(",
"variant_type",
",",
"category",
",",
"case_id",
")",
")",
"try",
":",
"adapter",
".",
"load_variants",
"(",
"case_obj",
"=",
"case_obj",
",",
"variant_type",
"=",
"variant_type",
",",
"category",
"=",
"category",
",",
"rank_threshold",
"=",
"rank_treshold",
",",
"chrom",
"=",
"chrom",
",",
"start",
"=",
"start",
",",
"end",
"=",
"end",
",",
"gene_obj",
"=",
"gene_obj",
")",
"except",
"Exception",
"as",
"e",
":",
"LOG",
".",
"warning",
"(",
"e",
")",
"context",
".",
"abort",
"(",
")",
"if",
"i",
"==",
"0",
":",
"LOG",
".",
"info",
"(",
"\"No files where specified to upload variants from\"",
")"
] |
Upload variants to a case
Note that the files have to be linked with the case,
if they are not, use 'scout update case'.
|
[
"Upload",
"variants",
"to",
"a",
"case"
] |
python
|
test
| 38.487179 |
gears/gears
|
gears/environment.py
|
https://github.com/gears/gears/blob/5729c2525a8c04c185e998bd9a86233708972921/gears/environment.py#L99-L102
|
def register(self, mimetype, processor):
"""Register passed `processor` for passed `mimetype`."""
if mimetype not in self or processor not in self[mimetype]:
self.setdefault(mimetype, []).append(processor)
|
[
"def",
"register",
"(",
"self",
",",
"mimetype",
",",
"processor",
")",
":",
"if",
"mimetype",
"not",
"in",
"self",
"or",
"processor",
"not",
"in",
"self",
"[",
"mimetype",
"]",
":",
"self",
".",
"setdefault",
"(",
"mimetype",
",",
"[",
"]",
")",
".",
"append",
"(",
"processor",
")"
] |
Register passed `processor` for passed `mimetype`.
|
[
"Register",
"passed",
"processor",
"for",
"passed",
"mimetype",
"."
] |
python
|
test
| 57.5 |
TheHive-Project/Cortex-Analyzers
|
analyzers/MaxMind/ipaddr.py
|
https://github.com/TheHive-Project/Cortex-Analyzers/blob/8dae6a8c4cf9af5554ae8c844985c4b44d4bd4bf/analyzers/MaxMind/ipaddr.py#L1462-L1483
|
def _parse_hextet(self, hextet_str):
"""Convert an IPv6 hextet string into an integer.
Args:
hextet_str: A string, the number to parse.
Returns:
The hextet as an integer.
Raises:
ValueError: if the input isn't strictly a hex number from [0..FFFF].
"""
# Whitelist the characters, since int() allows a lot of bizarre stuff.
if not self._HEX_DIGITS.issuperset(hextet_str):
raise ValueError
if len(hextet_str) > 4:
raise ValueError
hextet_int = int(hextet_str, 16)
if hextet_int > 0xFFFF:
raise ValueError
return hextet_int
|
[
"def",
"_parse_hextet",
"(",
"self",
",",
"hextet_str",
")",
":",
"# Whitelist the characters, since int() allows a lot of bizarre stuff.",
"if",
"not",
"self",
".",
"_HEX_DIGITS",
".",
"issuperset",
"(",
"hextet_str",
")",
":",
"raise",
"ValueError",
"if",
"len",
"(",
"hextet_str",
")",
">",
"4",
":",
"raise",
"ValueError",
"hextet_int",
"=",
"int",
"(",
"hextet_str",
",",
"16",
")",
"if",
"hextet_int",
">",
"0xFFFF",
":",
"raise",
"ValueError",
"return",
"hextet_int"
] |
Convert an IPv6 hextet string into an integer.
Args:
hextet_str: A string, the number to parse.
Returns:
The hextet as an integer.
Raises:
ValueError: if the input isn't strictly a hex number from [0..FFFF].
|
[
"Convert",
"an",
"IPv6",
"hextet",
"string",
"into",
"an",
"integer",
"."
] |
python
|
train
| 30.045455 |
tjcsl/cslbot
|
cslbot/commands/help.py
|
https://github.com/tjcsl/cslbot/blob/aebe07be47141f61d7c180706bddfb707f19b2b5/cslbot/commands/help.py#L23-L48
|
def cmd(send, msg, args):
"""Gives help.
Syntax: {command} [command]
"""
cmdchar = args['config']['core']['cmdchar']
if msg:
if msg.startswith(cmdchar):
msg = msg[len(cmdchar):]
if len(msg.split()) > 1:
send("One argument only")
elif not command_registry.is_registered(msg):
send("Not a module.")
else:
doc = command_registry.get_command(msg).get_doc()
if doc is None:
send("No documentation found.")
else:
for line in doc.splitlines():
send(line.format(command=cmdchar + msg), target=args['nick'])
else:
modules = sorted(command_registry.get_enabled_commands())
cmdlist = (' %s' % cmdchar).join(modules)
send('Commands: %s%s' % (cmdchar, cmdlist), target=args['nick'], ignore_length=True)
send('%shelp <command> for more info on a command.' % cmdchar, target=args['nick'])
|
[
"def",
"cmd",
"(",
"send",
",",
"msg",
",",
"args",
")",
":",
"cmdchar",
"=",
"args",
"[",
"'config'",
"]",
"[",
"'core'",
"]",
"[",
"'cmdchar'",
"]",
"if",
"msg",
":",
"if",
"msg",
".",
"startswith",
"(",
"cmdchar",
")",
":",
"msg",
"=",
"msg",
"[",
"len",
"(",
"cmdchar",
")",
":",
"]",
"if",
"len",
"(",
"msg",
".",
"split",
"(",
")",
")",
">",
"1",
":",
"send",
"(",
"\"One argument only\"",
")",
"elif",
"not",
"command_registry",
".",
"is_registered",
"(",
"msg",
")",
":",
"send",
"(",
"\"Not a module.\"",
")",
"else",
":",
"doc",
"=",
"command_registry",
".",
"get_command",
"(",
"msg",
")",
".",
"get_doc",
"(",
")",
"if",
"doc",
"is",
"None",
":",
"send",
"(",
"\"No documentation found.\"",
")",
"else",
":",
"for",
"line",
"in",
"doc",
".",
"splitlines",
"(",
")",
":",
"send",
"(",
"line",
".",
"format",
"(",
"command",
"=",
"cmdchar",
"+",
"msg",
")",
",",
"target",
"=",
"args",
"[",
"'nick'",
"]",
")",
"else",
":",
"modules",
"=",
"sorted",
"(",
"command_registry",
".",
"get_enabled_commands",
"(",
")",
")",
"cmdlist",
"=",
"(",
"' %s'",
"%",
"cmdchar",
")",
".",
"join",
"(",
"modules",
")",
"send",
"(",
"'Commands: %s%s'",
"%",
"(",
"cmdchar",
",",
"cmdlist",
")",
",",
"target",
"=",
"args",
"[",
"'nick'",
"]",
",",
"ignore_length",
"=",
"True",
")",
"send",
"(",
"'%shelp <command> for more info on a command.'",
"%",
"cmdchar",
",",
"target",
"=",
"args",
"[",
"'nick'",
"]",
")"
] |
Gives help.
Syntax: {command} [command]
|
[
"Gives",
"help",
"."
] |
python
|
train
| 37 |
bukun/TorCMS
|
torcms/model/evaluation_model.py
|
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/model/evaluation_model.py#L38-L54
|
def add_or_update(user_id, app_id, value):
'''
Editing evaluation.
'''
rec = MEvaluation.get_by_signature(user_id, app_id)
if rec:
entry = TabEvaluation.update(
value=value,
).where(TabEvaluation.uid == rec.uid)
entry.execute()
else:
TabEvaluation.create(
uid=tools.get_uuid(),
user_id=user_id,
post_id=app_id,
value=value,
)
|
[
"def",
"add_or_update",
"(",
"user_id",
",",
"app_id",
",",
"value",
")",
":",
"rec",
"=",
"MEvaluation",
".",
"get_by_signature",
"(",
"user_id",
",",
"app_id",
")",
"if",
"rec",
":",
"entry",
"=",
"TabEvaluation",
".",
"update",
"(",
"value",
"=",
"value",
",",
")",
".",
"where",
"(",
"TabEvaluation",
".",
"uid",
"==",
"rec",
".",
"uid",
")",
"entry",
".",
"execute",
"(",
")",
"else",
":",
"TabEvaluation",
".",
"create",
"(",
"uid",
"=",
"tools",
".",
"get_uuid",
"(",
")",
",",
"user_id",
"=",
"user_id",
",",
"post_id",
"=",
"app_id",
",",
"value",
"=",
"value",
",",
")"
] |
Editing evaluation.
|
[
"Editing",
"evaluation",
"."
] |
python
|
train
| 29.235294 |
capitalone/giraffez
|
giraffez/export.py
|
https://github.com/capitalone/giraffez/blob/6b4d27eb1a1eaf188c6885c7364ef27e92b1b957/giraffez/export.py#L204-L219
|
def to_str(self, delimiter='|', null='NULL'):
"""
Sets the current encoder output to Python `str` and returns
a row iterator.
:param str null: The string representation of null values
:param str delimiter: The string delimiting values in the output
string
:rtype: iterator (yields ``str``)
"""
self.export.set_null(null)
self.export.set_delimiter(delimiter)
self.options("delimiter", escape_string(delimiter), 2)
self.options("null", null, 3)
return self._fetchall(ENCODER_SETTINGS_STRING, coerce_floats=False)
|
[
"def",
"to_str",
"(",
"self",
",",
"delimiter",
"=",
"'|'",
",",
"null",
"=",
"'NULL'",
")",
":",
"self",
".",
"export",
".",
"set_null",
"(",
"null",
")",
"self",
".",
"export",
".",
"set_delimiter",
"(",
"delimiter",
")",
"self",
".",
"options",
"(",
"\"delimiter\"",
",",
"escape_string",
"(",
"delimiter",
")",
",",
"2",
")",
"self",
".",
"options",
"(",
"\"null\"",
",",
"null",
",",
"3",
")",
"return",
"self",
".",
"_fetchall",
"(",
"ENCODER_SETTINGS_STRING",
",",
"coerce_floats",
"=",
"False",
")"
] |
Sets the current encoder output to Python `str` and returns
a row iterator.
:param str null: The string representation of null values
:param str delimiter: The string delimiting values in the output
string
:rtype: iterator (yields ``str``)
|
[
"Sets",
"the",
"current",
"encoder",
"output",
"to",
"Python",
"str",
"and",
"returns",
"a",
"row",
"iterator",
"."
] |
python
|
test
| 37.8125 |
paperhive/ansible-ec2-inventory
|
ansible_ec2_inventory/ec2inventory.py
|
https://github.com/paperhive/ansible-ec2-inventory/blob/6a13f9de61c089a7b13bce494adceb7507971059/ansible_ec2_inventory/ec2inventory.py#L810-L895
|
def add_elasticache_node(self, node, cluster, region):
''' Adds an ElastiCache node to the inventory and index, as long as
it is addressable '''
# Only want available nodes unless all_elasticache_nodes is True
if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available':
return
# Select the best destination address
dest = node['Endpoint']['Address']
if not dest:
# Skip nodes we cannot address (e.g. private VPC subnet)
return
node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId'])
# Add to index
self.index[dest] = [region, node_id]
# Inventory: Group by node ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[node_id] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', node_id)
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
# Inventory: Group by node type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + cluster['CacheNodeType'])
self.push(self.inventory, type_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by VPC (information not available in the current
# AWS API version for ElastiCache)
# Inventory: Group by security group
if self.group_by_security_group:
# Check for the existence of the 'SecurityGroups' key and also if
# this key has some value. When the cluster is not placed in a SG
# the query can return None here and cause an error.
if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
for security_group in cluster['SecurityGroups']:
key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
# Inventory: Group by engine
if self.group_by_elasticache_engine:
self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine']))
# Inventory: Group by parameter group (done at cluster level)
# Inventory: Group by replication group (done at cluster level)
# Inventory: Group by ElastiCache Cluster
if self.group_by_elasticache_cluster:
self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest)
# Global Tag: all ElastiCache nodes
self.push(self.inventory, 'elasticache_nodes', dest)
host_info = self.get_host_info_dict_from_describe_dict(node)
if dest in self.inventory["_meta"]["hostvars"]:
self.inventory["_meta"]["hostvars"][dest].update(host_info)
else:
self.inventory["_meta"]["hostvars"][dest] = host_info
|
[
"def",
"add_elasticache_node",
"(",
"self",
",",
"node",
",",
"cluster",
",",
"region",
")",
":",
"# Only want available nodes unless all_elasticache_nodes is True",
"if",
"not",
"self",
".",
"all_elasticache_nodes",
"and",
"node",
"[",
"'CacheNodeStatus'",
"]",
"!=",
"'available'",
":",
"return",
"# Select the best destination address",
"dest",
"=",
"node",
"[",
"'Endpoint'",
"]",
"[",
"'Address'",
"]",
"if",
"not",
"dest",
":",
"# Skip nodes we cannot address (e.g. private VPC subnet)",
"return",
"node_id",
"=",
"self",
".",
"to_safe",
"(",
"cluster",
"[",
"'CacheClusterId'",
"]",
"+",
"'_'",
"+",
"node",
"[",
"'CacheNodeId'",
"]",
")",
"# Add to index",
"self",
".",
"index",
"[",
"dest",
"]",
"=",
"[",
"region",
",",
"node_id",
"]",
"# Inventory: Group by node ID (always a group of 1)",
"if",
"self",
".",
"group_by_instance_id",
":",
"self",
".",
"inventory",
"[",
"node_id",
"]",
"=",
"[",
"dest",
"]",
"if",
"self",
".",
"nested_groups",
":",
"self",
".",
"push_group",
"(",
"self",
".",
"inventory",
",",
"'instances'",
",",
"node_id",
")",
"# Inventory: Group by region",
"if",
"self",
".",
"group_by_region",
":",
"self",
".",
"push",
"(",
"self",
".",
"inventory",
",",
"region",
",",
"dest",
")",
"if",
"self",
".",
"nested_groups",
":",
"self",
".",
"push_group",
"(",
"self",
".",
"inventory",
",",
"'regions'",
",",
"region",
")",
"# Inventory: Group by availability zone",
"if",
"self",
".",
"group_by_availability_zone",
":",
"self",
".",
"push",
"(",
"self",
".",
"inventory",
",",
"cluster",
"[",
"'PreferredAvailabilityZone'",
"]",
",",
"dest",
")",
"if",
"self",
".",
"nested_groups",
":",
"if",
"self",
".",
"group_by_region",
":",
"self",
".",
"push_group",
"(",
"self",
".",
"inventory",
",",
"region",
",",
"cluster",
"[",
"'PreferredAvailabilityZone'",
"]",
")",
"self",
".",
"push_group",
"(",
"self",
".",
"inventory",
",",
"'zones'",
",",
"cluster",
"[",
"'PreferredAvailabilityZone'",
"]",
")",
"# Inventory: Group by node type",
"if",
"self",
".",
"group_by_instance_type",
":",
"type_name",
"=",
"self",
".",
"to_safe",
"(",
"'type_'",
"+",
"cluster",
"[",
"'CacheNodeType'",
"]",
")",
"self",
".",
"push",
"(",
"self",
".",
"inventory",
",",
"type_name",
",",
"dest",
")",
"if",
"self",
".",
"nested_groups",
":",
"self",
".",
"push_group",
"(",
"self",
".",
"inventory",
",",
"'types'",
",",
"type_name",
")",
"# Inventory: Group by VPC (information not available in the current",
"# AWS API version for ElastiCache)",
"# Inventory: Group by security group",
"if",
"self",
".",
"group_by_security_group",
":",
"# Check for the existence of the 'SecurityGroups' key and also if",
"# this key has some value. When the cluster is not placed in a SG",
"# the query can return None here and cause an error.",
"if",
"'SecurityGroups'",
"in",
"cluster",
"and",
"cluster",
"[",
"'SecurityGroups'",
"]",
"is",
"not",
"None",
":",
"for",
"security_group",
"in",
"cluster",
"[",
"'SecurityGroups'",
"]",
":",
"key",
"=",
"self",
".",
"to_safe",
"(",
"\"security_group_\"",
"+",
"security_group",
"[",
"'SecurityGroupId'",
"]",
")",
"self",
".",
"push",
"(",
"self",
".",
"inventory",
",",
"key",
",",
"dest",
")",
"if",
"self",
".",
"nested_groups",
":",
"self",
".",
"push_group",
"(",
"self",
".",
"inventory",
",",
"'security_groups'",
",",
"key",
")",
"# Inventory: Group by engine",
"if",
"self",
".",
"group_by_elasticache_engine",
":",
"self",
".",
"push",
"(",
"self",
".",
"inventory",
",",
"self",
".",
"to_safe",
"(",
"\"elasticache_\"",
"+",
"cluster",
"[",
"'Engine'",
"]",
")",
",",
"dest",
")",
"if",
"self",
".",
"nested_groups",
":",
"self",
".",
"push_group",
"(",
"self",
".",
"inventory",
",",
"'elasticache_engines'",
",",
"self",
".",
"to_safe",
"(",
"\"elasticache_\"",
"+",
"cluster",
"[",
"'Engine'",
"]",
")",
")",
"# Inventory: Group by parameter group (done at cluster level)",
"# Inventory: Group by replication group (done at cluster level)",
"# Inventory: Group by ElastiCache Cluster",
"if",
"self",
".",
"group_by_elasticache_cluster",
":",
"self",
".",
"push",
"(",
"self",
".",
"inventory",
",",
"self",
".",
"to_safe",
"(",
"\"elasticache_cluster_\"",
"+",
"cluster",
"[",
"'CacheClusterId'",
"]",
")",
",",
"dest",
")",
"# Global Tag: all ElastiCache nodes",
"self",
".",
"push",
"(",
"self",
".",
"inventory",
",",
"'elasticache_nodes'",
",",
"dest",
")",
"host_info",
"=",
"self",
".",
"get_host_info_dict_from_describe_dict",
"(",
"node",
")",
"if",
"dest",
"in",
"self",
".",
"inventory",
"[",
"\"_meta\"",
"]",
"[",
"\"hostvars\"",
"]",
":",
"self",
".",
"inventory",
"[",
"\"_meta\"",
"]",
"[",
"\"hostvars\"",
"]",
"[",
"dest",
"]",
".",
"update",
"(",
"host_info",
")",
"else",
":",
"self",
".",
"inventory",
"[",
"\"_meta\"",
"]",
"[",
"\"hostvars\"",
"]",
"[",
"dest",
"]",
"=",
"host_info"
] |
Adds an ElastiCache node to the inventory and index, as long as
it is addressable
|
[
"Adds",
"an",
"ElastiCache",
"node",
"to",
"the",
"inventory",
"and",
"index",
"as",
"long",
"as",
"it",
"is",
"addressable"
] |
python
|
train
| 43.755814 |
sentinel-hub/sentinelhub-py
|
sentinelhub/opensearch.py
|
https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/opensearch.py#L97-L113
|
def get_area_dates(bbox, date_interval, maxcc=None):
""" Get list of times of existing images from specified area and time range
:param bbox: bounding box of requested area
:type bbox: geometry.BBox
:param date_interval: a pair of time strings in ISO8601 format
:type date_interval: tuple(str)
:param maxcc: filter images by maximum percentage of cloud coverage
:type maxcc: float in range [0, 1] or None
:return: list of time strings in ISO8601 format
:rtype: list[datetime.datetime]
"""
area_info = get_area_info(bbox, date_interval, maxcc=maxcc)
return sorted({datetime.datetime.strptime(tile_info['properties']['startDate'].strip('Z'),
'%Y-%m-%dT%H:%M:%S')
for tile_info in area_info})
|
[
"def",
"get_area_dates",
"(",
"bbox",
",",
"date_interval",
",",
"maxcc",
"=",
"None",
")",
":",
"area_info",
"=",
"get_area_info",
"(",
"bbox",
",",
"date_interval",
",",
"maxcc",
"=",
"maxcc",
")",
"return",
"sorted",
"(",
"{",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"tile_info",
"[",
"'properties'",
"]",
"[",
"'startDate'",
"]",
".",
"strip",
"(",
"'Z'",
")",
",",
"'%Y-%m-%dT%H:%M:%S'",
")",
"for",
"tile_info",
"in",
"area_info",
"}",
")"
] |
Get list of times of existing images from specified area and time range
:param bbox: bounding box of requested area
:type bbox: geometry.BBox
:param date_interval: a pair of time strings in ISO8601 format
:type date_interval: tuple(str)
:param maxcc: filter images by maximum percentage of cloud coverage
:type maxcc: float in range [0, 1] or None
:return: list of time strings in ISO8601 format
:rtype: list[datetime.datetime]
|
[
"Get",
"list",
"of",
"times",
"of",
"existing",
"images",
"from",
"specified",
"area",
"and",
"time",
"range"
] |
python
|
train
| 46.352941 |
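A hedged usage sketch of the helper above; the import paths follow this module's location, while the bounding-box coordinates, the date window, and network access to the public opensearch endpoint are assumptions:

```python
# Hypothetical call: list acquisition dates for a small WGS84 bounding box
# during December 2017, keeping only scenes with at most 20% cloud cover.
from sentinelhub import BBox, CRS
from sentinelhub.opensearch import get_area_dates

bbox = BBox(bbox=[46.16, -16.15, 46.51, -15.58], crs=CRS.WGS84)
dates = get_area_dates(bbox, ('2017-12-01', '2017-12-31'), maxcc=0.2)
print(len(dates), 'acquisitions; first:', dates[0] if dates else None)
```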
StellarCN/py-stellar-base
|
stellar_base/horizon.py
|
https://github.com/StellarCN/py-stellar-base/blob/cce2e782064fb3955c85e1696e630d67b1010848/stellar_base/horizon.py#L352-L376
|
def assets(self, asset_code=None, asset_issuer=None, cursor=None, order='asc', limit=10):
"""This endpoint represents all assets. It will give you all the assets
in the system along with various statistics about each.
See the documentation below for details on query parameters that are
available.
`GET /assets{?asset_code,asset_issuer,cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/assets-all.html>`_
:param str asset_code: Code of the Asset to filter by.
:param str asset_issuer: Issuer of the Asset to filter by.
:param int cursor: A paging token, specifying where to start returning records from.
:param str order: The order in which to return rows, "asc" or "desc",
ordered by asset_code then by asset_issuer.
:param int limit: Maximum number of records to return.
:return: A list of all valid payment operations
:rtype: dict
"""
endpoint = '/assets'
params = self.__query_params(asset_code=asset_code, asset_issuer=asset_issuer, cursor=cursor, order=order,
limit=limit)
return self.query(endpoint, params)
|
[
"def",
"assets",
"(",
"self",
",",
"asset_code",
"=",
"None",
",",
"asset_issuer",
"=",
"None",
",",
"cursor",
"=",
"None",
",",
"order",
"=",
"'asc'",
",",
"limit",
"=",
"10",
")",
":",
"endpoint",
"=",
"'/assets'",
"params",
"=",
"self",
".",
"__query_params",
"(",
"asset_code",
"=",
"asset_code",
",",
"asset_issuer",
"=",
"asset_issuer",
",",
"cursor",
"=",
"cursor",
",",
"order",
"=",
"order",
",",
"limit",
"=",
"limit",
")",
"return",
"self",
".",
"query",
"(",
"endpoint",
",",
"params",
")"
] |
This endpoint represents all assets. It will give you all the assets
in the system along with various statistics about each.
See the documentation below for details on query parameters that are
available.
`GET /assets{?asset_code,asset_issuer,cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/assets-all.html>`_
:param str asset_code: Code of the Asset to filter by.
:param str asset_issuer: Issuer of the Asset to filter by.
:param int cursor: A paging token, specifying where to start returning records from.
:param str order: The order in which to return rows, "asc" or "desc",
ordered by asset_code then by asset_issuer.
:param int limit: Maximum number of records to return.
:return: A list of all valid payment operations
:rtype: dict
|
[
"This",
"endpoint",
"represents",
"all",
"assets",
".",
"It",
"will",
"give",
"you",
"all",
"the",
"assets",
"in",
"the",
"system",
"along",
"with",
"various",
"statistics",
"about",
"each",
"."
] |
python
|
train
| 48.56 |
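The method above is a thin wrapper over Horizon's documented `/assets` endpoint; a sketch of the equivalent raw request against the public horizon.stellar.org instance (the host, the HAL `_embedded.records` response layout, and network access are assumptions, not part of this client):

```python
# Query the public /assets endpoint directly; the parameters mirror the
# keyword arguments of assets() above.
import requests

resp = requests.get(
    'https://horizon.stellar.org/assets',
    params={'asset_code': 'USD', 'order': 'asc', 'limit': 5},
    timeout=10,
)
resp.raise_for_status()
for record in resp.json()['_embedded']['records']:
    print(record['asset_code'], record['asset_issuer'])
```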
horazont/aioxmpp
|
aioxmpp/stream.py
|
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/stream.py#L2733-L2765
|
def message_handler(stream, type_, from_, cb):
"""
Context manager to temporarily register a callback to handle messages on a
:class:`StanzaStream`.
:param stream: Stanza stream to register the coroutine at
:type stream: :class:`StanzaStream`
:param type_: Message type to listen for, or :data:`None` for a wildcard
match.
:type type_: :class:`~.MessageType` or :data:`None`
:param from_: Sender JID to listen for, or :data:`None` for a wildcard
match.
:type from_: :class:`~aioxmpp.JID` or :data:`None`
:param cb: Callback to register
The callback is registered when the context is entered and unregistered
when the context is exited.
.. versionadded:: 0.8
"""
stream.register_message_callback(
type_,
from_,
cb,
)
try:
yield
finally:
stream.unregister_message_callback(
type_,
from_,
)
|
[
"def",
"message_handler",
"(",
"stream",
",",
"type_",
",",
"from_",
",",
"cb",
")",
":",
"stream",
".",
"register_message_callback",
"(",
"type_",
",",
"from_",
",",
"cb",
",",
")",
"try",
":",
"yield",
"finally",
":",
"stream",
".",
"unregister_message_callback",
"(",
"type_",
",",
"from_",
",",
")"
] |
Context manager to temporarily register a callback to handle messages on a
:class:`StanzaStream`.
:param stream: Stanza stream to register the coroutine at
:type stream: :class:`StanzaStream`
:param type_: Message type to listen for, or :data:`None` for a wildcard
match.
:type type_: :class:`~.MessageType` or :data:`None`
:param from_: Sender JID to listen for, or :data:`None` for a wildcard
match.
:type from_: :class:`~aioxmpp.JID` or :data:`None`
:param cb: Callback to register
The callback is registered when the context is entered and unregistered
when the context is exited.
.. versionadded:: 0.8
|
[
"Context",
"manager",
"to",
"temporarily",
"register",
"a",
"callback",
"to",
"handle",
"messages",
"on",
"a",
":",
"class",
":",
"StanzaStream",
"."
] |
python
|
train
| 28.454545 |
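The register/try/finally-unregister shape above is an ordinary generator-based context manager. A self-contained toy analogue (the `ToyStream` class is a stand-in, not aioxmpp's `StanzaStream`):

```python
import contextlib

class ToyStream:
    """Stand-in for StanzaStream: only tracks registered callbacks."""
    def __init__(self):
        self.callbacks = {}

    def register_message_callback(self, type_, from_, cb):
        self.callbacks[(type_, from_)] = cb

    def unregister_message_callback(self, type_, from_):
        del self.callbacks[(type_, from_)]

@contextlib.contextmanager
def message_handler(stream, type_, from_, cb):
    # Register on entry, always unregister on exit.
    stream.register_message_callback(type_, from_, cb)
    try:
        yield
    finally:
        stream.unregister_message_callback(type_, from_)

stream = ToyStream()
with message_handler(stream, 'chat', None, print):
    assert ('chat', None) in stream.callbacks
assert not stream.callbacks  # unregistered once the block exits
```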
richardkiss/pycoin
|
pycoin/key/electrum.py
|
https://github.com/richardkiss/pycoin/blob/1e8d0d9fe20ce0347b97847bb529cd1bd84c7442/pycoin/key/electrum.py#L11-L20
|
def initial_key_to_master_key(initial_key):
"""
initial_key:
a hex string of length 32
"""
b = initial_key.encode("utf8")
orig_input = b
for i in range(100000):
b = hashlib.sha256(b + orig_input).digest()
return from_bytes_32(b)
|
[
"def",
"initial_key_to_master_key",
"(",
"initial_key",
")",
":",
"b",
"=",
"initial_key",
".",
"encode",
"(",
"\"utf8\"",
")",
"orig_input",
"=",
"b",
"for",
"i",
"in",
"range",
"(",
"100000",
")",
":",
"b",
"=",
"hashlib",
".",
"sha256",
"(",
"b",
"+",
"orig_input",
")",
".",
"digest",
"(",
")",
"return",
"from_bytes_32",
"(",
"b",
")"
] |
initial_key:
a hex string of length 32
|
[
"initial_key",
":",
"a",
"hex",
"string",
"of",
"length",
"32"
] |
python
|
train
| 26.3 |
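The loop above is plain iterated SHA-256 key stretching. A self-contained sketch of the same computation, with `int.from_bytes(..., 'big')` standing in for `from_bytes_32` (that equivalence is an assumption):

```python
import hashlib

def stretch_seed(initial_key, rounds=100000):
    """Electrum-style stretching: repeatedly hash (state + original seed)."""
    seed = initial_key.encode('utf8')
    state = seed
    for _ in range(rounds):
        state = hashlib.sha256(state + seed).digest()
    return int.from_bytes(state, 'big')  # stand-in for from_bytes_32

# The docstring above expects a 32-character hex string.
print(hex(stretch_seed('00000000000000000000000000000001')))
```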
boriel/zxbasic
|
zxblex.py
|
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/zxblex.py#L274-L281
|
def t_string_NGRAPH(t):
r"\\[ '.:][ '.:]"
global __STRING
P = {' ': 0, "'": 2, '.': 8, ':': 10}
N = {' ': 0, "'": 1, '.': 4, ':': 5}
__STRING += chr(128 + P[t.value[1]] + N[t.value[2]])
|
[
"def",
"t_string_NGRAPH",
"(",
"t",
")",
":",
"global",
"__STRING",
"P",
"=",
"{",
"' '",
":",
"0",
",",
"\"'\"",
":",
"2",
",",
"'.'",
":",
"8",
",",
"':'",
":",
"10",
"}",
"N",
"=",
"{",
"' '",
":",
"0",
",",
"\"'\"",
":",
"1",
",",
"'.'",
":",
"4",
",",
"':'",
":",
"5",
"}",
"__STRING",
"+=",
"chr",
"(",
"128",
"+",
"P",
"[",
"t",
".",
"value",
"[",
"1",
"]",
"]",
"+",
"N",
"[",
"t",
".",
"value",
"[",
"2",
"]",
"]",
")"
] |
r"\\[ '.:][ '.:]
|
[
"r",
"\\\\",
"[",
".",
":",
"]",
"[",
".",
":",
"]"
] |
python
|
train
| 25 |
cidles/pressagio
|
src/pressagio/dbconnector.py
|
https://github.com/cidles/pressagio/blob/2b3b89ae82316b929244e4c63e393682b2a57e57/src/pressagio/dbconnector.py#L488-L522
|
def create_index(self, cardinality):
"""
Create an index for the table with the given cardinality.
Parameters
----------
cardinality : int
            The cardinality to create an index for.
"""
DatabaseConnector.create_index(self, cardinality)
query = "CREATE INDEX idx_{0}_gram_varchar ON _{0}_gram(word varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
if self.lowercase:
for i in reversed(range(cardinality)):
if i != 0:
query = "CREATE INDEX idx_{0}_gram_{1}_lower ON _{0}_gram(LOWER(word_{1}));".format(cardinality, i)
self.execute_sql(query)
if self.normalize:
query = "CREATE INDEX idx_{0}_gram_lower_normalized_varchar ON _{0}_gram(NORMALIZE(LOWER(word)) varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
else:
query = "CREATE INDEX idx_{0}_gram_lower_varchar ON _{0}_gram(LOWER(word) varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
elif self.normalize:
query = "CREATE INDEX idx_{0}_gram_normalized_varchar ON _{0}_gram(NORMALIZE(word) varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
|
[
"def",
"create_index",
"(",
"self",
",",
"cardinality",
")",
":",
"DatabaseConnector",
".",
"create_index",
"(",
"self",
",",
"cardinality",
")",
"query",
"=",
"\"CREATE INDEX idx_{0}_gram_varchar ON _{0}_gram(word varchar_pattern_ops);\"",
".",
"format",
"(",
"cardinality",
")",
"self",
".",
"execute_sql",
"(",
"query",
")",
"if",
"self",
".",
"lowercase",
":",
"for",
"i",
"in",
"reversed",
"(",
"range",
"(",
"cardinality",
")",
")",
":",
"if",
"i",
"!=",
"0",
":",
"query",
"=",
"\"CREATE INDEX idx_{0}_gram_{1}_lower ON _{0}_gram(LOWER(word_{1}));\"",
".",
"format",
"(",
"cardinality",
",",
"i",
")",
"self",
".",
"execute_sql",
"(",
"query",
")",
"if",
"self",
".",
"normalize",
":",
"query",
"=",
"\"CREATE INDEX idx_{0}_gram_lower_normalized_varchar ON _{0}_gram(NORMALIZE(LOWER(word)) varchar_pattern_ops);\"",
".",
"format",
"(",
"cardinality",
")",
"self",
".",
"execute_sql",
"(",
"query",
")",
"else",
":",
"query",
"=",
"\"CREATE INDEX idx_{0}_gram_lower_varchar ON _{0}_gram(LOWER(word) varchar_pattern_ops);\"",
".",
"format",
"(",
"cardinality",
")",
"self",
".",
"execute_sql",
"(",
"query",
")",
"elif",
"self",
".",
"normalize",
":",
"query",
"=",
"\"CREATE INDEX idx_{0}_gram_normalized_varchar ON _{0}_gram(NORMALIZE(word) varchar_pattern_ops);\"",
".",
"format",
"(",
"cardinality",
")",
"self",
".",
"execute_sql",
"(",
"query",
")"
] |
Create an index for the table with the given cardinality.
Parameters
----------
cardinality : int
    The cardinality to create an index for.
|
[
"Create",
"an",
"index",
"for",
"the",
"table",
"with",
"the",
"given",
"cardinality",
"."
] |
python
|
train
| 37.371429 |
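The DDL templates above are easier to read expanded. A small sketch that prints, rather than executes, the statements the `str.format` calls produce for a trigram table (only the unconditional index plus the per-column LOWER() indexes are shown):

```python
# Expand the index-name templates for cardinality = 3 without touching a database.
cardinality = 3
statements = [
    "CREATE INDEX idx_{0}_gram_varchar ON _{0}_gram(word varchar_pattern_ops);".format(cardinality)
]
for i in reversed(range(cardinality)):
    if i != 0:
        statements.append(
            "CREATE INDEX idx_{0}_gram_{1}_lower ON _{0}_gram(LOWER(word_{1}));".format(cardinality, i))

for stmt in statements:
    print(stmt)
```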
Open-ET/openet-core-beta
|
openet/core/interp.py
|
https://github.com/Open-ET/openet-core-beta/blob/f2b81ccf87bf7e7fe1b9f3dd1d4081d0ec7852db/openet/core/interp.py#L161-L227
|
def aggregate_daily(image_coll, start_date=None, end_date=None,
agg_type='mean'):
"""Aggregate images by day without using joins
The primary purpose of this function is to join separate Landsat images
from the same path into a single daily image.
Parameters
----------
image_coll : ee.ImageCollection
Input image collection.
start_date : date, number, string, optional
Start date.
Needs to be an EE readable date (i.e. ISO Date string or milliseconds).
end_date : date, number, string, optional
Exclusive end date.
Needs to be an EE readable date (i.e. ISO Date string or milliseconds).
agg_type : {'mean'}, optional
Aggregation type (the default is 'mean').
Currently only a 'mean' aggregation type is supported.
Returns
-------
ee.ImageCollection()
Notes
-----
This function should be used to mosaic Landsat images from same path
but different rows.
system:time_start of returned images will be 0 UTC (not the image time).
"""
if start_date and end_date:
test_coll = image_coll.filterDate(ee.Date(start_date), ee.Date(end_date))
elif start_date:
test_coll = image_coll.filter(ee.Filter.greaterThanOrEquals(
'system:time_start', ee.Date(start_date).millis()))
elif end_date:
test_coll = image_coll.filter(ee.Filter.lessThan(
'system:time_start', ee.Date(end_date).millis()))
else:
test_coll = image_coll
# Build a list of dates in the image_coll
def get_date(time):
return ee.Date(ee.Number(time)).format('yyyy-MM-dd')
date_list = ee.List(test_coll.aggregate_array('system:time_start'))\
.map(get_date).distinct().sort()
def aggregate_func(date_str):
start_date = ee.Date(ee.String(date_str))
end_date = start_date.advance(1, 'day')
agg_coll = image_coll.filterDate(start_date, end_date)
# if agg_type.lower() == 'mean':
agg_img = agg_coll.mean()
# elif agg_type.lower() == 'median':
# agg_img = agg_coll.median()
return agg_img.set({
'system:index': start_date.format('yyyyMMdd'),
'system:time_start': start_date.millis(),
'date': start_date.format('yyyy-MM-dd'),
})
return ee.ImageCollection(date_list.map(aggregate_func))
|
[
"def",
"aggregate_daily",
"(",
"image_coll",
",",
"start_date",
"=",
"None",
",",
"end_date",
"=",
"None",
",",
"agg_type",
"=",
"'mean'",
")",
":",
"if",
"start_date",
"and",
"end_date",
":",
"test_coll",
"=",
"image_coll",
".",
"filterDate",
"(",
"ee",
".",
"Date",
"(",
"start_date",
")",
",",
"ee",
".",
"Date",
"(",
"end_date",
")",
")",
"elif",
"start_date",
":",
"test_coll",
"=",
"image_coll",
".",
"filter",
"(",
"ee",
".",
"Filter",
".",
"greaterThanOrEquals",
"(",
"'system:time_start'",
",",
"ee",
".",
"Date",
"(",
"start_date",
")",
".",
"millis",
"(",
")",
")",
")",
"elif",
"end_date",
":",
"test_coll",
"=",
"image_coll",
".",
"filter",
"(",
"ee",
".",
"Filter",
".",
"lessThan",
"(",
"'system:time_start'",
",",
"ee",
".",
"Date",
"(",
"end_date",
")",
".",
"millis",
"(",
")",
")",
")",
"else",
":",
"test_coll",
"=",
"image_coll",
"# Build a list of dates in the image_coll",
"def",
"get_date",
"(",
"time",
")",
":",
"return",
"ee",
".",
"Date",
"(",
"ee",
".",
"Number",
"(",
"time",
")",
")",
".",
"format",
"(",
"'yyyy-MM-dd'",
")",
"date_list",
"=",
"ee",
".",
"List",
"(",
"test_coll",
".",
"aggregate_array",
"(",
"'system:time_start'",
")",
")",
".",
"map",
"(",
"get_date",
")",
".",
"distinct",
"(",
")",
".",
"sort",
"(",
")",
"def",
"aggregate_func",
"(",
"date_str",
")",
":",
"start_date",
"=",
"ee",
".",
"Date",
"(",
"ee",
".",
"String",
"(",
"date_str",
")",
")",
"end_date",
"=",
"start_date",
".",
"advance",
"(",
"1",
",",
"'day'",
")",
"agg_coll",
"=",
"image_coll",
".",
"filterDate",
"(",
"start_date",
",",
"end_date",
")",
"# if agg_type.lower() == 'mean':",
"agg_img",
"=",
"agg_coll",
".",
"mean",
"(",
")",
"# elif agg_type.lower() == 'median':",
"# agg_img = agg_coll.median()",
"return",
"agg_img",
".",
"set",
"(",
"{",
"'system:index'",
":",
"start_date",
".",
"format",
"(",
"'yyyyMMdd'",
")",
",",
"'system:time_start'",
":",
"start_date",
".",
"millis",
"(",
")",
",",
"'date'",
":",
"start_date",
".",
"format",
"(",
"'yyyy-MM-dd'",
")",
",",
"}",
")",
"return",
"ee",
".",
"ImageCollection",
"(",
"date_list",
".",
"map",
"(",
"aggregate_func",
")",
")"
] |
Aggregate images by day without using joins
The primary purpose of this function is to join separate Landsat images
from the same path into a single daily image.
Parameters
----------
image_coll : ee.ImageCollection
Input image collection.
start_date : date, number, string, optional
Start date.
Needs to be an EE readable date (i.e. ISO Date string or milliseconds).
end_date : date, number, string, optional
Exclusive end date.
Needs to be an EE readable date (i.e. ISO Date string or milliseconds).
agg_type : {'mean'}, optional
Aggregation type (the default is 'mean').
Currently only a 'mean' aggregation type is supported.
Returns
-------
ee.ImageCollection()
Notes
-----
This function should be used to mosaic Landsat images from same path
but different rows.
system:time_start of returned images will be 0 UTC (not the image time).
|
[
"Aggregate",
"images",
"by",
"day",
"without",
"using",
"joins"
] |
python
|
train
| 34.985075 |
andrenarchy/krypy
|
krypy/recycling/factories.py
|
https://github.com/andrenarchy/krypy/blob/4883ec9a61d64ea56489e15c35cc40f0633ab2f1/krypy/recycling/factories.py#L53-L136
|
def _get_best_subset(self, ritz):
'''Return candidate set with smallest goal functional.'''
# (c,\omega(c)) for all considered subsets c
overall_evaluations = {}
def evaluate(_subset, _evaluations):
try:
_evaluations[_subset] = \
self.subset_evaluator.evaluate(ritz, _subset)
except utils.AssumptionError:
# no evaluation possible -> move on
pass
# I in algo
current_subset = frozenset()
# evaluate empty set
evaluate(current_subset, overall_evaluations)
while True:
# get a list of subset candidates for inclusion in current_subset
# (S in algo)
remaining_subset = set(range(len(ritz.values))) \
.difference(current_subset)
subsets = self.subsets_generator.generate(ritz, remaining_subset)
# no more candidates to check?
if len(subsets) == 0:
break
# evaluate candidates
evaluations = {}
for subset in subsets:
eval_subset = current_subset.union(subset)
evaluate(eval_subset, evaluations)
if len(evaluations) > 0:
current_subset = min(evaluations, key=evaluations.get)
else:
# fallback: pick the subset with smallest residual
# note: only a bound is used if the subset consists of more
# than one index.
resnorms = [numpy.sum(ritz.resnorms[list(subset)])
for subset in subsets]
subset = subsets[numpy.argmin(resnorms)]
current_subset = current_subset.union(subset)
overall_evaluations.update(evaluations)
if len(overall_evaluations) > 0:
            # if there was a successful evaluation: pick the best one
selection = list(min(overall_evaluations,
key=overall_evaluations.get))
else:
# otherwise: return empty list
selection = []
# debug output requested?
if self.print_results == 'number':
print('# of selected deflation vectors: {0}'
.format(len(selection)))
elif self.print_results == 'values':
print('{0} Ritz values corresponding to selected deflation '
.format(len(selection)) + 'vectors: '
+ (', '.join([str(el) for el in ritz.values[selection]])))
elif self.print_results == 'timings':
import operator
print('Timings for all successfully evaluated choices of '
'deflation vectors with corresponding Ritz values:')
for subset, time in sorted(overall_evaluations.items(),
key=operator.itemgetter(1)):
print(' {0}s: '.format(time)
+ ', '.join([str(el)
for el in ritz.values[list(subset)]]))
elif self.print_results is None:
pass
else:
raise utils.ArgumentError(
'Invalid value `{0}` for argument `print_result`. '
.format(self.print_results)
+ 'Valid are `None`, `number`, `values` and `timings`.')
return selection
|
[
"def",
"_get_best_subset",
"(",
"self",
",",
"ritz",
")",
":",
"# (c,\\omega(c)) for all considered subsets c",
"overall_evaluations",
"=",
"{",
"}",
"def",
"evaluate",
"(",
"_subset",
",",
"_evaluations",
")",
":",
"try",
":",
"_evaluations",
"[",
"_subset",
"]",
"=",
"self",
".",
"subset_evaluator",
".",
"evaluate",
"(",
"ritz",
",",
"_subset",
")",
"except",
"utils",
".",
"AssumptionError",
":",
"# no evaluation possible -> move on",
"pass",
"# I in algo",
"current_subset",
"=",
"frozenset",
"(",
")",
"# evaluate empty set",
"evaluate",
"(",
"current_subset",
",",
"overall_evaluations",
")",
"while",
"True",
":",
"# get a list of subset candidates for inclusion in current_subset",
"# (S in algo)",
"remaining_subset",
"=",
"set",
"(",
"range",
"(",
"len",
"(",
"ritz",
".",
"values",
")",
")",
")",
".",
"difference",
"(",
"current_subset",
")",
"subsets",
"=",
"self",
".",
"subsets_generator",
".",
"generate",
"(",
"ritz",
",",
"remaining_subset",
")",
"# no more candidates to check?",
"if",
"len",
"(",
"subsets",
")",
"==",
"0",
":",
"break",
"# evaluate candidates",
"evaluations",
"=",
"{",
"}",
"for",
"subset",
"in",
"subsets",
":",
"eval_subset",
"=",
"current_subset",
".",
"union",
"(",
"subset",
")",
"evaluate",
"(",
"eval_subset",
",",
"evaluations",
")",
"if",
"len",
"(",
"evaluations",
")",
">",
"0",
":",
"current_subset",
"=",
"min",
"(",
"evaluations",
",",
"key",
"=",
"evaluations",
".",
"get",
")",
"else",
":",
"# fallback: pick the subset with smallest residual",
"# note: only a bound is used if the subset consists of more",
"# than one index.",
"resnorms",
"=",
"[",
"numpy",
".",
"sum",
"(",
"ritz",
".",
"resnorms",
"[",
"list",
"(",
"subset",
")",
"]",
")",
"for",
"subset",
"in",
"subsets",
"]",
"subset",
"=",
"subsets",
"[",
"numpy",
".",
"argmin",
"(",
"resnorms",
")",
"]",
"current_subset",
"=",
"current_subset",
".",
"union",
"(",
"subset",
")",
"overall_evaluations",
".",
"update",
"(",
"evaluations",
")",
"if",
"len",
"(",
"overall_evaluations",
")",
">",
"0",
":",
"# if there was a successfull evaluation: pick the best one",
"selection",
"=",
"list",
"(",
"min",
"(",
"overall_evaluations",
",",
"key",
"=",
"overall_evaluations",
".",
"get",
")",
")",
"else",
":",
"# otherwise: return empty list",
"selection",
"=",
"[",
"]",
"# debug output requested?",
"if",
"self",
".",
"print_results",
"==",
"'number'",
":",
"print",
"(",
"'# of selected deflation vectors: {0}'",
".",
"format",
"(",
"len",
"(",
"selection",
")",
")",
")",
"elif",
"self",
".",
"print_results",
"==",
"'values'",
":",
"print",
"(",
"'{0} Ritz values corresponding to selected deflation '",
".",
"format",
"(",
"len",
"(",
"selection",
")",
")",
"+",
"'vectors: '",
"+",
"(",
"', '",
".",
"join",
"(",
"[",
"str",
"(",
"el",
")",
"for",
"el",
"in",
"ritz",
".",
"values",
"[",
"selection",
"]",
"]",
")",
")",
")",
"elif",
"self",
".",
"print_results",
"==",
"'timings'",
":",
"import",
"operator",
"print",
"(",
"'Timings for all successfully evaluated choices of '",
"'deflation vectors with corresponding Ritz values:'",
")",
"for",
"subset",
",",
"time",
"in",
"sorted",
"(",
"overall_evaluations",
".",
"items",
"(",
")",
",",
"key",
"=",
"operator",
".",
"itemgetter",
"(",
"1",
")",
")",
":",
"print",
"(",
"' {0}s: '",
".",
"format",
"(",
"time",
")",
"+",
"', '",
".",
"join",
"(",
"[",
"str",
"(",
"el",
")",
"for",
"el",
"in",
"ritz",
".",
"values",
"[",
"list",
"(",
"subset",
")",
"]",
"]",
")",
")",
"elif",
"self",
".",
"print_results",
"is",
"None",
":",
"pass",
"else",
":",
"raise",
"utils",
".",
"ArgumentError",
"(",
"'Invalid value `{0}` for argument `print_result`. '",
".",
"format",
"(",
"self",
".",
"print_results",
")",
"+",
"'Valid are `None`, `number`, `values` and `timings`.'",
")",
"return",
"selection"
] |
Return candidate set with smallest goal functional.
|
[
"Return",
"candidate",
"set",
"with",
"smallest",
"goal",
"functional",
"."
] |
python
|
train
| 39.52381 |
github/octodns
|
octodns/manager.py
|
https://github.com/github/octodns/blob/65ee60491e22e6bb0a2aa08f7069c6ecf6c3fee6/octodns/manager.py#L360-L387
|
def dump(self, zone, output_dir, lenient, split, source, *sources):
'''
Dump zone data from the specified source
'''
self.log.info('dump: zone=%s, sources=%s', zone, sources)
# We broke out source to force at least one to be passed, add it to any
# others we got.
sources = [source] + list(sources)
try:
sources = [self.providers[s] for s in sources]
except KeyError as e:
raise Exception('Unknown source: {}'.format(e.args[0]))
clz = YamlProvider
if split:
clz = SplitYamlProvider
target = clz('dump', output_dir)
zone = Zone(zone, self.configured_sub_zones(zone))
for source in sources:
source.populate(zone, lenient=lenient)
plan = target.plan(zone)
if plan is None:
plan = Plan(zone, zone, [], False)
target.apply(plan)
|
[
"def",
"dump",
"(",
"self",
",",
"zone",
",",
"output_dir",
",",
"lenient",
",",
"split",
",",
"source",
",",
"*",
"sources",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"'dump: zone=%s, sources=%s'",
",",
"zone",
",",
"sources",
")",
"# We broke out source to force at least one to be passed, add it to any",
"# others we got.",
"sources",
"=",
"[",
"source",
"]",
"+",
"list",
"(",
"sources",
")",
"try",
":",
"sources",
"=",
"[",
"self",
".",
"providers",
"[",
"s",
"]",
"for",
"s",
"in",
"sources",
"]",
"except",
"KeyError",
"as",
"e",
":",
"raise",
"Exception",
"(",
"'Unknown source: {}'",
".",
"format",
"(",
"e",
".",
"args",
"[",
"0",
"]",
")",
")",
"clz",
"=",
"YamlProvider",
"if",
"split",
":",
"clz",
"=",
"SplitYamlProvider",
"target",
"=",
"clz",
"(",
"'dump'",
",",
"output_dir",
")",
"zone",
"=",
"Zone",
"(",
"zone",
",",
"self",
".",
"configured_sub_zones",
"(",
"zone",
")",
")",
"for",
"source",
"in",
"sources",
":",
"source",
".",
"populate",
"(",
"zone",
",",
"lenient",
"=",
"lenient",
")",
"plan",
"=",
"target",
".",
"plan",
"(",
"zone",
")",
"if",
"plan",
"is",
"None",
":",
"plan",
"=",
"Plan",
"(",
"zone",
",",
"zone",
",",
"[",
"]",
",",
"False",
")",
"target",
".",
"apply",
"(",
"plan",
")"
] |
Dump zone data from the specified source
|
[
"Dump",
"zone",
"data",
"from",
"the",
"specified",
"source"
] |
python
|
train
| 32.035714 |
jobovy/galpy
|
galpy/actionAngle/actionAngleStaeckel.py
|
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/actionAngle/actionAngleStaeckel.py#L609-L651
|
def JR(self,**kwargs):
"""
NAME:
JR
PURPOSE:
Calculate the radial action
INPUT:
fixed_quad= (False) if True, use n=10 fixed_quad
+scipy.integrate.quad keywords
OUTPUT:
J_R(R,vT,vT)/ro/vc + estimate of the error (nan for fixed_quad)
HISTORY:
2012-11-27 - Written - Bovy (IAS)
"""
if hasattr(self,'_JR'): #pragma: no cover
return self._JR
umin, umax= self.calcUminUmax()
#print self._ux, self._pux, (umax-umin)/umax
if (umax-umin)/umax < 10.**-6: return nu.array([0.])
order= kwargs.pop('order',10)
if kwargs.pop('fixed_quad',False):
# factor in next line bc integrand=/2delta^2
self._JR= 1./nu.pi*nu.sqrt(2.)*self._delta\
*integrate.fixed_quad(_JRStaeckelIntegrand,
umin,umax,
args=(self._E,self._Lz,self._I3U,
self._delta,
self._u0,self._sinhu0**2.,
self._vx,self._sinvx**2.,
self._potu0v0,self._pot),
n=order,
**kwargs)[0]
else:
self._JR= 1./nu.pi*nu.sqrt(2.)*self._delta\
*integrate.quad(_JRStaeckelIntegrand,
umin,umax,
args=(self._E,self._Lz,self._I3U,
self._delta,
self._u0,self._sinhu0**2.,
self._vx,self._sinvx**2.,
self._potu0v0,self._pot),
**kwargs)[0]
return self._JR
|
[
"def",
"JR",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"'_JR'",
")",
":",
"#pragma: no cover",
"return",
"self",
".",
"_JR",
"umin",
",",
"umax",
"=",
"self",
".",
"calcUminUmax",
"(",
")",
"#print self._ux, self._pux, (umax-umin)/umax",
"if",
"(",
"umax",
"-",
"umin",
")",
"/",
"umax",
"<",
"10.",
"**",
"-",
"6",
":",
"return",
"nu",
".",
"array",
"(",
"[",
"0.",
"]",
")",
"order",
"=",
"kwargs",
".",
"pop",
"(",
"'order'",
",",
"10",
")",
"if",
"kwargs",
".",
"pop",
"(",
"'fixed_quad'",
",",
"False",
")",
":",
"# factor in next line bc integrand=/2delta^2",
"self",
".",
"_JR",
"=",
"1.",
"/",
"nu",
".",
"pi",
"*",
"nu",
".",
"sqrt",
"(",
"2.",
")",
"*",
"self",
".",
"_delta",
"*",
"integrate",
".",
"fixed_quad",
"(",
"_JRStaeckelIntegrand",
",",
"umin",
",",
"umax",
",",
"args",
"=",
"(",
"self",
".",
"_E",
",",
"self",
".",
"_Lz",
",",
"self",
".",
"_I3U",
",",
"self",
".",
"_delta",
",",
"self",
".",
"_u0",
",",
"self",
".",
"_sinhu0",
"**",
"2.",
",",
"self",
".",
"_vx",
",",
"self",
".",
"_sinvx",
"**",
"2.",
",",
"self",
".",
"_potu0v0",
",",
"self",
".",
"_pot",
")",
",",
"n",
"=",
"order",
",",
"*",
"*",
"kwargs",
")",
"[",
"0",
"]",
"else",
":",
"self",
".",
"_JR",
"=",
"1.",
"/",
"nu",
".",
"pi",
"*",
"nu",
".",
"sqrt",
"(",
"2.",
")",
"*",
"self",
".",
"_delta",
"*",
"integrate",
".",
"quad",
"(",
"_JRStaeckelIntegrand",
",",
"umin",
",",
"umax",
",",
"args",
"=",
"(",
"self",
".",
"_E",
",",
"self",
".",
"_Lz",
",",
"self",
".",
"_I3U",
",",
"self",
".",
"_delta",
",",
"self",
".",
"_u0",
",",
"self",
".",
"_sinhu0",
"**",
"2.",
",",
"self",
".",
"_vx",
",",
"self",
".",
"_sinvx",
"**",
"2.",
",",
"self",
".",
"_potu0v0",
",",
"self",
".",
"_pot",
")",
",",
"*",
"*",
"kwargs",
")",
"[",
"0",
"]",
"return",
"self",
".",
"_JR"
] |
NAME:
JR
PURPOSE:
Calculate the radial action
INPUT:
fixed_quad= (False) if True, use n=10 fixed_quad
+scipy.integrate.quad keywords
OUTPUT:
J_R(R,vT,vT)/ro/vc + estimate of the error (nan for fixed_quad)
HISTORY:
2012-11-27 - Written - Bovy (IAS)
|
[
"NAME",
":",
"JR",
"PURPOSE",
":",
"Calculate",
"the",
"radial",
"action",
"INPUT",
":",
"fixed_quad",
"=",
"(",
"False",
")",
"if",
"True",
"use",
"n",
"=",
"10",
"fixed_quad",
"+",
"scipy",
".",
"integrate",
".",
"quad",
"keywords",
"OUTPUT",
":",
"J_R",
"(",
"R",
"vT",
"vT",
")",
"/",
"ro",
"/",
"vc",
"+",
"estimate",
"of",
"the",
"error",
"(",
"nan",
"for",
"fixed_quad",
")",
"HISTORY",
":",
"2012",
"-",
"11",
"-",
"27",
"-",
"Written",
"-",
"Bovy",
"(",
"IAS",
")"
] |
python
|
train
| 43.651163 |
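End users normally reach this radial-action integral through galpy's `actionAngleStaeckel` front end rather than calling `JR` directly; a hedged sketch (the `delta` value and the phase-space point below are arbitrary choices, not taken from this record):

```python
# Compute (J_R, L_z, J_z) for one orbit in MWPotential2014, natural units.
from galpy.potential import MWPotential2014
from galpy.actionAngle import actionAngleStaeckel

aAS = actionAngleStaeckel(pot=MWPotential2014, delta=0.45)
jr, lz, jz = aAS(1.0, 0.1, 1.1, 0.0, 0.02)  # R, vR, vT, z, vz
print(jr, lz, jz)
```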
log2timeline/dftimewolf
|
dftimewolf/lib/collectors/grr_hunt.py
|
https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/collectors/grr_hunt.py#L145-L160
|
def process(self):
"""Construct and start a new File hunt.
Returns:
The newly created GRR hunt object.
Raises:
RuntimeError: if no items specified for collection.
"""
print('Hunt to collect {0:d} items'.format(len(self.file_path_list)))
print('Files to be collected: {0!s}'.format(self.file_path_list))
hunt_action = flows_pb2.FileFinderAction(
action_type=flows_pb2.FileFinderAction.DOWNLOAD)
hunt_args = flows_pb2.FileFinderArgs(
paths=self.file_path_list, action=hunt_action)
return self._create_hunt('FileFinder', hunt_args)
|
[
"def",
"process",
"(",
"self",
")",
":",
"print",
"(",
"'Hunt to collect {0:d} items'",
".",
"format",
"(",
"len",
"(",
"self",
".",
"file_path_list",
")",
")",
")",
"print",
"(",
"'Files to be collected: {0!s}'",
".",
"format",
"(",
"self",
".",
"file_path_list",
")",
")",
"hunt_action",
"=",
"flows_pb2",
".",
"FileFinderAction",
"(",
"action_type",
"=",
"flows_pb2",
".",
"FileFinderAction",
".",
"DOWNLOAD",
")",
"hunt_args",
"=",
"flows_pb2",
".",
"FileFinderArgs",
"(",
"paths",
"=",
"self",
".",
"file_path_list",
",",
"action",
"=",
"hunt_action",
")",
"return",
"self",
".",
"_create_hunt",
"(",
"'FileFinder'",
",",
"hunt_args",
")"
] |
Construct and start a new File hunt.
Returns:
The newly created GRR hunt object.
Raises:
RuntimeError: if no items specified for collection.
|
[
"Construct",
"and",
"start",
"a",
"new",
"File",
"hunt",
"."
] |
python
|
train
| 36.1875 |
tensorflow/tensorboard
|
tensorboard/plugins/image/images_plugin.py
|
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/image/images_plugin.py#L266-L318
|
def _get_individual_image(self, run, tag, index, sample):
"""
Returns the actual image bytes for a given image.
Args:
run: The name of the run the image belongs to.
tag: The name of the tag the images belongs to.
index: The index of the image in the current reservoir.
sample: The zero-indexed sample of the image to retrieve (for example,
setting `sample` to `2` will fetch the third image sample at `step`).
Returns:
A bytestring of the raw image bytes.
"""
if self._db_connection_provider:
db = self._db_connection_provider()
cursor = db.execute(
'''
SELECT data
FROM TensorStrings
WHERE
/* Skip first 2 elements which are width and height. */
idx = 2 + :sample
AND tensor_rowid = (
SELECT rowid
FROM Tensors
WHERE
series = (
SELECT tag_id
FROM Runs
CROSS JOIN Tags USING (run_id)
WHERE
Runs.run_name = :run
AND Tags.tag_name = :tag)
AND step IS NOT NULL
AND dtype = :dtype
/* Should be n-vector, n >= 3: [width, height, samples...] */
AND (NOT INSTR(shape, ',') AND CAST (shape AS INT) >= 3)
ORDER BY step
LIMIT 1
OFFSET :index)
''',
{'run': run,
'tag': tag,
'sample': sample,
'index': index,
'dtype': tf.string.as_datatype_enum})
(data,) = cursor.fetchone()
return six.binary_type(data)
events = self._filter_by_sample(self._multiplexer.Tensors(run, tag), sample)
images = events[index].tensor_proto.string_val[2:] # skip width, height
return images[sample]
|
[
"def",
"_get_individual_image",
"(",
"self",
",",
"run",
",",
"tag",
",",
"index",
",",
"sample",
")",
":",
"if",
"self",
".",
"_db_connection_provider",
":",
"db",
"=",
"self",
".",
"_db_connection_provider",
"(",
")",
"cursor",
"=",
"db",
".",
"execute",
"(",
"'''\n SELECT data\n FROM TensorStrings\n WHERE\n /* Skip first 2 elements which are width and height. */\n idx = 2 + :sample\n AND tensor_rowid = (\n SELECT rowid\n FROM Tensors\n WHERE\n series = (\n SELECT tag_id\n FROM Runs\n CROSS JOIN Tags USING (run_id)\n WHERE\n Runs.run_name = :run\n AND Tags.tag_name = :tag)\n AND step IS NOT NULL\n AND dtype = :dtype\n /* Should be n-vector, n >= 3: [width, height, samples...] */\n AND (NOT INSTR(shape, ',') AND CAST (shape AS INT) >= 3)\n ORDER BY step\n LIMIT 1\n OFFSET :index)\n '''",
",",
"{",
"'run'",
":",
"run",
",",
"'tag'",
":",
"tag",
",",
"'sample'",
":",
"sample",
",",
"'index'",
":",
"index",
",",
"'dtype'",
":",
"tf",
".",
"string",
".",
"as_datatype_enum",
"}",
")",
"(",
"data",
",",
")",
"=",
"cursor",
".",
"fetchone",
"(",
")",
"return",
"six",
".",
"binary_type",
"(",
"data",
")",
"events",
"=",
"self",
".",
"_filter_by_sample",
"(",
"self",
".",
"_multiplexer",
".",
"Tensors",
"(",
"run",
",",
"tag",
")",
",",
"sample",
")",
"images",
"=",
"events",
"[",
"index",
"]",
".",
"tensor_proto",
".",
"string_val",
"[",
"2",
":",
"]",
"# skip width, height",
"return",
"images",
"[",
"sample",
"]"
] |
Returns the actual image bytes for a given image.
Args:
run: The name of the run the image belongs to.
tag: The name of the tag the images belongs to.
index: The index of the image in the current reservoir.
sample: The zero-indexed sample of the image to retrieve (for example,
setting `sample` to `2` will fetch the third image sample at `step`).
Returns:
A bytestring of the raw image bytes.
|
[
"Returns",
"the",
"actual",
"image",
"bytes",
"for",
"a",
"given",
"image",
"."
] |
python
|
train
| 34.603774 |
treycucco/pyebnf
|
pyebnf/primitive.py
|
https://github.com/treycucco/pyebnf/blob/3634ddabbe5d73508bcc20f4a591f86a46634e1d/pyebnf/primitive.py#L140-L160
|
def trimmed(self, pred=trimmed_pred_default):
"""Trim a ParseTree.
A node is trimmed if pred(node) returns True.
"""
new_children = []
for child in self.children:
if isinstance(child, ParseNode):
new_child = child.trimmed(pred)
else:
new_child = child
if not pred(new_child, self):
new_children.append(new_child)
return ParseNode(self.node_type,
children=new_children,
consumed=self.consumed,
position=self.position,
ignored=self.ignored)
|
[
"def",
"trimmed",
"(",
"self",
",",
"pred",
"=",
"trimmed_pred_default",
")",
":",
"new_children",
"=",
"[",
"]",
"for",
"child",
"in",
"self",
".",
"children",
":",
"if",
"isinstance",
"(",
"child",
",",
"ParseNode",
")",
":",
"new_child",
"=",
"child",
".",
"trimmed",
"(",
"pred",
")",
"else",
":",
"new_child",
"=",
"child",
"if",
"not",
"pred",
"(",
"new_child",
",",
"self",
")",
":",
"new_children",
".",
"append",
"(",
"new_child",
")",
"return",
"ParseNode",
"(",
"self",
".",
"node_type",
",",
"children",
"=",
"new_children",
",",
"consumed",
"=",
"self",
".",
"consumed",
",",
"position",
"=",
"self",
".",
"position",
",",
"ignored",
"=",
"self",
".",
"ignored",
")"
] |
Trim a ParseTree.
A node is trimmed if pred(node) returns True.
|
[
"Trim",
"a",
"ParseTree",
"."
] |
python
|
test
| 27.238095 |
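A self-contained toy analogue of the recursive trim above, with a simplified node class in place of pyebnf's `ParseNode` (the extra constructor fields are dropped for brevity):

```python
class Node:
    """Minimal stand-in for ParseNode: a name plus child nodes."""
    def __init__(self, name, children=()):
        self.name = name
        self.children = list(children)

    def trimmed(self, pred):
        kept = []
        for child in self.children:
            # Recurse first, then let pred decide whether the child survives.
            new_child = child.trimmed(pred) if isinstance(child, Node) else child
            if not pred(new_child, self):
                kept.append(new_child)
        return Node(self.name, kept)

tree = Node('root', [Node('keep'), Node('drop'), Node('branch', [Node('drop')])])
slim = tree.trimmed(lambda node, parent: node.name == 'drop')
print([c.name for c in slim.children])              # ['keep', 'branch']
print([c.name for c in slim.children[1].children])  # []
```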
manns/pyspread
|
pyspread/src/gui/_grid_cell_editor.py
|
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_grid_cell_editor.py#L264-L288
|
def StartingKey(self, evt):
"""
If the editor is enabled by pressing keys on the grid, this will be
called to let the editor do something about that first key if desired.
"""
key = evt.GetKeyCode()
ch = None
if key in [
wx.WXK_NUMPAD0, wx.WXK_NUMPAD1, wx.WXK_NUMPAD2, wx.WXK_NUMPAD3,
wx.WXK_NUMPAD4, wx.WXK_NUMPAD5, wx.WXK_NUMPAD6, wx.WXK_NUMPAD7,
wx.WXK_NUMPAD8, wx.WXK_NUMPAD9]:
            ch = chr(ord('0') + key - wx.WXK_NUMPAD0)
elif key < 256 and key >= 0 and chr(key) in string.printable:
ch = chr(key)
if ch is not None and self._tc.IsEnabled():
# For this example, replace the text. Normally we would append it.
#self._tc.AppendText(ch)
self._tc.SetValue(ch)
self._tc.SetInsertionPointEnd()
else:
evt.Skip()
|
[
"def",
"StartingKey",
"(",
"self",
",",
"evt",
")",
":",
"key",
"=",
"evt",
".",
"GetKeyCode",
"(",
")",
"ch",
"=",
"None",
"if",
"key",
"in",
"[",
"wx",
".",
"WXK_NUMPAD0",
",",
"wx",
".",
"WXK_NUMPAD1",
",",
"wx",
".",
"WXK_NUMPAD2",
",",
"wx",
".",
"WXK_NUMPAD3",
",",
"wx",
".",
"WXK_NUMPAD4",
",",
"wx",
".",
"WXK_NUMPAD5",
",",
"wx",
".",
"WXK_NUMPAD6",
",",
"wx",
".",
"WXK_NUMPAD7",
",",
"wx",
".",
"WXK_NUMPAD8",
",",
"wx",
".",
"WXK_NUMPAD9",
"]",
":",
"ch",
"=",
"ch",
"=",
"chr",
"(",
"ord",
"(",
"'0'",
")",
"+",
"key",
"-",
"wx",
".",
"WXK_NUMPAD0",
")",
"elif",
"key",
"<",
"256",
"and",
"key",
">=",
"0",
"and",
"chr",
"(",
"key",
")",
"in",
"string",
".",
"printable",
":",
"ch",
"=",
"chr",
"(",
"key",
")",
"if",
"ch",
"is",
"not",
"None",
"and",
"self",
".",
"_tc",
".",
"IsEnabled",
"(",
")",
":",
"# For this example, replace the text. Normally we would append it.",
"#self._tc.AppendText(ch)",
"self",
".",
"_tc",
".",
"SetValue",
"(",
"ch",
")",
"self",
".",
"_tc",
".",
"SetInsertionPointEnd",
"(",
")",
"else",
":",
"evt",
".",
"Skip",
"(",
")"
] |
If the editor is enabled by pressing keys on the grid, this will be
called to let the editor do something about that first key if desired.
|
[
"If",
"the",
"editor",
"is",
"enabled",
"by",
"pressing",
"keys",
"on",
"the",
"grid",
"this",
"will",
"be",
"called",
"to",
"let",
"the",
"editor",
"do",
"something",
"about",
"that",
"first",
"key",
"if",
"desired",
"."
] |
python
|
train
| 36.12 |
calmjs/calmjs.parse
|
src/calmjs/parse/parsers/es5.py
|
https://github.com/calmjs/calmjs.parse/blob/369f0ee346c5a84c4d5c35a7733a0e63b02eac59/src/calmjs/parse/parsers/es5.py#L1241-L1250
|
def p_iteration_statement_6(self, p):
"""
iteration_statement \
: FOR LPAREN VAR identifier initializer_noin IN expr RPAREN statement
"""
vardecl = self.asttypes.VarDeclNoIn(
identifier=p[4], initializer=p[5])
vardecl.setpos(p, 3)
p[0] = self.asttypes.ForIn(item=vardecl, iterable=p[7], statement=p[9])
p[0].setpos(p)
|
[
"def",
"p_iteration_statement_6",
"(",
"self",
",",
"p",
")",
":",
"vardecl",
"=",
"self",
".",
"asttypes",
".",
"VarDeclNoIn",
"(",
"identifier",
"=",
"p",
"[",
"4",
"]",
",",
"initializer",
"=",
"p",
"[",
"5",
"]",
")",
"vardecl",
".",
"setpos",
"(",
"p",
",",
"3",
")",
"p",
"[",
"0",
"]",
"=",
"self",
".",
"asttypes",
".",
"ForIn",
"(",
"item",
"=",
"vardecl",
",",
"iterable",
"=",
"p",
"[",
"7",
"]",
",",
"statement",
"=",
"p",
"[",
"9",
"]",
")",
"p",
"[",
"0",
"]",
".",
"setpos",
"(",
"p",
")"
] |
iteration_statement \
: FOR LPAREN VAR identifier initializer_noin IN expr RPAREN statement
|
[
"iteration_statement",
"\\",
":",
"FOR",
"LPAREN",
"VAR",
"identifier",
"initializer_noin",
"IN",
"expr",
"RPAREN",
"statement"
] |
python
|
train
| 38.6 |
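This production covers the `for (var x in expr) statement` form. A hedged usage sketch through the package-level parser (assuming `calmjs.parse.es5` is the public entry point that drives this grammar):

```python
# Parse a for-in statement; the grammar rule above builds the ForIn node.
from calmjs.parse import es5

program = es5('for (var key in obj) { total += obj[key]; }')
print(type(program).__name__)  # expected: ES5Program
```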
loli/medpy
|
medpy/metric/binary.py
|
https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/metric/binary.py#L456-L563
|
def asd(result, reference, voxelspacing=None, connectivity=1):
"""
Average surface distance metric.
Computes the average surface distance (ASD) between the binary objects in two images.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
voxelspacing : float or sequence of floats, optional
The voxelspacing in a distance unit i.e. spacing of elements
along each dimension. If a sequence, must be of length equal to
the input rank; if a single number, this is used for all axes. If
not specified, a grid spacing of unity is implied.
connectivity : int
The neighbourhood/connectivity considered when determining the surface
of the binary objects. This value is passed to
`scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`.
The decision on the connectivity is important, as it can influence the results
strongly. If in doubt, leave it as it is.
Returns
-------
asd : float
The average surface distance between the object(s) in ``result`` and the
object(s) in ``reference``. The distance unit is the same as for the spacing
of elements along each dimension, which is usually given in mm.
See also
--------
:func:`assd`
:func:`hd`
Notes
-----
This is not a real metric, as it is directed. See `assd` for a real metric of this.
The method is implemented making use of distance images and simple binary morphology
to achieve high computational speed.
Examples
--------
The `connectivity` determines what pixels/voxels are considered the surface of a
binary object. Take the following binary image showing a cross
>>> from scipy.ndimage.morphology import generate_binary_structure
>>> cross = generate_binary_structure(2, 1)
array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
With `connectivity` set to `1` a 4-neighbourhood is considered when determining the
object surface, resulting in the surface
.. code-block:: python
array([[0, 1, 0],
[1, 0, 1],
[0, 1, 0]])
    Changing `connectivity` to `2`, an 8-neighbourhood is considered and we get:
.. code-block:: python
array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
    , as a diagonal connection no longer qualifies as valid object surface.
This influences the results `asd` returns. Imagine we want to compute the surface
distance of our cross to a cube-like object:
>>> cube = generate_binary_structure(2, 1)
array([[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
    , whose surface is, independent of the `connectivity` value set, always
.. code-block:: python
array([[1, 1, 1],
[1, 0, 1],
[1, 1, 1]])
Using a `connectivity` of `1` we get
>>> asd(cross, cube, connectivity=1)
0.0
while a value of `2` returns us
>>> asd(cross, cube, connectivity=2)
0.20000000000000001
due to the center of the cross being considered surface as well.
"""
sds = __surface_distances(result, reference, voxelspacing, connectivity)
asd = sds.mean()
return asd
|
[
"def",
"asd",
"(",
"result",
",",
"reference",
",",
"voxelspacing",
"=",
"None",
",",
"connectivity",
"=",
"1",
")",
":",
"sds",
"=",
"__surface_distances",
"(",
"result",
",",
"reference",
",",
"voxelspacing",
",",
"connectivity",
")",
"asd",
"=",
"sds",
".",
"mean",
"(",
")",
"return",
"asd"
] |
Average surface distance metric.
Computes the average surface distance (ASD) between the binary objects in two images.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
voxelspacing : float or sequence of floats, optional
The voxelspacing in a distance unit i.e. spacing of elements
along each dimension. If a sequence, must be of length equal to
the input rank; if a single number, this is used for all axes. If
not specified, a grid spacing of unity is implied.
connectivity : int
The neighbourhood/connectivity considered when determining the surface
of the binary objects. This value is passed to
`scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`.
The decision on the connectivity is important, as it can influence the results
strongly. If in doubt, leave it as it is.
Returns
-------
asd : float
The average surface distance between the object(s) in ``result`` and the
object(s) in ``reference``. The distance unit is the same as for the spacing
of elements along each dimension, which is usually given in mm.
See also
--------
:func:`assd`
:func:`hd`
Notes
-----
This is not a real metric, as it is directed. See `assd` for a real metric of this.
The method is implemented making use of distance images and simple binary morphology
to achieve high computational speed.
Examples
--------
The `connectivity` determines what pixels/voxels are considered the surface of a
binary object. Take the following binary image showing a cross
>>> from scipy.ndimage.morphology import generate_binary_structure
>>> cross = generate_binary_structure(2, 1)
array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
With `connectivity` set to `1` a 4-neighbourhood is considered when determining the
object surface, resulting in the surface
.. code-block:: python
array([[0, 1, 0],
[1, 0, 1],
[0, 1, 0]])
Changing `connectivity` to `2`, an 8-neighbourhood is considered and we get:
.. code-block:: python
array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
, as a diagonal connection no longer qualifies as valid object surface.
This influences the results `asd` returns. Imagine we want to compute the surface
distance of our cross to a cube-like object:
>>> cube = generate_binary_structure(2, 2)
array([[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
, whose surface, independent of the `connectivity` value set, is always
.. code-block:: python
array([[1, 1, 1],
[1, 0, 1],
[1, 1, 1]])
Using a `connectivity` of `1` we get
>>> asd(cross, cube, connectivity=1)
0.0
while a value of `2` returns us
>>> asd(cross, cube, connectivity=2)
0.20000000000000001
due to the center of the cross being considered surface as well.
|
[
"Average",
"surface",
"distance",
"metric",
".",
"Computes",
"the",
"average",
"surface",
"distance",
"(",
"ASD",
")",
"between",
"the",
"binary",
"objects",
"in",
"two",
"images",
".",
"Parameters",
"----------",
"result",
":",
"array_like",
"Input",
"data",
"containing",
"objects",
".",
"Can",
"be",
"any",
"type",
"but",
"will",
"be",
"converted",
"into",
"binary",
":",
"background",
"where",
"0",
"object",
"everywhere",
"else",
".",
"reference",
":",
"array_like",
"Input",
"data",
"containing",
"objects",
".",
"Can",
"be",
"any",
"type",
"but",
"will",
"be",
"converted",
"into",
"binary",
":",
"background",
"where",
"0",
"object",
"everywhere",
"else",
".",
"voxelspacing",
":",
"float",
"or",
"sequence",
"of",
"floats",
"optional",
"The",
"voxelspacing",
"in",
"a",
"distance",
"unit",
"i",
".",
"e",
".",
"spacing",
"of",
"elements",
"along",
"each",
"dimension",
".",
"If",
"a",
"sequence",
"must",
"be",
"of",
"length",
"equal",
"to",
"the",
"input",
"rank",
";",
"if",
"a",
"single",
"number",
"this",
"is",
"used",
"for",
"all",
"axes",
".",
"If",
"not",
"specified",
"a",
"grid",
"spacing",
"of",
"unity",
"is",
"implied",
".",
"connectivity",
":",
"int",
"The",
"neighbourhood",
"/",
"connectivity",
"considered",
"when",
"determining",
"the",
"surface",
"of",
"the",
"binary",
"objects",
".",
"This",
"value",
"is",
"passed",
"to",
"scipy",
".",
"ndimage",
".",
"morphology",
".",
"generate_binary_structure",
"and",
"should",
"usually",
"be",
":",
"math",
":",
">",
"1",
".",
"The",
"decision",
"on",
"the",
"connectivity",
"is",
"important",
"as",
"it",
"can",
"influence",
"the",
"results",
"strongly",
".",
"If",
"in",
"doubt",
"leave",
"it",
"as",
"it",
"is",
".",
"Returns",
"-------",
"asd",
":",
"float",
"The",
"average",
"surface",
"distance",
"between",
"the",
"object",
"(",
"s",
")",
"in",
"result",
"and",
"the",
"object",
"(",
"s",
")",
"in",
"reference",
".",
"The",
"distance",
"unit",
"is",
"the",
"same",
"as",
"for",
"the",
"spacing",
"of",
"elements",
"along",
"each",
"dimension",
"which",
"is",
"usually",
"given",
"in",
"mm",
".",
"See",
"also",
"--------",
":",
"func",
":",
"assd",
":",
"func",
":",
"hd",
"Notes",
"-----",
"This",
"is",
"not",
"a",
"real",
"metric",
"as",
"it",
"is",
"directed",
".",
"See",
"assd",
"for",
"a",
"real",
"metric",
"of",
"this",
".",
"The",
"method",
"is",
"implemented",
"making",
"use",
"of",
"distance",
"images",
"and",
"simple",
"binary",
"morphology",
"to",
"achieve",
"high",
"computational",
"speed",
".",
"Examples",
"--------",
"The",
"connectivity",
"determines",
"what",
"pixels",
"/",
"voxels",
"are",
"considered",
"the",
"surface",
"of",
"a",
"binary",
"object",
".",
"Take",
"the",
"following",
"binary",
"image",
"showing",
"a",
"cross",
">>>",
"from",
"scipy",
".",
"ndimage",
".",
"morphology",
"import",
"generate_binary_structure",
">>>",
"cross",
"=",
"generate_binary_structure",
"(",
"2",
"1",
")",
"array",
"(",
"[[",
"0",
"1",
"0",
"]",
"[",
"1",
"1",
"1",
"]",
"[",
"0",
"1",
"0",
"]]",
")",
"With",
"connectivity",
"set",
"to",
"1",
"a",
"4",
"-",
"neighbourhood",
"is",
"considered",
"when",
"determining",
"the",
"object",
"surface",
"resulting",
"in",
"the",
"surface",
"..",
"code",
"-",
"block",
"::",
"python",
"array",
"(",
"[[",
"0",
"1",
"0",
"]",
"[",
"1",
"0",
"1",
"]",
"[",
"0",
"1",
"0",
"]]",
")",
"Changing",
"connectivity",
"to",
"2",
"a",
"8",
"-",
"neighbourhood",
"is",
"considered",
"and",
"we",
"get",
":",
"..",
"code",
"-",
"block",
"::",
"python",
"array",
"(",
"[[",
"0",
"1",
"0",
"]",
"[",
"1",
"1",
"1",
"]",
"[",
"0",
"1",
"0",
"]]",
")",
"as",
"a",
"diagonal",
"connection",
"does",
"no",
"longer",
"qualifies",
"as",
"valid",
"object",
"surface",
".",
"This",
"influences",
"the",
"results",
"asd",
"returns",
".",
"Imagine",
"we",
"want",
"to",
"compute",
"the",
"surface",
"distance",
"of",
"our",
"cross",
"to",
"a",
"cube",
"-",
"like",
"object",
":",
">>>",
"cube",
"=",
"generate_binary_structure",
"(",
"2",
"1",
")",
"array",
"(",
"[[",
"1",
"1",
"1",
"]",
"[",
"1",
"1",
"1",
"]",
"[",
"1",
"1",
"1",
"]]",
")",
"which",
"surface",
"is",
"independent",
"of",
"the",
"connectivity",
"value",
"set",
"always",
"..",
"code",
"-",
"block",
"::",
"python",
"array",
"(",
"[[",
"1",
"1",
"1",
"]",
"[",
"1",
"0",
"1",
"]",
"[",
"1",
"1",
"1",
"]]",
")",
"Using",
"a",
"connectivity",
"of",
"1",
"we",
"get",
">>>",
"asd",
"(",
"cross",
"cube",
"connectivity",
"=",
"1",
")",
"0",
".",
"0",
"while",
"a",
"value",
"of",
"2",
"returns",
"us",
">>>",
"asd",
"(",
"cross",
"cube",
"connectivity",
"=",
"2",
")",
"0",
".",
"20000000000000001",
"due",
"to",
"the",
"center",
"of",
"the",
"cross",
"being",
"considered",
"surface",
"as",
"well",
"."
] |
python
|
train
| 33.398148 |
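A minimal sketch of how the directed `asd` above relates to the symmetric `assd` its docstring points to. The import path `medpy.metric.binary` and the simple two-way averaging are assumptions for illustration; `assd` itself pools both directed surface-distance sets before averaging, so its result can differ slightly.

import numpy as np
from scipy.ndimage.morphology import generate_binary_structure
from medpy.metric.binary import asd  # assumed import path for the function above

cross = generate_binary_structure(2, 1).astype(int)  # the plus-shaped object from the docstring
cube = np.ones((3, 3), dtype=int)                    # the cube-like object from the docstring

# asd is directed; averaging both directions approximates the symmetric assd.
forward = asd(cross, cube, connectivity=2)
backward = asd(cube, cross, connectivity=2)
symmetric_estimate = (forward + backward) / 2.0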
python-openxml/python-docx
|
docx/opc/phys_pkg.py
|
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/opc/phys_pkg.py#L150-L155
|
def write(self, pack_uri, blob):
"""
Write *blob* to this zip package with the membername corresponding to
*pack_uri*.
"""
self._zipf.writestr(pack_uri.membername, blob)
|
[
"def",
"write",
"(",
"self",
",",
"pack_uri",
",",
"blob",
")",
":",
"self",
".",
"_zipf",
".",
"writestr",
"(",
"pack_uri",
".",
"membername",
",",
"blob",
")"
] |
Write *blob* to this zip package with the membername corresponding to
*pack_uri*.
|
[
"Write",
"*",
"blob",
"*",
"to",
"this",
"zip",
"package",
"with",
"the",
"membername",
"corresponding",
"to",
"*",
"pack_uri",
"*",
"."
] |
python
|
train
| 34 |
freelancer/freelancer-sdk-python
|
freelancersdk/resources/projects/projects.py
|
https://github.com/freelancer/freelancer-sdk-python/blob/e09034936d6f13b3909a9464ee329c81c1834941/freelancersdk/resources/projects/projects.py#L230-L252
|
def place_project_bid(session, project_id, bidder_id, description, amount,
period, milestone_percentage):
"""
Place a bid on a project
"""
bid_data = {
'project_id': project_id,
'bidder_id': bidder_id,
'description': description,
'amount': amount,
'period': period,
'milestone_percentage': milestone_percentage,
}
# POST /api/projects/0.1/bids/
response = make_post_request(session, 'bids', json_data=bid_data)
json_data = response.json()
if response.status_code == 200:
bid_data = json_data['result']
return Bid(bid_data)
else:
raise BidNotPlacedException(message=json_data['message'],
error_code=json_data['error_code'],
request_id=json_data['request_id'])
|
[
"def",
"place_project_bid",
"(",
"session",
",",
"project_id",
",",
"bidder_id",
",",
"description",
",",
"amount",
",",
"period",
",",
"milestone_percentage",
")",
":",
"bid_data",
"=",
"{",
"'project_id'",
":",
"project_id",
",",
"'bidder_id'",
":",
"bidder_id",
",",
"'description'",
":",
"description",
",",
"'amount'",
":",
"amount",
",",
"'period'",
":",
"period",
",",
"'milestone_percentage'",
":",
"milestone_percentage",
",",
"}",
"# POST /api/projects/0.1/bids/",
"response",
"=",
"make_post_request",
"(",
"session",
",",
"'bids'",
",",
"json_data",
"=",
"bid_data",
")",
"json_data",
"=",
"response",
".",
"json",
"(",
")",
"if",
"response",
".",
"status_code",
"==",
"200",
":",
"bid_data",
"=",
"json_data",
"[",
"'result'",
"]",
"return",
"Bid",
"(",
"bid_data",
")",
"else",
":",
"raise",
"BidNotPlacedException",
"(",
"message",
"=",
"json_data",
"[",
"'message'",
"]",
",",
"error_code",
"=",
"json_data",
"[",
"'error_code'",
"]",
",",
"request_id",
"=",
"json_data",
"[",
"'request_id'",
"]",
")"
] |
Place a bid on a project
|
[
"Place",
"a",
"bid",
"on",
"a",
"project"
] |
python
|
valid
| 36.608696 |
koalalorenzo/python-digitalocean
|
digitalocean/Image.py
|
https://github.com/koalalorenzo/python-digitalocean/blob/d0221b57856fb1e131cafecf99d826f7b07a947c/digitalocean/Image.py#L141-L149
|
def transfer(self, new_region_slug):
"""
Transfer the image
"""
return self.get_data(
"images/%s/actions/" % self.id,
type=POST,
params={"type": "transfer", "region": new_region_slug}
)
|
[
"def",
"transfer",
"(",
"self",
",",
"new_region_slug",
")",
":",
"return",
"self",
".",
"get_data",
"(",
"\"images/%s/actions/\"",
"%",
"self",
".",
"id",
",",
"type",
"=",
"POST",
",",
"params",
"=",
"{",
"\"type\"",
":",
"\"transfer\"",
",",
"\"region\"",
":",
"new_region_slug",
"}",
")"
] |
Transfer the image
|
[
"Transfer",
"the",
"image"
] |
python
|
valid
| 28.555556 |
biocommons/hgvs
|
hgvs/projector.py
|
https://github.com/biocommons/hgvs/blob/4d16efb475e1802b2531a2f1c373e8819d8e533b/hgvs/projector.py#L64-L77
|
def project_variant_forward(self, c_variant):
"""
project c_variant on the source transcript onto the destination transcript
:param c_variant: an :class:`hgvs.sequencevariant.SequenceVariant` object on the source transcript
:returns: c_variant: an :class:`hgvs.sequencevariant.SequenceVariant` object on the destination transcript
"""
if c_variant.ac != self.src_tm.tx_ac:
raise RuntimeError("variant accession does not match that used to initialize " +
__name__)
new_c_variant = copy.deepcopy(c_variant)
new_c_variant.ac = self.dst_tm.tx_ac
new_c_variant.posedit.pos = self.project_interval_forward(c_variant.posedit.pos)
return new_c_variant
|
[
"def",
"project_variant_forward",
"(",
"self",
",",
"c_variant",
")",
":",
"if",
"c_variant",
".",
"ac",
"!=",
"self",
".",
"src_tm",
".",
"tx_ac",
":",
"raise",
"RuntimeError",
"(",
"\"variant accession does not match that used to initialize \"",
"+",
"__name__",
")",
"new_c_variant",
"=",
"copy",
".",
"deepcopy",
"(",
"c_variant",
")",
"new_c_variant",
".",
"ac",
"=",
"self",
".",
"dst_tm",
".",
"tx_ac",
"new_c_variant",
".",
"posedit",
".",
"pos",
"=",
"self",
".",
"project_interval_forward",
"(",
"c_variant",
".",
"posedit",
".",
"pos",
")",
"return",
"new_c_variant"
] |
project c_variant on the source transcript onto the destination transcript
:param c_variant: an :class:`hgvs.sequencevariant.SequenceVariant` object on the source transcript
:returns: c_variant: an :class:`hgvs.sequencevariant.SequenceVariant` object on the destination transcript
|
[
"project",
"c_variant",
"on",
"the",
"source",
"transcript",
"onto",
"the",
"destination",
"transcript"
] |
python
|
train
| 53.857143 |
push-things/django-th
|
django_th/services/services.py
|
https://github.com/push-things/django-th/blob/86c999d16bcf30b6224206e5b40824309834ac8c/django_th/services/services.py#L131-L152
|
def save_data(self, trigger_id, **data):
"""
used to save data to the service,
but first of all
does some work on the data to find what to use
and what to convert
:param trigger_id: trigger ID from which to save data
:param data: the data to check to be used and save
:type trigger_id: int
:type data: dict
:return: the status of the save statement
:rtype: boolean
"""
title = self.set_title(data)
title = HtmlEntities(title).html_entity_decode
content = self.set_content(data)
content = HtmlEntities(content).html_entity_decode
if data.get('output_format'):
# pandoc to convert tools
import pypandoc
content = pypandoc.convert(content, str(data.get('output_format')), format='html')
return title, content
|
[
"def",
"save_data",
"(",
"self",
",",
"trigger_id",
",",
"*",
"*",
"data",
")",
":",
"title",
"=",
"self",
".",
"set_title",
"(",
"data",
")",
"title",
"=",
"HtmlEntities",
"(",
"title",
")",
".",
"html_entity_decode",
"content",
"=",
"self",
".",
"set_content",
"(",
"data",
")",
"content",
"=",
"HtmlEntities",
"(",
"content",
")",
".",
"html_entity_decode",
"if",
"data",
".",
"get",
"(",
"'output_format'",
")",
":",
"# pandoc to convert tools",
"import",
"pypandoc",
"content",
"=",
"pypandoc",
".",
"convert",
"(",
"content",
",",
"str",
"(",
"data",
".",
"get",
"(",
"'output_format'",
")",
")",
",",
"format",
"=",
"'html'",
")",
"return",
"title",
",",
"content"
] |
used to save data to the service,
but first of all
does some work on the data to find what to use
and what to convert
:param trigger_id: trigger ID from which to save data
:param data: the data to check to be used and save
:type trigger_id: int
:type data: dict
:return: the status of the save statement
:rtype: boolean
|
[
"used",
"to",
"save",
"data",
"to",
"the",
"service",
"but",
"first",
"of",
"all",
"make",
"some",
"work",
"about",
"the",
"data",
"to",
"find",
"and",
"the",
"data",
"to",
"convert",
":",
"param",
"trigger_id",
":",
"trigger",
"ID",
"from",
"which",
"to",
"save",
"data",
":",
"param",
"data",
":",
"the",
"data",
"to",
"check",
"to",
"be",
"used",
"and",
"save",
":",
"type",
"trigger_id",
":",
"int",
":",
"type",
"data",
":",
"dict",
":",
"return",
":",
"the",
"status",
"of",
"the",
"save",
"statement",
":",
"rtype",
":",
"boolean"
] |
python
|
train
| 40.863636 |
linkedin/naarad
|
src/naarad/__init__.py
|
https://github.com/linkedin/naarad/blob/261e2c0760fd6a6b0ee59064180bd8e3674311fe/src/naarad/__init__.py#L74-L81
|
def create_analysis(self, config):
"""
Create Analysis and save in Naarad from config
:param config:
:return:
"""
self._default_test_id += 1
self._analyses[self._default_test_id] = _Analysis(ts_start=None, config=config, test_id=self._default_test_id)
|
[
"def",
"create_analysis",
"(",
"self",
",",
"config",
")",
":",
"self",
".",
"_default_test_id",
"+=",
"1",
"self",
".",
"_analyses",
"[",
"self",
".",
"_default_test_id",
"]",
"=",
"_Analysis",
"(",
"ts_start",
"=",
"None",
",",
"config",
"=",
"config",
",",
"test_id",
"=",
"self",
".",
"_default_test_id",
")"
] |
Create Analysis and save in Naarad from config
:param config:
:return:
|
[
"Create",
"Analysis",
"and",
"save",
"in",
"Naarad",
"from",
"config",
":",
"param",
"config",
":",
":",
"return",
":"
] |
python
|
valid
| 34 |
Britefury/batchup
|
batchup/datasets/dataset.py
|
https://github.com/Britefury/batchup/blob/3fc2304e629f813c05f9e7a85a18acef3581a536/batchup/datasets/dataset.py#L124-L135
|
def acquire(self, **kwargs):
"""
Download the file and return its path
Returns
-------
str or None
The path of the file in BatchUp's temporary directory or None if
the download failed.
"""
return config.download_data(self.temp_filename, self.url,
self.sha256)
|
[
"def",
"acquire",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"config",
".",
"download_data",
"(",
"self",
".",
"temp_filename",
",",
"self",
".",
"url",
",",
"self",
".",
"sha256",
")"
] |
Download the file and return its path
Returns
-------
str or None
The path of the file in BatchUp's temporary directory or None if
the download failed.
|
[
"Download",
"the",
"file",
"and",
"return",
"its",
"path"
] |
python
|
train
| 30.416667 |
materialsproject/pymatgen
|
pymatgen/ext/matproj.py
|
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/ext/matproj.py#L1064-L1087
|
def get_cohesive_energy(self, material_id, per_atom=False):
"""
Gets the cohesive energy for a material (eV per formula unit). Cohesive energy
is defined as the difference between the bulk energy and the sum of
total DFT energy of isolated atoms for atom elements in the bulk.
Args:
material_id (str): Materials Project material_id, e.g. 'mp-123'.
per_atom (bool): Whether or not to return cohesive energy per atom
Returns:
Cohesive energy (eV).
"""
entry = self.get_entry_by_material_id(material_id)
ebulk = entry.energy / \
entry.composition.get_integer_formula_and_factor()[1]
comp_dict = entry.composition.reduced_composition.as_dict()
isolated_atom_e_sum, n = 0, 0
for el in comp_dict.keys():
e = self._make_request("/element/%s/tasks/isolated_atom" % (el),
mp_decode=False)[0]
isolated_atom_e_sum += e['output']["final_energy"] * comp_dict[el]
n += comp_dict[el]
ecoh_per_formula = isolated_atom_e_sum - ebulk
return ecoh_per_formula/n if per_atom else ecoh_per_formula
|
[
"def",
"get_cohesive_energy",
"(",
"self",
",",
"material_id",
",",
"per_atom",
"=",
"False",
")",
":",
"entry",
"=",
"self",
".",
"get_entry_by_material_id",
"(",
"material_id",
")",
"ebulk",
"=",
"entry",
".",
"energy",
"/",
"entry",
".",
"composition",
".",
"get_integer_formula_and_factor",
"(",
")",
"[",
"1",
"]",
"comp_dict",
"=",
"entry",
".",
"composition",
".",
"reduced_composition",
".",
"as_dict",
"(",
")",
"isolated_atom_e_sum",
",",
"n",
"=",
"0",
",",
"0",
"for",
"el",
"in",
"comp_dict",
".",
"keys",
"(",
")",
":",
"e",
"=",
"self",
".",
"_make_request",
"(",
"\"/element/%s/tasks/isolated_atom\"",
"%",
"(",
"el",
")",
",",
"mp_decode",
"=",
"False",
")",
"[",
"0",
"]",
"isolated_atom_e_sum",
"+=",
"e",
"[",
"'output'",
"]",
"[",
"\"final_energy\"",
"]",
"*",
"comp_dict",
"[",
"el",
"]",
"n",
"+=",
"comp_dict",
"[",
"el",
"]",
"ecoh_per_formula",
"=",
"isolated_atom_e_sum",
"-",
"ebulk",
"return",
"ecoh_per_formula",
"/",
"n",
"if",
"per_atom",
"else",
"ecoh_per_formula"
] |
Gets the cohesive energy for a material (eV per formula unit). Cohesive energy
is defined as the difference between the bulk energy and the sum of
total DFT energy of isolated atoms for atom elements in the bulk.
Args:
material_id (str): Materials Project material_id, e.g. 'mp-123'.
per_atom (bool): Whether or not to return cohesive energy per atom
Returns:
Cohesive energy (eV).
|
[
"Gets",
"the",
"cohesive",
"for",
"a",
"material",
"(",
"eV",
"per",
"formula",
"unit",
")",
".",
"Cohesive",
"energy",
"is",
"defined",
"as",
"the",
"difference",
"between",
"the",
"bulk",
"energy",
"and",
"the",
"sum",
"of",
"total",
"DFT",
"energy",
"of",
"isolated",
"atoms",
"for",
"atom",
"elements",
"in",
"the",
"bulk",
".",
"Args",
":",
"material_id",
"(",
"str",
")",
":",
"Materials",
"Project",
"material_id",
"e",
".",
"g",
".",
"mp",
"-",
"123",
".",
"per_atom",
"(",
"bool",
")",
":",
"Whether",
"or",
"not",
"to",
"return",
"cohesive",
"energy",
"per",
"atom",
"Returns",
":",
"Cohesive",
"energy",
"(",
"eV",
")",
"."
] |
python
|
train
| 49.5 |
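A small worked example of the arithmetic inside `get_cohesive_energy` above. All numbers are made up for illustration and are not Materials Project data; only the formula (sum of isolated-atom energies minus the bulk energy per formula unit, optionally divided by the atom count) is taken from the code.

# Hypothetical compound A2B with invented DFT energies (eV).
comp_dict = {"A": 2.0, "B": 1.0}
isolated_energies = {"A": -1.0, "B": -2.0}  # per isolated atom (made up)
ebulk = -10.0                                # per formula unit (made up)

isolated_atom_e_sum = sum(isolated_energies[el] * comp_dict[el] for el in comp_dict)  # -4.0
n = sum(comp_dict.values())                                                           # 3.0
ecoh_per_formula = isolated_atom_e_sum - ebulk                                        # 6.0 eV
ecoh_per_atom = ecoh_per_formula / n                                                  # 2.0 eV per atom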
chrisspen/weka
|
weka/arff.py
|
https://github.com/chrisspen/weka/blob/c86fc4b8eef1afd56f89ec28283bdf9e2fdc453b/weka/arff.py#L592-L601
|
def define_attribute(self, name, atype, data=None):
"""
Define a new attribute. atype has to be one of 'integer', 'real', 'numeric', 'string', 'date' or 'nominal'.
For nominal attributes, pass the possible values as data.
For date attributes, pass the format as data.
"""
self.attributes.append(name)
assert atype in TYPES, "Unknown type '%s'. Must be one of: %s" % (atype, ', '.join(TYPES),)
self.attribute_types[name] = atype
self.attribute_data[name] = data
|
[
"def",
"define_attribute",
"(",
"self",
",",
"name",
",",
"atype",
",",
"data",
"=",
"None",
")",
":",
"self",
".",
"attributes",
".",
"append",
"(",
"name",
")",
"assert",
"atype",
"in",
"TYPES",
",",
"\"Unknown type '%s'. Must be one of: %s\"",
"%",
"(",
"atype",
",",
"', '",
".",
"join",
"(",
"TYPES",
")",
",",
")",
"self",
".",
"attribute_types",
"[",
"name",
"]",
"=",
"atype",
"self",
".",
"attribute_data",
"[",
"name",
"]",
"=",
"data"
] |
Define a new attribute. atype has to be one of 'integer', 'real', 'numeric', 'string', 'date' or 'nominal'.
For nominal attributes, pass the possible values as data.
For date attributes, pass the format as data.
|
[
"Define",
"a",
"new",
"attribute",
".",
"atype",
"has",
"to",
"be",
"one",
"of",
"integer",
"real",
"numeric",
"string",
"date",
"or",
"nominal",
".",
"For",
"nominal",
"attributes",
"pass",
"the",
"possible",
"values",
"as",
"data",
".",
"For",
"date",
"attributes",
"pass",
"the",
"format",
"as",
"data",
"."
] |
python
|
train
| 52.3 |
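A short usage sketch for `define_attribute` above, showing how the `data` argument differs for nominal and date attributes as the docstring describes. The name `arff` is a placeholder for an instance of the containing ARFF document class, not part of this record.

arff.define_attribute('outlook', 'nominal', data=['sunny', 'overcast', 'rainy'])
arff.define_attribute('temperature', 'real')
arff.define_attribute('recorded_on', 'date', data='yyyy-MM-dd')  # data holds the date format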
thusoy/headsup
|
headsup.py
|
https://github.com/thusoy/headsup/blob/165a63cc6c987f664f2efd901d483ca07b7bc898/headsup.py#L83-L90
|
def get_default_net_device():
""" Find the device where the default route is. """
with open('/proc/net/route') as fh:
for line in fh:
iface, dest, _ = line.split(None, 2)
if dest == '00000000':
return iface
return None
|
[
"def",
"get_default_net_device",
"(",
")",
":",
"with",
"open",
"(",
"'/proc/net/route'",
")",
"as",
"fh",
":",
"for",
"line",
"in",
"fh",
":",
"iface",
",",
"dest",
",",
"_",
"=",
"line",
".",
"split",
"(",
"None",
",",
"2",
")",
"if",
"dest",
"==",
"'00000000'",
":",
"return",
"iface",
"return",
"None"
] |
Find the device where the default route is.
|
[
"Find",
"the",
"device",
"where",
"the",
"default",
"route",
"is",
"."
] |
python
|
train
| 33.875 |
criteo/gourde
|
gourde/gourde.py
|
https://github.com/criteo/gourde/blob/9a274e534a2af5d2b2a5e99f10c59010adb94863/gourde/gourde.py#L251-L264
|
def threads_bt(self):
"""Display thread backtraces."""
import threading
import traceback
threads = {}
for thread in threading.enumerate():
frames = sys._current_frames().get(thread.ident)
if frames:
stack = traceback.format_stack(frames)
else:
stack = []
threads[thread] = "".join(stack)
return flask.render_template("gourde/threads.html", threads=threads)
|
[
"def",
"threads_bt",
"(",
"self",
")",
":",
"import",
"threading",
"import",
"traceback",
"threads",
"=",
"{",
"}",
"for",
"thread",
"in",
"threading",
".",
"enumerate",
"(",
")",
":",
"frames",
"=",
"sys",
".",
"_current_frames",
"(",
")",
".",
"get",
"(",
"thread",
".",
"ident",
")",
"if",
"frames",
":",
"stack",
"=",
"traceback",
".",
"format_stack",
"(",
"frames",
")",
"else",
":",
"stack",
"=",
"[",
"]",
"threads",
"[",
"thread",
"]",
"=",
"\"\"",
".",
"join",
"(",
"stack",
")",
"return",
"flask",
".",
"render_template",
"(",
"\"gourde/threads.html\"",
",",
"threads",
"=",
"threads",
")"
] |
Display thread backtraces.
|
[
"Display",
"thread",
"backtraces",
"."
] |
python
|
train
| 33.714286 |
dpgaspar/Flask-AppBuilder
|
flask_appbuilder/console.py
|
https://github.com/dpgaspar/Flask-AppBuilder/blob/c293734c1b86e176a3ba57ee2deab6676d125576/flask_appbuilder/console.py#L156-L168
|
def create_user(app, appbuilder, role, username, firstname, lastname, email, password):
"""
Create a user
"""
_appbuilder = import_application(app, appbuilder)
role_object = _appbuilder.sm.find_role(role)
user = _appbuilder.sm.add_user(
username, firstname, lastname, email, role_object, password
)
if user:
click.echo(click.style("User {0} created.".format(username), fg="green"))
else:
click.echo(click.style("Error! No user created", fg="red"))
|
[
"def",
"create_user",
"(",
"app",
",",
"appbuilder",
",",
"role",
",",
"username",
",",
"firstname",
",",
"lastname",
",",
"email",
",",
"password",
")",
":",
"_appbuilder",
"=",
"import_application",
"(",
"app",
",",
"appbuilder",
")",
"role_object",
"=",
"_appbuilder",
".",
"sm",
".",
"find_role",
"(",
"role",
")",
"user",
"=",
"_appbuilder",
".",
"sm",
".",
"add_user",
"(",
"username",
",",
"firstname",
",",
"lastname",
",",
"email",
",",
"role_object",
",",
"password",
")",
"if",
"user",
":",
"click",
".",
"echo",
"(",
"click",
".",
"style",
"(",
"\"User {0} created.\"",
".",
"format",
"(",
"username",
")",
",",
"fg",
"=",
"\"green\"",
")",
")",
"else",
":",
"click",
".",
"echo",
"(",
"click",
".",
"style",
"(",
"\"Error! No user created\"",
",",
"fg",
"=",
"\"red\"",
")",
")"
] |
Create a user
|
[
"Create",
"a",
"user"
] |
python
|
train
| 38.384615 |
Azure/azure-multiapi-storage-python
|
azure/multiapi/storage/v2015_04_05/table/tableservice.py
|
https://github.com/Azure/azure-multiapi-storage-python/blob/bd5482547f993c6eb56fd09070e15c2e9616e440/azure/multiapi/storage/v2015_04_05/table/tableservice.py#L804-L836
|
def insert_entity(self, table_name, entity, timeout=None):
'''
Inserts a new entity into the table. Throws if an entity with the same
PartitionKey and RowKey already exists.
When inserting an entity into a table, you must specify values for the
PartitionKey and RowKey system properties. Together, these properties
form the primary key and must be unique within the table. Both the
PartitionKey and RowKey values must be string values; each key value may
be up to 64 KB in size. If you are using an integer value for the key
value, you should convert the integer to a fixed-width string, because
they are canonically sorted. For example, you should convert the value
1 to 0000001 to ensure proper sorting.
:param str table_name:
The name of the table to insert the entity into.
:param entity:
The entity to insert. Could be a dict or an entity object.
Must contain a PartitionKey and a RowKey.
:type entity: a dict or :class:`~azure.storage.table.models.Entity`
:param int timeout:
The server timeout, expressed in seconds.
:return: The etag of the inserted entity.
:rtype: str
'''
_validate_not_none('table_name', table_name)
request = _insert_entity(entity)
request.host = self._get_host()
request.path = '/' + _to_str(table_name)
request.query += [('timeout', _int_to_str(timeout))]
response = self._perform_request(request)
return _extract_etag(response)
|
[
"def",
"insert_entity",
"(",
"self",
",",
"table_name",
",",
"entity",
",",
"timeout",
"=",
"None",
")",
":",
"_validate_not_none",
"(",
"'table_name'",
",",
"table_name",
")",
"request",
"=",
"_insert_entity",
"(",
"entity",
")",
"request",
".",
"host",
"=",
"self",
".",
"_get_host",
"(",
")",
"request",
".",
"path",
"=",
"'/'",
"+",
"_to_str",
"(",
"table_name",
")",
"request",
".",
"query",
"+=",
"[",
"(",
"'timeout'",
",",
"_int_to_str",
"(",
"timeout",
")",
")",
"]",
"response",
"=",
"self",
".",
"_perform_request",
"(",
"request",
")",
"return",
"_extract_etag",
"(",
"response",
")"
] |
Inserts a new entity into the table. Throws if an entity with the same
PartitionKey and RowKey already exists.
When inserting an entity into a table, you must specify values for the
PartitionKey and RowKey system properties. Together, these properties
form the primary key and must be unique within the table. Both the
PartitionKey and RowKey values must be string values; each key value may
be up to 64 KB in size. If you are using an integer value for the key
value, you should convert the integer to a fixed-width string, because
they are canonically sorted. For example, you should convert the value
1 to 0000001 to ensure proper sorting.
:param str table_name:
The name of the table to insert the entity into.
:param entity:
The entity to insert. Could be a dict or an entity object.
Must contain a PartitionKey and a RowKey.
:type entity: a dict or :class:`~azure.storage.table.models.Entity`
:param int timeout:
The server timeout, expressed in seconds.
:return: The etag of the inserted entity.
:rtype: str
|
[
"Inserts",
"a",
"new",
"entity",
"into",
"the",
"table",
".",
"Throws",
"if",
"an",
"entity",
"with",
"the",
"same",
"PartitionKey",
"and",
"RowKey",
"already",
"exists",
"."
] |
python
|
train
| 48.060606 |
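A hedged usage sketch for `insert_entity` above. The docstring requires every entity to carry string-valued PartitionKey and RowKey properties; `table_service` is assumed to be an already constructed TableService instance, and the table and key values are invented.

entity = {
    'PartitionKey': 'tasksSeattle',
    'RowKey': '0000001',  # integer keys padded to a fixed-width string, as the docstring advises
    'description': 'Take out the trash',
}
etag = table_service.insert_entity('tasktable', entity, timeout=30)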
agoragames/kairos
|
kairos/cassandra_backend.py
|
https://github.com/agoragames/kairos/blob/0b062d543b0f4a46df460fa0eb6ec281232ab179/kairos/cassandra_backend.py#L188-L196
|
def _insert_data(self, connection, name, value, timestamp, interval, config):
'''Helper to insert data into cql.'''
cursor = connection.cursor()
try:
stmt = self._insert_stmt(name, value, timestamp, interval, config)
if stmt:
cursor.execute(stmt)
finally:
cursor.close()
|
[
"def",
"_insert_data",
"(",
"self",
",",
"connection",
",",
"name",
",",
"value",
",",
"timestamp",
",",
"interval",
",",
"config",
")",
":",
"cursor",
"=",
"connection",
".",
"cursor",
"(",
")",
"try",
":",
"stmt",
"=",
"self",
".",
"_insert_stmt",
"(",
"name",
",",
"value",
",",
"timestamp",
",",
"interval",
",",
"config",
")",
"if",
"stmt",
":",
"cursor",
".",
"execute",
"(",
"stmt",
")",
"finally",
":",
"cursor",
".",
"close",
"(",
")"
] |
Helper to insert data into cql.
|
[
"Helper",
"to",
"insert",
"data",
"into",
"cql",
"."
] |
python
|
train
| 33.777778 |
Kozea/cairocffi
|
cairocffi/matrix.py
|
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/matrix.py#L106-L123
|
def translate(self, tx, ty):
"""Applies a translation by :obj:`tx`, :obj:`ty`
to the transformation in this matrix.
The effect of the new transformation is to
first translate the coordinates by :obj:`tx` and :obj:`ty`,
then apply the original transformation to the coordinates.
.. note::
This changes the matrix in-place.
:param tx: Amount to translate in the X direction.
:param ty: Amount to translate in the Y direction.
:type tx: float
:type ty: float
"""
cairo.cairo_matrix_translate(self._pointer, tx, ty)
|
[
"def",
"translate",
"(",
"self",
",",
"tx",
",",
"ty",
")",
":",
"cairo",
".",
"cairo_matrix_translate",
"(",
"self",
".",
"_pointer",
",",
"tx",
",",
"ty",
")"
] |
Applies a translation by :obj:`tx`, :obj:`ty`
to the transformation in this matrix.
The effect of the new transformation is to
first translate the coordinates by :obj:`tx` and :obj:`ty`,
then apply the original transformation to the coordinates.
.. note::
This changes the matrix in-place.
:param tx: Amount to translate in the X direction.
:param ty: Amount to translate in the Y direction.
:type tx: float
:type ty: float
|
[
"Applies",
"a",
"translation",
"by",
":",
"obj",
":",
"tx",
":",
"obj",
":",
"ty",
"to",
"the",
"transformation",
"in",
"this",
"matrix",
"."
] |
python
|
train
| 33.666667 |
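A minimal sketch of the in-place behaviour described in the `translate` docstring above, assuming `Matrix` is importable from the cairocffi package this record belongs to and that a `transform_point` method is available on it.

from cairocffi import Matrix

m = Matrix()             # identity transformation by default (assumed)
m.translate(10, 20)      # modifies m in place; nothing is returned
x, y = m.transform_point(1, 1)  # -> (11.0, 21.0) under the assumed API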
kennethreitz/records
|
records.py
|
https://github.com/kennethreitz/records/blob/ecd857266c5e7830d657cbe0196816314790563b/records.py#L207-L226
|
def first(self, default=None, as_dict=False, as_ordereddict=False):
"""Returns a single record for the RecordCollection, or `default`. If
`default` is an instance or subclass of Exception, then raise it
instead of returning it."""
# Try to get a record, or return/raise default.
try:
record = self[0]
except IndexError:
if isexception(default):
raise default
return default
# Cast and return.
if as_dict:
return record.as_dict()
elif as_ordereddict:
return record.as_dict(ordered=True)
else:
return record
|
[
"def",
"first",
"(",
"self",
",",
"default",
"=",
"None",
",",
"as_dict",
"=",
"False",
",",
"as_ordereddict",
"=",
"False",
")",
":",
"# Try to get a record, or return/raise default.",
"try",
":",
"record",
"=",
"self",
"[",
"0",
"]",
"except",
"IndexError",
":",
"if",
"isexception",
"(",
"default",
")",
":",
"raise",
"default",
"return",
"default",
"# Cast and return.",
"if",
"as_dict",
":",
"return",
"record",
".",
"as_dict",
"(",
")",
"elif",
"as_ordereddict",
":",
"return",
"record",
".",
"as_dict",
"(",
"ordered",
"=",
"True",
")",
"else",
":",
"return",
"record"
] |
Returns a single record for the RecordCollection, or `default`. If
`default` is an instance or subclass of Exception, then raise it
instead of returning it.
|
[
"Returns",
"a",
"single",
"record",
"for",
"the",
"RecordCollection",
"or",
"default",
".",
"If",
"default",
"is",
"an",
"instance",
"or",
"subclass",
"of",
"Exception",
"then",
"raise",
"it",
"instead",
"of",
"returning",
"it",
"."
] |
python
|
train
| 32.8 |
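A brief usage sketch of the `default` behaviour documented for `first` above: a plain value is returned when the collection is empty, while an Exception instance or subclass is raised instead. `db` is assumed to be a `records.Database` connection; the query and table are invented.

rows = db.query('SELECT * FROM users WHERE id = :id', id=42)

user = rows.first()                                    # None when there are no rows
user = rows.first(default={})                          # plain fallback value is returned
user = rows.first(default=ValueError('no such user'))  # an Exception is raised instead
user_dict = rows.first(as_dict=True)                   # cast through Record.as_dict()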
awickert/gFlex
|
gflex/f2d.py
|
https://github.com/awickert/gFlex/blob/3ac32249375b0f8d342a142585d86ea4d905a5a0/gflex/f2d.py#L184-L196
|
def elasprep(self):
"""
dx4, dy4, dx2dy2, D = elasprep(dx,dy,Te,E=1E11,nu=0.25)
Defines the variables that are required to create the 2D finite
difference solution coefficient matrix
"""
if self.Method != 'SAS_NG':
self.dx4 = self.dx**4
self.dy4 = self.dy**4
self.dx2dy2 = self.dx**2 * self.dy**2
self.D = self.E*self.Te**3/(12*(1-self.nu**2))
|
[
"def",
"elasprep",
"(",
"self",
")",
":",
"if",
"self",
".",
"Method",
"!=",
"'SAS_NG'",
":",
"self",
".",
"dx4",
"=",
"self",
".",
"dx",
"**",
"4",
"self",
".",
"dy4",
"=",
"self",
".",
"dy",
"**",
"4",
"self",
".",
"dx2dy2",
"=",
"self",
".",
"dx",
"**",
"2",
"*",
"self",
".",
"dy",
"**",
"2",
"self",
".",
"D",
"=",
"self",
".",
"E",
"*",
"self",
".",
"Te",
"**",
"3",
"/",
"(",
"12",
"*",
"(",
"1",
"-",
"self",
".",
"nu",
"**",
"2",
")",
")"
] |
dx4, dy4, dx2dy2, D = elasprep(dx,dy,Te,E=1E11,nu=0.25)
Defines the variables that are required to create the 2D finite
difference solution coefficient matrix
|
[
"dx4",
"dy4",
"dx2dy2",
"D",
"=",
"elasprep",
"(",
"dx",
"dy",
"Te",
"E",
"=",
"1E11",
"nu",
"=",
"0",
".",
"25",
")",
"Defines",
"the",
"variables",
"that",
"are",
"required",
"to",
"create",
"the",
"2D",
"finite",
"difference",
"solution",
"coefficient",
"matrix"
] |
python
|
train
| 29.846154 |
LEMS/pylems
|
lems/model/dynamics.py
|
https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/model/dynamics.py#L104-L116
|
def toxml(self):
"""
Exports this object into a LEMS XML object
"""
return '<DerivedVariable name="{0}"'.format(self.name) +\
(' dimension="{0}"'.format(self.dimension) if self.dimension else '') +\
(' exposure="{0}"'.format(self.exposure) if self.exposure else '') +\
(' select="{0}"'.format(self.select) if self.select else '') +\
(' value="{0}"'.format(self.value) if self.value else '') +\
(' reduce="{0}"'.format(self.reduce) if self.reduce else '') +\
(' required="{0}"'.format(self.required) if self.required else '') +\
'/>'
|
[
"def",
"toxml",
"(",
"self",
")",
":",
"return",
"'<DerivedVariable name=\"{0}\"'",
".",
"format",
"(",
"self",
".",
"name",
")",
"+",
"(",
"' dimension=\"{0}\"'",
".",
"format",
"(",
"self",
".",
"dimension",
")",
"if",
"self",
".",
"dimension",
"else",
"''",
")",
"+",
"(",
"' exposure=\"{0}\"'",
".",
"format",
"(",
"self",
".",
"exposure",
")",
"if",
"self",
".",
"exposure",
"else",
"''",
")",
"+",
"(",
"' select=\"{0}\"'",
".",
"format",
"(",
"self",
".",
"select",
")",
"if",
"self",
".",
"select",
"else",
"''",
")",
"+",
"(",
"' value=\"{0}\"'",
".",
"format",
"(",
"self",
".",
"value",
")",
"if",
"self",
".",
"value",
"else",
"''",
")",
"+",
"(",
"' reduce=\"{0}\"'",
".",
"format",
"(",
"self",
".",
"reduce",
")",
"if",
"self",
".",
"reduce",
"else",
"''",
")",
"+",
"(",
"' required=\"{0}\"'",
".",
"format",
"(",
"self",
".",
"required",
")",
"if",
"self",
".",
"required",
"else",
"''",
")",
"+",
"'/>'"
] |
Exports this object into a LEMS XML object
|
[
"Exports",
"this",
"object",
"into",
"a",
"LEMS",
"XML",
"object"
] |
python
|
train
| 47.923077 |
ontio/ontology-python-sdk
|
ontology/smart_contract/neo_contract/oep4.py
|
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/smart_contract/neo_contract/oep4.py#L123-L150
|
def transfer(self, from_acct: Account, b58_to_address: str, value: int, payer_acct: Account, gas_limit: int,
gas_price: int) -> str:
"""
This interface is used to call the Transfer method in oep4
that transfers an amount of tokens from one account to another account.
:param from_acct: an Account class that sends the oep4 token.
:param b58_to_address: a base58 encoded address that receives the oep4 token.
:param value: an int value that indicates the amount of oep4 token that will be transferred in this transaction.
:param payer_acct: an Account class that is used to pay for the transaction.
:param gas_limit: an int value that indicates the gas limit.
:param gas_price: an int value that indicates the gas price.
:return: the hexadecimal transaction hash value.
"""
func = InvokeFunction('transfer')
if not isinstance(value, int):
raise SDKException(ErrorCode.param_err('the data type of value should be int.'))
if value < 0:
raise SDKException(ErrorCode.param_err('the value should be equal or great than 0.'))
if not isinstance(from_acct, Account):
raise SDKException(ErrorCode.param_err('the data type of from_acct should be Account.'))
Oep4.__b58_address_check(b58_to_address)
from_address = from_acct.get_address().to_bytes()
to_address = Address.b58decode(b58_to_address).to_bytes()
func.set_params_value(from_address, to_address, value)
tx_hash = self.__sdk.get_network().send_neo_vm_transaction(self.__hex_contract_address, from_acct, payer_acct,
gas_limit, gas_price, func, False)
return tx_hash
|
[
"def",
"transfer",
"(",
"self",
",",
"from_acct",
":",
"Account",
",",
"b58_to_address",
":",
"str",
",",
"value",
":",
"int",
",",
"payer_acct",
":",
"Account",
",",
"gas_limit",
":",
"int",
",",
"gas_price",
":",
"int",
")",
"->",
"str",
":",
"func",
"=",
"InvokeFunction",
"(",
"'transfer'",
")",
"if",
"not",
"isinstance",
"(",
"value",
",",
"int",
")",
":",
"raise",
"SDKException",
"(",
"ErrorCode",
".",
"param_err",
"(",
"'the data type of value should be int.'",
")",
")",
"if",
"value",
"<",
"0",
":",
"raise",
"SDKException",
"(",
"ErrorCode",
".",
"param_err",
"(",
"'the value should be equal or great than 0.'",
")",
")",
"if",
"not",
"isinstance",
"(",
"from_acct",
",",
"Account",
")",
":",
"raise",
"SDKException",
"(",
"ErrorCode",
".",
"param_err",
"(",
"'the data type of from_acct should be Account.'",
")",
")",
"Oep4",
".",
"__b58_address_check",
"(",
"b58_to_address",
")",
"from_address",
"=",
"from_acct",
".",
"get_address",
"(",
")",
".",
"to_bytes",
"(",
")",
"to_address",
"=",
"Address",
".",
"b58decode",
"(",
"b58_to_address",
")",
".",
"to_bytes",
"(",
")",
"func",
".",
"set_params_value",
"(",
"from_address",
",",
"to_address",
",",
"value",
")",
"tx_hash",
"=",
"self",
".",
"__sdk",
".",
"get_network",
"(",
")",
".",
"send_neo_vm_transaction",
"(",
"self",
".",
"__hex_contract_address",
",",
"from_acct",
",",
"payer_acct",
",",
"gas_limit",
",",
"gas_price",
",",
"func",
",",
"False",
")",
"return",
"tx_hash"
] |
This interface is used to call the Transfer method in oep4
that transfers an amount of tokens from one account to another account.
:param from_acct: an Account class that sends the oep4 token.
:param b58_to_address: a base58 encoded address that receives the oep4 token.
:param value: an int value that indicates the amount of oep4 token that will be transferred in this transaction.
:param payer_acct: an Account class that is used to pay for the transaction.
:param gas_limit: an int value that indicates the gas limit.
:param gas_price: an int value that indicates the gas price.
:return: the hexadecimal transaction hash value.
|
[
"This",
"interface",
"is",
"used",
"to",
"call",
"the",
"Transfer",
"method",
"in",
"ope4",
"that",
"transfer",
"an",
"amount",
"of",
"tokens",
"from",
"one",
"account",
"to",
"another",
"account",
"."
] |
python
|
train
| 62.857143 |
CxAalto/gtfspy
|
gtfspy/mapviz.py
|
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/mapviz.py#L262-L305
|
def _expand_spatial_bounds_to_fit_axes(bounds, ax_width, ax_height):
"""
Parameters
----------
bounds: dict
ax_width: float
ax_height: float
Returns
-------
spatial_bounds
"""
b = bounds
height_meters = util.wgs84_distance(b['lat_min'], b['lon_min'], b['lat_max'], b['lon_min'])
width_meters = util.wgs84_distance(b['lat_min'], b['lon_min'], b['lat_min'], b['lon_max'])
x_per_y_meters = width_meters / height_meters
x_per_y_axes = ax_width / ax_height
if x_per_y_axes > x_per_y_meters: # x-axis
# axis x_axis has slack -> the spatial longitude bounds need to be extended
width_meters_new = (height_meters * x_per_y_axes)
d_lon_new = ((b['lon_max'] - b['lon_min']) / width_meters) * width_meters_new
mean_lon = (b['lon_min'] + b['lon_max'])/2.
lon_min = mean_lon - d_lon_new / 2.
lon_max = mean_lon + d_lon_new / 2.
spatial_bounds = {
"lon_min": lon_min,
"lon_max": lon_max,
"lat_min": b['lat_min'],
"lat_max": b['lat_max']
}
else:
# axis y_axis has slack -> the spatial latitude bounds need to be extended
height_meters_new = (width_meters / x_per_y_axes)
d_lat_new = ((b['lat_max'] - b['lat_min']) / height_meters) * height_meters_new
mean_lat = (b['lat_min'] + b['lat_max']) / 2.
lat_min = mean_lat - d_lat_new / 2.
lat_max = mean_lat + d_lat_new / 2.
spatial_bounds = {
"lon_min": b['lon_min'],
"lon_max": b['lon_max'],
"lat_min": lat_min,
"lat_max": lat_max
}
return spatial_bounds
|
[
"def",
"_expand_spatial_bounds_to_fit_axes",
"(",
"bounds",
",",
"ax_width",
",",
"ax_height",
")",
":",
"b",
"=",
"bounds",
"height_meters",
"=",
"util",
".",
"wgs84_distance",
"(",
"b",
"[",
"'lat_min'",
"]",
",",
"b",
"[",
"'lon_min'",
"]",
",",
"b",
"[",
"'lat_max'",
"]",
",",
"b",
"[",
"'lon_min'",
"]",
")",
"width_meters",
"=",
"util",
".",
"wgs84_distance",
"(",
"b",
"[",
"'lat_min'",
"]",
",",
"b",
"[",
"'lon_min'",
"]",
",",
"b",
"[",
"'lat_min'",
"]",
",",
"b",
"[",
"'lon_max'",
"]",
")",
"x_per_y_meters",
"=",
"width_meters",
"/",
"height_meters",
"x_per_y_axes",
"=",
"ax_width",
"/",
"ax_height",
"if",
"x_per_y_axes",
">",
"x_per_y_meters",
":",
"# x-axis",
"# axis x_axis has slack -> the spatial longitude bounds need to be extended",
"width_meters_new",
"=",
"(",
"height_meters",
"*",
"x_per_y_axes",
")",
"d_lon_new",
"=",
"(",
"(",
"b",
"[",
"'lon_max'",
"]",
"-",
"b",
"[",
"'lon_min'",
"]",
")",
"/",
"width_meters",
")",
"*",
"width_meters_new",
"mean_lon",
"=",
"(",
"b",
"[",
"'lon_min'",
"]",
"+",
"b",
"[",
"'lon_max'",
"]",
")",
"/",
"2.",
"lon_min",
"=",
"mean_lon",
"-",
"d_lon_new",
"/",
"2.",
"lon_max",
"=",
"mean_lon",
"+",
"d_lon_new",
"/",
"2.",
"spatial_bounds",
"=",
"{",
"\"lon_min\"",
":",
"lon_min",
",",
"\"lon_max\"",
":",
"lon_max",
",",
"\"lat_min\"",
":",
"b",
"[",
"'lat_min'",
"]",
",",
"\"lat_max\"",
":",
"b",
"[",
"'lat_max'",
"]",
"}",
"else",
":",
"# axis y_axis has slack -> the spatial latitude bounds need to be extended",
"height_meters_new",
"=",
"(",
"width_meters",
"/",
"x_per_y_axes",
")",
"d_lat_new",
"=",
"(",
"(",
"b",
"[",
"'lat_max'",
"]",
"-",
"b",
"[",
"'lat_min'",
"]",
")",
"/",
"height_meters",
")",
"*",
"height_meters_new",
"mean_lat",
"=",
"(",
"b",
"[",
"'lat_min'",
"]",
"+",
"b",
"[",
"'lat_max'",
"]",
")",
"/",
"2.",
"lat_min",
"=",
"mean_lat",
"-",
"d_lat_new",
"/",
"2.",
"lat_max",
"=",
"mean_lat",
"+",
"d_lat_new",
"/",
"2.",
"spatial_bounds",
"=",
"{",
"\"lon_min\"",
":",
"b",
"[",
"'lon_min'",
"]",
",",
"\"lon_max\"",
":",
"b",
"[",
"'lon_max'",
"]",
",",
"\"lat_min\"",
":",
"lat_min",
",",
"\"lat_max\"",
":",
"lat_max",
"}",
"return",
"spatial_bounds"
] |
Parameters
----------
bounds: dict
ax_width: float
ax_height: float
Returns
-------
spatial_bounds
|
[
"Parameters",
"----------",
"bounds",
":",
"dict",
"ax_width",
":",
"float",
"ax_height",
":",
"float"
] |
python
|
valid
| 37.272727 |
JdeRobot/base
|
src/drivers/MAVLinkServer/MAVProxy/pymavlink/tools/magfit.py
|
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/tools/magfit.py#L51-L58
|
def radius_cmp(a, b, offsets):
    '''return +1 or -1 for sorting'''
diff = radius(a, offsets) - radius(b, offsets)
if diff > 0:
return 1
if diff < 0:
return -1
return 0
|
[
"def",
"radius_cmp",
"(",
"a",
",",
"b",
",",
"offsets",
")",
":",
"diff",
"=",
"radius",
"(",
"a",
",",
"offsets",
")",
"-",
"radius",
"(",
"b",
",",
"offsets",
")",
"if",
"diff",
">",
"0",
":",
"return",
"1",
"if",
"diff",
"<",
"0",
":",
"return",
"-",
"1",
"return",
"0"
] |
return +1 or -1 for sorting
|
[
"return",
"+",
"1",
"or",
"-",
"1",
"for",
"for",
"sorting"
] |
python
|
train
| 24.75 |
gtaylor/django-athumb
|
athumb/management/commands/athumb_regen_field.py
|
https://github.com/gtaylor/django-athumb/blob/69261ace0dff81e33156a54440874456a7b38dfb/athumb/management/commands/athumb_regen_field.py#L43-L119
|
def regenerate_thumbs(self):
"""
Handle re-generating the thumbnails. All this involves is reading the
original file, then saving the same exact thing. Kind of annoying, but
it's simple.
"""
Model = self.model
instances = Model.objects.all()
num_instances = instances.count()
# Filenames are keys in here, to help avoid re-genning something that
# we have already done.
regen_tracker = {}
counter = 1
for instance in instances:
file = getattr(instance, self.field)
if not file:
print "(%d/%d) ID: %d -- Skipped -- No file" % (counter,
num_instances,
instance.id)
counter += 1
continue
file_name = os.path.basename(file.name)
if regen_tracker.has_key(file_name):
print "(%d/%d) ID: %d -- Skipped -- Already re-genned %s" % (
counter,
num_instances,
instance.id,
file_name)
counter += 1
continue
# Keep them informed on the progress.
print "(%d/%d) ID: %d -- %s" % (counter, num_instances,
instance.id, file_name)
try:
fdat = file.read()
file.close()
del file.file
except IOError:
# Key didn't exist.
print "(%d/%d) ID %d -- Error -- File missing on S3" % (
counter,
num_instances,
instance.id)
counter += 1
continue
try:
file_contents = ContentFile(fdat)
except ValueError:
# This field has no file associated with it, skip it.
print "(%d/%d) ID %d -- Skipped -- No file on field)" % (
counter,
num_instances,
instance.id)
counter += 1
continue
# Saving pumps it back through the thumbnailer, if this is a
# ThumbnailField. If not, it's still pretty harmless.
try:
file.generate_thumbs(file_name, file_contents)
except IOError, e:
print "(%d/%d) ID %d -- Error -- Image may be corrupt)" % (
counter,
num_instances,
instance.id)
counter += 1
continue
regen_tracker[file_name] = True
counter += 1
|
[
"def",
"regenerate_thumbs",
"(",
"self",
")",
":",
"Model",
"=",
"self",
".",
"model",
"instances",
"=",
"Model",
".",
"objects",
".",
"all",
"(",
")",
"num_instances",
"=",
"instances",
".",
"count",
"(",
")",
"# Filenames are keys in here, to help avoid re-genning something that",
"# we have already done.",
"regen_tracker",
"=",
"{",
"}",
"counter",
"=",
"1",
"for",
"instance",
"in",
"instances",
":",
"file",
"=",
"getattr",
"(",
"instance",
",",
"self",
".",
"field",
")",
"if",
"not",
"file",
":",
"print",
"\"(%d/%d) ID: %d -- Skipped -- No file\"",
"%",
"(",
"counter",
",",
"num_instances",
",",
"instance",
".",
"id",
")",
"counter",
"+=",
"1",
"continue",
"file_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"file",
".",
"name",
")",
"if",
"regen_tracker",
".",
"has_key",
"(",
"file_name",
")",
":",
"print",
"\"(%d/%d) ID: %d -- Skipped -- Already re-genned %s\"",
"%",
"(",
"counter",
",",
"num_instances",
",",
"instance",
".",
"id",
",",
"file_name",
")",
"counter",
"+=",
"1",
"continue",
"# Keep them informed on the progress.",
"print",
"\"(%d/%d) ID: %d -- %s\"",
"%",
"(",
"counter",
",",
"num_instances",
",",
"instance",
".",
"id",
",",
"file_name",
")",
"try",
":",
"fdat",
"=",
"file",
".",
"read",
"(",
")",
"file",
".",
"close",
"(",
")",
"del",
"file",
".",
"file",
"except",
"IOError",
":",
"# Key didn't exist.",
"print",
"\"(%d/%d) ID %d -- Error -- File missing on S3\"",
"%",
"(",
"counter",
",",
"num_instances",
",",
"instance",
".",
"id",
")",
"counter",
"+=",
"1",
"continue",
"try",
":",
"file_contents",
"=",
"ContentFile",
"(",
"fdat",
")",
"except",
"ValueError",
":",
"# This field has no file associated with it, skip it.",
"print",
"\"(%d/%d) ID %d -- Skipped -- No file on field)\"",
"%",
"(",
"counter",
",",
"num_instances",
",",
"instance",
".",
"id",
")",
"counter",
"+=",
"1",
"continue",
"# Saving pumps it back through the thumbnailer, if this is a",
"# ThumbnailField. If not, it's still pretty harmless.",
"try",
":",
"file",
".",
"generate_thumbs",
"(",
"file_name",
",",
"file_contents",
")",
"except",
"IOError",
",",
"e",
":",
"print",
"\"(%d/%d) ID %d -- Error -- Image may be corrupt)\"",
"%",
"(",
"counter",
",",
"num_instances",
",",
"instance",
".",
"id",
")",
"counter",
"+=",
"1",
"continue",
"regen_tracker",
"[",
"file_name",
"]",
"=",
"True",
"counter",
"+=",
"1"
] |
Handle re-generating the thumbnails. All this involves is reading the
original file, then saving the same exact thing. Kind of annoying, but
it's simple.
|
[
"Handle",
"re",
"-",
"generating",
"the",
"thumbnails",
".",
"All",
"this",
"involves",
"is",
"reading",
"the",
"original",
"file",
"then",
"saving",
"the",
"same",
"exact",
"thing",
".",
"Kind",
"of",
"annoying",
"but",
"it",
"s",
"simple",
"."
] |
python
|
train
| 40.103896 |
ladybug-tools/ladybug
|
ladybug/futil.py
|
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/futil.py#L295-L305
|
def csv_to_num_matrix(csv_file_path):
"""Load a CSV file consisting only of numbers into a Python matrix of floats.
Args:
csv_file_path: Full path to a valid CSV file (e.g. c:/ladybug/test.csv)
"""
mtx = []
with open(csv_file_path) as csv_data_file:
for row in csv_data_file:
mtx.append([float(val) for val in row.split(',')])
return mtx
|
[
"def",
"csv_to_num_matrix",
"(",
"csv_file_path",
")",
":",
"mtx",
"=",
"[",
"]",
"with",
"open",
"(",
"csv_file_path",
")",
"as",
"csv_data_file",
":",
"for",
"row",
"in",
"csv_data_file",
":",
"mtx",
".",
"append",
"(",
"[",
"float",
"(",
"val",
")",
"for",
"val",
"in",
"row",
".",
"split",
"(",
"','",
")",
"]",
")",
"return",
"mtx"
] |
Load a CSV file consisting only of numbers into a Python matrix of floats.
Args:
csv_file_path: Full path to a valid CSV file (e.g. c:/ladybug/test.csv)
|
[
"Load",
"a",
"CSV",
"file",
"consisting",
"only",
"of",
"numbers",
"into",
"a",
"Python",
"matrix",
"of",
"floats",
"."
] |
python
|
train
| 34.545455 |
DiamondLightSource/python-workflows
|
workflows/transport/common_transport.py
|
https://github.com/DiamondLightSource/python-workflows/blob/7ef47b457655b96f4d2ef7ee9863cf1b6d20e023/workflows/transport/common_transport.py#L260-L285
|
def nack(self, message, subscription_id=None, **kwargs):
"""Reject receipt of a message. This only makes sense when the
'acknowledgement' flag was set for the relevant subscription.
:param message: ID of the message to be rejected, OR a dictionary
containing a field 'message-id'.
:param subscription_id: ID of the associated subscription. Optional when
a dictionary is passed as first parameter and
that dictionary contains field 'subscription'.
:param **kwargs: Further parameters for the transport layer. For example
transaction: Transaction ID if rejection should be part of a
transaction
"""
if isinstance(message, dict):
message_id = message.get("message-id")
if not subscription_id:
subscription_id = message.get("subscription")
else:
message_id = message
if not message_id:
raise workflows.Error("Cannot reject message without " + "message ID")
if not subscription_id:
raise workflows.Error("Cannot reject message without " + "subscription ID")
self.log.debug(
"Rejecting message %s on subscription %s", message_id, subscription_id
)
self._nack(message_id, subscription_id, **kwargs)
|
[
"def",
"nack",
"(",
"self",
",",
"message",
",",
"subscription_id",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"message",
",",
"dict",
")",
":",
"message_id",
"=",
"message",
".",
"get",
"(",
"\"message-id\"",
")",
"if",
"not",
"subscription_id",
":",
"subscription_id",
"=",
"message",
".",
"get",
"(",
"\"subscription\"",
")",
"else",
":",
"message_id",
"=",
"message",
"if",
"not",
"message_id",
":",
"raise",
"workflows",
".",
"Error",
"(",
"\"Cannot reject message without \"",
"+",
"\"message ID\"",
")",
"if",
"not",
"subscription_id",
":",
"raise",
"workflows",
".",
"Error",
"(",
"\"Cannot reject message without \"",
"+",
"\"subscription ID\"",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"Rejecting message %s on subscription %s\"",
",",
"message_id",
",",
"subscription_id",
")",
"self",
".",
"_nack",
"(",
"message_id",
",",
"subscription_id",
",",
"*",
"*",
"kwargs",
")"
] |
Reject receipt of a message. This only makes sense when the
'acknowledgement' flag was set for the relevant subscription.
:param message: ID of the message to be rejected, OR a dictionary
containing a field 'message-id'.
:param subscription_id: ID of the associated subscription. Optional when
a dictionary is passed as first parameter and
that dictionary contains field 'subscription'.
:param **kwargs: Further parameters for the transport layer. For example
transaction: Transaction ID if rejection should be part of a
transaction
|
[
"Reject",
"receipt",
"of",
"a",
"message",
".",
"This",
"only",
"makes",
"sense",
"when",
"the",
"acknowledgement",
"flag",
"was",
"set",
"for",
"the",
"relevant",
"subscription",
".",
":",
"param",
"message",
":",
"ID",
"of",
"the",
"message",
"to",
"be",
"rejected",
"OR",
"a",
"dictionary",
"containing",
"a",
"field",
"message",
"-",
"id",
".",
":",
"param",
"subscription_id",
":",
"ID",
"of",
"the",
"associated",
"subscription",
".",
"Optional",
"when",
"a",
"dictionary",
"is",
"passed",
"as",
"first",
"parameter",
"and",
"that",
"dictionary",
"contains",
"field",
"subscription",
".",
":",
"param",
"**",
"kwargs",
":",
"Further",
"parameters",
"for",
"the",
"transport",
"layer",
".",
"For",
"example",
"transaction",
":",
"Transaction",
"ID",
"if",
"rejection",
"should",
"be",
"part",
"of",
"a",
"transaction"
] |
python
|
train
| 53.423077 |
scot-dev/scot
|
scot/external/infomax_.py
|
https://github.com/scot-dev/scot/blob/48598b79d4400dad893b134cd2194715511facda/scot/external/infomax_.py#L22-L281
|
def infomax(data, weights=None, l_rate=None, block=None, w_change=1e-12,
anneal_deg=60., anneal_step=0.9, extended=False, n_subgauss=1,
kurt_size=6000, ext_blocks=1, max_iter=200,
random_state=None, verbose=None):
"""Run the (extended) Infomax ICA decomposition on raw data
based on the publications of Bell & Sejnowski 1995 (Infomax)
and Lee, Girolami & Sejnowski, 1999 (extended Infomax)
Parameters
----------
data : np.ndarray, shape (n_samples, n_features)
The data to unmix.
w_init : np.ndarray, shape (n_features, n_features)
The initialized unmixing matrix. Defaults to None. If None, the
identity matrix is used.
l_rate : float
This quantity indicates the relative size of the change in weights.
Note: Smaller learning rates will slow down the procedure.
Defaults to 0.010d / alog(n_features ^ 2.0)
block : int
The block size of randomly chosen data segment.
Defaults to floor(sqrt(n_times / 3d))
w_change : float
The change at which to stop iteration. Defaults to 1e-12.
anneal_deg : float
The angle at which (in degree) the learning rate will be reduced.
Defaults to 60.0
anneal_step : float
The factor by which the learning rate will be reduced once
``anneal_deg`` is exceeded:
l_rate *= anneal_step
Defaults to 0.9
extended : bool
Whether to use the extended infomax algorithm or not. Defaults to
True.
n_subgauss : int
The number of subgaussian components. Only considered for extended
Infomax.
kurt_size : int
The window size for kurtosis estimation. Only considered for extended
Infomax.
ext_blocks : int
The number of blocks after which to recompute Kurtosis.
Only considered for extended Infomax.
max_iter : int
The maximum number of iterations. Defaults to 200.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
unmixing_matrix : np.ndarray of float, shape (n_features, n_features)
The linear unmixing operator.
"""
rng = check_random_state(random_state)
# define some default parameter
max_weight = 1e8
restart_fac = 0.9
min_l_rate = 1e-10
blowup = 1e4
blowup_fac = 0.5
n_small_angle = 20
degconst = 180.0 / np.pi
# for extended Infomax
extmomentum = 0.5
signsbias = 0.02
signcount_threshold = 25
signcount_step = 2
if ext_blocks > 0: # allow not to recompute kurtosis
n_subgauss = 1 # but initialize n_subgauss to 1 if you recompute
# check data shape
n_samples, n_features = data.shape
n_features_square = n_features ** 2
# check input parameter
# heuristic default - may need adjustment for
# large or tiny data sets
if l_rate is None:
l_rate = 0.01 / math.log(n_features ** 2.0)
if block is None:
block = int(math.floor(math.sqrt(n_samples / 3.0)))
logger.info('computing%sInfomax ICA' % ' Extended ' if extended is True
else ' ')
# collect parameter
nblock = n_samples // block
lastt = (nblock - 1) * block + 1
# initialize training
if weights is None:
# initialize weights as identity matrix
weights = np.identity(n_features, dtype=np.float64)
BI = block * np.identity(n_features, dtype=np.float64)
bias = np.zeros((n_features, 1), dtype=np.float64)
onesrow = np.ones((1, block), dtype=np.float64)
startweights = weights.copy()
oldweights = startweights.copy()
step = 0
count_small_angle = 0
wts_blowup = False
blockno = 0
signcount = 0
# for extended Infomax
if extended is True:
signs = np.identity(n_features)
signs.flat[slice(0, n_features * n_subgauss, n_features)]
kurt_size = min(kurt_size, n_samples)
old_kurt = np.zeros(n_features, dtype=np.float64)
oldsigns = np.zeros((n_features, n_features))
# trainings loop
olddelta, oldchange = 1., 0.
while step < max_iter:
# shuffle data at each step
permute = list(range(n_samples))
rng.shuffle(permute)
# ICA training block
# loop across block samples
for t in range(0, lastt, block):
u = np.dot(data[permute[t:t + block], :], weights)
u += np.dot(bias, onesrow).T
if extended is True:
# extended ICA update
y = np.tanh(u)
weights += l_rate * np.dot(weights,
BI - np.dot(np.dot(u.T, y), signs) -
np.dot(u.T, u))
bias += l_rate * np.reshape(np.sum(y, axis=0,
dtype=np.float64) * -2.0,
(n_features, 1))
else:
# logistic ICA weights update
y = 1.0 / (1.0 + np.exp(-u))
weights += l_rate * np.dot(weights,
BI + np.dot(u.T, (1.0 - 2.0 * y)))
bias += l_rate * np.reshape(np.sum((1.0 - 2.0 * y), axis=0,
dtype=np.float64), (n_features, 1))
# check change limit
max_weight_val = np.max(np.abs(weights))
if max_weight_val > max_weight:
wts_blowup = True
blockno += 1
if wts_blowup:
break
# ICA kurtosis estimation
if extended is True:
n = np.fix(blockno / ext_blocks)
if np.abs(n) * ext_blocks == blockno:
if kurt_size < n_samples:
rp = np.floor(rng.uniform(0, 1, kurt_size) *
(n_samples - 1))
tpartact = np.dot(data[rp.astype(int), :], weights).T
else:
tpartact = np.dot(data, weights).T
# estimate kurtosis
kurt = kurtosis(tpartact, axis=1, fisher=True)
if extmomentum != 0:
kurt = (extmomentum * old_kurt +
(1.0 - extmomentum) * kurt)
old_kurt = kurt
# estimate weighted signs
signs.flat[::n_features + 1] = ((kurt + signsbias) /
np.abs(kurt + signsbias))
ndiff = ((signs.flat[::n_features + 1] -
oldsigns.flat[::n_features + 1]) != 0).sum()
if ndiff == 0:
signcount += 1
else:
signcount = 0
oldsigns = signs
if signcount >= signcount_threshold:
ext_blocks = np.fix(ext_blocks * signcount_step)
signcount = 0
# here we continue after the for
# loop over the ICA training blocks
# if weights in bounds:
if not wts_blowup:
oldwtchange = weights - oldweights
step += 1
angledelta = 0.0
delta = oldwtchange.reshape(1, n_features_square)
change = np.sum(delta * delta, dtype=np.float64)
if step > 1:
angledelta = math.acos(np.sum(delta * olddelta) /
math.sqrt(change * oldchange))
angledelta *= degconst
# anneal learning rate
oldweights = weights.copy()
if angledelta > anneal_deg:
l_rate *= anneal_step # anneal learning rate
# accumulate angledelta until anneal_deg reached l_rates
olddelta = delta
oldchange = change
count_small_angle = 0 # reset count when angle delta is large
else:
if step == 1: # on first step only
olddelta = delta # initialize
oldchange = change
count_small_angle += 1
if count_small_angle > n_small_angle:
max_iter = step
# apply stopping rule
if step > 2 and change < w_change:
step = max_iter
elif change > blowup:
l_rate *= blowup_fac
# restart if weights blow up
# (for lowering l_rate)
else:
step = 0 # start again
wts_blowup = 0 # re-initialize variables
blockno = 1
l_rate *= restart_fac # with lower learning rate
weights = startweights.copy()
oldweights = startweights.copy()
olddelta = np.zeros((1, n_features_square), dtype=np.float64)
bias = np.zeros((n_features, 1), dtype=np.float64)
# for extended Infomax
if extended:
signs = np.identity(n_features)
signs.flat[slice(0, n_features * n_subgauss, n_features)] = -1  # mark subgaussian components
oldsigns = np.zeros((n_features, n_features))
if l_rate > min_l_rate:
if verbose:
logger.info('... lowering learning rate to %g'
'\n... re-starting...' % l_rate)
else:
raise ValueError('Error in Infomax ICA: unmixing_matrix '
                 'might not be invertible!')
# prepare return values
return weights.T
|
[
"def",
"infomax",
"(",
"data",
",",
"weights",
"=",
"None",
",",
"l_rate",
"=",
"None",
",",
"block",
"=",
"None",
",",
"w_change",
"=",
"1e-12",
",",
"anneal_deg",
"=",
"60.",
",",
"anneal_step",
"=",
"0.9",
",",
"extended",
"=",
"False",
",",
"n_subgauss",
"=",
"1",
",",
"kurt_size",
"=",
"6000",
",",
"ext_blocks",
"=",
"1",
",",
"max_iter",
"=",
"200",
",",
"random_state",
"=",
"None",
",",
"verbose",
"=",
"None",
")",
":",
"rng",
"=",
"check_random_state",
"(",
"random_state",
")",
"# define some default parameter",
"max_weight",
"=",
"1e8",
"restart_fac",
"=",
"0.9",
"min_l_rate",
"=",
"1e-10",
"blowup",
"=",
"1e4",
"blowup_fac",
"=",
"0.5",
"n_small_angle",
"=",
"20",
"degconst",
"=",
"180.0",
"/",
"np",
".",
"pi",
"# for extended Infomax",
"extmomentum",
"=",
"0.5",
"signsbias",
"=",
"0.02",
"signcount_threshold",
"=",
"25",
"signcount_step",
"=",
"2",
"if",
"ext_blocks",
">",
"0",
":",
"# allow not to recompute kurtosis",
"n_subgauss",
"=",
"1",
"# but initialize n_subgauss to 1 if you recompute",
"# check data shape",
"n_samples",
",",
"n_features",
"=",
"data",
".",
"shape",
"n_features_square",
"=",
"n_features",
"**",
"2",
"# check input parameter",
"# heuristic default - may need adjustment for",
"# large or tiny data sets",
"if",
"l_rate",
"is",
"None",
":",
"l_rate",
"=",
"0.01",
"/",
"math",
".",
"log",
"(",
"n_features",
"**",
"2.0",
")",
"if",
"block",
"is",
"None",
":",
"block",
"=",
"int",
"(",
"math",
".",
"floor",
"(",
"math",
".",
"sqrt",
"(",
"n_samples",
"/",
"3.0",
")",
")",
")",
"logger",
".",
"info",
"(",
"'computing%sInfomax ICA'",
"%",
"' Extended '",
"if",
"extended",
"is",
"True",
"else",
"' '",
")",
"# collect parameter",
"nblock",
"=",
"n_samples",
"//",
"block",
"lastt",
"=",
"(",
"nblock",
"-",
"1",
")",
"*",
"block",
"+",
"1",
"# initialize training",
"if",
"weights",
"is",
"None",
":",
"# initialize weights as identity matrix",
"weights",
"=",
"np",
".",
"identity",
"(",
"n_features",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"BI",
"=",
"block",
"*",
"np",
".",
"identity",
"(",
"n_features",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"bias",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_features",
",",
"1",
")",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"onesrow",
"=",
"np",
".",
"ones",
"(",
"(",
"1",
",",
"block",
")",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"startweights",
"=",
"weights",
".",
"copy",
"(",
")",
"oldweights",
"=",
"startweights",
".",
"copy",
"(",
")",
"step",
"=",
"0",
"count_small_angle",
"=",
"0",
"wts_blowup",
"=",
"False",
"blockno",
"=",
"0",
"signcount",
"=",
"0",
"# for extended Infomax",
"if",
"extended",
"is",
"True",
":",
"signs",
"=",
"np",
".",
"identity",
"(",
"n_features",
")",
"signs",
".",
"flat",
"[",
"slice",
"(",
"0",
",",
"n_features",
"*",
"n_subgauss",
",",
"n_features",
")",
"]",
"kurt_size",
"=",
"min",
"(",
"kurt_size",
",",
"n_samples",
")",
"old_kurt",
"=",
"np",
".",
"zeros",
"(",
"n_features",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"oldsigns",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_features",
",",
"n_features",
")",
")",
"# trainings loop",
"olddelta",
",",
"oldchange",
"=",
"1.",
",",
"0.",
"while",
"step",
"<",
"max_iter",
":",
"# shuffle data at each step",
"permute",
"=",
"list",
"(",
"range",
"(",
"n_samples",
")",
")",
"rng",
".",
"shuffle",
"(",
"permute",
")",
"# ICA training block",
"# loop across block samples",
"for",
"t",
"in",
"range",
"(",
"0",
",",
"lastt",
",",
"block",
")",
":",
"u",
"=",
"np",
".",
"dot",
"(",
"data",
"[",
"permute",
"[",
"t",
":",
"t",
"+",
"block",
"]",
",",
":",
"]",
",",
"weights",
")",
"u",
"+=",
"np",
".",
"dot",
"(",
"bias",
",",
"onesrow",
")",
".",
"T",
"if",
"extended",
"is",
"True",
":",
"# extended ICA update",
"y",
"=",
"np",
".",
"tanh",
"(",
"u",
")",
"weights",
"+=",
"l_rate",
"*",
"np",
".",
"dot",
"(",
"weights",
",",
"BI",
"-",
"np",
".",
"dot",
"(",
"np",
".",
"dot",
"(",
"u",
".",
"T",
",",
"y",
")",
",",
"signs",
")",
"-",
"np",
".",
"dot",
"(",
"u",
".",
"T",
",",
"u",
")",
")",
"bias",
"+=",
"l_rate",
"*",
"np",
".",
"reshape",
"(",
"np",
".",
"sum",
"(",
"y",
",",
"axis",
"=",
"0",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"*",
"-",
"2.0",
",",
"(",
"n_features",
",",
"1",
")",
")",
"else",
":",
"# logistic ICA weights update",
"y",
"=",
"1.0",
"/",
"(",
"1.0",
"+",
"np",
".",
"exp",
"(",
"-",
"u",
")",
")",
"weights",
"+=",
"l_rate",
"*",
"np",
".",
"dot",
"(",
"weights",
",",
"BI",
"+",
"np",
".",
"dot",
"(",
"u",
".",
"T",
",",
"(",
"1.0",
"-",
"2.0",
"*",
"y",
")",
")",
")",
"bias",
"+=",
"l_rate",
"*",
"np",
".",
"reshape",
"(",
"np",
".",
"sum",
"(",
"(",
"1.0",
"-",
"2.0",
"*",
"y",
")",
",",
"axis",
"=",
"0",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
",",
"(",
"n_features",
",",
"1",
")",
")",
"# check change limit",
"max_weight_val",
"=",
"np",
".",
"max",
"(",
"np",
".",
"abs",
"(",
"weights",
")",
")",
"if",
"max_weight_val",
">",
"max_weight",
":",
"wts_blowup",
"=",
"True",
"blockno",
"+=",
"1",
"if",
"wts_blowup",
":",
"break",
"# ICA kurtosis estimation",
"if",
"extended",
"is",
"True",
":",
"n",
"=",
"np",
".",
"fix",
"(",
"blockno",
"/",
"ext_blocks",
")",
"if",
"np",
".",
"abs",
"(",
"n",
")",
"*",
"ext_blocks",
"==",
"blockno",
":",
"if",
"kurt_size",
"<",
"n_samples",
":",
"rp",
"=",
"np",
".",
"floor",
"(",
"rng",
".",
"uniform",
"(",
"0",
",",
"1",
",",
"kurt_size",
")",
"*",
"(",
"n_samples",
"-",
"1",
")",
")",
"tpartact",
"=",
"np",
".",
"dot",
"(",
"data",
"[",
"rp",
".",
"astype",
"(",
"int",
")",
",",
":",
"]",
",",
"weights",
")",
".",
"T",
"else",
":",
"tpartact",
"=",
"np",
".",
"dot",
"(",
"data",
",",
"weights",
")",
".",
"T",
"# estimate kurtosis",
"kurt",
"=",
"kurtosis",
"(",
"tpartact",
",",
"axis",
"=",
"1",
",",
"fisher",
"=",
"True",
")",
"if",
"extmomentum",
"!=",
"0",
":",
"kurt",
"=",
"(",
"extmomentum",
"*",
"old_kurt",
"+",
"(",
"1.0",
"-",
"extmomentum",
")",
"*",
"kurt",
")",
"old_kurt",
"=",
"kurt",
"# estimate weighted signs",
"signs",
".",
"flat",
"[",
":",
":",
"n_features",
"+",
"1",
"]",
"=",
"(",
"(",
"kurt",
"+",
"signsbias",
")",
"/",
"np",
".",
"abs",
"(",
"kurt",
"+",
"signsbias",
")",
")",
"ndiff",
"=",
"(",
"(",
"signs",
".",
"flat",
"[",
":",
":",
"n_features",
"+",
"1",
"]",
"-",
"oldsigns",
".",
"flat",
"[",
":",
":",
"n_features",
"+",
"1",
"]",
")",
"!=",
"0",
")",
".",
"sum",
"(",
")",
"if",
"ndiff",
"==",
"0",
":",
"signcount",
"+=",
"1",
"else",
":",
"signcount",
"=",
"0",
"oldsigns",
"=",
"signs",
"if",
"signcount",
">=",
"signcount_threshold",
":",
"ext_blocks",
"=",
"np",
".",
"fix",
"(",
"ext_blocks",
"*",
"signcount_step",
")",
"signcount",
"=",
"0",
"# here we continue after the for",
"# loop over the ICA training blocks",
"# if weights in bounds:",
"if",
"not",
"wts_blowup",
":",
"oldwtchange",
"=",
"weights",
"-",
"oldweights",
"step",
"+=",
"1",
"angledelta",
"=",
"0.0",
"delta",
"=",
"oldwtchange",
".",
"reshape",
"(",
"1",
",",
"n_features_square",
")",
"change",
"=",
"np",
".",
"sum",
"(",
"delta",
"*",
"delta",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"if",
"step",
">",
"1",
":",
"angledelta",
"=",
"math",
".",
"acos",
"(",
"np",
".",
"sum",
"(",
"delta",
"*",
"olddelta",
")",
"/",
"math",
".",
"sqrt",
"(",
"change",
"*",
"oldchange",
")",
")",
"angledelta",
"*=",
"degconst",
"# anneal learning rate",
"oldweights",
"=",
"weights",
".",
"copy",
"(",
")",
"if",
"angledelta",
">",
"anneal_deg",
":",
"l_rate",
"*=",
"anneal_step",
"# anneal learning rate",
"# accumulate angledelta until anneal_deg reached l_rates",
"olddelta",
"=",
"delta",
"oldchange",
"=",
"change",
"count_small_angle",
"=",
"0",
"# reset count when angle delta is large",
"else",
":",
"if",
"step",
"==",
"1",
":",
"# on first step only",
"olddelta",
"=",
"delta",
"# initialize",
"oldchange",
"=",
"change",
"count_small_angle",
"+=",
"1",
"if",
"count_small_angle",
">",
"n_small_angle",
":",
"max_iter",
"=",
"step",
"# apply stopping rule",
"if",
"step",
">",
"2",
"and",
"change",
"<",
"w_change",
":",
"step",
"=",
"max_iter",
"elif",
"change",
">",
"blowup",
":",
"l_rate",
"*=",
"blowup_fac",
"# restart if weights blow up",
"# (for lowering l_rate)",
"else",
":",
"step",
"=",
"0",
"# start again",
"wts_blowup",
"=",
"0",
"# re-initialize variables",
"blockno",
"=",
"1",
"l_rate",
"*=",
"restart_fac",
"# with lower learning rate",
"weights",
"=",
"startweights",
".",
"copy",
"(",
")",
"oldweights",
"=",
"startweights",
".",
"copy",
"(",
")",
"olddelta",
"=",
"np",
".",
"zeros",
"(",
"(",
"1",
",",
"n_features_square",
")",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"bias",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_features",
",",
"1",
")",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"# for extended Infomax",
"if",
"extended",
":",
"signs",
"=",
"np",
".",
"identity",
"(",
"n_features",
")",
"signs",
".",
"flat",
"[",
"slice",
"(",
"0",
",",
"n_features",
"*",
"n_subgauss",
",",
"n_features",
")",
"]",
"oldsigns",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_features",
",",
"n_features",
")",
")",
"if",
"l_rate",
">",
"min_l_rate",
":",
"if",
"verbose",
":",
"logger",
".",
"info",
"(",
"'... lowering learning rate to %g'",
"'\\n... re-starting...'",
"%",
"l_rate",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Error in Infomax ICA: unmixing_matrix matrix'",
"'might not be invertible!'",
")",
"# prepare return values",
"return",
"weights",
".",
"T"
] |
Run the (extended) Infomax ICA decomposition on raw data
based on the publications of Bell & Sejnowski 1995 (Infomax)
and Lee, Girolami & Sejnowski, 1999 (extended Infomax)
Parameters
----------
data : np.ndarray, shape (n_samples, n_features)
The data to unmix.
weights : np.ndarray, shape (n_features, n_features)
The initial unmixing matrix (the starting weights). Defaults to None.
If None, the identity matrix is used.
l_rate : float
This quantity indicates the relative size of the change in weights.
Note: smaller learning rates will slow down the procedure.
Defaults to ``0.01 / log(n_features ** 2.0)``.
block : int
The block size of randomly chosen data segments.
Defaults to ``floor(sqrt(n_samples / 3.0))``.
w_change : float
The change at which to stop iteration. Defaults to 1e-12.
anneal_deg : float
The angle (in degrees) at which the learning rate will be reduced.
Defaults to 60.0
anneal_step : float
The factor by which the learning rate will be reduced once
``anneal_deg`` is exceeded:
l_rate *= anneal_step
Defaults to 0.9
extended : bool
Whether to use the extended Infomax algorithm or not. Defaults to
False.
n_subgauss : int
The number of subgaussian components. Only considered for extended
Infomax.
kurt_size : int
The window size for kurtosis estimation. Only considered for extended
Infomax.
ext_blocks : int
The number of blocks after which to recompute the kurtosis.
Only considered for extended Infomax.
max_iter : int
The maximum number of iterations. Defaults to 200.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
unmixing_matrix : np.ndarray of float, shape (n_features, n_features)
The linear unmixing operator.
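Notes
-----
A rough, hypothetical sketch of the default heuristics and the annealing
rule described above (the data sizes and the angle are made up):

>>> import math
>>> n_features, n_samples = 32, 10000  # hypothetical data dimensions
>>> l_rate = 0.01 / math.log(n_features ** 2.0)  # default learning rate
>>> block = int(math.floor(math.sqrt(n_samples / 3.0)))  # default block size
>>> anneal_deg, anneal_step = 60.0, 0.9
>>> angledelta = 72.5  # hypothetical angle between successive weight updates
>>> if angledelta > anneal_deg:  # anneal: reduce the learning rate
...     l_rate *= anneal_step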
|
[
"Run",
"the",
"(",
"extended",
")",
"Infomax",
"ICA",
"decomposition",
"on",
"raw",
"data"
] |
python
|
train
| 36.307692 |