repo (string, 7–54 chars) | path (string, 4–192 chars) | url (string, 87–284 chars) | code (string, 78–104k chars) | docstring (string, 1–46.9k chars) | language (1 class: python) | partition (3 classes)
---|---|---|---|---|---|---
abseil/abseil-py
|
absl/flags/argparse_flags.py
|
https://github.com/abseil/abseil-py/blob/9d73fdaa23a6b6726aa5731390f388c0c6250ee5/absl/flags/argparse_flags.py#L212-L235
|
def _define_absl_flag(self, flag_instance, suppress):
"""Defines a flag from the flag_instance."""
flag_name = flag_instance.name
short_name = flag_instance.short_name
argument_names = ['--' + flag_name]
if short_name:
argument_names.insert(0, '-' + short_name)
if suppress:
helptext = argparse.SUPPRESS
else:
# argparse help string uses %-formatting. Escape the literal %'s.
helptext = flag_instance.help.replace('%', '%%')
if flag_instance.boolean:
# Only add the `no` form to the long name.
argument_names.append('--no' + flag_name)
self.add_argument(
*argument_names, action=_BooleanFlagAction, help=helptext,
metavar=flag_instance.name.upper(),
flag_instance=flag_instance)
else:
self.add_argument(
*argument_names, action=_FlagAction, help=helptext,
metavar=flag_instance.name.upper(),
flag_instance=flag_instance)
|
Defines a flag from the flag_instance.
|
python
|
train
|
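A note on the boolean branch above: absl registers both spellings, `--name` and `--noname`, under one shared action. Below is a minimal stand-alone sketch of that pattern using plain argparse; `_BoolAction` here is a stand-in for absl's internal `_BooleanFlagAction`, not the real class.

```python
import argparse

class _BoolAction(argparse.Action):
    """Stand-in for absl's _BooleanFlagAction: the matched option spelling
    tells us whether the negated form was used."""
    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, not option_string.startswith('--no'))

parser = argparse.ArgumentParser()
parser.add_argument('--verbose', '--noverbose', dest='verbose', nargs=0,
                    action=_BoolAction, default=False)
print(parser.parse_args(['--verbose']).verbose)    # True
print(parser.parse_args(['--noverbose']).verbose)  # False
```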
iotile/coretools
|
transport_plugins/native_ble/iotile_transport_native_ble/device_adapter.py
|
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/native_ble/iotile_transport_native_ble/device_adapter.py#L318-L352
|
def _on_services_probed(self, success, result, failure_reason, context):
"""Callback called when the services has been probed.
It is executed in the baBLE working thread: should not be blocking.
Args:
success (bool): A bool indicating that the operation is successful or not
result (dict): Information probed (if successful)
- services (list): The list of services probed (bable_interface.Service instances)
failure_reason (any): An object indicating the reason why the operation is not successful (else None)
"""
connection_id = context['connection_id']
if not success:
self._logger.error("Error while probing services to the device, err=%s", failure_reason)
context['failure_reason'] = "Error while probing services"
self.disconnect_async(connection_id, self._on_connection_failed)
return
services = {service: {} for service in result['services']}
# Validate that this is a proper IOTile device
if TileBusService not in services:
context['failure_reason'] = 'TileBus service not present in GATT services'
self.disconnect_async(connection_id, self._on_connection_failed)
return
context['services'] = services
# Finally, probe GATT characteristics
self.bable.probe_characteristics(
connection_handle=context['connection_handle'],
start_handle=TileBusService.handle,
end_handle=TileBusService.group_end_handle,
on_characteristics_probed=[self._on_characteristics_probed, context]
)
|
Callback called when the services have been probed.
It is executed in the baBLE working thread, so it should not block.
Args:
success (bool): A bool indicating whether the operation was successful
result (dict): Information probed (if successful)
- services (list): The list of services probed (bable_interface.Service instances)
failure_reason (any): The reason why the operation failed (None on success)
|
python
|
train
|
davidfokkema/artist
|
demo/demo_fourier_with_legend.py
|
https://github.com/davidfokkema/artist/blob/26ae7987522622710f2910980770c50012fda47d/demo/demo_fourier_with_legend.py#L26-L32
|
def fourier(x, N):
"""Fourier approximation with N terms"""
term = 0.
for n in range(1, N, 2):
term += (1. / n) * math.sin(n * math.pi * x / L)
return (4. / (math.pi)) * term
|
Fourier approximation with N terms
|
python
|
train
|
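Since the demo reads the half-period `L` from module scope, a self-contained run has to supply it. A minimal sketch, assuming `L = 1.0` (the demo module defines its own value):

```python
import math

L = 1.0  # assumed half-period; the demo module defines its own

def fourier(x, N):
    """Fourier approximation with N terms (copied from the sample above)."""
    term = 0.
    for n in range(1, N, 2):
        term += (1. / n) * math.sin(n * math.pi * x / L)
    return (4. / math.pi) * term

# Summing odd harmonics approximates a square wave of amplitude 1.
print(fourier(0.5, 99))  # ~1.0 at x = L/2, converging as N grows
```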
cltrudeau/django-awl
|
awl/admintools.py
|
https://github.com/cltrudeau/django-awl/blob/70d469ef9a161c1170b53aa017cf02d7c15eb90c/awl/admintools.py#L12-L32
|
def admin_obj_link(obj, display=''):
"""Returns a link to the django admin change list with a filter set to
only the object given.
:param obj:
Object to create the admin change list display link for
:param display:
Text to display in the link. Defaults to string call of the object
:returns:
Text containing HTML for a link
"""
# get the url for the change list for this object
url = reverse('admin:%s_%s_changelist' % (obj._meta.app_label,
obj._meta.model_name))
url += '?id__exact=%s' % obj.id
text = str(obj)
if display:
text = display
return format_html('<a href="{}">{}</a>', url, text)
|
Returns a link to the django admin change list with a filter set to
only the object given.
:param obj:
Object to create the admin change list display link for
:param display:
Text to display in the link. Defaults to string call of the object
:returns:
Text containing HTML for a link
|
python
|
valid
|
peterldowns/djoauth2
|
djoauth2/helpers.py
|
https://github.com/peterldowns/djoauth2/blob/151c7619d1d7a91d720397cfecf3a29fcc9747a9/djoauth2/helpers.py#L51-L90
|
def update_parameters(url, parameters, encoding='utf8'):
""" Updates a URL's existing GET parameters.
:param url: a base URL to which to add additional parameters.
:param parameters: a dictionary of parameters, any mix of
unicode and string objects as the parameters and the values.
:parameter encoding: the byte encoding to use when passed unicode
for the base URL or for keys and values of the parameters dict. This
is necessary because `urllib.urlencode` calls the `str()` function on all of
its inputs. This raises a `UnicodeDecodeError` when it encounters a
unicode string with characters outside of the default ASCII charset.
:rtype: a string URL.
"""
# Convert the base URL to the default encoding.
if isinstance(url, unicode):
url = url.encode(encoding)
parsed_url = urlparse.urlparse(url)
existing_query_parameters = urlparse.parse_qsl(parsed_url.query)
# Convert unicode parameters to the default encoding.
byte_parameters = []
for key, value in (existing_query_parameters + parameters.items()):
if isinstance(key, unicode):
key = key.encode(encoding)
if isinstance(value, unicode):
value = value.encode(encoding)
byte_parameters.append((key, value))
# Generate the final URL with all of the updated parameters. Read
# http://docs.python.org/2/library/urlparse.html#urlparse.urlparse if this is
# confusing.
return urlparse.urlunparse((
parsed_url.scheme,
parsed_url.netloc,
parsed_url.path,
parsed_url.params,
urlencode(byte_parameters),
parsed_url.fragment
))
|
Updates a URL's existing GET parameters.
:param url: a base URL to which to add additional parameters.
:param parameters: a dictionary of parameters, any mix of
unicode and string objects as the parameters and the values.
:parameter encoding: the byte encoding to use when passed unicode
for the base URL or for keys and values of the parameters dict. This
is necessary because `urllib.urlencode` calls the `str()` function on all of
its inputs. This raises a `UnicodeDecodeError` when it encounters a
unicode string with characters outside of the default ASCII charset.
:rtype: a string URL.
|
python
|
train
|
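The sample above is Python 2 code: `unicode`, the `urlparse` module, and the manual `encode()` calls all date it. For reference, a rough Python 3 sketch of the same idea; `urllib.parse` replaces `urlparse`/`urlencode`, and the byte-encoding juggling disappears because `str` is already unicode:

```python
from urllib.parse import urlparse, parse_qsl, urlencode, urlunparse

def update_parameters(url, parameters):
    # Merge existing query parameters with the new ones, then rebuild the URL.
    parsed = urlparse(url)
    query = parse_qsl(parsed.query) + list(parameters.items())
    return urlunparse(parsed._replace(query=urlencode(query)))

print(update_parameters('http://example.com/path?a=1', {'b': '2'}))
# http://example.com/path?a=1&b=2
```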
googledatalab/pydatalab
|
datalab/bigquery/_query_job.py
|
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_query_job.py#L63-L99
|
def wait(self, timeout=None):
""" Wait for the job to complete, or a timeout to happen.
This is more efficient than the version in the base Job class, in that we can
use a call that blocks for the poll duration rather than a sleep. That means we
shouldn't block unnecessarily long and can also poll less.
Args:
timeout: how long to wait (in seconds) before giving up; default None which means no timeout.
Returns:
The QueryJob
"""
poll = 30
while not self._is_complete:
try:
query_result = self._api.jobs_query_results(self._job_id,
project_id=self._context.project_id,
page_size=0,
timeout=poll * 1000)
except Exception as e:
raise e
if query_result['jobComplete']:
if 'totalBytesProcessed' in query_result:
self._bytes_processed = int(query_result['totalBytesProcessed'])
self._cache_hit = query_result.get('cacheHit', None)
if 'totalRows' in query_result:
self._total_rows = int(query_result['totalRows'])
break
if timeout is not None:
timeout -= poll
if timeout <= 0:
break
self._refresh_state()
return self
|
Wait for the job to complete, or a timeout to happen.
This is more efficient than the version in the base Job class, in that we can
use a call that blocks for the poll duration rather than a sleep. That means we
shouldn't block unnecessarily long and can also poll less.
Args:
timeout: how long to wait (in seconds) before giving up; default None which means no timeout.
Returns:
The QueryJob
|
python
|
train
|
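One detail worth isolating from `wait()`: because each poll blocks for `poll` seconds, the timeout is decremented per iteration, so the budget is only approximate. A tiny sketch of just that bookkeeping, with the blocking `jobs_query_results` call faked by a counter:

```python
poll, timeout = 30, 70
elapsed = 0
while True:
    elapsed += poll          # stands in for the 30 s blocking poll
    if timeout is not None:
        timeout -= poll
        if timeout <= 0:
            break
print(elapsed)  # 90 -- a 70 s budget still allows three 30 s polls
```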
seanpar203/event-bus
|
event_bus/bus.py
|
https://github.com/seanpar203/event-bus/blob/60319b9eb4e38c348e80f3ec625312eda75da765/event_bus/bus.py#L202-L212
|
def _event_funcs(self, event: str) -> Iterable[Callable]:
""" Returns an Iterable of the functions subscribed to a event.
:param event: Name of the event.
:type event: str
:return: An iterable to do things with.
:rtype: Iterable
"""
for func in self._events[event]:
yield func
|
Returns an Iterable of the functions subscribed to an event.
:param event: Name of the event.
:type event: str
:return: An iterable to do things with.
:rtype: Iterable
|
python
|
train
|
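To see how the generator is consumed, here is a toy bus with the same `_event_funcs` body; the real `EventBus` class stores `self._events` as a mapping from event name to subscribed callables, which a `defaultdict(set)` approximates:

```python
from collections import defaultdict
from typing import Callable, Iterable

class Bus:
    def __init__(self):
        self._events = defaultdict(set)  # event name -> set of callables

    def _event_funcs(self, event: str) -> Iterable[Callable]:
        for func in self._events[event]:
            yield func

bus = Bus()
bus._events['greet'].add(lambda: print('hello'))
for func in bus._event_funcs('greet'):
    func()  # prints: hello
```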
craffel/mir_eval
|
mir_eval/multipitch.py
|
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/multipitch.py#L456-L507
|
def evaluate(ref_time, ref_freqs, est_time, est_freqs, **kwargs):
"""Evaluate two multipitch (multi-f0) transcriptions, where the first is
treated as the reference (ground truth) and the second as the estimate to
be evaluated (prediction).
Examples
--------
>>> ref_time, ref_freq = mir_eval.io.load_ragged_time_series('ref.txt')
>>> est_time, est_freq = mir_eval.io.load_ragged_time_series('est.txt')
>>> scores = mir_eval.multipitch.evaluate(ref_time, ref_freq,
... est_time, est_freq)
Parameters
----------
ref_time : np.ndarray
Time of each reference frequency value
ref_freqs : list of np.ndarray
List of np.ndarrays of reference frequency values
est_time : np.ndarray
Time of each estimated frequency value
est_freqs : list of np.ndarray
List of np.ndarrays of estimate frequency values
kwargs
Additional keyword arguments which will be passed to the
appropriate metric or preprocessing functions.
Returns
-------
scores : dict
Dictionary of scores, where the key is the metric name (str) and
the value is the (float) score achieved.
"""
scores = collections.OrderedDict()
(scores['Precision'],
scores['Recall'],
scores['Accuracy'],
scores['Substitution Error'],
scores['Miss Error'],
scores['False Alarm Error'],
scores['Total Error'],
scores['Chroma Precision'],
scores['Chroma Recall'],
scores['Chroma Accuracy'],
scores['Chroma Substitution Error'],
scores['Chroma Miss Error'],
scores['Chroma False Alarm Error'],
scores['Chroma Total Error']) = util.filter_kwargs(
metrics, ref_time, ref_freqs, est_time, est_freqs, **kwargs)
return scores
|
Evaluate two multipitch (multi-f0) transcriptions, where the first is
treated as the reference (ground truth) and the second as the estimate to
be evaluated (prediction).
Examples
--------
>>> ref_time, ref_freq = mir_eval.io.load_ragged_time_series('ref.txt')
>>> est_time, est_freq = mir_eval.io.load_ragged_time_series('est.txt')
>>> scores = mir_eval.multipitch.evaluate(ref_time, ref_freq,
... est_time, est_freq)
Parameters
----------
ref_time : np.ndarray
Time of each reference frequency value
ref_freqs : list of np.ndarray
List of np.ndarrays of reference frequency values
est_time : np.ndarray
Time of each estimated frequency value
est_freqs : list of np.ndarray
List of np.ndarrays of estimate frequency values
kwargs
Additional keyword arguments which will be passed to the
appropriate metric or preprocessing functions.
Returns
-------
scores : dict
Dictionary of scores, where the key is the metric name (str) and
the value is the (float) score achieved.
|
python
|
train
|
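The bulk assignment in `evaluate` relies on Python unpacking a tuple straight into subscripted targets, so insertion order in the `OrderedDict` matches the order `util.filter_kwargs` returns the metrics. The idiom in isolation, with made-up values:

```python
import collections

scores = collections.OrderedDict()
(scores['Precision'],
 scores['Recall']) = (0.9, 0.8)  # tuple unpacks into dict slots, in order
print(scores)  # OrderedDict([('Precision', 0.9), ('Recall', 0.8)])
```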
notifiers/notifiers
|
notifiers_cli/utils/callbacks.py
|
https://github.com/notifiers/notifiers/blob/6dd8aafff86935dbb4763db9c56f9cdd7fc08b65/notifiers_cli/utils/callbacks.py#L49-L59
|
def _resource(resource, pretty: bool = None, **data):
"""The callback func that will be hooked to the generic resource commands"""
data = clean_data(data)
ctx = click.get_current_context()
if ctx.obj.get("env_prefix"):
data["env_prefix"] = ctx.obj["env_prefix"]
rsp = resource(**data)
dump = partial(json.dumps, indent=4) if pretty else partial(json.dumps)
click.echo(dump(rsp))
|
The callback func that will be hooked to the generic resource commands
|
python
|
train
|
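The `dump` line is the interesting part: `functools.partial` pins `indent=4` onto `json.dumps` only when pretty output was requested. That dispatch on its own:

```python
import json
from functools import partial

def dump_for(pretty):
    # Same choice the callback makes: indented dumps vs. compact dumps.
    return partial(json.dumps, indent=4) if pretty else partial(json.dumps)

print(dump_for(False)({"ok": True}))  # {"ok": true}
print(dump_for(True)({"ok": True}))   # same data, spread over indented lines
```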
ActivisionGameScience/assertpy
|
assertpy/assertpy.py
|
https://github.com/ActivisionGameScience/assertpy/blob/08d799cdb01f9a25d3e20672efac991c7bc26d79/assertpy/assertpy.py#L101-L131
|
def contents_of(f, encoding='utf-8'):
"""Helper to read the contents of the given file or path into a string with the given encoding.
Encoding defaults to 'utf-8', other useful encodings are 'ascii' and 'latin-1'."""
try:
contents = f.read()
except AttributeError:
try:
with open(f, 'r') as fp:
contents = fp.read()
except TypeError:
raise ValueError('val must be file or path, but was type <%s>' % type(f).__name__)
except OSError:
if not isinstance(f, str_types):
raise ValueError('val must be file or path, but was type <%s>' % type(f).__name__)
raise
if sys.version_info[0] == 3 and type(contents) is bytes:
# in PY3 force decoding of bytes to target encoding
return contents.decode(encoding, 'replace')
elif sys.version_info[0] == 2 and encoding == 'ascii':
# in PY2 force encoding back to ascii
return contents.encode('ascii', 'replace')
else:
# in all other cases, try to decode to target encoding
try:
return contents.decode(encoding, 'replace')
except AttributeError:
pass
# if all else fails, just return the contents "as is"
return contents
|
Helper to read the contents of the given file or path into a string with the given encoding.
Encoding defaults to 'utf-8', other useful encodings are 'ascii' and 'latin-1'.
|
python
|
valid
|
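A short usage sketch, assuming the assertpy package is installed and exports `contents_of` at module level as current releases do; both a path and anything with a `.read()` method are accepted:

```python
import io
from assertpy import contents_of

with open('demo.txt', 'w') as f:
    f.write('hello')

print(contents_of('demo.txt'))            # a path: 'hello'
print(contents_of(io.StringIO('hello')))  # any object with .read() also works
```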
CalebBell/fluids
|
fluids/geometry.py
|
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/geometry.py#L1130-L1189
|
def SA_torispheroidal(D, fd, fk):
r'''Calculates surface area of a torispherical head according to [1]_.
Somewhat involved. Equations are adapted to be used for a full head.
.. math::
SA = S_1 + S_2
.. math::
S_1 = 2\pi D^2 f_d \alpha
.. math::
S_2 = 2\pi D^2 f_k\left(\alpha - \alpha_1 + (0.5 - f_k)\left(\sin^{-1}
\left(\frac{\alpha-\alpha_2}{f_k}\right) - \sin^{-1}\left(\frac{
\alpha_1-\alpha_2}{f_k}\right)\right)\right)
.. math::
\alpha_1 = f_d\left(1 - \sqrt{1 - \left(\frac{0.5 - f_k}{f_d-f_k}
\right)^2}\right)
.. math::
\alpha_2 = f_d - \sqrt{f_d^2 - 2f_d f_k + f_k - 0.25}
.. math::
\alpha = \frac{a}{D_i}
Parameters
----------
D : float
Diameter of the main cylindrical section, [m]
fd : float
Dish-radius parameter = f; fD = dish radius [1/m]
fk : float
knuckle-radius parameter = k; kD = knuckle radius [1/m]
Returns
-------
SA : float
Surface area [m^2]
Examples
--------
Example from [1]_.
>>> SA_torispheroidal(D=2.54, fd=1.039370079, fk=0.062362205)
6.00394283477063
References
----------
.. [1] Honeywell. "Calculate Surface Areas and Cross-sectional Areas in
Vessels with Dished Heads". https://www.honeywellprocess.com/library/marketing/whitepapers/WP-VesselsWithDishedHeads-UniSimDesign.pdf
Whitepaper. 2014.
'''
alpha_1 = fd*(1 - (1 - ((0.5-fk)/(fd-fk))**2)**0.5)
alpha_2 = fd - (fd**2 - 2*fd*fk + fk - 0.25)**0.5
alpha = alpha_1 # Up to top of dome
S1 = 2*pi*D**2*fd*alpha_1
alpha = alpha_2 # up to top of torus
S2_sub = asin((alpha-alpha_2)/fk) - asin((alpha_1-alpha_2)/fk)
S2 = 2*pi*D**2*fk*(alpha - alpha_1 + (0.5-fk)*S2_sub)
return S1 + S2
|
r'''Calculates surface area of a torispherical head according to [1]_.
Somewhat involved. Equations are adapted to be used for a full head.
.. math::
SA = S_1 + S_2
.. math::
S_1 = 2\pi D^2 f_d \alpha
.. math::
S_2 = 2\pi D^2 f_k\left(\alpha - \alpha_1 + (0.5 - f_k)\left(\sin^{-1}
\left(\frac{\alpha-\alpha_2}{f_k}\right) - \sin^{-1}\left(\frac{
\alpha_1-\alpha_2}{f_k}\right)\right)\right)
.. math::
\alpha_1 = f_d\left(1 - \sqrt{1 - \left(\frac{0.5 - f_k}{f_d-f_k}
\right)^2}\right)
.. math::
\alpha_2 = f_d - \sqrt{f_d^2 - 2f_d f_k + f_k - 0.25}
.. math::
\alpha = \frac{a}{D_i}
Parameters
----------
D : float
Diameter of the main cylindrical section, [m]
fd : float
Dish-radius parameter = f; fD = dish radius [1/m]
fk : float
knuckle-radius parameter = k; kD = knuckle radius [1/m]
Returns
-------
SA : float
Surface area [m^2]
Examples
--------
Example from [1]_.
>>> SA_torispheroidal(D=2.54, fd=1.039370079, fk=0.062362205)
6.00394283477063
References
----------
.. [1] Honeywell. "Calculate Surface Areas and Cross-sectional Areas in
Vessels with Dished Heads". https://www.honeywellprocess.com/library/marketing/whitepapers/WP-VesselsWithDishedHeads-UniSimDesign.pdf
Whitepaper. 2014.
|
python
|
train
|
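The docstring's own example doubles as a smoke test; assuming the fluids package is installed, it reproduces directly:

```python
from fluids.geometry import SA_torispheroidal

# Honeywell whitepaper example from the docstring: a 2.54 m dished head.
print(SA_torispheroidal(D=2.54, fd=1.039370079, fk=0.062362205))
# 6.00394283477063
```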
spyder-ide/spyder
|
spyder/plugins/ipythonconsole/widgets/client.py
|
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/ipythonconsole/widgets/client.py#L305-L312
|
def stop_button_click_handler(self):
"""Method to handle what to do when the stop button is pressed"""
self.stop_button.setDisabled(True)
# Interrupt computations or stop debugging
if not self.shellwidget._reading:
self.interrupt_kernel()
else:
self.shellwidget.write_to_stdin('exit')
|
Method to handle what to do when the stop button is pressed
|
python
|
train
|
RI-imaging/qpsphere
|
qpsphere/models/_bhfield/wrap.py
|
https://github.com/RI-imaging/qpsphere/blob/3cfa0e9fb8e81be8c820abbeccd47242e7972ac1/qpsphere/models/_bhfield/wrap.py#L224-L303
|
def run_simulation(wdir, arp=True, **kwargs):
"""
Example
-------
100-nm silica sphere with 10-nm thick Ag coating,
embedded in water; arprec 20 digits; illuminated with YAG (1064nm);
scan xz plane (21x21, +-200nm)
bhfield-arp-db.exe mpdigit wl r_core r_coat
n_grid_x xspan_min xspan_max
n_grid_y yspan_min yspan_max
n_grid_z zspan_min zspan_max
case Kreibig
[n_med n_core k_core n_coat k_coat (case=other)]
bhfield-arp-db.exe 20 1.064 0.050 0.060
21 -0.2 0.2
1 0 0
21 -0.2 0.2
other 0
1.3205 1.53413 0 0.565838 7.23262
Explanation of parameters
-------------------------
mpdigit:
arprec's number of precision digits;
increase it to overcome round-off errors
wl[um]:
light wavelength in vacuum
r_core[um], r_coat[um]:
core & coat radii
n_grid_x xspan_min[um] xspan_max[um]:
number & span of grid points for field computation; x span
n_grid_y yspan_min[um] yspan_max[um]:
y span
n_grid_z zspan_min[um] zspan_max[um]:
z span
Kreibig:
Kreibig mean free path correction for Ag (0.0 - 1.0)
case:
nanoshell/liposome/HPC/barber/other
n_med n_core k_core n_coat k_coat (case=other only):
refractive indices of medium (real), core & coat (n, k)
If `case=other`, complex refractive indices
(n, k at the particular wavelength) must be specified.
Otherwise (case = nanoshell etc) the medium/core/coat
materials are predefined and the n,k values
are taken from the data file (Ag_palik.nk etc).
The latter reflects our own interest and is intended
for use in our lab, so general users may not find it useful :-)
"""
wdir = pathlib.Path(wdir)
cmd = "{pathbhfield} {mpdigit} {wl:f} {r_core:f} {r_coat:f} " \
+ "{n_grid_x:d} {xspan_min:f} {xspan_max:f} " \
+ "{n_grid_y:d} {yspan_min:f} {yspan_max:f} " \
+ "{n_grid_z:d} {zspan_min:f} {zspan_max:f} " \
+ "{case} {Kreibig:f} {n_med:f} {n_core:f} {k_core:f} " \
+ "{n_coat:f} {k_coat:f}"
old_dir = pathlib.Path.cwd()
os.chdir(wdir)
kwargs["pathbhfield"] = get_binary(arp=arp)
if arp:
kwargs["mpdigit"] = 16
else:
kwargs["mpdigit"] = ""
# run simulation with kwargs
sp.check_output(cmd.format(**kwargs), shell=True)
# Go back to the original directory before checking (checking might fail)
os.chdir(old_dir)
# Check bhdebug.txt to make sure that you specify enough digits to
# overcome roundoff errors.
check_simulation(wdir)
|
Example
-------
100-nm silica sphere with 10-nm thick Ag coating,
embedded in water; arprec 20 digits; illuminated with YAG (1064nm);
scan xz plane (21x21, +-200nm)
bhfield-arp-db.exe mpdigit wl r_core r_coat
n_grid_x xspan_min xspan_max
n_grid_y yspan_min yspan_max
n_grid_z zspan_min zspan_max
case Kreibig
[n_med n_core k_core n_coat k_coat (case=other)]
bhfield-arp-db.exe 20 1.064 0.050 0.060
21 -0.2 0.2
1 0 0
21 -0.2 0.2
other 0
1.3205 1.53413 0 0.565838 7.23262
Explanation of parameters
-------------------------
mpdigit:
arprec's number of precision digits;
increase it to overcome round-off errors
wl[um]:
light wavelength in vacuum
r_core[um], r_coat[um]:
core & coat radii
n_grid_x xspan_min[um] xspan_max[um]:
number & span of grid points for field computation; x span
n_grid_y yspan_min[um] yspan_max[um]:
y span
n_grid_z zspan_min[um] zspan_max[um]:
z span
Kreibig:
Kreibig mean free path correction for Ag (0.0 - 1.0)
case:
nanoshell/liposome/HPC/barber/other
n_med n_core k_core n_coat k_coat (case=other only):
refractive indices of medium (real), core & coat (n, k)
If `case=other`, complex refractive indices
(n, k at the particular wavelength) must be specified.
Otherwise (case = nanoshell etc) the medium/core/coat
materials are predefined and the n,k values
are taken from the data file (Ag_palik.nk etc).
The latter reflects our own interest and is intended
for use in our lab, so general users may not find it useful :-)
|
python
|
train
|
Basic-Components/msgpack-rpc-protocol
|
python/pymprpc/mixins/encoder_decoder_mixin.py
|
https://github.com/Basic-Components/msgpack-rpc-protocol/blob/7983ace5d5cfd7214df6803f9b1de458df5fe3b1/python/pymprpc/mixins/encoder_decoder_mixin.py#L66-L91
|
def decoder(self, response: bytes):
"""编码请求为bytes.
检查是否使用debug模式和是否对数据进行压缩.之后根据状态将python字典形式的请求编码为字节串.
Parameters:
response (bytes): - 响应的字节串编码
Return:
(Dict[str, Any]): - python字典形式的响应
"""
response = response[:-(len(self.SEPARATOR))]
if self.compreser is not None:
response = self.compreser.decompress(response)
if self.debug is True:
response = json.loads(response.decode('utf-8'))
else:
response = msgpack.unpackb(response, encoding='utf-8')
version = response.get("MPRPC")
if version and version == self.VERSION:
return response
else:
raise ProtocolException("Wrong Protocol")
|
Decode a response from bytes.
Checks whether debug mode is in use and whether the data was compressed, then decodes the byte string into a Python dict accordingly.
Parameters:
response (bytes): - the byte-string encoding of the response
Return:
(Dict[str, Any]): - the response as a Python dict
|
python
|
train
|
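A round-trip sketch of the debug (JSON) path through `decoder`: the trailing separator is stripped, the payload decodes to a dict, and the `MPRPC` version field is checked. The separator value below is hypothetical; the real one is a class attribute:

```python
import json

SEPARATOR = b'##END##'  # hypothetical; the real value is self.SEPARATOR
payload = json.dumps({"MPRPC": "0.1", "RESULT": 42}).encode('utf-8') + SEPARATOR

response = payload[:-len(SEPARATOR)]            # strip the separator suffix
decoded = json.loads(response.decode('utf-8'))  # debug mode uses JSON
assert decoded.get("MPRPC") == "0.1"            # version check before returning
print(decoded)  # {'MPRPC': '0.1', 'RESULT': 42}
```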
robertpeteuil/multi-cloud-control
|
mcc/cldcnct.py
|
https://github.com/robertpeteuil/multi-cloud-control/blob/f1565af1c0b6ed465ff312d3ccc592ba0609f4a2/mcc/cldcnct.py#L108-L112
|
def busy_disp_off(dobj):
"""Turn OFF busy_display to indicate completion."""
dobj.kill(block=False)
sys.stdout.write("\033[D \033[D")
sys.stdout.flush()
|
Turn OFF busy_display to indicate completion.
|
python
|
train
|
senaite/senaite.core
|
bika/lims/browser/batchfolder.py
|
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/batchfolder.py#L143-L183
|
def folderitem(self, obj, item, index):
"""Applies new properties to the item (Batch) that is currently being
rendered as a row in the list
:param obj: batch to be rendered as a row in the list
:param item: dict representation of the batch, suitable for the list
:param index: current position of the item within the list
:type obj: ATContentType/DexterityContentType
:type item: dict
:type index: int
:return: the dict representation of the item
:rtype: dict
"""
obj = api.get_object(obj)
url = "{}/analysisrequests".format(api.get_url(obj))
bid = api.get_id(obj)
cbid = obj.getClientBatchID()
title = api.get_title(obj)
client = obj.getClient()
created = api.get_creation_date(obj)
date = obj.getBatchDate()
item["BatchID"] = bid
item["ClientBatchID"] = cbid
item["replace"]["BatchID"] = get_link(url, bid)
item["Title"] = title
item["replace"]["Title"] = get_link(url, title)
item["created"] = self.ulocalized_time(created, long_format=True)
item["BatchDate"] = self.ulocalized_time(date, long_format=True)
if client:
client_url = api.get_url(client)
client_name = client.getName()
client_id = client.getClientID()
item["Client"] = client_name
item["ClientID"] = client_id
item["replace"]["Client"] = get_link(client_url, client_name)
item["replace"]["ClientID"] = get_link(client_url, client_id)
return item
|
Applies new properties to the item (Batch) that is currently being
rendered as a row in the list
:param obj: batch to be rendered as a row in the list
:param item: dict representation of the batch, suitable for the list
:param index: current position of the item within the list
:type obj: ATContentType/DexterityContentType
:type item: dict
:type index: int
:return: the dict representation of the item
:rtype: dict
|
python
|
train
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/addons/guerilla/guerillamgmt.py
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/addons/guerilla/guerillamgmt.py#L468-L486
|
def create_shot(self, ):
"""Create a shot and store it in the self.shot
:returns: None
:rtype: None
:raises: None
"""
name = self.name_le.text()
if not name:
self.name_le.setPlaceholderText("Please enter a name!")
return
desc = self.desc_pte.toPlainText()
try:
shot = djadapter.models.Shot(sequence=self.sequence, project=self.sequence.project, name=name, description=desc)
shot.save()
self.shot = shot
self.accept()
except Exception:
log.exception("Could not create new shot")
|
Create a shot and store it in self.shot
:returns: None
:rtype: None
:raises: None
|
python
|
train
|
lrq3000/pyFileFixity
|
pyFileFixity/lib/profilers/visual/pympler/refgraph.py
|
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/refgraph.py#L267-L284
|
def _annotate_objects(self):
"""
Extract meta-data describing the stored objects.
"""
self.metadata = []
sizer = Asizer()
sizes = sizer.asizesof(*self.objects)
self.total_size = sizer.total
for obj, sz in zip(self.objects, sizes):
md = _MetaObject()
md.size = sz
md.id = id(obj)
try:
md.type = obj.__class__.__name__
except (AttributeError, ReferenceError): # pragma: no cover
md.type = type(obj).__name__
md.str = safe_repr(obj, clip=128)
self.metadata.append(md)
|
Extract meta-data describing the stored objects.
|
python
|
train
|
vijaykatam/django-cache-manager
|
django_cache_manager/models.py
|
https://github.com/vijaykatam/django-cache-manager/blob/05142c44eb349d3f24f962592945888d9d367375/django_cache_manager/models.py#L21-L26
|
def update_model_cache(table_name):
"""
Updates model cache by generating a new key for the model
"""
model_cache_info = ModelCacheInfo(table_name, uuid.uuid4().hex)
model_cache_backend.share_model_cache_info(model_cache_info)
|
Updates model cache by generating a new key for the model
|
python
|
train
|
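The cache-busting trick here is that a fresh `uuid4` hex per table version invalidates every key derived from it; stale entries are never deleted, they simply stop being addressed. The key-derivation idea in isolation (the helper name is made up for illustration):

```python
import uuid

def new_cache_key(table_name):
    # Any key built from the new hex no longer matches old entries.
    return '{}-{}'.format(table_name, uuid.uuid4().hex)

print(new_cache_key('app_author'))  # e.g. app_author-3f2c9a...
```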
bpsmith/tia
|
tia/bbg/v3api.py
|
https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/bbg/v3api.py#L23-L30
|
def security_iter(nodearr):
""" provide a security data iterator by returning a tuple of (Element, SecurityError) which are mutually exclusive """
assert nodearr.name() == 'securityData' and nodearr.isArray()
for i in range(nodearr.numValues()):
node = nodearr.getValue(i)
err = XmlHelper.get_security_error(node)
result = (None, err) if err else (node, None)
yield result
|
provide a security data iterator by returning a tuple of (Element, SecurityError) which are mutually exclusive
|
python
|
train
|
tensorflow/tensorboard
|
tensorboard/backend/event_processing/sqlite_writer.py
|
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/sqlite_writer.py#L416-L430
|
def initialize_schema(connection):
"""Initializes the TensorBoard sqlite schema using the given connection.
Args:
connection: A sqlite DB connection.
"""
cursor = connection.cursor()
cursor.execute("PRAGMA application_id={}".format(_TENSORBOARD_APPLICATION_ID))
cursor.execute("PRAGMA user_version={}".format(_TENSORBOARD_USER_VERSION))
with connection:
for statement in _SCHEMA_STATEMENTS:
lines = statement.strip('\n').split('\n')
message = lines[0] + ('...' if len(lines) > 1 else '')
logger.debug('Running DB init statement: %s', message)
cursor.execute(statement)
|
Initializes the TensorBoard sqlite schema using the given connection.
Args:
connection: A sqlite DB connection.
|
python
|
train
|
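The same pattern runs stand-alone against an in-memory database; the application id, user version, and schema statement below are stand-in values, not TensorBoard's real ones:

```python
import sqlite3

_APP_ID, _USER_VERSION = 12345, 1
_SCHEMA_STATEMENTS = [
    "CREATE TABLE IF NOT EXISTS runs (id INTEGER PRIMARY KEY, name TEXT)",
]

connection = sqlite3.connect(':memory:')
cursor = connection.cursor()
cursor.execute("PRAGMA application_id={}".format(_APP_ID))
cursor.execute("PRAGMA user_version={}".format(_USER_VERSION))
with connection:  # commits all DDL statements as one transaction
    for statement in _SCHEMA_STATEMENTS:
        cursor.execute(statement)
print(cursor.execute("PRAGMA user_version").fetchone()[0])  # 1
```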
dhermes/bezier
|
src/bezier/_curve_helpers.py
|
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/_curve_helpers.py#L299-L343
|
def _compute_length(nodes):
r"""Approximately compute the length of a curve.
.. _QUADPACK: https://en.wikipedia.org/wiki/QUADPACK
If ``degree`` is :math:`n`, then the Hodograph curve
:math:`B'(s)` is degree :math:`d = n - 1`. Using this curve, we
approximate the integral:
.. math::
\int_{B\left(\left[0, 1\right]\right)} 1 \, d\mathbf{x} =
\int_0^1 \left\lVert B'(s) \right\rVert_2 \, ds
using `QUADPACK`_ (via SciPy).
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Args:
nodes (numpy.ndarray): The nodes defining a curve.
Returns:
float: The length of the curve.
Raises:
OSError: If SciPy is not installed.
"""
_, num_nodes = np.shape(nodes)
# NOTE: We somewhat replicate code in ``evaluate_hodograph()``
# here. This is so we don't re-compute the nodes for the first
# derivative every time it is evaluated.
first_deriv = (num_nodes - 1) * (nodes[:, 1:] - nodes[:, :-1])
if num_nodes == 2:
# NOTE: We convert to 1D to make sure NumPy uses vector norm.
return np.linalg.norm(first_deriv[:, 0], ord=2)
if _scipy_int is None:
raise OSError("This function requires SciPy for quadrature.")
size_func = functools.partial(vec_size, first_deriv)
length, _ = _scipy_int.quad(size_func, 0.0, 1.0)
return length
|
r"""Approximately compute the length of a curve.
.. _QUADPACK: https://en.wikipedia.org/wiki/QUADPACK
If ``degree`` is :math:`n`, then the Hodograph curve
:math:`B'(s)` is degree :math:`d = n - 1`. Using this curve, we
approximate the integral:
.. math::
\int_{B\left(\left[0, 1\right]\right)} 1 \, d\mathbf{x} =
\int_0^1 \left\lVert B'(s) \right\rVert_2 \, ds
using `QUADPACK`_ (via SciPy).
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Args:
nodes (numpy.ndarray): The nodes defining a curve.
Returns:
float: The length of the curve.
Raises:
OSError: If SciPy is not installed.
|
python
|
train
|
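The `num_nodes == 2` shortcut is easy to check by hand: a degree-1 curve is a line segment, its hodograph is constant, and the length is just the norm of that vector. That branch in isolation, using the column-per-point node layout the function expects:

```python
import numpy as np

nodes = np.asfortranarray([[0.0, 3.0],
                           [0.0, 4.0]])  # line from (0, 0) to (3, 4)
_, num_nodes = np.shape(nodes)
first_deriv = (num_nodes - 1) * (nodes[:, 1:] - nodes[:, :-1])
print(np.linalg.norm(first_deriv[:, 0], ord=2))  # 5.0, the segment length
```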
odrling/peony-twitter
|
peony/client.py
|
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/client.py#L542-L640
|
async def _chunked_upload(self, media, media_size,
path=None,
media_type=None,
media_category=None,
chunk_size=2**20,
**params):
"""
upload media in chunks
Parameters
----------
media : file object
a file object of the media
media_size : int
size of the media
path : str, optional
filename of the media
media_type : str, optional
mime type of the media
media_category : str, optional
twitter media category, must be used with ``media_type``
chunk_size : int, optional
size of a chunk in bytes
params : dict, optional
additional parameters of the request
Returns
-------
.data_processing.PeonyResponse
Response of the request
"""
if isinstance(media, bytes):
media = io.BytesIO(media)
chunk = media.read(chunk_size)
is_coro = asyncio.iscoroutine(chunk)
if is_coro:
chunk = await chunk
if media_type is None:
media_metadata = await utils.get_media_metadata(chunk, path)
media_type, media_category = media_metadata
elif media_category is None:
media_category = utils.get_category(media_type)
response = await self.upload.media.upload.post(
command="INIT",
total_bytes=media_size,
media_type=media_type,
media_category=media_category,
**params
)
media_id = response['media_id']
i = 0
while chunk:
if is_coro:
req = self.upload.media.upload.post(command="APPEND",
media_id=media_id,
media=chunk,
segment_index=i)
chunk, _ = await asyncio.gather(media.read(chunk_size), req)
else:
await self.upload.media.upload.post(command="APPEND",
media_id=media_id,
media=chunk,
segment_index=i)
chunk = media.read(chunk_size)
i += 1
status = await self.upload.media.upload.post(command="FINALIZE",
media_id=media_id)
if 'processing_info' in status:
while status['processing_info'].get('state') != "succeeded":
processing_info = status['processing_info']
if processing_info.get('state') == "failed":
error = processing_info.get('error', {})
message = error.get('message', str(status))
raise exceptions.MediaProcessingError(data=status,
message=message,
**params)
delay = processing_info['check_after_secs']
await asyncio.sleep(delay)
status = await self.upload.media.upload.get(
command="STATUS",
media_id=media_id,
**params
)
return response
|
upload media in chunks
Parameters
----------
media : file object
a file object of the media
media_size : int
size of the media
path : str, optional
filename of the media
media_type : str, optional
mime type of the media
media_category : str, optional
twitter media category, must be used with ``media_type``
chunk_size : int, optional
size of a chunk in bytes
params : dict, optional
additional parameters of the request
Returns
-------
.data_processing.PeonyResponse
Response of the request
|
[
"upload",
"media",
"in",
"chunks"
] |
python
|
valid
|
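A minimal, dependency-free sketch of the APPEND loop in the record above: chunks are read at a fixed size and paired with an incrementing segment_index. The iter_chunks helper and the sample bytes are illustrative stand-ins, not part of the library shown.
import io

def iter_chunks(media, chunk_size=4):
    # Mirrors the read-then-increment pattern of the upload loop above.
    i = 0
    chunk = media.read(chunk_size)
    while chunk:
        yield i, chunk          # one APPEND request per (segment_index, chunk)
        chunk = media.read(chunk_size)
        i += 1

for i, chunk in iter_chunks(io.BytesIO(b'abcdefghij')):
    print(i, chunk)             # 0 b'abcd' / 1 b'efgh' / 2 b'ij'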
numenta/nupic
|
src/nupic/algorithms/spatial_pooler.py
|
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/spatial_pooler.py#L1694-L1715
|
def _getInputNeighborhood(self, centerInput):
"""
Gets a neighborhood of inputs.
Simply calls topology.wrappingNeighborhood or topology.neighborhood.
A subclass can insert different topology behavior by overriding this method.
:param centerInput (int)
The center of the neighborhood.
@returns (1D numpy array of integers)
The inputs in the neighborhood.
"""
if self._wrapAround:
return topology.wrappingNeighborhood(centerInput,
self._potentialRadius,
self._inputDimensions)
else:
return topology.neighborhood(centerInput,
self._potentialRadius,
self._inputDimensions)
|
[
"def",
"_getInputNeighborhood",
"(",
"self",
",",
"centerInput",
")",
":",
"if",
"self",
".",
"_wrapAround",
":",
"return",
"topology",
".",
"wrappingNeighborhood",
"(",
"centerInput",
",",
"self",
".",
"_potentialRadius",
",",
"self",
".",
"_inputDimensions",
")",
"else",
":",
"return",
"topology",
".",
"neighborhood",
"(",
"centerInput",
",",
"self",
".",
"_potentialRadius",
",",
"self",
".",
"_inputDimensions",
")"
] |
Gets a neighborhood of inputs.
Simply calls topology.wrappingNeighborhood or topology.neighborhood.
A subclass can insert different topology behavior by overriding this method.
:param centerInput (int)
The center of the neighborhood.
@returns (1D numpy array of integers)
The inputs in the neighborhood.
|
[
"Gets",
"a",
"neighborhood",
"of",
"inputs",
"."
] |
python
|
valid
|
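A hedged 1-D stand-in for the wrap-around dispatch above; nupic's topology helpers handle n-dimensional input, so the modular-arithmetic version below only illustrates the wrapAround idea, not the actual implementation.
import numpy as np

def neighborhood_1d(center, radius, dim, wrap_around):
    # Wrapping uses modular arithmetic; the clamped case trims at the edges.
    if wrap_around:
        return (center + np.arange(-radius, radius + 1)) % dim
    return np.arange(max(0, center - radius), min(dim - 1, center + radius) + 1)

print(neighborhood_1d(0, 2, 10, wrap_around=True))   # [8 9 0 1 2]
print(neighborhood_1d(0, 2, 10, wrap_around=False))  # [0 1 2]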
cltrudeau/django-flowr
|
flowr/models.py
|
https://github.com/cltrudeau/django-flowr/blob/d077b90376ede33721db55ff29e08b8a16ed17ae/flowr/models.py#L321-L345
|
def prune(self):
"""Removes the node and all descendents without looping back past the
root. Note this does not remove the associated data objects.
:returns:
list of :class:`BaseDataNode` subclassers associated with the
removed ``Node`` objects.
"""
targets = self.descendents_root()
try:
targets.remove(self.graph.root)
except ValueError:
# root wasn't in the target list, no problem
pass
results = [n.data for n in targets]
results.append(self.data)
for node in targets:
node.delete()
for parent in self.parents.all():
parent.children.remove(self)
self.delete()
return results
|
[
"def",
"prune",
"(",
"self",
")",
":",
"targets",
"=",
"self",
".",
"descendents_root",
"(",
")",
"try",
":",
"targets",
".",
"remove",
"(",
"self",
".",
"graph",
".",
"root",
")",
"except",
"ValueError",
":",
"# root wasn't in the target list, no problem",
"pass",
"results",
"=",
"[",
"n",
".",
"data",
"for",
"n",
"in",
"targets",
"]",
"results",
".",
"append",
"(",
"self",
".",
"data",
")",
"for",
"node",
"in",
"targets",
":",
"node",
".",
"delete",
"(",
")",
"for",
"parent",
"in",
"self",
".",
"parents",
".",
"all",
"(",
")",
":",
"parent",
".",
"children",
".",
"remove",
"(",
"self",
")",
"self",
".",
"delete",
"(",
")",
"return",
"results"
] |
Removes the node and all descendents without looping back past the
root. Note this does not remove the associated data objects.
:returns:
list of :class:`BaseDataNode` subclassers associated with the
removed ``Node`` objects.
|
[
"Removes",
"the",
"node",
"and",
"all",
"descendents",
"without",
"looping",
"back",
"past",
"the",
"root",
".",
"Note",
"this",
"does",
"not",
"remove",
"the",
"associated",
"data",
"objects",
"."
] |
python
|
valid
|
OCR-D/core
|
ocrd_models/ocrd_models/ocrd_mets.py
|
https://github.com/OCR-D/core/blob/57e68c578526cb955fd2e368207f5386c459d91d/ocrd_models/ocrd_models/ocrd_mets.py#L180-L213
|
def add_file(self, fileGrp, mimetype=None, url=None, ID=None, pageId=None, force=False, local_filename=None, **kwargs):
"""
Add a `OcrdFile </../../ocrd_models/ocrd_models.ocrd_file.html>`_.
Arguments:
fileGrp (string): Add file to ``mets:fileGrp`` with this ``USE`` attribute
mimetype (string):
url (string):
ID (string):
pageId (string):
force (boolean): Whether to add the file even if a ``mets:file`` with the same ``ID`` already exists.
local_filename (string):
mimetype (string):
"""
if not ID:
raise Exception("Must set ID of the mets:file")
el_fileGrp = self._tree.getroot().find(".//mets:fileGrp[@USE='%s']" % (fileGrp), NS)
if el_fileGrp is None:
el_fileGrp = self.add_file_group(fileGrp)
if ID is not None and self.find_files(ID=ID) != []:
if not force:
raise Exception("File with ID='%s' already exists" % ID)
mets_file = self.find_files(ID=ID)[0]
else:
mets_file = OcrdFile(ET.SubElement(el_fileGrp, TAG_METS_FILE), mets=self)
mets_file.url = url
mets_file.mimetype = mimetype
mets_file.ID = ID
mets_file.pageId = pageId
mets_file.local_filename = local_filename
self._file_by_id[ID] = mets_file
return mets_file
|
[
"def",
"add_file",
"(",
"self",
",",
"fileGrp",
",",
"mimetype",
"=",
"None",
",",
"url",
"=",
"None",
",",
"ID",
"=",
"None",
",",
"pageId",
"=",
"None",
",",
"force",
"=",
"False",
",",
"local_filename",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"ID",
":",
"raise",
"Exception",
"(",
"\"Must set ID of the mets:file\"",
")",
"el_fileGrp",
"=",
"self",
".",
"_tree",
".",
"getroot",
"(",
")",
".",
"find",
"(",
"\".//mets:fileGrp[@USE='%s']\"",
"%",
"(",
"fileGrp",
")",
",",
"NS",
")",
"if",
"el_fileGrp",
"is",
"None",
":",
"el_fileGrp",
"=",
"self",
".",
"add_file_group",
"(",
"fileGrp",
")",
"if",
"ID",
"is",
"not",
"None",
"and",
"self",
".",
"find_files",
"(",
"ID",
"=",
"ID",
")",
"!=",
"[",
"]",
":",
"if",
"not",
"force",
":",
"raise",
"Exception",
"(",
"\"File with ID='%s' already exists\"",
"%",
"ID",
")",
"mets_file",
"=",
"self",
".",
"find_files",
"(",
"ID",
"=",
"ID",
")",
"[",
"0",
"]",
"else",
":",
"mets_file",
"=",
"OcrdFile",
"(",
"ET",
".",
"SubElement",
"(",
"el_fileGrp",
",",
"TAG_METS_FILE",
")",
",",
"mets",
"=",
"self",
")",
"mets_file",
".",
"url",
"=",
"url",
"mets_file",
".",
"mimetype",
"=",
"mimetype",
"mets_file",
".",
"ID",
"=",
"ID",
"mets_file",
".",
"pageId",
"=",
"pageId",
"mets_file",
".",
"local_filename",
"=",
"local_filename",
"self",
".",
"_file_by_id",
"[",
"ID",
"]",
"=",
"mets_file",
"return",
"mets_file"
] |
Add a `OcrdFile </../../ocrd_models/ocrd_models.ocrd_file.html>`_.
Arguments:
fileGrp (string): Add file to ``mets:fileGrp`` with this ``USE`` attribute
mimetype (string):
url (string):
ID (string):
pageId (string):
force (boolean): Whether to add the file even if a ``mets:file`` with the same ``ID`` already exists.
local_filename (string):
mimetype (string):
|
[
"Add",
"a",
"OcrdFile",
"<",
"/",
"..",
"/",
"..",
"/",
"ocrd_models",
"/",
"ocrd_models",
".",
"ocrd_file",
".",
"html",
">",
"_",
"."
] |
python
|
train
|
openeventdata/mordecai
|
mordecai/geoparse.py
|
https://github.com/openeventdata/mordecai/blob/bd82b8bcc27621345c57cbe9ec7f8c8552620ffc/mordecai/geoparse.py#L115-L149
|
def clean_entity(self, ent):
"""
Strip out extra words that often get picked up by spaCy's NER.
To do: preserve info about what got stripped out to help with ES/Geonames
resolution later.
Parameters
---------
ent: a spaCy named entity Span
Returns
-------
new_ent: a spaCy Span, with extra words stripped out.
"""
dump_list = ['province', 'the', 'area', 'airport', 'district', 'square',
'town', 'village', 'prison', "river", "valley", "provincial", "prison",
"region", "municipality", "state", "territory", "of", "in",
"county", "central"]
# maybe have 'city'? Works differently in different countries
# also, "District of Columbia". Might need to use cap/no cap
keep_positions = []
for word in ent:
if word.text.lower() not in dump_list:
keep_positions.append(word.i)
keep_positions = np.asarray(keep_positions)
try:
new_ent = ent.doc[keep_positions.min():keep_positions.max() + 1]
# can't set directly
#new_ent.label_.__set__(ent.label_)
except ValueError:
new_ent = ent
return new_ent
|
[
"def",
"clean_entity",
"(",
"self",
",",
"ent",
")",
":",
"dump_list",
"=",
"[",
"'province'",
",",
"'the'",
",",
"'area'",
",",
"'airport'",
",",
"'district'",
",",
"'square'",
",",
"'town'",
",",
"'village'",
",",
"'prison'",
",",
"\"river\"",
",",
"\"valley\"",
",",
"\"provincial\"",
",",
"\"prison\"",
",",
"\"region\"",
",",
"\"municipality\"",
",",
"\"state\"",
",",
"\"territory\"",
",",
"\"of\"",
",",
"\"in\"",
",",
"\"county\"",
",",
"\"central\"",
"]",
"# maybe have 'city'? Works differently in different countries",
"# also, \"District of Columbia\". Might need to use cap/no cap",
"keep_positions",
"=",
"[",
"]",
"for",
"word",
"in",
"ent",
":",
"if",
"word",
".",
"text",
".",
"lower",
"(",
")",
"not",
"in",
"dump_list",
":",
"keep_positions",
".",
"append",
"(",
"word",
".",
"i",
")",
"keep_positions",
"=",
"np",
".",
"asarray",
"(",
"keep_positions",
")",
"try",
":",
"new_ent",
"=",
"ent",
".",
"doc",
"[",
"keep_positions",
".",
"min",
"(",
")",
":",
"keep_positions",
".",
"max",
"(",
")",
"+",
"1",
"]",
"# can't set directly",
"#new_ent.label_.__set__(ent.label_)",
"except",
"ValueError",
":",
"new_ent",
"=",
"ent",
"return",
"new_ent"
] |
Strip out extra words that often get picked up by spaCy's NER.
To do: preserve info about what got stripped out to help with ES/Geonames
resolution later.
Parameters
---------
ent: a spaCy named entity Span
Returns
-------
new_ent: a spaCy Span, with extra words stripped out.
|
[
"Strip",
"out",
"extra",
"words",
"that",
"often",
"get",
"picked",
"up",
"by",
"spaCy",
"s",
"NER",
"."
] |
python
|
train
|
eirannejad/Revit-Journal-Maker
|
rjm/__init__.py
|
https://github.com/eirannejad/Revit-Journal-Maker/blob/09a4f27da6d183f63a2c93ed99dca8a8590d5241/rjm/__init__.py#L228-L242
|
def open_model(self, model_path, audit=False):
"""Append a open non-workshared model entry to the journal.
This instructs Revit to open a non-workshared model.
Args:
model_path (str): full path to non-workshared model
audit (bool): if True audits the model when opening
"""
if audit:
self._add_entry(templates.FILE_OPEN_AUDIT
.format(model_path=model_path))
else:
self._add_entry(templates.FILE_OPEN
.format(model_path=model_path))
|
[
"def",
"open_model",
"(",
"self",
",",
"model_path",
",",
"audit",
"=",
"False",
")",
":",
"if",
"audit",
":",
"self",
".",
"_add_entry",
"(",
"templates",
".",
"FILE_OPEN_AUDIT",
".",
"format",
"(",
"model_path",
"=",
"model_path",
")",
")",
"else",
":",
"self",
".",
"_add_entry",
"(",
"templates",
".",
"FILE_OPEN",
".",
"format",
"(",
"model_path",
"=",
"model_path",
")",
")"
] |
Append an open non-workshared model entry to the journal.
This instructs Revit to open a non-workshared model.
Args:
model_path (str): full path to non-workshared model
audit (bool): if True audits the model when opening
|
[
"Append",
"a",
"open",
"non",
"-",
"workshared",
"model",
"entry",
"to",
"the",
"journal",
"."
] |
python
|
train
|
DiscordBotList/DBL-Python-Library
|
dbl/client.py
|
https://github.com/DiscordBotList/DBL-Python-Library/blob/c1461ae0acc644cdeedef8fd6b5e36f76d81c1aa/dbl/client.py#L330-L349
|
async def get_widget_small(self, bot_id: int = None):
"""This function is a coroutine.
Generates the default small widget.
Parameters
==========
bot_id: int
The bot_id of the bot you wish to make a widget for.
Returns
=======
URL of the widget: str
"""
if bot_id is None:
bot_id = self.bot_id
url = 'https://discordbots.org/api/widget/lib/{0}.png'.format(bot_id)
return url
|
[
"async",
"def",
"get_widget_small",
"(",
"self",
",",
"bot_id",
":",
"int",
"=",
"None",
")",
":",
"if",
"bot_id",
"is",
"None",
":",
"bot_id",
"=",
"self",
".",
"bot_id",
"url",
"=",
"'https://discordbots.org/api/widget/lib/{0}.png'",
".",
"format",
"(",
"bot_id",
")",
"return",
"url"
] |
This function is a coroutine.
Generates the default small widget.
Parameters
==========
bot_id: int
The bot_id of the bot you wish to make a widget for.
Returns
=======
URL of the widget: str
|
[
"This",
"function",
"is",
"a",
"coroutine",
"."
] |
python
|
test
|
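Because get_widget_small only formats a URL (no HTTP request is made), it can be exercised standalone; the wrapper class and bot id below are placeholders, not part of the dbl client.
import asyncio

class WidgetDemo:
    def __init__(self, bot_id):
        self.bot_id = bot_id

    async def get_widget_small(self, bot_id: int = None):
        # Same fallback-to-instance-attribute pattern as the record above.
        if bot_id is None:
            bot_id = self.bot_id
        return 'https://discordbots.org/api/widget/lib/{0}.png'.format(bot_id)

print(asyncio.run(WidgetDemo(1234).get_widget_small()))
# https://discordbots.org/api/widget/lib/1234.png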
Microsoft/azure-devops-python-api
|
azure-devops/azure/devops/v5_0/work/work_client.py
|
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/work/work_client.py#L28-L55
|
def get_backlog_configurations(self, team_context):
"""GetBacklogConfigurations.
Gets backlog configuration for a team
:param :class:`<TeamContext> <azure.devops.v5_0.work.models.TeamContext>` team_context: The team context for the operation
:rtype: :class:`<BacklogConfiguration> <azure.devops.v5_0.work.models.BacklogConfiguration>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
response = self._send(http_method='GET',
location_id='7799f497-3cb5-4f16-ad4f-5cd06012db64',
version='5.0',
route_values=route_values)
return self._deserialize('BacklogConfiguration', response)
|
[
"def",
"get_backlog_configurations",
"(",
"self",
",",
"team_context",
")",
":",
"project",
"=",
"None",
"team",
"=",
"None",
"if",
"team_context",
"is",
"not",
"None",
":",
"if",
"team_context",
".",
"project_id",
":",
"project",
"=",
"team_context",
".",
"project_id",
"else",
":",
"project",
"=",
"team_context",
".",
"project",
"if",
"team_context",
".",
"team_id",
":",
"team",
"=",
"team_context",
".",
"team_id",
"else",
":",
"team",
"=",
"team_context",
".",
"team",
"route_values",
"=",
"{",
"}",
"if",
"project",
"is",
"not",
"None",
":",
"route_values",
"[",
"'project'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'project'",
",",
"project",
",",
"'string'",
")",
"if",
"team",
"is",
"not",
"None",
":",
"route_values",
"[",
"'team'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'team'",
",",
"team",
",",
"'string'",
")",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'GET'",
",",
"location_id",
"=",
"'7799f497-3cb5-4f16-ad4f-5cd06012db64'",
",",
"version",
"=",
"'5.0'",
",",
"route_values",
"=",
"route_values",
")",
"return",
"self",
".",
"_deserialize",
"(",
"'BacklogConfiguration'",
",",
"response",
")"
] |
GetBacklogConfigurations.
Gets backlog configuration for a team
:param :class:`<TeamContext> <azure.devops.v5_0.work.models.TeamContext>` team_context: The team context for the operation
:rtype: :class:`<BacklogConfiguration> <azure.devops.v5_0.work.models.BacklogConfiguration>`
|
[
"GetBacklogConfigurations",
".",
"Gets",
"backlog",
"configuration",
"for",
"a",
"team",
":",
"param",
":",
"class",
":",
"<TeamContext",
">",
"<azure",
".",
"devops",
".",
"v5_0",
".",
"work",
".",
"models",
".",
"TeamContext",
">",
"team_context",
":",
"The",
"team",
"context",
"for",
"the",
"operation",
":",
"rtype",
":",
":",
"class",
":",
"<BacklogConfiguration",
">",
"<azure",
".",
"devops",
".",
"v5_0",
".",
"work",
".",
"models",
".",
"BacklogConfiguration",
">"
] |
python
|
train
|
bitshares/python-bitshares
|
bitsharesbase/operationids.py
|
https://github.com/bitshares/python-bitshares/blob/8a3b5954a6abcaaff7c6a5c41d910e58eea3142f/bitsharesbase/operationids.py#L71-L83
|
def getOperationName(id: str):
""" This method returns the name representation of an operation given
its value as used in the API
"""
if isinstance(id, str):
# Some graphene chains (e.g. steem) do not encode the
# operation_type as id but in its string form
assert id in operations.keys(), "Unknown operation {}".format(id)
return id
elif isinstance(id, int):
return getOperationNameForId(id)
else:
raise ValueError
|
[
"def",
"getOperationName",
"(",
"id",
":",
"str",
")",
":",
"if",
"isinstance",
"(",
"id",
",",
"str",
")",
":",
"# Some graphene chains (e.g. steem) do not encode the",
"# operation_type as id but in its string form",
"assert",
"id",
"in",
"operations",
".",
"keys",
"(",
")",
",",
"\"Unknown operation {}\"",
".",
"format",
"(",
"id",
")",
"return",
"id",
"elif",
"isinstance",
"(",
"id",
",",
"int",
")",
":",
"return",
"getOperationNameForId",
"(",
"id",
")",
"else",
":",
"raise",
"ValueError"
] |
This method returns the name representation of an operation given
its value as used in the API
|
[
"This",
"method",
"returns",
"the",
"name",
"representation",
"of",
"an",
"operation",
"given",
"its",
"value",
"as",
"used",
"in",
"the",
"API"
] |
python
|
train
|
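A self-contained sketch of the same dispatch-by-type pattern; the toy operations table stands in for bitsharesbase.operationids.operations and is an assumption for illustration only.
operations = {"transfer": 0, "limit_order_create": 1}
ops_by_id = {v: k for k, v in operations.items()}

def getOperationNameForId(i):
    # Reverse lookup from numeric id to operation name.
    return ops_by_id[i]

def getOperationName(id):
    if isinstance(id, str):
        assert id in operations.keys(), "Unknown operation {}".format(id)
        return id
    elif isinstance(id, int):
        return getOperationNameForId(id)
    else:
        raise ValueError

print(getOperationName("transfer"))  # transfer
print(getOperationName(1))           # limit_order_create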
ModisWorks/modis
|
modis/discord_modis/modules/help/api_help.py
|
https://github.com/ModisWorks/modis/blob/1f1225c9841835ec1d1831fc196306527567db8b/modis/discord_modis/modules/help/api_help.py#L32-L52
|
def get_help_commands(server_prefix):
"""
Get the help commands for all modules
Args:
server_prefix: The server command prefix
Returns:
datapacks (list): A list of datapacks for the help commands for all the modules
"""
datapacks = []
_dir = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
for module_name in os.listdir("{}/../".format(_dir)):
if not module_name.startswith("_") and not module_name.startswith("!"):
help_command = "`{}help {}`".format(server_prefix, module_name)
datapacks.append((module_name, help_command, True))
return datapacks
|
[
"def",
"get_help_commands",
"(",
"server_prefix",
")",
":",
"datapacks",
"=",
"[",
"]",
"_dir",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
")",
"for",
"module_name",
"in",
"os",
".",
"listdir",
"(",
"\"{}/../\"",
".",
"format",
"(",
"_dir",
")",
")",
":",
"if",
"not",
"module_name",
".",
"startswith",
"(",
"\"_\"",
")",
"and",
"not",
"module_name",
".",
"startswith",
"(",
"\"!\"",
")",
":",
"help_command",
"=",
"\"`{}help {}`\"",
".",
"format",
"(",
"server_prefix",
",",
"module_name",
")",
"datapacks",
".",
"append",
"(",
"(",
"module_name",
",",
"help_command",
",",
"True",
")",
")",
"return",
"datapacks"
] |
Get the help commands for all modules
Args:
server_prefix: The server command prefix
Returns:
datapacks (list): A list of datapacks for the help commands for all the modules
|
[
"Get",
"the",
"help",
"commands",
"for",
"all",
"modules"
] |
python
|
train
|
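A reduced sketch of the datapack construction above, with an in-memory name list replacing the os.listdir scan; the module names here are hypothetical.
def build_help_datapacks(server_prefix, module_names):
    # Same filtering and formatting as get_help_commands, minus the filesystem walk.
    datapacks = []
    for module_name in module_names:
        if not module_name.startswith("_") and not module_name.startswith("!"):
            help_command = "`{}help {}`".format(server_prefix, module_name)
            datapacks.append((module_name, help_command, True))
    return datapacks

print(build_help_datapacks("!", ["music", "_hidden", "help"]))
# [('music', '`!help music`', True), ('help', '`!help help`', True)]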
odlgroup/odl
|
odl/trafos/fourier.py
|
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/trafos/fourier.py#L146-L171
|
def _call(self, x, out, **kwargs):
"""Implement ``self(x, out[, **kwargs])``.
Parameters
----------
x : `domain` element
Discretized function to be transformed
out : `range` element
Element to which the output is written
Notes
-----
See the ``pyfftw_call`` function for ``**kwargs`` options.
The parameters ``axes`` and ``halfcomplex`` cannot be
overridden.
See Also
--------
odl.trafos.backends.pyfftw_bindings.pyfftw_call :
Call pyfftw backend directly
"""
# TODO: Implement zero padding
if self.impl == 'numpy':
out[:] = self._call_numpy(x.asarray())
else:
out[:] = self._call_pyfftw(x.asarray(), out.asarray(), **kwargs)
|
[
"def",
"_call",
"(",
"self",
",",
"x",
",",
"out",
",",
"*",
"*",
"kwargs",
")",
":",
"# TODO: Implement zero padding",
"if",
"self",
".",
"impl",
"==",
"'numpy'",
":",
"out",
"[",
":",
"]",
"=",
"self",
".",
"_call_numpy",
"(",
"x",
".",
"asarray",
"(",
")",
")",
"else",
":",
"out",
"[",
":",
"]",
"=",
"self",
".",
"_call_pyfftw",
"(",
"x",
".",
"asarray",
"(",
")",
",",
"out",
".",
"asarray",
"(",
")",
",",
"*",
"*",
"kwargs",
")"
] |
Implement ``self(x, out[, **kwargs])``.
Parameters
----------
x : `domain` element
Discretized function to be transformed
out : `range` element
Element to which the output is written
Notes
-----
See the ``pyfftw_call`` function for ``**kwargs`` options.
The parameters ``axes`` and ``halfcomplex`` cannot be
overridden.
See Also
--------
odl.trafos.backends.pyfftw_bindings.pyfftw_call :
Call pyfftw backend directly
|
[
"Implement",
"self",
"(",
"x",
"out",
"[",
"**",
"kwargs",
"]",
")",
"."
] |
python
|
train
|
yinkaisheng/Python-UIAutomation-for-Windows
|
uiautomation/uiautomation.py
|
https://github.com/yinkaisheng/Python-UIAutomation-for-Windows/blob/2cc91060982cc8b777152e698d677cc2989bf263/uiautomation/uiautomation.py#L3146-L3164
|
def GetPixelColorsOfRects(self, rects: list) -> list:
"""
rects: a list of rects, such as [(0,0,10,10), (10,10,20,20),(x,y,width,height)].
Return list, a list whose elements are ctypes.Array which is an iterable array of int values in argb.
"""
rects2 = [(x, y, x + width, y + height) for x, y, width, height in rects]
left, top, right, bottom = zip(*rects2)
left, top, right, bottom = min(left), min(top), max(right), max(bottom)
width, height = right - left, bottom - top
allColors = self.GetPixelColorsOfRect(left, top, width, height)
colorsOfRects = []
for x, y, w, h in rects:
x -= left
y -= top
colors = []
for row in range(h):
colors.extend(allColors[(y + row) * width + x:(y + row) * width + x + w])
colorsOfRects.append(colors)
return colorsOfRects
|
[
"def",
"GetPixelColorsOfRects",
"(",
"self",
",",
"rects",
":",
"list",
")",
"->",
"list",
":",
"rects2",
"=",
"[",
"(",
"x",
",",
"y",
",",
"x",
"+",
"width",
",",
"y",
"+",
"height",
")",
"for",
"x",
",",
"y",
",",
"width",
",",
"height",
"in",
"rects",
"]",
"left",
",",
"top",
",",
"right",
",",
"bottom",
"=",
"zip",
"(",
"*",
"rects2",
")",
"left",
",",
"top",
",",
"right",
",",
"bottom",
"=",
"min",
"(",
"left",
")",
",",
"min",
"(",
"top",
")",
",",
"max",
"(",
"right",
")",
",",
"max",
"(",
"bottom",
")",
"width",
",",
"height",
"=",
"right",
"-",
"left",
",",
"bottom",
"-",
"top",
"allColors",
"=",
"self",
".",
"GetPixelColorsOfRect",
"(",
"left",
",",
"top",
",",
"width",
",",
"height",
")",
"colorsOfRects",
"=",
"[",
"]",
"for",
"x",
",",
"y",
",",
"w",
",",
"h",
"in",
"rects",
":",
"x",
"-=",
"left",
"y",
"-=",
"top",
"colors",
"=",
"[",
"]",
"for",
"row",
"in",
"range",
"(",
"h",
")",
":",
"colors",
".",
"extend",
"(",
"allColors",
"[",
"(",
"y",
"+",
"row",
")",
"*",
"width",
"+",
"x",
":",
"(",
"y",
"+",
"row",
")",
"*",
"width",
"+",
"x",
"+",
"w",
"]",
")",
"colorsOfRects",
".",
"append",
"(",
"colors",
")",
"return",
"colorsOfRects"
] |
rects: a list of rects, such as [(0,0,10,10), (10,10,20,20),(x,y,width,height)].
Return list, a list whose elements are ctypes.Array which is an iterable array of int values in argb.
|
[
"rects",
":",
"a",
"list",
"of",
"rects",
"such",
"as",
"[",
"(",
"0",
"0",
"10",
"10",
")",
"(",
"10",
"10",
"20",
"20",
")",
"(",
"x",
"y",
"width",
"height",
")",
"]",
".",
"Return",
"list",
"a",
"list",
"whose",
"elements",
"are",
"ctypes",
".",
"Array",
"which",
"is",
"an",
"iterable",
"array",
"of",
"int",
"values",
"in",
"argb",
"."
] |
python
|
valid
|
albahnsen/CostSensitiveClassification
|
costcla/models/cost_tree.py
|
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/cost_tree.py#L654-L676
|
def pruning(self, X, y, cost_mat):
""" Function that prune the decision tree.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
y_true : array indicator matrix
Ground truth (correct) labels.
cost_mat : array-like of shape = [n_samples, 4]
Cost matrix of the classification problem
Where the columns represent the costs of: false positives, false negatives,
true positives and true negatives, for each example.
"""
self.tree_.tree_pruned = copy.deepcopy(self.tree_.tree)
if self.tree_.n_nodes > 0:
self._pruning(X, y, cost_mat)
nodes_pruned = self._nodes(self.tree_.tree_pruned)
self.tree_.n_nodes_pruned = len(nodes_pruned)
|
[
"def",
"pruning",
"(",
"self",
",",
"X",
",",
"y",
",",
"cost_mat",
")",
":",
"self",
".",
"tree_",
".",
"tree_pruned",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"tree_",
".",
"tree",
")",
"if",
"self",
".",
"tree_",
".",
"n_nodes",
">",
"0",
":",
"self",
".",
"_pruning",
"(",
"X",
",",
"y",
",",
"cost_mat",
")",
"nodes_pruned",
"=",
"self",
".",
"_nodes",
"(",
"self",
".",
"tree_",
".",
"tree_pruned",
")",
"self",
".",
"tree_",
".",
"n_nodes_pruned",
"=",
"len",
"(",
"nodes_pruned",
")"
] |
Function that prunes the decision tree.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
y_true : array indicator matrix
Ground truth (correct) labels.
cost_mat : array-like of shape = [n_samples, 4]
Cost matrix of the classification problem
Where the columns represent the costs of: false positives, false negatives,
true positives and true negatives, for each example.
|
[
"Function",
"that",
"prune",
"the",
"decision",
"tree",
"."
] |
python
|
train
|
Valassis-Digital-Media/spylon
|
spylon/spark/launcher.py
|
https://github.com/Valassis-Digital-Media/spylon/blob/ac00e285fa1c790674606b793819c3e5baee0d48/spylon/spark/launcher.py#L584-L610
|
def with_sql_context(application_name, conf=None):
"""Context manager for a spark context
Returns
-------
sc : SparkContext
sql_context: SQLContext
Examples
--------
Used within a context manager
>>> with with_sql_context("MyApplication") as (sc, sql_context):
... import pyspark
... # Do stuff
... pass
"""
if conf is None:
conf = default_configuration
assert isinstance(conf, SparkConfiguration)
sc = conf.spark_context(application_name)
import pyspark.sql
try:
yield sc, pyspark.sql.SQLContext(sc)
finally:
sc.stop()
|
[
"def",
"with_sql_context",
"(",
"application_name",
",",
"conf",
"=",
"None",
")",
":",
"if",
"conf",
"is",
"None",
":",
"conf",
"=",
"default_configuration",
"assert",
"isinstance",
"(",
"conf",
",",
"SparkConfiguration",
")",
"sc",
"=",
"conf",
".",
"spark_context",
"(",
"application_name",
")",
"import",
"pyspark",
".",
"sql",
"try",
":",
"yield",
"sc",
",",
"pyspark",
".",
"sql",
".",
"SQLContext",
"(",
"sc",
")",
"finally",
":",
"sc",
".",
"stop",
"(",
")"
] |
Context manager for a spark context
Returns
-------
sc : SparkContext
sql_context: SQLContext
Examples
--------
Used within a context manager
>>> with with_sql_context("MyApplication") as (sc, sql_context):
... import pyspark
... # Do stuff
... pass
|
[
"Context",
"manager",
"for",
"a",
"spark",
"context"
] |
python
|
train
|
wharris/dougrain
|
dougrain/document.py
|
https://github.com/wharris/dougrain/blob/45062a1562fc34793e40c6253a93aa91eb4cf855/dougrain/document.py#L665-L731
|
def delete_embedded(self, rel=None, href=lambda _: True):
"""Removes an embedded resource from this document.
Calling code should use this method to remove embedded resources
instead of modifying ``embedded`` directly.
The optional arguments, ``rel`` and ``href`` are used to select the
embedded resources that will be removed. If neither of the optional
arguments are given, this method removes every embedded resource from
this document. If ``rel`` is given, only embedded resources for the
matching link relationship type are removed. If ``href`` is given, only
embedded resources with a ``self`` link matching ``href`` are deleted.
If both ``rel`` and ``href`` are given, only embedded resources with
matching ``self`` link for the matching link relationship type are
removed.
Arguments:
- ``rel``: an optional string specifying the link relationship type of
the embedded resources to be removed.
- ``href``: optionally, a string specifying the ``href`` of the
``self`` links of the resources to be removed, or a
callable that returns true when its single argument matches
the ``href`` of the ``self`` link of one of the resources
to be removed.
"""
if EMBEDDED_KEY not in self.o:
return
if rel is None:
for rel in list(self.o[EMBEDDED_KEY].keys()):
self.delete_embedded(rel, href)
return
if rel not in self.o[EMBEDDED_KEY]:
return
if callable(href):
url_filter = href
else:
url_filter = lambda x: x == href
rel_embeds = self.o[EMBEDDED_KEY][rel]
if isinstance(rel_embeds, dict):
del self.o[EMBEDDED_KEY][rel]
if not self.o[EMBEDDED_KEY]:
del self.o[EMBEDDED_KEY]
return
new_rel_embeds = []
for embedded in list(rel_embeds):
embedded_doc = Document(embedded, self.base_uri)
if not url_filter(embedded_doc.url()):
new_rel_embeds.append(embedded)
if not new_rel_embeds:
del self.o[EMBEDDED_KEY][rel]
elif len(new_rel_embeds) == 1:
self.o[EMBEDDED_KEY][rel] = new_rel_embeds[0]
else:
self.o[EMBEDDED_KEY][rel] = new_rel_embeds
if not self.o[EMBEDDED_KEY]:
del self.o[EMBEDDED_KEY]
|
[
"def",
"delete_embedded",
"(",
"self",
",",
"rel",
"=",
"None",
",",
"href",
"=",
"lambda",
"_",
":",
"True",
")",
":",
"if",
"EMBEDDED_KEY",
"not",
"in",
"self",
".",
"o",
":",
"return",
"if",
"rel",
"is",
"None",
":",
"for",
"rel",
"in",
"list",
"(",
"self",
".",
"o",
"[",
"EMBEDDED_KEY",
"]",
".",
"keys",
"(",
")",
")",
":",
"self",
".",
"delete_embedded",
"(",
"rel",
",",
"href",
")",
"return",
"if",
"rel",
"not",
"in",
"self",
".",
"o",
"[",
"EMBEDDED_KEY",
"]",
":",
"return",
"if",
"callable",
"(",
"href",
")",
":",
"url_filter",
"=",
"href",
"else",
":",
"url_filter",
"=",
"lambda",
"x",
":",
"x",
"==",
"href",
"rel_embeds",
"=",
"self",
".",
"o",
"[",
"EMBEDDED_KEY",
"]",
"[",
"rel",
"]",
"if",
"isinstance",
"(",
"rel_embeds",
",",
"dict",
")",
":",
"del",
"self",
".",
"o",
"[",
"EMBEDDED_KEY",
"]",
"[",
"rel",
"]",
"if",
"not",
"self",
".",
"o",
"[",
"EMBEDDED_KEY",
"]",
":",
"del",
"self",
".",
"o",
"[",
"EMBEDDED_KEY",
"]",
"return",
"new_rel_embeds",
"=",
"[",
"]",
"for",
"embedded",
"in",
"list",
"(",
"rel_embeds",
")",
":",
"embedded_doc",
"=",
"Document",
"(",
"embedded",
",",
"self",
".",
"base_uri",
")",
"if",
"not",
"url_filter",
"(",
"embedded_doc",
".",
"url",
"(",
")",
")",
":",
"new_rel_embeds",
".",
"append",
"(",
"embedded",
")",
"if",
"not",
"new_rel_embeds",
":",
"del",
"self",
".",
"o",
"[",
"EMBEDDED_KEY",
"]",
"[",
"rel",
"]",
"elif",
"len",
"(",
"new_rel_embeds",
")",
"==",
"1",
":",
"self",
".",
"o",
"[",
"EMBEDDED_KEY",
"]",
"[",
"rel",
"]",
"=",
"new_rel_embeds",
"[",
"0",
"]",
"else",
":",
"self",
".",
"o",
"[",
"EMBEDDED_KEY",
"]",
"[",
"rel",
"]",
"=",
"new_rel_embeds",
"if",
"not",
"self",
".",
"o",
"[",
"EMBEDDED_KEY",
"]",
":",
"del",
"self",
".",
"o",
"[",
"EMBEDDED_KEY",
"]"
] |
Removes an embedded resource from this document.
Calling code should use this method to remove embedded resources
instead of modifying ``embedded`` directly.
The optional arguments, ``rel`` and ``href`` are used to select the
embedded resources that will be removed. If neither of the optional
arguments are given, this method removes every embedded resource from
this document. If ``rel`` is given, only embedded resources for the
matching link relationship type are removed. If ``href`` is given, only
embedded resources with a ``self`` link matching ``href`` are deleted.
If both ``rel`` and ``href`` are given, only embedded resources with
matching ``self`` link for the matching link relationship type are
removed.
Arguments:
- ``rel``: an optional string specifying the link relationship type of
the embedded resources to be removed.
- ``href``: optionally, a string specifying the ``href`` of the
``self`` links of the resources to be removed, or a
callable that returns true when its single argument matches
the ``href`` of the ``self`` link of one of the resources
to be removed.
|
[
"Removes",
"an",
"embedded",
"resource",
"from",
"this",
"document",
"."
] |
python
|
train
|
avalente/appmetrics
|
appmetrics/statistics.py
|
https://github.com/avalente/appmetrics/blob/366fc7e1ca897e49a2227cbfa43bfa02a47f1acc/appmetrics/statistics.py#L442-L455
|
def percentile(data, n):
"""Return the n-th percentile of the given data
Assume that the data are already sorted
"""
size = len(data)
idx = (n / 100.0) * size - 0.5
if idx < 0 or idx > size:
raise StatisticsError("Too few data points ({}) for {}th percentile".format(size, n))
return data[int(idx)]
|
[
"def",
"percentile",
"(",
"data",
",",
"n",
")",
":",
"size",
"=",
"len",
"(",
"data",
")",
"idx",
"=",
"(",
"n",
"/",
"100.0",
")",
"*",
"size",
"-",
"0.5",
"if",
"idx",
"<",
"0",
"or",
"idx",
">",
"size",
":",
"raise",
"StatisticsError",
"(",
"\"Too few data points ({}) for {}th percentile\"",
".",
"format",
"(",
"size",
",",
"n",
")",
")",
"return",
"data",
"[",
"int",
"(",
"idx",
")",
"]"
] |
Return the n-th percentile of the given data
Assume that the data are already sorted
|
[
"Return",
"the",
"n",
"-",
"th",
"percentile",
"of",
"the",
"given",
"data"
] |
python
|
train
|
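A self-contained check of the percentile arithmetic above; the StatisticsError stand-in is an assumption, since appmetrics defines its own exception type.
class StatisticsError(ValueError):
    pass

def percentile(data, n):
    # data must already be sorted, as the docstring above assumes.
    size = len(data)
    idx = (n / 100.0) * size - 0.5
    if idx < 0 or idx > size:
        raise StatisticsError("Too few data points ({}) for {}th percentile".format(size, n))
    return data[int(idx)]

print(percentile([15, 20, 35, 40, 50], 50))  # 35 (idx = 2.0 -> data[2])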
deepmind/pysc2
|
pysc2/lib/colors.py
|
https://github.com/deepmind/pysc2/blob/df4cc4b00f07a2242be9ba153d4a7f4ad2017897/pysc2/lib/colors.py#L78-L121
|
def smooth_hue_palette(scale):
"""Takes an array of ints and returns a corresponding colored rgb array."""
# http://en.wikipedia.org/wiki/HSL_and_HSV#From_HSL
# Based on http://stackoverflow.com/a/17382854 , with simplifications and
# optimizations. Assumes S=1, L=0.5, meaning C=1 and m=0.
# 0 stays black, everything else moves into a hue.
# Some initial values and scaling. Check wikipedia for variable meanings.
array = numpy.arange(scale)
h = array * (6 / scale) # range of [0,6)
x = 255 * (1 - numpy.absolute(numpy.mod(h, 2) - 1))
c = 255
# Initialize outputs to zero/black
out = numpy.zeros(h.shape + (3,), float)
r = out[..., 0]
g = out[..., 1]
b = out[..., 2]
mask = (0 < h) & (h < 1)
r[mask] = c
g[mask] = x[mask]
mask = (1 <= h) & (h < 2)
r[mask] = x[mask]
g[mask] = c
mask = (2 <= h) & (h < 3)
g[mask] = c
b[mask] = x[mask]
mask = (3 <= h) & (h < 4)
g[mask] = x[mask]
b[mask] = c
mask = (4 <= h) & (h < 5)
r[mask] = x[mask]
b[mask] = c
mask = 5 <= h
r[mask] = c
b[mask] = x[mask]
return out
|
[
"def",
"smooth_hue_palette",
"(",
"scale",
")",
":",
"# http://en.wikipedia.org/wiki/HSL_and_HSV#From_HSL",
"# Based on http://stackoverflow.com/a/17382854 , with simplifications and",
"# optimizations. Assumes S=1, L=0.5, meaning C=1 and m=0.",
"# 0 stays black, everything else moves into a hue.",
"# Some initial values and scaling. Check wikipedia for variable meanings.",
"array",
"=",
"numpy",
".",
"arange",
"(",
"scale",
")",
"h",
"=",
"array",
"*",
"(",
"6",
"/",
"scale",
")",
"# range of [0,6)",
"x",
"=",
"255",
"*",
"(",
"1",
"-",
"numpy",
".",
"absolute",
"(",
"numpy",
".",
"mod",
"(",
"h",
",",
"2",
")",
"-",
"1",
")",
")",
"c",
"=",
"255",
"# Initialize outputs to zero/black",
"out",
"=",
"numpy",
".",
"zeros",
"(",
"h",
".",
"shape",
"+",
"(",
"3",
",",
")",
",",
"float",
")",
"r",
"=",
"out",
"[",
"...",
",",
"0",
"]",
"g",
"=",
"out",
"[",
"...",
",",
"1",
"]",
"b",
"=",
"out",
"[",
"...",
",",
"2",
"]",
"mask",
"=",
"(",
"0",
"<",
"h",
")",
"&",
"(",
"h",
"<",
"1",
")",
"r",
"[",
"mask",
"]",
"=",
"c",
"g",
"[",
"mask",
"]",
"=",
"x",
"[",
"mask",
"]",
"mask",
"=",
"(",
"1",
"<=",
"h",
")",
"&",
"(",
"h",
"<",
"2",
")",
"r",
"[",
"mask",
"]",
"=",
"x",
"[",
"mask",
"]",
"g",
"[",
"mask",
"]",
"=",
"c",
"mask",
"=",
"(",
"2",
"<=",
"h",
")",
"&",
"(",
"h",
"<",
"3",
")",
"g",
"[",
"mask",
"]",
"=",
"c",
"b",
"[",
"mask",
"]",
"=",
"x",
"[",
"mask",
"]",
"mask",
"=",
"(",
"3",
"<=",
"h",
")",
"&",
"(",
"h",
"<",
"4",
")",
"g",
"[",
"mask",
"]",
"=",
"x",
"[",
"mask",
"]",
"b",
"[",
"mask",
"]",
"=",
"c",
"mask",
"=",
"(",
"4",
"<=",
"h",
")",
"&",
"(",
"h",
"<",
"5",
")",
"r",
"[",
"mask",
"]",
"=",
"x",
"[",
"mask",
"]",
"b",
"[",
"mask",
"]",
"=",
"c",
"mask",
"=",
"5",
"<=",
"h",
"r",
"[",
"mask",
"]",
"=",
"c",
"b",
"[",
"mask",
"]",
"=",
"x",
"[",
"mask",
"]",
"return",
"out"
] |
Takes an array of ints and returns a corresponding colored rgb array.
|
[
"Takes",
"an",
"array",
"of",
"ints",
"and",
"returns",
"a",
"corresponding",
"colored",
"rgb",
"array",
"."
] |
python
|
train
|
Peter-Slump/django-dynamic-fixtures
|
src/dynamic_fixtures/management/commands/load_dynamic_fixtures.py
|
https://github.com/Peter-Slump/django-dynamic-fixtures/blob/da99b4b12b11be28ea4b36b6cf2896ca449c73c1/src/dynamic_fixtures/management/commands/load_dynamic_fixtures.py#L61-L78
|
def progress_callback(self, action, node, elapsed_time=None):
"""
Callback to report progress
:param str action:
:param list node: app, module
:param int | None elapsed_time:
"""
if action == 'load_start':
self.stdout.write('Loading fixture {}.{}...'.format(*node),
ending='')
self.stdout.flush()
elif action == 'load_success':
message = 'SUCCESS'
if elapsed_time:
message += ' ({:.03} seconds) '.format(elapsed_time)
self.stdout.write(message)
|
[
"def",
"progress_callback",
"(",
"self",
",",
"action",
",",
"node",
",",
"elapsed_time",
"=",
"None",
")",
":",
"if",
"action",
"==",
"'load_start'",
":",
"self",
".",
"stdout",
".",
"write",
"(",
"'Loading fixture {}.{}...'",
".",
"format",
"(",
"*",
"node",
")",
",",
"ending",
"=",
"''",
")",
"self",
".",
"stdout",
".",
"flush",
"(",
")",
"elif",
"action",
"==",
"'load_success'",
":",
"message",
"=",
"'SUCCESS'",
"if",
"elapsed_time",
":",
"message",
"+=",
"' ({:.03} seconds) '",
".",
"format",
"(",
"elapsed_time",
")",
"self",
".",
"stdout",
".",
"write",
"(",
"message",
")"
] |
Callback to report progress
:param str action:
:param list node: app, module
:param int | None elapsed_time:
|
[
"Callback",
"to",
"report",
"progress"
] |
python
|
train
|
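A standalone sketch of the callback above, with sys.stdout standing in for Django's self.stdout wrapper (which is what supplies the ending='' keyword in the original).
import sys

def progress_callback(action, node, elapsed_time=None):
    if action == 'load_start':
        sys.stdout.write('Loading fixture {}.{}...'.format(*node))
        sys.stdout.flush()
    elif action == 'load_success':
        message = 'SUCCESS'
        if elapsed_time:
            message += ' ({:.03} seconds) '.format(elapsed_time)
        sys.stdout.write(message + '\n')

progress_callback('load_start', ['myapp', '0001_users'])
progress_callback('load_success', ['myapp', '0001_users'], 0.42)
# Loading fixture myapp.0001_users...SUCCESS (0.42 seconds)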
PmagPy/PmagPy
|
programs/demag_gui.py
|
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/demag_gui.py#L1997-L2080
|
def calc_and_plot_sample_orient_check(self):
"""
If sample orientation is on, plots the wrong arrow, wrong compass,
and rotated sample error directions for the current specimen
interpretation on the high level mean plot so that you can check
sample orientation good/bad.
"""
fit = self.current_fit
if fit == None:
return
pars = fit.get('specimen')
if 'specimen_dec' not in list(pars.keys()) or 'specimen_inc' not in list(pars.keys()):
fit.put(self.s, 'specimen', self.get_PCA_parameters(
self.s, fit, fit.tmin, fit.tmax, 'specimen', fit.PCA_type))
pars = fit.get('specimen')
if not pars:
self.user_warning(
"could not calculate fit %s for specimen %s in specimen coordinate system while checking sample orientation please check data" % (fit.name, self.s))
return
dec, inc = pars['specimen_dec'], pars['specimen_inc']
sample = self.Data_hierarchy['sample_of_specimen'][self.s]
if sample not in list(self.Data_info["er_samples"].keys()) or "sample_azimuth" not in list(self.Data_info["er_samples"][sample].keys()) or "sample_dip" not in list(self.Data_info["er_samples"][sample].keys()):
self.user_warning(
"Could not display sample orientation checks because sample azimuth or sample dip is missing from er_samples table for sample %s" % sample)
self.check_orient_on = False # stop trying because this raises a ton of warnings
return
try:
azimuth = float(self.Data_info["er_samples"][sample]['sample_azimuth'])
dip = float(self.Data_info["er_samples"][sample]['sample_dip'])
except TypeError:
self.user_warning(
"Could not display sample orientation checks because azimuth or dip is missing (or invalid) for sample %s" % sample)
self.check_orient_on = False # stop trying because this raises a ton of warnings
return
# first test wrong direction of drill arrows (flip drill direction in opposite direction and re-calculate d,i)
d, i = pmag.dogeo(dec, inc, azimuth-180., -dip)
XY = pmag.dimap(d, i)
if i > 0:
FC = fit.color
SIZE = 15*self.GUI_RESOLUTION
else:
FC = 'white'
SIZE = 15*self.GUI_RESOLUTION
self.high_level_eqarea.scatter([XY[0]], [
XY[1]], marker='^', edgecolor=fit.color, facecolor=FC, s=SIZE, lw=1, clip_on=False)
if self.ie_open:
self.ie.scatter([XY[0]], [XY[1]], marker='^', edgecolor=fit.color,
facecolor=FC, s=SIZE, lw=1, clip_on=False)
# Then test wrong end of compass (take az-180.)
d, i = pmag.dogeo(dec, inc, azimuth-180., dip)
XY = pmag.dimap(d, i)
if i > 0:
FC = fit.color
SIZE = 15*self.GUI_RESOLUTION
else:
FC = 'white'
SIZE = 15*self.GUI_RESOLUTION
self.high_level_eqarea.scatter([XY[0]], [
XY[1]], marker='v', edgecolor=fit.color, facecolor=FC, s=SIZE, lw=1, clip_on=False)
if self.ie_open:
self.ie.scatter([XY[0]], [XY[1]], marker='v', edgecolor=fit.color,
facecolor=FC, s=SIZE, lw=1, clip_on=False)
# did the sample spin in the hole?
# now spin around specimen's z
X_up, Y_up, X_d, Y_d = [], [], [], []
for incr in range(0, 360, 5):
d, i = pmag.dogeo(dec+incr, inc, azimuth, dip)
XY = pmag.dimap(d, i)
if i >= 0:
X_d.append(XY[0])
Y_d.append(XY[1])
else:
X_up.append(XY[0])
Y_up.append(XY[1])
self.high_level_eqarea.scatter(
X_d, Y_d, marker='.', color=fit.color, alpha=.5, s=SIZE/2, lw=1, clip_on=False)
self.high_level_eqarea.scatter(
X_up, Y_up, marker='.', color=fit.color, s=SIZE/2, lw=1, clip_on=False)
if self.ie_open:
self.ie.scatter(X_d, Y_d, marker='.', color=fit.color,
alpha=.5, s=SIZE/2, lw=1, clip_on=False)
self.ie.scatter(X_up, Y_up, marker='.',
color=fit.color, s=SIZE/2, lw=1, clip_on=False)
|
[
"def",
"calc_and_plot_sample_orient_check",
"(",
"self",
")",
":",
"fit",
"=",
"self",
".",
"current_fit",
"if",
"fit",
"==",
"None",
":",
"return",
"pars",
"=",
"fit",
".",
"get",
"(",
"'specimen'",
")",
"if",
"'specimen_dec'",
"not",
"in",
"list",
"(",
"pars",
".",
"keys",
"(",
")",
")",
"or",
"'specimen_inc'",
"not",
"in",
"list",
"(",
"pars",
".",
"keys",
"(",
")",
")",
":",
"fit",
".",
"put",
"(",
"self",
".",
"s",
",",
"'specimen'",
",",
"self",
".",
"get_PCA_parameters",
"(",
"self",
".",
"s",
",",
"fit",
",",
"fit",
".",
"tmin",
",",
"fit",
".",
"tmax",
",",
"'specimen'",
",",
"fit",
".",
"PCA_type",
")",
")",
"pars",
"=",
"fit",
".",
"get",
"(",
"'specimen'",
")",
"if",
"not",
"pars",
":",
"self",
".",
"user_warning",
"(",
"\"could not calculate fit %s for specimen %s in specimen coordinate system while checking sample orientation please check data\"",
"%",
"(",
"fit",
".",
"name",
",",
"self",
".",
"s",
")",
")",
"return",
"dec",
",",
"inc",
"=",
"pars",
"[",
"'specimen_dec'",
"]",
",",
"pars",
"[",
"'specimen_inc'",
"]",
"sample",
"=",
"self",
".",
"Data_hierarchy",
"[",
"'sample_of_specimen'",
"]",
"[",
"self",
".",
"s",
"]",
"if",
"sample",
"not",
"in",
"list",
"(",
"self",
".",
"Data_info",
"[",
"\"er_samples\"",
"]",
".",
"keys",
"(",
")",
")",
"or",
"\"sample_azimuth\"",
"not",
"in",
"list",
"(",
"self",
".",
"Data_info",
"[",
"\"er_samples\"",
"]",
"[",
"sample",
"]",
".",
"keys",
"(",
")",
")",
"or",
"\"sample_dip\"",
"not",
"in",
"list",
"(",
"self",
".",
"Data_info",
"[",
"\"er_samples\"",
"]",
"[",
"sample",
"]",
".",
"keys",
"(",
")",
")",
":",
"self",
".",
"user_warning",
"(",
"\"Could not display sample orientation checks because sample azimuth or sample dip is missing from er_samples table for sample %s\"",
"%",
"sample",
")",
"self",
".",
"check_orient_on",
"=",
"False",
"# stop trying because this raises a ton of warnings",
"return",
"try",
":",
"azimuth",
"=",
"float",
"(",
"self",
".",
"Data_info",
"[",
"\"er_samples\"",
"]",
"[",
"sample",
"]",
"[",
"'sample_azimuth'",
"]",
")",
"dip",
"=",
"float",
"(",
"self",
".",
"Data_info",
"[",
"\"er_samples\"",
"]",
"[",
"sample",
"]",
"[",
"'sample_dip'",
"]",
")",
"except",
"TypeError",
":",
"self",
".",
"user_warning",
"(",
"\"Could not display sample orientation checks because azimuth or dip is missing (or invalid) for sample %s\"",
"%",
"sample",
")",
"self",
".",
"check_orient_on",
"=",
"False",
"# stop trying because this raises a ton of warnings",
"return",
"# first test wrong direction of drill arrows (flip drill direction in opposite direction and re-calculate d,i)",
"d",
",",
"i",
"=",
"pmag",
".",
"dogeo",
"(",
"dec",
",",
"inc",
",",
"azimuth",
"-",
"180.",
",",
"-",
"dip",
")",
"XY",
"=",
"pmag",
".",
"dimap",
"(",
"d",
",",
"i",
")",
"if",
"i",
">",
"0",
":",
"FC",
"=",
"fit",
".",
"color",
"SIZE",
"=",
"15",
"*",
"self",
".",
"GUI_RESOLUTION",
"else",
":",
"FC",
"=",
"'white'",
"SIZE",
"=",
"15",
"*",
"self",
".",
"GUI_RESOLUTION",
"self",
".",
"high_level_eqarea",
".",
"scatter",
"(",
"[",
"XY",
"[",
"0",
"]",
"]",
",",
"[",
"XY",
"[",
"1",
"]",
"]",
",",
"marker",
"=",
"'^'",
",",
"edgecolor",
"=",
"fit",
".",
"color",
",",
"facecolor",
"=",
"FC",
",",
"s",
"=",
"SIZE",
",",
"lw",
"=",
"1",
",",
"clip_on",
"=",
"False",
")",
"if",
"self",
".",
"ie_open",
":",
"self",
".",
"ie",
".",
"scatter",
"(",
"[",
"XY",
"[",
"0",
"]",
"]",
",",
"[",
"XY",
"[",
"1",
"]",
"]",
",",
"marker",
"=",
"'^'",
",",
"edgecolor",
"=",
"fit",
".",
"color",
",",
"facecolor",
"=",
"FC",
",",
"s",
"=",
"SIZE",
",",
"lw",
"=",
"1",
",",
"clip_on",
"=",
"False",
")",
"# Then test wrong end of compass (take az-180.)",
"d",
",",
"i",
"=",
"pmag",
".",
"dogeo",
"(",
"dec",
",",
"inc",
",",
"azimuth",
"-",
"180.",
",",
"dip",
")",
"XY",
"=",
"pmag",
".",
"dimap",
"(",
"d",
",",
"i",
")",
"if",
"i",
">",
"0",
":",
"FC",
"=",
"fit",
".",
"color",
"SIZE",
"=",
"15",
"*",
"self",
".",
"GUI_RESOLUTION",
"else",
":",
"FC",
"=",
"'white'",
"SIZE",
"=",
"15",
"*",
"self",
".",
"GUI_RESOLUTION",
"self",
".",
"high_level_eqarea",
".",
"scatter",
"(",
"[",
"XY",
"[",
"0",
"]",
"]",
",",
"[",
"XY",
"[",
"1",
"]",
"]",
",",
"marker",
"=",
"'v'",
",",
"edgecolor",
"=",
"fit",
".",
"color",
",",
"facecolor",
"=",
"FC",
",",
"s",
"=",
"SIZE",
",",
"lw",
"=",
"1",
",",
"clip_on",
"=",
"False",
")",
"if",
"self",
".",
"ie_open",
":",
"self",
".",
"ie",
".",
"scatter",
"(",
"[",
"XY",
"[",
"0",
"]",
"]",
",",
"[",
"XY",
"[",
"1",
"]",
"]",
",",
"marker",
"=",
"'v'",
",",
"edgecolor",
"=",
"fit",
".",
"color",
",",
"facecolor",
"=",
"FC",
",",
"s",
"=",
"SIZE",
",",
"lw",
"=",
"1",
",",
"clip_on",
"=",
"False",
")",
"# did the sample spin in the hole?",
"# now spin around specimen's z",
"X_up",
",",
"Y_up",
",",
"X_d",
",",
"Y_d",
"=",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"for",
"incr",
"in",
"range",
"(",
"0",
",",
"360",
",",
"5",
")",
":",
"d",
",",
"i",
"=",
"pmag",
".",
"dogeo",
"(",
"dec",
"+",
"incr",
",",
"inc",
",",
"azimuth",
",",
"dip",
")",
"XY",
"=",
"pmag",
".",
"dimap",
"(",
"d",
",",
"i",
")",
"if",
"i",
">=",
"0",
":",
"X_d",
".",
"append",
"(",
"XY",
"[",
"0",
"]",
")",
"Y_d",
".",
"append",
"(",
"XY",
"[",
"1",
"]",
")",
"else",
":",
"X_up",
".",
"append",
"(",
"XY",
"[",
"0",
"]",
")",
"Y_up",
".",
"append",
"(",
"XY",
"[",
"1",
"]",
")",
"self",
".",
"high_level_eqarea",
".",
"scatter",
"(",
"X_d",
",",
"Y_d",
",",
"marker",
"=",
"'.'",
",",
"color",
"=",
"fit",
".",
"color",
",",
"alpha",
"=",
".5",
",",
"s",
"=",
"SIZE",
"/",
"2",
",",
"lw",
"=",
"1",
",",
"clip_on",
"=",
"False",
")",
"self",
".",
"high_level_eqarea",
".",
"scatter",
"(",
"X_up",
",",
"Y_up",
",",
"marker",
"=",
"'.'",
",",
"color",
"=",
"fit",
".",
"color",
",",
"s",
"=",
"SIZE",
"/",
"2",
",",
"lw",
"=",
"1",
",",
"clip_on",
"=",
"False",
")",
"if",
"self",
".",
"ie_open",
":",
"self",
".",
"ie",
".",
"scatter",
"(",
"X_d",
",",
"Y_d",
",",
"marker",
"=",
"'.'",
",",
"color",
"=",
"fit",
".",
"color",
",",
"alpha",
"=",
".5",
",",
"s",
"=",
"SIZE",
"/",
"2",
",",
"lw",
"=",
"1",
",",
"clip_on",
"=",
"False",
")",
"self",
".",
"ie",
".",
"scatter",
"(",
"X_up",
",",
"Y_up",
",",
"marker",
"=",
"'.'",
",",
"color",
"=",
"fit",
".",
"color",
",",
"s",
"=",
"SIZE",
"/",
"2",
",",
"lw",
"=",
"1",
",",
"clip_on",
"=",
"False",
")"
] |
If sample orientation is on, plots the wrong arrow, wrong compass,
and rotated sample error directions for the current specimen
interpretation on the high level mean plot so that you can check
sample orientation good/bad.
|
[
"If",
"sample",
"orientation",
"is",
"on",
"plots",
"the",
"wrong",
"arrow",
"wrong",
"compass",
"and",
"rotated",
"sample",
"error",
"directions",
"for",
"the",
"current",
"specimen",
"interpretation",
"on",
"the",
"high",
"level",
"mean",
"plot",
"so",
"that",
"you",
"can",
"check",
"sample",
"orientation",
"good",
"/",
"bad",
"."
] |
python
|
train
|
ynop/audiomate
|
audiomate/processing/pipeline/spectral.py
|
https://github.com/ynop/audiomate/blob/61727920b23a708293c3d526fa3000d4de9c6c21/audiomate/processing/pipeline/spectral.py#L10-L43
|
def stft_from_frames(frames, window='hann', dtype=np.complex64):
"""
Variation of the librosa.core.stft function,
that computes the short-time-fourier-transform from frames instead of from the signal.
See http://librosa.github.io/librosa/_modules/librosa/core/spectrum.html#stft
"""
win_length = frames.shape[0]
n_fft = win_length
fft_window = filters.get_window(window, win_length, fftbins=True)
# Reshape so that the window can be broadcast
fft_window = fft_window.reshape((-1, 1))
# Pre-allocate the STFT matrix
stft_matrix = np.empty((int(1 + n_fft // 2), frames.shape[1]),
dtype=dtype,
order='F')
# how many columns can we fit within MAX_MEM_BLOCK?
n_columns = int(util.MAX_MEM_BLOCK / (stft_matrix.shape[0] *
stft_matrix.itemsize))
for bl_s in range(0, stft_matrix.shape[1], n_columns):
bl_t = min(bl_s + n_columns, stft_matrix.shape[1])
# RFFT and Conjugate here to match phase from DPWE code
stft_matrix[:, bl_s:bl_t] = fft.fft(fft_window *
frames[:, bl_s:bl_t],
axis=0)[:stft_matrix.shape[0]].conj()
return stft_matrix
|
[
"def",
"stft_from_frames",
"(",
"frames",
",",
"window",
"=",
"'hann'",
",",
"dtype",
"=",
"np",
".",
"complex64",
")",
":",
"win_length",
"=",
"frames",
".",
"shape",
"[",
"0",
"]",
"n_fft",
"=",
"win_length",
"fft_window",
"=",
"filters",
".",
"get_window",
"(",
"window",
",",
"win_length",
",",
"fftbins",
"=",
"True",
")",
"# Reshape so that the window can be broadcast",
"fft_window",
"=",
"fft_window",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
")",
")",
"# Pre-allocate the STFT matrix",
"stft_matrix",
"=",
"np",
".",
"empty",
"(",
"(",
"int",
"(",
"1",
"+",
"n_fft",
"//",
"2",
")",
",",
"frames",
".",
"shape",
"[",
"1",
"]",
")",
",",
"dtype",
"=",
"dtype",
",",
"order",
"=",
"'F'",
")",
"# how many columns can we fit within MAX_MEM_BLOCK?",
"n_columns",
"=",
"int",
"(",
"util",
".",
"MAX_MEM_BLOCK",
"/",
"(",
"stft_matrix",
".",
"shape",
"[",
"0",
"]",
"*",
"stft_matrix",
".",
"itemsize",
")",
")",
"for",
"bl_s",
"in",
"range",
"(",
"0",
",",
"stft_matrix",
".",
"shape",
"[",
"1",
"]",
",",
"n_columns",
")",
":",
"bl_t",
"=",
"min",
"(",
"bl_s",
"+",
"n_columns",
",",
"stft_matrix",
".",
"shape",
"[",
"1",
"]",
")",
"# RFFT and Conjugate here to match phase from DPWE code",
"stft_matrix",
"[",
":",
",",
"bl_s",
":",
"bl_t",
"]",
"=",
"fft",
".",
"fft",
"(",
"fft_window",
"*",
"frames",
"[",
":",
",",
"bl_s",
":",
"bl_t",
"]",
",",
"axis",
"=",
"0",
")",
"[",
":",
"stft_matrix",
".",
"shape",
"[",
"0",
"]",
"]",
".",
"conj",
"(",
")",
"return",
"stft_matrix"
] |
Variation of the librosa.core.stft function,
that computes the short-time-fourier-transform from frames instead of from the signal.
See http://librosa.github.io/librosa/_modules/librosa/core/spectrum.html#stft
|
[
"Variation",
"of",
"the",
"librosa",
".",
"core",
".",
"stft",
"function",
"that",
"computes",
"the",
"short",
"-",
"time",
"-",
"fourier",
"-",
"transfrom",
"from",
"frames",
"instead",
"from",
"the",
"signal",
"."
] |
python
|
train
|
HazyResearch/pdftotree
|
pdftotree/utils/pdf/node.py
|
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/node.py#L241-L245
|
def _one_contains_other(s1, s2):
"""
Whether one set contains the other
"""
return min(len(s1), len(s2)) == len(s1 & s2)
|
[
"def",
"_one_contains_other",
"(",
"s1",
",",
"s2",
")",
":",
"return",
"min",
"(",
"len",
"(",
"s1",
")",
",",
"len",
"(",
"s2",
")",
")",
"==",
"len",
"(",
"s1",
"&",
"s2",
")"
] |
Whether one set contains the other
|
[
"Whether",
"one",
"set",
"contains",
"the",
"other"
] |
python
|
train
|
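The one-liner above relies on the identity |A ∩ B| == min(|A|, |B|) holding exactly when the smaller set is contained in the larger; a quick self-contained check:
def _one_contains_other(s1, s2):
    # Intersection size equals the smaller set's size iff one set contains the other.
    return min(len(s1), len(s2)) == len(s1 & s2)

print(_one_contains_other({1, 2}, {1, 2, 3}))  # True: {1, 2} is contained
print(_one_contains_other({1, 4}, {1, 2, 3}))  # False: neither contains the other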
opendatateam/udata
|
udata/core/dataset/preview.py
|
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/preview.py#L64-L77
|
def get_enabled_plugins():
'''
Returns enabled preview plugins.
Plugins are sorted, defaults come last
'''
plugins = entrypoints.get_enabled('udata.preview', current_app).values()
valid = [p for p in plugins if issubclass(p, PreviewPlugin)]
for plugin in plugins:
if plugin not in valid:
clsname = plugin.__name__
msg = '{0} is not a valid preview plugin'.format(clsname)
warnings.warn(msg, PreviewWarning)
return [p() for p in sorted(valid, key=lambda p: 1 if p.fallback else 0)]
|
[
"def",
"get_enabled_plugins",
"(",
")",
":",
"plugins",
"=",
"entrypoints",
".",
"get_enabled",
"(",
"'udata.preview'",
",",
"current_app",
")",
".",
"values",
"(",
")",
"valid",
"=",
"[",
"p",
"for",
"p",
"in",
"plugins",
"if",
"issubclass",
"(",
"p",
",",
"PreviewPlugin",
")",
"]",
"for",
"plugin",
"in",
"plugins",
":",
"if",
"plugin",
"not",
"in",
"valid",
":",
"clsname",
"=",
"plugin",
".",
"__name__",
"msg",
"=",
"'{0} is not a valid preview plugin'",
".",
"format",
"(",
"clsname",
")",
"warnings",
".",
"warn",
"(",
"msg",
",",
"PreviewWarning",
")",
"return",
"[",
"p",
"(",
")",
"for",
"p",
"in",
"sorted",
"(",
"valid",
",",
"key",
"=",
"lambda",
"p",
":",
"1",
"if",
"p",
".",
"fallback",
"else",
"0",
")",
"]"
] |
Returns enabled preview plugins.
Plugins are sorted, defaults come last
|
[
"Returns",
"enabled",
"preview",
"plugins",
"."
] |
python
|
train
|
MartinThoma/hwrt
|
hwrt/download.py
|
https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/download.py#L18-L21
|
def is_file_consistent(local_path_file, md5_hash):
"""Check if file is there and if the md5_hash is correct."""
return os.path.isfile(local_path_file) and \
hashlib.md5(open(local_path_file, 'rb').read()).hexdigest() == md5_hash
|
[
"def",
"is_file_consistent",
"(",
"local_path_file",
",",
"md5_hash",
")",
":",
"return",
"os",
".",
"path",
".",
"isfile",
"(",
"local_path_file",
")",
"and",
"hashlib",
".",
"md5",
"(",
"open",
"(",
"local_path_file",
",",
"'rb'",
")",
".",
"read",
"(",
")",
")",
".",
"hexdigest",
"(",
")",
"==",
"md5_hash"
] |
Check if file is there and if the md5_hash is correct.
|
[
"Check",
"if",
"file",
"is",
"there",
"and",
"if",
"the",
"md5_hash",
"is",
"correct",
"."
] |
python
|
train
|
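A runnable sketch of the consistency check above; the file name and payload are placeholders created on the fly so the example is self-contained.
import hashlib
import os

def is_file_consistent(local_path_file, md5_hash):
    return os.path.isfile(local_path_file) and \
        hashlib.md5(open(local_path_file, 'rb').read()).hexdigest() == md5_hash

with open('example.bin', 'wb') as f:
    f.write(b'payload')
expected = hashlib.md5(b'payload').hexdigest()
print(is_file_consistent('example.bin', expected))  # True
print(is_file_consistent('example.bin', '0' * 32))  # False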
rootpy/rootpy
|
rootpy/logger/magic.py
|
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/logger/magic.py#L219-L269
|
def re_execute_with_exception(frame, exception, traceback):
"""
Dark magic. Causes ``frame`` to raise an exception at the current location
with ``traceback`` appended to it.
Note that since the line tracer is raising an exception, the interpreter
disables the global trace, so it's not possible to restore the previous
tracing conditions.
"""
if sys.gettrace() == globaltrace:
# If our trace handler is already installed, that means that this
# function has been called twice before the line tracer had a chance to
# run. That can happen if more than one exception was logged.
return
call_lineno = frame.f_lineno
def intercept_next_line(f, why, *args):
if f is not frame:
return
set_linetrace_on_frame(f)
# Undo modifications to the callers code (ick ick ick)
back_like_nothing_happened()
# Raise exception in (almost) the perfect place (except for duplication)
if sys.version_info[0] < 3:
#raise exception.__class__, exception, traceback
raise exception
raise exception.with_traceback(traceback)
set_linetrace_on_frame(frame, intercept_next_line)
linestarts = list(dis.findlinestarts(frame.f_code))
linestarts = [a for a, l in linestarts if l >= call_lineno]
# Jump target
dest = linestarts[0]
oc = frame.f_code.co_code[frame.f_lasti]
if sys.version_info[0] < 3:
oc = ord(oc)
opcode_size = 2 if oc >= opcode.HAVE_ARGUMENT else 0
# Opcode to overwrite
where = frame.f_lasti + 1 + opcode_size
#dis.disco(frame.f_code)
pc = PyCodeObject.from_address(id(frame.f_code))
back_like_nothing_happened = pc.co_code.contents.inject_jump(where, dest)
#print("#"*100)
#dis.disco(frame.f_code)
sys.settrace(globaltrace)
|
[
"def",
"re_execute_with_exception",
"(",
"frame",
",",
"exception",
",",
"traceback",
")",
":",
"if",
"sys",
".",
"gettrace",
"(",
")",
"==",
"globaltrace",
":",
"# If our trace handler is already installed, that means that this",
"# function has been called twice before the line tracer had a chance to",
"# run. That can happen if more than one exception was logged.",
"return",
"call_lineno",
"=",
"frame",
".",
"f_lineno",
"def",
"intercept_next_line",
"(",
"f",
",",
"why",
",",
"*",
"args",
")",
":",
"if",
"f",
"is",
"not",
"frame",
":",
"return",
"set_linetrace_on_frame",
"(",
"f",
")",
"# Undo modifications to the callers code (ick ick ick)",
"back_like_nothing_happened",
"(",
")",
"# Raise exception in (almost) the perfect place (except for duplication)",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
"<",
"3",
":",
"#raise exception.__class__, exception, traceback",
"raise",
"exception",
"raise",
"exception",
".",
"with_traceback",
"(",
"traceback",
")",
"set_linetrace_on_frame",
"(",
"frame",
",",
"intercept_next_line",
")",
"linestarts",
"=",
"list",
"(",
"dis",
".",
"findlinestarts",
"(",
"frame",
".",
"f_code",
")",
")",
"linestarts",
"=",
"[",
"a",
"for",
"a",
",",
"l",
"in",
"linestarts",
"if",
"l",
">=",
"call_lineno",
"]",
"# Jump target",
"dest",
"=",
"linestarts",
"[",
"0",
"]",
"oc",
"=",
"frame",
".",
"f_code",
".",
"co_code",
"[",
"frame",
".",
"f_lasti",
"]",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
"<",
"3",
":",
"oc",
"=",
"ord",
"(",
"oc",
")",
"opcode_size",
"=",
"2",
"if",
"oc",
">=",
"opcode",
".",
"HAVE_ARGUMENT",
"else",
"0",
"# Opcode to overwrite",
"where",
"=",
"frame",
".",
"f_lasti",
"+",
"1",
"+",
"opcode_size",
"#dis.disco(frame.f_code)",
"pc",
"=",
"PyCodeObject",
".",
"from_address",
"(",
"id",
"(",
"frame",
".",
"f_code",
")",
")",
"back_like_nothing_happened",
"=",
"pc",
".",
"co_code",
".",
"contents",
".",
"inject_jump",
"(",
"where",
",",
"dest",
")",
"#print(\"#\"*100)",
"#dis.disco(frame.f_code)",
"sys",
".",
"settrace",
"(",
"globaltrace",
")"
] |
Dark magic. Causes ``frame`` to raise an exception at the current location
with ``traceback`` appended to it.
Note that since the line tracer is raising an exception, the interpreter
disables the global trace, so it's not possible to restore the previous
tracing conditions.
|
[
"Dark",
"magic",
".",
"Causes",
"frame",
"to",
"raise",
"an",
"exception",
"at",
"the",
"current",
"location",
"with",
"traceback",
"appended",
"to",
"it",
"."
] |
python
|
train
|
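re_execute_with_exception above builds on CPython's frame-tracing hooks; a minimal sketch of just that underlying mechanism (sys.settrace plus a per-frame line tracer, unrelated to rootpy's bytecode patching) looks like this:

import sys

def line_tracer(frame, event, arg):
    # Fired once per executed line while this frame is being traced.
    if event == 'line':
        print('executing line', frame.f_lineno)
    return line_tracer

def global_tracer(frame, event, arg):
    # Returning a local tracer from the 'call' event enables line events.
    return line_tracer

def demo():
    x = 1
    y = x + 1
    return y

sys.settrace(global_tracer)
demo()
sys.settrace(None)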
aio-libs/aioredis
|
aioredis/commands/set.py
|
https://github.com/aio-libs/aioredis/blob/e8c33e39558d4cc91cf70dde490d8b330c97dc2e/aioredis/commands/set.py#L59-L61
|
def srem(self, key, member, *members):
"""Remove one or more members from a set."""
return self.execute(b'SREM', key, member, *members)
|
[
"def",
"srem",
"(",
"self",
",",
"key",
",",
"member",
",",
"*",
"members",
")",
":",
"return",
"self",
".",
"execute",
"(",
"b'SREM'",
",",
"key",
",",
"member",
",",
"*",
"members",
")"
] |
Remove one or more members from a set.
|
[
"Remove",
"one",
"or",
"more",
"members",
"from",
"a",
"set",
"."
] |
python
|
train
|
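A hedged usage sketch for srem, assuming aioredis 1.x and a Redis server reachable at redis://localhost:

import asyncio
import aioredis  # assumes aioredis 1.x is installed

async def main():
    redis = await aioredis.create_redis_pool('redis://localhost')
    await redis.sadd('tags', 'a', 'b', 'c')
    removed = await redis.srem('tags', 'b', 'c')  # returns number of members removed
    print(removed)  # 2
    redis.close()
    await redis.wait_closed()

asyncio.run(main())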
PmagPy/PmagPy
|
programs/pt_rot.py
|
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/pt_rot.py#L12-L146
|
def main():
"""
NAME
pt_rot.py
DESCRIPTION
rotates pt according to specified age and plate
SYNTAX
pt_rot.py [command line options]
OPTIONS
-h prints help and quits
-f file with lon lat plate age Dplate as space delimited input
Dplate is the destination plate coordinates desired
- default is "fixed south africa"
Dplate should be one of: [nwaf, neaf, saf, aus, eur, ind, sam, ant, grn, nam]
-ff file Efile, file has lat lon data file and Efile has sequential rotation poles: Elat Elon Omega
-F OFILE, output sites (pmag_results) formatted file with rotated points stored in pole_lon, pole_lat (vgp_lon, vgp_lat). (data_model=2.5)
default is to print out rotated lon, lat to standard output
-dm [2.5,3] set data model for output. Default is 3
"""
dir_path='.'
PTS=[]
ResRecs=[]
ofile=""
data_model=3
Dplates=['nwaf', 'neaf','saf','aus', 'eur', 'ind', 'sam', 'ant', 'grn', 'nam']
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
dir_path=sys.argv[ind+1]
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-F' in sys.argv:
ind = sys.argv.index('-F')
ofile=dir_path+'/'+sys.argv[ind+1]
if '-dm' in sys.argv:
ind = sys.argv.index('-dm')
data_model=float(sys.argv[ind+1])
if '-f' in sys.argv:
ind = sys.argv.index('-f')
file=dir_path+'/'+sys.argv[ind+1]
f=open(file,'r')
data=f.readlines()
elif '-ff' in sys.argv:
ind = sys.argv.index('-ff')
file=dir_path+'/'+sys.argv[ind+1]
f=open(file,'r')
data=f.readlines()
Efile=dir_path+'/'+sys.argv[ind+2]
f=open(Efile,'r')
edata=f.readlines()
Poles=[]
for p in edata:
rec=p.split()
pole=[float(rec[0]),float(rec[1]),float(rec[2])] # pole is lat/lon/omega
Poles.append(pole)
else:
data=sys.stdin.readlines()
polelatkey,polelonkey='pole_lat','pole_lon'
if data_model!=3:
polelatkey,polelonkey='vgp_lat','vgp_lon'
for line in data:
PtRec={}
rec=line.split()
PtRec['site_lon']=rec[0]
PtRec['site_lat']=rec[1]
if '-ff' in sys.argv:
pt_lat,pt_lon=float(rec[1]),float(rec[0])
for pole in Poles:
ptrot= pmag.pt_rot(pole,[pt_lat],[pt_lon])
pt_lat=ptrot[0][0]
pt_lon=ptrot[1][0]
if ofile=="":
print(ptrot[1][0], ptrot[0][0])
else:
ResRec={polelonkey: '%7.1f'%(ptrot[0][0]),polelatkey:'%7.1f'%( ptrot[1][0])}
ResRecs.append(ResRec)
else:
PtRec['cont']=rec[2]
if PtRec['cont']=='af':PtRec['cont']='saf' # use fixed south africa
PtRec['age']=rec[3]
if len(rec)>4:
PtRec['dcont']=rec[4]
PTS.append(PtRec)
if '-ff' not in sys.argv:
for pt in PTS:
pole='not specified'
pt_lat=float(pt['site_lat'])
pt_lon=float(pt['site_lon'])
age=float(pt['age'])
ptrot=[[pt_lat],[pt_lon]]
if pt['cont']=='ib':
pole=frp.get_pole(pt['cont'],age)
ptrot= pmag.pt_rot(pole,[pt_lat],[pt_lon])
pt_lat=ptrot[0][0]
pt_lon=ptrot[1][0]
pt['cont']='eur'
if pt['cont']!='saf':
pole1=frp.get_pole(pt['cont'],age)
ptrot= pmag.pt_rot(pole1,[pt_lat],[pt_lon])
if 'dcont' in list(pt.keys()):
pt_lat=ptrot[0][0]
pt_lon=ptrot[1][0]
pole=frp.get_pole(pt['dcont'],age)
pole[2]=-pole[2]
ptrot= pmag.pt_rot(pole,[pt_lat],[pt_lon])
if ofile=="":
print(ptrot[1][0], ptrot[0][0])
else:
ResRec={polelonkey: '%7.1f'%(ptrot[0][0]),polelatkey:'%7.1f'%( ptrot[1][0])}
ResRecs.append(ResRec)
else:
if 'dcont' in list(pt.keys()):
pole=frp.get_pole(pt['dcont'],age)
pole[2]=-pole[2]
ptrot= pmag.pt_rot(pole,[pt_lat],[pt_lon])
print(ptrot)
if ofile=="":
print(ptrot[1][0], ptrot[0][0])
else:
ResRec={polelonkey: '%7.1f'%(ptrot[0][0]),polelatkey:'%7.1f'%( ptrot[1][0])}
ResRecs.append(ResRec)
else:
if ofile=="":
print(ptrot[1][0], ptrot[0][0])
else:
ResRec={polelonkey: '%7.1f'%(ptrot[0][0]),polelatkey:'%7.1f'%( ptrot[1][0])}
ResRecs.append(ResRec)
if len(ResRecs)>0:
if data_model==3:
pmag.magic_write(ofile,ResRecs,'locations')
else:
pmag.magic_write(ofile,ResRecs,'pmag_results')
|
[
"def",
"main",
"(",
")",
":",
"dir_path",
"=",
"'.'",
"PTS",
"=",
"[",
"]",
"ResRecs",
"=",
"[",
"]",
"ofile",
"=",
"\"\"",
"data_model",
"=",
"3",
"Dplates",
"=",
"[",
"'nwaf'",
",",
"'neaf'",
",",
"'saf'",
",",
"'aus'",
",",
"'eur'",
",",
"'ind'",
",",
"'sam'",
",",
"'ant'",
",",
"'grn'",
",",
"'nam'",
"]",
"if",
"'-WD'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-WD'",
")",
"dir_path",
"=",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
"if",
"'-h'",
"in",
"sys",
".",
"argv",
":",
"print",
"(",
"main",
".",
"__doc__",
")",
"sys",
".",
"exit",
"(",
")",
"if",
"'-F'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-F'",
")",
"ofile",
"=",
"dir_path",
"+",
"'/'",
"+",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
"if",
"'-dm'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-dm'",
")",
"data_model",
"=",
"dir_path",
"+",
"'/'",
"+",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
"if",
"'-f'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-f'",
")",
"file",
"=",
"dir_path",
"+",
"'/'",
"+",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
"f",
"=",
"open",
"(",
"file",
",",
"'r'",
")",
"data",
"=",
"f",
".",
"readlines",
"(",
")",
"elif",
"'-ff'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-ff'",
")",
"file",
"=",
"dir_path",
"+",
"'/'",
"+",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
"f",
"=",
"open",
"(",
"file",
",",
"'r'",
")",
"data",
"=",
"f",
".",
"readlines",
"(",
")",
"Efile",
"=",
"dir_path",
"+",
"'/'",
"+",
"sys",
".",
"argv",
"[",
"ind",
"+",
"2",
"]",
"f",
"=",
"open",
"(",
"Efile",
",",
"'r'",
")",
"edata",
"=",
"f",
".",
"readlines",
"(",
")",
"Poles",
"=",
"[",
"]",
"for",
"p",
"in",
"edata",
":",
"rec",
"=",
"p",
".",
"split",
"(",
")",
"pole",
"=",
"[",
"float",
"(",
"rec",
"[",
"0",
"]",
")",
",",
"float",
"(",
"rec",
"[",
"1",
"]",
")",
",",
"float",
"(",
"rec",
"[",
"2",
"]",
")",
"]",
"# pole is lat/lon/omega",
"Poles",
".",
"append",
"(",
"pole",
")",
"else",
":",
"data",
"=",
"sys",
".",
"stdin",
".",
"readlines",
"(",
")",
"polelatkey",
",",
"polelonkey",
"=",
"'pole_lat'",
",",
"'pole_lon'",
"if",
"data_model",
"!=",
"3",
":",
"polelatkey",
",",
"polelonkey",
"=",
"'vgp_lat'",
",",
"'vgp_lon'",
"for",
"line",
"in",
"data",
":",
"PtRec",
"=",
"{",
"}",
"rec",
"=",
"line",
".",
"split",
"(",
")",
"PtRec",
"[",
"'site_lon'",
"]",
"=",
"rec",
"[",
"0",
"]",
"PtRec",
"[",
"'site_lat'",
"]",
"=",
"rec",
"[",
"1",
"]",
"if",
"'-ff'",
"in",
"sys",
".",
"argv",
":",
"pt_lat",
",",
"pt_lon",
"=",
"float",
"(",
"rec",
"[",
"1",
"]",
")",
",",
"float",
"(",
"rec",
"[",
"0",
"]",
")",
"for",
"pole",
"in",
"Poles",
":",
"ptrot",
"=",
"pmag",
".",
"pt_rot",
"(",
"pole",
",",
"[",
"pt_lat",
"]",
",",
"[",
"pt_lon",
"]",
")",
"pt_lat",
"=",
"ptrot",
"[",
"0",
"]",
"[",
"0",
"]",
"pt_lon",
"=",
"ptrot",
"[",
"1",
"]",
"[",
"0",
"]",
"if",
"ofile",
"==",
"\"\"",
":",
"print",
"(",
"ptrot",
"[",
"1",
"]",
"[",
"0",
"]",
",",
"ptrot",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"else",
":",
"ResRec",
"=",
"{",
"polelonkey",
":",
"'%7.1f'",
"%",
"(",
"ptrot",
"[",
"0",
"]",
"[",
"0",
"]",
")",
",",
"polelatkey",
":",
"'%7.1f'",
"%",
"(",
"ptrot",
"[",
"1",
"]",
"[",
"0",
"]",
")",
"}",
"ResRecs",
".",
"append",
"(",
"ResRec",
")",
"else",
":",
"PtRec",
"[",
"'cont'",
"]",
"=",
"rec",
"[",
"2",
"]",
"if",
"PtRec",
"[",
"'cont'",
"]",
"==",
"'af'",
":",
"PtRec",
"[",
"'cont'",
"]",
"=",
"'saf'",
"# use fixed south africa",
"PtRec",
"[",
"'age'",
"]",
"=",
"rec",
"[",
"3",
"]",
"if",
"len",
"(",
"rec",
")",
">",
"4",
":",
"PtRec",
"[",
"'dcont'",
"]",
"=",
"rec",
"[",
"4",
"]",
"PTS",
".",
"append",
"(",
"PtRec",
")",
"if",
"'-ff'",
"not",
"in",
"sys",
".",
"argv",
":",
"for",
"pt",
"in",
"PTS",
":",
"pole",
"=",
"'not specified'",
"pt_lat",
"=",
"float",
"(",
"pt",
"[",
"'site_lat'",
"]",
")",
"pt_lon",
"=",
"float",
"(",
"pt",
"[",
"'site_lon'",
"]",
")",
"age",
"=",
"float",
"(",
"pt",
"[",
"'age'",
"]",
")",
"ptrot",
"=",
"[",
"[",
"pt_lat",
"]",
",",
"[",
"pt_lon",
"]",
"]",
"if",
"pt",
"[",
"'cont'",
"]",
"==",
"'ib'",
":",
"pole",
"=",
"frp",
".",
"get_pole",
"(",
"pt",
"[",
"'cont'",
"]",
",",
"age",
")",
"ptrot",
"=",
"pmag",
".",
"pt_rot",
"(",
"pole",
",",
"[",
"pt_lat",
"]",
",",
"[",
"pt_lon",
"]",
")",
"pt_lat",
"=",
"ptrot",
"[",
"0",
"]",
"[",
"0",
"]",
"pt_lon",
"=",
"ptrot",
"[",
"1",
"]",
"[",
"0",
"]",
"pt",
"[",
"'cont'",
"]",
"=",
"'eur'",
"if",
"pt",
"[",
"'cont'",
"]",
"!=",
"'saf'",
":",
"pole1",
"=",
"frp",
".",
"get_pole",
"(",
"pt",
"[",
"'cont'",
"]",
",",
"age",
")",
"ptrot",
"=",
"pmag",
".",
"pt_rot",
"(",
"pole1",
",",
"[",
"pt_lat",
"]",
",",
"[",
"pt_lon",
"]",
")",
"if",
"'dcont'",
"in",
"list",
"(",
"pt",
".",
"keys",
"(",
")",
")",
":",
"pt_lat",
"=",
"ptrot",
"[",
"0",
"]",
"[",
"0",
"]",
"pt_lon",
"=",
"ptrot",
"[",
"1",
"]",
"[",
"0",
"]",
"pole",
"=",
"frp",
".",
"get_pole",
"(",
"pt",
"[",
"'dcont'",
"]",
",",
"age",
")",
"pole",
"[",
"2",
"]",
"=",
"-",
"pole",
"[",
"2",
"]",
"ptrot",
"=",
"pmag",
".",
"pt_rot",
"(",
"pole",
",",
"[",
"pt_lat",
"]",
",",
"[",
"pt_lon",
"]",
")",
"if",
"ofile",
"==",
"\"\"",
":",
"print",
"(",
"ptrot",
"[",
"1",
"]",
"[",
"0",
"]",
",",
"ptrot",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"else",
":",
"ResRec",
"=",
"{",
"polelonkey",
":",
"'%7.1f'",
"%",
"(",
"ptrot",
"[",
"0",
"]",
"[",
"0",
"]",
")",
",",
"polelatkey",
":",
"'%7.1f'",
"%",
"(",
"ptrot",
"[",
"1",
"]",
"[",
"0",
"]",
")",
"}",
"ResRecs",
".",
"append",
"(",
"ResRec",
")",
"else",
":",
"if",
"'dcont'",
"in",
"list",
"(",
"pt",
".",
"keys",
"(",
")",
")",
":",
"pole",
"=",
"frp",
".",
"get_pole",
"(",
"pt",
"[",
"'dcont'",
"]",
",",
"age",
")",
"pole",
"[",
"2",
"]",
"=",
"-",
"pole",
"[",
"2",
"]",
"ptrot",
"=",
"pmag",
".",
"pt_rot",
"(",
"pole",
",",
"[",
"pt_lat",
"]",
",",
"[",
"pt_lon",
"]",
")",
"print",
"(",
"ptrot",
")",
"if",
"ofile",
"==",
"\"\"",
":",
"print",
"(",
"ptrot",
"[",
"1",
"]",
"[",
"0",
"]",
",",
"ptrot",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"else",
":",
"ResRec",
"=",
"{",
"polelonkey",
":",
"'%7.1f'",
"%",
"(",
"ptrot",
"[",
"0",
"]",
"[",
"0",
"]",
")",
",",
"polelatkey",
":",
"'%7.1f'",
"%",
"(",
"ptrot",
"[",
"1",
"]",
"[",
"0",
"]",
")",
"}",
"ResRecs",
".",
"append",
"(",
"ResRec",
")",
"else",
":",
"if",
"ofile",
"==",
"\"\"",
":",
"print",
"(",
"ptrot",
"[",
"1",
"]",
"[",
"0",
"]",
",",
"ptrot",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"else",
":",
"ResRec",
"=",
"{",
"polelonkey",
":",
"'%7.1f'",
"%",
"(",
"ptrot",
"[",
"0",
"]",
"[",
"0",
"]",
")",
",",
"polelatkey",
":",
"'%7.1f'",
"%",
"(",
"ptrot",
"[",
"1",
"]",
"[",
"0",
"]",
")",
"}",
"ResRecs",
".",
"append",
"(",
"ResRec",
")",
"if",
"len",
"(",
"ResRecs",
")",
">",
"0",
":",
"if",
"data_model",
"==",
"3",
":",
"pmag",
".",
"magic_write",
"(",
"ofile",
",",
"ResRecs",
",",
"'locations'",
")",
"else",
":",
"pmag",
".",
"magic_write",
"(",
"ofile",
",",
"ResRecs",
",",
"'pmag_results'",
")"
] |
NAME
pt_rot.py
DESCRIPTION
rotates pt according to specified age and plate
SYNTAX
pt_rot.py [command line options]
OPTIONS
-h prints help and quits
-f file with lon lat plate age Dplate as space delimited input
Dplate is the destination plate coordinates desired
- default is "fixed south africa"
Dplate should be one of: [nwaf, neaf, saf, aus, eur, ind, sam, ant, grn, nam]
-ff file Efile, file has lat lon data file and Efile has sequential rotation poles: Elat Elon Omega
-F OFILE, output sites (pmag_results) formatted file with rotated points stored in pole_lon, pole_lat (vgp_lon, vgp_lat). (data_model=2.5)
default is to print out rotated lon, lat to standard output
-dm [2.5,3] set data model for output. Default is 3
|
[
"NAME",
"pt_rot",
".",
"py"
] |
python
|
train
|
johnnoone/facts
|
facts/targeting.py
|
https://github.com/johnnoone/facts/blob/82d38a46c15d9c01200445526f4c0d1825fc1e51/facts/targeting.py#L50-L74
|
def read(self, obj):
"""
Returns
object: fragment
"""
path, frag = [], obj
for part in self.parts:
path.append(part)
if isinstance(frag, dict):
try:
frag = frag[part]
except KeyError as error:
raise NotFound(':'.join(path)) from error
elif isinstance(frag, (list, tuple)):
try:
frag = frag[int(part)]
except IndexError as error:
raise NotFound(':'.join(path)) from error
except ValueError as error:
raise WrongType(':'.join(path)) from error
elif isinstance(frag, (str, int)):
raise WrongType(':'.join(path))
else:
raise NotFound(':'.join(path))
return frag
|
[
"def",
"read",
"(",
"self",
",",
"obj",
")",
":",
"path",
",",
"frag",
"=",
"[",
"]",
",",
"obj",
"for",
"part",
"in",
"self",
".",
"parts",
":",
"path",
".",
"append",
"(",
"part",
")",
"if",
"isinstance",
"(",
"frag",
",",
"dict",
")",
":",
"try",
":",
"frag",
"=",
"frag",
"[",
"part",
"]",
"except",
"KeyError",
"as",
"error",
":",
"raise",
"NotFound",
"(",
"':'",
".",
"join",
"(",
"path",
")",
")",
"from",
"error",
"elif",
"isinstance",
"(",
"frag",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"try",
":",
"frag",
"=",
"frag",
"[",
"int",
"(",
"part",
")",
"]",
"except",
"IndexError",
"as",
"error",
":",
"raise",
"NotFound",
"(",
"':'",
".",
"join",
"(",
"path",
")",
")",
"from",
"error",
"except",
"ValueError",
"as",
"error",
":",
"raise",
"WrongType",
"(",
"':'",
".",
"join",
"(",
"path",
")",
")",
"from",
"error",
"elif",
"isinstance",
"(",
"frag",
",",
"(",
"str",
",",
"int",
")",
")",
":",
"raise",
"WrongType",
"(",
"':'",
".",
"join",
"(",
"path",
")",
")",
"else",
":",
"raise",
"NotFound",
"(",
"':'",
".",
"join",
"(",
"path",
")",
")",
"return",
"frag"
] |
Returns
object: fragment
|
[
"Returns",
"object",
":",
"fragment"
] |
python
|
train
|
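A self-contained re-creation of the traversal idea in read() above, with stand-in NotFound/WrongType exceptions (the real classes live in facts.targeting); the final else branch is simplified here:

class NotFound(Exception):
    pass

class WrongType(Exception):
    pass

def read_path(obj, parts):
    # Walk obj one path segment at a time, mirroring the read() method above.
    path, frag = [], obj
    for part in parts:
        path.append(part)
        if isinstance(frag, dict):
            try:
                frag = frag[part]
            except KeyError as error:
                raise NotFound(':'.join(path)) from error
        elif isinstance(frag, (list, tuple)):
            try:
                frag = frag[int(part)]
            except IndexError as error:
                raise NotFound(':'.join(path)) from error
            except ValueError as error:
                raise WrongType(':'.join(path)) from error
        else:
            raise WrongType(':'.join(path))
    return frag

print(read_path({'a': [10, 20]}, ['a', '1']))  # 20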
maceoutliner/django-fiction-outlines
|
fiction_outlines/views.py
|
https://github.com/maceoutliner/django-fiction-outlines/blob/6c58e356af3fbe7b23557643ba27e46eaef9d4e3/fiction_outlines/views.py#L1261-L1268
|
def return_opml_response(self, context, **response_kwargs):
'''
Returns export data as an opml file.
'''
self.template_name = 'fiction_outlines/outline.opml'
response = super().render_to_response(context, content_type='text/xml', **response_kwargs)
response['Content-Disposition'] = 'attachment; filename="{}.opml"'.format(slugify(self.object.title))
return response
|
[
"def",
"return_opml_response",
"(",
"self",
",",
"context",
",",
"*",
"*",
"response_kwargs",
")",
":",
"self",
".",
"template_name",
"=",
"'fiction_outlines/outline.opml'",
"response",
"=",
"super",
"(",
")",
".",
"render_to_response",
"(",
"context",
",",
"content_type",
"=",
"'text/xml'",
",",
"*",
"*",
"response_kwargs",
")",
"response",
"[",
"'Content-Disposition'",
"]",
"=",
"'attachment; filename=\"{}.opml\"'",
".",
"format",
"(",
"slugify",
"(",
"self",
".",
"object",
".",
"title",
")",
")",
"return",
"response"
] |
Returns export data as an opml file.
|
[
"Returns",
"export",
"data",
"as",
"an",
"opml",
"file",
"."
] |
python
|
train
|
onelogin/python3-saml
|
src/onelogin/saml2/response.py
|
https://github.com/onelogin/python3-saml/blob/064b7275fba1e5f39a9116ba1cdcc5d01fc34daa/src/onelogin/saml2/response.py#L521-L535
|
def get_session_index(self):
"""
Gets the SessionIndex from the AuthnStatement
Could be used to be stored in the local session in order
to be used in a future Logout Request that the SP could
send to the IdP, to set what specific session must be deleted
:returns: The SessionIndex value
:rtype: string|None
"""
session_index = None
authn_statement_nodes = self.__query_assertion('/saml:AuthnStatement[@SessionIndex]')
if authn_statement_nodes:
session_index = authn_statement_nodes[0].get('SessionIndex')
return session_index
|
[
"def",
"get_session_index",
"(",
"self",
")",
":",
"session_index",
"=",
"None",
"authn_statement_nodes",
"=",
"self",
".",
"__query_assertion",
"(",
"'/saml:AuthnStatement[@SessionIndex]'",
")",
"if",
"authn_statement_nodes",
":",
"session_index",
"=",
"authn_statement_nodes",
"[",
"0",
"]",
".",
"get",
"(",
"'SessionIndex'",
")",
"return",
"session_index"
] |
Gets the SessionIndex from the AuthnStatement
Could be used to be stored in the local session in order
to be used in a future Logout Request that the SP could
send to the IdP, to set what specific session must be deleted
:returns: The SessionIndex value
:rtype: string|None
|
[
"Gets",
"the",
"SessionIndex",
"from",
"the",
"AuthnStatement",
"Could",
"be",
"used",
"to",
"be",
"stored",
"in",
"the",
"local",
"session",
"in",
"order",
"to",
"be",
"used",
"in",
"a",
"future",
"Logout",
"Request",
"that",
"the",
"SP",
"could",
"send",
"to",
"the",
"SP",
"to",
"set",
"what",
"specific",
"session",
"must",
"be",
"deleted"
] |
python
|
train
|
CodeReclaimers/neat-python
|
examples/memory-variable/visualize.py
|
https://github.com/CodeReclaimers/neat-python/blob/e3dbe77c0d776eae41d598e6439e6ac02ab90b18/examples/memory-variable/visualize.py#L12-L40
|
def plot_stats(statistics, ylog=False, view=False, filename='avg_fitness.svg'):
""" Plots the population's average and best fitness. """
if plt is None:
warnings.warn("This display is not available due to a missing optional dependency (matplotlib)")
return
generation = range(len(statistics.most_fit_genomes))
best_fitness = [c.fitness for c in statistics.most_fit_genomes]
avg_fitness = np.array(statistics.get_fitness_mean())
stdev_fitness = np.array(statistics.get_fitness_stdev())
plt.plot(generation, avg_fitness, 'b-', label="average")
plt.plot(generation, avg_fitness - stdev_fitness, 'g-.', label="-1 sd")
plt.plot(generation, avg_fitness + stdev_fitness, 'g-.', label="+1 sd")
plt.plot(generation, best_fitness, 'r-', label="best")
plt.title("Population's average and best fitness")
plt.xlabel("Generations")
plt.ylabel("Fitness")
plt.grid()
plt.legend(loc="best")
if ylog:
plt.gca().set_yscale('symlog')
plt.savefig(filename)
if view:
plt.show()
plt.close()
|
[
"def",
"plot_stats",
"(",
"statistics",
",",
"ylog",
"=",
"False",
",",
"view",
"=",
"False",
",",
"filename",
"=",
"'avg_fitness.svg'",
")",
":",
"if",
"plt",
"is",
"None",
":",
"warnings",
".",
"warn",
"(",
"\"This display is not available due to a missing optional dependency (matplotlib)\"",
")",
"return",
"generation",
"=",
"range",
"(",
"len",
"(",
"statistics",
".",
"most_fit_genomes",
")",
")",
"best_fitness",
"=",
"[",
"c",
".",
"fitness",
"for",
"c",
"in",
"statistics",
".",
"most_fit_genomes",
"]",
"avg_fitness",
"=",
"np",
".",
"array",
"(",
"statistics",
".",
"get_fitness_mean",
"(",
")",
")",
"stdev_fitness",
"=",
"np",
".",
"array",
"(",
"statistics",
".",
"get_fitness_stdev",
"(",
")",
")",
"plt",
".",
"plot",
"(",
"generation",
",",
"avg_fitness",
",",
"'b-'",
",",
"label",
"=",
"\"average\"",
")",
"plt",
".",
"plot",
"(",
"generation",
",",
"avg_fitness",
"-",
"stdev_fitness",
",",
"'g-.'",
",",
"label",
"=",
"\"-1 sd\"",
")",
"plt",
".",
"plot",
"(",
"generation",
",",
"avg_fitness",
"+",
"stdev_fitness",
",",
"'g-.'",
",",
"label",
"=",
"\"+1 sd\"",
")",
"plt",
".",
"plot",
"(",
"generation",
",",
"best_fitness",
",",
"'r-'",
",",
"label",
"=",
"\"best\"",
")",
"plt",
".",
"title",
"(",
"\"Population's average and best fitness\"",
")",
"plt",
".",
"xlabel",
"(",
"\"Generations\"",
")",
"plt",
".",
"ylabel",
"(",
"\"Fitness\"",
")",
"plt",
".",
"grid",
"(",
")",
"plt",
".",
"legend",
"(",
"loc",
"=",
"\"best\"",
")",
"if",
"ylog",
":",
"plt",
".",
"gca",
"(",
")",
".",
"set_yscale",
"(",
"'symlog'",
")",
"plt",
".",
"savefig",
"(",
"filename",
")",
"if",
"view",
":",
"plt",
".",
"show",
"(",
")",
"plt",
".",
"close",
"(",
")"
] |
Plots the population's average and best fitness.
|
[
"Plots",
"the",
"population",
"s",
"average",
"and",
"best",
"fitness",
"."
] |
python
|
train
|
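A hedged driver for plot_stats, assuming the function above is in scope and matplotlib is installed; FakeStats is a hypothetical stand-in exposing just the attributes plot_stats reads from neat-python's statistics object:

class FakeGenome:
    def __init__(self, fitness):
        self.fitness = fitness

class FakeStats:
    # Hypothetical stand-in for the statistics reporter.
    most_fit_genomes = [FakeGenome(f) for f in (1.0, 2.0, 2.5)]
    def get_fitness_mean(self):
        return [0.5, 1.2, 1.8]
    def get_fitness_stdev(self):
        return [0.2, 0.3, 0.3]

plot_stats(FakeStats(), ylog=False, view=False, filename='demo_fitness.svg')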
SBRG/ssbio
|
ssbio/protein/structure/structprop.py
|
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/structprop.py#L223-L238
|
def add_chain_ids(self, chains):
"""Add chains by ID into the chains attribute
Args:
chains (str, list): Chain ID or list of IDs
"""
chains = ssbio.utils.force_list(chains)
for c in chains:
if self.chains.has_id(c):
log.debug('{}: chain already present'.format(c))
else:
chain_prop = ChainProp(ident=c, pdb_parent=self.id)
self.chains.append(chain_prop)
log.debug('{}: added to chains list'.format(c))
|
[
"def",
"add_chain_ids",
"(",
"self",
",",
"chains",
")",
":",
"chains",
"=",
"ssbio",
".",
"utils",
".",
"force_list",
"(",
"chains",
")",
"for",
"c",
"in",
"chains",
":",
"if",
"self",
".",
"chains",
".",
"has_id",
"(",
"c",
")",
":",
"log",
".",
"debug",
"(",
"'{}: chain already present'",
".",
"format",
"(",
"c",
")",
")",
"else",
":",
"chain_prop",
"=",
"ChainProp",
"(",
"ident",
"=",
"c",
",",
"pdb_parent",
"=",
"self",
".",
"id",
")",
"self",
".",
"chains",
".",
"append",
"(",
"chain_prop",
")",
"log",
".",
"debug",
"(",
"'{}: added to chains list'",
".",
"format",
"(",
"c",
")",
")"
] |
Add chains by ID into the chains attribute
Args:
chains (str, list): Chain ID or list of IDs
|
[
"Add",
"chains",
"by",
"ID",
"into",
"the",
"chains",
"attribute"
] |
python
|
train
|
ilblackdragon/django-misc
|
misc/views.py
|
https://github.com/ilblackdragon/django-misc/blob/0accd2dc97de656a1c9e275be81e817f78a2eb9d/misc/views.py#L38-L52
|
def handler404(request, template_name='404.html'):
"""
404 error handler.
Templates: `404.html`
Context:
MEDIA_URL
Path of static media (e.g. "media.example.org")
STATIC_URL
"""
t = loader.get_template(template_name) # You need to create a 404.html template.
return http.HttpResponseNotFound(t.render(Context({
'MEDIA_URL': settings.MEDIA_URL,
'STATIC_URL': settings.STATIC_URL
})))
|
[
"def",
"handler404",
"(",
"request",
",",
"template_name",
"=",
"'404.html'",
")",
":",
"t",
"=",
"loader",
".",
"get_template",
"(",
"template_name",
")",
"# You need to create a 404.html template.",
"return",
"http",
".",
"HttpResponseNotFound",
"(",
"t",
".",
"render",
"(",
"Context",
"(",
"{",
"'MEDIA_URL'",
":",
"settings",
".",
"MEDIA_URL",
",",
"'STATIC_URL'",
":",
"settings",
".",
"STATIC_URL",
"}",
")",
")",
")"
] |
404 error handler.
Templates: `404.html`
Context:
MEDIA_URL
Path of static media (e.g. "media.example.org")
STATIC_URL
|
[
"404",
"error",
"handler",
"."
] |
python
|
train
|
DataDog/integrations-core
|
tokumx/datadog_checks/tokumx/vendor/pymongo/database.py
|
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/database.py#L192-L204
|
def outgoing_manipulators(self):
"""**DEPRECATED**: All outgoing SON manipulators.
.. versionchanged:: 3.5
Deprecated.
.. versionadded:: 2.0
"""
warnings.warn("Database.outgoing_manipulators() is deprecated",
DeprecationWarning, stacklevel=2)
return [manipulator.__class__.__name__
for manipulator in self.__outgoing_manipulators]
|
[
"def",
"outgoing_manipulators",
"(",
"self",
")",
":",
"warnings",
".",
"warn",
"(",
"\"Database.outgoing_manipulators() is deprecated\"",
",",
"DeprecationWarning",
",",
"stacklevel",
"=",
"2",
")",
"return",
"[",
"manipulator",
".",
"__class__",
".",
"__name__",
"for",
"manipulator",
"in",
"self",
".",
"__outgoing_manipulators",
"]"
] |
**DEPRECATED**: All outgoing SON manipulators.
.. versionchanged:: 3.5
Deprecated.
.. versionadded:: 2.0
|
[
"**",
"DEPRECATED",
"**",
":",
"All",
"outgoing",
"SON",
"manipulators",
"."
] |
python
|
train
|
manns/pyspread
|
pyspread/src/interfaces/pys.py
|
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/interfaces/pys.py#L392-L416
|
def to_code_array(self):
"""Replaces everything in code_array from pys_file"""
state = None
# Check if version section starts with first line
first_line = True
# Reset pys_file to start to enable multiple calls of this method
self.pys_file.seek(0)
for line in self.pys_file:
if first_line:
# If Version section does not start with first line then
# the file is invalid.
if line == "[Pyspread save file version]\n":
first_line = False
else:
raise ValueError(_("File format unsupported."))
if line in self._section2reader:
state = line
elif state is not None:
self._section2reader[state](line)
|
[
"def",
"to_code_array",
"(",
"self",
")",
":",
"state",
"=",
"None",
"# Check if version section starts with first line",
"first_line",
"=",
"True",
"# Reset pys_file to start to enable multiple calls of this method",
"self",
".",
"pys_file",
".",
"seek",
"(",
"0",
")",
"for",
"line",
"in",
"self",
".",
"pys_file",
":",
"if",
"first_line",
":",
"# If Version section does not start with first line then",
"# the file is invalid.",
"if",
"line",
"==",
"\"[Pyspread save file version]\\n\"",
":",
"first_line",
"=",
"False",
"else",
":",
"raise",
"ValueError",
"(",
"_",
"(",
"\"File format unsupported.\"",
")",
")",
"if",
"line",
"in",
"self",
".",
"_section2reader",
":",
"state",
"=",
"line",
"elif",
"state",
"is",
"not",
"None",
":",
"self",
".",
"_section2reader",
"[",
"state",
"]",
"(",
"line",
")"
] |
Replaces everything in code_array from pys_file
|
[
"Replaces",
"everything",
"in",
"code_array",
"from",
"pys_file"
] |
python
|
train
|
uw-it-aca/uw-restclients-nws
|
uw_nws/__init__.py
|
https://github.com/uw-it-aca/uw-restclients-nws/blob/ec6fd14342ffc883d14bcb53b2fe9bc288696027/uw_nws/__init__.py#L479-L492
|
def delete_message_type(self, message_type_id):
"""
Delete an existing message type
:param message_type_id: is the id of the message type the
client wants to delete
"""
self._validate_uuid(message_type_id)
url = "/notification/v1/message-type/{}".format(message_type_id)
response = NWS_DAO().deleteURL(url, self._write_headers())
if response.status != 204:
raise DataFailureException(url, response.status, response.data)
return response.status
|
[
"def",
"delete_message_type",
"(",
"self",
",",
"message_type_id",
")",
":",
"self",
".",
"_validate_uuid",
"(",
"message_type_id",
")",
"url",
"=",
"\"/notification/v1/message-type/{}\"",
".",
"format",
"(",
"message_type_id",
")",
"response",
"=",
"NWS_DAO",
"(",
")",
".",
"deleteURL",
"(",
"url",
",",
"self",
".",
"_write_headers",
"(",
")",
")",
"if",
"response",
".",
"status",
"!=",
"204",
":",
"raise",
"DataFailureException",
"(",
"url",
",",
"response",
".",
"status",
",",
"response",
".",
"data",
")",
"return",
"response",
".",
"status"
] |
Delete an existing message type
:param message_type_id: is the id of the message type the
client wants to delete
|
[
"Delete",
"an",
"existing",
"message",
"type",
":",
"param",
"message_type_id",
":",
"is",
"the",
"id",
"of",
"the",
"message",
"type",
"the",
"client",
"wants",
"to",
"delete"
] |
python
|
train
|
bsolomon1124/pyfinance
|
pyfinance/utils.py
|
https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/utils.py#L418-L426
|
def _uniquewords(*args):
"""Dictionary of words to their indices. Helper function to `encode.`"""
words = {}
n = 0
for word in itertools.chain(*args):
if word not in words:
words[word] = n
n += 1
return words
|
[
"def",
"_uniquewords",
"(",
"*",
"args",
")",
":",
"words",
"=",
"{",
"}",
"n",
"=",
"0",
"for",
"word",
"in",
"itertools",
".",
"chain",
"(",
"*",
"args",
")",
":",
"if",
"word",
"not",
"in",
"words",
":",
"words",
"[",
"word",
"]",
"=",
"n",
"n",
"+=",
"1",
"return",
"words"
] |
Dictionary of words to their indices. Helper function to `encode.`
|
[
"Dictionary",
"of",
"words",
"to",
"their",
"indices",
".",
"Helper",
"function",
"to",
"encode",
"."
] |
python
|
train
|
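Re-running the helper above on overlapping word lists shows the first-seen index assignment:

import itertools

def _uniquewords(*args):
    """Dictionary of words to their indices. Helper function to `encode.`"""
    words = {}
    n = 0
    for word in itertools.chain(*args):
        if word not in words:
            words[word] = n
            n += 1
    return words

print(_uniquewords(['a', 'b'], ['b', 'c']))  # {'a': 0, 'b': 1, 'c': 2}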
fastavro/fastavro
|
fastavro/_read_py.py
|
https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_read_py.py#L311-L342
|
def read_map(fo, writer_schema, reader_schema=None):
"""Maps are encoded as a series of blocks.
Each block consists of a long count value, followed by that many key/value
pairs. A block with count zero indicates the end of the map. Each item is
encoded per the map's value schema.
If a block's count is negative, then the count is followed immediately by a
long block size, indicating the number of bytes in the block. The actual
count in this case is the absolute value of the count written.
"""
if reader_schema:
def item_reader(fo, w_schema, r_schema):
return read_data(fo, w_schema['values'], r_schema['values'])
else:
def item_reader(fo, w_schema, _):
return read_data(fo, w_schema['values'])
read_items = {}
block_count = read_long(fo)
while block_count != 0:
if block_count < 0:
block_count = -block_count
# Read block size, unused
read_long(fo)
for i in xrange(block_count):
key = read_utf8(fo)
read_items[key] = item_reader(fo, writer_schema, reader_schema)
block_count = read_long(fo)
return read_items
|
[
"def",
"read_map",
"(",
"fo",
",",
"writer_schema",
",",
"reader_schema",
"=",
"None",
")",
":",
"if",
"reader_schema",
":",
"def",
"item_reader",
"(",
"fo",
",",
"w_schema",
",",
"r_schema",
")",
":",
"return",
"read_data",
"(",
"fo",
",",
"w_schema",
"[",
"'values'",
"]",
",",
"r_schema",
"[",
"'values'",
"]",
")",
"else",
":",
"def",
"item_reader",
"(",
"fo",
",",
"w_schema",
",",
"_",
")",
":",
"return",
"read_data",
"(",
"fo",
",",
"w_schema",
"[",
"'values'",
"]",
")",
"read_items",
"=",
"{",
"}",
"block_count",
"=",
"read_long",
"(",
"fo",
")",
"while",
"block_count",
"!=",
"0",
":",
"if",
"block_count",
"<",
"0",
":",
"block_count",
"=",
"-",
"block_count",
"# Read block size, unused",
"read_long",
"(",
"fo",
")",
"for",
"i",
"in",
"xrange",
"(",
"block_count",
")",
":",
"key",
"=",
"read_utf8",
"(",
"fo",
")",
"read_items",
"[",
"key",
"]",
"=",
"item_reader",
"(",
"fo",
",",
"writer_schema",
",",
"reader_schema",
")",
"block_count",
"=",
"read_long",
"(",
"fo",
")",
"return",
"read_items"
] |
Maps are encoded as a series of blocks.
Each block consists of a long count value, followed by that many key/value
pairs. A block with count zero indicates the end of the map. Each item is
encoded per the map's value schema.
If a block's count is negative, then the count is followed immediately by a
long block size, indicating the number of bytes in the block. The actual
count in this case is the absolute value of the count written.
|
[
"Maps",
"are",
"encoded",
"as",
"a",
"series",
"of",
"blocks",
"."
] |
python
|
train
|
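The block layout described in read_map's docstring can be traced end to end with a tiny self-contained decoder (Avro longs are zigzag-encoded base-128 varints); the byte string below is a hand-built single-block map {'a': 7} with long values:

import io

def read_long(fo):
    # Avro long: little-endian base-128 varint, then zigzag decode.
    b = ord(fo.read(1))
    n = b & 0x7F
    shift = 7
    while (b & 0x80) != 0:
        b = ord(fo.read(1))
        n |= (b & 0x7F) << shift
        shift += 7
    return (n >> 1) ^ -(n & 1)

def read_map_of_longs(fo):
    items = {}
    block_count = read_long(fo)
    while block_count != 0:
        if block_count < 0:
            block_count = -block_count
            read_long(fo)  # block size in bytes, unused here
        for _ in range(block_count):
            key = fo.read(read_long(fo)).decode('utf-8')
            items[key] = read_long(fo)
        block_count = read_long(fo)
    return items

# count=1, key 'a' (len 1), value 7, terminating count=0
print(read_map_of_longs(io.BytesIO(b'\x02\x02a\x0e\x00')))  # {'a': 7}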
sporsh/carnifex
|
carnifex/ssh/client.py
|
https://github.com/sporsh/carnifex/blob/82dd3bd2bc134dfb69a78f43171e227f2127060b/carnifex/ssh/client.py#L44-L51
|
def receiveError(self, reasonCode, description):
"""
Called when we receive a disconnect error message from the other
side.
"""
error = disconnectErrors.get(reasonCode, DisconnectError)
self.connectionClosed(error(reasonCode, description))
SSHClientTransport.receiveError(self, reasonCode, description)
|
[
"def",
"receiveError",
"(",
"self",
",",
"reasonCode",
",",
"description",
")",
":",
"error",
"=",
"disconnectErrors",
".",
"get",
"(",
"reasonCode",
",",
"DisconnectError",
")",
"self",
".",
"connectionClosed",
"(",
"error",
"(",
"reasonCode",
",",
"description",
")",
")",
"SSHClientTransport",
".",
"receiveError",
"(",
"self",
",",
"reasonCode",
",",
"description",
")"
] |
Called when we receive a disconnect error message from the other
side.
|
[
"Called",
"when",
"we",
"receive",
"a",
"disconnect",
"error",
"message",
"from",
"the",
"other",
"side",
"."
] |
python
|
train
|
toastdriven/alligator
|
alligator/gator.py
|
https://github.com/toastdriven/alligator/blob/f18bcb35b350fc6b0886393f5246d69c892b36c7/alligator/gator.py#L234-L261
|
def task(self, func, *args, **kwargs):
"""
Pushes a task onto the queue.
This will instantiate a ``Gator.task_class`` instance, configure
the callable & its arguments, then push it onto the queue.
You'll typically want to use either this method or the ``Gator.options``
context manager (if you need to configure the ``Task`` arguments, such
as retries, async, task_id, etc.)
Ex::
on_queue = gator.task(increment, incr_by=2)
:param func: The callable with business logic to execute
:type func: callable
:param args: Positional arguments to pass to the callable task
:type args: list
:param kwargs: Keyword arguments to pass to the callable task
:type kwargs: dict
:returns: The ``Task`` instance
"""
task = self.task_class()
return self.push(task, func, *args, **kwargs)
|
[
"def",
"task",
"(",
"self",
",",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"task",
"=",
"self",
".",
"task_class",
"(",
")",
"return",
"self",
".",
"push",
"(",
"task",
",",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Pushes a task onto the queue.
This will instantiate a ``Gator.task_class`` instance, configure
the callable & its arguments, then push it onto the queue.
You'll typically want to use either this method or the ``Gator.options``
context manager (if you need to configure the ``Task`` arguments, such
as retries, async, task_id, etc.)
Ex::
on_queue = gator.task(increment, incr_by=2)
:param func: The callable with business logic to execute
:type func: callable
:param args: Positional arguments to pass to the callable task
:type args: list
:param kwargs: Keyword arguments to pass to the callable task
:type kwargs: dict
:returns: The ``Task`` instance
|
[
"Pushes",
"a",
"task",
"onto",
"the",
"queue",
"."
] |
python
|
train
|
amanusk/s-tui
|
s_tui/s_tui.py
|
https://github.com/amanusk/s-tui/blob/5e89d15081e716024db28ec03b1e3a7710330951/s_tui/s_tui.py#L256-L268
|
def on_reset_button(self, _):
"""Reset graph data and display empty graph"""
for graph in self.visible_graphs.values():
graph.reset()
for graph in self.graphs.values():
try:
graph.source.reset()
except NotImplementedError:
pass
# Reset clock
self.clock_view.set_text(ZERO_TIME)
self.update_displayed_information()
|
[
"def",
"on_reset_button",
"(",
"self",
",",
"_",
")",
":",
"for",
"graph",
"in",
"self",
".",
"visible_graphs",
".",
"values",
"(",
")",
":",
"graph",
".",
"reset",
"(",
")",
"for",
"graph",
"in",
"self",
".",
"graphs",
".",
"values",
"(",
")",
":",
"try",
":",
"graph",
".",
"source",
".",
"reset",
"(",
")",
"except",
"NotImplementedError",
":",
"pass",
"# Reset clock",
"self",
".",
"clock_view",
".",
"set_text",
"(",
"ZERO_TIME",
")",
"self",
".",
"update_displayed_information",
"(",
")"
] |
Reset graph data and display empty graph
|
[
"Reset",
"graph",
"data",
"and",
"display",
"empty",
"graph"
] |
python
|
train
|
summa-tx/riemann
|
riemann/blake256.py
|
https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/blake256.py#L361-L415
|
def update(self, data):
""" update the state with new data, storing excess data
as necessary. may be called multiple times and if a
call sends less than a full block in size, the leftover
is cached and will be consumed in the next call
data: data to be hashed (bytestring)
"""
self.state = 2
BLKBYTES = self.BLKBYTES # de-referenced for improved readability
BLKBITS = self.BLKBITS
datalen = len(data)
if not datalen: return
if type(data) == type(u''):
# use either of the next two lines for a proper
# response under both Python2 and Python3
data = data.encode('UTF-8') # converts to byte string
#data = bytearray(data, 'utf-8') # use if want mutable
# This next line works for Py3 but fails under
# Py2 because the Py2 version of bytes() will
# accept only *one* argument. Arrrrgh!!!
#data = bytes(data, 'utf-8') # converts to immutable byte
# string but... under Py2
# bytes() wants only 1 arg
# ...a dummy, 2nd argument like encoding=None
# that does nothing would at least allow
# compatibility between Python2 and Python3.
left = len(self.cache)
fill = BLKBYTES - left
# if any cached data and any added new data will fill a
# full block, fill and compress
if left and datalen >= fill:
self.cache = self.cache + data[:fill]
self.t += BLKBITS # update counter
self._compress(self.cache)
self.cache = b''
data = data[fill:]
datalen -= fill
# compress new data until not enough for a full block
while datalen >= BLKBYTES:
self.t += BLKBITS # update counter
self._compress(data[:BLKBYTES])
data = data[BLKBYTES:]
datalen -= BLKBYTES
# cache all leftover bytes until next call to update()
if datalen > 0:
self.cache = self.cache + data[:datalen]
|
[
"def",
"update",
"(",
"self",
",",
"data",
")",
":",
"self",
".",
"state",
"=",
"2",
"BLKBYTES",
"=",
"self",
".",
"BLKBYTES",
"# de-referenced for improved readability",
"BLKBITS",
"=",
"self",
".",
"BLKBITS",
"datalen",
"=",
"len",
"(",
"data",
")",
"if",
"not",
"datalen",
":",
"return",
"if",
"type",
"(",
"data",
")",
"==",
"type",
"(",
"u''",
")",
":",
"# use either of the next two lines for a proper",
"# response under both Python2 and Python3",
"data",
"=",
"data",
".",
"encode",
"(",
"'UTF-8'",
")",
"# converts to byte string",
"#data = bytearray(data, 'utf-8') # use if want mutable",
"# This next line works for Py3 but fails under",
"# Py2 because the Py2 version of bytes() will",
"# accept only *one* argument. Arrrrgh!!!",
"#data = bytes(data, 'utf-8') # converts to immutable byte",
"# string but... under p7",
"# bytes() wants only 1 arg",
"# ...a dummy, 2nd argument like encoding=None",
"# that does nothing would at least allow",
"# compatibility between Python2 and Python3.",
"left",
"=",
"len",
"(",
"self",
".",
"cache",
")",
"fill",
"=",
"BLKBYTES",
"-",
"left",
"# if any cached data and any added new data will fill a",
"# full block, fill and compress",
"if",
"left",
"and",
"datalen",
">=",
"fill",
":",
"self",
".",
"cache",
"=",
"self",
".",
"cache",
"+",
"data",
"[",
":",
"fill",
"]",
"self",
".",
"t",
"+=",
"BLKBITS",
"# update counter",
"self",
".",
"_compress",
"(",
"self",
".",
"cache",
")",
"self",
".",
"cache",
"=",
"b''",
"data",
"=",
"data",
"[",
"fill",
":",
"]",
"datalen",
"-=",
"fill",
"# compress new data until not enough for a full block",
"while",
"datalen",
">=",
"BLKBYTES",
":",
"self",
".",
"t",
"+=",
"BLKBITS",
"# update counter",
"self",
".",
"_compress",
"(",
"data",
"[",
":",
"BLKBYTES",
"]",
")",
"data",
"=",
"data",
"[",
"BLKBYTES",
":",
"]",
"datalen",
"-=",
"BLKBYTES",
"# cache all leftover bytes until next call to update()",
"if",
"datalen",
">",
"0",
":",
"self",
".",
"cache",
"=",
"self",
".",
"cache",
"+",
"data",
"[",
":",
"datalen",
"]"
] |
update the state with new data, storing excess data
as necessary. may be called multiple times and if a
call sends less than a full block in size, the leftover
is cached and will be consumed in the next call
data: data to be hashed (bytestring)
|
[
"update",
"the",
"state",
"with",
"new",
"data",
"storing",
"excess",
"data",
"as",
"necessary",
".",
"may",
"be",
"called",
"multiple",
"times",
"and",
"if",
"a",
"call",
"sends",
"less",
"than",
"a",
"full",
"block",
"in",
"size",
"the",
"leftover",
"is",
"cached",
"and",
"will",
"be",
"consumed",
"in",
"the",
"next",
"call",
"data",
":",
"data",
"to",
"be",
"hashed",
"(",
"bytestring",
")"
] |
python
|
train
|
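The chunk-buffering contract described in update()'s docstring (several partial calls give the same digest as one full call) is the same one hashlib objects obey, so it is easy to illustrate with hashlib.sha256, since the BLAKE-256 class above is not importable standalone here:

import hashlib

# Feeding data in arbitrary-sized chunks matches a single update call,
# because leftover bytes are cached until a full block is available.
h1 = hashlib.sha256()
for chunk in (b'he', b'llo ', b'world'):
    h1.update(chunk)

h2 = hashlib.sha256(b'hello world')
assert h1.hexdigest() == h2.hexdigest()
print(h1.hexdigest())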
bcbio/bcbio-nextgen
|
bcbio/structural/manta.py
|
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/manta.py#L71-L81
|
def _get_out_file(work_dir, paired):
"""Retrieve manta output variant file, depending on analysis.
"""
if paired:
if paired.normal_bam:
base_file = "somaticSV.vcf.gz"
else:
base_file = "tumorSV.vcf.gz"
else:
base_file = "diploidSV.vcf.gz"
return os.path.join(work_dir, "results", "variants", base_file)
|
[
"def",
"_get_out_file",
"(",
"work_dir",
",",
"paired",
")",
":",
"if",
"paired",
":",
"if",
"paired",
".",
"normal_bam",
":",
"base_file",
"=",
"\"somaticSV.vcf.gz\"",
"else",
":",
"base_file",
"=",
"\"tumorSV.vcf.gz\"",
"else",
":",
"base_file",
"=",
"\"diploidSV.vcf.gz\"",
"return",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"\"results\"",
",",
"\"variants\"",
",",
"base_file",
")"
] |
Retrieve manta output variant file, depending on analysis.
|
[
"Retrieve",
"manta",
"output",
"variant",
"file",
"depending",
"on",
"analysis",
"."
] |
python
|
train
|
fabioz/PyDev.Debugger
|
_pydev_bundle/_pydev_jy_imports_tipper.py
|
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/_pydev_bundle/_pydev_jy_imports_tipper.py#L177-L295
|
def ismethod(func):
'''this function should return the information gathered on a function
@param func: this is the function we want to get info on
@return a tuple where:
0 = indicates whether the parameter passed is a method or not
1 = a list of classes 'Info', with the info gathered from the function
this is a list because when we have methods from java with the same name and different signatures,
we actually have many methods, each with its own set of arguments
'''
try:
if isinstance(func, core.PyFunction):
#ok, this is from python, created by jython
#print_ ' PyFunction'
def getargs(func_code):
"""Get information about the arguments accepted by a code object.
Three things are returned: (args, varargs, varkw), where 'args' is
a list of argument names (possibly containing nested lists), and
'varargs' and 'varkw' are the names of the * and ** arguments or None."""
nargs = func_code.co_argcount
names = func_code.co_varnames
args = list(names[:nargs])
step = 0
if not hasattr(func_code, 'CO_VARARGS'):
from org.python.core import CodeFlag # @UnresolvedImport
co_varargs_flag = CodeFlag.CO_VARARGS.flag
co_varkeywords_flag = CodeFlag.CO_VARKEYWORDS.flag
else:
co_varargs_flag = func_code.CO_VARARGS
co_varkeywords_flag = func_code.CO_VARKEYWORDS
varargs = None
if func_code.co_flags & co_varargs_flag:
varargs = func_code.co_varnames[nargs]
nargs = nargs + 1
varkw = None
if func_code.co_flags & co_varkeywords_flag:
varkw = func_code.co_varnames[nargs]
return args, varargs, varkw
args = getargs(func.func_code)
return 1, [Info(func.func_name, args=args[0], varargs=args[1], kwargs=args[2], doc=func.func_doc)]
if isinstance(func, core.PyMethod):
#this is something from java itself, and jython just wrapped it...
#things to play in func:
#['__call__', '__class__', '__cmp__', '__delattr__', '__dir__', '__doc__', '__findattr__', '__name__', '_doget', 'im_class',
#'im_func', 'im_self', 'toString']
#print_ ' PyMethod'
#that's the PyReflectedFunction... keep going to get it
func = func.im_func
if isinstance(func, PyReflectedFunction):
#this is something from java itself, and jython just wrapped it...
#print_ ' PyReflectedFunction'
infos = []
for i in xrange(len(func.argslist)):
#things to play in func.argslist[i]:
#'PyArgsCall', 'PyArgsKeywordsCall', 'REPLACE', 'StandardCall', 'args', 'compare', 'compareTo', 'data', 'declaringClass'
#'flags', 'isStatic', 'matches', 'precedence']
#print_ ' ', func.argslist[i].data.__class__
#func.argslist[i].data.__class__ == java.lang.reflect.Method
if func.argslist[i]:
met = func.argslist[i].data
name = met.getName()
try:
ret = met.getReturnType()
except AttributeError:
ret = ''
parameterTypes = met.getParameterTypes()
args = []
for j in xrange(len(parameterTypes)):
paramTypesClass = parameterTypes[j]
try:
try:
paramClassName = paramTypesClass.getName()
except:
paramClassName = paramTypesClass.getName(paramTypesClass)
except AttributeError:
try:
paramClassName = repr(paramTypesClass) #should be something like <type 'object'>
paramClassName = paramClassName.split('\'')[1]
except:
paramClassName = repr(paramTypesClass) #just in case something else happens... it will at least be visible
#if the parameter equals [C, it means it is a char array, so, let's change it
a = format_param_class_name(paramClassName)
#a = a.replace('[]','Array')
#a = a.replace('Object', 'obj')
#a = a.replace('String', 's')
#a = a.replace('Integer', 'i')
#a = a.replace('Char', 'c')
#a = a.replace('Double', 'd')
args.append(a) #so we don't leave invalid code
info = Info(name, args=args, ret=ret)
#print_ info.basic_as_str()
infos.append(info)
return 1, infos
except Exception:
s = StringIO.StringIO()
traceback.print_exc(file=s)
return 1, [Info(str('ERROR'), doc=s.getvalue())]
return 0, None
|
[
"def",
"ismethod",
"(",
"func",
")",
":",
"try",
":",
"if",
"isinstance",
"(",
"func",
",",
"core",
".",
"PyFunction",
")",
":",
"#ok, this is from python, created by jython",
"#print_ ' PyFunction'",
"def",
"getargs",
"(",
"func_code",
")",
":",
"\"\"\"Get information about the arguments accepted by a code object.\n\n Three things are returned: (args, varargs, varkw), where 'args' is\n a list of argument names (possibly containing nested lists), and\n 'varargs' and 'varkw' are the names of the * and ** arguments or None.\"\"\"",
"nargs",
"=",
"func_code",
".",
"co_argcount",
"names",
"=",
"func_code",
".",
"co_varnames",
"args",
"=",
"list",
"(",
"names",
"[",
":",
"nargs",
"]",
")",
"step",
"=",
"0",
"if",
"not",
"hasattr",
"(",
"func_code",
",",
"'CO_VARARGS'",
")",
":",
"from",
"org",
".",
"python",
".",
"core",
"import",
"CodeFlag",
"# @UnresolvedImport",
"co_varargs_flag",
"=",
"CodeFlag",
".",
"CO_VARARGS",
".",
"flag",
"co_varkeywords_flag",
"=",
"CodeFlag",
".",
"CO_VARKEYWORDS",
".",
"flag",
"else",
":",
"co_varargs_flag",
"=",
"func_code",
".",
"CO_VARARGS",
"co_varkeywords_flag",
"=",
"func_code",
".",
"CO_VARKEYWORDS",
"varargs",
"=",
"None",
"if",
"func_code",
".",
"co_flags",
"&",
"co_varargs_flag",
":",
"varargs",
"=",
"func_code",
".",
"co_varnames",
"[",
"nargs",
"]",
"nargs",
"=",
"nargs",
"+",
"1",
"varkw",
"=",
"None",
"if",
"func_code",
".",
"co_flags",
"&",
"co_varkeywords_flag",
":",
"varkw",
"=",
"func_code",
".",
"co_varnames",
"[",
"nargs",
"]",
"return",
"args",
",",
"varargs",
",",
"varkw",
"args",
"=",
"getargs",
"(",
"func",
".",
"func_code",
")",
"return",
"1",
",",
"[",
"Info",
"(",
"func",
".",
"func_name",
",",
"args",
"=",
"args",
"[",
"0",
"]",
",",
"varargs",
"=",
"args",
"[",
"1",
"]",
",",
"kwargs",
"=",
"args",
"[",
"2",
"]",
",",
"doc",
"=",
"func",
".",
"func_doc",
")",
"]",
"if",
"isinstance",
"(",
"func",
",",
"core",
".",
"PyMethod",
")",
":",
"#this is something from java itself, and jython just wrapped it...",
"#things to play in func:",
"#['__call__', '__class__', '__cmp__', '__delattr__', '__dir__', '__doc__', '__findattr__', '__name__', '_doget', 'im_class',",
"#'im_func', 'im_self', 'toString']",
"#print_ ' PyMethod'",
"#that's the PyReflectedFunction... keep going to get it",
"func",
"=",
"func",
".",
"im_func",
"if",
"isinstance",
"(",
"func",
",",
"PyReflectedFunction",
")",
":",
"#this is something from java itself, and jython just wrapped it...",
"#print_ ' PyReflectedFunction'",
"infos",
"=",
"[",
"]",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"func",
".",
"argslist",
")",
")",
":",
"#things to play in func.argslist[i]:",
"#'PyArgsCall', 'PyArgsKeywordsCall', 'REPLACE', 'StandardCall', 'args', 'compare', 'compareTo', 'data', 'declaringClass'",
"#'flags', 'isStatic', 'matches', 'precedence']",
"#print_ ' ', func.argslist[i].data.__class__",
"#func.argslist[i].data.__class__ == java.lang.reflect.Method",
"if",
"func",
".",
"argslist",
"[",
"i",
"]",
":",
"met",
"=",
"func",
".",
"argslist",
"[",
"i",
"]",
".",
"data",
"name",
"=",
"met",
".",
"getName",
"(",
")",
"try",
":",
"ret",
"=",
"met",
".",
"getReturnType",
"(",
")",
"except",
"AttributeError",
":",
"ret",
"=",
"''",
"parameterTypes",
"=",
"met",
".",
"getParameterTypes",
"(",
")",
"args",
"=",
"[",
"]",
"for",
"j",
"in",
"xrange",
"(",
"len",
"(",
"parameterTypes",
")",
")",
":",
"paramTypesClass",
"=",
"parameterTypes",
"[",
"j",
"]",
"try",
":",
"try",
":",
"paramClassName",
"=",
"paramTypesClass",
".",
"getName",
"(",
")",
"except",
":",
"paramClassName",
"=",
"paramTypesClass",
".",
"getName",
"(",
"paramTypesClass",
")",
"except",
"AttributeError",
":",
"try",
":",
"paramClassName",
"=",
"repr",
"(",
"paramTypesClass",
")",
"#should be something like <type 'object'>",
"paramClassName",
"=",
"paramClassName",
".",
"split",
"(",
"'\\''",
")",
"[",
"1",
"]",
"except",
":",
"paramClassName",
"=",
"repr",
"(",
"paramTypesClass",
")",
"#just in case something else happens... it will at least be visible",
"#if the parameter equals [C, it means it it a char array, so, let's change it",
"a",
"=",
"format_param_class_name",
"(",
"paramClassName",
")",
"#a = a.replace('[]','Array')",
"#a = a.replace('Object', 'obj')",
"#a = a.replace('String', 's')",
"#a = a.replace('Integer', 'i')",
"#a = a.replace('Char', 'c')",
"#a = a.replace('Double', 'd')",
"args",
".",
"append",
"(",
"a",
")",
"#so we don't leave invalid code",
"info",
"=",
"Info",
"(",
"name",
",",
"args",
"=",
"args",
",",
"ret",
"=",
"ret",
")",
"#print_ info.basic_as_str()",
"infos",
".",
"append",
"(",
"info",
")",
"return",
"1",
",",
"infos",
"except",
"Exception",
":",
"s",
"=",
"StringIO",
".",
"StringIO",
"(",
")",
"traceback",
".",
"print_exc",
"(",
"file",
"=",
"s",
")",
"return",
"1",
",",
"[",
"Info",
"(",
"str",
"(",
"'ERROR'",
")",
",",
"doc",
"=",
"s",
".",
"getvalue",
"(",
")",
")",
"]",
"return",
"0",
",",
"None"
] |
this function should return the information gathered on a function
@param func: this is the function we want to get info on
@return a tuple where:
0 = indicates whether the parameter passed is a method or not
1 = a list of classes 'Info', with the info gathered from the function
this is a list because when we have methods from java with the same name and different signatures,
we actually have many methods, each with its own set of arguments
|
[
"this",
"function",
"should",
"return",
"the",
"information",
"gathered",
"on",
"a",
"function"
] |
python
|
train
|
guma44/GEOparse
|
GEOparse/GEOparse.py
|
https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/GEOparse.py#L395-L492
|
def parse_GPL(filepath, entry_name=None, partial=None):
"""Parse GPL entry from SOFT file.
Args:
filepath (:obj:`str` or :obj:`Iterable`): Path to file with 1 GPL entry
or list of lines representing GPL from GSE file.
entry_name (:obj:`str`, optional): Name of the entry. By default it is
inferred from the data.
partial (:obj:'iterable', optional): A list of accession IDs of GSMs
to be partially extracted from GPL, works only if a file/accession
is a GPL.
Returns:
:obj:`GEOparse.GPL`: A GPL object.
"""
gsms = {}
gses = {}
gpl_soft = []
has_table = False
gpl_name = entry_name
database = None
if isinstance(filepath, str):
with utils.smart_open(filepath) as soft:
groupper = groupby(soft, lambda x: x.startswith("^"))
for is_new_entry, group in groupper:
if is_new_entry:
entry_type, entry_name = __parse_entry(next(group))
logger.debug("%s: %s" % (entry_type.upper(), entry_name))
if entry_type == "SERIES":
is_data, data_group = next(groupper)
gse_metadata = parse_metadata(data_group)
gses[entry_name] = GSE(name=entry_name,
metadata=gse_metadata)
elif entry_type == "SAMPLE":
if partial and entry_name not in partial:
continue
is_data, data_group = next(groupper)
gsms[entry_name] = parse_GSM(data_group, entry_name)
elif entry_type == "DATABASE":
is_data, data_group = next(groupper)
database_metadata = parse_metadata(data_group)
database = GEODatabase(name=entry_name,
metadata=database_metadata)
elif entry_type == "PLATFORM" or entry_type == "Annotation":
gpl_name = entry_name
is_data, data_group = next(groupper)
has_gpl_name = gpl_name or gpl_name is None
for line in data_group:
if ("_table_begin" in line or
not line.startswith(("^", "!", "#"))):
has_table = True
if not has_gpl_name:
if match("!Annotation_platform\s*=\s*", line):
gpl_name = split("\s*=\s*", line)[-1].strip()
has_gpl_name = True
gpl_soft.append(line)
else:
raise RuntimeError(
"Cannot parse {etype}. Unknown for GPL.".format(
etype=entry_type
))
else:
for line in filepath:
if "_table_begin" in line or (not line.startswith(("^", "!", "#"))):
has_table = True
gpl_soft.append(line.rstrip())
columns = None
try:
columns = parse_columns(gpl_soft)
except Exception:
pass
metadata = parse_metadata(gpl_soft)
if has_table:
table_data = parse_table_data(gpl_soft)
else:
table_data = DataFrame()
gpl = GPL(name=gpl_name,
gses=gses,
gsms=gsms,
table=table_data,
metadata=metadata,
columns=columns,
database=database
)
# link samples to series, if these were present in the GPL soft file
for gse_id, gse in gpl.gses.items():
for gsm_id in gse.metadata.get("sample_id", []):
if gsm_id in gpl.gsms:
gpl.gses[gse_id].gsms[gsm_id] = gpl.gsms[gsm_id]
return gpl
|
[
"def",
"parse_GPL",
"(",
"filepath",
",",
"entry_name",
"=",
"None",
",",
"partial",
"=",
"None",
")",
":",
"gsms",
"=",
"{",
"}",
"gses",
"=",
"{",
"}",
"gpl_soft",
"=",
"[",
"]",
"has_table",
"=",
"False",
"gpl_name",
"=",
"entry_name",
"database",
"=",
"None",
"if",
"isinstance",
"(",
"filepath",
",",
"str",
")",
":",
"with",
"utils",
".",
"smart_open",
"(",
"filepath",
")",
"as",
"soft",
":",
"groupper",
"=",
"groupby",
"(",
"soft",
",",
"lambda",
"x",
":",
"x",
".",
"startswith",
"(",
"\"^\"",
")",
")",
"for",
"is_new_entry",
",",
"group",
"in",
"groupper",
":",
"if",
"is_new_entry",
":",
"entry_type",
",",
"entry_name",
"=",
"__parse_entry",
"(",
"next",
"(",
"group",
")",
")",
"logger",
".",
"debug",
"(",
"\"%s: %s\"",
"%",
"(",
"entry_type",
".",
"upper",
"(",
")",
",",
"entry_name",
")",
")",
"if",
"entry_type",
"==",
"\"SERIES\"",
":",
"is_data",
",",
"data_group",
"=",
"next",
"(",
"groupper",
")",
"gse_metadata",
"=",
"parse_metadata",
"(",
"data_group",
")",
"gses",
"[",
"entry_name",
"]",
"=",
"GSE",
"(",
"name",
"=",
"entry_name",
",",
"metadata",
"=",
"gse_metadata",
")",
"elif",
"entry_type",
"==",
"\"SAMPLE\"",
":",
"if",
"partial",
"and",
"entry_name",
"not",
"in",
"partial",
":",
"continue",
"is_data",
",",
"data_group",
"=",
"next",
"(",
"groupper",
")",
"gsms",
"[",
"entry_name",
"]",
"=",
"parse_GSM",
"(",
"data_group",
",",
"entry_name",
")",
"elif",
"entry_type",
"==",
"\"DATABASE\"",
":",
"is_data",
",",
"data_group",
"=",
"next",
"(",
"groupper",
")",
"database_metadata",
"=",
"parse_metadata",
"(",
"data_group",
")",
"database",
"=",
"GEODatabase",
"(",
"name",
"=",
"entry_name",
",",
"metadata",
"=",
"database_metadata",
")",
"elif",
"entry_type",
"==",
"\"PLATFORM\"",
"or",
"entry_type",
"==",
"\"Annotation\"",
":",
"gpl_name",
"=",
"entry_name",
"is_data",
",",
"data_group",
"=",
"next",
"(",
"groupper",
")",
"has_gpl_name",
"=",
"gpl_name",
"or",
"gpl_name",
"is",
"None",
"for",
"line",
"in",
"data_group",
":",
"if",
"(",
"\"_table_begin\"",
"in",
"line",
"or",
"not",
"line",
".",
"startswith",
"(",
"(",
"\"^\"",
",",
"\"!\"",
",",
"\"#\"",
")",
")",
")",
":",
"has_table",
"=",
"True",
"if",
"not",
"has_gpl_name",
":",
"if",
"match",
"(",
"\"!Annotation_platform\\s*=\\s*\"",
",",
"line",
")",
":",
"gpl_name",
"=",
"split",
"(",
"\"\\s*=\\s*\"",
",",
"line",
")",
"[",
"-",
"1",
"]",
".",
"strip",
"(",
")",
"has_gpl_name",
"=",
"True",
"gpl_soft",
".",
"append",
"(",
"line",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Cannot parse {etype}. Unknown for GPL.\"",
".",
"format",
"(",
"etype",
"=",
"entry_type",
")",
")",
"else",
":",
"for",
"line",
"in",
"filepath",
":",
"if",
"\"_table_begin\"",
"in",
"line",
"or",
"(",
"not",
"line",
".",
"startswith",
"(",
"(",
"\"^\"",
",",
"\"!\"",
",",
"\"#\"",
")",
")",
")",
":",
"has_table",
"=",
"True",
"gpl_soft",
".",
"append",
"(",
"line",
".",
"rstrip",
"(",
")",
")",
"columns",
"=",
"None",
"try",
":",
"columns",
"=",
"parse_columns",
"(",
"gpl_soft",
")",
"except",
"Exception",
":",
"pass",
"metadata",
"=",
"parse_metadata",
"(",
"gpl_soft",
")",
"if",
"has_table",
":",
"table_data",
"=",
"parse_table_data",
"(",
"gpl_soft",
")",
"else",
":",
"table_data",
"=",
"DataFrame",
"(",
")",
"gpl",
"=",
"GPL",
"(",
"name",
"=",
"gpl_name",
",",
"gses",
"=",
"gses",
",",
"gsms",
"=",
"gsms",
",",
"table",
"=",
"table_data",
",",
"metadata",
"=",
"metadata",
",",
"columns",
"=",
"columns",
",",
"database",
"=",
"database",
")",
"# link samples to series, if these were present in the GPL soft file",
"for",
"gse_id",
",",
"gse",
"in",
"gpl",
".",
"gses",
".",
"items",
"(",
")",
":",
"for",
"gsm_id",
"in",
"gse",
".",
"metadata",
".",
"get",
"(",
"\"sample_id\"",
",",
"[",
"]",
")",
":",
"if",
"gsm_id",
"in",
"gpl",
".",
"gsms",
":",
"gpl",
".",
"gses",
"[",
"gse_id",
"]",
".",
"gsms",
"[",
"gsm_id",
"]",
"=",
"gpl",
".",
"gsms",
"[",
"gsm_id",
"]",
"return",
"gpl"
] |
Parse GPL entry from SOFT file.
Args:
filepath (:obj:`str` or :obj:`Iterable`): Path to file with 1 GPL entry
or list of lines representing GPL from GSE file.
entry_name (:obj:`str`, optional): Name of the entry. By default it is
inferred from the data.
partial (:obj:`iterable`, optional): A list of accession IDs of GSMs
to be partially extracted from GPL, works only if a file/accession
is a GPL.
Returns:
:obj:`GEOparse.GPL`: A GPL object.
|
[
"Parse",
"GPL",
"entry",
"from",
"SOFT",
"file",
"."
] |
python
|
train
|
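A minimal usage sketch for the parse_GPL entry above (the import path and the local SOFT file name "GPL96.txt" are illustrative assumptions, not taken from the row itself):

from GEOparse.GEOparse import parse_GPL  # assumed module path within GEOparse

gpl = parse_GPL("GPL96.txt")   # hypothetical local GPL SOFT file
print(gpl.name)                # platform accession, inferred from the data if not given
print(gpl.table.head())        # annotation table (empty DataFrame when no table present)
print(list(gpl.gses))          # series parsed from the same SOFT file, if any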
hozn/stravalib
|
stravalib/client.py
|
https://github.com/hozn/stravalib/blob/5500ebc39e0bf4706bb1ca4c27b25e56becaaa5f/stravalib/client.py#L82-L111
|
def authorization_url(self, client_id, redirect_uri, approval_prompt='auto',
scope=None, state=None):
"""
Get the URL needed to authorize your application to access a Strava user's information.
:param client_id: The numeric developer client id.
:type client_id: int
:param redirect_uri: The URL that Strava will redirect to after successful (or failed) authorization.
:type redirect_uri: str
:param approval_prompt: Whether to prompt for approval even if approval already granted to app.
Choices are 'auto' or 'force'. (Default is 'auto')
:type approval_prompt: str
:param scope: The access scope required. Omit to imply "public".
Valid values are 'read', 'read_all', 'profile:read_all', 'profile:write', 'activity:read',
'activity:read_all', 'activity:write'
:type scope: str
:param state: An arbitrary variable that will be returned to your application in the redirect URI.
:type state: str
:return: The URL to use for authorization link.
:rtype: str
"""
return self.protocol.authorization_url(client_id=client_id,
redirect_uri=redirect_uri,
approval_prompt=approval_prompt,
scope=scope, state=state)
|
[
"def",
"authorization_url",
"(",
"self",
",",
"client_id",
",",
"redirect_uri",
",",
"approval_prompt",
"=",
"'auto'",
",",
"scope",
"=",
"None",
",",
"state",
"=",
"None",
")",
":",
"return",
"self",
".",
"protocol",
".",
"authorization_url",
"(",
"client_id",
"=",
"client_id",
",",
"redirect_uri",
"=",
"redirect_uri",
",",
"approval_prompt",
"=",
"approval_prompt",
",",
"scope",
"=",
"scope",
",",
"state",
"=",
"state",
")"
] |
Get the URL needed to authorize your application to access a Strava user's information.
:param client_id: The numeric developer client id.
:type client_id: int
:param redirect_uri: The URL that Strava will redirect to after successful (or failed) authorization.
:type redirect_uri: str
:param approval_prompt: Whether to prompt for approval even if approval already granted to app.
Choices are 'auto' or 'force'. (Default is 'auto')
:type approval_prompt: str
:param scope: The access scope required. Omit to imply "public".
Valid values are 'read', 'read_all', 'profile:read_all', 'profile:write', 'activity:read',
'activity:read_all', 'activity:write'
:type scope: str
:param state: An arbitrary variable that will be returned to your application in the redirect URI.
:type state: str
:return: The URL to use for authorization link.
:rtype: str
|
[
"Get",
"the",
"URL",
"needed",
"to",
"authorize",
"your",
"application",
"to",
"access",
"a",
"Strava",
"user",
"s",
"information",
"."
] |
python
|
train
|
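A short sketch of building the OAuth link with the method above (the client id, redirect URI, and scope are placeholder values):

from stravalib.client import Client

client = Client()
url = client.authorization_url(
    client_id=1234,
    redirect_uri="http://localhost:8000/authorized",
    scope="activity:read_all",
)
print(url)  # direct the user to this URL to grant access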
olitheolix/qtmacs
|
qtmacs/extensions/qtmacsscintilla_widget.py
|
https://github.com/olitheolix/qtmacs/blob/36253b082b82590f183fe154b053eb3a1e741be2/qtmacs/extensions/qtmacsscintilla_widget.py#L840-L895
|
def SCIGetStyledText(self, selectionPos: tuple):
"""
Pythonic wrapper for the SCI_GETSTYLEDTEXT command.
For example, to get the raw text and styling bits
for the first five characters in the widget use::
text, style = SCIGetStyledText((0, 0, 0, 5))
print(text.decode('utf-8'))
|Args|
* ``selectionPos`` (**tuple**): selection position in the
form of (start_line, start_col, end_line, end_col).
|Returns|
**tuple** of two ``bytearrays``. The first contains the
character bytes and the second the Scintilla styling
information.
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
"""
# Sanity check.
if not self.isSelectionPositionValid(selectionPos):
return None
# Convert the start- and end point of the selection into
# stream offsets. Ensure that start comes before end.
start = self.positionFromLineIndex(*selectionPos[:2])
end = self.positionFromLineIndex(*selectionPos[2:])
if start > end:
start, end = end, start
# Allocate a large enough buffer.
bufSize = 2 * (end - start) + 2
buf = bytearray(bufSize)
# Fetch the text- and styling information.
numRet = self.SendScintilla(self.SCI_GETSTYLEDTEXT, start, end, buf)
# The last two bytes are always Zero according to the
# Scintilla documentation, so remove them.
buf = buf[:-2]
# Double check that we did not receive more bytes than the buffer
# was long.
if numRet > bufSize:
qteMain.qteLogger.error('SCI_GETSTYLEDTEXT function returned more'
' bytes than expected.')
text = buf[0::2]
style = buf[1::2]
return (text, style)
|
[
"def",
"SCIGetStyledText",
"(",
"self",
",",
"selectionPos",
":",
"tuple",
")",
":",
"# Sanity check.",
"if",
"not",
"self",
".",
"isSelectionPositionValid",
"(",
"selectionPos",
")",
":",
"return",
"None",
"# Convert the start- and end point of the selection into",
"# stream offsets. Ensure that start comes before end.",
"start",
"=",
"self",
".",
"positionFromLineIndex",
"(",
"*",
"selectionPos",
"[",
":",
"2",
"]",
")",
"end",
"=",
"self",
".",
"positionFromLineIndex",
"(",
"*",
"selectionPos",
"[",
"2",
":",
"]",
")",
"if",
"start",
">",
"end",
":",
"start",
",",
"end",
"=",
"end",
",",
"start",
"# Allocate a large enough buffer.",
"bufSize",
"=",
"2",
"*",
"(",
"end",
"-",
"start",
")",
"+",
"2",
"buf",
"=",
"bytearray",
"(",
"bufSize",
")",
"# Fetch the text- and styling information.",
"numRet",
"=",
"self",
".",
"SendScintilla",
"(",
"self",
".",
"SCI_GETSTYLEDTEXT",
",",
"start",
",",
"end",
",",
"buf",
")",
"# The last two bytes are always Zero according to the",
"# Scintilla documentation, so remove them.",
"buf",
"=",
"buf",
"[",
":",
"-",
"2",
"]",
"# Double check that we did not receive more bytes than the buffer",
"# was long.",
"if",
"numRet",
">",
"bufSize",
":",
"qteMain",
".",
"qteLogger",
".",
"error",
"(",
"'SCI_GETSTYLEDTEX function returned more'",
"' bytes than expected.'",
")",
"text",
"=",
"buf",
"[",
"0",
":",
":",
"2",
"]",
"style",
"=",
"buf",
"[",
"1",
":",
":",
"2",
"]",
"return",
"(",
"text",
",",
"style",
")"
] |
Pythonic wrapper for the SCI_GETSTYLEDTEXT command.
For example, to get the raw text and styling bits
for the first five characters in the widget use::
text, style = SCIGetStyledText((0, 0, 0, 5))
print(text.decode('utf-8'))
|Args|
* ``selectionPos`` (**tuple**): selection position in the
form of (start_line, start_col, end_line, end_col).
|Returns|
**tuple** of two ``bytearrays``. The first contains the
character bytes and the second the Scintilla styling
information.
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
|
[
"Pythonic",
"wrapper",
"for",
"the",
"SCI_GETSTYLEDTEXT",
"command",
"."
] |
python
|
train
|
cognitect/transit-python
|
transit/sosjson.py
|
https://github.com/cognitect/transit-python/blob/59e27e7d322feaa3a7e8eb3de06ae96d8adb614f/transit/sosjson.py#L25-L39
|
def read_chunk(stream):
"""Ignore whitespace outside of strings. If we hit a string, read it in
its entirety.
"""
chunk = stream.read(1)
while chunk in SKIP:
chunk = stream.read(1)
if chunk == "\"":
chunk += stream.read(1)
while not chunk.endswith("\""):
if chunk[-1] == ESCAPE:
chunk += stream.read(2)
else:
chunk += stream.read(1)
return chunk
|
[
"def",
"read_chunk",
"(",
"stream",
")",
":",
"chunk",
"=",
"stream",
".",
"read",
"(",
"1",
")",
"while",
"chunk",
"in",
"SKIP",
":",
"chunk",
"=",
"stream",
".",
"read",
"(",
"1",
")",
"if",
"chunk",
"==",
"\"\\\"\"",
":",
"chunk",
"+=",
"stream",
".",
"read",
"(",
"1",
")",
"while",
"not",
"chunk",
".",
"endswith",
"(",
"\"\\\"\"",
")",
":",
"if",
"chunk",
"[",
"-",
"1",
"]",
"==",
"ESCAPE",
":",
"chunk",
"+=",
"stream",
".",
"read",
"(",
"2",
")",
"else",
":",
"chunk",
"+=",
"stream",
".",
"read",
"(",
"1",
")",
"return",
"chunk"
] |
Ignore whitespace outside of strings. If we hit a string, read it in
its entirety.
|
[
"Ignore",
"whitespace",
"outside",
"of",
"strings",
".",
"If",
"we",
"hit",
"a",
"string",
"read",
"it",
"in",
"its",
"entirety",
"."
] |
python
|
train
|
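A small sketch of the chunking behaviour (the import follows the row's module path transit/sosjson.py; the sample stream is illustrative and assumes the space character is in SKIP):

import io
from transit.sosjson import read_chunk

stream = io.StringIO('  "a \\"quoted\\" string" 42')
print(read_chunk(stream))  # the whole quoted string, escape sequences preserved
print(read_chunk(stream))  # '4' - single characters are returned outside of strings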
pyhys/minimalmodbus
|
omegacn7500.py
|
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/omegacn7500.py#L317-L330
|
def get_pattern_additional_cycles(self, patternnumber):
"""Get the number of additional cycles for a given pattern.
Args:
patternnumber (integer): 0-7
Returns:
The number of additional cycles (int).
"""
_checkPatternNumber(patternnumber)
address = _calculateRegisterAddress('cycles', patternnumber)
return self.read_register(address)
|
[
"def",
"get_pattern_additional_cycles",
"(",
"self",
",",
"patternnumber",
")",
":",
"_checkPatternNumber",
"(",
"patternnumber",
")",
"address",
"=",
"_calculateRegisterAddress",
"(",
"'cycles'",
",",
"patternnumber",
")",
"return",
"self",
".",
"read_register",
"(",
"address",
")"
] |
Get the number of additional cycles for a given pattern.
Args:
patternnumber (integer): 0-7
Returns:
The number of additional cycles (int).
|
[
"Get",
"the",
"number",
"of",
"additional",
"cycles",
"for",
"a",
"given",
"pattern",
"."
] |
python
|
train
|
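A usage sketch for the controller method above (the serial port and slave address are placeholders; OmegaCN7500 is the driver class this module provides):

import omegacn7500

instrument = omegacn7500.OmegaCN7500("/dev/ttyUSB0", 1)  # port name, slave address
print(instrument.get_pattern_additional_cycles(0))  # additional cycles for pattern 0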
saltstack/salt
|
salt/pillar/vmware_pillar.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/vmware_pillar.py#L495-L517
|
def _crawl_attribute(this_data, this_attr):
'''
helper function to crawl an attribute specified for retrieval
'''
if isinstance(this_data, list):
t_list = []
for d in this_data:
t_list.append(_crawl_attribute(d, this_attr))
return t_list
else:
if isinstance(this_attr, dict):
t_dict = {}
for k in this_attr:
if hasattr(this_data, k):
t_dict[k] = _crawl_attribute(getattr(this_data, k, None), this_attr[k])
return t_dict
elif isinstance(this_attr, list):
this_dict = {}
for l in this_attr:
this_dict = dictupdate.update(this_dict, _crawl_attribute(this_data, l))
return this_dict
else:
return {this_attr: _recurse_config_to_dict(getattr(this_data, this_attr, None))}
|
[
"def",
"_crawl_attribute",
"(",
"this_data",
",",
"this_attr",
")",
":",
"if",
"isinstance",
"(",
"this_data",
",",
"list",
")",
":",
"t_list",
"=",
"[",
"]",
"for",
"d",
"in",
"this_data",
":",
"t_list",
".",
"append",
"(",
"_crawl_attribute",
"(",
"d",
",",
"this_attr",
")",
")",
"return",
"t_list",
"else",
":",
"if",
"isinstance",
"(",
"this_attr",
",",
"dict",
")",
":",
"t_dict",
"=",
"{",
"}",
"for",
"k",
"in",
"this_attr",
":",
"if",
"hasattr",
"(",
"this_data",
",",
"k",
")",
":",
"t_dict",
"[",
"k",
"]",
"=",
"_crawl_attribute",
"(",
"getattr",
"(",
"this_data",
",",
"k",
",",
"None",
")",
",",
"this_attr",
"[",
"k",
"]",
")",
"return",
"t_dict",
"elif",
"isinstance",
"(",
"this_attr",
",",
"list",
")",
":",
"this_dict",
"=",
"{",
"}",
"for",
"l",
"in",
"this_attr",
":",
"this_dict",
"=",
"dictupdate",
".",
"update",
"(",
"this_dict",
",",
"_crawl_attribute",
"(",
"this_data",
",",
"l",
")",
")",
"return",
"this_dict",
"else",
":",
"return",
"{",
"this_attr",
":",
"_recurse_config_to_dict",
"(",
"getattr",
"(",
"this_data",
",",
"this_attr",
",",
"None",
")",
")",
"}"
] |
helper function to crawl an attribute specified for retrieval
|
[
"helper",
"function",
"to",
"crawl",
"an",
"attribute",
"specified",
"for",
"retrieval"
] |
python
|
train
|
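A self-contained illustration of the attribute-spec shapes _crawl_attribute accepts - a string leaf, a dict for nesting, and a list to merge several specs (a simplified stand-in, not salt's implementation):

class VM:
    name = "web01"
    class runtime:
        powerState = "poweredOn"

def crawl(obj, spec):
    # dict: recurse into named sub-attributes
    if isinstance(spec, dict):
        return {k: crawl(getattr(obj, k, None), v)
                for k, v in spec.items() if hasattr(obj, k)}
    # list: merge the results of several specs
    if isinstance(spec, list):
        merged = {}
        for s in spec:
            merged.update(crawl(obj, s))
        return merged
    # string: a leaf attribute
    return {spec: getattr(obj, spec, None)}

print(crawl(VM, ["name", {"runtime": "powerState"}]))
# {'name': 'web01', 'runtime': {'powerState': 'poweredOn'}}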
goshuirc/irc
|
girc/__init__.py
|
https://github.com/goshuirc/irc/blob/d6a5e3e04d337566c009b087f108cd76f9e122cc/girc/__init__.py#L102-L130
|
def handler(self, direction, verb, priority=10):
"""Register this function as an event handler.
Args:
direction (str): ``in``, ``out``, ``both``, ``raw``.
verb (str): Event name.
priority (int): Handler priority (lower priority executes first).
Example:
These handlers print out a pretty raw log::
reactor = girc.Reactor()
@reactor.handler('in', 'raw', priority=1)
def handle_raw_in(event):
print(event['server'].name, ' ->', escape(event['data']))
@reactor.handler('out', 'raw', priority=1)
def handle_raw_out(event):
print(event['server'].name, '<- ', escape(event['data']))
"""
def parent_fn(func):
@functools.wraps(func)
def child_fn(msg):
func(msg)
self.register_event(direction, verb, child_fn, priority=priority)
return child_fn
return parent_fn
|
[
"def",
"handler",
"(",
"self",
",",
"direction",
",",
"verb",
",",
"priority",
"=",
"10",
")",
":",
"def",
"parent_fn",
"(",
"func",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"child_fn",
"(",
"msg",
")",
":",
"func",
"(",
"msg",
")",
"self",
".",
"register_event",
"(",
"direction",
",",
"verb",
",",
"child_fn",
",",
"priority",
"=",
"priority",
")",
"return",
"child_fn",
"return",
"parent_fn"
] |
Register this function as an event handler.
Args:
direction (str): ``in``, ``out``, ``both``, ``raw``.
verb (str): Event name.
priority (int): Handler priority (lower priority executes first).
Example:
These handlers print out a pretty raw log::
reactor = girc.Reactor()
@reactor.handler('in', 'raw', priority=1)
def handle_raw_in(event):
print(event['server'].name, ' ->', escape(event['data']))
@reactor.handler('out', 'raw', priority=1)
def handle_raw_out(event):
print(event['server'].name, '<- ', escape(event['data']))
|
[
"Register",
"this",
"function",
"as",
"an",
"event",
"handler",
"."
] |
python
|
train
|
NeuralEnsemble/lazyarray
|
lazyarray.py
|
https://github.com/NeuralEnsemble/lazyarray/blob/391a4cef3be85309c36adac0c17824de3d82f5be/lazyarray.py#L347-L384
|
def _partially_evaluate(self, addr, simplify=False):
"""
Return part of the lazy array.
"""
if self.is_homogeneous:
if simplify:
base_val = self.base_value
else:
base_val = self._homogeneous_array(addr) * self.base_value
elif isinstance(self.base_value, (int, long, numpy.integer, float, bool)):
base_val = self._homogeneous_array(addr) * self.base_value
elif isinstance(self.base_value, numpy.ndarray):
base_val = self.base_value[addr]
elif have_scipy and sparse.issparse(self.base_value): # For sparse matrices larr[2, :]
base_val = self.base_value[addr]
elif callable(self.base_value):
indices = self._array_indices(addr)
base_val = self.base_value(*indices)
if isinstance(base_val, numpy.ndarray) and base_val.shape == (1,):
base_val = base_val[0]
elif hasattr(self.base_value, "lazily_evaluate"):
base_val = self.base_value.lazily_evaluate(addr, shape=self._shape)
elif isinstance(self.base_value, VectorizedIterable):
partial_shape = self._partial_shape(addr)
if partial_shape:
n = reduce(operator.mul, partial_shape)
else:
n = 1
base_val = self.base_value.next(n) # note that the array contents will depend on the order of access to elements
if n == 1:
base_val = base_val[0]
elif partial_shape and base_val.shape != partial_shape:
base_val = base_val.reshape(partial_shape)
elif isinstance(self.base_value, collections.Iterator):
raise NotImplementedError("coming soon...")
else:
raise ValueError("invalid base value for array (%s)" % self.base_value)
return self._apply_operations(base_val, addr, simplify=simplify)
|
[
"def",
"_partially_evaluate",
"(",
"self",
",",
"addr",
",",
"simplify",
"=",
"False",
")",
":",
"if",
"self",
".",
"is_homogeneous",
":",
"if",
"simplify",
":",
"base_val",
"=",
"self",
".",
"base_value",
"else",
":",
"base_val",
"=",
"self",
".",
"_homogeneous_array",
"(",
"addr",
")",
"*",
"self",
".",
"base_value",
"elif",
"isinstance",
"(",
"self",
".",
"base_value",
",",
"(",
"int",
",",
"long",
",",
"numpy",
".",
"integer",
",",
"float",
",",
"bool",
")",
")",
":",
"base_val",
"=",
"self",
".",
"_homogeneous_array",
"(",
"addr",
")",
"*",
"self",
".",
"base_value",
"elif",
"isinstance",
"(",
"self",
".",
"base_value",
",",
"numpy",
".",
"ndarray",
")",
":",
"base_val",
"=",
"self",
".",
"base_value",
"[",
"addr",
"]",
"elif",
"have_scipy",
"and",
"sparse",
".",
"issparse",
"(",
"self",
".",
"base_value",
")",
":",
"# For sparse matrices larr[2, :]",
"base_val",
"=",
"self",
".",
"base_value",
"[",
"addr",
"]",
"elif",
"callable",
"(",
"self",
".",
"base_value",
")",
":",
"indices",
"=",
"self",
".",
"_array_indices",
"(",
"addr",
")",
"base_val",
"=",
"self",
".",
"base_value",
"(",
"*",
"indices",
")",
"if",
"isinstance",
"(",
"base_val",
",",
"numpy",
".",
"ndarray",
")",
"and",
"base_val",
".",
"shape",
"==",
"(",
"1",
",",
")",
":",
"base_val",
"=",
"base_val",
"[",
"0",
"]",
"elif",
"hasattr",
"(",
"self",
".",
"base_value",
",",
"\"lazily_evaluate\"",
")",
":",
"base_val",
"=",
"self",
".",
"base_value",
".",
"lazily_evaluate",
"(",
"addr",
",",
"shape",
"=",
"self",
".",
"_shape",
")",
"elif",
"isinstance",
"(",
"self",
".",
"base_value",
",",
"VectorizedIterable",
")",
":",
"partial_shape",
"=",
"self",
".",
"_partial_shape",
"(",
"addr",
")",
"if",
"partial_shape",
":",
"n",
"=",
"reduce",
"(",
"operator",
".",
"mul",
",",
"partial_shape",
")",
"else",
":",
"n",
"=",
"1",
"base_val",
"=",
"self",
".",
"base_value",
".",
"next",
"(",
"n",
")",
"# note that the array contents will depend on the order of access to elements",
"if",
"n",
"==",
"1",
":",
"base_val",
"=",
"base_val",
"[",
"0",
"]",
"elif",
"partial_shape",
"and",
"base_val",
".",
"shape",
"!=",
"partial_shape",
":",
"base_val",
"=",
"base_val",
".",
"reshape",
"(",
"partial_shape",
")",
"elif",
"isinstance",
"(",
"self",
".",
"base_value",
",",
"collections",
".",
"Iterator",
")",
":",
"raise",
"NotImplementedError",
"(",
"\"coming soon...\"",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"invalid base value for array (%s)\"",
"%",
"self",
".",
"base_value",
")",
"return",
"self",
".",
"_apply_operations",
"(",
"base_val",
",",
"addr",
",",
"simplify",
"=",
"simplify",
")"
] |
Return part of the lazy array.
|
[
"Return",
"part",
"of",
"the",
"lazy",
"array",
"."
] |
python
|
train
|
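A public-facing sketch of what partial evaluation enables (larray is lazyarray's documented entry point; the callable base value exercises the branch above):

from lazyarray import larray

a = larray(lambda i, j: 10 * i + j, shape=(4, 4))  # nothing is evaluated yet
print(a[2, 3])   # evaluates only the requested element -> 23
print(a[1, :])   # evaluates one row -> [10 11 12 13]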
bcbio/bcbio-nextgen
|
bcbio/distributed/ipython.py
|
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/ipython.py#L67-L78
|
def per_machine_target_cores(cores, num_jobs):
"""Select target cores on larger machines to leave room for batch script and controller.
On resource constrained environments, we want to pack all bcbio submissions onto a specific
number of machines. This gives up some cores to enable sharing cores with the controller
and batch script on larger machines.
"""
if cores >= 32 and num_jobs == 1:
cores = cores - 2
elif cores >= 16 and num_jobs in [1, 2]:
cores = cores - 1
return cores
|
[
"def",
"per_machine_target_cores",
"(",
"cores",
",",
"num_jobs",
")",
":",
"if",
"cores",
">=",
"32",
"and",
"num_jobs",
"==",
"1",
":",
"cores",
"=",
"cores",
"-",
"2",
"elif",
"cores",
">=",
"16",
"and",
"num_jobs",
"in",
"[",
"1",
",",
"2",
"]",
":",
"cores",
"=",
"cores",
"-",
"1",
"return",
"cores"
] |
Select target cores on larger machines to leave room for batch script and controller.
On resource constrained environments, we want to pack all bcbio submissions onto a specific
number of machines. This gives up some cores to enable sharing cores with the controller
and batch script on larger machines.
|
[
"Select",
"target",
"cores",
"on",
"larger",
"machines",
"to",
"leave",
"room",
"for",
"batch",
"script",
"and",
"controller",
"."
] |
python
|
train
|
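A quick check of the thresholds above (assumes bcbio is importable; the expected values follow directly from the two branches):

from bcbio.distributed.ipython import per_machine_target_cores

assert per_machine_target_cores(32, 1) == 30  # big machine, one job: keep 2 cores free
assert per_machine_target_cores(16, 2) == 15  # mid machine, 1-2 jobs: keep 1 core free
assert per_machine_target_cores(8, 4) == 8    # small machines are left untouched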
tradenity/python-sdk
|
tradenity/resources/payment_card.py
|
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/payment_card.py#L966-L987
|
def update_payment_card_by_id(cls, payment_card_id, payment_card, **kwargs):
"""Update PaymentCard
Update attributes of PaymentCard
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_payment_card_by_id(payment_card_id, payment_card, async=True)
>>> result = thread.get()
:param async bool
:param str payment_card_id: ID of paymentCard to update. (required)
:param PaymentCard payment_card: Attributes of paymentCard to update. (required)
:return: PaymentCard
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._update_payment_card_by_id_with_http_info(payment_card_id, payment_card, **kwargs)
else:
(data) = cls._update_payment_card_by_id_with_http_info(payment_card_id, payment_card, **kwargs)
return data
|
[
"def",
"update_payment_card_by_id",
"(",
"cls",
",",
"payment_card_id",
",",
"payment_card",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async'",
")",
":",
"return",
"cls",
".",
"_update_payment_card_by_id_with_http_info",
"(",
"payment_card_id",
",",
"payment_card",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"cls",
".",
"_update_payment_card_by_id_with_http_info",
"(",
"payment_card_id",
",",
"payment_card",
",",
"*",
"*",
"kwargs",
")",
"return",
"data"
] |
Update PaymentCard
Update attributes of PaymentCard
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_payment_card_by_id(payment_card_id, payment_card, async=True)
>>> result = thread.get()
:param async bool
:param str payment_card_id: ID of paymentCard to update. (required)
:param PaymentCard payment_card: Attributes of paymentCard to update. (required)
:return: PaymentCard
If the method is called asynchronously,
returns the request thread.
|
[
"Update",
"PaymentCard"
] |
python
|
train
|
Microsoft/azure-devops-python-api
|
azure-devops/azure/devops/v5_0/build/build_client.py
|
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/build/build_client.py#L576-L589
|
def get_build_controller(self, controller_id):
"""GetBuildController.
Gets a controller
:param int controller_id:
:rtype: :class:`<BuildController> <azure.devops.v5_0.build.models.BuildController>`
"""
route_values = {}
if controller_id is not None:
route_values['controllerId'] = self._serialize.url('controller_id', controller_id, 'int')
response = self._send(http_method='GET',
location_id='fcac1932-2ee1-437f-9b6f-7f696be858f6',
version='5.0',
route_values=route_values)
return self._deserialize('BuildController', response)
|
[
"def",
"get_build_controller",
"(",
"self",
",",
"controller_id",
")",
":",
"route_values",
"=",
"{",
"}",
"if",
"controller_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'controllerId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'controller_id'",
",",
"controller_id",
",",
"'int'",
")",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'GET'",
",",
"location_id",
"=",
"'fcac1932-2ee1-437f-9b6f-7f696be858f6'",
",",
"version",
"=",
"'5.0'",
",",
"route_values",
"=",
"route_values",
")",
"return",
"self",
".",
"_deserialize",
"(",
"'BuildController'",
",",
"response",
")"
] |
GetBuildController.
Gets a controller
:param int controller_id:
:rtype: :class:`<BuildController> <azure.devops.v5_0.build.models.BuildController>`
|
[
"GetBuildController",
".",
"Gets",
"a",
"controller",
":",
"param",
"int",
"controller_id",
":",
":",
"rtype",
":",
":",
"class",
":",
"<BuildController",
">",
"<azure",
".",
"devops",
".",
"v5_0",
".",
"build",
".",
"models",
".",
"BuildController",
">"
] |
python
|
train
|
fkarb/xltable
|
xltable/expression.py
|
https://github.com/fkarb/xltable/blob/7a592642d27ad5ee90d2aa8c26338abaa9d84bea/xltable/expression.py#L72-L79
|
def has_value(self):
"""return True if value has been set"""
try:
if isinstance(self.__value, Expression):
return self.__value.has_value
return True
except AttributeError:
return False
|
[
"def",
"has_value",
"(",
"self",
")",
":",
"try",
":",
"if",
"isinstance",
"(",
"self",
".",
"__value",
",",
"Expression",
")",
":",
"return",
"self",
".",
"__value",
".",
"has_value",
"return",
"True",
"except",
"AttributeError",
":",
"return",
"False"
] |
return True if value has been set
|
[
"return",
"True",
"if",
"value",
"has",
"been",
"set"
] |
python
|
train
|
moonso/vcftoolbox
|
vcftoolbox/add_variant_information.py
|
https://github.com/moonso/vcftoolbox/blob/438fb1d85a83812c389774b94802eb5921c89e3a/vcftoolbox/add_variant_information.py#L19-L79
|
def replace_vcf_info(keyword, annotation, variant_line=None, variant_dict=None):
"""Replace the information of a info field of a vcf variant line or a
variant dict.
Arguments:
variant_line (str): A vcf formatted variant line
variant_dict (dict): A variant dictionary
keyword (str): The info field key
annotation (str): If the annotation is a key, value pair
this is the string that represents the value
Returns:
variant_line (str): An annotated variant line
"""
new_info = '{0}={1}'.format(keyword, annotation)
logger.debug("Replacing the variant information {0}".format(new_info))
fixed_variant = None
new_info_list = []
if variant_line:
logger.debug("Adding information to a variant line")
splitted_variant = variant_line.rstrip('\n').split('\t')
logger.debug("Adding information to splitted variant line")
old_info = splitted_variant[7]
if old_info == '.':
new_info_string = new_info
else:
splitted_info_string = old_info.split(';')
for info in splitted_info_string:
splitted_info_entry = info.split('=')
if splitted_info_entry[0] == keyword:
new_info_list.append(new_info)
else:
new_info_list.append(info)
new_info_string = ';'.join(new_info_list)
splitted_variant[7] = new_info_string
fixed_variant = '\t'.join(splitted_variant)
elif variant_dict:
logger.debug("Adding information to a variant dict")
old_info = variant_dict['INFO']
if old_info == '.':
variant_dict['INFO'] = new_info
else:
for info in old_info.split(';'):
splitted_info_entry = info.split('=')
if splitted_info_entry[0] == keyword:
new_info_list.append(new_info)
else:
new_info_list.append(info)
new_info_string = ';'.join(new_info_list)
variant_dict['INFO'] = new_info_string
fixed_variant = variant_dict
return fixed_variant
|
[
"def",
"replace_vcf_info",
"(",
"keyword",
",",
"annotation",
",",
"variant_line",
"=",
"None",
",",
"variant_dict",
"=",
"None",
")",
":",
"new_info",
"=",
"'{0}={1}'",
".",
"format",
"(",
"keyword",
",",
"annotation",
")",
"logger",
".",
"debug",
"(",
"\"Replacing the variant information {0}\"",
".",
"format",
"(",
"new_info",
")",
")",
"fixed_variant",
"=",
"None",
"new_info_list",
"=",
"[",
"]",
"if",
"variant_line",
":",
"logger",
".",
"debug",
"(",
"\"Adding information to a variant line\"",
")",
"splitted_variant",
"=",
"variant_line",
".",
"rstrip",
"(",
"'\\n'",
")",
".",
"split",
"(",
"'\\t'",
")",
"logger",
".",
"debug",
"(",
"\"Adding information to splitted variant line\"",
")",
"old_info",
"=",
"splitted_variant",
"[",
"7",
"]",
"if",
"old_info",
"==",
"'.'",
":",
"new_info_string",
"=",
"new_info",
"else",
":",
"splitted_info_string",
"=",
"old_info",
".",
"split",
"(",
"';'",
")",
"for",
"info",
"in",
"splitted_info_string",
":",
"splitted_info_entry",
"=",
"info",
".",
"split",
"(",
"'='",
")",
"if",
"splitted_info_entry",
"[",
"0",
"]",
"==",
"keyword",
":",
"new_info_list",
".",
"append",
"(",
"new_info",
")",
"else",
":",
"new_info_list",
".",
"append",
"(",
"info",
")",
"new_info_string",
"=",
"';'",
".",
"join",
"(",
"new_info_list",
")",
"splitted_variant",
"[",
"7",
"]",
"=",
"new_info_string",
"fixed_variant",
"=",
"'\\t'",
".",
"join",
"(",
"splitted_variant",
")",
"elif",
"variant_dict",
":",
"logger",
".",
"debug",
"(",
"\"Adding information to a variant dict\"",
")",
"old_info",
"=",
"variant_dict",
"[",
"'INFO'",
"]",
"if",
"old_info",
"==",
"'.'",
":",
"variant_dict",
"[",
"'INFO'",
"]",
"=",
"new_info",
"else",
":",
"for",
"info",
"in",
"old_info",
".",
"split",
"(",
"';'",
")",
":",
"splitted_info_entry",
"=",
"info",
".",
"split",
"(",
"'='",
")",
"if",
"splitted_info_entry",
"[",
"0",
"]",
"==",
"keyword",
":",
"new_info_list",
".",
"append",
"(",
"new_info",
")",
"else",
":",
"new_info_list",
".",
"append",
"(",
"info",
")",
"new_info_string",
"=",
"';'",
".",
"join",
"(",
"new_info_list",
")",
"variant_dict",
"[",
"'INFO'",
"]",
"=",
"new_info_string",
"fixed_variant",
"=",
"variant_dict",
"return",
"fixed_variant"
] |
Replace the information of an info field of a vcf variant line or a
variant dict.
Arguments:
variant_line (str): A vcf formatted variant line
variant_dict (dict): A variant dictionary
keyword (str): The info field key
annotation (str): If the annotation is a key, value pair
this is the string that represents the value
Returns:
variant_line (str): An annotated variant line
|
[
"Replace",
"the",
"information",
"of",
"a",
"info",
"field",
"of",
"a",
"vcf",
"variant",
"line",
"or",
"a",
"variant",
"dict",
".",
"Arguments",
":",
"variant_line",
"(",
"str",
")",
":",
"A",
"vcf",
"formatted",
"variant",
"line",
"variant_dict",
"(",
"dict",
")",
":",
"A",
"variant",
"dictionary",
"keyword",
"(",
"str",
")",
":",
"The",
"info",
"field",
"key",
"annotation",
"(",
"str",
")",
":",
"If",
"the",
"annotation",
"is",
"a",
"key",
"value",
"pair",
"this",
"is",
"the",
"string",
"that",
"represents",
"the",
"value",
"Returns",
":",
"variant_line",
"(",
"str",
")",
":",
"A",
"annotated",
"variant",
"line"
] |
python
|
train
|
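A minimal sketch on a hand-written VCF line (the line itself is illustrative; the import follows the row's module path):

from vcftoolbox.add_variant_information import replace_vcf_info

line = "1\t100\t.\tA\tT\t50\tPASS\tDP=10;AF=0.5"
fixed = replace_vcf_info(keyword="AF", annotation="0.25", variant_line=line)
print(fixed)  # INFO column becomes 'DP=10;AF=0.25'; other entries are untouched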
robotools/fontParts
|
Lib/fontParts/base/segment.py
|
https://github.com/robotools/fontParts/blob/d2ff106fe95f9d566161d936a645157626568712/Lib/fontParts/base/segment.py#L159-L193
|
def _set_type(self, newType):
"""
Subclasses may override this method.
"""
oldType = self.type
if oldType == newType:
return
contour = self.contour
if contour is None:
raise FontPartsError("The segment does not belong to a contour.")
# converting line <-> move
if newType in ("move", "line") and oldType in ("move", "line"):
pass
# converting to a move or line
elif newType not in ("curve", "qcurve"):
offCurves = self.offCurve
for point in offCurves:
contour.removePoint(point)
# converting a line/move to a curve/qcurve
else:
segments = contour.segments
i = segments.index(self)
prev = segments[i - 1].onCurve
on = self.onCurve
x = on.x
y = on.y
points = contour.points
i = points.index(on)
contour.insertPoint(i, (x, y), "offcurve")
off2 = contour.points[i]
contour.insertPoint(i, (prev.x, prev.y), "offcurve")
off1 = contour.points[i]
del self._points
self._setPoints((off1, off2, on))
self.onCurve.type = newType
|
[
"def",
"_set_type",
"(",
"self",
",",
"newType",
")",
":",
"oldType",
"=",
"self",
".",
"type",
"if",
"oldType",
"==",
"newType",
":",
"return",
"contour",
"=",
"self",
".",
"contour",
"if",
"contour",
"is",
"None",
":",
"raise",
"FontPartsError",
"(",
"\"The segment does not belong to a contour.\"",
")",
"# converting line <-> move",
"if",
"newType",
"in",
"(",
"\"move\"",
",",
"\"line\"",
")",
"and",
"oldType",
"in",
"(",
"\"move\"",
",",
"\"line\"",
")",
":",
"pass",
"# converting to a move or line",
"elif",
"newType",
"not",
"in",
"(",
"\"curve\"",
",",
"\"qcurve\"",
")",
":",
"offCurves",
"=",
"self",
".",
"offCurve",
"for",
"point",
"in",
"offCurves",
":",
"contour",
".",
"removePoint",
"(",
"point",
")",
"# converting a line/move to a curve/qcurve",
"else",
":",
"segments",
"=",
"contour",
".",
"segments",
"i",
"=",
"segments",
".",
"index",
"(",
"self",
")",
"prev",
"=",
"segments",
"[",
"i",
"-",
"1",
"]",
".",
"onCurve",
"on",
"=",
"self",
".",
"onCurve",
"x",
"=",
"on",
".",
"x",
"y",
"=",
"on",
".",
"y",
"points",
"=",
"contour",
".",
"points",
"i",
"=",
"points",
".",
"index",
"(",
"on",
")",
"contour",
".",
"insertPoint",
"(",
"i",
",",
"(",
"x",
",",
"y",
")",
",",
"\"offcurve\"",
")",
"off2",
"=",
"contour",
".",
"points",
"[",
"i",
"]",
"contour",
".",
"insertPoint",
"(",
"i",
",",
"(",
"prev",
".",
"x",
",",
"prev",
".",
"y",
")",
",",
"\"offcurve\"",
")",
"off1",
"=",
"contour",
".",
"points",
"[",
"i",
"]",
"del",
"self",
".",
"_points",
"self",
".",
"_setPoints",
"(",
"(",
"off1",
",",
"off2",
",",
"on",
")",
")",
"self",
".",
"onCurve",
".",
"type",
"=",
"newType"
] |
Subclasses may override this method.
|
[
"Subclasses",
"may",
"override",
"this",
"method",
"."
] |
python
|
train
|
raymontag/kppy
|
kppy/database.py
|
https://github.com/raymontag/kppy/blob/a43f1fff7d49da1da4b3d8628a1b3ebbaf47f43a/kppy/database.py#L846-L859
|
def _cbc_decrypt(self, final_key, crypted_content):
"""This method decrypts the database"""
# Just decrypt the content with the created key
aes = AES.new(final_key, AES.MODE_CBC, self._enc_iv)
decrypted_content = aes.decrypt(crypted_content)
padding = decrypted_content[-1]
if sys.version > '3':
padding = decrypted_content[-1]
else:
padding = ord(decrypted_content[-1])
decrypted_content = decrypted_content[:len(decrypted_content)-padding]
return decrypted_content
|
[
"def",
"_cbc_decrypt",
"(",
"self",
",",
"final_key",
",",
"crypted_content",
")",
":",
"# Just decrypt the content with the created key",
"aes",
"=",
"AES",
".",
"new",
"(",
"final_key",
",",
"AES",
".",
"MODE_CBC",
",",
"self",
".",
"_enc_iv",
")",
"decrypted_content",
"=",
"aes",
".",
"decrypt",
"(",
"crypted_content",
")",
"padding",
"=",
"decrypted_content",
"[",
"-",
"1",
"]",
"if",
"sys",
".",
"version",
">",
"'3'",
":",
"padding",
"=",
"decrypted_content",
"[",
"-",
"1",
"]",
"else",
":",
"padding",
"=",
"ord",
"(",
"decrypted_content",
"[",
"-",
"1",
"]",
")",
"decrypted_content",
"=",
"decrypted_content",
"[",
":",
"len",
"(",
"decrypted_content",
")",
"-",
"padding",
"]",
"return",
"decrypted_content"
] |
This method decrypts the database
|
[
"This",
"method",
"decrypts",
"the",
"database"
] |
python
|
train
|
yyuu/botornado
|
boto/dynamodb/layer2.py
|
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/dynamodb/layer2.py#L367-L383
|
def update_throughput(self, table, read_units, write_units):
"""
Update the ProvisionedThroughput for the Amazon DynamoDB Table.
:type table: :class:`boto.dynamodb.table.Table`
:param table: The Table object whose throughput is being updated.
:type read_units: int
:param read_units: The new value for ReadCapacityUnits.
:type write_units: int
:param write_units: The new value for WriteCapacityUnits.
"""
response = self.layer1.update_table(table.name,
{'ReadCapacityUnits': read_units,
'WriteCapacityUnits': write_units})
table.update_from_response(response['TableDescription'])
|
[
"def",
"update_throughput",
"(",
"self",
",",
"table",
",",
"read_units",
",",
"write_units",
")",
":",
"response",
"=",
"self",
".",
"layer1",
".",
"update_table",
"(",
"table",
".",
"name",
",",
"{",
"'ReadCapacityUnits'",
":",
"read_units",
",",
"'WriteCapacityUnits'",
":",
"write_units",
"}",
")",
"table",
".",
"update_from_response",
"(",
"response",
"[",
"'TableDescription'",
"]",
")"
] |
Update the ProvisionedThroughput for the Amazon DynamoDB Table.
:type table: :class:`boto.dynamodb.table.Table`
:param table: The Table object whose throughput is being updated.
:type read_units: int
:param read_units: The new value for ReadCapacityUnits.
:type write_units: int
:param write_units: The new value for WriteCapacityUnits.
|
[
"Update",
"the",
"ProvisionedThroughput",
"for",
"the",
"Amazon",
"DynamoDB",
"Table",
"."
] |
python
|
train
|
gitpython-developers/GitPython
|
git/diff.py
|
https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/diff.py#L404-L459
|
def _index_from_patch_format(cls, repo, proc):
"""Create a new DiffIndex from the given text which must be in patch format
:param repo: is the repository we are operating on - it is required
:param stream: result of 'git diff' as a stream (supporting file protocol)
:return: git.DiffIndex """
## FIXME: Here SLURPING raw, need to re-phrase header-regexes linewise.
text = []
handle_process_output(proc, text.append, None, finalize_process, decode_streams=False)
# for now, we have to bake the stream
text = b''.join(text)
index = DiffIndex()
previous_header = None
for header in cls.re_header.finditer(text):
a_path_fallback, b_path_fallback, \
old_mode, new_mode, \
rename_from, rename_to, \
new_file_mode, deleted_file_mode, \
a_blob_id, b_blob_id, b_mode, \
a_path, b_path = header.groups()
new_file, deleted_file = bool(new_file_mode), bool(deleted_file_mode)
a_path = cls._pick_best_path(a_path, rename_from, a_path_fallback)
b_path = cls._pick_best_path(b_path, rename_to, b_path_fallback)
# Our only means to find the actual text is to see what has not been matched by our regex,
# and then retro-actively assign it to our index
if previous_header is not None:
index[-1].diff = text[previous_header.end():header.start()]
# end assign actual diff
# Make sure the mode is set if the path is set. Otherwise the resulting blob is invalid
# We just use the one mode we should have parsed
a_mode = old_mode or deleted_file_mode or (a_path and (b_mode or new_mode or new_file_mode))
b_mode = b_mode or new_mode or new_file_mode or (b_path and a_mode)
index.append(Diff(repo,
a_path,
b_path,
a_blob_id and a_blob_id.decode(defenc),
b_blob_id and b_blob_id.decode(defenc),
a_mode and a_mode.decode(defenc),
b_mode and b_mode.decode(defenc),
new_file, deleted_file,
rename_from,
rename_to,
None, None, None))
previous_header = header
# end for each header we parse
if index:
index[-1].diff = text[header.end():]
# end assign last diff
return index
|
[
"def",
"_index_from_patch_format",
"(",
"cls",
",",
"repo",
",",
"proc",
")",
":",
"## FIXME: Here SLURPING raw, need to re-phrase header-regexes linewise.",
"text",
"=",
"[",
"]",
"handle_process_output",
"(",
"proc",
",",
"text",
".",
"append",
",",
"None",
",",
"finalize_process",
",",
"decode_streams",
"=",
"False",
")",
"# for now, we have to bake the stream",
"text",
"=",
"b''",
".",
"join",
"(",
"text",
")",
"index",
"=",
"DiffIndex",
"(",
")",
"previous_header",
"=",
"None",
"for",
"header",
"in",
"cls",
".",
"re_header",
".",
"finditer",
"(",
"text",
")",
":",
"a_path_fallback",
",",
"b_path_fallback",
",",
"old_mode",
",",
"new_mode",
",",
"rename_from",
",",
"rename_to",
",",
"new_file_mode",
",",
"deleted_file_mode",
",",
"a_blob_id",
",",
"b_blob_id",
",",
"b_mode",
",",
"a_path",
",",
"b_path",
"=",
"header",
".",
"groups",
"(",
")",
"new_file",
",",
"deleted_file",
"=",
"bool",
"(",
"new_file_mode",
")",
",",
"bool",
"(",
"deleted_file_mode",
")",
"a_path",
"=",
"cls",
".",
"_pick_best_path",
"(",
"a_path",
",",
"rename_from",
",",
"a_path_fallback",
")",
"b_path",
"=",
"cls",
".",
"_pick_best_path",
"(",
"b_path",
",",
"rename_to",
",",
"b_path_fallback",
")",
"# Our only means to find the actual text is to see what has not been matched by our regex,",
"# and then retro-actively assign it to our index",
"if",
"previous_header",
"is",
"not",
"None",
":",
"index",
"[",
"-",
"1",
"]",
".",
"diff",
"=",
"text",
"[",
"previous_header",
".",
"end",
"(",
")",
":",
"header",
".",
"start",
"(",
")",
"]",
"# end assign actual diff",
"# Make sure the mode is set if the path is set. Otherwise the resulting blob is invalid",
"# We just use the one mode we should have parsed",
"a_mode",
"=",
"old_mode",
"or",
"deleted_file_mode",
"or",
"(",
"a_path",
"and",
"(",
"b_mode",
"or",
"new_mode",
"or",
"new_file_mode",
")",
")",
"b_mode",
"=",
"b_mode",
"or",
"new_mode",
"or",
"new_file_mode",
"or",
"(",
"b_path",
"and",
"a_mode",
")",
"index",
".",
"append",
"(",
"Diff",
"(",
"repo",
",",
"a_path",
",",
"b_path",
",",
"a_blob_id",
"and",
"a_blob_id",
".",
"decode",
"(",
"defenc",
")",
",",
"b_blob_id",
"and",
"b_blob_id",
".",
"decode",
"(",
"defenc",
")",
",",
"a_mode",
"and",
"a_mode",
".",
"decode",
"(",
"defenc",
")",
",",
"b_mode",
"and",
"b_mode",
".",
"decode",
"(",
"defenc",
")",
",",
"new_file",
",",
"deleted_file",
",",
"rename_from",
",",
"rename_to",
",",
"None",
",",
"None",
",",
"None",
")",
")",
"previous_header",
"=",
"header",
"# end for each header we parse",
"if",
"index",
":",
"index",
"[",
"-",
"1",
"]",
".",
"diff",
"=",
"text",
"[",
"header",
".",
"end",
"(",
")",
":",
"]",
"# end assign last diff",
"return",
"index"
] |
Create a new DiffIndex from the given text which must be in patch format
:param repo: is the repository we are operating on - it is required
:param stream: result of 'git diff' as a stream (supporting file protocol)
:return: git.DiffIndex
|
[
"Create",
"a",
"new",
"DiffIndex",
"from",
"the",
"given",
"text",
"which",
"must",
"be",
"in",
"patch",
"format",
":",
"param",
"repo",
":",
"is",
"the",
"repository",
"we",
"are",
"operating",
"on",
"-",
"it",
"is",
"required",
":",
"param",
"stream",
":",
"result",
"of",
"git",
"diff",
"as",
"a",
"stream",
"(",
"supporting",
"file",
"protocol",
")",
":",
"return",
":",
"git",
".",
"DiffIndex"
] |
python
|
train
|
mojaie/chorus
|
chorus/topology.py
|
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/topology.py#L21-L81
|
def recognize(mol):
""" Detect cycle basis, biconnected and isolated components (DFS).
This will add the following attributes to the molecule instance object.
mol.rings: Cycle basis
mol.scaffolds: biconnected components
mol.isolated: isolated components other than the largest one
To find minimum set of rings, additionally execute topology.minify_ring.
Reference:
networkx cycle_basis function
"""
g = set(i for i, _ in mol.atoms_iter())
bccs = {} # BiConnected Components
isoc = [] # ISOlated Components
while g:
start = g.pop()
stack = [start]
pred = {start: start}
used = {start: set()}
root = {start: start}
while stack:
tail = stack.pop()
for nbr in mol.neighbors(tail):
if nbr not in used: # New node
pred[nbr] = tail
stack.append(nbr)
used[nbr] = {tail}
root[nbr] = nbr
elif nbr in stack: # Cycle found
pn = used[nbr]
cyc = [nbr, tail]
p = pred[tail]
end = pred[nbr]
root[nbr] = root[tail] = root[end]
while p not in pn: # Backtrack
cyc.append(p)
root[p] = root[end]
if p in bccs: # Append scaffold to new cycle
if root[end] not in bccs:
bccs[root[end]] = []
bccs[root[end]].extend(bccs[p])
del bccs[p]
p = pred[p]
cyc.append(p)
if root[end] not in bccs: # Append new cycle to scaffold
bccs[root[end]] = []
bccs[root[end]].append(cyc)
used[nbr].add(tail)
isoc.append(list(pred.keys()))
# print(pred)
g -= set(pred)
mol.rings = []
mol.scaffolds = []
for cycles in bccs.values():
rcnt = len(mol.rings)
mol.rings.extend(cycles)
mol.scaffolds.append(list(range(rcnt, rcnt + len(cycles))))
mol.isolated = list(sorted(isoc, key=len, reverse=True))[1:]
mol.descriptors.add("Topology")
|
[
"def",
"recognize",
"(",
"mol",
")",
":",
"g",
"=",
"set",
"(",
"i",
"for",
"i",
",",
"_",
"in",
"mol",
".",
"atoms_iter",
"(",
")",
")",
"bccs",
"=",
"{",
"}",
"# BiConnected Components",
"isoc",
"=",
"[",
"]",
"# ISOlated Components",
"while",
"g",
":",
"start",
"=",
"g",
".",
"pop",
"(",
")",
"stack",
"=",
"[",
"start",
"]",
"pred",
"=",
"{",
"start",
":",
"start",
"}",
"used",
"=",
"{",
"start",
":",
"set",
"(",
")",
"}",
"root",
"=",
"{",
"start",
":",
"start",
"}",
"while",
"stack",
":",
"tail",
"=",
"stack",
".",
"pop",
"(",
")",
"for",
"nbr",
"in",
"mol",
".",
"neighbors",
"(",
"tail",
")",
":",
"if",
"nbr",
"not",
"in",
"used",
":",
"# New node",
"pred",
"[",
"nbr",
"]",
"=",
"tail",
"stack",
".",
"append",
"(",
"nbr",
")",
"used",
"[",
"nbr",
"]",
"=",
"{",
"tail",
"}",
"root",
"[",
"nbr",
"]",
"=",
"nbr",
"elif",
"nbr",
"in",
"stack",
":",
"# Cycle found",
"pn",
"=",
"used",
"[",
"nbr",
"]",
"cyc",
"=",
"[",
"nbr",
",",
"tail",
"]",
"p",
"=",
"pred",
"[",
"tail",
"]",
"end",
"=",
"pred",
"[",
"nbr",
"]",
"root",
"[",
"nbr",
"]",
"=",
"root",
"[",
"tail",
"]",
"=",
"root",
"[",
"end",
"]",
"while",
"p",
"not",
"in",
"pn",
":",
"# Backtrack",
"cyc",
".",
"append",
"(",
"p",
")",
"root",
"[",
"p",
"]",
"=",
"root",
"[",
"end",
"]",
"if",
"p",
"in",
"bccs",
":",
"# Append scaffold to new cycle",
"if",
"root",
"[",
"end",
"]",
"not",
"in",
"bccs",
":",
"bccs",
"[",
"root",
"[",
"end",
"]",
"]",
"=",
"[",
"]",
"bccs",
"[",
"root",
"[",
"end",
"]",
"]",
".",
"extend",
"(",
"bccs",
"[",
"p",
"]",
")",
"del",
"bccs",
"[",
"p",
"]",
"p",
"=",
"pred",
"[",
"p",
"]",
"cyc",
".",
"append",
"(",
"p",
")",
"if",
"root",
"[",
"end",
"]",
"not",
"in",
"bccs",
":",
"# Append new cycle to scaffold",
"bccs",
"[",
"root",
"[",
"end",
"]",
"]",
"=",
"[",
"]",
"bccs",
"[",
"root",
"[",
"end",
"]",
"]",
".",
"append",
"(",
"cyc",
")",
"used",
"[",
"nbr",
"]",
".",
"add",
"(",
"tail",
")",
"isoc",
".",
"append",
"(",
"list",
"(",
"pred",
".",
"keys",
"(",
")",
")",
")",
"# print(pred)",
"g",
"-=",
"set",
"(",
"pred",
")",
"mol",
".",
"rings",
"=",
"[",
"]",
"mol",
".",
"scaffolds",
"=",
"[",
"]",
"for",
"cycles",
"in",
"bccs",
".",
"values",
"(",
")",
":",
"rcnt",
"=",
"len",
"(",
"mol",
".",
"rings",
")",
"mol",
".",
"rings",
".",
"extend",
"(",
"cycles",
")",
"mol",
".",
"scaffolds",
".",
"append",
"(",
"list",
"(",
"range",
"(",
"rcnt",
",",
"rcnt",
"+",
"len",
"(",
"cycles",
")",
")",
")",
")",
"mol",
".",
"isolated",
"=",
"list",
"(",
"sorted",
"(",
"isoc",
",",
"key",
"=",
"len",
",",
"reverse",
"=",
"True",
")",
")",
"[",
"1",
":",
"]",
"mol",
".",
"descriptors",
".",
"add",
"(",
"\"Topology\"",
")"
] |
Detect cycle basis, biconnected and isolated components (DFS).
This will add the following attributes to the molecule instance object.
mol.rings: Cycle basis
mol.scaffolds: biconnected components
mol.isolated: isolated components other than the largest one
To find minimum set of rings, additionally execute topology.minify_ring.
Reference:
networkx cycle_basis function
|
[
"Detect",
"cycle",
"basis",
"biconnected",
"and",
"isolated",
"components",
"(",
"DFS",
")",
".",
"This",
"will",
"add",
"following",
"attribute",
"to",
"the",
"molecule",
"instance",
"object",
"."
] |
python
|
train
|
tgbugs/pyontutils
|
ilxutils/ilxutils/tools.py
|
https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/tools.py#L11-L49
|
def string_profiler(string, start_delimiter='(', end_delimiter=')', remove=True):
'''
long = '(life is is good) love world "(blah) blah" "here I am" once again "yes" blah '
print(string_profiler(long))
null = ''
print(string_profiler(null))
short = '(life love) yes(and much more)'
print(string_profiler(short))
short = 'yes "life love"'
print(string_profiler(short))
'''
mark = 0
string_list = []
tmp_string = ''
for i in range(len(string)):
curr_index = i + mark
if curr_index == len(string):
break
if string[curr_index] == start_delimiter:
flag = True
else:
flag = False
if flag:
if tmp_string:
string_list.extend(tmp_string.strip().split())
tmp_string = ''
quoted_string = ''
for j in range(curr_index+1, len(string)):
mark += 1
if string[j] == end_delimiter:
break
quoted_string += string[j]
if not remove:
string_list.append(quoted_string)
else:
tmp_string += string[curr_index]
if tmp_string:
string_list.extend(tmp_string.strip().split())
return string_list
|
[
"def",
"string_profiler",
"(",
"string",
",",
"start_delimiter",
"=",
"'('",
",",
"end_delimiter",
"=",
"')'",
",",
"remove",
"=",
"True",
")",
":",
"mark",
"=",
"0",
"string_list",
"=",
"[",
"]",
"tmp_string",
"=",
"''",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"string",
")",
")",
":",
"curr_index",
"=",
"i",
"+",
"mark",
"if",
"curr_index",
"==",
"len",
"(",
"string",
")",
":",
"break",
"if",
"string",
"[",
"curr_index",
"]",
"==",
"start_delimiter",
":",
"flag",
"=",
"True",
"else",
":",
"flag",
"=",
"False",
"if",
"flag",
":",
"if",
"tmp_string",
":",
"string_list",
".",
"extend",
"(",
"tmp_string",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
")",
"tmp_string",
"=",
"''",
"quoted_string",
"=",
"''",
"for",
"j",
"in",
"range",
"(",
"curr_index",
"+",
"1",
",",
"len",
"(",
"string",
")",
")",
":",
"mark",
"+=",
"1",
"if",
"string",
"[",
"j",
"]",
"==",
"end_delimiter",
":",
"break",
"quoted_string",
"+=",
"string",
"[",
"j",
"]",
"if",
"not",
"remove",
":",
"string_list",
".",
"append",
"(",
"quoted_string",
")",
"else",
":",
"tmp_string",
"+=",
"string",
"[",
"curr_index",
"]",
"if",
"tmp_string",
":",
"string_list",
".",
"extend",
"(",
"tmp_string",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
")",
"return",
"string_list"
] |
long = '(life is is good) love world "(blah) blah" "here I am" once again "yes" blah '
print(string_profiler(long))
null = ''
print(string_profiler(null))
short = '(life love) yes(and much more)'
print(string_profiler(short))
short = 'yes "life love"'
print(string_profiler(short))
|
[
"long",
"=",
"(",
"life",
"is",
"is",
"good",
")",
"love",
"world",
"(",
"blah",
")",
"blah",
"here",
"I",
"am",
"once",
"again",
"yes",
"blah",
"print",
"(",
"string_profiler",
"(",
"long",
"))",
"null",
"=",
"print",
"(",
"string_profiler",
"(",
"null",
"))",
"short",
"=",
"(",
"life",
"love",
")",
"yes",
"(",
"and",
"much",
"more",
")",
"print",
"(",
"string_profiler",
"(",
"short",
"))",
"short",
"=",
"yes",
"life",
"love",
"print",
"(",
"string_profiler",
"(",
"short",
"))"
] |
python
|
train
|
soravux/scoop
|
scoop/futures.py
|
https://github.com/soravux/scoop/blob/d391dfa62f47e49d48328ee9cf08aa114256fd33/scoop/futures.py#L282-L305
|
def submit(func, *args, **kwargs):
"""Submit an independent asynchronous :class:`~scoop._types.Future` that will
either run locally or remotely as `func(*args)`.
:param func: Any picklable callable object (function or class object with
*__call__* method); this object will be called to execute the Future.
The callable must return a value.
:param args: A tuple of positional arguments that will be passed to the
func object.
:param kwargs: A dictionary of additional arguments that will be passed to
the func object.
:returns: A future object for retrieving the Future result.
On return, the Future can be pending execution locally but may also be
transfered remotely depending on load or on remote distributed workers. You
may carry on with any further computations while the Future completes.
Result retrieval is made via the :meth:`~scoop._types.Future.result`
function on the Future."""
child = _createFuture(func, *args, **kwargs)
control.futureDict[control.current.id].children[child] = None
control.execQueue.append(child)
return child
|
[
"def",
"submit",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"child",
"=",
"_createFuture",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"control",
".",
"futureDict",
"[",
"control",
".",
"current",
".",
"id",
"]",
".",
"children",
"[",
"child",
"]",
"=",
"None",
"control",
".",
"execQueue",
".",
"append",
"(",
"child",
")",
"return",
"child"
] |
Submit an independent asynchronous :class:`~scoop._types.Future` that will
either run locally or remotely as `func(*args)`.
:param func: Any picklable callable object (function or class object with
*__call__* method); this object will be called to execute the Future.
The callable must return a value.
:param args: A tuple of positional arguments that will be passed to the
func object.
:param kwargs: A dictionary of additional arguments that will be passed to
the func object.
:returns: A future object for retrieving the Future result.
On return, the Future can be pending execution locally but may also be
transferred remotely depending on load or on remote distributed workers. You
may carry on with any further computations while the Future completes.
Result retrieval is made via the :meth:`~scoop._types.Future.result`
function on the Future.
|
[
"Submit",
"an",
"independent",
"asynchronous",
":",
"class",
":",
"~scoop",
".",
"_types",
".",
"Future",
"that",
"will",
"either",
"run",
"locally",
"or",
"remotely",
"as",
"func",
"(",
"*",
"args",
")",
"."
] |
python
|
train
|
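A minimal sketch following scoop's usual entry point (run the script with "python -m scoop script.py"; the worker function is illustrative):

from scoop import futures

def square(x):
    return x * x

if __name__ == "__main__":
    fut = futures.submit(square, 7)  # may execute locally or on a remote worker
    print(fut.result())              # blocks until the Future completes -> 49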
lingthio/Flask-User
|
flask_user/user_manager__utils.py
|
https://github.com/lingthio/Flask-User/blob/a379fa0a281789618c484b459cb41236779b95b1/flask_user/user_manager__utils.py#L36-L44
|
def email_is_available(self, new_email):
"""Check if ``new_email`` is available.
| Returns True if ``new_email`` does not exist or belongs to the current user.
| Returns False otherwise.
"""
user, user_email = self.db_manager.get_user_and_user_email_by_email(new_email)
return (user == None)
|
[
"def",
"email_is_available",
"(",
"self",
",",
"new_email",
")",
":",
"user",
",",
"user_email",
"=",
"self",
".",
"db_manager",
".",
"get_user_and_user_email_by_email",
"(",
"new_email",
")",
"return",
"(",
"user",
"==",
"None",
")"
] |
Check if ``new_email`` is available.
| Returns True if ``new_email`` does not exist or belongs to the current user.
| Returns False otherwise.
|
[
"Check",
"if",
"new_email",
"is",
"available",
"."
] |
python
|
train
|
Kortemme-Lab/klab
|
klab/bio/pymolmod/colors.py
|
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/pymolmod/colors.py#L385-L397
|
def update(self, path, node):
'''Update the dict with a new color using a 'path' through the dict. You can either pass an existing path e.g.
'Scaffold.mutations' to override a color or part of the hierarchy or you can add a new leaf node or dict.'''
assert(type(path) == type(self.name))
assert(type(node) == type(self.name) or type(node) == type(predefined))
d = self.color_scheme
tokens = path.split('.')
for t in tokens[:-1]:
d = d.get(t)
if d == None:
raise Exception("Path '%s' not found.")
d[tokens[-1]] = node
|
[
"def",
"update",
"(",
"self",
",",
"path",
",",
"node",
")",
":",
"assert",
"(",
"type",
"(",
"path",
")",
"==",
"type",
"(",
"self",
".",
"name",
")",
")",
"assert",
"(",
"type",
"(",
"node",
")",
"==",
"type",
"(",
"self",
".",
"name",
")",
"or",
"type",
"(",
"node",
")",
"==",
"type",
"(",
"predefined",
")",
")",
"d",
"=",
"self",
".",
"color_scheme",
"tokens",
"=",
"path",
".",
"split",
"(",
"'.'",
")",
"for",
"t",
"in",
"tokens",
"[",
":",
"-",
"1",
"]",
":",
"d",
"=",
"d",
".",
"get",
"(",
"t",
")",
"if",
"d",
"==",
"None",
":",
"raise",
"Exception",
"(",
"\"Path '%s' not found.\"",
")",
"d",
"[",
"tokens",
"[",
"-",
"1",
"]",
"]",
"=",
"node"
] |
Update the dict with a new color using a 'path' through the dict. You can either pass an existing path e.g.
'Scaffold.mutations' to override a color or part of the hierarchy or you can add a new leaf node or dict.
|
[
"Update",
"the",
"dict",
"with",
"a",
"new",
"color",
"using",
"a",
"path",
"through",
"the",
"dict",
".",
"You",
"can",
"either",
"pass",
"an",
"existing",
"path",
"e",
".",
"g",
".",
"Scaffold",
".",
"mutations",
"to",
"override",
"a",
"color",
"or",
"part",
"of",
"the",
"hierarchy",
"or",
"you",
"can",
"add",
"a",
"new",
"leaf",
"node",
"or",
"dict",
"."
] |
python
|
train
|
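A self-contained illustration of the dotted-path update above (a simplified stand-in, not klab's class):

scheme = {'Scaffold': {'mutations': 'yellow', 'backbone': 'grey'}}

def update(d, path, node):
    # walk down the hierarchy, then assign the leaf
    tokens = path.split('.')
    for t in tokens[:-1]:
        d = d[t]
    d[tokens[-1]] = node

update(scheme, 'Scaffold.mutations', 'red')
print(scheme)  # {'Scaffold': {'mutations': 'red', 'backbone': 'grey'}}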
QualiSystems/vCenterShell
|
package/cloudshell/cp/vcenter/commands/command_orchestrator.py
|
https://github.com/QualiSystems/vCenterShell/blob/e2e24cd938a92a68f4a8e6a860810d3ef72aae6d/package/cloudshell/cp/vcenter/commands/command_orchestrator.py#L358-L372
|
def DeleteInstance(self, context, ports):
"""
Destroy Vm Command, will only destroy the vm and will not remove the resource
:param models.QualiDriverModels.ResourceRemoteCommandContext context: the context the command runs on
:param list[string] ports: the ports of the connection between the remote resource and the local resource, NOT IN USE!!!
"""
resource_details = self._parse_remote_model(context)
# execute command
res = self.command_wrapper.execute_command_with_connection(
context,
self.destroy_virtual_machine_command.DeleteInstance,
resource_details.vm_uuid,
resource_details.fullname)
return set_command_result(result=res, unpicklable=False)
|
[
"def",
"DeleteInstance",
"(",
"self",
",",
"context",
",",
"ports",
")",
":",
"resource_details",
"=",
"self",
".",
"_parse_remote_model",
"(",
"context",
")",
"# execute command",
"res",
"=",
"self",
".",
"command_wrapper",
".",
"execute_command_with_connection",
"(",
"context",
",",
"self",
".",
"destroy_virtual_machine_command",
".",
"DeleteInstance",
",",
"resource_details",
".",
"vm_uuid",
",",
"resource_details",
".",
"fullname",
")",
"return",
"set_command_result",
"(",
"result",
"=",
"res",
",",
"unpicklable",
"=",
"False",
")"
] |
Destroy Vm Command, will only destroy the vm and will not remove the resource
:param models.QualiDriverModels.ResourceRemoteCommandContext context: the context the command runs on
:param list[string] ports: the ports of the connection between the remote resource and the local resource, NOT IN USE!!!
|
[
"Destroy",
"Vm",
"Command",
"will",
"only",
"destroy",
"the",
"vm",
"and",
"will",
"not",
"remove",
"the",
"resource"
] |
python
|
train
|
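The command above is pure delegation: parse the remote context, hand the destroy command plus VM details to a connection wrapper, and serialize the result. A self-contained stand-in of that shape (all classes here are fakes, not the CloudShell API):

from types import SimpleNamespace

class FakeCommandWrapper:
    def execute_command_with_connection(self, context, command, *args):
        # the real wrapper opens a vCenter connection first; this one just calls
        return command(*args)

def delete_instance(vm_uuid, fullname):
    return 'destroyed %s (%s)' % (fullname, vm_uuid)

details = SimpleNamespace(vm_uuid='42-ab-cd', fullname='Folder/web-01')
wrapper = FakeCommandWrapper()
print(wrapper.execute_command_with_connection(None, delete_instance,
                                              details.vm_uuid, details.fullname))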
tdgunes/pyplyn
|
pyplyn/pyp.py
|
https://github.com/tdgunes/pyplyn/blob/4b390e9b6137432d5e3db17436fc32d2238d1eff/pyplyn/pyp.py#L32-L43
|
def make_assertions(input_pipe, other_pipes, output_pipe):
"""
To assure that the pipe is correctly settled
:param input_pipe:
:param other_pipes: can be []
:param output_pipe:
:return:
"""
assert isinstance(input_pipe, elements.InPypElement), 'Wrong input element type, want a InPypElement!'
assert isinstance(output_pipe, elements.OutPypElement), 'Wrong output element type, want a OutPypElement!'
for other_pipe in other_pipes:
assert isinstance(other_pipe, elements.MidPypElement), 'Wrong middle element type, want a MidPypElement!'
|
[
"def",
"make_assertions",
"(",
"input_pipe",
",",
"other_pipes",
",",
"output_pipe",
")",
":",
"assert",
"isinstance",
"(",
"input_pipe",
",",
"elements",
".",
"InPypElement",
")",
",",
"'Wrong input element type, want a InPypElement!'",
"assert",
"isinstance",
"(",
"output_pipe",
",",
"elements",
".",
"OutPypElement",
")",
",",
"'Wrong output element type, want a OutPypElement!'",
"for",
"other_pipe",
"in",
"other_pipes",
":",
"assert",
"isinstance",
"(",
"other_pipe",
",",
"elements",
".",
"MidPypElement",
")",
",",
"'Wrong middle element type, want a MidPypElement!'"
] |
To assure that the pipe is correctly settled
:param input_pipe:
:param other_pipes: can be []
:param output_pipe:
:return:
|
[
"To",
"assure",
"that",
"the",
"pipe",
"is",
"correctly",
"settled",
":",
"param",
"input_pipe",
":",
":",
"param",
"other_pipes",
":",
"can",
"be",
"[]",
":",
"param",
"output_pipe",
":",
":",
"return",
":"
] |
python
|
train
|
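A runnable version of the same type-checking guard, with stub element classes standing in for pyplyn's real ones:

class InPypElement: pass
class MidPypElement: pass
class OutPypElement: pass

def make_assertions(input_pipe, other_pipes, output_pipe):
    assert isinstance(input_pipe, InPypElement), 'Wrong input element type, want a InPypElement!'
    assert isinstance(output_pipe, OutPypElement), 'Wrong output element type, want a OutPypElement!'
    for other_pipe in other_pipes:
        assert isinstance(other_pipe, MidPypElement), 'Wrong middle element type, want a MidPypElement!'

# a correctly settled pipe passes silently
make_assertions(InPypElement(), [MidPypElement()], OutPypElement())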
PredixDev/predixpy
|
predix/admin/blobstore.py
|
https://github.com/PredixDev/predixpy/blob/a0cb34cf40f716229351bb6d90d6ecace958c81f/predix/admin/blobstore.py#L46-L75
|
def add_to_manifest(self, manifest):
"""
Add useful details to the manifest about this service
so that it can be used in an application.
:param manifest: An predix.admin.app.Manifest object
instance that manages reading/writing manifest config
for a cloud foundry app.
"""
# Add this service to the list of services
manifest.add_service(self.service.name)
# Add environment variables
url = predix.config.get_env_key(self.use_class, 'url')
manifest.add_env_var(url, self.service.settings.data['url'])
akid = predix.config.get_env_key(self.use_class, 'access_key_id')
manifest.add_env_var(akid, self.service.settings.data['access_key_id'])
bucket = predix.config.get_env_key(self.use_class, 'bucket_name')
manifest.add_env_var(bucket, self.service.settings.data['bucket_name'])
host = predix.config.get_env_key(self.use_class, 'host')
manifest.add_env_var(host, self.service.settings.data['host'])
secret_access_key = predix.config.get_env_key(self.use_class, 'secret_access_key')
manifest.add_env_var(secret_access_key, self.service.settings.data['secret_access_key'])
manifest.write_manifest()
|
[
"def",
"add_to_manifest",
"(",
"self",
",",
"manifest",
")",
":",
"# Add this service to the list of services",
"manifest",
".",
"add_service",
"(",
"self",
".",
"service",
".",
"name",
")",
"# Add environment variables",
"url",
"=",
"predix",
".",
"config",
".",
"get_env_key",
"(",
"self",
".",
"use_class",
",",
"'url'",
")",
"manifest",
".",
"add_env_var",
"(",
"url",
",",
"self",
".",
"service",
".",
"settings",
".",
"data",
"[",
"'url'",
"]",
")",
"akid",
"=",
"predix",
".",
"config",
".",
"get_env_key",
"(",
"self",
".",
"use_class",
",",
"'access_key_id'",
")",
"manifest",
".",
"add_env_var",
"(",
"akid",
",",
"self",
".",
"service",
".",
"settings",
".",
"data",
"[",
"'access_key_id'",
"]",
")",
"bucket",
"=",
"predix",
".",
"config",
".",
"get_env_key",
"(",
"self",
".",
"use_class",
",",
"'bucket_name'",
")",
"manifest",
".",
"add_env_var",
"(",
"bucket",
",",
"self",
".",
"service",
".",
"settings",
".",
"data",
"[",
"'bucket_name'",
"]",
")",
"host",
"=",
"predix",
".",
"config",
".",
"get_env_key",
"(",
"self",
".",
"use_class",
",",
"'host'",
")",
"manifest",
".",
"add_env_var",
"(",
"host",
",",
"self",
".",
"service",
".",
"settings",
".",
"data",
"[",
"'host'",
"]",
")",
"secret_access_key",
"=",
"predix",
".",
"config",
".",
"get_env_key",
"(",
"self",
".",
"use_class",
",",
"'secret_access_key'",
")",
"manifest",
".",
"add_env_var",
"(",
"secret_access_key",
",",
"self",
".",
"service",
".",
"settings",
".",
"data",
"[",
"'secret_access_key'",
"]",
")",
"manifest",
".",
"write_manifest",
"(",
")"
] |
Add useful details to the manifest about this service
so that it can be used in an application.
:param manifest: An predix.admin.app.Manifest object
instance that manages reading/writing manifest config
for a cloud foundry app.
|
[
"Add",
"useful",
"details",
"to",
"the",
"manifest",
"about",
"this",
"service",
"so",
"that",
"it",
"can",
"be",
"used",
"in",
"an",
"application",
"."
] |
python
|
train
|
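The method above repeats one move per setting: derive an environment-variable name for the service class, then copy the value out of the service settings. A condensed sketch of that move; the uppercase-prefix naming rule below is an assumption for illustration, not what predix.config.get_env_key actually returns:

settings = {'url': 'https://blobstore.example.com',
            'access_key_id': 'AKIA-EXAMPLE',
            'bucket_name': 'my-bucket'}

def env_key(prefix, key):
    # hypothetical naming rule: PREFIX_KEY in uppercase
    return ('%s_%s' % (prefix, key)).upper()

manifest_env = {env_key('blobstore', k): v for k, v in settings.items()}
assert manifest_env['BLOBSTORE_BUCKET_NAME'] == 'my-bucket'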
gem/oq-engine
|
openquake/hazardlib/geo/surface/base.py
|
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/geo/surface/base.py#L377-L414
|
def get_hypo_location(self, mesh_spacing, hypo_loc=None):
"""
The method determines the location of the hypocentre within the rupture
:param mesh:
:class:`~openquake.hazardlib.geo.mesh.Mesh` of points
:param mesh_spacing:
The desired distance between two adjacent points in source's
ruptures' mesh, in km. Mainly this parameter allows to balance
the trade-off between time needed to compute the distance
between the rupture surface and a site and the precision of that
computation.
:param hypo_loc:
Hypocentre location as fraction of rupture plane, as a tuple of
(Along Strike, Down Dip), e.g. a hypocentre located in the centroid
of the rupture would be input as (0.5, 0.5), whereas a
hypocentre located in a position 3/4 along the length, and 1/4 of
the way down dip of the rupture plane would be entered as
(0.75, 0.25).
:returns:
Hypocentre location as instance of
:class:`~openquake.hazardlib.geo.point.Point`
"""
mesh = self.mesh
centroid = mesh.get_middle_point()
if hypo_loc is None:
return centroid
total_len_y = (len(mesh.depths) - 1) * mesh_spacing
y_distance = hypo_loc[1] * total_len_y
y_node = int(numpy.round(y_distance / mesh_spacing))
total_len_x = (len(mesh.lons[y_node]) - 1) * mesh_spacing
x_distance = hypo_loc[0] * total_len_x
x_node = int(numpy.round(x_distance / mesh_spacing))
hypocentre = Point(mesh.lons[y_node][x_node],
mesh.lats[y_node][x_node],
mesh.depths[y_node][x_node])
return hypocentre
|
[
"def",
"get_hypo_location",
"(",
"self",
",",
"mesh_spacing",
",",
"hypo_loc",
"=",
"None",
")",
":",
"mesh",
"=",
"self",
".",
"mesh",
"centroid",
"=",
"mesh",
".",
"get_middle_point",
"(",
")",
"if",
"hypo_loc",
"is",
"None",
":",
"return",
"centroid",
"total_len_y",
"=",
"(",
"len",
"(",
"mesh",
".",
"depths",
")",
"-",
"1",
")",
"*",
"mesh_spacing",
"y_distance",
"=",
"hypo_loc",
"[",
"1",
"]",
"*",
"total_len_y",
"y_node",
"=",
"int",
"(",
"numpy",
".",
"round",
"(",
"y_distance",
"/",
"mesh_spacing",
")",
")",
"total_len_x",
"=",
"(",
"len",
"(",
"mesh",
".",
"lons",
"[",
"y_node",
"]",
")",
"-",
"1",
")",
"*",
"mesh_spacing",
"x_distance",
"=",
"hypo_loc",
"[",
"0",
"]",
"*",
"total_len_x",
"x_node",
"=",
"int",
"(",
"numpy",
".",
"round",
"(",
"x_distance",
"/",
"mesh_spacing",
")",
")",
"hypocentre",
"=",
"Point",
"(",
"mesh",
".",
"lons",
"[",
"y_node",
"]",
"[",
"x_node",
"]",
",",
"mesh",
".",
"lats",
"[",
"y_node",
"]",
"[",
"x_node",
"]",
",",
"mesh",
".",
"depths",
"[",
"y_node",
"]",
"[",
"x_node",
"]",
")",
"return",
"hypocentre"
] |
The method determines the location of the hypocentre within the rupture
:param mesh:
:class:`~openquake.hazardlib.geo.mesh.Mesh` of points
:param mesh_spacing:
The desired distance between two adjacent points in source's
ruptures' mesh, in km. Mainly this parameter allows to balance
the trade-off between time needed to compute the distance
between the rupture surface and a site and the precision of that
computation.
:param hypo_loc:
Hypocentre location as fraction of rupture plane, as a tuple of
(Along Strike, Down Dip), e.g. a hypocentre located in the centroid
of the rupture would be input as (0.5, 0.5), whereas a
hypocentre located in a position 3/4 along the length, and 1/4 of
the way down dip of the rupture plane would be entered as
(0.75, 0.25).
:returns:
Hypocentre location as instance of
:class:`~openquake.hazardlib.geo.point.Point`
|
[
"The",
"method",
"determines",
"the",
"location",
"of",
"the",
"hypocentre",
"within",
"the",
"rupture"
] |
python
|
train
|
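The index arithmetic in get_hypo_location is easy to check in isolation: a fractional (along-strike, down-dip) location is scaled by the mesh extent and rounded to the nearest node. With illustrative numbers:

import numpy

mesh_spacing = 2.0          # km between adjacent mesh points
n_rows, n_cols = 11, 21     # mesh dimensions (depth rows x along-strike columns)
hypo_loc = (0.75, 0.25)     # 3/4 along strike, 1/4 down dip

total_len_y = (n_rows - 1) * mesh_spacing
y_node = int(numpy.round(hypo_loc[1] * total_len_y / mesh_spacing))
total_len_x = (n_cols - 1) * mesh_spacing
x_node = int(numpy.round(hypo_loc[0] * total_len_x / mesh_spacing))
# x_node, y_node == 15, 2: column 15 of 20 is exactly 3/4 along strike;
# row index 2.5 rounds to 2 under numpy's round-half-to-even rule
print(x_node, y_node)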
pazz/urwidtrees
|
urwidtrees/tree.py
|
https://github.com/pazz/urwidtrees/blob/d1fa38ce4f37db00bdfc574b856023b5db4c7ead/urwidtrees/tree.py#L102-L111
|
def next_position(self, pos):
"""returns the next position in depth-first order"""
candidate = None
if pos is not None:
candidate = self.first_child_position(pos)
if candidate is None:
candidate = self.next_sibling_position(pos)
if candidate is None:
candidate = self._next_of_kin(pos)
return candidate
|
[
"def",
"next_position",
"(",
"self",
",",
"pos",
")",
":",
"candidate",
"=",
"None",
"if",
"pos",
"is",
"not",
"None",
":",
"candidate",
"=",
"self",
".",
"first_child_position",
"(",
"pos",
")",
"if",
"candidate",
"is",
"None",
":",
"candidate",
"=",
"self",
".",
"next_sibling_position",
"(",
"pos",
")",
"if",
"candidate",
"is",
"None",
":",
"candidate",
"=",
"self",
".",
"_next_of_kin",
"(",
"pos",
")",
"return",
"candidate"
] |
returns the next position in depth-first order
|
[
"returns",
"the",
"next",
"position",
"in",
"depth",
"-",
"first",
"order"
] |
python
|
train
|
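The traversal above composes three primitives: first child, next sibling, and the nearest ancestor's next sibling (_next_of_kin). A self-contained dict-based tree makes the resulting depth-first order visible:

children = {'root': ['a', 'b'], 'a': ['a1', 'a2'], 'b': [], 'a1': [], 'a2': []}
parent = {'a': 'root', 'b': 'root', 'a1': 'a', 'a2': 'a'}

def first_child(pos):
    kids = children.get(pos, [])
    return kids[0] if kids else None

def next_sibling(pos):
    p = parent.get(pos)
    if p is None:
        return None
    sibs = children[p]
    i = sibs.index(pos)
    return sibs[i + 1] if i + 1 < len(sibs) else None

def next_of_kin(pos):
    # walk upwards until an ancestor has a next sibling
    p = parent.get(pos)
    while p is not None:
        nxt = next_sibling(p)
        if nxt is not None:
            return nxt
        p = parent.get(p)
    return None

def next_position(pos):
    candidate = first_child(pos)
    if candidate is None:
        candidate = next_sibling(pos)
    if candidate is None:
        candidate = next_of_kin(pos)
    return candidate

order, pos = [], 'root'
while pos is not None:
    order.append(pos)
    pos = next_position(pos)
print(order)  # ['root', 'a', 'a1', 'a2', 'b']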
Komnomnomnom/swigibpy
|
swigibpy.py
|
https://github.com/Komnomnomnom/swigibpy/blob/cfd307fdbfaffabc69a2dc037538d7e34a8b8daf/swigibpy.py#L1125-L1127
|
def placeOrder(self, id, contract, order):
"""placeOrder(EClient self, OrderId id, Contract contract, Order order)"""
return _swigibpy.EClient_placeOrder(self, id, contract, order)
|
[
"def",
"placeOrder",
"(",
"self",
",",
"id",
",",
"contract",
",",
"order",
")",
":",
"return",
"_swigibpy",
".",
"EClient_placeOrder",
"(",
"self",
",",
"id",
",",
"contract",
",",
"order",
")"
] |
placeOrder(EClient self, OrderId id, Contract contract, Order order)
|
[
"placeOrder",
"(",
"EClient",
"self",
"OrderId",
"id",
"Contract",
"contract",
"Order",
"order",
")"
] |
python
|
train
|
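This is SWIG-generated glue: every argument is forwarded untouched to the C++ binding. A pure-Python stand-in of the same shape, requiring no IB gateway or _swigibpy module:

class _backend:
    @staticmethod
    def EClient_placeOrder(client, order_id, contract, order):
        return ('placed', order_id, contract, order)

class EClient:
    def placeOrder(self, id, contract, order):
        # thin shim, exactly like the SWIG wrapper above
        return _backend.EClient_placeOrder(self, id, contract, order)

print(EClient().placeOrder(1, 'AAPL', 'BUY 100'))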
sdispater/cleo
|
cleo/commands/command.py
|
https://github.com/sdispater/cleo/blob/cf44ac2eba2d6435516501e47e5521ee2da9115a/cleo/commands/command.py#L284-L288
|
def progress_indicator(self, fmt=None, interval=100, values=None):
"""
Creates a new progress indicator.
"""
return ProgressIndicator(self.io, fmt, interval, values)
|
[
"def",
"progress_indicator",
"(",
"self",
",",
"fmt",
"=",
"None",
",",
"interval",
"=",
"100",
",",
"values",
"=",
"None",
")",
":",
"return",
"ProgressIndicator",
"(",
"self",
".",
"io",
",",
"fmt",
",",
"interval",
",",
"values",
")"
] |
Creates a new progress indicator.
|
[
"Creates",
"a",
"new",
"progress",
"indicator",
"."
] |
python
|
train
|
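A stub showing the factory shape: the command holds the IO object and passes it, along with the optional format, interval, and values, to each new indicator. ProgressIndicator's body below is a stand-in, not cleo's real class:

import sys

class ProgressIndicator:
    def __init__(self, io, fmt=None, interval=100, values=None):
        self.io, self.fmt = io, fmt
        self.interval, self.values = interval, values

class Command:
    def __init__(self, io):
        self.io = io
    def progress_indicator(self, fmt=None, interval=100, values=None):
        return ProgressIndicator(self.io, fmt, interval, values)

indicator = Command(sys.stdout).progress_indicator(interval=250)
assert indicator.interval == 250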
apache/incubator-mxnet
|
python/mxnet/recordio.py
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/recordio.py#L123-L132
|
def close(self):
"""Closes the record file."""
if not self.is_open:
return
if self.writable:
check_call(_LIB.MXRecordIOWriterFree(self.handle))
else:
check_call(_LIB.MXRecordIOReaderFree(self.handle))
self.is_open = False
self.pid = None
|
[
"def",
"close",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_open",
":",
"return",
"if",
"self",
".",
"writable",
":",
"check_call",
"(",
"_LIB",
".",
"MXRecordIOWriterFree",
"(",
"self",
".",
"handle",
")",
")",
"else",
":",
"check_call",
"(",
"_LIB",
".",
"MXRecordIOReaderFree",
"(",
"self",
".",
"handle",
")",
")",
"self",
".",
"is_open",
"=",
"False",
"self",
".",
"pid",
"=",
"None"
] |
Closes the record file.
|
[
"Closes",
"the",
"record",
"file",
"."
] |
python
|
train
|
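The close() above is idempotent: calling it twice is safe because is_open is checked first, so the native handle is never freed twice. A standalone illustration of the same guard:

class Handle:
    def __init__(self):
        self.is_open = True
    def close(self):
        if not self.is_open:
            return
        # ... release the native resource exactly once here ...
        self.is_open = False

h = Handle()
h.close()
h.close()  # no-op on the second call, no double-free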
pandas-dev/pandas
|
pandas/io/stata.py
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/stata.py#L2810-L2835
|
def _write_map(self):
"""Called twice during file write. The first populates the values in
the map with 0s. The second call writes the final map locations when
all blocks have been written."""
if self._map is None:
self._map = OrderedDict((('stata_data', 0),
('map', self._file.tell()),
('variable_types', 0),
('varnames', 0),
('sortlist', 0),
('formats', 0),
('value_label_names', 0),
('variable_labels', 0),
('characteristics', 0),
('data', 0),
('strls', 0),
('value_labels', 0),
('stata_data_close', 0),
('end-of-file', 0)))
# Move to start of map
self._file.seek(self._map['map'])
bio = BytesIO()
for val in self._map.values():
bio.write(struct.pack(self._byteorder + 'Q', val))
bio.seek(0)
self._file.write(self._tag(bio.read(), 'map'))
|
[
"def",
"_write_map",
"(",
"self",
")",
":",
"if",
"self",
".",
"_map",
"is",
"None",
":",
"self",
".",
"_map",
"=",
"OrderedDict",
"(",
"(",
"(",
"'stata_data'",
",",
"0",
")",
",",
"(",
"'map'",
",",
"self",
".",
"_file",
".",
"tell",
"(",
")",
")",
",",
"(",
"'variable_types'",
",",
"0",
")",
",",
"(",
"'varnames'",
",",
"0",
")",
",",
"(",
"'sortlist'",
",",
"0",
")",
",",
"(",
"'formats'",
",",
"0",
")",
",",
"(",
"'value_label_names'",
",",
"0",
")",
",",
"(",
"'variable_labels'",
",",
"0",
")",
",",
"(",
"'characteristics'",
",",
"0",
")",
",",
"(",
"'data'",
",",
"0",
")",
",",
"(",
"'strls'",
",",
"0",
")",
",",
"(",
"'value_labels'",
",",
"0",
")",
",",
"(",
"'stata_data_close'",
",",
"0",
")",
",",
"(",
"'end-of-file'",
",",
"0",
")",
")",
")",
"# Move to start of map",
"self",
".",
"_file",
".",
"seek",
"(",
"self",
".",
"_map",
"[",
"'map'",
"]",
")",
"bio",
"=",
"BytesIO",
"(",
")",
"for",
"val",
"in",
"self",
".",
"_map",
".",
"values",
"(",
")",
":",
"bio",
".",
"write",
"(",
"struct",
".",
"pack",
"(",
"self",
".",
"_byteorder",
"+",
"'Q'",
",",
"val",
")",
")",
"bio",
".",
"seek",
"(",
"0",
")",
"self",
".",
"_file",
".",
"write",
"(",
"self",
".",
"_tag",
"(",
"bio",
".",
"read",
"(",
")",
",",
"'map'",
")",
")"
] |
Called twice during file write. The first populates the values in
the map with 0s. The second call writes the final map locations when
all blocks have been written.
|
[
"Called",
"twice",
"during",
"file",
"write",
".",
"The",
"first",
"populates",
"the",
"values",
"in",
"the",
"map",
"with",
"0s",
".",
"The",
"second",
"call",
"writes",
"the",
"final",
"map",
"locations",
"when",
"all",
"blocks",
"have",
"been",
"written",
"."
] |
python
|
train
|
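The two-pass trick above works because every map entry has a fixed size: each offset is packed as an unsigned 64-bit integer ('Q'), so the map occupies exactly 8 bytes per entry and can be overwritten in place on the second pass. The packing step in isolation ('<' stands in for self._byteorder):

import struct
from io import BytesIO

offsets = {'stata_data': 0, 'map': 109, 'data': 0, 'end-of-file': 0}
bio = BytesIO()
for val in offsets.values():
    bio.write(struct.pack('<Q', val))
bio.seek(0)
assert len(bio.read()) == 8 * len(offsets)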
pinax/pinax-ratings
|
pinax/ratings/templatetags/pinax_ratings_tags.py
|
https://github.com/pinax/pinax-ratings/blob/eca388fea1ccd09ba844ac29a7489e41b64267f5/pinax/ratings/templatetags/pinax_ratings_tags.py#L115-L126
|
def rating_count(obj):
"""
Total amount of users who have submitted a positive rating for this object.
Usage:
{% rating_count obj %}
"""
count = Rating.objects.filter(
object_id=obj.pk,
content_type=ContentType.objects.get_for_model(obj),
).exclude(rating=0).count()
return count
|
[
"def",
"rating_count",
"(",
"obj",
")",
":",
"count",
"=",
"Rating",
".",
"objects",
".",
"filter",
"(",
"object_id",
"=",
"obj",
".",
"pk",
",",
"content_type",
"=",
"ContentType",
".",
"objects",
".",
"get_for_model",
"(",
"obj",
")",
",",
")",
".",
"exclude",
"(",
"rating",
"=",
"0",
")",
".",
"count",
"(",
")",
"return",
"count"
] |
Total amount of users who have submitted a positive rating for this object.
Usage:
{% rating_count obj %}
|
[
"Total",
"amount",
"of",
"users",
"who",
"have",
"submitted",
"a",
"positive",
"rating",
"for",
"this",
"object",
"."
] |
python
|
train
|
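A plain-Python analog of the query above: count the ratings attached to one object while excluding zero ("cleared") ratings. The Rating rows are faked as dicts, so no Django setup is needed:

ratings = [
    {'object_id': 7, 'rating': 4},
    {'object_id': 7, 'rating': 0},   # a cleared rating, excluded like .exclude(rating=0)
    {'object_id': 9, 'rating': 5},   # different object
]
count = sum(1 for r in ratings if r['object_id'] == 7 and r['rating'] != 0)
assert count == 1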
dslackw/slpkg
|
slpkg/repolist.py
|
https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/repolist.py#L40-L76
|
def repos(self):
"""View or enabled or disabled repositories
"""
def_cnt, cus_cnt = 0, 0
print("")
self.msg.template(78)
print("{0}{1}{2}{3}{4}{5}{6}".format(
"| Repo id", " " * 2,
"Repo URL", " " * 44,
"Default", " " * 3,
"Status"))
self.msg.template(78)
for repo_id, repo_URL in sorted(self.all_repos.iteritems()):
status, COLOR = "disabled", self.meta.color["RED"]
default = "yes"
if len(repo_URL) > 49:
repo_URL = repo_URL[:48] + "~"
if repo_id in self.meta.repositories:
def_cnt += 1
status, COLOR = "enabled", self.meta.color["GREEN"]
if repo_id not in self.meta.default_repositories:
cus_cnt += 1
default = "no"
print(" {0}{1}{2}{3}{4}{5}{6}{7:>8}{8}".format(
repo_id, " " * (9 - len(repo_id)),
repo_URL, " " * (52 - len(repo_URL)),
default, " " * (8 - len(default)),
COLOR, status, self.meta.color["ENDC"]))
print("\nRepositories summary")
print("=" * 79)
print("{0}{1}/{2} enabled default repositories and {3} custom.".format(
self.meta.color["GREY"], def_cnt, len(self.all_repos), cus_cnt))
print("Edit the file '/etc/slpkg/repositories.conf' for enable "
"and disable default\nrepositories or run 'slpkg "
"repo-enable' command.\n{0}".format(self.meta.color["ENDC"]))
raise SystemExit()
|
[
"def",
"repos",
"(",
"self",
")",
":",
"def_cnt",
",",
"cus_cnt",
"=",
"0",
",",
"0",
"print",
"(",
"\"\"",
")",
"self",
".",
"msg",
".",
"template",
"(",
"78",
")",
"print",
"(",
"\"{0}{1}{2}{3}{4}{5}{6}\"",
".",
"format",
"(",
"\"| Repo id\"",
",",
"\" \"",
"*",
"2",
",",
"\"Repo URL\"",
",",
"\" \"",
"*",
"44",
",",
"\"Default\"",
",",
"\" \"",
"*",
"3",
",",
"\"Status\"",
")",
")",
"self",
".",
"msg",
".",
"template",
"(",
"78",
")",
"for",
"repo_id",
",",
"repo_URL",
"in",
"sorted",
"(",
"self",
".",
"all_repos",
".",
"iteritems",
"(",
")",
")",
":",
"status",
",",
"COLOR",
"=",
"\"disabled\"",
",",
"self",
".",
"meta",
".",
"color",
"[",
"\"RED\"",
"]",
"default",
"=",
"\"yes\"",
"if",
"len",
"(",
"repo_URL",
")",
">",
"49",
":",
"repo_URL",
"=",
"repo_URL",
"[",
":",
"48",
"]",
"+",
"\"~\"",
"if",
"repo_id",
"in",
"self",
".",
"meta",
".",
"repositories",
":",
"def_cnt",
"+=",
"1",
"status",
",",
"COLOR",
"=",
"\"enabled\"",
",",
"self",
".",
"meta",
".",
"color",
"[",
"\"GREEN\"",
"]",
"if",
"repo_id",
"not",
"in",
"self",
".",
"meta",
".",
"default_repositories",
":",
"cus_cnt",
"+=",
"1",
"default",
"=",
"\"no\"",
"print",
"(",
"\" {0}{1}{2}{3}{4}{5}{6}{7:>8}{8}\"",
".",
"format",
"(",
"repo_id",
",",
"\" \"",
"*",
"(",
"9",
"-",
"len",
"(",
"repo_id",
")",
")",
",",
"repo_URL",
",",
"\" \"",
"*",
"(",
"52",
"-",
"len",
"(",
"repo_URL",
")",
")",
",",
"default",
",",
"\" \"",
"*",
"(",
"8",
"-",
"len",
"(",
"default",
")",
")",
",",
"COLOR",
",",
"status",
",",
"self",
".",
"meta",
".",
"color",
"[",
"\"ENDC\"",
"]",
")",
")",
"print",
"(",
"\"\\nRepositories summary\"",
")",
"print",
"(",
"\"=\"",
"*",
"79",
")",
"print",
"(",
"\"{0}{1}/{2} enabled default repositories and {3} custom.\"",
".",
"format",
"(",
"self",
".",
"meta",
".",
"color",
"[",
"\"GREY\"",
"]",
",",
"def_cnt",
",",
"len",
"(",
"self",
".",
"all_repos",
")",
",",
"cus_cnt",
")",
")",
"print",
"(",
"\"Edit the file '/etc/slpkg/repositories.conf' for enable \"",
"\"and disable default\\nrepositories or run 'slpkg \"",
"\"repo-enable' command.\\n{0}\"",
".",
"format",
"(",
"self",
".",
"meta",
".",
"color",
"[",
"\"ENDC\"",
"]",
")",
")",
"raise",
"SystemExit",
"(",
")"
] |
View or enabled or disabled repositories
|
[
"View",
"or",
"enabled",
"or",
"disabled",
"repositories"
] |
python
|
train
|
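Note the method is Python 2 code (dict.iteritems). The table itself is built by manual padding: each cell is followed by enough spaces to reach a fixed column width (9, 52, 8). The alignment trick in isolation, with illustrative repository rows:

rows = [('sbo', 'http://slackbuilds.org/slackbuilds/', 'enabled'),
        ('alien', 'http://bear.alienbase.nl/mirrors/alien/', 'disabled')]
for repo_id, repo_URL, status in rows:
    print('{0}{1}{2}{3}{4}'.format(
        repo_id, ' ' * (9 - len(repo_id)),
        repo_URL, ' ' * (52 - len(repo_URL)),
        status))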
JukeboxPipeline/jukeboxmaya
|
src/jukeboxmaya/launcher.py
|
https://github.com/JukeboxPipeline/jukeboxmaya/blob/c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c/src/jukeboxmaya/launcher.py#L58-L68
|
def setup_launch_parser(self, parser):
"""Setup the given parser for the launch command
:param parser: the argument parser to setup
:type parser: :class:`argparse.ArgumentParser`
:returns: None
:rtype: None
:raises: None
"""
parser.set_defaults(func=self.launch)
parser.add_argument("addon", help="The jukebox addon to launch. The addon should be a standalone plugin.")
|
[
"def",
"setup_launch_parser",
"(",
"self",
",",
"parser",
")",
":",
"parser",
".",
"set_defaults",
"(",
"func",
"=",
"self",
".",
"launch",
")",
"parser",
".",
"add_argument",
"(",
"\"addon\"",
",",
"help",
"=",
"\"The jukebox addon to launch. The addon should be a standalone plugin.\"",
")"
] |
Setup the given parser for the launch command
:param parser: the argument parser to setup
:type parser: :class:`argparse.ArgumentParser`
:returns: None
:rtype: None
:raises: None
|
[
"Setup",
"the",
"given",
"parser",
"for",
"the",
"launch",
"command"
] |
python
|
train
|
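A self-contained argparse example of the same setup: bind the handler with set_defaults(func=...) and declare the positional addon argument, then dispatch through args.func:

import argparse

def launch(args):
    print('launching', args.addon)

parser = argparse.ArgumentParser()
sub = parser.add_subparsers()
launch_parser = sub.add_parser('launch')
# mirrors setup_launch_parser: bind the handler and declare the addon argument
launch_parser.set_defaults(func=launch)
launch_parser.add_argument('addon', help='The jukebox addon to launch.')

args = parser.parse_args(['launch', 'myaddon'])
args.func(args)  # prints: launching myaddon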
Alignak-monitoring/alignak
|
alignak/contactdowntime.py
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/contactdowntime.py#L123-L131
|
def exit(self, contacts):
"""Wrapper to call raise_exit_downtime_log_entry for ref (host/service)
set can_be_deleted to True
:return: None
"""
contact = contacts[self.ref]
contact.raise_exit_downtime_log_entry()
self.can_be_deleted = True
|
[
"def",
"exit",
"(",
"self",
",",
"contacts",
")",
":",
"contact",
"=",
"contacts",
"[",
"self",
".",
"ref",
"]",
"contact",
".",
"raise_exit_downtime_log_entry",
"(",
")",
"self",
".",
"can_be_deleted",
"=",
"True"
] |
Wrapper to call raise_exit_downtime_log_entry for ref (host/service)
set can_be_deleted to True
:return: None
|
[
"Wrapper",
"to",
"call",
"raise_exit_downtime_log_entry",
"for",
"ref",
"(",
"host",
"/",
"service",
")",
"set",
"can_be_deleted",
"to",
"True"
] |
python
|
train
|
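A self-contained sketch of the wrapper above: look the contact up by the stored reference, emit the exit-downtime log entry, and mark the downtime deletable. The classes are stubs, not Alignak's real objects:

class Contact:
    def raise_exit_downtime_log_entry(self):
        print('contact exited downtime')

class ContactDowntime:
    def __init__(self, ref):
        self.ref = ref
        self.can_be_deleted = False
    def exit(self, contacts):
        contacts[self.ref].raise_exit_downtime_log_entry()
        self.can_be_deleted = True

dt = ContactDowntime('c1')
dt.exit({'c1': Contact()})
assert dt.can_be_deleted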