repo (string, 7-54 chars) | path (string, 4-192 chars) | url (string, 87-284 chars) | code (string, 78-104k chars) | code_tokens (sequence) | docstring (string, 1-46.9k chars) | docstring_tokens (sequence) | language (string, 1 distinct value) | partition (string, 3 distinct values) |
---|---|---|---|---|---|---|---|---|
openspending/babbage | babbage/cube.py | https://github.com/openspending/babbage/blob/9e03efe62e0be0cceabafd4de2a09cb8ec794b92/babbage/cube.py#L182-L188 | def compute_cardinalities(self):
""" This will count the number of distinct values for each dimension in
the dataset and add that count to the model so that it can be used as a
hint by UI components. """
for dimension in self.model.dimensions:
result = self.members(dimension.ref, page_size=0)
dimension.spec['cardinality'] = result.get('total_member_count') | [
"def",
"compute_cardinalities",
"(",
"self",
")",
":",
"for",
"dimension",
"in",
"self",
".",
"model",
".",
"dimensions",
":",
"result",
"=",
"self",
".",
"members",
"(",
"dimension",
".",
"ref",
",",
"page_size",
"=",
"0",
")",
"dimension",
".",
"spec",
"[",
"'cardinality'",
"]",
"=",
"result",
".",
"get",
"(",
"'total_member_count'",
")"
] | This will count the number of distinct values for each dimension in
the dataset and add that count to the model so that it can be used as a
hint by UI components. | [
"This",
"will",
"count",
"the",
"number",
"of",
"distinct",
"values",
"for",
"each",
"dimension",
"in",
"the",
"dataset",
"and",
"add",
"that",
"count",
"to",
"the",
"model",
"so",
"that",
"it",
"can",
"be",
"used",
"as",
"a",
"hint",
"by",
"UI",
"components",
"."
] | python | train |
linkedin/naarad | src/naarad/__init__.py | https://github.com/linkedin/naarad/blob/261e2c0760fd6a6b0ee59064180bd8e3674311fe/src/naarad/__init__.py#L224-L266 | def analyze(self, input_directory, output_directory, **kwargs):
"""
Run all the analysis saved in self._analyses, sorted by test_id.
This is useful when Naarad() is used by other programs and multiple analyses are run
In naarad CLI mode, len(_analyses) == 1
:param: input_directory: location of log files
:param: output_directory: root directory for analysis output
:param: **kwargs: Optional keyword args
:return: int: status code.
"""
is_api_call = True
if len(self._analyses) == 0:
if 'config' not in kwargs.keys():
return CONSTANTS.ERROR
self.create_analysis(kwargs['config'])
if 'args' in kwargs:
self._process_args(self._analyses[0], kwargs['args'])
is_api_call = False
error_count = 0
self._input_directory = input_directory
self._output_directory = output_directory
for test_id in sorted(self._analyses.keys()):
# Setup
if not self._analyses[test_id].input_directory:
self._analyses[test_id].input_directory = input_directory
if not self._analyses[test_id].output_directory:
if len(self._analyses) > 1:
self._analyses[test_id].output_directory = os.path.join(output_directory, str(test_id))
else:
self._analyses[test_id].output_directory = output_directory
if('config' in kwargs.keys()) and (not self._analyses[test_id].config):
self._analyses[test_id].config = kwargs['config']
self._create_output_directories(self._analyses[test_id])
# Actually run analysis
self._analyses[test_id].status = self.run(self._analyses[test_id], is_api_call, **kwargs)
if self._analyses[test_id].status != CONSTANTS.OK:
error_count += 1
if len(self._analyses) == 1:
return self._analyses[0].status
elif error_count > 0:
return CONSTANTS.ERROR
else:
return CONSTANTS.OK | [
"def",
"analyze",
"(",
"self",
",",
"input_directory",
",",
"output_directory",
",",
"*",
"*",
"kwargs",
")",
":",
"is_api_call",
"=",
"True",
"if",
"len",
"(",
"self",
".",
"_analyses",
")",
"==",
"0",
":",
"if",
"'config'",
"not",
"in",
"kwargs",
".",
"keys",
"(",
")",
":",
"return",
"CONSTANTS",
".",
"ERROR",
"self",
".",
"create_analysis",
"(",
"kwargs",
"[",
"'config'",
"]",
")",
"if",
"'args'",
"in",
"kwargs",
":",
"self",
".",
"_process_args",
"(",
"self",
".",
"_analyses",
"[",
"0",
"]",
",",
"kwargs",
"[",
"'args'",
"]",
")",
"is_api_call",
"=",
"False",
"error_count",
"=",
"0",
"self",
".",
"_input_directory",
"=",
"input_directory",
"self",
".",
"_output_directory",
"=",
"output_directory",
"for",
"test_id",
"in",
"sorted",
"(",
"self",
".",
"_analyses",
".",
"keys",
"(",
")",
")",
":",
"# Setup",
"if",
"not",
"self",
".",
"_analyses",
"[",
"test_id",
"]",
".",
"input_directory",
":",
"self",
".",
"_analyses",
"[",
"test_id",
"]",
".",
"input_directory",
"=",
"input_directory",
"if",
"not",
"self",
".",
"_analyses",
"[",
"test_id",
"]",
".",
"output_directory",
":",
"if",
"len",
"(",
"self",
".",
"_analyses",
")",
">",
"1",
":",
"self",
".",
"_analyses",
"[",
"test_id",
"]",
".",
"output_directory",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_directory",
",",
"str",
"(",
"test_id",
")",
")",
"else",
":",
"self",
".",
"_analyses",
"[",
"test_id",
"]",
".",
"output_directory",
"=",
"output_directory",
"if",
"(",
"'config'",
"in",
"kwargs",
".",
"keys",
"(",
")",
")",
"and",
"(",
"not",
"self",
".",
"_analyses",
"[",
"test_id",
"]",
".",
"config",
")",
":",
"self",
".",
"_analyses",
"[",
"test_id",
"]",
".",
"config",
"=",
"kwargs",
"[",
"'config'",
"]",
"self",
".",
"_create_output_directories",
"(",
"self",
".",
"_analyses",
"[",
"test_id",
"]",
")",
"# Actually run analysis",
"self",
".",
"_analyses",
"[",
"test_id",
"]",
".",
"status",
"=",
"self",
".",
"run",
"(",
"self",
".",
"_analyses",
"[",
"test_id",
"]",
",",
"is_api_call",
",",
"*",
"*",
"kwargs",
")",
"if",
"self",
".",
"_analyses",
"[",
"test_id",
"]",
".",
"status",
"!=",
"CONSTANTS",
".",
"OK",
":",
"error_count",
"+=",
"1",
"if",
"len",
"(",
"self",
".",
"_analyses",
")",
"==",
"1",
":",
"return",
"self",
".",
"_analyses",
"[",
"0",
"]",
".",
"status",
"elif",
"error_count",
">",
"0",
":",
"return",
"CONSTANTS",
".",
"ERROR",
"else",
":",
"return",
"CONSTANTS",
".",
"OK"
] | Run all the analysis saved in self._analyses, sorted by test_id.
This is useful when Naarad() is used by other programs and multiple analyses are run
In naarad CLI mode, len(_analyses) == 1
:param: input_directory: location of log files
:param: output_directory: root directory for analysis output
:param: **kwargs: Optional keyword args
:return: int: status code. | [
"Run",
"all",
"the",
"analysis",
"saved",
"in",
"self",
".",
"_analyses",
"sorted",
"by",
"test_id",
".",
"This",
"is",
"useful",
"when",
"Naarad",
"()",
"is",
"used",
"by",
"other",
"programs",
"and",
"multiple",
"analyses",
"are",
"run",
"In",
"naarad",
"CLI",
"mode",
"len",
"(",
"_analyses",
")",
"==",
"1",
":",
"param",
":",
"input_directory",
":",
"location",
"of",
"log",
"files",
":",
"param",
":",
"output_directory",
":",
"root",
"directory",
"for",
"analysis",
"output",
":",
"param",
":",
"**",
"kwargs",
":",
"Optional",
"keyword",
"args",
":",
"return",
":",
"int",
":",
"status",
"code",
"."
] | python | valid |
spyder-ide/spyder | spyder/app/mainwindow.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/app/mainwindow.py#L2874-L2902 | def edit_preferences(self):
"""Edit Spyder preferences"""
from spyder.preferences.configdialog import ConfigDialog
dlg = ConfigDialog(self)
dlg.size_change.connect(self.set_prefs_size)
if self.prefs_dialog_size is not None:
dlg.resize(self.prefs_dialog_size)
for PrefPageClass in self.general_prefs:
widget = PrefPageClass(dlg, main=self)
widget.initialize()
dlg.add_page(widget)
for plugin in [self.workingdirectory, self.editor,
self.projects, self.ipyconsole,
self.historylog, self.help, self.variableexplorer,
self.onlinehelp, self.explorer, self.findinfiles
]+self.thirdparty_plugins:
if plugin is not None:
try:
widget = plugin.create_configwidget(dlg)
if widget is not None:
dlg.add_page(widget)
except Exception:
traceback.print_exc(file=sys.stderr)
if self.prefs_index is not None:
dlg.set_current_index(self.prefs_index)
dlg.show()
dlg.check_all_settings()
dlg.pages_widget.currentChanged.connect(self.__preference_page_changed)
dlg.exec_() | [
"def",
"edit_preferences",
"(",
"self",
")",
":",
"from",
"spyder",
".",
"preferences",
".",
"configdialog",
"import",
"ConfigDialog",
"dlg",
"=",
"ConfigDialog",
"(",
"self",
")",
"dlg",
".",
"size_change",
".",
"connect",
"(",
"self",
".",
"set_prefs_size",
")",
"if",
"self",
".",
"prefs_dialog_size",
"is",
"not",
"None",
":",
"dlg",
".",
"resize",
"(",
"self",
".",
"prefs_dialog_size",
")",
"for",
"PrefPageClass",
"in",
"self",
".",
"general_prefs",
":",
"widget",
"=",
"PrefPageClass",
"(",
"dlg",
",",
"main",
"=",
"self",
")",
"widget",
".",
"initialize",
"(",
")",
"dlg",
".",
"add_page",
"(",
"widget",
")",
"for",
"plugin",
"in",
"[",
"self",
".",
"workingdirectory",
",",
"self",
".",
"editor",
",",
"self",
".",
"projects",
",",
"self",
".",
"ipyconsole",
",",
"self",
".",
"historylog",
",",
"self",
".",
"help",
",",
"self",
".",
"variableexplorer",
",",
"self",
".",
"onlinehelp",
",",
"self",
".",
"explorer",
",",
"self",
".",
"findinfiles",
"]",
"+",
"self",
".",
"thirdparty_plugins",
":",
"if",
"plugin",
"is",
"not",
"None",
":",
"try",
":",
"widget",
"=",
"plugin",
".",
"create_configwidget",
"(",
"dlg",
")",
"if",
"widget",
"is",
"not",
"None",
":",
"dlg",
".",
"add_page",
"(",
"widget",
")",
"except",
"Exception",
":",
"traceback",
".",
"print_exc",
"(",
"file",
"=",
"sys",
".",
"stderr",
")",
"if",
"self",
".",
"prefs_index",
"is",
"not",
"None",
":",
"dlg",
".",
"set_current_index",
"(",
"self",
".",
"prefs_index",
")",
"dlg",
".",
"show",
"(",
")",
"dlg",
".",
"check_all_settings",
"(",
")",
"dlg",
".",
"pages_widget",
".",
"currentChanged",
".",
"connect",
"(",
"self",
".",
"__preference_page_changed",
")",
"dlg",
".",
"exec_",
"(",
")"
] | Edit Spyder preferences | [
"Edit",
"Spyder",
"preferences"
] | python | train |
Unidata/siphon | siphon/ncss.py | https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/ncss.py#L334-L345 | def combine_xml_points(l, units, handle_units):
"""Combine multiple Point tags into an array."""
ret = {}
for item in l:
for key, value in item.items():
ret.setdefault(key, []).append(value)
for key, value in ret.items():
if key != 'date':
ret[key] = handle_units(value, units.get(key, None))
return ret | [
"def",
"combine_xml_points",
"(",
"l",
",",
"units",
",",
"handle_units",
")",
":",
"ret",
"=",
"{",
"}",
"for",
"item",
"in",
"l",
":",
"for",
"key",
",",
"value",
"in",
"item",
".",
"items",
"(",
")",
":",
"ret",
".",
"setdefault",
"(",
"key",
",",
"[",
"]",
")",
".",
"append",
"(",
"value",
")",
"for",
"key",
",",
"value",
"in",
"ret",
".",
"items",
"(",
")",
":",
"if",
"key",
"!=",
"'date'",
":",
"ret",
"[",
"key",
"]",
"=",
"handle_units",
"(",
"value",
",",
"units",
".",
"get",
"(",
"key",
",",
"None",
")",
")",
"return",
"ret"
] | Combine multiple Point tags into an array. | [
"Combine",
"multiple",
"Point",
"tags",
"into",
"an",
"array",
"."
] | python | train |
dev-pipeline/dev-pipeline-core | lib/devpipeline_core/env.py | https://github.com/dev-pipeline/dev-pipeline-core/blob/fa40c050a56202485070b0300bb8695e9388c34f/lib/devpipeline_core/env.py#L41-L55 | def create_environment(component_config):
"""
Create a modified environment.
Arguments
component_config - The configuration for a component.
"""
ret = os.environ.copy()
for env in component_config.get_list("dp.env_list"):
real_env = env.upper()
value = os.environ.get(real_env)
value = _prepend_env(component_config, env, value)
value = _append_env(component_config, env, value)
_apply_change(ret, real_env, value, component_config)
return ret | [
"def",
"create_environment",
"(",
"component_config",
")",
":",
"ret",
"=",
"os",
".",
"environ",
".",
"copy",
"(",
")",
"for",
"env",
"in",
"component_config",
".",
"get_list",
"(",
"\"dp.env_list\"",
")",
":",
"real_env",
"=",
"env",
".",
"upper",
"(",
")",
"value",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"real_env",
")",
"value",
"=",
"_prepend_env",
"(",
"component_config",
",",
"env",
",",
"value",
")",
"value",
"=",
"_append_env",
"(",
"component_config",
",",
"env",
",",
"value",
")",
"_apply_change",
"(",
"ret",
",",
"real_env",
",",
"value",
",",
"component_config",
")",
"return",
"ret"
] | Create a modified environment.
Arguments
component_config - The configuration for a component. | [
"Create",
"a",
"modified",
"environment",
"."
] | python | train |
appknox/google-chartwrapper | GChartWrapper/GChart.py | https://github.com/appknox/google-chartwrapper/blob/3769aecbef6c83b6cd93ee72ece478ffe433ac57/GChartWrapper/GChart.py#L396-L406 | def label(self, *args):
"""
Add a simple label to your chart
call each time for each dataset
APIPARAM: chl
"""
if self['cht'] == 'qr':
self['chl'] = ''.join(map(str,args))
else:
self['chl'] = '|'.join(map(str,args))
return self | [
"def",
"label",
"(",
"self",
",",
"*",
"args",
")",
":",
"if",
"self",
"[",
"'cht'",
"]",
"==",
"'qr'",
":",
"self",
"[",
"'chl'",
"]",
"=",
"''",
".",
"join",
"(",
"map",
"(",
"str",
",",
"args",
")",
")",
"else",
":",
"self",
"[",
"'chl'",
"]",
"=",
"'|'",
".",
"join",
"(",
"map",
"(",
"str",
",",
"args",
")",
")",
"return",
"self"
] | Add a simple label to your chart
call each time for each dataset
APIPARAM: chl | [
"Add",
"a",
"simple",
"label",
"to",
"your",
"chart",
"call",
"each",
"time",
"for",
"each",
"dataset",
"APIPARAM",
":",
"chl"
] | python | test |
rstoneback/pysat | pysat/instruments/nasa_cdaweb_methods.py | https://github.com/rstoneback/pysat/blob/4ae1afd80e15e4449397d39dce8c3e969c32c422/pysat/instruments/nasa_cdaweb_methods.py#L15-L85 | def list_files(tag=None, sat_id=None, data_path=None, format_str=None,
               supported_tags=None, fake_daily_files_from_monthly=False,
               two_digit_year_break=None):
    """Return a Pandas Series of every file for chosen satellite data.
    This routine is intended to be used by pysat instrument modules supporting
    a particular NASA CDAWeb dataset.
    Parameters
    -----------
    tag : (string or NoneType)
        Denotes type of file to load. Accepted types are <tag strings>. (default=None)
    sat_id : (string or NoneType)
        Specifies the satellite ID for a constellation. Not used.
        (default=None)
    data_path : (string or NoneType)
        Path to data directory. If None is specified, the value previously
        set in Instrument.files.data_path is used. (default=None)
    format_str : (string or NoneType)
        User specified file format. If None is specified, the default
        formats associated with the supplied tags are used. (default=None)
    supported_tags : (dict or NoneType)
        keys are tags supported by list_files routine. Values are the
        default format_str values for key. (default=None)
    fake_daily_files_from_monthly : bool
        Some CDAWeb instrument data files are stored by month, interfering
        with pysat's functionality of loading by day. This flag, when true,
        appends daily dates to monthly files internally. These dates are
        used by load routine in this module to provide data by day.
    Returns
    --------
    pysat.Files.from_os : (pysat._files.Files)
        A class containing the verified available files
    Examples
    --------
    ::
        fname = 'cnofs_vefi_bfield_1sec_{year:04d}{month:02d}{day:02d}_v05.cdf'
        supported_tags = {'dc_b':fname}
        list_files = functools.partial(nasa_cdaweb_methods.list_files,
                                       supported_tags=supported_tags)
        ivm_fname = 'cnofs_cindi_ivm_500ms_{year:4d}{month:02d}{day:02d}_v01.cdf'
        supported_tags = {'':ivm_fname}
        list_files = functools.partial(cdw.list_files,
                                       supported_tags=supported_tags)
    """
    if data_path is not None:
        if format_str is None:
            try:
                format_str = supported_tags[sat_id][tag]
            except KeyError:
                raise ValueError('Unknown tag')
        out = pysat.Files.from_os(data_path=data_path,
                                  format_str=format_str)
        if (not out.empty) and fake_daily_files_from_monthly:
            out.ix[out.index[-1] + pds.DateOffset(months=1) -
                   pds.DateOffset(days=1)] = out.iloc[-1]
            out = out.asfreq('D', 'pad')
            out = out + '_' + out.index.strftime('%Y-%m-%d')
            return out
        return out
    else:
        estr = 'A directory must be passed to the loading routine for <Instrument Code>'
        raise ValueError (estr) | [
"def",
"list_files",
"(",
"tag",
"=",
"None",
",",
"sat_id",
"=",
"None",
",",
"data_path",
"=",
"None",
",",
"format_str",
"=",
"None",
",",
"supported_tags",
"=",
"None",
",",
"fake_daily_files_from_monthly",
"=",
"False",
",",
"two_digit_year_break",
"=",
"None",
")",
":",
"if",
"data_path",
"is",
"not",
"None",
":",
"if",
"format_str",
"is",
"None",
":",
"try",
":",
"format_str",
"=",
"supported_tags",
"[",
"sat_id",
"]",
"[",
"tag",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"'Unknown tag'",
")",
"out",
"=",
"pysat",
".",
"Files",
".",
"from_os",
"(",
"data_path",
"=",
"data_path",
",",
"format_str",
"=",
"format_str",
")",
"if",
"(",
"not",
"out",
".",
"empty",
")",
"and",
"fake_daily_files_from_monthly",
":",
"out",
".",
"ix",
"[",
"out",
".",
"index",
"[",
"-",
"1",
"]",
"+",
"pds",
".",
"DateOffset",
"(",
"months",
"=",
"1",
")",
"-",
"pds",
".",
"DateOffset",
"(",
"days",
"=",
"1",
")",
"]",
"=",
"out",
".",
"iloc",
"[",
"-",
"1",
"]",
"out",
"=",
"out",
".",
"asfreq",
"(",
"'D'",
",",
"'pad'",
")",
"out",
"=",
"out",
"+",
"'_'",
"+",
"out",
".",
"index",
".",
"strftime",
"(",
"'%Y-%m-%d'",
")",
"return",
"out",
"return",
"out",
"else",
":",
"estr",
"=",
"'A directory must be passed to the loading routine for <Instrument Code>'",
"raise",
"ValueError",
"(",
"estr",
")"
] | Return a Pandas Series of every file for chosen satellite data.
This routine is intended to be used by pysat instrument modules supporting
a particular NASA CDAWeb dataset.
Parameters
-----------
tag : (string or NoneType)
Denotes type of file to load. Accepted types are <tag strings>. (default=None)
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : (string or NoneType)
User specified file format. If None is specified, the default
formats associated with the supplied tags are used. (default=None)
supported_tags : (dict or NoneType)
keys are tags supported by list_files routine. Values are the
default format_str values for key. (default=None)
fake_daily_files_from_monthly : bool
Some CDAWeb instrument data files are stored by month, interfering
with pysat's functionality of loading by day. This flag, when true,
appends daily dates to monthly files internally. These dates are
used by load routine in this module to provide data by day.
Returns
--------
pysat.Files.from_os : (pysat._files.Files)
A class containing the verified available files
Examples
--------
::
fname = 'cnofs_vefi_bfield_1sec_{year:04d}{month:02d}{day:02d}_v05.cdf'
supported_tags = {'dc_b':fname}
list_files = functools.partial(nasa_cdaweb_methods.list_files,
supported_tags=supported_tags)
ivm_fname = 'cnofs_cindi_ivm_500ms_{year:4d}{month:02d}{day:02d}_v01.cdf'
supported_tags = {'':ivm_fname}
list_files = functools.partial(cdw.list_files,
supported_tags=supported_tags) | [
"Return",
"a",
"Pandas",
"Series",
"of",
"every",
"file",
"for",
"chosen",
"satellite",
"data",
".",
"This",
"routine",
"is",
"intended",
"to",
"be",
"used",
"by",
"pysat",
"instrument",
"modules",
"supporting",
"a",
"particular",
"NASA",
"CDAWeb",
"dataset",
"."
] | python | train |
saltstack/salt | salt/utils/aggregation.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/aggregation.py#L188-L199 | def mark(obj, map_class=Map, sequence_class=Sequence):
    '''
    Convert obj into an Aggregate instance
    '''
    if isinstance(obj, Aggregate):
        return obj
    if isinstance(obj, dict):
        return map_class(obj)
    if isinstance(obj, (list, tuple, set)):
        return sequence_class(obj)
    else:
        return sequence_class([obj]) | [
"def",
"mark",
"(",
"obj",
",",
"map_class",
"=",
"Map",
",",
"sequence_class",
"=",
"Sequence",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"Aggregate",
")",
":",
"return",
"obj",
"if",
"isinstance",
"(",
"obj",
",",
"dict",
")",
":",
"return",
"map_class",
"(",
"obj",
")",
"if",
"isinstance",
"(",
"obj",
",",
"(",
"list",
",",
"tuple",
",",
"set",
")",
")",
":",
"return",
"sequence_class",
"(",
"obj",
")",
"else",
":",
"return",
"sequence_class",
"(",
"[",
"obj",
"]",
")"
] | Convert obj into an Aggregate instance | [
"Convert",
"obj",
"into",
"an",
"Aggregate",
"instance"
] | python | train |
djgagne/hagelslag | hagelslag/evaluation/ProbabilityMetrics.py | https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/evaluation/ProbabilityMetrics.py#L148-L153 | def auc(self):
"""
Calculate the Area Under the ROC Curve (AUC).
"""
roc_curve = self.roc_curve()
return np.abs(np.trapz(roc_curve['POD'], x=roc_curve['POFD'])) | [
"def",
"auc",
"(",
"self",
")",
":",
"roc_curve",
"=",
"self",
".",
"roc_curve",
"(",
")",
"return",
"np",
".",
"abs",
"(",
"np",
".",
"trapz",
"(",
"roc_curve",
"[",
"'POD'",
"]",
",",
"x",
"=",
"roc_curve",
"[",
"'POFD'",
"]",
")",
")"
] | Calculate the Area Under the ROC Curve (AUC). | [
"Calculate",
"the",
"Area",
"Under",
"the",
"ROC",
"Curve",
"(",
"AUC",
")",
"."
] | python | train |
artefactual-labs/mets-reader-writer | metsrw/fsentry.py | https://github.com/artefactual-labs/mets-reader-writer/blob/d95939cabdfdc25cb1bf67df0c84bd0d6e6a73ff/metsrw/fsentry.py#L210-L220 | def group_id(self):
"""
Returns the @GROUPID.
If derived_from is set, returns that group_id.
"""
if self.derived_from is not None:
return self.derived_from.group_id()
if self.file_uuid is None:
return None
return utils.GROUP_ID_PREFIX + self.file_uuid | [
"def",
"group_id",
"(",
"self",
")",
":",
"if",
"self",
".",
"derived_from",
"is",
"not",
"None",
":",
"return",
"self",
".",
"derived_from",
".",
"group_id",
"(",
")",
"if",
"self",
".",
"file_uuid",
"is",
"None",
":",
"return",
"None",
"return",
"utils",
".",
"GROUP_ID_PREFIX",
"+",
"self",
".",
"file_uuid"
] | Returns the @GROUPID.
If derived_from is set, returns that group_id. | [
"Returns",
"the",
"@GROUPID",
"."
] | python | train |
apache/incubator-mxnet | python/mxnet/gluon/trainer.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/trainer.py#L272-L286 | def _row_sparse_pull(self, parameter, out, row_id, full_idx=False):
"""Internal method to invoke pull operations on KVStore. If `full_idx` is set to True,
`kv.pull` is preferred instead of `kv.row_sparse_pull`.
"""
# initialize kv and params if not already
if not self._kv_initialized:
self._init_kvstore()
if self._params_to_init:
self._init_params()
idx = self._param2idx[parameter.name]
if full_idx and 'dist' not in self._kvstore.type:
assert row_id.size == out.shape[0]
self._kvstore.pull(idx, out=out, priority=-idx, ignore_sparse=False)
else:
self._kvstore.row_sparse_pull(idx, out=out, row_ids=row_id, priority=-idx) | [
"def",
"_row_sparse_pull",
"(",
"self",
",",
"parameter",
",",
"out",
",",
"row_id",
",",
"full_idx",
"=",
"False",
")",
":",
"# initialize kv and params if not already",
"if",
"not",
"self",
".",
"_kv_initialized",
":",
"self",
".",
"_init_kvstore",
"(",
")",
"if",
"self",
".",
"_params_to_init",
":",
"self",
".",
"_init_params",
"(",
")",
"idx",
"=",
"self",
".",
"_param2idx",
"[",
"parameter",
".",
"name",
"]",
"if",
"full_idx",
"and",
"'dist'",
"not",
"in",
"self",
".",
"_kvstore",
".",
"type",
":",
"assert",
"row_id",
".",
"size",
"==",
"out",
".",
"shape",
"[",
"0",
"]",
"self",
".",
"_kvstore",
".",
"pull",
"(",
"idx",
",",
"out",
"=",
"out",
",",
"priority",
"=",
"-",
"idx",
",",
"ignore_sparse",
"=",
"False",
")",
"else",
":",
"self",
".",
"_kvstore",
".",
"row_sparse_pull",
"(",
"idx",
",",
"out",
"=",
"out",
",",
"row_ids",
"=",
"row_id",
",",
"priority",
"=",
"-",
"idx",
")"
] | Internal method to invoke pull operations on KVStore. If `full_idx` is set to True,
`kv.pull` is preferred instead of `kv.row_sparse_pull`. | [
"Internal",
"method",
"to",
"invoke",
"pull",
"operations",
"on",
"KVStore",
".",
"If",
"full_idx",
"is",
"set",
"to",
"True",
"kv",
".",
"pull",
"is",
"preferred",
"instead",
"of",
"kv",
".",
"row_sparse_pull",
"."
] | python | train |
pdkit/pdkit | pdkit/utils.py | https://github.com/pdkit/pdkit/blob/c7120263da2071bb139815fbdb56ca77b544f340/pdkit/utils.py#L26-L66 | def load_cloudupdrs_data(filename, convert_times=1000000000.0):
"""
This method loads data in the cloudupdrs format
Usually the data will be saved in a csv file and it should look like this:
.. code-block:: json
timestamp_0, x_0, y_0, z_0
timestamp_1, x_1, y_1, z_1
timestamp_2, x_2, y_2, z_2
.
.
.
timestamp_n, x_n, y_n, z_n
where x, y, z are the components of the acceleration
:param filename: The path to load data from
:type filename: string
:param convert_times: Convert times. The default is from from nanoseconds to seconds.
:type convert_times: float
"""
# data_m = pd.read_table(filename, sep=',', header=None)
try:
data_m = np.genfromtxt(filename, delimiter=',', invalid_raise=False)
date_times = pd.to_datetime((data_m[:, 0] - data_m[0, 0]))
time_difference = (data_m[:, 0] - data_m[0, 0]) / convert_times
magnitude_sum_acceleration = \
np.sqrt(data_m[:, 1] ** 2 + data_m[:, 2] ** 2 + data_m[:, 3] ** 2)
data = {'td': time_difference, 'x': data_m[:, 1], 'y': data_m[:, 2], 'z': data_m[:, 3],
'mag_sum_acc': magnitude_sum_acceleration}
data_frame = pd.DataFrame(data, index=date_times, columns=['td', 'x', 'y', 'z', 'mag_sum_acc'])
return data_frame
except IOError as e:
ierr = "({}): {}".format(e.errno, e.strerror)
logging.error("load data, file not found, I/O error %s", ierr)
except ValueError as verr:
logging.error("load data ValueError ->%s", verr.message)
except:
logging.error("Unexpected error on load data method: %s", sys.exc_info()[0]) | [
"def",
"load_cloudupdrs_data",
"(",
"filename",
",",
"convert_times",
"=",
"1000000000.0",
")",
":",
"# data_m = pd.read_table(filename, sep=',', header=None)",
"try",
":",
"data_m",
"=",
"np",
".",
"genfromtxt",
"(",
"filename",
",",
"delimiter",
"=",
"','",
",",
"invalid_raise",
"=",
"False",
")",
"date_times",
"=",
"pd",
".",
"to_datetime",
"(",
"(",
"data_m",
"[",
":",
",",
"0",
"]",
"-",
"data_m",
"[",
"0",
",",
"0",
"]",
")",
")",
"time_difference",
"=",
"(",
"data_m",
"[",
":",
",",
"0",
"]",
"-",
"data_m",
"[",
"0",
",",
"0",
"]",
")",
"/",
"convert_times",
"magnitude_sum_acceleration",
"=",
"np",
".",
"sqrt",
"(",
"data_m",
"[",
":",
",",
"1",
"]",
"**",
"2",
"+",
"data_m",
"[",
":",
",",
"2",
"]",
"**",
"2",
"+",
"data_m",
"[",
":",
",",
"3",
"]",
"**",
"2",
")",
"data",
"=",
"{",
"'td'",
":",
"time_difference",
",",
"'x'",
":",
"data_m",
"[",
":",
",",
"1",
"]",
",",
"'y'",
":",
"data_m",
"[",
":",
",",
"2",
"]",
",",
"'z'",
":",
"data_m",
"[",
":",
",",
"3",
"]",
",",
"'mag_sum_acc'",
":",
"magnitude_sum_acceleration",
"}",
"data_frame",
"=",
"pd",
".",
"DataFrame",
"(",
"data",
",",
"index",
"=",
"date_times",
",",
"columns",
"=",
"[",
"'td'",
",",
"'x'",
",",
"'y'",
",",
"'z'",
",",
"'mag_sum_acc'",
"]",
")",
"return",
"data_frame",
"except",
"IOError",
"as",
"e",
":",
"ierr",
"=",
"\"({}): {}\"",
".",
"format",
"(",
"e",
".",
"errno",
",",
"e",
".",
"strerror",
")",
"logging",
".",
"error",
"(",
"\"load data, file not found, I/O error %s\"",
",",
"ierr",
")",
"except",
"ValueError",
"as",
"verr",
":",
"logging",
".",
"error",
"(",
"\"load data ValueError ->%s\"",
",",
"verr",
".",
"message",
")",
"except",
":",
"logging",
".",
"error",
"(",
"\"Unexpected error on load data method: %s\"",
",",
"sys",
".",
"exc_info",
"(",
")",
"[",
"0",
"]",
")"
] | This method loads data in the cloudupdrs format
Usually the data will be saved in a csv file and it should look like this:
.. code-block:: json
timestamp_0, x_0, y_0, z_0
timestamp_1, x_1, y_1, z_1
timestamp_2, x_2, y_2, z_2
.
.
.
timestamp_n, x_n, y_n, z_n
where x, y, z are the components of the acceleration
:param filename: The path to load data from
:type filename: string
:param convert_times: Convert times. The default is from from nanoseconds to seconds.
:type convert_times: float | [
"This",
"method",
"loads",
"data",
"in",
"the",
"cloudupdrs",
"format",
"Usually",
"the",
"data",
"will",
"be",
"saved",
"in",
"a",
"csv",
"file",
"and",
"it",
"should",
"look",
"like",
"this",
":",
"..",
"code",
"-",
"block",
"::",
"json",
"timestamp_0",
"x_0",
"y_0",
"z_0",
"timestamp_1",
"x_1",
"y_1",
"z_1",
"timestamp_2",
"x_2",
"y_2",
"z_2",
".",
".",
".",
"timestamp_n",
"x_n",
"y_n",
"z_n",
"where",
"x",
"y",
"z",
"are",
"the",
"components",
"of",
"the",
"acceleration"
] | python | train |
user-cont/colin | colin/core/result.py | https://github.com/user-cont/colin/blob/00bb80e6e91522e15361935f813e8cf13d7e76dc/colin/core/result.py#L133-L171 | def generate_pretty_output(self, stat, verbose, output_function, logs=True):
"""
Send the formated to the provided function
:param stat: if True print stat instead of full output
:param verbose: bool
:param output_function: function to send output to
"""
has_check = False
for r in self.results:
has_check = True
if stat:
output_function(OUTPUT_CHARS[r.status],
fg=COLOURS[r.status],
nl=False)
else:
output_function(str(r), fg=COLOURS[r.status])
if verbose:
output_function(" -> {}\n"
" -> {}".format(r.description,
r.reference_url),
fg=COLOURS[r.status])
if logs and r.logs:
output_function(" -> logs:",
fg=COLOURS[r.status])
for l in r.logs:
output_function(" -> {}".format(l),
fg=COLOURS[r.status])
if not has_check:
output_function("No check found.")
elif stat and not verbose:
output_function("")
else:
output_function("")
for status, count in six.iteritems(self.statistics):
output_function("{}:{} ".format(status, count), nl=False)
output_function("") | [
"def",
"generate_pretty_output",
"(",
"self",
",",
"stat",
",",
"verbose",
",",
"output_function",
",",
"logs",
"=",
"True",
")",
":",
"has_check",
"=",
"False",
"for",
"r",
"in",
"self",
".",
"results",
":",
"has_check",
"=",
"True",
"if",
"stat",
":",
"output_function",
"(",
"OUTPUT_CHARS",
"[",
"r",
".",
"status",
"]",
",",
"fg",
"=",
"COLOURS",
"[",
"r",
".",
"status",
"]",
",",
"nl",
"=",
"False",
")",
"else",
":",
"output_function",
"(",
"str",
"(",
"r",
")",
",",
"fg",
"=",
"COLOURS",
"[",
"r",
".",
"status",
"]",
")",
"if",
"verbose",
":",
"output_function",
"(",
"\" -> {}\\n\"",
"\" -> {}\"",
".",
"format",
"(",
"r",
".",
"description",
",",
"r",
".",
"reference_url",
")",
",",
"fg",
"=",
"COLOURS",
"[",
"r",
".",
"status",
"]",
")",
"if",
"logs",
"and",
"r",
".",
"logs",
":",
"output_function",
"(",
"\" -> logs:\"",
",",
"fg",
"=",
"COLOURS",
"[",
"r",
".",
"status",
"]",
")",
"for",
"l",
"in",
"r",
".",
"logs",
":",
"output_function",
"(",
"\" -> {}\"",
".",
"format",
"(",
"l",
")",
",",
"fg",
"=",
"COLOURS",
"[",
"r",
".",
"status",
"]",
")",
"if",
"not",
"has_check",
":",
"output_function",
"(",
"\"No check found.\"",
")",
"elif",
"stat",
"and",
"not",
"verbose",
":",
"output_function",
"(",
"\"\"",
")",
"else",
":",
"output_function",
"(",
"\"\"",
")",
"for",
"status",
",",
"count",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"statistics",
")",
":",
"output_function",
"(",
"\"{}:{} \"",
".",
"format",
"(",
"status",
",",
"count",
")",
",",
"nl",
"=",
"False",
")",
"output_function",
"(",
"\"\"",
")"
] | Send the formated to the provided function
:param stat: if True print stat instead of full output
:param verbose: bool
:param output_function: function to send output to | [
"Send",
"the",
"formated",
"to",
"the",
"provided",
"function"
] | python | train |
Ouranosinc/xclim | xclim/indices.py | https://github.com/Ouranosinc/xclim/blob/2080d139188bd8de2aeca097a025c2d89d6e0e09/xclim/indices.py#L2113-L2159 | def warm_spell_duration_index(tasmax, tx90, window=6, freq='YS'):
r"""Warm spell duration index
Number of days with at least six consecutive days where the daily maximum temperature is above the 90th
percentile. The 90th percentile should be computed for a 5-day window centred on each calendar day in the
1961-1990 period.
Parameters
----------
tasmax : xarray.DataArray
Maximum daily temperature [℃] or [K]
tx90 : float
90th percentile of daily maximum temperature [℃] or [K]
window : int
Minimum number of days with temperature below threshold to qualify as a warm spell.
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Count of days with at least six consecutive days where the daily maximum temperature is above the 90th
percentile [days].
References
----------
From the Expert Team on Climate Change Detection, Monitoring and Indices (ETCCDMI).
Used in Alexander, L. V., et al. (2006), Global observed changes in daily climate extremes of temperature and
precipitation, J. Geophys. Res., 111, D05109, doi: 10.1029/2005JD006290.
"""
if 'dayofyear' not in tx90.coords.keys():
raise AttributeError("tx90 should have dayofyear coordinates.")
# The day of year value of the tasmax series.
doy = tasmax.indexes['time'].dayofyear
# adjustment of tx90 to tasmax doy range
tx90 = utils.adjust_doy_calendar(tx90, tasmax)
# Create an array with the shape and coords of tasmax, but with values set to tx90 according to the doy index.
thresh = xr.full_like(tasmax, np.nan)
thresh.data = tx90.sel(dayofyear=doy)
above = (tasmax > thresh)
return above.resample(time=freq).apply(rl.windowed_run_count, window=window, dim='time') | [
"def",
"warm_spell_duration_index",
"(",
"tasmax",
",",
"tx90",
",",
"window",
"=",
"6",
",",
"freq",
"=",
"'YS'",
")",
":",
"if",
"'dayofyear'",
"not",
"in",
"tx90",
".",
"coords",
".",
"keys",
"(",
")",
":",
"raise",
"AttributeError",
"(",
"\"tx90 should have dayofyear coordinates.\"",
")",
"# The day of year value of the tasmax series.",
"doy",
"=",
"tasmax",
".",
"indexes",
"[",
"'time'",
"]",
".",
"dayofyear",
"# adjustment of tx90 to tasmax doy range",
"tx90",
"=",
"utils",
".",
"adjust_doy_calendar",
"(",
"tx90",
",",
"tasmax",
")",
"# Create an array with the shape and coords of tasmax, but with values set to tx90 according to the doy index.",
"thresh",
"=",
"xr",
".",
"full_like",
"(",
"tasmax",
",",
"np",
".",
"nan",
")",
"thresh",
".",
"data",
"=",
"tx90",
".",
"sel",
"(",
"dayofyear",
"=",
"doy",
")",
"above",
"=",
"(",
"tasmax",
">",
"thresh",
")",
"return",
"above",
".",
"resample",
"(",
"time",
"=",
"freq",
")",
".",
"apply",
"(",
"rl",
".",
"windowed_run_count",
",",
"window",
"=",
"window",
",",
"dim",
"=",
"'time'",
")"
] | r"""Warm spell duration index
Number of days with at least six consecutive days where the daily maximum temperature is above the 90th
percentile. The 90th percentile should be computed for a 5-day window centred on each calendar day in the
1961-1990 period.
Parameters
----------
tasmax : xarray.DataArray
Maximum daily temperature [℃] or [K]
tx90 : float
90th percentile of daily maximum temperature [℃] or [K]
window : int
Minimum number of days with temperature below threshold to qualify as a warm spell.
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Count of days with at least six consecutive days where the daily maximum temperature is above the 90th
percentile [days].
References
----------
From the Expert Team on Climate Change Detection, Monitoring and Indices (ETCCDMI).
Used in Alexander, L. V., et al. (2006), Global observed changes in daily climate extremes of temperature and
precipitation, J. Geophys. Res., 111, D05109, doi: 10.1029/2005JD006290. | [
"r",
"Warm",
"spell",
"duration",
"index"
] | python | train |
raamana/hiwenet | hiwenet/pairwise_dist.py | https://github.com/raamana/hiwenet/blob/b12699b3722fd0a6a835e7d7ca4baf58fb181809/hiwenet/pairwise_dist.py#L498-L554 | def check_weight_method(weight_method_spec,
                        use_orig_distr=False,
                        allow_non_symmetric=False):
    "Check if weight_method is recognized and implemented, or ensure it is callable."
    if not isinstance(use_orig_distr, bool):
        raise TypeError('use_original_distribution flag must be boolean!')
    if not isinstance(allow_non_symmetric, bool):
        raise TypeError('allow_non_symmetric flag must be boolean')
    if isinstance(weight_method_spec, str):
        weight_method_spec = weight_method_spec.lower()
        if weight_method_spec in list_medpy_histogram_metrics:
            from medpy.metric import histogram as medpy_hist_metrics
            weight_func = getattr(medpy_hist_metrics, weight_method_spec)
            if use_orig_distr:
                warnings.warn('use_original_distribution must be False when using builtin histogram metrics, '
                              'which expect histograms as input - setting it to False.', HiwenetWarning)
                use_orig_distr = False
        elif weight_method_spec in metrics_on_original_features:
            weight_func = getattr(more_metrics, weight_method_spec)
            if not use_orig_distr:
                warnings.warn('use_original_distribution must be True when using builtin non-histogram metrics, '
                              'which expect original feature values in ROI/node as input '
                              '- setting it to True.', HiwenetWarning)
                use_orig_distr = True
            if weight_method_spec in symmetric_metrics_on_original_features:
                print('Chosen metric is symmetric. Ignoring asymmetric=False flag.')
                allow_non_symmetric=False
        else:
            raise NotImplementedError('Chosen histogram distance/metric not implemented or invalid.')
    elif callable(weight_method_spec):
        # ensure 1) takes two ndarrays
        try:
            dummy_weight = weight_method_spec(make_random_histogram(), make_random_histogram())
        except:
            raise TypeError('Error applying given callable on two input arrays.\n'
                            '{} must accept two arrays and return a single scalar value!')
        else:
            # and 2) returns only one number
            if not np.isscalar(dummy_weight):
                raise TypeError('Given callable does not return a single scalar as output.')
        weight_func = weight_method_spec
    else:
        raise ValueError('Supplied method to compute edge weight is not recognized:\n'
                         'must be a string identifying one of the implemented methods\n{}'
                         '\n or a valid callable that accepts that two arrays '
                         'and returns 1 scalar.'.format(list_medpy_histogram_metrics))
    return weight_func, use_orig_distr, allow_non_symmetric | [
"def",
"check_weight_method",
"(",
"weight_method_spec",
",",
"use_orig_distr",
"=",
"False",
",",
"allow_non_symmetric",
"=",
"False",
")",
":",
"if",
"not",
"isinstance",
"(",
"use_orig_distr",
",",
"bool",
")",
":",
"raise",
"TypeError",
"(",
"'use_original_distribution flag must be boolean!'",
")",
"if",
"not",
"isinstance",
"(",
"allow_non_symmetric",
",",
"bool",
")",
":",
"raise",
"TypeError",
"(",
"'allow_non_symmetric flag must be boolean'",
")",
"if",
"isinstance",
"(",
"weight_method_spec",
",",
"str",
")",
":",
"weight_method_spec",
"=",
"weight_method_spec",
".",
"lower",
"(",
")",
"if",
"weight_method_spec",
"in",
"list_medpy_histogram_metrics",
":",
"from",
"medpy",
".",
"metric",
"import",
"histogram",
"as",
"medpy_hist_metrics",
"weight_func",
"=",
"getattr",
"(",
"medpy_hist_metrics",
",",
"weight_method_spec",
")",
"if",
"use_orig_distr",
":",
"warnings",
".",
"warn",
"(",
"'use_original_distribution must be False when using builtin histogram metrics, '",
"'which expect histograms as input - setting it to False.'",
",",
"HiwenetWarning",
")",
"use_orig_distr",
"=",
"False",
"elif",
"weight_method_spec",
"in",
"metrics_on_original_features",
":",
"weight_func",
"=",
"getattr",
"(",
"more_metrics",
",",
"weight_method_spec",
")",
"if",
"not",
"use_orig_distr",
":",
"warnings",
".",
"warn",
"(",
"'use_original_distribution must be True when using builtin non-histogram metrics, '",
"'which expect original feature values in ROI/node as input '",
"'- setting it to True.'",
",",
"HiwenetWarning",
")",
"use_orig_distr",
"=",
"True",
"if",
"weight_method_spec",
"in",
"symmetric_metrics_on_original_features",
":",
"print",
"(",
"'Chosen metric is symmetric. Ignoring asymmetric=False flag.'",
")",
"allow_non_symmetric",
"=",
"False",
"else",
":",
"raise",
"NotImplementedError",
"(",
"'Chosen histogram distance/metric not implemented or invalid.'",
")",
"elif",
"callable",
"(",
"weight_method_spec",
")",
":",
"# ensure 1) takes two ndarrays",
"try",
":",
"dummy_weight",
"=",
"weight_method_spec",
"(",
"make_random_histogram",
"(",
")",
",",
"make_random_histogram",
"(",
")",
")",
"except",
":",
"raise",
"TypeError",
"(",
"'Error applying given callable on two input arrays.\\n'",
"'{} must accept two arrays and return a single scalar value!'",
")",
"else",
":",
"# and 2) returns only one number",
"if",
"not",
"np",
".",
"isscalar",
"(",
"dummy_weight",
")",
":",
"raise",
"TypeError",
"(",
"'Given callable does not return a single scalar as output.'",
")",
"weight_func",
"=",
"weight_method_spec",
"else",
":",
"raise",
"ValueError",
"(",
"'Supplied method to compute edge weight is not recognized:\\n'",
"'must be a string identifying one of the implemented methods\\n{}'",
"'\\n or a valid callable that accepts that two arrays '",
"'and returns 1 scalar.'",
".",
"format",
"(",
"list_medpy_histogram_metrics",
")",
")",
"return",
"weight_func",
",",
"use_orig_distr",
",",
"allow_non_symmetric"
] | Check if weight_method is recognized and implemented, or ensure it is callable. | [
"Check",
"if",
"weight_method",
"is",
"recognized",
"and",
"implemented",
"or",
"ensure",
"it",
"is",
"callable",
"."
] | python | train |
adafruit/Adafruit_Python_BluefruitLE | Adafruit_BluefruitLE/corebluetooth/provider.py | https://github.com/adafruit/Adafruit_Python_BluefruitLE/blob/34fc6f596371b961628369d78ce836950514062f/Adafruit_BluefruitLE/corebluetooth/provider.py#L179-L188 | def peripheral_didUpdateValueForCharacteristic_error_(self, peripheral, characteristic, error):
"""Called when characteristic value was read or updated."""
logger.debug('peripheral_didUpdateValueForCharacteristic_error called')
# Stop if there was some kind of error.
if error is not None:
return
# Notify the device about the updated characteristic value.
device = device_list().get(peripheral)
if device is not None:
device._characteristic_changed(characteristic) | [
"def",
"peripheral_didUpdateValueForCharacteristic_error_",
"(",
"self",
",",
"peripheral",
",",
"characteristic",
",",
"error",
")",
":",
"logger",
".",
"debug",
"(",
"'peripheral_didUpdateValueForCharacteristic_error called'",
")",
"# Stop if there was some kind of error.",
"if",
"error",
"is",
"not",
"None",
":",
"return",
"# Notify the device about the updated characteristic value.",
"device",
"=",
"device_list",
"(",
")",
".",
"get",
"(",
"peripheral",
")",
"if",
"device",
"is",
"not",
"None",
":",
"device",
".",
"_characteristic_changed",
"(",
"characteristic",
")"
] | Called when characteristic value was read or updated. | [
"Called",
"when",
"characteristic",
"value",
"was",
"read",
"or",
"updated",
"."
] | python | valid |
BerkeleyAutomation/perception | perception/image.py | https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/image.py#L2902-L2924 | def transform(self, translation, theta, method='opencv'):
"""Create a new image by translating and rotating the current image.
Parameters
----------
translation : :obj:`numpy.ndarray` of float
The XY translation vector.
theta : float
Rotation angle in radians, with positive meaning counter-clockwise.
method : :obj:`str`
Method to use for image transformations (opencv or scipy)
Returns
-------
:obj:`Image`
An image of the same type that has been rotated and translated.
"""
# transform channels separately
color_im_tf = self.color.transform(translation, theta, method=method)
depth_im_tf = self.depth.transform(translation, theta, method=method)
# return combination of cropped data
return RgbdImage.from_color_and_depth(color_im_tf, depth_im_tf) | [
"def",
"transform",
"(",
"self",
",",
"translation",
",",
"theta",
",",
"method",
"=",
"'opencv'",
")",
":",
"# transform channels separately",
"color_im_tf",
"=",
"self",
".",
"color",
".",
"transform",
"(",
"translation",
",",
"theta",
",",
"method",
"=",
"method",
")",
"depth_im_tf",
"=",
"self",
".",
"depth",
".",
"transform",
"(",
"translation",
",",
"theta",
",",
"method",
"=",
"method",
")",
"# return combination of cropped data",
"return",
"RgbdImage",
".",
"from_color_and_depth",
"(",
"color_im_tf",
",",
"depth_im_tf",
")"
] | Create a new image by translating and rotating the current image.
Parameters
----------
translation : :obj:`numpy.ndarray` of float
The XY translation vector.
theta : float
Rotation angle in radians, with positive meaning counter-clockwise.
method : :obj:`str`
Method to use for image transformations (opencv or scipy)
Returns
-------
:obj:`Image`
An image of the same type that has been rotated and translated. | [
"Create",
"a",
"new",
"image",
"by",
"translating",
"and",
"rotating",
"the",
"current",
"image",
"."
] | python | train |
fermiPy/fermipy | fermipy/roi_model.py | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/roi_model.py#L398-L401 | def is_free(self):
""" returns True if any of the spectral model parameters is set to free, else False
"""
return bool(np.array([int(value.get("free", False)) for key, value in self.spectral_pars.items()]).sum()) | [
"def",
"is_free",
"(",
"self",
")",
":",
"return",
"bool",
"(",
"np",
".",
"array",
"(",
"[",
"int",
"(",
"value",
".",
"get",
"(",
"\"free\"",
",",
"False",
")",
")",
"for",
"key",
",",
"value",
"in",
"self",
".",
"spectral_pars",
".",
"items",
"(",
")",
"]",
")",
".",
"sum",
"(",
")",
")"
] | returns True if any of the spectral model parameters is set to free, else False | [
"returns",
"True",
"if",
"any",
"of",
"the",
"spectral",
"model",
"parameters",
"is",
"set",
"to",
"free",
"else",
"False"
] | python | train |
Asana/python-asana | asana/resources/gen/projects.py | https://github.com/Asana/python-asana/blob/6deb7a34495db23f44858e53b6bb2c9eccff7872/asana/resources/gen/projects.py#L136-L147 | def find_by_team(self, team, params={}, **options):
"""Returns the compact project records for all projects in the team.
Parameters
----------
team : {Id} The team to find projects in.
[params] : {Object} Parameters for the request
- [archived] : {Boolean} Only return projects whose `archived` field takes on the value of
this parameter.
"""
path = "/teams/%s/projects" % (team)
return self.client.get_collection(path, params, **options) | [
"def",
"find_by_team",
"(",
"self",
",",
"team",
",",
"params",
"=",
"{",
"}",
",",
"*",
"*",
"options",
")",
":",
"path",
"=",
"\"/teams/%s/projects\"",
"%",
"(",
"team",
")",
"return",
"self",
".",
"client",
".",
"get_collection",
"(",
"path",
",",
"params",
",",
"*",
"*",
"options",
")"
] | Returns the compact project records for all projects in the team.
Parameters
----------
team : {Id} The team to find projects in.
[params] : {Object} Parameters for the request
- [archived] : {Boolean} Only return projects whose `archived` field takes on the value of
this parameter. | [
"Returns",
"the",
"compact",
"project",
"records",
"for",
"all",
"projects",
"in",
"the",
"team",
"."
] | python | train |
kennydo/nyaalib | nyaalib/__init__.py | https://github.com/kennydo/nyaalib/blob/ab787b7ba141ed53d2ad978bf13eb7b8bcdd4b0d/nyaalib/__init__.py#L38-L65 | def _get_page_content(self, response):
"""Given a :class:`requests.Response`, return the
:class:`xml.etree.Element` of the content `div`.
:param response: a :class:`requests.Response` to parse
:returns: the :class:`Element` of the first content `div` or `None`
"""
document = html5lib.parse(
response.content,
encoding=response.encoding,
treebuilder='etree',
namespaceHTMLElements=False
)
# etree doesn't fully support XPath, so we can't just search
# the attribute values for "content"
divs = document.findall(
".//body//div[@class]")
content_div = None
for div in divs:
if "content" in div.attrib['class'].split(' '):
content_div = div
break
# The `Element` object is False-y when there are no subelements,
# so compare to `None`
if content_div is None:
return None
return content_div | [
"def",
"_get_page_content",
"(",
"self",
",",
"response",
")",
":",
"document",
"=",
"html5lib",
".",
"parse",
"(",
"response",
".",
"content",
",",
"encoding",
"=",
"response",
".",
"encoding",
",",
"treebuilder",
"=",
"'etree'",
",",
"namespaceHTMLElements",
"=",
"False",
")",
"# etree doesn't fully support XPath, so we can't just search",
"# the attribute values for \"content\"",
"divs",
"=",
"document",
".",
"findall",
"(",
"\".//body//div[@class]\"",
")",
"content_div",
"=",
"None",
"for",
"div",
"in",
"divs",
":",
"if",
"\"content\"",
"in",
"div",
".",
"attrib",
"[",
"'class'",
"]",
".",
"split",
"(",
"' '",
")",
":",
"content_div",
"=",
"div",
"break",
"# The `Element` object is False-y when there are no subelements,",
"# so compare to `None`",
"if",
"content_div",
"is",
"None",
":",
"return",
"None",
"return",
"content_div"
] | Given a :class:`requests.Response`, return the
:class:`xml.etree.Element` of the content `div`.
:param response: a :class:`requests.Response` to parse
:returns: the :class:`Element` of the first content `div` or `None` | [
"Given",
"a",
":",
"class",
":",
"requests",
".",
"Response",
"return",
"the",
":",
"class",
":",
"xml",
".",
"etree",
".",
"Element",
"of",
"the",
"content",
"div",
"."
] | python | train |
metagriffin/asset | asset/resource.py | https://github.com/metagriffin/asset/blob/f2c5e599cd4688f82216d4b5cfa87aab96d8bb8c/asset/resource.py#L244-L276 | def load(pattern, *args, **kw):
    '''
    Given a package asset-spec glob-pattern `pattern`, returns an
    :class:`AssetGroup` object, which in turn can act as a generator of
    :class:`Asset` objects that match the pattern.
    Example:
    .. code-block:: python
        import asset
        # concatenate all 'css' files into one string:
        css = asset.load('mypackage:static/style/**.css').read()
    '''
    spec = pattern
    if ':' not in pattern:
        raise ValueError('`pattern` must be in the format "PACKAGE:GLOB"')
    pkgname, pkgpat = pattern.split(':', 1)
    pkgdir, pattern = globre.compile(pkgpat, split_prefix=True, flags=globre.EXACT)
    if pkgdir:
        idx = pkgdir.rfind('/')
        pkgdir = pkgdir[:idx] if idx >= 0 else ''
    group = AssetGroup(pkgname, pkgdir, pattern, spec)
    if globre.iswild(pkgpat):
        return group
    return Asset(group, pkgname, pkgpat) | [
"def",
"load",
"(",
"pattern",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"spec",
"=",
"pattern",
"if",
"':'",
"not",
"in",
"pattern",
":",
"raise",
"ValueError",
"(",
"'`pattern` must be in the format \"PACKAGE:GLOB\"'",
")",
"pkgname",
",",
"pkgpat",
"=",
"pattern",
".",
"split",
"(",
"':'",
",",
"1",
")",
"pkgdir",
",",
"pattern",
"=",
"globre",
".",
"compile",
"(",
"pkgpat",
",",
"split_prefix",
"=",
"True",
",",
"flags",
"=",
"globre",
".",
"EXACT",
")",
"if",
"pkgdir",
":",
"idx",
"=",
"pkgdir",
".",
"rfind",
"(",
"'/'",
")",
"pkgdir",
"=",
"pkgdir",
"[",
":",
"idx",
"]",
"if",
"idx",
">=",
"0",
"else",
"''",
"group",
"=",
"AssetGroup",
"(",
"pkgname",
",",
"pkgdir",
",",
"pattern",
",",
"spec",
")",
"if",
"globre",
".",
"iswild",
"(",
"pkgpat",
")",
":",
"return",
"group",
"return",
"Asset",
"(",
"group",
",",
"pkgname",
",",
"pkgpat",
")"
] | Given a package asset-spec glob-pattern `pattern`, returns an
:class:`AssetGroup` object, which in turn can act as a generator of
:class:`Asset` objects that match the pattern.
Example:
.. code-block:: python
import asset
# concatenate all 'css' files into one string:
css = asset.load('mypackage:static/style/**.css').read() | [
"Given",
"a",
"package",
"asset",
"-",
"spec",
"glob",
"-",
"pattern",
"pattern",
"returns",
"an",
":",
"class",
":",
"AssetGroup",
"object",
"which",
"in",
"turn",
"can",
"act",
"as",
"a",
"generator",
"of",
":",
"class",
":",
"Asset",
"objects",
"that",
"match",
"the",
"pattern",
"."
] | python | train |
open-mmlab/mmcv | mmcv/image/transforms/colorspace.py | https://github.com/open-mmlab/mmcv/blob/0d77f61450aab4dde8b8585a577cc496acb95d7f/mmcv/image/transforms/colorspace.py#L33-L44 | def gray2bgr(img):
"""Convert a grayscale image to BGR image.
Args:
img (ndarray or str): The input image.
Returns:
ndarray: The converted BGR image.
"""
img = img[..., None] if img.ndim == 2 else img
out_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
return out_img | [
"def",
"gray2bgr",
"(",
"img",
")",
":",
"img",
"=",
"img",
"[",
"...",
",",
"None",
"]",
"if",
"img",
".",
"ndim",
"==",
"2",
"else",
"img",
"out_img",
"=",
"cv2",
".",
"cvtColor",
"(",
"img",
",",
"cv2",
".",
"COLOR_GRAY2BGR",
")",
"return",
"out_img"
] | Convert a grayscale image to BGR image.
Args:
img (ndarray or str): The input image.
Returns:
ndarray: The converted BGR image. | [
"Convert",
"a",
"grayscale",
"image",
"to",
"BGR",
"image",
"."
] | python | test |
misli/django-cms-articles | cms_articles/models/managers.py | https://github.com/misli/django-cms-articles/blob/d96ac77e049022deb4c70d268e4eab74d175145c/cms_articles/models/managers.py#L19-L60 | def search(self, q, language=None, current_site_only=True):
"""Simple search function
Plugins can define a 'search_fields' tuple similar to ModelAdmin classes
"""
from cms.plugin_pool import plugin_pool
qs = self.get_queryset()
qs = qs.public()
if current_site_only:
site = Site.objects.get_current()
qs = qs.filter(tree__site=site)
qt = Q(title_set__title__icontains=q)
# find 'searchable' plugins and build query
qp = Q()
plugins = plugin_pool.get_all_plugins()
for plugin in plugins:
cmsplugin = plugin.model
if not (
hasattr(cmsplugin, 'search_fields') and
hasattr(cmsplugin, 'cmsplugin_ptr')
):
continue
field = cmsplugin.cmsplugin_ptr.field
related_query_name = field.related_query_name()
if related_query_name and not related_query_name.startswith('+'):
for field in cmsplugin.search_fields:
qp |= Q(**{
'placeholders__cmsplugin__{0}__{1}__icontains'.format(
related_query_name,
field,
): q})
if language:
qt &= Q(title_set__language=language)
qp &= Q(cmsplugin__language=language)
qs = qs.filter(qt | qp)
return qs.distinct() | [
"def",
"search",
"(",
"self",
",",
"q",
",",
"language",
"=",
"None",
",",
"current_site_only",
"=",
"True",
")",
":",
"from",
"cms",
".",
"plugin_pool",
"import",
"plugin_pool",
"qs",
"=",
"self",
".",
"get_queryset",
"(",
")",
"qs",
"=",
"qs",
".",
"public",
"(",
")",
"if",
"current_site_only",
":",
"site",
"=",
"Site",
".",
"objects",
".",
"get_current",
"(",
")",
"qs",
"=",
"qs",
".",
"filter",
"(",
"tree__site",
"=",
"site",
")",
"qt",
"=",
"Q",
"(",
"title_set__title__icontains",
"=",
"q",
")",
"# find 'searchable' plugins and build query",
"qp",
"=",
"Q",
"(",
")",
"plugins",
"=",
"plugin_pool",
".",
"get_all_plugins",
"(",
")",
"for",
"plugin",
"in",
"plugins",
":",
"cmsplugin",
"=",
"plugin",
".",
"model",
"if",
"not",
"(",
"hasattr",
"(",
"cmsplugin",
",",
"'search_fields'",
")",
"and",
"hasattr",
"(",
"cmsplugin",
",",
"'cmsplugin_ptr'",
")",
")",
":",
"continue",
"field",
"=",
"cmsplugin",
".",
"cmsplugin_ptr",
".",
"field",
"related_query_name",
"=",
"field",
".",
"related_query_name",
"(",
")",
"if",
"related_query_name",
"and",
"not",
"related_query_name",
".",
"startswith",
"(",
"'+'",
")",
":",
"for",
"field",
"in",
"cmsplugin",
".",
"search_fields",
":",
"qp",
"|=",
"Q",
"(",
"*",
"*",
"{",
"'placeholders__cmsplugin__{0}__{1}__icontains'",
".",
"format",
"(",
"related_query_name",
",",
"field",
",",
")",
":",
"q",
"}",
")",
"if",
"language",
":",
"qt",
"&=",
"Q",
"(",
"title_set__language",
"=",
"language",
")",
"qp",
"&=",
"Q",
"(",
"cmsplugin__language",
"=",
"language",
")",
"qs",
"=",
"qs",
".",
"filter",
"(",
"qt",
"|",
"qp",
")",
"return",
"qs",
".",
"distinct",
"(",
")"
] | Simple search function
Plugins can define a 'search_fields' tuple similar to ModelAdmin classes | [
"Simple",
"search",
"function"
] | python | train |
tensorflow/hub | examples/text_embeddings/export.py | https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/examples/text_embeddings/export.py#L62-L90 | def load(file_path, parse_line_fn):
"""Loads a text embedding into memory as a numpy matrix.
Args:
file_path: Path to the text embedding file.
parse_line_fn: callback function to parse each file line.
Returns:
A tuple of (list of vocabulary tokens, numpy matrix of embedding vectors).
Raises:
ValueError: if the data in the sstable is inconsistent.
"""
vocabulary = []
embeddings = []
embeddings_dim = None
for line in tf.gfile.GFile(file_path):
token, embedding = parse_line_fn(line)
if not embeddings_dim:
embeddings_dim = len(embedding)
elif embeddings_dim != len(embedding):
raise ValueError(
"Inconsistent embedding dimension detected, %d != %d for token %s",
embeddings_dim, len(embedding), token)
vocabulary.append(token)
embeddings.append(embedding)
return vocabulary, np.array(embeddings) | [
"def",
"load",
"(",
"file_path",
",",
"parse_line_fn",
")",
":",
"vocabulary",
"=",
"[",
"]",
"embeddings",
"=",
"[",
"]",
"embeddings_dim",
"=",
"None",
"for",
"line",
"in",
"tf",
".",
"gfile",
".",
"GFile",
"(",
"file_path",
")",
":",
"token",
",",
"embedding",
"=",
"parse_line_fn",
"(",
"line",
")",
"if",
"not",
"embeddings_dim",
":",
"embeddings_dim",
"=",
"len",
"(",
"embedding",
")",
"elif",
"embeddings_dim",
"!=",
"len",
"(",
"embedding",
")",
":",
"raise",
"ValueError",
"(",
"\"Inconsistent embedding dimension detected, %d != %d for token %s\"",
",",
"embeddings_dim",
",",
"len",
"(",
"embedding",
")",
",",
"token",
")",
"vocabulary",
".",
"append",
"(",
"token",
")",
"embeddings",
".",
"append",
"(",
"embedding",
")",
"return",
"vocabulary",
",",
"np",
".",
"array",
"(",
"embeddings",
")"
] | Loads a text embedding into memory as a numpy matrix.
Args:
file_path: Path to the text embedding file.
parse_line_fn: callback function to parse each file line.
Returns:
A tuple of (list of vocabulary tokens, numpy matrix of embedding vectors).
Raises:
ValueError: if the data in the sstable is inconsistent. | [
"Loads",
"a",
"text",
"embedding",
"into",
"memory",
"as",
"a",
"numpy",
"matrix",
"."
] | python | train |
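A small sketch of the parse_line_fn callback that load() expects; the whitespace-separated file layout is an assumption about the embedding format:

def parse_space_separated_line(line):
    # "token 0.1 0.2 0.3" -> ("token", [0.1, 0.2, 0.3])
    parts = line.rstrip().split(" ")
    return parts[0], [float(value) for value in parts[1:]]

# vocabulary, embeddings = load("/tmp/embeddings.txt", parse_space_separated_line)
# embeddings.shape == (len(vocabulary), embedding_dim)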
lepture/flask-oauthlib | flask_oauthlib/provider/oauth1.py | https://github.com/lepture/flask-oauthlib/blob/9e6f152a5bb360e7496210da21561c3e6d41b0e1/flask_oauthlib/provider/oauth1.py#L657-L671 | def get_access_token_secret(self, client_key, token, request):
"""Get access token secret.
    The access token object should have a ``secret`` attribute.
"""
log.debug('Get access token secret of %r for %r',
token, client_key)
tok = request.access_token or self._tokengetter(
client_key=client_key,
token=token,
)
if tok:
request.access_token = tok
return tok.secret
return None | [
"def",
"get_access_token_secret",
"(",
"self",
",",
"client_key",
",",
"token",
",",
"request",
")",
":",
"log",
".",
"debug",
"(",
"'Get access token secret of %r for %r'",
",",
"token",
",",
"client_key",
")",
"tok",
"=",
"request",
".",
"access_token",
"or",
"self",
".",
"_tokengetter",
"(",
"client_key",
"=",
"client_key",
",",
"token",
"=",
"token",
",",
")",
"if",
"tok",
":",
"request",
".",
"access_token",
"=",
"tok",
"return",
"tok",
".",
"secret",
"return",
"None"
] | Get access token secret.
The access token object should have a ``secret`` attribute. | [
"Get",
"access",
"token",
"secret",
"."
] | python | test |
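The provider resolves tokens through the registered tokengetter; a minimal sketch of that callback, assuming a Flask app and a SQLAlchemy-style Token model with client_key, token and secret columns (both are assumptions):

from flask_oauthlib.provider import OAuth1Provider

provider = OAuth1Provider(app)  # app: an existing Flask application (assumed)

@provider.tokengetter
def load_access_token(client_key, token, *args, **kwargs):
    # Must return an object exposing a .secret attribute, or None.
    return Token.query.filter_by(client_key=client_key, token=token).first()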
mzucker/noteshrink | noteshrink.py | https://github.com/mzucker/noteshrink/blob/7d876e5b43923c6bf8d64b7ef18f6855bfb30ce3/noteshrink.py#L371-L396 | def get_palette(samples, options, return_mask=False, kmeans_iter=40):
'''Extract the palette for the set of sampled RGB values. The first
palette entry is always the background color; the rest are determined
from foreground pixels by running K-means clustering. Returns the
palette, as well as a mask corresponding to the foreground pixels.
'''
if not options.quiet:
print(' getting palette...')
bg_color = get_bg_color(samples, 6)
fg_mask = get_fg_mask(bg_color, samples, options)
centers, _ = kmeans(samples[fg_mask].astype(np.float32),
options.num_colors-1,
iter=kmeans_iter)
palette = np.vstack((bg_color, centers)).astype(np.uint8)
if not return_mask:
return palette
else:
return palette, fg_mask | [
"def",
"get_palette",
"(",
"samples",
",",
"options",
",",
"return_mask",
"=",
"False",
",",
"kmeans_iter",
"=",
"40",
")",
":",
"if",
"not",
"options",
".",
"quiet",
":",
"print",
"(",
"' getting palette...'",
")",
"bg_color",
"=",
"get_bg_color",
"(",
"samples",
",",
"6",
")",
"fg_mask",
"=",
"get_fg_mask",
"(",
"bg_color",
",",
"samples",
",",
"options",
")",
"centers",
",",
"_",
"=",
"kmeans",
"(",
"samples",
"[",
"fg_mask",
"]",
".",
"astype",
"(",
"np",
".",
"float32",
")",
",",
"options",
".",
"num_colors",
"-",
"1",
",",
"iter",
"=",
"kmeans_iter",
")",
"palette",
"=",
"np",
".",
"vstack",
"(",
"(",
"bg_color",
",",
"centers",
")",
")",
".",
"astype",
"(",
"np",
".",
"uint8",
")",
"if",
"not",
"return_mask",
":",
"return",
"palette",
"else",
":",
"return",
"palette",
",",
"fg_mask"
] | Extract the palette for the set of sampled RGB values. The first
palette entry is always the background color; the rest are determined
from foreground pixels by running K-means clustering. Returns the
palette, as well as a mask corresponding to the foreground pixels. | [
"Extract",
"the",
"palette",
"for",
"the",
"set",
"of",
"sampled",
"RGB",
"values",
".",
"The",
"first",
"palette",
"entry",
"is",
"always",
"the",
"background",
"color",
";",
"the",
"rest",
"are",
"determined",
"from",
"foreground",
"pixels",
"by",
"running",
"K",
"-",
"means",
"clustering",
".",
"Returns",
"the",
"palette",
"as",
"well",
"as",
"a",
"mask",
"corresponding",
"to",
"the",
"foreground",
"pixels",
"."
] | python | train |
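A hedged sketch of calling get_palette on random sample pixels; the option attributes below (num_colors, value_threshold, sat_threshold, quiet) mirror what the surrounding helpers appear to need and should be treated as assumptions:

import argparse
import numpy as np

samples = np.random.randint(0, 255, size=(10000, 3), dtype=np.uint8)
options = argparse.Namespace(num_colors=8,
                             value_threshold=0.25,
                             sat_threshold=0.20,
                             quiet=False)

palette, fg_mask = get_palette(samples, options, return_mask=True)
# palette[0] is the estimated background color; the rest come from K-means.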
yunojuno/elasticsearch-django | elasticsearch_django/models.py | https://github.com/yunojuno/elasticsearch-django/blob/e8d98d32bcd77f1bedb8f1a22b6523ca44ffd489/elasticsearch_django/models.py#L59-L80 | def in_search_queryset(self, instance_id, index="_all"):
"""
Return True if an object is part of the search index queryset.
Sometimes it's useful to know if an object _should_ be indexed. If
an object is saved, how do you know if you should push that change
to the search index? The simplest (albeit not most efficient) way
is to check if it appears in the underlying search queryset.
NB this method doesn't evaluate the entire dataset, it chains an
additional queryset filter expression on the end. That's why it's
important that the `get_search_queryset` method returns a queryset.
Args:
instance_id: the id of model object that we are looking for.
Kwargs:
index: string, the name of the index in which to check.
Defaults to '_all'.
"""
return self.get_search_queryset(index=index).filter(pk=instance_id).exists() | [
"def",
"in_search_queryset",
"(",
"self",
",",
"instance_id",
",",
"index",
"=",
"\"_all\"",
")",
":",
"return",
"self",
".",
"get_search_queryset",
"(",
"index",
"=",
"index",
")",
".",
"filter",
"(",
"pk",
"=",
"instance_id",
")",
".",
"exists",
"(",
")"
] | Return True if an object is part of the search index queryset.
Sometimes it's useful to know if an object _should_ be indexed. If
an object is saved, how do you know if you should push that change
to the search index? The simplest (albeit not most efficient) way
is to check if it appears in the underlying search queryset.
NB this method doesn't evaluate the entire dataset, it chains an
additional queryset filter expression on the end. That's why it's
important that the `get_search_queryset` method returns a queryset.
Args:
instance_id: the id of model object that we are looking for.
Kwargs:
index: string, the name of the index in which to check.
Defaults to '_all'. | [
"Return",
"True",
"if",
"an",
"object",
"is",
"part",
"of",
"the",
"search",
"index",
"queryset",
"."
] | python | train |
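A sketch of the pattern the docstring describes, wired into a post_save handler; the Article model and update_search_index helper are hypothetical:

from django.db.models.signals import post_save
from django.dispatch import receiver

@receiver(post_save, sender=Article)
def maybe_reindex(sender, instance, **kwargs):
    # Only push the change when the object belongs to the search queryset.
    if Article.objects.in_search_queryset(instance.pk, index="articles"):
        update_search_index(instance)  # hypothetical helper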
joeblackwaslike/pricing | pricing/metaconfigure.py | https://github.com/joeblackwaslike/pricing/blob/be988b0851b4313af81f1db475bc33248700e39c/pricing/metaconfigure.py#L37-L46 | def currencyFormat(_context, code, symbol, format,
currency_digits=True, decimal_quantization=True,
name=''):
"""Handle currencyFormat subdirectives."""
_context.action(
discriminator=('currency', name, code),
callable=_register_currency,
args=(name, code, symbol, format, currency_digits,
decimal_quantization)
) | [
"def",
"currencyFormat",
"(",
"_context",
",",
"code",
",",
"symbol",
",",
"format",
",",
"currency_digits",
"=",
"True",
",",
"decimal_quantization",
"=",
"True",
",",
"name",
"=",
"''",
")",
":",
"_context",
".",
"action",
"(",
"discriminator",
"=",
"(",
"'currency'",
",",
"name",
",",
"code",
")",
",",
"callable",
"=",
"_register_currency",
",",
"args",
"=",
"(",
"name",
",",
"code",
",",
"symbol",
",",
"format",
",",
"currency_digits",
",",
"decimal_quantization",
")",
")"
] | Handle currencyFormat subdirectives. | [
"Handle",
"currencyFormat",
"subdirectives",
"."
] | python | test |
latchset/jwcrypto | jwcrypto/jws.py | https://github.com/latchset/jwcrypto/blob/961df898dc08f63fe3d900f2002618740bc66b4a/jwcrypto/jws.py#L419-L510 | def add_signature(self, key, alg=None, protected=None, header=None):
"""Adds a new signature to the object.
:param key: A (:class:`jwcrypto.jwk.JWK`) key of appropriate for
the "alg" provided.
:param alg: An optional algorithm name. If already provided as an
element of the protected or unprotected header it can be safely
omitted.
        :param protected: The Protected Header (optional)
:param header: The Unprotected Header (optional)
:raises InvalidJWSObject: if no payload has been set on the object,
or invalid headers are provided.
:raises ValueError: if the key is not a :class:`JWK` object.
:raises ValueError: if the algorithm is missing or is not provided
by one of the headers.
:raises InvalidJWAAlgorithm: if the algorithm is not valid, is
unknown or otherwise not yet implemented.
"""
if not self.objects.get('payload', None):
raise InvalidJWSObject('Missing Payload')
b64 = True
p = dict()
if protected:
if isinstance(protected, dict):
p = protected
protected = json_encode(p)
else:
p = json_decode(protected)
# If b64 is present we must enforce criticality
if 'b64' in list(p.keys()):
crit = p.get('crit', [])
if 'b64' not in crit:
raise InvalidJWSObject('b64 header must always be critical')
b64 = p['b64']
if 'b64' in self.objects:
if b64 != self.objects['b64']:
raise InvalidJWSObject('Mixed b64 headers on signatures')
h = None
if header:
if isinstance(header, dict):
h = header
header = json_encode(header)
else:
h = json_decode(header)
p = self._merge_check_headers(p, h)
if 'alg' in p:
if alg is None:
alg = p['alg']
elif alg != p['alg']:
raise ValueError('"alg" value mismatch, specified "alg" '
'does not match JOSE header value')
if alg is None:
raise ValueError('"alg" not specified')
c = JWSCore(alg, key, protected, self.objects['payload'])
sig = c.sign()
o = dict()
o['signature'] = base64url_decode(sig['signature'])
if protected:
o['protected'] = protected
if header:
o['header'] = h
o['valid'] = True
if 'signatures' in self.objects:
self.objects['signatures'].append(o)
elif 'signature' in self.objects:
self.objects['signatures'] = list()
n = dict()
n['signature'] = self.objects.pop('signature')
if 'protected' in self.objects:
n['protected'] = self.objects.pop('protected')
if 'header' in self.objects:
n['header'] = self.objects.pop('header')
if 'valid' in self.objects:
n['valid'] = self.objects.pop('valid')
self.objects['signatures'].append(n)
self.objects['signatures'].append(o)
else:
self.objects.update(o)
self.objects['b64'] = b64 | [
"def",
"add_signature",
"(",
"self",
",",
"key",
",",
"alg",
"=",
"None",
",",
"protected",
"=",
"None",
",",
"header",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"objects",
".",
"get",
"(",
"'payload'",
",",
"None",
")",
":",
"raise",
"InvalidJWSObject",
"(",
"'Missing Payload'",
")",
"b64",
"=",
"True",
"p",
"=",
"dict",
"(",
")",
"if",
"protected",
":",
"if",
"isinstance",
"(",
"protected",
",",
"dict",
")",
":",
"p",
"=",
"protected",
"protected",
"=",
"json_encode",
"(",
"p",
")",
"else",
":",
"p",
"=",
"json_decode",
"(",
"protected",
")",
"# If b64 is present we must enforce criticality",
"if",
"'b64'",
"in",
"list",
"(",
"p",
".",
"keys",
"(",
")",
")",
":",
"crit",
"=",
"p",
".",
"get",
"(",
"'crit'",
",",
"[",
"]",
")",
"if",
"'b64'",
"not",
"in",
"crit",
":",
"raise",
"InvalidJWSObject",
"(",
"'b64 header must always be critical'",
")",
"b64",
"=",
"p",
"[",
"'b64'",
"]",
"if",
"'b64'",
"in",
"self",
".",
"objects",
":",
"if",
"b64",
"!=",
"self",
".",
"objects",
"[",
"'b64'",
"]",
":",
"raise",
"InvalidJWSObject",
"(",
"'Mixed b64 headers on signatures'",
")",
"h",
"=",
"None",
"if",
"header",
":",
"if",
"isinstance",
"(",
"header",
",",
"dict",
")",
":",
"h",
"=",
"header",
"header",
"=",
"json_encode",
"(",
"header",
")",
"else",
":",
"h",
"=",
"json_decode",
"(",
"header",
")",
"p",
"=",
"self",
".",
"_merge_check_headers",
"(",
"p",
",",
"h",
")",
"if",
"'alg'",
"in",
"p",
":",
"if",
"alg",
"is",
"None",
":",
"alg",
"=",
"p",
"[",
"'alg'",
"]",
"elif",
"alg",
"!=",
"p",
"[",
"'alg'",
"]",
":",
"raise",
"ValueError",
"(",
"'\"alg\" value mismatch, specified \"alg\" '",
"'does not match JOSE header value'",
")",
"if",
"alg",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'\"alg\" not specified'",
")",
"c",
"=",
"JWSCore",
"(",
"alg",
",",
"key",
",",
"protected",
",",
"self",
".",
"objects",
"[",
"'payload'",
"]",
")",
"sig",
"=",
"c",
".",
"sign",
"(",
")",
"o",
"=",
"dict",
"(",
")",
"o",
"[",
"'signature'",
"]",
"=",
"base64url_decode",
"(",
"sig",
"[",
"'signature'",
"]",
")",
"if",
"protected",
":",
"o",
"[",
"'protected'",
"]",
"=",
"protected",
"if",
"header",
":",
"o",
"[",
"'header'",
"]",
"=",
"h",
"o",
"[",
"'valid'",
"]",
"=",
"True",
"if",
"'signatures'",
"in",
"self",
".",
"objects",
":",
"self",
".",
"objects",
"[",
"'signatures'",
"]",
".",
"append",
"(",
"o",
")",
"elif",
"'signature'",
"in",
"self",
".",
"objects",
":",
"self",
".",
"objects",
"[",
"'signatures'",
"]",
"=",
"list",
"(",
")",
"n",
"=",
"dict",
"(",
")",
"n",
"[",
"'signature'",
"]",
"=",
"self",
".",
"objects",
".",
"pop",
"(",
"'signature'",
")",
"if",
"'protected'",
"in",
"self",
".",
"objects",
":",
"n",
"[",
"'protected'",
"]",
"=",
"self",
".",
"objects",
".",
"pop",
"(",
"'protected'",
")",
"if",
"'header'",
"in",
"self",
".",
"objects",
":",
"n",
"[",
"'header'",
"]",
"=",
"self",
".",
"objects",
".",
"pop",
"(",
"'header'",
")",
"if",
"'valid'",
"in",
"self",
".",
"objects",
":",
"n",
"[",
"'valid'",
"]",
"=",
"self",
".",
"objects",
".",
"pop",
"(",
"'valid'",
")",
"self",
".",
"objects",
"[",
"'signatures'",
"]",
".",
"append",
"(",
"n",
")",
"self",
".",
"objects",
"[",
"'signatures'",
"]",
".",
"append",
"(",
"o",
")",
"else",
":",
"self",
".",
"objects",
".",
"update",
"(",
"o",
")",
"self",
".",
"objects",
"[",
"'b64'",
"]",
"=",
"b64"
] | Adds a new signature to the object.
:param key: A (:class:`jwcrypto.jwk.JWK`) key of appropriate for
the "alg" provided.
:param alg: An optional algorithm name. If already provided as an
element of the protected or unprotected header it can be safely
omitted.
:param protected: The Protected Header (optional)
:param header: The Unprotected Header (optional)
:raises InvalidJWSObject: if no payload has been set on the object,
or invalid headers are provided.
:raises ValueError: if the key is not a :class:`JWK` object.
:raises ValueError: if the algorithm is missing or is not provided
by one of the headers.
:raises InvalidJWAAlgorithm: if the algorithm is not valid, is
unknown or otherwise not yet implemented. | [
"Adds",
"a",
"new",
"signature",
"to",
"the",
"object",
"."
] | python | train |
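A minimal signing sketch against the public jwcrypto API; the symmetric key and HS256 parameters are illustrative choices, and newer library versions may require a bytes payload:

from jwcrypto import jwk, jws
from jwcrypto.common import json_encode

key = jwk.JWK.generate(kty='oct', size=256)
token = jws.JWS(payload='lorem ipsum')
token.add_signature(key, alg='HS256', protected=json_encode({'alg': 'HS256'}))
print(token.serialize(compact=True))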
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/zmq/kernelmanager.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/zmq/kernelmanager.py#L195-L206 | def run(self):
"""The thread's main activity. Call start() instead."""
self.socket = self.context.socket(zmq.DEALER)
self.socket.setsockopt(zmq.IDENTITY, self.session.bsession)
self.socket.connect('tcp://%s:%i' % self.address)
self.stream = zmqstream.ZMQStream(self.socket, self.ioloop)
self.stream.on_recv(self._handle_recv)
self._run_loop()
try:
self.socket.close()
except:
pass | [
"def",
"run",
"(",
"self",
")",
":",
"self",
".",
"socket",
"=",
"self",
".",
"context",
".",
"socket",
"(",
"zmq",
".",
"DEALER",
")",
"self",
".",
"socket",
".",
"setsockopt",
"(",
"zmq",
".",
"IDENTITY",
",",
"self",
".",
"session",
".",
"bsession",
")",
"self",
".",
"socket",
".",
"connect",
"(",
"'tcp://%s:%i'",
"%",
"self",
".",
"address",
")",
"self",
".",
"stream",
"=",
"zmqstream",
".",
"ZMQStream",
"(",
"self",
".",
"socket",
",",
"self",
".",
"ioloop",
")",
"self",
".",
"stream",
".",
"on_recv",
"(",
"self",
".",
"_handle_recv",
")",
"self",
".",
"_run_loop",
"(",
")",
"try",
":",
"self",
".",
"socket",
".",
"close",
"(",
")",
"except",
":",
"pass"
] | The thread's main activity. Call start() instead. | [
"The",
"thread",
"s",
"main",
"activity",
".",
"Call",
"start",
"()",
"instead",
"."
] | python | test |
OnroerendErfgoed/oe_utils | oe_utils/views/atom.py | https://github.com/OnroerendErfgoed/oe_utils/blob/7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d/oe_utils/views/atom.py#L147-L157 | def _generate_atom_feed(self, feed):
"""
A function returning a feed like `feedgen.feed.FeedGenerator`.
The function can be overwritten when used in other applications.
:param feed: a feed object
:return: an atom feed `feedgen.feed.FeedGenerator`
"""
atom_feed = self.init_atom_feed(feed)
atom_feed.title("Feed")
return atom_feed | [
"def",
"_generate_atom_feed",
"(",
"self",
",",
"feed",
")",
":",
"atom_feed",
"=",
"self",
".",
"init_atom_feed",
"(",
"feed",
")",
"atom_feed",
".",
"title",
"(",
"\"Feed\"",
")",
"return",
"atom_feed"
] | A function returning a feed like `feedgen.feed.FeedGenerator`.
The function can be overwritten when used in other applications.
:param feed: a feed object
:return: an atom feed `feedgen.feed.FeedGenerator` | [
"A",
"function",
"returning",
"a",
"feed",
"like",
"feedgen",
".",
"feed",
".",
"FeedGenerator",
".",
"The",
"function",
"can",
"be",
"overwritten",
"when",
"used",
"in",
"other",
"applications",
"."
] | python | train |
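A sketch of overriding the hook in a subclass, as the docstring invites; the enclosing base-class name is an assumption:

class DossierAtomView(AtomView):  # AtomView: assumed name of the enclosing view class
    def _generate_atom_feed(self, feed):
        atom_feed = self.init_atom_feed(feed)
        atom_feed.title('Dossier updates')
        atom_feed.subtitle('Recently changed records')  # feedgen.feed.FeedGenerator API
        return atom_feed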
codeghar/brokerlso | brokerlso/qmfv2.py | https://github.com/codeghar/brokerlso/blob/e110e12502b090e12b06c7615dd0a96a14a92585/brokerlso/qmfv2.py#L157-L166 | def list_queues(self):
"""Create message content and properties to list all queues with QMFv2
:returns: Tuple containing content and query properties
"""
content = {"_what": "OBJECT",
"_schema_id": {"_class_name": "queue"}}
logger.debug("Message content -> {0}".format(content))
return content, self.query_properties | [
"def",
"list_queues",
"(",
"self",
")",
":",
"content",
"=",
"{",
"\"_what\"",
":",
"\"OBJECT\"",
",",
"\"_schema_id\"",
":",
"{",
"\"_class_name\"",
":",
"\"queue\"",
"}",
"}",
"logger",
".",
"debug",
"(",
"\"Message content -> {0}\"",
".",
"format",
"(",
"content",
")",
")",
"return",
"content",
",",
"self",
".",
"query_properties"
] | Create message content and properties to list all queues with QMFv2
:returns: Tuple containing content and query properties | [
"Create",
"message",
"content",
"and",
"properties",
"to",
"list",
"all",
"queues",
"with",
"QMFv2"
] | python | test |
Netflix-Skunkworks/cloudaux | cloudaux/orchestration/aws/iam/group.py | https://github.com/Netflix-Skunkworks/cloudaux/blob/c4b0870c3ac68b1c69e71d33cf78b6a8bdf437ea/cloudaux/orchestration/aws/iam/group.py#L74-L103 | def get_group(group, flags=FLAGS.BASE | FLAGS.INLINE_POLICIES | FLAGS.MANAGED_POLICIES, **conn):
"""
Orchestrates all the calls required to fully build out an IAM Group in the following format:
{
"Arn": ...,
"GroupName": ...,
"Path": ...,
"GroupId": ...,
"CreateDate": ..., # str
"InlinePolicies": ...,
"ManagedPolicies": ..., # These are just the names of the Managed Policies.
"Users": ..., # False by default -- these are just the names of the users.
"_version": 1
}
:param flags: By default, Users is disabled. This is somewhat expensive as it has to call the `get_group` call
multiple times.
:param group: dict MUST contain the GroupName and also a combination of either the ARN or the account_number.
:param output: Determines whether keys should be returned camelized or underscored.
:param conn: dict containing enough information to make a connection to the desired account.
Must at least have 'assume_role' key.
:return: dict containing fully built out Group.
"""
if not group.get('GroupName'):
raise MissingFieldException('Must include GroupName.')
group = modify(group, output='camelized')
_conn_from_args(group, conn)
return registry.build_out(flags, start_with=group, pass_datastructure=True, **conn) | [
"def",
"get_group",
"(",
"group",
",",
"flags",
"=",
"FLAGS",
".",
"BASE",
"|",
"FLAGS",
".",
"INLINE_POLICIES",
"|",
"FLAGS",
".",
"MANAGED_POLICIES",
",",
"*",
"*",
"conn",
")",
":",
"if",
"not",
"group",
".",
"get",
"(",
"'GroupName'",
")",
":",
"raise",
"MissingFieldException",
"(",
"'Must include GroupName.'",
")",
"group",
"=",
"modify",
"(",
"group",
",",
"output",
"=",
"'camelized'",
")",
"_conn_from_args",
"(",
"group",
",",
"conn",
")",
"return",
"registry",
".",
"build_out",
"(",
"flags",
",",
"start_with",
"=",
"group",
",",
"pass_datastructure",
"=",
"True",
",",
"*",
"*",
"conn",
")"
] | Orchestrates all the calls required to fully build out an IAM Group in the following format:
{
"Arn": ...,
"GroupName": ...,
"Path": ...,
"GroupId": ...,
"CreateDate": ..., # str
"InlinePolicies": ...,
"ManagedPolicies": ..., # These are just the names of the Managed Policies.
"Users": ..., # False by default -- these are just the names of the users.
"_version": 1
}
:param flags: By default, Users is disabled. This is somewhat expensive as it has to call the `get_group` call
multiple times.
:param group: dict MUST contain the GroupName and also a combination of either the ARN or the account_number.
:param output: Determines whether keys should be returned camelized or underscored.
:param conn: dict containing enough information to make a connection to the desired account.
Must at least have 'assume_role' key.
:return: dict containing fully built out Group. | [
"Orchestrates",
"all",
"the",
"calls",
"required",
"to",
"fully",
"build",
"out",
"an",
"IAM",
"Group",
"in",
"the",
"following",
"format",
":"
] | python | valid |
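A hedged call sketch; the account number, role name, region and FLAGS import path are placeholders/assumptions:

from cloudaux.orchestration.aws.iam.group import FLAGS, get_group

group = get_group(
    {'GroupName': 'Administrators', 'account_number': '123456789012'},
    flags=FLAGS.BASE | FLAGS.INLINE_POLICIES,
    assume_role='SecurityAudit',  # conn kwargs; 'assume_role' is required per the docstring
    region='us-east-1')
print(group['Arn'], group['InlinePolicies'])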
JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/pymavlink/generator/mavparse.py | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/generator/mavparse.py#L396-L427 | def merge_enums(xml):
'''merge enums between XML files'''
emap = {}
for x in xml:
newenums = []
for enum in x.enum:
if enum.name in emap:
emapitem = emap[enum.name]
# check for possible conflicting auto-assigned values after merge
if (emapitem.start_value <= enum.highest_value and emapitem.highest_value >= enum.start_value):
for entry in emapitem.entry:
# correct the value if necessary, but only if it was auto-assigned to begin with
if entry.value <= enum.highest_value and entry.autovalue == True:
entry.value = enum.highest_value + 1
enum.highest_value = entry.value
# merge the entries
emapitem.entry.extend(enum.entry)
if not emapitem.description:
emapitem.description = enum.description
print("Merged enum %s" % enum.name)
else:
newenums.append(enum)
emap[enum.name] = enum
x.enum = newenums
for e in emap:
# sort by value
emap[e].entry = sorted(emap[e].entry,
key=operator.attrgetter('value'),
reverse=False)
# add a ENUM_END
emap[e].entry.append(MAVEnumEntry("%s_ENUM_END" % emap[e].name,
emap[e].entry[-1].value+1, end_marker=True)) | [
"def",
"merge_enums",
"(",
"xml",
")",
":",
"emap",
"=",
"{",
"}",
"for",
"x",
"in",
"xml",
":",
"newenums",
"=",
"[",
"]",
"for",
"enum",
"in",
"x",
".",
"enum",
":",
"if",
"enum",
".",
"name",
"in",
"emap",
":",
"emapitem",
"=",
"emap",
"[",
"enum",
".",
"name",
"]",
"# check for possible conflicting auto-assigned values after merge",
"if",
"(",
"emapitem",
".",
"start_value",
"<=",
"enum",
".",
"highest_value",
"and",
"emapitem",
".",
"highest_value",
">=",
"enum",
".",
"start_value",
")",
":",
"for",
"entry",
"in",
"emapitem",
".",
"entry",
":",
"# correct the value if necessary, but only if it was auto-assigned to begin with",
"if",
"entry",
".",
"value",
"<=",
"enum",
".",
"highest_value",
"and",
"entry",
".",
"autovalue",
"==",
"True",
":",
"entry",
".",
"value",
"=",
"enum",
".",
"highest_value",
"+",
"1",
"enum",
".",
"highest_value",
"=",
"entry",
".",
"value",
"# merge the entries",
"emapitem",
".",
"entry",
".",
"extend",
"(",
"enum",
".",
"entry",
")",
"if",
"not",
"emapitem",
".",
"description",
":",
"emapitem",
".",
"description",
"=",
"enum",
".",
"description",
"print",
"(",
"\"Merged enum %s\"",
"%",
"enum",
".",
"name",
")",
"else",
":",
"newenums",
".",
"append",
"(",
"enum",
")",
"emap",
"[",
"enum",
".",
"name",
"]",
"=",
"enum",
"x",
".",
"enum",
"=",
"newenums",
"for",
"e",
"in",
"emap",
":",
"# sort by value",
"emap",
"[",
"e",
"]",
".",
"entry",
"=",
"sorted",
"(",
"emap",
"[",
"e",
"]",
".",
"entry",
",",
"key",
"=",
"operator",
".",
"attrgetter",
"(",
"'value'",
")",
",",
"reverse",
"=",
"False",
")",
"# add a ENUM_END",
"emap",
"[",
"e",
"]",
".",
"entry",
".",
"append",
"(",
"MAVEnumEntry",
"(",
"\"%s_ENUM_END\"",
"%",
"emap",
"[",
"e",
"]",
".",
"name",
",",
"emap",
"[",
"e",
"]",
".",
"entry",
"[",
"-",
"1",
"]",
".",
"value",
"+",
"1",
",",
"end_marker",
"=",
"True",
")",
")"
] | merge enums between XML files | [
"merge",
"enums",
"between",
"XML",
"files"
] | python | train |
supercoderz/pyflightdata | pyflightdata/flightdata.py | https://github.com/supercoderz/pyflightdata/blob/2caf9f429288f9a171893d1b8377d0c6244541cc/pyflightdata/flightdata.py#L556-L577 | def decode_metar(self, metar):
"""
Simple method that decodes a given metar string.
Args:
metar (str): The metar data
Returns:
The metar data in readable format
Example::
from pyflightdata import FlightData
f=FlightData()
f.decode_metar('WSSS 181030Z 04009KT 010V080 9999 FEW018TCU BKN300 29/22 Q1007 NOSIG')
"""
try:
from metar import Metar
except:
return "Unable to parse metars. Please install parser from https://github.com/tomp/python-metar."
m = Metar.Metar(metar)
return m.string() | [
"def",
"decode_metar",
"(",
"self",
",",
"metar",
")",
":",
"try",
":",
"from",
"metar",
"import",
"Metar",
"except",
":",
"return",
"\"Unable to parse metars. Please install parser from https://github.com/tomp/python-metar.\"",
"m",
"=",
"Metar",
".",
"Metar",
"(",
"metar",
")",
"return",
"m",
".",
"string",
"(",
")"
] | Simple method that decodes a given metar string.
Args:
metar (str): The metar data
Returns:
The metar data in readable format
Example::
from pyflightdata import FlightData
f=FlightData()
f.decode_metar('WSSS 181030Z 04009KT 010V080 9999 FEW018TCU BKN300 29/22 Q1007 NOSIG') | [
"Simple",
"method",
"that",
"decodes",
"a",
"given",
"metar",
"string",
"."
] | python | train |
SBRG/ssbio | ssbio/biopython/Bio/Struct/WWW/WHATIFXML.py | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/biopython/Bio/Struct/WWW/WHATIFXML.py#L81-L182 | def _parse(self):
"""
Parse atomic data of the XML file.
"""
atom_counter = 0
structure_build = self.structure_builder
residues = self._extract_residues()
cur_model = None
cur_chain = None
structure_build.init_seg(' ') # There is never a SEGID present
for r in residues:
# New model?
if cur_model != r['model']:
cur_model = r['model']
try:
structure_build.init_model(cur_model)
except PDBConstructionException, message:
self._handle_builder_exception(message, r)
# New chain?
if cur_chain != r['chain']:
cur_chain = r['chain']
try:
structure_build.init_chain(cur_chain)
except PDBConstructionException, message:
self._handle_builder_exception(message, r)
# Create residue
if r['name'] in AA_LIST: # Get residue type crudely since there is no HETATM / ATOM
hetero_flag = ' '
elif r['name'] == 'WAT' or r['name'] == 'HOH':
hetero_flag = 'W'
else:
hetero_flag = 'H'
# Some terminal atoms are added at residue 0. This residue has a small number of atoms.
# Protonated non-terminal glycine has 7 atoms. Any of these residues is smaller.
# HETATMs have only a couple of atoms (3 for water for example) and they are ok.
if (len(r['atoms']) >= 7) or (hetero_flag != " "):
try:
structure_build.init_residue(r['name'], hetero_flag, r['number'], r['icode'])
except PDBConstructionException, message:
self._handle_builder_exception(message, r)
# Create Atoms
for atom in r['atoms']:
a = self._parse_atom(atom)
if not sum(a['coord']): # e.g. HG of metal bound CYS coords are 0,0,0.
continue
try:
atom_counter += 1
# fullname = name; altloc is empty;
structure_build.init_atom(a['name'], a['coord'], a['bfactor'], a['occupancy'], ' ',
a['name'], atom_counter, a['element'], hetero_flag)
except PDBConstructionException, message:
self._handle_builder_exception(message, r)
elif len(r['atoms']) < 7: # Terminal Residues
for atom in r['atoms']:
a = self._parse_atom(atom)
if not sum(a['coord']): # e.g. HG of metal bound CYS coords are 0,0,0.
continue
atom_counter += 1
ter_atom = Atom(a['name'], a['coord'], a['bfactor'], a['occupancy'], ' ',
a['name'], atom_counter, a['element'], hetero_flag)
if a['name'] in N_TERMINAL_ATOMS:
inc_struct = self.structure_builder.get_structure()
for model in inc_struct:
for chain in model:
if chain.id == r['chain']:
for residue in chain: # Find First residue matching name
if residue.resname == r['name']:
residue.add(ter_atom)
break
elif a['name'] in C_TERMINAL_ATOMS:
inc_struct = self.structure_builder.get_structure()
c_ter = None
for model in inc_struct:
for chain in model:
if chain.id == r['chain']:
for residue in chain: # Find Last residue matching name
if residue.resname == r['name']:
c_ter = residue
if c_ter:
c_ter.add(ter_atom) | [
"def",
"_parse",
"(",
"self",
")",
":",
"atom_counter",
"=",
"0",
"structure_build",
"=",
"self",
".",
"structure_builder",
"residues",
"=",
"self",
".",
"_extract_residues",
"(",
")",
"cur_model",
"=",
"None",
"cur_chain",
"=",
"None",
"structure_build",
".",
"init_seg",
"(",
"' '",
")",
"# There is never a SEGID present",
"for",
"r",
"in",
"residues",
":",
"# New model?",
"if",
"cur_model",
"!=",
"r",
"[",
"'model'",
"]",
":",
"cur_model",
"=",
"r",
"[",
"'model'",
"]",
"try",
":",
"structure_build",
".",
"init_model",
"(",
"cur_model",
")",
"except",
"PDBConstructionException",
",",
"message",
":",
"self",
".",
"_handle_builder_exception",
"(",
"message",
",",
"r",
")",
"# New chain?",
"if",
"cur_chain",
"!=",
"r",
"[",
"'chain'",
"]",
":",
"cur_chain",
"=",
"r",
"[",
"'chain'",
"]",
"try",
":",
"structure_build",
".",
"init_chain",
"(",
"cur_chain",
")",
"except",
"PDBConstructionException",
",",
"message",
":",
"self",
".",
"_handle_builder_exception",
"(",
"message",
",",
"r",
")",
"# Create residue",
"if",
"r",
"[",
"'name'",
"]",
"in",
"AA_LIST",
":",
"# Get residue type crudely since there is no HETATM / ATOM",
"hetero_flag",
"=",
"' '",
"elif",
"r",
"[",
"'name'",
"]",
"==",
"'WAT'",
"or",
"r",
"[",
"'name'",
"]",
"==",
"'HOH'",
":",
"hetero_flag",
"=",
"'W'",
"else",
":",
"hetero_flag",
"=",
"'H'",
"# Some terminal atoms are added at residue 0. This residue has a small number of atoms.",
"# Protonated non-terminal glycine has 7 atoms. Any of these residues is smaller.",
"# HETATMs have only a couple of atoms (3 for water for example) and they are ok.",
"if",
"(",
"len",
"(",
"r",
"[",
"'atoms'",
"]",
")",
">=",
"7",
")",
"or",
"(",
"hetero_flag",
"!=",
"\" \"",
")",
":",
"try",
":",
"structure_build",
".",
"init_residue",
"(",
"r",
"[",
"'name'",
"]",
",",
"hetero_flag",
",",
"r",
"[",
"'number'",
"]",
",",
"r",
"[",
"'icode'",
"]",
")",
"except",
"PDBConstructionException",
",",
"message",
":",
"self",
".",
"_handle_builder_exception",
"(",
"message",
",",
"r",
")",
"# Create Atoms",
"for",
"atom",
"in",
"r",
"[",
"'atoms'",
"]",
":",
"a",
"=",
"self",
".",
"_parse_atom",
"(",
"atom",
")",
"if",
"not",
"sum",
"(",
"a",
"[",
"'coord'",
"]",
")",
":",
"# e.g. HG of metal bound CYS coords are 0,0,0.",
"continue",
"try",
":",
"atom_counter",
"+=",
"1",
"# fullname = name; altloc is empty;",
"structure_build",
".",
"init_atom",
"(",
"a",
"[",
"'name'",
"]",
",",
"a",
"[",
"'coord'",
"]",
",",
"a",
"[",
"'bfactor'",
"]",
",",
"a",
"[",
"'occupancy'",
"]",
",",
"' '",
",",
"a",
"[",
"'name'",
"]",
",",
"atom_counter",
",",
"a",
"[",
"'element'",
"]",
",",
"hetero_flag",
")",
"except",
"PDBConstructionException",
",",
"message",
":",
"self",
".",
"_handle_builder_exception",
"(",
"message",
",",
"r",
")",
"elif",
"len",
"(",
"r",
"[",
"'atoms'",
"]",
")",
"<",
"7",
":",
"# Terminal Residues",
"for",
"atom",
"in",
"r",
"[",
"'atoms'",
"]",
":",
"a",
"=",
"self",
".",
"_parse_atom",
"(",
"atom",
")",
"if",
"not",
"sum",
"(",
"a",
"[",
"'coord'",
"]",
")",
":",
"# e.g. HG of metal bound CYS coords are 0,0,0.",
"continue",
"atom_counter",
"+=",
"1",
"ter_atom",
"=",
"Atom",
"(",
"a",
"[",
"'name'",
"]",
",",
"a",
"[",
"'coord'",
"]",
",",
"a",
"[",
"'bfactor'",
"]",
",",
"a",
"[",
"'occupancy'",
"]",
",",
"' '",
",",
"a",
"[",
"'name'",
"]",
",",
"atom_counter",
",",
"a",
"[",
"'element'",
"]",
",",
"hetero_flag",
")",
"if",
"a",
"[",
"'name'",
"]",
"in",
"N_TERMINAL_ATOMS",
":",
"inc_struct",
"=",
"self",
".",
"structure_builder",
".",
"get_structure",
"(",
")",
"for",
"model",
"in",
"inc_struct",
":",
"for",
"chain",
"in",
"model",
":",
"if",
"chain",
".",
"id",
"==",
"r",
"[",
"'chain'",
"]",
":",
"for",
"residue",
"in",
"chain",
":",
"# Find First residue matching name",
"if",
"residue",
".",
"resname",
"==",
"r",
"[",
"'name'",
"]",
":",
"residue",
".",
"add",
"(",
"ter_atom",
")",
"break",
"elif",
"a",
"[",
"'name'",
"]",
"in",
"C_TERMINAL_ATOMS",
":",
"inc_struct",
"=",
"self",
".",
"structure_builder",
".",
"get_structure",
"(",
")",
"c_ter",
"=",
"None",
"for",
"model",
"in",
"inc_struct",
":",
"for",
"chain",
"in",
"model",
":",
"if",
"chain",
".",
"id",
"==",
"r",
"[",
"'chain'",
"]",
":",
"for",
"residue",
"in",
"chain",
":",
"# Find Last residue matching name",
"if",
"residue",
".",
"resname",
"==",
"r",
"[",
"'name'",
"]",
":",
"c_ter",
"=",
"residue",
"if",
"c_ter",
":",
"c_ter",
".",
"add",
"(",
"ter_atom",
")"
] | Parse atomic data of the XML file. | [
"Parse",
"atomic",
"data",
"of",
"the",
"XML",
"file",
"."
] | python | train |
sunlightlabs/django-locksmith | locksmith/hub/views.py | https://github.com/sunlightlabs/django-locksmith/blob/eef5b7c25404560aaad50b6e622594f89239b74b/locksmith/hub/views.py#L180-L208 | def profile(request):
'''
Viewing of signup details and editing of password
'''
context = {}
if request.method == 'POST':
form = PasswordChangeForm(request.user, request.POST)
if form.is_valid():
form.save()
messages.info(request, 'Password Changed.')
else:
form = PasswordChangeForm(request.user)
key = Key.objects.get(email=request.user.email)
#analytics
endpoint_q = key.reports.values('api__name', 'endpoint').annotate(calls=Sum('calls')).order_by('-calls')
endpoints = [{'endpoint':'.'.join((d['api__name'], d['endpoint'])),
'calls': d['calls']} for d in endpoint_q]
date_q = key.reports.values('date').annotate(calls=Sum('calls')).order_by('date')
context['endpoints'], context['endpoint_calls'] = _dictlist_to_lists(endpoints, 'endpoint', 'calls')
context['timeline'] = date_q
context['form'] = form
context['key'] = key
context['password_is_key'] = request.user.check_password(key.key)
return render_to_response('locksmith/profile.html', context,
context_instance=RequestContext(request)) | [
"def",
"profile",
"(",
"request",
")",
":",
"context",
"=",
"{",
"}",
"if",
"request",
".",
"method",
"==",
"'POST'",
":",
"form",
"=",
"PasswordChangeForm",
"(",
"request",
".",
"user",
",",
"request",
".",
"POST",
")",
"if",
"form",
".",
"is_valid",
"(",
")",
":",
"form",
".",
"save",
"(",
")",
"messages",
".",
"info",
"(",
"request",
",",
"'Password Changed.'",
")",
"else",
":",
"form",
"=",
"PasswordChangeForm",
"(",
"request",
".",
"user",
")",
"key",
"=",
"Key",
".",
"objects",
".",
"get",
"(",
"email",
"=",
"request",
".",
"user",
".",
"email",
")",
"#analytics",
"endpoint_q",
"=",
"key",
".",
"reports",
".",
"values",
"(",
"'api__name'",
",",
"'endpoint'",
")",
".",
"annotate",
"(",
"calls",
"=",
"Sum",
"(",
"'calls'",
")",
")",
".",
"order_by",
"(",
"'-calls'",
")",
"endpoints",
"=",
"[",
"{",
"'endpoint'",
":",
"'.'",
".",
"join",
"(",
"(",
"d",
"[",
"'api__name'",
"]",
",",
"d",
"[",
"'endpoint'",
"]",
")",
")",
",",
"'calls'",
":",
"d",
"[",
"'calls'",
"]",
"}",
"for",
"d",
"in",
"endpoint_q",
"]",
"date_q",
"=",
"key",
".",
"reports",
".",
"values",
"(",
"'date'",
")",
".",
"annotate",
"(",
"calls",
"=",
"Sum",
"(",
"'calls'",
")",
")",
".",
"order_by",
"(",
"'date'",
")",
"context",
"[",
"'endpoints'",
"]",
",",
"context",
"[",
"'endpoint_calls'",
"]",
"=",
"_dictlist_to_lists",
"(",
"endpoints",
",",
"'endpoint'",
",",
"'calls'",
")",
"context",
"[",
"'timeline'",
"]",
"=",
"date_q",
"context",
"[",
"'form'",
"]",
"=",
"form",
"context",
"[",
"'key'",
"]",
"=",
"key",
"context",
"[",
"'password_is_key'",
"]",
"=",
"request",
".",
"user",
".",
"check_password",
"(",
"key",
".",
"key",
")",
"return",
"render_to_response",
"(",
"'locksmith/profile.html'",
",",
"context",
",",
"context_instance",
"=",
"RequestContext",
"(",
"request",
")",
")"
] | Viewing of signup details and editing of password | [
"Viewing",
"of",
"signup",
"details",
"and",
"editing",
"of",
"password"
] | python | train |
dnanexus/dx-toolkit | src/python/dxpy/bindings/search.py | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/search.py#L73-L108 | def _find(api_method, query, limit, return_handler, first_page_size, **kwargs):
''' Takes an API method handler (dxpy.api.find*) and calls it with *query*,
and then wraps a generator around its output. Used by the methods below.
Note that this function may only be used for /system/find* methods.
'''
num_results = 0
if "limit" not in query:
query["limit"] = first_page_size
while True:
resp = api_method(query, **kwargs)
by_parent = resp.get('byParent')
descriptions = resp.get('describe')
def format_result(result):
if return_handler:
result = dxpy.get_handler(result['id'], project=result.get('project'))
if by_parent is not None:
return result, by_parent, descriptions
else:
return result
for i in resp["results"]:
if num_results == limit:
return
num_results += 1
yield format_result(i)
# set up next query
if resp["next"] is not None:
query["starting"] = resp["next"]
query["limit"] = min(query["limit"]*2, 1000)
else:
return | [
"def",
"_find",
"(",
"api_method",
",",
"query",
",",
"limit",
",",
"return_handler",
",",
"first_page_size",
",",
"*",
"*",
"kwargs",
")",
":",
"num_results",
"=",
"0",
"if",
"\"limit\"",
"not",
"in",
"query",
":",
"query",
"[",
"\"limit\"",
"]",
"=",
"first_page_size",
"while",
"True",
":",
"resp",
"=",
"api_method",
"(",
"query",
",",
"*",
"*",
"kwargs",
")",
"by_parent",
"=",
"resp",
".",
"get",
"(",
"'byParent'",
")",
"descriptions",
"=",
"resp",
".",
"get",
"(",
"'describe'",
")",
"def",
"format_result",
"(",
"result",
")",
":",
"if",
"return_handler",
":",
"result",
"=",
"dxpy",
".",
"get_handler",
"(",
"result",
"[",
"'id'",
"]",
",",
"project",
"=",
"result",
".",
"get",
"(",
"'project'",
")",
")",
"if",
"by_parent",
"is",
"not",
"None",
":",
"return",
"result",
",",
"by_parent",
",",
"descriptions",
"else",
":",
"return",
"result",
"for",
"i",
"in",
"resp",
"[",
"\"results\"",
"]",
":",
"if",
"num_results",
"==",
"limit",
":",
"return",
"num_results",
"+=",
"1",
"yield",
"format_result",
"(",
"i",
")",
"# set up next query",
"if",
"resp",
"[",
"\"next\"",
"]",
"is",
"not",
"None",
":",
"query",
"[",
"\"starting\"",
"]",
"=",
"resp",
"[",
"\"next\"",
"]",
"query",
"[",
"\"limit\"",
"]",
"=",
"min",
"(",
"query",
"[",
"\"limit\"",
"]",
"*",
"2",
",",
"1000",
")",
"else",
":",
"return"
] | Takes an API method handler (dxpy.api.find*) and calls it with *query*,
and then wraps a generator around its output. Used by the methods below.
Note that this function may only be used for /system/find* methods. | [
"Takes",
"an",
"API",
"method",
"handler",
"(",
"dxpy",
".",
"api",
".",
"find",
"*",
")",
"and",
"calls",
"it",
"with",
"*",
"query",
"*",
"and",
"then",
"wraps",
"a",
"generator",
"around",
"its",
"output",
".",
"Used",
"by",
"the",
"methods",
"below",
"."
] | python | train |
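The paging behaviour (page size starts at first_page_size, doubles up to 1000, and resp['next'] feeds back in as 'starting') can be exercised with a stub API method; everything below is a self-contained illustration, not part of dxpy:

def fake_find(query, **kwargs):
    start = query.get('starting', 0)
    limit = query['limit']
    total = 25
    results = [{'id': 'record-%d' % i} for i in range(start, min(start + limit, total))]
    next_page = start + limit if start + limit < total else None
    return {'results': results, 'next': next_page}

ids = [r['id'] for r in _find(fake_find, {}, limit=None, return_handler=False,
                              first_page_size=4)]
assert len(ids) == 25  # pages of 4, 8 and 16 requested results cover all 25 stub records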
cloudendpoints/endpoints-management-python | endpoints_management/control/vendor/py3/sched.py | https://github.com/cloudendpoints/endpoints-management-python/blob/ec3c4a330ae9d65738861ce6df4dd6c3cb9f7731/endpoints_management/control/vendor/py3/sched.py#L94-L103 | def cancel(self, event):
"""Remove an event from the queue.
This must be presented the ID as returned by enter().
If the event is not in the queue, this raises ValueError.
"""
with self._lock:
self._queue.remove(event)
heapq.heapify(self._queue) | [
"def",
"cancel",
"(",
"self",
",",
"event",
")",
":",
"with",
"self",
".",
"_lock",
":",
"self",
".",
"_queue",
".",
"remove",
"(",
"event",
")",
"heapq",
".",
"heapify",
"(",
"self",
".",
"_queue",
")"
] | Remove an event from the queue.
This must be presented the ID as returned by enter().
If the event is not in the queue, this raises ValueError. | [
"Remove",
"an",
"event",
"from",
"the",
"queue",
"."
] | python | train |
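A small sketch of scheduling and then cancelling an event; the scheduler constructor is assumed to match the standard-library sched interface this module is vendored from:

import time

s = scheduler(time.time, time.sleep)  # assumption: stdlib-style constructor
event = s.enter(30, 1, print, ('never runs',))
s.cancel(event)  # removes the event from the queue
# Calling s.cancel(event) again would raise ValueError, per the docstring.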
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/frontend/html/notebook/notebookmanager.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/html/notebook/notebookmanager.py#L268-L276 | def new_notebook(self):
"""Create a new notebook and return its notebook_id."""
path, name = self.increment_filename('Untitled')
notebook_id = self.new_notebook_id(name)
metadata = current.new_metadata(name=name)
nb = current.new_notebook(metadata=metadata)
with open(path,'w') as f:
current.write(nb, f, u'json')
return notebook_id | [
"def",
"new_notebook",
"(",
"self",
")",
":",
"path",
",",
"name",
"=",
"self",
".",
"increment_filename",
"(",
"'Untitled'",
")",
"notebook_id",
"=",
"self",
".",
"new_notebook_id",
"(",
"name",
")",
"metadata",
"=",
"current",
".",
"new_metadata",
"(",
"name",
"=",
"name",
")",
"nb",
"=",
"current",
".",
"new_notebook",
"(",
"metadata",
"=",
"metadata",
")",
"with",
"open",
"(",
"path",
",",
"'w'",
")",
"as",
"f",
":",
"current",
".",
"write",
"(",
"nb",
",",
"f",
",",
"u'json'",
")",
"return",
"notebook_id"
] | Create a new notebook and return its notebook_id. | [
"Create",
"a",
"new",
"notebook",
"and",
"return",
"its",
"notebook_id",
"."
] | python | test |
shoebot/shoebot | shoebot/grammar/nodebox.py | https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/shoebot/grammar/nodebox.py#L432-L439 | def transform(self, mode=None):
'''
Set the current transform mode.
:param mode: CENTER or CORNER'''
if mode:
self._canvas.mode = mode
return self._canvas.mode | [
"def",
"transform",
"(",
"self",
",",
"mode",
"=",
"None",
")",
":",
"if",
"mode",
":",
"self",
".",
"_canvas",
".",
"mode",
"=",
"mode",
"return",
"self",
".",
"_canvas",
".",
"mode"
] | Set the current transform mode.
:param mode: CENTER or CORNER | [
"Set",
"the",
"current",
"transform",
"mode",
"."
] | python | valid |
spacetelescope/stsci.tools | lib/stsci/tools/vtor_checks.py | https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/vtor_checks.py#L18-L40 | def sigStrToKwArgsDict(checkFuncSig):
""" Take a check function signature (string), and parse it to get a dict
of the keyword args and their values. """
p1 = checkFuncSig.find('(')
p2 = checkFuncSig.rfind(')')
assert p1 > 0 and p2 > 0 and p2 > p1, "Invalid signature: "+checkFuncSig
argParts = irafutils.csvSplit(checkFuncSig[p1+1:p2], ',', True)
argParts = [x.strip() for x in argParts]
retval = {}
for argPair in argParts:
argSpl = argPair.split('=', 1)
if len(argSpl) > 1:
if argSpl[0] in retval:
if isinstance(retval[argSpl[0]], (list,tuple)):
retval[argSpl[0]]+=(irafutils.stripQuotes(argSpl[1]),) # 3rd
else: # 2nd in, so convert to tuple
retval[argSpl[0]] = (retval[argSpl[0]],
irafutils.stripQuotes(argSpl[1]),)
else:
retval[argSpl[0]] = irafutils.stripQuotes(argSpl[1]) # 1st in
else:
retval[argSpl[0]] = None # eg. found "triggers=, max=6, ..."
return retval | [
"def",
"sigStrToKwArgsDict",
"(",
"checkFuncSig",
")",
":",
"p1",
"=",
"checkFuncSig",
".",
"find",
"(",
"'('",
")",
"p2",
"=",
"checkFuncSig",
".",
"rfind",
"(",
"')'",
")",
"assert",
"p1",
">",
"0",
"and",
"p2",
">",
"0",
"and",
"p2",
">",
"p1",
",",
"\"Invalid signature: \"",
"+",
"checkFuncSig",
"argParts",
"=",
"irafutils",
".",
"csvSplit",
"(",
"checkFuncSig",
"[",
"p1",
"+",
"1",
":",
"p2",
"]",
",",
"','",
",",
"True",
")",
"argParts",
"=",
"[",
"x",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"argParts",
"]",
"retval",
"=",
"{",
"}",
"for",
"argPair",
"in",
"argParts",
":",
"argSpl",
"=",
"argPair",
".",
"split",
"(",
"'='",
",",
"1",
")",
"if",
"len",
"(",
"argSpl",
")",
">",
"1",
":",
"if",
"argSpl",
"[",
"0",
"]",
"in",
"retval",
":",
"if",
"isinstance",
"(",
"retval",
"[",
"argSpl",
"[",
"0",
"]",
"]",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"retval",
"[",
"argSpl",
"[",
"0",
"]",
"]",
"+=",
"(",
"irafutils",
".",
"stripQuotes",
"(",
"argSpl",
"[",
"1",
"]",
")",
",",
")",
"# 3rd",
"else",
":",
"# 2nd in, so convert to tuple",
"retval",
"[",
"argSpl",
"[",
"0",
"]",
"]",
"=",
"(",
"retval",
"[",
"argSpl",
"[",
"0",
"]",
"]",
",",
"irafutils",
".",
"stripQuotes",
"(",
"argSpl",
"[",
"1",
"]",
")",
",",
")",
"else",
":",
"retval",
"[",
"argSpl",
"[",
"0",
"]",
"]",
"=",
"irafutils",
".",
"stripQuotes",
"(",
"argSpl",
"[",
"1",
"]",
")",
"# 1st in",
"else",
":",
"retval",
"[",
"argSpl",
"[",
"0",
"]",
"]",
"=",
"None",
"# eg. found \"triggers=, max=6, ...\"",
"return",
"retval"
] | Take a check function signature (string), and parse it to get a dict
of the keyword args and their values. | [
"Take",
"a",
"check",
"function",
"signature",
"(",
"string",
")",
"and",
"parse",
"it",
"to",
"get",
"a",
"dict",
"of",
"the",
"keyword",
"args",
"and",
"their",
"values",
"."
] | python | train |
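A worked example derived directly from the code above: values come back as strings with surrounding quotes stripped, and a repeated keyword collapses into a tuple:

sig = "checkChoice(triggers='a', triggers='b', min=0, max=9)"
print(sigStrToKwArgsDict(sig))
# -> {'triggers': ('a', 'b'), 'min': '0', 'max': '9'}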
cocagne/txdbus | txdbus/marshal.py | https://github.com/cocagne/txdbus/blob/eb424918764b7b93eecd2a4e2e5c2d0b2944407b/txdbus/marshal.py#L221-L240 | def validateMemberName(n):
"""
Verifies that the supplied name is a valid DBus member name. Throws
an L{error.MarshallingError} if the format is invalid
@type n: C{string}
@param n: A DBus member name
"""
try:
if len(n) < 1:
raise Exception('Name must be at least one byte in length')
if len(n) > 255:
raise Exception('Name exceeds maximum length of 255')
if n[0].isdigit():
raise Exception('Names may not begin with a digit')
if mbr_re.search(n):
raise Exception(
'Names contains a character outside the set [A-Za-z0-9_]')
except Exception as e:
raise MarshallingError('Invalid member name "%s": %s' % (n, str(e))) | [
"def",
"validateMemberName",
"(",
"n",
")",
":",
"try",
":",
"if",
"len",
"(",
"n",
")",
"<",
"1",
":",
"raise",
"Exception",
"(",
"'Name must be at least one byte in length'",
")",
"if",
"len",
"(",
"n",
")",
">",
"255",
":",
"raise",
"Exception",
"(",
"'Name exceeds maximum length of 255'",
")",
"if",
"n",
"[",
"0",
"]",
".",
"isdigit",
"(",
")",
":",
"raise",
"Exception",
"(",
"'Names may not begin with a digit'",
")",
"if",
"mbr_re",
".",
"search",
"(",
"n",
")",
":",
"raise",
"Exception",
"(",
"'Names contains a character outside the set [A-Za-z0-9_]'",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"MarshallingError",
"(",
"'Invalid member name \"%s\": %s'",
"%",
"(",
"n",
",",
"str",
"(",
"e",
")",
")",
")"
] | Verifies that the supplied name is a valid DBus member name. Throws
an L{error.MarshallingError} if the format is invalid
@type n: C{string}
@param n: A DBus member name | [
"Verifies",
"that",
"the",
"supplied",
"name",
"is",
"a",
"valid",
"DBus",
"member",
"name",
".",
"Throws",
"an",
"L",
"{",
"error",
".",
"MarshallingError",
"}",
"if",
"the",
"format",
"is",
"invalid"
] | python | train |
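Examples that follow directly from the rules enforced above (1-255 bytes, no leading digit, only characters from [A-Za-z0-9_]); the exception raised is MarshallingError:

validateMemberName('GetAll')      # passes silently
validateMemberName('get_all_2')   # passes silently
for bad in ('', '2ndTry', 'Get-All', 'a' * 256):
    try:
        validateMemberName(bad)
    except Exception as exc:  # MarshallingError in this module
        print(exc)  # e.g. Invalid member name "2ndTry": Names may not begin with a digit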
pytroll/satpy | satpy/readers/eps_l1b.py | https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/eps_l1b.py#L291-L394 | def get_dataset(self, key, info):
"""Get calibrated channel data."""
if self.mdrs is None:
self._read_all(self.filename)
if key.name in ['longitude', 'latitude']:
lons, lats = self.get_full_lonlats()
if key.name == 'longitude':
dataset = create_xarray(lons)
else:
dataset = create_xarray(lats)
elif key.name in ['solar_zenith_angle', 'solar_azimuth_angle',
'satellite_zenith_angle', 'satellite_azimuth_angle']:
sun_azi, sun_zen, sat_azi, sat_zen = self.get_full_angles()
if key.name == 'solar_zenith_angle':
dataset = create_xarray(sun_zen)
elif key.name == 'solar_azimuth_angle':
dataset = create_xarray(sun_azi)
if key.name == 'satellite_zenith_angle':
dataset = create_xarray(sat_zen)
elif key.name == 'satellite_azimuth_angle':
dataset = create_xarray(sat_azi)
else:
mask = None
if key.calibration == 'counts':
raise ValueError('calibration=counts is not supported! ' +
'This reader cannot return counts')
elif key.calibration not in ['reflectance', 'brightness_temperature', 'radiance']:
raise ValueError('calibration type ' + str(key.calibration) +
' is not supported!')
if key.name in ['3A', '3a'] and self.three_a_mask is None:
self.three_a_mask = ((self["FRAME_INDICATOR"] & 2 ** 16) != 2 ** 16)
if key.name in ['3B', '3b'] and self.three_b_mask is None:
self.three_b_mask = ((self["FRAME_INDICATOR"] & 2 ** 16) != 0)
if key.name not in ["1", "2", "3a", "3A", "3b", "3B", "4", "5"]:
LOG.info("Can't load channel in eps_l1b: " + str(key.name))
return
if key.name == "1":
if key.calibration == 'reflectance':
array = radiance_to_refl(self["SCENE_RADIANCES"][:, 0, :],
self["CH1_SOLAR_FILTERED_IRRADIANCE"])
else:
array = self["SCENE_RADIANCES"][:, 0, :]
if key.name == "2":
if key.calibration == 'reflectance':
array = radiance_to_refl(self["SCENE_RADIANCES"][:, 1, :],
self["CH2_SOLAR_FILTERED_IRRADIANCE"])
else:
array = self["SCENE_RADIANCES"][:, 1, :]
if key.name.lower() == "3a":
if key.calibration == 'reflectance':
array = radiance_to_refl(self["SCENE_RADIANCES"][:, 2, :],
self["CH3A_SOLAR_FILTERED_IRRADIANCE"])
else:
array = self["SCENE_RADIANCES"][:, 2, :]
mask = np.empty(array.shape, dtype=bool)
mask[:, :] = self.three_a_mask[:, np.newaxis]
if key.name.lower() == "3b":
if key.calibration == 'brightness_temperature':
array = radiance_to_bt(self["SCENE_RADIANCES"][:, 2, :],
self["CH3B_CENTRAL_WAVENUMBER"],
self["CH3B_CONSTANT1"],
self["CH3B_CONSTANT2_SLOPE"])
else:
array = self["SCENE_RADIANCES"][:, 2, :]
mask = np.empty(array.shape, dtype=bool)
mask[:, :] = self.three_b_mask[:, np.newaxis]
if key.name == "4":
if key.calibration == 'brightness_temperature':
array = radiance_to_bt(self["SCENE_RADIANCES"][:, 3, :],
self["CH4_CENTRAL_WAVENUMBER"],
self["CH4_CONSTANT1"],
self["CH4_CONSTANT2_SLOPE"])
else:
array = self["SCENE_RADIANCES"][:, 3, :]
if key.name == "5":
if key.calibration == 'brightness_temperature':
array = radiance_to_bt(self["SCENE_RADIANCES"][:, 4, :],
self["CH5_CENTRAL_WAVENUMBER"],
self["CH5_CONSTANT1"],
self["CH5_CONSTANT2_SLOPE"])
else:
array = self["SCENE_RADIANCES"][:, 4, :]
dataset = create_xarray(array)
if mask is not None:
dataset = dataset.where(~mask)
dataset.attrs['platform_name'] = self.platform_name
dataset.attrs['sensor'] = self.sensor_name
dataset.attrs.update(info)
dataset.attrs.update(key.to_dict())
return dataset | [
"def",
"get_dataset",
"(",
"self",
",",
"key",
",",
"info",
")",
":",
"if",
"self",
".",
"mdrs",
"is",
"None",
":",
"self",
".",
"_read_all",
"(",
"self",
".",
"filename",
")",
"if",
"key",
".",
"name",
"in",
"[",
"'longitude'",
",",
"'latitude'",
"]",
":",
"lons",
",",
"lats",
"=",
"self",
".",
"get_full_lonlats",
"(",
")",
"if",
"key",
".",
"name",
"==",
"'longitude'",
":",
"dataset",
"=",
"create_xarray",
"(",
"lons",
")",
"else",
":",
"dataset",
"=",
"create_xarray",
"(",
"lats",
")",
"elif",
"key",
".",
"name",
"in",
"[",
"'solar_zenith_angle'",
",",
"'solar_azimuth_angle'",
",",
"'satellite_zenith_angle'",
",",
"'satellite_azimuth_angle'",
"]",
":",
"sun_azi",
",",
"sun_zen",
",",
"sat_azi",
",",
"sat_zen",
"=",
"self",
".",
"get_full_angles",
"(",
")",
"if",
"key",
".",
"name",
"==",
"'solar_zenith_angle'",
":",
"dataset",
"=",
"create_xarray",
"(",
"sun_zen",
")",
"elif",
"key",
".",
"name",
"==",
"'solar_azimuth_angle'",
":",
"dataset",
"=",
"create_xarray",
"(",
"sun_azi",
")",
"if",
"key",
".",
"name",
"==",
"'satellite_zenith_angle'",
":",
"dataset",
"=",
"create_xarray",
"(",
"sat_zen",
")",
"elif",
"key",
".",
"name",
"==",
"'satellite_azimuth_angle'",
":",
"dataset",
"=",
"create_xarray",
"(",
"sat_azi",
")",
"else",
":",
"mask",
"=",
"None",
"if",
"key",
".",
"calibration",
"==",
"'counts'",
":",
"raise",
"ValueError",
"(",
"'calibration=counts is not supported! '",
"+",
"'This reader cannot return counts'",
")",
"elif",
"key",
".",
"calibration",
"not",
"in",
"[",
"'reflectance'",
",",
"'brightness_temperature'",
",",
"'radiance'",
"]",
":",
"raise",
"ValueError",
"(",
"'calibration type '",
"+",
"str",
"(",
"key",
".",
"calibration",
")",
"+",
"' is not supported!'",
")",
"if",
"key",
".",
"name",
"in",
"[",
"'3A'",
",",
"'3a'",
"]",
"and",
"self",
".",
"three_a_mask",
"is",
"None",
":",
"self",
".",
"three_a_mask",
"=",
"(",
"(",
"self",
"[",
"\"FRAME_INDICATOR\"",
"]",
"&",
"2",
"**",
"16",
")",
"!=",
"2",
"**",
"16",
")",
"if",
"key",
".",
"name",
"in",
"[",
"'3B'",
",",
"'3b'",
"]",
"and",
"self",
".",
"three_b_mask",
"is",
"None",
":",
"self",
".",
"three_b_mask",
"=",
"(",
"(",
"self",
"[",
"\"FRAME_INDICATOR\"",
"]",
"&",
"2",
"**",
"16",
")",
"!=",
"0",
")",
"if",
"key",
".",
"name",
"not",
"in",
"[",
"\"1\"",
",",
"\"2\"",
",",
"\"3a\"",
",",
"\"3A\"",
",",
"\"3b\"",
",",
"\"3B\"",
",",
"\"4\"",
",",
"\"5\"",
"]",
":",
"LOG",
".",
"info",
"(",
"\"Can't load channel in eps_l1b: \"",
"+",
"str",
"(",
"key",
".",
"name",
")",
")",
"return",
"if",
"key",
".",
"name",
"==",
"\"1\"",
":",
"if",
"key",
".",
"calibration",
"==",
"'reflectance'",
":",
"array",
"=",
"radiance_to_refl",
"(",
"self",
"[",
"\"SCENE_RADIANCES\"",
"]",
"[",
":",
",",
"0",
",",
":",
"]",
",",
"self",
"[",
"\"CH1_SOLAR_FILTERED_IRRADIANCE\"",
"]",
")",
"else",
":",
"array",
"=",
"self",
"[",
"\"SCENE_RADIANCES\"",
"]",
"[",
":",
",",
"0",
",",
":",
"]",
"if",
"key",
".",
"name",
"==",
"\"2\"",
":",
"if",
"key",
".",
"calibration",
"==",
"'reflectance'",
":",
"array",
"=",
"radiance_to_refl",
"(",
"self",
"[",
"\"SCENE_RADIANCES\"",
"]",
"[",
":",
",",
"1",
",",
":",
"]",
",",
"self",
"[",
"\"CH2_SOLAR_FILTERED_IRRADIANCE\"",
"]",
")",
"else",
":",
"array",
"=",
"self",
"[",
"\"SCENE_RADIANCES\"",
"]",
"[",
":",
",",
"1",
",",
":",
"]",
"if",
"key",
".",
"name",
".",
"lower",
"(",
")",
"==",
"\"3a\"",
":",
"if",
"key",
".",
"calibration",
"==",
"'reflectance'",
":",
"array",
"=",
"radiance_to_refl",
"(",
"self",
"[",
"\"SCENE_RADIANCES\"",
"]",
"[",
":",
",",
"2",
",",
":",
"]",
",",
"self",
"[",
"\"CH3A_SOLAR_FILTERED_IRRADIANCE\"",
"]",
")",
"else",
":",
"array",
"=",
"self",
"[",
"\"SCENE_RADIANCES\"",
"]",
"[",
":",
",",
"2",
",",
":",
"]",
"mask",
"=",
"np",
".",
"empty",
"(",
"array",
".",
"shape",
",",
"dtype",
"=",
"bool",
")",
"mask",
"[",
":",
",",
":",
"]",
"=",
"self",
".",
"three_a_mask",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"if",
"key",
".",
"name",
".",
"lower",
"(",
")",
"==",
"\"3b\"",
":",
"if",
"key",
".",
"calibration",
"==",
"'brightness_temperature'",
":",
"array",
"=",
"radiance_to_bt",
"(",
"self",
"[",
"\"SCENE_RADIANCES\"",
"]",
"[",
":",
",",
"2",
",",
":",
"]",
",",
"self",
"[",
"\"CH3B_CENTRAL_WAVENUMBER\"",
"]",
",",
"self",
"[",
"\"CH3B_CONSTANT1\"",
"]",
",",
"self",
"[",
"\"CH3B_CONSTANT2_SLOPE\"",
"]",
")",
"else",
":",
"array",
"=",
"self",
"[",
"\"SCENE_RADIANCES\"",
"]",
"[",
":",
",",
"2",
",",
":",
"]",
"mask",
"=",
"np",
".",
"empty",
"(",
"array",
".",
"shape",
",",
"dtype",
"=",
"bool",
")",
"mask",
"[",
":",
",",
":",
"]",
"=",
"self",
".",
"three_b_mask",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"if",
"key",
".",
"name",
"==",
"\"4\"",
":",
"if",
"key",
".",
"calibration",
"==",
"'brightness_temperature'",
":",
"array",
"=",
"radiance_to_bt",
"(",
"self",
"[",
"\"SCENE_RADIANCES\"",
"]",
"[",
":",
",",
"3",
",",
":",
"]",
",",
"self",
"[",
"\"CH4_CENTRAL_WAVENUMBER\"",
"]",
",",
"self",
"[",
"\"CH4_CONSTANT1\"",
"]",
",",
"self",
"[",
"\"CH4_CONSTANT2_SLOPE\"",
"]",
")",
"else",
":",
"array",
"=",
"self",
"[",
"\"SCENE_RADIANCES\"",
"]",
"[",
":",
",",
"3",
",",
":",
"]",
"if",
"key",
".",
"name",
"==",
"\"5\"",
":",
"if",
"key",
".",
"calibration",
"==",
"'brightness_temperature'",
":",
"array",
"=",
"radiance_to_bt",
"(",
"self",
"[",
"\"SCENE_RADIANCES\"",
"]",
"[",
":",
",",
"4",
",",
":",
"]",
",",
"self",
"[",
"\"CH5_CENTRAL_WAVENUMBER\"",
"]",
",",
"self",
"[",
"\"CH5_CONSTANT1\"",
"]",
",",
"self",
"[",
"\"CH5_CONSTANT2_SLOPE\"",
"]",
")",
"else",
":",
"array",
"=",
"self",
"[",
"\"SCENE_RADIANCES\"",
"]",
"[",
":",
",",
"4",
",",
":",
"]",
"dataset",
"=",
"create_xarray",
"(",
"array",
")",
"if",
"mask",
"is",
"not",
"None",
":",
"dataset",
"=",
"dataset",
".",
"where",
"(",
"~",
"mask",
")",
"dataset",
".",
"attrs",
"[",
"'platform_name'",
"]",
"=",
"self",
".",
"platform_name",
"dataset",
".",
"attrs",
"[",
"'sensor'",
"]",
"=",
"self",
".",
"sensor_name",
"dataset",
".",
"attrs",
".",
"update",
"(",
"info",
")",
"dataset",
".",
"attrs",
".",
"update",
"(",
"key",
".",
"to_dict",
"(",
")",
")",
"return",
"dataset"
] | Get calibrated channel data. | [
"Get",
"calibrated",
"channel",
"data",
"."
] | python | train |
saltstack/salt | salt/states/win_iis.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/win_iis.py#L783-L871 | def set_app(name, site, settings=None):
# pylint: disable=anomalous-backslash-in-string
'''
.. versionadded:: 2017.7.0
Set the value of the setting for an IIS web application.
.. note::
        This function only configures an existing app. Params are case sensitive.
:param str name: The IIS application.
:param str site: The IIS site name.
    :param dict settings: A dictionary of the setting names and their values.
Available settings:
- ``physicalPath`` - The physical path of the webapp
- ``applicationPool`` - The application pool for the webapp
- ``userName`` "connectAs" user
- ``password`` "connectAs" password for user
:rtype: bool
Example of usage:
.. code-block:: yaml
site0-webapp-setting:
win_iis.set_app:
- name: app0
- site: Default Web Site
- settings:
userName: domain\\user
password: pass
physicalPath: c:\inetpub\wwwroot
applicationPool: appPool0
'''
# pylint: enable=anomalous-backslash-in-string
ret = {'name': name,
'changes': {},
'comment': str(),
'result': None}
if not settings:
ret['comment'] = 'No settings to change provided.'
ret['result'] = True
return ret
ret_settings = {
'changes': {},
'failures': {},
}
current_settings = __salt__['win_iis.get_webapp_settings'](name=name,
site=site,
settings=settings.keys())
for setting in settings:
if str(settings[setting]) != str(current_settings[setting]):
ret_settings['changes'][setting] = {'old': current_settings[setting],
'new': settings[setting]}
if not ret_settings['changes']:
ret['comment'] = 'Settings already contain the provided values.'
ret['result'] = True
return ret
elif __opts__['test']:
ret['comment'] = 'Settings will be changed.'
ret['changes'] = ret_settings
return ret
__salt__['win_iis.set_webapp_settings'](name=name, site=site,
settings=settings)
new_settings = __salt__['win_iis.get_webapp_settings'](name=name, site=site, settings=settings.keys())
for setting in settings:
if str(settings[setting]) != str(new_settings[setting]):
ret_settings['failures'][setting] = {'old': current_settings[setting],
'new': new_settings[setting]}
ret_settings['changes'].pop(setting, None)
if ret_settings['failures']:
ret['comment'] = 'Some settings failed to change.'
ret['changes'] = ret_settings
ret['result'] = False
else:
ret['comment'] = 'Set settings to contain the provided values.'
ret['changes'] = ret_settings['changes']
ret['result'] = True
return ret | [
"def",
"set_app",
"(",
"name",
",",
"site",
",",
"settings",
"=",
"None",
")",
":",
"# pylint: disable=anomalous-backslash-in-string",
"# pylint: enable=anomalous-backslash-in-string",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'changes'",
":",
"{",
"}",
",",
"'comment'",
":",
"str",
"(",
")",
",",
"'result'",
":",
"None",
"}",
"if",
"not",
"settings",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'No settings to change provided.'",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"return",
"ret",
"ret_settings",
"=",
"{",
"'changes'",
":",
"{",
"}",
",",
"'failures'",
":",
"{",
"}",
",",
"}",
"current_settings",
"=",
"__salt__",
"[",
"'win_iis.get_webapp_settings'",
"]",
"(",
"name",
"=",
"name",
",",
"site",
"=",
"site",
",",
"settings",
"=",
"settings",
".",
"keys",
"(",
")",
")",
"for",
"setting",
"in",
"settings",
":",
"if",
"str",
"(",
"settings",
"[",
"setting",
"]",
")",
"!=",
"str",
"(",
"current_settings",
"[",
"setting",
"]",
")",
":",
"ret_settings",
"[",
"'changes'",
"]",
"[",
"setting",
"]",
"=",
"{",
"'old'",
":",
"current_settings",
"[",
"setting",
"]",
",",
"'new'",
":",
"settings",
"[",
"setting",
"]",
"}",
"if",
"not",
"ret_settings",
"[",
"'changes'",
"]",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Settings already contain the provided values.'",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"return",
"ret",
"elif",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Settings will be changed.'",
"ret",
"[",
"'changes'",
"]",
"=",
"ret_settings",
"return",
"ret",
"__salt__",
"[",
"'win_iis.set_webapp_settings'",
"]",
"(",
"name",
"=",
"name",
",",
"site",
"=",
"site",
",",
"settings",
"=",
"settings",
")",
"new_settings",
"=",
"__salt__",
"[",
"'win_iis.get_webapp_settings'",
"]",
"(",
"name",
"=",
"name",
",",
"site",
"=",
"site",
",",
"settings",
"=",
"settings",
".",
"keys",
"(",
")",
")",
"for",
"setting",
"in",
"settings",
":",
"if",
"str",
"(",
"settings",
"[",
"setting",
"]",
")",
"!=",
"str",
"(",
"new_settings",
"[",
"setting",
"]",
")",
":",
"ret_settings",
"[",
"'failures'",
"]",
"[",
"setting",
"]",
"=",
"{",
"'old'",
":",
"current_settings",
"[",
"setting",
"]",
",",
"'new'",
":",
"new_settings",
"[",
"setting",
"]",
"}",
"ret_settings",
"[",
"'changes'",
"]",
".",
"pop",
"(",
"setting",
",",
"None",
")",
"if",
"ret_settings",
"[",
"'failures'",
"]",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Some settings failed to change.'",
"ret",
"[",
"'changes'",
"]",
"=",
"ret_settings",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"else",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Set settings to contain the provided values.'",
"ret",
"[",
"'changes'",
"]",
"=",
"ret_settings",
"[",
"'changes'",
"]",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"return",
"ret"
] | .. versionadded:: 2017.7.0
Set the value of the setting for an IIS web application.
.. note::
This function only configures existing app. Params are case sensitive.
:param str name: The IIS application.
:param str site: The IIS site name.
:param str settings: A dictionary of the setting names and their values.
Available settings:
- ``physicalPath`` - The physical path of the webapp
- ``applicationPool`` - The application pool for the webapp
- ``userName`` "connectAs" user
- ``password`` "connectAs" password for user
:rtype: bool
Example of usage:
.. code-block:: yaml
site0-webapp-setting:
win_iis.set_app:
- name: app0
- site: Default Web Site
- settings:
userName: domain\\user
password: pass
physicalPath: c:\inetpub\wwwroot
applicationPool: appPool0 | [
"..",
"versionadded",
"::",
"2017",
".",
"7",
".",
"0"
] | python | train |
authomatic/authomatic | authomatic/providers/oauth2.py | https://github.com/authomatic/authomatic/blob/90a9ce60cc405ae8a2bf5c3713acd5d78579a04e/authomatic/providers/oauth2.py#L115-L215 | def create_request_elements(
cls, request_type, credentials, url, method='GET', params=None,
headers=None, body='', secret=None, redirect_uri='', scope='',
csrf='', user_state=''
):
"""
Creates |oauth2| request elements.
"""
headers = headers or {}
params = params or {}
consumer_key = credentials.consumer_key or ''
consumer_secret = credentials.consumer_secret or ''
token = credentials.token or ''
refresh_token = credentials.refresh_token or credentials.token or ''
# Separate url base and query parameters.
url, base_params = cls._split_url(url)
# Add params extracted from URL.
params.update(dict(base_params))
if request_type == cls.USER_AUTHORIZATION_REQUEST_TYPE:
# User authorization request.
# TODO: Raise error for specific message for each missing argument.
if consumer_key and redirect_uri and (
csrf or not cls.supports_csrf_protection):
params['client_id'] = consumer_key
params['redirect_uri'] = redirect_uri
params['scope'] = scope
if cls.supports_user_state:
params['state'] = base64.urlsafe_b64encode(
json.dumps(
{"csrf": csrf, "user_state": user_state}
).encode('utf-8')
)
else:
params['state'] = csrf
params['response_type'] = 'code'
# Add authorization header
headers.update(cls._authorization_header(credentials))
else:
raise OAuth2Error(
'Credentials with valid consumer_key and arguments '
'redirect_uri, scope and state are required to create '
'OAuth 2.0 user authorization request elements!')
elif request_type == cls.ACCESS_TOKEN_REQUEST_TYPE:
# Access token request.
if consumer_key and consumer_secret:
params['code'] = token
params['client_id'] = consumer_key
params['client_secret'] = consumer_secret
params['redirect_uri'] = redirect_uri
params['grant_type'] = 'authorization_code'
# TODO: Check whether all providers accept it
headers.update(cls._authorization_header(credentials))
else:
raise OAuth2Error(
'Credentials with valid token, consumer_key, '
'consumer_secret and argument redirect_uri are required '
'to create OAuth 2.0 access token request elements!')
elif request_type == cls.REFRESH_TOKEN_REQUEST_TYPE:
# Refresh access token request.
if refresh_token and consumer_key and consumer_secret:
params['refresh_token'] = refresh_token
params['client_id'] = consumer_key
params['client_secret'] = consumer_secret
params['grant_type'] = 'refresh_token'
else:
raise OAuth2Error(
'Credentials with valid refresh_token, consumer_key, '
'consumer_secret are required to create OAuth 2.0 '
'refresh token request elements!')
elif request_type == cls.PROTECTED_RESOURCE_REQUEST_TYPE:
# Protected resource request.
# Add Authorization header. See:
# http://tools.ietf.org/html/rfc6749#section-7.1
if credentials.token_type == cls.BEARER:
# http://tools.ietf.org/html/rfc6750#section-2.1
headers.update(
{'Authorization': 'Bearer {0}'.format(credentials.token)})
elif token:
params['access_token'] = token
else:
raise OAuth2Error(
'Credentials with valid token are required to create '
'OAuth 2.0 protected resources request elements!')
request_elements = core.RequestElements(
url, method, params, headers, body)
return cls._x_request_elements_filter(
request_type, request_elements, credentials) | [
"def",
"create_request_elements",
"(",
"cls",
",",
"request_type",
",",
"credentials",
",",
"url",
",",
"method",
"=",
"'GET'",
",",
"params",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"body",
"=",
"''",
",",
"secret",
"=",
"None",
",",
"redirect_uri",
"=",
"''",
",",
"scope",
"=",
"''",
",",
"csrf",
"=",
"''",
",",
"user_state",
"=",
"''",
")",
":",
"headers",
"=",
"headers",
"or",
"{",
"}",
"params",
"=",
"params",
"or",
"{",
"}",
"consumer_key",
"=",
"credentials",
".",
"consumer_key",
"or",
"''",
"consumer_secret",
"=",
"credentials",
".",
"consumer_secret",
"or",
"''",
"token",
"=",
"credentials",
".",
"token",
"or",
"''",
"refresh_token",
"=",
"credentials",
".",
"refresh_token",
"or",
"credentials",
".",
"token",
"or",
"''",
"# Separate url base and query parameters.",
"url",
",",
"base_params",
"=",
"cls",
".",
"_split_url",
"(",
"url",
")",
"# Add params extracted from URL.",
"params",
".",
"update",
"(",
"dict",
"(",
"base_params",
")",
")",
"if",
"request_type",
"==",
"cls",
".",
"USER_AUTHORIZATION_REQUEST_TYPE",
":",
"# User authorization request.",
"# TODO: Raise error for specific message for each missing argument.",
"if",
"consumer_key",
"and",
"redirect_uri",
"and",
"(",
"csrf",
"or",
"not",
"cls",
".",
"supports_csrf_protection",
")",
":",
"params",
"[",
"'client_id'",
"]",
"=",
"consumer_key",
"params",
"[",
"'redirect_uri'",
"]",
"=",
"redirect_uri",
"params",
"[",
"'scope'",
"]",
"=",
"scope",
"if",
"cls",
".",
"supports_user_state",
":",
"params",
"[",
"'state'",
"]",
"=",
"base64",
".",
"urlsafe_b64encode",
"(",
"json",
".",
"dumps",
"(",
"{",
"\"csrf\"",
":",
"csrf",
",",
"\"user_state\"",
":",
"user_state",
"}",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"else",
":",
"params",
"[",
"'state'",
"]",
"=",
"csrf",
"params",
"[",
"'response_type'",
"]",
"=",
"'code'",
"# Add authorization header",
"headers",
".",
"update",
"(",
"cls",
".",
"_authorization_header",
"(",
"credentials",
")",
")",
"else",
":",
"raise",
"OAuth2Error",
"(",
"'Credentials with valid consumer_key and arguments '",
"'redirect_uri, scope and state are required to create '",
"'OAuth 2.0 user authorization request elements!'",
")",
"elif",
"request_type",
"==",
"cls",
".",
"ACCESS_TOKEN_REQUEST_TYPE",
":",
"# Access token request.",
"if",
"consumer_key",
"and",
"consumer_secret",
":",
"params",
"[",
"'code'",
"]",
"=",
"token",
"params",
"[",
"'client_id'",
"]",
"=",
"consumer_key",
"params",
"[",
"'client_secret'",
"]",
"=",
"consumer_secret",
"params",
"[",
"'redirect_uri'",
"]",
"=",
"redirect_uri",
"params",
"[",
"'grant_type'",
"]",
"=",
"'authorization_code'",
"# TODO: Check whether all providers accept it",
"headers",
".",
"update",
"(",
"cls",
".",
"_authorization_header",
"(",
"credentials",
")",
")",
"else",
":",
"raise",
"OAuth2Error",
"(",
"'Credentials with valid token, consumer_key, '",
"'consumer_secret and argument redirect_uri are required '",
"'to create OAuth 2.0 access token request elements!'",
")",
"elif",
"request_type",
"==",
"cls",
".",
"REFRESH_TOKEN_REQUEST_TYPE",
":",
"# Refresh access token request.",
"if",
"refresh_token",
"and",
"consumer_key",
"and",
"consumer_secret",
":",
"params",
"[",
"'refresh_token'",
"]",
"=",
"refresh_token",
"params",
"[",
"'client_id'",
"]",
"=",
"consumer_key",
"params",
"[",
"'client_secret'",
"]",
"=",
"consumer_secret",
"params",
"[",
"'grant_type'",
"]",
"=",
"'refresh_token'",
"else",
":",
"raise",
"OAuth2Error",
"(",
"'Credentials with valid refresh_token, consumer_key, '",
"'consumer_secret are required to create OAuth 2.0 '",
"'refresh token request elements!'",
")",
"elif",
"request_type",
"==",
"cls",
".",
"PROTECTED_RESOURCE_REQUEST_TYPE",
":",
"# Protected resource request.",
"# Add Authorization header. See:",
"# http://tools.ietf.org/html/rfc6749#section-7.1",
"if",
"credentials",
".",
"token_type",
"==",
"cls",
".",
"BEARER",
":",
"# http://tools.ietf.org/html/rfc6750#section-2.1",
"headers",
".",
"update",
"(",
"{",
"'Authorization'",
":",
"'Bearer {0}'",
".",
"format",
"(",
"credentials",
".",
"token",
")",
"}",
")",
"elif",
"token",
":",
"params",
"[",
"'access_token'",
"]",
"=",
"token",
"else",
":",
"raise",
"OAuth2Error",
"(",
"'Credentials with valid token are required to create '",
"'OAuth 2.0 protected resources request elements!'",
")",
"request_elements",
"=",
"core",
".",
"RequestElements",
"(",
"url",
",",
"method",
",",
"params",
",",
"headers",
",",
"body",
")",
"return",
"cls",
".",
"_x_request_elements_filter",
"(",
"request_type",
",",
"request_elements",
",",
"credentials",
")"
] | Creates |oauth2| request elements. | [
"Creates",
"|oauth2|",
"request",
"elements",
"."
] | python | test |
bitesofcode/projexui | projexui/widgets/xviewwidget/xview.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xviewwidget/xview.py#L492-L502 | def setMaximumWidth(self, width):
"""
Sets the maximum width value to the inputed width and emits the \
sizeConstraintChanged signal.
:param width | <int>
"""
super(XView, self).setMaximumWidth(width)
if ( not self.signalsBlocked() ):
self.sizeConstraintChanged.emit() | [
"def",
"setMaximumWidth",
"(",
"self",
",",
"width",
")",
":",
"super",
"(",
"XView",
",",
"self",
")",
".",
"setMaximumWidth",
"(",
"width",
")",
"if",
"(",
"not",
"self",
".",
"signalsBlocked",
"(",
")",
")",
":",
"self",
".",
"sizeConstraintChanged",
".",
"emit",
"(",
")"
] | Sets the maximum width value to the inputed width and emits the \
sizeConstraintChanged signal.
:param width | <int> | [
"Sets",
"the",
"maximum",
"width",
"value",
"to",
"the",
"inputed",
"width",
"and",
"emits",
"the",
"\\",
"sizeConstraintChanged",
"signal",
".",
":",
"param",
"width",
"|",
"<int",
">"
] | python | train |
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/parallel/controller/scheduler.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/controller/scheduler.py#L477-L531 | def maybe_run(self, job):
"""check location dependencies, and run if they are met."""
msg_id = job.msg_id
self.log.debug("Attempting to assign task %s", msg_id)
if not self.targets:
# no engines, definitely can't run
return False
if job.follow or job.targets or job.blacklist or self.hwm:
# we need a can_run filter
def can_run(idx):
# check hwm
if self.hwm and self.loads[idx] == self.hwm:
return False
target = self.targets[idx]
# check blacklist
if target in job.blacklist:
return False
# check targets
if job.targets and target not in job.targets:
return False
# check follow
return job.follow.check(self.completed[target], self.failed[target])
indices = filter(can_run, range(len(self.targets)))
if not indices:
# couldn't run
if job.follow.all:
# check follow for impossibility
dests = set()
relevant = set()
if job.follow.success:
relevant = self.all_completed
if job.follow.failure:
relevant = relevant.union(self.all_failed)
for m in job.follow.intersection(relevant):
dests.add(self.destinations[m])
if len(dests) > 1:
self.depending[msg_id] = job
self.fail_unreachable(msg_id)
return False
if job.targets:
# check blacklist+targets for impossibility
job.targets.difference_update(job.blacklist)
if not job.targets or not job.targets.intersection(self.targets):
self.depending[msg_id] = job
self.fail_unreachable(msg_id)
return False
return False
else:
indices = None
self.submit_task(job, indices)
return True | [
"def",
"maybe_run",
"(",
"self",
",",
"job",
")",
":",
"msg_id",
"=",
"job",
".",
"msg_id",
"self",
".",
"log",
".",
"debug",
"(",
"\"Attempting to assign task %s\"",
",",
"msg_id",
")",
"if",
"not",
"self",
".",
"targets",
":",
"# no engines, definitely can't run",
"return",
"False",
"if",
"job",
".",
"follow",
"or",
"job",
".",
"targets",
"or",
"job",
".",
"blacklist",
"or",
"self",
".",
"hwm",
":",
"# we need a can_run filter",
"def",
"can_run",
"(",
"idx",
")",
":",
"# check hwm",
"if",
"self",
".",
"hwm",
"and",
"self",
".",
"loads",
"[",
"idx",
"]",
"==",
"self",
".",
"hwm",
":",
"return",
"False",
"target",
"=",
"self",
".",
"targets",
"[",
"idx",
"]",
"# check blacklist",
"if",
"target",
"in",
"job",
".",
"blacklist",
":",
"return",
"False",
"# check targets",
"if",
"job",
".",
"targets",
"and",
"target",
"not",
"in",
"job",
".",
"targets",
":",
"return",
"False",
"# check follow",
"return",
"job",
".",
"follow",
".",
"check",
"(",
"self",
".",
"completed",
"[",
"target",
"]",
",",
"self",
".",
"failed",
"[",
"target",
"]",
")",
"indices",
"=",
"filter",
"(",
"can_run",
",",
"range",
"(",
"len",
"(",
"self",
".",
"targets",
")",
")",
")",
"if",
"not",
"indices",
":",
"# couldn't run",
"if",
"job",
".",
"follow",
".",
"all",
":",
"# check follow for impossibility",
"dests",
"=",
"set",
"(",
")",
"relevant",
"=",
"set",
"(",
")",
"if",
"job",
".",
"follow",
".",
"success",
":",
"relevant",
"=",
"self",
".",
"all_completed",
"if",
"job",
".",
"follow",
".",
"failure",
":",
"relevant",
"=",
"relevant",
".",
"union",
"(",
"self",
".",
"all_failed",
")",
"for",
"m",
"in",
"job",
".",
"follow",
".",
"intersection",
"(",
"relevant",
")",
":",
"dests",
".",
"add",
"(",
"self",
".",
"destinations",
"[",
"m",
"]",
")",
"if",
"len",
"(",
"dests",
")",
">",
"1",
":",
"self",
".",
"depending",
"[",
"msg_id",
"]",
"=",
"job",
"self",
".",
"fail_unreachable",
"(",
"msg_id",
")",
"return",
"False",
"if",
"job",
".",
"targets",
":",
"# check blacklist+targets for impossibility",
"job",
".",
"targets",
".",
"difference_update",
"(",
"job",
".",
"blacklist",
")",
"if",
"not",
"job",
".",
"targets",
"or",
"not",
"job",
".",
"targets",
".",
"intersection",
"(",
"self",
".",
"targets",
")",
":",
"self",
".",
"depending",
"[",
"msg_id",
"]",
"=",
"job",
"self",
".",
"fail_unreachable",
"(",
"msg_id",
")",
"return",
"False",
"return",
"False",
"else",
":",
"indices",
"=",
"None",
"self",
".",
"submit_task",
"(",
"job",
",",
"indices",
")",
"return",
"True"
] | check location dependencies, and run if they are met. | [
"check",
"location",
"dependencies",
"and",
"run",
"if",
"they",
"are",
"met",
"."
] | python | test |
wmayner/pyphi | pyphi/actual.py | https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/actual.py#L366-L383 | def potential_purviews(self, direction, mechanism, purviews=False):
"""Return all purviews that could belong to the |MIC|/|MIE|.
Filters out trivially-reducible purviews.
Args:
direction (str): Either |CAUSE| or |EFFECT|.
mechanism (tuple[int]): The mechanism of interest.
Keyword Args:
purviews (tuple[int]): Optional subset of purviews of interest.
"""
system = self.system[direction]
return [
purview for purview in system.potential_purviews(
direction, mechanism, purviews)
if set(purview).issubset(self.purview_indices(direction))
] | [
"def",
"potential_purviews",
"(",
"self",
",",
"direction",
",",
"mechanism",
",",
"purviews",
"=",
"False",
")",
":",
"system",
"=",
"self",
".",
"system",
"[",
"direction",
"]",
"return",
"[",
"purview",
"for",
"purview",
"in",
"system",
".",
"potential_purviews",
"(",
"direction",
",",
"mechanism",
",",
"purviews",
")",
"if",
"set",
"(",
"purview",
")",
".",
"issubset",
"(",
"self",
".",
"purview_indices",
"(",
"direction",
")",
")",
"]"
] | Return all purviews that could belong to the |MIC|/|MIE|.
Filters out trivially-reducible purviews.
Args:
direction (str): Either |CAUSE| or |EFFECT|.
mechanism (tuple[int]): The mechanism of interest.
Keyword Args:
purviews (tuple[int]): Optional subset of purviews of interest. | [
"Return",
"all",
"purviews",
"that",
"could",
"belong",
"to",
"the",
"|MIC|",
"/",
"|MIE|",
"."
] | python | train |
saltstack/salt | salt/modules/win_file.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_file.py#L1110-L1161 | def symlink(src, link):
'''
Create a symbolic link to a file
This is only supported with Windows Vista or later and must be executed by
a user with the SeCreateSymbolicLink privilege.
The behavior of this function matches the Unix equivalent, with one
exception - invalid symlinks cannot be created. The source path must exist.
If it doesn't, an error will be raised.
Args:
src (str): The path to a file or directory
link (str): The path to the link
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' file.symlink /path/to/file /path/to/link
'''
# When Python 3.2 or later becomes the minimum version, this function can be
# replaced with the built-in os.symlink function, which supports Windows.
if sys.getwindowsversion().major < 6:
raise SaltInvocationError('Symlinks are only supported on Windows Vista or later.')
if not os.path.exists(src):
raise SaltInvocationError('The given source path does not exist.')
if not os.path.isabs(src):
raise SaltInvocationError('File path must be absolute.')
# ensure paths are using the right slashes
src = os.path.normpath(src)
link = os.path.normpath(link)
is_dir = os.path.isdir(src)
try:
win32file.CreateSymbolicLink(link, src, int(is_dir))
return True
except pywinerror as exc:
raise CommandExecutionError(
'Could not create \'{0}\' - [{1}] {2}'.format(
link,
exc.winerror,
exc.strerror
)
) | [
"def",
"symlink",
"(",
"src",
",",
"link",
")",
":",
"# When Python 3.2 or later becomes the minimum version, this function can be",
"# replaced with the built-in os.symlink function, which supports Windows.",
"if",
"sys",
".",
"getwindowsversion",
"(",
")",
".",
"major",
"<",
"6",
":",
"raise",
"SaltInvocationError",
"(",
"'Symlinks are only supported on Windows Vista or later.'",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"src",
")",
":",
"raise",
"SaltInvocationError",
"(",
"'The given source path does not exist.'",
")",
"if",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"src",
")",
":",
"raise",
"SaltInvocationError",
"(",
"'File path must be absolute.'",
")",
"# ensure paths are using the right slashes",
"src",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"src",
")",
"link",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"link",
")",
"is_dir",
"=",
"os",
".",
"path",
".",
"isdir",
"(",
"src",
")",
"try",
":",
"win32file",
".",
"CreateSymbolicLink",
"(",
"link",
",",
"src",
",",
"int",
"(",
"is_dir",
")",
")",
"return",
"True",
"except",
"pywinerror",
"as",
"exc",
":",
"raise",
"CommandExecutionError",
"(",
"'Could not create \\'{0}\\' - [{1}] {2}'",
".",
"format",
"(",
"link",
",",
"exc",
".",
"winerror",
",",
"exc",
".",
"strerror",
")",
")"
] | Create a symbolic link to a file
This is only supported with Windows Vista or later and must be executed by
a user with the SeCreateSymbolicLink privilege.
The behavior of this function matches the Unix equivalent, with one
exception - invalid symlinks cannot be created. The source path must exist.
If it doesn't, an error will be raised.
Args:
src (str): The path to a file or directory
link (str): The path to the link
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' file.symlink /path/to/file /path/to/link | [
"Create",
"a",
"symbolic",
"link",
"to",
"a",
"file"
] | python | train |
Chilipp/docrep | docrep/__init__.py | https://github.com/Chilipp/docrep/blob/637971f76e1a6e1c70e36dcd1b02bbc37ba02487/docrep/__init__.py#L641-L727 | def keep_params(self, base_key, *params):
"""
Method to keep only specific parameters from a parameter documentation.
This method extracts the given `param` from the `base_key` item in the
:attr:`params` dictionary and creates a new item with the original
documentation with only the description of the param. This method works
for ``'Parameters'`` like sections.
The new docstring with the selected parts will be accessible as
``base_key + '.' + '|'.join(params)``, e.g.
``'original_key.param1|param2'``
Parameters
----------
base_key: str
key in the :attr:`params` dictionary
``*params``
str. Parameter identifier of which the documentations shall be
in the new section
See Also
--------
keep_types, delete_params
Examples
--------
To extract just two parameters from a function and reuse their
docstrings, you can type::
>>> from docrep import DocstringProcessor
>>> d = DocstringProcessor()
>>> @d.get_sectionsf('do_something')
... def do_something(a=1, b=2, c=3):
... '''
... That's %(doc_key)s
...
... Parameters
... ----------
... a: int, optional
... A dummy parameter description
... b: int, optional
... A second dummy parameter that will be excluded
... c: float, optional
... A third parameter'''
... print(a)
>>> d.keep_params('do_something.parameters', 'a', 'c')
>>> @d.dedent
... def do_less(a=1, c=4):
... '''
... My second function with only `a` and `c`
...
... Parameters
... ----------
... %(do_something.parameters.a|c)s'''
... pass
>>> print(do_less.__doc__)
My second function with only `a` and `c`
<BLANKLINE>
Parameters
----------
a: int, optional
A dummy parameter description
c: float, optional
A third parameter
Equivalently, you can use the :meth:`delete_params` method to remove
parameters::
>>> d.delete_params('do_something.parameters', 'b')
>>> @d.dedent
... def do_less(a=1, c=4):
... '''
... My second function with only `a` and `c`
...
... Parameters
... ----------
... %(do_something.parameters.no_b)s'''
... pass
"""
self.params[base_key + '.' + '|'.join(params)] = self.keep_params_s(
self.params[base_key], params) | [
"def",
"keep_params",
"(",
"self",
",",
"base_key",
",",
"*",
"params",
")",
":",
"self",
".",
"params",
"[",
"base_key",
"+",
"'.'",
"+",
"'|'",
".",
"join",
"(",
"params",
")",
"]",
"=",
"self",
".",
"keep_params_s",
"(",
"self",
".",
"params",
"[",
"base_key",
"]",
",",
"params",
")"
] | Method to keep only specific parameters from a parameter documentation.
This method extracts the given `param` from the `base_key` item in the
:attr:`params` dictionary and creates a new item with the original
documentation with only the description of the param. This method works
for ``'Parameters'`` like sections.
The new docstring with the selected parts will be accessible as
``base_key + '.' + '|'.join(params)``, e.g.
``'original_key.param1|param2'``
Parameters
----------
base_key: str
key in the :attr:`params` dictionary
``*params``
str. Parameter identifier of which the documentations shall be
in the new section
See Also
--------
keep_types, delete_params
Examples
--------
To extract just two parameters from a function and reuse their
docstrings, you can type::
>>> from docrep import DocstringProcessor
>>> d = DocstringProcessor()
>>> @d.get_sectionsf('do_something')
... def do_something(a=1, b=2, c=3):
... '''
... That's %(doc_key)s
...
... Parameters
... ----------
... a: int, optional
... A dummy parameter description
... b: int, optional
... A second dummy parameter that will be excluded
... c: float, optional
... A third parameter'''
... print(a)
>>> d.keep_params('do_something.parameters', 'a', 'c')
>>> @d.dedent
... def do_less(a=1, c=4):
... '''
... My second function with only `a` and `c`
...
... Parameters
... ----------
... %(do_something.parameters.a|c)s'''
... pass
>>> print(do_less.__doc__)
My second function with only `a` and `c`
<BLANKLINE>
Parameters
----------
a: int, optional
A dummy parameter description
c: float, optional
A third parameter
Equivalently, you can use the :meth:`delete_params` method to remove
parameters::
>>> d.delete_params('do_something.parameters', 'b')
>>> @d.dedent
... def do_less(a=1, c=4):
... '''
... My second function with only `a` and `c`
...
... Parameters
... ----------
... %(do_something.parameters.no_b)s'''
... pass | [
"Method",
"to",
"keep",
"only",
"specific",
"parameters",
"from",
"a",
"parameter",
"documentation",
"."
] | python | train |
MagicStack/asyncpg | asyncpg/connresource.py | https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/connresource.py#L14-L22 | def guarded(meth):
"""A decorator to add a sanity check to ConnectionResource methods."""
@functools.wraps(meth)
def _check(self, *args, **kwargs):
self._check_conn_validity(meth.__name__)
return meth(self, *args, **kwargs)
return _check | [
"def",
"guarded",
"(",
"meth",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"meth",
")",
"def",
"_check",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_check_conn_validity",
"(",
"meth",
".",
"__name__",
")",
"return",
"meth",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"_check"
] | A decorator to add a sanity check to ConnectionResource methods. | [
"A",
"decorator",
"to",
"add",
"a",
"sanity",
"check",
"to",
"ConnectionResource",
"methods",
"."
] | python | train |
pylp/pylp | pylp/utils/paths.py | https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/utils/paths.py#L13-L19 | def make_readable_path(path):
"""Make a path more "readable"""
home = os.path.expanduser("~")
if path.startswith(home):
path = "~" + path[len(home):]
return path | [
"def",
"make_readable_path",
"(",
"path",
")",
":",
"home",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"\"~\"",
")",
"if",
"path",
".",
"startswith",
"(",
"home",
")",
":",
"path",
"=",
"\"~\"",
"+",
"path",
"[",
"len",
"(",
"home",
")",
":",
"]",
"return",
"path"
] | Make a path more "readable | [
"Make",
"a",
"path",
"more",
"readable"
] | python | train |
genialis/resolwe | resolwe/flow/models/utils.py | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/models/utils.py#L235-L292 | def _hydrate_values(output, output_schema, data):
"""Hydrate basic:file and basic:json values.
Find fields with basic:file type and assign a full path to the file.
Find fields with basic:json type and assign a JSON object from storage.
"""
def hydrate_path(file_name):
"""Hydrate file paths."""
from resolwe.flow.managers import manager
class HydratedPath(str):
"""String wrapper, which also stores the original filename."""
__slots__ = ('data_id', 'file_name')
def __new__(cls, value=''):
"""Initialize hydrated path."""
hydrated = str.__new__(cls, value)
hydrated.data_id = data.id
hydrated.file_name = file_name
return hydrated
return HydratedPath(manager.get_executor().resolve_data_path(data, file_name))
def hydrate_storage(storage_id):
"""Hydrate storage fields."""
from .storage import LazyStorageJSON # Prevent circular import.
return LazyStorageJSON(pk=storage_id)
for field_schema, fields in iterate_fields(output, output_schema):
name = field_schema['name']
value = fields[name]
if 'type' in field_schema:
if field_schema['type'].startswith('basic:file:'):
value['file'] = hydrate_path(value['file'])
value['refs'] = [hydrate_path(ref) for ref in value.get('refs', [])]
elif field_schema['type'].startswith('list:basic:file:'):
for obj in value:
obj['file'] = hydrate_path(obj['file'])
obj['refs'] = [hydrate_path(ref) for ref in obj.get('refs', [])]
if field_schema['type'].startswith('basic:dir:'):
value['dir'] = hydrate_path(value['dir'])
value['refs'] = [hydrate_path(ref) for ref in value.get('refs', [])]
elif field_schema['type'].startswith('list:basic:dir:'):
for obj in value:
obj['dir'] = hydrate_path(obj['dir'])
obj['refs'] = [hydrate_path(ref) for ref in obj.get('refs', [])]
elif field_schema['type'].startswith('basic:json:'):
fields[name] = hydrate_storage(value)
elif field_schema['type'].startswith('list:basic:json:'):
fields[name] = [hydrate_storage(storage_id) for storage_id in value] | [
"def",
"_hydrate_values",
"(",
"output",
",",
"output_schema",
",",
"data",
")",
":",
"def",
"hydrate_path",
"(",
"file_name",
")",
":",
"\"\"\"Hydrate file paths.\"\"\"",
"from",
"resolwe",
".",
"flow",
".",
"managers",
"import",
"manager",
"class",
"HydratedPath",
"(",
"str",
")",
":",
"\"\"\"String wrapper, which also stores the original filename.\"\"\"",
"__slots__",
"=",
"(",
"'data_id'",
",",
"'file_name'",
")",
"def",
"__new__",
"(",
"cls",
",",
"value",
"=",
"''",
")",
":",
"\"\"\"Initialize hydrated path.\"\"\"",
"hydrated",
"=",
"str",
".",
"__new__",
"(",
"cls",
",",
"value",
")",
"hydrated",
".",
"data_id",
"=",
"data",
".",
"id",
"hydrated",
".",
"file_name",
"=",
"file_name",
"return",
"hydrated",
"return",
"HydratedPath",
"(",
"manager",
".",
"get_executor",
"(",
")",
".",
"resolve_data_path",
"(",
"data",
",",
"file_name",
")",
")",
"def",
"hydrate_storage",
"(",
"storage_id",
")",
":",
"\"\"\"Hydrate storage fields.\"\"\"",
"from",
".",
"storage",
"import",
"LazyStorageJSON",
"# Prevent circular import.",
"return",
"LazyStorageJSON",
"(",
"pk",
"=",
"storage_id",
")",
"for",
"field_schema",
",",
"fields",
"in",
"iterate_fields",
"(",
"output",
",",
"output_schema",
")",
":",
"name",
"=",
"field_schema",
"[",
"'name'",
"]",
"value",
"=",
"fields",
"[",
"name",
"]",
"if",
"'type'",
"in",
"field_schema",
":",
"if",
"field_schema",
"[",
"'type'",
"]",
".",
"startswith",
"(",
"'basic:file:'",
")",
":",
"value",
"[",
"'file'",
"]",
"=",
"hydrate_path",
"(",
"value",
"[",
"'file'",
"]",
")",
"value",
"[",
"'refs'",
"]",
"=",
"[",
"hydrate_path",
"(",
"ref",
")",
"for",
"ref",
"in",
"value",
".",
"get",
"(",
"'refs'",
",",
"[",
"]",
")",
"]",
"elif",
"field_schema",
"[",
"'type'",
"]",
".",
"startswith",
"(",
"'list:basic:file:'",
")",
":",
"for",
"obj",
"in",
"value",
":",
"obj",
"[",
"'file'",
"]",
"=",
"hydrate_path",
"(",
"obj",
"[",
"'file'",
"]",
")",
"obj",
"[",
"'refs'",
"]",
"=",
"[",
"hydrate_path",
"(",
"ref",
")",
"for",
"ref",
"in",
"obj",
".",
"get",
"(",
"'refs'",
",",
"[",
"]",
")",
"]",
"if",
"field_schema",
"[",
"'type'",
"]",
".",
"startswith",
"(",
"'basic:dir:'",
")",
":",
"value",
"[",
"'dir'",
"]",
"=",
"hydrate_path",
"(",
"value",
"[",
"'dir'",
"]",
")",
"value",
"[",
"'refs'",
"]",
"=",
"[",
"hydrate_path",
"(",
"ref",
")",
"for",
"ref",
"in",
"value",
".",
"get",
"(",
"'refs'",
",",
"[",
"]",
")",
"]",
"elif",
"field_schema",
"[",
"'type'",
"]",
".",
"startswith",
"(",
"'list:basic:dir:'",
")",
":",
"for",
"obj",
"in",
"value",
":",
"obj",
"[",
"'dir'",
"]",
"=",
"hydrate_path",
"(",
"obj",
"[",
"'dir'",
"]",
")",
"obj",
"[",
"'refs'",
"]",
"=",
"[",
"hydrate_path",
"(",
"ref",
")",
"for",
"ref",
"in",
"obj",
".",
"get",
"(",
"'refs'",
",",
"[",
"]",
")",
"]",
"elif",
"field_schema",
"[",
"'type'",
"]",
".",
"startswith",
"(",
"'basic:json:'",
")",
":",
"fields",
"[",
"name",
"]",
"=",
"hydrate_storage",
"(",
"value",
")",
"elif",
"field_schema",
"[",
"'type'",
"]",
".",
"startswith",
"(",
"'list:basic:json:'",
")",
":",
"fields",
"[",
"name",
"]",
"=",
"[",
"hydrate_storage",
"(",
"storage_id",
")",
"for",
"storage_id",
"in",
"value",
"]"
] | Hydrate basic:file and basic:json values.
Find fields with basic:file type and assign a full path to the file.
Find fields with basic:json type and assign a JSON object from storage. | [
"Hydrate",
"basic",
":",
"file",
"and",
"basic",
":",
"json",
"values",
"."
] | python | train |
mitsei/dlkit | dlkit/json_/resource/objects.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/resource/objects.py#L192-L202 | def get_group_metadata(self):
"""Gets the metadata for a group.
return: (osid.Metadata) - metadata for the group
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._mdata['group'])
metadata.update({'existing_boolean_values': self._my_map['group']})
return Metadata(**metadata) | [
"def",
"get_group_metadata",
"(",
"self",
")",
":",
"# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template",
"metadata",
"=",
"dict",
"(",
"self",
".",
"_mdata",
"[",
"'group'",
"]",
")",
"metadata",
".",
"update",
"(",
"{",
"'existing_boolean_values'",
":",
"self",
".",
"_my_map",
"[",
"'group'",
"]",
"}",
")",
"return",
"Metadata",
"(",
"*",
"*",
"metadata",
")"
] | Gets the metadata for a group.
return: (osid.Metadata) - metadata for the group
*compliance: mandatory -- This method must be implemented.* | [
"Gets",
"the",
"metadata",
"for",
"a",
"group",
"."
] | python | train |
jobovy/galpy | galpy/potential/MovingObjectPotential.py | https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/MovingObjectPotential.py#L63-L83 | def _evaluate(self,R,z,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,z, phi
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z,phi)
HISTORY:
2011-04-10 - Started - Bovy (NYU)
2018-10-18 - Updated for general object potential - James Lane (UofT)
"""
#Cylindrical distance
Rdist = _cylR(R,phi,self._orb.R(t),self._orb.phi(t))
#Evaluate potential
return evaluatePotentials( self._pot, Rdist, self._orb.z(t)-z, use_physical=False) | [
"def",
"_evaluate",
"(",
"self",
",",
"R",
",",
"z",
",",
"phi",
"=",
"0.",
",",
"t",
"=",
"0.",
")",
":",
"#Cylindrical distance",
"Rdist",
"=",
"_cylR",
"(",
"R",
",",
"phi",
",",
"self",
".",
"_orb",
".",
"R",
"(",
"t",
")",
",",
"self",
".",
"_orb",
".",
"phi",
"(",
"t",
")",
")",
"#Evaluate potential",
"return",
"evaluatePotentials",
"(",
"self",
".",
"_pot",
",",
"Rdist",
",",
"self",
".",
"_orb",
".",
"z",
"(",
"t",
")",
"-",
"z",
",",
"use_physical",
"=",
"False",
")"
] | NAME:
_evaluate
PURPOSE:
evaluate the potential at R,z, phi
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z,phi)
HISTORY:
2011-04-10 - Started - Bovy (NYU)
2018-10-18 - Updated for general object potential - James Lane (UofT) | [
"NAME",
":",
"_evaluate",
"PURPOSE",
":",
"evaluate",
"the",
"potential",
"at",
"R",
"z",
"phi",
"INPUT",
":",
"R",
"-",
"Galactocentric",
"cylindrical",
"radius",
"z",
"-",
"vertical",
"height",
"phi",
"-",
"azimuth",
"t",
"-",
"time",
"OUTPUT",
":",
"Phi",
"(",
"R",
"z",
"phi",
")",
"HISTORY",
":",
"2011",
"-",
"04",
"-",
"10",
"-",
"Started",
"-",
"Bovy",
"(",
"NYU",
")",
"2018",
"-",
"10",
"-",
"18",
"-",
"Updated",
"for",
"general",
"object",
"potential",
"-",
"James",
"Lane",
"(",
"UofT",
")"
] | python | train |
aaugustin/websockets | src/websockets/http.py | https://github.com/aaugustin/websockets/blob/17b3f47549b6f752a1be07fa1ba3037cb59c7d56/src/websockets/http.py#L187-L204 | async def read_line(stream: asyncio.StreamReader) -> bytes:
"""
Read a single line from ``stream``.
``stream`` is an :class:`~asyncio.StreamReader`.
Return :class:`bytes` without CRLF.
"""
# Security: this is bounded by the StreamReader's limit (default = 32 KiB).
line = await stream.readline()
# Security: this guarantees header values are small (hard-coded = 4 KiB)
if len(line) > MAX_LINE:
raise ValueError("Line too long")
# Not mandatory but safe - https://tools.ietf.org/html/rfc7230#section-3.5
if not line.endswith(b"\r\n"):
raise ValueError("Line without CRLF")
return line[:-2] | [
"async",
"def",
"read_line",
"(",
"stream",
":",
"asyncio",
".",
"StreamReader",
")",
"->",
"bytes",
":",
"# Security: this is bounded by the StreamReader's limit (default = 32 KiB).",
"line",
"=",
"await",
"stream",
".",
"readline",
"(",
")",
"# Security: this guarantees header values are small (hard-coded = 4 KiB)",
"if",
"len",
"(",
"line",
")",
">",
"MAX_LINE",
":",
"raise",
"ValueError",
"(",
"\"Line too long\"",
")",
"# Not mandatory but safe - https://tools.ietf.org/html/rfc7230#section-3.5",
"if",
"not",
"line",
".",
"endswith",
"(",
"b\"\\r\\n\"",
")",
":",
"raise",
"ValueError",
"(",
"\"Line without CRLF\"",
")",
"return",
"line",
"[",
":",
"-",
"2",
"]"
] | Read a single line from ``stream``.
``stream`` is an :class:`~asyncio.StreamReader`.
Return :class:`bytes` without CRLF. | [
"Read",
"a",
"single",
"line",
"from",
"stream",
"."
] | python | train |
MolSSI-BSE/basis_set_exchange | basis_set_exchange/cli/bse_handlers.py | https://github.com/MolSSI-BSE/basis_set_exchange/blob/e79110aaeb65f392ed5032420322dee3336948f7/basis_set_exchange/cli/bse_handlers.py#L52-L61 | def _bse_cli_list_roles(args):
'''Handles the list-roles subcommand'''
all_roles = api.get_roles()
if args.no_description:
liststr = all_roles.keys()
else:
liststr = format_columns(all_roles.items())
return '\n'.join(liststr) | [
"def",
"_bse_cli_list_roles",
"(",
"args",
")",
":",
"all_roles",
"=",
"api",
".",
"get_roles",
"(",
")",
"if",
"args",
".",
"no_description",
":",
"liststr",
"=",
"all_roles",
".",
"keys",
"(",
")",
"else",
":",
"liststr",
"=",
"format_columns",
"(",
"all_roles",
".",
"items",
"(",
")",
")",
"return",
"'\\n'",
".",
"join",
"(",
"liststr",
")"
] | Handles the list-roles subcommand | [
"Handles",
"the",
"list",
"-",
"roles",
"subcommand"
] | python | train |
IdentityPython/pysaml2 | src/saml2/mcache.py | https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/mcache.py#L153-L174 | def active(self, subject_id, entity_id):
""" Returns the status of assertions from a specific entity_id.
:param subject_id: The ID of the subject
:param entity_id: The entity ID of the entity_id of the assertion
:return: True or False depending on if the assertion is still
valid or not.
"""
try:
(timestamp, info) = self._cache.get(_key(subject_id, entity_id))
except ValueError:
return False
except TypeError:
return False
# if not info:
# return False
try:
return time_util.not_on_or_after(timestamp)
except ToOld:
return False | [
"def",
"active",
"(",
"self",
",",
"subject_id",
",",
"entity_id",
")",
":",
"try",
":",
"(",
"timestamp",
",",
"info",
")",
"=",
"self",
".",
"_cache",
".",
"get",
"(",
"_key",
"(",
"subject_id",
",",
"entity_id",
")",
")",
"except",
"ValueError",
":",
"return",
"False",
"except",
"TypeError",
":",
"return",
"False",
"# if not info:",
"# return False",
"try",
":",
"return",
"time_util",
".",
"not_on_or_after",
"(",
"timestamp",
")",
"except",
"ToOld",
":",
"return",
"False"
] | Returns the status of assertions from a specific entity_id.
:param subject_id: The ID of the subject
:param entity_id: The entity ID of the entity_id of the assertion
:return: True or False depending on if the assertion is still
valid or not. | [
"Returns",
"the",
"status",
"of",
"assertions",
"from",
"a",
"specific",
"entity_id",
"."
] | python | train |
nion-software/nionswift | nion/swift/model/Symbolic.py | https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/Symbolic.py#L886-L891 | def unbind(self):
"""Unlisten and close each bound item."""
for variable in self.variables:
self.__unbind_variable(variable)
for result in self.results:
self.__unbind_result(result) | [
"def",
"unbind",
"(",
"self",
")",
":",
"for",
"variable",
"in",
"self",
".",
"variables",
":",
"self",
".",
"__unbind_variable",
"(",
"variable",
")",
"for",
"result",
"in",
"self",
".",
"results",
":",
"self",
".",
"__unbind_result",
"(",
"result",
")"
] | Unlisten and close each bound item. | [
"Unlisten",
"and",
"close",
"each",
"bound",
"item",
"."
] | python | train |
swift-nav/libsbp | python/sbp/client/drivers/cdc_driver.py | https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/client/drivers/cdc_driver.py#L27-L48 | def read(self, size):
"""
Read wrapper.
Parameters
----------
size : int
Number of bytes to read.
"""
try:
return_val = self.handle.read(size)
if return_val == '':
print()
print("Piksi disconnected")
print()
raise IOError
return return_val
except OSError:
print()
print("Piksi disconnected")
print()
raise IOError | [
"def",
"read",
"(",
"self",
",",
"size",
")",
":",
"try",
":",
"return_val",
"=",
"self",
".",
"handle",
".",
"read",
"(",
"size",
")",
"if",
"return_val",
"==",
"''",
":",
"print",
"(",
")",
"print",
"(",
"\"Piksi disconnected\"",
")",
"print",
"(",
")",
"raise",
"IOError",
"return",
"return_val",
"except",
"OSError",
":",
"print",
"(",
")",
"print",
"(",
"\"Piksi disconnected\"",
")",
"print",
"(",
")",
"raise",
"IOError"
] | Read wrapper.
Parameters
----------
size : int
Number of bytes to read. | [
"Read",
"wrapper",
"."
] | python | train |
stephrdev/django-formwizard | formwizard/views.py | https://github.com/stephrdev/django-formwizard/blob/7b35165f0340aae4e8302d5b05b0cb443f6c9904/formwizard/views.py#L303-L325 | def render_done(self, form, **kwargs):
"""
This method gets called when all forms passed. The method should also
re-validate all steps to prevent manipulation. If any form don't
validate, `render_revalidation_failure` should get called.
If everything is fine call `done`.
"""
final_form_list = []
# walk through the form list and try to validate the data again.
for form_key in self.get_form_list():
form_obj = self.get_form(step=form_key,
data=self.storage.get_step_data(form_key),
files=self.storage.get_step_files(form_key))
if not form_obj.is_valid():
return self.render_revalidation_failure(form_key, form_obj, **kwargs)
final_form_list.append(form_obj)
# render the done view and reset the wizard before returning the
# response. This is needed to prevent from rendering done with the
# same data twice.
done_response = self.done(final_form_list, **kwargs)
self.storage.reset()
return done_response | [
"def",
"render_done",
"(",
"self",
",",
"form",
",",
"*",
"*",
"kwargs",
")",
":",
"final_form_list",
"=",
"[",
"]",
"# walk through the form list and try to validate the data again.",
"for",
"form_key",
"in",
"self",
".",
"get_form_list",
"(",
")",
":",
"form_obj",
"=",
"self",
".",
"get_form",
"(",
"step",
"=",
"form_key",
",",
"data",
"=",
"self",
".",
"storage",
".",
"get_step_data",
"(",
"form_key",
")",
",",
"files",
"=",
"self",
".",
"storage",
".",
"get_step_files",
"(",
"form_key",
")",
")",
"if",
"not",
"form_obj",
".",
"is_valid",
"(",
")",
":",
"return",
"self",
".",
"render_revalidation_failure",
"(",
"form_key",
",",
"form_obj",
",",
"*",
"*",
"kwargs",
")",
"final_form_list",
".",
"append",
"(",
"form_obj",
")",
"# render the done view and reset the wizard before returning the",
"# response. This is needed to prevent from rendering done with the",
"# same data twice.",
"done_response",
"=",
"self",
".",
"done",
"(",
"final_form_list",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"storage",
".",
"reset",
"(",
")",
"return",
"done_response"
] | This method gets called when all forms passed. The method should also
re-validate all steps to prevent manipulation. If any form don't
validate, `render_revalidation_failure` should get called.
If everything is fine call `done`. | [
"This",
"method",
"gets",
"called",
"when",
"all",
"forms",
"passed",
".",
"The",
"method",
"should",
"also",
"re",
"-",
"validate",
"all",
"steps",
"to",
"prevent",
"manipulation",
".",
"If",
"any",
"form",
"don",
"t",
"validate",
"render_revalidation_failure",
"should",
"get",
"called",
".",
"If",
"everything",
"is",
"fine",
"call",
"done",
"."
] | python | train |
Azure/azure-cli-extensions | src/storage-preview/azext_storage_preview/_validators.py | https://github.com/Azure/azure-cli-extensions/blob/3d4854205b0f0d882f688cfa12383d14506c2e35/src/storage-preview/azext_storage_preview/_validators.py#L388-L398 | def resource_type_type(loader):
""" Returns a function which validates that resource types string contains only a combination of service,
container, and object. Their shorthand representations are s, c, and o. """
def impl(string):
t_resources = loader.get_models('common.models#ResourceTypes')
if set(string) - set("sco"):
raise ValueError
return t_resources(_str=''.join(set(string)))
return impl | [
"def",
"resource_type_type",
"(",
"loader",
")",
":",
"def",
"impl",
"(",
"string",
")",
":",
"t_resources",
"=",
"loader",
".",
"get_models",
"(",
"'common.models#ResourceTypes'",
")",
"if",
"set",
"(",
"string",
")",
"-",
"set",
"(",
"\"sco\"",
")",
":",
"raise",
"ValueError",
"return",
"t_resources",
"(",
"_str",
"=",
"''",
".",
"join",
"(",
"set",
"(",
"string",
")",
")",
")",
"return",
"impl"
] | Returns a function which validates that resource types string contains only a combination of service,
container, and object. Their shorthand representations are s, c, and o. | [
"Returns",
"a",
"function",
"which",
"validates",
"that",
"resource",
"types",
"string",
"contains",
"only",
"a",
"combination",
"of",
"service",
"container",
"and",
"object",
".",
"Their",
"shorthand",
"representations",
"are",
"s",
"c",
"and",
"o",
"."
] | python | train |
softlayer/softlayer-python | SoftLayer/CLI/block/lun.py | https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/CLI/block/lun.py#L14-L36 | def cli(env, volume_id, lun_id):
"""Set the LUN ID on an existing block storage volume.
The LUN ID only takes effect during the Host Authorization process. It is
recommended (but not necessary) to de-authorize all hosts before using this
method. See `block access-revoke`.
VOLUME_ID - the volume ID on which to set the LUN ID.
LUN_ID - recommended range is an integer between 0 and 255. Advanced users
can use an integer between 0 and 4095.
"""
block_storage_manager = SoftLayer.BlockStorageManager(env.client)
res = block_storage_manager.create_or_update_lun_id(volume_id, lun_id)
if 'value' in res and lun_id == res['value']:
click.echo(
'Block volume with id %s is reporting LUN ID %s' % (res['volumeId'], res['value']))
else:
click.echo(
'Failed to confirm the new LUN ID on volume %s' % (volume_id)) | [
"def",
"cli",
"(",
"env",
",",
"volume_id",
",",
"lun_id",
")",
":",
"block_storage_manager",
"=",
"SoftLayer",
".",
"BlockStorageManager",
"(",
"env",
".",
"client",
")",
"res",
"=",
"block_storage_manager",
".",
"create_or_update_lun_id",
"(",
"volume_id",
",",
"lun_id",
")",
"if",
"'value'",
"in",
"res",
"and",
"lun_id",
"==",
"res",
"[",
"'value'",
"]",
":",
"click",
".",
"echo",
"(",
"'Block volume with id %s is reporting LUN ID %s'",
"%",
"(",
"res",
"[",
"'volumeId'",
"]",
",",
"res",
"[",
"'value'",
"]",
")",
")",
"else",
":",
"click",
".",
"echo",
"(",
"'Failed to confirm the new LUN ID on volume %s'",
"%",
"(",
"volume_id",
")",
")"
] | Set the LUN ID on an existing block storage volume.
The LUN ID only takes effect during the Host Authorization process. It is
recommended (but not necessary) to de-authorize all hosts before using this
method. See `block access-revoke`.
VOLUME_ID - the volume ID on which to set the LUN ID.
LUN_ID - recommended range is an integer between 0 and 255. Advanced users
can use an integer between 0 and 4095. | [
"Set",
"the",
"LUN",
"ID",
"on",
"an",
"existing",
"block",
"storage",
"volume",
"."
] | python | train |
zenodo/zenodo-accessrequests | zenodo_accessrequests/models.py | https://github.com/zenodo/zenodo-accessrequests/blob/ce2cf3f1425d02ba4f3ad3202cfca43a1892558a/zenodo_accessrequests/models.py#L360-L369 | def create_secret_link(self, title, description=None, expires_at=None):
"""Create a secret link from request."""
self.link = SecretLink.create(
title,
self.receiver,
extra_data=dict(recid=self.recid),
description=description,
expires_at=expires_at,
)
return self.link | [
"def",
"create_secret_link",
"(",
"self",
",",
"title",
",",
"description",
"=",
"None",
",",
"expires_at",
"=",
"None",
")",
":",
"self",
".",
"link",
"=",
"SecretLink",
".",
"create",
"(",
"title",
",",
"self",
".",
"receiver",
",",
"extra_data",
"=",
"dict",
"(",
"recid",
"=",
"self",
".",
"recid",
")",
",",
"description",
"=",
"description",
",",
"expires_at",
"=",
"expires_at",
",",
")",
"return",
"self",
".",
"link"
] | Create a secret link from request. | [
"Create",
"a",
"secret",
"link",
"from",
"request",
"."
] | python | test |
yunojuno/elasticsearch-django | elasticsearch_django/settings.py | https://github.com/yunojuno/elasticsearch-django/blob/e8d98d32bcd77f1bedb8f1a22b6523ca44ffd489/elasticsearch_django/settings.py#L29-L34 | def get_setting(key, *default):
"""Return specific search setting from Django conf."""
if default:
return get_settings().get(key, default[0])
else:
return get_settings()[key] | [
"def",
"get_setting",
"(",
"key",
",",
"*",
"default",
")",
":",
"if",
"default",
":",
"return",
"get_settings",
"(",
")",
".",
"get",
"(",
"key",
",",
"default",
"[",
"0",
"]",
")",
"else",
":",
"return",
"get_settings",
"(",
")",
"[",
"key",
"]"
] | Return specific search setting from Django conf. | [
"Return",
"specific",
"search",
"setting",
"from",
"Django",
"conf",
"."
] | python | train |
ask/carrot | carrot/backends/queue.py | https://github.com/ask/carrot/blob/5889a25cd2e274642071c9bba39772f4b3e3d9da/carrot/backends/queue.py#L33-L45 | def get(self, *args, **kwargs):
"""Get the next waiting message from the queue.
:returns: A :class:`Message` instance, or ``None`` if there is
no messages waiting.
"""
if not mqueue.qsize():
return None
message_data, content_type, content_encoding = mqueue.get()
return self.Message(backend=self, body=message_data,
content_type=content_type,
content_encoding=content_encoding) | [
"def",
"get",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"mqueue",
".",
"qsize",
"(",
")",
":",
"return",
"None",
"message_data",
",",
"content_type",
",",
"content_encoding",
"=",
"mqueue",
".",
"get",
"(",
")",
"return",
"self",
".",
"Message",
"(",
"backend",
"=",
"self",
",",
"body",
"=",
"message_data",
",",
"content_type",
"=",
"content_type",
",",
"content_encoding",
"=",
"content_encoding",
")"
] | Get the next waiting message from the queue.
:returns: A :class:`Message` instance, or ``None`` if there is
no messages waiting. | [
"Get",
"the",
"next",
"waiting",
"message",
"from",
"the",
"queue",
"."
] | python | train |
swharden/PyOriginTools | PyOriginTools/highlevel.py | https://github.com/swharden/PyOriginTools/blob/536fb8e11234ffdc27e26b1800e0358179ca7d26/PyOriginTools/highlevel.py#L227-L254 | def getSheet(book=None,sheet=None):
"""returns the pyorigin object for a sheet."""
# figure out what book to use
if book and not book.lower() in [x.lower() for x in bookNames()]:
print("book %s doesn't exist"%book)
return
if book is None:
book=activeBook().lower()
if book is None:
print("no book given or selected")
return
# figure out what sheet to use
if sheet and not sheet.lower() in [x.lower() for x in sheetNames(book)]:
print("sheet %s doesn't exist"%sheet)
return
if sheet is None:
sheet=activeSheet().lower()
if sheet is None:
return("no sheet given or selected")
print
# by now, we know the book/sheet exists and can be found
for poSheet in PyOrigin.WorksheetPages(book).Layers():
if poSheet.GetName().lower()==sheet.lower():
return poSheet
return False | [
"def",
"getSheet",
"(",
"book",
"=",
"None",
",",
"sheet",
"=",
"None",
")",
":",
"# figure out what book to use",
"if",
"book",
"and",
"not",
"book",
".",
"lower",
"(",
")",
"in",
"[",
"x",
".",
"lower",
"(",
")",
"for",
"x",
"in",
"bookNames",
"(",
")",
"]",
":",
"print",
"(",
"\"book %s doesn't exist\"",
"%",
"book",
")",
"return",
"if",
"book",
"is",
"None",
":",
"book",
"=",
"activeBook",
"(",
")",
".",
"lower",
"(",
")",
"if",
"book",
"is",
"None",
":",
"print",
"(",
"\"no book given or selected\"",
")",
"return",
"# figure out what sheet to use",
"if",
"sheet",
"and",
"not",
"sheet",
".",
"lower",
"(",
")",
"in",
"[",
"x",
".",
"lower",
"(",
")",
"for",
"x",
"in",
"sheetNames",
"(",
"book",
")",
"]",
":",
"print",
"(",
"\"sheet %s doesn't exist\"",
"%",
"sheet",
")",
"return",
"if",
"sheet",
"is",
"None",
":",
"sheet",
"=",
"activeSheet",
"(",
")",
".",
"lower",
"(",
")",
"if",
"sheet",
"is",
"None",
":",
"return",
"(",
"\"no sheet given or selected\"",
")",
"print",
"# by now, we know the book/sheet exists and can be found",
"for",
"poSheet",
"in",
"PyOrigin",
".",
"WorksheetPages",
"(",
"book",
")",
".",
"Layers",
"(",
")",
":",
"if",
"poSheet",
".",
"GetName",
"(",
")",
".",
"lower",
"(",
")",
"==",
"sheet",
".",
"lower",
"(",
")",
":",
"return",
"poSheet",
"return",
"False"
] | returns the pyorigin object for a sheet. | [
"returns",
"the",
"pyorigin",
"object",
"for",
"a",
"sheet",
"."
] | python | train |
PmagPy/PmagPy | pmagpy/contribution_builder.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L1689-L1712 | def drop_stub_rows(self, ignore_cols=('specimen',
'sample',
'software_packages',
'num')):
"""
Drop self.df rows that have only null values,
ignoring certain columns.
Parameters
----------
ignore_cols : list-like
list of column names to ignore for
Returns
---------
self.df : pandas DataFrame
"""
# ignore citations if they just say 'This study'
if 'citations' in self.df.columns:
if list(self.df['citations'].unique()) == ['This study']:
ignore_cols = ignore_cols + ('citations',)
drop_cols = self.df.columns.difference(ignore_cols)
self.df.dropna(axis='index', subset=drop_cols, how='all', inplace=True)
return self.df | [
"def",
"drop_stub_rows",
"(",
"self",
",",
"ignore_cols",
"=",
"(",
"'specimen'",
",",
"'sample'",
",",
"'software_packages'",
",",
"'num'",
")",
")",
":",
"# ignore citations if they just say 'This study'",
"if",
"'citations'",
"in",
"self",
".",
"df",
".",
"columns",
":",
"if",
"list",
"(",
"self",
".",
"df",
"[",
"'citations'",
"]",
".",
"unique",
"(",
")",
")",
"==",
"[",
"'This study'",
"]",
":",
"ignore_cols",
"=",
"ignore_cols",
"+",
"(",
"'citations'",
",",
")",
"drop_cols",
"=",
"self",
".",
"df",
".",
"columns",
".",
"difference",
"(",
"ignore_cols",
")",
"self",
".",
"df",
".",
"dropna",
"(",
"axis",
"=",
"'index'",
",",
"subset",
"=",
"drop_cols",
",",
"how",
"=",
"'all'",
",",
"inplace",
"=",
"True",
")",
"return",
"self",
".",
"df"
] | Drop self.df rows that have only null values,
ignoring certain columns.
Parameters
----------
ignore_cols : list-like
        list of column names to ignore when checking for null values
Returns
---------
self.df : pandas DataFrame | [
"Drop",
"self",
".",
"df",
"rows",
"that",
"have",
"only",
"null",
"values",
"ignoring",
"certain",
"columns",
"."
] | python | train |
econ-ark/HARK | HARK/utilities.py | https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/utilities.py#L259-L279 | def CRRAutility_invP(u, gam):
'''
Evaluates the derivative of the inverse of the CRRA utility function (with
risk aversion parameter gam) at a given utility level u.
Parameters
----------
u : float
Utility value
gam : float
Risk aversion
Returns
-------
(unnamed) : float
Marginal consumption corresponding to given utility value
'''
if gam == 1:
return np.exp(u)
else:
return( ((1.0-gam)*u)**(gam/(1.0-gam)) ) | [
"def",
"CRRAutility_invP",
"(",
"u",
",",
"gam",
")",
":",
"if",
"gam",
"==",
"1",
":",
"return",
"np",
".",
"exp",
"(",
"u",
")",
"else",
":",
"return",
"(",
"(",
"(",
"1.0",
"-",
"gam",
")",
"*",
"u",
")",
"**",
"(",
"gam",
"/",
"(",
"1.0",
"-",
"gam",
")",
")",
")"
] | Evaluates the derivative of the inverse of the CRRA utility function (with
risk aversion parameter gam) at a given utility level u.
Parameters
----------
u : float
Utility value
gam : float
Risk aversion
Returns
-------
(unnamed) : float
Marginal consumption corresponding to given utility value | [
"Evaluates",
"the",
"derivative",
"of",
"the",
"inverse",
"of",
"the",
"CRRA",
"utility",
"function",
"(",
"with",
"risk",
"aversion",
"parameter",
"gam",
")",
"at",
"a",
"given",
"utility",
"level",
"u",
"."
] | python | train |
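A quick consistency check on the closed form used in CRRAutility_invP above. This is standard CRRA algebra written out for reference, not text from the HARK documentation: with u(c) = \frac{c^{1-\gamma}}{1-\gamma}, inverting gives c(u) = \left[(1-\gamma)\,u\right]^{1/(1-\gamma)}, and differentiating gives \frac{dc}{du} = \left[(1-\gamma)\,u\right]^{\gamma/(1-\gamma)}, which matches the ((1.0-gam)*u)**(gam/(1.0-gam)) branch. For \gamma = 1 the utility is u(c) = \ln c, so the inverse is e^{u} and its derivative is again e^{u}, matching the np.exp(u) branch.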
AkihikoITOH/capybara | capybara/virtualenv/lib/python2.7/site-packages/lxml/html/diff.py | https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/lxml/html/diff.py#L100-L113 | def compress_tokens(tokens):
"""
Combine adjacent tokens when there is no HTML between the tokens,
and they share an annotation
"""
result = [tokens[0]]
for tok in tokens[1:]:
if (not result[-1].post_tags and
not tok.pre_tags and
result[-1].annotation == tok.annotation):
compress_merge_back(result, tok)
else:
result.append(tok)
return result | [
"def",
"compress_tokens",
"(",
"tokens",
")",
":",
"result",
"=",
"[",
"tokens",
"[",
"0",
"]",
"]",
"for",
"tok",
"in",
"tokens",
"[",
"1",
":",
"]",
":",
"if",
"(",
"not",
"result",
"[",
"-",
"1",
"]",
".",
"post_tags",
"and",
"not",
"tok",
".",
"pre_tags",
"and",
"result",
"[",
"-",
"1",
"]",
".",
"annotation",
"==",
"tok",
".",
"annotation",
")",
":",
"compress_merge_back",
"(",
"result",
",",
"tok",
")",
"else",
":",
"result",
".",
"append",
"(",
"tok",
")",
"return",
"result"
] | Combine adjacent tokens when there is no HTML between the tokens,
and they share an annotation | [
"Combine",
"adjacent",
"tokens",
"when",
"there",
"is",
"no",
"HTML",
"between",
"the",
"tokens",
"and",
"they",
"share",
"an",
"annotation"
] | python | test |
SeattleTestbed/seash | seash_modules.py | https://github.com/SeattleTestbed/seash/blob/40f9d2285662ff8b61e0468b4196acee089b273b/seash_modules.py#L598-L608 | def _ensure_module_folder_exists():
"""
Checks to see if the module folder exists. If it does not, create it.
If there is an existing file with the same name, we raise a RuntimeError.
"""
if not os.path.isdir(MODULES_FOLDER_PATH):
try:
os.mkdir(MODULES_FOLDER_PATH)
except OSError, e:
if "file already exists" in str(e):
raise RuntimeError("Could not create modules folder: file exists with the same name") | [
"def",
"_ensure_module_folder_exists",
"(",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"MODULES_FOLDER_PATH",
")",
":",
"try",
":",
"os",
".",
"mkdir",
"(",
"MODULES_FOLDER_PATH",
")",
"except",
"OSError",
",",
"e",
":",
"if",
"\"file already exists\"",
"in",
"str",
"(",
"e",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Could not create modules folder: file exists with the same name\"",
")"
] | Checks to see if the module folder exists. If it does not, create it.
If there is an existing file with the same name, we raise a RuntimeError. | [
"Checks",
"to",
"see",
"if",
"the",
"module",
"folder",
"exists",
".",
"If",
"it",
"does",
"not",
"create",
"it",
".",
"If",
"there",
"is",
"an",
"existing",
"file",
"with",
"the",
"same",
"name",
"we",
"raise",
"a",
"RuntimeError",
"."
] | python | train |
TheHive-Project/Cortex-Analyzers | analyzers/Malpedia/malpedia_analyzer.py | https://github.com/TheHive-Project/Cortex-Analyzers/blob/8dae6a8c4cf9af5554ae8c844985c4b44d4bd4bf/analyzers/Malpedia/malpedia_analyzer.py#L58-L92 | def check(self, file):
"""
Checks a given file against all available yara rules
:param file: Path to file
:type file:str
:returns: Python list with matched rules info
:rtype: list
"""
result = []
all_matches = []
for filerules in os.listdir(self.rulepaths):
try:
rule = yara.compile(os.path.join(self.rulepaths, filerules))
except yara.SyntaxError:
continue
matches = rule.match(file)
if len(matches) > 0:
for rulem in matches:
rule_family = "_".join([x for x in rulem.rule.replace("_", ".", 1).split("_")[:-1]])
if rule_family not in all_matches:
all_matches.append(rule_family)
for rule_family in all_matches:
rules_info_txt = requests.get('{}/family/{}'.format(self.baseurl, rule_family),
auth=HTTPBasicAuth(self.user, self.pwd))
rules_info_json = json.loads(rules_info_txt.text)
result.append({
'family': rule_family,
'common_name': rules_info_json['common_name'],
'description': rules_info_json['description'],
'attribution': rules_info_json['attribution'],
'alt_names': rules_info_json['alt_names'],
'urls': rules_info_json['urls']
})
return result | [
"def",
"check",
"(",
"self",
",",
"file",
")",
":",
"result",
"=",
"[",
"]",
"all_matches",
"=",
"[",
"]",
"for",
"filerules",
"in",
"os",
".",
"listdir",
"(",
"self",
".",
"rulepaths",
")",
":",
"try",
":",
"rule",
"=",
"yara",
".",
"compile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"rulepaths",
",",
"filerules",
")",
")",
"except",
"yara",
".",
"SyntaxError",
":",
"continue",
"matches",
"=",
"rule",
".",
"match",
"(",
"file",
")",
"if",
"len",
"(",
"matches",
")",
">",
"0",
":",
"for",
"rulem",
"in",
"matches",
":",
"rule_family",
"=",
"\"_\"",
".",
"join",
"(",
"[",
"x",
"for",
"x",
"in",
"rulem",
".",
"rule",
".",
"replace",
"(",
"\"_\"",
",",
"\".\"",
",",
"1",
")",
".",
"split",
"(",
"\"_\"",
")",
"[",
":",
"-",
"1",
"]",
"]",
")",
"if",
"rule_family",
"not",
"in",
"all_matches",
":",
"all_matches",
".",
"append",
"(",
"rule_family",
")",
"for",
"rule_family",
"in",
"all_matches",
":",
"rules_info_txt",
"=",
"requests",
".",
"get",
"(",
"'{}/family/{}'",
".",
"format",
"(",
"self",
".",
"baseurl",
",",
"rule_family",
")",
",",
"auth",
"=",
"HTTPBasicAuth",
"(",
"self",
".",
"user",
",",
"self",
".",
"pwd",
")",
")",
"rules_info_json",
"=",
"json",
".",
"loads",
"(",
"rules_info_txt",
".",
"text",
")",
"result",
".",
"append",
"(",
"{",
"'family'",
":",
"rule_family",
",",
"'common_name'",
":",
"rules_info_json",
"[",
"'common_name'",
"]",
",",
"'description'",
":",
"rules_info_json",
"[",
"'description'",
"]",
",",
"'attribution'",
":",
"rules_info_json",
"[",
"'attribution'",
"]",
",",
"'alt_names'",
":",
"rules_info_json",
"[",
"'alt_names'",
"]",
",",
"'urls'",
":",
"rules_info_json",
"[",
"'urls'",
"]",
"}",
")",
"return",
"result"
] | Checks a given file against all available yara rules
:param file: Path to file
:type file:str
:returns: Python list with matched rules info
:rtype: list | [
"Checks",
"a",
"given",
"file",
"against",
"all",
"available",
"yara",
"rules",
":",
"param",
"file",
":",
"Path",
"to",
"file",
":",
"type",
"file",
":",
"str",
":",
"returns",
":",
"Python",
"list",
"with",
"matched",
"rules",
"info",
":",
"rtype",
":",
"list"
] | python | train |
wummel/patool | patoolib/programs/rzip.py | https://github.com/wummel/patool/blob/d7e64d9fd60faaa4b3f824bd97c43ce59b185c40/patoolib/programs/rzip.py#L19-L26 | def extract_rzip (archive, compression, cmd, verbosity, interactive, outdir):
"""Extract an RZIP archive."""
cmdlist = [cmd, '-d', '-k']
if verbosity > 1:
cmdlist.append('-v')
outfile = util.get_single_outfile(outdir, archive)
cmdlist.extend(["-o", outfile, archive])
return cmdlist | [
"def",
"extract_rzip",
"(",
"archive",
",",
"compression",
",",
"cmd",
",",
"verbosity",
",",
"interactive",
",",
"outdir",
")",
":",
"cmdlist",
"=",
"[",
"cmd",
",",
"'-d'",
",",
"'-k'",
"]",
"if",
"verbosity",
">",
"1",
":",
"cmdlist",
".",
"append",
"(",
"'-v'",
")",
"outfile",
"=",
"util",
".",
"get_single_outfile",
"(",
"outdir",
",",
"archive",
")",
"cmdlist",
".",
"extend",
"(",
"[",
"\"-o\"",
",",
"outfile",
",",
"archive",
"]",
")",
"return",
"cmdlist"
] | Extract an RZIP archive. | [
"Extract",
"an",
"RZIP",
"archive",
"."
] | python | train |
aboSamoor/polyglot | polyglot/__main__.py | https://github.com/aboSamoor/polyglot/blob/d0d2aa8d06cec4e03bd96618ae960030f7069a17/polyglot/__main__.py#L68-L76 | def transliterate(args):
"""Transliterate words according to the target language."""
t = Transliterator(source_lang=args.lang,
target_lang=args.target)
for l in args.input:
words = l.strip().split()
line_annotations = [u"{:<16}{:<16}".format(w, t.transliterate(w)) for w in words]
_print(u"\n".join(line_annotations))
_print(u"") | [
"def",
"transliterate",
"(",
"args",
")",
":",
"t",
"=",
"Transliterator",
"(",
"source_lang",
"=",
"args",
".",
"lang",
",",
"target_lang",
"=",
"args",
".",
"target",
")",
"for",
"l",
"in",
"args",
".",
"input",
":",
"words",
"=",
"l",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"line_annotations",
"=",
"[",
"u\"{:<16}{:<16}\"",
".",
"format",
"(",
"w",
",",
"t",
".",
"transliterate",
"(",
"w",
")",
")",
"for",
"w",
"in",
"words",
"]",
"_print",
"(",
"u\"\\n\"",
".",
"join",
"(",
"line_annotations",
")",
")",
"_print",
"(",
"u\"\"",
")"
] | Transliterate words according to the target language. | [
"Transliterate",
"words",
"according",
"to",
"the",
"target",
"language",
"."
] | python | train |
ArchiveTeam/wpull | wpull/network/pool.py | https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/network/pool.py#L251-L272 | def session(self, host: str, port: int, use_ssl: bool=False):
'''Return a context manager that returns a connection.
Usage::
session = yield from connection_pool.session('example.com', 80)
with session as connection:
connection.write(b'blah')
connection.close()
Coroutine.
'''
connection = yield from self.acquire(host, port, use_ssl)
@contextlib.contextmanager
def context_wrapper():
try:
yield connection
finally:
self.no_wait_release(connection)
return context_wrapper() | [
"def",
"session",
"(",
"self",
",",
"host",
":",
"str",
",",
"port",
":",
"int",
",",
"use_ssl",
":",
"bool",
"=",
"False",
")",
":",
"connection",
"=",
"yield",
"from",
"self",
".",
"acquire",
"(",
"host",
",",
"port",
",",
"use_ssl",
")",
"@",
"contextlib",
".",
"contextmanager",
"def",
"context_wrapper",
"(",
")",
":",
"try",
":",
"yield",
"connection",
"finally",
":",
"self",
".",
"no_wait_release",
"(",
"connection",
")",
"return",
"context_wrapper",
"(",
")"
] | Return a context manager that returns a connection.
Usage::
session = yield from connection_pool.session('example.com', 80)
with session as connection:
connection.write(b'blah')
connection.close()
Coroutine. | [
"Return",
"a",
"context",
"manager",
"that",
"returns",
"a",
"connection",
"."
] | python | train |
gholt/swiftly | swiftly/cli/cli.py | https://github.com/gholt/swiftly/blob/5bcc1c65323b1caf1f85adbefd9fc4988c072149/swiftly/cli/cli.py#L433-L458 | def _resolve_option(self, options, option_name, section_name):
"""Resolves an option value into options.
Sets options.<option_name> to a resolved value. Any value
already in options overrides a value in os.environ which
overrides self.context.conf.
:param options: The options instance as returned by optparse.
:param option_name: The name of the option, such as
``auth_url``.
:param section_name: The name of the section, such as
``swiftly``.
"""
if getattr(options, option_name, None) is not None:
return
if option_name.startswith(section_name + '_'):
environ_name = option_name.upper()
conf_name = option_name[len(section_name) + 1:]
else:
environ_name = (section_name + '_' + option_name).upper()
conf_name = option_name
setattr(
options, option_name,
os.environ.get(
environ_name,
(self.context.conf.get(section_name, {})).get(conf_name))) | [
"def",
"_resolve_option",
"(",
"self",
",",
"options",
",",
"option_name",
",",
"section_name",
")",
":",
"if",
"getattr",
"(",
"options",
",",
"option_name",
",",
"None",
")",
"is",
"not",
"None",
":",
"return",
"if",
"option_name",
".",
"startswith",
"(",
"section_name",
"+",
"'_'",
")",
":",
"environ_name",
"=",
"option_name",
".",
"upper",
"(",
")",
"conf_name",
"=",
"option_name",
"[",
"len",
"(",
"section_name",
")",
"+",
"1",
":",
"]",
"else",
":",
"environ_name",
"=",
"(",
"section_name",
"+",
"'_'",
"+",
"option_name",
")",
".",
"upper",
"(",
")",
"conf_name",
"=",
"option_name",
"setattr",
"(",
"options",
",",
"option_name",
",",
"os",
".",
"environ",
".",
"get",
"(",
"environ_name",
",",
"(",
"self",
".",
"context",
".",
"conf",
".",
"get",
"(",
"section_name",
",",
"{",
"}",
")",
")",
".",
"get",
"(",
"conf_name",
")",
")",
")"
] | Resolves an option value into options.
Sets options.<option_name> to a resolved value. Any value
already in options overrides a value in os.environ which
overrides self.context.conf.
:param options: The options instance as returned by optparse.
:param option_name: The name of the option, such as
``auth_url``.
:param section_name: The name of the section, such as
``swiftly``. | [
"Resolves",
"an",
"option",
"value",
"into",
"options",
"."
] | python | test |
campbellr/smashrun-client | smashrun/client.py | https://github.com/campbellr/smashrun-client/blob/2522cb4d0545cf482a49a9533f12aac94c5aecdc/smashrun/client.py#L129-L138 | def get_splits(self, id_num, unit='mi'):
"""Return the splits of the activity with the given id.
:param unit: The unit to use for splits. May be one of
'mi' or 'km'.
"""
url = self._build_url('my', 'activities', id_num, 'splits', unit)
return self._json(url) | [
"def",
"get_splits",
"(",
"self",
",",
"id_num",
",",
"unit",
"=",
"'mi'",
")",
":",
"url",
"=",
"self",
".",
"_build_url",
"(",
"'my'",
",",
"'activities'",
",",
"id_num",
",",
"'splits'",
",",
"unit",
")",
"return",
"self",
".",
"_json",
"(",
"url",
")"
] | Return the splits of the activity with the given id.
:param unit: The unit to use for splits. May be one of
'mi' or 'km'. | [
"Return",
"the",
"splits",
"of",
"the",
"activity",
"with",
"the",
"given",
"id",
"."
] | python | train |
vvangelovski/django-audit-log | audit_log/models/managers.py | https://github.com/vvangelovski/django-audit-log/blob/f1bee75360a67390fbef67c110e9a245b41ebb92/audit_log/models/managers.py#L128-L186 | def copy_fields(self, model):
"""
Creates copies of the fields we are keeping
track of for the provided model, returning a
dictionary mapping field name to a copied field object.
"""
fields = {'__module__' : model.__module__}
for field in model._meta.fields:
if not field.name in self._exclude:
field = copy.deepcopy(field)
if isinstance(field, models.AutoField):
#we replace the AutoField of the original model
#with an IntegerField because a model can
#have only one autofield.
field.__class__ = models.IntegerField
if field.primary_key:
field.serialize = True
#OneToOne fields should really be tracked
#as ForeignKey fields
if isinstance(field, models.OneToOneField):
field.__class__ = models.ForeignKey
if field.primary_key or field.unique:
#unique fields of the original model
#can not be guaranteed to be unique
#in the audit log entry but they
#should still be indexed for faster lookups.
field.primary_key = False
field._unique = False
field.db_index = True
if field.remote_field and field.remote_field.related_name:
field.remote_field.related_name = '_auditlog_{}_{}'.format(
model._meta.model_name,
field.remote_field.related_name
)
elif field.remote_field:
try:
if field.remote_field.get_accessor_name():
field.remote_field.related_name = '_auditlog_{}_{}'.format(
model._meta.model_name,
field.remote_field.get_accessor_name()
)
                except Exception:
pass
fields[field.name] = field
return fields | [
"def",
"copy_fields",
"(",
"self",
",",
"model",
")",
":",
"fields",
"=",
"{",
"'__module__'",
":",
"model",
".",
"__module__",
"}",
"for",
"field",
"in",
"model",
".",
"_meta",
".",
"fields",
":",
"if",
"not",
"field",
".",
"name",
"in",
"self",
".",
"_exclude",
":",
"field",
"=",
"copy",
".",
"deepcopy",
"(",
"field",
")",
"if",
"isinstance",
"(",
"field",
",",
"models",
".",
"AutoField",
")",
":",
"#we replace the AutoField of the original model",
"#with an IntegerField because a model can",
"#have only one autofield.",
"field",
".",
"__class__",
"=",
"models",
".",
"IntegerField",
"if",
"field",
".",
"primary_key",
":",
"field",
".",
"serialize",
"=",
"True",
"#OneToOne fields should really be tracked",
"#as ForeignKey fields",
"if",
"isinstance",
"(",
"field",
",",
"models",
".",
"OneToOneField",
")",
":",
"field",
".",
"__class__",
"=",
"models",
".",
"ForeignKey",
"if",
"field",
".",
"primary_key",
"or",
"field",
".",
"unique",
":",
"#unique fields of the original model",
"#can not be guaranteed to be unique",
"#in the audit log entry but they",
"#should still be indexed for faster lookups.",
"field",
".",
"primary_key",
"=",
"False",
"field",
".",
"_unique",
"=",
"False",
"field",
".",
"db_index",
"=",
"True",
"if",
"field",
".",
"remote_field",
"and",
"field",
".",
"remote_field",
".",
"related_name",
":",
"field",
".",
"remote_field",
".",
"related_name",
"=",
"'_auditlog_{}_{}'",
".",
"format",
"(",
"model",
".",
"_meta",
".",
"model_name",
",",
"field",
".",
"remote_field",
".",
"related_name",
")",
"elif",
"field",
".",
"remote_field",
":",
"try",
":",
"if",
"field",
".",
"remote_field",
".",
"get_accessor_name",
"(",
")",
":",
"field",
".",
"remote_field",
".",
"related_name",
"=",
"'_auditlog_{}_{}'",
".",
"format",
"(",
"model",
".",
"_meta",
".",
"model_name",
",",
"field",
".",
"remote_field",
".",
"get_accessor_name",
"(",
")",
")",
"except",
"e",
":",
"pass",
"fields",
"[",
"field",
".",
"name",
"]",
"=",
"field",
"return",
"fields"
] | Creates copies of the fields we are keeping
track of for the provided model, returning a
dictionary mapping field name to a copied field object. | [
"Creates",
"copies",
"of",
"the",
"fields",
"we",
"are",
"keeping",
"track",
"of",
"for",
"the",
"provided",
"model",
"returning",
"a",
"dictionary",
"mapping",
"field",
"name",
"to",
"a",
"copied",
"field",
"object",
"."
] | python | train |
skyfielders/python-skyfield | skyfield/precessionlib.py | https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/precessionlib.py#L5-L69 | def compute_precession(jd_tdb):
"""Return the rotation matrices for precessing to an array of epochs.
`jd_tdb` - array of TDB Julian dates
The array returned has the shape `(3, 3, n)` where `n` is the number
of dates that have been provided as input.
"""
eps0 = 84381.406
# 't' is time in TDB centuries.
t = (jd_tdb - T0) / 36525.0
# Numerical coefficients of psi_a, omega_a, and chi_a, along with
# epsilon_0, the obliquity at J2000.0, are 4-angle formulation from
# Capitaine et al. (2003), eqs. (4), (37), & (39).
psia = ((((- 0.0000000951 * t
+ 0.000132851 ) * t
- 0.00114045 ) * t
- 1.0790069 ) * t
+ 5038.481507 ) * t
omegaa = ((((+ 0.0000003337 * t
- 0.000000467 ) * t
- 0.00772503 ) * t
+ 0.0512623 ) * t
- 0.025754 ) * t + eps0
chia = ((((- 0.0000000560 * t
+ 0.000170663 ) * t
- 0.00121197 ) * t
- 2.3814292 ) * t
+ 10.556403 ) * t
eps0 = eps0 * ASEC2RAD
psia = psia * ASEC2RAD
omegaa = omegaa * ASEC2RAD
chia = chia * ASEC2RAD
sa = sin(eps0)
ca = cos(eps0)
sb = sin(-psia)
cb = cos(-psia)
sc = sin(-omegaa)
cc = cos(-omegaa)
sd = sin(chia)
cd = cos(chia)
# Compute elements of precession rotation matrix equivalent to
# R3(chi_a) R1(-omega_a) R3(-psi_a) R1(epsilon_0).
rot3 = array(((cd * cb - sb * sd * cc,
cd * sb * ca + sd * cc * cb * ca - sa * sd * sc,
cd * sb * sa + sd * cc * cb * sa + ca * sd * sc),
(-sd * cb - sb * cd * cc,
-sd * sb * ca + cd * cc * cb * ca - sa * cd * sc,
-sd * sb * sa + cd * cc * cb * sa + ca * cd * sc),
(sb * sc,
-sc * cb * ca - sa * cc,
-sc * cb * sa + cc * ca)))
return rot3 | [
"def",
"compute_precession",
"(",
"jd_tdb",
")",
":",
"eps0",
"=",
"84381.406",
"# 't' is time in TDB centuries.",
"t",
"=",
"(",
"jd_tdb",
"-",
"T0",
")",
"/",
"36525.0",
"# Numerical coefficients of psi_a, omega_a, and chi_a, along with",
"# epsilon_0, the obliquity at J2000.0, are 4-angle formulation from",
"# Capitaine et al. (2003), eqs. (4), (37), & (39).",
"psia",
"=",
"(",
"(",
"(",
"(",
"-",
"0.0000000951",
"*",
"t",
"+",
"0.000132851",
")",
"*",
"t",
"-",
"0.00114045",
")",
"*",
"t",
"-",
"1.0790069",
")",
"*",
"t",
"+",
"5038.481507",
")",
"*",
"t",
"omegaa",
"=",
"(",
"(",
"(",
"(",
"+",
"0.0000003337",
"*",
"t",
"-",
"0.000000467",
")",
"*",
"t",
"-",
"0.00772503",
")",
"*",
"t",
"+",
"0.0512623",
")",
"*",
"t",
"-",
"0.025754",
")",
"*",
"t",
"+",
"eps0",
"chia",
"=",
"(",
"(",
"(",
"(",
"-",
"0.0000000560",
"*",
"t",
"+",
"0.000170663",
")",
"*",
"t",
"-",
"0.00121197",
")",
"*",
"t",
"-",
"2.3814292",
")",
"*",
"t",
"+",
"10.556403",
")",
"*",
"t",
"eps0",
"=",
"eps0",
"*",
"ASEC2RAD",
"psia",
"=",
"psia",
"*",
"ASEC2RAD",
"omegaa",
"=",
"omegaa",
"*",
"ASEC2RAD",
"chia",
"=",
"chia",
"*",
"ASEC2RAD",
"sa",
"=",
"sin",
"(",
"eps0",
")",
"ca",
"=",
"cos",
"(",
"eps0",
")",
"sb",
"=",
"sin",
"(",
"-",
"psia",
")",
"cb",
"=",
"cos",
"(",
"-",
"psia",
")",
"sc",
"=",
"sin",
"(",
"-",
"omegaa",
")",
"cc",
"=",
"cos",
"(",
"-",
"omegaa",
")",
"sd",
"=",
"sin",
"(",
"chia",
")",
"cd",
"=",
"cos",
"(",
"chia",
")",
"# Compute elements of precession rotation matrix equivalent to",
"# R3(chi_a) R1(-omega_a) R3(-psi_a) R1(epsilon_0).",
"rot3",
"=",
"array",
"(",
"(",
"(",
"cd",
"*",
"cb",
"-",
"sb",
"*",
"sd",
"*",
"cc",
",",
"cd",
"*",
"sb",
"*",
"ca",
"+",
"sd",
"*",
"cc",
"*",
"cb",
"*",
"ca",
"-",
"sa",
"*",
"sd",
"*",
"sc",
",",
"cd",
"*",
"sb",
"*",
"sa",
"+",
"sd",
"*",
"cc",
"*",
"cb",
"*",
"sa",
"+",
"ca",
"*",
"sd",
"*",
"sc",
")",
",",
"(",
"-",
"sd",
"*",
"cb",
"-",
"sb",
"*",
"cd",
"*",
"cc",
",",
"-",
"sd",
"*",
"sb",
"*",
"ca",
"+",
"cd",
"*",
"cc",
"*",
"cb",
"*",
"ca",
"-",
"sa",
"*",
"cd",
"*",
"sc",
",",
"-",
"sd",
"*",
"sb",
"*",
"sa",
"+",
"cd",
"*",
"cc",
"*",
"cb",
"*",
"sa",
"+",
"ca",
"*",
"cd",
"*",
"sc",
")",
",",
"(",
"sb",
"*",
"sc",
",",
"-",
"sc",
"*",
"cb",
"*",
"ca",
"-",
"sa",
"*",
"cc",
",",
"-",
"sc",
"*",
"cb",
"*",
"sa",
"+",
"cc",
"*",
"ca",
")",
")",
")",
"return",
"rot3"
] | Return the rotation matrices for precessing to an array of epochs.
`jd_tdb` - array of TDB Julian dates
The array returned has the shape `(3, 3, n)` where `n` is the number
of dates that have been provided as input. | [
"Return",
"the",
"rotation",
"matrices",
"for",
"precessing",
"to",
"an",
"array",
"of",
"epochs",
"."
] | python | train |
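A minimal usage sketch for compute_precession above. The import path follows the record's path field and the Julian dates are arbitrary example values, so treat both as assumptions rather than skyfield documentation:

    import numpy as np
    from skyfield.precessionlib import compute_precession

    jd_tdb = np.array([2451545.0, 2455197.5, 2458849.5])  # example TDB Julian dates
    rot = compute_precession(jd_tdb)
    print(rot.shape)  # (3, 3, 3): one 3x3 precession rotation matrix per input date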
frictionlessdata/datapackage-py | datapackage/package.py | https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/package.py#L396-L405 | def iter_errors(self):
""""Lazily yields each ValidationError for the received data dict.
"""
# Deprecate
warnings.warn(
'Property "package.iter_errors" is deprecated.',
UserWarning)
return self.profile.iter_errors(self.to_dict()) | [
"def",
"iter_errors",
"(",
"self",
")",
":",
"# Deprecate",
"warnings",
".",
"warn",
"(",
"'Property \"package.iter_errors\" is deprecated.'",
",",
"UserWarning",
")",
"return",
"self",
".",
"profile",
".",
"iter_errors",
"(",
"self",
".",
"to_dict",
"(",
")",
")"
] | Lazily yields each ValidationError for the received data dict. | [
"Lazily",
"yields",
"each",
"ValidationError",
"for",
"the",
"received",
"data",
"dict",
"."
] | python | valid |
fprimex/zdesk | zdesk/zdesk_api.py | https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L2403-L2407 | def nps_survey_show(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/nps-api/surveys#show-survey"
api_path = "/api/v2/nps/surveys/{id}"
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs) | [
"def",
"nps_survey_show",
"(",
"self",
",",
"id",
",",
"*",
"*",
"kwargs",
")",
":",
"api_path",
"=",
"\"/api/v2/nps/surveys/{id}\"",
"api_path",
"=",
"api_path",
".",
"format",
"(",
"id",
"=",
"id",
")",
"return",
"self",
".",
"call",
"(",
"api_path",
",",
"*",
"*",
"kwargs",
")"
] | https://developer.zendesk.com/rest_api/docs/nps-api/surveys#show-survey | [
"https",
":",
"//",
"developer",
".",
"zendesk",
".",
"com",
"/",
"rest_api",
"/",
"docs",
"/",
"nps",
"-",
"api",
"/",
"surveys#show",
"-",
"survey"
] | python | train |
portfoliome/postpy | postpy/admin.py | https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/admin.py#L24-L44 | def get_primary_keys(conn, table: str, schema='public'):
"""Returns primary key columns for a specific table."""
query = """\
SELECT
c.constraint_name AS pkey_constraint_name,
c.column_name AS column_name
FROM
information_schema.key_column_usage AS c
JOIN information_schema.table_constraints AS t
ON t.constraint_name = c.constraint_name
AND t.table_catalog = c.table_catalog
AND t.table_schema = c.table_schema
AND t.table_name = c.table_name
WHERE t.constraint_type = 'PRIMARY KEY'
AND c.table_schema=%s
AND c.table_name=%s
ORDER BY c.ordinal_position"""
for record in select_dict(conn, query, params=(schema, table)):
yield record['column_name'] | [
"def",
"get_primary_keys",
"(",
"conn",
",",
"table",
":",
"str",
",",
"schema",
"=",
"'public'",
")",
":",
"query",
"=",
"\"\"\"\\\nSELECT\n c.constraint_name AS pkey_constraint_name,\n c.column_name AS column_name\nFROM\n information_schema.key_column_usage AS c\n JOIN information_schema.table_constraints AS t\n ON t.constraint_name = c.constraint_name\n AND t.table_catalog = c.table_catalog\n AND t.table_schema = c.table_schema\n AND t.table_name = c.table_name\nWHERE t.constraint_type = 'PRIMARY KEY'\n AND c.table_schema=%s\n AND c.table_name=%s\nORDER BY c.ordinal_position\"\"\"",
"for",
"record",
"in",
"select_dict",
"(",
"conn",
",",
"query",
",",
"params",
"=",
"(",
"schema",
",",
"table",
")",
")",
":",
"yield",
"record",
"[",
"'column_name'",
"]"
] | Returns primary key columns for a specific table. | [
"Returns",
"primary",
"key",
"columns",
"for",
"a",
"specific",
"table",
"."
] | python | train |
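A hedged usage sketch for get_primary_keys above. The psycopg2 connection string and the table name are hypothetical; any connection object accepted by postpy's select_dict helper should work:

    import psycopg2
    from postpy.admin import get_primary_keys  # import path as given in the record

    conn = psycopg2.connect("dbname=mydb user=me")  # hypothetical credentials
    pkey_columns = list(get_primary_keys(conn, "orders", schema="public"))
    # the generator yields primary key column names in ordinal position order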
vanheeringen-lab/gimmemotifs | gimmemotifs/scanner.py | https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/scanner.py#L515-L522 | def count(self, seqs, nreport=100, scan_rc=True):
"""
count the number of matches above the cutoff
returns an iterator of lists containing integer counts
"""
for matches in self.scan(seqs, nreport, scan_rc):
counts = [len(m) for m in matches]
yield counts | [
"def",
"count",
"(",
"self",
",",
"seqs",
",",
"nreport",
"=",
"100",
",",
"scan_rc",
"=",
"True",
")",
":",
"for",
"matches",
"in",
"self",
".",
"scan",
"(",
"seqs",
",",
"nreport",
",",
"scan_rc",
")",
":",
"counts",
"=",
"[",
"len",
"(",
"m",
")",
"for",
"m",
"in",
"matches",
"]",
"yield",
"counts"
] | count the number of matches above the cutoff
returns an iterator of lists containing integer counts | [
"count",
"the",
"number",
"of",
"matches",
"above",
"the",
"cutoff",
"returns",
"an",
"iterator",
"of",
"lists",
"containing",
"integer",
"counts"
] | python | train |
zmathew/django-backbone | backbone/views.py | https://github.com/zmathew/django-backbone/blob/53505a247fb058e64a103c4f11da66993037bd6b/backbone/views.py#L90-L101 | def post(self, request, id=None, **kwargs):
"""
Handles post requests.
"""
if id:
# No posting to an object detail page
return HttpResponseForbidden()
else:
if not self.has_add_permission(request):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.add_object(request) | [
"def",
"post",
"(",
"self",
",",
"request",
",",
"id",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"id",
":",
"# No posting to an object detail page",
"return",
"HttpResponseForbidden",
"(",
")",
"else",
":",
"if",
"not",
"self",
".",
"has_add_permission",
"(",
"request",
")",
":",
"return",
"HttpResponseForbidden",
"(",
"_",
"(",
"'You do not have permission to perform this action.'",
")",
")",
"else",
":",
"return",
"self",
".",
"add_object",
"(",
"request",
")"
] | Handles post requests. | [
"Handles",
"post",
"requests",
"."
] | python | train |
juju/theblues | theblues/charmstore.py | https://github.com/juju/theblues/blob/f4431f29e43d04fc32f38f4f86cea45cd4e6ae98/theblues/charmstore.py#L66-L106 | def _get(self, url):
"""Make a get request against the charmstore.
This method is used by other API methods to standardize querying.
@param url The full url to query
(e.g. https://api.jujucharms.com/charmstore/v4/macaroon)
"""
try:
response = requests.get(url, verify=self.verify,
cookies=self.cookies, timeout=self.timeout,
auth=self._client.auth())
response.raise_for_status()
return response
except HTTPError as exc:
if exc.response.status_code in (404, 407):
raise EntityNotFound(url)
else:
message = ('Error during request: {url} '
'status code:({code}) '
'message: {message}').format(
url=url,
code=exc.response.status_code,
message=exc.response.text)
logging.error(message)
raise ServerError(exc.response.status_code,
exc.response.text,
message)
except Timeout:
message = 'Request timed out: {url} timeout: {timeout}'
message = message.format(url=url, timeout=self.timeout)
logging.error(message)
raise ServerError(message)
except RequestException as exc:
message = ('Error during request: {url} '
'message: {message}').format(
url=url,
message=exc)
logging.error(message)
raise ServerError(exc.args[0][1].errno,
exc.args[0][1].strerror,
message) | [
"def",
"_get",
"(",
"self",
",",
"url",
")",
":",
"try",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"verify",
"=",
"self",
".",
"verify",
",",
"cookies",
"=",
"self",
".",
"cookies",
",",
"timeout",
"=",
"self",
".",
"timeout",
",",
"auth",
"=",
"self",
".",
"_client",
".",
"auth",
"(",
")",
")",
"response",
".",
"raise_for_status",
"(",
")",
"return",
"response",
"except",
"HTTPError",
"as",
"exc",
":",
"if",
"exc",
".",
"response",
".",
"status_code",
"in",
"(",
"404",
",",
"407",
")",
":",
"raise",
"EntityNotFound",
"(",
"url",
")",
"else",
":",
"message",
"=",
"(",
"'Error during request: {url} '",
"'status code:({code}) '",
"'message: {message}'",
")",
".",
"format",
"(",
"url",
"=",
"url",
",",
"code",
"=",
"exc",
".",
"response",
".",
"status_code",
",",
"message",
"=",
"exc",
".",
"response",
".",
"text",
")",
"logging",
".",
"error",
"(",
"message",
")",
"raise",
"ServerError",
"(",
"exc",
".",
"response",
".",
"status_code",
",",
"exc",
".",
"response",
".",
"text",
",",
"message",
")",
"except",
"Timeout",
":",
"message",
"=",
"'Request timed out: {url} timeout: {timeout}'",
"message",
"=",
"message",
".",
"format",
"(",
"url",
"=",
"url",
",",
"timeout",
"=",
"self",
".",
"timeout",
")",
"logging",
".",
"error",
"(",
"message",
")",
"raise",
"ServerError",
"(",
"message",
")",
"except",
"RequestException",
"as",
"exc",
":",
"message",
"=",
"(",
"'Error during request: {url} '",
"'message: {message}'",
")",
".",
"format",
"(",
"url",
"=",
"url",
",",
"message",
"=",
"exc",
")",
"logging",
".",
"error",
"(",
"message",
")",
"raise",
"ServerError",
"(",
"exc",
".",
"args",
"[",
"0",
"]",
"[",
"1",
"]",
".",
"errno",
",",
"exc",
".",
"args",
"[",
"0",
"]",
"[",
"1",
"]",
".",
"strerror",
",",
"message",
")"
] | Make a get request against the charmstore.
This method is used by other API methods to standardize querying.
@param url The full url to query
(e.g. https://api.jujucharms.com/charmstore/v4/macaroon) | [
"Make",
"a",
"get",
"request",
"against",
"the",
"charmstore",
"."
] | python | train |
craffel/mir_eval | mir_eval/chord.py | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L1238-L1290 | def sevenths(reference_labels, estimated_labels):
"""Compare chords along MIREX 'sevenths' rules. Chords with qualities
outside [maj, maj7, 7, min, min7, N] are ignored.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, ref_intervals.min(),
... ref_intervals.max(), mir_eval.chord.NO_CHORD,
... mir_eval.chord.NO_CHORD)
>>> (intervals,
... ref_labels,
... est_labels) = mir_eval.util.merge_labeled_intervals(
... ref_intervals, ref_labels, est_intervals, est_labels)
>>> durations = mir_eval.util.intervals_to_durations(intervals)
>>> comparisons = mir_eval.chord.sevenths(ref_labels, est_labels)
>>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)
Parameters
----------
reference_labels : list, len=n
Reference chord labels to score against.
estimated_labels : list, len=n
Estimated chord labels to score against.
Returns
-------
comparison_scores : np.ndarray, shape=(n,), dtype=float
Comparison scores, in [0.0, 1.0], or -1 if the comparison is out of
gamut.
"""
validate(reference_labels, estimated_labels)
seventh_qualities = ['maj', 'min', 'maj7', '7', 'min7', '']
valid_semitones = np.array([QUALITIES[name] for name in seventh_qualities])
ref_roots, ref_semitones = encode_many(reference_labels, False)[:2]
est_roots, est_semitones = encode_many(estimated_labels, False)[:2]
eq_root = ref_roots == est_roots
eq_semitones = np.all(np.equal(ref_semitones, est_semitones), axis=1)
comparison_scores = (eq_root * eq_semitones).astype(np.float)
# Test for reference chord inclusion
is_valid = np.array([np.all(np.equal(ref_semitones, semitones), axis=1)
for semitones in valid_semitones])
# Drop if NOR
comparison_scores[np.sum(is_valid, axis=0) == 0] = -1
return comparison_scores | [
"def",
"sevenths",
"(",
"reference_labels",
",",
"estimated_labels",
")",
":",
"validate",
"(",
"reference_labels",
",",
"estimated_labels",
")",
"seventh_qualities",
"=",
"[",
"'maj'",
",",
"'min'",
",",
"'maj7'",
",",
"'7'",
",",
"'min7'",
",",
"''",
"]",
"valid_semitones",
"=",
"np",
".",
"array",
"(",
"[",
"QUALITIES",
"[",
"name",
"]",
"for",
"name",
"in",
"seventh_qualities",
"]",
")",
"ref_roots",
",",
"ref_semitones",
"=",
"encode_many",
"(",
"reference_labels",
",",
"False",
")",
"[",
":",
"2",
"]",
"est_roots",
",",
"est_semitones",
"=",
"encode_many",
"(",
"estimated_labels",
",",
"False",
")",
"[",
":",
"2",
"]",
"eq_root",
"=",
"ref_roots",
"==",
"est_roots",
"eq_semitones",
"=",
"np",
".",
"all",
"(",
"np",
".",
"equal",
"(",
"ref_semitones",
",",
"est_semitones",
")",
",",
"axis",
"=",
"1",
")",
"comparison_scores",
"=",
"(",
"eq_root",
"*",
"eq_semitones",
")",
".",
"astype",
"(",
"np",
".",
"float",
")",
"# Test for reference chord inclusion",
"is_valid",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"all",
"(",
"np",
".",
"equal",
"(",
"ref_semitones",
",",
"semitones",
")",
",",
"axis",
"=",
"1",
")",
"for",
"semitones",
"in",
"valid_semitones",
"]",
")",
"# Drop if NOR",
"comparison_scores",
"[",
"np",
".",
"sum",
"(",
"is_valid",
",",
"axis",
"=",
"0",
")",
"==",
"0",
"]",
"=",
"-",
"1",
"return",
"comparison_scores"
] | Compare chords along MIREX 'sevenths' rules. Chords with qualities
outside [maj, maj7, 7, min, min7, N] are ignored.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, ref_intervals.min(),
... ref_intervals.max(), mir_eval.chord.NO_CHORD,
... mir_eval.chord.NO_CHORD)
>>> (intervals,
... ref_labels,
... est_labels) = mir_eval.util.merge_labeled_intervals(
... ref_intervals, ref_labels, est_intervals, est_labels)
>>> durations = mir_eval.util.intervals_to_durations(intervals)
>>> comparisons = mir_eval.chord.sevenths(ref_labels, est_labels)
>>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)
Parameters
----------
reference_labels : list, len=n
Reference chord labels to score against.
estimated_labels : list, len=n
Estimated chord labels to score against.
Returns
-------
comparison_scores : np.ndarray, shape=(n,), dtype=float
Comparison scores, in [0.0, 1.0], or -1 if the comparison is out of
gamut. | [
"Compare",
"chords",
"along",
"MIREX",
"sevenths",
"rules",
".",
"Chords",
"with",
"qualities",
"outside",
"[",
"maj",
"maj7",
"7",
"min",
"min7",
"N",
"]",
"are",
"ignored",
"."
] | python | train |
pjuren/pyokit | src/pyokit/datastruct/read.py | https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/datastruct/read.py#L232-L256 | def merge(self, other, forceMerge=False):
"""
Merge two reads by concatenating their sequence data and their
quality data (<self> first, then <other>); <self> and <other> must have
the same sequence name. A new merged FastqSequence object is returned;
<Self> and <other> are left unaltered.
:param other: the other sequence to merge with self.
    :param forceMerge: force the merge to occur, even if sequence names
don't match. In this case, <self> takes precedence.
:return: A new FastqSequence that represents the merging of <self> and
<other>
:raise: FastqSequenceError if the sequences names do not match, and the
forceMerge parameter is not set.
"""
if self.sequenceName != other.sequenceName and not forceMerge:
raise NGSReadError("cannot merge " + self.sequenceName + " with " +
other.sequenceName + " -- different " +
"sequence names")
name = self.sequenceName
seq = self.sequenceData + other.sequenceData
qual = self.sequenceQual + other.sequenceQual
return NGSReadError(name, seq, qual) | [
"def",
"merge",
"(",
"self",
",",
"other",
",",
"forceMerge",
"=",
"False",
")",
":",
"if",
"self",
".",
"sequenceName",
"!=",
"other",
".",
"sequenceName",
"and",
"not",
"forceMerge",
":",
"raise",
"NGSReadError",
"(",
"\"cannot merge \"",
"+",
"self",
".",
"sequenceName",
"+",
"\" with \"",
"+",
"other",
".",
"sequenceName",
"+",
"\" -- different \"",
"+",
"\"sequence names\"",
")",
"name",
"=",
"self",
".",
"sequenceName",
"seq",
"=",
"self",
".",
"sequenceData",
"+",
"other",
".",
"sequenceData",
"qual",
"=",
"self",
".",
"sequenceQual",
"+",
"other",
".",
"sequenceQual",
"return",
"NGSReadError",
"(",
"name",
",",
"seq",
",",
"qual",
")"
] | Merge two reads by concatenating their sequence data and their
quality data (<self> first, then <other>); <self> and <other> must have
the same sequence name. A new merged FastqSequence object is returned;
<Self> and <other> are left unaltered.
:param other: the other sequence to merge with self.
    :param forceMerge: force the merge to occur, even if sequence names
don't match. In this case, <self> takes precedence.
:return: A new FastqSequence that represents the merging of <self> and
<other>
:raise: FastqSequenceError if the sequences names do not match, and the
forceMerge parameter is not set. | [
"Merge",
"two",
"reads",
"by",
"concatenating",
"their",
"sequence",
"data",
"and",
"their",
"quality",
"data",
"(",
"<self",
">",
"first",
"then",
"<other",
">",
")",
";",
"<self",
">",
"and",
"<other",
">",
"must",
"have",
"the",
"same",
"sequence",
"name",
".",
"A",
"new",
"merged",
"FastqSequence",
"object",
"is",
"returned",
";",
"<Self",
">",
"and",
"<other",
">",
"are",
"left",
"unaltered",
"."
] | python | train |
reingart/pyafipws | utils.py | https://github.com/reingart/pyafipws/blob/ee87cfe4ac12285ab431df5fec257f103042d1ab/utils.py#L566-L614 | def leer(linea, formato, expandir_fechas=False):
"Analiza una linea de texto dado un formato, devuelve un diccionario"
dic = {}
comienzo = 1
for fmt in formato:
clave, longitud, tipo = fmt[0:3]
dec = (len(fmt)>3 and isinstance(fmt[3], int)) and fmt[3] or 2
valor = linea[comienzo-1:comienzo-1+longitud].strip()
try:
if chr(8) in valor or chr(127) in valor or chr(255) in valor:
valor = None # nulo
elif tipo == N:
if valor:
valor = long(valor)
else:
valor = 0
elif tipo == I:
if valor:
try:
if '.' in valor:
valor = float(valor)
else:
valor = valor.strip(" ")
if valor[0] == "-":
sign = -1
valor = valor[1:]
else:
sign = +1
valor = sign * float(("%%s.%%0%sd" % dec) % (long(valor[:-dec] or '0'), int(valor[-dec:] or '0')))
except ValueError:
raise ValueError("Campo invalido: %s = '%s'" % (clave, valor))
else:
valor = 0.00
elif expandir_fechas and clave.lower().startswith("fec") and longitud <= 8:
if valor:
valor = "%s-%s-%s" % (valor[0:4], valor[4:6], valor[6:8])
else:
valor = None
else:
valor = valor.decode("ascii","ignore")
if not valor and clave in dic and len(linea) <= comienzo:
pass # ignorar - compatibilidad hacia atrás (cambios tamaño)
else:
dic[clave] = valor
comienzo += longitud
except Exception, e:
raise ValueError("Error al leer campo %s pos %s val '%s': %s" % (
clave, comienzo, valor, str(e)))
return dic | [
"def",
"leer",
"(",
"linea",
",",
"formato",
",",
"expandir_fechas",
"=",
"False",
")",
":",
"dic",
"=",
"{",
"}",
"comienzo",
"=",
"1",
"for",
"fmt",
"in",
"formato",
":",
"clave",
",",
"longitud",
",",
"tipo",
"=",
"fmt",
"[",
"0",
":",
"3",
"]",
"dec",
"=",
"(",
"len",
"(",
"fmt",
")",
">",
"3",
"and",
"isinstance",
"(",
"fmt",
"[",
"3",
"]",
",",
"int",
")",
")",
"and",
"fmt",
"[",
"3",
"]",
"or",
"2",
"valor",
"=",
"linea",
"[",
"comienzo",
"-",
"1",
":",
"comienzo",
"-",
"1",
"+",
"longitud",
"]",
".",
"strip",
"(",
")",
"try",
":",
"if",
"chr",
"(",
"8",
")",
"in",
"valor",
"or",
"chr",
"(",
"127",
")",
"in",
"valor",
"or",
"chr",
"(",
"255",
")",
"in",
"valor",
":",
"valor",
"=",
"None",
"# nulo",
"elif",
"tipo",
"==",
"N",
":",
"if",
"valor",
":",
"valor",
"=",
"long",
"(",
"valor",
")",
"else",
":",
"valor",
"=",
"0",
"elif",
"tipo",
"==",
"I",
":",
"if",
"valor",
":",
"try",
":",
"if",
"'.'",
"in",
"valor",
":",
"valor",
"=",
"float",
"(",
"valor",
")",
"else",
":",
"valor",
"=",
"valor",
".",
"strip",
"(",
"\" \"",
")",
"if",
"valor",
"[",
"0",
"]",
"==",
"\"-\"",
":",
"sign",
"=",
"-",
"1",
"valor",
"=",
"valor",
"[",
"1",
":",
"]",
"else",
":",
"sign",
"=",
"+",
"1",
"valor",
"=",
"sign",
"*",
"float",
"(",
"(",
"\"%%s.%%0%sd\"",
"%",
"dec",
")",
"%",
"(",
"long",
"(",
"valor",
"[",
":",
"-",
"dec",
"]",
"or",
"'0'",
")",
",",
"int",
"(",
"valor",
"[",
"-",
"dec",
":",
"]",
"or",
"'0'",
")",
")",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"Campo invalido: %s = '%s'\"",
"%",
"(",
"clave",
",",
"valor",
")",
")",
"else",
":",
"valor",
"=",
"0.00",
"elif",
"expandir_fechas",
"and",
"clave",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"\"fec\"",
")",
"and",
"longitud",
"<=",
"8",
":",
"if",
"valor",
":",
"valor",
"=",
"\"%s-%s-%s\"",
"%",
"(",
"valor",
"[",
"0",
":",
"4",
"]",
",",
"valor",
"[",
"4",
":",
"6",
"]",
",",
"valor",
"[",
"6",
":",
"8",
"]",
")",
"else",
":",
"valor",
"=",
"None",
"else",
":",
"valor",
"=",
"valor",
".",
"decode",
"(",
"\"ascii\"",
",",
"\"ignore\"",
")",
"if",
"not",
"valor",
"and",
"clave",
"in",
"dic",
"and",
"len",
"(",
"linea",
")",
"<=",
"comienzo",
":",
"pass",
"# ignorar - compatibilidad hacia atrás (cambios tamaño)",
"else",
":",
"dic",
"[",
"clave",
"]",
"=",
"valor",
"comienzo",
"+=",
"longitud",
"except",
"Exception",
",",
"e",
":",
"raise",
"ValueError",
"(",
"\"Error al leer campo %s pos %s val '%s': %s\"",
"%",
"(",
"clave",
",",
"comienzo",
",",
"valor",
",",
"str",
"(",
"e",
")",
")",
")",
"return",
"dic"
] | Parses a line of text given a format, returns a dictionary | [
    "Parses",
    "a",
    "line",
    "of",
    "text",
    "given",
    "a",
    "format",
    "returns",
    "a",
    "dictionary"
] | python | train |
vertexproject/synapse | synapse/lib/syntax.py | https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/syntax.py#L328-L347 | def parse_cmd_kwlist(text, off=0):
'''
Parse a foo:bar=<valu>[,...] kwarg list into (prop,valu),off
'''
kwlist = []
_, off = nom(text, off, whites)
while off < len(text):
(p, v), off = parse_cmd_kwarg(text, off=off)
kwlist.append((p, v))
_, off = nom(text, off, whites)
if not nextchar(text, off, ','):
break
_, off = nom(text, off, whites)
return kwlist, off | [
"def",
"parse_cmd_kwlist",
"(",
"text",
",",
"off",
"=",
"0",
")",
":",
"kwlist",
"=",
"[",
"]",
"_",
",",
"off",
"=",
"nom",
"(",
"text",
",",
"off",
",",
"whites",
")",
"while",
"off",
"<",
"len",
"(",
"text",
")",
":",
"(",
"p",
",",
"v",
")",
",",
"off",
"=",
"parse_cmd_kwarg",
"(",
"text",
",",
"off",
"=",
"off",
")",
"kwlist",
".",
"append",
"(",
"(",
"p",
",",
"v",
")",
")",
"_",
",",
"off",
"=",
"nom",
"(",
"text",
",",
"off",
",",
"whites",
")",
"if",
"not",
"nextchar",
"(",
"text",
",",
"off",
",",
"','",
")",
":",
"break",
"_",
",",
"off",
"=",
"nom",
"(",
"text",
",",
"off",
",",
"whites",
")",
"return",
"kwlist",
",",
"off"
] | Parse a foo:bar=<valu>[,...] kwarg list into (prop,valu),off | [
"Parse",
"a",
"foo",
":",
"bar",
"=",
"<valu",
">",
"[",
"...",
"]",
"kwarg",
"list",
"into",
"(",
"prop",
"valu",
")",
"off"
] | python | train |
anjishnu/ask-alexa-pykit | examples/twitter/lambda_function.py | https://github.com/anjishnu/ask-alexa-pykit/blob/a47c278ca7a60532bbe1a9b789f6c37e609fea8b/examples/twitter/lambda_function.py#L322-L337 | def next_intent_handler(request):
"""
    Takes care of things whenever the user says 'next'
"""
message = "Sorry, couldn't find anything in your next queue"
end_session = True
if True:
user_queue = twitter_cache.user_queue(request.access_token())
if not user_queue.is_finished():
message = user_queue.read_out_next(MAX_RESPONSE_TWEETS)
if not user_queue.is_finished():
end_session = False
message = message + ". Please, say 'next' if you want me to read out more. "
return alexa.create_response(message=message,
end_session=end_session) | [
"def",
"next_intent_handler",
"(",
"request",
")",
":",
"message",
"=",
"\"Sorry, couldn't find anything in your next queue\"",
"end_session",
"=",
"True",
"if",
"True",
":",
"user_queue",
"=",
"twitter_cache",
".",
"user_queue",
"(",
"request",
".",
"access_token",
"(",
")",
")",
"if",
"not",
"user_queue",
".",
"is_finished",
"(",
")",
":",
"message",
"=",
"user_queue",
".",
"read_out_next",
"(",
"MAX_RESPONSE_TWEETS",
")",
"if",
"not",
"user_queue",
".",
"is_finished",
"(",
")",
":",
"end_session",
"=",
"False",
"message",
"=",
"message",
"+",
"\". Please, say 'next' if you want me to read out more. \"",
"return",
"alexa",
".",
"create_response",
"(",
"message",
"=",
"message",
",",
"end_session",
"=",
"end_session",
")"
] | Takes care of things whenever the user says 'next' | [
"Takes",
"care",
"of",
"things",
"whenver",
"the",
"user",
"says",
"next"
] | python | train |
saltstack/salt | salt/returners/influxdb_return.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/influxdb_return.py#L313-L326 | def get_minions():
'''
Return a list of minions
'''
serv = _get_serv(ret=None)
sql = "select distinct(id) from returns"
data = serv.query(sql)
ret = []
if data:
for jid in data[0]['points']:
ret.append(jid[1])
return ret | [
"def",
"get_minions",
"(",
")",
":",
"serv",
"=",
"_get_serv",
"(",
"ret",
"=",
"None",
")",
"sql",
"=",
"\"select distinct(id) from returns\"",
"data",
"=",
"serv",
".",
"query",
"(",
"sql",
")",
"ret",
"=",
"[",
"]",
"if",
"data",
":",
"for",
"jid",
"in",
"data",
"[",
"0",
"]",
"[",
"'points'",
"]",
":",
"ret",
".",
"append",
"(",
"jid",
"[",
"1",
"]",
")",
"return",
"ret"
] | Return a list of minions | [
"Return",
"a",
"list",
"of",
"minions"
] | python | train |
scanny/python-pptx | pptx/chart/data.py | https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/chart/data.py#L711-L719 | def add_series(self, name, number_format=None):
"""
Return a |BubbleSeriesData| object newly created and added at the end
of this sequence, and having series named *name* and values formatted
with *number_format*.
"""
series_data = BubbleSeriesData(self, name, number_format)
self.append(series_data)
return series_data | [
"def",
"add_series",
"(",
"self",
",",
"name",
",",
"number_format",
"=",
"None",
")",
":",
"series_data",
"=",
"BubbleSeriesData",
"(",
"self",
",",
"name",
",",
"number_format",
")",
"self",
".",
"append",
"(",
"series_data",
")",
"return",
"series_data"
] | Return a |BubbleSeriesData| object newly created and added at the end
of this sequence, and having series named *name* and values formatted
with *number_format*. | [
"Return",
"a",
"|BubbleSeriesData|",
"object",
"newly",
"created",
"and",
"added",
"at",
"the",
"end",
"of",
"this",
"sequence",
"and",
"having",
"series",
"named",
"*",
"name",
"*",
"and",
"values",
"formatted",
"with",
"*",
"number_format",
"*",
"."
] | python | train |
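A short usage sketch for add_series above. It assumes the public BubbleChartData entry point in python-pptx that exposes this sequence, and the data values are invented:

    from pptx.chart.data import BubbleChartData

    chart_data = BubbleChartData()
    series = chart_data.add_series('Series A', number_format='0.0')
    series.add_data_point(1.0, 2.5, 10)  # x, y, bubble size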
lemieuxl/pyplink | pyplink/pyplink.py | https://github.com/lemieuxl/pyplink/blob/31d47c86f589064bda98206314a2d0b20e7fd2f0/pyplink/pyplink.py#L410-L414 | def _write_bed_header(self):
"""Writes the BED first 3 bytes."""
# Writing the first three bytes
final_byte = 1 if self._bed_format == "SNP-major" else 0
self._bed.write(bytearray((108, 27, final_byte))) | [
"def",
"_write_bed_header",
"(",
"self",
")",
":",
"# Writing the first three bytes",
"final_byte",
"=",
"1",
"if",
"self",
".",
"_bed_format",
"==",
"\"SNP-major\"",
"else",
"0",
"self",
".",
"_bed",
".",
"write",
"(",
"bytearray",
"(",
"(",
"108",
",",
"27",
",",
"final_byte",
")",
")",
")"
] | Writes the BED first 3 bytes. | [
"Writes",
"the",
"BED",
"first",
"3",
"bytes",
"."
] | python | train |
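For reference, the three bytes written by _write_bed_header above are the PLINK .bed magic number followed by the storage-mode flag. A tiny standalone check of those values (not pyplink code):

    header = bytearray((108, 27, 1))
    assert header[:2] == b'\x6c\x1b'  # PLINK .bed magic bytes (0x6C, 0x1B)
    assert header[2] == 1             # 1 = SNP-major, 0 = individual-major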
aetros/aetros-cli | aetros/client.py | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/client.py#L496-L537 | def wait_until_queue_empty(self, channels, report=True, clear_end=True):
"""
Waits until all queues of channels are empty.
"""
state = {'message': ''}
self.logger.debug("wait_until_queue_empty: report=%s %s"
% (str(report), str([channel+':'+str(len(self.queues[channel])) for channel in channels]), ))
queues = []
for channel in channels:
queues += self.queues[channel][:]
def print_progress():
if report:
self.logger.debug("all_empty=%s" % (str(all_empty),))
sys.__stderr__.write('\b' * len(state['message']))
sys.__stderr__.write("\033[K")
state['message'] = "%.2f kB/s // %.2fkB of %.2fkB // %.2f%%" \
% (self.bytes_speed / 1024, self.bytes_sent / 1024, self.bytes_total / 1024,
(self.bytes_sent / self.bytes_total * 100) if self.bytes_total else 0)
sys.__stderr__.write(state['message'])
sys.__stderr__.flush()
while True:
all_empty = all(m['_sent'] for m in queues)
print_progress()
if all_empty:
break
time.sleep(0.2)
print_progress()
if report and clear_end:
sys.__stderr__.write('\b' * len(state['message']))
sys.__stderr__.write("\033[K")
sys.__stderr__.flush() | [
"def",
"wait_until_queue_empty",
"(",
"self",
",",
"channels",
",",
"report",
"=",
"True",
",",
"clear_end",
"=",
"True",
")",
":",
"state",
"=",
"{",
"'message'",
":",
"''",
"}",
"self",
".",
"logger",
".",
"debug",
"(",
"\"wait_until_queue_empty: report=%s %s\"",
"%",
"(",
"str",
"(",
"report",
")",
",",
"str",
"(",
"[",
"channel",
"+",
"':'",
"+",
"str",
"(",
"len",
"(",
"self",
".",
"queues",
"[",
"channel",
"]",
")",
")",
"for",
"channel",
"in",
"channels",
"]",
")",
",",
")",
")",
"queues",
"=",
"[",
"]",
"for",
"channel",
"in",
"channels",
":",
"queues",
"+=",
"self",
".",
"queues",
"[",
"channel",
"]",
"[",
":",
"]",
"def",
"print_progress",
"(",
")",
":",
"if",
"report",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"all_empty=%s\"",
"%",
"(",
"str",
"(",
"all_empty",
")",
",",
")",
")",
"sys",
".",
"__stderr__",
".",
"write",
"(",
"'\\b'",
"*",
"len",
"(",
"state",
"[",
"'message'",
"]",
")",
")",
"sys",
".",
"__stderr__",
".",
"write",
"(",
"\"\\033[K\"",
")",
"state",
"[",
"'message'",
"]",
"=",
"\"%.2f kB/s // %.2fkB of %.2fkB // %.2f%%\"",
"%",
"(",
"self",
".",
"bytes_speed",
"/",
"1024",
",",
"self",
".",
"bytes_sent",
"/",
"1024",
",",
"self",
".",
"bytes_total",
"/",
"1024",
",",
"(",
"self",
".",
"bytes_sent",
"/",
"self",
".",
"bytes_total",
"*",
"100",
")",
"if",
"self",
".",
"bytes_total",
"else",
"0",
")",
"sys",
".",
"__stderr__",
".",
"write",
"(",
"state",
"[",
"'message'",
"]",
")",
"sys",
".",
"__stderr__",
".",
"flush",
"(",
")",
"while",
"True",
":",
"all_empty",
"=",
"all",
"(",
"m",
"[",
"'_sent'",
"]",
"for",
"m",
"in",
"queues",
")",
"print_progress",
"(",
")",
"if",
"all_empty",
":",
"break",
"time",
".",
"sleep",
"(",
"0.2",
")",
"print_progress",
"(",
")",
"if",
"report",
"and",
"clear_end",
":",
"sys",
".",
"__stderr__",
".",
"write",
"(",
"'\\b'",
"*",
"len",
"(",
"state",
"[",
"'message'",
"]",
")",
")",
"sys",
".",
"__stderr__",
".",
"write",
"(",
"\"\\033[K\"",
")",
"sys",
".",
"__stderr__",
".",
"flush",
"(",
")"
] | Waits until all queues of channels are empty. | [
"Waits",
"until",
"all",
"queues",
"of",
"channels",
"are",
"empty",
"."
] | python | train |
portfoliome/foil | foil/deserializers.py | https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/deserializers.py#L45-L65 | def json_decoder_hook(dct, str_decoders=STRING_DECODERS,
converters=MappingProxyType(dict())) -> dict:
"""Decoder for parsing typical objects like uuid's and dates."""
for k, v in dct.items():
if k in converters:
parse_func = converters[k]
dct[k] = parse_func(v)
elif isinstance(v, str):
for decode_func in str_decoders:
v = decode_func(v)
if not isinstance(v, str):
break
dct[k] = v
elif isinstance(v, collections.Mapping):
dct[k] = json_decoder_hook(v, str_decoders, converters)
return dct | [
"def",
"json_decoder_hook",
"(",
"dct",
",",
"str_decoders",
"=",
"STRING_DECODERS",
",",
"converters",
"=",
"MappingProxyType",
"(",
"dict",
"(",
")",
")",
")",
"->",
"dict",
":",
"for",
"k",
",",
"v",
"in",
"dct",
".",
"items",
"(",
")",
":",
"if",
"k",
"in",
"converters",
":",
"parse_func",
"=",
"converters",
"[",
"k",
"]",
"dct",
"[",
"k",
"]",
"=",
"parse_func",
"(",
"v",
")",
"elif",
"isinstance",
"(",
"v",
",",
"str",
")",
":",
"for",
"decode_func",
"in",
"str_decoders",
":",
"v",
"=",
"decode_func",
"(",
"v",
")",
"if",
"not",
"isinstance",
"(",
"v",
",",
"str",
")",
":",
"break",
"dct",
"[",
"k",
"]",
"=",
"v",
"elif",
"isinstance",
"(",
"v",
",",
"collections",
".",
"Mapping",
")",
":",
"dct",
"[",
"k",
"]",
"=",
"json_decoder_hook",
"(",
"v",
",",
"str_decoders",
",",
"converters",
")",
"return",
"dct"
] | Decoder for parsing typical objects like uuid's and dates. | [
"Decoder",
"for",
"parsing",
"typical",
"objects",
"like",
"uuid",
"s",
"and",
"dates",
"."
] | python | train |
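A minimal usage sketch for json_decoder_hook above, wiring it into json.loads via object_hook. The converters mapping and the sample payload are made-up examples; the import path follows the record's path field:

    import json
    from functools import partial
    from foil.deserializers import json_decoder_hook

    hook = partial(json_decoder_hook, converters={"count": int})
    data = json.loads('{"count": "7", "outer": {"count": "3"}}', object_hook=hook)
    # object_hook runs on every decoded dict, so both "count" values are parsed to ints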
tanghaibao/jcvi | jcvi/formats/fasta.py | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fasta.py#L2209-L2257 | def tidy(args):
"""
%prog tidy fastafile
Trim terminal Ns, normalize gap sizes and remove small components.
"""
p = OptionParser(tidy.__doc__)
p.add_option("--gapsize", dest="gapsize", default=0, type="int",
help="Set all gaps to the same size [default: %default]")
p.add_option("--minlen", dest="minlen", default=100, type="int",
help="Minimum component size [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
gapsize = opts.gapsize
minlen = opts.minlen
tidyfastafile = fastafile.rsplit(".", 1)[0] + ".tidy.fasta"
fw = must_open(tidyfastafile, "w")
removed = normalized = 0
fasta = Fasta(fastafile, lazy=True)
for name, rec in fasta.iteritems_ordered():
rec.seq = rec.seq.upper()
if minlen:
removed += remove_small_components(rec, minlen)
trim_terminal_Ns(rec)
if gapsize:
normalized += normalize_gaps(rec, gapsize)
if len(rec) == 0:
logging.debug("Drop seq {0}".format(rec.id))
continue
SeqIO.write([rec], fw, "fasta")
# Print statistics
if removed:
logging.debug("Total discarded bases: {0}".format(removed))
if normalized:
logging.debug("Gaps normalized: {0}".format(normalized))
logging.debug("Tidy FASTA written to `{0}`.".format(tidyfastafile))
fw.close()
return tidyfastafile | [
"def",
"tidy",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"tidy",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--gapsize\"",
",",
"dest",
"=",
"\"gapsize\"",
",",
"default",
"=",
"0",
",",
"type",
"=",
"\"int\"",
",",
"help",
"=",
"\"Set all gaps to the same size [default: %default]\"",
")",
"p",
".",
"add_option",
"(",
"\"--minlen\"",
",",
"dest",
"=",
"\"minlen\"",
",",
"default",
"=",
"100",
",",
"type",
"=",
"\"int\"",
",",
"help",
"=",
"\"Minimum component size [default: %default]\"",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
":",
"sys",
".",
"exit",
"(",
"not",
"p",
".",
"print_help",
"(",
")",
")",
"fastafile",
",",
"=",
"args",
"gapsize",
"=",
"opts",
".",
"gapsize",
"minlen",
"=",
"opts",
".",
"minlen",
"tidyfastafile",
"=",
"fastafile",
".",
"rsplit",
"(",
"\".\"",
",",
"1",
")",
"[",
"0",
"]",
"+",
"\".tidy.fasta\"",
"fw",
"=",
"must_open",
"(",
"tidyfastafile",
",",
"\"w\"",
")",
"removed",
"=",
"normalized",
"=",
"0",
"fasta",
"=",
"Fasta",
"(",
"fastafile",
",",
"lazy",
"=",
"True",
")",
"for",
"name",
",",
"rec",
"in",
"fasta",
".",
"iteritems_ordered",
"(",
")",
":",
"rec",
".",
"seq",
"=",
"rec",
".",
"seq",
".",
"upper",
"(",
")",
"if",
"minlen",
":",
"removed",
"+=",
"remove_small_components",
"(",
"rec",
",",
"minlen",
")",
"trim_terminal_Ns",
"(",
"rec",
")",
"if",
"gapsize",
":",
"normalized",
"+=",
"normalize_gaps",
"(",
"rec",
",",
"gapsize",
")",
"if",
"len",
"(",
"rec",
")",
"==",
"0",
":",
"logging",
".",
"debug",
"(",
"\"Drop seq {0}\"",
".",
"format",
"(",
"rec",
".",
"id",
")",
")",
"continue",
"SeqIO",
".",
"write",
"(",
"[",
"rec",
"]",
",",
"fw",
",",
"\"fasta\"",
")",
"# Print statistics",
"if",
"removed",
":",
"logging",
".",
"debug",
"(",
"\"Total discarded bases: {0}\"",
".",
"format",
"(",
"removed",
")",
")",
"if",
"normalized",
":",
"logging",
".",
"debug",
"(",
"\"Gaps normalized: {0}\"",
".",
"format",
"(",
"normalized",
")",
")",
"logging",
".",
"debug",
"(",
"\"Tidy FASTA written to `{0}`.\"",
".",
"format",
"(",
"tidyfastafile",
")",
")",
"fw",
".",
"close",
"(",
")",
"return",
"tidyfastafile"
] | %prog tidy fastafile
Trim terminal Ns, normalize gap sizes and remove small components. | [
"%prog",
"tidy",
"fastafile"
] | python | train |
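Because tidy parses its own argument vector with OptionParser, it can be called from Python as well as from the jcvi command line. A sketch with placeholder file name and option values:

from jcvi.formats.fasta import tidy

# Normalize every internal gap to 100 Ns and drop components shorter than 200 bp.
# The return value is the path of the new file, e.g. "assembly.tidy.fasta".
tidyfastafile = tidy(["assembly.fasta", "--gapsize=100", "--minlen=200"])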
carpedm20/fbchat | fbchat/_client.py | https://github.com/carpedm20/fbchat/blob/f480d68b5773473e6daba7f66075ee30e8d737a8/fbchat/_client.py#L2319-L2332 | def deleteMessages(self, message_ids):
"""
Deletes specified messages
:param message_ids: Message IDs to delete
:return: Whether the request was successful
:raises: FBchatException if request failed
"""
message_ids = require_list(message_ids)
data = dict()
for i, message_id in enumerate(message_ids):
data["message_ids[{}]".format(i)] = message_id
r = self._post(self.req_url.DELETE_MESSAGES, data)
return r.ok | [
"def",
"deleteMessages",
"(",
"self",
",",
"message_ids",
")",
":",
"message_ids",
"=",
"require_list",
"(",
"message_ids",
")",
"data",
"=",
"dict",
"(",
")",
"for",
"i",
",",
"message_id",
"in",
"enumerate",
"(",
"message_ids",
")",
":",
"data",
"[",
"\"message_ids[{}]\"",
".",
"format",
"(",
"i",
")",
"]",
"=",
"message_id",
"r",
"=",
"self",
".",
"_post",
"(",
"self",
".",
"req_url",
".",
"DELETE_MESSAGES",
",",
"data",
")",
"return",
"r",
".",
"ok"
] | Deletes specified messages
:param message_ids: Message IDs to delete
:return: Whether the request was successful
:raises: FBchatException if request failed | [
"Deletes",
"specified",
"messages"
] | python | train |
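A usage sketch for deleteMessages: fetch the latest messages in a thread and remove them from the logged-in account's copy of the conversation. Credentials and the thread id are placeholders.

from fbchat import Client

client = Client("<email>", "<password>")
messages = client.fetchThreadMessages(thread_id="1234567890", limit=2)
if client.deleteMessages([message.uid for message in messages]):
    print("messages deleted")
client.logout()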
szairis/sakmapper | sakmapper/network.py | https://github.com/szairis/sakmapper/blob/ac462fd2674e6aa1aa3b209222d8ac4e9268a790/sakmapper/network.py#L173-L220 | def mapper_graph(df, lens_data=None, lens='pca', resolution=10, gain=0.5, equalize=True, clust='kmeans', stat='db',
max_K=5):
"""
input: N x n_dim image of raw data under lens function, as a dataframe
output: (undirected graph, list of node contents, dictionary of patches)
"""
if lens_data is None:
lens_data = apply_lens(df, lens=lens)
patch_clusterings = {}
counter = 0
patches = covering_patches(lens_data, resolution=resolution, gain=gain, equalize=equalize)
for key, patch in patches.items():
if len(patch) > 0:
patch_clusterings[key] = optimal_clustering(df, patch, method=clust, statistic=stat, max_K=max_K)
counter += 1
print 'total of {} patches required clustering'.format(counter)
all_clusters = []
for key in patch_clusterings:
all_clusters += patch_clusterings[key]
num_nodes = len(all_clusters)
print 'this implies {} nodes in the mapper graph'.format(num_nodes)
A = np.zeros((num_nodes, num_nodes))
for i in range(num_nodes):
for j in range(i):
overlap = set(all_clusters[i]).intersection(set(all_clusters[j]))
if len(overlap) > 0:
A[i, j] = 1
A[j, i] = 1
G = nx.from_numpy_matrix(A)
total = []
all_clusters_new = []
mapping = {}
cont = 0
for m in all_clusters:
total += m
for n, m in enumerate(all_clusters):
if len(m) == 1 and total.count(m) > 1:
G.remove_node(n)
else:
all_clusters_new.append(m)
mapping[n] = cont
cont += 1
H = nx.relabel_nodes(G, mapping)
return H, all_clusters_new, patches | [
"def",
"mapper_graph",
"(",
"df",
",",
"lens_data",
"=",
"None",
",",
"lens",
"=",
"'pca'",
",",
"resolution",
"=",
"10",
",",
"gain",
"=",
"0.5",
",",
"equalize",
"=",
"True",
",",
"clust",
"=",
"'kmeans'",
",",
"stat",
"=",
"'db'",
",",
"max_K",
"=",
"5",
")",
":",
"if",
"lens_data",
"is",
"None",
":",
"lens_data",
"=",
"apply_lens",
"(",
"df",
",",
"lens",
"=",
"lens",
")",
"patch_clusterings",
"=",
"{",
"}",
"counter",
"=",
"0",
"patches",
"=",
"covering_patches",
"(",
"lens_data",
",",
"resolution",
"=",
"resolution",
",",
"gain",
"=",
"gain",
",",
"equalize",
"=",
"equalize",
")",
"for",
"key",
",",
"patch",
"in",
"patches",
".",
"items",
"(",
")",
":",
"if",
"len",
"(",
"patch",
")",
">",
"0",
":",
"patch_clusterings",
"[",
"key",
"]",
"=",
"optimal_clustering",
"(",
"df",
",",
"patch",
",",
"method",
"=",
"clust",
",",
"statistic",
"=",
"stat",
",",
"max_K",
"=",
"max_K",
")",
"counter",
"+=",
"1",
"print",
"'total of {} patches required clustering'",
".",
"format",
"(",
"counter",
")",
"all_clusters",
"=",
"[",
"]",
"for",
"key",
"in",
"patch_clusterings",
":",
"all_clusters",
"+=",
"patch_clusterings",
"[",
"key",
"]",
"num_nodes",
"=",
"len",
"(",
"all_clusters",
")",
"print",
"'this implies {} nodes in the mapper graph'",
".",
"format",
"(",
"num_nodes",
")",
"A",
"=",
"np",
".",
"zeros",
"(",
"(",
"num_nodes",
",",
"num_nodes",
")",
")",
"for",
"i",
"in",
"range",
"(",
"num_nodes",
")",
":",
"for",
"j",
"in",
"range",
"(",
"i",
")",
":",
"overlap",
"=",
"set",
"(",
"all_clusters",
"[",
"i",
"]",
")",
".",
"intersection",
"(",
"set",
"(",
"all_clusters",
"[",
"j",
"]",
")",
")",
"if",
"len",
"(",
"overlap",
")",
">",
"0",
":",
"A",
"[",
"i",
",",
"j",
"]",
"=",
"1",
"A",
"[",
"j",
",",
"i",
"]",
"=",
"1",
"G",
"=",
"nx",
".",
"from_numpy_matrix",
"(",
"A",
")",
"total",
"=",
"[",
"]",
"all_clusters_new",
"=",
"[",
"]",
"mapping",
"=",
"{",
"}",
"cont",
"=",
"0",
"for",
"m",
"in",
"all_clusters",
":",
"total",
"+=",
"m",
"for",
"n",
",",
"m",
"in",
"enumerate",
"(",
"all_clusters",
")",
":",
"if",
"len",
"(",
"m",
")",
"==",
"1",
"and",
"total",
".",
"count",
"(",
"m",
")",
">",
"1",
":",
"G",
".",
"remove_node",
"(",
"n",
")",
"else",
":",
"all_clusters_new",
".",
"append",
"(",
"m",
")",
"mapping",
"[",
"n",
"]",
"=",
"cont",
"cont",
"+=",
"1",
"H",
"=",
"nx",
".",
"relabel_nodes",
"(",
"G",
",",
"mapping",
")",
"return",
"H",
",",
"all_clusters_new",
",",
"patches"
] | input: N x n_dim image of raw data under lens function, as a dataframe
output: (undirected graph, list of node contents, dictionary of patches) | [
"input",
":",
"N",
"x",
"n_dim",
"image",
"of",
"raw",
"data",
"under",
"lens",
"function",
"as",
"a",
"dataframe",
"output",
":",
"(",
"undirected",
"graph",
"list",
"of",
"node",
"contents",
"dictionary",
"of",
"patches",
")"
] | python | train |
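The bare print statements mark this source as Python 2. A usage sketch, assuming the import path follows the file layout and using a placeholder samples-by-features table:

import pandas as pd
from sakmapper.network import mapper_graph  # assumed import path

# Rows are samples, columns are features; the CSV name is a placeholder.
df = pd.read_csv("expression.csv", index_col=0)
graph, node_contents, patches = mapper_graph(df, lens='pca', resolution=15, gain=0.5)
# graph is a networkx Graph; node_contents[i] lists the samples grouped into node i.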
KnowledgeLinks/rdfframework | rdfframework/search/esmappings.py | https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/search/esmappings.py#L71-L88 | def list_indexes(cls):
"""
Returns a dictionary with the key as the es_index name and the
object is a list of rdfclasses for that index
args:
None
"""
cls_list = cls.list_mapped_classes()
rtn_obj = {}
for key, value in cls_list.items():
idx = value.es_defs.get('kds_esIndex')[0]
try:
rtn_obj[idx].append(value)
except KeyError:
rtn_obj[idx] = [value]
return rtn_obj | [
"def",
"list_indexes",
"(",
"cls",
")",
":",
"cls_list",
"=",
"cls",
".",
"list_mapped_classes",
"(",
")",
"rtn_obj",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"cls_list",
".",
"items",
"(",
")",
":",
"idx",
"=",
"value",
".",
"es_defs",
".",
"get",
"(",
"'kds_esIndex'",
")",
"[",
"0",
"]",
"try",
":",
"rtn_obj",
"[",
"idx",
"]",
".",
"append",
"(",
"value",
")",
"except",
"KeyError",
":",
"rtn_obj",
"[",
"idx",
"]",
"=",
"[",
"value",
"]",
"return",
"rtn_obj"
] | Returns a dictionary with the key as the es_index name and the
object is a list of rdfclasses for that index
args:
None | [
"Returns",
"a",
"dictionary",
"with",
"the",
"key",
"as",
"the",
"es_index",
"name",
"and",
"the",
"object",
"is",
"a",
"list",
"of",
"rdfclasses",
"for",
"that",
"index",
"args",
":",
"None"
] | python | train |
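A usage sketch for list_indexes; the class name EsMappings and the import path are assumptions inferred from the module name esmappings.py.

from rdfframework.search.esmappings import EsMappings  # assumed class and path

# Group every mapped RDF class by the Elasticsearch index it is bound to.
for index_name, rdf_classes in EsMappings.list_indexes().items():
    print(index_name, [rdf_cls.__name__ for rdf_cls in rdf_classes])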